From c814bf958926ff45a9c1e899bd001006ab6cfbae Mon Sep 17 00:00:00 2001 From: ye xingchen Date: Tue, 16 Aug 2022 10:51:06 +0000 Subject: [PATCH 001/214] powerpc/selftests: Use timersub() for gettimeofday() Use timersub() function to simplify the code. Reported-by: Zeal Robot Signed-off-by: ye xingchen Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220816105106.82666-1-ye.xingchen@zte.com.cn --- tools/testing/selftests/powerpc/benchmarks/gettimeofday.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/tools/testing/selftests/powerpc/benchmarks/gettimeofday.c b/tools/testing/selftests/powerpc/benchmarks/gettimeofday.c index 6b415683357b..580fcac0a09f 100644 --- a/tools/testing/selftests/powerpc/benchmarks/gettimeofday.c +++ b/tools/testing/selftests/powerpc/benchmarks/gettimeofday.c @@ -12,7 +12,7 @@ static int test_gettimeofday(void) { int i; - struct timeval tv_start, tv_end; + struct timeval tv_start, tv_end, tv_diff; gettimeofday(&tv_start, NULL); @@ -20,7 +20,9 @@ static int test_gettimeofday(void) gettimeofday(&tv_end, NULL); } - printf("time = %.6f\n", tv_end.tv_sec - tv_start.tv_sec + (tv_end.tv_usec - tv_start.tv_usec) * 1e-6); + timersub(&tv_start, &tv_end, &tv_diff); + + printf("time = %.6f\n", tv_diff.tv_sec + (tv_diff.tv_usec) * 1e-6); return 0; } From 8a8f7866663588b162031a5348c24e42161461cd Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Thu, 18 Aug 2022 19:31:25 +0200 Subject: [PATCH 002/214] powerpc/vdso: Don't map VDSO at a fixed address on PPC32 PPC64 removed default mapping address from VDSO in commit 30d0b3682887 ("powerpc: Move 64bit VDSO to improve context switch performance"). Do like PPC64 and let get_unmapped_area() place the VDSO mapping at the address it wants, don't force a default address. This allows randomisation of VDSO address. Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/cba76f5a5b01fcc49415e632d92c11c1c5998cab.1660843877.git.christophe.leroy@csgroup.eu --- arch/powerpc/include/asm/vdso.h | 3 --- arch/powerpc/kernel/vdso.c | 13 ++----------- 2 files changed, 2 insertions(+), 14 deletions(-) diff --git a/arch/powerpc/include/asm/vdso.h b/arch/powerpc/include/asm/vdso.h index 8542e9bbeead..7650b6ce14c8 100644 --- a/arch/powerpc/include/asm/vdso.h +++ b/arch/powerpc/include/asm/vdso.h @@ -2,9 +2,6 @@ #ifndef _ASM_POWERPC_VDSO_H #define _ASM_POWERPC_VDSO_H -/* Default map addresses for 32bit vDSO */ -#define VDSO32_MBASE 0x100000 - #define VDSO_VERSION_STRING LINUX_2.6.15 #ifndef __ASSEMBLY__ diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c index 0da287544054..bf9574ec26ce 100644 --- a/arch/powerpc/kernel/vdso.c +++ b/arch/powerpc/kernel/vdso.c @@ -200,28 +200,19 @@ static int __arch_setup_additional_pages(struct linux_binprm *bprm, int uses_int if (is_32bit_task()) { vdso_spec = &vdso32_spec; vdso_size = &vdso32_end - &vdso32_start; - vdso_base = VDSO32_MBASE; } else { vdso_spec = &vdso64_spec; vdso_size = &vdso64_end - &vdso64_start; - /* - * On 64bit we don't have a preferred map address. This - * allows get_unmapped_area to find an area near other mmaps - * and most likely share a SLB entry. - */ - vdso_base = 0; } mappings_size = vdso_size + vvar_size; mappings_size += (VDSO_ALIGNMENT - 1) & PAGE_MASK; /* - * pick a base address for the vDSO in process space. We try to put it - * at vdso_base which is the "natural" base for it, but we might fail - * and end up putting it elsewhere. + * Pick a base address for the vDSO in process space. 
* Add enough to the size so that the result can be aligned. */ - vdso_base = get_unmapped_area(NULL, vdso_base, mappings_size, 0, 0); + vdso_base = get_unmapped_area(NULL, 0, mappings_size, 0, 0); if (IS_ERR_VALUE(vdso_base)) return vdso_base; From e01432baf6618296f4d2d557e6b4c3245a5cc3c2 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Fri, 19 Aug 2022 21:04:30 +1000 Subject: [PATCH 003/214] powerpc/boot: Convert more files to use SPDX tags These files are all plain GPL 2.0, with a second sentence about being licensed as-is. Similar to the rule in commit 577b61cee5b2 ("treewide: Replace GPLv2 boilerplate/reference with SPDX - gpl-2.0_398.RULE"). Signed-off-by: Michael Ellerman Reviewed-by: Greg Kroah-Hartman Link: https://lore.kernel.org/r/20220819110430.433984-1-mpe@ellerman.id.au --- arch/powerpc/boot/44x.h | 5 +---- arch/powerpc/boot/4xx.h | 5 +---- arch/powerpc/boot/ops.h | 6 ++---- arch/powerpc/boot/serial.c | 6 ++---- arch/powerpc/boot/simple_alloc.c | 6 ++---- 5 files changed, 8 insertions(+), 20 deletions(-) diff --git a/arch/powerpc/boot/44x.h b/arch/powerpc/boot/44x.h index 02563443788a..9b15e59522d6 100644 --- a/arch/powerpc/boot/44x.h +++ b/arch/powerpc/boot/44x.h @@ -1,11 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* * PowerPC 44x related functions * * Copyright 2007 David Gibson, IBM Corporation. - * - * This file is licensed under the terms of the GNU General Public - * License version 2. This program is licensed "as is" without any - * warranty of any kind, whether express or implied. */ #ifndef _PPC_BOOT_44X_H_ #define _PPC_BOOT_44X_H_ diff --git a/arch/powerpc/boot/4xx.h b/arch/powerpc/boot/4xx.h index 7dc5d45361bc..77f15d124c81 100644 --- a/arch/powerpc/boot/4xx.h +++ b/arch/powerpc/boot/4xx.h @@ -1,12 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* * PowerPC 4xx related functions * * Copyright 2007 IBM Corporation. * Josh Boyer - * - * This file is licensed under the terms of the GNU General Public - * License version 2. This program is licensed "as is" without any - * warranty of any kind, whether express or implied. */ #ifndef _POWERPC_BOOT_4XX_H_ #define _POWERPC_BOOT_4XX_H_ diff --git a/arch/powerpc/boot/ops.h b/arch/powerpc/boot/ops.h index 8334bc3cbe49..a40c2162a4e9 100644 --- a/arch/powerpc/boot/ops.h +++ b/arch/powerpc/boot/ops.h @@ -1,12 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* * Global definition of all the bootwrapper operations. * * Author: Mark A. Greer * - * 2006 (c) MontaVista Software, Inc. This file is licensed under - * the terms of the GNU General Public License version 2. This program - * is licensed "as is" without any warranty of any kind, whether express - * or implied. + * 2006 (c) MontaVista Software, Inc. */ #ifndef _PPC_BOOT_OPS_H_ #define _PPC_BOOT_OPS_H_ diff --git a/arch/powerpc/boot/serial.c b/arch/powerpc/boot/serial.c index 54d2522be485..c6d32a8c3612 100644 --- a/arch/powerpc/boot/serial.c +++ b/arch/powerpc/boot/serial.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Generic serial console support * @@ -6,10 +7,7 @@ * Code in serial_edit_cmdline() copied from * and was written by Matt Porter . * - * 2001,2006 (c) MontaVista Software, Inc. This file is licensed under - * the terms of the GNU General Public License version 2. This program - * is licensed "as is" without any warranty of any kind, whether express - * or implied. + * 2001,2006 (c) MontaVista Software, Inc. 
*/ #include #include diff --git a/arch/powerpc/boot/simple_alloc.c b/arch/powerpc/boot/simple_alloc.c index 65ec135d0157..267d6524caac 100644 --- a/arch/powerpc/boot/simple_alloc.c +++ b/arch/powerpc/boot/simple_alloc.c @@ -1,12 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Implement primitive realloc(3) functionality. * * Author: Mark A. Greer * - * 2006 (c) MontaVista, Software, Inc. This file is licensed under - * the terms of the GNU General Public License version 2. This program - * is licensed "as is" without any warranty of any kind, whether express - * or implied. + * 2006 (c) MontaVista, Software, Inc. */ #include From 7a26c952902d1f29b09237b1698a30084f6a3074 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Fri, 19 Aug 2022 21:05:56 +1000 Subject: [PATCH 004/214] powerpc/boot: Drop unused dummy.c The last use of dummy.c was dropped in commit 2bf118197cb4 ("[POWERPC] Create a "wrapper" script and use it in arch/powerpc/boot"). Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220819110556.434970-1-mpe@ellerman.id.au --- arch/powerpc/boot/dummy.c | 4 ---- 1 file changed, 4 deletions(-) delete mode 100644 arch/powerpc/boot/dummy.c diff --git a/arch/powerpc/boot/dummy.c b/arch/powerpc/boot/dummy.c deleted file mode 100644 index 31dbf45bf99c..000000000000 --- a/arch/powerpc/boot/dummy.c +++ /dev/null @@ -1,4 +0,0 @@ -int main(void) -{ - return 0; -} From eb316ae798b36b280ef9e6a79d3aa34d146aa0e4 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Thu, 18 Aug 2022 15:06:59 +1000 Subject: [PATCH 005/214] powerpc: Move patch sites out of asm-prototypes.h The definitions for the patch sites etc. don't belong in asm-prototypes.h, they are not EXPORT'ed asm symbols. Move them into sections.h which is traditionally used for asm symbols. 
Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220818050659.187181-1-mpe@ellerman.id.au --- arch/powerpc/include/asm/asm-prototypes.h | 13 ------------- arch/powerpc/include/asm/sections.h | 13 +++++++++++++ arch/powerpc/kernel/security.c | 1 + 3 files changed, 14 insertions(+), 13 deletions(-) diff --git a/arch/powerpc/include/asm/asm-prototypes.h b/arch/powerpc/include/asm/asm-prototypes.h index 81631e64dbeb..e1b3e90eec94 100644 --- a/arch/powerpc/include/asm/asm-prototypes.h +++ b/arch/powerpc/include/asm/asm-prototypes.h @@ -55,19 +55,6 @@ struct kvm_vcpu; void _kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu, u64 guest_msr); void _kvmppc_save_tm_pr(struct kvm_vcpu *vcpu, u64 guest_msr); -/* Patch sites */ -extern s32 patch__call_flush_branch_caches1; -extern s32 patch__call_flush_branch_caches2; -extern s32 patch__call_flush_branch_caches3; -extern s32 patch__flush_count_cache_return; -extern s32 patch__flush_link_stack_return; -extern s32 patch__call_kvm_flush_link_stack; -extern s32 patch__call_kvm_flush_link_stack_p9; -extern s32 patch__memset_nocache, patch__memcpy_nocache; - -extern long flush_branch_caches; -extern long kvm_flush_link_stack; - #ifdef CONFIG_PPC_TRANSACTIONAL_MEM void kvmppc_save_tm_hv(struct kvm_vcpu *vcpu, u64 msr, bool preserve_nv); void kvmppc_restore_tm_hv(struct kvm_vcpu *vcpu, u64 msr, bool preserve_nv); diff --git a/arch/powerpc/include/asm/sections.h b/arch/powerpc/include/asm/sections.h index 8be2c491c733..183e6b8af392 100644 --- a/arch/powerpc/include/asm/sections.h +++ b/arch/powerpc/include/asm/sections.h @@ -14,6 +14,19 @@ typedef struct func_desc func_desc_t; extern char __head_end[]; +/* Patch sites */ +extern s32 patch__call_flush_branch_caches1; +extern s32 patch__call_flush_branch_caches2; +extern s32 patch__call_flush_branch_caches3; +extern s32 patch__flush_count_cache_return; +extern s32 patch__flush_link_stack_return; +extern s32 patch__call_kvm_flush_link_stack; +extern s32 patch__call_kvm_flush_link_stack_p9; +extern s32 patch__memset_nocache, patch__memcpy_nocache; + +extern long flush_branch_caches; +extern long kvm_flush_link_stack; + #ifdef __powerpc64__ extern char __start_interrupts[]; diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c index d96fd14bd7c9..b562a1d2c750 100644 --- a/arch/powerpc/kernel/security.c +++ b/arch/powerpc/kernel/security.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include From 9e1b45fdf25caed521d6851136a0e3213c676656 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Wed, 3 Aug 2022 16:32:28 +1000 Subject: [PATCH 006/214] powerpc: Update ISA versions to mention e5500/e6500 Add the NXP (nee Freescale) e5500 and e6500 to the ISA versions documentation. Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220803063228.1250030-1-mpe@ellerman.id.au --- Documentation/powerpc/isa-versions.rst | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/Documentation/powerpc/isa-versions.rst b/Documentation/powerpc/isa-versions.rst index dfcb1097dce4..5592b8899a48 100644 --- a/Documentation/powerpc/isa-versions.rst +++ b/Documentation/powerpc/isa-versions.rst @@ -10,6 +10,8 @@ CPU Architecture version Power10 Power ISA v3.1 Power9 Power ISA v3.0B Power8 Power ISA v2.07 +e6500 Power ISA v2.06 with some exceptions +e5500 Power ISA v2.06 with some exceptions, no Altivec Power7 Power ISA v2.06 Power6 Power ISA v2.05 PA6T Power ISA v2.04 @@ -36,6 +38,8 @@ CPU VMX (aka. 
Altivec) Power10 Yes Power9 Yes Power8 Yes +e6500 Yes +e5500 No Power7 Yes Power6 Yes PA6T Yes @@ -52,6 +56,8 @@ CPU VSX Power10 Yes Power9 Yes Power8 Yes +e6500 No +e5500 No Power7 Yes Power6 No PA6T No @@ -68,6 +74,8 @@ CPU Transactional Memory Power10 No (* see Power ISA v3.1, "Appendix A. Notes on the Removal of Transactional Memory from the Architecture") Power9 Yes (* see transactional_memory.txt) Power8 Yes +e6500 No +e5500 No Power7 No Power6 No PA6T No From e38cd72c17fa7d7710088365251feb6c52b501c8 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Fri, 19 Aug 2022 16:23:43 +0200 Subject: [PATCH 007/214] powerpc: Remove stale declarations in mmu_decl.h rtas_size and rtas_data are not used anymore since at least commit 7c8c6b9776fb ("powerpc: Merge lmb.c and make MM initialization use it.") Remove them. Since commit 4b74a35fc7e9 ("powerpc/32s: Make Hash var static") the forward declaration of struct hash_pte is unneeded. Remove it. __initial_memory_limit_addr was removed by commit e63075a3c937 ("memblock: Introduce default allocation limit and use it to replace explicit ones") Remove the declaration. Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/a821e8397dd56b8177ecc04966d3b3a7c4bda6d4.1660919016.git.christophe.leroy@csgroup.eu --- arch/powerpc/include/asm/setup.h | 1 - arch/powerpc/mm/mmu_decl.h | 4 ---- 2 files changed, 5 deletions(-) diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h index d8c28902cf59..dd461b2c825c 100644 --- a/arch/powerpc/include/asm/setup.h +++ b/arch/powerpc/include/asm/setup.h @@ -7,7 +7,6 @@ #ifndef __ASSEMBLY__ extern void ppc_printk_progress(char *s, unsigned short hex); -extern unsigned int rtas_data; extern unsigned long long memory_limit; extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask); diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h index 229c72e49198..8f5afad1b6af 100644 --- a/arch/powerpc/mm/mmu_decl.h +++ b/arch/powerpc/mm/mmu_decl.h @@ -92,15 +92,11 @@ extern void mapin_ram(void); extern void setbat(int index, unsigned long virt, phys_addr_t phys, unsigned int size, pgprot_t prot); -extern unsigned int rtas_data, rtas_size; - -struct hash_pte; extern u8 early_hash[]; #endif /* CONFIG_PPC32 */ extern unsigned long __max_low_memory; -extern phys_addr_t __initial_memory_limit_addr; extern phys_addr_t total_memory; extern phys_addr_t total_lowmem; extern phys_addr_t memstart_addr; From f7d5f00702e2da656b2a8f975fdaa0d48329de36 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Fri, 19 Aug 2022 16:26:49 +0200 Subject: [PATCH 008/214] powerpc/fsl_booke: Make calc_cam_sz() static calc_cam_sz() is used only in fsl_book3e.c, make it static. 
Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/a7469848371b2cf5e8f654ec79800e209d88595e.1660919200.git.christophe.leroy@csgroup.eu --- arch/powerpc/mm/mmu_decl.h | 2 -- arch/powerpc/mm/nohash/fsl_book3e.c | 4 ++-- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h index 8f5afad1b6af..6dd4744cc56a 100644 --- a/arch/powerpc/mm/mmu_decl.h +++ b/arch/powerpc/mm/mmu_decl.h @@ -122,8 +122,6 @@ unsigned long mmu_mapin_ram(unsigned long base, unsigned long top); #ifdef CONFIG_PPC_FSL_BOOK3E extern unsigned long map_mem_in_cams(unsigned long ram, int max_cam_idx, bool dryrun, bool init); -extern unsigned long calc_cam_sz(unsigned long ram, unsigned long virt, - phys_addr_t phys); #ifdef CONFIG_PPC32 extern void adjust_total_lowmem(void); extern int switch_to_as1(void); diff --git a/arch/powerpc/mm/nohash/fsl_book3e.c b/arch/powerpc/mm/nohash/fsl_book3e.c index b8ae6c08c06f..c1ad173de318 100644 --- a/arch/powerpc/mm/nohash/fsl_book3e.c +++ b/arch/powerpc/mm/nohash/fsl_book3e.c @@ -135,8 +135,8 @@ static void settlbcam(int index, unsigned long virt, phys_addr_t phys, tlbcam_addrs[index].phys = phys; } -unsigned long calc_cam_sz(unsigned long ram, unsigned long virt, - phys_addr_t phys) +static unsigned long calc_cam_sz(unsigned long ram, unsigned long virt, + phys_addr_t phys) { unsigned int camsize = __ilog2(ram); unsigned int align = __ffs(virt | phys); From 14be375634c3f8ff750bdce0c10036c2fbfcb282 Mon Sep 17 00:00:00 2001 From: Wolfram Sang Date: Thu, 18 Aug 2022 22:59:46 +0200 Subject: [PATCH 009/214] powerpc: move from strlcpy with unused retval to strscpy Follow the advice of the below link and prefer 'strscpy' in this subsystem. Conversion is 1:1 because the return value is not used. Generated by a coccinelle script. 
Signed-off-by: Wolfram Sang Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/CAHk-=wgfRnXz0W3D37d01q3JFkr_i_uTL=V6A6G1oUZcprmknw@mail.gmail.com/ Link: https://lore.kernel.org/r/20220818205946.6336-1-wsa+renesas@sang-engineering.com --- arch/powerpc/kernel/dt_cpu_ftrs.c | 2 +- arch/powerpc/platforms/powernv/idle.c | 2 +- arch/powerpc/platforms/powernv/pci-ioda.c | 2 +- arch/powerpc/platforms/pseries/hvcserver.c | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c index fc800a9fb2c4..c3fb9fdf5bd7 100644 --- a/arch/powerpc/kernel/dt_cpu_ftrs.c +++ b/arch/powerpc/kernel/dt_cpu_ftrs.c @@ -1099,7 +1099,7 @@ static int __init dt_cpu_ftrs_scan_callback(unsigned long node, const char prop = of_get_flat_dt_prop(node, "display-name", NULL); if (prop && strlen((char *)prop) != 0) { - strlcpy(dt_cpu_name, (char *)prop, sizeof(dt_cpu_name)); + strscpy(dt_cpu_name, (char *)prop, sizeof(dt_cpu_name)); cur_cpu_spec->cpu_name = dt_cpu_name; } diff --git a/arch/powerpc/platforms/powernv/idle.c b/arch/powerpc/platforms/powernv/idle.c index 6f94b808dd39..6e6b3bd9c92f 100644 --- a/arch/powerpc/platforms/powernv/idle.c +++ b/arch/powerpc/platforms/powernv/idle.c @@ -1411,7 +1411,7 @@ static int __init pnv_parse_cpuidle_dt(void) goto out; } for (i = 0; i < nr_idle_states; i++) - strlcpy(pnv_idle_states[i].name, temp_string[i], + strscpy(pnv_idle_states[i].name, temp_string[i], PNV_IDLE_NAME_LEN); nr_pnv_idle_states = nr_idle_states; rc = 0; diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index 9de9b2fb163d..5c144c05cbfd 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/arch/powerpc/platforms/powernv/pci-ioda.c @@ -67,7 +67,7 @@ void pe_level_printk(const struct pnv_ioda_pe *pe, const char *level, vaf.va = &args; if (pe->flags & PNV_IODA_PE_DEV) - strlcpy(pfix, dev_name(&pe->pdev->dev), sizeof(pfix)); + strscpy(pfix, dev_name(&pe->pdev->dev), sizeof(pfix)); else if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)) sprintf(pfix, "%04x:%02x ", pci_domain_nr(pe->pbus), pe->pbus->number); diff --git a/arch/powerpc/platforms/pseries/hvcserver.c b/arch/powerpc/platforms/pseries/hvcserver.c index 96e18d3b2fcf..d48c9c7ce10f 100644 --- a/arch/powerpc/platforms/pseries/hvcserver.c +++ b/arch/powerpc/platforms/pseries/hvcserver.c @@ -176,7 +176,7 @@ int hvcs_get_partner_info(uint32_t unit_address, struct list_head *head, = (unsigned int)last_p_partition_ID; /* copy the Null-term char too */ - strlcpy(&next_partner_info->location_code[0], + strscpy(&next_partner_info->location_code[0], (char *)&pi_buff[2], sizeof(next_partner_info->location_code)); From 4c14d7a3fa097c4ccb6323dd8f0f7746bfebb053 Mon Sep 17 00:00:00 2001 From: Wolfram Sang Date: Thu, 18 Aug 2022 23:00:26 +0200 Subject: [PATCH 010/214] macintosh: move from strlcpy with unused retval to strscpy Follow the advice of the below link and prefer 'strscpy' in this subsystem. Conversion is 1:1 because the return value is not used. Generated by a coccinelle script. 
Signed-off-by: Wolfram Sang Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/CAHk-=wgfRnXz0W3D37d01q3JFkr_i_uTL=V6A6G1oUZcprmknw@mail.gmail.com/ Link: https://lore.kernel.org/r/20220818210026.6940-1-wsa+renesas@sang-engineering.com --- drivers/macintosh/therm_windtunnel.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/macintosh/therm_windtunnel.c b/drivers/macintosh/therm_windtunnel.c index 9226b74fa08f..091278240baa 100644 --- a/drivers/macintosh/therm_windtunnel.c +++ b/drivers/macintosh/therm_windtunnel.c @@ -321,7 +321,7 @@ static void do_attach(struct i2c_adapter *adapter) if (np) { of_node_put(np); } else { - strlcpy(info.type, "MAC,ds1775", I2C_NAME_SIZE); + strscpy(info.type, "MAC,ds1775", I2C_NAME_SIZE); i2c_new_scanned_device(adapter, &info, scan_ds1775, NULL); } @@ -329,7 +329,7 @@ static void do_attach(struct i2c_adapter *adapter) if (np) { of_node_put(np); } else { - strlcpy(info.type, "MAC,adm1030", I2C_NAME_SIZE); + strscpy(info.type, "MAC,adm1030", I2C_NAME_SIZE); i2c_new_scanned_device(adapter, &info, scan_adm1030, NULL); } } From 395cac7752b905318ae454a8b859d4c190485510 Mon Sep 17 00:00:00 2001 From: Russell Currey Date: Wed, 17 Aug 2022 15:06:39 +1000 Subject: [PATCH 011/214] powerpc/mm: Support execute-only memory on the Radix MMU Add support for execute-only memory (XOM) for the Radix MMU by using an execute-only mapping, as opposed to the RX mapping used by powerpc's other MMUs. The Hash MMU already supports XOM through the execute-only pkey, which is a separate mechanism shared with x86. A PROT_EXEC-only mapping will map to RX, and then the pkey will be applied on top of it. mmap() and mprotect() consumers in userspace should observe the same behaviour on Hash and Radix despite the differences in implementation. Replacing the vma_is_accessible() check in access_error() with a read check should be functionally equivalent for non-Radix MMUs, since it follows write and execute checks. For Radix, the change enables detecting faults on execute-only mappings where vma_is_accessible() would return true. 
Signed-off-by: Russell Currey Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220817050640.406017-1-ruscur@russell.cc --- arch/powerpc/include/asm/book3s/64/pgtable.h | 2 ++ arch/powerpc/mm/book3s64/pgtable.c | 11 +++++++++-- arch/powerpc/mm/fault.c | 6 +++++- 3 files changed, 16 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h index 392ff48f77df..486902aff040 100644 --- a/arch/powerpc/include/asm/book3s/64/pgtable.h +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h @@ -151,6 +151,8 @@ #define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_EXEC) #define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_READ) #define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_EXEC) +/* Radix only, Hash uses PAGE_READONLY_X + execute-only pkey instead */ +#define PAGE_EXECONLY __pgprot(_PAGE_BASE | _PAGE_EXEC) /* Permission masks used for kernel mappings */ #define PAGE_KERNEL __pgprot(_PAGE_BASE | _PAGE_KERNEL_RW) diff --git a/arch/powerpc/mm/book3s64/pgtable.c b/arch/powerpc/mm/book3s64/pgtable.c index 7b9966402b25..f6151a589298 100644 --- a/arch/powerpc/mm/book3s64/pgtable.c +++ b/arch/powerpc/mm/book3s64/pgtable.c @@ -553,8 +553,15 @@ EXPORT_SYMBOL_GPL(memremap_compat_align); pgprot_t vm_get_page_prot(unsigned long vm_flags) { - unsigned long prot = pgprot_val(protection_map[vm_flags & - (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]); + unsigned long prot; + + /* Radix supports execute-only, but protection_map maps X -> RX */ + if (radix_enabled() && ((vm_flags & VM_ACCESS_FLAGS) == VM_EXEC)) { + prot = pgprot_val(PAGE_EXECONLY); + } else { + prot = pgprot_val(protection_map[vm_flags & + (VM_ACCESS_FLAGS | VM_SHARED)]); + } if (vm_flags & VM_SAO) prot |= _PAGE_SAO; diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c index 014005428687..1566804e4b3d 100644 --- a/arch/powerpc/mm/fault.c +++ b/arch/powerpc/mm/fault.c @@ -270,7 +270,11 @@ static bool access_error(bool is_write, bool is_exec, struct vm_area_struct *vma return false; } - if (unlikely(!vma_is_accessible(vma))) + /* + * Check for a read fault. This could be caused by a read on an + * inaccessible page (i.e. PROT_NONE), or a Radix MMU execute-only page. + */ + if (unlikely(!(vma->vm_flags & VM_READ))) return true; /* * We should ideally do the vma pkey access check here. But in the From 98acee3f8db451eaab9fbd422e523c228aacf08c Mon Sep 17 00:00:00 2001 From: Nicholas Miehlbradt Date: Wed, 17 Aug 2022 15:06:40 +1000 Subject: [PATCH 012/214] selftests/powerpc: Add a test for execute-only memory This selftest is designed to cover execute-only protections on the Radix MMU but will also work with Hash. The tests are based on those found in pkey_exec_test with modifications to use the generic mprotect() instead of the pkey variants. 
Signed-off-by: Nicholas Miehlbradt Signed-off-by: Russell Currey Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220817050640.406017-2-ruscur@russell.cc --- tools/testing/selftests/powerpc/mm/Makefile | 3 +- .../testing/selftests/powerpc/mm/exec_prot.c | 231 ++++++++++++++++++ 2 files changed, 233 insertions(+), 1 deletion(-) create mode 100644 tools/testing/selftests/powerpc/mm/exec_prot.c diff --git a/tools/testing/selftests/powerpc/mm/Makefile b/tools/testing/selftests/powerpc/mm/Makefile index 27dc09d0bfee..19dd0b2ea397 100644 --- a/tools/testing/selftests/powerpc/mm/Makefile +++ b/tools/testing/selftests/powerpc/mm/Makefile @@ -3,7 +3,7 @@ noarg: $(MAKE) -C ../ TEST_GEN_PROGS := hugetlb_vs_thp_test subpage_prot prot_sao segv_errors wild_bctr \ - large_vm_fork_separation bad_accesses pkey_exec_prot \ + large_vm_fork_separation bad_accesses exec_prot pkey_exec_prot \ pkey_siginfo stack_expansion_signal stack_expansion_ldst \ large_vm_gpr_corruption TEST_PROGS := stress_code_patching.sh @@ -22,6 +22,7 @@ $(OUTPUT)/wild_bctr: CFLAGS += -m64 $(OUTPUT)/large_vm_fork_separation: CFLAGS += -m64 $(OUTPUT)/large_vm_gpr_corruption: CFLAGS += -m64 $(OUTPUT)/bad_accesses: CFLAGS += -m64 +$(OUTPUT)/exec_prot: CFLAGS += -m64 $(OUTPUT)/pkey_exec_prot: CFLAGS += -m64 $(OUTPUT)/pkey_siginfo: CFLAGS += -m64 diff --git a/tools/testing/selftests/powerpc/mm/exec_prot.c b/tools/testing/selftests/powerpc/mm/exec_prot.c new file mode 100644 index 000000000000..db75b2225de1 --- /dev/null +++ b/tools/testing/selftests/powerpc/mm/exec_prot.c @@ -0,0 +1,231 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Copyright 2022, Nicholas Miehlbradt, IBM Corporation + * based on pkey_exec_prot.c + * + * Test if applying execute protection on pages works as expected. 
+ */ + +#define _GNU_SOURCE +#include +#include +#include +#include + +#include +#include + +#include "pkeys.h" + + +#define PPC_INST_NOP 0x60000000 +#define PPC_INST_TRAP 0x7fe00008 +#define PPC_INST_BLR 0x4e800020 + +static volatile sig_atomic_t fault_code; +static volatile sig_atomic_t remaining_faults; +static volatile unsigned int *fault_addr; +static unsigned long pgsize, numinsns; +static unsigned int *insns; +static bool pkeys_supported; + +static bool is_fault_expected(int fault_code) +{ + if (fault_code == SEGV_ACCERR) + return true; + + /* Assume any pkey error is fine since pkey_exec_prot test covers them */ + if (fault_code == SEGV_PKUERR && pkeys_supported) + return true; + + return false; +} + +static void trap_handler(int signum, siginfo_t *sinfo, void *ctx) +{ + /* Check if this fault originated from the expected address */ + if (sinfo->si_addr != (void *)fault_addr) + sigsafe_err("got a fault for an unexpected address\n"); + + _exit(1); +} + +static void segv_handler(int signum, siginfo_t *sinfo, void *ctx) +{ + fault_code = sinfo->si_code; + + /* Check if this fault originated from the expected address */ + if (sinfo->si_addr != (void *)fault_addr) { + sigsafe_err("got a fault for an unexpected address\n"); + _exit(1); + } + + /* Check if too many faults have occurred for a single test case */ + if (!remaining_faults) { + sigsafe_err("got too many faults for the same address\n"); + _exit(1); + } + + + /* Restore permissions in order to continue */ + if (is_fault_expected(fault_code)) { + if (mprotect(insns, pgsize, PROT_READ | PROT_WRITE | PROT_EXEC)) { + sigsafe_err("failed to set access permissions\n"); + _exit(1); + } + } else { + sigsafe_err("got a fault with an unexpected code\n"); + _exit(1); + } + + remaining_faults--; +} + +static int check_exec_fault(int rights) +{ + /* + * Jump to the executable region. + * + * The first iteration also checks if the overwrite of the + * first instruction word from a trap to a no-op succeeded. + */ + fault_code = -1; + remaining_faults = 0; + if (!(rights & PROT_EXEC)) + remaining_faults = 1; + + FAIL_IF(mprotect(insns, pgsize, rights) != 0); + asm volatile("mtctr %0; bctrl" : : "r"(insns)); + + FAIL_IF(remaining_faults != 0); + if (!(rights & PROT_EXEC)) + FAIL_IF(!is_fault_expected(fault_code)); + + return 0; +} + +static int test(void) +{ + struct sigaction segv_act, trap_act; + int i; + + /* Skip the test if the CPU doesn't support Radix */ + SKIP_IF(!have_hwcap2(PPC_FEATURE2_ARCH_3_00)); + + /* Check if pkeys are supported */ + pkeys_supported = pkeys_unsupported() == 0; + + /* Setup SIGSEGV handler */ + segv_act.sa_handler = 0; + segv_act.sa_sigaction = segv_handler; + FAIL_IF(sigprocmask(SIG_SETMASK, 0, &segv_act.sa_mask) != 0); + segv_act.sa_flags = SA_SIGINFO; + segv_act.sa_restorer = 0; + FAIL_IF(sigaction(SIGSEGV, &segv_act, NULL) != 0); + + /* Setup SIGTRAP handler */ + trap_act.sa_handler = 0; + trap_act.sa_sigaction = trap_handler; + FAIL_IF(sigprocmask(SIG_SETMASK, 0, &trap_act.sa_mask) != 0); + trap_act.sa_flags = SA_SIGINFO; + trap_act.sa_restorer = 0; + FAIL_IF(sigaction(SIGTRAP, &trap_act, NULL) != 0); + + /* Setup executable region */ + pgsize = getpagesize(); + numinsns = pgsize / sizeof(unsigned int); + insns = (unsigned int *)mmap(NULL, pgsize, PROT_READ | PROT_WRITE, + MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); + FAIL_IF(insns == MAP_FAILED); + + /* Write the instruction words */ + for (i = 1; i < numinsns - 1; i++) + insns[i] = PPC_INST_NOP; + + /* + * Set the first instruction as an unconditional trap. 
If + * the last write to this address succeeds, this should + * get overwritten by a no-op. + */ + insns[0] = PPC_INST_TRAP; + + /* + * Later, to jump to the executable region, we use a branch + * and link instruction (bctrl) which sets the return address + * automatically in LR. Use that to return back. + */ + insns[numinsns - 1] = PPC_INST_BLR; + + /* + * Pick the first instruction's address from the executable + * region. + */ + fault_addr = insns; + + /* + * Read an instruction word from the address when the page + * is execute only. This should generate an access fault. + */ + fault_code = -1; + remaining_faults = 1; + printf("Testing read on --x, should fault..."); + FAIL_IF(mprotect(insns, pgsize, PROT_EXEC) != 0); + i = *fault_addr; + FAIL_IF(remaining_faults != 0 || !is_fault_expected(fault_code)); + printf("ok!\n"); + + /* + * Write an instruction word to the address when the page + * execute only. This should also generate an access fault. + */ + fault_code = -1; + remaining_faults = 1; + printf("Testing write on --x, should fault..."); + FAIL_IF(mprotect(insns, pgsize, PROT_EXEC) != 0); + *fault_addr = PPC_INST_NOP; + FAIL_IF(remaining_faults != 0 || !is_fault_expected(fault_code)); + printf("ok!\n"); + + printf("Testing exec on ---, should fault..."); + FAIL_IF(check_exec_fault(PROT_NONE)); + printf("ok!\n"); + + printf("Testing exec on r--, should fault..."); + FAIL_IF(check_exec_fault(PROT_READ)); + printf("ok!\n"); + + printf("Testing exec on -w-, should fault..."); + FAIL_IF(check_exec_fault(PROT_WRITE)); + printf("ok!\n"); + + printf("Testing exec on rw-, should fault..."); + FAIL_IF(check_exec_fault(PROT_READ | PROT_WRITE)); + printf("ok!\n"); + + printf("Testing exec on --x, should succeed..."); + FAIL_IF(check_exec_fault(PROT_EXEC)); + printf("ok!\n"); + + printf("Testing exec on r-x, should succeed..."); + FAIL_IF(check_exec_fault(PROT_READ | PROT_EXEC)); + printf("ok!\n"); + + printf("Testing exec on -wx, should succeed..."); + FAIL_IF(check_exec_fault(PROT_WRITE | PROT_EXEC)); + printf("ok!\n"); + + printf("Testing exec on rwx, should succeed..."); + FAIL_IF(check_exec_fault(PROT_READ | PROT_WRITE | PROT_EXEC)); + printf("ok!\n"); + + /* Cleanup */ + FAIL_IF(munmap((void *)insns, pgsize)); + + return 0; +} + +int main(void) +{ + return test_harness(test, "exec_prot"); +} From 0b9e534fcbf0a45e2ba08467de08a848a99ba2d6 Mon Sep 17 00:00:00 2001 From: Joel Stanley Date: Thu, 4 Aug 2022 11:27:08 +0930 Subject: [PATCH 013/214] powerpc/microwatt: Add LiteX MMC driver Enable the LiteX MMC device and it's dependency the common clock framework. 
Signed-off-by: Joel Stanley Acked-by: Michael Neuling Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220804015708.5928-1-joel@jms.id.au --- arch/powerpc/configs/microwatt_defconfig | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/arch/powerpc/configs/microwatt_defconfig b/arch/powerpc/configs/microwatt_defconfig index eff933ebbb9e..ea2dbd778aad 100644 --- a/arch/powerpc/configs/microwatt_defconfig +++ b/arch/powerpc/configs/microwatt_defconfig @@ -75,7 +75,12 @@ CONFIG_SPI_BITBANG=y CONFIG_SPI_SPIDEV=y # CONFIG_HWMON is not set # CONFIG_USB_SUPPORT is not set +CONFIG_MMC=y +# CONFIG_PWRSEQ_EMMC is not set +# CONFIG_PWRSEQ_SIMPLE is not set +CONFIG_MMC_LITEX=y # CONFIG_VIRTIO_MENU is not set +CONFIG_COMMON_CLK=y # CONFIG_IOMMU_SUPPORT is not set # CONFIG_NVMEM is not set CONFIG_EXT4_FS=y From fd20b60aea6a37788f2f761af405b41c6c34473b Mon Sep 17 00:00:00 2001 From: Wolfram Sang Date: Wed, 24 Aug 2022 10:21:29 +0200 Subject: [PATCH 014/214] powerpc/82xx: remove spidev node from mgcoge Commit 956b200a846e ("spi: spidev: Warn loudly if instantiated from DT as "spidev"") states that there should not be spidev nodes in DTs. Remove this non-HW description. There won't be a regression because it won't bind since 2015 anyhow. Fixes: 5d1d67e361ea ("powerpc/82xx: add SPI support for mgcoge") Signed-off-by: Wolfram Sang Reviewed-by: Krzysztof Kozlowski Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220824082130.21934-3-wsa+renesas@sang-engineering.com --- arch/powerpc/boot/dts/mgcoge.dts | 7 ------- 1 file changed, 7 deletions(-) diff --git a/arch/powerpc/boot/dts/mgcoge.dts b/arch/powerpc/boot/dts/mgcoge.dts index 7de068991bde..9cefed207234 100644 --- a/arch/powerpc/boot/dts/mgcoge.dts +++ b/arch/powerpc/boot/dts/mgcoge.dts @@ -225,13 +225,6 @@ interrupts = <2 8>; interrupt-parent = <&PIC>; cs-gpios = < &cpm2_pio_d 19 0>; - #address-cells = <1>; - #size-cells = <0>; - ds3106@1 { - compatible = "gen,spidev"; - reg = <0>; - spi-max-frequency = <8000000>; - }; }; }; From 8b4bb0ad00cb347f62e76a636ce08eb179c843fc Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Mon, 15 Aug 2022 21:48:40 +1000 Subject: [PATCH 015/214] powerpc/code-patching: Speed up page mapping/unmapping Since commit 591b4b268435 ("powerpc/code-patching: Pre-map patch area") the patch area is premapped so intermediate page tables are already allocated. Use __set_pte_at() directly instead of the heavy map_kernel_page(), at for unmapping just do a pte_clear() followed by a flush. __set_pte_at() can be used directly without the filters in set_pte_at() because we are mapping a normal page non executable. Make sure gcc knows text_poke_area is page aligned in order to optimise the flush. This change reduces by 66% the time needed to activate ftrace on an 8xx (588000 tb ticks instead of 1744000). 
Signed-off-by: Christophe Leroy [mpe: Add ptesync needed on radix to avoid spurious fault] Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220815114840.1468656-1-mpe@ellerman.id.au --- arch/powerpc/lib/code-patching.c | 30 +++++++++++++++++++----------- 1 file changed, 19 insertions(+), 11 deletions(-) diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c index 6edf0697a526..ad0cf3108dd0 100644 --- a/arch/powerpc/lib/code-patching.c +++ b/arch/powerpc/lib/code-patching.c @@ -94,17 +94,20 @@ void __init poking_init(void) static_branch_enable(&poking_init_done); } +static unsigned long get_patch_pfn(void *addr) +{ + if (IS_ENABLED(CONFIG_MODULES) && is_vmalloc_or_module_addr(addr)) + return vmalloc_to_pfn(addr); + else + return __pa_symbol(addr) >> PAGE_SHIFT; +} + /* * This can be called for kernel text or a module. */ static int map_patch_area(void *addr, unsigned long text_poke_addr) { - unsigned long pfn; - - if (IS_ENABLED(CONFIG_MODULES) && is_vmalloc_or_module_addr(addr)) - pfn = vmalloc_to_pfn(addr); - else - pfn = __pa_symbol(addr) >> PAGE_SHIFT; + unsigned long pfn = get_patch_pfn(addr); return map_kernel_page(text_poke_addr, (pfn << PAGE_SHIFT), PAGE_KERNEL); } @@ -149,17 +152,22 @@ static int __do_patch_instruction(u32 *addr, ppc_inst_t instr) int err; u32 *patch_addr; unsigned long text_poke_addr; + pte_t *pte; + unsigned long pfn = get_patch_pfn(addr); - text_poke_addr = (unsigned long)__this_cpu_read(text_poke_area)->addr; + text_poke_addr = (unsigned long)__this_cpu_read(text_poke_area)->addr & PAGE_MASK; patch_addr = (u32 *)(text_poke_addr + offset_in_page(addr)); - err = map_patch_area(addr, text_poke_addr); - if (err) - return err; + pte = virt_to_kpte(text_poke_addr); + __set_pte_at(&init_mm, text_poke_addr, pte, pfn_pte(pfn, PAGE_KERNEL), 0); + /* See ptesync comment in radix__set_pte_at() */ + if (radix_enabled()) + asm volatile("ptesync": : :"memory"); err = __patch_instruction(addr, instr, patch_addr); - unmap_patch_area(text_poke_addr); + pte_clear(&init_mm, text_poke_addr, pte); + flush_tlb_kernel_range(text_poke_addr, text_poke_addr + PAGE_SIZE); return err; } From fc06755e25628ce215e9f75c1207250242dadf42 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Wed, 31 Aug 2022 11:32:07 +0200 Subject: [PATCH 016/214] powerpc/32: Drop a stale comment about reservation of gigantic pages A comment about the reservation of gigantic pages was left in MMU_init() after commit 79cc38ded1e1 ("powerpc/mm/hugetlb: Add support for reserving gigantic huge pages via kernel command line") Remove it. Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/959d77be630b9b46a7458f0fbd41dc3a94ec811a.1661938317.git.christophe.leroy@csgroup.eu --- arch/powerpc/mm/init_32.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c index 62d9af6606cd..50de41042c0a 100644 --- a/arch/powerpc/mm/init_32.c +++ b/arch/powerpc/mm/init_32.c @@ -82,10 +82,6 @@ void __init MMU_init(void) if (ppc_md.progress) ppc_md.progress("MMU:enter", 0x111); - /* - * Reserve gigantic pages for hugetlb. This MUST occur before - * lowmem_end_addr is initialized below. 
- */ if (memblock.memory.cnt > 1) { #ifndef CONFIG_WII memblock_enforce_memory_limit(memblock.memory.regions[0].size); From b0e0d68b1c52cb2c46e513011fdd53815cffefb7 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Wed, 31 Aug 2022 11:32:08 +0200 Subject: [PATCH 017/214] powerpc/32: Allow fragmented physical memory Since commit 9e849f231c3c ("powerpc/mm/32s: use generic mmu_mapin_ram() for all blocks.") it is possible to map all blocks as RAM on any PPC32. Remove related restrictions. And remove call to wii_memory_fixups() which doesn't do anything else than checks since commit 160985f3025b ("powerpc/wii: remove wii_mmu_mapin_mem2()") Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/fc9a5b8ef49f6eb86e970d4c7ccfba9b407fd4eb.1661938317.git.christophe.leroy@csgroup.eu --- arch/powerpc/mm/init_32.c | 9 --------- 1 file changed, 9 deletions(-) diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c index 50de41042c0a..3142d7617412 100644 --- a/arch/powerpc/mm/init_32.c +++ b/arch/powerpc/mm/init_32.c @@ -82,15 +82,6 @@ void __init MMU_init(void) if (ppc_md.progress) ppc_md.progress("MMU:enter", 0x111); - if (memblock.memory.cnt > 1) { -#ifndef CONFIG_WII - memblock_enforce_memory_limit(memblock.memory.regions[0].size); - pr_warn("Only using first contiguous memory region\n"); -#else - wii_memory_fixups(); -#endif - } - total_lowmem = total_memory = memblock_end_of_DRAM() - memstart_addr; lowmem_end_addr = memstart_addr + total_lowmem; From 0115953dcebe8858ba3d9997ba48328ebdca593f Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Wed, 31 Aug 2022 11:32:09 +0200 Subject: [PATCH 018/214] powerpc/32: Remove wii_memory_fixups() wii_memory_fixups() is not called anymore, remove it. Also remove left-overs in mmu_decl.h which were forgotten by commit 160985f3025b ("powerpc/wii: remove wii_mmu_mapin_mem2()") Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/5f2091f86528b59ef92ef1daed5d3dd8c0d7bebd.1661938317.git.christophe.leroy@csgroup.eu --- arch/powerpc/mm/mmu_decl.h | 8 -------- arch/powerpc/platforms/embedded6xx/wii.c | 15 --------------- 2 files changed, 23 deletions(-) diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h index 6dd4744cc56a..0e3528aec49e 100644 --- a/arch/powerpc/mm/mmu_decl.h +++ b/arch/powerpc/mm/mmu_decl.h @@ -102,14 +102,6 @@ extern phys_addr_t total_lowmem; extern phys_addr_t memstart_addr; extern phys_addr_t lowmem_end_addr; -#ifdef CONFIG_WII -extern unsigned long wii_hole_start; -extern unsigned long wii_hole_size; - -extern unsigned long wii_mmu_mapin_mem2(unsigned long top); -extern void wii_memory_fixups(void); -#endif - /* ...and now those things that may be slightly different between processor * architectures. 
-- Dan */ diff --git a/arch/powerpc/platforms/embedded6xx/wii.c b/arch/powerpc/platforms/embedded6xx/wii.c index 9e03ff8f631c..f4e654a9d4ff 100644 --- a/arch/powerpc/platforms/embedded6xx/wii.c +++ b/arch/powerpc/platforms/embedded6xx/wii.c @@ -15,8 +15,6 @@ #include #include #include -#include -#include #include #include @@ -49,19 +47,6 @@ static void __iomem *hw_ctrl; static void __iomem *hw_gpio; -static int __init page_aligned(unsigned long x) -{ - return !(x & (PAGE_SIZE-1)); -} - -void __init wii_memory_fixups(void) -{ - struct memblock_region *p = memblock.memory.regions; - - BUG_ON(memblock.memory.cnt != 2); - BUG_ON(!page_aligned(p[0].base) || !page_aligned(p[1].base)); -} - static void __noreturn wii_spin(void) { local_irq_disable(); From aa398d88aea4ec863bd7aea35d5035a37096dc59 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Thu, 1 Sep 2022 11:42:53 +1000 Subject: [PATCH 019/214] powerpc/configs: Properly enable PAPR_SCM in pseries_defconfig My commit to add PAPR_SCM to pseries_defconfig failed to add the required dependencies, meaning the driver doesn't get built. Add the required LIBNVDIMM=m. Fixes: d6481a7195df ("powerpc/configs: Add PAPR_SCM to pseries_defconfig") Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220901014253.252927-1-mpe@ellerman.id.au --- arch/powerpc/configs/pseries_defconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig index b571d084c148..c05e37af9f1e 100644 --- a/arch/powerpc/configs/pseries_defconfig +++ b/arch/powerpc/configs/pseries_defconfig @@ -40,6 +40,7 @@ CONFIG_PPC_SPLPAR=y CONFIG_DTL=y CONFIG_PPC_SMLPAR=y CONFIG_IBMEBUS=y +CONFIG_LIBNVDIMM=m CONFIG_PAPR_SCM=m CONFIG_PPC_SVM=y # CONFIG_PPC_PMAC is not set From 501fe299826ead2cfa2046b5c244e36de254ec6a Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Thu, 1 Sep 2022 12:02:15 +1000 Subject: [PATCH 020/214] selftests/powerpc: Skip 4PB test on 4K PAGE_SIZE systems Systems using the hash MMU with a 4K page size don't support 4PB address space, so skip the test because the bug it tests for can't be triggered. Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220901020215.254097-1-mpe@ellerman.id.au --- tools/testing/selftests/powerpc/mm/large_vm_gpr_corruption.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tools/testing/selftests/powerpc/mm/large_vm_gpr_corruption.c b/tools/testing/selftests/powerpc/mm/large_vm_gpr_corruption.c index 927bfae99ed9..7da515f1da72 100644 --- a/tools/testing/selftests/powerpc/mm/large_vm_gpr_corruption.c +++ b/tools/testing/selftests/powerpc/mm/large_vm_gpr_corruption.c @@ -112,6 +112,8 @@ static int test(void) // This tests a hash MMU specific bug. FAIL_IF(using_hash_mmu(&hash_mmu)); SKIP_IF(!hash_mmu); + // 4K kernels don't support 4PB address space + SKIP_IF(sysconf(_SC_PAGESIZE) < 65536); page_size = sysconf(_SC_PAGESIZE); From a8933c8d55c37f4d5eb617b4bdb72b8888b88681 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Fri, 2 Sep 2022 18:53:13 +1000 Subject: [PATCH 021/214] powerpc/pseries: Add wait interval counter definitions to struct lppaca The hypervisor exposes accumulated partition scheduling interval times in the VPA (lppaca). These can be used to implement a simple stolen time in the guest without complex and costly dtl scanning. 
Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman Reviewed-by: Fabiano Rosas Link: https://lore.kernel.org/r/20220902085316.2071519-2-npiggin@gmail.com --- arch/powerpc/include/asm/lppaca.h | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/include/asm/lppaca.h b/arch/powerpc/include/asm/lppaca.h index c390ec377bae..34d44cb17c87 100644 --- a/arch/powerpc/include/asm/lppaca.h +++ b/arch/powerpc/include/asm/lppaca.h @@ -104,14 +104,18 @@ struct lppaca { volatile __be32 dispersion_count; /* dispatch changed physical cpu */ volatile __be64 cmo_faults; /* CMO page fault count */ volatile __be64 cmo_fault_time; /* CMO page fault time */ - u8 reserved10[104]; + u8 reserved10[64]; /* [S]PURR expropriated/donated */ + volatile __be64 enqueue_dispatch_tb; /* Total TB enqueue->dispatch */ + volatile __be64 ready_enqueue_tb; /* Total TB ready->enqueue */ + volatile __be64 wait_ready_tb; /* Total TB wait->ready */ + u8 reserved11[16]; /* cacheline 4-5 */ __be32 page_ins; /* CMO Hint - # page ins by OS */ - u8 reserved11[148]; + u8 reserved12[148]; volatile __be64 dtl_idx; /* Dispatch Trace Log head index */ - u8 reserved12[96]; + u8 reserved13[96]; } ____cacheline_aligned; #define lppaca_of(cpu) (*paca_ptrs[cpu]->lppaca_ptr) From 0e8a63132800dd8ae5fcb19113f79bea43ea18d9 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Fri, 2 Sep 2022 18:53:14 +1000 Subject: [PATCH 022/214] powerpc/pseries: Implement CONFIG_PARAVIRT_TIME_ACCOUNTING CONFIG_VIRT_CPU_ACCOUNTING_GEN under pseries does not provide stolen time accounting unless CONFIG_PARAVIRT_TIME_ACCOUNTING is enabled. Implement this using the VPA accumulated wait counters. Note this will not work on current KVM hosts because KVM does not implement the VPA dispatch counters (yet). It could be implemented with the dispatch trace log as it is for VIRT_CPU_ACCOUNTING_NATIVE, but that is not necessary for the more limited accounting provided by PARAVIRT_TIME_ACCOUNTING, and it is more expensive, complex, and has downsides like potential log wrap. From Shrikanth: [...] it was tested on Power10 [PowerVM] Shared LPAR. system has two LPAR. we will call first one LPAR1 and second one as LPAR2. Test was carried out in SMT=1. Similar observation was seen in SMT=8 as well. LPAR config header from each LPAR is below. LPAR1 is twice as big as LPAR2. Since Both are sharing the same underlying hardware, work stealing will happen when both the LPAR's are contending for the same resource. LPAR1: type=Shared mode=Uncapped smt=Off lcpu=40 cpus=40 ent=20.00 LPAR2: type=Shared mode=Uncapped smt=Off lcpu=20 cpus=40 ent=10.00 mpstat was used to check for the utilization. stress-ng has been used as the workload. Few cases are tested. when the both LPAR are idle there is no steal time. when LPAR1 starts running at 100% which consumes all of the physical resource, steal time starts to get accounted. With LPAR1 running at 100% and LPAR2 starts running, steal time starts increasing. This is as expected. When the LPAR2 Load is increased further, steal time increases further. 
Case 1: 0% LPAR1; 0% LPAR2 %usr %nice %sys %iowait %irq %soft %steal %guest %gnice %idle 0.00 0.00 0.05 0.00 0.00 0.00 0.00 0.00 0.00 99.95 Case 2: 100% LPAR1; 0% LPAR2 %usr %nice %sys %iowait %irq %soft %steal %guest %gnice %idle 97.68 0.00 0.00 0.00 0.00 0.00 2.32 0.00 0.00 0.00 Case 3: 100% LPAR1; 50% LPAR2 %usr %nice %sys %iowait %irq %soft %steal %guest %gnice %idle 86.34 0.00 0.10 0.00 0.00 0.03 13.54 0.00 0.00 0.00 Case 4: 100% LPAR1; 100% LPAR2 %usr %nice %sys %iowait %irq %soft %steal %guest %gnice %idle 78.54 0.00 0.07 0.00 0.00 0.02 21.36 0.00 0.00 0.00 Case 5: 50% LPAR1; 100% LPAR2 %usr %nice %sys %iowait %irq %soft %steal %guest %gnice %idle 49.37 0.00 0.00 0.00 0.00 0.00 1.17 0.00 0.00 49.47 Patch is accounting for the steal time and basic tests are holding good. Signed-off-by: Nicholas Piggin Tested-by: Shrikanth Hegde [mpe: Add SPDX tag to new paravirt_api_clock.h] Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220902085316.2071519-3-npiggin@gmail.com --- .../admin-guide/kernel-parameters.txt | 6 +++--- arch/powerpc/include/asm/paravirt.h | 12 ++++++++++++ arch/powerpc/include/asm/paravirt_api_clock.h | 2 ++ arch/powerpc/platforms/pseries/Kconfig | 8 ++++++++ arch/powerpc/platforms/pseries/lpar.c | 11 +++++++++++ arch/powerpc/platforms/pseries/setup.c | 19 +++++++++++++++++++ 6 files changed, 55 insertions(+), 3 deletions(-) create mode 100644 arch/powerpc/include/asm/paravirt_api_clock.h diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index d7f30902fda0..d62fdd0ac497 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -3741,9 +3741,9 @@ [X86,PV_OPS] Disable paravirtualized VMware scheduler clock and use the default one. - no-steal-acc [X86,PV_OPS,ARM64] Disable paravirtualized steal time - accounting. steal time is computed, but won't - influence scheduler behaviour + no-steal-acc [X86,PV_OPS,ARM64,PPC/PSERIES] Disable paravirtualized + steal time accounting. steal time is computed, but + won't influence scheduler behaviour nolapic [X86-32,APIC] Do not enable or use the local APIC. 
diff --git a/arch/powerpc/include/asm/paravirt.h b/arch/powerpc/include/asm/paravirt.h index eb7df559ae74..f5ba1a3c41f8 100644 --- a/arch/powerpc/include/asm/paravirt.h +++ b/arch/powerpc/include/asm/paravirt.h @@ -21,6 +21,18 @@ static inline bool is_shared_processor(void) return static_branch_unlikely(&shared_processor); } +#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING +extern struct static_key paravirt_steal_enabled; +extern struct static_key paravirt_steal_rq_enabled; + +u64 pseries_paravirt_steal_clock(int cpu); + +static inline u64 paravirt_steal_clock(int cpu) +{ + return pseries_paravirt_steal_clock(cpu); +} +#endif + /* If bit 0 is set, the cpu has been ceded, conferred, or preempted */ static inline u32 yield_count_of(int cpu) { diff --git a/arch/powerpc/include/asm/paravirt_api_clock.h b/arch/powerpc/include/asm/paravirt_api_clock.h new file mode 100644 index 000000000000..d25ca7ac57c7 --- /dev/null +++ b/arch/powerpc/include/asm/paravirt_api_clock.h @@ -0,0 +1,2 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#include diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig index fb6499977f99..a3b4d99567cb 100644 --- a/arch/powerpc/platforms/pseries/Kconfig +++ b/arch/powerpc/platforms/pseries/Kconfig @@ -23,13 +23,21 @@ config PPC_PSERIES select SWIOTLB default y +config PARAVIRT + bool + config PARAVIRT_SPINLOCKS bool +config PARAVIRT_TIME_ACCOUNTING + select PARAVIRT + bool + config PPC_SPLPAR bool "Support for shared-processor logical partitions" depends on PPC_PSERIES select PARAVIRT_SPINLOCKS if PPC_QUEUED_SPINLOCKS + select PARAVIRT_TIME_ACCOUNTING if VIRT_CPU_ACCOUNTING_GEN default y help Enabling this option will make the kernel run more efficiently diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c index e6c117fb6491..97ef6499e501 100644 --- a/arch/powerpc/platforms/pseries/lpar.c +++ b/arch/powerpc/platforms/pseries/lpar.c @@ -660,6 +660,17 @@ static int __init vcpudispatch_stats_procfs_init(void) } machine_device_initcall(pseries, vcpudispatch_stats_procfs_init); + +#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING +u64 pseries_paravirt_steal_clock(int cpu) +{ + struct lppaca *lppaca = &lppaca_of(cpu); + + return be64_to_cpu(READ_ONCE(lppaca->enqueue_dispatch_tb)) + + be64_to_cpu(READ_ONCE(lppaca->ready_enqueue_tb)); +} +#endif + #endif /* CONFIG_PPC_SPLPAR */ void vpa_init(int cpu) diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c index 489f4c4df468..5e44c65a032c 100644 --- a/arch/powerpc/platforms/pseries/setup.c +++ b/arch/powerpc/platforms/pseries/setup.c @@ -80,6 +80,20 @@ DEFINE_STATIC_KEY_FALSE(shared_processor); EXPORT_SYMBOL(shared_processor); +#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING +struct static_key paravirt_steal_enabled; +struct static_key paravirt_steal_rq_enabled; + +static bool steal_acc = true; +static int __init parse_no_stealacc(char *arg) +{ + steal_acc = false; + return 0; +} + +early_param("no-steal-acc", parse_no_stealacc); +#endif + int CMO_PrPSP = -1; int CMO_SecPSP = -1; unsigned long CMO_PageSize = (ASM_CONST(1) << IOMMU_PAGE_SHIFT_4K); @@ -834,6 +848,11 @@ static void __init pSeries_setup_arch(void) if (lppaca_shared_proc(get_lppaca())) { static_branch_enable(&shared_processor); pv_spinlocks_init(); +#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING + static_key_slow_inc(¶virt_steal_enabled); + if (steal_acc) + static_key_slow_inc(¶virt_steal_rq_enabled); +#endif } ppc_md.power_save = pseries_lpar_idle; From 02382aff72357727f9eee5107fd32c6cd070f1d6 Mon 
Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Fri, 2 Sep 2022 18:53:15 +1000 Subject: [PATCH 023/214] powerpc/64: Remove PPC64 special case for cputime accounting default Distro kernels tend to be moving to VIRT_CPU_ACCOUNTING_GEN, and there is not much reason why PPC64 should be special here. Remove the special case and make the ppc64 and pseries defconfigs use GEN accounting (others will use TICK, as-per Kconfig defaults). VIRT_CPU_ACCOUNTING_NATIVE does provide scaled vtime and stolen time apportioned between system and user time, and vtime accounting is not unconditionally enabled, and possibly other things. But it would be better at this point to extend GEN to cover important missing features rather than directing users back to a less used option. Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220902085316.2071519-4-npiggin@gmail.com --- arch/powerpc/configs/ppc64_defconfig | 2 ++ arch/powerpc/configs/pseries_defconfig | 2 ++ init/Kconfig | 3 +-- 3 files changed, 5 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig index c8b0e80d613b..6be0c43397b4 100644 --- a/arch/powerpc/configs/ppc64_defconfig +++ b/arch/powerpc/configs/ppc64_defconfig @@ -1,7 +1,9 @@ CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y +# CONFIG_CONTEXT_TRACKING_USER_FORCE is not set CONFIG_NO_HZ=y CONFIG_HIGH_RES_TIMERS=y +CONFIG_VIRT_CPU_ACCOUNTING_GEN=y CONFIG_TASKSTATS=y CONFIG_TASK_DELAY_ACCT=y CONFIG_IKCONFIG=y diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig index c05e37af9f1e..117643d20281 100644 --- a/arch/powerpc/configs/pseries_defconfig +++ b/arch/powerpc/configs/pseries_defconfig @@ -3,8 +3,10 @@ CONFIG_NR_CPUS=2048 CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y CONFIG_AUDIT=y +# CONFIG_CONTEXT_TRACKING_USER_FORCE is not set CONFIG_NO_HZ=y CONFIG_HIGH_RES_TIMERS=y +CONFIG_VIRT_CPU_ACCOUNTING_GEN=y CONFIG_TASKSTATS=y CONFIG_TASK_DELAY_ACCT=y CONFIG_TASK_XACCT=y diff --git a/init/Kconfig b/init/Kconfig index 532362fcfe31..94ce5a46a802 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -461,8 +461,7 @@ config VIRT_CPU_ACCOUNTING choice prompt "Cputime accounting" - default TICK_CPU_ACCOUNTING if !PPC64 - default VIRT_CPU_ACCOUNTING_NATIVE if PPC64 + default TICK_CPU_ACCOUNTING # Kind of a stub config for the pure tick based cputime accounting config TICK_CPU_ACCOUNTING From 6ba5aa541aaa079c0ca888f7fe564b2035d5ca0a Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Fri, 2 Sep 2022 18:53:16 +1000 Subject: [PATCH 024/214] powerpc/pseries: Move dtl scanning and steal time accounting to pseries platform dtl is the PAPR Dispatch Trace Log, which is entirely a pseries feature. The pseries platform alrady has a file dealing with the dtl, so move scanning for stolen time accounting there from kernel/time.c. 
Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220902085316.2071519-5-npiggin@gmail.com --- arch/powerpc/include/asm/cputime.h | 2 +- arch/powerpc/include/asm/dtl.h | 8 --- arch/powerpc/include/asm/time.h | 5 +- arch/powerpc/kernel/time.c | 92 ++-------------------------- arch/powerpc/platforms/pseries/dtl.c | 81 ++++++++++++++++++++++++ 5 files changed, 90 insertions(+), 98 deletions(-) diff --git a/arch/powerpc/include/asm/cputime.h b/arch/powerpc/include/asm/cputime.h index 6d2b27997492..431ae2343022 100644 --- a/arch/powerpc/include/asm/cputime.h +++ b/arch/powerpc/include/asm/cputime.h @@ -95,7 +95,7 @@ static notrace inline void account_stolen_time(void) struct lppaca *lp = local_paca->lppaca_ptr; if (unlikely(local_paca->dtl_ridx != be64_to_cpu(lp->dtl_idx))) - accumulate_stolen_time(); + pseries_accumulate_stolen_time(); } #endif } diff --git a/arch/powerpc/include/asm/dtl.h b/arch/powerpc/include/asm/dtl.h index 1625888f27ef..4bcb9f9ac764 100644 --- a/arch/powerpc/include/asm/dtl.h +++ b/arch/powerpc/include/asm/dtl.h @@ -37,14 +37,6 @@ struct dtl_entry { extern struct kmem_cache *dtl_cache; extern rwlock_t dtl_access_lock; -/* - * When CONFIG_VIRT_CPU_ACCOUNTING_NATIVE = y, the cpu accounting code controls - * reading from the dispatch trace log. If other code wants to consume - * DTL entries, it can set this pointer to a function that will get - * called once for each DTL entry that gets processed. - */ -extern void (*dtl_consumer)(struct dtl_entry *entry, u64 index); - extern void register_dtl_buffer(int cpu); extern void alloc_dtl_buffers(unsigned long *time_limit); extern long hcall_vphn(unsigned long cpu, u64 flags, __be32 *associativity); diff --git a/arch/powerpc/include/asm/time.h b/arch/powerpc/include/asm/time.h index 1e5643a9b1f2..9f50766c4623 100644 --- a/arch/powerpc/include/asm/time.h +++ b/arch/powerpc/include/asm/time.h @@ -116,8 +116,9 @@ unsigned long long tb_to_ns(unsigned long long tb_ticks); void timer_broadcast_interrupt(void); -/* SPLPAR */ -void accumulate_stolen_time(void); +/* SPLPAR and VIRT_CPU_ACCOUNTING_NATIVE */ +void pseries_accumulate_stolen_time(void); +u64 pseries_calculate_stolen_time(u64 stop_tb); #endif /* __KERNEL__ */ #endif /* __POWERPC_TIME_H */ diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index 587adcc12860..ae3e33b4ef95 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -178,92 +178,6 @@ static inline unsigned long read_spurr(unsigned long tb) return tb; } -#ifdef CONFIG_PPC_SPLPAR - -#include - -void (*dtl_consumer)(struct dtl_entry *, u64); - -/* - * Scan the dispatch trace log and count up the stolen time. - * Should be called with interrupts disabled. 
- */ -static u64 scan_dispatch_log(u64 stop_tb) -{ - u64 i = local_paca->dtl_ridx; - struct dtl_entry *dtl = local_paca->dtl_curr; - struct dtl_entry *dtl_end = local_paca->dispatch_log_end; - struct lppaca *vpa = local_paca->lppaca_ptr; - u64 tb_delta; - u64 stolen = 0; - u64 dtb; - - if (!dtl) - return 0; - - if (i == be64_to_cpu(vpa->dtl_idx)) - return 0; - while (i < be64_to_cpu(vpa->dtl_idx)) { - dtb = be64_to_cpu(dtl->timebase); - tb_delta = be32_to_cpu(dtl->enqueue_to_dispatch_time) + - be32_to_cpu(dtl->ready_to_enqueue_time); - barrier(); - if (i + N_DISPATCH_LOG < be64_to_cpu(vpa->dtl_idx)) { - /* buffer has overflowed */ - i = be64_to_cpu(vpa->dtl_idx) - N_DISPATCH_LOG; - dtl = local_paca->dispatch_log + (i % N_DISPATCH_LOG); - continue; - } - if (dtb > stop_tb) - break; - if (dtl_consumer) - dtl_consumer(dtl, i); - stolen += tb_delta; - ++i; - ++dtl; - if (dtl == dtl_end) - dtl = local_paca->dispatch_log; - } - local_paca->dtl_ridx = i; - local_paca->dtl_curr = dtl; - return stolen; -} - -/* - * Accumulate stolen time by scanning the dispatch trace log. - * Called on entry from user mode. - */ -void notrace accumulate_stolen_time(void) -{ - u64 sst, ust; - struct cpu_accounting_data *acct = &local_paca->accounting; - - sst = scan_dispatch_log(acct->starttime_user); - ust = scan_dispatch_log(acct->starttime); - acct->stime -= sst; - acct->utime -= ust; - acct->steal_time += ust + sst; -} - -static inline u64 calculate_stolen_time(u64 stop_tb) -{ - if (!firmware_has_feature(FW_FEATURE_SPLPAR)) - return 0; - - if (get_paca()->dtl_ridx != be64_to_cpu(get_lppaca()->dtl_idx)) - return scan_dispatch_log(stop_tb); - - return 0; -} - -#else /* CONFIG_PPC_SPLPAR */ -static inline u64 calculate_stolen_time(u64 stop_tb) -{ - return 0; -} - -#endif /* CONFIG_PPC_SPLPAR */ - /* * Account time for a transition between system, hard irq * or soft irq state. @@ -322,7 +236,11 @@ static unsigned long vtime_delta(struct cpu_accounting_data *acct, *stime_scaled = vtime_delta_scaled(acct, now, stime); - *steal_time = calculate_stolen_time(now); + if (IS_ENABLED(CONFIG_PPC_SPLPAR) && + firmware_has_feature(FW_FEATURE_SPLPAR)) + *steal_time = pseries_calculate_stolen_time(now); + else + *steal_time = 0; return stime; } diff --git a/arch/powerpc/platforms/pseries/dtl.c b/arch/powerpc/platforms/pseries/dtl.c index 352af5b14a0f..1b1977bc78e7 100644 --- a/arch/powerpc/platforms/pseries/dtl.c +++ b/arch/powerpc/platforms/pseries/dtl.c @@ -37,6 +37,15 @@ static u8 dtl_event_mask = DTL_LOG_ALL; static int dtl_buf_entries = N_DISPATCH_LOG; #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE + +/* + * When CONFIG_VIRT_CPU_ACCOUNTING_NATIVE = y, the cpu accounting code controls + * reading from the dispatch trace log. If other code wants to consume + * DTL entries, it can set this pointer to a function that will get + * called once for each DTL entry that gets processed. + */ +static void (*dtl_consumer)(struct dtl_entry *entry, u64 index); + struct dtl_ring { u64 write_index; struct dtl_entry *write_ptr; @@ -48,6 +57,78 @@ static DEFINE_PER_CPU(struct dtl_ring, dtl_rings); static atomic_t dtl_count; +/* + * Scan the dispatch trace log and count up the stolen time. + * Should be called with interrupts disabled. 
+ */ +static notrace u64 scan_dispatch_log(u64 stop_tb) +{ + u64 i = local_paca->dtl_ridx; + struct dtl_entry *dtl = local_paca->dtl_curr; + struct dtl_entry *dtl_end = local_paca->dispatch_log_end; + struct lppaca *vpa = local_paca->lppaca_ptr; + u64 tb_delta; + u64 stolen = 0; + u64 dtb; + + if (!dtl) + return 0; + + if (i == be64_to_cpu(vpa->dtl_idx)) + return 0; + while (i < be64_to_cpu(vpa->dtl_idx)) { + dtb = be64_to_cpu(dtl->timebase); + tb_delta = be32_to_cpu(dtl->enqueue_to_dispatch_time) + + be32_to_cpu(dtl->ready_to_enqueue_time); + barrier(); + if (i + N_DISPATCH_LOG < be64_to_cpu(vpa->dtl_idx)) { + /* buffer has overflowed */ + i = be64_to_cpu(vpa->dtl_idx) - N_DISPATCH_LOG; + dtl = local_paca->dispatch_log + (i % N_DISPATCH_LOG); + continue; + } + if (dtb > stop_tb) + break; + if (dtl_consumer) + dtl_consumer(dtl, i); + stolen += tb_delta; + ++i; + ++dtl; + if (dtl == dtl_end) + dtl = local_paca->dispatch_log; + } + local_paca->dtl_ridx = i; + local_paca->dtl_curr = dtl; + return stolen; +} + +/* + * Accumulate stolen time by scanning the dispatch trace log. + * Called on entry from user mode. + */ +void notrace pseries_accumulate_stolen_time(void) +{ + u64 sst, ust; + struct cpu_accounting_data *acct = &local_paca->accounting; + + sst = scan_dispatch_log(acct->starttime_user); + ust = scan_dispatch_log(acct->starttime); + acct->stime -= sst; + acct->utime -= ust; + acct->steal_time += ust + sst; +} + +u64 pseries_calculate_stolen_time(u64 stop_tb) +{ + if (!firmware_has_feature(FW_FEATURE_SPLPAR)) + return 0; + + if (get_paca()->dtl_ridx != be64_to_cpu(get_lppaca()->dtl_idx)) + return scan_dispatch_log(stop_tb); + + return 0; +} + /* * The cpu accounting code controls the DTL ring buffer, and we get * given entries as they are processed. From cfe0d370e0788625ce0df3239aad07a2506c1796 Mon Sep 17 00:00:00 2001 From: Nathan Chancellor Date: Fri, 2 Sep 2022 18:00:08 +0200 Subject: [PATCH 025/214] powerpc/math_emu/efp: Include module.h When building with a recent version of clang, there are a couple of errors around the call to module_init(): arch/powerpc/math-emu/math_efp.c:927:1: error: type specifier missing, defaults to 'int'; ISO C99 and later do not support implicit int [-Wimplicit-int] module_init(spe_mathemu_init); ^ int arch/powerpc/math-emu/math_efp.c:927:13: error: a parameter list without types is only allowed in a function definition module_init(spe_mathemu_init); ^ 2 errors generated. module_init() is a macro, which is not getting expanded because module.h is not included in this file. Add the include so that the macro can expand properly, clearing up the build failure. 
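For illustration, the dependency the fix restores can be seen in a minimal, hypothetical module skeleton (not the math_efp code itself): module_init() and module_exit() are macros provided by <linux/module.h>, so a file that invokes them without that include hits exactly the implicit-int errors quoted above.

#include <linux/init.h>
#include <linux/module.h>

static int __init example_init(void)
{
        return 0;
}

static void __exit example_exit(void)
{
}

/* These only expand correctly once <linux/module.h> has been included. */
module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");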
Fixes: ac6f120369ff ("powerpc/85xx: Workaroudn e500 CPU erratum A005") [chleroy: added fixes tag] Reported-by: kernel test robot Signed-off-by: Nathan Chancellor Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Reviewed-by: Christophe Leroy Link: https://lore.kernel.org/r/8403854a4c187459b2f4da3537f51227b70b9223.1662134272.git.christophe.leroy@csgroup.eu --- arch/powerpc/math-emu/math_efp.c | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/powerpc/math-emu/math_efp.c b/arch/powerpc/math-emu/math_efp.c index 39b84e7452e1..aa3bb8da1cb9 100644 --- a/arch/powerpc/math-emu/math_efp.c +++ b/arch/powerpc/math-emu/math_efp.c @@ -17,6 +17,7 @@ #include #include +#include #include #include From 7245fc5bb7a966852d5bd7779d1f5855530b461a Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Fri, 2 Sep 2022 18:00:09 +0200 Subject: [PATCH 026/214] powerpc/math-emu: Remove -w build flag and fix warnings As reported by Nathan, the module_init() macro was not taken into account because the header was missing. That means spe_mathemu_init() was never called. This should have been detected by gcc at build time, but due to '-w' flag it went undetected. Removing that flag leads to many warnings hence errors. Fix those warnings then remove the -w flag. Reported-by: Nathan Chancellor Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Reviewed-by: Nathan Chancellor Link: https://lore.kernel.org/r/2663961738a46073713786d4efeb53100ca156e7.1662134272.git.christophe.leroy@csgroup.eu --- arch/powerpc/math-emu/Makefile | 2 -- arch/powerpc/math-emu/math.c | 18 +++++----- arch/powerpc/math-emu/math_efp.c | 57 +++++++++++++++++--------------- include/math-emu/op-common.h | 3 ++ 4 files changed, 42 insertions(+), 38 deletions(-) diff --git a/arch/powerpc/math-emu/Makefile b/arch/powerpc/math-emu/Makefile index a8794032f15f..26fef2e5672e 100644 --- a/arch/powerpc/math-emu/Makefile +++ b/arch/powerpc/math-emu/Makefile @@ -16,5 +16,3 @@ obj-$(CONFIG_SPE) += math_efp.o CFLAGS_fabs.o = -fno-builtin-fabs CFLAGS_math.o = -fno-builtin-fabs - -ccflags-y = -w diff --git a/arch/powerpc/math-emu/math.c b/arch/powerpc/math-emu/math.c index 36761bd00f38..936a9a149037 100644 --- a/arch/powerpc/math-emu/math.c +++ b/arch/powerpc/math-emu/math.c @@ -24,9 +24,9 @@ FLOATFUNC(mtfsf); FLOATFUNC(mtfsfi); #ifdef CONFIG_MATH_EMULATION_HW_UNIMPLEMENTED -#undef FLOATFUNC(x) +#undef FLOATFUNC #define FLOATFUNC(x) static inline int x(void *op1, void *op2, void *op3, \ - void *op4) { } + void *op4) { return 0; } #endif FLOATFUNC(fadd); @@ -396,28 +396,28 @@ do_mathemu(struct pt_regs *regs) case XCR: op0 = (void *)®s->ccr; - op1 = (void *)((insn >> 23) & 0x7); + op1 = (void *)(long)((insn >> 23) & 0x7); op2 = (void *)¤t->thread.TS_FPR((insn >> 16) & 0x1f); op3 = (void *)¤t->thread.TS_FPR((insn >> 11) & 0x1f); break; case XCRL: op0 = (void *)®s->ccr; - op1 = (void *)((insn >> 23) & 0x7); - op2 = (void *)((insn >> 18) & 0x7); + op1 = (void *)(long)((insn >> 23) & 0x7); + op2 = (void *)(long)((insn >> 18) & 0x7); break; case XCRB: - op0 = (void *)((insn >> 21) & 0x1f); + op0 = (void *)(long)((insn >> 21) & 0x1f); break; case XCRI: - op0 = (void *)((insn >> 23) & 0x7); - op1 = (void *)((insn >> 12) & 0xf); + op0 = (void *)(long)((insn >> 23) & 0x7); + op1 = (void *)(long)((insn >> 12) & 0xf); break; case XFLB: - op0 = (void *)((insn >> 17) & 0xff); + op0 = (void *)(long)((insn >> 17) & 0xff); op1 = (void *)¤t->thread.TS_FPR((insn >> 11) & 0x1f); break; diff --git a/arch/powerpc/math-emu/math_efp.c 
b/arch/powerpc/math-emu/math_efp.c index aa3bb8da1cb9..f01e3475f689 100644 --- a/arch/powerpc/math-emu/math_efp.c +++ b/arch/powerpc/math-emu/math_efp.c @@ -219,6 +219,7 @@ int do_spe_mathemu(struct pt_regs *regs) case AB: case XCR: FP_UNPACK_SP(SA, va.wp + 1); + fallthrough; case XB: FP_UNPACK_SP(SB, vb.wp + 1); break; @@ -227,8 +228,8 @@ int do_spe_mathemu(struct pt_regs *regs) break; } - pr_debug("SA: %ld %08lx %ld (%ld)\n", SA_s, SA_f, SA_e, SA_c); - pr_debug("SB: %ld %08lx %ld (%ld)\n", SB_s, SB_f, SB_e, SB_c); + pr_debug("SA: %d %08x %d (%d)\n", SA_s, SA_f, SA_e, SA_c); + pr_debug("SB: %d %08x %d (%d)\n", SB_s, SB_f, SB_e, SB_c); switch (func) { case EFSABS: @@ -279,7 +280,7 @@ int do_spe_mathemu(struct pt_regs *regs) } else { SB_e += (func == EFSCTSF ? 31 : 32); FP_TO_INT_ROUND_S(vc.wp[1], SB, 32, - (func == EFSCTSF)); + (func == EFSCTSF) ? 1 : 0); } goto update_regs; @@ -288,7 +289,7 @@ int do_spe_mathemu(struct pt_regs *regs) FP_CLEAR_EXCEPTIONS; FP_UNPACK_DP(DB, vb.dp); - pr_debug("DB: %ld %08lx %08lx %ld (%ld)\n", + pr_debug("DB: %d %08x %08x %d (%d)\n", DB_s, DB_f1, DB_f0, DB_e, DB_c); FP_CONV(S, D, 1, 2, SR, DB); @@ -302,7 +303,7 @@ int do_spe_mathemu(struct pt_regs *regs) FP_SET_EXCEPTION(FP_EX_INVALID); } else { FP_TO_INT_ROUND_S(vc.wp[1], SB, 32, - ((func & 0x3) != 0)); + ((func & 0x3) != 0) ? 1 : 0); } goto update_regs; @@ -313,7 +314,7 @@ int do_spe_mathemu(struct pt_regs *regs) FP_SET_EXCEPTION(FP_EX_INVALID); } else { FP_TO_INT_S(vc.wp[1], SB, 32, - ((func & 0x3) != 0)); + ((func & 0x3) != 0) ? 1 : 0); } goto update_regs; @@ -323,7 +324,7 @@ int do_spe_mathemu(struct pt_regs *regs) break; pack_s: - pr_debug("SR: %ld %08lx %ld (%ld)\n", SR_s, SR_f, SR_e, SR_c); + pr_debug("SR: %d %08x %d (%d)\n", SR_s, SR_f, SR_e, SR_c); FP_PACK_SP(vc.wp + 1, SR); goto update_regs; @@ -347,6 +348,7 @@ cmp_s: case AB: case XCR: FP_UNPACK_DP(DA, va.dp); + fallthrough; case XB: FP_UNPACK_DP(DB, vb.dp); break; @@ -355,9 +357,9 @@ cmp_s: break; } - pr_debug("DA: %ld %08lx %08lx %ld (%ld)\n", + pr_debug("DA: %d %08x %08x %d (%d)\n", DA_s, DA_f1, DA_f0, DA_e, DA_c); - pr_debug("DB: %ld %08lx %08lx %ld (%ld)\n", + pr_debug("DB: %d %08x %08x %d (%d)\n", DB_s, DB_f1, DB_f0, DB_e, DB_c); switch (func) { @@ -409,7 +411,7 @@ cmp_s: } else { DB_e += (func == EFDCTSF ? 31 : 32); FP_TO_INT_ROUND_D(vc.wp[1], DB, 32, - (func == EFDCTSF)); + (func == EFDCTSF) ? 1 : 0); } goto update_regs; @@ -418,7 +420,7 @@ cmp_s: FP_CLEAR_EXCEPTIONS; FP_UNPACK_SP(SB, vb.wp + 1); - pr_debug("SB: %ld %08lx %ld (%ld)\n", + pr_debug("SB: %d %08x %d (%d)\n", SB_s, SB_f, SB_e, SB_c); FP_CONV(D, S, 2, 1, DR, SB); @@ -432,7 +434,7 @@ cmp_s: FP_SET_EXCEPTION(FP_EX_INVALID); } else { FP_TO_INT_D(vc.dp[0], DB, 64, - ((func & 0x1) == 0)); + ((func & 0x1) == 0) ? 1 : 0); } goto update_regs; @@ -443,7 +445,7 @@ cmp_s: FP_SET_EXCEPTION(FP_EX_INVALID); } else { FP_TO_INT_ROUND_D(vc.wp[1], DB, 32, - ((func & 0x3) != 0)); + ((func & 0x3) != 0) ? 1 : 0); } goto update_regs; @@ -454,7 +456,7 @@ cmp_s: FP_SET_EXCEPTION(FP_EX_INVALID); } else { FP_TO_INT_D(vc.wp[1], DB, 32, - ((func & 0x3) != 0)); + ((func & 0x3) != 0) ? 
1 : 0); } goto update_regs; @@ -464,7 +466,7 @@ cmp_s: break; pack_d: - pr_debug("DR: %ld %08lx %08lx %ld (%ld)\n", + pr_debug("DR: %d %08x %08x %d (%d)\n", DR_s, DR_f1, DR_f0, DR_e, DR_c); FP_PACK_DP(vc.dp, DR); @@ -493,6 +495,7 @@ cmp_d: case XCR: FP_UNPACK_SP(SA0, va.wp); FP_UNPACK_SP(SA1, va.wp + 1); + fallthrough; case XB: FP_UNPACK_SP(SB0, vb.wp); FP_UNPACK_SP(SB1, vb.wp + 1); @@ -503,13 +506,13 @@ cmp_d: break; } - pr_debug("SA0: %ld %08lx %ld (%ld)\n", + pr_debug("SA0: %d %08x %d (%d)\n", SA0_s, SA0_f, SA0_e, SA0_c); - pr_debug("SA1: %ld %08lx %ld (%ld)\n", + pr_debug("SA1: %d %08x %d (%d)\n", SA1_s, SA1_f, SA1_e, SA1_c); - pr_debug("SB0: %ld %08lx %ld (%ld)\n", + pr_debug("SB0: %d %08x %d (%d)\n", SB0_s, SB0_f, SB0_e, SB0_c); - pr_debug("SB1: %ld %08lx %ld (%ld)\n", + pr_debug("SB1: %d %08x %d (%d)\n", SB1_s, SB1_f, SB1_e, SB1_c); switch (func) { @@ -568,7 +571,7 @@ cmp_d: } else { SB0_e += (func == EVFSCTSF ? 31 : 32); FP_TO_INT_ROUND_S(vc.wp[0], SB0, 32, - (func == EVFSCTSF)); + (func == EVFSCTSF) ? 1 : 0); } if (SB1_c == FP_CLS_NAN) { vc.wp[1] = 0; @@ -576,7 +579,7 @@ cmp_d: } else { SB1_e += (func == EVFSCTSF ? 31 : 32); FP_TO_INT_ROUND_S(vc.wp[1], SB1, 32, - (func == EVFSCTSF)); + (func == EVFSCTSF) ? 1 : 0); } goto update_regs; @@ -587,14 +590,14 @@ cmp_d: FP_SET_EXCEPTION(FP_EX_INVALID); } else { FP_TO_INT_ROUND_S(vc.wp[0], SB0, 32, - ((func & 0x3) != 0)); + ((func & 0x3) != 0) ? 1 : 0); } if (SB1_c == FP_CLS_NAN) { vc.wp[1] = 0; FP_SET_EXCEPTION(FP_EX_INVALID); } else { FP_TO_INT_ROUND_S(vc.wp[1], SB1, 32, - ((func & 0x3) != 0)); + ((func & 0x3) != 0) ? 1 : 0); } goto update_regs; @@ -605,14 +608,14 @@ cmp_d: FP_SET_EXCEPTION(FP_EX_INVALID); } else { FP_TO_INT_S(vc.wp[0], SB0, 32, - ((func & 0x3) != 0)); + ((func & 0x3) != 0) ? 1 : 0); } if (SB1_c == FP_CLS_NAN) { vc.wp[1] = 0; FP_SET_EXCEPTION(FP_EX_INVALID); } else { FP_TO_INT_S(vc.wp[1], SB1, 32, - ((func & 0x3) != 0)); + ((func & 0x3) != 0) ? 
1 : 0); } goto update_regs; @@ -622,9 +625,9 @@ cmp_d: break; pack_vs: - pr_debug("SR0: %ld %08lx %ld (%ld)\n", + pr_debug("SR0: %d %08x %d (%d)\n", SR0_s, SR0_f, SR0_e, SR0_c); - pr_debug("SR1: %ld %08lx %ld (%ld)\n", + pr_debug("SR1: %d %08x %d (%d)\n", SR1_s, SR1_f, SR1_e, SR1_c); FP_PACK_SP(vc.wp, SR0); diff --git a/include/math-emu/op-common.h b/include/math-emu/op-common.h index 4b57bbba588a..8ce066c035cf 100644 --- a/include/math-emu/op-common.h +++ b/include/math-emu/op-common.h @@ -662,12 +662,14 @@ do { \ if (X##_e < 0) \ { \ FP_SET_EXCEPTION(FP_EX_INEXACT); \ + fallthrough; \ case FP_CLS_ZERO: \ r = 0; \ } \ else if (X##_e >= rsize - (rsigned > 0 || X##_s) \ || (!rsigned && X##_s)) \ { /* overflow */ \ + fallthrough; \ case FP_CLS_NAN: \ case FP_CLS_INF: \ if (rsigned == 2) \ @@ -767,6 +769,7 @@ do { \ if (X##_e >= rsize - (rsigned > 0 || X##_s) \ || (!rsigned && X##_s)) \ { /* overflow */ \ + fallthrough; \ case FP_CLS_NAN: \ case FP_CLS_INF: \ if (!rsigned) \ From 1d53c0192b15f42129a3dbbbfa05637bcf781a3e Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Fri, 2 Sep 2022 17:25:24 +0200 Subject: [PATCH 027/214] powerpc/vdso: link with -z noexecstack With recent binutils, the following warning appears: VDSO32L arch/powerpc/kernel/vdso/vdso32.so.dbg /opt/gcc-12.2.0-nolibc/powerpc64-linux/bin/../lib/gcc/powerpc64-linux/12.2.0/../../../../powerpc64-linux/bin/ld: warning: arch/powerpc/kernel/vdso/getcpu-32.o: missing .note.GNU-stack section implies executable stack /opt/gcc-12.2.0-nolibc/powerpc64-linux/bin/../lib/gcc/powerpc64-linux/12.2.0/../../../../powerpc64-linux/bin/ld: NOTE: This behaviour is deprecated and will be removed in a future version of the linker To avoid that, explicitly tell the linker we don't want executable stack. For more explanations, see commit ffcf9c5700e4 ("x86: link vdso and boot with -z noexecstack --no-warn-rwx-segments") Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/b95f2e3216a574837dd61208444e9515c3423da4.1662132312.git.christophe.leroy@csgroup.eu --- arch/powerpc/kernel/vdso/Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/kernel/vdso/Makefile b/arch/powerpc/kernel/vdso/Makefile index 096b0bf1335f..a2e7b0ce5b19 100644 --- a/arch/powerpc/kernel/vdso/Makefile +++ b/arch/powerpc/kernel/vdso/Makefile @@ -92,13 +92,13 @@ include/generated/vdso64-offsets.h: $(obj)/vdso64.so.dbg FORCE # actual build commands quiet_cmd_vdso32ld_and_check = VDSO32L $@ - cmd_vdso32ld_and_check = $(VDSOCC) $(c_flags) $(CC32FLAGS) -o $@ -Wl,-T$(filter %.lds,$^) $(filter %.o,$^) ; $(cmd_vdso_check) + cmd_vdso32ld_and_check = $(VDSOCC) $(c_flags) $(CC32FLAGS) -o $@ -Wl,-T$(filter %.lds,$^) $(filter %.o,$^) -z noexecstack ; $(cmd_vdso_check) quiet_cmd_vdso32as = VDSO32A $@ cmd_vdso32as = $(VDSOCC) $(a_flags) $(CC32FLAGS) $(AS32FLAGS) -c -o $@ $< quiet_cmd_vdso32cc = VDSO32C $@ cmd_vdso32cc = $(VDSOCC) $(c_flags) $(CC32FLAGS) -c -o $@ $< quiet_cmd_vdso64ld_and_check = VDSO64L $@ - cmd_vdso64ld_and_check = $(VDSOCC) $(c_flags) $(CC64FLAGS) -o $@ -Wl,-T$(filter %.lds,$^) $(filter %.o,$^) ; $(cmd_vdso_check) + cmd_vdso64ld_and_check = $(VDSOCC) $(c_flags) $(CC64FLAGS) -o $@ -Wl,-T$(filter %.lds,$^) $(filter %.o,$^) -z noexecstack ; $(cmd_vdso_check) quiet_cmd_vdso64as = VDSO64A $@ cmd_vdso64as = $(VDSOCC) $(a_flags) $(CC64FLAGS) $(AS64FLAGS) -c -o $@ $< From 06f48f5cb5df2299f5e4b42e9dda1858bf172bcb Mon Sep 17 00:00:00 2001 From: Liang He Date: Wed, 15 Jun 2022 22:37:03 +0800 Subject: [PATCH 
028/214] powerpc/512x: Add missing of_node_put() in mpc5121_clk_init() In mpc5121_clk_init(), of_find_compatible_node() will return a node pointer with refcount incremented. The reference should be dropped with of_node_put() when it is not used anymore. Signed-off-by: Liang He [mpe: of_clk_add_provider() will take its own reference.] Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220615143703.3968898-1-windhl@126.com --- arch/powerpc/platforms/512x/clock-commonclk.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/powerpc/platforms/512x/clock-commonclk.c b/arch/powerpc/platforms/512x/clock-commonclk.c index 0652c7e69225..ca475462e95b 100644 --- a/arch/powerpc/platforms/512x/clock-commonclk.c +++ b/arch/powerpc/platforms/512x/clock-commonclk.c @@ -1208,6 +1208,8 @@ int __init mpc5121_clk_init(void) /* register as an OF clock provider */ mpc5121_clk_register_of_provider(clk_np); + of_node_put(clk_np); + /* * unbreak not yet adjusted peripheral drivers during migration * towards fully operational common clock support, and allow From 64e696af167f612cd1ecba89edfeb2353ca59947 Mon Sep 17 00:00:00 2001 From: Liang He Date: Thu, 16 Jun 2022 21:29:22 +0800 Subject: [PATCH 029/214] powerpc/85xx: Add missing of_node_put() in ksi8560.c In ksi8560_setup_arch(), of_find_compatible_node() will return a node pointer with refcount incremented. The reference should be dropped with of_node_put() when it is not used anymore. Signed-off-by: Liang He Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220616132922.3987053-1-windhl@126.com --- arch/powerpc/platforms/85xx/ksi8560.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/powerpc/platforms/85xx/ksi8560.c b/arch/powerpc/platforms/85xx/ksi8560.c index bdf9d42f8521..a22f02b0fc77 100644 --- a/arch/powerpc/platforms/85xx/ksi8560.c +++ b/arch/powerpc/platforms/85xx/ksi8560.c @@ -133,6 +133,8 @@ static void __init ksi8560_setup_arch(void) else printk(KERN_ERR "Can't find CPLD in device tree\n"); + of_node_put(cpld); + if (ppc_md.progress) ppc_md.progress("ksi8560_setup_arch()", 0); From 593d7b89c6a2bf7aea2324c94f10ef3c21308418 Mon Sep 17 00:00:00 2001 From: Liang He Date: Thu, 16 Jun 2022 22:40:07 +0800 Subject: [PATCH 030/214] powerpc/52xx: Add missing of_node_put() in media5200.c In media5200_init_irq(), of_find_compatible_node() will return a node pointer with refcount incremented. The reference should be dropped with of_node_put() in the failure path or when it is not used anymore. Don't worry about 'fpga_np == NULL' as of_node_put() can correctly handle that. 
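The rule behind these of_node_put() fixes, sketched as a generic, made-up init function rather than any of the platform files touched here: every of_find_*() helper returns a device node with its refcount raised, and the caller owns that reference until it drops it with of_node_put().

#include <linux/errno.h>
#include <linux/of.h>

static int example_find_and_use(void)
{
        struct device_node *np;

        /* "vendor,example" is a made-up compatible string for illustration. */
        np = of_find_compatible_node(NULL, NULL, "vendor,example");
        if (!np)
                return -ENODEV;

        /* ... use np: read properties, map registers, register devices ... */

        of_node_put(np);        /* balance the reference taken by of_find_compatible_node() */
        return 0;
}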
Signed-off-by: Liang He Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220616144007.3987743-1-windhl@126.com --- arch/powerpc/platforms/52xx/media5200.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/arch/powerpc/platforms/52xx/media5200.c b/arch/powerpc/platforms/52xx/media5200.c index ee367ff3ec8a..33a35fff11b5 100644 --- a/arch/powerpc/platforms/52xx/media5200.c +++ b/arch/powerpc/platforms/52xx/media5200.c @@ -174,6 +174,8 @@ static void __init media5200_init_irq(void) goto out; pr_debug("%s: allocated irqhost\n", __func__); + of_node_put(fpga_np); + irq_set_handler_data(cascade_virq, &media5200_irq); irq_set_chained_handler(cascade_virq, media5200_irq_cascade); @@ -181,6 +183,7 @@ static void __init media5200_init_irq(void) out: pr_err("Could not find Media5200 FPGA; PCI interrupts will not work\n"); + of_node_put(fpga_np); } /* From 14b9e26c6c9a845c005087b9653459623a7d029b Mon Sep 17 00:00:00 2001 From: Liang He Date: Fri, 17 Jun 2022 18:50:11 +0800 Subject: [PATCH 031/214] powerpc/85xx: Add missing of_node_put() in sgy_cst1000 In gpio_halt_probe(), of_find_matching_node() will return a node pointer with refcount incremented. The reference should be dropped with of_node_put() in the failure path. Signed-off-by: Liang He Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220617105011.4041123-1-windhl@126.com --- arch/powerpc/platforms/85xx/sgy_cts1000.c | 35 ++++++++++++++--------- 1 file changed, 22 insertions(+), 13 deletions(-) diff --git a/arch/powerpc/platforms/85xx/sgy_cts1000.c b/arch/powerpc/platforms/85xx/sgy_cts1000.c index 98ae64075193..e14d1b74d4e4 100644 --- a/arch/powerpc/platforms/85xx/sgy_cts1000.c +++ b/arch/powerpc/platforms/85xx/sgy_cts1000.c @@ -71,6 +71,7 @@ static int gpio_halt_probe(struct platform_device *pdev) { enum of_gpio_flags flags; struct device_node *node = pdev->dev.of_node; + struct device_node *child_node; int gpio, err, irq; int trigger; @@ -78,26 +79,29 @@ static int gpio_halt_probe(struct platform_device *pdev) return -ENODEV; /* If there's no matching child, this isn't really an error */ - halt_node = of_find_matching_node(node, child_match); - if (!halt_node) + child_node = of_find_matching_node(node, child_match); + if (!child_node) return 0; /* Technically we could just read the first one, but punish * DT writers for invalid form. */ - if (of_gpio_count(halt_node) != 1) - return -EINVAL; + if (of_gpio_count(child_node) != 1) { + err = -EINVAL; + goto err_put; + } /* Get the gpio number relative to the dynamic base. 
*/ - gpio = of_get_gpio_flags(halt_node, 0, &flags); - if (!gpio_is_valid(gpio)) - return -EINVAL; + gpio = of_get_gpio_flags(child_node, 0, &flags); + if (!gpio_is_valid(gpio)) { + err = -EINVAL; + goto err_put; + } err = gpio_request(gpio, "gpio-halt"); if (err) { printk(KERN_ERR "gpio-halt: error requesting GPIO %d.\n", gpio); - halt_node = NULL; - return err; + goto err_put; } trigger = (flags == OF_GPIO_ACTIVE_LOW); @@ -105,15 +109,14 @@ static int gpio_halt_probe(struct platform_device *pdev) gpio_direction_output(gpio, !trigger); /* Now get the IRQ which tells us when the power button is hit */ - irq = irq_of_parse_and_map(halt_node, 0); + irq = irq_of_parse_and_map(child_node, 0); err = request_irq(irq, gpio_halt_irq, IRQF_TRIGGER_RISING | - IRQF_TRIGGER_FALLING, "gpio-halt", halt_node); + IRQF_TRIGGER_FALLING, "gpio-halt", child_node); if (err) { printk(KERN_ERR "gpio-halt: error requesting IRQ %d for " "GPIO %d.\n", irq, gpio); gpio_free(gpio); - halt_node = NULL; - return err; + goto err_put; } /* Register our halt function */ @@ -123,7 +126,12 @@ static int gpio_halt_probe(struct platform_device *pdev) printk(KERN_INFO "gpio-halt: registered GPIO %d (%d trigger, %d" " irq).\n", gpio, trigger, irq); + halt_node = child_node; return 0; + +err_put: + of_node_put(child_node); + return err; } static int gpio_halt_remove(struct platform_device *pdev) @@ -139,6 +147,7 @@ static int gpio_halt_remove(struct platform_device *pdev) gpio_free(gpio); + of_node_put(halt_node); halt_node = NULL; } From 23b1481898ee8704394cead67eae2634003f7ca8 Mon Sep 17 00:00:00 2001 From: Liang He Date: Fri, 17 Jun 2022 20:40:45 +0800 Subject: [PATCH 032/214] powerpc/maple: Add missing of_node_put() in time.c In maple_get_boot_time(), of_find_compatible_node() will return a node pointer with refcount incremented. The reference should be dropped with of_node_put() when it is not used anymore. Signed-off-by: Liang He Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220617124045.4048757-1-windhl@126.com --- arch/powerpc/platforms/maple/time.c | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/powerpc/platforms/maple/time.c b/arch/powerpc/platforms/maple/time.c index 823e219ef8ee..91606411d2e0 100644 --- a/arch/powerpc/platforms/maple/time.c +++ b/arch/powerpc/platforms/maple/time.c @@ -153,6 +153,7 @@ time64_t __init maple_get_boot_time(void) maple_rtc_addr); } bail: + of_node_put(rtcs); if (maple_rtc_addr == 0) { maple_rtc_addr = RTC_PORT(0); /* legacy address */ printk(KERN_INFO "Maple: No device node for RTC, assuming " From edc17890ae8ee475b566079bea2e9ba83fec021d Mon Sep 17 00:00:00 2001 From: Liang He Date: Sat, 18 Jun 2022 10:49:30 +0800 Subject: [PATCH 033/214] powerpc/8xx: Add missing of_node_put() in tqm8xx_setup.c In init_ioports(), of_find_node_by_name() will return a node pointer with refcount incremented. The reference should be dropped with of_node_put() when it is not used anymore. 
Signed-off-by: Liang He Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220618024930.4056825-1-windhl@126.com --- arch/powerpc/platforms/8xx/tqm8xx_setup.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/arch/powerpc/platforms/8xx/tqm8xx_setup.c b/arch/powerpc/platforms/8xx/tqm8xx_setup.c index 3725d51248df..ffcfd17a5fa3 100644 --- a/arch/powerpc/platforms/8xx/tqm8xx_setup.c +++ b/arch/powerpc/platforms/8xx/tqm8xx_setup.c @@ -105,6 +105,9 @@ static void __init init_ioports(void) if (dnode == NULL) return; prop = of_find_property(dnode, "ethernet1", &len); + + of_node_put(dnode); + if (prop == NULL) return; From 6b2d17d514b105ecf486bdf011c444978e633085 Mon Sep 17 00:00:00 2001 From: Liang He Date: Sat, 18 Jun 2022 12:10:42 +0800 Subject: [PATCH 034/214] powerpc/embedded6xx: Add missing of_node_put()s Add missing of_node_put()s in various paths. Signed-off-by: Liang He [mpe: Rewrite change log] Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220618041042.4058066-1-windhl@126.com --- arch/powerpc/platforms/embedded6xx/holly.c | 6 ++++++ arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c | 3 +++ 2 files changed, 9 insertions(+) diff --git a/arch/powerpc/platforms/embedded6xx/holly.c b/arch/powerpc/platforms/embedded6xx/holly.c index 78f2378d9223..bebc5a972694 100644 --- a/arch/powerpc/platforms/embedded6xx/holly.c +++ b/arch/powerpc/platforms/embedded6xx/holly.c @@ -123,6 +123,8 @@ static void __init holly_init_pci(void) if (np) tsi108_setup_pci(np, HOLLY_PCI_CFG_PHYS, 1); + of_node_put(np); + ppc_md.pci_exclude_device = holly_exclude_device; if (ppc_md.progress) ppc_md.progress("tsi108: resources set", 0x100); @@ -184,6 +186,9 @@ static void __init holly_init_IRQ(void) tsi108_pci_int_init(cascade_node); irq_set_handler_data(cascade_pci_irq, mpic); irq_set_chained_handler(cascade_pci_irq, tsi108_irq_cascade); + + of_node_put(tsi_pci); + of_node_put(cascade_node); #endif /* Configure MPIC outputs to CPU0 */ tsi108_write_reg(TSI108_MPIC_OFFSET + 0x30c, 0); @@ -210,6 +215,7 @@ static void __noreturn holly_restart(char *cmd) if (bridge) { prop = of_get_property(bridge, "reg", &size); addr = of_translate_address(bridge, prop); + of_node_put(bridge); } addr += (TSI108_PB_OFFSET + 0x414); diff --git a/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c b/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c index 8b2b42210356..ddf0c652af80 100644 --- a/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c +++ b/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c @@ -135,6 +135,9 @@ static void __init mpc7448_hpc2_init_IRQ(void) tsi108_pci_int_init(cascade_node); irq_set_handler_data(cascade_pci_irq, mpic); irq_set_chained_handler(cascade_pci_irq, tsi108_irq_cascade); + + of_node_put(tsi_pci); + of_node_put(cascade_node); #endif /* Configure MPIC outputs to CPU0 */ tsi108_write_reg(TSI108_MPIC_OFFSET + 0x30c, 0); From 0dd8d2c8066e672244975c171816fdd9dae87721 Mon Sep 17 00:00:00 2001 From: Liang He Date: Sat, 18 Jun 2022 15:13:53 +0800 Subject: [PATCH 035/214] powerpc/perf: Add missing of_node_put()s in imc-pmu.c In update_events_in_group(), of_find_node_by_phandle() will return a node pointer with refcount incremented. The reference should be dropped with of_node_put() in the failure path or when it is not used anymore. 
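The imc-pmu change is about the error paths in particular: once a helper that takes a reference has succeeded, every early return owns that reference and must drop it. A rough sketch of the shape, assuming the child node comes from of_get_child_by_name() and using hypothetical node and property names:

#include <linux/of.h>

static int example_parse_events(struct device_node *node)
{
        struct device_node *events;
        const char *prefix;

        events = of_get_child_by_name(node, "events");  /* takes a reference */
        if (!events)
                return 0;

        if (of_property_read_string(node, "events-prefix", &prefix)) {
                of_node_put(events);    /* the early return still owns the reference */
                return 0;
        }

        /* ... walk the children of 'events' and build the event list ... */

        of_node_put(events);            /* normal exit */
        return 0;
}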
Signed-off-by: Liang He Signed-off-by: Michael Ellerman Reviewed-by: Athira Rajeev Link: https://lore.kernel.org/r/20220618071353.4059000-1-windhl@126.com --- arch/powerpc/perf/imc-pmu.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/perf/imc-pmu.c b/arch/powerpc/perf/imc-pmu.c index d7976ab40d38..d517aba94d1b 100644 --- a/arch/powerpc/perf/imc-pmu.c +++ b/arch/powerpc/perf/imc-pmu.c @@ -240,8 +240,10 @@ static int update_events_in_group(struct device_node *node, struct imc_pmu *pmu) ct = of_get_child_count(pmu_events); /* Get the event prefix */ - if (of_property_read_string(node, "events-prefix", &prefix)) + if (of_property_read_string(node, "events-prefix", &prefix)) { + of_node_put(pmu_events); return 0; + } /* Get a global unit and scale data if available */ if (of_property_read_string(node, "scale", &g_scale)) @@ -255,8 +257,10 @@ static int update_events_in_group(struct device_node *node, struct imc_pmu *pmu) /* Allocate memory for the events */ pmu->events = kcalloc(ct, sizeof(struct imc_events), GFP_KERNEL); - if (!pmu->events) + if (!pmu->events) { + of_node_put(pmu_events); return -ENOMEM; + } ct = 0; /* Parse the events and update the struct */ @@ -266,6 +270,8 @@ static int update_events_in_group(struct device_node *node, struct imc_pmu *pmu) ct++; } + of_node_put(pmu_events); + /* Allocate memory for attribute group */ attr_group = kzalloc(sizeof(*attr_group), GFP_KERNEL); if (!attr_group) { From d1aabbbb2564f23b66ded10d870e7859e92075a3 Mon Sep 17 00:00:00 2001 From: Liang He Date: Sun, 19 Jun 2022 15:08:11 +0800 Subject: [PATCH 036/214] powerpc/kernel: Add missing of_node_put() in legacy_serial.c In find_legacy_serial_ports(), of_find_node_by_path() will return a node pointer with refcount incremented. The reference should be dropped with of_node_put() when it is not used anymore. Signed-off-by: Liang He Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220619070811.4067215-1-windhl@126.com --- arch/powerpc/kernel/legacy_serial.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/powerpc/kernel/legacy_serial.c b/arch/powerpc/kernel/legacy_serial.c index 5c58460b269a..f048c424c525 100644 --- a/arch/powerpc/kernel/legacy_serial.c +++ b/arch/powerpc/kernel/legacy_serial.c @@ -471,6 +471,8 @@ void __init find_legacy_serial_ports(void) } #endif + of_node_put(stdout); + DBG("legacy_serial_console = %d\n", legacy_serial_console); if (legacy_serial_console >= 0) setup_legacy_serial_console(legacy_serial_console); From d9e1c6104d87d4027133b28f5ccab8f999830acd Mon Sep 17 00:00:00 2001 From: Liang He Date: Sun, 19 Jun 2022 15:23:35 +0800 Subject: [PATCH 037/214] powerpc/cell: Add missing of_node_put()s Use of_node_put() for of_find_node_by_path() and of_find_node_by_phandle() to keep refcount balance. Signed-off-by: Liang He Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220619072335.4067728-1-windhl@126.com --- arch/powerpc/platforms/cell/setup.c | 2 ++ arch/powerpc/platforms/cell/spu_manage.c | 2 ++ 2 files changed, 4 insertions(+) diff --git a/arch/powerpc/platforms/cell/setup.c b/arch/powerpc/platforms/cell/setup.c index 52de014983c9..47eaf75349f2 100644 --- a/arch/powerpc/platforms/cell/setup.c +++ b/arch/powerpc/platforms/cell/setup.c @@ -167,6 +167,8 @@ static int __init cell_publish_devices(void) of_platform_device_create(np, NULL, NULL); } + of_node_put(root); + /* There is no device for the MIC memory controller, thus we create * a platform device for it to attach the EDAC driver to. 
*/ diff --git a/arch/powerpc/platforms/cell/spu_manage.c b/arch/powerpc/platforms/cell/spu_manage.c index ae09c5a91b40..f1ac4c742069 100644 --- a/arch/powerpc/platforms/cell/spu_manage.c +++ b/arch/powerpc/platforms/cell/spu_manage.c @@ -488,6 +488,8 @@ static void __init init_affinity_node(int cbe) avoid_ph = vic_dn->phandle; } + of_node_put(vic_dn); + list_add_tail(&spu->aff_list, &last_spu->aff_list); last_spu = spu; break; From ad4b323693abd221798a6479105d22c79605aa0f Mon Sep 17 00:00:00 2001 From: Liang He Date: Fri, 1 Jul 2022 22:49:48 +0800 Subject: [PATCH 038/214] powerpc/cell: Add missing of_node_put()s in cbe_regs.c There are several bugs as following: (1) In cbe_get_be_node(), hold the reference returned by of_find_xxx and of_get_xxx OF APIs and use it to call of_node_put(). (2) In cbe_fill_regs_map(), same as above. (3) In cbe_regs_init(), during the iteration of for_each_node_by_type(), the refcount of 'cpu' will be automatically increased and decreased. However, there is a reference escaped out into 'map->cpu_node' and it should be properly handled. Signed-off-by: Liang He [mpe: Drop references before pointer equality test in cbe_get_be_node()] Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220701144949.252364-1-windhl@126.com --- arch/powerpc/platforms/cell/cbe_regs.c | 37 +++++++++++++++++++------- 1 file changed, 27 insertions(+), 10 deletions(-) diff --git a/arch/powerpc/platforms/cell/cbe_regs.c b/arch/powerpc/platforms/cell/cbe_regs.c index 316e533afc00..fb4023f9ea6b 100644 --- a/arch/powerpc/platforms/cell/cbe_regs.c +++ b/arch/powerpc/platforms/cell/cbe_regs.c @@ -182,9 +182,16 @@ static struct device_node *__init cbe_get_be_node(int cpu_id) if (WARN_ON_ONCE(!cpu_handle)) return np; - for (i=0; ibe_node) { - struct device_node *be, *np; + struct device_node *be, *np, *parent_np; be = map->be_node; - for_each_node_by_type(np, "pervasive") - if (of_get_parent(np) == be) + for_each_node_by_type(np, "pervasive") { + parent_np = of_get_parent(np); + if (parent_np == be) map->pmd_regs = of_iomap(np, 0); + of_node_put(parent_np); + } - for_each_node_by_type(np, "CBEA-Internal-Interrupt-Controller") - if (of_get_parent(np) == be) + for_each_node_by_type(np, "CBEA-Internal-Interrupt-Controller") { + parent_np = of_get_parent(np); + if (parent_np == be) map->iic_regs = of_iomap(np, 2); + of_node_put(parent_np); + } - for_each_node_by_type(np, "mic-tm") - if (of_get_parent(np) == be) + for_each_node_by_type(np, "mic-tm") { + parent_np = of_get_parent(np); + if (parent_np == be) map->mic_tm_regs = of_iomap(np, 0); + of_node_put(parent_np); + } } else { struct device_node *cpu; /* That hack must die die die ! */ @@ -261,7 +277,8 @@ void __init cbe_regs_init(void) of_node_put(cpu); return; } - map->cpu_node = cpu; + of_node_put(map->cpu_node); + map->cpu_node = of_node_get(cpu); for_each_possible_cpu(i) { struct cbe_thread_map *thread = &cbe_thread_map[i]; From f4f8320b01677b467c768c43c1e1d10383a0e30d Mon Sep 17 00:00:00 2001 From: Liang He Date: Fri, 1 Jul 2022 22:49:49 +0800 Subject: [PATCH 039/214] powerpc/cell: Add missing of_node_put() in iommu.c In cell_iommu_init_disabled(), hold the reference returned by of_find_node_by_name() and use it to call of_node_put() for reference balance. 
Signed-off-by: Liang He Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220701144949.252364-2-windhl@126.com --- arch/powerpc/platforms/cell/iommu.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c index 0ca3efeef293..8c7133039566 100644 --- a/arch/powerpc/platforms/cell/iommu.c +++ b/arch/powerpc/platforms/cell/iommu.c @@ -720,8 +720,10 @@ static int __init cell_iommu_init_disabled(void) cell_disable_iommus(); /* If we have no Axon, we set up the spider DMA magic offset */ - if (of_find_node_by_name(NULL, "axon") == NULL) + np = of_find_node_by_name(NULL, "axon"); + if (!np) cell_dma_nommu_offset = SPIDER_DMA_OFFSET; + of_node_put(np); /* Now we need to check to see where the memory is mapped * in PCI space. We assume that all busses use the same dma From 1c754b49c002a965004b6a96d158f7ce554eb977 Mon Sep 17 00:00:00 2001 From: Liang He Date: Sun, 19 Jun 2022 15:40:16 +0800 Subject: [PATCH 040/214] powerpc/pseries: Add missing of_node_put() in ibmebus In ibmebus_match_path(), use of_node_put() to drop the reference returned by of_find_node_by_path() before testing for equality of the pointers. Signed-off-by: Liang He [mpe: Rewrite change log] Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220619074016.4068105-1-windhl@126.com --- arch/powerpc/platforms/pseries/ibmebus.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/arch/powerpc/platforms/pseries/ibmebus.c b/arch/powerpc/platforms/pseries/ibmebus.c index 7ee3ed7d6cc2..a870cada7acd 100644 --- a/arch/powerpc/platforms/pseries/ibmebus.c +++ b/arch/powerpc/platforms/pseries/ibmebus.c @@ -152,7 +152,11 @@ static const struct dma_map_ops ibmebus_dma_ops = { static int ibmebus_match_path(struct device *dev, const void *data) { struct device_node *dn = to_platform_device(dev)->dev.of_node; - return (of_find_node_by_path(data) == dn); + struct device_node *tn = of_find_node_by_path(data); + + of_node_put(tn); + + return (tn == dn); } static int ibmebus_match_node(struct device *dev, const void *data) From cd772e659da0ad67f19f022f65449e14ebcf1284 Mon Sep 17 00:00:00 2001 From: Liang He Date: Mon, 20 Jun 2022 14:59:04 +0800 Subject: [PATCH 041/214] powerpc/embedded6xx/ls_uart: Add missing of_node_put() In ls_uarts_init(), add an of_node_put() to keep refcount balance. Signed-off-by: Liang He Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220620065904.4071787-1-windhl@126.com --- arch/powerpc/platforms/embedded6xx/ls_uart.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/powerpc/platforms/embedded6xx/ls_uart.c b/arch/powerpc/platforms/embedded6xx/ls_uart.c index 0133e175a0fc..4ecbc55b37c0 100644 --- a/arch/powerpc/platforms/embedded6xx/ls_uart.c +++ b/arch/powerpc/platforms/embedded6xx/ls_uart.c @@ -124,6 +124,8 @@ static int __init ls_uarts_init(void) avr_clock = *(u32*)of_get_property(avr, "clock-frequency", &len); phys_addr = ((u32*)of_get_property(avr, "reg", &len))[0]; + of_node_put(avr); + if (!avr_clock || !phys_addr) return -EINVAL; From 3d31adc47edb6d0cef122a41fba1b639db5d1c37 Mon Sep 17 00:00:00 2001 From: Liang He Date: Mon, 20 Jun 2022 21:02:21 +0800 Subject: [PATCH 042/214] powerpc/sysdev: Add missing of_node_put()s Add of_node_put() in various paths to drop references once they are no longer needed. 
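One of the shapes fixed below (restated here in isolation, roughly following the fsl_pci is_kdump() hunk) is a lookup done only to read a single property: the value is captured first, then the node reference is dropped before the value is returned.

#include <linux/of.h>

static bool example_memory_is_usable(void)
{
        struct device_node *node;
        bool ret;

        node = of_find_node_by_type(NULL, "memory");
        if (!node)
                return false;

        ret = of_property_read_bool(node, "linux,usable-memory");
        of_node_put(node);      /* drop the reference; only the bool escapes */

        return ret;
}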
Signed-off-by: Liang He [mpe: Rewrite change log] Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220620130221.4073228-1-windhl@126.com --- arch/powerpc/sysdev/fsl_pci.c | 6 +++++- arch/powerpc/sysdev/mpic_msgr.c | 9 ++++++++- arch/powerpc/sysdev/xive/native.c | 15 ++++++++++----- 3 files changed, 23 insertions(+), 7 deletions(-) diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c index af6c8ca824d3..4c3fd9f4cc7b 100644 --- a/arch/powerpc/sysdev/fsl_pci.c +++ b/arch/powerpc/sysdev/fsl_pci.c @@ -181,6 +181,7 @@ static int setup_one_atmu(struct ccsr_pci __iomem *pci, static bool is_kdump(void) { struct device_node *node; + bool ret; node = of_find_node_by_type(NULL, "memory"); if (!node) { @@ -188,7 +189,10 @@ static bool is_kdump(void) return false; } - return of_property_read_bool(node, "linux,usable-memory"); + ret = of_property_read_bool(node, "linux,usable-memory"); + of_node_put(node); + + return ret; } /* atmu setup for fsl pci/pcie controller */ diff --git a/arch/powerpc/sysdev/mpic_msgr.c b/arch/powerpc/sysdev/mpic_msgr.c index 698fefaaa6dd..a439e33eae06 100644 --- a/arch/powerpc/sysdev/mpic_msgr.c +++ b/arch/powerpc/sysdev/mpic_msgr.c @@ -121,6 +121,7 @@ static unsigned int mpic_msgr_number_of_blocks(void) count += 1; } + of_node_put(aliases); } return count; @@ -144,12 +145,18 @@ static int mpic_msgr_block_number(struct device_node *node) for (index = 0; index < number_of_blocks; ++index) { struct property *prop; + struct device_node *tn; snprintf(buf, sizeof(buf), "mpic-msgr-block%d", index); prop = of_find_property(aliases, buf, NULL); - if (node == of_find_node_by_path(prop->value)) + tn = of_find_node_by_path(prop->value); + if (node == tn) { + of_node_put(tn); break; + } + of_node_put(tn); } + of_node_put(aliases); return index == number_of_blocks ? 
-1 : index; } diff --git a/arch/powerpc/sysdev/xive/native.c b/arch/powerpc/sysdev/xive/native.c index d25d8c692909..3925825954bc 100644 --- a/arch/powerpc/sysdev/xive/native.c +++ b/arch/powerpc/sysdev/xive/native.c @@ -579,12 +579,12 @@ bool __init xive_native_init(void) /* Resource 1 is HV window */ if (of_address_to_resource(np, 1, &r)) { pr_err("Failed to get thread mgmnt area resource\n"); - return false; + goto err_put; } tima = ioremap(r.start, resource_size(&r)); if (!tima) { pr_err("Failed to map thread mgmnt area\n"); - return false; + goto err_put; } /* Read number of priorities */ @@ -612,7 +612,7 @@ bool __init xive_native_init(void) /* Resource 2 is OS window */ if (of_address_to_resource(np, 2, &r)) { pr_err("Failed to get thread mgmnt area resource\n"); - return false; + goto err_put; } xive_tima_os = r.start; @@ -624,7 +624,7 @@ bool __init xive_native_init(void) rc = opal_xive_reset(OPAL_XIVE_MODE_EXPL); if (rc) { pr_err("Switch to exploitation mode failed with error %lld\n", rc); - return false; + goto err_put; } /* Setup some dummy HV pool VPs */ @@ -634,10 +634,15 @@ bool __init xive_native_init(void) if (!xive_core_init(np, &xive_native_ops, tima, TM_QW3_HV_PHYS, max_prio)) { opal_xive_reset(OPAL_XIVE_MODE_EMU); - return false; + goto err_put; } + of_node_put(np); pr_info("Using %dkB queues\n", 1 << (xive_queue_shift - 10)); return true; + +err_put: + of_node_put(np); + return false; } static bool xive_native_provision_pages(void) From def435c04ee984a5f9ed2711b2bfe946936c6a21 Mon Sep 17 00:00:00 2001 From: Liang He Date: Mon, 4 Jul 2022 22:52:33 +0800 Subject: [PATCH 043/214] powerpc/sysdev/fsl_msi: Add missing of_node_put() In fsl_setup_msi_irqs(), use of_node_put() to drop the reference returned by of_parse_phandle(). Fixes: 895d603f945ba ("powerpc/fsl_msi: add support for the fsl, msi property in PCI nodes") Co-authored-by: Miaoqian Lin Signed-off-by: Liang He Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220704145233.278539-1-windhl@126.com --- arch/powerpc/sysdev/fsl_msi.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c index ef9a5999fa93..73c2d70706c0 100644 --- a/arch/powerpc/sysdev/fsl_msi.c +++ b/arch/powerpc/sysdev/fsl_msi.c @@ -209,8 +209,10 @@ static int fsl_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) dev_err(&pdev->dev, "node %pOF has an invalid fsl,msi phandle %u\n", hose->dn, np->phandle); + of_node_put(np); return -EINVAL; } + of_node_put(np); } msi_for_each_desc(entry, &pdev->dev, MSI_DESC_NOTASSOCIATED) { From a3a4c10aef88a80ba1b230a7bf63ea381cc5116e Mon Sep 17 00:00:00 2001 From: Liang He Date: Mon, 20 Jun 2022 23:05:18 +0800 Subject: [PATCH 044/214] powerpc/powermac: Add missing of_node_put() in smp_core99_setup() In smp_core99_setup(), add of_node_put() to drop the reference once it's no longer needed. 
Signed-off-by: Liang He Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220620150518.4074910-1-windhl@126.com --- arch/powerpc/platforms/powermac/smp.c | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c index d9df45741ece..5b26a9012d2e 100644 --- a/arch/powerpc/platforms/powermac/smp.c +++ b/arch/powerpc/platforms/powermac/smp.c @@ -711,6 +711,7 @@ static void __init smp_core99_setup(int ncpus) printk(KERN_INFO "Processor timebase sync using" " platform function\n"); } + of_node_put(cpus); } #else /* CONFIG_PPC64 */ From cc0dd82c18559184e009bef8d0353d008013a813 Mon Sep 17 00:00:00 2001 From: Liang He Date: Tue, 21 Jun 2022 16:03:49 +0800 Subject: [PATCH 045/214] powerpc/512x: Add missing of_node_put() in clock-commonclk.c In mpc5121_clk_provide_migration_support(), hold the reference returned by of_find_compatible_node() and use it to call of_node_put() for refcount balance. Signed-off-by: Liang He Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220621080349.4081689-1-windhl@126.com --- arch/powerpc/platforms/512x/clock-commonclk.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/platforms/512x/clock-commonclk.c b/arch/powerpc/platforms/512x/clock-commonclk.c index ca475462e95b..42abeba4f698 100644 --- a/arch/powerpc/platforms/512x/clock-commonclk.c +++ b/arch/powerpc/platforms/512x/clock-commonclk.c @@ -950,7 +950,7 @@ static void __init mpc5121_clk_register_of_provider(struct device_node *np) */ static void __init mpc5121_clk_provide_migration_support(void) { - + struct device_node *np; /* * pre-enable those clock items which are not yet appropriately * acquired by their peripheral driver @@ -970,7 +970,9 @@ static void __init mpc5121_clk_provide_migration_support(void) * unused and so it gets disabled */ clk_prepare_enable(clks[MPC512x_CLK_PSC3_MCLK]);/* serial console */ - if (of_find_compatible_node(NULL, "pci", "fsl,mpc5121-pci")) + np = of_find_compatible_node(NULL, "pci", "fsl,mpc5121-pci"); + of_node_put(np); + if (np) clk_prepare_enable(clks[MPC512x_CLK_PCI]); } From 24156df00dbbc673d9b2d31a336c3aba537d2c60 Mon Sep 17 00:00:00 2001 From: Liang He Date: Tue, 21 Jun 2022 16:09:32 +0800 Subject: [PATCH 046/214] powerpc/83xx: Add missing of_node_put() in mpc832x_spi_init() In mpc832x_spi_init(), hold the reference returned by of_find_compatible_node() and use it to call of_node_put() for refcount balance. Signed-off-by: Liang He Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220621080932.4081935-1-windhl@126.com --- arch/powerpc/platforms/83xx/mpc832x_rdb.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/arch/powerpc/platforms/83xx/mpc832x_rdb.c b/arch/powerpc/platforms/83xx/mpc832x_rdb.c index bb8caa5071f8..e12cb44e717f 100644 --- a/arch/powerpc/platforms/83xx/mpc832x_rdb.c +++ b/arch/powerpc/platforms/83xx/mpc832x_rdb.c @@ -162,6 +162,8 @@ static struct spi_board_info mpc832x_spi_boardinfo = { static int __init mpc832x_spi_init(void) { + struct device_node *np; + par_io_config_pin(3, 0, 3, 0, 1, 0); /* SPI1 MOSI, I/O */ par_io_config_pin(3, 1, 3, 0, 1, 0); /* SPI1 MISO, I/O */ par_io_config_pin(3, 2, 3, 0, 1, 0); /* SPI1 CLK, I/O */ @@ -175,7 +177,9 @@ static int __init mpc832x_spi_init(void) * Don't bother with legacy stuff when device tree contains * mmc-spi-slot node. 
*/ - if (of_find_compatible_node(NULL, NULL, "mmc-spi-slot")) + np = of_find_compatible_node(NULL, NULL, "mmc-spi-slot"); + of_node_put(np); + if (np) return 0; return fsl_spi_init(&mpc832x_spi_boardinfo, 1, mpc83xx_spi_cs_control); } From d208d8c2cde513b94ae3b8b97663656079004555 Mon Sep 17 00:00:00 2001 From: Liang He Date: Wed, 22 Jun 2022 14:16:52 +0800 Subject: [PATCH 047/214] macintosh: Add missing of_node_get() in do_attach() We need a of_node_get() for of_find_compatible_node() to keep refcount balance. Signed-off-by: Liang He Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220622061652.4095330-1-windhl@126.com --- drivers/macintosh/therm_windtunnel.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/macintosh/therm_windtunnel.c b/drivers/macintosh/therm_windtunnel.c index 091278240baa..5cd9671b2e3d 100644 --- a/drivers/macintosh/therm_windtunnel.c +++ b/drivers/macintosh/therm_windtunnel.c @@ -317,6 +317,7 @@ static void do_attach(struct i2c_adapter *adapter) if (x.running || strncmp(adapter->name, "uni-n", 5)) return; + of_node_get(adapter->dev.of_node); np = of_find_compatible_node(adapter->dev.of_node, NULL, "MAC,ds1775"); if (np) { of_node_put(np); @@ -325,6 +326,7 @@ static void do_attach(struct i2c_adapter *adapter) i2c_new_scanned_device(adapter, &info, scan_ds1775, NULL); } + of_node_get(adapter->dev.of_node); np = of_find_compatible_node(adapter->dev.of_node, NULL, "MAC,adm1030"); if (np) { of_node_put(np); From 6ec4836fa15a0ff02e3a382ad6b1920c728a8c95 Mon Sep 17 00:00:00 2001 From: Liang He Date: Tue, 21 Jun 2022 19:17:01 +0800 Subject: [PATCH 048/214] powerpc/pseries: Add missing of_node_put()s in hotplug-cpu.c In pseries_cpuhp_cache_use_count() and pseries_cpuhp_detach_nodes(), we need carefully hold the reference returned by of_find_next_cache_node() and use it to call of_node_put() to keep refcount balance. 
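The hotplug-cpu fix below relies on a small idiom: when the node returned by of_find_next_cache_node() is only compared against another pointer and never dereferenced, the reference can be dropped immediately and the raw pointer value still used for the comparison. Roughly:

#include <linux/of.h>

static unsigned int example_cache_use_count(const struct device_node *cachedn)
{
        struct device_node *dn, *next;
        unsigned int count = 0;

        for_each_of_cpu_node(dn) {
                next = of_find_next_cache_node(dn);
                of_node_put(next);      /* safe: 'next' is only compared, not dereferenced */
                if (next == cachedn)
                        count++;
        }

        return count;
}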
Signed-off-by: Liang He Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220621111701.4082889-1-windhl@126.com --- arch/powerpc/platforms/pseries/hotplug-cpu.c | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c index 0f8cd8b06432..e0a7ac5db15d 100644 --- a/arch/powerpc/platforms/pseries/hotplug-cpu.c +++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c @@ -619,17 +619,21 @@ static ssize_t dlpar_cpu_add(u32 drc_index) static unsigned int pseries_cpuhp_cache_use_count(const struct device_node *cachedn) { unsigned int use_count = 0; - struct device_node *dn; + struct device_node *dn, *tn; WARN_ON(!of_node_is_type(cachedn, "cache")); for_each_of_cpu_node(dn) { - if (of_find_next_cache_node(dn) == cachedn) + tn = of_find_next_cache_node(dn); + of_node_put(tn); + if (tn == cachedn) use_count++; } for_each_node_by_type(dn, "cache") { - if (of_find_next_cache_node(dn) == cachedn) + tn = of_find_next_cache_node(dn); + of_node_put(tn); + if (tn == cachedn) use_count++; } @@ -649,10 +653,13 @@ static int pseries_cpuhp_detach_nodes(struct device_node *cpudn) dn = cpudn; while ((dn = of_find_next_cache_node(dn))) { - if (pseries_cpuhp_cache_use_count(dn) > 1) + if (pseries_cpuhp_cache_use_count(dn) > 1) { + of_node_put(dn); break; + } ret = of_changeset_detach_node(&cs, dn); + of_node_put(dn); if (ret) goto out; } From 110a1fcb6c4d55144d8179983a475f17a1d6f832 Mon Sep 17 00:00:00 2001 From: Liang He Date: Fri, 1 Jul 2022 21:17:50 +0800 Subject: [PATCH 049/214] powerpc/pci_dn: Add missing of_node_put() In pci_add_device_node_info(), use of_node_put() to drop the reference to 'parent' returned by of_get_parent() to keep refcount balance. Fixes: cca87d303c85 ("powerpc/pci: Refactor pci_dn") Co-authored-by: Miaoqian Lin Signed-off-by: Liang He Signed-off-by: Michael Ellerman Reviewed-by: Tyrel Datwyler Link: https://lore.kernel.org/r/20220701131750.240170-1-windhl@126.com --- arch/powerpc/kernel/pci_dn.c | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/powerpc/kernel/pci_dn.c b/arch/powerpc/kernel/pci_dn.c index 7a35fc25a304..38561d6a2079 100644 --- a/arch/powerpc/kernel/pci_dn.c +++ b/arch/powerpc/kernel/pci_dn.c @@ -330,6 +330,7 @@ struct pci_dn *pci_add_device_node_info(struct pci_controller *hose, INIT_LIST_HEAD(&pdn->list); parent = of_get_parent(dn); pdn->parent = parent ? PCI_DN(parent) : NULL; + of_node_put(parent); if (pdn->parent) list_add_tail(&pdn->list, &pdn->parent->child_list); From 9d86f0919544d8e422be2a1d562de1dbbbb6052d Mon Sep 17 00:00:00 2001 From: Liang He Date: Fri, 1 Jul 2022 21:31:26 +0800 Subject: [PATCH 050/214] powerpc/44x: Add of_node_put() when break out from for_each In ppc47x_init_irq(), we need to call of_node_put() when there is a break during the iteration of for_each_node_with_property() which will automatically increase and decrease the refcount. 
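The for_each_* iterators put the previous node and get the next one on every step, so a full iteration needs no manual puts; the case that does is breaking out early, where the current node still carries a reference. A generic sketch (the compatible string is made up):

#include <linux/of.h>

static void example_scan(void)
{
        struct device_node *np;

        for_each_node_with_property(np, "interrupt-controller") {
                if (of_device_is_compatible(np, "vendor,example-pic"))
                        break;          /* leaves the loop holding a reference */
        }

        /* ... use np here; it is NULL if nothing matched ... */

        of_node_put(np);        /* of_node_put(NULL) is a no-op, so this is always safe */
}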
Signed-off-by: Liang He [mpe: mpic_alloc() takes its own reference] Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220701133126.243102-1-windhl@126.com --- arch/powerpc/platforms/44x/ppc476.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/powerpc/platforms/44x/ppc476.c b/arch/powerpc/platforms/44x/ppc476.c index 20cc8f80b086..7c91ac5a5241 100644 --- a/arch/powerpc/platforms/44x/ppc476.c +++ b/arch/powerpc/platforms/44x/ppc476.c @@ -140,6 +140,8 @@ static void __init ppc47x_init_irq(void) ppc_md.get_irq = mpic_get_irq; } else panic("Unrecognized top level interrupt controller"); + + of_node_put(np); } #ifdef CONFIG_SMP From a8b89c10e6052027061a447ff7436642310c8f20 Mon Sep 17 00:00:00 2001 From: Liang He Date: Fri, 1 Jul 2022 22:01:19 +0800 Subject: [PATCH 051/214] powerpc/85xx: Add missing of_node_get/put() in ge_imp3a_pci_assign_primary() for_each_node_by_type() will automatically increase and decrease the refcount during the iteration. However, there is a reference escaped into global 'fsl_pci_primary' and we need to handle it. Signed-off-by: Liang He Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220701140119.245435-1-windhl@126.com --- arch/powerpc/platforms/85xx/ge_imp3a.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/platforms/85xx/ge_imp3a.c b/arch/powerpc/platforms/85xx/ge_imp3a.c index 8e827376d97b..e3e8f18825a1 100644 --- a/arch/powerpc/platforms/85xx/ge_imp3a.c +++ b/arch/powerpc/platforms/85xx/ge_imp3a.c @@ -89,8 +89,10 @@ static void __init ge_imp3a_pci_assign_primary(void) of_device_is_compatible(np, "fsl,mpc8548-pcie") || of_device_is_compatible(np, "fsl,p2020-pcie")) { of_address_to_resource(np, 0, &rsrc); - if ((rsrc.start & 0xfffff) == 0x9000) - fsl_pci_primary = np; + if ((rsrc.start & 0xfffff) == 0x9000) { + of_node_put(fsl_pci_primary); + fsl_pci_primary = of_node_get(np); + } } } #endif From afa6a472a3d2a8dd477b285eeb67b3593546647b Mon Sep 17 00:00:00 2001 From: Liang He Date: Wed, 20 Jul 2022 20:45:57 +0800 Subject: [PATCH 052/214] powerpc/fsl_pci: Remove of_node_put() when reference escaped out In fsl_pci_assign_primary(), we should remove the of_node_put() when breaking out of the for_each_matching_node() as the 'np' is escaped out by global 'fsl_pci_primary'. Signed-off-by: Liang He Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220720124557.1256243-1-windhl@126.com --- arch/powerpc/sysdev/fsl_pci.c | 1 - 1 file changed, 1 deletion(-) diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c index 4c3fd9f4cc7b..1ef7400ef244 100644 --- a/arch/powerpc/sysdev/fsl_pci.c +++ b/arch/powerpc/sysdev/fsl_pci.c @@ -1146,7 +1146,6 @@ void __init fsl_pci_assign_primary(void) for_each_matching_node(np, pci_ids) { if (of_device_is_available(np)) { fsl_pci_primary = np; - of_node_put(np); return; } } From 605c27f3802038e4623b6fd1bbfa021e1f65b5c4 Mon Sep 17 00:00:00 2001 From: Liang He Date: Mon, 20 Jun 2022 21:25:53 +0800 Subject: [PATCH 053/214] powerpc/powernv: Add missing of_node_put()s In these driver init functions, there are two kinds of errors: (1) missing of_put_node() for of_find_compatible_node()'s returned pointer (refcount incremented) in fail path or when it is not used anymore. 
(2) missing of_put_node() for 'for_each_xxx' loop's break Signed-off-by: Liang He [mpe: Use out_put_xxx goto label naming] Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220620132553.4073863-1-windhl@126.com --- arch/powerpc/platforms/powernv/idle.c | 1 + arch/powerpc/platforms/powernv/opal-core.c | 2 ++ arch/powerpc/platforms/powernv/opal-powercap.c | 6 +++++- arch/powerpc/platforms/powernv/opal-psr.c | 6 +++++- arch/powerpc/platforms/powernv/opal-sensor-groups.c | 6 +++++- arch/powerpc/platforms/powernv/opal.c | 2 ++ 6 files changed, 20 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/platforms/powernv/idle.c b/arch/powerpc/platforms/powernv/idle.c index 6e6b3bd9c92f..841cb7f31f4f 100644 --- a/arch/powerpc/platforms/powernv/idle.c +++ b/arch/powerpc/platforms/powernv/idle.c @@ -1419,6 +1419,7 @@ out: kfree(temp_u32); kfree(temp_u64); kfree(temp_string); + of_node_put(np); return rc; } diff --git a/arch/powerpc/platforms/powernv/opal-core.c b/arch/powerpc/platforms/powernv/opal-core.c index adcb1a1a2bfe..bb7657115f1d 100644 --- a/arch/powerpc/platforms/powernv/opal-core.c +++ b/arch/powerpc/platforms/powernv/opal-core.c @@ -348,6 +348,8 @@ static int __init create_opalcore(void) if (!dn || ret) pr_warn("WARNING: Failed to read OPAL base & entry values\n"); + of_node_put(dn); + /* Use count to keep track of the program headers */ count = 0; diff --git a/arch/powerpc/platforms/powernv/opal-powercap.c b/arch/powerpc/platforms/powernv/opal-powercap.c index 64506b46e77b..7bfe4cbeb35a 100644 --- a/arch/powerpc/platforms/powernv/opal-powercap.c +++ b/arch/powerpc/platforms/powernv/opal-powercap.c @@ -153,7 +153,7 @@ void __init opal_powercap_init(void) pcaps = kcalloc(of_get_child_count(powercap), sizeof(*pcaps), GFP_KERNEL); if (!pcaps) - return; + goto out_put_powercap; powercap_kobj = kobject_create_and_add("powercap", opal_kobj); if (!powercap_kobj) { @@ -226,6 +226,7 @@ void __init opal_powercap_init(void) } i++; } + of_node_put(powercap); return; @@ -236,6 +237,9 @@ out_pcaps_pattrs: kfree(pcaps[i].pg.name); } kobject_put(powercap_kobj); + of_node_put(node); out_pcaps: kfree(pcaps); +out_put_powercap: + of_node_put(powercap); } diff --git a/arch/powerpc/platforms/powernv/opal-psr.c b/arch/powerpc/platforms/powernv/opal-psr.c index 69d7e75950d1..6441e17b6996 100644 --- a/arch/powerpc/platforms/powernv/opal-psr.c +++ b/arch/powerpc/platforms/powernv/opal-psr.c @@ -135,7 +135,7 @@ void __init opal_psr_init(void) psr_attrs = kcalloc(of_get_child_count(psr), sizeof(*psr_attrs), GFP_KERNEL); if (!psr_attrs) - return; + goto out_put_psr; psr_kobj = kobject_create_and_add("psr", opal_kobj); if (!psr_kobj) { @@ -162,10 +162,14 @@ void __init opal_psr_init(void) } i++; } + of_node_put(psr); return; out_kobj: + of_node_put(node); kobject_put(psr_kobj); out: kfree(psr_attrs); +out_put_psr: + of_node_put(psr); } diff --git a/arch/powerpc/platforms/powernv/opal-sensor-groups.c b/arch/powerpc/platforms/powernv/opal-sensor-groups.c index 8fba7d25ae56..9944376b115c 100644 --- a/arch/powerpc/platforms/powernv/opal-sensor-groups.c +++ b/arch/powerpc/platforms/powernv/opal-sensor-groups.c @@ -170,7 +170,7 @@ void __init opal_sensor_groups_init(void) sgs = kcalloc(of_get_child_count(sg), sizeof(*sgs), GFP_KERNEL); if (!sgs) - return; + goto out_sg_put; sg_kobj = kobject_create_and_add("sensor_groups", opal_kobj); if (!sg_kobj) { @@ -222,6 +222,7 @@ void __init opal_sensor_groups_init(void) } i++; } + of_node_put(sg); return; @@ -231,6 +232,9 @@ out_sgs_sgattrs: kfree(sgs[i].sg.attrs); 
} kobject_put(sg_kobj); + of_node_put(node); out_sgs: kfree(sgs); +out_sg_put: + of_node_put(sg); } diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c index 55a8fbfdb5b2..e536a6a3c801 100644 --- a/arch/powerpc/platforms/powernv/opal.c +++ b/arch/powerpc/platforms/powernv/opal.c @@ -952,6 +952,8 @@ static void __init opal_imc_init_dev(void) np = of_find_compatible_node(NULL, NULL, IMC_DTB_COMPAT); if (np) of_platform_device_create(np, NULL, NULL); + + of_node_put(np); } static int kopald(void *unused) From ce63c44b63cdae892107717ba10fdb6fb4fc6cdb Mon Sep 17 00:00:00 2001 From: Liang He Date: Sat, 2 Jul 2022 10:29:36 +0800 Subject: [PATCH 054/214] powerpc/pci-common: Fix refcount bug for 'phb->dn' In pcibios_alloc_controller(), 'phb' is allocated and escaped into global 'hose_list'. So we should call of_node_get() when a new reference created into 'phb->dn'. And when phb is freed, we should call of_node_put() on it. NOTE: This function is called in the iteration of for_each_xx in chrp_find_bridges() and pSeries_discover_phbs(). If there is no of_node_get(), the object may be prematurely freed. Signed-off-by: Liang He Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220702022936.266146-1-windhl@126.com --- arch/powerpc/kernel/pci-common.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c index 31de91c8359c..d67cf79bf5d0 100644 --- a/arch/powerpc/kernel/pci-common.c +++ b/arch/powerpc/kernel/pci-common.c @@ -135,7 +135,7 @@ struct pci_controller *pcibios_alloc_controller(struct device_node *dev) list_add_tail(&phb->list_node, &hose_list); spin_unlock(&hose_spinlock); - phb->dn = dev; + phb->dn = of_node_get(dev); phb->is_dynamic = slab_is_available(); #ifdef CONFIG_PPC64 if (dev) { @@ -158,7 +158,7 @@ void pcibios_free_controller(struct pci_controller *phb) /* Clear bit of phb_bitmap to allow reuse of this PHB number. */ if (phb->global_number < MAX_PHBS) clear_bit(phb->global_number, phb_bitmap); - + of_node_put(phb->dn); list_del(&phb->list_node); spin_unlock(&hose_spinlock); From d36337ce950ce8c57a8e4f61593f923cceaf0dd7 Mon Sep 17 00:00:00 2001 From: Liang He Date: Sat, 16 Jul 2022 14:54:12 +0800 Subject: [PATCH 055/214] powerpc/powermac/feature: Add missing of_node_put() In probe_one_macio(), call of_node_put() for the refernece 'node' escaped out of the for_each_node_by_name() which has increased its refcount. While the 'node' will finally escaped into a global reference, we should still call of_node_put() in fail path which will stop global reference creation. 
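As an illustration of the pattern (a minimal, hypothetical sketch, not the actual powermac code): the iterator takes a reference on each node it returns, so a reference that survives a break out of the loop must be dropped on every path that does not hand it off to a long-lived owner.

  #include <linux/init.h>
  #include <linux/errno.h>
  #include <linux/of.h>

  static struct device_node *example_chip;  /* long-lived owner of the reference */

  static int __init probe_example(void)
  {
      struct device_node *node = NULL;

      for_each_node_by_name(node, "example-io")
          if (of_device_is_compatible(node, "example,chip"))
              break;            /* 'node' still holds a reference here */
      if (!node)
          return -ENODEV;

      if (!of_get_property(node, "reg", NULL)) {
          of_node_put(node);    /* fail path: drop the reference */
          return -EINVAL;
      }

      example_chip = node;      /* success: the reference escapes here */
      return 0;
  }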
Signed-off-by: Liang He Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220716065412.539153-1-windhl@126.com --- arch/powerpc/platforms/powermac/feature.c | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/arch/powerpc/platforms/powermac/feature.c b/arch/powerpc/platforms/powermac/feature.c index 5cc958adba13..0382d20b5619 100644 --- a/arch/powerpc/platforms/powermac/feature.c +++ b/arch/powerpc/platforms/powermac/feature.c @@ -2632,31 +2632,31 @@ static void __init probe_one_macio(const char *name, const char *compat, int typ if (!macio_chips[i].of_node) break; if (macio_chips[i].of_node == node) - return; + goto out_put; } if (i >= MAX_MACIO_CHIPS) { printk(KERN_ERR "pmac_feature: Please increase MAX_MACIO_CHIPS !\n"); printk(KERN_ERR "pmac_feature: %pOF skipped\n", node); - return; + goto out_put; } addrp = of_get_pci_address(node, 0, &size, NULL); if (addrp == NULL) { printk(KERN_ERR "pmac_feature: %pOF: can't find base !\n", node); - return; + goto out_put; } addr = of_translate_address(node, addrp); if (addr == 0) { printk(KERN_ERR "pmac_feature: %pOF, can't translate base !\n", node); - return; + goto out_put; } base = ioremap(addr, (unsigned long)size); if (!base) { printk(KERN_ERR "pmac_feature: %pOF, can't map mac-io chip !\n", node); - return; + goto out_put; } if (type == macio_keylargo || type == macio_keylargo2) { const u32 *did = of_get_property(node, "device-id", NULL); @@ -2677,6 +2677,11 @@ static void __init probe_one_macio(const char *name, const char *compat, int typ macio_chips[i].rev = *revp; printk(KERN_INFO "Found a %s mac-io controller, rev: %d, mapped at 0x%p\n", macio_names[type], macio_chips[i].rev, macio_chips[i].base); + + return; + +out_put: + of_node_put(node); } static int __init From b3d6637bcc5d17caec56a76f6e430dcf444ef80e Mon Sep 17 00:00:00 2001 From: Liang He Date: Sat, 16 Jul 2022 15:07:58 +0800 Subject: [PATCH 056/214] powerpc/powermac/low_i2c: Add missing of_node_put() in kw_i2c_probe() Call of_node_put() for the reference 'parent' returned by of_get_parent() which has increased the refcount. Signed-off-by: Liang He Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220716070758.539434-1-windhl@126.com --- arch/powerpc/platforms/powermac/low_i2c.c | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/powerpc/platforms/powermac/low_i2c.c b/arch/powerpc/platforms/powermac/low_i2c.c index c1c430c66dc9..40f3aa432fba 100644 --- a/arch/powerpc/platforms/powermac/low_i2c.c +++ b/arch/powerpc/platforms/powermac/low_i2c.c @@ -627,6 +627,7 @@ static void __init kw_i2c_probe(void) if (parent == NULL) continue; chans = parent->name[0] == 'u' ? 2 : 1; + of_node_put(parent); for (i = 0; i < chans; i++) kw_i2c_add(host, np, np, i); } else { From 11373c933db20f8b6fd2cad27712e683ac9785f0 Mon Sep 17 00:00:00 2001 From: Liang He Date: Sat, 16 Jul 2022 15:31:11 +0800 Subject: [PATCH 057/214] powerpc/powermac/pfunc_base: Add missing of_node_put() in macio_gpio_init_one() Call of_node_put() for the reference 'gparent' escaped out of the previous for_each_child_of_node() as it has increased the refcount. 
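For context, for_each_child_of_node() puts the previous child and gets the next one on each iteration, so the only reference the caller must release explicitly is one that escapes the loop, e.g. via break. A simplified, hypothetical sketch of that rule (not the actual pfunc_base code):

  #include <linux/printk.h>
  #include <linux/of.h>

  static void example_walk_gpios(struct device_node *macio_node)
  {
      struct device_node *gparent, *gp;

      /* Breaking out leaves 'gparent' with an elevated refcount. */
      for_each_child_of_node(macio_node, gparent)
          if (of_node_name_prefix(gparent, "gpio"))
              break;
      if (!gparent)
          return;

      /* The iterator balances the per-child references by itself. */
      for_each_child_of_node(gparent, gp)
          pr_debug("gpio child: %pOF\n", gp);

      /* The reference that escaped the first loop must be dropped. */
      of_node_put(gparent);
  }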
Signed-off-by: Liang He Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220716073111.539739-1-windhl@126.com --- arch/powerpc/platforms/powermac/pfunc_base.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/powerpc/platforms/powermac/pfunc_base.c b/arch/powerpc/platforms/powermac/pfunc_base.c index 9c2947a3edd5..085e0ad20eba 100644 --- a/arch/powerpc/platforms/powermac/pfunc_base.c +++ b/arch/powerpc/platforms/powermac/pfunc_base.c @@ -136,6 +136,8 @@ static void __init macio_gpio_init_one(struct macio_chip *macio) for_each_child_of_node(gparent, gp) pmf_do_functions(gp, NULL, 0, PMF_FLAGS_ON_INIT, NULL); + of_node_put(gparent); + /* Note: We do not at this point implement the "at sleep" or "at wake" * functions. I yet to find any for GPIOs anyway */ From 2378bf144b841df548161af49bf1ff393dc60d44 Mon Sep 17 00:00:00 2001 From: Liang He Date: Sat, 16 Jul 2022 15:43:44 +0800 Subject: [PATCH 058/214] powerpc/powermac/udbg_scc: Add missing of_node_put()s in udbg_scc_init() During the iteration of for_each_child_of_node(), we need to call of_node_put() for the old references stored in to 'ch_def' and 'ch_a' as their refcounters have been increased in last iteration. Signed-off-by: Liang He Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220716074344.540049-1-windhl@126.com --- arch/powerpc/platforms/powermac/udbg_scc.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/platforms/powermac/udbg_scc.c b/arch/powerpc/platforms/powermac/udbg_scc.c index 734df5a32f99..1b7c39e841ee 100644 --- a/arch/powerpc/platforms/powermac/udbg_scc.c +++ b/arch/powerpc/platforms/powermac/udbg_scc.c @@ -81,10 +81,14 @@ void __init udbg_scc_init(int force_scc) if (path != NULL) stdout = of_find_node_by_path(path); for_each_child_of_node(escc, ch) { - if (ch == stdout) + if (ch == stdout) { + of_node_put(ch_def); ch_def = of_node_get(ch); - if (of_node_name_eq(ch, "ch-a")) + } + if (of_node_name_eq(ch, "ch-a")) { + of_node_put(ch_a); ch_a = of_node_get(ch); + } } if (ch_def == NULL && !force_scc) goto bail; From 4c73cadcdc64b53248bca85baa8a19e7384701ec Mon Sep 17 00:00:00 2001 From: Jilin Yuan Date: Wed, 31 Aug 2022 08:51:09 +0800 Subject: [PATCH 059/214] powerpc/mobility: fix repeated words in comments Delete the redundant word 'the'. Signed-off-by: Jilin Yuan Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220831005109.38314-1-yuanjilin@cdjrlc.com --- arch/powerpc/platforms/pseries/mobility.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c index 3d36a8955eaf..c92c78332303 100644 --- a/arch/powerpc/platforms/pseries/mobility.c +++ b/arch/powerpc/platforms/pseries/mobility.c @@ -216,7 +216,7 @@ static int update_dt_node(struct device_node *dn, s32 scope) nprops = be32_to_cpu(upwa->nprops); /* On the first call to ibm,update-properties for a node the - * the first property value descriptor contains an empty + * first property value descriptor contains an empty * property name, the property value length encoded as u32, * and the property value is the node path being updated. */ From 0d4bb5e45aa698f2f357b1424b842bebe13b1c8b Mon Sep 17 00:00:00 2001 From: Jilin Yuan Date: Wed, 31 Aug 2022 08:49:14 +0800 Subject: [PATCH 060/214] powerpc/vas: fix repeated words in comments Delete the redundant word 'the'. 
Signed-off-by: Jilin Yuan Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220831004914.37055-1-yuanjilin@cdjrlc.com --- arch/powerpc/platforms/book3s/vas-api.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/powerpc/platforms/book3s/vas-api.c b/arch/powerpc/platforms/book3s/vas-api.c index c0799fb26b6d..40f5ae5e1238 100644 --- a/arch/powerpc/platforms/book3s/vas-api.c +++ b/arch/powerpc/platforms/book3s/vas-api.c @@ -431,7 +431,7 @@ static vm_fault_t vas_mmap_fault(struct vm_fault *vmf) * The window may be inactive due to lost credit (Ex: core * removal with DLPAR). If the window is active again when * the credit is available, map the new paste address at the - * the window virtual address. + * window virtual address. */ if (txwin->status == VAS_WIN_ACTIVE) { paste_addr = cp_inst->coproc->vops->paste_addr(txwin); From 9b135eef0787813ad073aaeb9ff80ab57bc63e69 Mon Sep 17 00:00:00 2001 From: Jilin Yuan Date: Wed, 31 Aug 2022 08:47:06 +0800 Subject: [PATCH 061/214] powerpc/xive: fix repeated words in comments Delete the redundant word 'set'. Signed-off-by: Jilin Yuan Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220831004706.35280-1-yuanjilin@cdjrlc.com --- arch/powerpc/sysdev/xive/common.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c index 61b9f98dfd4a..a289cb97c1d7 100644 --- a/arch/powerpc/sysdev/xive/common.c +++ b/arch/powerpc/sysdev/xive/common.c @@ -783,7 +783,7 @@ static int xive_irq_set_type(struct irq_data *d, unsigned int flow_type) * the corresponding descriptor bits mind you but those will in turn * affect the resend function when re-enabling an edge interrupt. * - * Set set the default to edge as explained in map(). + * Set the default to edge as explained in map(). */ if (flow_type == IRQ_TYPE_DEFAULT || flow_type == IRQ_TYPE_NONE) flow_type = IRQ_TYPE_EDGE_RISING; From 245685495bff35062a394f5cdbd32b237dc596a5 Mon Sep 17 00:00:00 2001 From: Russell Currey Date: Sat, 27 Aug 2022 16:39:46 +1000 Subject: [PATCH 062/214] powerpc/pasemi: Use strscpy instead of strlcpy find_i2c_driver() contained the last usage of strlcpy() in arch/powerpc. The return value was used to check if strlen(src) >= n, for which strscpy() returns -E2BIG. Signed-off-by: Russell Currey Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220827063946.9073-1-ruscur@russell.cc --- arch/powerpc/platforms/pasemi/misc.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/arch/powerpc/platforms/pasemi/misc.c b/arch/powerpc/platforms/pasemi/misc.c index f859ada29074..9e9a7e46288a 100644 --- a/arch/powerpc/platforms/pasemi/misc.c +++ b/arch/powerpc/platforms/pasemi/misc.c @@ -36,8 +36,7 @@ static int __init find_i2c_driver(struct device_node *node, for (i = 0; i < ARRAY_SIZE(i2c_devices); i++) { if (!of_device_is_compatible(node, i2c_devices[i].of_device)) continue; - if (strlcpy(info->type, i2c_devices[i].i2c_type, - I2C_NAME_SIZE) >= I2C_NAME_SIZE) + if (strscpy(info->type, i2c_devices[i].i2c_type, I2C_NAME_SIZE) < 0) return -ENOMEM; return 0; } From c28c2d4abdf95655001992c4f52dc243ba00cac3 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Mon, 5 Sep 2022 14:56:37 +1000 Subject: [PATCH 063/214] powerpc/pasemi: Use of_root in pas_pci_init() Currently in pas_pci_init() a reference to the root node is leaked due to a missing of_node_put(). Instead just use of_root directly. Note that converting to of_find_compatible_node(NULL, ...) 
would not be entirely equivalent, because that would check the compatible property of the root node, whereas using of_root skips checking the root node and start the search at the first child of the root. Reported-by: Liang He Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220906010313.1296714-1-mpe@ellerman.id.au --- arch/powerpc/platforms/pasemi/pci.c | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/arch/powerpc/platforms/pasemi/pci.c b/arch/powerpc/platforms/pasemi/pci.c index 55f0160910bf..f27d31414737 100644 --- a/arch/powerpc/platforms/pasemi/pci.c +++ b/arch/powerpc/platforms/pasemi/pci.c @@ -270,18 +270,12 @@ static int __init pas_add_bridge(struct device_node *dev) void __init pas_pci_init(void) { - struct device_node *np, *root; + struct device_node *np; int res; - root = of_find_node_by_path("/"); - if (!root) { - pr_crit("pas_pci_init: can't find root of device tree\n"); - return; - } - pci_set_flags(PCI_SCAN_ALL_PCIE_DEVS); - np = of_find_compatible_node(root, NULL, "pasemi,rootbus"); + np = of_find_compatible_node(of_root, NULL, "pasemi,rootbus"); if (np) { res = pas_add_bridge(np); of_node_put(np); From 78c73c80fd860d5b3471d8eaa2778a105a56f6ab Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Wed, 7 Sep 2022 08:12:54 +0200 Subject: [PATCH 064/214] powerpc/math-emu: Inhibit W=1 warnings When building with W=1 you get: arch/powerpc/math-emu/fre.c:6:5: error: no previous prototype for 'fre' [-Werror=missing-prototypes] arch/powerpc/math-emu/fsqrt.c:11:1: error: no previous prototype for 'fsqrt' [-Werror=missing-prototypes] arch/powerpc/math-emu/fsqrts.c:12:1: error: no previous prototype for 'fsqrts' [-Werror=missing-prototypes] arch/powerpc/math-emu/frsqrtes.c:6:5: error: no previous prototype for 'frsqrtes' [-Werror=missing-prototypes] arch/powerpc/math-emu/mtfsf.c:10:1: error: no previous prototype for 'mtfsf' [-Werror=missing-prototypes] arch/powerpc/math-emu/mtfsfi.c:10:1: error: no previous prototype for 'mtfsfi' [-Werror=missing-prototypes] arch/powerpc/math-emu/fabs.c:7:1: error: no previous prototype for 'fabs' [-Werror=missing-prototypes] arch/powerpc/math-emu/fadd.c:11:1: error: no previous prototype for 'fadd' [-Werror=missing-prototypes] arch/powerpc/math-emu/fadds.c:12:1: error: no previous prototype for 'fadds' [-Werror=missing-prototypes] arch/powerpc/math-emu/fcmpo.c:11:1: error: no previous prototype for 'fcmpo' [-Werror=missing-prototypes] arch/powerpc/math-emu/fcmpu.c:11:1: error: no previous prototype for 'fcmpu' [-Werror=missing-prototypes] arch/powerpc/math-emu/fcmpu.c:14:19: error: variable 'B_c' set but not used [-Werror=unused-but-set-variable] arch/powerpc/math-emu/fcmpu.c:13:19: error: variable 'A_c' set but not used [-Werror=unused-but-set-variable] arch/powerpc/math-emu/fctiw.c:11:1: error: no previous prototype for 'fctiw' [-Werror=missing-prototypes] arch/powerpc/math-emu/fctiwz.c:11:1: error: no previous prototype for 'fctiwz' [-Werror=missing-prototypes] arch/powerpc/math-emu/fdiv.c:11:1: error: no previous prototype for 'fdiv' [-Werror=missing-prototypes] arch/powerpc/math-emu/fdivs.c:12:1: error: no previous prototype for 'fdivs' [-Werror=missing-prototypes] arch/powerpc/math-emu/fmadd.c:11:1: error: no previous prototype for 'fmadd' [-Werror=missing-prototypes] arch/powerpc/math-emu/fmadds.c:12:1: error: no previous prototype for 'fmadds' [-Werror=missing-prototypes] arch/powerpc/math-emu/fmsub.c:11:1: error: no previous prototype for 'fmsub' [-Werror=missing-prototypes] 
arch/powerpc/math-emu/fmsubs.c:12:1: error: no previous prototype for 'fmsubs' [-Werror=missing-prototypes] arch/powerpc/math-emu/fmul.c:11:1: error: no previous prototype for 'fmul' [-Werror=missing-prototypes] arch/powerpc/math-emu/fmuls.c:12:1: error: no previous prototype for 'fmuls' [-Werror=missing-prototypes] arch/powerpc/math-emu/fnabs.c:7:1: error: no previous prototype for 'fnabs' [-Werror=missing-prototypes] arch/powerpc/math-emu/fneg.c:7:1: error: no previous prototype for 'fneg' [-Werror=missing-prototypes] arch/powerpc/math-emu/fnmadd.c:11:1: error: no previous prototype for 'fnmadd' [-Werror=missing-prototypes] arch/powerpc/math-emu/fnmadds.c:12:1: error: no previous prototype for 'fnmadds' [-Werror=missing-prototypes] arch/powerpc/math-emu/fnmsub.c:11:1: error: no previous prototype for 'fnmsub' [-Werror=missing-prototypes] arch/powerpc/math-emu/fnmsubs.c:12:1: error: no previous prototype for 'fnmsubs' [-Werror=missing-prototypes] arch/powerpc/math-emu/fres.c:7:1: error: no previous prototype for 'fres' [-Werror=missing-prototypes] arch/powerpc/math-emu/frsp.c:12:1: error: no previous prototype for 'frsp' [-Werror=missing-prototypes] arch/powerpc/math-emu/fsel.c:11:1: error: no previous prototype for 'fsel' [-Werror=missing-prototypes] arch/powerpc/math-emu/lfs.c:12:1: error: no previous prototype for 'lfs' [-Werror=missing-prototypes] arch/powerpc/math-emu/frsqrte.c:7:1: error: no previous prototype for 'frsqrte' [-Werror=missing-prototypes] arch/powerpc/math-emu/fsub.c:11:1: error: no previous prototype for 'fsub' [-Werror=missing-prototypes] arch/powerpc/math-emu/fsubs.c:12:1: error: no previous prototype for 'fsubs' [-Werror=missing-prototypes] arch/powerpc/math-emu/mcrfs.c:10:1: error: no previous prototype for 'mcrfs' [-Werror=missing-prototypes] arch/powerpc/math-emu/mffs.c:10:1: error: no previous prototype for 'mffs' [-Werror=missing-prototypes] arch/powerpc/math-emu/mtfsb0.c:10:1: error: no previous prototype for 'mtfsb0' [-Werror=missing-prototypes] arch/powerpc/math-emu/mtfsb1.c:10:1: error: no previous prototype for 'mtfsb1' [-Werror=missing-prototypes] arch/powerpc/math-emu/stfiwx.c:7:1: error: no previous prototype for 'stfiwx' [-Werror=missing-prototypes] arch/powerpc/math-emu/stfs.c:12:1: error: no previous prototype for 'stfs' [-Werror=missing-prototypes] arch/powerpc/math-emu/fmr.c:7:1: error: no previous prototype for 'fmr' [-Werror=missing-prototypes] arch/powerpc/math-emu/lfd.c:10:1: error: no previous prototype for 'lfd' [-Werror=missing-prototypes] arch/powerpc/math-emu/stfd.c:7:1: error: no previous prototype for 'stfd' [-Werror=missing-prototypes] arch/powerpc/math-emu/math_efp.c:177:5: error: no previous prototype for 'do_spe_mathemu' [-Werror=missing-prototypes] arch/powerpc/math-emu/math_efp.c:726:5: error: no previous prototype for 'speround_handler' [-Werror=missing-prototypes] arch/powerpc/math-emu/math_efp.c:893:12: error: no previous prototype for 'spe_mathemu_init' [-Werror=missing-prototypes] Fix the warnings in math_efp.c by adding prototypes of do_spe_mathemu() and speround_handler() to asm/processor.h and declare spe_mathemu_init() static. The other warnings are benign and not worth the churn of fixing them, expecially the 'unused-but-set-variable' which would impact the core part of 'math-emu'. So silence them by adding -Wno-missing-prototypes -Wno-unused-but-set-variable. 
But then you get: arch/powerpc/math-emu/fre.c:6:5: error: no previous declaration for 'fre' [-Werror=missing-declarations] arch/powerpc/math-emu/fsqrt.c:11:1: error: no previous declaration for 'fsqrt' [-Werror=missing-declarations] arch/powerpc/math-emu/fsqrts.c:12:1: error: no previous declaration for 'fsqrts' [-Werror=missing-declarations] arch/powerpc/math-emu/frsqrtes.c:6:5: error: no previous declaration for 'frsqrtes' [-Werror=missing-declarations] arch/powerpc/math-emu/mtfsf.c:10:1: error: no previous declaration for 'mtfsf' [-Werror=missing-declarations] arch/powerpc/math-emu/mtfsfi.c:10:1: error: no previous declaration for 'mtfsfi' [-Werror=missing-declarations] arch/powerpc/math-emu/fabs.c:7:1: error: no previous declaration for 'fabs' [-Werror=missing-declarations] arch/powerpc/math-emu/fadd.c:11:1: error: no previous declaration for 'fadd' [-Werror=missing-declarations] arch/powerpc/math-emu/fadds.c:12:1: error: no previous declaration for 'fadds' [-Werror=missing-declarations] arch/powerpc/math-emu/fcmpo.c:11:1: error: no previous declaration for 'fcmpo' [-Werror=missing-declarations] arch/powerpc/math-emu/fcmpu.c:11:1: error: no previous declaration for 'fcmpu' [-Werror=missing-declarations] arch/powerpc/math-emu/fctiw.c:11:1: error: no previous declaration for 'fctiw' [-Werror=missing-declarations] arch/powerpc/math-emu/fctiwz.c:11:1: error: no previous declaration for 'fctiwz' [-Werror=missing-declarations] arch/powerpc/math-emu/fdiv.c:11:1: error: no previous declaration for 'fdiv' [-Werror=missing-declarations] arch/powerpc/math-emu/fdivs.c:12:1: error: no previous declaration for 'fdivs' [-Werror=missing-declarations] arch/powerpc/math-emu/fmadd.c:11:1: error: no previous declaration for 'fmadd' [-Werror=missing-declarations] arch/powerpc/math-emu/fmadds.c:12:1: error: no previous declaration for 'fmadds' [-Werror=missing-declarations] arch/powerpc/math-emu/fmsub.c:11:1: error: no previous declaration for 'fmsub' [-Werror=missing-declarations] arch/powerpc/math-emu/fmsubs.c:12:1: error: no previous declaration for 'fmsubs' [-Werror=missing-declarations] arch/powerpc/math-emu/fmul.c:11:1: error: no previous declaration for 'fmul' [-Werror=missing-declarations] arch/powerpc/math-emu/fmuls.c:12:1: error: no previous declaration for 'fmuls' [-Werror=missing-declarations] arch/powerpc/math-emu/fnabs.c:7:1: error: no previous declaration for 'fnabs' [-Werror=missing-declarations] arch/powerpc/math-emu/fneg.c:7:1: error: no previous declaration for 'fneg' [-Werror=missing-declarations] arch/powerpc/math-emu/fnmadd.c:11:1: error: no previous declaration for 'fnmadd' [-Werror=missing-declarations] arch/powerpc/math-emu/fnmadds.c:12:1: error: no previous declaration for 'fnmadds' [-Werror=missing-declarations] arch/powerpc/math-emu/fnmsub.c:11:1: error: no previous declaration for 'fnmsub' [-Werror=missing-declarations] arch/powerpc/math-emu/fnmsubs.c:12:1: error: no previous declaration for 'fnmsubs' [-Werror=missing-declarations] arch/powerpc/math-emu/fres.c:7:1: error: no previous declaration for 'fres' [-Werror=missing-declarations] arch/powerpc/math-emu/frsp.c:12:1: error: no previous declaration for 'frsp' [-Werror=missing-declarations] arch/powerpc/math-emu/fsel.c:11:1: error: no previous declaration for 'fsel' [-Werror=missing-declarations] arch/powerpc/math-emu/lfs.c:12:1: error: no previous declaration for 'lfs' [-Werror=missing-declarations] arch/powerpc/math-emu/frsqrte.c:7:1: error: no previous declaration for 'frsqrte' [-Werror=missing-declarations] 
arch/powerpc/math-emu/fsub.c:11:1: error: no previous declaration for 'fsub' [-Werror=missing-declarations] arch/powerpc/math-emu/fsubs.c:12:1: error: no previous declaration for 'fsubs' [-Werror=missing-declarations] arch/powerpc/math-emu/mcrfs.c:10:1: error: no previous declaration for 'mcrfs' [-Werror=missing-declarations] arch/powerpc/math-emu/mffs.c:10:1: error: no previous declaration for 'mffs' [-Werror=missing-declarations] arch/powerpc/math-emu/mtfsb0.c:10:1: error: no previous declaration for 'mtfsb0' [-Werror=missing-declarations] arch/powerpc/math-emu/mtfsb1.c:10:1: error: no previous declaration for 'mtfsb1' [-Werror=missing-declarations] arch/powerpc/math-emu/stfiwx.c:7:1: error: no previous declaration for 'stfiwx' [-Werror=missing-declarations] arch/powerpc/math-emu/stfs.c:12:1: error: no previous declaration for 'stfs' [-Werror=missing-declarations] arch/powerpc/math-emu/fmr.c:7:1: error: no previous declaration for 'fmr' [-Werror=missing-declarations] arch/powerpc/math-emu/lfd.c:10:1: error: no previous declaration for 'lfd' [-Werror=missing-declarations] arch/powerpc/math-emu/stfd.c:7:1: error: no previous declaration for 'stfd' [-Werror=missing-declarations] So also add -Wno-missing-declarations. Reported-by: kernel test robot Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/688084b40b5ac88f2905cb207d5dad947d8d34dc.1662531153.git.christophe.leroy@csgroup.eu --- arch/powerpc/include/asm/processor.h | 2 ++ arch/powerpc/kernel/traps.c | 2 -- arch/powerpc/math-emu/Makefile | 7 +++++++ arch/powerpc/math-emu/math_efp.c | 2 +- 4 files changed, 10 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h index fdfaae194ddd..97a77b37daa3 100644 --- a/arch/powerpc/include/asm/processor.h +++ b/arch/powerpc/include/asm/processor.h @@ -426,6 +426,8 @@ extern int fix_alignment(struct pt_regs *); #endif int do_mathemu(struct pt_regs *regs); +int do_spe_mathemu(struct pt_regs *regs); +int speround_handler(struct pt_regs *regs); /* VMX copying */ int enter_vmx_usercopy(void); diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index dadfcef5d6db..dcf4046f8565 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -2103,7 +2103,6 @@ DEFINE_INTERRUPT_HANDLER(CacheLockingException) #ifdef CONFIG_SPE DEFINE_INTERRUPT_HANDLER(SPEFloatingPointException) { - extern int do_spe_mathemu(struct pt_regs *regs); unsigned long spefscr; int fpexc_mode; int code = FPE_FLTUNK; @@ -2153,7 +2152,6 @@ DEFINE_INTERRUPT_HANDLER(SPEFloatingPointException) DEFINE_INTERRUPT_HANDLER(SPEFloatingPointRoundException) { - extern int speround_handler(struct pt_regs *regs); int err; interrupt_cond_local_irq_enable(regs); diff --git a/arch/powerpc/math-emu/Makefile b/arch/powerpc/math-emu/Makefile index 26fef2e5672e..603e59c3db10 100644 --- a/arch/powerpc/math-emu/Makefile +++ b/arch/powerpc/math-emu/Makefile @@ -16,3 +16,10 @@ obj-$(CONFIG_SPE) += math_efp.o CFLAGS_fabs.o = -fno-builtin-fabs CFLAGS_math.o = -fno-builtin-fabs + +ccflags-remove-y = -Wmissing-prototypes -Wmissing-declarations -Wunused-but-set-variable + +ifdef KBUILD_EXTRA_WARN +CFLAGS_math.o += -Wmissing-prototypes -Wmissing-declarations -Wunused-but-set-variable +CFLAGS_math_efp.o += -Wmissing-prototypes -Wmissing-declarations -Wunused-but-set-variable +endif diff --git a/arch/powerpc/math-emu/math_efp.c b/arch/powerpc/math-emu/math_efp.c index f01e3475f689..34f62aafe706 100644 --- 
a/arch/powerpc/math-emu/math_efp.c +++ b/arch/powerpc/math-emu/math_efp.c @@ -890,7 +890,7 @@ int speround_handler(struct pt_regs *regs) return 0; } -int __init spe_mathemu_init(void) +static int __init spe_mathemu_init(void) { u32 pvr, maj, min; From b11931e9adc1a439eab75c97ca4b9f15754cdcb1 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Thu, 1 Sep 2022 21:03:34 +1000 Subject: [PATCH 065/214] powerpc/64s: add pte_needs_flush and huge_pmd_needs_flush Allow PTE changes to avoid flushing the TLB when access permissions are being relaxed, the dirty bit is being set, and the accessed bit is being changed. Relaxing access permissions and setting dirty and accessed bits do not require a flush because the MMU will re-load the PTE and notice the updates (it may also cause a spurious fault). Clearing the accessed bit does not require a flush because of the imprecise PTE accessed bit accounting that is already performed, as documented in ptep_clear_flush_young(). This reduces TLB flushing for some mprotect(2) calls. Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman Reviewed-by: Christophe Leroy Link: https://lore.kernel.org/r/20220901110334.1618913-1-npiggin@gmail.com --- arch/powerpc/include/asm/book3s/64/pgtable.h | 3 + arch/powerpc/include/asm/book3s/64/tlbflush.h | 56 +++++++++++++++++++ 2 files changed, 59 insertions(+) diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h index 486902aff040..b7fd9b69e828 100644 --- a/arch/powerpc/include/asm/book3s/64/pgtable.h +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h @@ -413,6 +413,9 @@ static inline int __ptep_test_and_clear_young(struct mm_struct *mm, * event of it not getting flushed for a long time the delay * shouldn't really matter because there's no real memory * pressure for swapout to react to. ] + * + * Note: this optimisation also exists in pte_needs_flush() and + * huge_pmd_needs_flush(). */ #define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH #define ptep_clear_flush_young ptep_test_and_clear_young diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush.h b/arch/powerpc/include/asm/book3s/64/tlbflush.h index 206f920fe5b9..67655cd60545 100644 --- a/arch/powerpc/include/asm/book3s/64/tlbflush.h +++ b/arch/powerpc/include/asm/book3s/64/tlbflush.h @@ -163,6 +163,62 @@ static inline void flush_tlb_fix_spurious_fault(struct vm_area_struct *vma, */ } +static inline bool __pte_flags_need_flush(unsigned long oldval, + unsigned long newval) +{ + unsigned long delta = oldval ^ newval; + + /* + * The return value of this function doesn't matter for hash, + * ptep_modify_prot_start() does a pte_update() which does or schedules + * any necessary hash table update and flush. + */ + if (!radix_enabled()) + return true; + + /* + * We do not expect kernel mappings or non-PTEs or not-present PTEs. + */ + VM_WARN_ON_ONCE(oldval & _PAGE_PRIVILEGED); + VM_WARN_ON_ONCE(newval & _PAGE_PRIVILEGED); + VM_WARN_ON_ONCE(!(oldval & _PAGE_PTE)); + VM_WARN_ON_ONCE(!(newval & _PAGE_PTE)); + VM_WARN_ON_ONCE(!(oldval & _PAGE_PRESENT)); + VM_WARN_ON_ONCE(!(newval & _PAGE_PRESENT)); + + /* + * Must flush on any change except READ, WRITE, EXEC, DIRTY, ACCESSED. + * + * In theory, some changed software bits could be tolerated, in + * practice those should rarely if ever matter. + */ + + if (delta & ~(_PAGE_RWX | _PAGE_DIRTY | _PAGE_ACCESSED)) + return true; + + /* + * If any of the above was present in old but cleared in new, flush. 
+ * With the exception of _PAGE_ACCESSED, don't worry about flushing + * if that was cleared (see the comment in ptep_clear_flush_young()). + */ + if ((delta & ~_PAGE_ACCESSED) & oldval) + return true; + + return false; +} + +static inline bool pte_needs_flush(pte_t oldpte, pte_t newpte) +{ + return __pte_flags_need_flush(pte_val(oldpte), pte_val(newpte)); +} +#define pte_needs_flush pte_needs_flush + +static inline bool huge_pmd_needs_flush(pmd_t oldpmd, pmd_t newpmd) +{ + return __pte_flags_need_flush(pmd_val(oldpmd), pmd_val(newpmd)); +} +#define huge_pmd_needs_flush huge_pmd_needs_flush + extern bool tlbie_capable; extern bool tlbie_enabled; From d4d944ff68cb1f896d3f3b1af0bc656949dc626a Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Tue, 6 Sep 2022 22:32:13 +0100 Subject: [PATCH 066/214] powerpc/85xx: Fix fall-through warning for Clang Fix the following fallthrough warning: arch/powerpc/platforms/85xx/mpc85xx_cds.c:161:3: warning: unannotated fall-through between switch labels [-Wimplicit-fallthrough] Reported-by: kernel test robot Signed-off-by: Gustavo A. R. Silva Reviewed-by: Kees Cook Signed-off-by: Michael Ellerman Link: https://github.com/KSPP/linux/issues/198 Link: https://lore.kernel.org/lkml/202209061224.KxORRGVg-lkp@intel.com/ Link: https://lore.kernel.org/r/Yxe8XTY5C9qJLd0Z@work --- arch/powerpc/platforms/85xx/mpc85xx_cds.c | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/powerpc/platforms/85xx/mpc85xx_cds.c b/arch/powerpc/platforms/85xx/mpc85xx_cds.c index 48f3acfece0b..0b8f2101c5fb 100644 --- a/arch/powerpc/platforms/85xx/mpc85xx_cds.c +++ b/arch/powerpc/platforms/85xx/mpc85xx_cds.c @@ -159,6 +159,7 @@ static void __init mpc85xx_cds_pci_irq_fixup(struct pci_dev *dev) else dev->irq = 10; pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq); + break; default: break; } From 71a92e99c47900cc164620948b3863382cec4f1a Mon Sep 17 00:00:00 2001 From: Zheng Yongjun Date: Tue, 6 Sep 2022 14:17:03 +0000 Subject: [PATCH 067/214] powerpc/powernv: add missing of_node_put() in opal_export_attrs() After using 'np' returned by of_find_node_by_path(), of_node_put() need be called to decrease the refcount. 
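A rough sketch of the rule (hypothetical node path and attribute setup, not the real OPAL code): of_find_node_by_path() returns the node with its refcount raised, so every exit path after a successful lookup needs a matching of_node_put().

  #include <linux/init.h>
  #include <linux/kobject.h>
  #include <linux/of.h>

  static void __init export_attrs_example(struct kobject *parent)
  {
      struct device_node *np;
      struct kobject *kobj;

      np = of_find_node_by_path("/example-firmware/exports");
      if (!np)
          return;

      kobj = kobject_create_and_add("exports", parent);
      if (!kobj) {
          of_node_put(np);      /* the early-return path was missing this */
          return;
      }

      /* ... create sysfs attributes from the properties of 'np' ... */

      of_node_put(np);          /* normal path drops the reference as well */
  }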
Fixes: 11fe909d2362 ("powerpc/powernv: Add OPAL exports attributes to sysfs") Signed-off-by: Zheng Yongjun Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220906141703.118192-1-zhengyongjun3@huawei.com --- arch/powerpc/platforms/powernv/opal.c | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c index e536a6a3c801..cdf3838f08d3 100644 --- a/arch/powerpc/platforms/powernv/opal.c +++ b/arch/powerpc/platforms/powernv/opal.c @@ -892,6 +892,7 @@ static void opal_export_attrs(void) kobj = kobject_create_and_add("exports", opal_kobj); if (!kobj) { pr_warn("kobject_create_and_add() of exports failed\n"); + of_node_put(np); return; } From f88aabad33ea22be2ce1c60d8901942e4e2a9edb Mon Sep 17 00:00:00 2001 From: Nathan Lynch Date: Wed, 7 Sep 2022 17:01:11 -0500 Subject: [PATCH 068/214] Revert "powerpc/rtas: Implement reentrant rtas call" At the time this was submitted by Leonardo, I confirmed -- or thought I had confirmed -- with PowerVM partition firmware development that the following RTAS functions: - ibm,get-xive - ibm,int-off - ibm,int-on - ibm,set-xive were safe to call on multiple CPUs simultaneously, not only with respect to themselves as indicated by PAPR, but with arbitrary other RTAS calls: https://lore.kernel.org/linuxppc-dev/875zcy2v8o.fsf@linux.ibm.com/ Recent discussion with firmware development makes it clear that this is not true, and that the code in commit b664db8e3f97 ("powerpc/rtas: Implement reentrant rtas call") is unsafe, likely explaining several strange bugs we've seen in internal testing involving DLPAR and LPM. These scenarios use ibm,configure-connector, whose internal state can be corrupted by the concurrent use of the "reentrant" functions, leading to symptoms like endless busy statuses from RTAS. 
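The practical effect of the revert is that these interrupt-controller calls are once again serialized with every other RTAS call, since plain rtas_call() takes a global lock around a shared argument buffer. Conceptually (a simplified sketch, not the real rtas_call() implementation):

  #include <linux/spinlock.h>

  /* One shared lock means firmware never sees two calls in flight at once. */
  static DEFINE_RAW_SPINLOCK(example_rtas_lock);

  static int example_rtas_call(int token, int nargs, int nret, int *outputs)
  {
      unsigned long flags;
      int ret;

      raw_spin_lock_irqsave(&example_rtas_lock, flags);
      /* Marshal 'token' and the inputs into the shared buffer, enter
       * firmware, then copy any results back into 'outputs'. */
      ret = 0;    /* placeholder status */
      raw_spin_unlock_irqrestore(&example_rtas_lock, flags);

      return ret;
  }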
Fixes: b664db8e3f97 ("powerpc/rtas: Implement reentrant rtas call") Cc: stable@vger.kernel.org # v5.8+ Signed-off-by: Nathan Lynch Reviewed-by: Laurent Dufour Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220907220111.223267-1-nathanl@linux.ibm.com --- arch/powerpc/include/asm/paca.h | 1 - arch/powerpc/include/asm/rtas.h | 1 - arch/powerpc/kernel/paca.c | 32 ----------------- arch/powerpc/kernel/rtas.c | 54 ----------------------------- arch/powerpc/sysdev/xics/ics-rtas.c | 22 ++++++------ 5 files changed, 11 insertions(+), 99 deletions(-) diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h index 4d7aaab82702..3537b0500f4d 100644 --- a/arch/powerpc/include/asm/paca.h +++ b/arch/powerpc/include/asm/paca.h @@ -263,7 +263,6 @@ struct paca_struct { u64 l1d_flush_size; #endif #ifdef CONFIG_PPC_PSERIES - struct rtas_args *rtas_args_reentrant; u8 *mce_data_buf; /* buffer to hold per cpu rtas errlog */ #endif /* CONFIG_PPC_PSERIES */ diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h index 00531af17ce0..56319aea646e 100644 --- a/arch/powerpc/include/asm/rtas.h +++ b/arch/powerpc/include/asm/rtas.h @@ -240,7 +240,6 @@ extern struct rtas_t rtas; extern int rtas_token(const char *service); extern int rtas_service_present(const char *service); extern int rtas_call(int token, int, int, int *, ...); -int rtas_call_reentrant(int token, int nargs, int nret, int *outputs, ...); void rtas_call_unlocked(struct rtas_args *args, int token, int nargs, int nret, ...); extern void __noreturn rtas_restart(char *cmd); diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c index ba593fd60124..dfd097b79160 100644 --- a/arch/powerpc/kernel/paca.c +++ b/arch/powerpc/kernel/paca.c @@ -16,7 +16,6 @@ #include #include #include -#include #include "setup.h" @@ -170,30 +169,6 @@ static struct slb_shadow * __init new_slb_shadow(int cpu, unsigned long limit) } #endif /* CONFIG_PPC_64S_HASH_MMU */ -#ifdef CONFIG_PPC_PSERIES -/** - * new_rtas_args() - Allocates rtas args - * @cpu: CPU number - * @limit: Memory limit for this allocation - * - * Allocates a struct rtas_args and return it's pointer, - * if not in Hypervisor mode - * - * Return: Pointer to allocated rtas_args - * NULL if CPU in Hypervisor Mode - */ -static struct rtas_args * __init new_rtas_args(int cpu, unsigned long limit) -{ - limit = min_t(unsigned long, limit, RTAS_INSTANTIATE_MAX); - - if (early_cpu_has_feature(CPU_FTR_HVMODE)) - return NULL; - - return alloc_paca_data(sizeof(struct rtas_args), L1_CACHE_BYTES, - limit, cpu); -} -#endif /* CONFIG_PPC_PSERIES */ - /* The Paca is an array with one entry per processor. Each contains an * lppaca, which contains the information shared between the * hypervisor and Linux. 
@@ -232,10 +207,6 @@ void __init initialise_paca(struct paca_struct *new_paca, int cpu) /* For now -- if we have threads this will be adjusted later */ new_paca->tcd_ptr = &new_paca->tcd; #endif - -#ifdef CONFIG_PPC_PSERIES - new_paca->rtas_args_reentrant = NULL; -#endif } /* Put the paca pointer into r13 and SPRG_PACA */ @@ -307,9 +278,6 @@ void __init allocate_paca(int cpu) #endif #ifdef CONFIG_PPC_64S_HASH_MMU paca->slb_shadow_ptr = new_slb_shadow(cpu, limit); -#endif -#ifdef CONFIG_PPC_PSERIES - paca->rtas_args_reentrant = new_rtas_args(cpu, limit); #endif paca_struct_size += sizeof(struct paca_struct); } diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c index 693133972294..0b8a858aa847 100644 --- a/arch/powerpc/kernel/rtas.c +++ b/arch/powerpc/kernel/rtas.c @@ -43,7 +43,6 @@ #include #include #include -#include /* This is here deliberately so it's only used in this file */ void enter_rtas(unsigned long); @@ -932,59 +931,6 @@ void rtas_activate_firmware(void) pr_err("ibm,activate-firmware failed (%i)\n", fwrc); } -#ifdef CONFIG_PPC_PSERIES -/** - * rtas_call_reentrant() - Used for reentrant rtas calls - * @token: Token for desired reentrant RTAS call - * @nargs: Number of Input Parameters - * @nret: Number of Output Parameters - * @outputs: Array of outputs - * @...: Inputs for desired RTAS call - * - * According to LoPAR documentation, only "ibm,int-on", "ibm,int-off", - * "ibm,get-xive" and "ibm,set-xive" are currently reentrant. - * Reentrant calls need their own rtas_args buffer, so not using rtas.args, but - * PACA one instead. - * - * Return: -1 on error, - * First output value of RTAS call if (nret > 0), - * 0 otherwise, - */ -int rtas_call_reentrant(int token, int nargs, int nret, int *outputs, ...) -{ - va_list list; - struct rtas_args *args; - unsigned long flags; - int i, ret = 0; - - if (!rtas.entry || token == RTAS_UNKNOWN_SERVICE) - return -1; - - local_irq_save(flags); - preempt_disable(); - - /* We use the per-cpu (PACA) rtas args buffer */ - args = local_paca->rtas_args_reentrant; - - va_start(list, outputs); - va_rtas_call_unlocked(args, token, nargs, nret, list); - va_end(list); - - if (nret > 1 && outputs) - for (i = 0; i < nret - 1; ++i) - outputs[i] = be32_to_cpu(args->rets[i + 1]); - - if (nret > 0) - ret = be32_to_cpu(args->rets[0]); - - local_irq_restore(flags); - preempt_enable(); - - return ret; -} - -#endif /* CONFIG_PPC_PSERIES */ - /** * get_pseries_errorlog() - Find a specific pseries error log in an RTAS * extended event log. 
diff --git a/arch/powerpc/sysdev/xics/ics-rtas.c b/arch/powerpc/sysdev/xics/ics-rtas.c index 9e7007f9aca5..f8320f8e5bc7 100644 --- a/arch/powerpc/sysdev/xics/ics-rtas.c +++ b/arch/powerpc/sysdev/xics/ics-rtas.c @@ -36,8 +36,8 @@ static void ics_rtas_unmask_irq(struct irq_data *d) server = xics_get_irq_server(d->irq, irq_data_get_affinity_mask(d), 0); - call_status = rtas_call_reentrant(ibm_set_xive, 3, 1, NULL, hw_irq, - server, DEFAULT_PRIORITY); + call_status = rtas_call(ibm_set_xive, 3, 1, NULL, hw_irq, server, + DEFAULT_PRIORITY); if (call_status != 0) { printk(KERN_ERR "%s: ibm_set_xive irq %u server %x returned %d\n", @@ -46,7 +46,7 @@ static void ics_rtas_unmask_irq(struct irq_data *d) } /* Now unmask the interrupt (often a no-op) */ - call_status = rtas_call_reentrant(ibm_int_on, 1, 1, NULL, hw_irq); + call_status = rtas_call(ibm_int_on, 1, 1, NULL, hw_irq); if (call_status != 0) { printk(KERN_ERR "%s: ibm_int_on irq=%u returned %d\n", __func__, hw_irq, call_status); @@ -68,7 +68,7 @@ static void ics_rtas_mask_real_irq(unsigned int hw_irq) if (hw_irq == XICS_IPI) return; - call_status = rtas_call_reentrant(ibm_int_off, 1, 1, NULL, hw_irq); + call_status = rtas_call(ibm_int_off, 1, 1, NULL, hw_irq); if (call_status != 0) { printk(KERN_ERR "%s: ibm_int_off irq=%u returned %d\n", __func__, hw_irq, call_status); @@ -76,8 +76,8 @@ static void ics_rtas_mask_real_irq(unsigned int hw_irq) } /* Have to set XIVE to 0xff to be able to remove a slot */ - call_status = rtas_call_reentrant(ibm_set_xive, 3, 1, NULL, hw_irq, - xics_default_server, 0xff); + call_status = rtas_call(ibm_set_xive, 3, 1, NULL, hw_irq, + xics_default_server, 0xff); if (call_status != 0) { printk(KERN_ERR "%s: ibm_set_xive(0xff) irq=%u returned %d\n", __func__, hw_irq, call_status); @@ -108,7 +108,7 @@ static int ics_rtas_set_affinity(struct irq_data *d, if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS) return -1; - status = rtas_call_reentrant(ibm_get_xive, 1, 3, xics_status, hw_irq); + status = rtas_call(ibm_get_xive, 1, 3, xics_status, hw_irq); if (status) { printk(KERN_ERR "%s: ibm,get-xive irq=%u returns %d\n", @@ -126,8 +126,8 @@ static int ics_rtas_set_affinity(struct irq_data *d, pr_debug("%s: irq %d [hw 0x%x] server: 0x%x\n", __func__, d->irq, hw_irq, irq_server); - status = rtas_call_reentrant(ibm_set_xive, 3, 1, NULL, - hw_irq, irq_server, xics_status[1]); + status = rtas_call(ibm_set_xive, 3, 1, NULL, + hw_irq, irq_server, xics_status[1]); if (status) { printk(KERN_ERR "%s: ibm,set-xive irq=%u returns %d\n", @@ -158,7 +158,7 @@ static int ics_rtas_check(struct ics *ics, unsigned int hw_irq) return -EINVAL; /* Check if RTAS knows about this interrupt */ - rc = rtas_call_reentrant(ibm_get_xive, 1, 3, status, hw_irq); + rc = rtas_call(ibm_get_xive, 1, 3, status, hw_irq); if (rc) return -ENXIO; @@ -174,7 +174,7 @@ static long ics_rtas_get_server(struct ics *ics, unsigned long vec) { int rc, status[2]; - rc = rtas_call_reentrant(ibm_get_xive, 1, 3, status, vec); + rc = rtas_call(ibm_get_xive, 1, 3, status, vec); if (rc) return -1; return status[0]; From edd100634a5eb99cf97868c419bdf44d44355c4f Mon Sep 17 00:00:00 2001 From: Gaosheng Cui Date: Tue, 13 Sep 2022 15:50:21 +0800 Subject: [PATCH 069/214] powerpc/xmon: remove unused ppc_parse_cpu() declaration ppc_parse_cpu() has been removed since commit 5b102782c7f4 ("powerpc/xmon: Enable disassembly files (compilation changes)"), so remove it. 
Signed-off-by: Gaosheng Cui Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220913075029.682327-2-cuigaosheng1@huawei.com --- arch/powerpc/xmon/ppc.h | 2 -- 1 file changed, 2 deletions(-) diff --git a/arch/powerpc/xmon/ppc.h b/arch/powerpc/xmon/ppc.h index d00f33dcf192..1d98b8dd134e 100644 --- a/arch/powerpc/xmon/ppc.h +++ b/arch/powerpc/xmon/ppc.h @@ -435,8 +435,6 @@ struct powerpc_macro extern const struct powerpc_macro powerpc_macros[]; extern const int powerpc_num_macros; -extern ppc_cpu_t ppc_parse_cpu (ppc_cpu_t, ppc_cpu_t *, const char *); - static inline long ppc_optional_operand_value (const struct powerpc_operand *operand) { From cf78ddd3a1040a84bf882eecea44626dbad450c4 Mon Sep 17 00:00:00 2001 From: Gaosheng Cui Date: Tue, 13 Sep 2022 15:50:22 +0800 Subject: [PATCH 070/214] powerpc/spufs: remove orphan declarations from spufs.h Remove the following orphan declarations from spufs.h: 1. spufs_coredump_calls has been removed since commit 48cad41f7ee7 ("[POWERPC] spufs: Combine spufs_coredump_calls with spufs_calls"). 2. spufs_coredump_num_notes has been removed since commit 936d5bf1d7dc ("[POWERPC] spufs: Get rid of spufs_coredump_num_notes, it's not needed if we NULL terminate"). Signed-off-by: Gaosheng Cui Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220913075029.682327-3-cuigaosheng1@huawei.com --- arch/powerpc/platforms/cell/spufs/spufs.h | 2 -- 1 file changed, 2 deletions(-) diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h index 23c6799cfa5a..403c4aa3f6cd 100644 --- a/arch/powerpc/platforms/cell/spufs/spufs.h +++ b/arch/powerpc/platforms/cell/spufs/spufs.h @@ -333,7 +333,6 @@ void spufs_stop_callback(struct spu *spu, int irq); void spufs_mfc_callback(struct spu *spu); void spufs_dma_callback(struct spu *spu, int type); -extern struct spu_coredump_calls spufs_coredump_calls; struct spufs_coredump_reader { char *name; ssize_t (*dump)(struct spu_context *ctx, struct coredump_params *cprm); @@ -341,7 +340,6 @@ struct spufs_coredump_reader { size_t size; }; extern const struct spufs_coredump_reader spufs_coredump_read[]; -extern int spufs_coredump_num_notes; extern int spu_init_csa(struct spu_state *csa); extern void spu_fini_csa(struct spu_state *csa); From 29e1eb9169a9c73985ed15361520900ce1cef1d4 Mon Sep 17 00:00:00 2001 From: Gaosheng Cui Date: Tue, 13 Sep 2022 15:50:23 +0800 Subject: [PATCH 071/214] powerpc: remove unused chrp_event_scan() declaration chrp_event_scan() has been removed since commit 3d541c4b7f6e ("powerpc/chrp: Use the same RTAS daemon as pSeries"), so remove it. 
Signed-off-by: Gaosheng Cui Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220913075029.682327-4-cuigaosheng1@huawei.com --- arch/powerpc/platforms/chrp/chrp.h | 1 - 1 file changed, 1 deletion(-) diff --git a/arch/powerpc/platforms/chrp/chrp.h b/arch/powerpc/platforms/chrp/chrp.h index a5a7c338caf9..6ff4631d9db4 100644 --- a/arch/powerpc/platforms/chrp/chrp.h +++ b/arch/powerpc/platforms/chrp/chrp.h @@ -9,4 +9,3 @@ extern int chrp_set_rtc_time(struct rtc_time *); extern long chrp_time_init(void); extern void chrp_find_bridges(void); -extern void chrp_event_scan(unsigned long); From b5a472ad81ba23327ef11ca5b4ba9fd8ed38e45e Mon Sep 17 00:00:00 2001 From: Gaosheng Cui Date: Tue, 13 Sep 2022 15:50:24 +0800 Subject: [PATCH 072/214] powerpc: remove unused udbg_init_debug_beat() declaration udbg_init_debug_beat() has been removed since commit bf4981a00636 ("powerpc: Remove the celleb support"), so remove it. Signed-off-by: Gaosheng Cui Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220913075029.682327-5-cuigaosheng1@huawei.com --- arch/powerpc/include/asm/udbg.h | 1 - 1 file changed, 1 deletion(-) diff --git a/arch/powerpc/include/asm/udbg.h b/arch/powerpc/include/asm/udbg.h index b4aa0d88ce2c..524c2085070f 100644 --- a/arch/powerpc/include/asm/udbg.h +++ b/arch/powerpc/include/asm/udbg.h @@ -42,7 +42,6 @@ extern void __init udbg_init_maple_realmode(void); extern void __init udbg_init_pas_realmode(void); extern void __init udbg_init_rtas_panel(void); extern void __init udbg_init_rtas_console(void); -extern void __init udbg_init_debug_beat(void); extern void __init udbg_init_btext(void); extern void __init udbg_init_44x_as1(void); extern void __init udbg_init_40x_realmode(void); From d24b8f01fe7b848f88ff0a3204a674a092f365d0 Mon Sep 17 00:00:00 2001 From: Gaosheng Cui Date: Tue, 13 Sep 2022 15:50:25 +0800 Subject: [PATCH 073/214] powerpc/mm: remove orphan declarations from mmu_context.h Remove the following orphan declarations from mmu_context.h: 1. switch_cop() and drop_cop() have been removed since commit 6ff4d3e96652 ("powerpc: Remove old unused icswx based coprocessor support"). 2. mm_iommu_cleanup() has been removed since commit 4b6fad7097f8 ("powerpc/mm/iommu, vfio/spapr: Put pages on VFIO container shutdown"). So remove the declarations for them from header file. 
Signed-off-by: Gaosheng Cui Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220913075029.682327-6-cuigaosheng1@huawei.com --- arch/powerpc/include/asm/mmu_context.h | 2 -- 1 file changed, 2 deletions(-) diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h index 3f25bd3e14eb..c1ea270bb848 100644 --- a/arch/powerpc/include/asm/mmu_context.h +++ b/arch/powerpc/include/asm/mmu_context.h @@ -31,7 +31,6 @@ extern long mm_iommu_newdev(struct mm_struct *mm, unsigned long ua, extern long mm_iommu_put(struct mm_struct *mm, struct mm_iommu_table_group_mem_t *mem); extern void mm_iommu_init(struct mm_struct *mm); -extern void mm_iommu_cleanup(struct mm_struct *mm); extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm, unsigned long ua, unsigned long size); extern struct mm_iommu_table_group_mem_t *mm_iommu_get(struct mm_struct *mm, @@ -117,7 +116,6 @@ static inline bool need_extra_context(struct mm_struct *mm, unsigned long ea) } #endif -extern void switch_cop(struct mm_struct *next); extern int use_cop(unsigned long acop, struct mm_struct *mm); extern void drop_cop(unsigned long acop, struct mm_struct *mm); From 77d30535816e90ff6a4466210c403a6b8b42a0a5 Mon Sep 17 00:00:00 2001 From: Gaosheng Cui Date: Tue, 13 Sep 2022 15:50:26 +0800 Subject: [PATCH 074/214] powerpc/powernv: remove orphan declarations from opal.h Remove the following orphan declarations from opal.h: 1. opal_notifier_register() 2. opal_notifier_unregister() 3. opal_notifier_update_evt() 4. opal_notifier_enable() 5. opal_notifier_disable() They have been removed since commit 81f2f7ce4c5b ("opal: Remove events notifier"), so remove them. Signed-off-by: Gaosheng Cui Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220913075029.682327-7-cuigaosheng1@huawei.com --- arch/powerpc/include/asm/opal.h | 6 ------ 1 file changed, 6 deletions(-) diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h index bfd3142cd0ba..726125a534de 100644 --- a/arch/powerpc/include/asm/opal.h +++ b/arch/powerpc/include/asm/opal.h @@ -324,16 +324,10 @@ extern int opal_flush_console(uint32_t vtermno); extern void hvc_opal_init_early(void); -extern int opal_notifier_register(struct notifier_block *nb); -extern int opal_notifier_unregister(struct notifier_block *nb); - extern int opal_message_notifier_register(enum opal_msg_type msg_type, struct notifier_block *nb); extern int opal_message_notifier_unregister(enum opal_msg_type msg_type, struct notifier_block *nb); -extern void opal_notifier_enable(void); -extern void opal_notifier_disable(void); -extern void opal_notifier_update_evt(uint64_t evt_mask, uint64_t evt_val); extern int opal_async_get_token_interruptible(void); extern int opal_async_release_token(int token); From 3abed8acfe95046b27117f48d52dccd2ea82a322 Mon Sep 17 00:00:00 2001 From: Gaosheng Cui Date: Tue, 13 Sep 2022 15:50:27 +0800 Subject: [PATCH 075/214] powerpc/sysdev: remove unused xics_ipi_dispatch() declaration xics_ipi_dispatch() has been removed since commit 23d72bfd8f9f ("powerpc: Consolidate ipi message mux and demux"), so remove it. 
Signed-off-by: Gaosheng Cui Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220913075029.682327-8-cuigaosheng1@huawei.com --- arch/powerpc/include/asm/xics.h | 1 - 1 file changed, 1 deletion(-) diff --git a/arch/powerpc/include/asm/xics.h b/arch/powerpc/include/asm/xics.h index e2e704eca5f6..89090485bec1 100644 --- a/arch/powerpc/include/asm/xics.h +++ b/arch/powerpc/include/asm/xics.h @@ -159,7 +159,6 @@ extern void xics_setup_cpu(void); extern void xics_update_irq_servers(void); extern void xics_set_cpu_giq(unsigned int gserver, unsigned int join); extern void xics_mask_unknown_vec(unsigned int vec); -extern irqreturn_t xics_ipi_dispatch(int cpu); extern void xics_smp_probe(void); extern void xics_register_ics(struct ics *ics); extern void xics_teardown_cpu(void); From b47f0024f990803f36e6a06f82e9d0dbe8424c26 Mon Sep 17 00:00:00 2001 From: Gaosheng Cui Date: Tue, 13 Sep 2022 15:50:28 +0800 Subject: [PATCH 076/214] powerpc/ps3: remove orphan declarations from ps3av.h Remove the following orphan declarations from ps3av.h: 1. ps3av_dev_open() 2. ps3av_dev_close() They have been removed since commit 13a5e30cf740 ("[POWERPC] PS3: Rework AV settings driver"), so remove them. Signed-off-by: Gaosheng Cui Acked-by: Geoff Levand Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220913075029.682327-9-cuigaosheng1@huawei.com --- arch/powerpc/include/asm/ps3av.h | 2 -- 1 file changed, 2 deletions(-) diff --git a/arch/powerpc/include/asm/ps3av.h b/arch/powerpc/include/asm/ps3av.h index 82db78fc169d..c8b0f2ffcd35 100644 --- a/arch/powerpc/include/asm/ps3av.h +++ b/arch/powerpc/include/asm/ps3av.h @@ -726,6 +726,4 @@ extern int ps3av_video_mode2res(u32, u32 *, u32 *); extern int ps3av_video_mute(int); extern int ps3av_audio_mute(int); extern int ps3av_audio_mute_analog(int); -extern int ps3av_dev_open(void); -extern int ps3av_dev_close(void); #endif /* _ASM_POWERPC_PS3AV_H_ */ From 3d7a198cfdb47405cfb4a3ea523876569fe341e6 Mon Sep 17 00:00:00 2001 From: Gaosheng Cui Date: Tue, 13 Sep 2022 15:50:29 +0800 Subject: [PATCH 077/214] KVM: PPC: remove orphan declarations from kvm_ppc.h Remove the following orphan declarations from kvm_ppc.h: 1. kvmppc_mmu_priv_switch() has been removed since commit dd9ebf1f9435 ("KVM: PPC: e500: Add shadow PID support"). 2. kvmppc_core_destroy_mmu() has been removed since commit ecc0981ff07c ("KVM: ppc: cosmetic changes to mmu hook names"). 3. kvmppc_prepare_vrma() has been removed since commit aa04b4cc5be6 ("KVM: PPC: Allocate RMAs (Real Mode Areas) at boot for use by guests"). So remove the declarations for them from header file. 
Signed-off-by: Gaosheng Cui Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220913075029.682327-10-cuigaosheng1@huawei.com --- arch/powerpc/include/asm/kvm_ppc.h | 4 ---- 1 file changed, 4 deletions(-) diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h index 9f625af3b65b..bfacf12784dd 100644 --- a/arch/powerpc/include/asm/kvm_ppc.h +++ b/arch/powerpc/include/asm/kvm_ppc.h @@ -104,7 +104,6 @@ extern void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu); extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr, unsigned int gtlb_idx); -extern void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode); extern void kvmppc_mmu_switch_pid(struct kvm_vcpu *vcpu, u32 pid); extern int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr); extern int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr); @@ -153,7 +152,6 @@ extern int kvmppc_core_check_requests(struct kvm_vcpu *vcpu); extern int kvmppc_booke_init(void); extern void kvmppc_booke_exit(void); -extern void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu); extern int kvmppc_kvm_pv(struct kvm_vcpu *vcpu); extern void kvmppc_map_magic(struct kvm_vcpu *vcpu); @@ -162,8 +160,6 @@ extern void kvmppc_set_hpt(struct kvm *kvm, struct kvm_hpt_info *info); extern long kvmppc_alloc_reset_hpt(struct kvm *kvm, int order); extern void kvmppc_free_hpt(struct kvm_hpt_info *info); extern void kvmppc_rmap_reset(struct kvm *kvm); -extern long kvmppc_prepare_vrma(struct kvm *kvm, - struct kvm_userspace_memory_region *mem); extern void kvmppc_map_vrma(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot, unsigned long porder); extern int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu); From 9a10ccb29c0a2befa5a9f691ed0ae37ee3e799a8 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Mon, 26 Sep 2022 15:38:23 +1000 Subject: [PATCH 078/214] powerpc/pseries: move hcall_tracepoint_refcount out of .toc The .toc section is not really intended for arbitrary data. Writable data in particular prevents making the TOC read-only after relocation. Move hcall_tracepoint_refcount into the .data section. Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220926053823.2668799-1-npiggin@gmail.com --- arch/powerpc/platforms/pseries/hvCall.S | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/platforms/pseries/hvCall.S b/arch/powerpc/platforms/pseries/hvCall.S index ab9fc6506861..762eb15d3bd4 100644 --- a/arch/powerpc/platforms/pseries/hvCall.S +++ b/arch/powerpc/platforms/pseries/hvCall.S @@ -16,7 +16,7 @@ #ifdef CONFIG_TRACEPOINTS #ifndef CONFIG_JUMP_LABEL - .section ".toc","aw" + .data .globl hcall_tracepoint_refcount hcall_tracepoint_refcount: @@ -88,7 +88,7 @@ hcall_tracepoint_refcount: BEGIN_FTR_SECTION; \ b 1f; \ END_FTR_SECTION(0, 1); \ - ld r12,hcall_tracepoint_refcount@toc(r2); \ + LOAD_REG_ADDR(r12, hcall_tracepoint_refcount) ; \ std r12,32(r1); \ cmpdi r12,0; \ bne- LABEL; \ From 0c32903197ce9f7119aee75a6bcaa4b49e0cd21a Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Sat, 17 Sep 2022 16:36:47 +1000 Subject: [PATCH 079/214] powerpc/64: Remove unused prom_init_toc symbols Commit 24d33ac5b8ff ("powerpc/64s: Make prom_init require RELOCATABLE") made prom_init depend on CONFIG_RELOCATABLE. But it missed cleaning up a case in the linker script for RELOCATABLE=n, and associated symbols. Remove them now. 
Fixes: 24d33ac5b8ff ("powerpc/64s: Make prom_init require RELOCATABLE") Reported-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220920131157.1032707-1-mpe@ellerman.id.au --- arch/powerpc/include/asm/sections.h | 3 --- arch/powerpc/kernel/prom_init_check.sh | 3 +-- arch/powerpc/kernel/vmlinux.lds.S | 5 ----- 3 files changed, 1 insertion(+), 10 deletions(-) diff --git a/arch/powerpc/include/asm/sections.h b/arch/powerpc/include/asm/sections.h index 183e6b8af392..babda2677b30 100644 --- a/arch/powerpc/include/asm/sections.h +++ b/arch/powerpc/include/asm/sections.h @@ -32,9 +32,6 @@ extern long kvm_flush_link_stack; extern char __start_interrupts[]; extern char __end_interrupts[]; -extern char __prom_init_toc_start[]; -extern char __prom_init_toc_end[]; - #ifdef CONFIG_PPC_POWERNV extern char start_real_trampolines[]; extern char end_real_trampolines[]; diff --git a/arch/powerpc/kernel/prom_init_check.sh b/arch/powerpc/kernel/prom_init_check.sh index dfa5f729f774..311890d71c4c 100644 --- a/arch/powerpc/kernel/prom_init_check.sh +++ b/arch/powerpc/kernel/prom_init_check.sh @@ -26,8 +26,7 @@ _end enter_prom $MEM_FUNCS reloc_offset __secondary_hold __secondary_hold_acknowledge __secondary_hold_spinloop __start logo_linux_clut224 btext_prepare_BAT reloc_got2 kernstart_addr memstart_addr linux_banner _stext -__prom_init_toc_start __prom_init_toc_end btext_setup_display TOC. -relocate" +btext_setup_display TOC. relocate" NM="$1" OBJ="$2" diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S index fe22d940412f..0f2a10b029e8 100644 --- a/arch/powerpc/kernel/vmlinux.lds.S +++ b/arch/powerpc/kernel/vmlinux.lds.S @@ -337,11 +337,6 @@ SECTIONS .got : AT(ADDR(.got) - LOAD_OFFSET) ALIGN(256) { *(.got) -#ifndef CONFIG_RELOCATABLE - __prom_init_toc_start = .; - arch/powerpc/kernel/prom_init.o*(.toc) - __prom_init_toc_end = .; -#endif *(.toc) } #endif From 331771e836e6a32c8632d8cf5e2cdd94471258ad Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Fri, 16 Sep 2022 14:40:57 +1000 Subject: [PATCH 080/214] powerpc/vmlinux.lds: Ensure STRICT_ALIGN_SIZE is at least page aligned Add a check that STRICT_ALIGN_SIZE is aligned to at least PAGE_SIZE. That then makes the alignment to PAGE_SIZE immediately after the alignment to STRICT_ALIGN_SIZE redundant, so remove it. Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220916131422.318752-1-mpe@ellerman.id.au --- arch/powerpc/kernel/vmlinux.lds.S | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S index 0f2a10b029e8..dacf8b4302d9 100644 --- a/arch/powerpc/kernel/vmlinux.lds.S +++ b/arch/powerpc/kernel/vmlinux.lds.S @@ -32,6 +32,10 @@ #define STRICT_ALIGN_SIZE (1 << CONFIG_DATA_SHIFT) +#if STRICT_ALIGN_SIZE < PAGE_SIZE +#error "CONFIG_DATA_SHIFT must be >= PAGE_SHIFT" +#endif + ENTRY(_stext) PHDRS { @@ -215,7 +219,6 @@ SECTIONS */ . = ALIGN(STRICT_ALIGN_SIZE); __init_begin = .; - . = ALIGN(PAGE_SIZE); .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) { _sinittext = .; INIT_TEXT From b150a4d12b919baf956b807aa305cf78df03d0fe Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Fri, 16 Sep 2022 14:41:24 +1000 Subject: [PATCH 081/214] powerpc/vmlinux.lds: Add an explicit symbol for the SRWX boundary Currently __init_begin is used as the boundary for strict RWX between executable/read-only text and data, and non-executable (after boot) code and data. 
But that's a little subtle, so add an explicit symbol to document that the SRWX boundary lies there, and add a comment making it clear that __init_begin must also begin there. Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220916131422.318752-2-mpe@ellerman.id.au --- arch/powerpc/include/asm/sections.h | 1 + arch/powerpc/kernel/vmlinux.lds.S | 9 +++++++-- arch/powerpc/mm/book3s32/mmu.c | 2 +- arch/powerpc/mm/book3s64/radix_pgtable.c | 4 ++-- 4 files changed, 11 insertions(+), 5 deletions(-) diff --git a/arch/powerpc/include/asm/sections.h b/arch/powerpc/include/asm/sections.h index babda2677b30..9c00c9c0ca8f 100644 --- a/arch/powerpc/include/asm/sections.h +++ b/arch/powerpc/include/asm/sections.h @@ -13,6 +13,7 @@ typedef struct func_desc func_desc_t; #include extern char __head_end[]; +extern char __srwx_boundary[]; /* Patch sites */ extern s32 patch__call_flush_branch_caches1; diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S index dacf8b4302d9..29d891329856 100644 --- a/arch/powerpc/kernel/vmlinux.lds.S +++ b/arch/powerpc/kernel/vmlinux.lds.S @@ -214,11 +214,16 @@ SECTIONS } #endif + /* + * Various code relies on __init_begin being at the strict RWX boundary. + */ + . = ALIGN(STRICT_ALIGN_SIZE); + __srwx_boundary = .; + __init_begin = .; + /* * Init sections discarded at runtime */ - . = ALIGN(STRICT_ALIGN_SIZE); - __init_begin = .; .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) { _sinittext = .; INIT_TEXT diff --git a/arch/powerpc/mm/book3s32/mmu.c b/arch/powerpc/mm/book3s32/mmu.c index a96b73006dfb..250d174459d4 100644 --- a/arch/powerpc/mm/book3s32/mmu.c +++ b/arch/powerpc/mm/book3s32/mmu.c @@ -158,7 +158,7 @@ static unsigned long __init __mmu_mapin_ram(unsigned long base, unsigned long to unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top) { unsigned long done; - unsigned long border = (unsigned long)__init_begin - PAGE_OFFSET; + unsigned long border = (unsigned long)__srwx_boundary - PAGE_OFFSET; unsigned long size; size = roundup_pow_of_two((unsigned long)_einittext - PAGE_OFFSET); diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c index 698274109c91..9f880f91f584 100644 --- a/arch/powerpc/mm/book3s64/radix_pgtable.c +++ b/arch/powerpc/mm/book3s64/radix_pgtable.c @@ -259,8 +259,8 @@ print_mapping(unsigned long start, unsigned long end, unsigned long size, bool e static unsigned long next_boundary(unsigned long addr, unsigned long end) { #ifdef CONFIG_STRICT_KERNEL_RWX - if (addr < __pa_symbol(__init_begin)) - return __pa_symbol(__init_begin); + if (addr < __pa_symbol(__srwx_boundary)) + return __pa_symbol(__srwx_boundary); #endif return end; } From 7082f8e7d2276575a8806370007cbb4a7b9abdce Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Fri, 16 Sep 2022 14:07:49 +1000 Subject: [PATCH 082/214] powerpc: move __end_rodata to cover arch read-only sections powerpc has a number of read-only sections and tables that are put after RO_DATA(). Move the __end_rodata symbol to cover these as well. Setting memory to read-only at boot is done using __init_begin, change that to use __end_rodata. This makes is_kernel_rodata() exactly cover the read-only region, as well as other things using __end_rodata (e.g., kernel/dma/debug.c). Boot dmesg also prints the rodata size more accurately. 
Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220916040755.2398112-2-npiggin@gmail.com --- arch/powerpc/kernel/vmlinux.lds.S | 1 + arch/powerpc/mm/book3s32/mmu.c | 2 +- arch/powerpc/mm/book3s64/hash_pgtable.c | 2 +- arch/powerpc/mm/book3s64/radix_pgtable.c | 2 +- arch/powerpc/mm/pgtable_32.c | 7 ++++--- 5 files changed, 8 insertions(+), 6 deletions(-) diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S index 29d891329856..5fe33a659029 100644 --- a/arch/powerpc/kernel/vmlinux.lds.S +++ b/arch/powerpc/kernel/vmlinux.lds.S @@ -219,6 +219,7 @@ SECTIONS */ . = ALIGN(STRICT_ALIGN_SIZE); __srwx_boundary = .; + __end_rodata = .; __init_begin = .; /* diff --git a/arch/powerpc/mm/book3s32/mmu.c b/arch/powerpc/mm/book3s32/mmu.c index 250d174459d4..e5ee05c234c7 100644 --- a/arch/powerpc/mm/book3s32/mmu.c +++ b/arch/powerpc/mm/book3s32/mmu.c @@ -240,7 +240,7 @@ void mmu_mark_rodata_ro(void) for (i = 0; i < nb; i++) { struct ppc_bat *bat = BATS[i]; - if (bat_addrs[i].start < (unsigned long)__init_begin) + if (bat_addrs[i].start < (unsigned long)__end_rodata) bat[1].batl = (bat[1].batl & ~BPP_RW) | BPP_RX; } diff --git a/arch/powerpc/mm/book3s64/hash_pgtable.c b/arch/powerpc/mm/book3s64/hash_pgtable.c index ae008b9df0e6..28332001bd87 100644 --- a/arch/powerpc/mm/book3s64/hash_pgtable.c +++ b/arch/powerpc/mm/book3s64/hash_pgtable.c @@ -541,7 +541,7 @@ void hash__mark_rodata_ro(void) unsigned long start, end, pp; start = (unsigned long)_stext; - end = (unsigned long)__init_begin; + end = (unsigned long)__end_rodata; pp = htab_convert_pte_flags(pgprot_val(PAGE_KERNEL_ROX), HPTE_USE_KERNEL_KEY); diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c index 9f880f91f584..60c277a9b043 100644 --- a/arch/powerpc/mm/book3s64/radix_pgtable.c +++ b/arch/powerpc/mm/book3s64/radix_pgtable.c @@ -228,7 +228,7 @@ void radix__mark_rodata_ro(void) unsigned long start, end; start = (unsigned long)_stext; - end = (unsigned long)__init_begin; + end = (unsigned long)__end_rodata; radix__change_memory_range(start, end, _PAGE_WRITE); } diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c index 3ac73f9fb5d5..5c02fd08d61e 100644 --- a/arch/powerpc/mm/pgtable_32.c +++ b/arch/powerpc/mm/pgtable_32.c @@ -158,10 +158,11 @@ void mark_rodata_ro(void) } /* - * mark .text and .rodata as read only. Use __init_begin rather than - * __end_rodata to cover NOTES and EXCEPTION_TABLE. + * mark text and rodata as read only. __end_rodata is set by + * powerpc's linker script and includes tables and data + * requiring relocation which are not put in RO_DATA. */ - numpages = PFN_UP((unsigned long)__init_begin) - + numpages = PFN_UP((unsigned long)__end_rodata) - PFN_DOWN((unsigned long)_stext); set_memory_ro((unsigned long)_stext, numpages); From 1faa1235c1a00614bc4849a8dbd0790363c9a22f Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Fri, 16 Sep 2022 14:07:50 +1000 Subject: [PATCH 083/214] powerpc/32/build: move got1/got2 sections out of text Following the example from the binutils default linker script, move .got1 and .got2 out of .text, to just after RO_DATA. 
Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220916040755.2398112-3-npiggin@gmail.com --- arch/powerpc/kernel/vmlinux.lds.S | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S index 5fe33a659029..a922c348b4f7 100644 --- a/arch/powerpc/kernel/vmlinux.lds.S +++ b/arch/powerpc/kernel/vmlinux.lds.S @@ -126,14 +126,6 @@ SECTIONS *(.sfpr); MEM_KEEP(init.text) MEM_KEEP(exit.text) - -#ifdef CONFIG_PPC32 - *(.got1) - __got2_start = .; - *(.got2) - __got2_end = .; -#endif /* CONFIG_PPC32 */ - } :text . = ALIGN(PAGE_SIZE); @@ -143,7 +135,16 @@ SECTIONS /* Read-only data */ RO_DATA(PAGE_SIZE) -#ifdef CONFIG_PPC64 +#ifdef CONFIG_PPC32 + .got1 : AT(ADDR(.got1) - LOAD_OFFSET) { + *(.got1) + } + .got2 : AT(ADDR(.got2) - LOAD_OFFSET) { + __got2_start = .; + *(.got2) + __got2_end = .; + } +#else /* CONFIG_PPC32 */ SOFT_MASK_TABLE(8) RESTART_TABLE(8) @@ -194,7 +195,7 @@ SECTIONS *(__rfi_flush_fixup) __stop___rfi_flush_fixup = .; } -#endif /* CONFIG_PPC64 */ +#endif /* CONFIG_PPC32 */ #ifdef CONFIG_PPC_BARRIER_NOSPEC . = ALIGN(8); From f21ba4499a15b76ad6013ca0a60873dbcf164c7b Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Fri, 16 Sep 2022 14:07:51 +1000 Subject: [PATCH 084/214] powerpc/build: move got, toc, plt, branch_lt sections to read-only This moves linker-related tables from .data to read-only area. Relocations are performed at early boot time before memory is protected, after which there should be no modifications required. Signed-off-by: Nicholas Piggin [mpe: Don't use SPECIAL as reported by lkp@intel.com] Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220916040755.2398112-4-npiggin@gmail.com --- arch/powerpc/kernel/vmlinux.lds.S | 32 +++++++++++++++++++++---------- 1 file changed, 22 insertions(+), 10 deletions(-) diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S index a922c348b4f7..0bacec66fecf 100644 --- a/arch/powerpc/kernel/vmlinux.lds.S +++ b/arch/powerpc/kernel/vmlinux.lds.S @@ -135,6 +135,10 @@ SECTIONS /* Read-only data */ RO_DATA(PAGE_SIZE) + .branch_lt : AT(ADDR(.branch_lt) - LOAD_OFFSET) { + *(.branch_lt) + } + #ifdef CONFIG_PPC32 .got1 : AT(ADDR(.got1) - LOAD_OFFSET) { *(.got1) @@ -144,7 +148,25 @@ SECTIONS *(.got2) __got2_end = .; } + .got : AT(ADDR(.got) - LOAD_OFFSET) { + *(.got) + *(.got.plt) + } + .plt : AT(ADDR(.plt) - LOAD_OFFSET) { + /* XXX: is .plt (and .got.plt) required? */ + *(.plt) + } + #else /* CONFIG_PPC32 */ + .toc1 : AT(ADDR(.toc1) - LOAD_OFFSET) { + *(.toc1) + } + + .got : AT(ADDR(.got) - LOAD_OFFSET) ALIGN(256) { + *(.got) + *(.toc) + } + SOFT_MASK_TABLE(8) RESTART_TABLE(8) @@ -333,21 +355,11 @@ SECTIONS *(.data.rel*) *(SDATA_MAIN) *(.sdata2) - *(.got.plt) *(.got) - *(.plt) - *(.branch_lt) } #else .data : AT(ADDR(.data) - LOAD_OFFSET) { DATA_DATA *(.data.rel*) - *(.toc1) - *(.branch_lt) - } - - .got : AT(ADDR(.got) - LOAD_OFFSET) ALIGN(256) { - *(.got) - *(.toc) } #endif From b6adc6d6d327229d75607a948cde2349d317f366 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Fri, 16 Sep 2022 14:07:52 +1000 Subject: [PATCH 085/214] powerpc/build: move .data.rel.ro, .sdata2 to read-only .sdata2 is a readonly small data section for ppc32, and .data.rel.ro is data that needs relocating but is read-only after that so these can both be moved to the read only memory region. 
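To illustrate the kind of object concerned (hypothetical example, not taken
from this patch): a const table whose initialisers are addresses needs its
entries fixed up once when relocations are applied, but is never written
after that, so relocatable/position-independent builds typically emit it into
.data.rel.ro rather than .rodata:

  static const char *const level_names[] = {
          "emerg", "alert", "crit",
  };

Once boot-time relocation is done there is nothing left to modify, which is
why this section can safely live in the read-only region.
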
Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220916040755.2398112-5-npiggin@gmail.com --- arch/powerpc/kernel/vmlinux.lds.S | 24 ++++++++++++++---------- 1 file changed, 14 insertions(+), 10 deletions(-) diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S index 0bacec66fecf..fb697bc4b4e7 100644 --- a/arch/powerpc/kernel/vmlinux.lds.S +++ b/arch/powerpc/kernel/vmlinux.lds.S @@ -135,6 +135,16 @@ SECTIONS /* Read-only data */ RO_DATA(PAGE_SIZE) +#ifdef CONFIG_PPC32 + .sdata2 : AT(ADDR(.sdata2) - LOAD_OFFSET) { + *(.sdata2) + } +#endif + + .data.rel.ro : AT(ADDR(.data.rel.ro) - LOAD_OFFSET) { + *(.data.rel.ro*) + } + .branch_lt : AT(ADDR(.branch_lt) - LOAD_OFFSET) { *(.branch_lt) } @@ -349,19 +359,13 @@ SECTIONS . = ALIGN(PAGE_SIZE); _sdata = .; + .data : AT(ADDR(.data) - LOAD_OFFSET) { + DATA_DATA + *(.data.rel*) #ifdef CONFIG_PPC32 - .data : AT(ADDR(.data) - LOAD_OFFSET) { - DATA_DATA - *(.data.rel*) *(SDATA_MAIN) - *(.sdata2) - } -#else - .data : AT(ADDR(.data) - LOAD_OFFSET) { - DATA_DATA - *(.data.rel*) - } #endif + } /* The initial task and kernel stack */ INIT_TASK_DATA_SECTION(THREAD_ALIGN) From c787fed11890babda1e4882cd3b6efaf412e1bde Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Fri, 16 Sep 2022 14:07:53 +1000 Subject: [PATCH 086/214] powerpc/64/build: only include .opd with ELFv1 ELFv2 does not use function descriptors so .opd is not required. Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220916040755.2398112-6-npiggin@gmail.com --- arch/powerpc/kernel/vmlinux.lds.S | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S index fb697bc4b4e7..4c38406384a9 100644 --- a/arch/powerpc/kernel/vmlinux.lds.S +++ b/arch/powerpc/kernel/vmlinux.lds.S @@ -180,11 +180,13 @@ SECTIONS SOFT_MASK_TABLE(8) RESTART_TABLE(8) +#ifdef CONFIG_PPC64_ELF_ABI_V1 .opd : AT(ADDR(.opd) - LOAD_OFFSET) { __start_opd = .; KEEP(*(.opd)) __end_opd = .; } +#endif . = ALIGN(8); __stf_entry_barrier_fixup : AT(ADDR(__stf_entry_barrier_fixup) - LOAD_OFFSET) { From 1e9eca485a840985a663080eb049c420272d4bdd Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Fri, 16 Sep 2022 14:07:54 +1000 Subject: [PATCH 087/214] powerpc/64/build: merge .got and .toc input sections Follow the binutils ld internal linker script and merge .got and .toc input sections in the .got output section. Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220916040755.2398112-7-npiggin@gmail.com --- arch/powerpc/kernel/vmlinux.lds.S | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S index 4c38406384a9..e68eb9381066 100644 --- a/arch/powerpc/kernel/vmlinux.lds.S +++ b/arch/powerpc/kernel/vmlinux.lds.S @@ -173,8 +173,7 @@ SECTIONS } .got : AT(ADDR(.got) - LOAD_OFFSET) ALIGN(256) { - *(.got) - *(.toc) + *(.got .toc) } SOFT_MASK_TABLE(8) From fdfdcfd504933ed06eb6b4c9df21eede0e213c3e Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Fri, 16 Sep 2022 14:07:55 +1000 Subject: [PATCH 088/214] powerpc/build: put sys_call_table in .data.rel.ro if RELOCATABLE Const function pointers by convention live in .data.rel.ro if they need to be relocated. Now that .data.rel.ro is linked into the read-only region, put them in the right section. 
This doesn't make much practical difference, but it will make the C conversion of sys_call_table a smaller change as far as linking goes. Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220916040755.2398112-8-npiggin@gmail.com --- arch/powerpc/kernel/systbl.S | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/arch/powerpc/kernel/systbl.S b/arch/powerpc/kernel/systbl.S index 6c1db3b6de2d..280d6b6955e2 100644 --- a/arch/powerpc/kernel/systbl.S +++ b/arch/powerpc/kernel/systbl.S @@ -12,7 +12,11 @@ #include +#ifdef CONFIG_RELOCATABLE +.section .data.rel.ro,"aw" +#else .section .rodata,"a" +#endif #ifdef CONFIG_PPC64 .p2align 3 From e74611aa91bb9939dfc4a41b045a1a19227cff98 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Tue, 13 Sep 2022 22:45:45 +1000 Subject: [PATCH 089/214] powerpc/64: Remove unused SYS_CALL_TABLE symbol In interrupt_64.S, formerly entry_64.S, there are two toc entries created for sys_call_table and compat_sys_call_table. These are no longer used, since the system call entry was converted from asm to C, so remove them. Fixes: 68b34588e202 ("powerpc/64/sycall: Implement syscall entry/exit logic in C") Acked-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220913124545.2817825-1-mpe@ellerman.id.au --- arch/powerpc/kernel/interrupt_64.S | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/arch/powerpc/kernel/interrupt_64.S b/arch/powerpc/kernel/interrupt_64.S index ce25b28cf418..4b4ba3364665 100644 --- a/arch/powerpc/kernel/interrupt_64.S +++ b/arch/powerpc/kernel/interrupt_64.S @@ -13,16 +13,6 @@ #include #include - .section ".toc","aw" -SYS_CALL_TABLE: - .tc sys_call_table[TC],sys_call_table - -#ifdef CONFIG_COMPAT -COMPAT_SYS_CALL_TABLE: - .tc compat_sys_call_table[TC],compat_sys_call_table -#endif - .previous - .align 7 .macro DEBUG_SRR_VALID srr From 456c3005102b18cce6662b1915c6efffe7744dcc Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Mon, 19 Sep 2022 15:27:55 +1000 Subject: [PATCH 090/214] powerpc/microwatt: Remove unused early debug code The original microwatt submission[1] included some early debug code for using the Microwatt "potato" UART. The series that was eventually merged switched to using a standard UART, and so doesn't need any special early debug handling. But some of the original code was merged accidentally under the non-existent CONFIG_PPC_EARLY_DEBUG_MICROWATT. Drop the unused code. 
1: https://lore.kernel.org/linuxppc-dev/20200509050340.GD1464954@thinks.paulus.ozlabs.org/ Fixes: 48b545b8018d ("powerpc/microwatt: Use standard 16550 UART for console") Reported-by: Lukas Bulwahn Reviewed-by: Andrew Donnellan Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220919052755.800907-1-mpe@ellerman.id.au --- arch/powerpc/kernel/udbg_16550.c | 39 -------------------------------- 1 file changed, 39 deletions(-) diff --git a/arch/powerpc/kernel/udbg_16550.c b/arch/powerpc/kernel/udbg_16550.c index d3942de254c6..ddfbc74bf85f 100644 --- a/arch/powerpc/kernel/udbg_16550.c +++ b/arch/powerpc/kernel/udbg_16550.c @@ -296,42 +296,3 @@ void __init udbg_init_40x_realmode(void) } #endif /* CONFIG_PPC_EARLY_DEBUG_40x */ - -#ifdef CONFIG_PPC_EARLY_DEBUG_MICROWATT - -#define UDBG_UART_MW_ADDR ((void __iomem *)0xc0002000) - -static u8 udbg_uart_in_isa300_rm(unsigned int reg) -{ - uint64_t msr = mfmsr(); - uint8_t c; - - mtmsr(msr & ~(MSR_EE|MSR_DR)); - isync(); - eieio(); - c = __raw_rm_readb(UDBG_UART_MW_ADDR + (reg << 2)); - mtmsr(msr); - isync(); - return c; -} - -static void udbg_uart_out_isa300_rm(unsigned int reg, u8 val) -{ - uint64_t msr = mfmsr(); - - mtmsr(msr & ~(MSR_EE|MSR_DR)); - isync(); - eieio(); - __raw_rm_writeb(val, UDBG_UART_MW_ADDR + (reg << 2)); - mtmsr(msr); - isync(); -} - -void __init udbg_init_debug_microwatt(void) -{ - udbg_uart_in = udbg_uart_in_isa300_rm; - udbg_uart_out = udbg_uart_out_isa300_rm; - udbg_use_uart(); -} - -#endif /* CONFIG_PPC_EARLY_DEBUG_MICROWATT */ From 51da853e3708852f47cd95e6f5e1821c3d54c3ef Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Sat, 3 Sep 2022 22:36:39 +1000 Subject: [PATCH 091/214] powerpc/mm/64s: Drop pgd_huge() On powerpc there are two ways for huge pages to be represented in the top level page table, aka PGD (Page Global Directory). If the address space mapped by an individual PGD entry does not correspond to a given huge page size, then the PGD entry points to a non-standard page table, known as a "hugepd" (Huge Page Directory). The hugepd contains some number of huge page PTEs sufficient to map the address space with the given huge page size. On the other hand, if the address space mapped by an individual PGD entry does correspond exactly to a given huge page size, that PGD entry is used to directly encode the huge page PTE in place. In this case the pgd_huge() wrapper indicates to generic code that the PGD entry is actually a huge page PTE. This commit deals with the pgd_huge() case only, it does nothing with respect to the hugepd case. Over time the size of the virtual address space supported on powerpc has increased several times, which means the location at which huge pages can sit in the tree has also changed. There have also been new huge page sizes added, with the introduction of the Radix MMU. On Power9 and later with the Radix MMU, the largest huge page size in any implementation is 1GB. Since the introduction of Radix, 1GB entries have been supported at the PUD level, with both 4K and 64K base page size. Radix has never had a supported huge page size at the PGD level. On Power8 or earlier, which uses the Hash MMU, or Power9 or later with the Hash MMU enabled, the largest huge page size is 16GB. Using the Hash MMU and a base page size of 4K, 16GB has never been a supported huge page size at the PGD level, due to the geometry being incompatible. The two supported huge page sizes (16M & 16GB) both use the hugepd format. 
Using the Hash MMU and a base page size of 64K, 16GB pages were supported in the past at the PGD level. However in commit ba95b5d03596 ("powerpc/mm/book3s/64: Rework page table geometry for lower memory usage") the page table layout was reworked to shrink the size of the PGD. As a result the 16GB page size now fits at the PUD level when using 64K base page size. Therefore there are no longer any supported configurations where pgd_huge() can be true, so drop the definitions for pgd_huge(), and fallback to the generic definition which is always false. Fixes: ba95b5d03596 ("powerpc/mm/book3s/64: Rework page table geometry for lower memory usage") Reviewed-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220903123640.719846-1-mpe@ellerman.id.au --- arch/powerpc/include/asm/book3s/64/pgtable-4k.h | 10 ---------- arch/powerpc/include/asm/book3s/64/pgtable-64k.h | 9 --------- 2 files changed, 19 deletions(-) diff --git a/arch/powerpc/include/asm/book3s/64/pgtable-4k.h b/arch/powerpc/include/asm/book3s/64/pgtable-4k.h index 4e697bc2f4cd..48f21820afe2 100644 --- a/arch/powerpc/include/asm/book3s/64/pgtable-4k.h +++ b/arch/powerpc/include/asm/book3s/64/pgtable-4k.h @@ -26,16 +26,6 @@ static inline int pud_huge(pud_t pud) return 0; } -static inline int pgd_huge(pgd_t pgd) -{ - /* - * leaf pte for huge page - */ - if (radix_enabled()) - return !!(pgd_raw(pgd) & cpu_to_be64(_PAGE_PTE)); - return 0; -} -#define pgd_huge pgd_huge /* * With radix , we have hugepage ptes in the pud and pmd entries. We don't * need to setup hugepage directory for them. Our pte and page directory format diff --git a/arch/powerpc/include/asm/book3s/64/pgtable-64k.h b/arch/powerpc/include/asm/book3s/64/pgtable-64k.h index 34d1018896b3..2fce3498b000 100644 --- a/arch/powerpc/include/asm/book3s/64/pgtable-64k.h +++ b/arch/powerpc/include/asm/book3s/64/pgtable-64k.h @@ -30,15 +30,6 @@ static inline int pud_huge(pud_t pud) return !!(pud_raw(pud) & cpu_to_be64(_PAGE_PTE)); } -static inline int pgd_huge(pgd_t pgd) -{ - /* - * leaf pte for huge page - */ - return !!(pgd_raw(pgd) & cpu_to_be64(_PAGE_PTE)); -} -#define pgd_huge pgd_huge - /* * With 64k page size, we have hugepage ptes in the pgd and pmd entries. We don't * need to setup hugepage directory for them. Our pte and page directory format From 79c5640ab4460a03535ce0f120193174e7701b65 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Sat, 3 Sep 2022 22:36:40 +1000 Subject: [PATCH 092/214] powerpc/mm/64s: Drop p4d_leaf() Because 64-bit Book3S uses pgtable-nop4d.h, the P4D is folded into the PGD. So P4D entries are actually PGD entries, or vice versa. The other way to think of it is that the P4D is a single entry page table below the PGD. Zero bits of the address are needed to index into the P4D, therefore a P4D entry maps the same size address space as a PGD entry. As explained in the previous commit, there are no huge page sizes supported directly at the PGD level on 64-bit Book3S, so there are also no huge page sizes supported at the P4D level. Therefore p4d_is_leaf() can never be true, so drop the definition and fallback to the default implementation that always returns false. 
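For clarity, once the powerpc definition is gone the fallback behaves as a
constant-false predicate, along the lines of (illustrative sketch, not
necessarily the literal header text):

  #ifndef p4d_is_leaf
  #define p4d_is_leaf p4d_is_leaf
  static inline bool p4d_is_leaf(p4d_t p4d)
  {
          return false;
  }
  #endif

so generic page table walkers simply never treat a P4D entry as a leaf on
64-bit Book3S.
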
Reviewed-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220903123640.719846-2-mpe@ellerman.id.au --- arch/powerpc/include/asm/book3s/64/pgtable.h | 7 ------- 1 file changed, 7 deletions(-) diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h index b7fd9b69e828..28fd016cf7ff 100644 --- a/arch/powerpc/include/asm/book3s/64/pgtable.h +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h @@ -1462,12 +1462,5 @@ static inline bool pud_is_leaf(pud_t pud) return !!(pud_raw(pud) & cpu_to_be64(_PAGE_PTE)); } -#define p4d_is_leaf p4d_is_leaf -#define p4d_leaf p4d_is_leaf -static inline bool p4d_is_leaf(p4d_t p4d) -{ - return !!(p4d_raw(p4d) & cpu_to_be64(_PAGE_PTE)); -} - #endif /* __ASSEMBLY__ */ #endif /* _ASM_POWERPC_BOOK3S_64_PGTABLE_H_ */ From a26494cf4aeb8e9888428a43f55cc486f06f1334 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Wed, 7 Sep 2022 11:34:44 +0200 Subject: [PATCH 093/214] powerpc/nohash: Remove pgd_huge() stub linux/hugetlb.h has a fallback pgd_huge() macro for when pgd_huge is not defined. Remove the powerpc redundant definitions. Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/ae6aa7fce84f7abcbf67f534271a4a6dd7949b0d.1662543243.git.christophe.leroy@csgroup.eu --- arch/powerpc/include/asm/nohash/pgtable.h | 6 ------ arch/powerpc/include/asm/page.h | 1 - 2 files changed, 7 deletions(-) diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h index b499da6c1a99..08429c612cdf 100644 --- a/arch/powerpc/include/asm/nohash/pgtable.h +++ b/arch/powerpc/include/asm/nohash/pgtable.h @@ -277,12 +277,6 @@ static inline int pud_huge(pud_t pud) return 0; } -static inline int pgd_huge(pgd_t pgd) -{ - return 0; -} -#define pgd_huge pgd_huge - #define is_hugepd(hpd) (hugepd_ok(hpd)) #endif diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h index e5f75c70eda8..c67eb9531a3f 100644 --- a/arch/powerpc/include/asm/page.h +++ b/arch/powerpc/include/asm/page.h @@ -311,7 +311,6 @@ static inline bool pfn_valid(unsigned long pfn) #ifndef CONFIG_HUGETLB_PAGE #define is_hugepd(pdep) (0) -#define pgd_huge(pgd) (0) #endif /* CONFIG_HUGETLB_PAGE */ struct page; From 691cdf016d3be6f66a3ea384809be229e0f9c590 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Wed, 7 Sep 2022 11:34:45 +0200 Subject: [PATCH 094/214] powerpc: Rely on generic definition of hugepd_t and is_hugepd when unused CONFIG_ARCH_HAS_HUGEPD is used to tell core mm when huge page directories are used. When they are not used, no need to provide hugepd_t or is_hugepd(), just rely on the core mm fallback definition. For that, change core mm behaviour so that CONFIG_ARCH_HAS_HUGEPD is used instead of indirect is_hugepd macro existence. powerpc being the only user of huge page directories, there is no impact on other architectures. 
Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/da81462d93069bb90fe5e762dd3283a644318937.1662543243.git.christophe.leroy@csgroup.eu --- arch/powerpc/include/asm/page.h | 5 ----- arch/powerpc/include/asm/pgtable-be-types.h | 2 ++ arch/powerpc/include/asm/pgtable-types.h | 2 ++ include/linux/hugetlb.h | 2 +- 4 files changed, 5 insertions(+), 6 deletions(-) diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h index c67eb9531a3f..7f20636d13ed 100644 --- a/arch/powerpc/include/asm/page.h +++ b/arch/powerpc/include/asm/page.h @@ -308,11 +308,6 @@ static inline bool pfn_valid(unsigned long pfn) #include #endif - -#ifndef CONFIG_HUGETLB_PAGE -#define is_hugepd(pdep) (0) -#endif /* CONFIG_HUGETLB_PAGE */ - struct page; extern void clear_user_page(void *page, unsigned long vaddr, struct page *pg); extern void copy_user_page(void *to, void *from, unsigned long vaddr, diff --git a/arch/powerpc/include/asm/pgtable-be-types.h b/arch/powerpc/include/asm/pgtable-be-types.h index b169bbf95fcb..82633200b500 100644 --- a/arch/powerpc/include/asm/pgtable-be-types.h +++ b/arch/powerpc/include/asm/pgtable-be-types.h @@ -101,6 +101,7 @@ static inline bool pmd_xchg(pmd_t *pmdp, pmd_t old, pmd_t new) return pmd_raw(old) == prev; } +#ifdef CONFIG_ARCH_HAS_HUGEPD typedef struct { __be64 pdbe; } hugepd_t; #define __hugepd(x) ((hugepd_t) { cpu_to_be64(x) }) @@ -108,5 +109,6 @@ static inline unsigned long hpd_val(hugepd_t x) { return be64_to_cpu(x.pdbe); } +#endif #endif /* _ASM_POWERPC_PGTABLE_BE_TYPES_H */ diff --git a/arch/powerpc/include/asm/pgtable-types.h b/arch/powerpc/include/asm/pgtable-types.h index efed0db7b1db..082c85cc09b1 100644 --- a/arch/powerpc/include/asm/pgtable-types.h +++ b/arch/powerpc/include/asm/pgtable-types.h @@ -83,11 +83,13 @@ static inline bool pte_xchg(pte_t *ptep, pte_t old, pte_t new) } #endif +#ifdef CONFIG_ARCH_HAS_HUGEPD typedef struct { unsigned long pd; } hugepd_t; #define __hugepd(x) ((hugepd_t) { (x) }) static inline unsigned long hpd_val(hugepd_t x) { return x.pd; } +#endif #endif /* _ASM_POWERPC_PGTABLE_TYPES_H */ diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 3ec981a0d8b3..1ec1535be04f 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -17,7 +17,7 @@ struct ctl_table; struct user_struct; struct mmu_gather; -#ifndef is_hugepd +#ifndef CONFIG_ARCH_HAS_HUGEPD typedef struct { unsigned long pd; } hugepd_t; #define is_hugepd(hugepd) (0) #define __hugepd(x) ((hugepd_t) { (x) }) From 73ea68ad0d2f655815b6f1fbe1c5521d72f01b64 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Mon, 5 Sep 2022 11:38:25 +0200 Subject: [PATCH 095/214] powerpc/book3s: Inline first level of update_mmu_cache() update_mmu_cache() voids when hash page tables are not used. On PPC32 that means when MMU_FTR_HPTE_TABLE is not defined. On PPC64 that means when RADIX is enabled. Rename core part of update_mmu_cache() as __update_mmu_cache() and include the initial verification in an inlined caller. 
Signed-off-by: Christophe Leroy Reviewed-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/bea5ad0de7f83eff256116816d46c84fa0a444de.1662370698.git.christophe.leroy@csgroup.eu --- arch/powerpc/include/asm/book3s/pgtable.h | 15 ++++++++++----- arch/powerpc/mm/book3s32/mmu.c | 4 +--- arch/powerpc/mm/book3s64/hash_utils.c | 5 +---- 3 files changed, 12 insertions(+), 12 deletions(-) diff --git a/arch/powerpc/include/asm/book3s/pgtable.h b/arch/powerpc/include/asm/book3s/pgtable.h index e8269434ecbe..d18b748ea3ae 100644 --- a/arch/powerpc/include/asm/book3s/pgtable.h +++ b/arch/powerpc/include/asm/book3s/pgtable.h @@ -25,7 +25,8 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, unsigned long size, pgprot_t vma_prot); #define __HAVE_PHYS_MEM_ACCESS_PROT -#if defined(CONFIG_PPC32) || defined(CONFIG_PPC_64S_HASH_MMU) +void __update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep); + /* * This gets called at the end of handling a page fault, when * the kernel has put a new PTE into the page table for the process. @@ -35,10 +36,14 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, * corresponding HPTE into the hash table ahead of time, instead of * waiting for the inevitable extra hash-table miss exception. */ -void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep); -#else -static inline void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) {} -#endif +static inline void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) +{ + if (IS_ENABLED(CONFIG_PPC32) && !mmu_has_feature(MMU_FTR_HPTE_TABLE)) + return; + if (radix_enabled()) + return; + __update_mmu_cache(vma, address, ptep); +} #endif /* __ASSEMBLY__ */ #endif diff --git a/arch/powerpc/mm/book3s32/mmu.c b/arch/powerpc/mm/book3s32/mmu.c index e5ee05c234c7..850783cfa9c7 100644 --- a/arch/powerpc/mm/book3s32/mmu.c +++ b/arch/powerpc/mm/book3s32/mmu.c @@ -314,11 +314,9 @@ static void hash_preload(struct mm_struct *mm, unsigned long ea) * * This must always be called with the pte lock held. */ -void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, +void __update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) { - if (!mmu_has_feature(MMU_FTR_HPTE_TABLE)) - return; /* * We don't need to worry about _PAGE_PRESENT here because we are * called with either mm->page_table_lock held or ptl lock held diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c index 363a9447d63d..ced1107b1677 100644 --- a/arch/powerpc/mm/book3s64/hash_utils.c +++ b/arch/powerpc/mm/book3s64/hash_utils.c @@ -1781,7 +1781,7 @@ static void hash_preload(struct mm_struct *mm, pte_t *ptep, unsigned long ea, * * This must always be called with the pte lock held. 
*/ -void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, +void __update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) { /* @@ -1791,9 +1791,6 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, unsigned long trap; bool is_exec; - if (radix_enabled()) - return; - /* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */ if (!pte_young(*ptep) || address >= TASK_SIZE) return; From b997b2f57cae396448bb62c428efa4b112dd90ed Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Wed, 7 Sep 2022 12:05:01 +0200 Subject: [PATCH 096/214] powerpc/mm: Reduce redundancy in pgtable.h PAGE_KERNEL_TEXT, PAGE_KERNEL_EXEC and PAGE_AGP are the same for all powerpcs. Remove duplicated definitions. Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/92254499430d13d99e4a4d7e9ad8e8284cb35380.1662544974.git.christophe.leroy@csgroup.eu --- arch/powerpc/include/asm/book3s/32/pgtable.h | 19 ------------------- arch/powerpc/include/asm/book3s/64/pgtable.h | 19 ------------------- arch/powerpc/include/asm/nohash/pgtable.h | 19 ------------------- arch/powerpc/include/asm/pgtable.h | 19 +++++++++++++++++++ 4 files changed, 19 insertions(+), 57 deletions(-) diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h index 40041ac713d9..8da6d4544822 100644 --- a/arch/powerpc/include/asm/book3s/32/pgtable.h +++ b/arch/powerpc/include/asm/book3s/32/pgtable.h @@ -118,25 +118,6 @@ static inline bool pte_user(pte_t pte) #define PAGE_KERNEL_RO __pgprot(_PAGE_BASE | _PAGE_KERNEL_RO) #define PAGE_KERNEL_ROX __pgprot(_PAGE_BASE | _PAGE_KERNEL_ROX) -/* - * Protection used for kernel text. We want the debuggers to be able to - * set breakpoints anywhere, so don't write protect the kernel text - * on platforms where such control is possible. - */ -#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) ||\ - defined(CONFIG_KPROBES) || defined(CONFIG_DYNAMIC_FTRACE) -#define PAGE_KERNEL_TEXT PAGE_KERNEL_X -#else -#define PAGE_KERNEL_TEXT PAGE_KERNEL_ROX -#endif - -/* Make modules code happy. We don't set RO yet */ -#define PAGE_KERNEL_EXEC PAGE_KERNEL_X - -/* Advertise special mapping type for AGP */ -#define PAGE_AGP (PAGE_KERNEL_NC) -#define HAVE_PAGE_AGP - #define PTE_INDEX_SIZE PTE_SHIFT #define PMD_INDEX_SIZE 0 #define PUD_INDEX_SIZE 0 diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h index 28fd016cf7ff..3007a251a186 100644 --- a/arch/powerpc/include/asm/book3s/64/pgtable.h +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h @@ -164,22 +164,6 @@ #define PAGE_KERNEL_RO __pgprot(_PAGE_BASE | _PAGE_KERNEL_RO) #define PAGE_KERNEL_ROX __pgprot(_PAGE_BASE | _PAGE_KERNEL_ROX) -/* - * Protection used for kernel text. We want the debuggers to be able to - * set breakpoints anywhere, so don't write protect the kernel text - * on platforms where such control is possible. - */ -#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) || \ - defined(CONFIG_KPROBES) || defined(CONFIG_DYNAMIC_FTRACE) -#define PAGE_KERNEL_TEXT PAGE_KERNEL_X -#else -#define PAGE_KERNEL_TEXT PAGE_KERNEL_ROX -#endif - -/* Make modules code happy. 
We don't set RO yet */ -#define PAGE_KERNEL_EXEC PAGE_KERNEL_X -#define PAGE_AGP (PAGE_KERNEL_NC) - #ifndef __ASSEMBLY__ /* * page table defines @@ -335,9 +319,6 @@ extern unsigned long pci_io_base; #define IOREMAP_END (KERN_IO_END - FIXADDR_SIZE) #define FIXADDR_SIZE SZ_32M -/* Advertise special mapping type for AGP */ -#define HAVE_PAGE_AGP - #ifndef __ASSEMBLY__ /* diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h index 08429c612cdf..18b29cfee0d6 100644 --- a/arch/powerpc/include/asm/nohash/pgtable.h +++ b/arch/powerpc/include/asm/nohash/pgtable.h @@ -17,25 +17,6 @@ #define PAGE_KERNEL_RO __pgprot(_PAGE_BASE | _PAGE_KERNEL_RO) #define PAGE_KERNEL_ROX __pgprot(_PAGE_BASE | _PAGE_KERNEL_ROX) -/* - * Protection used for kernel text. We want the debuggers to be able to - * set breakpoints anywhere, so don't write protect the kernel text - * on platforms where such control is possible. - */ -#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) ||\ - defined(CONFIG_KPROBES) || defined(CONFIG_DYNAMIC_FTRACE) -#define PAGE_KERNEL_TEXT PAGE_KERNEL_X -#else -#define PAGE_KERNEL_TEXT PAGE_KERNEL_ROX -#endif - -/* Make modules code happy. We don't set RO yet */ -#define PAGE_KERNEL_EXEC PAGE_KERNEL_X - -/* Advertise special mapping type for AGP */ -#define PAGE_AGP (PAGE_KERNEL_NC) -#define HAVE_PAGE_AGP - #ifndef __ASSEMBLY__ /* Generic accessors to PTE bits */ diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h index 33f4bf8d22b0..283f40d05a4d 100644 --- a/arch/powerpc/include/asm/pgtable.h +++ b/arch/powerpc/include/asm/pgtable.h @@ -20,6 +20,25 @@ struct mm_struct; #include #endif /* !CONFIG_PPC_BOOK3S */ +/* + * Protection used for kernel text. We want the debuggers to be able to + * set breakpoints anywhere, so don't write protect the kernel text + * on platforms where such control is possible. + */ +#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) || \ + defined(CONFIG_KPROBES) || defined(CONFIG_DYNAMIC_FTRACE) +#define PAGE_KERNEL_TEXT PAGE_KERNEL_X +#else +#define PAGE_KERNEL_TEXT PAGE_KERNEL_ROX +#endif + +/* Make modules code happy. We don't set RO yet */ +#define PAGE_KERNEL_EXEC PAGE_KERNEL_X + +/* Advertise special mapping type for AGP */ +#define PAGE_AGP (PAGE_KERNEL_NC) +#define HAVE_PAGE_AGP + #ifndef __ASSEMBLY__ #ifndef MAX_PTRS_PER_PGD From 6cc07821adce44e864c3752a3842936a6a7f6aef Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Wed, 7 Sep 2022 12:05:21 +0200 Subject: [PATCH 097/214] powerpc/mm: Make PAGE_KERNEL_xxx macros grep-friendly Avoid multi-lines to help getting a complete view when using grep. They still remain under the 100 chars limit. 
Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/3bc3f5a51949ee7f52dba36677db23d4337c7995.1662544980.git.christophe.leroy@csgroup.eu --- arch/powerpc/include/asm/book3s/32/pgtable.h | 3 +-- arch/powerpc/include/asm/book3s/64/pgtable.h | 9 +++------ arch/powerpc/include/asm/nohash/pgtable.h | 3 +-- 3 files changed, 5 insertions(+), 10 deletions(-) diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h index 8da6d4544822..75823f39e042 100644 --- a/arch/powerpc/include/asm/book3s/32/pgtable.h +++ b/arch/powerpc/include/asm/book3s/32/pgtable.h @@ -112,8 +112,7 @@ static inline bool pte_user(pte_t pte) /* Permission masks used for kernel mappings */ #define PAGE_KERNEL __pgprot(_PAGE_BASE | _PAGE_KERNEL_RW) #define PAGE_KERNEL_NC __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | _PAGE_NO_CACHE) -#define PAGE_KERNEL_NCG __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \ - _PAGE_NO_CACHE | _PAGE_GUARDED) +#define PAGE_KERNEL_NCG __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | _PAGE_NO_CACHE | _PAGE_GUARDED) #define PAGE_KERNEL_X __pgprot(_PAGE_BASE | _PAGE_KERNEL_RWX) #define PAGE_KERNEL_RO __pgprot(_PAGE_BASE | _PAGE_KERNEL_RO) #define PAGE_KERNEL_ROX __pgprot(_PAGE_BASE | _PAGE_KERNEL_ROX) diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h index 3007a251a186..b5f8264cc980 100644 --- a/arch/powerpc/include/asm/book3s/64/pgtable.h +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h @@ -117,8 +117,7 @@ #define _PAGE_KERNEL_RW (_PAGE_PRIVILEGED | _PAGE_RW | _PAGE_DIRTY) #define _PAGE_KERNEL_RO (_PAGE_PRIVILEGED | _PAGE_READ) #define _PAGE_KERNEL_ROX (_PAGE_PRIVILEGED | _PAGE_READ | _PAGE_EXEC) -#define _PAGE_KERNEL_RWX (_PAGE_PRIVILEGED | _PAGE_DIRTY | \ - _PAGE_RW | _PAGE_EXEC) +#define _PAGE_KERNEL_RWX (_PAGE_PRIVILEGED | _PAGE_DIRTY | _PAGE_RW | _PAGE_EXEC) /* * _PAGE_CHG_MASK masks of bits that are to be preserved across * pgprot changes @@ -156,10 +155,8 @@ /* Permission masks used for kernel mappings */ #define PAGE_KERNEL __pgprot(_PAGE_BASE | _PAGE_KERNEL_RW) -#define PAGE_KERNEL_NC __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \ - _PAGE_TOLERANT) -#define PAGE_KERNEL_NCG __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \ - _PAGE_NON_IDEMPOTENT) +#define PAGE_KERNEL_NC __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | _PAGE_TOLERANT) +#define PAGE_KERNEL_NCG __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | _PAGE_NON_IDEMPOTENT) #define PAGE_KERNEL_X __pgprot(_PAGE_BASE | _PAGE_KERNEL_RWX) #define PAGE_KERNEL_RO __pgprot(_PAGE_BASE | _PAGE_KERNEL_RO) #define PAGE_KERNEL_ROX __pgprot(_PAGE_BASE | _PAGE_KERNEL_ROX) diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h index 18b29cfee0d6..4fd73c7412d0 100644 --- a/arch/powerpc/include/asm/nohash/pgtable.h +++ b/arch/powerpc/include/asm/nohash/pgtable.h @@ -11,8 +11,7 @@ /* Permission masks used for kernel mappings */ #define PAGE_KERNEL __pgprot(_PAGE_BASE | _PAGE_KERNEL_RW) #define PAGE_KERNEL_NC __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | _PAGE_NO_CACHE) -#define PAGE_KERNEL_NCG __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \ - _PAGE_NO_CACHE | _PAGE_GUARDED) +#define PAGE_KERNEL_NCG __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | _PAGE_NO_CACHE | _PAGE_GUARDED) #define PAGE_KERNEL_X __pgprot(_PAGE_BASE | _PAGE_KERNEL_RWX) #define PAGE_KERNEL_RO __pgprot(_PAGE_BASE | _PAGE_KERNEL_RO) #define PAGE_KERNEL_ROX __pgprot(_PAGE_BASE | _PAGE_KERNEL_ROX) From c4167aec98524fa4511b3222303a758b532b6009 Mon Sep 
17 00:00:00 2001 From: David Hildenbrand Date: Tue, 20 Sep 2022 14:23:01 +0200 Subject: [PATCH 098/214] powerpc/prom_init: drop PROM_BUG() Unused, let's drop it. Signed-off-by: David Hildenbrand Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220920122302.99195-3-david@redhat.com --- arch/powerpc/kernel/prom_init.c | 6 ------ 1 file changed, 6 deletions(-) diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c index a6669c40c1db..d464ba412084 100644 --- a/arch/powerpc/kernel/prom_init.c +++ b/arch/powerpc/kernel/prom_init.c @@ -96,12 +96,6 @@ static int of_workarounds __prombss; #define OF_WA_CLAIM 1 /* do phys/virt claim separately, then map */ #define OF_WA_LONGTRAIL 2 /* work around longtrail bugs */ -#define PROM_BUG() do { \ - prom_printf("kernel BUG at %s line 0x%x!\n", \ - __FILE__, __LINE__); \ - __builtin_trap(); \ -} while (0) - #ifdef DEBUG_PROM #define prom_debug(x...) prom_printf(x) #else From 2fc1c63d2763ad7562ea7d241da79b42538a557b Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Tue, 20 Sep 2022 19:36:42 +0200 Subject: [PATCH 099/214] powerpc/highmem: Properly handle fragmented memory MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In addition to checking whether a page is reserved before allocating it to highmem, verify that it is valid memory. Otherwise the kernel Oopses as below: mem auto-init: stack:off, heap alloc:off, heap free:off Kernel attempted to read user page (7df58) - exploit attempt? (uid: 0) BUG: Unable to handle kernel data access on read at 0x0007df58 Faulting instruction address: 0xc01c8348 Oops: Kernel access of bad area, sig: 11 [#1] BE PAGE_SIZE=4K SMP NR_CPUS=2 P2020RDB-PC Modules linked in: CPU: 0 PID: 0 Comm: swapper Not tainted 6.0.0-rc2-0caacb197b677410bdac81bc34f05235+ #121 NIP: c01c8348 LR: c01cb2bc CTR: 0000000a REGS: c10d7e20 TRAP: 0300 Not tainted (6.0.0-rc2-0caacb197b677410bdac81bc34f05235+) MSR: 00021000 CR: 48044224 XER: 00000000 DEAR: 0007df58 ESR: 00000000 GPR00: c01cb294 c10d7f10 c1045340 00000001 00000004 c112bcc0 00000015 eedf1000 GPR08: 00000003 0007df58 00000000 f0000000 28044228 00000200 00000000 00000000 GPR16: 00000000 00000000 00000000 0275cb7a c0000000 00000001 0000075f 00000000 GPR24: c1031004 00000000 00000000 00000001 c10f0000 eedf1000 00080000 00080000 NIP free_unref_page_prepare.part.93+0x48/0x60 LR free_unref_page+0x84/0x4b8 Call Trace: 0xeedf1000 (unreliable) free_unref_page+0x5c/0x4b8 mem_init+0xd0/0x194 start_kernel+0x4c0/0x6d0 set_ivor+0x13c/0x178 Reported-by: Pali Rohár Signed-off-by: Christophe Leroy Fixes: b0e0d68b1c52 ("powerpc/32: Allow fragmented physical memory") Tested-by: Pali Rohár [mpe: Trim oops] Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/f08cca5c46d67399c53262eca48e015dcf1841f9.1663695394.git.christophe.leroy@csgroup.eu --- arch/powerpc/mm/mem.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index 01772e79fd93..6ddbd6cb3a2a 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c @@ -302,7 +302,7 @@ void __init mem_init(void) for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) { phys_addr_t paddr = (phys_addr_t)pfn << PAGE_SHIFT; struct page *page = pfn_to_page(pfn); - if (!memblock_is_reserved(paddr)) + if (memblock_is_memory(paddr) && !memblock_is_reserved(paddr)) free_highmem_page(page); } } From ecf8f36446f53866727d9670df1746f8d20130a8 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Fri, 16 Sep 2022 23:15:23 +1000 
Subject: [PATCH 100/214] powerpc: Always select HAVE_EFFICIENT_UNALIGNED_ACCESS Currently powerpc selects HAVE_EFFICIENT_UNALIGNED_ACCESS in all cases but one. The exception is if the kernel is being built little endian and explicitly targeted for Power7. The combination of Power7 and little endian was never commercially supported, or widely used. It was only ever possible on bare metal machines, using unofficial firmware, or in qemu guests hosted on those machines. The bare metal firmware support for Power7 was removed in 2019, see skiboot commit 16b7ae64 ("Remove POWER7 and POWER7+ support"). Little endian kernel builds were switched to target Power8 or later in 2018, in commit a73657ea19ae ("powerpc/64: Add GENERIC_CPU support for little endian"). Since then it's only been possible to boot a Power7/LE kernel by explicitly building for Power7. So drop the exception and always select HAVE_EFFICIENT_UNALIGNED_ACCESS. If anyone does still have a Power7/LE machine it should hopefully continue to boot, just with some performance penality, and if not they can report a bug. Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220916131523.319123-1-mpe@ellerman.id.au --- arch/powerpc/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 4c466acdc70d..7dc46dc2e387 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -211,7 +211,7 @@ config PPC select HAVE_DYNAMIC_FTRACE_WITH_ARGS if MPROFILE_KERNEL || PPC32 select HAVE_DYNAMIC_FTRACE_WITH_REGS if MPROFILE_KERNEL || PPC32 select HAVE_EBPF_JIT - select HAVE_EFFICIENT_UNALIGNED_ACCESS if !(CPU_LITTLE_ENDIAN && POWER7_CPU) + select HAVE_EFFICIENT_UNALIGNED_ACCESS select HAVE_FAST_GUP select HAVE_FTRACE_MCOUNT_RECORD select HAVE_FUNCTION_DESCRIPTORS if PPC64_ELF_ABI_V1 From c9986f0aefd1ae22fe9cf794d49699643f1e268b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pali=20Roh=C3=A1r?= Date: Wed, 31 Aug 2022 00:55:00 +0200 Subject: [PATCH 101/214] powerpc: dts: turris1x.dts: Fix NOR partitions labels MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Partition partition@20000 contains generic kernel image and it does not have to be used only for rescue purposes. Partition partition@1c0000 contains bootable rescue system and partition partition@340000 contains factory image/data for restoring to NAND. So change partition labels to better fit their purpose by removing possible misleading substring "rootfs" from these labels. 
Fixes: 54c15ec3b738 ("powerpc: dts: Add DTS file for CZ.NIC Turris 1.x routers") Signed-off-by: Pali Rohár Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220830225500.8856-1-pali@kernel.org --- arch/powerpc/boot/dts/turris1x.dts | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/arch/powerpc/boot/dts/turris1x.dts b/arch/powerpc/boot/dts/turris1x.dts index 12e08271e61f..47027b4cebb3 100644 --- a/arch/powerpc/boot/dts/turris1x.dts +++ b/arch/powerpc/boot/dts/turris1x.dts @@ -263,21 +263,21 @@ }; partition@20000 { - /* 1.7 MB for Rescue Linux Kernel Image */ + /* 1.7 MB for Linux Kernel Image */ reg = <0x00020000 0x001a0000>; - label = "rescue-kernel"; + label = "kernel"; }; partition@1c0000 { /* 1.5 MB for Rescue JFFS2 Root File System */ reg = <0x001c0000 0x00180000>; - label = "rescue-rootfs"; + label = "rescue"; }; partition@340000 { - /* 11 MB for TAR.XZ Backup with content of NAND Root File System */ + /* 11 MB for TAR.XZ Archive with Factory content of NAND Root File System */ reg = <0x00340000 0x00b00000>; - label = "backup-rootfs"; + label = "factory"; }; partition@e40000 { From 8bf056f57f1d16c561e43f9af37301f23990cd21 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pali=20Roh=C3=A1r?= Date: Sat, 27 Aug 2022 15:15:38 +0200 Subject: [PATCH 102/214] powerpc: dts: turris1x.dts: Fix labels in DSA cpu port nodes MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit DSA cpu port node has to be marked with "cpu" label. So fix it for both cpu port nodes. Fixes: 54c15ec3b738 ("powerpc: dts: Add DTS file for CZ.NIC Turris 1.x routers") Signed-off-by: Pali Rohár Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220827131538.14577-1-pali@kernel.org --- arch/powerpc/boot/dts/turris1x.dts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/boot/dts/turris1x.dts b/arch/powerpc/boot/dts/turris1x.dts index 47027b4cebb3..045af668e928 100644 --- a/arch/powerpc/boot/dts/turris1x.dts +++ b/arch/powerpc/boot/dts/turris1x.dts @@ -147,7 +147,7 @@ port@0 { reg = <0>; - label = "cpu1"; + label = "cpu"; ethernet = <&enet1>; phy-mode = "rgmii-id"; @@ -184,7 +184,7 @@ port@6 { reg = <6>; - label = "cpu0"; + label = "cpu"; ethernet = <&enet0>; phy-mode = "rgmii-id"; From d1203f32d86987a3ccd7de9ba2448ba12d86d125 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Mon, 19 Sep 2022 19:01:25 +0200 Subject: [PATCH 103/214] powerpc/Kconfig: Fix non existing CONFIG_PPC_FSL_BOOKE CONFIG_PPC_FSL_BOOKE doesn't exist. Should be CONFIG_FSL_BOOKE. 
Fixes: 49e3d8ea6248 ("powerpc/fsl_booke: Enable STRICT_KERNEL_RWX") Cc: stable@vger.kernel.org Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/828f6a64eeb51ce9abfa1d4e84c521a02fecebb8.1663606875.git.christophe.leroy@csgroup.eu --- arch/powerpc/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 7dc46dc2e387..220045692e48 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -828,7 +828,7 @@ config DATA_SHIFT default 24 if STRICT_KERNEL_RWX && PPC64 range 17 28 if (STRICT_KERNEL_RWX || DEBUG_PAGEALLOC || KFENCE) && PPC_BOOK3S_32 range 19 23 if (STRICT_KERNEL_RWX || DEBUG_PAGEALLOC || KFENCE) && PPC_8xx - range 20 24 if (STRICT_KERNEL_RWX || DEBUG_PAGEALLOC || KFENCE) && PPC_FSL_BOOKE + range 20 24 if (STRICT_KERNEL_RWX || DEBUG_PAGEALLOC || KFENCE) && FSL_BOOKE default 22 if STRICT_KERNEL_RWX && PPC_BOOK3S_32 default 18 if (DEBUG_PAGEALLOC || KFENCE) && PPC_BOOK3S_32 default 23 if STRICT_KERNEL_RWX && PPC_8xx From 0069f3d14e7a656ba9d7dbaac72659687fdbf43c Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Mon, 19 Sep 2022 19:01:26 +0200 Subject: [PATCH 104/214] powerpc/64e: Tie PPC_BOOK3E_64 to PPC_E500MC The only 64-bit Book3E CPUs we support require the selection of CONFIG_PPC_E500MC. However our Kconfig allows configurating a kernel that has 64-bit Book3E support, but without CONFIG_PPC_E500MC enabled. Such a kernel would never boot, it doesn't know about any CPUs. To fix this, force CONFIG_PPC_E500MC to be selected whenever we are building a 64-bit Book3E kernel. And add a test to detect future situations where cpu_specs is empty. Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/ae5d8b8b3ccc346e61d2ec729767f92766273f0b.1663606875.git.christophe.leroy@csgroup.eu --- arch/powerpc/kernel/cputable.c | 2 ++ arch/powerpc/platforms/Kconfig.cputype | 2 ++ 2 files changed, 4 insertions(+) diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c index d8e42ef750f1..2829ea537277 100644 --- a/arch/powerpc/kernel/cputable.c +++ b/arch/powerpc/kernel/cputable.c @@ -2018,6 +2018,8 @@ struct cpu_spec * __init identify_cpu(unsigned long offset, unsigned int pvr) struct cpu_spec *s = cpu_specs; int i; + BUILD_BUG_ON(!ARRAY_SIZE(cpu_specs)); + s = PTRRELOC(s); for (i = 0; i < ARRAY_SIZE(cpu_specs); i++,s++) { diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype index 5185d942b455..19fd95a06352 100644 --- a/arch/powerpc/platforms/Kconfig.cputype +++ b/arch/powerpc/platforms/Kconfig.cputype @@ -108,6 +108,8 @@ config PPC_BOOK3S_64 config PPC_BOOK3E_64 bool "Embedded processors" select PPC_FSL_BOOK3E + select E500 + select PPC_E500MC select PPC_FPU # Make it a choice ? select PPC_SMP_MUXED_IPI select PPC_DOORBELL From b6100bedf1f9aea264757ac4a56eb1d8b04b9356 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Mon, 19 Sep 2022 19:01:27 +0200 Subject: [PATCH 105/214] powerpc/64e: Remove unnecessary #ifdef CONFIG_PPC_FSL_BOOK3E CONFIG_PPC_BOOK3E_64 implies CONFIG_PPC_FSL_BOOK3E so no need of additional #ifdefs in files built exclusively for CONFIG_PPC_BOOK3E_64. 
Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/df16255c13b63b0221c9be63b94a6864bed22c12.1663606875.git.christophe.leroy@csgroup.eu --- arch/powerpc/kernel/exceptions-64e.S | 8 -------- arch/powerpc/mm/nohash/tlb_low_64e.S | 6 ------ 2 files changed, 14 deletions(-) diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S index 67dc4e3179a0..3afba070a5d8 100644 --- a/arch/powerpc/kernel/exceptions-64e.S +++ b/arch/powerpc/kernel/exceptions-64e.S @@ -291,7 +291,6 @@ ret_from_mc_except: #define SPRN_MC_SRR0 SPRN_MCSRR0 #define SPRN_MC_SRR1 SPRN_MCSRR1 -#ifdef CONFIG_PPC_FSL_BOOK3E #define GEN_BTB_FLUSH \ START_BTB_FLUSH_SECTION \ beq 1f; \ @@ -307,13 +306,6 @@ ret_from_mc_except: #define DBG_BTB_FLUSH CRIT_BTB_FLUSH #define MC_BTB_FLUSH CRIT_BTB_FLUSH #define GDBELL_BTB_FLUSH GEN_BTB_FLUSH -#else -#define GEN_BTB_FLUSH -#define CRIT_BTB_FLUSH -#define DBG_BTB_FLUSH -#define MC_BTB_FLUSH -#define GDBELL_BTB_FLUSH -#endif #define NORMAL_EXCEPTION_PROLOG(n, intnum, addition) \ EXCEPTION_PROLOG(n, intnum, GEN, addition##_GEN(n)) diff --git a/arch/powerpc/mm/nohash/tlb_low_64e.S b/arch/powerpc/mm/nohash/tlb_low_64e.S index 68ffbfdba894..be26f33a6ac0 100644 --- a/arch/powerpc/mm/nohash/tlb_low_64e.S +++ b/arch/powerpc/mm/nohash/tlb_low_64e.S @@ -61,7 +61,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV) ld r14,PACAPGD(r13) std r15,EX_TLB_R15(r12) std r10,EX_TLB_CR(r12) -#ifdef CONFIG_PPC_FSL_BOOK3E START_BTB_FLUSH_SECTION mfspr r11, SPRN_SRR1 andi. r10,r11,MSR_PR @@ -70,14 +69,11 @@ START_BTB_FLUSH_SECTION 1: END_BTB_FLUSH_SECTION std r7,EX_TLB_R7(r12) -#endif .endm .macro tlb_epilog_bolted ld r14,EX_TLB_CR(r12) -#ifdef CONFIG_PPC_FSL_BOOK3E ld r7,EX_TLB_R7(r12) -#endif ld r10,EX_TLB_R10(r12) ld r11,EX_TLB_R11(r12) ld r13,EX_TLB_R13(r12) @@ -248,7 +244,6 @@ itlb_miss_fault_bolted: beq tlb_miss_user_bolted b itlb_miss_kernel_bolted -#ifdef CONFIG_PPC_FSL_BOOK3E /* * TLB miss handling for e6500 and derivatives, using hardware tablewalk. * @@ -515,7 +510,6 @@ dtlb_miss_fault_e6500: itlb_miss_fault_e6500: tlb_epilog_bolted b exc_instruction_storage_book3e -#endif /* CONFIG_PPC_FSL_BOOK3E */ /********************************************************************** * * From afd2288a4c7d3400a53cb29616742f4395a809a1 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Mon, 19 Sep 2022 19:01:28 +0200 Subject: [PATCH 106/214] powerpc/cputable: Remove __machine_check_early_realmode_p{7/8/9} prototypes __machine_check_early_realmode_p{7/8/9} are already in mce.h which is included. 
Remove them from cputable.c Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/b77fc0f90e3a9c065324cbff549b718ccf0809f8.1663606875.git.christophe.leroy@csgroup.eu --- arch/powerpc/kernel/cputable.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c index 2829ea537277..5ace97cccad8 100644 --- a/arch/powerpc/kernel/cputable.c +++ b/arch/powerpc/kernel/cputable.c @@ -64,9 +64,6 @@ extern void __setup_cpu_ppc970MP(unsigned long offset, struct cpu_spec* spec); extern void __setup_cpu_pa6t(unsigned long offset, struct cpu_spec* spec); extern void __restore_cpu_pa6t(void); extern void __restore_cpu_ppc970(void); -extern long __machine_check_early_realmode_p7(struct pt_regs *regs); -extern long __machine_check_early_realmode_p8(struct pt_regs *regs); -extern long __machine_check_early_realmode_p9(struct pt_regs *regs); #endif /* CONFIG_PPC64 */ #if defined(CONFIG_E500) extern void __setup_cpu_e5500(unsigned long offset, struct cpu_spec* spec); From 76b719881a26fec3b77652134f19cf1dfcc96318 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Mon, 19 Sep 2022 19:01:29 +0200 Subject: [PATCH 107/214] powerpc/cputable: Move __cpu_setup() prototypes out of cputable.h Move all prototypes out of cputable.h For that rename cpu_setup_power.h to cpu_setup.h and move all prototypes in it. Signed-off-by: Christophe Leroy [mpe: Standardise cpu_spec *spec formatting] Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/f45118489ee450db654db8bbcdfd8f5907337c22.1663606875.git.christophe.leroy@csgroup.eu --- arch/powerpc/include/asm/cpu_setup.h | 49 ++++++++++++++++++++++ arch/powerpc/include/asm/cpu_setup_power.h | 12 ------ arch/powerpc/kernel/cpu_setup_power.c | 2 +- arch/powerpc/kernel/cputable.c | 38 +---------------- 4 files changed, 51 insertions(+), 50 deletions(-) create mode 100644 arch/powerpc/include/asm/cpu_setup.h delete mode 100644 arch/powerpc/include/asm/cpu_setup_power.h diff --git a/arch/powerpc/include/asm/cpu_setup.h b/arch/powerpc/include/asm/cpu_setup.h new file mode 100644 index 000000000000..30e2fe389502 --- /dev/null +++ b/arch/powerpc/include/asm/cpu_setup.h @@ -0,0 +1,49 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (C) 2020 IBM Corporation + */ + +#ifndef _ASM_POWERPC_CPU_SETUP_H +#define _ASM_POWERPC_CPU_SETUP_H +void __setup_cpu_power7(unsigned long offset, struct cpu_spec *spec); +void __setup_cpu_power8(unsigned long offset, struct cpu_spec *spec); +void __setup_cpu_power9(unsigned long offset, struct cpu_spec *spec); +void __setup_cpu_power10(unsigned long offset, struct cpu_spec *spec); +void __restore_cpu_power7(void); +void __restore_cpu_power8(void); +void __restore_cpu_power9(void); +void __restore_cpu_power10(void); + +void __setup_cpu_e500v1(unsigned long offset, struct cpu_spec *spec); +void __setup_cpu_e500v2(unsigned long offset, struct cpu_spec *spec); +void __setup_cpu_e500mc(unsigned long offset, struct cpu_spec *spec); +void __setup_cpu_440ep(unsigned long offset, struct cpu_spec *spec); +void __setup_cpu_440epx(unsigned long offset, struct cpu_spec *spec); +void __setup_cpu_440gx(unsigned long offset, struct cpu_spec *spec); +void __setup_cpu_440grx(unsigned long offset, struct cpu_spec *spec); +void __setup_cpu_440spe(unsigned long offset, struct cpu_spec *spec); +void __setup_cpu_440x5(unsigned long offset, struct cpu_spec *spec); +void __setup_cpu_460ex(unsigned long offset, struct cpu_spec *spec); +void 
__setup_cpu_460gt(unsigned long offset, struct cpu_spec *spec); +void __setup_cpu_460sx(unsigned long offset, struct cpu_spec *spec); +void __setup_cpu_apm821xx(unsigned long offset, struct cpu_spec *spec); +void __setup_cpu_603(unsigned long offset, struct cpu_spec *spec); +void __setup_cpu_604(unsigned long offset, struct cpu_spec *spec); +void __setup_cpu_750(unsigned long offset, struct cpu_spec *spec); +void __setup_cpu_750cx(unsigned long offset, struct cpu_spec *spec); +void __setup_cpu_750fx(unsigned long offset, struct cpu_spec *spec); +void __setup_cpu_7400(unsigned long offset, struct cpu_spec *spec); +void __setup_cpu_7410(unsigned long offset, struct cpu_spec *spec); +void __setup_cpu_745x(unsigned long offset, struct cpu_spec *spec); + +void __setup_cpu_ppc970(unsigned long offset, struct cpu_spec *spec); +void __setup_cpu_ppc970MP(unsigned long offset, struct cpu_spec *spec); +void __setup_cpu_pa6t(unsigned long offset, struct cpu_spec *spec); +void __restore_cpu_pa6t(void); +void __restore_cpu_ppc970(void); + +void __setup_cpu_e5500(unsigned long offset, struct cpu_spec *spec); +void __setup_cpu_e6500(unsigned long offset, struct cpu_spec *spec); +void __restore_cpu_e5500(void); +void __restore_cpu_e6500(void); +#endif /* _ASM_POWERPC_CPU_SETUP_H */ diff --git a/arch/powerpc/include/asm/cpu_setup_power.h b/arch/powerpc/include/asm/cpu_setup_power.h deleted file mode 100644 index 24be9131f803..000000000000 --- a/arch/powerpc/include/asm/cpu_setup_power.h +++ /dev/null @@ -1,12 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * Copyright (C) 2020 IBM Corporation - */ -void __setup_cpu_power7(unsigned long offset, struct cpu_spec *spec); -void __restore_cpu_power7(void); -void __setup_cpu_power8(unsigned long offset, struct cpu_spec *spec); -void __restore_cpu_power8(void); -void __setup_cpu_power9(unsigned long offset, struct cpu_spec *spec); -void __restore_cpu_power9(void); -void __setup_cpu_power10(unsigned long offset, struct cpu_spec *spec); -void __restore_cpu_power10(void); diff --git a/arch/powerpc/kernel/cpu_setup_power.c b/arch/powerpc/kernel/cpu_setup_power.c index 3dc61e203f37..097c033668f0 100644 --- a/arch/powerpc/kernel/cpu_setup_power.c +++ b/arch/powerpc/kernel/cpu_setup_power.c @@ -11,7 +11,7 @@ #include #include #include -#include +#include /* Disable CPU_FTR_HVMODE and return false if MSR:HV is not set */ static bool init_hvmode_206(struct cpu_spec *t) diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c index 5ace97cccad8..9229e0930332 100644 --- a/arch/powerpc/kernel/cputable.c +++ b/arch/powerpc/kernel/cputable.c @@ -18,6 +18,7 @@ #include #include #include +#include static struct cpu_spec the_cpu_spec __read_mostly; @@ -34,43 +35,6 @@ const char *powerpc_base_platform; * part of the cputable though. 
That has to be fixed for both ppc32 * and ppc64 */ -#ifdef CONFIG_PPC32 -extern void __setup_cpu_e500v1(unsigned long offset, struct cpu_spec* spec); -extern void __setup_cpu_e500v2(unsigned long offset, struct cpu_spec* spec); -extern void __setup_cpu_e500mc(unsigned long offset, struct cpu_spec* spec); -extern void __setup_cpu_440ep(unsigned long offset, struct cpu_spec* spec); -extern void __setup_cpu_440epx(unsigned long offset, struct cpu_spec* spec); -extern void __setup_cpu_440gx(unsigned long offset, struct cpu_spec* spec); -extern void __setup_cpu_440grx(unsigned long offset, struct cpu_spec* spec); -extern void __setup_cpu_440spe(unsigned long offset, struct cpu_spec* spec); -extern void __setup_cpu_440x5(unsigned long offset, struct cpu_spec* spec); -extern void __setup_cpu_460ex(unsigned long offset, struct cpu_spec* spec); -extern void __setup_cpu_460gt(unsigned long offset, struct cpu_spec* spec); -extern void __setup_cpu_460sx(unsigned long offset, struct cpu_spec *spec); -extern void __setup_cpu_apm821xx(unsigned long offset, struct cpu_spec *spec); -extern void __setup_cpu_603(unsigned long offset, struct cpu_spec* spec); -extern void __setup_cpu_604(unsigned long offset, struct cpu_spec* spec); -extern void __setup_cpu_750(unsigned long offset, struct cpu_spec* spec); -extern void __setup_cpu_750cx(unsigned long offset, struct cpu_spec* spec); -extern void __setup_cpu_750fx(unsigned long offset, struct cpu_spec* spec); -extern void __setup_cpu_7400(unsigned long offset, struct cpu_spec* spec); -extern void __setup_cpu_7410(unsigned long offset, struct cpu_spec* spec); -extern void __setup_cpu_745x(unsigned long offset, struct cpu_spec* spec); -#endif /* CONFIG_PPC32 */ -#ifdef CONFIG_PPC64 -#include -extern void __setup_cpu_ppc970(unsigned long offset, struct cpu_spec* spec); -extern void __setup_cpu_ppc970MP(unsigned long offset, struct cpu_spec* spec); -extern void __setup_cpu_pa6t(unsigned long offset, struct cpu_spec* spec); -extern void __restore_cpu_pa6t(void); -extern void __restore_cpu_ppc970(void); -#endif /* CONFIG_PPC64 */ -#if defined(CONFIG_E500) -extern void __setup_cpu_e5500(unsigned long offset, struct cpu_spec* spec); -extern void __setup_cpu_e6500(unsigned long offset, struct cpu_spec* spec); -extern void __restore_cpu_e5500(void); -extern void __restore_cpu_e6500(void); -#endif /* CONFIG_E500 */ /* This table only contains "desktop" CPUs, it need to be filled with embedded * ones as well... From e320a76db4b02e1160eb4bfb17d8d1bc57979955 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Mon, 19 Sep 2022 19:01:30 +0200 Subject: [PATCH 108/214] powerpc/cputable: Split cpu_specs[] out of cputable.h cpu_specs[] is full of #ifdefs depending on the different types of CPU. CPUs are mutually exclusive, it is therefore possible to split cpu_specs[] into smaller more readable pieces. Create cpu_specs_XXX.h that will each be dedicated on one of the following mutually exclusive families: - 40x - 44x - 47x - 8xx - e500 - book3s/32 - book3s/64 In book3s/32, the block for 603 has been moved in front in order to not have two 604 blocks. 
Signed-off-by: Christophe Leroy [mpe: Fix CONFIG_47x to be CONFIG_PPC_47x, tweak some formatting] Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/a44b865e0318286155273b10cdf524ab697928c1.1663606875.git.christophe.leroy@csgroup.eu --- arch/powerpc/kernel/cpu_specs.h | 27 + arch/powerpc/kernel/cpu_specs_40x.h | 280 +++ arch/powerpc/kernel/cpu_specs_44x.h | 304 ++++ arch/powerpc/kernel/cpu_specs_47x.h | 74 + arch/powerpc/kernel/cpu_specs_8xx.h | 23 + arch/powerpc/kernel/cpu_specs_book3s_32.h | 605 +++++++ arch/powerpc/kernel/cpu_specs_book3s_64.h | 481 ++++++ arch/powerpc/kernel/cpu_specs_e500.h | 135 ++ arch/powerpc/kernel/cputable.c | 1877 +-------------------- 9 files changed, 1930 insertions(+), 1876 deletions(-) create mode 100644 arch/powerpc/kernel/cpu_specs.h create mode 100644 arch/powerpc/kernel/cpu_specs_40x.h create mode 100644 arch/powerpc/kernel/cpu_specs_44x.h create mode 100644 arch/powerpc/kernel/cpu_specs_47x.h create mode 100644 arch/powerpc/kernel/cpu_specs_8xx.h create mode 100644 arch/powerpc/kernel/cpu_specs_book3s_32.h create mode 100644 arch/powerpc/kernel/cpu_specs_book3s_64.h create mode 100644 arch/powerpc/kernel/cpu_specs_e500.h diff --git a/arch/powerpc/kernel/cpu_specs.h b/arch/powerpc/kernel/cpu_specs.h new file mode 100644 index 000000000000..658c68acf96e --- /dev/null +++ b/arch/powerpc/kernel/cpu_specs.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#ifdef CONFIG_40x +#include "cpu_specs_40x.h" +#endif + +#ifdef CONFIG_PPC_47x +#include "cpu_specs_47x.h" +#elif defined(CONFIG_44x) +#include "cpu_specs_44x.h" +#endif + +#ifdef CONFIG_PPC_8xx +#include "cpu_specs_8xx.h" +#endif + +#ifdef CONFIG_E500 +#include "cpu_specs_e500.h" +#endif + +#ifdef CONFIG_PPC_BOOK3S_32 +#include "cpu_specs_book3s_32.h" +#endif + +#ifdef CONFIG_PPC_BOOK3S_64 +#include "cpu_specs_book3s_64.h" +#endif diff --git a/arch/powerpc/kernel/cpu_specs_40x.h b/arch/powerpc/kernel/cpu_specs_40x.h new file mode 100644 index 000000000000..a1362a75b8c8 --- /dev/null +++ b/arch/powerpc/kernel/cpu_specs_40x.h @@ -0,0 +1,280 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (C) 2001 Ben. 
Herrenschmidt (benh@kernel.crashing.org) + */ + +static struct cpu_spec cpu_specs[] __initdata = { + { /* STB 04xxx */ + .pvr_mask = 0xffff0000, + .pvr_value = 0x41810000, + .cpu_name = "STB04xxx", + .cpu_features = CPU_FTRS_40X, + .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | + PPC_FEATURE_HAS_4xxMAC, + .mmu_features = MMU_FTR_TYPE_40x, + .icache_bsize = 32, + .dcache_bsize = 32, + .machine_check = machine_check_4xx, + .platform = "ppc405", + }, + { /* NP405L */ + .pvr_mask = 0xffff0000, + .pvr_value = 0x41610000, + .cpu_name = "NP405L", + .cpu_features = CPU_FTRS_40X, + .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | + PPC_FEATURE_HAS_4xxMAC, + .mmu_features = MMU_FTR_TYPE_40x, + .icache_bsize = 32, + .dcache_bsize = 32, + .machine_check = machine_check_4xx, + .platform = "ppc405", + }, + { /* NP4GS3 */ + .pvr_mask = 0xffff0000, + .pvr_value = 0x40B10000, + .cpu_name = "NP4GS3", + .cpu_features = CPU_FTRS_40X, + .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | + PPC_FEATURE_HAS_4xxMAC, + .mmu_features = MMU_FTR_TYPE_40x, + .icache_bsize = 32, + .dcache_bsize = 32, + .machine_check = machine_check_4xx, + .platform = "ppc405", + }, + { /* NP405H */ + .pvr_mask = 0xffff0000, + .pvr_value = 0x41410000, + .cpu_name = "NP405H", + .cpu_features = CPU_FTRS_40X, + .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | + PPC_FEATURE_HAS_4xxMAC, + .mmu_features = MMU_FTR_TYPE_40x, + .icache_bsize = 32, + .dcache_bsize = 32, + .machine_check = machine_check_4xx, + .platform = "ppc405", + }, + { /* 405GPr */ + .pvr_mask = 0xffff0000, + .pvr_value = 0x50910000, + .cpu_name = "405GPr", + .cpu_features = CPU_FTRS_40X, + .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | + PPC_FEATURE_HAS_4xxMAC, + .mmu_features = MMU_FTR_TYPE_40x, + .icache_bsize = 32, + .dcache_bsize = 32, + .machine_check = machine_check_4xx, + .platform = "ppc405", + }, + { /* STBx25xx */ + .pvr_mask = 0xffff0000, + .pvr_value = 0x51510000, + .cpu_name = "STBx25xx", + .cpu_features = CPU_FTRS_40X, + .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | + PPC_FEATURE_HAS_4xxMAC, + .mmu_features = MMU_FTR_TYPE_40x, + .icache_bsize = 32, + .dcache_bsize = 32, + .machine_check = machine_check_4xx, + .platform = "ppc405", + }, + { /* 405LP */ + .pvr_mask = 0xffff0000, + .pvr_value = 0x41F10000, + .cpu_name = "405LP", + .cpu_features = CPU_FTRS_40X, + .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, + .mmu_features = MMU_FTR_TYPE_40x, + .icache_bsize = 32, + .dcache_bsize = 32, + .machine_check = machine_check_4xx, + .platform = "ppc405", + }, + { /* 405EP */ + .pvr_mask = 0xffff0000, + .pvr_value = 0x51210000, + .cpu_name = "405EP", + .cpu_features = CPU_FTRS_40X, + .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | + PPC_FEATURE_HAS_4xxMAC, + .mmu_features = MMU_FTR_TYPE_40x, + .icache_bsize = 32, + .dcache_bsize = 32, + .machine_check = machine_check_4xx, + .platform = "ppc405", + }, + { /* 405EX Rev. A/B with Security */ + .pvr_mask = 0xffff000f, + .pvr_value = 0x12910007, + .cpu_name = "405EX Rev. A/B", + .cpu_features = CPU_FTRS_40X, + .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | + PPC_FEATURE_HAS_4xxMAC, + .mmu_features = MMU_FTR_TYPE_40x, + .icache_bsize = 32, + .dcache_bsize = 32, + .machine_check = machine_check_4xx, + .platform = "ppc405", + }, + { /* 405EX Rev. C without Security */ + .pvr_mask = 0xffff000f, + .pvr_value = 0x1291000d, + .cpu_name = "405EX Rev. 
C", + .cpu_features = CPU_FTRS_40X, + .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | + PPC_FEATURE_HAS_4xxMAC, + .mmu_features = MMU_FTR_TYPE_40x, + .icache_bsize = 32, + .dcache_bsize = 32, + .machine_check = machine_check_4xx, + .platform = "ppc405", + }, + { /* 405EX Rev. C with Security */ + .pvr_mask = 0xffff000f, + .pvr_value = 0x1291000f, + .cpu_name = "405EX Rev. C", + .cpu_features = CPU_FTRS_40X, + .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | + PPC_FEATURE_HAS_4xxMAC, + .mmu_features = MMU_FTR_TYPE_40x, + .icache_bsize = 32, + .dcache_bsize = 32, + .machine_check = machine_check_4xx, + .platform = "ppc405", + }, + { /* 405EX Rev. D without Security */ + .pvr_mask = 0xffff000f, + .pvr_value = 0x12910003, + .cpu_name = "405EX Rev. D", + .cpu_features = CPU_FTRS_40X, + .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | + PPC_FEATURE_HAS_4xxMAC, + .mmu_features = MMU_FTR_TYPE_40x, + .icache_bsize = 32, + .dcache_bsize = 32, + .machine_check = machine_check_4xx, + .platform = "ppc405", + }, + { /* 405EX Rev. D with Security */ + .pvr_mask = 0xffff000f, + .pvr_value = 0x12910005, + .cpu_name = "405EX Rev. D", + .cpu_features = CPU_FTRS_40X, + .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | + PPC_FEATURE_HAS_4xxMAC, + .mmu_features = MMU_FTR_TYPE_40x, + .icache_bsize = 32, + .dcache_bsize = 32, + .machine_check = machine_check_4xx, + .platform = "ppc405", + }, + { /* 405EXr Rev. A/B without Security */ + .pvr_mask = 0xffff000f, + .pvr_value = 0x12910001, + .cpu_name = "405EXr Rev. A/B", + .cpu_features = CPU_FTRS_40X, + .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | + PPC_FEATURE_HAS_4xxMAC, + .mmu_features = MMU_FTR_TYPE_40x, + .icache_bsize = 32, + .dcache_bsize = 32, + .machine_check = machine_check_4xx, + .platform = "ppc405", + }, + { /* 405EXr Rev. C without Security */ + .pvr_mask = 0xffff000f, + .pvr_value = 0x12910009, + .cpu_name = "405EXr Rev. C", + .cpu_features = CPU_FTRS_40X, + .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | + PPC_FEATURE_HAS_4xxMAC, + .mmu_features = MMU_FTR_TYPE_40x, + .icache_bsize = 32, + .dcache_bsize = 32, + .machine_check = machine_check_4xx, + .platform = "ppc405", + }, + { /* 405EXr Rev. C with Security */ + .pvr_mask = 0xffff000f, + .pvr_value = 0x1291000b, + .cpu_name = "405EXr Rev. C", + .cpu_features = CPU_FTRS_40X, + .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | + PPC_FEATURE_HAS_4xxMAC, + .mmu_features = MMU_FTR_TYPE_40x, + .icache_bsize = 32, + .dcache_bsize = 32, + .machine_check = machine_check_4xx, + .platform = "ppc405", + }, + { /* 405EXr Rev. D without Security */ + .pvr_mask = 0xffff000f, + .pvr_value = 0x12910000, + .cpu_name = "405EXr Rev. D", + .cpu_features = CPU_FTRS_40X, + .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | + PPC_FEATURE_HAS_4xxMAC, + .mmu_features = MMU_FTR_TYPE_40x, + .icache_bsize = 32, + .dcache_bsize = 32, + .machine_check = machine_check_4xx, + .platform = "ppc405", + }, + { /* 405EXr Rev. D with Security */ + .pvr_mask = 0xffff000f, + .pvr_value = 0x12910002, + .cpu_name = "405EXr Rev. 
D", + .cpu_features = CPU_FTRS_40X, + .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | + PPC_FEATURE_HAS_4xxMAC, + .mmu_features = MMU_FTR_TYPE_40x, + .icache_bsize = 32, + .dcache_bsize = 32, + .machine_check = machine_check_4xx, + .platform = "ppc405", + }, + { + /* 405EZ */ + .pvr_mask = 0xffff0000, + .pvr_value = 0x41510000, + .cpu_name = "405EZ", + .cpu_features = CPU_FTRS_40X, + .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | + PPC_FEATURE_HAS_4xxMAC, + .mmu_features = MMU_FTR_TYPE_40x, + .icache_bsize = 32, + .dcache_bsize = 32, + .machine_check = machine_check_4xx, + .platform = "ppc405", + }, + { /* APM8018X */ + .pvr_mask = 0xffff0000, + .pvr_value = 0x7ff11432, + .cpu_name = "APM8018X", + .cpu_features = CPU_FTRS_40X, + .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | + PPC_FEATURE_HAS_4xxMAC, + .mmu_features = MMU_FTR_TYPE_40x, + .icache_bsize = 32, + .dcache_bsize = 32, + .machine_check = machine_check_4xx, + .platform = "ppc405", + }, + { /* default match */ + .pvr_mask = 0x00000000, + .pvr_value = 0x00000000, + .cpu_name = "(generic 40x PPC)", + .cpu_features = CPU_FTRS_40X, + .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | + PPC_FEATURE_HAS_4xxMAC, + .mmu_features = MMU_FTR_TYPE_40x, + .icache_bsize = 32, + .dcache_bsize = 32, + .machine_check = machine_check_4xx, + .platform = "ppc405", + } +}; diff --git a/arch/powerpc/kernel/cpu_specs_44x.h b/arch/powerpc/kernel/cpu_specs_44x.h new file mode 100644 index 000000000000..69c4cdc0cdee --- /dev/null +++ b/arch/powerpc/kernel/cpu_specs_44x.h @@ -0,0 +1,304 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (C) 2001 Ben. Herrenschmidt (benh@kernel.crashing.org) + */ + +#define COMMON_USER_BOOKE (PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | \ + PPC_FEATURE_BOOKE) + +static struct cpu_spec cpu_specs[] __initdata = { + { + .pvr_mask = 0xf0000fff, + .pvr_value = 0x40000850, + .cpu_name = "440GR Rev. A", + .cpu_features = CPU_FTRS_44X, + .cpu_user_features = COMMON_USER_BOOKE, + .mmu_features = MMU_FTR_TYPE_44x, + .icache_bsize = 32, + .dcache_bsize = 32, + .machine_check = machine_check_4xx, + .platform = "ppc440", + }, + { /* Use logical PVR for 440EP (logical pvr = pvr | 0x8) */ + .pvr_mask = 0xf0000fff, + .pvr_value = 0x40000858, + .cpu_name = "440EP Rev. A", + .cpu_features = CPU_FTRS_44X, + .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, + .mmu_features = MMU_FTR_TYPE_44x, + .icache_bsize = 32, + .dcache_bsize = 32, + .cpu_setup = __setup_cpu_440ep, + .machine_check = machine_check_4xx, + .platform = "ppc440", + }, + { + .pvr_mask = 0xf0000fff, + .pvr_value = 0x400008d3, + .cpu_name = "440GR Rev. B", + .cpu_features = CPU_FTRS_44X, + .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, + .mmu_features = MMU_FTR_TYPE_44x, + .icache_bsize = 32, + .dcache_bsize = 32, + .machine_check = machine_check_4xx, + .platform = "ppc440", + }, + { /* Matches both physical and logical PVR for 440EP (logical pvr = pvr | 0x8) */ + .pvr_mask = 0xf0000ff7, + .pvr_value = 0x400008d4, + .cpu_name = "440EP Rev. C", + .cpu_features = CPU_FTRS_44X, + .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, + .mmu_features = MMU_FTR_TYPE_44x, + .icache_bsize = 32, + .dcache_bsize = 32, + .cpu_setup = __setup_cpu_440ep, + .machine_check = machine_check_4xx, + .platform = "ppc440", + }, + { /* Use logical PVR for 440EP (logical pvr = pvr | 0x8) */ + .pvr_mask = 0xf0000fff, + .pvr_value = 0x400008db, + .cpu_name = "440EP Rev. 
B", + .cpu_features = CPU_FTRS_44X, + .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, + .mmu_features = MMU_FTR_TYPE_44x, + .icache_bsize = 32, + .dcache_bsize = 32, + .cpu_setup = __setup_cpu_440ep, + .machine_check = machine_check_4xx, + .platform = "ppc440", + }, + { /* 440GRX */ + .pvr_mask = 0xf0000ffb, + .pvr_value = 0x200008D0, + .cpu_name = "440GRX", + .cpu_features = CPU_FTRS_44X, + .cpu_user_features = COMMON_USER_BOOKE, + .mmu_features = MMU_FTR_TYPE_44x, + .icache_bsize = 32, + .dcache_bsize = 32, + .cpu_setup = __setup_cpu_440grx, + .machine_check = machine_check_440A, + .platform = "ppc440", + }, + { /* Use logical PVR for 440EPx (logical pvr = pvr | 0x8) */ + .pvr_mask = 0xf0000ffb, + .pvr_value = 0x200008D8, + .cpu_name = "440EPX", + .cpu_features = CPU_FTRS_44X, + .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, + .mmu_features = MMU_FTR_TYPE_44x, + .icache_bsize = 32, + .dcache_bsize = 32, + .cpu_setup = __setup_cpu_440epx, + .machine_check = machine_check_440A, + .platform = "ppc440", + }, + { /* 440GP Rev. B */ + .pvr_mask = 0xf0000fff, + .pvr_value = 0x40000440, + .cpu_name = "440GP Rev. B", + .cpu_features = CPU_FTRS_44X, + .cpu_user_features = COMMON_USER_BOOKE, + .mmu_features = MMU_FTR_TYPE_44x, + .icache_bsize = 32, + .dcache_bsize = 32, + .machine_check = machine_check_4xx, + .platform = "ppc440gp", + }, + { /* 440GP Rev. C */ + .pvr_mask = 0xf0000fff, + .pvr_value = 0x40000481, + .cpu_name = "440GP Rev. C", + .cpu_features = CPU_FTRS_44X, + .cpu_user_features = COMMON_USER_BOOKE, + .mmu_features = MMU_FTR_TYPE_44x, + .icache_bsize = 32, + .dcache_bsize = 32, + .machine_check = machine_check_4xx, + .platform = "ppc440gp", + }, + { /* 440GX Rev. A */ + .pvr_mask = 0xf0000fff, + .pvr_value = 0x50000850, + .cpu_name = "440GX Rev. A", + .cpu_features = CPU_FTRS_44X, + .cpu_user_features = COMMON_USER_BOOKE, + .mmu_features = MMU_FTR_TYPE_44x, + .icache_bsize = 32, + .dcache_bsize = 32, + .cpu_setup = __setup_cpu_440gx, + .machine_check = machine_check_440A, + .platform = "ppc440", + }, + { /* 440GX Rev. B */ + .pvr_mask = 0xf0000fff, + .pvr_value = 0x50000851, + .cpu_name = "440GX Rev. B", + .cpu_features = CPU_FTRS_44X, + .cpu_user_features = COMMON_USER_BOOKE, + .mmu_features = MMU_FTR_TYPE_44x, + .icache_bsize = 32, + .dcache_bsize = 32, + .cpu_setup = __setup_cpu_440gx, + .machine_check = machine_check_440A, + .platform = "ppc440", + }, + { /* 440GX Rev. C */ + .pvr_mask = 0xf0000fff, + .pvr_value = 0x50000892, + .cpu_name = "440GX Rev. C", + .cpu_features = CPU_FTRS_44X, + .cpu_user_features = COMMON_USER_BOOKE, + .mmu_features = MMU_FTR_TYPE_44x, + .icache_bsize = 32, + .dcache_bsize = 32, + .cpu_setup = __setup_cpu_440gx, + .machine_check = machine_check_440A, + .platform = "ppc440", + }, + { /* 440GX Rev. F */ + .pvr_mask = 0xf0000fff, + .pvr_value = 0x50000894, + .cpu_name = "440GX Rev. F", + .cpu_features = CPU_FTRS_44X, + .cpu_user_features = COMMON_USER_BOOKE, + .mmu_features = MMU_FTR_TYPE_44x, + .icache_bsize = 32, + .dcache_bsize = 32, + .cpu_setup = __setup_cpu_440gx, + .machine_check = machine_check_440A, + .platform = "ppc440", + }, + { /* 440SP Rev. A */ + .pvr_mask = 0xfff00fff, + .pvr_value = 0x53200891, + .cpu_name = "440SP Rev. A", + .cpu_features = CPU_FTRS_44X, + .cpu_user_features = COMMON_USER_BOOKE, + .mmu_features = MMU_FTR_TYPE_44x, + .icache_bsize = 32, + .dcache_bsize = 32, + .machine_check = machine_check_4xx, + .platform = "ppc440", + }, + { /* 440SPe Rev. 
A */ + .pvr_mask = 0xfff00fff, + .pvr_value = 0x53400890, + .cpu_name = "440SPe Rev. A", + .cpu_features = CPU_FTRS_44X, + .cpu_user_features = COMMON_USER_BOOKE, + .mmu_features = MMU_FTR_TYPE_44x, + .icache_bsize = 32, + .dcache_bsize = 32, + .cpu_setup = __setup_cpu_440spe, + .machine_check = machine_check_440A, + .platform = "ppc440", + }, + { /* 440SPe Rev. B */ + .pvr_mask = 0xfff00fff, + .pvr_value = 0x53400891, + .cpu_name = "440SPe Rev. B", + .cpu_features = CPU_FTRS_44X, + .cpu_user_features = COMMON_USER_BOOKE, + .mmu_features = MMU_FTR_TYPE_44x, + .icache_bsize = 32, + .dcache_bsize = 32, + .cpu_setup = __setup_cpu_440spe, + .machine_check = machine_check_440A, + .platform = "ppc440", + }, + { /* 460EX */ + .pvr_mask = 0xffff0006, + .pvr_value = 0x13020002, + .cpu_name = "460EX", + .cpu_features = CPU_FTRS_440x6, + .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, + .mmu_features = MMU_FTR_TYPE_44x, + .icache_bsize = 32, + .dcache_bsize = 32, + .cpu_setup = __setup_cpu_460ex, + .machine_check = machine_check_440A, + .platform = "ppc440", + }, + { /* 460EX Rev B */ + .pvr_mask = 0xffff0007, + .pvr_value = 0x13020004, + .cpu_name = "460EX Rev. B", + .cpu_features = CPU_FTRS_440x6, + .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, + .mmu_features = MMU_FTR_TYPE_44x, + .icache_bsize = 32, + .dcache_bsize = 32, + .cpu_setup = __setup_cpu_460ex, + .machine_check = machine_check_440A, + .platform = "ppc440", + }, + { /* 460GT */ + .pvr_mask = 0xffff0006, + .pvr_value = 0x13020000, + .cpu_name = "460GT", + .cpu_features = CPU_FTRS_440x6, + .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, + .mmu_features = MMU_FTR_TYPE_44x, + .icache_bsize = 32, + .dcache_bsize = 32, + .cpu_setup = __setup_cpu_460gt, + .machine_check = machine_check_440A, + .platform = "ppc440", + }, + { /* 460GT Rev B */ + .pvr_mask = 0xffff0007, + .pvr_value = 0x13020005, + .cpu_name = "460GT Rev. 
B", + .cpu_features = CPU_FTRS_440x6, + .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, + .mmu_features = MMU_FTR_TYPE_44x, + .icache_bsize = 32, + .dcache_bsize = 32, + .cpu_setup = __setup_cpu_460gt, + .machine_check = machine_check_440A, + .platform = "ppc440", + }, + { /* 460SX */ + .pvr_mask = 0xffffff00, + .pvr_value = 0x13541800, + .cpu_name = "460SX", + .cpu_features = CPU_FTRS_44X, + .cpu_user_features = COMMON_USER_BOOKE, + .mmu_features = MMU_FTR_TYPE_44x, + .icache_bsize = 32, + .dcache_bsize = 32, + .cpu_setup = __setup_cpu_460sx, + .machine_check = machine_check_440A, + .platform = "ppc440", + }, + { /* 464 in APM821xx */ + .pvr_mask = 0xfffffff0, + .pvr_value = 0x12C41C80, + .cpu_name = "APM821XX", + .cpu_features = CPU_FTRS_44X, + .cpu_user_features = COMMON_USER_BOOKE | + PPC_FEATURE_HAS_FPU, + .mmu_features = MMU_FTR_TYPE_44x, + .icache_bsize = 32, + .dcache_bsize = 32, + .cpu_setup = __setup_cpu_apm821xx, + .machine_check = machine_check_440A, + .platform = "ppc440", + }, + { /* default match */ + .pvr_mask = 0x00000000, + .pvr_value = 0x00000000, + .cpu_name = "(generic 44x PPC)", + .cpu_features = CPU_FTRS_44X, + .cpu_user_features = COMMON_USER_BOOKE, + .mmu_features = MMU_FTR_TYPE_44x, + .icache_bsize = 32, + .dcache_bsize = 32, + .machine_check = machine_check_4xx, + .platform = "ppc440", + } +}; diff --git a/arch/powerpc/kernel/cpu_specs_47x.h b/arch/powerpc/kernel/cpu_specs_47x.h new file mode 100644 index 000000000000..3143cd504a51 --- /dev/null +++ b/arch/powerpc/kernel/cpu_specs_47x.h @@ -0,0 +1,74 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (C) 2001 Ben. Herrenschmidt (benh@kernel.crashing.org) + */ + +#define COMMON_USER_BOOKE (PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | \ + PPC_FEATURE_BOOKE) + +static struct cpu_spec cpu_specs[] __initdata = { + { /* 476 DD2 core */ + .pvr_mask = 0xffffffff, + .pvr_value = 0x11a52080, + .cpu_name = "476", + .cpu_features = CPU_FTRS_47X | CPU_FTR_476_DD2, + .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, + .mmu_features = MMU_FTR_TYPE_47x | MMU_FTR_USE_TLBIVAX_BCAST | + MMU_FTR_LOCK_BCAST_INVAL, + .icache_bsize = 32, + .dcache_bsize = 128, + .machine_check = machine_check_47x, + .platform = "ppc470", + }, + { /* 476fpe */ + .pvr_mask = 0xffff0000, + .pvr_value = 0x7ff50000, + .cpu_name = "476fpe", + .cpu_features = CPU_FTRS_47X | CPU_FTR_476_DD2, + .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, + .mmu_features = MMU_FTR_TYPE_47x | MMU_FTR_USE_TLBIVAX_BCAST | + MMU_FTR_LOCK_BCAST_INVAL, + .icache_bsize = 32, + .dcache_bsize = 128, + .machine_check = machine_check_47x, + .platform = "ppc470", + }, + { /* 476 iss */ + .pvr_mask = 0xffff0000, + .pvr_value = 0x00050000, + .cpu_name = "476", + .cpu_features = CPU_FTRS_47X, + .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, + .mmu_features = MMU_FTR_TYPE_47x | MMU_FTR_USE_TLBIVAX_BCAST | + MMU_FTR_LOCK_BCAST_INVAL, + .icache_bsize = 32, + .dcache_bsize = 128, + .machine_check = machine_check_47x, + .platform = "ppc470", + }, + { /* 476 others */ + .pvr_mask = 0xffff0000, + .pvr_value = 0x11a50000, + .cpu_name = "476", + .cpu_features = CPU_FTRS_47X, + .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, + .mmu_features = MMU_FTR_TYPE_47x | MMU_FTR_USE_TLBIVAX_BCAST | + MMU_FTR_LOCK_BCAST_INVAL, + .icache_bsize = 32, + .dcache_bsize = 128, + .machine_check = machine_check_47x, + .platform = "ppc470", + }, + { /* default match */ + .pvr_mask = 0x00000000, + .pvr_value = 0x00000000, + .cpu_name = 
"(generic 47x PPC)", + .cpu_features = CPU_FTRS_47X, + .cpu_user_features = COMMON_USER_BOOKE, + .mmu_features = MMU_FTR_TYPE_47x, + .icache_bsize = 32, + .dcache_bsize = 128, + .machine_check = machine_check_47x, + .platform = "ppc470", + } +}; diff --git a/arch/powerpc/kernel/cpu_specs_8xx.h b/arch/powerpc/kernel/cpu_specs_8xx.h new file mode 100644 index 000000000000..93ddbc202ba3 --- /dev/null +++ b/arch/powerpc/kernel/cpu_specs_8xx.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (C) 2001 Ben. Herrenschmidt (benh@kernel.crashing.org) + */ + +static struct cpu_spec cpu_specs[] __initdata = { + { /* 8xx */ + .pvr_mask = 0xffff0000, + .pvr_value = PVR_8xx, + .cpu_name = "8xx", + /* + * CPU_FTR_MAYBE_CAN_DOZE is possible, + * if the 8xx code is there.... + */ + .cpu_features = CPU_FTRS_8XX, + .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, + .mmu_features = MMU_FTR_TYPE_8xx, + .icache_bsize = 16, + .dcache_bsize = 16, + .machine_check = machine_check_8xx, + .platform = "ppc823", + }, +}; diff --git a/arch/powerpc/kernel/cpu_specs_book3s_32.h b/arch/powerpc/kernel/cpu_specs_book3s_32.h new file mode 100644 index 000000000000..3714634d194a --- /dev/null +++ b/arch/powerpc/kernel/cpu_specs_book3s_32.h @@ -0,0 +1,605 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (C) 2001 Ben. Herrenschmidt (benh@kernel.crashing.org) + */ + +#define COMMON_USER (PPC_FEATURE_32 | PPC_FEATURE_HAS_FPU | \ + PPC_FEATURE_HAS_MMU) + +static struct cpu_spec cpu_specs[] __initdata = { +#ifdef CONFIG_PPC_BOOK3S_603 + { /* 603 */ + .pvr_mask = 0xffff0000, + .pvr_value = 0x00030000, + .cpu_name = "603", + .cpu_features = CPU_FTRS_603, + .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, + .mmu_features = 0, + .icache_bsize = 32, + .dcache_bsize = 32, + .cpu_setup = __setup_cpu_603, + .machine_check = machine_check_generic, + .platform = "ppc603", + }, + { /* 603e */ + .pvr_mask = 0xffff0000, + .pvr_value = 0x00060000, + .cpu_name = "603e", + .cpu_features = CPU_FTRS_603, + .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, + .mmu_features = 0, + .icache_bsize = 32, + .dcache_bsize = 32, + .cpu_setup = __setup_cpu_603, + .machine_check = machine_check_generic, + .platform = "ppc603", + }, + { /* 603ev */ + .pvr_mask = 0xffff0000, + .pvr_value = 0x00070000, + .cpu_name = "603ev", + .cpu_features = CPU_FTRS_603, + .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, + .mmu_features = 0, + .icache_bsize = 32, + .dcache_bsize = 32, + .cpu_setup = __setup_cpu_603, + .machine_check = machine_check_generic, + .platform = "ppc603", + }, + { /* 82xx (8240, 8245, 8260 are all 603e cores) */ + .pvr_mask = 0x7fff0000, + .pvr_value = 0x00810000, + .cpu_name = "82xx", + .cpu_features = CPU_FTRS_82XX, + .cpu_user_features = COMMON_USER, + .mmu_features = 0, + .icache_bsize = 32, + .dcache_bsize = 32, + .cpu_setup = __setup_cpu_603, + .machine_check = machine_check_generic, + .platform = "ppc603", + }, + { /* All G2_LE (603e core, plus some) have the same pvr */ + .pvr_mask = 0x7fff0000, + .pvr_value = 0x00820000, + .cpu_name = "G2_LE", + .cpu_features = CPU_FTRS_G2_LE, + .cpu_user_features = COMMON_USER, + .mmu_features = MMU_FTR_USE_HIGH_BATS, + .icache_bsize = 32, + .dcache_bsize = 32, + .cpu_setup = __setup_cpu_603, + .machine_check = machine_check_generic, + .platform = "ppc603", + }, +#ifdef CONFIG_PPC_83xx + { /* e300c1 (a 603e core, plus some) on 83xx */ + .pvr_mask = 0x7fff0000, + .pvr_value = 0x00830000, + .cpu_name = "e300c1", + .cpu_features = 
CPU_FTRS_E300, + .cpu_user_features = COMMON_USER, + .mmu_features = MMU_FTR_USE_HIGH_BATS, + .icache_bsize = 32, + .dcache_bsize = 32, + .cpu_setup = __setup_cpu_603, + .machine_check = machine_check_83xx, + .platform = "ppc603", + }, + { /* e300c2 (an e300c1 core, plus some, minus FPU) on 83xx */ + .pvr_mask = 0x7fff0000, + .pvr_value = 0x00840000, + .cpu_name = "e300c2", + .cpu_features = CPU_FTRS_E300C2, + .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, + .mmu_features = MMU_FTR_USE_HIGH_BATS | MMU_FTR_NEED_DTLB_SW_LRU, + .icache_bsize = 32, + .dcache_bsize = 32, + .cpu_setup = __setup_cpu_603, + .machine_check = machine_check_83xx, + .platform = "ppc603", + }, + { /* e300c3 (e300c1, plus one IU, half cache size) on 83xx */ + .pvr_mask = 0x7fff0000, + .pvr_value = 0x00850000, + .cpu_name = "e300c3", + .cpu_features = CPU_FTRS_E300, + .cpu_user_features = COMMON_USER, + .mmu_features = MMU_FTR_USE_HIGH_BATS | MMU_FTR_NEED_DTLB_SW_LRU, + .icache_bsize = 32, + .dcache_bsize = 32, + .cpu_setup = __setup_cpu_603, + .machine_check = machine_check_83xx, + .num_pmcs = 4, + .platform = "ppc603", + }, + { /* e300c4 (e300c1, plus one IU) */ + .pvr_mask = 0x7fff0000, + .pvr_value = 0x00860000, + .cpu_name = "e300c4", + .cpu_features = CPU_FTRS_E300, + .cpu_user_features = COMMON_USER, + .mmu_features = MMU_FTR_USE_HIGH_BATS | MMU_FTR_NEED_DTLB_SW_LRU, + .icache_bsize = 32, + .dcache_bsize = 32, + .cpu_setup = __setup_cpu_603, + .machine_check = machine_check_83xx, + .num_pmcs = 4, + .platform = "ppc603", + }, +#endif +#endif /* CONFIG_PPC_BOOK3S_603 */ +#ifdef CONFIG_PPC_BOOK3S_604 + { /* 604 */ + .pvr_mask = 0xffff0000, + .pvr_value = 0x00040000, + .cpu_name = "604", + .cpu_features = CPU_FTRS_604, + .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, + .mmu_features = MMU_FTR_HPTE_TABLE, + .icache_bsize = 32, + .dcache_bsize = 32, + .num_pmcs = 2, + .cpu_setup = __setup_cpu_604, + .machine_check = machine_check_generic, + .platform = "ppc604", + }, + { /* 604e */ + .pvr_mask = 0xfffff000, + .pvr_value = 0x00090000, + .cpu_name = "604e", + .cpu_features = CPU_FTRS_604, + .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, + .mmu_features = MMU_FTR_HPTE_TABLE, + .icache_bsize = 32, + .dcache_bsize = 32, + .num_pmcs = 4, + .cpu_setup = __setup_cpu_604, + .machine_check = machine_check_generic, + .platform = "ppc604", + }, + { /* 604r */ + .pvr_mask = 0xffff0000, + .pvr_value = 0x00090000, + .cpu_name = "604r", + .cpu_features = CPU_FTRS_604, + .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, + .mmu_features = MMU_FTR_HPTE_TABLE, + .icache_bsize = 32, + .dcache_bsize = 32, + .num_pmcs = 4, + .cpu_setup = __setup_cpu_604, + .machine_check = machine_check_generic, + .platform = "ppc604", + }, + { /* 604ev */ + .pvr_mask = 0xffff0000, + .pvr_value = 0x000a0000, + .cpu_name = "604ev", + .cpu_features = CPU_FTRS_604, + .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, + .mmu_features = MMU_FTR_HPTE_TABLE, + .icache_bsize = 32, + .dcache_bsize = 32, + .num_pmcs = 4, + .cpu_setup = __setup_cpu_604, + .machine_check = machine_check_generic, + .platform = "ppc604", + }, + { /* 740/750 (0x4202, don't support TAU ?) 
*/ + .pvr_mask = 0xffffffff, + .pvr_value = 0x00084202, + .cpu_name = "740/750", + .cpu_features = CPU_FTRS_740_NOTAU, + .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, + .mmu_features = MMU_FTR_HPTE_TABLE, + .icache_bsize = 32, + .dcache_bsize = 32, + .num_pmcs = 4, + .cpu_setup = __setup_cpu_750, + .machine_check = machine_check_generic, + .platform = "ppc750", + }, + { /* 750CX (80100 and 8010x?) */ + .pvr_mask = 0xfffffff0, + .pvr_value = 0x00080100, + .cpu_name = "750CX", + .cpu_features = CPU_FTRS_750, + .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, + .mmu_features = MMU_FTR_HPTE_TABLE, + .icache_bsize = 32, + .dcache_bsize = 32, + .num_pmcs = 4, + .cpu_setup = __setup_cpu_750cx, + .machine_check = machine_check_generic, + .platform = "ppc750", + }, + { /* 750CX (82201 and 82202) */ + .pvr_mask = 0xfffffff0, + .pvr_value = 0x00082200, + .cpu_name = "750CX", + .cpu_features = CPU_FTRS_750, + .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, + .mmu_features = MMU_FTR_HPTE_TABLE, + .icache_bsize = 32, + .dcache_bsize = 32, + .num_pmcs = 4, + .pmc_type = PPC_PMC_IBM, + .cpu_setup = __setup_cpu_750cx, + .machine_check = machine_check_generic, + .platform = "ppc750", + }, + { /* 750CXe (82214) */ + .pvr_mask = 0xfffffff0, + .pvr_value = 0x00082210, + .cpu_name = "750CXe", + .cpu_features = CPU_FTRS_750, + .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, + .mmu_features = MMU_FTR_HPTE_TABLE, + .icache_bsize = 32, + .dcache_bsize = 32, + .num_pmcs = 4, + .pmc_type = PPC_PMC_IBM, + .cpu_setup = __setup_cpu_750cx, + .machine_check = machine_check_generic, + .platform = "ppc750", + }, + { /* 750CXe "Gekko" (83214) */ + .pvr_mask = 0xffffffff, + .pvr_value = 0x00083214, + .cpu_name = "750CXe", + .cpu_features = CPU_FTRS_750, + .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, + .mmu_features = MMU_FTR_HPTE_TABLE, + .icache_bsize = 32, + .dcache_bsize = 32, + .num_pmcs = 4, + .pmc_type = PPC_PMC_IBM, + .cpu_setup = __setup_cpu_750cx, + .machine_check = machine_check_generic, + .platform = "ppc750", + }, + { /* 750CL (and "Broadway") */ + .pvr_mask = 0xfffff0e0, + .pvr_value = 0x00087000, + .cpu_name = "750CL", + .cpu_features = CPU_FTRS_750CL, + .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, + .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS, + .icache_bsize = 32, + .dcache_bsize = 32, + .num_pmcs = 4, + .pmc_type = PPC_PMC_IBM, + .cpu_setup = __setup_cpu_750, + .machine_check = machine_check_generic, + .platform = "ppc750", + }, + { /* 745/755 */ + .pvr_mask = 0xfffff000, + .pvr_value = 0x00083000, + .cpu_name = "745/755", + .cpu_features = CPU_FTRS_750, + .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, + .mmu_features = MMU_FTR_HPTE_TABLE, + .icache_bsize = 32, + .dcache_bsize = 32, + .num_pmcs = 4, + .pmc_type = PPC_PMC_IBM, + .cpu_setup = __setup_cpu_750, + .machine_check = machine_check_generic, + .platform = "ppc750", + }, + { /* 750FX rev 1.x */ + .pvr_mask = 0xffffff00, + .pvr_value = 0x70000100, + .cpu_name = "750FX", + .cpu_features = CPU_FTRS_750FX1, + .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, + .mmu_features = MMU_FTR_HPTE_TABLE, + .icache_bsize = 32, + .dcache_bsize = 32, + .num_pmcs = 4, + .pmc_type = PPC_PMC_IBM, + .cpu_setup = __setup_cpu_750, + .machine_check = machine_check_generic, + .platform = "ppc750", + }, + { /* 750FX rev 2.0 must disable HID0[DPM] */ + .pvr_mask = 0xffffffff, + .pvr_value = 0x70000200, + .cpu_name = "750FX", + .cpu_features = CPU_FTRS_750FX2, + .cpu_user_features = COMMON_USER | 
PPC_FEATURE_PPC_LE, + .mmu_features = MMU_FTR_HPTE_TABLE, + .icache_bsize = 32, + .dcache_bsize = 32, + .num_pmcs = 4, + .pmc_type = PPC_PMC_IBM, + .cpu_setup = __setup_cpu_750, + .machine_check = machine_check_generic, + .platform = "ppc750", + }, + { /* 750FX (All revs except 2.0) */ + .pvr_mask = 0xffff0000, + .pvr_value = 0x70000000, + .cpu_name = "750FX", + .cpu_features = CPU_FTRS_750FX, + .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, + .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS, + .icache_bsize = 32, + .dcache_bsize = 32, + .num_pmcs = 4, + .pmc_type = PPC_PMC_IBM, + .cpu_setup = __setup_cpu_750fx, + .machine_check = machine_check_generic, + .platform = "ppc750", + }, + { /* 750GX */ + .pvr_mask = 0xffff0000, + .pvr_value = 0x70020000, + .cpu_name = "750GX", + .cpu_features = CPU_FTRS_750GX, + .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, + .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS, + .icache_bsize = 32, + .dcache_bsize = 32, + .num_pmcs = 4, + .pmc_type = PPC_PMC_IBM, + .cpu_setup = __setup_cpu_750fx, + .machine_check = machine_check_generic, + .platform = "ppc750", + }, + { /* 740/750 (L2CR bit need fixup for 740) */ + .pvr_mask = 0xffff0000, + .pvr_value = 0x00080000, + .cpu_name = "740/750", + .cpu_features = CPU_FTRS_740, + .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, + .mmu_features = MMU_FTR_HPTE_TABLE, + .icache_bsize = 32, + .dcache_bsize = 32, + .num_pmcs = 4, + .pmc_type = PPC_PMC_IBM, + .cpu_setup = __setup_cpu_750, + .machine_check = machine_check_generic, + .platform = "ppc750", + }, + { /* 7400 rev 1.1 ? (no TAU) */ + .pvr_mask = 0xffffffff, + .pvr_value = 0x000c1101, + .cpu_name = "7400 (1.1)", + .cpu_features = CPU_FTRS_7400_NOTAU, + .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP | + PPC_FEATURE_PPC_LE, + .mmu_features = MMU_FTR_HPTE_TABLE, + .icache_bsize = 32, + .dcache_bsize = 32, + .num_pmcs = 4, + .pmc_type = PPC_PMC_G4, + .cpu_setup = __setup_cpu_7400, + .machine_check = machine_check_generic, + .platform = "ppc7400", + }, + { /* 7400 */ + .pvr_mask = 0xffff0000, + .pvr_value = 0x000c0000, + .cpu_name = "7400", + .cpu_features = CPU_FTRS_7400, + .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP | + PPC_FEATURE_PPC_LE, + .mmu_features = MMU_FTR_HPTE_TABLE, + .icache_bsize = 32, + .dcache_bsize = 32, + .num_pmcs = 4, + .pmc_type = PPC_PMC_G4, + .cpu_setup = __setup_cpu_7400, + .machine_check = machine_check_generic, + .platform = "ppc7400", + }, + { /* 7410 */ + .pvr_mask = 0xffff0000, + .pvr_value = 0x800c0000, + .cpu_name = "7410", + .cpu_features = CPU_FTRS_7400, + .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP | + PPC_FEATURE_PPC_LE, + .mmu_features = MMU_FTR_HPTE_TABLE, + .icache_bsize = 32, + .dcache_bsize = 32, + .num_pmcs = 4, + .pmc_type = PPC_PMC_G4, + .cpu_setup = __setup_cpu_7410, + .machine_check = machine_check_generic, + .platform = "ppc7400", + }, + { /* 7450 2.0 - no doze/nap */ + .pvr_mask = 0xffffffff, + .pvr_value = 0x80000200, + .cpu_name = "7450", + .cpu_features = CPU_FTRS_7450_20, + .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP | + PPC_FEATURE_PPC_LE, + .mmu_features = MMU_FTR_HPTE_TABLE, + .icache_bsize = 32, + .dcache_bsize = 32, + .num_pmcs = 6, + .pmc_type = PPC_PMC_G4, + .cpu_setup = __setup_cpu_745x, + .machine_check = machine_check_generic, + .platform = "ppc7450", + }, + { /* 7450 2.1 */ + .pvr_mask = 0xffffffff, + .pvr_value = 0x80000201, + .cpu_name = "7450", + .cpu_features = CPU_FTRS_7450_21, + 
.cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP | + PPC_FEATURE_PPC_LE, + .mmu_features = MMU_FTR_HPTE_TABLE, + .icache_bsize = 32, + .dcache_bsize = 32, + .num_pmcs = 6, + .pmc_type = PPC_PMC_G4, + .cpu_setup = __setup_cpu_745x, + .machine_check = machine_check_generic, + .platform = "ppc7450", + }, + { /* 7450 2.3 and newer */ + .pvr_mask = 0xffff0000, + .pvr_value = 0x80000000, + .cpu_name = "7450", + .cpu_features = CPU_FTRS_7450_23, + .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP | + PPC_FEATURE_PPC_LE, + .mmu_features = MMU_FTR_HPTE_TABLE, + .icache_bsize = 32, + .dcache_bsize = 32, + .num_pmcs = 6, + .pmc_type = PPC_PMC_G4, + .cpu_setup = __setup_cpu_745x, + .machine_check = machine_check_generic, + .platform = "ppc7450", + }, + { /* 7455 rev 1.x */ + .pvr_mask = 0xffffff00, + .pvr_value = 0x80010100, + .cpu_name = "7455", + .cpu_features = CPU_FTRS_7455_1, + .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP | + PPC_FEATURE_PPC_LE, + .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS, + .icache_bsize = 32, + .dcache_bsize = 32, + .num_pmcs = 6, + .pmc_type = PPC_PMC_G4, + .cpu_setup = __setup_cpu_745x, + .machine_check = machine_check_generic, + .platform = "ppc7450", + }, + { /* 7455 rev 2.0 */ + .pvr_mask = 0xffffffff, + .pvr_value = 0x80010200, + .cpu_name = "7455", + .cpu_features = CPU_FTRS_7455_20, + .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP | + PPC_FEATURE_PPC_LE, + .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS, + .icache_bsize = 32, + .dcache_bsize = 32, + .num_pmcs = 6, + .pmc_type = PPC_PMC_G4, + .cpu_setup = __setup_cpu_745x, + .machine_check = machine_check_generic, + .platform = "ppc7450", + }, + { /* 7455 others */ + .pvr_mask = 0xffff0000, + .pvr_value = 0x80010000, + .cpu_name = "7455", + .cpu_features = CPU_FTRS_7455, + .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP | + PPC_FEATURE_PPC_LE, + .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS, + .icache_bsize = 32, + .dcache_bsize = 32, + .num_pmcs = 6, + .pmc_type = PPC_PMC_G4, + .cpu_setup = __setup_cpu_745x, + .machine_check = machine_check_generic, + .platform = "ppc7450", + }, + { /* 7447/7457 Rev 1.0 */ + .pvr_mask = 0xffffffff, + .pvr_value = 0x80020100, + .cpu_name = "7447/7457", + .cpu_features = CPU_FTRS_7447_10, + .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP | + PPC_FEATURE_PPC_LE, + .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS, + .icache_bsize = 32, + .dcache_bsize = 32, + .num_pmcs = 6, + .pmc_type = PPC_PMC_G4, + .cpu_setup = __setup_cpu_745x, + .machine_check = machine_check_generic, + .platform = "ppc7450", + }, + { /* 7447/7457 Rev 1.1 */ + .pvr_mask = 0xffffffff, + .pvr_value = 0x80020101, + .cpu_name = "7447/7457", + .cpu_features = CPU_FTRS_7447_10, + .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP | + PPC_FEATURE_PPC_LE, + .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS, + .icache_bsize = 32, + .dcache_bsize = 32, + .num_pmcs = 6, + .pmc_type = PPC_PMC_G4, + .cpu_setup = __setup_cpu_745x, + .machine_check = machine_check_generic, + .platform = "ppc7450", + }, + { /* 7447/7457 Rev 1.2 and later */ + .pvr_mask = 0xffff0000, + .pvr_value = 0x80020000, + .cpu_name = "7447/7457", + .cpu_features = CPU_FTRS_7447, + .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP | + PPC_FEATURE_PPC_LE, + .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS, + .icache_bsize = 32, + .dcache_bsize = 32, + .num_pmcs = 6, + 
.pmc_type = PPC_PMC_G4, + .cpu_setup = __setup_cpu_745x, + .machine_check = machine_check_generic, + .platform = "ppc7450", + }, + { /* 7447A */ + .pvr_mask = 0xffff0000, + .pvr_value = 0x80030000, + .cpu_name = "7447A", + .cpu_features = CPU_FTRS_7447A, + .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP | + PPC_FEATURE_PPC_LE, + .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS, + .icache_bsize = 32, + .dcache_bsize = 32, + .num_pmcs = 6, + .pmc_type = PPC_PMC_G4, + .cpu_setup = __setup_cpu_745x, + .machine_check = machine_check_generic, + .platform = "ppc7450", + }, + { /* 7448 */ + .pvr_mask = 0xffff0000, + .pvr_value = 0x80040000, + .cpu_name = "7448", + .cpu_features = CPU_FTRS_7448, + .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP | + PPC_FEATURE_PPC_LE, + .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS, + .icache_bsize = 32, + .dcache_bsize = 32, + .num_pmcs = 6, + .pmc_type = PPC_PMC_G4, + .cpu_setup = __setup_cpu_745x, + .machine_check = machine_check_generic, + .platform = "ppc7450", + }, + { /* default match, we assume split I/D cache & TB (non-601)... */ + .pvr_mask = 0x00000000, + .pvr_value = 0x00000000, + .cpu_name = "(generic PPC)", + .cpu_features = CPU_FTRS_CLASSIC32, + .cpu_user_features = COMMON_USER, + .mmu_features = MMU_FTR_HPTE_TABLE, + .icache_bsize = 32, + .dcache_bsize = 32, + .machine_check = machine_check_generic, + .platform = "ppc603", + }, +#endif /* CONFIG_PPC_BOOK3S_604 */ +}; diff --git a/arch/powerpc/kernel/cpu_specs_book3s_64.h b/arch/powerpc/kernel/cpu_specs_book3s_64.h new file mode 100644 index 000000000000..c370c1b804a9 --- /dev/null +++ b/arch/powerpc/kernel/cpu_specs_book3s_64.h @@ -0,0 +1,481 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (C) 2001 Ben. Herrenschmidt (benh@kernel.crashing.org) + * + * Modifications for ppc64: + * Copyright (C) 2003 Dave Engebretsen + */ + +/* NOTE: + * Unlike ppc32, ppc64 will only call cpu_setup() for the boot CPU, it's + * the responsibility of the appropriate CPU save/restore functions to + * eventually copy these settings over. Those save/restore aren't yet + * part of the cputable though. 
That has to be fixed for both ppc32 + * and ppc64 + */ +#define COMMON_USER_PPC64 (PPC_FEATURE_32 | PPC_FEATURE_HAS_FPU | \ + PPC_FEATURE_HAS_MMU | PPC_FEATURE_64) +#define COMMON_USER_POWER4 (COMMON_USER_PPC64 | PPC_FEATURE_POWER4) +#define COMMON_USER_POWER5 (COMMON_USER_PPC64 | PPC_FEATURE_POWER5 |\ + PPC_FEATURE_SMT | PPC_FEATURE_ICACHE_SNOOP) +#define COMMON_USER_POWER5_PLUS (COMMON_USER_PPC64 | PPC_FEATURE_POWER5_PLUS|\ + PPC_FEATURE_SMT | PPC_FEATURE_ICACHE_SNOOP) +#define COMMON_USER_POWER6 (COMMON_USER_PPC64 | PPC_FEATURE_ARCH_2_05 |\ + PPC_FEATURE_SMT | PPC_FEATURE_ICACHE_SNOOP | \ + PPC_FEATURE_TRUE_LE | \ + PPC_FEATURE_PSERIES_PERFMON_COMPAT) +#define COMMON_USER_POWER7 (COMMON_USER_PPC64 | PPC_FEATURE_ARCH_2_06 |\ + PPC_FEATURE_SMT | PPC_FEATURE_ICACHE_SNOOP | \ + PPC_FEATURE_TRUE_LE | \ + PPC_FEATURE_PSERIES_PERFMON_COMPAT) +#define COMMON_USER2_POWER7 (PPC_FEATURE2_DSCR) +#define COMMON_USER_POWER8 (COMMON_USER_PPC64 | PPC_FEATURE_ARCH_2_06 |\ + PPC_FEATURE_SMT | PPC_FEATURE_ICACHE_SNOOP | \ + PPC_FEATURE_TRUE_LE | \ + PPC_FEATURE_PSERIES_PERFMON_COMPAT) +#define COMMON_USER2_POWER8 (PPC_FEATURE2_ARCH_2_07 | \ + PPC_FEATURE2_HTM_COMP | \ + PPC_FEATURE2_HTM_NOSC_COMP | \ + PPC_FEATURE2_DSCR | \ + PPC_FEATURE2_ISEL | PPC_FEATURE2_TAR | \ + PPC_FEATURE2_VEC_CRYPTO) +#define COMMON_USER_PA6T (COMMON_USER_PPC64 | PPC_FEATURE_PA6T |\ + PPC_FEATURE_TRUE_LE | \ + PPC_FEATURE_HAS_ALTIVEC_COMP) +#define COMMON_USER_POWER9 COMMON_USER_POWER8 +#define COMMON_USER2_POWER9 (COMMON_USER2_POWER8 | \ + PPC_FEATURE2_ARCH_3_00 | \ + PPC_FEATURE2_HAS_IEEE128 | \ + PPC_FEATURE2_DARN | \ + PPC_FEATURE2_SCV) +#define COMMON_USER_POWER10 COMMON_USER_POWER9 +#define COMMON_USER2_POWER10 (PPC_FEATURE2_ARCH_3_1 | \ + PPC_FEATURE2_MMA | \ + PPC_FEATURE2_ARCH_3_00 | \ + PPC_FEATURE2_HAS_IEEE128 | \ + PPC_FEATURE2_DARN | \ + PPC_FEATURE2_SCV | \ + PPC_FEATURE2_ARCH_2_07 | \ + PPC_FEATURE2_DSCR | \ + PPC_FEATURE2_ISEL | PPC_FEATURE2_TAR | \ + PPC_FEATURE2_VEC_CRYPTO) + +static struct cpu_spec cpu_specs[] __initdata = { + { /* PPC970 */ + .pvr_mask = 0xffff0000, + .pvr_value = 0x00390000, + .cpu_name = "PPC970", + .cpu_features = CPU_FTRS_PPC970, + .cpu_user_features = COMMON_USER_POWER4 | PPC_FEATURE_HAS_ALTIVEC_COMP, + .mmu_features = MMU_FTRS_PPC970, + .icache_bsize = 128, + .dcache_bsize = 128, + .num_pmcs = 8, + .pmc_type = PPC_PMC_IBM, + .cpu_setup = __setup_cpu_ppc970, + .cpu_restore = __restore_cpu_ppc970, + .platform = "ppc970", + }, + { /* PPC970FX */ + .pvr_mask = 0xffff0000, + .pvr_value = 0x003c0000, + .cpu_name = "PPC970FX", + .cpu_features = CPU_FTRS_PPC970, + .cpu_user_features = COMMON_USER_POWER4 | PPC_FEATURE_HAS_ALTIVEC_COMP, + .mmu_features = MMU_FTRS_PPC970, + .icache_bsize = 128, + .dcache_bsize = 128, + .num_pmcs = 8, + .pmc_type = PPC_PMC_IBM, + .cpu_setup = __setup_cpu_ppc970, + .cpu_restore = __restore_cpu_ppc970, + .platform = "ppc970", + }, + { /* PPC970MP DD1.0 - no DEEPNAP, use regular 970 init */ + .pvr_mask = 0xffffffff, + .pvr_value = 0x00440100, + .cpu_name = "PPC970MP", + .cpu_features = CPU_FTRS_PPC970, + .cpu_user_features = COMMON_USER_POWER4 | PPC_FEATURE_HAS_ALTIVEC_COMP, + .mmu_features = MMU_FTRS_PPC970, + .icache_bsize = 128, + .dcache_bsize = 128, + .num_pmcs = 8, + .pmc_type = PPC_PMC_IBM, + .cpu_setup = __setup_cpu_ppc970, + .cpu_restore = __restore_cpu_ppc970, + .platform = "ppc970", + }, + { /* PPC970MP */ + .pvr_mask = 0xffff0000, + .pvr_value = 0x00440000, + .cpu_name = "PPC970MP", + .cpu_features = CPU_FTRS_PPC970, + .cpu_user_features = 
COMMON_USER_POWER4 | PPC_FEATURE_HAS_ALTIVEC_COMP, + .mmu_features = MMU_FTRS_PPC970, + .icache_bsize = 128, + .dcache_bsize = 128, + .num_pmcs = 8, + .pmc_type = PPC_PMC_IBM, + .cpu_setup = __setup_cpu_ppc970MP, + .cpu_restore = __restore_cpu_ppc970, + .platform = "ppc970", + }, + { /* PPC970GX */ + .pvr_mask = 0xffff0000, + .pvr_value = 0x00450000, + .cpu_name = "PPC970GX", + .cpu_features = CPU_FTRS_PPC970, + .cpu_user_features = COMMON_USER_POWER4 | PPC_FEATURE_HAS_ALTIVEC_COMP, + .mmu_features = MMU_FTRS_PPC970, + .icache_bsize = 128, + .dcache_bsize = 128, + .num_pmcs = 8, + .pmc_type = PPC_PMC_IBM, + .cpu_setup = __setup_cpu_ppc970, + .platform = "ppc970", + }, + { /* Power5 GR */ + .pvr_mask = 0xffff0000, + .pvr_value = 0x003a0000, + .cpu_name = "POWER5 (gr)", + .cpu_features = CPU_FTRS_POWER5, + .cpu_user_features = COMMON_USER_POWER5, + .mmu_features = MMU_FTRS_POWER5, + .icache_bsize = 128, + .dcache_bsize = 128, + .num_pmcs = 6, + .pmc_type = PPC_PMC_IBM, + .platform = "power5", + }, + { /* Power5++ */ + .pvr_mask = 0xffffff00, + .pvr_value = 0x003b0300, + .cpu_name = "POWER5+ (gs)", + .cpu_features = CPU_FTRS_POWER5, + .cpu_user_features = COMMON_USER_POWER5_PLUS, + .mmu_features = MMU_FTRS_POWER5, + .icache_bsize = 128, + .dcache_bsize = 128, + .num_pmcs = 6, + .platform = "power5+", + }, + { /* Power5 GS */ + .pvr_mask = 0xffff0000, + .pvr_value = 0x003b0000, + .cpu_name = "POWER5+ (gs)", + .cpu_features = CPU_FTRS_POWER5, + .cpu_user_features = COMMON_USER_POWER5_PLUS, + .mmu_features = MMU_FTRS_POWER5, + .icache_bsize = 128, + .dcache_bsize = 128, + .num_pmcs = 6, + .pmc_type = PPC_PMC_IBM, + .platform = "power5+", + }, + { /* POWER6 in P5+ mode; 2.04-compliant processor */ + .pvr_mask = 0xffffffff, + .pvr_value = 0x0f000001, + .cpu_name = "POWER5+", + .cpu_features = CPU_FTRS_POWER5, + .cpu_user_features = COMMON_USER_POWER5_PLUS, + .mmu_features = MMU_FTRS_POWER5, + .icache_bsize = 128, + .dcache_bsize = 128, + .platform = "power5+", + }, + { /* Power6 */ + .pvr_mask = 0xffff0000, + .pvr_value = 0x003e0000, + .cpu_name = "POWER6 (raw)", + .cpu_features = CPU_FTRS_POWER6, + .cpu_user_features = COMMON_USER_POWER6 | PPC_FEATURE_POWER6_EXT, + .mmu_features = MMU_FTRS_POWER6, + .icache_bsize = 128, + .dcache_bsize = 128, + .num_pmcs = 6, + .pmc_type = PPC_PMC_IBM, + .platform = "power6x", + }, + { /* 2.05-compliant processor, i.e. Power6 "architected" mode */ + .pvr_mask = 0xffffffff, + .pvr_value = 0x0f000002, + .cpu_name = "POWER6 (architected)", + .cpu_features = CPU_FTRS_POWER6, + .cpu_user_features = COMMON_USER_POWER6, + .mmu_features = MMU_FTRS_POWER6, + .icache_bsize = 128, + .dcache_bsize = 128, + .platform = "power6", + }, + { /* 2.06-compliant processor, i.e. Power7 "architected" mode */ + .pvr_mask = 0xffffffff, + .pvr_value = 0x0f000003, + .cpu_name = "POWER7 (architected)", + .cpu_features = CPU_FTRS_POWER7, + .cpu_user_features = COMMON_USER_POWER7, + .cpu_user_features2 = COMMON_USER2_POWER7, + .mmu_features = MMU_FTRS_POWER7, + .icache_bsize = 128, + .dcache_bsize = 128, + .cpu_setup = __setup_cpu_power7, + .cpu_restore = __restore_cpu_power7, + .machine_check_early = __machine_check_early_realmode_p7, + .platform = "power7", + }, + { /* 2.07-compliant processor, i.e. 
Power8 "architected" mode */ + .pvr_mask = 0xffffffff, + .pvr_value = 0x0f000004, + .cpu_name = "POWER8 (architected)", + .cpu_features = CPU_FTRS_POWER8, + .cpu_user_features = COMMON_USER_POWER8, + .cpu_user_features2 = COMMON_USER2_POWER8, + .mmu_features = MMU_FTRS_POWER8, + .icache_bsize = 128, + .dcache_bsize = 128, + .cpu_setup = __setup_cpu_power8, + .cpu_restore = __restore_cpu_power8, + .machine_check_early = __machine_check_early_realmode_p8, + .platform = "power8", + }, + { /* 3.00-compliant processor, i.e. Power9 "architected" mode */ + .pvr_mask = 0xffffffff, + .pvr_value = 0x0f000005, + .cpu_name = "POWER9 (architected)", + .cpu_features = CPU_FTRS_POWER9, + .cpu_user_features = COMMON_USER_POWER9, + .cpu_user_features2 = COMMON_USER2_POWER9, + .mmu_features = MMU_FTRS_POWER9, + .icache_bsize = 128, + .dcache_bsize = 128, + .cpu_setup = __setup_cpu_power9, + .cpu_restore = __restore_cpu_power9, + .platform = "power9", + }, + { /* 3.1-compliant processor, i.e. Power10 "architected" mode */ + .pvr_mask = 0xffffffff, + .pvr_value = 0x0f000006, + .cpu_name = "POWER10 (architected)", + .cpu_features = CPU_FTRS_POWER10, + .cpu_user_features = COMMON_USER_POWER10, + .cpu_user_features2 = COMMON_USER2_POWER10, + .mmu_features = MMU_FTRS_POWER10, + .icache_bsize = 128, + .dcache_bsize = 128, + .cpu_setup = __setup_cpu_power10, + .cpu_restore = __restore_cpu_power10, + .platform = "power10", + }, + { /* Power7 */ + .pvr_mask = 0xffff0000, + .pvr_value = 0x003f0000, + .cpu_name = "POWER7 (raw)", + .cpu_features = CPU_FTRS_POWER7, + .cpu_user_features = COMMON_USER_POWER7, + .cpu_user_features2 = COMMON_USER2_POWER7, + .mmu_features = MMU_FTRS_POWER7, + .icache_bsize = 128, + .dcache_bsize = 128, + .num_pmcs = 6, + .pmc_type = PPC_PMC_IBM, + .cpu_setup = __setup_cpu_power7, + .cpu_restore = __restore_cpu_power7, + .machine_check_early = __machine_check_early_realmode_p7, + .platform = "power7", + }, + { /* Power7+ */ + .pvr_mask = 0xffff0000, + .pvr_value = 0x004A0000, + .cpu_name = "POWER7+ (raw)", + .cpu_features = CPU_FTRS_POWER7, + .cpu_user_features = COMMON_USER_POWER7, + .cpu_user_features2 = COMMON_USER2_POWER7, + .mmu_features = MMU_FTRS_POWER7, + .icache_bsize = 128, + .dcache_bsize = 128, + .num_pmcs = 6, + .pmc_type = PPC_PMC_IBM, + .cpu_setup = __setup_cpu_power7, + .cpu_restore = __restore_cpu_power7, + .machine_check_early = __machine_check_early_realmode_p7, + .platform = "power7+", + }, + { /* Power8E */ + .pvr_mask = 0xffff0000, + .pvr_value = 0x004b0000, + .cpu_name = "POWER8E (raw)", + .cpu_features = CPU_FTRS_POWER8E, + .cpu_user_features = COMMON_USER_POWER8, + .cpu_user_features2 = COMMON_USER2_POWER8, + .mmu_features = MMU_FTRS_POWER8, + .icache_bsize = 128, + .dcache_bsize = 128, + .num_pmcs = 6, + .pmc_type = PPC_PMC_IBM, + .cpu_setup = __setup_cpu_power8, + .cpu_restore = __restore_cpu_power8, + .machine_check_early = __machine_check_early_realmode_p8, + .platform = "power8", + }, + { /* Power8NVL */ + .pvr_mask = 0xffff0000, + .pvr_value = 0x004c0000, + .cpu_name = "POWER8NVL (raw)", + .cpu_features = CPU_FTRS_POWER8, + .cpu_user_features = COMMON_USER_POWER8, + .cpu_user_features2 = COMMON_USER2_POWER8, + .mmu_features = MMU_FTRS_POWER8, + .icache_bsize = 128, + .dcache_bsize = 128, + .num_pmcs = 6, + .pmc_type = PPC_PMC_IBM, + .cpu_setup = __setup_cpu_power8, + .cpu_restore = __restore_cpu_power8, + .machine_check_early = __machine_check_early_realmode_p8, + .platform = "power8", + }, + { /* Power8 */ + .pvr_mask = 0xffff0000, + .pvr_value = 0x004d0000, + 
.cpu_name = "POWER8 (raw)", + .cpu_features = CPU_FTRS_POWER8, + .cpu_user_features = COMMON_USER_POWER8, + .cpu_user_features2 = COMMON_USER2_POWER8, + .mmu_features = MMU_FTRS_POWER8, + .icache_bsize = 128, + .dcache_bsize = 128, + .num_pmcs = 6, + .pmc_type = PPC_PMC_IBM, + .cpu_setup = __setup_cpu_power8, + .cpu_restore = __restore_cpu_power8, + .machine_check_early = __machine_check_early_realmode_p8, + .platform = "power8", + }, + { /* Power9 DD2.0 */ + .pvr_mask = 0xffffefff, + .pvr_value = 0x004e0200, + .cpu_name = "POWER9 (raw)", + .cpu_features = CPU_FTRS_POWER9_DD2_0, + .cpu_user_features = COMMON_USER_POWER9, + .cpu_user_features2 = COMMON_USER2_POWER9, + .mmu_features = MMU_FTRS_POWER9, + .icache_bsize = 128, + .dcache_bsize = 128, + .num_pmcs = 6, + .pmc_type = PPC_PMC_IBM, + .cpu_setup = __setup_cpu_power9, + .cpu_restore = __restore_cpu_power9, + .machine_check_early = __machine_check_early_realmode_p9, + .platform = "power9", + }, + { /* Power9 DD 2.1 */ + .pvr_mask = 0xffffefff, + .pvr_value = 0x004e0201, + .cpu_name = "POWER9 (raw)", + .cpu_features = CPU_FTRS_POWER9_DD2_1, + .cpu_user_features = COMMON_USER_POWER9, + .cpu_user_features2 = COMMON_USER2_POWER9, + .mmu_features = MMU_FTRS_POWER9, + .icache_bsize = 128, + .dcache_bsize = 128, + .num_pmcs = 6, + .pmc_type = PPC_PMC_IBM, + .cpu_setup = __setup_cpu_power9, + .cpu_restore = __restore_cpu_power9, + .machine_check_early = __machine_check_early_realmode_p9, + .platform = "power9", + }, + { /* Power9 DD2.2 */ + .pvr_mask = 0xffffefff, + .pvr_value = 0x004e0202, + .cpu_name = "POWER9 (raw)", + .cpu_features = CPU_FTRS_POWER9_DD2_2, + .cpu_user_features = COMMON_USER_POWER9, + .cpu_user_features2 = COMMON_USER2_POWER9, + .mmu_features = MMU_FTRS_POWER9, + .icache_bsize = 128, + .dcache_bsize = 128, + .num_pmcs = 6, + .pmc_type = PPC_PMC_IBM, + .cpu_setup = __setup_cpu_power9, + .cpu_restore = __restore_cpu_power9, + .machine_check_early = __machine_check_early_realmode_p9, + .platform = "power9", + }, + { /* Power9 DD2.3 or later */ + .pvr_mask = 0xffff0000, + .pvr_value = 0x004e0000, + .cpu_name = "POWER9 (raw)", + .cpu_features = CPU_FTRS_POWER9_DD2_3, + .cpu_user_features = COMMON_USER_POWER9, + .cpu_user_features2 = COMMON_USER2_POWER9, + .mmu_features = MMU_FTRS_POWER9, + .icache_bsize = 128, + .dcache_bsize = 128, + .num_pmcs = 6, + .pmc_type = PPC_PMC_IBM, + .cpu_setup = __setup_cpu_power9, + .cpu_restore = __restore_cpu_power9, + .machine_check_early = __machine_check_early_realmode_p9, + .platform = "power9", + }, + { /* Power10 */ + .pvr_mask = 0xffff0000, + .pvr_value = 0x00800000, + .cpu_name = "POWER10 (raw)", + .cpu_features = CPU_FTRS_POWER10, + .cpu_user_features = COMMON_USER_POWER10, + .cpu_user_features2 = COMMON_USER2_POWER10, + .mmu_features = MMU_FTRS_POWER10, + .icache_bsize = 128, + .dcache_bsize = 128, + .num_pmcs = 6, + .pmc_type = PPC_PMC_IBM, + .cpu_setup = __setup_cpu_power10, + .cpu_restore = __restore_cpu_power10, + .machine_check_early = __machine_check_early_realmode_p10, + .platform = "power10", + }, + { /* Cell Broadband Engine */ + .pvr_mask = 0xffff0000, + .pvr_value = 0x00700000, + .cpu_name = "Cell Broadband Engine", + .cpu_features = CPU_FTRS_CELL, + .cpu_user_features = COMMON_USER_PPC64 | PPC_FEATURE_CELL | + PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_SMT, + .mmu_features = MMU_FTRS_CELL, + .icache_bsize = 128, + .dcache_bsize = 128, + .num_pmcs = 4, + .pmc_type = PPC_PMC_IBM, + .platform = "ppc-cell-be", + }, + { /* PA Semi PA6T */ + .pvr_mask = 0x7fff0000, + .pvr_value = 
0x00900000, + .cpu_name = "PA6T", + .cpu_features = CPU_FTRS_PA6T, + .cpu_user_features = COMMON_USER_PA6T, + .mmu_features = MMU_FTRS_PA6T, + .icache_bsize = 64, + .dcache_bsize = 64, + .num_pmcs = 6, + .pmc_type = PPC_PMC_PA6T, + .cpu_setup = __setup_cpu_pa6t, + .cpu_restore = __restore_cpu_pa6t, + .platform = "pa6t", + }, + { /* default match */ + .pvr_mask = 0x00000000, + .pvr_value = 0x00000000, + .cpu_name = "POWER5 (compatible)", + .cpu_features = CPU_FTRS_COMPATIBLE, + .cpu_user_features = COMMON_USER_PPC64, + .mmu_features = MMU_FTRS_POWER, + .icache_bsize = 128, + .dcache_bsize = 128, + .num_pmcs = 6, + .pmc_type = PPC_PMC_IBM, + .platform = "power5", + } +}; diff --git a/arch/powerpc/kernel/cpu_specs_e500.h b/arch/powerpc/kernel/cpu_specs_e500.h new file mode 100644 index 000000000000..1f366f2a0215 --- /dev/null +++ b/arch/powerpc/kernel/cpu_specs_e500.h @@ -0,0 +1,135 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (C) 2001 Ben. Herrenschmidt (benh@kernel.crashing.org) + * + * Modifications for ppc64: + * Copyright (C) 2003 Dave Engebretsen + */ + +#ifdef CONFIG_PPC64 +#define COMMON_USER_BOOKE (PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | \ + PPC_FEATURE_HAS_FPU | PPC_FEATURE_64) +#else +#define COMMON_USER_BOOKE (PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | \ + PPC_FEATURE_BOOKE) +#endif + +static struct cpu_spec cpu_specs[] __initdata = { +#ifdef CONFIG_PPC32 +#ifndef CONFIG_PPC_E500MC + { /* e500 */ + .pvr_mask = 0xffff0000, + .pvr_value = 0x80200000, + .cpu_name = "e500", + .cpu_features = CPU_FTRS_E500, + .cpu_user_features = COMMON_USER_BOOKE | + PPC_FEATURE_HAS_SPE_COMP | + PPC_FEATURE_HAS_EFP_SINGLE_COMP, + .cpu_user_features2 = PPC_FEATURE2_ISEL, + .mmu_features = MMU_FTR_TYPE_FSL_E, + .icache_bsize = 32, + .dcache_bsize = 32, + .num_pmcs = 4, + .cpu_setup = __setup_cpu_e500v1, + .machine_check = machine_check_e500, + .platform = "ppc8540", + }, + { /* e500v2 */ + .pvr_mask = 0xffff0000, + .pvr_value = 0x80210000, + .cpu_name = "e500v2", + .cpu_features = CPU_FTRS_E500_2, + .cpu_user_features = COMMON_USER_BOOKE | + PPC_FEATURE_HAS_SPE_COMP | + PPC_FEATURE_HAS_EFP_SINGLE_COMP | + PPC_FEATURE_HAS_EFP_DOUBLE_COMP, + .cpu_user_features2 = PPC_FEATURE2_ISEL, + .mmu_features = MMU_FTR_TYPE_FSL_E | MMU_FTR_BIG_PHYS, + .icache_bsize = 32, + .dcache_bsize = 32, + .num_pmcs = 4, + .cpu_setup = __setup_cpu_e500v2, + .machine_check = machine_check_e500, + .platform = "ppc8548", + .cpu_down_flush = cpu_down_flush_e500v2, + }, +#else + { /* e500mc */ + .pvr_mask = 0xffff0000, + .pvr_value = 0x80230000, + .cpu_name = "e500mc", + .cpu_features = CPU_FTRS_E500MC, + .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, + .cpu_user_features2 = PPC_FEATURE2_ISEL, + .mmu_features = MMU_FTR_TYPE_FSL_E | MMU_FTR_BIG_PHYS | + MMU_FTR_USE_TLBILX, + .icache_bsize = 64, + .dcache_bsize = 64, + .num_pmcs = 4, + .cpu_setup = __setup_cpu_e500mc, + .machine_check = machine_check_e500mc, + .platform = "ppce500mc", + .cpu_down_flush = cpu_down_flush_e500mc, + }, +#endif /* CONFIG_PPC_E500MC */ +#endif /* CONFIG_PPC32 */ +#ifdef CONFIG_PPC_E500MC + { /* e5500 */ + .pvr_mask = 0xffff0000, + .pvr_value = 0x80240000, + .cpu_name = "e5500", + .cpu_features = CPU_FTRS_E5500, + .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, + .cpu_user_features2 = PPC_FEATURE2_ISEL, + .mmu_features = MMU_FTR_TYPE_FSL_E | MMU_FTR_BIG_PHYS | + MMU_FTR_USE_TLBILX, + .icache_bsize = 64, + .dcache_bsize = 64, + .num_pmcs = 4, + .cpu_setup = __setup_cpu_e5500, +#ifndef CONFIG_PPC32 + .cpu_restore 
= __restore_cpu_e5500, +#endif + .machine_check = machine_check_e500mc, + .platform = "ppce5500", + .cpu_down_flush = cpu_down_flush_e5500, + }, + { /* e6500 */ + .pvr_mask = 0xffff0000, + .pvr_value = 0x80400000, + .cpu_name = "e6500", + .cpu_features = CPU_FTRS_E6500, + .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU | + PPC_FEATURE_HAS_ALTIVEC_COMP, + .cpu_user_features2 = PPC_FEATURE2_ISEL, + .mmu_features = MMU_FTR_TYPE_FSL_E | MMU_FTR_BIG_PHYS | + MMU_FTR_USE_TLBILX, + .icache_bsize = 64, + .dcache_bsize = 64, + .num_pmcs = 6, + .cpu_setup = __setup_cpu_e6500, +#ifndef CONFIG_PPC32 + .cpu_restore = __restore_cpu_e6500, +#endif + .machine_check = machine_check_e500mc, + .platform = "ppce6500", + .cpu_down_flush = cpu_down_flush_e6500, + }, +#endif /* CONFIG_PPC_E500MC */ +#ifdef CONFIG_PPC32 + { /* default match */ + .pvr_mask = 0x00000000, + .pvr_value = 0x00000000, + .cpu_name = "(generic E500 PPC)", + .cpu_features = CPU_FTRS_E500, + .cpu_user_features = COMMON_USER_BOOKE | + PPC_FEATURE_HAS_SPE_COMP | + PPC_FEATURE_HAS_EFP_SINGLE_COMP, + .mmu_features = MMU_FTR_TYPE_FSL_E, + .icache_bsize = 32, + .dcache_bsize = 32, + .machine_check = machine_check_e500, + .platform = "powerpc", + } +#endif /* CONFIG_PPC32 */ +}; diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c index 9229e0930332..8a32bffefa5b 100644 --- a/arch/powerpc/kernel/cputable.c +++ b/arch/powerpc/kernel/cputable.c @@ -28,1882 +28,7 @@ EXPORT_SYMBOL(cur_cpu_spec); /* The platform string corresponding to the real PVR */ const char *powerpc_base_platform; -/* NOTE: - * Unlike ppc32, ppc64 will only call this once for the boot CPU, it's - * the responsibility of the appropriate CPU save/restore functions to - * eventually copy these settings over. Those save/restore aren't yet - * part of the cputable though. That has to be fixed for both ppc32 - * and ppc64 - */ - -/* This table only contains "desktop" CPUs, it need to be filled with embedded - * ones as well... 
- */ -#define COMMON_USER (PPC_FEATURE_32 | PPC_FEATURE_HAS_FPU | \ - PPC_FEATURE_HAS_MMU) -#define COMMON_USER_PPC64 (COMMON_USER | PPC_FEATURE_64) -#define COMMON_USER_POWER4 (COMMON_USER_PPC64 | PPC_FEATURE_POWER4) -#define COMMON_USER_POWER5 (COMMON_USER_PPC64 | PPC_FEATURE_POWER5 |\ - PPC_FEATURE_SMT | PPC_FEATURE_ICACHE_SNOOP) -#define COMMON_USER_POWER5_PLUS (COMMON_USER_PPC64 | PPC_FEATURE_POWER5_PLUS|\ - PPC_FEATURE_SMT | PPC_FEATURE_ICACHE_SNOOP) -#define COMMON_USER_POWER6 (COMMON_USER_PPC64 | PPC_FEATURE_ARCH_2_05 |\ - PPC_FEATURE_SMT | PPC_FEATURE_ICACHE_SNOOP | \ - PPC_FEATURE_TRUE_LE | \ - PPC_FEATURE_PSERIES_PERFMON_COMPAT) -#define COMMON_USER_POWER7 (COMMON_USER_PPC64 | PPC_FEATURE_ARCH_2_06 |\ - PPC_FEATURE_SMT | PPC_FEATURE_ICACHE_SNOOP | \ - PPC_FEATURE_TRUE_LE | \ - PPC_FEATURE_PSERIES_PERFMON_COMPAT) -#define COMMON_USER2_POWER7 (PPC_FEATURE2_DSCR) -#define COMMON_USER_POWER8 (COMMON_USER_PPC64 | PPC_FEATURE_ARCH_2_06 |\ - PPC_FEATURE_SMT | PPC_FEATURE_ICACHE_SNOOP | \ - PPC_FEATURE_TRUE_LE | \ - PPC_FEATURE_PSERIES_PERFMON_COMPAT) -#define COMMON_USER2_POWER8 (PPC_FEATURE2_ARCH_2_07 | \ - PPC_FEATURE2_HTM_COMP | \ - PPC_FEATURE2_HTM_NOSC_COMP | \ - PPC_FEATURE2_DSCR | \ - PPC_FEATURE2_ISEL | PPC_FEATURE2_TAR | \ - PPC_FEATURE2_VEC_CRYPTO) -#define COMMON_USER_PA6T (COMMON_USER_PPC64 | PPC_FEATURE_PA6T |\ - PPC_FEATURE_TRUE_LE | \ - PPC_FEATURE_HAS_ALTIVEC_COMP) -#define COMMON_USER_POWER9 COMMON_USER_POWER8 -#define COMMON_USER2_POWER9 (COMMON_USER2_POWER8 | \ - PPC_FEATURE2_ARCH_3_00 | \ - PPC_FEATURE2_HAS_IEEE128 | \ - PPC_FEATURE2_DARN | \ - PPC_FEATURE2_SCV) -#define COMMON_USER_POWER10 COMMON_USER_POWER9 -#define COMMON_USER2_POWER10 (PPC_FEATURE2_ARCH_3_1 | \ - PPC_FEATURE2_MMA | \ - PPC_FEATURE2_ARCH_3_00 | \ - PPC_FEATURE2_HAS_IEEE128 | \ - PPC_FEATURE2_DARN | \ - PPC_FEATURE2_SCV | \ - PPC_FEATURE2_ARCH_2_07 | \ - PPC_FEATURE2_DSCR | \ - PPC_FEATURE2_ISEL | PPC_FEATURE2_TAR | \ - PPC_FEATURE2_VEC_CRYPTO) - -#ifdef CONFIG_PPC_BOOK3E_64 -#define COMMON_USER_BOOKE (COMMON_USER_PPC64 | PPC_FEATURE_BOOKE) -#else -#define COMMON_USER_BOOKE (PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | \ - PPC_FEATURE_BOOKE) -#endif - -static struct cpu_spec __initdata cpu_specs[] = { -#ifdef CONFIG_PPC_BOOK3S_64 - { /* PPC970 */ - .pvr_mask = 0xffff0000, - .pvr_value = 0x00390000, - .cpu_name = "PPC970", - .cpu_features = CPU_FTRS_PPC970, - .cpu_user_features = COMMON_USER_POWER4 | - PPC_FEATURE_HAS_ALTIVEC_COMP, - .mmu_features = MMU_FTRS_PPC970, - .icache_bsize = 128, - .dcache_bsize = 128, - .num_pmcs = 8, - .pmc_type = PPC_PMC_IBM, - .cpu_setup = __setup_cpu_ppc970, - .cpu_restore = __restore_cpu_ppc970, - .platform = "ppc970", - }, - { /* PPC970FX */ - .pvr_mask = 0xffff0000, - .pvr_value = 0x003c0000, - .cpu_name = "PPC970FX", - .cpu_features = CPU_FTRS_PPC970, - .cpu_user_features = COMMON_USER_POWER4 | - PPC_FEATURE_HAS_ALTIVEC_COMP, - .mmu_features = MMU_FTRS_PPC970, - .icache_bsize = 128, - .dcache_bsize = 128, - .num_pmcs = 8, - .pmc_type = PPC_PMC_IBM, - .cpu_setup = __setup_cpu_ppc970, - .cpu_restore = __restore_cpu_ppc970, - .platform = "ppc970", - }, - { /* PPC970MP DD1.0 - no DEEPNAP, use regular 970 init */ - .pvr_mask = 0xffffffff, - .pvr_value = 0x00440100, - .cpu_name = "PPC970MP", - .cpu_features = CPU_FTRS_PPC970, - .cpu_user_features = COMMON_USER_POWER4 | - PPC_FEATURE_HAS_ALTIVEC_COMP, - .mmu_features = MMU_FTRS_PPC970, - .icache_bsize = 128, - .dcache_bsize = 128, - .num_pmcs = 8, - .pmc_type = PPC_PMC_IBM, - .cpu_setup = __setup_cpu_ppc970, - .cpu_restore = 
__restore_cpu_ppc970, - .platform = "ppc970", - }, - { /* PPC970MP */ - .pvr_mask = 0xffff0000, - .pvr_value = 0x00440000, - .cpu_name = "PPC970MP", - .cpu_features = CPU_FTRS_PPC970, - .cpu_user_features = COMMON_USER_POWER4 | - PPC_FEATURE_HAS_ALTIVEC_COMP, - .mmu_features = MMU_FTRS_PPC970, - .icache_bsize = 128, - .dcache_bsize = 128, - .num_pmcs = 8, - .pmc_type = PPC_PMC_IBM, - .cpu_setup = __setup_cpu_ppc970MP, - .cpu_restore = __restore_cpu_ppc970, - .platform = "ppc970", - }, - { /* PPC970GX */ - .pvr_mask = 0xffff0000, - .pvr_value = 0x00450000, - .cpu_name = "PPC970GX", - .cpu_features = CPU_FTRS_PPC970, - .cpu_user_features = COMMON_USER_POWER4 | - PPC_FEATURE_HAS_ALTIVEC_COMP, - .mmu_features = MMU_FTRS_PPC970, - .icache_bsize = 128, - .dcache_bsize = 128, - .num_pmcs = 8, - .pmc_type = PPC_PMC_IBM, - .cpu_setup = __setup_cpu_ppc970, - .platform = "ppc970", - }, - { /* Power5 GR */ - .pvr_mask = 0xffff0000, - .pvr_value = 0x003a0000, - .cpu_name = "POWER5 (gr)", - .cpu_features = CPU_FTRS_POWER5, - .cpu_user_features = COMMON_USER_POWER5, - .mmu_features = MMU_FTRS_POWER5, - .icache_bsize = 128, - .dcache_bsize = 128, - .num_pmcs = 6, - .pmc_type = PPC_PMC_IBM, - .platform = "power5", - }, - { /* Power5++ */ - .pvr_mask = 0xffffff00, - .pvr_value = 0x003b0300, - .cpu_name = "POWER5+ (gs)", - .cpu_features = CPU_FTRS_POWER5, - .cpu_user_features = COMMON_USER_POWER5_PLUS, - .mmu_features = MMU_FTRS_POWER5, - .icache_bsize = 128, - .dcache_bsize = 128, - .num_pmcs = 6, - .platform = "power5+", - }, - { /* Power5 GS */ - .pvr_mask = 0xffff0000, - .pvr_value = 0x003b0000, - .cpu_name = "POWER5+ (gs)", - .cpu_features = CPU_FTRS_POWER5, - .cpu_user_features = COMMON_USER_POWER5_PLUS, - .mmu_features = MMU_FTRS_POWER5, - .icache_bsize = 128, - .dcache_bsize = 128, - .num_pmcs = 6, - .pmc_type = PPC_PMC_IBM, - .platform = "power5+", - }, - { /* POWER6 in P5+ mode; 2.04-compliant processor */ - .pvr_mask = 0xffffffff, - .pvr_value = 0x0f000001, - .cpu_name = "POWER5+", - .cpu_features = CPU_FTRS_POWER5, - .cpu_user_features = COMMON_USER_POWER5_PLUS, - .mmu_features = MMU_FTRS_POWER5, - .icache_bsize = 128, - .dcache_bsize = 128, - .platform = "power5+", - }, - { /* Power6 */ - .pvr_mask = 0xffff0000, - .pvr_value = 0x003e0000, - .cpu_name = "POWER6 (raw)", - .cpu_features = CPU_FTRS_POWER6, - .cpu_user_features = COMMON_USER_POWER6 | - PPC_FEATURE_POWER6_EXT, - .mmu_features = MMU_FTRS_POWER6, - .icache_bsize = 128, - .dcache_bsize = 128, - .num_pmcs = 6, - .pmc_type = PPC_PMC_IBM, - .platform = "power6x", - }, - { /* 2.05-compliant processor, i.e. Power6 "architected" mode */ - .pvr_mask = 0xffffffff, - .pvr_value = 0x0f000002, - .cpu_name = "POWER6 (architected)", - .cpu_features = CPU_FTRS_POWER6, - .cpu_user_features = COMMON_USER_POWER6, - .mmu_features = MMU_FTRS_POWER6, - .icache_bsize = 128, - .dcache_bsize = 128, - .platform = "power6", - }, - { /* 2.06-compliant processor, i.e. Power7 "architected" mode */ - .pvr_mask = 0xffffffff, - .pvr_value = 0x0f000003, - .cpu_name = "POWER7 (architected)", - .cpu_features = CPU_FTRS_POWER7, - .cpu_user_features = COMMON_USER_POWER7, - .cpu_user_features2 = COMMON_USER2_POWER7, - .mmu_features = MMU_FTRS_POWER7, - .icache_bsize = 128, - .dcache_bsize = 128, - .cpu_setup = __setup_cpu_power7, - .cpu_restore = __restore_cpu_power7, - .machine_check_early = __machine_check_early_realmode_p7, - .platform = "power7", - }, - { /* 2.07-compliant processor, i.e. 
Power8 "architected" mode */ - .pvr_mask = 0xffffffff, - .pvr_value = 0x0f000004, - .cpu_name = "POWER8 (architected)", - .cpu_features = CPU_FTRS_POWER8, - .cpu_user_features = COMMON_USER_POWER8, - .cpu_user_features2 = COMMON_USER2_POWER8, - .mmu_features = MMU_FTRS_POWER8, - .icache_bsize = 128, - .dcache_bsize = 128, - .cpu_setup = __setup_cpu_power8, - .cpu_restore = __restore_cpu_power8, - .machine_check_early = __machine_check_early_realmode_p8, - .platform = "power8", - }, - { /* 3.00-compliant processor, i.e. Power9 "architected" mode */ - .pvr_mask = 0xffffffff, - .pvr_value = 0x0f000005, - .cpu_name = "POWER9 (architected)", - .cpu_features = CPU_FTRS_POWER9, - .cpu_user_features = COMMON_USER_POWER9, - .cpu_user_features2 = COMMON_USER2_POWER9, - .mmu_features = MMU_FTRS_POWER9, - .icache_bsize = 128, - .dcache_bsize = 128, - .cpu_setup = __setup_cpu_power9, - .cpu_restore = __restore_cpu_power9, - .platform = "power9", - }, - { /* 3.1-compliant processor, i.e. Power10 "architected" mode */ - .pvr_mask = 0xffffffff, - .pvr_value = 0x0f000006, - .cpu_name = "POWER10 (architected)", - .cpu_features = CPU_FTRS_POWER10, - .cpu_user_features = COMMON_USER_POWER10, - .cpu_user_features2 = COMMON_USER2_POWER10, - .mmu_features = MMU_FTRS_POWER10, - .icache_bsize = 128, - .dcache_bsize = 128, - .cpu_setup = __setup_cpu_power10, - .cpu_restore = __restore_cpu_power10, - .platform = "power10", - }, - { /* Power7 */ - .pvr_mask = 0xffff0000, - .pvr_value = 0x003f0000, - .cpu_name = "POWER7 (raw)", - .cpu_features = CPU_FTRS_POWER7, - .cpu_user_features = COMMON_USER_POWER7, - .cpu_user_features2 = COMMON_USER2_POWER7, - .mmu_features = MMU_FTRS_POWER7, - .icache_bsize = 128, - .dcache_bsize = 128, - .num_pmcs = 6, - .pmc_type = PPC_PMC_IBM, - .cpu_setup = __setup_cpu_power7, - .cpu_restore = __restore_cpu_power7, - .machine_check_early = __machine_check_early_realmode_p7, - .platform = "power7", - }, - { /* Power7+ */ - .pvr_mask = 0xffff0000, - .pvr_value = 0x004A0000, - .cpu_name = "POWER7+ (raw)", - .cpu_features = CPU_FTRS_POWER7, - .cpu_user_features = COMMON_USER_POWER7, - .cpu_user_features2 = COMMON_USER2_POWER7, - .mmu_features = MMU_FTRS_POWER7, - .icache_bsize = 128, - .dcache_bsize = 128, - .num_pmcs = 6, - .pmc_type = PPC_PMC_IBM, - .cpu_setup = __setup_cpu_power7, - .cpu_restore = __restore_cpu_power7, - .machine_check_early = __machine_check_early_realmode_p7, - .platform = "power7+", - }, - { /* Power8E */ - .pvr_mask = 0xffff0000, - .pvr_value = 0x004b0000, - .cpu_name = "POWER8E (raw)", - .cpu_features = CPU_FTRS_POWER8E, - .cpu_user_features = COMMON_USER_POWER8, - .cpu_user_features2 = COMMON_USER2_POWER8, - .mmu_features = MMU_FTRS_POWER8, - .icache_bsize = 128, - .dcache_bsize = 128, - .num_pmcs = 6, - .pmc_type = PPC_PMC_IBM, - .cpu_setup = __setup_cpu_power8, - .cpu_restore = __restore_cpu_power8, - .machine_check_early = __machine_check_early_realmode_p8, - .platform = "power8", - }, - { /* Power8NVL */ - .pvr_mask = 0xffff0000, - .pvr_value = 0x004c0000, - .cpu_name = "POWER8NVL (raw)", - .cpu_features = CPU_FTRS_POWER8, - .cpu_user_features = COMMON_USER_POWER8, - .cpu_user_features2 = COMMON_USER2_POWER8, - .mmu_features = MMU_FTRS_POWER8, - .icache_bsize = 128, - .dcache_bsize = 128, - .num_pmcs = 6, - .pmc_type = PPC_PMC_IBM, - .cpu_setup = __setup_cpu_power8, - .cpu_restore = __restore_cpu_power8, - .machine_check_early = __machine_check_early_realmode_p8, - .platform = "power8", - }, - { /* Power8 */ - .pvr_mask = 0xffff0000, - .pvr_value = 0x004d0000, - 
.cpu_name = "POWER8 (raw)", - .cpu_features = CPU_FTRS_POWER8, - .cpu_user_features = COMMON_USER_POWER8, - .cpu_user_features2 = COMMON_USER2_POWER8, - .mmu_features = MMU_FTRS_POWER8, - .icache_bsize = 128, - .dcache_bsize = 128, - .num_pmcs = 6, - .pmc_type = PPC_PMC_IBM, - .cpu_setup = __setup_cpu_power8, - .cpu_restore = __restore_cpu_power8, - .machine_check_early = __machine_check_early_realmode_p8, - .platform = "power8", - }, - { /* Power9 DD2.0 */ - .pvr_mask = 0xffffefff, - .pvr_value = 0x004e0200, - .cpu_name = "POWER9 (raw)", - .cpu_features = CPU_FTRS_POWER9_DD2_0, - .cpu_user_features = COMMON_USER_POWER9, - .cpu_user_features2 = COMMON_USER2_POWER9, - .mmu_features = MMU_FTRS_POWER9, - .icache_bsize = 128, - .dcache_bsize = 128, - .num_pmcs = 6, - .pmc_type = PPC_PMC_IBM, - .cpu_setup = __setup_cpu_power9, - .cpu_restore = __restore_cpu_power9, - .machine_check_early = __machine_check_early_realmode_p9, - .platform = "power9", - }, - { /* Power9 DD 2.1 */ - .pvr_mask = 0xffffefff, - .pvr_value = 0x004e0201, - .cpu_name = "POWER9 (raw)", - .cpu_features = CPU_FTRS_POWER9_DD2_1, - .cpu_user_features = COMMON_USER_POWER9, - .cpu_user_features2 = COMMON_USER2_POWER9, - .mmu_features = MMU_FTRS_POWER9, - .icache_bsize = 128, - .dcache_bsize = 128, - .num_pmcs = 6, - .pmc_type = PPC_PMC_IBM, - .cpu_setup = __setup_cpu_power9, - .cpu_restore = __restore_cpu_power9, - .machine_check_early = __machine_check_early_realmode_p9, - .platform = "power9", - }, - { /* Power9 DD2.2 */ - .pvr_mask = 0xffffefff, - .pvr_value = 0x004e0202, - .cpu_name = "POWER9 (raw)", - .cpu_features = CPU_FTRS_POWER9_DD2_2, - .cpu_user_features = COMMON_USER_POWER9, - .cpu_user_features2 = COMMON_USER2_POWER9, - .mmu_features = MMU_FTRS_POWER9, - .icache_bsize = 128, - .dcache_bsize = 128, - .num_pmcs = 6, - .pmc_type = PPC_PMC_IBM, - .cpu_setup = __setup_cpu_power9, - .cpu_restore = __restore_cpu_power9, - .machine_check_early = __machine_check_early_realmode_p9, - .platform = "power9", - }, - { /* Power9 DD2.3 or later */ - .pvr_mask = 0xffff0000, - .pvr_value = 0x004e0000, - .cpu_name = "POWER9 (raw)", - .cpu_features = CPU_FTRS_POWER9_DD2_3, - .cpu_user_features = COMMON_USER_POWER9, - .cpu_user_features2 = COMMON_USER2_POWER9, - .mmu_features = MMU_FTRS_POWER9, - .icache_bsize = 128, - .dcache_bsize = 128, - .num_pmcs = 6, - .pmc_type = PPC_PMC_IBM, - .cpu_setup = __setup_cpu_power9, - .cpu_restore = __restore_cpu_power9, - .machine_check_early = __machine_check_early_realmode_p9, - .platform = "power9", - }, - { /* Power10 */ - .pvr_mask = 0xffff0000, - .pvr_value = 0x00800000, - .cpu_name = "POWER10 (raw)", - .cpu_features = CPU_FTRS_POWER10, - .cpu_user_features = COMMON_USER_POWER10, - .cpu_user_features2 = COMMON_USER2_POWER10, - .mmu_features = MMU_FTRS_POWER10, - .icache_bsize = 128, - .dcache_bsize = 128, - .num_pmcs = 6, - .pmc_type = PPC_PMC_IBM, - .cpu_setup = __setup_cpu_power10, - .cpu_restore = __restore_cpu_power10, - .machine_check_early = __machine_check_early_realmode_p10, - .platform = "power10", - }, - { /* Cell Broadband Engine */ - .pvr_mask = 0xffff0000, - .pvr_value = 0x00700000, - .cpu_name = "Cell Broadband Engine", - .cpu_features = CPU_FTRS_CELL, - .cpu_user_features = COMMON_USER_PPC64 | - PPC_FEATURE_CELL | PPC_FEATURE_HAS_ALTIVEC_COMP | - PPC_FEATURE_SMT, - .mmu_features = MMU_FTRS_CELL, - .icache_bsize = 128, - .dcache_bsize = 128, - .num_pmcs = 4, - .pmc_type = PPC_PMC_IBM, - .platform = "ppc-cell-be", - }, - { /* PA Semi PA6T */ - .pvr_mask = 0x7fff0000, - .pvr_value = 
0x00900000, - .cpu_name = "PA6T", - .cpu_features = CPU_FTRS_PA6T, - .cpu_user_features = COMMON_USER_PA6T, - .mmu_features = MMU_FTRS_PA6T, - .icache_bsize = 64, - .dcache_bsize = 64, - .num_pmcs = 6, - .pmc_type = PPC_PMC_PA6T, - .cpu_setup = __setup_cpu_pa6t, - .cpu_restore = __restore_cpu_pa6t, - .platform = "pa6t", - }, - { /* default match */ - .pvr_mask = 0x00000000, - .pvr_value = 0x00000000, - .cpu_name = "POWER5 (compatible)", - .cpu_features = CPU_FTRS_COMPATIBLE, - .cpu_user_features = COMMON_USER_PPC64, - .mmu_features = MMU_FTRS_POWER, - .icache_bsize = 128, - .dcache_bsize = 128, - .num_pmcs = 6, - .pmc_type = PPC_PMC_IBM, - .platform = "power5", - } -#endif /* CONFIG_PPC_BOOK3S_64 */ - -#ifdef CONFIG_PPC32 -#ifdef CONFIG_PPC_BOOK3S_32 -#ifdef CONFIG_PPC_BOOK3S_604 - { /* 604 */ - .pvr_mask = 0xffff0000, - .pvr_value = 0x00040000, - .cpu_name = "604", - .cpu_features = CPU_FTRS_604, - .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, - .mmu_features = MMU_FTR_HPTE_TABLE, - .icache_bsize = 32, - .dcache_bsize = 32, - .num_pmcs = 2, - .cpu_setup = __setup_cpu_604, - .machine_check = machine_check_generic, - .platform = "ppc604", - }, - { /* 604e */ - .pvr_mask = 0xfffff000, - .pvr_value = 0x00090000, - .cpu_name = "604e", - .cpu_features = CPU_FTRS_604, - .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, - .mmu_features = MMU_FTR_HPTE_TABLE, - .icache_bsize = 32, - .dcache_bsize = 32, - .num_pmcs = 4, - .cpu_setup = __setup_cpu_604, - .machine_check = machine_check_generic, - .platform = "ppc604", - }, - { /* 604r */ - .pvr_mask = 0xffff0000, - .pvr_value = 0x00090000, - .cpu_name = "604r", - .cpu_features = CPU_FTRS_604, - .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, - .mmu_features = MMU_FTR_HPTE_TABLE, - .icache_bsize = 32, - .dcache_bsize = 32, - .num_pmcs = 4, - .cpu_setup = __setup_cpu_604, - .machine_check = machine_check_generic, - .platform = "ppc604", - }, - { /* 604ev */ - .pvr_mask = 0xffff0000, - .pvr_value = 0x000a0000, - .cpu_name = "604ev", - .cpu_features = CPU_FTRS_604, - .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, - .mmu_features = MMU_FTR_HPTE_TABLE, - .icache_bsize = 32, - .dcache_bsize = 32, - .num_pmcs = 4, - .cpu_setup = __setup_cpu_604, - .machine_check = machine_check_generic, - .platform = "ppc604", - }, - { /* 740/750 (0x4202, don't support TAU ?) */ - .pvr_mask = 0xffffffff, - .pvr_value = 0x00084202, - .cpu_name = "740/750", - .cpu_features = CPU_FTRS_740_NOTAU, - .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, - .mmu_features = MMU_FTR_HPTE_TABLE, - .icache_bsize = 32, - .dcache_bsize = 32, - .num_pmcs = 4, - .cpu_setup = __setup_cpu_750, - .machine_check = machine_check_generic, - .platform = "ppc750", - }, - { /* 750CX (80100 and 8010x?) 
*/ - .pvr_mask = 0xfffffff0, - .pvr_value = 0x00080100, - .cpu_name = "750CX", - .cpu_features = CPU_FTRS_750, - .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, - .mmu_features = MMU_FTR_HPTE_TABLE, - .icache_bsize = 32, - .dcache_bsize = 32, - .num_pmcs = 4, - .cpu_setup = __setup_cpu_750cx, - .machine_check = machine_check_generic, - .platform = "ppc750", - }, - { /* 750CX (82201 and 82202) */ - .pvr_mask = 0xfffffff0, - .pvr_value = 0x00082200, - .cpu_name = "750CX", - .cpu_features = CPU_FTRS_750, - .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, - .mmu_features = MMU_FTR_HPTE_TABLE, - .icache_bsize = 32, - .dcache_bsize = 32, - .num_pmcs = 4, - .pmc_type = PPC_PMC_IBM, - .cpu_setup = __setup_cpu_750cx, - .machine_check = machine_check_generic, - .platform = "ppc750", - }, - { /* 750CXe (82214) */ - .pvr_mask = 0xfffffff0, - .pvr_value = 0x00082210, - .cpu_name = "750CXe", - .cpu_features = CPU_FTRS_750, - .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, - .mmu_features = MMU_FTR_HPTE_TABLE, - .icache_bsize = 32, - .dcache_bsize = 32, - .num_pmcs = 4, - .pmc_type = PPC_PMC_IBM, - .cpu_setup = __setup_cpu_750cx, - .machine_check = machine_check_generic, - .platform = "ppc750", - }, - { /* 750CXe "Gekko" (83214) */ - .pvr_mask = 0xffffffff, - .pvr_value = 0x00083214, - .cpu_name = "750CXe", - .cpu_features = CPU_FTRS_750, - .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, - .mmu_features = MMU_FTR_HPTE_TABLE, - .icache_bsize = 32, - .dcache_bsize = 32, - .num_pmcs = 4, - .pmc_type = PPC_PMC_IBM, - .cpu_setup = __setup_cpu_750cx, - .machine_check = machine_check_generic, - .platform = "ppc750", - }, - { /* 750CL (and "Broadway") */ - .pvr_mask = 0xfffff0e0, - .pvr_value = 0x00087000, - .cpu_name = "750CL", - .cpu_features = CPU_FTRS_750CL, - .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, - .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS, - .icache_bsize = 32, - .dcache_bsize = 32, - .num_pmcs = 4, - .pmc_type = PPC_PMC_IBM, - .cpu_setup = __setup_cpu_750, - .machine_check = machine_check_generic, - .platform = "ppc750", - }, - { /* 745/755 */ - .pvr_mask = 0xfffff000, - .pvr_value = 0x00083000, - .cpu_name = "745/755", - .cpu_features = CPU_FTRS_750, - .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, - .mmu_features = MMU_FTR_HPTE_TABLE, - .icache_bsize = 32, - .dcache_bsize = 32, - .num_pmcs = 4, - .pmc_type = PPC_PMC_IBM, - .cpu_setup = __setup_cpu_750, - .machine_check = machine_check_generic, - .platform = "ppc750", - }, - { /* 750FX rev 1.x */ - .pvr_mask = 0xffffff00, - .pvr_value = 0x70000100, - .cpu_name = "750FX", - .cpu_features = CPU_FTRS_750FX1, - .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, - .mmu_features = MMU_FTR_HPTE_TABLE, - .icache_bsize = 32, - .dcache_bsize = 32, - .num_pmcs = 4, - .pmc_type = PPC_PMC_IBM, - .cpu_setup = __setup_cpu_750, - .machine_check = machine_check_generic, - .platform = "ppc750", - }, - { /* 750FX rev 2.0 must disable HID0[DPM] */ - .pvr_mask = 0xffffffff, - .pvr_value = 0x70000200, - .cpu_name = "750FX", - .cpu_features = CPU_FTRS_750FX2, - .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, - .mmu_features = MMU_FTR_HPTE_TABLE, - .icache_bsize = 32, - .dcache_bsize = 32, - .num_pmcs = 4, - .pmc_type = PPC_PMC_IBM, - .cpu_setup = __setup_cpu_750, - .machine_check = machine_check_generic, - .platform = "ppc750", - }, - { /* 750FX (All revs except 2.0) */ - .pvr_mask = 0xffff0000, - .pvr_value = 0x70000000, - .cpu_name = "750FX", - .cpu_features = CPU_FTRS_750FX, - .cpu_user_features 
= COMMON_USER | PPC_FEATURE_PPC_LE, - .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS, - .icache_bsize = 32, - .dcache_bsize = 32, - .num_pmcs = 4, - .pmc_type = PPC_PMC_IBM, - .cpu_setup = __setup_cpu_750fx, - .machine_check = machine_check_generic, - .platform = "ppc750", - }, - { /* 750GX */ - .pvr_mask = 0xffff0000, - .pvr_value = 0x70020000, - .cpu_name = "750GX", - .cpu_features = CPU_FTRS_750GX, - .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, - .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS, - .icache_bsize = 32, - .dcache_bsize = 32, - .num_pmcs = 4, - .pmc_type = PPC_PMC_IBM, - .cpu_setup = __setup_cpu_750fx, - .machine_check = machine_check_generic, - .platform = "ppc750", - }, - { /* 740/750 (L2CR bit need fixup for 740) */ - .pvr_mask = 0xffff0000, - .pvr_value = 0x00080000, - .cpu_name = "740/750", - .cpu_features = CPU_FTRS_740, - .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, - .mmu_features = MMU_FTR_HPTE_TABLE, - .icache_bsize = 32, - .dcache_bsize = 32, - .num_pmcs = 4, - .pmc_type = PPC_PMC_IBM, - .cpu_setup = __setup_cpu_750, - .machine_check = machine_check_generic, - .platform = "ppc750", - }, - { /* 7400 rev 1.1 ? (no TAU) */ - .pvr_mask = 0xffffffff, - .pvr_value = 0x000c1101, - .cpu_name = "7400 (1.1)", - .cpu_features = CPU_FTRS_7400_NOTAU, - .cpu_user_features = COMMON_USER | - PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE, - .mmu_features = MMU_FTR_HPTE_TABLE, - .icache_bsize = 32, - .dcache_bsize = 32, - .num_pmcs = 4, - .pmc_type = PPC_PMC_G4, - .cpu_setup = __setup_cpu_7400, - .machine_check = machine_check_generic, - .platform = "ppc7400", - }, - { /* 7400 */ - .pvr_mask = 0xffff0000, - .pvr_value = 0x000c0000, - .cpu_name = "7400", - .cpu_features = CPU_FTRS_7400, - .cpu_user_features = COMMON_USER | - PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE, - .mmu_features = MMU_FTR_HPTE_TABLE, - .icache_bsize = 32, - .dcache_bsize = 32, - .num_pmcs = 4, - .pmc_type = PPC_PMC_G4, - .cpu_setup = __setup_cpu_7400, - .machine_check = machine_check_generic, - .platform = "ppc7400", - }, - { /* 7410 */ - .pvr_mask = 0xffff0000, - .pvr_value = 0x800c0000, - .cpu_name = "7410", - .cpu_features = CPU_FTRS_7400, - .cpu_user_features = COMMON_USER | - PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE, - .mmu_features = MMU_FTR_HPTE_TABLE, - .icache_bsize = 32, - .dcache_bsize = 32, - .num_pmcs = 4, - .pmc_type = PPC_PMC_G4, - .cpu_setup = __setup_cpu_7410, - .machine_check = machine_check_generic, - .platform = "ppc7400", - }, - { /* 7450 2.0 - no doze/nap */ - .pvr_mask = 0xffffffff, - .pvr_value = 0x80000200, - .cpu_name = "7450", - .cpu_features = CPU_FTRS_7450_20, - .cpu_user_features = COMMON_USER | - PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE, - .mmu_features = MMU_FTR_HPTE_TABLE, - .icache_bsize = 32, - .dcache_bsize = 32, - .num_pmcs = 6, - .pmc_type = PPC_PMC_G4, - .cpu_setup = __setup_cpu_745x, - .machine_check = machine_check_generic, - .platform = "ppc7450", - }, - { /* 7450 2.1 */ - .pvr_mask = 0xffffffff, - .pvr_value = 0x80000201, - .cpu_name = "7450", - .cpu_features = CPU_FTRS_7450_21, - .cpu_user_features = COMMON_USER | - PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE, - .mmu_features = MMU_FTR_HPTE_TABLE, - .icache_bsize = 32, - .dcache_bsize = 32, - .num_pmcs = 6, - .pmc_type = PPC_PMC_G4, - .cpu_setup = __setup_cpu_745x, - .machine_check = machine_check_generic, - .platform = "ppc7450", - }, - { /* 7450 2.3 and newer */ - .pvr_mask = 0xffff0000, - .pvr_value = 0x80000000, - .cpu_name = "7450", - 
.cpu_features = CPU_FTRS_7450_23, - .cpu_user_features = COMMON_USER | - PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE, - .mmu_features = MMU_FTR_HPTE_TABLE, - .icache_bsize = 32, - .dcache_bsize = 32, - .num_pmcs = 6, - .pmc_type = PPC_PMC_G4, - .cpu_setup = __setup_cpu_745x, - .machine_check = machine_check_generic, - .platform = "ppc7450", - }, - { /* 7455 rev 1.x */ - .pvr_mask = 0xffffff00, - .pvr_value = 0x80010100, - .cpu_name = "7455", - .cpu_features = CPU_FTRS_7455_1, - .cpu_user_features = COMMON_USER | - PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE, - .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS, - .icache_bsize = 32, - .dcache_bsize = 32, - .num_pmcs = 6, - .pmc_type = PPC_PMC_G4, - .cpu_setup = __setup_cpu_745x, - .machine_check = machine_check_generic, - .platform = "ppc7450", - }, - { /* 7455 rev 2.0 */ - .pvr_mask = 0xffffffff, - .pvr_value = 0x80010200, - .cpu_name = "7455", - .cpu_features = CPU_FTRS_7455_20, - .cpu_user_features = COMMON_USER | - PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE, - .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS, - .icache_bsize = 32, - .dcache_bsize = 32, - .num_pmcs = 6, - .pmc_type = PPC_PMC_G4, - .cpu_setup = __setup_cpu_745x, - .machine_check = machine_check_generic, - .platform = "ppc7450", - }, - { /* 7455 others */ - .pvr_mask = 0xffff0000, - .pvr_value = 0x80010000, - .cpu_name = "7455", - .cpu_features = CPU_FTRS_7455, - .cpu_user_features = COMMON_USER | - PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE, - .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS, - .icache_bsize = 32, - .dcache_bsize = 32, - .num_pmcs = 6, - .pmc_type = PPC_PMC_G4, - .cpu_setup = __setup_cpu_745x, - .machine_check = machine_check_generic, - .platform = "ppc7450", - }, - { /* 7447/7457 Rev 1.0 */ - .pvr_mask = 0xffffffff, - .pvr_value = 0x80020100, - .cpu_name = "7447/7457", - .cpu_features = CPU_FTRS_7447_10, - .cpu_user_features = COMMON_USER | - PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE, - .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS, - .icache_bsize = 32, - .dcache_bsize = 32, - .num_pmcs = 6, - .pmc_type = PPC_PMC_G4, - .cpu_setup = __setup_cpu_745x, - .machine_check = machine_check_generic, - .platform = "ppc7450", - }, - { /* 7447/7457 Rev 1.1 */ - .pvr_mask = 0xffffffff, - .pvr_value = 0x80020101, - .cpu_name = "7447/7457", - .cpu_features = CPU_FTRS_7447_10, - .cpu_user_features = COMMON_USER | - PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE, - .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS, - .icache_bsize = 32, - .dcache_bsize = 32, - .num_pmcs = 6, - .pmc_type = PPC_PMC_G4, - .cpu_setup = __setup_cpu_745x, - .machine_check = machine_check_generic, - .platform = "ppc7450", - }, - { /* 7447/7457 Rev 1.2 and later */ - .pvr_mask = 0xffff0000, - .pvr_value = 0x80020000, - .cpu_name = "7447/7457", - .cpu_features = CPU_FTRS_7447, - .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE, - .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS, - .icache_bsize = 32, - .dcache_bsize = 32, - .num_pmcs = 6, - .pmc_type = PPC_PMC_G4, - .cpu_setup = __setup_cpu_745x, - .machine_check = machine_check_generic, - .platform = "ppc7450", - }, - { /* 7447A */ - .pvr_mask = 0xffff0000, - .pvr_value = 0x80030000, - .cpu_name = "7447A", - .cpu_features = CPU_FTRS_7447A, - .cpu_user_features = COMMON_USER | - PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE, - .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS, - .icache_bsize = 32, - 
.dcache_bsize = 32, - .num_pmcs = 6, - .pmc_type = PPC_PMC_G4, - .cpu_setup = __setup_cpu_745x, - .machine_check = machine_check_generic, - .platform = "ppc7450", - }, - { /* 7448 */ - .pvr_mask = 0xffff0000, - .pvr_value = 0x80040000, - .cpu_name = "7448", - .cpu_features = CPU_FTRS_7448, - .cpu_user_features = COMMON_USER | - PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE, - .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS, - .icache_bsize = 32, - .dcache_bsize = 32, - .num_pmcs = 6, - .pmc_type = PPC_PMC_G4, - .cpu_setup = __setup_cpu_745x, - .machine_check = machine_check_generic, - .platform = "ppc7450", - }, -#endif /* CONFIG_PPC_BOOK3S_604 */ -#ifdef CONFIG_PPC_BOOK3S_603 - { /* 603 */ - .pvr_mask = 0xffff0000, - .pvr_value = 0x00030000, - .cpu_name = "603", - .cpu_features = CPU_FTRS_603, - .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, - .mmu_features = 0, - .icache_bsize = 32, - .dcache_bsize = 32, - .cpu_setup = __setup_cpu_603, - .machine_check = machine_check_generic, - .platform = "ppc603", - }, - { /* 603e */ - .pvr_mask = 0xffff0000, - .pvr_value = 0x00060000, - .cpu_name = "603e", - .cpu_features = CPU_FTRS_603, - .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, - .mmu_features = 0, - .icache_bsize = 32, - .dcache_bsize = 32, - .cpu_setup = __setup_cpu_603, - .machine_check = machine_check_generic, - .platform = "ppc603", - }, - { /* 603ev */ - .pvr_mask = 0xffff0000, - .pvr_value = 0x00070000, - .cpu_name = "603ev", - .cpu_features = CPU_FTRS_603, - .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE, - .mmu_features = 0, - .icache_bsize = 32, - .dcache_bsize = 32, - .cpu_setup = __setup_cpu_603, - .machine_check = machine_check_generic, - .platform = "ppc603", - }, - { /* 82xx (8240, 8245, 8260 are all 603e cores) */ - .pvr_mask = 0x7fff0000, - .pvr_value = 0x00810000, - .cpu_name = "82xx", - .cpu_features = CPU_FTRS_82XX, - .cpu_user_features = COMMON_USER, - .mmu_features = 0, - .icache_bsize = 32, - .dcache_bsize = 32, - .cpu_setup = __setup_cpu_603, - .machine_check = machine_check_generic, - .platform = "ppc603", - }, - { /* All G2_LE (603e core, plus some) have the same pvr */ - .pvr_mask = 0x7fff0000, - .pvr_value = 0x00820000, - .cpu_name = "G2_LE", - .cpu_features = CPU_FTRS_G2_LE, - .cpu_user_features = COMMON_USER, - .mmu_features = MMU_FTR_USE_HIGH_BATS, - .icache_bsize = 32, - .dcache_bsize = 32, - .cpu_setup = __setup_cpu_603, - .machine_check = machine_check_generic, - .platform = "ppc603", - }, -#ifdef CONFIG_PPC_83xx - { /* e300c1 (a 603e core, plus some) on 83xx */ - .pvr_mask = 0x7fff0000, - .pvr_value = 0x00830000, - .cpu_name = "e300c1", - .cpu_features = CPU_FTRS_E300, - .cpu_user_features = COMMON_USER, - .mmu_features = MMU_FTR_USE_HIGH_BATS, - .icache_bsize = 32, - .dcache_bsize = 32, - .cpu_setup = __setup_cpu_603, - .machine_check = machine_check_83xx, - .platform = "ppc603", - }, - { /* e300c2 (an e300c1 core, plus some, minus FPU) on 83xx */ - .pvr_mask = 0x7fff0000, - .pvr_value = 0x00840000, - .cpu_name = "e300c2", - .cpu_features = CPU_FTRS_E300C2, - .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, - .mmu_features = MMU_FTR_USE_HIGH_BATS | - MMU_FTR_NEED_DTLB_SW_LRU, - .icache_bsize = 32, - .dcache_bsize = 32, - .cpu_setup = __setup_cpu_603, - .machine_check = machine_check_83xx, - .platform = "ppc603", - }, - { /* e300c3 (e300c1, plus one IU, half cache size) on 83xx */ - .pvr_mask = 0x7fff0000, - .pvr_value = 0x00850000, - .cpu_name = "e300c3", - .cpu_features = CPU_FTRS_E300, - .cpu_user_features = 
COMMON_USER, - .mmu_features = MMU_FTR_USE_HIGH_BATS | - MMU_FTR_NEED_DTLB_SW_LRU, - .icache_bsize = 32, - .dcache_bsize = 32, - .cpu_setup = __setup_cpu_603, - .machine_check = machine_check_83xx, - .num_pmcs = 4, - .platform = "ppc603", - }, - { /* e300c4 (e300c1, plus one IU) */ - .pvr_mask = 0x7fff0000, - .pvr_value = 0x00860000, - .cpu_name = "e300c4", - .cpu_features = CPU_FTRS_E300, - .cpu_user_features = COMMON_USER, - .mmu_features = MMU_FTR_USE_HIGH_BATS | - MMU_FTR_NEED_DTLB_SW_LRU, - .icache_bsize = 32, - .dcache_bsize = 32, - .cpu_setup = __setup_cpu_603, - .machine_check = machine_check_83xx, - .num_pmcs = 4, - .platform = "ppc603", - }, -#endif -#endif /* CONFIG_PPC_BOOK3S_603 */ -#ifdef CONFIG_PPC_BOOK3S_604 - { /* default match, we assume split I/D cache & TB (non-601)... */ - .pvr_mask = 0x00000000, - .pvr_value = 0x00000000, - .cpu_name = "(generic PPC)", - .cpu_features = CPU_FTRS_CLASSIC32, - .cpu_user_features = COMMON_USER, - .mmu_features = MMU_FTR_HPTE_TABLE, - .icache_bsize = 32, - .dcache_bsize = 32, - .machine_check = machine_check_generic, - .platform = "ppc603", - }, -#endif /* CONFIG_PPC_BOOK3S_604 */ -#endif /* CONFIG_PPC_BOOK3S_32 */ -#ifdef CONFIG_PPC_8xx - { /* 8xx */ - .pvr_mask = 0xffff0000, - .pvr_value = PVR_8xx, - .cpu_name = "8xx", - /* CPU_FTR_MAYBE_CAN_DOZE is possible, - * if the 8xx code is there.... */ - .cpu_features = CPU_FTRS_8XX, - .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, - .mmu_features = MMU_FTR_TYPE_8xx, - .icache_bsize = 16, - .dcache_bsize = 16, - .machine_check = machine_check_8xx, - .platform = "ppc823", - }, -#endif /* CONFIG_PPC_8xx */ -#ifdef CONFIG_40x - { /* STB 04xxx */ - .pvr_mask = 0xffff0000, - .pvr_value = 0x41810000, - .cpu_name = "STB04xxx", - .cpu_features = CPU_FTRS_40X, - .cpu_user_features = PPC_FEATURE_32 | - PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, - .mmu_features = MMU_FTR_TYPE_40x, - .icache_bsize = 32, - .dcache_bsize = 32, - .machine_check = machine_check_4xx, - .platform = "ppc405", - }, - { /* NP405L */ - .pvr_mask = 0xffff0000, - .pvr_value = 0x41610000, - .cpu_name = "NP405L", - .cpu_features = CPU_FTRS_40X, - .cpu_user_features = PPC_FEATURE_32 | - PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, - .mmu_features = MMU_FTR_TYPE_40x, - .icache_bsize = 32, - .dcache_bsize = 32, - .machine_check = machine_check_4xx, - .platform = "ppc405", - }, - { /* NP4GS3 */ - .pvr_mask = 0xffff0000, - .pvr_value = 0x40B10000, - .cpu_name = "NP4GS3", - .cpu_features = CPU_FTRS_40X, - .cpu_user_features = PPC_FEATURE_32 | - PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, - .mmu_features = MMU_FTR_TYPE_40x, - .icache_bsize = 32, - .dcache_bsize = 32, - .machine_check = machine_check_4xx, - .platform = "ppc405", - }, - { /* NP405H */ - .pvr_mask = 0xffff0000, - .pvr_value = 0x41410000, - .cpu_name = "NP405H", - .cpu_features = CPU_FTRS_40X, - .cpu_user_features = PPC_FEATURE_32 | - PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, - .mmu_features = MMU_FTR_TYPE_40x, - .icache_bsize = 32, - .dcache_bsize = 32, - .machine_check = machine_check_4xx, - .platform = "ppc405", - }, - { /* 405GPr */ - .pvr_mask = 0xffff0000, - .pvr_value = 0x50910000, - .cpu_name = "405GPr", - .cpu_features = CPU_FTRS_40X, - .cpu_user_features = PPC_FEATURE_32 | - PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, - .mmu_features = MMU_FTR_TYPE_40x, - .icache_bsize = 32, - .dcache_bsize = 32, - .machine_check = machine_check_4xx, - .platform = "ppc405", - }, - { /* STBx25xx */ - .pvr_mask = 0xffff0000, - .pvr_value = 0x51510000, - .cpu_name = 
"STBx25xx", - .cpu_features = CPU_FTRS_40X, - .cpu_user_features = PPC_FEATURE_32 | - PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, - .mmu_features = MMU_FTR_TYPE_40x, - .icache_bsize = 32, - .dcache_bsize = 32, - .machine_check = machine_check_4xx, - .platform = "ppc405", - }, - { /* 405LP */ - .pvr_mask = 0xffff0000, - .pvr_value = 0x41F10000, - .cpu_name = "405LP", - .cpu_features = CPU_FTRS_40X, - .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, - .mmu_features = MMU_FTR_TYPE_40x, - .icache_bsize = 32, - .dcache_bsize = 32, - .machine_check = machine_check_4xx, - .platform = "ppc405", - }, - { /* 405EP */ - .pvr_mask = 0xffff0000, - .pvr_value = 0x51210000, - .cpu_name = "405EP", - .cpu_features = CPU_FTRS_40X, - .cpu_user_features = PPC_FEATURE_32 | - PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, - .mmu_features = MMU_FTR_TYPE_40x, - .icache_bsize = 32, - .dcache_bsize = 32, - .machine_check = machine_check_4xx, - .platform = "ppc405", - }, - { /* 405EX Rev. A/B with Security */ - .pvr_mask = 0xffff000f, - .pvr_value = 0x12910007, - .cpu_name = "405EX Rev. A/B", - .cpu_features = CPU_FTRS_40X, - .cpu_user_features = PPC_FEATURE_32 | - PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, - .mmu_features = MMU_FTR_TYPE_40x, - .icache_bsize = 32, - .dcache_bsize = 32, - .machine_check = machine_check_4xx, - .platform = "ppc405", - }, - { /* 405EX Rev. C without Security */ - .pvr_mask = 0xffff000f, - .pvr_value = 0x1291000d, - .cpu_name = "405EX Rev. C", - .cpu_features = CPU_FTRS_40X, - .cpu_user_features = PPC_FEATURE_32 | - PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, - .mmu_features = MMU_FTR_TYPE_40x, - .icache_bsize = 32, - .dcache_bsize = 32, - .machine_check = machine_check_4xx, - .platform = "ppc405", - }, - { /* 405EX Rev. C with Security */ - .pvr_mask = 0xffff000f, - .pvr_value = 0x1291000f, - .cpu_name = "405EX Rev. C", - .cpu_features = CPU_FTRS_40X, - .cpu_user_features = PPC_FEATURE_32 | - PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, - .mmu_features = MMU_FTR_TYPE_40x, - .icache_bsize = 32, - .dcache_bsize = 32, - .machine_check = machine_check_4xx, - .platform = "ppc405", - }, - { /* 405EX Rev. D without Security */ - .pvr_mask = 0xffff000f, - .pvr_value = 0x12910003, - .cpu_name = "405EX Rev. D", - .cpu_features = CPU_FTRS_40X, - .cpu_user_features = PPC_FEATURE_32 | - PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, - .mmu_features = MMU_FTR_TYPE_40x, - .icache_bsize = 32, - .dcache_bsize = 32, - .machine_check = machine_check_4xx, - .platform = "ppc405", - }, - { /* 405EX Rev. D with Security */ - .pvr_mask = 0xffff000f, - .pvr_value = 0x12910005, - .cpu_name = "405EX Rev. D", - .cpu_features = CPU_FTRS_40X, - .cpu_user_features = PPC_FEATURE_32 | - PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, - .mmu_features = MMU_FTR_TYPE_40x, - .icache_bsize = 32, - .dcache_bsize = 32, - .machine_check = machine_check_4xx, - .platform = "ppc405", - }, - { /* 405EXr Rev. A/B without Security */ - .pvr_mask = 0xffff000f, - .pvr_value = 0x12910001, - .cpu_name = "405EXr Rev. A/B", - .cpu_features = CPU_FTRS_40X, - .cpu_user_features = PPC_FEATURE_32 | - PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, - .mmu_features = MMU_FTR_TYPE_40x, - .icache_bsize = 32, - .dcache_bsize = 32, - .machine_check = machine_check_4xx, - .platform = "ppc405", - }, - { /* 405EXr Rev. C without Security */ - .pvr_mask = 0xffff000f, - .pvr_value = 0x12910009, - .cpu_name = "405EXr Rev. 
C", - .cpu_features = CPU_FTRS_40X, - .cpu_user_features = PPC_FEATURE_32 | - PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, - .mmu_features = MMU_FTR_TYPE_40x, - .icache_bsize = 32, - .dcache_bsize = 32, - .machine_check = machine_check_4xx, - .platform = "ppc405", - }, - { /* 405EXr Rev. C with Security */ - .pvr_mask = 0xffff000f, - .pvr_value = 0x1291000b, - .cpu_name = "405EXr Rev. C", - .cpu_features = CPU_FTRS_40X, - .cpu_user_features = PPC_FEATURE_32 | - PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, - .mmu_features = MMU_FTR_TYPE_40x, - .icache_bsize = 32, - .dcache_bsize = 32, - .machine_check = machine_check_4xx, - .platform = "ppc405", - }, - { /* 405EXr Rev. D without Security */ - .pvr_mask = 0xffff000f, - .pvr_value = 0x12910000, - .cpu_name = "405EXr Rev. D", - .cpu_features = CPU_FTRS_40X, - .cpu_user_features = PPC_FEATURE_32 | - PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, - .mmu_features = MMU_FTR_TYPE_40x, - .icache_bsize = 32, - .dcache_bsize = 32, - .machine_check = machine_check_4xx, - .platform = "ppc405", - }, - { /* 405EXr Rev. D with Security */ - .pvr_mask = 0xffff000f, - .pvr_value = 0x12910002, - .cpu_name = "405EXr Rev. D", - .cpu_features = CPU_FTRS_40X, - .cpu_user_features = PPC_FEATURE_32 | - PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, - .mmu_features = MMU_FTR_TYPE_40x, - .icache_bsize = 32, - .dcache_bsize = 32, - .machine_check = machine_check_4xx, - .platform = "ppc405", - }, - { - /* 405EZ */ - .pvr_mask = 0xffff0000, - .pvr_value = 0x41510000, - .cpu_name = "405EZ", - .cpu_features = CPU_FTRS_40X, - .cpu_user_features = PPC_FEATURE_32 | - PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, - .mmu_features = MMU_FTR_TYPE_40x, - .icache_bsize = 32, - .dcache_bsize = 32, - .machine_check = machine_check_4xx, - .platform = "ppc405", - }, - { /* APM8018X */ - .pvr_mask = 0xffff0000, - .pvr_value = 0x7ff11432, - .cpu_name = "APM8018X", - .cpu_features = CPU_FTRS_40X, - .cpu_user_features = PPC_FEATURE_32 | - PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, - .mmu_features = MMU_FTR_TYPE_40x, - .icache_bsize = 32, - .dcache_bsize = 32, - .machine_check = machine_check_4xx, - .platform = "ppc405", - }, - { /* default match */ - .pvr_mask = 0x00000000, - .pvr_value = 0x00000000, - .cpu_name = "(generic 40x PPC)", - .cpu_features = CPU_FTRS_40X, - .cpu_user_features = PPC_FEATURE_32 | - PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, - .mmu_features = MMU_FTR_TYPE_40x, - .icache_bsize = 32, - .dcache_bsize = 32, - .machine_check = machine_check_4xx, - .platform = "ppc405", - } - -#endif /* CONFIG_40x */ -#ifdef CONFIG_44x -#ifndef CONFIG_PPC_47x - { - .pvr_mask = 0xf0000fff, - .pvr_value = 0x40000850, - .cpu_name = "440GR Rev. A", - .cpu_features = CPU_FTRS_44X, - .cpu_user_features = COMMON_USER_BOOKE, - .mmu_features = MMU_FTR_TYPE_44x, - .icache_bsize = 32, - .dcache_bsize = 32, - .machine_check = machine_check_4xx, - .platform = "ppc440", - }, - { /* Use logical PVR for 440EP (logical pvr = pvr | 0x8) */ - .pvr_mask = 0xf0000fff, - .pvr_value = 0x40000858, - .cpu_name = "440EP Rev. A", - .cpu_features = CPU_FTRS_44X, - .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, - .mmu_features = MMU_FTR_TYPE_44x, - .icache_bsize = 32, - .dcache_bsize = 32, - .cpu_setup = __setup_cpu_440ep, - .machine_check = machine_check_4xx, - .platform = "ppc440", - }, - { - .pvr_mask = 0xf0000fff, - .pvr_value = 0x400008d3, - .cpu_name = "440GR Rev. 
B", - .cpu_features = CPU_FTRS_44X, - .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, - .mmu_features = MMU_FTR_TYPE_44x, - .icache_bsize = 32, - .dcache_bsize = 32, - .machine_check = machine_check_4xx, - .platform = "ppc440", - }, - { /* Matches both physical and logical PVR for 440EP (logical pvr = pvr | 0x8) */ - .pvr_mask = 0xf0000ff7, - .pvr_value = 0x400008d4, - .cpu_name = "440EP Rev. C", - .cpu_features = CPU_FTRS_44X, - .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, - .mmu_features = MMU_FTR_TYPE_44x, - .icache_bsize = 32, - .dcache_bsize = 32, - .cpu_setup = __setup_cpu_440ep, - .machine_check = machine_check_4xx, - .platform = "ppc440", - }, - { /* Use logical PVR for 440EP (logical pvr = pvr | 0x8) */ - .pvr_mask = 0xf0000fff, - .pvr_value = 0x400008db, - .cpu_name = "440EP Rev. B", - .cpu_features = CPU_FTRS_44X, - .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, - .mmu_features = MMU_FTR_TYPE_44x, - .icache_bsize = 32, - .dcache_bsize = 32, - .cpu_setup = __setup_cpu_440ep, - .machine_check = machine_check_4xx, - .platform = "ppc440", - }, - { /* 440GRX */ - .pvr_mask = 0xf0000ffb, - .pvr_value = 0x200008D0, - .cpu_name = "440GRX", - .cpu_features = CPU_FTRS_44X, - .cpu_user_features = COMMON_USER_BOOKE, - .mmu_features = MMU_FTR_TYPE_44x, - .icache_bsize = 32, - .dcache_bsize = 32, - .cpu_setup = __setup_cpu_440grx, - .machine_check = machine_check_440A, - .platform = "ppc440", - }, - { /* Use logical PVR for 440EPx (logical pvr = pvr | 0x8) */ - .pvr_mask = 0xf0000ffb, - .pvr_value = 0x200008D8, - .cpu_name = "440EPX", - .cpu_features = CPU_FTRS_44X, - .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, - .mmu_features = MMU_FTR_TYPE_44x, - .icache_bsize = 32, - .dcache_bsize = 32, - .cpu_setup = __setup_cpu_440epx, - .machine_check = machine_check_440A, - .platform = "ppc440", - }, - { /* 440GP Rev. B */ - .pvr_mask = 0xf0000fff, - .pvr_value = 0x40000440, - .cpu_name = "440GP Rev. B", - .cpu_features = CPU_FTRS_44X, - .cpu_user_features = COMMON_USER_BOOKE, - .mmu_features = MMU_FTR_TYPE_44x, - .icache_bsize = 32, - .dcache_bsize = 32, - .machine_check = machine_check_4xx, - .platform = "ppc440gp", - }, - { /* 440GP Rev. C */ - .pvr_mask = 0xf0000fff, - .pvr_value = 0x40000481, - .cpu_name = "440GP Rev. C", - .cpu_features = CPU_FTRS_44X, - .cpu_user_features = COMMON_USER_BOOKE, - .mmu_features = MMU_FTR_TYPE_44x, - .icache_bsize = 32, - .dcache_bsize = 32, - .machine_check = machine_check_4xx, - .platform = "ppc440gp", - }, - { /* 440GX Rev. A */ - .pvr_mask = 0xf0000fff, - .pvr_value = 0x50000850, - .cpu_name = "440GX Rev. A", - .cpu_features = CPU_FTRS_44X, - .cpu_user_features = COMMON_USER_BOOKE, - .mmu_features = MMU_FTR_TYPE_44x, - .icache_bsize = 32, - .dcache_bsize = 32, - .cpu_setup = __setup_cpu_440gx, - .machine_check = machine_check_440A, - .platform = "ppc440", - }, - { /* 440GX Rev. B */ - .pvr_mask = 0xf0000fff, - .pvr_value = 0x50000851, - .cpu_name = "440GX Rev. B", - .cpu_features = CPU_FTRS_44X, - .cpu_user_features = COMMON_USER_BOOKE, - .mmu_features = MMU_FTR_TYPE_44x, - .icache_bsize = 32, - .dcache_bsize = 32, - .cpu_setup = __setup_cpu_440gx, - .machine_check = machine_check_440A, - .platform = "ppc440", - }, - { /* 440GX Rev. C */ - .pvr_mask = 0xf0000fff, - .pvr_value = 0x50000892, - .cpu_name = "440GX Rev. 
C", - .cpu_features = CPU_FTRS_44X, - .cpu_user_features = COMMON_USER_BOOKE, - .mmu_features = MMU_FTR_TYPE_44x, - .icache_bsize = 32, - .dcache_bsize = 32, - .cpu_setup = __setup_cpu_440gx, - .machine_check = machine_check_440A, - .platform = "ppc440", - }, - { /* 440GX Rev. F */ - .pvr_mask = 0xf0000fff, - .pvr_value = 0x50000894, - .cpu_name = "440GX Rev. F", - .cpu_features = CPU_FTRS_44X, - .cpu_user_features = COMMON_USER_BOOKE, - .mmu_features = MMU_FTR_TYPE_44x, - .icache_bsize = 32, - .dcache_bsize = 32, - .cpu_setup = __setup_cpu_440gx, - .machine_check = machine_check_440A, - .platform = "ppc440", - }, - { /* 440SP Rev. A */ - .pvr_mask = 0xfff00fff, - .pvr_value = 0x53200891, - .cpu_name = "440SP Rev. A", - .cpu_features = CPU_FTRS_44X, - .cpu_user_features = COMMON_USER_BOOKE, - .mmu_features = MMU_FTR_TYPE_44x, - .icache_bsize = 32, - .dcache_bsize = 32, - .machine_check = machine_check_4xx, - .platform = "ppc440", - }, - { /* 440SPe Rev. A */ - .pvr_mask = 0xfff00fff, - .pvr_value = 0x53400890, - .cpu_name = "440SPe Rev. A", - .cpu_features = CPU_FTRS_44X, - .cpu_user_features = COMMON_USER_BOOKE, - .mmu_features = MMU_FTR_TYPE_44x, - .icache_bsize = 32, - .dcache_bsize = 32, - .cpu_setup = __setup_cpu_440spe, - .machine_check = machine_check_440A, - .platform = "ppc440", - }, - { /* 440SPe Rev. B */ - .pvr_mask = 0xfff00fff, - .pvr_value = 0x53400891, - .cpu_name = "440SPe Rev. B", - .cpu_features = CPU_FTRS_44X, - .cpu_user_features = COMMON_USER_BOOKE, - .mmu_features = MMU_FTR_TYPE_44x, - .icache_bsize = 32, - .dcache_bsize = 32, - .cpu_setup = __setup_cpu_440spe, - .machine_check = machine_check_440A, - .platform = "ppc440", - }, - { /* 460EX */ - .pvr_mask = 0xffff0006, - .pvr_value = 0x13020002, - .cpu_name = "460EX", - .cpu_features = CPU_FTRS_440x6, - .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, - .mmu_features = MMU_FTR_TYPE_44x, - .icache_bsize = 32, - .dcache_bsize = 32, - .cpu_setup = __setup_cpu_460ex, - .machine_check = machine_check_440A, - .platform = "ppc440", - }, - { /* 460EX Rev B */ - .pvr_mask = 0xffff0007, - .pvr_value = 0x13020004, - .cpu_name = "460EX Rev. B", - .cpu_features = CPU_FTRS_440x6, - .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, - .mmu_features = MMU_FTR_TYPE_44x, - .icache_bsize = 32, - .dcache_bsize = 32, - .cpu_setup = __setup_cpu_460ex, - .machine_check = machine_check_440A, - .platform = "ppc440", - }, - { /* 460GT */ - .pvr_mask = 0xffff0006, - .pvr_value = 0x13020000, - .cpu_name = "460GT", - .cpu_features = CPU_FTRS_440x6, - .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, - .mmu_features = MMU_FTR_TYPE_44x, - .icache_bsize = 32, - .dcache_bsize = 32, - .cpu_setup = __setup_cpu_460gt, - .machine_check = machine_check_440A, - .platform = "ppc440", - }, - { /* 460GT Rev B */ - .pvr_mask = 0xffff0007, - .pvr_value = 0x13020005, - .cpu_name = "460GT Rev. 
B", - .cpu_features = CPU_FTRS_440x6, - .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, - .mmu_features = MMU_FTR_TYPE_44x, - .icache_bsize = 32, - .dcache_bsize = 32, - .cpu_setup = __setup_cpu_460gt, - .machine_check = machine_check_440A, - .platform = "ppc440", - }, - { /* 460SX */ - .pvr_mask = 0xffffff00, - .pvr_value = 0x13541800, - .cpu_name = "460SX", - .cpu_features = CPU_FTRS_44X, - .cpu_user_features = COMMON_USER_BOOKE, - .mmu_features = MMU_FTR_TYPE_44x, - .icache_bsize = 32, - .dcache_bsize = 32, - .cpu_setup = __setup_cpu_460sx, - .machine_check = machine_check_440A, - .platform = "ppc440", - }, - { /* 464 in APM821xx */ - .pvr_mask = 0xfffffff0, - .pvr_value = 0x12C41C80, - .cpu_name = "APM821XX", - .cpu_features = CPU_FTRS_44X, - .cpu_user_features = COMMON_USER_BOOKE | - PPC_FEATURE_HAS_FPU, - .mmu_features = MMU_FTR_TYPE_44x, - .icache_bsize = 32, - .dcache_bsize = 32, - .cpu_setup = __setup_cpu_apm821xx, - .machine_check = machine_check_440A, - .platform = "ppc440", - }, - { /* default match */ - .pvr_mask = 0x00000000, - .pvr_value = 0x00000000, - .cpu_name = "(generic 44x PPC)", - .cpu_features = CPU_FTRS_44X, - .cpu_user_features = COMMON_USER_BOOKE, - .mmu_features = MMU_FTR_TYPE_44x, - .icache_bsize = 32, - .dcache_bsize = 32, - .machine_check = machine_check_4xx, - .platform = "ppc440", - } -#else /* CONFIG_PPC_47x */ - { /* 476 DD2 core */ - .pvr_mask = 0xffffffff, - .pvr_value = 0x11a52080, - .cpu_name = "476", - .cpu_features = CPU_FTRS_47X | CPU_FTR_476_DD2, - .cpu_user_features = COMMON_USER_BOOKE | - PPC_FEATURE_HAS_FPU, - .mmu_features = MMU_FTR_TYPE_47x | - MMU_FTR_USE_TLBIVAX_BCAST | MMU_FTR_LOCK_BCAST_INVAL, - .icache_bsize = 32, - .dcache_bsize = 128, - .machine_check = machine_check_47x, - .platform = "ppc470", - }, - { /* 476fpe */ - .pvr_mask = 0xffff0000, - .pvr_value = 0x7ff50000, - .cpu_name = "476fpe", - .cpu_features = CPU_FTRS_47X | CPU_FTR_476_DD2, - .cpu_user_features = COMMON_USER_BOOKE | - PPC_FEATURE_HAS_FPU, - .mmu_features = MMU_FTR_TYPE_47x | - MMU_FTR_USE_TLBIVAX_BCAST | MMU_FTR_LOCK_BCAST_INVAL, - .icache_bsize = 32, - .dcache_bsize = 128, - .machine_check = machine_check_47x, - .platform = "ppc470", - }, - { /* 476 iss */ - .pvr_mask = 0xffff0000, - .pvr_value = 0x00050000, - .cpu_name = "476", - .cpu_features = CPU_FTRS_47X, - .cpu_user_features = COMMON_USER_BOOKE | - PPC_FEATURE_HAS_FPU, - .mmu_features = MMU_FTR_TYPE_47x | - MMU_FTR_USE_TLBIVAX_BCAST | MMU_FTR_LOCK_BCAST_INVAL, - .icache_bsize = 32, - .dcache_bsize = 128, - .machine_check = machine_check_47x, - .platform = "ppc470", - }, - { /* 476 others */ - .pvr_mask = 0xffff0000, - .pvr_value = 0x11a50000, - .cpu_name = "476", - .cpu_features = CPU_FTRS_47X, - .cpu_user_features = COMMON_USER_BOOKE | - PPC_FEATURE_HAS_FPU, - .mmu_features = MMU_FTR_TYPE_47x | - MMU_FTR_USE_TLBIVAX_BCAST | MMU_FTR_LOCK_BCAST_INVAL, - .icache_bsize = 32, - .dcache_bsize = 128, - .machine_check = machine_check_47x, - .platform = "ppc470", - }, - { /* default match */ - .pvr_mask = 0x00000000, - .pvr_value = 0x00000000, - .cpu_name = "(generic 47x PPC)", - .cpu_features = CPU_FTRS_47X, - .cpu_user_features = COMMON_USER_BOOKE, - .mmu_features = MMU_FTR_TYPE_47x, - .icache_bsize = 32, - .dcache_bsize = 128, - .machine_check = machine_check_47x, - .platform = "ppc470", - } -#endif /* CONFIG_PPC_47x */ -#endif /* CONFIG_44x */ -#endif /* CONFIG_PPC32 */ -#ifdef CONFIG_E500 -#ifdef CONFIG_PPC32 -#ifndef CONFIG_PPC_E500MC - { /* e500 */ - .pvr_mask = 0xffff0000, - .pvr_value = 
0x80200000, - .cpu_name = "e500", - .cpu_features = CPU_FTRS_E500, - .cpu_user_features = COMMON_USER_BOOKE | - PPC_FEATURE_HAS_SPE_COMP | - PPC_FEATURE_HAS_EFP_SINGLE_COMP, - .cpu_user_features2 = PPC_FEATURE2_ISEL, - .mmu_features = MMU_FTR_TYPE_FSL_E, - .icache_bsize = 32, - .dcache_bsize = 32, - .num_pmcs = 4, - .cpu_setup = __setup_cpu_e500v1, - .machine_check = machine_check_e500, - .platform = "ppc8540", - }, - { /* e500v2 */ - .pvr_mask = 0xffff0000, - .pvr_value = 0x80210000, - .cpu_name = "e500v2", - .cpu_features = CPU_FTRS_E500_2, - .cpu_user_features = COMMON_USER_BOOKE | - PPC_FEATURE_HAS_SPE_COMP | - PPC_FEATURE_HAS_EFP_SINGLE_COMP | - PPC_FEATURE_HAS_EFP_DOUBLE_COMP, - .cpu_user_features2 = PPC_FEATURE2_ISEL, - .mmu_features = MMU_FTR_TYPE_FSL_E | MMU_FTR_BIG_PHYS, - .icache_bsize = 32, - .dcache_bsize = 32, - .num_pmcs = 4, - .cpu_setup = __setup_cpu_e500v2, - .machine_check = machine_check_e500, - .platform = "ppc8548", - .cpu_down_flush = cpu_down_flush_e500v2, - }, -#else - { /* e500mc */ - .pvr_mask = 0xffff0000, - .pvr_value = 0x80230000, - .cpu_name = "e500mc", - .cpu_features = CPU_FTRS_E500MC, - .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, - .cpu_user_features2 = PPC_FEATURE2_ISEL, - .mmu_features = MMU_FTR_TYPE_FSL_E | MMU_FTR_BIG_PHYS | - MMU_FTR_USE_TLBILX, - .icache_bsize = 64, - .dcache_bsize = 64, - .num_pmcs = 4, - .cpu_setup = __setup_cpu_e500mc, - .machine_check = machine_check_e500mc, - .platform = "ppce500mc", - .cpu_down_flush = cpu_down_flush_e500mc, - }, -#endif /* CONFIG_PPC_E500MC */ -#endif /* CONFIG_PPC32 */ -#ifdef CONFIG_PPC_E500MC - { /* e5500 */ - .pvr_mask = 0xffff0000, - .pvr_value = 0x80240000, - .cpu_name = "e5500", - .cpu_features = CPU_FTRS_E5500, - .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, - .cpu_user_features2 = PPC_FEATURE2_ISEL, - .mmu_features = MMU_FTR_TYPE_FSL_E | MMU_FTR_BIG_PHYS | - MMU_FTR_USE_TLBILX, - .icache_bsize = 64, - .dcache_bsize = 64, - .num_pmcs = 4, - .cpu_setup = __setup_cpu_e5500, -#ifndef CONFIG_PPC32 - .cpu_restore = __restore_cpu_e5500, -#endif - .machine_check = machine_check_e500mc, - .platform = "ppce5500", - .cpu_down_flush = cpu_down_flush_e5500, - }, - { /* e6500 */ - .pvr_mask = 0xffff0000, - .pvr_value = 0x80400000, - .cpu_name = "e6500", - .cpu_features = CPU_FTRS_E6500, - .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU | - PPC_FEATURE_HAS_ALTIVEC_COMP, - .cpu_user_features2 = PPC_FEATURE2_ISEL, - .mmu_features = MMU_FTR_TYPE_FSL_E | MMU_FTR_BIG_PHYS | - MMU_FTR_USE_TLBILX, - .icache_bsize = 64, - .dcache_bsize = 64, - .num_pmcs = 6, - .cpu_setup = __setup_cpu_e6500, -#ifndef CONFIG_PPC32 - .cpu_restore = __restore_cpu_e6500, -#endif - .machine_check = machine_check_e500mc, - .platform = "ppce6500", - .cpu_down_flush = cpu_down_flush_e6500, - }, -#endif /* CONFIG_PPC_E500MC */ -#ifdef CONFIG_PPC32 - { /* default match */ - .pvr_mask = 0x00000000, - .pvr_value = 0x00000000, - .cpu_name = "(generic E500 PPC)", - .cpu_features = CPU_FTRS_E500, - .cpu_user_features = COMMON_USER_BOOKE | - PPC_FEATURE_HAS_SPE_COMP | - PPC_FEATURE_HAS_EFP_SINGLE_COMP, - .mmu_features = MMU_FTR_TYPE_FSL_E, - .icache_bsize = 32, - .dcache_bsize = 32, - .machine_check = machine_check_e500, - .platform = "powerpc", - } -#endif /* CONFIG_PPC32 */ -#endif /* CONFIG_E500 */ -}; +#include "cpu_specs.h" void __init set_cur_cpu_spec(struct cpu_spec *s) { From dfc3095cec27f402c183da920f4733785e4c873d Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Mon, 19 Sep 2022 19:01:31 +0200 
Subject: [PATCH 109/214] powerpc: Remove CONFIG_FSL_BOOKE PPC_85xx is PPC32 only. PPC_85xx always selects E500 and is the only PPC32 that selects E500. FSL_BOOKE is selected when E500 and PPC32 are selected. So FSL_BOOKE is redundant with PPC_85xx. Remove FSL_BOOKE. And rename four files accordingly. cpu_setup_fsl_booke.S is not renamed because it is linked to PPC_FSL_BOOK3E and not to FSL_BOOKE as suggested by its name. Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/08e3e15594e66d63b9e89c5b4f9c35153913c28f.1663606875.git.christophe.leroy@csgroup.eu --- arch/powerpc/Kconfig | 28 +++++++++---------- arch/powerpc/Makefile | 2 +- arch/powerpc/include/asm/kexec.h | 2 +- arch/powerpc/include/asm/nohash/32/pgtable.h | 6 ++-- .../nohash/32/{pte-fsl-booke.h => pte-85xx.h} | 6 ++-- arch/powerpc/include/asm/nohash/tlbflush.h | 2 +- ...e_entry_mapping.S => 85xx_entry_mapping.S} | 0 arch/powerpc/kernel/Makefile | 6 ++-- .../kernel/{head_fsl_booke.S => head_85xx.S} | 4 +-- arch/powerpc/kernel/kgdb.c | 12 ++++---- .../kernel/{swsusp_booke.S => swsusp_85xx.S} | 0 arch/powerpc/kernel/traps.c | 4 +-- arch/powerpc/kexec/core_32.c | 2 +- arch/powerpc/kexec/relocate_32.S | 4 +-- arch/powerpc/kvm/booke_interrupts.S | 4 +-- arch/powerpc/mm/init_32.c | 4 +-- arch/powerpc/mm/mmu_decl.h | 4 +-- arch/powerpc/mm/nohash/fsl_book3e.c | 2 +- arch/powerpc/mm/nohash/tlb.c | 2 +- arch/powerpc/mm/nohash/tlb_low.S | 2 +- arch/powerpc/platforms/Kconfig.cputype | 11 ++------ 21 files changed, 51 insertions(+), 56 deletions(-) rename arch/powerpc/include/asm/nohash/32/{pte-fsl-booke.h => pte-85xx.h} (94%) rename arch/powerpc/kernel/{fsl_booke_entry_mapping.S => 85xx_entry_mapping.S} (100%) rename arch/powerpc/kernel/{head_fsl_booke.S => head_85xx.S} (99%) rename arch/powerpc/kernel/{swsusp_booke.S => swsusp_85xx.S} (100%) diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 220045692e48..dafb14f44672 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -135,7 +135,7 @@ config PPC select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE && PPC_BOOK3S_64 select ARCH_HAS_SET_MEMORY select ARCH_HAS_STRICT_KERNEL_RWX if (PPC_BOOK3S || PPC_8xx || 40x) && !HIBERNATION - select ARCH_HAS_STRICT_KERNEL_RWX if FSL_BOOKE && !HIBERNATION && !RANDOMIZE_BASE + select ARCH_HAS_STRICT_KERNEL_RWX if PPC_85xx && !HIBERNATION && !RANDOMIZE_BASE select ARCH_HAS_STRICT_MODULE_RWX if ARCH_HAS_STRICT_KERNEL_RWX select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST select ARCH_HAS_UACCESS_FLUSHCACHE @@ -548,7 +548,7 @@ config PPC64_SUPPORTS_MEMORY_FAILURE config KEXEC bool "kexec system call" - depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP)) || PPC_BOOK3E + depends on (PPC_BOOK3S || PPC_85xx || (44x && !SMP)) || PPC_BOOK3E select KEXEC_CORE help kexec is a system call that implements the ability to shutdown your @@ -583,7 +583,7 @@ config ARCH_HAS_KEXEC_PURGATORY config RELOCATABLE bool "Build a relocatable kernel" - depends on PPC64 || (FLATMEM && (44x || FSL_BOOKE)) + depends on PPC64 || (FLATMEM && (44x || PPC_85xx)) select NONSTATIC_KERNEL help This builds a kernel image that is capable of running at the @@ -606,7 +606,7 @@ config RELOCATABLE config RANDOMIZE_BASE bool "Randomize the address of the kernel image" - depends on (FSL_BOOKE && FLATMEM && PPC32) + depends on (PPC_85xx && FLATMEM && PPC32) depends on RELOCATABLE help Randomizes the virtual address at which the kernel image is @@ -625,8 +625,8 @@ config RELOCATABLE_TEST config CRASH_DUMP bool "Build 
a dump capture kernel" - depends on PPC64 || PPC_BOOK3S_32 || FSL_BOOKE || (44x && !SMP) - select RELOCATABLE if PPC64 || 44x || FSL_BOOKE + depends on PPC64 || PPC_BOOK3S_32 || PPC_85xx || (44x && !SMP) + select RELOCATABLE if PPC64 || 44x || PPC_85xx help Build a kernel suitable for use as a dump capture kernel. The same kernel binary can be used as production kernel and dump @@ -815,7 +815,7 @@ config DATA_SHIFT_BOOL depends on ADVANCED_OPTIONS depends on STRICT_KERNEL_RWX || DEBUG_PAGEALLOC || KFENCE depends on PPC_BOOK3S_32 || (PPC_8xx && !PIN_TLB_DATA && !STRICT_KERNEL_RWX) || \ - FSL_BOOKE + PPC_85xx help This option allows you to set the kernel data alignment. When RAM is mapped by blocks, the alignment needs to fit the size and @@ -828,13 +828,13 @@ config DATA_SHIFT default 24 if STRICT_KERNEL_RWX && PPC64 range 17 28 if (STRICT_KERNEL_RWX || DEBUG_PAGEALLOC || KFENCE) && PPC_BOOK3S_32 range 19 23 if (STRICT_KERNEL_RWX || DEBUG_PAGEALLOC || KFENCE) && PPC_8xx - range 20 24 if (STRICT_KERNEL_RWX || DEBUG_PAGEALLOC || KFENCE) && FSL_BOOKE + range 20 24 if (STRICT_KERNEL_RWX || DEBUG_PAGEALLOC || KFENCE) && PPC_85xx default 22 if STRICT_KERNEL_RWX && PPC_BOOK3S_32 default 18 if (DEBUG_PAGEALLOC || KFENCE) && PPC_BOOK3S_32 default 23 if STRICT_KERNEL_RWX && PPC_8xx default 23 if (DEBUG_PAGEALLOC || KFENCE) && PPC_8xx && PIN_TLB_DATA default 19 if (DEBUG_PAGEALLOC || KFENCE) && PPC_8xx - default 24 if STRICT_KERNEL_RWX && FSL_BOOKE + default 24 if STRICT_KERNEL_RWX && PPC_85xx default PPC_PAGE_SHIFT help On Book3S 32 (603+), DBATs are used to map kernel text and rodata RO. @@ -1150,7 +1150,7 @@ config LOWMEM_SIZE config LOWMEM_CAM_NUM_BOOL bool "Set number of CAMs to use to map low memory" - depends on ADVANCED_OPTIONS && FSL_BOOKE + depends on ADVANCED_OPTIONS && PPC_85xx help This option allows you to set the maximum number of CAM slots that will be used to map low memory. There are a limited number of slots @@ -1161,7 +1161,7 @@ config LOWMEM_CAM_NUM_BOOL Say N here unless you know what you are doing. config LOWMEM_CAM_NUM - depends on FSL_BOOKE + depends on PPC_85xx int "Number of CAMs to use to map low memory" if LOWMEM_CAM_NUM_BOOL default 3 if !STRICT_KERNEL_RWX default 9 if DATA_SHIFT >= 24 @@ -1170,7 +1170,7 @@ config LOWMEM_CAM_NUM config DYNAMIC_MEMSTART bool "Enable page aligned dynamic load address for kernel" - depends on ADVANCED_OPTIONS && FLATMEM && (FSL_BOOKE || 44x) + depends on ADVANCED_OPTIONS && FLATMEM && (PPC_85xx || 44x) select NONSTATIC_KERNEL help This option enables the kernel to be loaded at any page aligned @@ -1219,7 +1219,7 @@ config KERNEL_START config PHYSICAL_START_BOOL bool "Set physical address where the kernel is loaded" - depends on ADVANCED_OPTIONS && FLATMEM && FSL_BOOKE + depends on ADVANCED_OPTIONS && FLATMEM && PPC_85xx help This gives the physical address where the kernel is loaded. @@ -1232,7 +1232,7 @@ config PHYSICAL_START config PHYSICAL_ALIGN hex - default "0x04000000" if FSL_BOOKE + default "0x04000000" if PPC_85xx help This value puts the alignment restrictions on physical address where kernel is loaded and run from. 
Kernel is compiled for an diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile index 02742facf895..f6d477c4aa64 100644 --- a/arch/powerpc/Makefile +++ b/arch/powerpc/Makefile @@ -231,7 +231,7 @@ head-$(CONFIG_PPC_BOOK3S_32) := arch/powerpc/kernel/head_book3s_32.o head-$(CONFIG_PPC_8xx) := arch/powerpc/kernel/head_8xx.o head-$(CONFIG_40x) := arch/powerpc/kernel/head_40x.o head-$(CONFIG_44x) := arch/powerpc/kernel/head_44x.o -head-$(CONFIG_FSL_BOOKE) := arch/powerpc/kernel/head_fsl_booke.o +head-$(CONFIG_PPC_85xx) := arch/powerpc/kernel/head_85xx.o head-$(CONFIG_PPC64) += arch/powerpc/kernel/entry_64.o head-$(CONFIG_PPC_FPU) += arch/powerpc/kernel/fpu.o diff --git a/arch/powerpc/include/asm/kexec.h b/arch/powerpc/include/asm/kexec.h index f8d122d16af4..a1ddba01e7d1 100644 --- a/arch/powerpc/include/asm/kexec.h +++ b/arch/powerpc/include/asm/kexec.h @@ -3,7 +3,7 @@ #define _ASM_POWERPC_KEXEC_H #ifdef __KERNEL__ -#if defined(CONFIG_FSL_BOOKE) || defined(CONFIG_44x) +#if defined(CONFIG_PPC_85xx) || defined(CONFIG_44x) /* * On FSL-BookE we setup a 1:1 mapping which covers the first 2GiB of memory diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h index 9091e4904a6b..197e7552d9f6 100644 --- a/arch/powerpc/include/asm/nohash/32/pgtable.h +++ b/arch/powerpc/include/asm/nohash/32/pgtable.h @@ -130,10 +130,10 @@ void unmap_kernel_page(unsigned long va); #include #elif defined(CONFIG_44x) #include -#elif defined(CONFIG_FSL_BOOKE) && defined(CONFIG_PTE_64BIT) +#elif defined(CONFIG_PPC_85xx) && defined(CONFIG_PTE_64BIT) #include -#elif defined(CONFIG_FSL_BOOKE) -#include +#elif defined(CONFIG_PPC_85xx) +#include #elif defined(CONFIG_PPC_8xx) #include #endif diff --git a/arch/powerpc/include/asm/nohash/32/pte-fsl-booke.h b/arch/powerpc/include/asm/nohash/32/pte-85xx.h similarity index 94% rename from arch/powerpc/include/asm/nohash/32/pte-fsl-booke.h rename to arch/powerpc/include/asm/nohash/32/pte-85xx.h index 0fc1bd42bb3e..93fb8e11a3f1 100644 --- a/arch/powerpc/include/asm/nohash/32/pte-fsl-booke.h +++ b/arch/powerpc/include/asm/nohash/32/pte-85xx.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _ASM_POWERPC_NOHASH_32_PTE_FSL_BOOKE_H -#define _ASM_POWERPC_NOHASH_32_PTE_FSL_BOOKE_H +#ifndef _ASM_POWERPC_NOHASH_32_PTE_85xx_H +#define _ASM_POWERPC_NOHASH_32_PTE_85xx_H #ifdef __KERNEL__ /* PTE bit definitions for Freescale BookE SW loaded TLB MMU based @@ -71,4 +71,4 @@ #define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC) #endif /* __KERNEL__ */ -#endif /* _ASM_POWERPC_NOHASH_32_PTE_FSL_BOOKE_H */ +#endif /* _ASM_POWERPC_NOHASH_32_PTE_FSL_85xx_H */ diff --git a/arch/powerpc/include/asm/nohash/tlbflush.h b/arch/powerpc/include/asm/nohash/tlbflush.h index 698935d4f72d..bdaf34ad41ea 100644 --- a/arch/powerpc/include/asm/nohash/tlbflush.h +++ b/arch/powerpc/include/asm/nohash/tlbflush.h @@ -18,7 +18,7 @@ /* * TLB flushing for software loaded TLB chips * - * TODO: (CONFIG_FSL_BOOKE) determine if flush_tlb_range & + * TODO: (CONFIG_PPC_85xx) determine if flush_tlb_range & * flush_tlb_kernel_range are best implemented as tlbia vs * specific tlbie's */ diff --git a/arch/powerpc/kernel/fsl_booke_entry_mapping.S b/arch/powerpc/kernel/85xx_entry_mapping.S similarity index 100% rename from arch/powerpc/kernel/fsl_booke_entry_mapping.S rename to arch/powerpc/kernel/85xx_entry_mapping.S diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index 06d2d1f78f71..4483cae7dc9f 100644 --- 
a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile @@ -106,8 +106,8 @@ endif obj-$(CONFIG_PPC_BOOK3S_32) += idle_6xx.o l2cr_6xx.o cpu_setup_6xx.o obj-$(CONFIG_TAU) += tau_6xx.o obj-$(CONFIG_HIBERNATION) += swsusp.o suspend.o -ifdef CONFIG_FSL_BOOKE -obj-$(CONFIG_HIBERNATION) += swsusp_booke.o +ifdef CONFIG_PPC_85xx +obj-$(CONFIG_HIBERNATION) += swsusp_85xx.o else obj-$(CONFIG_HIBERNATION) += swsusp_$(BITS).o endif @@ -122,7 +122,7 @@ extra-$(CONFIG_PPC64) := head_64.o extra-$(CONFIG_PPC_BOOK3S_32) := head_book3s_32.o extra-$(CONFIG_40x) := head_40x.o extra-$(CONFIG_44x) := head_44x.o -extra-$(CONFIG_FSL_BOOKE) := head_fsl_booke.o +extra-$(CONFIG_PPC_85xx) := head_85xx.o extra-$(CONFIG_PPC_8xx) := head_8xx.o extra-y += vmlinux.lds diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_85xx.S similarity index 99% rename from arch/powerpc/kernel/head_fsl_booke.S rename to arch/powerpc/kernel/head_85xx.S index f0db4f52bc00..48b168b5dc57 100644 --- a/arch/powerpc/kernel/head_fsl_booke.S +++ b/arch/powerpc/kernel/head_85xx.S @@ -129,7 +129,7 @@ _GLOBAL(_start); /* * For the second relocation, we already set the right tlb entries - * for the kernel space, so skip the code in fsl_booke_entry_mapping.S + * for the kernel space, so skip the code in 85xx_entry_mapping.S */ cmpwi r19,1 beq set_ivor @@ -159,7 +159,7 @@ _GLOBAL(__early_start) lwz r20,0(r20) #define ENTRY_MAPPING_BOOT_SETUP -#include "fsl_booke_entry_mapping.S" +#include "85xx_entry_mapping.S" #undef ENTRY_MAPPING_BOOT_SETUP set_ivor: diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c index a20deebf233f..1a1e9995dae3 100644 --- a/arch/powerpc/kernel/kgdb.c +++ b/arch/powerpc/kernel/kgdb.c @@ -47,7 +47,7 @@ static struct hard_trap_info { 0x0c00, 0x14 /* SIGCHLD */ }, /* system call */ #ifdef CONFIG_BOOKE_OR_40x { 0x2002, 0x05 /* SIGTRAP */ }, /* debug */ -#if defined(CONFIG_FSL_BOOKE) +#if defined(CONFIG_PPC_85xx) { 0x2010, 0x08 /* SIGFPE */ }, /* spe unavailable */ { 0x2020, 0x08 /* SIGFPE */ }, /* spe unavailable */ { 0x2030, 0x08 /* SIGFPE */ }, /* spe fp data */ @@ -57,7 +57,7 @@ static struct hard_trap_info { 0x2900, 0x08 /* SIGFPE */ }, /* apu unavailable */ { 0x3100, 0x0e /* SIGALRM */ }, /* fixed interval timer */ { 0x3200, 0x02 /* SIGINT */ }, /* watchdog */ -#else /* ! CONFIG_FSL_BOOKE */ +#else /* ! 
CONFIG_PPC_85xx */ { 0x1000, 0x0e /* SIGALRM */ }, /* prog interval timer */ { 0x1010, 0x0e /* SIGALRM */ }, /* fixed interval timer */ { 0x1020, 0x02 /* SIGINT */ }, /* watchdog */ @@ -208,7 +208,7 @@ void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p) for (reg = 14; reg < 32; reg++) PACK64(ptr, regs->gpr[reg]); -#ifdef CONFIG_FSL_BOOKE +#ifdef CONFIG_PPC_85xx #ifdef CONFIG_SPE for (reg = 0; reg < 32; reg++) PACK64(ptr, p->thread.evr[reg]); @@ -234,7 +234,7 @@ void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p) #define GDB_SIZEOF_REG sizeof(unsigned long) #define GDB_SIZEOF_REG_U32 sizeof(u32) -#ifdef CONFIG_FSL_BOOKE +#ifdef CONFIG_PPC_85xx #define GDB_SIZEOF_FLOAT_REG sizeof(unsigned long) #else #define GDB_SIZEOF_FLOAT_REG sizeof(u64) @@ -329,7 +329,7 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs) if (regno >= 32 && regno < 64) { /* FP registers 32 -> 63 */ -#if defined(CONFIG_FSL_BOOKE) && defined(CONFIG_SPE) +#if defined(CONFIG_PPC_85xx) && defined(CONFIG_SPE) if (current) memcpy(mem, ¤t->thread.evr[regno-32], dbg_reg_def[regno].size); @@ -355,7 +355,7 @@ int dbg_set_reg(int regno, void *mem, struct pt_regs *regs) if (regno >= 32 && regno < 64) { /* FP registers 32 -> 63 */ -#if defined(CONFIG_FSL_BOOKE) && defined(CONFIG_SPE) +#if defined(CONFIG_PPC_85xx) && defined(CONFIG_SPE) memcpy(¤t->thread.evr[regno-32], mem, dbg_reg_def[regno].size); #else diff --git a/arch/powerpc/kernel/swsusp_booke.S b/arch/powerpc/kernel/swsusp_85xx.S similarity index 100% rename from arch/powerpc/kernel/swsusp_booke.S rename to arch/powerpc/kernel/swsusp_85xx.S diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index dcf4046f8565..f181c434289e 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -2085,7 +2085,7 @@ DEFINE_INTERRUPT_HANDLER(altivec_assist_exception) } #endif /* CONFIG_ALTIVEC */ -#ifdef CONFIG_FSL_BOOKE +#ifdef CONFIG_PPC_85xx DEFINE_INTERRUPT_HANDLER(CacheLockingException) { unsigned long error_code = regs->dsisr; @@ -2098,7 +2098,7 @@ DEFINE_INTERRUPT_HANDLER(CacheLockingException) _exception(SIGILL, regs, ILL_PRVOPC, regs->nip); return; } -#endif /* CONFIG_FSL_BOOKE */ +#endif /* CONFIG_PPC_85xx */ #ifdef CONFIG_SPE DEFINE_INTERRUPT_HANDLER(SPEFloatingPointException) diff --git a/arch/powerpc/kexec/core_32.c b/arch/powerpc/kexec/core_32.c index b50aed48d09d..c95f96850c9e 100644 --- a/arch/powerpc/kexec/core_32.c +++ b/arch/powerpc/kexec/core_32.c @@ -55,7 +55,7 @@ void default_machine_kexec(struct kimage *image) reboot_code_buffer + KEXEC_CONTROL_PAGE_SIZE); printk(KERN_INFO "Bye!\n"); - if (!IS_ENABLED(CONFIG_FSL_BOOKE) && !IS_ENABLED(CONFIG_44x)) + if (!IS_ENABLED(CONFIG_PPC_85xx) && !IS_ENABLED(CONFIG_44x)) relocate_new_kernel(page_list, reboot_code_buffer_phys, image->start); /* now call it */ diff --git a/arch/powerpc/kexec/relocate_32.S b/arch/powerpc/kexec/relocate_32.S index cf6e52bdf8d8..d9f0dd9b34ff 100644 --- a/arch/powerpc/kexec/relocate_32.S +++ b/arch/powerpc/kexec/relocate_32.S @@ -25,14 +25,14 @@ relocate_new_kernel: /* r4 = reboot_code_buffer */ /* r5 = start_address */ -#ifdef CONFIG_FSL_BOOKE +#ifdef CONFIG_PPC_85xx mr r29, r3 mr r30, r4 mr r31, r5 #define ENTRY_MAPPING_KEXEC_SETUP -#include +#include #undef ENTRY_MAPPING_KEXEC_SETUP mr r3, r29 diff --git a/arch/powerpc/kvm/booke_interrupts.S b/arch/powerpc/kvm/booke_interrupts.S index 6fa82efe833b..205545d820a1 100644 --- a/arch/powerpc/kvm/booke_interrupts.S +++ 
b/arch/powerpc/kvm/booke_interrupts.S @@ -223,7 +223,7 @@ _GLOBAL(kvmppc_resume_host) lwz r3, VCPU_HOST_PID(r4) mtspr SPRN_PID, r3 -#ifdef CONFIG_FSL_BOOKE +#ifdef CONFIG_PPC_85xx /* we cheat and know that Linux doesn't use PID1 which is always 0 */ lis r3, 0 mtspr SPRN_PID1, r3 @@ -406,7 +406,7 @@ lightweight_exit: lwz r3, VCPU_SHADOW_PID(r4) mtspr SPRN_PID, r3 -#ifdef CONFIG_FSL_BOOKE +#ifdef CONFIG_PPC_85xx lwz r3, VCPU_SHADOW_PID1(r4) mtspr SPRN_PID1, r3 #endif diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c index 3142d7617412..d4cc3749e621 100644 --- a/arch/powerpc/mm/init_32.c +++ b/arch/powerpc/mm/init_32.c @@ -85,12 +85,12 @@ void __init MMU_init(void) total_lowmem = total_memory = memblock_end_of_DRAM() - memstart_addr; lowmem_end_addr = memstart_addr + total_lowmem; -#ifdef CONFIG_FSL_BOOKE +#ifdef CONFIG_PPC_85xx /* Freescale Book-E parts expect lowmem to be mapped by fixed TLB * entries, so we need to adjust lowmem to match the amount we can map * in the fixed entries */ adjust_total_lowmem(); -#endif /* CONFIG_FSL_BOOKE */ +#endif /* CONFIG_PPC_85xx */ if (total_lowmem > __max_low_memory) { total_lowmem = __max_low_memory; diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h index 0e3528aec49e..88805757d0c9 100644 --- a/arch/powerpc/mm/mmu_decl.h +++ b/arch/powerpc/mm/mmu_decl.h @@ -146,9 +146,9 @@ struct tlbcam { extern struct tlbcam TLBCAM[NUM_TLBCAMS]; #endif -#if defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_FSL_BOOKE) || defined(CONFIG_PPC_8xx) +#if defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_PPC_85xx) || defined(CONFIG_PPC_8xx) /* 6xx have BATS */ -/* FSL_BOOKE have TLBCAM */ +/* PPC_85xx have TLBCAM */ /* 8xx have LTLB */ phys_addr_t v_block_mapped(unsigned long va); unsigned long p_block_mapped(phys_addr_t pa); diff --git a/arch/powerpc/mm/nohash/fsl_book3e.c b/arch/powerpc/mm/nohash/fsl_book3e.c index c1ad173de318..40a4e69ae1a9 100644 --- a/arch/powerpc/mm/nohash/fsl_book3e.c +++ b/arch/powerpc/mm/nohash/fsl_book3e.c @@ -59,7 +59,7 @@ static struct { phys_addr_t phys; } tlbcam_addrs[NUM_TLBCAMS]; -#ifdef CONFIG_FSL_BOOKE +#ifdef CONFIG_PPC_85xx /* * Return PA for this VA if it is mapped by a CAM, or 0 */ diff --git a/arch/powerpc/mm/nohash/tlb.c b/arch/powerpc/mm/nohash/tlb.c index 5e7ccb48b79c..f21896ebdc5a 100644 --- a/arch/powerpc/mm/nohash/tlb.c +++ b/arch/powerpc/mm/nohash/tlb.c @@ -130,7 +130,7 @@ struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = { .enc = BOOK3E_PAGESZ_1GB, }, }; -#endif /* CONFIG_FSL_BOOKE */ +#endif /* CONFIG_PPC_85xx */ static inline int mmu_get_tsize(int psize) { diff --git a/arch/powerpc/mm/nohash/tlb_low.S b/arch/powerpc/mm/nohash/tlb_low.S index d62b613a0d5d..d378031246ab 100644 --- a/arch/powerpc/mm/nohash/tlb_low.S +++ b/arch/powerpc/mm/nohash/tlb_low.S @@ -221,7 +221,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_476_DD2) blr #endif /* CONFIG_PPC_47x */ -#elif defined(CONFIG_FSL_BOOKE) +#elif defined(CONFIG_PPC_85xx) /* * FSL BookE implementations. 
* diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype index 19fd95a06352..11780074eb23 100644 --- a/arch/powerpc/platforms/Kconfig.cputype +++ b/arch/powerpc/platforms/Kconfig.cputype @@ -324,11 +324,6 @@ config BOOKE_OR_40x depends on BOOKE || 40x default y -config FSL_BOOKE - bool - depends on E500 && PPC32 - default y - # this is for common code between PPC32 & PPC64 FSL BOOKE config PPC_FSL_BOOK3E bool @@ -337,7 +332,7 @@ config PPC_FSL_BOOK3E select PPC_SMP_MUXED_IPI select PPC_DOORBELL select PPC_KUEP - default y if FSL_BOOKE + default y if PPC_85xx config PTE_64BIT bool @@ -485,7 +480,7 @@ config PPC_MMU_NOHASH config PPC_BOOK3E_MMU def_bool y - depends on FSL_BOOKE || PPC_BOOK3E + depends on PPC_85xx || PPC_BOOK3E config PPC_HAVE_PMU_SUPPORT bool @@ -508,7 +503,7 @@ config FORCE_SMP select SMP config SMP - depends on PPC_BOOK3S || PPC_BOOK3E || FSL_BOOKE || PPC_47x + depends on PPC_BOOK3S || PPC_BOOK3E || PPC_85xx || PPC_47x select GENERIC_IRQ_MIGRATION bool "Symmetric multi-processing support" if !FORCE_SMP help From d7216567c65cbed655f9bf87ef906f9246d6f698 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Mon, 19 Sep 2022 19:01:32 +0200 Subject: [PATCH 110/214] powerpc/cputable: Split cpu_specs[] for mpc85xx and e500mc e500v1/v2 and e500mc are said to be mutually exclusive in Kconfig. Split e500 cpu_specs[] and then restrict the non e500mc to PPC32 which is then 85xx. Signed-off-by: Christophe Leroy [mpe: Tweak formatting] Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/553b901ea91e393df231103da4b018e9b251b0e9.1663606876.git.christophe.leroy@csgroup.eu --- arch/powerpc/kernel/cpu_specs.h | 6 +- arch/powerpc/kernel/cpu_specs_85xx.h | 57 ++++++++++++++++ .../{cpu_specs_e500.h => cpu_specs_e500mc.h} | 68 ++----------------- 3 files changed, 65 insertions(+), 66 deletions(-) create mode 100644 arch/powerpc/kernel/cpu_specs_85xx.h rename arch/powerpc/kernel/{cpu_specs_e500.h => cpu_specs_e500mc.h} (51%) diff --git a/arch/powerpc/kernel/cpu_specs.h b/arch/powerpc/kernel/cpu_specs.h index 658c68acf96e..85ded3f77204 100644 --- a/arch/powerpc/kernel/cpu_specs.h +++ b/arch/powerpc/kernel/cpu_specs.h @@ -14,8 +14,10 @@ #include "cpu_specs_8xx.h" #endif -#ifdef CONFIG_E500 -#include "cpu_specs_e500.h" +#ifdef CONFIG_PPC_E500MC +#include "cpu_specs_e500mc.h" +#elif defined(CONFIG_PPC_85xx) +#include "cpu_specs_85xx.h" #endif #ifdef CONFIG_PPC_BOOK3S_32 diff --git a/arch/powerpc/kernel/cpu_specs_85xx.h b/arch/powerpc/kernel/cpu_specs_85xx.h new file mode 100644 index 000000000000..aaae202c1a89 --- /dev/null +++ b/arch/powerpc/kernel/cpu_specs_85xx.h @@ -0,0 +1,57 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (C) 2001 Ben. 
Herrenschmidt (benh@kernel.crashing.org) + */ + +#define COMMON_USER_BOOKE (PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | \ + PPC_FEATURE_BOOKE) + +static struct cpu_spec cpu_specs[] __initdata = { + { /* e500 */ + .pvr_mask = 0xffff0000, + .pvr_value = 0x80200000, + .cpu_name = "e500", + .cpu_features = CPU_FTRS_E500, + .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_SPE_COMP | + PPC_FEATURE_HAS_EFP_SINGLE_COMP, + .cpu_user_features2 = PPC_FEATURE2_ISEL, + .mmu_features = MMU_FTR_TYPE_FSL_E, + .icache_bsize = 32, + .dcache_bsize = 32, + .num_pmcs = 4, + .cpu_setup = __setup_cpu_e500v1, + .machine_check = machine_check_e500, + .platform = "ppc8540", + }, + { /* e500v2 */ + .pvr_mask = 0xffff0000, + .pvr_value = 0x80210000, + .cpu_name = "e500v2", + .cpu_features = CPU_FTRS_E500_2, + .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_SPE_COMP | + PPC_FEATURE_HAS_EFP_SINGLE_COMP | + PPC_FEATURE_HAS_EFP_DOUBLE_COMP, + .cpu_user_features2 = PPC_FEATURE2_ISEL, + .mmu_features = MMU_FTR_TYPE_FSL_E | MMU_FTR_BIG_PHYS, + .icache_bsize = 32, + .dcache_bsize = 32, + .num_pmcs = 4, + .cpu_setup = __setup_cpu_e500v2, + .machine_check = machine_check_e500, + .platform = "ppc8548", + .cpu_down_flush = cpu_down_flush_e500v2, + }, + { /* default match */ + .pvr_mask = 0x00000000, + .pvr_value = 0x00000000, + .cpu_name = "(generic E500 PPC)", + .cpu_features = CPU_FTRS_E500, + .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_SPE_COMP | + PPC_FEATURE_HAS_EFP_SINGLE_COMP, + .mmu_features = MMU_FTR_TYPE_FSL_E, + .icache_bsize = 32, + .dcache_bsize = 32, + .machine_check = machine_check_e500, + .platform = "powerpc", + } +}; diff --git a/arch/powerpc/kernel/cpu_specs_e500.h b/arch/powerpc/kernel/cpu_specs_e500mc.h similarity index 51% rename from arch/powerpc/kernel/cpu_specs_e500.h rename to arch/powerpc/kernel/cpu_specs_e500mc.h index 1f366f2a0215..ceb06b109f83 100644 --- a/arch/powerpc/kernel/cpu_specs_e500.h +++ b/arch/powerpc/kernel/cpu_specs_e500mc.h @@ -16,44 +16,6 @@ static struct cpu_spec cpu_specs[] __initdata = { #ifdef CONFIG_PPC32 -#ifndef CONFIG_PPC_E500MC - { /* e500 */ - .pvr_mask = 0xffff0000, - .pvr_value = 0x80200000, - .cpu_name = "e500", - .cpu_features = CPU_FTRS_E500, - .cpu_user_features = COMMON_USER_BOOKE | - PPC_FEATURE_HAS_SPE_COMP | - PPC_FEATURE_HAS_EFP_SINGLE_COMP, - .cpu_user_features2 = PPC_FEATURE2_ISEL, - .mmu_features = MMU_FTR_TYPE_FSL_E, - .icache_bsize = 32, - .dcache_bsize = 32, - .num_pmcs = 4, - .cpu_setup = __setup_cpu_e500v1, - .machine_check = machine_check_e500, - .platform = "ppc8540", - }, - { /* e500v2 */ - .pvr_mask = 0xffff0000, - .pvr_value = 0x80210000, - .cpu_name = "e500v2", - .cpu_features = CPU_FTRS_E500_2, - .cpu_user_features = COMMON_USER_BOOKE | - PPC_FEATURE_HAS_SPE_COMP | - PPC_FEATURE_HAS_EFP_SINGLE_COMP | - PPC_FEATURE_HAS_EFP_DOUBLE_COMP, - .cpu_user_features2 = PPC_FEATURE2_ISEL, - .mmu_features = MMU_FTR_TYPE_FSL_E | MMU_FTR_BIG_PHYS, - .icache_bsize = 32, - .dcache_bsize = 32, - .num_pmcs = 4, - .cpu_setup = __setup_cpu_e500v2, - .machine_check = machine_check_e500, - .platform = "ppc8548", - .cpu_down_flush = cpu_down_flush_e500v2, - }, -#else { /* e500mc */ .pvr_mask = 0xffff0000, .pvr_value = 0x80230000, @@ -61,8 +23,7 @@ static struct cpu_spec cpu_specs[] __initdata = { .cpu_features = CPU_FTRS_E500MC, .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, .cpu_user_features2 = PPC_FEATURE2_ISEL, - .mmu_features = MMU_FTR_TYPE_FSL_E | MMU_FTR_BIG_PHYS | - MMU_FTR_USE_TLBILX, + .mmu_features = MMU_FTR_TYPE_FSL_E | 
MMU_FTR_BIG_PHYS | MMU_FTR_USE_TLBILX, .icache_bsize = 64, .dcache_bsize = 64, .num_pmcs = 4, @@ -71,9 +32,7 @@ static struct cpu_spec cpu_specs[] __initdata = { .platform = "ppce500mc", .cpu_down_flush = cpu_down_flush_e500mc, }, -#endif /* CONFIG_PPC_E500MC */ #endif /* CONFIG_PPC32 */ -#ifdef CONFIG_PPC_E500MC { /* e5500 */ .pvr_mask = 0xffff0000, .pvr_value = 0x80240000, @@ -81,8 +40,7 @@ static struct cpu_spec cpu_specs[] __initdata = { .cpu_features = CPU_FTRS_E5500, .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, .cpu_user_features2 = PPC_FEATURE2_ISEL, - .mmu_features = MMU_FTR_TYPE_FSL_E | MMU_FTR_BIG_PHYS | - MMU_FTR_USE_TLBILX, + .mmu_features = MMU_FTR_TYPE_FSL_E | MMU_FTR_BIG_PHYS | MMU_FTR_USE_TLBILX, .icache_bsize = 64, .dcache_bsize = 64, .num_pmcs = 4, @@ -100,10 +58,9 @@ static struct cpu_spec cpu_specs[] __initdata = { .cpu_name = "e6500", .cpu_features = CPU_FTRS_E6500, .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU | - PPC_FEATURE_HAS_ALTIVEC_COMP, + PPC_FEATURE_HAS_ALTIVEC_COMP, .cpu_user_features2 = PPC_FEATURE2_ISEL, - .mmu_features = MMU_FTR_TYPE_FSL_E | MMU_FTR_BIG_PHYS | - MMU_FTR_USE_TLBILX, + .mmu_features = MMU_FTR_TYPE_FSL_E | MMU_FTR_BIG_PHYS | MMU_FTR_USE_TLBILX, .icache_bsize = 64, .dcache_bsize = 64, .num_pmcs = 6, @@ -115,21 +72,4 @@ static struct cpu_spec cpu_specs[] __initdata = { .platform = "ppce6500", .cpu_down_flush = cpu_down_flush_e6500, }, -#endif /* CONFIG_PPC_E500MC */ -#ifdef CONFIG_PPC32 - { /* default match */ - .pvr_mask = 0x00000000, - .pvr_value = 0x00000000, - .cpu_name = "(generic E500 PPC)", - .cpu_features = CPU_FTRS_E500, - .cpu_user_features = COMMON_USER_BOOKE | - PPC_FEATURE_HAS_SPE_COMP | - PPC_FEATURE_HAS_EFP_SINGLE_COMP, - .mmu_features = MMU_FTR_TYPE_FSL_E, - .icache_bsize = 32, - .dcache_bsize = 32, - .machine_check = machine_check_e500, - .platform = "powerpc", - } -#endif /* CONFIG_PPC32 */ }; From e0d68273d7069537701bb91c51d90d1e12aacc33 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Mon, 19 Sep 2022 19:01:33 +0200 Subject: [PATCH 111/214] powerpc: Remove CONFIG_PPC_BOOK3E CONFIG_PPC_BOOK3E is redundant with CONFIG_PPC_BOOK3E_64. The later is more explicit about the fact that it's a 64 bits target. Remove CONFIG_PPC_BOOK3E. 
Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/5d0891490813c19cdcfc04678f512ea68cba3e64.1663606876.git.christophe.leroy@csgroup.eu --- arch/powerpc/Kconfig | 2 +- arch/powerpc/include/asm/cputable.h | 4 +-- arch/powerpc/include/asm/interrupt.h | 2 +- arch/powerpc/include/asm/nohash/pgalloc.h | 2 +- arch/powerpc/include/asm/paca.h | 8 ++--- arch/powerpc/include/asm/ppc_asm.h | 4 +-- arch/powerpc/kernel/asm-offsets.c | 6 ++-- arch/powerpc/kernel/entry_64.S | 6 ++-- arch/powerpc/kernel/head_64.S | 40 +++++++++++------------ arch/powerpc/kernel/misc_64.S | 6 ++-- arch/powerpc/kernel/paca.c | 6 ++-- arch/powerpc/kernel/setup.h | 2 +- arch/powerpc/kernel/setup_64.c | 8 ++--- arch/powerpc/kernel/vmlinux.lds.S | 2 +- arch/powerpc/kexec/core_64.c | 2 +- arch/powerpc/mm/mmu_decl.h | 6 ++-- arch/powerpc/mm/nohash/tlb_low.S | 2 +- arch/powerpc/platforms/85xx/Kconfig | 2 +- arch/powerpc/platforms/Kconfig.cputype | 10 ++---- arch/powerpc/xmon/xmon.c | 16 ++++----- 20 files changed, 66 insertions(+), 70 deletions(-) diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index dafb14f44672..9d721cac20b8 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -548,7 +548,7 @@ config PPC64_SUPPORTS_MEMORY_FAILURE config KEXEC bool "kexec system call" - depends on (PPC_BOOK3S || PPC_85xx || (44x && !SMP)) || PPC_BOOK3E + depends on (PPC_BOOK3S || PPC_85xx || (44x && !SMP)) || PPC_BOOK3E_64 select KEXEC_CORE help kexec is a system call that implements the ability to shutdown your diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h index ae8c3e13cfce..27875f0b7bc7 100644 --- a/arch/powerpc/include/asm/cputable.h +++ b/arch/powerpc/include/asm/cputable.h @@ -463,7 +463,7 @@ static inline void cpu_feature_keys_init(void) { } #define CPU_FTRS_COMPATIBLE (CPU_FTR_PPCAS_ARCH_V2) #ifdef CONFIG_PPC64 -#ifdef CONFIG_PPC_BOOK3E +#ifdef CONFIG_PPC_BOOK3E_64 #define CPU_FTRS_POSSIBLE (CPU_FTRS_E6500 | CPU_FTRS_E5500) #else #ifdef CONFIG_CPU_LITTLE_ENDIAN @@ -521,7 +521,7 @@ enum { #endif /* __powerpc64__ */ #ifdef CONFIG_PPC64 -#ifdef CONFIG_PPC_BOOK3E +#ifdef CONFIG_PPC_BOOK3E_64 #define CPU_FTRS_ALWAYS (CPU_FTRS_E6500 & CPU_FTRS_E5500) #else diff --git a/arch/powerpc/include/asm/interrupt.h b/arch/powerpc/include/asm/interrupt.h index 8069dbc4b8d1..84a1cdc3204c 100644 --- a/arch/powerpc/include/asm/interrupt.h +++ b/arch/powerpc/include/asm/interrupt.h @@ -281,7 +281,7 @@ static inline bool nmi_disables_ftrace(struct pt_regs *regs) if (TRAP(regs) == INTERRUPT_PERFMON) return false; } - if (IS_ENABLED(CONFIG_PPC_BOOK3E)) { + if (IS_ENABLED(CONFIG_PPC_BOOK3E_64)) { if (TRAP(regs) == INTERRUPT_PERFMON) return false; } diff --git a/arch/powerpc/include/asm/nohash/pgalloc.h b/arch/powerpc/include/asm/nohash/pgalloc.h index 29c43665a753..4b62376318e1 100644 --- a/arch/powerpc/include/asm/nohash/pgalloc.h +++ b/arch/powerpc/include/asm/nohash/pgalloc.h @@ -15,7 +15,7 @@ static inline void tlb_flush_pgtable(struct mmu_gather *tlb, { } -#endif /* !CONFIG_PPC_BOOK3E */ +#endif /* !CONFIG_PPC_BOOK3E_64 */ static inline pgd_t *pgd_alloc(struct mm_struct *mm) { diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h index 3537b0500f4d..09f1790d0ae1 100644 --- a/arch/powerpc/include/asm/paca.h +++ b/arch/powerpc/include/asm/paca.h @@ -18,7 +18,7 @@ #include #include #include -#ifdef CONFIG_PPC_BOOK3E +#ifdef CONFIG_PPC_BOOK3E_64 #include #else #include @@ -127,7 +127,7 @@ struct paca_struct { #endif #endif /* 
CONFIG_PPC_BOOK3S_64 */ -#ifdef CONFIG_PPC_BOOK3E +#ifdef CONFIG_PPC_BOOK3E_64 u64 exgen[8] __aligned(0x40); /* Keep pgd in the same cacheline as the start of extlb */ pgd_t *pgd __aligned(0x40); /* Current PGD */ @@ -151,7 +151,7 @@ struct paca_struct { void *dbg_kstack; struct tlb_core_data tcd; -#endif /* CONFIG_PPC_BOOK3E */ +#endif /* CONFIG_PPC_BOOK3E_64 */ #ifdef CONFIG_PPC_64S_HASH_MMU unsigned char mm_ctx_low_slices_psize[BITS_PER_LONG / BITS_PER_BYTE]; @@ -168,7 +168,7 @@ struct paca_struct { #ifdef CONFIG_PPC64 u64 exit_save_r1; /* Syscall/interrupt R1 save */ #endif -#ifdef CONFIG_PPC_BOOK3E +#ifdef CONFIG_PPC_BOOK3E_64 u16 trap_save; /* Used when bad stack is encountered */ #endif #ifdef CONFIG_PPC_BOOK3S_64 diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h index 83c02f5a7f2a..55149a0384db 100644 --- a/arch/powerpc/include/asm/ppc_asm.h +++ b/arch/powerpc/include/asm/ppc_asm.h @@ -709,7 +709,7 @@ END_FTR_SECTION_NESTED(CPU_FTR_CELL_TB_BUG, CPU_FTR_CELL_TB_BUG, 96) * kernel is built for. */ -#ifdef CONFIG_PPC_BOOK3E +#ifdef CONFIG_PPC_BOOK3E_64 #define FIXUP_ENDIAN #else /* @@ -749,7 +749,7 @@ END_FTR_SECTION_NESTED(CPU_FTR_CELL_TB_BUG, CPU_FTR_CELL_TB_BUG, 96) .long 0x2402004c; /* hrfid */ \ 191: -#endif /* !CONFIG_PPC_BOOK3E */ +#endif /* !CONFIG_PPC_BOOK3E_64 */ #endif /* __ASSEMBLY__ */ diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index 8c10f536e478..10ce03052a19 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -197,7 +197,7 @@ int main(void) OFFSET(PACAIRQHAPPENED, paca_struct, irq_happened); OFFSET(PACA_FTRACE_ENABLED, paca_struct, ftrace_enabled); -#ifdef CONFIG_PPC_BOOK3E +#ifdef CONFIG_PPC_BOOK3E_64 OFFSET(PACAPGD, paca_struct, pgd); OFFSET(PACA_KERNELPGD, paca_struct, kernel_pgd); OFFSET(PACA_EXGEN, paca_struct, exgen); @@ -213,7 +213,7 @@ int main(void) OFFSET(TCD_ESEL_NEXT, tlb_core_data, esel_next); OFFSET(TCD_ESEL_MAX, tlb_core_data, esel_max); OFFSET(TCD_ESEL_FIRST, tlb_core_data, esel_first); -#endif /* CONFIG_PPC_BOOK3E */ +#endif /* CONFIG_PPC_BOOK3E_64 */ #ifdef CONFIG_PPC_BOOK3S_64 OFFSET(PACA_EXGEN, paca_struct, exgen); @@ -248,7 +248,7 @@ int main(void) #ifdef CONFIG_PPC64 OFFSET(PACA_EXIT_SAVE_R1, paca_struct, exit_save_r1); #endif -#ifdef CONFIG_PPC_BOOK3E +#ifdef CONFIG_PPC_BOOK3E_64 OFFSET(PACA_TRAP_SAVE, paca_struct, trap_save); #endif OFFSET(PACA_SPRG_VDSO, paca_struct, sprg_vdso); diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index 01ace4c56104..3e2e37e6ecab 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -292,16 +292,16 @@ _GLOBAL(enter_prom) /* Prepare a 32-bit mode big endian MSR */ -#ifdef CONFIG_PPC_BOOK3E +#ifdef CONFIG_PPC_BOOK3E_64 rlwinm r11,r11,0,1,31 mtsrr1 r11 rfi -#else /* CONFIG_PPC_BOOK3E */ +#else /* CONFIG_PPC_BOOK3E_64 */ LOAD_REG_IMMEDIATE(r12, MSR_SF | MSR_LE) andc r11,r11,r12 mtsrr1 r11 RFI_TO_KERNEL -#endif /* CONFIG_PPC_BOOK3E */ +#endif /* CONFIG_PPC_BOOK3E_64 */ 1: /* Return from OF */ FIXUP_ENDIAN diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S index cf2c08902c05..da544ea0ce01 100644 --- a/arch/powerpc/kernel/head_64.S +++ b/arch/powerpc/kernel/head_64.S @@ -143,7 +143,7 @@ DEFINE_FIXED_SYMBOL(__run_at_load, first_256B) .globl __secondary_hold __secondary_hold: FIXUP_ENDIAN -#ifndef CONFIG_PPC_BOOK3E +#ifndef CONFIG_PPC_BOOK3E_64 mfmsr r24 ori r24,r24,MSR_RI mtmsrd r24 /* RI on */ @@ -160,7 +160,7 @@ __secondary_hold: 
sync li r26,0 -#ifdef CONFIG_PPC_BOOK3E +#ifdef CONFIG_PPC_BOOK3E_64 tovirt(r26,r26) #endif /* All secondary cpus wait here until told to start. */ @@ -169,7 +169,7 @@ __secondary_hold: beq 100b #if defined(CONFIG_SMP) || defined(CONFIG_KEXEC_CORE) -#ifdef CONFIG_PPC_BOOK3E +#ifdef CONFIG_PPC_BOOK3E_64 tovirt(r12,r12) #endif mtctr r12 @@ -178,7 +178,7 @@ __secondary_hold: * it may be the case that other platforms have r4 right to * begin with, this gives us some safety in case it is not */ -#ifdef CONFIG_PPC_BOOK3E +#ifdef CONFIG_PPC_BOOK3E_64 mr r4,r25 #else li r4,0 @@ -214,7 +214,7 @@ USE_TEXT_SECTION() #include "interrupt_64.S" -#ifdef CONFIG_PPC_BOOK3E +#ifdef CONFIG_PPC_BOOK3E_64 /* * The booting_thread_hwid holds the thread id we want to boot in cpu * hotplug case. It is set by cpu hotplug code, and is invalid by default. @@ -322,7 +322,7 @@ _GLOBAL(fsl_secondary_thread_init) bl book3e_secondary_thread_init b generic_secondary_common_init -#endif /* CONFIG_PPC_BOOK3E */ +#endif /* CONFIG_PPC_BOOK3E_64 */ /* * On pSeries and most other platforms, secondary processors spin @@ -345,7 +345,7 @@ _GLOBAL(generic_secondary_smp_init) bl relative_toc tovirt(r2,r2) -#ifdef CONFIG_PPC_BOOK3E +#ifdef CONFIG_PPC_BOOK3E_64 /* Book3E initialization */ mr r3,r24 mr r4,r25 @@ -417,7 +417,7 @@ generic_secondary_common_init: b kexec_wait /* next kernel might do better */ 2: SET_PACA(r13) -#ifdef CONFIG_PPC_BOOK3E +#ifdef CONFIG_PPC_BOOK3E_64 addi r12,r13,PACA_EXTLB /* and TLB exc frame in another */ mtspr SPRN_SPRG_TLB_EXFRAME,r12 #endif @@ -519,7 +519,7 @@ __start_initialization_multiplatform: mr r29,r9 #endif -#ifdef CONFIG_PPC_BOOK3E +#ifdef CONFIG_PPC_BOOK3E_64 bl start_initialization_book3e b __after_prom_start #else @@ -540,7 +540,7 @@ __start_initialization_multiplatform: /* Switch off MMU if not already off */ bl __mmu_off b __after_prom_start -#endif /* CONFIG_PPC_BOOK3E */ +#endif /* CONFIG_PPC_BOOK3E_64 */ __REF __boot_from_prom: @@ -587,11 +587,11 @@ __after_prom_start: /* process relocations for the final address of the kernel */ lis r25,PAGE_OFFSET@highest /* compute virtual base of kernel */ sldi r25,r25,32 -#if defined(CONFIG_PPC_BOOK3E) +#if defined(CONFIG_PPC_BOOK3E_64) tovirt(r26,r26) /* on booke, we already run at PAGE_OFFSET */ #endif lwz r7,(FIXED_SYMBOL_ABS_ADDR(__run_at_load))(r26) -#if defined(CONFIG_PPC_BOOK3E) +#if defined(CONFIG_PPC_BOOK3E_64) tophys(r26,r26) #endif cmplwi cr0,r7,1 /* flagged to stay where we are ? */ @@ -599,7 +599,7 @@ __after_prom_start: add r25,r25,r26 1: mr r3,r25 bl relocate -#if defined(CONFIG_PPC_BOOK3E) +#if defined(CONFIG_PPC_BOOK3E_64) /* IVPR needs to be set after relocation. */ bl init_core_book3e #endif @@ -613,11 +613,11 @@ __after_prom_start: * Note: This process overwrites the OF exception vectors. */ li r3,0 /* target addr */ -#ifdef CONFIG_PPC_BOOK3E +#ifdef CONFIG_PPC_BOOK3E_64 tovirt(r3,r3) /* on booke, we already run at PAGE_OFFSET */ #endif mr. 
r4,r26 /* In some cases the loader may */ -#if defined(CONFIG_PPC_BOOK3E) +#if defined(CONFIG_PPC_BOOK3E_64) tovirt(r4,r4) #endif beq 9f /* have already put us at zero */ @@ -630,14 +630,14 @@ __after_prom_start: * variable __run_at_load, if it is set the kernel is treated as relocatable * kernel, otherwise it will be moved to PHYSICAL_START */ -#if defined(CONFIG_PPC_BOOK3E) +#if defined(CONFIG_PPC_BOOK3E_64) tovirt(r26,r26) /* on booke, we already run at PAGE_OFFSET */ #endif lwz r7,(FIXED_SYMBOL_ABS_ADDR(__run_at_load))(r26) cmplwi cr0,r7,1 bne 3f -#ifdef CONFIG_PPC_BOOK3E +#ifdef CONFIG_PPC_BOOK3E_64 LOAD_REG_ADDR(r5, __end_interrupts) LOAD_REG_ADDR(r11, _stext) sub r5,r5,r11 @@ -871,10 +871,10 @@ _GLOBAL(start_secondary_resume) */ enable_64b_mode: mfmsr r11 /* grab the current MSR */ -#ifdef CONFIG_PPC_BOOK3E +#ifdef CONFIG_PPC_BOOK3E_64 oris r11,r11,0x8000 /* CM bit set, we'll set ICM later */ mtmsr r11 -#else /* CONFIG_PPC_BOOK3E */ +#else /* CONFIG_PPC_BOOK3E_64 */ LOAD_REG_IMMEDIATE(r12, MSR_64BIT) or r11,r11,r12 mtmsrd r11 @@ -940,7 +940,7 @@ start_here_multiplatform: std r29,8(r11); #endif -#ifndef CONFIG_PPC_BOOK3E +#ifndef CONFIG_PPC_BOOK3E_64 mfmsr r6 ori r6,r6,MSR_RI mtmsrd r6 /* RI on */ diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S index fd6d8d3a548e..36184cada00b 100644 --- a/arch/powerpc/kernel/misc_64.S +++ b/arch/powerpc/kernel/misc_64.S @@ -286,7 +286,7 @@ kexec_flag: #ifdef CONFIG_KEXEC_CORE -#ifdef CONFIG_PPC_BOOK3E +#ifdef CONFIG_PPC_BOOK3E_64 /* * BOOK3E has no real MMU mode, so we have to setup the initial TLB * for a core to identity map v:0 to p:0. This current implementation @@ -354,7 +354,7 @@ _GLOBAL(kexec_smp_wait) * don't overwrite r3 here, it is live for kexec_wait above. */ real_mode: /* assume normal blr return */ -#ifdef CONFIG_PPC_BOOK3E +#ifdef CONFIG_PPC_BOOK3E_64 /* Create an identity mapping. 
*/ b kexec_create_tlb #else @@ -413,7 +413,7 @@ _GLOBAL(kexec_sequence) lhz r25,PACAHWCPUID(r13) /* get our phys cpu from paca */ /* disable interrupts, we are overwriting kernel data next */ -#ifdef CONFIG_PPC_BOOK3E +#ifdef CONFIG_PPC_BOOK3E_64 wrteei 0 #else mfmsr r3 diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c index dfd097b79160..be8db402e963 100644 --- a/arch/powerpc/kernel/paca.c +++ b/arch/powerpc/kernel/paca.c @@ -186,7 +186,7 @@ void __init initialise_paca(struct paca_struct *new_paca, int cpu) #ifdef CONFIG_PPC_PSERIES new_paca->lppaca_ptr = NULL; #endif -#ifdef CONFIG_PPC_BOOK3E +#ifdef CONFIG_PPC_BOOK3E_64 new_paca->kernel_pgd = swapper_pg_dir; #endif new_paca->lock_token = 0x8000; @@ -203,7 +203,7 @@ void __init initialise_paca(struct paca_struct *new_paca, int cpu) new_paca->slb_shadow_ptr = NULL; #endif -#ifdef CONFIG_PPC_BOOK3E +#ifdef CONFIG_PPC_BOOK3E_64 /* For now -- if we have threads this will be adjusted later */ new_paca->tcd_ptr = &new_paca->tcd; #endif @@ -215,7 +215,7 @@ void setup_paca(struct paca_struct *new_paca) /* Setup r13 */ local_paca = new_paca; -#ifdef CONFIG_PPC_BOOK3E +#ifdef CONFIG_PPC_BOOK3E_64 /* On Book3E, initialize the TLB miss exception frames */ mtspr(SPRN_SPRG_TLB_EXFRAME, local_paca->extlb); #else diff --git a/arch/powerpc/kernel/setup.h b/arch/powerpc/kernel/setup.h index 93f22da12abe..7912bb50a7cb 100644 --- a/arch/powerpc/kernel/setup.h +++ b/arch/powerpc/kernel/setup.h @@ -23,7 +23,7 @@ void check_smt_enabled(void); static inline void check_smt_enabled(void) { } #endif -#if defined(CONFIG_PPC_BOOK3E) && defined(CONFIG_SMP) +#if defined(CONFIG_PPC_BOOK3E_64) && defined(CONFIG_SMP) void setup_tlb_core_data(void); #else static inline void setup_tlb_core_data(void) { } diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index 2b2d0b0fbb30..6434a3f6acb5 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -86,7 +86,7 @@ struct ppc64_caches ppc64_caches = { }; EXPORT_SYMBOL_GPL(ppc64_caches); -#if defined(CONFIG_PPC_BOOK3E) && defined(CONFIG_SMP) +#if defined(CONFIG_PPC_BOOK3E_64) && defined(CONFIG_SMP) void __init setup_tlb_core_data(void) { int cpu; @@ -673,7 +673,7 @@ void __init initialize_cache_info(void) */ __init u64 ppc64_bolted_size(void) { -#ifdef CONFIG_PPC_BOOK3E +#ifdef CONFIG_PPC_BOOK3E_64 /* Freescale BookE bolts the entire linear mapping */ /* XXX: BookE ppc64_rma_limit setup seems to disagree? */ if (early_mmu_has_feature(MMU_FTR_TYPE_FSL_E)) @@ -723,7 +723,7 @@ void __init irqstack_early_init(void) } } -#ifdef CONFIG_PPC_BOOK3E +#ifdef CONFIG_PPC_BOOK3E_64 void __init exc_lvl_early_init(void) { unsigned int i; @@ -825,7 +825,7 @@ void __init setup_per_cpu_areas(void) /* * BookE and BookS radix are historical values and should be revisited. 
*/ - if (IS_ENABLED(CONFIG_PPC_BOOK3E)) { + if (IS_ENABLED(CONFIG_PPC_BOOK3E_64)) { atom_size = SZ_1M; } else if (radix_enabled()) { atom_size = PAGE_SIZE; diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S index e68eb9381066..b60d81acccfc 100644 --- a/arch/powerpc/kernel/vmlinux.lds.S +++ b/arch/powerpc/kernel/vmlinux.lds.S @@ -71,7 +71,7 @@ SECTIONS .head.text : AT(ADDR(.head.text) - LOAD_OFFSET) { #ifdef CONFIG_PPC64 KEEP(*(.head.text.first_256B)); -#ifdef CONFIG_PPC_BOOK3E +#ifdef CONFIG_PPC_BOOK3E_64 #else KEEP(*(.head.text.real_vectors)); *(.head.text.real_trampolines); diff --git a/arch/powerpc/kexec/core_64.c b/arch/powerpc/kexec/core_64.c index c2bea9db1c1e..a79e28c91e2b 100644 --- a/arch/powerpc/kexec/core_64.c +++ b/arch/powerpc/kexec/core_64.c @@ -360,7 +360,7 @@ void default_machine_kexec(struct kimage *image) * the RMA. On BookE there is no real MMU off mode, so we have to * keep it enabled as well (but then we have bolted TLB entries). */ -#ifdef CONFIG_PPC_BOOK3E +#ifdef CONFIG_PPC_BOOK3E_64 copy_with_mmu_off = false; #else copy_with_mmu_off = radix_enabled() || diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h index 88805757d0c9..341c2e0c71d2 100644 --- a/arch/powerpc/mm/mmu_decl.h +++ b/arch/powerpc/mm/mmu_decl.h @@ -38,7 +38,7 @@ static inline void _tlbil_pid(unsigned int pid) #else /* CONFIG_40x || CONFIG_PPC_8xx */ extern void _tlbil_all(void); extern void _tlbil_pid(unsigned int pid); -#ifdef CONFIG_PPC_BOOK3E +#ifdef CONFIG_PPC_BOOK3E_64 extern void _tlbil_pid_noind(unsigned int pid); #else #define _tlbil_pid_noind(pid) _tlbil_pid(pid) @@ -55,7 +55,7 @@ static inline void _tlbil_va(unsigned long address, unsigned int pid, asm volatile ("tlbie %0; sync" : : "r" (address) : "memory"); trace_tlbie(0, 0, address, pid, 0, 0, 0); } -#elif defined(CONFIG_PPC_BOOK3E) +#elif defined(CONFIG_PPC_BOOK3E_64) extern void _tlbil_va(unsigned long address, unsigned int pid, unsigned int tsize, unsigned int ind); #else @@ -67,7 +67,7 @@ static inline void _tlbil_va(unsigned long address, unsigned int pid, } #endif /* CONFIG_PPC_8xx */ -#if defined(CONFIG_PPC_BOOK3E) || defined(CONFIG_PPC_47x) +#if defined(CONFIG_PPC_BOOK3E_64) || defined(CONFIG_PPC_47x) extern void _tlbivax_bcast(unsigned long address, unsigned int pid, unsigned int tsize, unsigned int ind); #else diff --git a/arch/powerpc/mm/nohash/tlb_low.S b/arch/powerpc/mm/nohash/tlb_low.S index d378031246ab..6914bc8e4ead 100644 --- a/arch/powerpc/mm/nohash/tlb_low.S +++ b/arch/powerpc/mm/nohash/tlb_low.S @@ -294,7 +294,7 @@ ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_USE_TLBILX) isync 1: wrtee r10 blr -#elif defined(CONFIG_PPC_BOOK3E) +#elif defined(CONFIG_PPC_BOOK3E_64) /* * New Book3E (>= 2.06) implementation * diff --git a/arch/powerpc/platforms/85xx/Kconfig b/arch/powerpc/platforms/85xx/Kconfig index be16eba0f704..069628670a0c 100644 --- a/arch/powerpc/platforms/85xx/Kconfig +++ b/arch/powerpc/platforms/85xx/Kconfig @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 menuconfig FSL_SOC_BOOKE bool "Freescale Book-E Machine Type" - depends on PPC_85xx || PPC_BOOK3E + depends on PPC_85xx || PPC_BOOK3E_64 select FSL_SOC select PPC_UDBG_16550 select MPIC diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype index 11780074eb23..d63b6386a974 100644 --- a/arch/powerpc/platforms/Kconfig.cputype +++ b/arch/powerpc/platforms/Kconfig.cputype @@ -257,10 +257,6 @@ config PPC_BOOK3S def_bool y depends on PPC_BOOK3S_32 || PPC_BOOK3S_64 -config PPC_BOOK3E - 
def_bool y - depends on PPC_BOOK3E_64 - config E500 select FSL_EMB_PERFMON select PPC_FSL_BOOK3E @@ -316,7 +312,7 @@ config 4xx config BOOKE bool - depends on E500 || 44x || PPC_BOOK3E + depends on E500 || 44x || PPC_BOOK3E_64 default y config BOOKE_OR_40x @@ -480,7 +476,7 @@ config PPC_MMU_NOHASH config PPC_BOOK3E_MMU def_bool y - depends on PPC_85xx || PPC_BOOK3E + depends on PPC_85xx || PPC_BOOK3E_64 config PPC_HAVE_PMU_SUPPORT bool @@ -503,7 +499,7 @@ config FORCE_SMP select SMP config SMP - depends on PPC_BOOK3S || PPC_BOOK3E || PPC_85xx || PPC_47x + depends on PPC_BOOK3S || PPC_BOOK3E_64 || PPC_85xx || PPC_47x select GENERIC_IRQ_MIGRATION bool "Symmetric multi-processing support" if !FORCE_SMP help diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c index 26ef3388c24c..e6d678d27b0f 100644 --- a/arch/powerpc/xmon/xmon.c +++ b/arch/powerpc/xmon/xmon.c @@ -195,7 +195,7 @@ static int do_spu_cmd(void); #ifdef CONFIG_44x static void dump_tlb_44x(void); #endif -#ifdef CONFIG_PPC_BOOK3E +#ifdef CONFIG_PPC_BOOK3E_64 static void dump_tlb_book3e(void); #endif @@ -288,11 +288,11 @@ Commands:\n\ t print backtrace\n\ x exit monitor and recover\n\ X exit monitor and don't recover\n" -#if defined(CONFIG_PPC64) && !defined(CONFIG_PPC_BOOK3E) +#if defined(CONFIG_PPC64) && !defined(CONFIG_PPC_BOOK3E_64) " u dump segment table or SLB\n" #elif defined(CONFIG_PPC_BOOK3S_32) " u dump segment registers\n" -#elif defined(CONFIG_44x) || defined(CONFIG_PPC_BOOK3E) +#elif defined(CONFIG_44x) || defined(CONFIG_PPC_BOOK3E_64) " u dump TLB\n" #endif " U show uptime information\n" @@ -1166,7 +1166,7 @@ cmds(struct pt_regs *excp) case 'u': dump_tlb_44x(); break; -#elif defined(CONFIG_PPC_BOOK3E) +#elif defined(CONFIG_PPC_BOOK3E_64) case 'u': dump_tlb_book3e(); break; @@ -2686,7 +2686,7 @@ static void dump_one_paca(int cpu) DUMP(p, rfi_flush_fallback_area, "%-*px"); #endif DUMP(p, dscr_default, "%#-*llx"); -#ifdef CONFIG_PPC_BOOK3E +#ifdef CONFIG_PPC_BOOK3E_64 DUMP(p, pgd, "%-*px"); DUMP(p, kernel_pgd, "%-*px"); DUMP(p, tcd_ptr, "%-*px"); @@ -2701,7 +2701,7 @@ static void dump_one_paca(int cpu) DUMP(p, canary, "%#-*lx"); #endif DUMP(p, saved_r1, "%#-*llx"); -#ifdef CONFIG_PPC_BOOK3E +#ifdef CONFIG_PPC_BOOK3E_64 DUMP(p, trap_save, "%#-*x"); #endif DUMP(p, irq_soft_mask, "%#-*x"); @@ -3823,7 +3823,7 @@ static void dump_tlb_44x(void) } #endif /* CONFIG_44x */ -#ifdef CONFIG_PPC_BOOK3E +#ifdef CONFIG_PPC_BOOK3E_64 static void dump_tlb_book3e(void) { u32 mmucfg, pidmask, lpidmask; @@ -3965,7 +3965,7 @@ static void dump_tlb_book3e(void) } } } -#endif /* CONFIG_PPC_BOOK3E */ +#endif /* CONFIG_PPC_BOOK3E_64 */ static void xmon_init(int enable) { From 1df399012b6ab0b24466a0675710a53e3feb000f Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Mon, 19 Sep 2022 19:01:34 +0200 Subject: [PATCH 112/214] powerpc: Remove redundant selection of E500 and E500MC PPC_85xx and PPC_BOOK3E_64 already select E500 so no need to select it again by PPC_QEMU_E500 and CORENET_GENERIC as they depend on PPC_85xx || PPC_BOOK3E_64. PPC_BOOK3E_64 already selects E500MC so no need to select it again by PPC_QEMU_E500 if PPC64, PPC_BOOK3E_64 is the only way into PPC_QEMU_E500 with PPC64. 
Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/44f03fa1506892fabf626dceb2f47a049908b6af.1663606876.git.christophe.leroy@csgroup.eu --- arch/powerpc/platforms/85xx/Kconfig | 3 --- 1 file changed, 3 deletions(-) diff --git a/arch/powerpc/platforms/85xx/Kconfig b/arch/powerpc/platforms/85xx/Kconfig index 069628670a0c..63fec86e41b4 100644 --- a/arch/powerpc/platforms/85xx/Kconfig +++ b/arch/powerpc/platforms/85xx/Kconfig @@ -241,8 +241,6 @@ endif # PPC32 config PPC_QEMU_E500 bool "QEMU generic e500 platform" select DEFAULT_UIMAGE - select E500 - select PPC_E500MC if PPC64 help This option enables support for running as a QEMU guest using QEMU's generic e500 machine. This is not required if you're @@ -258,7 +256,6 @@ config PPC_QEMU_E500 config CORENET_GENERIC bool "Freescale CoreNet Generic" select DEFAULT_UIMAGE - select E500 select PPC_E500MC select PHYS_64BIT select SWIOTLB From 688de017efaab8a7764ab2c05ce7128d0361023b Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Mon, 19 Sep 2022 19:01:35 +0200 Subject: [PATCH 113/214] powerpc: Change CONFIG_E500 to CONFIG_PPC_E500 It will be used outside arch/powerpc, make it clear its a powerpc configuration item. And we already have CONFIG_PPC_E500MC, so that will make it more consistent. Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/e63b22083c11c4300f4a82d3123a46e5fdd54fa6.1663606876.git.christophe.leroy@csgroup.eu --- arch/powerpc/Makefile | 2 +- arch/powerpc/include/asm/cputable.h | 4 ++-- arch/powerpc/include/asm/kgdb.h | 2 +- arch/powerpc/include/asm/mmu.h | 4 ++-- arch/powerpc/include/asm/reg_booke.h | 6 +++--- arch/powerpc/include/asm/synch.h | 2 +- arch/powerpc/include/asm/vdso/timebase.h | 2 +- arch/powerpc/kernel/Makefile | 2 +- arch/powerpc/kernel/cpu_setup_fsl_booke.S | 4 ++-- arch/powerpc/kernel/entry_32.S | 4 ++-- arch/powerpc/kernel/head_85xx.S | 4 ++-- arch/powerpc/kernel/head_booke.h | 2 +- arch/powerpc/kernel/setup_32.c | 2 +- arch/powerpc/kernel/traps.c | 2 +- arch/powerpc/kvm/Kconfig | 4 ++-- arch/powerpc/platforms/Kconfig.cputype | 26 +++++++++++------------ arch/powerpc/sysdev/fsl_pci.c | 2 +- arch/powerpc/sysdev/fsl_rio.c | 2 +- 18 files changed, 38 insertions(+), 38 deletions(-) diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile index f6d477c4aa64..cb01832385d0 100644 --- a/arch/powerpc/Makefile +++ b/arch/powerpc/Makefile @@ -210,7 +210,7 @@ KBUILD_CFLAGS += $(call cc-option,-mno-string) cpu-as-$(CONFIG_40x) += -Wa,-m405 cpu-as-$(CONFIG_44x) += -Wa,-m440 cpu-as-$(CONFIG_ALTIVEC) += $(call as-option,-Wa$(comma)-maltivec) -cpu-as-$(CONFIG_E500) += -Wa,-me500 +cpu-as-$(CONFIG_PPC_E500) += -Wa,-me500 # When using '-many -mpower4' gas will first try and find a matching power4 # mnemonic and failing that it will allow any valid mnemonic that GAS knows diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h index 27875f0b7bc7..757dbded11dc 100644 --- a/arch/powerpc/include/asm/cputable.h +++ b/arch/powerpc/include/asm/cputable.h @@ -510,7 +510,7 @@ enum { #elif defined(CONFIG_44x) CPU_FTRS_44X | CPU_FTRS_440x6 | #endif -#ifdef CONFIG_E500 +#ifdef CONFIG_PPC_E500 CPU_FTRS_E500 | CPU_FTRS_E500_2 | #endif #ifdef CONFIG_PPC_E500MC @@ -584,7 +584,7 @@ enum { #elif defined(CONFIG_44x) CPU_FTRS_44X & CPU_FTRS_440x6 & #endif -#ifdef CONFIG_E500 +#ifdef CONFIG_PPC_E500 CPU_FTRS_E500 & CPU_FTRS_E500_2 & #endif #ifdef CONFIG_PPC_E500MC diff --git a/arch/powerpc/include/asm/kgdb.h 
b/arch/powerpc/include/asm/kgdb.h index a9e098a3b881..715c18b75334 100644 --- a/arch/powerpc/include/asm/kgdb.h +++ b/arch/powerpc/include/asm/kgdb.h @@ -52,7 +52,7 @@ static inline void arch_kgdb_breakpoint(void) /* On non-E500 family PPC32 we determine the size by picking the last * register we need, but on E500 we skip sections so we list what we * need to store, and add it up. */ -#ifndef CONFIG_E500 +#ifndef CONFIG_PPC_E500 #define MAXREG (PT_FPSCR+1) #else /* 32 GPRs (8 bytes), nip, msr, ccr, link, ctr, xer, acc (8 bytes), spefscr*/ diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h index 860d0290ca4d..5b46da9ba7f6 100644 --- a/arch/powerpc/include/asm/mmu.h +++ b/arch/powerpc/include/asm/mmu.h @@ -162,7 +162,7 @@ enum { #elif defined(CONFIG_44x) MMU_FTR_TYPE_44x | #endif -#ifdef CONFIG_E500 +#ifdef CONFIG_PPC_E500 MMU_FTR_TYPE_FSL_E | MMU_FTR_BIG_PHYS | MMU_FTR_USE_TLBILX | #endif #ifdef CONFIG_PPC_BOOK3S_32 @@ -211,7 +211,7 @@ enum { #elif defined(CONFIG_44x) #define MMU_FTRS_ALWAYS MMU_FTR_TYPE_44x #endif -#ifdef CONFIG_E500 +#ifdef CONFIG_PPC_E500 #define MMU_FTRS_ALWAYS MMU_FTR_TYPE_FSL_E #endif diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h index 17b8dcd9a40d..af56980b6cdb 100644 --- a/arch/powerpc/include/asm/reg_booke.h +++ b/arch/powerpc/include/asm/reg_booke.h @@ -246,7 +246,7 @@ #define PPC47x_MCSR_FPR 0x00800000 /* FPR parity error */ #define PPC47x_MCSR_IPR 0x00400000 /* Imprecise Machine Check Exception */ -#ifdef CONFIG_E500 +#ifdef CONFIG_PPC_E500 /* All e500 */ #define MCSR_MCP 0x80000000UL /* Machine Check Input Pin */ #define MCSR_ICPERR 0x40000000UL /* I-Cache Parity Error */ @@ -282,7 +282,7 @@ #endif /* Bit definitions for the HID1 */ -#ifdef CONFIG_E500 +#ifdef CONFIG_PPC_E500 /* e500v1/v2 */ #define HID1_PLL_CFG_MASK 0xfc000000 /* PLL_CFG input pins */ #define HID1_RFXE 0x00020000 /* Read fault exception enable */ @@ -545,7 +545,7 @@ #define TCR_FIE 0x00800000 /* FIT Interrupt Enable */ #define TCR_ARE 0x00400000 /* Auto Reload Enable */ -#ifdef CONFIG_E500 +#ifdef CONFIG_PPC_E500 #define TCR_GET_WP(tcr) ((((tcr) & 0xC0000000) >> 30) | \ (((tcr) & 0x1E0000) >> 15)) #else diff --git a/arch/powerpc/include/asm/synch.h b/arch/powerpc/include/asm/synch.h index 7130176d8cb8..b0b4c64870d7 100644 --- a/arch/powerpc/include/asm/synch.h +++ b/arch/powerpc/include/asm/synch.h @@ -44,7 +44,7 @@ static inline void ppc_after_tlbiel_barrier(void) #if defined(__powerpc64__) # define LWSYNC lwsync -#elif defined(CONFIG_E500) +#elif defined(CONFIG_PPC_E500) # define LWSYNC \ START_LWSYNC_SECTION(96); \ sync; \ diff --git a/arch/powerpc/include/asm/vdso/timebase.h b/arch/powerpc/include/asm/vdso/timebase.h index 891c9d5eaabe..e9245f86a46c 100644 --- a/arch/powerpc/include/asm/vdso/timebase.h +++ b/arch/powerpc/include/asm/vdso/timebase.h @@ -12,7 +12,7 @@ * We use __powerpc64__ here because we want the compat VDSO to use the 32-bit * version below in the else case of the ifdef. 
*/ -#if defined(__powerpc64__) && (defined(CONFIG_PPC_CELL) || defined(CONFIG_E500)) +#if defined(__powerpc64__) && (defined(CONFIG_PPC_CELL) || defined(CONFIG_PPC_E500)) #define mftb() ({unsigned long rval; \ asm volatile( \ "90: mfspr %0, %2;\n" \ diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index 4483cae7dc9f..33dafd12e81d 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile @@ -101,7 +101,7 @@ obj-$(CONFIG_CRASH_DUMP) += crash_dump.o obj-$(CONFIG_FA_DUMP) += fadump.o obj-$(CONFIG_PRESERVE_FA_DUMP) += fadump.o ifdef CONFIG_PPC32 -obj-$(CONFIG_E500) += idle_e500.o +obj-$(CONFIG_PPC_E500) += idle_e500.o endif obj-$(CONFIG_PPC_BOOK3S_32) += idle_6xx.o l2cr_6xx.o cpu_setup_6xx.o obj-$(CONFIG_TAU) += tau_6xx.o diff --git a/arch/powerpc/kernel/cpu_setup_fsl_booke.S b/arch/powerpc/kernel/cpu_setup_fsl_booke.S index 4bf33f1b4193..058336079069 100644 --- a/arch/powerpc/kernel/cpu_setup_fsl_booke.S +++ b/arch/powerpc/kernel/cpu_setup_fsl_booke.S @@ -108,7 +108,7 @@ _GLOBAL(__setup_cpu_e6500) #endif /* CONFIG_PPC_E500MC */ #ifdef CONFIG_PPC32 -#ifdef CONFIG_E500 +#ifdef CONFIG_PPC_E500 #ifndef CONFIG_PPC_E500MC _GLOBAL(__setup_cpu_e500v1) _GLOBAL(__setup_cpu_e500v2) @@ -156,7 +156,7 @@ _GLOBAL(__setup_cpu_e5500) mtlr r5 blr #endif /* CONFIG_PPC_E500MC */ -#endif /* CONFIG_E500 */ +#endif /* CONFIG_PPC_E500 */ #endif /* CONFIG_PPC32 */ #ifdef CONFIG_PPC_BOOK3E_64 diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S index 1d599df6f169..e6d5fe3a8585 100644 --- a/arch/powerpc/kernel/entry_32.S +++ b/arch/powerpc/kernel/entry_32.S @@ -49,7 +49,7 @@ */ .align 12 -#if defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_E500) +#if defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_PPC_E500) .globl prepare_transfer_to_handler prepare_transfer_to_handler: /* if from kernel, check interrupted DOZE/NAP mode */ @@ -71,7 +71,7 @@ prepare_transfer_to_handler: lwz r2, GPR2(r11) b fast_exception_return _ASM_NOKPROBE_SYMBOL(prepare_transfer_to_handler) -#endif /* CONFIG_PPC_BOOK3S_32 || CONFIG_E500 */ +#endif /* CONFIG_PPC_BOOK3S_32 || CONFIG_PPC_E500 */ #if defined(CONFIG_PPC_KUEP) && defined(CONFIG_PPC_BOOK3S_32) .globl __kuep_lock diff --git a/arch/powerpc/kernel/head_85xx.S b/arch/powerpc/kernel/head_85xx.S index 48b168b5dc57..52c0ab416326 100644 --- a/arch/powerpc/kernel/head_85xx.S +++ b/arch/powerpc/kernel/head_85xx.S @@ -912,7 +912,7 @@ get_phys_addr: * Global functions */ -#ifdef CONFIG_E500 +#ifdef CONFIG_PPC_E500 #ifndef CONFIG_PPC_E500MC /* Adjust or setup IVORs for e500v1/v2 */ _GLOBAL(__setup_e500_ivors) @@ -955,7 +955,7 @@ _GLOBAL(__setup_ehv_ivors) sync blr #endif /* CONFIG_PPC_E500MC */ -#endif /* CONFIG_E500 */ +#endif /* CONFIG_PPC_E500 */ #ifdef CONFIG_SPE /* diff --git a/arch/powerpc/kernel/head_booke.h b/arch/powerpc/kernel/head_booke.h index bb6d5d0fc4ac..a2f82ced6e4a 100644 --- a/arch/powerpc/kernel/head_booke.h +++ b/arch/powerpc/kernel/head_booke.h @@ -103,7 +103,7 @@ END_BTB_FLUSH_SECTION .endm .macro prepare_transfer_to_handler -#ifdef CONFIG_E500 +#ifdef CONFIG_PPC_E500 andi. 
r12,r9,MSR_PR bne 777f bl prepare_transfer_to_handler diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c index 813261789303..b761cc1a403c 100644 --- a/arch/powerpc/kernel/setup_32.c +++ b/arch/powerpc/kernel/setup_32.c @@ -207,7 +207,7 @@ void __init setup_power_save(void) ppc_md.power_save = ppc6xx_idle; #endif -#ifdef CONFIG_E500 +#ifdef CONFIG_PPC_E500 if (cpu_has_feature(CPU_FTR_CAN_DOZE) || cpu_has_feature(CPU_FTR_CAN_NAP)) ppc_md.power_save = e500_idle; diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index f181c434289e..62ec50a7a8ef 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -600,7 +600,7 @@ static inline int check_io_access(struct pt_regs *regs) #define inst_length(reason) (((reason) & REASON_PREFIXED) ? 8 : 4) -#if defined(CONFIG_E500) +#if defined(CONFIG_PPC_E500) int machine_check_e500mc(struct pt_regs *regs) { unsigned long mcsr = mfspr(SPRN_MCSR); diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig index dcb398d5e009..61cdd782d3c5 100644 --- a/arch/powerpc/kvm/Kconfig +++ b/arch/powerpc/kvm/Kconfig @@ -189,7 +189,7 @@ config KVM_EXIT_TIMING config KVM_E500V2 bool "KVM support for PowerPC E500v2 processors" - depends on E500 && !PPC_E500MC + depends on PPC_E500 && !PPC_E500MC select KVM select KVM_MMIO select MMU_NOTIFIER @@ -220,7 +220,7 @@ config KVM_E500MC config KVM_MPIC bool "KVM in-kernel MPIC emulation" - depends on KVM && E500 + depends on KVM && PPC_E500 select HAVE_KVM_IRQCHIP select HAVE_KVM_IRQFD select HAVE_KVM_IRQ_ROUTING diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype index d63b6386a974..5b065186ace5 100644 --- a/arch/powerpc/platforms/Kconfig.cputype +++ b/arch/powerpc/platforms/Kconfig.cputype @@ -33,7 +33,7 @@ config PPC_BOOK3S_32 config PPC_85xx bool "Freescale 85xx" - select E500 + select PPC_E500 config PPC_8xx bool "Freescale 8xx" @@ -108,7 +108,7 @@ config PPC_BOOK3S_64 config PPC_BOOK3E_64 bool "Embedded processors" select PPC_FSL_BOOK3E - select E500 + select PPC_E500 select PPC_E500MC select PPC_FPU # Make it a choice ? select PPC_SMP_MUXED_IPI @@ -175,11 +175,11 @@ config POWER9_CPU config E5500_CPU bool "Freescale e5500" - depends on PPC64 && E500 + depends on PPC64 && PPC_E500 config E6500_CPU bool "Freescale e6500" - depends on PPC64 && E500 + depends on PPC64 && PPC_E500 config 405_CPU bool "40x family" @@ -257,7 +257,7 @@ config PPC_BOOK3S def_bool y depends on PPC_BOOK3S_32 || PPC_BOOK3S_64 -config E500 +config PPC_E500 select FSL_EMB_PERFMON select PPC_FSL_BOOK3E bool @@ -266,7 +266,7 @@ config PPC_E500MC bool "e500mc Support" select PPC_FPU select COMMON_CLK - depends on E500 + depends on PPC_E500 help This must be enabled for running on e500mc (and derivatives such as e5500/e6500), and must be disabled for running on @@ -289,7 +289,7 @@ config PPC_FPU config FSL_EMB_PERFMON bool "Freescale Embedded Perfmon" - depends on E500 || PPC_83xx + depends on PPC_E500 || PPC_83xx help This is the Performance Monitor support found on the e500 core and some e300 cores (c3 and c4). 
Select this only if your @@ -302,7 +302,7 @@ config FSL_EMB_PERF_EVENT config FSL_EMB_PERF_EVENT_E500 bool - depends on FSL_EMB_PERF_EVENT && E500 + depends on FSL_EMB_PERF_EVENT && PPC_E500 default y config 4xx @@ -312,7 +312,7 @@ config 4xx config BOOKE bool - depends on E500 || 44x || PPC_BOOK3E_64 + depends on PPC_E500 || 44x || PPC_BOOK3E_64 default y config BOOKE_OR_40x @@ -332,12 +332,12 @@ config PPC_FSL_BOOK3E config PTE_64BIT bool - depends on 44x || E500 || PPC_86xx + depends on 44x || PPC_E500 || PPC_86xx default y if PHYS_64BIT config PHYS_64BIT - bool 'Large physical address support' if E500 || PPC_86xx - depends on (44x || E500 || PPC_86xx) && !PPC_83xx && !PPC_82xx + bool 'Large physical address support' if PPC_E500 || PPC_86xx + depends on (44x || PPC_E500 || PPC_86xx) && !PPC_83xx && !PPC_82xx select PHYS_ADDR_T_64BIT help This option enables kernel support for larger than 32-bit physical @@ -384,7 +384,7 @@ config VSX config SPE_POSSIBLE def_bool y - depends on E500 && !PPC_E500MC + depends on PPC_E500 && !PPC_E500MC config SPE bool "SPE Support" diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c index 1ef7400ef244..974d3db6faab 100644 --- a/arch/powerpc/sysdev/fsl_pci.c +++ b/arch/powerpc/sysdev/fsl_pci.c @@ -943,7 +943,7 @@ u64 fsl_pci_immrbar_base(struct pci_controller *hose) return 0; } -#ifdef CONFIG_E500 +#ifdef CONFIG_PPC_E500 static int mcheck_handle_load(struct pt_regs *regs, u32 inst) { unsigned int rd, ra, rb, d; diff --git a/arch/powerpc/sysdev/fsl_rio.c b/arch/powerpc/sysdev/fsl_rio.c index 4647c6074f3b..c8f044d62fe2 100644 --- a/arch/powerpc/sysdev/fsl_rio.c +++ b/arch/powerpc/sysdev/fsl_rio.c @@ -98,7 +98,7 @@ resource_size_t rio_law_start; struct fsl_rio_dbell *dbell; struct fsl_rio_pw *pw; -#ifdef CONFIG_E500 +#ifdef CONFIG_PPC_E500 int fsl_rio_mcheck_exception(struct pt_regs *regs) { const struct exception_table_entry *entry; From 404a5e72f4dfd80dda6a3e9edd18012f79287bff Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Mon, 19 Sep 2022 19:01:36 +0200 Subject: [PATCH 114/214] Documentation: Rename PPC_FSL_BOOK3E to PPC_E500 CONFIG_PPC_FSL_BOOK3E is redundant with CONFIG_PPC_E500. Rename it so that CONFIG_PPC_FSL_BOOK3E can be removed later. Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/d3d42b395c09e66b0705fda1e51779f33e13ac38.1663606876.git.christophe.leroy@csgroup.eu --- Documentation/admin-guide/kernel-parameters.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index d62fdd0ac497..ad0af89260c9 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -3626,7 +3626,7 @@ (bounds check bypass). With this option data leaks are possible in the system. - nospectre_v2 [X86,PPC_FSL_BOOK3E,ARM64] Disable all mitigations for + nospectre_v2 [X86,PPC_E500,ARM64] Disable all mitigations for the Spectre variant 2 (indirect branch prediction) vulnerability. System may allow data leaks with this option. From ec65560ad84d9d2eb98cf864e3b530856cafd233 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Mon, 19 Sep 2022 19:01:37 +0200 Subject: [PATCH 115/214] watchdog: booke_wdt: Replace PPC_FSL_BOOK3E by PPC_E500 CONFIG_PPC_FSL_BOOK3E is redundant with CONFIG_PPC_E500. Replace it so that CONFIG_PPC_FSL_BOOK3E can be removed later. 
Signed-off-by: Christophe Leroy Acked-by: Guenter Roeck Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/01a9132d51d3d8d9c74576d3da4d9d1fa5a88bde.1663606876.git.christophe.leroy@csgroup.eu --- drivers/watchdog/Kconfig | 8 ++++---- drivers/watchdog/booke_wdt.c | 8 ++++---- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index 9295492d24f7..b7c03c600567 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig @@ -1935,10 +1935,10 @@ config BOOKE_WDT config BOOKE_WDT_DEFAULT_TIMEOUT int "PowerPC Book-E Watchdog Timer Default Timeout" depends on BOOKE_WDT - default 38 if PPC_FSL_BOOK3E - range 0 63 if PPC_FSL_BOOK3E - default 3 if !PPC_FSL_BOOK3E - range 0 3 if !PPC_FSL_BOOK3E + default 38 if PPC_E500 + range 0 63 if PPC_E500 + default 3 if !PPC_E500 + range 0 3 if !PPC_E500 help Select the default watchdog timer period to be used by the PowerPC Book-E watchdog driver. A watchdog "event" occurs when the bit diff --git a/drivers/watchdog/booke_wdt.c b/drivers/watchdog/booke_wdt.c index 75da5cd02615..932a03f4436a 100644 --- a/drivers/watchdog/booke_wdt.c +++ b/drivers/watchdog/booke_wdt.c @@ -27,7 +27,7 @@ */ -#ifdef CONFIG_PPC_FSL_BOOK3E +#ifdef CONFIG_PPC_E500 #define WDTP(x) ((((x)&0x3)<<30)|(((x)&0x3c)<<15)) #define WDTP_MASK (WDTP(0x3f)) #else @@ -45,7 +45,7 @@ MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); -#ifdef CONFIG_PPC_FSL_BOOK3E +#ifdef CONFIG_PPC_E500 /* For the specified period, determine the number of seconds * corresponding to the reset time. There will be a watchdog @@ -88,7 +88,7 @@ static unsigned int sec_to_period(unsigned int secs) #define MAX_WDT_TIMEOUT period_to_sec(1) -#else /* CONFIG_PPC_FSL_BOOK3E */ +#else /* CONFIG_PPC_E500 */ static unsigned long long period_to_sec(unsigned int period) { @@ -102,7 +102,7 @@ static unsigned int sec_to_period(unsigned int secs) #define MAX_WDT_TIMEOUT 3 /* from Kconfig */ -#endif /* !CONFIG_PPC_FSL_BOOK3E */ +#endif /* !CONFIG_PPC_E500 */ static void __booke_wdt_set(void *data) { From 3e7318584dfec11992f3ac45658c4bc1210b3778 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Mon, 19 Sep 2022 19:01:38 +0200 Subject: [PATCH 116/214] powerpc: Remove CONFIG_PPC_FSL_BOOK3E CONFIG_PPC_FSL_BOOK3E is redundant with CONFIG_PPC_E500. Remove it. And rename five files accordingly. 
Signed-off-by: Christophe Leroy [mpe: Rename include guards to match new file names] Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/795cb93b88c9a0279289712e674f39e3b108a1b4.1663606876.git.christophe.leroy@csgroup.eu --- arch/powerpc/Kconfig | 2 +- arch/powerpc/include/asm/barrier.h | 2 +- arch/powerpc/include/asm/hugetlb.h | 4 ++-- arch/powerpc/include/asm/kvm_host.h | 2 +- arch/powerpc/include/asm/mmu.h | 2 +- arch/powerpc/include/asm/nohash/32/pgtable.h | 2 +- arch/powerpc/include/asm/nohash/64/pgtable.h | 2 +- .../nohash/{hugetlb-book3e.h => hugetlb-e500.h} | 8 ++++---- arch/powerpc/include/asm/nohash/pgtable.h | 2 +- .../asm/nohash/{pte-book3e.h => pte-e500.h} | 6 +++--- arch/powerpc/include/asm/page.h | 2 +- arch/powerpc/include/asm/ppc_asm.h | 6 +++--- arch/powerpc/include/asm/setup.h | 2 +- arch/powerpc/kernel/Makefile | 2 +- arch/powerpc/kernel/asm-offsets.c | 4 ++-- .../{cpu_setup_fsl_booke.S => cpu_setup_e500.S} | 0 arch/powerpc/kernel/head_booke.h | 2 +- arch/powerpc/kernel/interrupt_64.S | 2 +- arch/powerpc/kernel/security.c | 10 +++++----- arch/powerpc/kernel/smp.c | 2 +- arch/powerpc/kernel/sysfs.c | 6 +++--- arch/powerpc/kernel/vmlinux.lds.S | 2 +- arch/powerpc/lib/feature-fixups.c | 4 ++-- arch/powerpc/mm/hugetlbpage.c | 2 +- arch/powerpc/mm/mem.c | 2 +- arch/powerpc/mm/mmu_decl.h | 4 ++-- arch/powerpc/mm/nohash/Makefile | 6 +++--- arch/powerpc/mm/nohash/{fsl_book3e.c => e500.c} | 0 .../{book3e_hugetlbpage.c => e500_hugetlbpage.c} | 0 arch/powerpc/mm/nohash/tlb.c | 16 ++++++++-------- arch/powerpc/mm/nohash/tlb_low.S | 2 +- arch/powerpc/platforms/Kconfig.cputype | 16 ++++------------ 32 files changed, 58 insertions(+), 66 deletions(-) rename arch/powerpc/include/asm/nohash/{hugetlb-book3e.h => hugetlb-e500.h} (84%) rename arch/powerpc/include/asm/nohash/{pte-book3e.h => pte-e500.h} (96%) rename arch/powerpc/kernel/{cpu_setup_fsl_booke.S => cpu_setup_e500.S} (100%) rename arch/powerpc/mm/nohash/{fsl_book3e.c => e500.c} (100%) rename arch/powerpc/mm/nohash/{book3e_hugetlbpage.c => e500_hugetlbpage.c} (100%) diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 9d721cac20b8..a7b58645cc3f 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -290,7 +290,7 @@ config PPC_LONG_DOUBLE_128 config PPC_BARRIER_NOSPEC bool default y - depends on PPC_BOOK3S_64 || PPC_FSL_BOOK3E + depends on PPC_BOOK3S_64 || PPC_E500 config EARLY_PRINTK bool diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h index ef2d8b15eaab..e80b2c0e9315 100644 --- a/arch/powerpc/include/asm/barrier.h +++ b/arch/powerpc/include/asm/barrier.h @@ -86,7 +86,7 @@ do { \ #ifdef CONFIG_PPC_BOOK3S_64 #define NOSPEC_BARRIER_SLOT nop -#elif defined(CONFIG_PPC_FSL_BOOK3E) +#elif defined(CONFIG_PPC_E500) #define NOSPEC_BARRIER_SLOT nop; nop #endif diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h index 32ce0fb7548f..ea71f7245a63 100644 --- a/arch/powerpc/include/asm/hugetlb.h +++ b/arch/powerpc/include/asm/hugetlb.h @@ -7,8 +7,8 @@ #ifdef CONFIG_PPC_BOOK3S_64 #include -#elif defined(CONFIG_PPC_FSL_BOOK3E) -#include +#elif defined(CONFIG_PPC_E500) +#include #elif defined(CONFIG_PPC_8xx) #include #endif /* CONFIG_PPC_BOOK3S_64 */ diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index c2b003550dc9..caea15dcb91d 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -443,7 +443,7 @@ struct kvmppc_passthru_irqmap { }; #endif -# ifdef CONFIG_PPC_FSL_BOOK3E 
+# ifdef CONFIG_PPC_E500 #define KVMPPC_BOOKE_IAC_NUM 2 #define KVMPPC_BOOKE_DAC_NUM 2 # else diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h index 5b46da9ba7f6..39057320e436 100644 --- a/arch/powerpc/include/asm/mmu.h +++ b/arch/powerpc/include/asm/mmu.h @@ -141,7 +141,7 @@ typedef pte_t *pgtable_t; -#ifdef CONFIG_PPC_FSL_BOOK3E +#ifdef CONFIG_PPC_E500 #include DECLARE_PER_CPU(int, next_tlbcam_idx); #endif diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h index 197e7552d9f6..0d40b33184eb 100644 --- a/arch/powerpc/include/asm/nohash/32/pgtable.h +++ b/arch/powerpc/include/asm/nohash/32/pgtable.h @@ -131,7 +131,7 @@ void unmap_kernel_page(unsigned long va); #elif defined(CONFIG_44x) #include #elif defined(CONFIG_PPC_85xx) && defined(CONFIG_PTE_64BIT) -#include +#include #elif defined(CONFIG_PPC_85xx) #include #elif defined(CONFIG_PPC_8xx) diff --git a/arch/powerpc/include/asm/nohash/64/pgtable.h b/arch/powerpc/include/asm/nohash/64/pgtable.h index 599921cc257e..879e9a6e5a87 100644 --- a/arch/powerpc/include/asm/nohash/64/pgtable.h +++ b/arch/powerpc/include/asm/nohash/64/pgtable.h @@ -70,7 +70,7 @@ /* * Include the PTE bits definitions */ -#include +#include #define PTE_RPN_MASK (~((1UL << PTE_RPN_SHIFT) - 1)) diff --git a/arch/powerpc/include/asm/nohash/hugetlb-book3e.h b/arch/powerpc/include/asm/nohash/hugetlb-e500.h similarity index 84% rename from arch/powerpc/include/asm/nohash/hugetlb-book3e.h rename to arch/powerpc/include/asm/nohash/hugetlb-e500.h index ecd8694cb229..8f04ad20e040 100644 --- a/arch/powerpc/include/asm/nohash/hugetlb-book3e.h +++ b/arch/powerpc/include/asm/nohash/hugetlb-e500.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _ASM_POWERPC_NOHASH_HUGETLB_BOOK3E_H -#define _ASM_POWERPC_NOHASH_HUGETLB_BOOK3E_H +#ifndef _ASM_POWERPC_NOHASH_HUGETLB_E500_H +#define _ASM_POWERPC_NOHASH_HUGETLB_E500_H static inline pte_t *hugepd_page(hugepd_t hpd) { @@ -30,7 +30,7 @@ void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr); static inline void hugepd_populate(hugepd_t *hpdp, pte_t *new, unsigned int pshift) { - /* We use the old format for PPC_FSL_BOOK3E */ + /* We use the old format for PPC_E500 */ *hpdp = __hugepd(((unsigned long)new & ~PD_HUGE) | pshift); } @@ -42,4 +42,4 @@ static inline int check_and_get_huge_psize(int shift) return shift_to_mmu_psize(shift); } -#endif /* _ASM_POWERPC_NOHASH_HUGETLB_BOOK3E_H */ +#endif /* _ASM_POWERPC_NOHASH_HUGETLB_E500_H */ diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h index 4fd73c7412d0..d9067dfc531c 100644 --- a/arch/powerpc/include/asm/nohash/pgtable.h +++ b/arch/powerpc/include/asm/nohash/pgtable.h @@ -266,7 +266,7 @@ static inline int pud_huge(pud_t pud) * We use it to ensure coherency between the i-cache and d-cache * for the page which has just been mapped in. 
*/ -#if defined(CONFIG_PPC_FSL_BOOK3E) && defined(CONFIG_HUGETLB_PAGE) +#if defined(CONFIG_PPC_E500) && defined(CONFIG_HUGETLB_PAGE) void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep); #else static inline diff --git a/arch/powerpc/include/asm/nohash/pte-book3e.h b/arch/powerpc/include/asm/nohash/pte-e500.h similarity index 96% rename from arch/powerpc/include/asm/nohash/pte-book3e.h rename to arch/powerpc/include/asm/nohash/pte-e500.h index f798640422c2..0934e8965e4e 100644 --- a/arch/powerpc/include/asm/nohash/pte-book3e.h +++ b/arch/powerpc/include/asm/nohash/pte-e500.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _ASM_POWERPC_NOHASH_PTE_BOOK3E_H -#define _ASM_POWERPC_NOHASH_PTE_BOOK3E_H +#ifndef _ASM_POWERPC_NOHASH_PTE_E500_H +#define _ASM_POWERPC_NOHASH_PTE_E500_H #ifdef __KERNEL__ /* PTE bit definitions for processors compliant to the Book3E @@ -126,4 +126,4 @@ static inline pte_t pte_mkexec(pte_t pte) #endif /* __ASSEMBLY__ */ #endif /* __KERNEL__ */ -#endif /* _ASM_POWERPC_NOHASH_PTE_BOOK3E_H */ +#endif /* _ASM_POWERPC_NOHASH_PTE_E500_H */ diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h index 7f20636d13ed..edf1dd1b0ca9 100644 --- a/arch/powerpc/include/asm/page.h +++ b/arch/powerpc/include/asm/page.h @@ -31,7 +31,7 @@ extern unsigned int hpage_shift; #define HPAGE_SHIFT hpage_shift #elif defined(CONFIG_PPC_8xx) #define HPAGE_SHIFT 19 /* 512k pages */ -#elif defined(CONFIG_PPC_FSL_BOOK3E) +#elif defined(CONFIG_PPC_E500) #define HPAGE_SHIFT 22 /* 4M pages */ #endif #define HPAGE_SIZE ((1UL) << HPAGE_SHIFT) diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h index 55149a0384db..7e4fe766e247 100644 --- a/arch/powerpc/include/asm/ppc_asm.h +++ b/arch/powerpc/include/asm/ppc_asm.h @@ -342,7 +342,7 @@ n: #endif /* various errata or part fixups */ -#if defined(CONFIG_PPC_CELL) || defined(CONFIG_PPC_FSL_BOOK3E) +#if defined(CONFIG_PPC_CELL) || defined(CONFIG_PPC_E500) #define MFTB(dest) \ 90: mfspr dest, SPRN_TBRL; \ BEGIN_FTR_SECTION_NESTED(96); \ @@ -768,7 +768,7 @@ END_FTR_SECTION_NESTED(CPU_FTR_CELL_TB_BUG, CPU_FTR_CELL_TB_BUG, 96) stringify_in_c(.llong (_target);) \ stringify_in_c(.previous) -#ifdef CONFIG_PPC_FSL_BOOK3E +#ifdef CONFIG_PPC_E500 #define BTB_FLUSH(reg) \ lis reg,BUCSR_INIT@h; \ ori reg,reg,BUCSR_INIT@l; \ @@ -776,6 +776,6 @@ END_FTR_SECTION_NESTED(CPU_FTR_CELL_TB_BUG, CPU_FTR_CELL_TB_BUG, 96) isync; #else #define BTB_FLUSH(reg) -#endif /* CONFIG_PPC_FSL_BOOK3E */ +#endif /* CONFIG_PPC_E500 */ #endif /* _ASM_POWERPC_PPC_ASM_H */ diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h index dd461b2c825c..85143849a586 100644 --- a/arch/powerpc/include/asm/setup.h +++ b/arch/powerpc/include/asm/setup.h @@ -69,7 +69,7 @@ void do_barrier_nospec_fixups_range(bool enable, void *start, void *end); static inline void do_barrier_nospec_fixups_range(bool enable, void *start, void *end) { } #endif -#ifdef CONFIG_PPC_FSL_BOOK3E +#ifdef CONFIG_PPC_E500 void __init setup_spectre_v2(void); #else static inline void setup_spectre_v2(void) {} diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index 33dafd12e81d..658c4dffaa56 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile @@ -114,7 +114,7 @@ endif obj64-$(CONFIG_HIBERNATION) += swsusp_asm64.o obj-$(CONFIG_MODULES) += module.o module_$(BITS).o obj-$(CONFIG_44x) += cpu_setup_44x.o -obj-$(CONFIG_PPC_FSL_BOOK3E) += cpu_setup_fsl_booke.o 
+obj-$(CONFIG_PPC_E500) += cpu_setup_e500.o obj-$(CONFIG_PPC_DOORBELL) += dbell.o obj-$(CONFIG_JUMP_LABEL) += jump_label.o diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index 10ce03052a19..4ce2a4aa3985 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -59,7 +59,7 @@ #endif #endif -#if defined(CONFIG_PPC_FSL_BOOK3E) +#if defined(CONFIG_PPC_E500) #include "../mm/mmu_decl.h" #endif @@ -651,7 +651,7 @@ int main(void) DEFINE(PGD_T_LOG2, PGD_T_LOG2); DEFINE(PTE_T_LOG2, PTE_T_LOG2); #endif -#ifdef CONFIG_PPC_FSL_BOOK3E +#ifdef CONFIG_PPC_E500 DEFINE(TLBCAM_SIZE, sizeof(struct tlbcam)); OFFSET(TLBCAM_MAS0, tlbcam, MAS0); OFFSET(TLBCAM_MAS1, tlbcam, MAS1); diff --git a/arch/powerpc/kernel/cpu_setup_fsl_booke.S b/arch/powerpc/kernel/cpu_setup_e500.S similarity index 100% rename from arch/powerpc/kernel/cpu_setup_fsl_booke.S rename to arch/powerpc/kernel/cpu_setup_e500.S diff --git a/arch/powerpc/kernel/head_booke.h b/arch/powerpc/kernel/head_booke.h index a2f82ced6e4a..1047dc053b47 100644 --- a/arch/powerpc/kernel/head_booke.h +++ b/arch/powerpc/kernel/head_booke.h @@ -34,7 +34,7 @@ */ #define THREAD_NORMSAVE(offset) (THREAD_NORMSAVES + (offset * 4)) -#ifdef CONFIG_PPC_FSL_BOOK3E +#ifdef CONFIG_PPC_E500 #define BOOKE_CLEAR_BTB(reg) \ START_BTB_FLUSH_SECTION \ BTB_FLUSH(reg) \ diff --git a/arch/powerpc/kernel/interrupt_64.S b/arch/powerpc/kernel/interrupt_64.S index 4b4ba3364665..a2d3abb48075 100644 --- a/arch/powerpc/kernel/interrupt_64.S +++ b/arch/powerpc/kernel/interrupt_64.S @@ -230,7 +230,7 @@ _ASM_NOKPROBE_SYMBOL(system_call_common) std r0,GPR0(r1) std r10,GPR1(r1) std r2,GPR2(r1) -#ifdef CONFIG_PPC_FSL_BOOK3E +#ifdef CONFIG_PPC_E500 START_BTB_FLUSH_SECTION BTB_FLUSH(r10) END_BTB_FLUSH_SECTION diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c index b562a1d2c750..206475e3e0b4 100644 --- a/arch/powerpc/kernel/security.c +++ b/arch/powerpc/kernel/security.c @@ -35,7 +35,7 @@ static enum branch_cache_flush_type link_stack_flush_type = BRANCH_CACHE_FLUSH_N bool barrier_nospec_enabled; static bool no_nospec; static bool btb_flush_enabled; -#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_BOOK3S_64) +#if defined(CONFIG_PPC_E500) || defined(CONFIG_PPC_BOOK3S_64) static bool no_spectrev2; #endif @@ -122,7 +122,7 @@ static __init int security_feature_debugfs_init(void) device_initcall(security_feature_debugfs_init); #endif /* CONFIG_DEBUG_FS */ -#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_BOOK3S_64) +#if defined(CONFIG_PPC_E500) || defined(CONFIG_PPC_BOOK3S_64) static int __init handle_nospectre_v2(char *p) { no_spectrev2 = true; @@ -130,9 +130,9 @@ static int __init handle_nospectre_v2(char *p) return 0; } early_param("nospectre_v2", handle_nospectre_v2); -#endif /* CONFIG_PPC_FSL_BOOK3E || CONFIG_PPC_BOOK3S_64 */ +#endif /* CONFIG_PPC_E500 || CONFIG_PPC_BOOK3S_64 */ -#ifdef CONFIG_PPC_FSL_BOOK3E +#ifdef CONFIG_PPC_E500 void __init setup_spectre_v2(void) { if (no_spectrev2 || cpu_mitigations_off()) @@ -140,7 +140,7 @@ void __init setup_spectre_v2(void) else btb_flush_enabled = true; } -#endif /* CONFIG_PPC_FSL_BOOK3E */ +#endif /* CONFIG_PPC_E500 */ #ifdef CONFIG_PPC_BOOK3S_64 ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf) diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index 169703fead57..11ded19186b9 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c @@ -708,7 +708,7 @@ static struct task_struct 
*current_set[NR_CPUS]; static void smp_store_cpu_info(int id) { per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR); -#ifdef CONFIG_PPC_FSL_BOOK3E +#ifdef CONFIG_PPC_E500 per_cpu(next_tlbcam_idx, id) = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1; #endif diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c index 3a10cda9c05e..ef9a61718940 100644 --- a/arch/powerpc/kernel/sysfs.c +++ b/arch/powerpc/kernel/sysfs.c @@ -228,7 +228,7 @@ static void __init sysfs_create_dscr_default(void) } #endif /* CONFIG_PPC64 */ -#ifdef CONFIG_PPC_FSL_BOOK3E +#ifdef CONFIG_PPC_E500 #define MAX_BIT 63 static u64 pw20_wt; @@ -907,7 +907,7 @@ static int register_cpu_online(unsigned int cpu) device_create_file(s, &dev_attr_tscr); #endif /* CONFIG_PPC64 */ -#ifdef CONFIG_PPC_FSL_BOOK3E +#ifdef CONFIG_PPC_E500 if (PVR_VER(cur_cpu_spec->pvr_value) == PVR_VER_E6500) { device_create_file(s, &dev_attr_pw20_state); device_create_file(s, &dev_attr_pw20_wait_time); @@ -1003,7 +1003,7 @@ static int unregister_cpu_online(unsigned int cpu) device_remove_file(s, &dev_attr_tscr); #endif /* CONFIG_PPC64 */ -#ifdef CONFIG_PPC_FSL_BOOK3E +#ifdef CONFIG_PPC_E500 if (PVR_VER(cur_cpu_spec->pvr_value) == PVR_VER_E6500) { device_remove_file(s, &dev_attr_pw20_state); device_remove_file(s, &dev_attr_pw20_wait_time); diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S index b60d81acccfc..c025c83dfdc3 100644 --- a/arch/powerpc/kernel/vmlinux.lds.S +++ b/arch/powerpc/kernel/vmlinux.lds.S @@ -239,7 +239,7 @@ SECTIONS } #endif /* CONFIG_PPC_BARRIER_NOSPEC */ -#ifdef CONFIG_PPC_FSL_BOOK3E +#ifdef CONFIG_PPC_E500 . = ALIGN(8); __spec_btb_flush_fixup : AT(ADDR(__spec_btb_flush_fixup) - LOAD_OFFSET) { __start__btb_flush_fixup = .; diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c index 993d3f31832a..31f40f544de5 100644 --- a/arch/powerpc/lib/feature-fixups.c +++ b/arch/powerpc/lib/feature-fixups.c @@ -550,7 +550,7 @@ void do_barrier_nospec_fixups(bool enable) } #endif /* CONFIG_PPC_BARRIER_NOSPEC */ -#ifdef CONFIG_PPC_FSL_BOOK3E +#ifdef CONFIG_PPC_E500 void do_barrier_nospec_fixups_range(bool enable, void *fixup_start, void *fixup_end) { unsigned int instr[2], *dest; @@ -602,7 +602,7 @@ void __init do_btb_flush_fixups(void) for (; start < end; start += 2) patch_btb_flush_section(start); } -#endif /* CONFIG_PPC_FSL_BOOK3E */ +#endif /* CONFIG_PPC_E500 */ void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end) { diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c index bc84a594ca62..8c3ea5300ac3 100644 --- a/arch/powerpc/mm/hugetlbpage.c +++ b/arch/powerpc/mm/hugetlbpage.c @@ -623,7 +623,7 @@ static int __init hugetlbpage_init(void) if (pdshift > shift) { if (!IS_ENABLED(CONFIG_PPC_8xx)) pgtable_cache_add(pdshift - shift); - } else if (IS_ENABLED(CONFIG_PPC_FSL_BOOK3E) || + } else if (IS_ENABLED(CONFIG_PPC_E500) || IS_ENABLED(CONFIG_PPC_8xx)) { pgtable_cache_add(PTE_T_ORDER); } diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index 6ddbd6cb3a2a..84d171953ba4 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c @@ -308,7 +308,7 @@ void __init mem_init(void) } #endif /* CONFIG_HIGHMEM */ -#if defined(CONFIG_PPC_FSL_BOOK3E) && !defined(CONFIG_SMP) +#if defined(CONFIG_PPC_E500) && !defined(CONFIG_SMP) /* * If smp is enabled, next_tlbcam_idx is initialized in the cpu up * functions.... do it here for the non-smp case. 
diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h index 341c2e0c71d2..bd9784f77f2e 100644 --- a/arch/powerpc/mm/mmu_decl.h +++ b/arch/powerpc/mm/mmu_decl.h @@ -111,7 +111,7 @@ void MMU_init_hw_patch(void); unsigned long mmu_mapin_ram(unsigned long base, unsigned long top); #endif -#ifdef CONFIG_PPC_FSL_BOOK3E +#ifdef CONFIG_PPC_E500 extern unsigned long map_mem_in_cams(unsigned long ram, int max_cam_idx, bool dryrun, bool init); #ifdef CONFIG_PPC32 @@ -157,7 +157,7 @@ static inline phys_addr_t v_block_mapped(unsigned long va) { return 0; } static inline unsigned long p_block_mapped(phys_addr_t pa) { return 0; } #endif -#if defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_PPC_8xx) || defined(CONFIG_PPC_FSL_BOOK3E) +#if defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_PPC_8xx) || defined(CONFIG_PPC_E500) void mmu_mark_initmem_nx(void); void mmu_mark_rodata_ro(void); #else diff --git a/arch/powerpc/mm/nohash/Makefile b/arch/powerpc/mm/nohash/Makefile index b467a25ee155..f3894e79d5f7 100644 --- a/arch/powerpc/mm/nohash/Makefile +++ b/arch/powerpc/mm/nohash/Makefile @@ -7,13 +7,13 @@ obj-$(CONFIG_PPC_BOOK3E_64) += tlb_low_64e.o book3e_pgtable.o obj-$(CONFIG_40x) += 40x.o obj-$(CONFIG_44x) += 44x.o obj-$(CONFIG_PPC_8xx) += 8xx.o -obj-$(CONFIG_PPC_FSL_BOOK3E) += fsl_book3e.o +obj-$(CONFIG_PPC_E500) += e500.o obj-$(CONFIG_RANDOMIZE_BASE) += kaslr_booke.o ifdef CONFIG_HUGETLB_PAGE -obj-$(CONFIG_PPC_FSL_BOOK3E) += book3e_hugetlbpage.o +obj-$(CONFIG_PPC_E500) += e500_hugetlbpage.o endif # Disable kcov instrumentation on sensitive code # This is necessary for booting with kcov enabled on book3e machines KCOV_INSTRUMENT_tlb.o := n -KCOV_INSTRUMENT_fsl_book3e.o := n +KCOV_INSTRUMENT_e500.o := n diff --git a/arch/powerpc/mm/nohash/fsl_book3e.c b/arch/powerpc/mm/nohash/e500.c similarity index 100% rename from arch/powerpc/mm/nohash/fsl_book3e.c rename to arch/powerpc/mm/nohash/e500.c diff --git a/arch/powerpc/mm/nohash/book3e_hugetlbpage.c b/arch/powerpc/mm/nohash/e500_hugetlbpage.c similarity index 100% rename from arch/powerpc/mm/nohash/book3e_hugetlbpage.c rename to arch/powerpc/mm/nohash/e500_hugetlbpage.c diff --git a/arch/powerpc/mm/nohash/tlb.c b/arch/powerpc/mm/nohash/tlb.c index f21896ebdc5a..fcb1e5ae5c55 100644 --- a/arch/powerpc/mm/nohash/tlb.c +++ b/arch/powerpc/mm/nohash/tlb.c @@ -50,7 +50,7 @@ * indirect page table entries. 
*/ #if defined(CONFIG_PPC_BOOK3E_MMU) || defined(CONFIG_PPC_8xx) -#ifdef CONFIG_PPC_FSL_BOOK3E +#ifdef CONFIG_PPC_E500 struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = { [MMU_PAGE_4K] = { .shift = 12, @@ -166,7 +166,7 @@ int extlb_level_exc; #endif /* CONFIG_PPC64 */ -#ifdef CONFIG_PPC_FSL_BOOK3E +#ifdef CONFIG_PPC_E500 /* next_tlbcam_idx is used to round-robin tlbcam entry assignment */ DEFINE_PER_CPU(int, next_tlbcam_idx); EXPORT_PER_CPU_SYMBOL(next_tlbcam_idx); @@ -441,7 +441,7 @@ static void __init setup_page_sizes(void) unsigned int eptcfg; int i, psize; -#ifdef CONFIG_PPC_FSL_BOOK3E +#ifdef CONFIG_PPC_E500 unsigned int mmucfg = mfspr(SPRN_MMUCFG); int fsl_mmu = mmu_has_feature(MMU_FTR_TYPE_FSL_E); @@ -584,7 +584,7 @@ static void __init setup_mmu_htw(void) patch_exception(0x1c0, exc_data_tlb_miss_htw_book3e); patch_exception(0x1e0, exc_instruction_tlb_miss_htw_book3e); break; -#ifdef CONFIG_PPC_FSL_BOOK3E +#ifdef CONFIG_PPC_E500 case PPC_HTW_E6500: extlb_level_exc = EX_TLB_SIZE; patch_exception(0x1c0, exc_data_tlb_miss_e6500_book3e); @@ -627,7 +627,7 @@ static void early_init_this_mmu(void) } mtspr(SPRN_MAS4, mas4); -#ifdef CONFIG_PPC_FSL_BOOK3E +#ifdef CONFIG_PPC_E500 if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) { unsigned int num_cams; bool map = true; @@ -680,7 +680,7 @@ static void __init early_init_mmu_global(void) /* Look for HW tablewalk support */ setup_mmu_htw(); -#ifdef CONFIG_PPC_FSL_BOOK3E +#ifdef CONFIG_PPC_E500 if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) { if (book3e_htw_mode == PPC_HTW_NONE) { extlb_level_exc = EX_TLB_SIZE; @@ -701,7 +701,7 @@ static void __init early_init_mmu_global(void) static void __init early_mmu_set_memory_limit(void) { -#ifdef CONFIG_PPC_FSL_BOOK3E +#ifdef CONFIG_PPC_E500 if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) { /* * Limit memory so we dont have linear faults. @@ -750,7 +750,7 @@ void setup_initial_memory_limit(phys_addr_t first_memblock_base, * We crop it to the size of the first MEMBLOCK to * avoid going over total available memory just in case... */ -#ifdef CONFIG_PPC_FSL_BOOK3E +#ifdef CONFIG_PPC_E500 if (early_mmu_has_feature(MMU_FTR_TYPE_FSL_E)) { unsigned long linear_sz; unsigned int num_cams; diff --git a/arch/powerpc/mm/nohash/tlb_low.S b/arch/powerpc/mm/nohash/tlb_low.S index 6914bc8e4ead..e1199608ff4d 100644 --- a/arch/powerpc/mm/nohash/tlb_low.S +++ b/arch/powerpc/mm/nohash/tlb_low.S @@ -364,7 +364,7 @@ _GLOBAL(_tlbivax_bcast) #error Unsupported processor type ! #endif -#if defined(CONFIG_PPC_FSL_BOOK3E) +#if defined(CONFIG_PPC_E500) /* * extern void loadcam_entry(unsigned int index) * diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype index 5b065186ace5..32c60ad8f45d 100644 --- a/arch/powerpc/platforms/Kconfig.cputype +++ b/arch/powerpc/platforms/Kconfig.cputype @@ -107,7 +107,6 @@ config PPC_BOOK3S_64 config PPC_BOOK3E_64 bool "Embedded processors" - select PPC_FSL_BOOK3E select PPC_E500 select PPC_E500MC select PPC_FPU # Make it a choice ? 
@@ -259,8 +258,11 @@ config PPC_BOOK3S config PPC_E500 select FSL_EMB_PERFMON - select PPC_FSL_BOOK3E bool + select ARCH_SUPPORTS_HUGETLBFS if PHYS_64BIT || PPC64 + select PPC_SMP_MUXED_IPI + select PPC_DOORBELL + select PPC_KUEP config PPC_E500MC bool "e500mc Support" @@ -320,16 +322,6 @@ config BOOKE_OR_40x depends on BOOKE || 40x default y -# this is for common code between PPC32 & PPC64 FSL BOOKE -config PPC_FSL_BOOK3E - bool - select ARCH_SUPPORTS_HUGETLBFS if PHYS_64BIT || PPC64 - imply FSL_EMB_PERFMON - select PPC_SMP_MUXED_IPI - select PPC_DOORBELL - select PPC_KUEP - default y if PPC_85xx - config PTE_64BIT bool depends on 44x || PPC_E500 || PPC_86xx From aa5f59df201dd350f7c291c845ac8b62c0d0edd5 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Mon, 19 Sep 2022 19:01:39 +0200 Subject: [PATCH 117/214] powerpc: Remove CONFIG_PPC_BOOK3E_MMU CONFIG_PPC_BOOK3E_MMU is redundant with CONFIG_PPC_E500. Remove it. Also rename mmu-book3e.h to mmu-e500.h Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/c5549cd59a131204ff94ab909cad2e2dad4ddf2f.1663606876.git.christophe.leroy@csgroup.eu --- .../include/asm/nohash/{mmu-book3e.h => mmu-e500.h} | 0 arch/powerpc/include/asm/nohash/mmu.h | 4 ++-- arch/powerpc/kernel/cpu_setup_e500.S | 2 +- arch/powerpc/kernel/entry_32.S | 2 +- arch/powerpc/kernel/head_booke.h | 4 ++-- arch/powerpc/kernel/kvm.c | 8 ++++---- arch/powerpc/kvm/e500.h | 2 +- arch/powerpc/mm/nohash/tlb.c | 4 ++-- arch/powerpc/mm/ptdump/Makefile | 2 +- arch/powerpc/platforms/Kconfig.cputype | 4 ---- 10 files changed, 14 insertions(+), 18 deletions(-) rename arch/powerpc/include/asm/nohash/{mmu-book3e.h => mmu-e500.h} (100%) diff --git a/arch/powerpc/include/asm/nohash/mmu-book3e.h b/arch/powerpc/include/asm/nohash/mmu-e500.h similarity index 100% rename from arch/powerpc/include/asm/nohash/mmu-book3e.h rename to arch/powerpc/include/asm/nohash/mmu-e500.h diff --git a/arch/powerpc/include/asm/nohash/mmu.h b/arch/powerpc/include/asm/nohash/mmu.h index edc793e5f08f..e264be219fdb 100644 --- a/arch/powerpc/include/asm/nohash/mmu.h +++ b/arch/powerpc/include/asm/nohash/mmu.h @@ -8,9 +8,9 @@ #elif defined(CONFIG_44x) /* 44x-style software loaded TLB */ #include -#elif defined(CONFIG_PPC_BOOK3E_MMU) +#elif defined(CONFIG_PPC_E500) /* Freescale Book-E software loaded TLB or Book-3e (ISA 2.06+) MMU */ -#include +#include #elif defined (CONFIG_PPC_8xx) /* Motorola/Freescale 8xx software loaded TLB */ #include diff --git a/arch/powerpc/kernel/cpu_setup_e500.S b/arch/powerpc/kernel/cpu_setup_e500.S index 058336079069..2ab25161b0ad 100644 --- a/arch/powerpc/kernel/cpu_setup_e500.S +++ b/arch/powerpc/kernel/cpu_setup_e500.S @@ -12,7 +12,7 @@ #include #include #include -#include +#include #include #include diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S index e6d5fe3a8585..2b5b0677d36c 100644 --- a/arch/powerpc/kernel/entry_32.S +++ b/arch/powerpc/kernel/entry_32.S @@ -488,7 +488,7 @@ _ASM_NOKPROBE_SYMBOL(interrupt_return) mtspr SPRN_##exc_lvl_srr0,r9; \ mtspr SPRN_##exc_lvl_srr1,r10; -#if defined(CONFIG_PPC_BOOK3E_MMU) +#if defined(CONFIG_PPC_E500) #ifdef CONFIG_PHYS_64BIT #define RESTORE_MAS7 \ lwz r11,MAS7(r1); \ diff --git a/arch/powerpc/kernel/head_booke.h b/arch/powerpc/kernel/head_booke.h index 1047dc053b47..1cb9d0f7cbf2 100644 --- a/arch/powerpc/kernel/head_booke.h +++ b/arch/powerpc/kernel/head_booke.h @@ -242,7 +242,7 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_EMB_HV) .macro SAVE_MMU_REGS -#ifdef CONFIG_PPC_BOOK3E_MMU 
+#ifdef CONFIG_PPC_E500 mfspr r0,SPRN_MAS0 stw r0,MAS0(r1) mfspr r0,SPRN_MAS1 @@ -257,7 +257,7 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_EMB_HV) mfspr r0,SPRN_MAS7 stw r0,MAS7(r1) #endif /* CONFIG_PHYS_64BIT */ -#endif /* CONFIG_PPC_BOOK3E_MMU */ +#endif /* CONFIG_PPC_E500 */ #ifdef CONFIG_44x mfspr r0,SPRN_MMUCR stw r0,MMUCR(r1) diff --git a/arch/powerpc/kernel/kvm.c b/arch/powerpc/kernel/kvm.c index 6568823cf306..5b3c093611ba 100644 --- a/arch/powerpc/kernel/kvm.c +++ b/arch/powerpc/kernel/kvm.c @@ -455,7 +455,7 @@ static void __init kvm_check_ins(u32 *inst, u32 features) kvm_patch_ins_lwz(inst, magic_var(dsisr), inst_rt); break; -#ifdef CONFIG_PPC_BOOK3E_MMU +#ifdef CONFIG_PPC_E500 case KVM_INST_MFSPR(SPRN_MAS0): if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7) kvm_patch_ins_lwz(inst, magic_var(mas0), inst_rt); @@ -484,7 +484,7 @@ static void __init kvm_check_ins(u32 *inst, u32 features) if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7) kvm_patch_ins_lwz(inst, magic_var(mas7_3), inst_rt); break; -#endif /* CONFIG_PPC_BOOK3E_MMU */ +#endif /* CONFIG_PPC_E500 */ case KVM_INST_MFSPR(SPRN_SPRG4): #ifdef CONFIG_BOOKE @@ -557,7 +557,7 @@ static void __init kvm_check_ins(u32 *inst, u32 features) case KVM_INST_MTSPR(SPRN_DSISR): kvm_patch_ins_stw(inst, magic_var(dsisr), inst_rt); break; -#ifdef CONFIG_PPC_BOOK3E_MMU +#ifdef CONFIG_PPC_E500 case KVM_INST_MTSPR(SPRN_MAS0): if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7) kvm_patch_ins_stw(inst, magic_var(mas0), inst_rt); @@ -586,7 +586,7 @@ static void __init kvm_check_ins(u32 *inst, u32 features) if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7) kvm_patch_ins_stw(inst, magic_var(mas7_3), inst_rt); break; -#endif /* CONFIG_PPC_BOOK3E_MMU */ +#endif /* CONFIG_PPC_E500 */ case KVM_INST_MTSPR(SPRN_SPRG4): if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7) diff --git a/arch/powerpc/kvm/e500.h b/arch/powerpc/kvm/e500.h index c3ef751465fb..6d0d329cbb35 100644 --- a/arch/powerpc/kvm/e500.h +++ b/arch/powerpc/kvm/e500.h @@ -17,7 +17,7 @@ #define KVM_E500_H #include -#include +#include #include #include diff --git a/arch/powerpc/mm/nohash/tlb.c b/arch/powerpc/mm/nohash/tlb.c index fcb1e5ae5c55..fac59fbd475a 100644 --- a/arch/powerpc/mm/nohash/tlb.c +++ b/arch/powerpc/mm/nohash/tlb.c @@ -49,7 +49,7 @@ * other sizes not listed here. The .ind field is only used on MMUs that have * indirect page table entries. 
*/ -#if defined(CONFIG_PPC_BOOK3E_MMU) || defined(CONFIG_PPC_8xx) +#if defined(CONFIG_PPC_E500) || defined(CONFIG_PPC_8xx) #ifdef CONFIG_PPC_E500 struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = { [MMU_PAGE_4K] = { @@ -142,7 +142,7 @@ static inline int mmu_get_tsize(int psize) /* This isn't used on !Book3E for now */ return 0; } -#endif /* CONFIG_PPC_BOOK3E_MMU */ +#endif /* CONFIG_PPC_E500 */ /* The variables below are currently only used on 64-bit Book3E * though this will probably be made common with other nohash diff --git a/arch/powerpc/mm/ptdump/Makefile b/arch/powerpc/mm/ptdump/Makefile index b533caaf0910..dc896d2874f3 100644 --- a/arch/powerpc/mm/ptdump/Makefile +++ b/arch/powerpc/mm/ptdump/Makefile @@ -4,7 +4,7 @@ obj-y += ptdump.o obj-$(CONFIG_4xx) += shared.o obj-$(CONFIG_PPC_8xx) += 8xx.o -obj-$(CONFIG_PPC_BOOK3E_MMU) += shared.o +obj-$(CONFIG_PPC_E500) += shared.o obj-$(CONFIG_PPC_BOOK3S_32) += shared.o obj-$(CONFIG_PPC_BOOK3S_64) += book3s64.o diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype index 32c60ad8f45d..1746d19d058f 100644 --- a/arch/powerpc/platforms/Kconfig.cputype +++ b/arch/powerpc/platforms/Kconfig.cputype @@ -466,10 +466,6 @@ config PPC_MMU_NOHASH def_bool y depends on !PPC_BOOK3S -config PPC_BOOK3E_MMU - def_bool y - depends on PPC_85xx || PPC_BOOK3E_64 - config PPC_HAVE_PMU_SUPPORT bool From 772fd56deca62628c638d1a9bd2d34cbd371bb81 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Mon, 19 Sep 2022 19:01:40 +0200 Subject: [PATCH 118/214] powerpc: Replace PPC_85xx || PPC_BOOKE_64 by PPC_E500 PPC_E500 is the same as PPC_85xx || PPC_BOOKE_64 Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/af79696f8cb8536fb4e20c0d98a6bf159a9e371b.1663606876.git.christophe.leroy@csgroup.eu --- arch/powerpc/Kconfig | 2 +- arch/powerpc/platforms/85xx/Kconfig | 2 +- arch/powerpc/platforms/Kconfig.cputype | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index a7b58645cc3f..f305c2f13177 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -548,7 +548,7 @@ config PPC64_SUPPORTS_MEMORY_FAILURE config KEXEC bool "kexec system call" - depends on (PPC_BOOK3S || PPC_85xx || (44x && !SMP)) || PPC_BOOK3E_64 + depends on PPC_BOOK3S || PPC_E500 || (44x && !SMP) select KEXEC_CORE help kexec is a system call that implements the ability to shutdown your diff --git a/arch/powerpc/platforms/85xx/Kconfig b/arch/powerpc/platforms/85xx/Kconfig index 63fec86e41b4..b92cb2b4d54d 100644 --- a/arch/powerpc/platforms/85xx/Kconfig +++ b/arch/powerpc/platforms/85xx/Kconfig @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 menuconfig FSL_SOC_BOOKE bool "Freescale Book-E Machine Type" - depends on PPC_85xx || PPC_BOOK3E_64 + depends on PPC_E500 select FSL_SOC select PPC_UDBG_16550 select MPIC diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype index 1746d19d058f..6a216e88423b 100644 --- a/arch/powerpc/platforms/Kconfig.cputype +++ b/arch/powerpc/platforms/Kconfig.cputype @@ -487,7 +487,7 @@ config FORCE_SMP select SMP config SMP - depends on PPC_BOOK3S || PPC_BOOK3E_64 || PPC_85xx || PPC_47x + depends on PPC_BOOK3S || PPC_E500 || PPC_47x select GENERIC_IRQ_MIGRATION bool "Symmetric multi-processing support" if !FORCE_SMP help From 73d11498793f495d64230308afa50905f012f080 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Mon, 19 Sep 2022 19:01:41 +0200 Subject: [PATCH 119/214] powerpc: Simplify 
redundant Kconfig tests PPC_85xx implies PPC32 so no need to check PPC32 in addition. PPC64 && !PPC_BOOK3E_64 means PPC_BOOK3S_64. PPC_BOOK3E_64 implies PPC_E500. Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/244cce3e603f2b79796314c0c1c46cab927b9adc.1663606876.git.christophe.leroy@csgroup.eu --- arch/powerpc/Kconfig | 2 +- arch/powerpc/platforms/Kconfig.cputype | 2 +- arch/powerpc/xmon/xmon.c | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index f305c2f13177..44d98d32e3bf 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -606,7 +606,7 @@ config RELOCATABLE config RANDOMIZE_BASE bool "Randomize the address of the kernel image" - depends on (PPC_85xx && FLATMEM && PPC32) + depends on PPC_85xx && FLATMEM depends on RELOCATABLE help Randomizes the virtual address at which the kernel image is diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype index 6a216e88423b..51059af63856 100644 --- a/arch/powerpc/platforms/Kconfig.cputype +++ b/arch/powerpc/platforms/Kconfig.cputype @@ -314,7 +314,7 @@ config 4xx config BOOKE bool - depends on PPC_E500 || 44x || PPC_BOOK3E_64 + depends on PPC_E500 || 44x default y config BOOKE_OR_40x diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c index e6d678d27b0f..f51c882bf902 100644 --- a/arch/powerpc/xmon/xmon.c +++ b/arch/powerpc/xmon/xmon.c @@ -288,7 +288,7 @@ Commands:\n\ t print backtrace\n\ x exit monitor and recover\n\ X exit monitor and don't recover\n" -#if defined(CONFIG_PPC64) && !defined(CONFIG_PPC_BOOK3E_64) +#if defined(CONFIG_PPC_BOOK3S_64) " u dump segment table or SLB\n" #elif defined(CONFIG_PPC_BOOK3S_32) " u dump segment registers\n" From 6556fd1a1e9fcd180348c4368d2387bdc6a17613 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Mon, 19 Sep 2022 19:01:42 +0200 Subject: [PATCH 120/214] powerpc: Cleanup idle for e500 e500 idle setup is a bit messy. e500_idle() is used for PPC32 while book3e_idle() is used for PPC64. As they are mutually exclusive, call them all e500_idle(). Use CONFIG_MPC_85xx instead of PPC32 + E500 in Makefile and rename idle_e500.c to idle_85xx.c . Rename idle_book3e.c to idle_64e.c and remove #ifdef PPC64 in as it's only built on PPC64. 
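As a sketch of where this lands (the board name below is hypothetical; the real wiring is in the corenet_generic.c and qemu_e500.c hunks further down), every e500 platform now points its machine description at the same symbol, and the generic idle loop reaches it through ppc_md.power_save():

    /* Minimal sketch, assuming <asm/machdep.h> for define_machine() and e500_idle() */
    define_machine(example_e500_board) {          /* hypothetical board */
            .name       = "Example e500 board",
            .power_save = e500_idle,              /* one symbol for both PPC32 and PPC64 */
    };

With book3e_idle() gone, board files no longer need a CONFIG_PPC64 #ifdef around the .power_save assignment, as the hunks below show.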
Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/8039301334e948974c85ec5ef2db37751075185b.1663606876.git.christophe.leroy@csgroup.eu --- arch/powerpc/include/asm/machdep.h | 1 - arch/powerpc/kernel/Makefile | 6 ++---- arch/powerpc/kernel/{idle_book3e.S => idle_64e.S} | 8 ++------ arch/powerpc/kernel/{idle_e500.S => idle_85xx.S} | 0 arch/powerpc/platforms/85xx/corenet_generic.c | 4 ---- arch/powerpc/platforms/85xx/qemu_e500.c | 4 ---- 6 files changed, 4 insertions(+), 19 deletions(-) rename arch/powerpc/kernel/{idle_book3e.S => idle_64e.S} (93%) rename arch/powerpc/kernel/{idle_e500.S => idle_85xx.S} (100%) diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h index 8cb83600c434..378b8d5836a7 100644 --- a/arch/powerpc/include/asm/machdep.h +++ b/arch/powerpc/include/asm/machdep.h @@ -204,7 +204,6 @@ struct machdep_calls { extern void e500_idle(void); extern void power4_idle(void); extern void ppc6xx_idle(void); -extern void book3e_idle(void); /* * ppc_md contains a copy of the machine description structure for the diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index 658c4dffaa56..1f121c188805 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile @@ -81,7 +81,7 @@ obj-$(CONFIG_PPC_DAWR) += dawr.o obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_ppc970.o cpu_setup_pa6t.o obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_power.o obj-$(CONFIG_PPC_BOOK3S_64) += mce.o mce_power.o -obj-$(CONFIG_PPC_BOOK3E_64) += exceptions-64e.o idle_book3e.o +obj-$(CONFIG_PPC_BOOK3E_64) += exceptions-64e.o idle_64e.o obj-$(CONFIG_PPC_BARRIER_NOSPEC) += security.o obj-$(CONFIG_PPC64) += vdso64_wrapper.o obj-$(CONFIG_ALTIVEC) += vecemu.o @@ -100,9 +100,7 @@ obj-$(CONFIG_GENERIC_TBSYNC) += smp-tbsync.o obj-$(CONFIG_CRASH_DUMP) += crash_dump.o obj-$(CONFIG_FA_DUMP) += fadump.o obj-$(CONFIG_PRESERVE_FA_DUMP) += fadump.o -ifdef CONFIG_PPC32 -obj-$(CONFIG_PPC_E500) += idle_e500.o -endif +obj-$(CONFIG_PPC_85xx) += idle_85xx.o obj-$(CONFIG_PPC_BOOK3S_32) += idle_6xx.o l2cr_6xx.o cpu_setup_6xx.o obj-$(CONFIG_TAU) += tau_6xx.o obj-$(CONFIG_HIBERNATION) += swsusp.o suspend.o diff --git a/arch/powerpc/kernel/idle_book3e.S b/arch/powerpc/kernel/idle_64e.S similarity index 93% rename from arch/powerpc/kernel/idle_book3e.S rename to arch/powerpc/kernel/idle_64e.S index cc008de58b05..1736aad2afe9 100644 --- a/arch/powerpc/kernel/idle_book3e.S +++ b/arch/powerpc/kernel/idle_64e.S @@ -2,7 +2,7 @@ /* * Copyright 2010 IBM Corp, Benjamin Herrenschmidt * - * Generic idle routine for Book3E processors + * Generic idle routine for 64 bits e500 processors */ #include @@ -16,8 +16,6 @@ #include /* 64-bit version only for now */ -#ifdef CONFIG_PPC64 - .macro BOOK3E_IDLE name loop _GLOBAL(\name) /* Save LR for later */ @@ -98,6 +96,4 @@ epapr_ev_idle_start: BOOK3E_IDLE epapr_ev_idle EPAPR_EV_IDLE_LOOP -BOOK3E_IDLE book3e_idle BOOK3E_IDLE_LOOP - -#endif /* CONFIG_PPC64 */ +BOOK3E_IDLE e500_idle BOOK3E_IDLE_LOOP diff --git a/arch/powerpc/kernel/idle_e500.S b/arch/powerpc/kernel/idle_85xx.S similarity index 100% rename from arch/powerpc/kernel/idle_e500.S rename to arch/powerpc/kernel/idle_85xx.S diff --git a/arch/powerpc/platforms/85xx/corenet_generic.c b/arch/powerpc/platforms/85xx/corenet_generic.c index 28d6b36f1ccd..2c539de2d629 100644 --- a/arch/powerpc/platforms/85xx/corenet_generic.c +++ b/arch/powerpc/platforms/85xx/corenet_generic.c @@ -200,9 +200,5 @@ define_machine(corenet_generic) { #endif .calibrate_decr = 
generic_calibrate_decr, .progress = udbg_progress, -#ifdef CONFIG_PPC64 - .power_save = book3e_idle, -#else .power_save = e500_idle, -#endif }; diff --git a/arch/powerpc/platforms/85xx/qemu_e500.c b/arch/powerpc/platforms/85xx/qemu_e500.c index 64109ad6736c..1639e222cc33 100644 --- a/arch/powerpc/platforms/85xx/qemu_e500.c +++ b/arch/powerpc/platforms/85xx/qemu_e500.c @@ -68,9 +68,5 @@ define_machine(qemu_e500) { .get_irq = mpic_get_coreint_irq, .calibrate_decr = generic_calibrate_decr, .progress = udbg_progress, -#ifdef CONFIG_PPC64 - .power_save = book3e_idle, -#else .power_save = e500_idle, -#endif }; From 605ba9ee8aaabc77178b369ec6f773616089020d Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Mon, 19 Sep 2022 19:01:43 +0200 Subject: [PATCH 121/214] powerpc: Remove impossible mmu_psize_defs[] on nohash Today there is: if e500 or 8xx if e500 mmu_psize_defs[] = else if 8xx mmu_psize_defs[] = else mmu_psize_defs[] = endif endif The else leg is dead definition. Drop that else leg and rewrite as: if e500 mmu_psize_defs[] = endif if 8xx mmu_psize_defs[] = endif Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/030a843449f348c0b709ca5349640624f36a016f.1663606876.git.christophe.leroy@csgroup.eu --- arch/powerpc/mm/nohash/tlb.c | 64 +++++++++--------------------------- 1 file changed, 15 insertions(+), 49 deletions(-) diff --git a/arch/powerpc/mm/nohash/tlb.c b/arch/powerpc/mm/nohash/tlb.c index fac59fbd475a..2c15c86c7015 100644 --- a/arch/powerpc/mm/nohash/tlb.c +++ b/arch/powerpc/mm/nohash/tlb.c @@ -49,7 +49,6 @@ * other sizes not listed here. The .ind field is only used on MMUs that have * indirect page table entries. */ -#if defined(CONFIG_PPC_E500) || defined(CONFIG_PPC_8xx) #ifdef CONFIG_PPC_E500 struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = { [MMU_PAGE_4K] = { @@ -81,7 +80,20 @@ struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = { .enc = BOOK3E_PAGESZ_1GB, }, }; -#elif defined(CONFIG_PPC_8xx) + +static inline int mmu_get_tsize(int psize) +{ + return mmu_psize_defs[psize].enc; +} +#else +static inline int mmu_get_tsize(int psize) +{ + /* This isn't used on !Book3E for now */ + return 0; +} +#endif + +#ifdef CONFIG_PPC_8xx struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = { [MMU_PAGE_4K] = { .shift = 12, @@ -96,53 +108,7 @@ struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = { .shift = 23, }, }; -#else -struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = { - [MMU_PAGE_4K] = { - .shift = 12, - .ind = 20, - .enc = BOOK3E_PAGESZ_4K, - }, - [MMU_PAGE_16K] = { - .shift = 14, - .enc = BOOK3E_PAGESZ_16K, - }, - [MMU_PAGE_64K] = { - .shift = 16, - .ind = 28, - .enc = BOOK3E_PAGESZ_64K, - }, - [MMU_PAGE_1M] = { - .shift = 20, - .enc = BOOK3E_PAGESZ_1M, - }, - [MMU_PAGE_16M] = { - .shift = 24, - .ind = 36, - .enc = BOOK3E_PAGESZ_16M, - }, - [MMU_PAGE_256M] = { - .shift = 28, - .enc = BOOK3E_PAGESZ_256M, - }, - [MMU_PAGE_1G] = { - .shift = 30, - .enc = BOOK3E_PAGESZ_1GB, - }, -}; -#endif /* CONFIG_PPC_85xx */ - -static inline int mmu_get_tsize(int psize) -{ - return mmu_psize_defs[psize].enc; -} -#else -static inline int mmu_get_tsize(int psize) -{ - /* This isn't used on !Book3E for now */ - return 0; -} -#endif /* CONFIG_PPC_E500 */ +#endif /* The variables below are currently only used on 64-bit Book3E * though this will probably be made common with other nohash From 4af83545538a4fa80d14b9247ffc0db556e6a556 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Tue, 20 Sep 2022 08:41:08 +0200 Subject: [PATCH 122/214] powerpc/irq: 
Refactor irq_soft_mask_{set,or}_return() This partially reapplies commit ef5b570d3700 ("powerpc/irq: Don't open code irq_soft_mask helpers") which was reverted by commit 684c68d92e2e ("Revert "powerpc/irq: Don't open code irq_soft_mask helpers"") irq_soft_mask_set_return() and irq_soft_mask_or_return() are supersets of irq_soft_mask_set(). Have them use irq_soft_mask_set() instead of duplicating it. Signed-off-by: Christophe Leroy Reviewed-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/18473da42362ee8f07bce36b9caef8ba77d7633f.1663656054.git.christophe.leroy@csgroup.eu --- arch/powerpc/include/asm/hw_irq.h | 26 ++++---------------------- 1 file changed, 4 insertions(+), 22 deletions(-) diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h index 983551859891..e8de249339d8 100644 --- a/arch/powerpc/include/asm/hw_irq.h +++ b/arch/powerpc/include/asm/hw_irq.h @@ -157,36 +157,18 @@ static inline notrace void irq_soft_mask_set(unsigned long mask) static inline notrace unsigned long irq_soft_mask_set_return(unsigned long mask) { - unsigned long flags; + unsigned long flags = irq_soft_mask_return(); -#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG - WARN_ON(mask && !(mask & IRQS_DISABLED)); -#endif - - asm volatile( - "lbz %0,%1(13); stb %2,%1(13)" - : "=&r" (flags) - : "i" (offsetof(struct paca_struct, irq_soft_mask)), - "r" (mask) - : "memory"); + irq_soft_mask_set(mask); return flags; } static inline notrace unsigned long irq_soft_mask_or_return(unsigned long mask) { - unsigned long flags, tmp; + unsigned long flags = irq_soft_mask_return(); - asm volatile( - "lbz %0,%2(13); or %1,%0,%3; stb %1,%2(13)" - : "=&r" (flags), "=r" (tmp) - : "i" (offsetof(struct paca_struct, irq_soft_mask)), - "r" (mask) - : "memory"); - -#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG - WARN_ON((mask | flags) && !((mask | flags) & IRQS_DISABLED)); -#endif + irq_soft_mask_set(flags | mask); return flags; } From 5ba6c9a912fe4c60f84d6617ad10d2b8d7910990 Mon Sep 17 00:00:00 2001 From: Rohan McLure Date: Wed, 21 Sep 2022 16:55:41 +1000 Subject: [PATCH 123/214] powerpc: Remove asmlinkage from syscall handler definitions The asmlinkage macro has no special meaning in powerpc, and prior to this patch is used sporadically on some syscall handler definitions. On architectures that do not define asmlinkage, it resolves to extern "C" for C++ compilers and a nop otherwise. The current invocations of asmlinkage provide far from complete support for C++ toolchains, and so the macro serves no purpose in powerpc. Remove all invocations of asmlinkage in arch/powerpc. These incidentally only occur in syscall definitions and prototypes.
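For reference, on architectures that do not provide their own definition, asmlinkage falls back to roughly the following (a sketch along the lines of include/linux/linkage.h, shown only to illustrate why the macro is a no-op for plain C builds; it is not part of this patch):

#ifdef __cplusplus
#define CPP_ASMLINKAGE extern "C"
#else
#define CPP_ASMLINKAGE
#endif

#ifndef asmlinkage
#define asmlinkage CPP_ASMLINKAGE
#endif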
Signed-off-by: Rohan McLure Reviewed-by: Nicholas Piggin Reviewed-by: Andrew Donnellan Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220921065605.1051927-2-rmclure@linux.ibm.com --- arch/powerpc/include/asm/syscalls.h | 16 ++++++++-------- arch/powerpc/kernel/sys_ppc32.c | 8 ++++---- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/arch/powerpc/include/asm/syscalls.h b/arch/powerpc/include/asm/syscalls.h index a2b13e55254f..21c2faaa2957 100644 --- a/arch/powerpc/include/asm/syscalls.h +++ b/arch/powerpc/include/asm/syscalls.h @@ -10,14 +10,14 @@ struct rtas_args; -asmlinkage long sys_mmap(unsigned long addr, size_t len, - unsigned long prot, unsigned long flags, - unsigned long fd, off_t offset); -asmlinkage long sys_mmap2(unsigned long addr, size_t len, - unsigned long prot, unsigned long flags, - unsigned long fd, unsigned long pgoff); -asmlinkage long ppc64_personality(unsigned long personality); -asmlinkage long sys_rtas(struct rtas_args __user *uargs); +long sys_mmap(unsigned long addr, size_t len, + unsigned long prot, unsigned long flags, + unsigned long fd, off_t offset); +long sys_mmap2(unsigned long addr, size_t len, + unsigned long prot, unsigned long flags, + unsigned long fd, unsigned long pgoff); +long ppc64_personality(unsigned long personality); +long sys_rtas(struct rtas_args __user *uargs); int ppc_select(int n, fd_set __user *inp, fd_set __user *outp, fd_set __user *exp, struct __kernel_old_timeval __user *tvp); long ppc_fadvise64_64(int fd, int advice, u32 offset_high, u32 offset_low, diff --git a/arch/powerpc/kernel/sys_ppc32.c b/arch/powerpc/kernel/sys_ppc32.c index 16ff0399a257..f4edcc9489fb 100644 --- a/arch/powerpc/kernel/sys_ppc32.c +++ b/arch/powerpc/kernel/sys_ppc32.c @@ -85,20 +85,20 @@ compat_ssize_t compat_sys_readahead(int fd, u32 r4, u32 offset1, u32 offset2, u3 return ksys_readahead(fd, merge_64(offset1, offset2), count); } -asmlinkage int compat_sys_truncate64(const char __user * path, u32 reg4, +int compat_sys_truncate64(const char __user * path, u32 reg4, unsigned long len1, unsigned long len2) { return ksys_truncate(path, merge_64(len1, len2)); } -asmlinkage long compat_sys_fallocate(int fd, int mode, u32 offset1, u32 offset2, +long compat_sys_fallocate(int fd, int mode, u32 offset1, u32 offset2, u32 len1, u32 len2) { return ksys_fallocate(fd, mode, ((loff_t)offset1 << 32) | offset2, merge_64(len1, len2)); } -asmlinkage int compat_sys_ftruncate64(unsigned int fd, u32 reg4, unsigned long len1, +int compat_sys_ftruncate64(unsigned int fd, u32 reg4, unsigned long len1, unsigned long len2) { return ksys_ftruncate(fd, merge_64(len1, len2)); @@ -111,7 +111,7 @@ long ppc32_fadvise64(int fd, u32 unused, u32 offset1, u32 offset2, advice); } -asmlinkage long compat_sys_sync_file_range2(int fd, unsigned int flags, +long compat_sys_sync_file_range2(int fd, unsigned int flags, unsigned offset1, unsigned offset2, unsigned nbytes1, unsigned nbytes2) { From 2c27d4a419f627636b8c6038e55acb26df05c391 Mon Sep 17 00:00:00 2001 From: Rohan McLure Date: Wed, 21 Sep 2022 16:55:42 +1000 Subject: [PATCH 124/214] powerpc: Save caller r3 prior to system_call_exception This reverts commit 8875f47b7681 ("powerpc/syscall: Save r3 in regs->orig_r3 "). Save caller's original r3 state to the kernel stackframe before entering system_call_exception. This allows for user registers to be cleared by the time system_call_exception is entered, reducing the influence of user registers on speculation within the kernel. 
Prior to this commit, orig_r3 was saved at the beginning of system_call_exception. Instead, save orig_r3 while the user value is still live in r3. Also replicate this early save in 32-bit. A similar save was removed in commit 6f76a01173cc ("powerpc/syscall: implement system call entry/exit logic in C for PPC32") when 32-bit adopted system_call_exception. Revert its removal of orig_r3 saves. Signed-off-by: Rohan McLure Reviewed-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220921065605.1051927-3-rmclure@linux.ibm.com --- arch/powerpc/kernel/entry_32.S | 1 + arch/powerpc/kernel/interrupt_64.S | 2 ++ arch/powerpc/kernel/syscall.c | 1 - 3 files changed, 3 insertions(+), 1 deletion(-) diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S index 2b5b0677d36c..497f04cde7dc 100644 --- a/arch/powerpc/kernel/entry_32.S +++ b/arch/powerpc/kernel/entry_32.S @@ -101,6 +101,7 @@ __kuep_unlock: .globl transfer_to_syscall transfer_to_syscall: + stw r3, ORIG_GPR3(r1) stw r11, GPR1(r1) stw r11, 0(r1) mflr r12 diff --git a/arch/powerpc/kernel/interrupt_64.S b/arch/powerpc/kernel/interrupt_64.S index a2d3abb48075..9d6c8c5e5634 100644 --- a/arch/powerpc/kernel/interrupt_64.S +++ b/arch/powerpc/kernel/interrupt_64.S @@ -81,6 +81,7 @@ _ASM_NOKPROBE_SYMBOL(system_call_vectored_\name) li r11,\trapnr std r11,_TRAP(r1) std r12,_CCR(r1) + std r3,ORIG_GPR3(r1) addi r10,r1,STACK_FRAME_OVERHEAD ld r11,exception_marker@toc(r2) std r11,-16(r10) /* "regshere" marker */ @@ -265,6 +266,7 @@ END_BTB_FLUSH_SECTION std r10,_LINK(r1) std r11,_TRAP(r1) std r12,_CCR(r1) + std r3,ORIG_GPR3(r1) addi r10,r1,STACK_FRAME_OVERHEAD ld r11,exception_marker@toc(r2) std r11,-16(r10) /* "regshere" marker */ diff --git a/arch/powerpc/kernel/syscall.c b/arch/powerpc/kernel/syscall.c index 81ace9e8b72b..64102a64fd84 100644 --- a/arch/powerpc/kernel/syscall.c +++ b/arch/powerpc/kernel/syscall.c @@ -25,7 +25,6 @@ notrace long system_call_exception(long r3, long r4, long r5, kuap_lock(); add_random_kstack_offset(); - regs->orig_gpr3 = r3; if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) BUG_ON(irq_soft_mask_return() != IRQS_ALL_DISABLED); From 9d54a5ce3aa87810f13cd33b314097ac6d28c350 Mon Sep 17 00:00:00 2001 From: Rohan McLure Date: Wed, 21 Sep 2022 16:55:43 +1000 Subject: [PATCH 125/214] powerpc: Add ZEROIZE_GPRS macros for register clears Provide register zeroing macros, following the same convention as existing register stack save/restore macros, to be used in later change to concisely zero a sequence of consecutive gprs. The resulting macros are called ZEROIZE_GPRS and ZEROIZE_NVGPRS, keeping with the naming of the accompanying restore and save macros, and usage of zeroize to describe this operation elsewhere in the kernel. 
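As an illustration (not taken from this patch), ZEROIZE_GPRS(9, 12) in a 64-bit assembly file is expected to expand to the same open-coded run of clears that a later patch in this series removes from interrupt_64.S:

	li	r9, 0
	li	r10, 0
	li	r11, 0
	li	r12, 0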
Signed-off-by: Rohan McLure Reviewed-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220921065605.1051927-4-rmclure@linux.ibm.com --- arch/powerpc/include/asm/ppc_asm.h | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h index 7e4fe766e247..eeb7dc8cd45f 100644 --- a/arch/powerpc/include/asm/ppc_asm.h +++ b/arch/powerpc/include/asm/ppc_asm.h @@ -33,6 +33,20 @@ .endr .endm +/* + * This expands to a sequence of register clears for regs start to end + * inclusive, of the form: + * + * li rN, 0 + */ +.macro ZEROIZE_REGS start, end + .Lreg=\start + .rept (\end - \start + 1) + li .Lreg, 0 + .Lreg=.Lreg+1 + .endr +.endm + /* * Macros for storing registers into and loading registers from * exception frames. @@ -49,6 +63,14 @@ #define REST_NVGPRS(base) REST_GPRS(13, 31, base) #endif +#define ZEROIZE_GPRS(start, end) ZEROIZE_REGS start, end +#ifdef __powerpc64__ +#define ZEROIZE_NVGPRS() ZEROIZE_GPRS(14, 31) +#else +#define ZEROIZE_NVGPRS() ZEROIZE_GPRS(13, 31) +#endif +#define ZEROIZE_GPR(n) ZEROIZE_GPRS(n, n) + #define SAVE_GPR(n, base) SAVE_GPRS(n, n, base) #define REST_GPR(n, base) REST_GPRS(n, n, base) From 2b1dac4b5f97ea88fb01dfcab7fc24500b5dea95 Mon Sep 17 00:00:00 2001 From: Rohan McLure Date: Wed, 21 Sep 2022 16:55:44 +1000 Subject: [PATCH 126/214] powerpc/64s: Use {ZEROIZE,SAVE,REST}_GPRS macros in sc, scv 0 handlers Use the convenience macros for saving/clearing/restoring gprs in keeping with syscall calling conventions. The plural variants of these macros can store a range of registers for concision. This works well when the user gpr value we are hoping to save is still live. In the syscall interrupt handlers, user register state is sometimes juggled between registers. Hold-off from issuing the SAVE_GPR macro for applicable neighbouring lines to highlight the delicate register save logic. Signed-off-by: Rohan McLure Reviewed-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220921065605.1051927-5-rmclure@linux.ibm.com --- arch/powerpc/kernel/interrupt_64.S | 43 +++++++----------------------- 1 file changed, 9 insertions(+), 34 deletions(-) diff --git a/arch/powerpc/kernel/interrupt_64.S b/arch/powerpc/kernel/interrupt_64.S index 9d6c8c5e5634..931f984b02b1 100644 --- a/arch/powerpc/kernel/interrupt_64.S +++ b/arch/powerpc/kernel/interrupt_64.S @@ -61,12 +61,7 @@ _ASM_NOKPROBE_SYMBOL(system_call_vectored_\name) mfcr r12 li r11,0 /* Can we avoid saving r3-r8 in common case? */ - std r3,GPR3(r1) - std r4,GPR4(r1) - std r5,GPR5(r1) - std r6,GPR6(r1) - std r7,GPR7(r1) - std r8,GPR8(r1) + SAVE_GPRS(3, 8, r1) /* Zero r9-r12, this should only be required when restoring all GPRs */ std r11,GPR9(r1) std r11,GPR10(r1) @@ -139,17 +134,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) /* Could zero these as per ABI, but we may consider a stricter ABI * which preserves these if libc implementations can benefit, so * restore them for now until further measurement is done. 
*/ - ld r0,GPR0(r1) - ld r4,GPR4(r1) - ld r5,GPR5(r1) - ld r6,GPR6(r1) - ld r7,GPR7(r1) - ld r8,GPR8(r1) + REST_GPR(0, r1) + REST_GPRS(4, 8, r1) /* Zero volatile regs that may contain sensitive kernel data */ - li r9,0 - li r10,0 - li r11,0 - li r12,0 + ZEROIZE_GPRS(9, 12) mtspr SPRN_XER,r0 /* @@ -172,7 +160,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) ld r5,_XER(r1) REST_NVGPRS(r1) - ld r0,GPR0(r1) + REST_GPR(0, r1) mtcr r2 mtctr r3 mtlr r4 @@ -240,12 +228,7 @@ END_BTB_FLUSH_SECTION mfcr r12 li r11,0 /* Can we avoid saving r3-r8 in common case? */ - std r3,GPR3(r1) - std r4,GPR4(r1) - std r5,GPR5(r1) - std r6,GPR6(r1) - std r7,GPR7(r1) - std r8,GPR8(r1) + SAVE_GPRS(3, 8, r1) /* Zero r9-r12, this should only be required when restoring all GPRs */ std r11,GPR9(r1) std r11,GPR10(r1) @@ -335,16 +318,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS) cmpdi r3,0 bne .Lsyscall_restore_regs /* Zero volatile regs that may contain sensitive kernel data */ - li r0,0 - li r4,0 - li r5,0 - li r6,0 - li r7,0 - li r8,0 - li r9,0 - li r10,0 - li r11,0 - li r12,0 + ZEROIZE_GPR(0) + ZEROIZE_GPRS(4, 12) mtctr r0 mtspr SPRN_XER,r0 .Lsyscall_restore_regs_cont: @@ -370,7 +345,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) REST_NVGPRS(r1) mtctr r3 mtspr SPRN_XER,r4 - ld r0,GPR0(r1) + REST_GPR(0, r1) REST_GPRS(4, 12, r1) b .Lsyscall_restore_regs_cont .Lsyscall_rst_end: From 15ba74502ccfd0b34dad0ea022093ccc66b334d6 Mon Sep 17 00:00:00 2001 From: Rohan McLure Date: Wed, 21 Sep 2022 16:55:45 +1000 Subject: [PATCH 127/214] powerpc/32: Clarify interrupt restores with REST_GPR macro in entry_32.S Restoring the register state of the interrupted thread involves issuing a large number of predictable loads to the kernel stack frame. Issue the REST_GPR{,S} macros to clearly signal when this is happening, and bunch together restores at the end of the interrupt handler where the saved value is not consumed earlier in the handler code. Signed-off-by: Rohan McLure Reported-by: Christophe Leroy Reviewed-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220921065605.1051927-6-rmclure@linux.ibm.com --- arch/powerpc/kernel/entry_32.S | 33 +++++++++++++-------------------- 1 file changed, 13 insertions(+), 20 deletions(-) diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S index 497f04cde7dc..e3d43b6e3197 100644 --- a/arch/powerpc/kernel/entry_32.S +++ b/arch/powerpc/kernel/entry_32.S @@ -68,7 +68,7 @@ prepare_transfer_to_handler: lwz r9,_MSR(r11) /* if sleeping, clear MSR.EE */ rlwinm r9,r9,0,~MSR_EE lwz r12,_LINK(r11) /* and return to address in LR */ - lwz r2, GPR2(r11) + REST_GPR(2, r11) b fast_exception_return _ASM_NOKPROBE_SYMBOL(prepare_transfer_to_handler) #endif /* CONFIG_PPC_BOOK3S_32 || CONFIG_PPC_E500 */ @@ -144,7 +144,7 @@ ret_from_syscall: lwz r7,_NIP(r1) lwz r8,_MSR(r1) cmpwi r3,0 - lwz r3,GPR3(r1) + REST_GPR(3, r1) syscall_exit_finish: mtspr SPRN_SRR0,r7 mtspr SPRN_SRR1,r8 @@ -152,8 +152,8 @@ syscall_exit_finish: bne 3f mtcr r5 -1: lwz r2,GPR2(r1) - lwz r1,GPR1(r1) +1: REST_GPR(2, r1) + REST_GPR(1, r1) rfi #ifdef CONFIG_40x b . 
/* Prevent prefetch past rfi */ @@ -165,10 +165,8 @@ syscall_exit_finish: REST_NVGPRS(r1) mtctr r4 mtxer r5 - lwz r0,GPR0(r1) - lwz r3,GPR3(r1) - REST_GPRS(4, 11, r1) - lwz r12,GPR12(r1) + REST_GPR(0, r1) + REST_GPRS(3, 12, r1) b 1b #ifdef CONFIG_44x @@ -260,9 +258,8 @@ fast_exception_return: beq 3f /* if not, we've got problems */ #endif -2: REST_GPRS(3, 6, r11) - lwz r10,_CCR(r11) - REST_GPRS(1, 2, r11) +2: lwz r10,_CCR(r11) + REST_GPRS(1, 6, r11) mtcr r10 lwz r10,_LINK(r11) mtlr r10 @@ -277,7 +274,7 @@ fast_exception_return: mtspr SPRN_SRR0,r12 REST_GPR(9, r11) REST_GPR(12, r11) - lwz r11,GPR11(r11) + REST_GPR(11, r11) rfi #ifdef CONFIG_40x b . /* Prevent prefetch past rfi */ @@ -454,9 +451,8 @@ _ASM_NOKPROBE_SYMBOL(interrupt_return) lwz r3,_MSR(r1); \ andi. r3,r3,MSR_PR; \ bne interrupt_return; \ - lwz r0,GPR0(r1); \ - lwz r2,GPR2(r1); \ - REST_GPRS(3, 8, r1); \ + REST_GPR(0, r1); \ + REST_GPRS(2, 8, r1); \ lwz r10,_XER(r1); \ lwz r11,_CTR(r1); \ mtspr SPRN_XER,r10; \ @@ -475,11 +471,8 @@ _ASM_NOKPROBE_SYMBOL(interrupt_return) lwz r12,_MSR(r1); \ mtspr exc_lvl_srr0,r11; \ mtspr exc_lvl_srr1,r12; \ - lwz r9,GPR9(r1); \ - lwz r12,GPR12(r1); \ - lwz r10,GPR10(r1); \ - lwz r11,GPR11(r1); \ - lwz r1,GPR1(r1); \ + REST_GPRS(9, 12, r1); \ + REST_GPR(1, r1); \ exc_lvl_rfi; \ b .; /* prevent prefetch past exc_lvl_rfi */ From 53ecaa6778d613807e590c320ccfcf48a4114108 Mon Sep 17 00:00:00 2001 From: Rohan McLure Date: Wed, 21 Sep 2022 16:55:46 +1000 Subject: [PATCH 128/214] powerpc/64e: Clarify register saves and clears with {SAVE,ZEROIZE}_GPRS The common interrupt handler prologue macro and the bad_stack trampolines include consecutive sequences of register saves, and some register clears. Neaten such instances by expanding use of the SAVE_GPRS macro and employing the ZEROIZE_GPR macro when appropriate. Also simplify an invocation of SAVE_GPRS targetting all non-volatile registers to SAVE_NVGPRS. Signed-off-by: Rohan McLure Reported-by: Nicholas Piggin Reviewed-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220921065605.1051927-7-rmclure@linux.ibm.com --- arch/powerpc/kernel/exceptions-64e.S | 28 ++++++++++++---------------- 1 file changed, 12 insertions(+), 16 deletions(-) diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S index 3afba070a5d8..d6b7aa0229be 100644 --- a/arch/powerpc/kernel/exceptions-64e.S +++ b/arch/powerpc/kernel/exceptions-64e.S @@ -216,17 +216,15 @@ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV) mtlr r10 mtcr r11 - ld r10,GPR10(r1) - ld r11,GPR11(r1) - ld r12,GPR12(r1) + REST_GPRS(10, 12, r1) mtspr \scratch,r0 std r10,\paca_ex+EX_R10(r13); std r11,\paca_ex+EX_R11(r13); ld r10,_NIP(r1) ld r11,_MSR(r1) - ld r0,GPR0(r1) - ld r1,GPR1(r1) + REST_GPR(0, r1) + REST_GPR(1, r1) mtspr \srr0,r10 mtspr \srr1,r11 ld r10,\paca_ex+EX_R10(r13) @@ -364,16 +362,15 @@ ret_from_mc_except: /* Core exception code for all exceptions except TLB misses. 
*/ #define EXCEPTION_COMMON_LVL(n, scratch, excf) \ exc_##n##_common: \ - std r0,GPR0(r1); /* save r0 in stackframe */ \ - std r2,GPR2(r1); /* save r2 in stackframe */ \ - SAVE_GPRS(3, 9, r1); /* save r3 - r9 in stackframe */ \ + SAVE_GPR(0, r1); /* save r0 in stackframe */ \ + SAVE_GPRS(2, 9, r1); /* save r2 - r9 in stackframe */ \ std r10,_NIP(r1); /* save SRR0 to stackframe */ \ std r11,_MSR(r1); /* save SRR1 to stackframe */ \ beq 2f; /* if from kernel mode */ \ 2: ld r3,excf+EX_R10(r13); /* get back r10 */ \ ld r4,excf+EX_R11(r13); /* get back r11 */ \ mfspr r5,scratch; /* get back r13 */ \ - std r12,GPR12(r1); /* save r12 in stackframe */ \ + SAVE_GPR(12, r1); /* save r12 in stackframe */ \ ld r2,PACATOC(r13); /* get kernel TOC into r2 */ \ mflr r6; /* save LR in stackframe */ \ mfctr r7; /* save CTR in stackframe */ \ @@ -382,7 +379,7 @@ exc_##n##_common: \ lwz r10,excf+EX_CR(r13); /* load orig CR back from PACA */ \ lbz r11,PACAIRQSOFTMASK(r13); /* get current IRQ softe */ \ ld r12,exception_marker@toc(r2); \ - li r0,0; \ + ZEROIZE_GPR(0); \ std r3,GPR10(r1); /* save r10 to stackframe */ \ std r4,GPR11(r1); /* save r11 to stackframe */ \ std r5,GPR13(r1); /* save it to stackframe */ \ @@ -1048,15 +1045,14 @@ bad_stack_book3e: mfspr r11,SPRN_ESR std r10,_DEAR(r1) std r11,_ESR(r1) - std r0,GPR0(r1); /* save r0 in stackframe */ \ - std r2,GPR2(r1); /* save r2 in stackframe */ \ - SAVE_GPRS(3, 9, r1); /* save r3 - r9 in stackframe */ \ + SAVE_GPR(0, r1); /* save r0 in stackframe */ \ + SAVE_GPRS(2, 9, r1); /* save r2 - r9 in stackframe */ \ ld r3,PACA_EXGEN+EX_R10(r13);/* get back r10 */ \ ld r4,PACA_EXGEN+EX_R11(r13);/* get back r11 */ \ mfspr r5,SPRN_SPRG_GEN_SCRATCH;/* get back r13 XXX can be wrong */ \ std r3,GPR10(r1); /* save r10 to stackframe */ \ std r4,GPR11(r1); /* save r11 to stackframe */ \ - std r12,GPR12(r1); /* save r12 in stackframe */ \ + SAVE_GPR(12, r1); /* save r12 in stackframe */ \ std r5,GPR13(r1); /* save it to stackframe */ \ mflr r10 mfctr r11 @@ -1064,12 +1060,12 @@ bad_stack_book3e: std r10,_LINK(r1) std r11,_CTR(r1) std r12,_XER(r1) - SAVE_GPRS(14, 31, r1) + SAVE_NVGPRS(r1) lhz r12,PACA_TRAP_SAVE(r13) std r12,_TRAP(r1) addi r11,r1,INT_FRAME_SIZE std r11,0(r1) - li r12,0 + ZEROIZE_GPR(12) std r12,0(r11) ld r2,PACATOC(r13) 1: addi r3,r1,STACK_FRAME_OVERHEAD From 620f5c59c8617d623428c03414a022fca4e9eea2 Mon Sep 17 00:00:00 2001 From: Rohan McLure Date: Wed, 21 Sep 2022 16:55:47 +1000 Subject: [PATCH 129/214] powerpc/64s: Fix comment on interrupt handler prologue Interrupt handlers on 64s systems will often need to save register state from the interrupted process to make space for loading special purpose registers or for internal state. Fix a comment documenting a common code path macro in the beginning of interrupt handlers where r10 is saved to the PACA to afford space for the value of the CFAR. Comment is currently written as if r10-r12 are saved to PACA, but in fact only r10 is saved, with r11-r12 saved much later. The distance in code between these saves has grown over the many revisions of this macro. Fix this by signalling with a comment where r11-r12 are saved to the PACA. 
Signed-off-by: Rohan McLure Reported-by: Nicholas Piggin Reviewed-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220921065605.1051927-8-rmclure@linux.ibm.com --- arch/powerpc/kernel/exceptions-64s.S | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index 3d0dc133a9ae..a3b51441b039 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -281,7 +281,7 @@ BEGIN_FTR_SECTION mfspr r9,SPRN_PPR END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) HMT_MEDIUM - std r10,IAREA+EX_R10(r13) /* save r10 - r12 */ + std r10,IAREA+EX_R10(r13) /* save r10 */ .if ICFAR BEGIN_FTR_SECTION mfspr r10,SPRN_CFAR @@ -321,7 +321,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR) mfctr r10 std r10,IAREA+EX_CTR(r13) mfcr r9 - std r11,IAREA+EX_R11(r13) + std r11,IAREA+EX_R11(r13) /* save r11 - r12 */ std r12,IAREA+EX_R12(r13) /* From 016ff72bd2090903715c0f9422a44afbb966f4ee Mon Sep 17 00:00:00 2001 From: Rohan McLure Date: Wed, 21 Sep 2022 16:55:48 +1000 Subject: [PATCH 130/214] powerpc: Fix fallocate and fadvise64_64 compat parameter combination As reported[1] by Arnd, the arch-specific fadvise64_64 and fallocate compatibility handlers assume parameters are passed with 32-bit big-endian ABI. This affects the assignment of odd-even parameter pairs to the high or low words of a 64-bit syscall parameter. Fix fadvise64_64 fallocate compat handlers to correctly swap upper/lower 32 bits conditioned on endianness. A future patch will replace the arch-specific compat fallocate with an asm-generic implementation. This patch is intended for ease of back-port. [1]: https://lore.kernel.org/all/be29926f-226e-48dc-871a-e29a54e80583@www.fastmail.com/ Fixes: 57f48b4b74e7 ("powerpc/compat_sys: swap hi/lo parts of 64-bit syscall args in LE mode") Reported-by: Arnd Bergmann Signed-off-by: Rohan McLure Reviewed-by: Arnd Bergmann Reviewed-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220921065605.1051927-9-rmclure@linux.ibm.com --- arch/powerpc/include/asm/syscalls.h | 12 ++++++++++++ arch/powerpc/kernel/sys_ppc32.c | 14 +------------- arch/powerpc/kernel/syscalls.c | 4 ++-- 3 files changed, 15 insertions(+), 15 deletions(-) diff --git a/arch/powerpc/include/asm/syscalls.h b/arch/powerpc/include/asm/syscalls.h index 21c2faaa2957..ae7ca59f6267 100644 --- a/arch/powerpc/include/asm/syscalls.h +++ b/arch/powerpc/include/asm/syscalls.h @@ -8,6 +8,18 @@ #include #include +/* + * long long munging: + * The 32 bit ABI passes long longs in an odd even register pair. + * High and low parts are swapped depending on endian mode, + * so define a macro (similar to mips linux32) to handle that. + */ +#ifdef __LITTLE_ENDIAN__ +#define merge_64(low, high) (((u64)high << 32) | low) +#else +#define merge_64(high, low) (((u64)high << 32) | low) +#endif + struct rtas_args; long sys_mmap(unsigned long addr, size_t len, diff --git a/arch/powerpc/kernel/sys_ppc32.c b/arch/powerpc/kernel/sys_ppc32.c index f4edcc9489fb..ba363328da2b 100644 --- a/arch/powerpc/kernel/sys_ppc32.c +++ b/arch/powerpc/kernel/sys_ppc32.c @@ -56,18 +56,6 @@ unsigned long compat_sys_mmap2(unsigned long addr, size_t len, return sys_mmap(addr, len, prot, flags, fd, pgoff << 12); } -/* - * long long munging: - * The 32 bit ABI passes long longs in an odd even register pair. - * High and low parts are swapped depending on endian mode, - * so define a macro (similar to mips linux32) to handle that. 
- */ -#ifdef __LITTLE_ENDIAN__ -#define merge_64(low, high) ((u64)high << 32) | low -#else -#define merge_64(high, low) ((u64)high << 32) | low -#endif - compat_ssize_t compat_sys_pread64(unsigned int fd, char __user *ubuf, compat_size_t count, u32 reg6, u32 pos1, u32 pos2) { @@ -94,7 +82,7 @@ int compat_sys_truncate64(const char __user * path, u32 reg4, long compat_sys_fallocate(int fd, int mode, u32 offset1, u32 offset2, u32 len1, u32 len2) { - return ksys_fallocate(fd, mode, ((loff_t)offset1 << 32) | offset2, + return ksys_fallocate(fd, mode, merge_64(offset1, offset2), merge_64(len1, len2)); } diff --git a/arch/powerpc/kernel/syscalls.c b/arch/powerpc/kernel/syscalls.c index fc999140bc27..abc3fbb3c490 100644 --- a/arch/powerpc/kernel/syscalls.c +++ b/arch/powerpc/kernel/syscalls.c @@ -98,8 +98,8 @@ long ppc64_personality(unsigned long personality) long ppc_fadvise64_64(int fd, int advice, u32 offset_high, u32 offset_low, u32 len_high, u32 len_low) { - return ksys_fadvise64_64(fd, (u64)offset_high << 32 | offset_low, - (u64)len_high << 32 | len_low, advice); + return ksys_fadvise64_64(fd, merge_64(offset_high, offset_low), + merge_64(len_high, len_low), advice); } SYSCALL_DEFINE0(switch_endian) From 43d5de2b67d7f4a8478820005152f7f689608f2f Mon Sep 17 00:00:00 2001 From: Rohan McLure Date: Wed, 21 Sep 2022 16:55:49 +1000 Subject: [PATCH 131/214] asm-generic: compat: Support BE for long long args in 32-bit ABIs 32-bit ABIs support passing 64-bit integers by registers via argument translation. Commit 59c10c52f573 ("riscv: compat: syscall: Add compat_sys_call_table implementation") implements the compat_arg_u64 macro for efficiently defining little endian compatibility syscalls. Architectures supporting big endianness may benefit from reciprocal argument translation, but are welcome also to implement their own. Signed-off-by: Rohan McLure Reviewed-by: Nicholas Piggin Reviewed-by: Arnd Bergmann Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220921065605.1051927-10-rmclure@linux.ibm.com --- include/asm-generic/compat.h | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/include/asm-generic/compat.h b/include/asm-generic/compat.h index d06308a2a7a8..aeb257ad3d1a 100644 --- a/include/asm-generic/compat.h +++ b/include/asm-generic/compat.h @@ -14,12 +14,17 @@ #define COMPAT_OFF_T_MAX 0x7fffffff #endif -#if !defined(compat_arg_u64) && !defined(CONFIG_CPU_BIG_ENDIAN) +#ifndef compat_arg_u64 +#ifdef CONFIG_CPU_BIG_ENDIAN #define compat_arg_u64(name) u32 name##_lo, u32 name##_hi #define compat_arg_u64_dual(name) u32, name##_lo, u32, name##_hi +#else +#define compat_arg_u64(name) u32 name##_hi, u32 name##_lo +#define compat_arg_u64_dual(name) u32, name##_hi, u32, name##_lo +#endif #define compat_arg_u64_glue(name) (((u64)name##_lo & 0xffffffffUL) | \ ((u64)name##_hi << 32)) -#endif +#endif /* compat_arg_u64 */ /* These types are common across all compat ABIs */ typedef u32 compat_size_t; From c2e7a19827eec443a7cbe85e8d959052412d6dc3 Mon Sep 17 00:00:00 2001 From: Rohan McLure Date: Wed, 21 Sep 2022 16:55:50 +1000 Subject: [PATCH 132/214] powerpc: Use generic fallocate compatibility syscall The powerpc fallocate compat syscall handler is identical to the generic implementation provided by commit 59c10c52f573f ("riscv: compat: syscall: Add compat_sys_call_table implementation"), and as such can be removed in favour of the generic implementation. 
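For reference, the generic compat handler that powerpc opts into here is built on the compat_arg_u64 helpers and looks roughly like the sketch below (the actual definition lives in fs/open.c and is not reproduced by this diff):

#if defined(CONFIG_COMPAT) && defined(__ARCH_WANT_COMPAT_FALLOCATE)
COMPAT_SYSCALL_DEFINE6(fallocate, int, fd, int, mode, compat_arg_u64_dual(offset),
		       compat_arg_u64_dual(len))
{
	return ksys_fallocate(fd, mode, compat_arg_u64_glue(offset),
			      compat_arg_u64_glue(len));
}
#endif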
A future patch series will replace more architecture-defined syscall handlers with generic implementations, dependent on introducing generic implementations that are compatible with powerpc and arm's parameter reorderings. Reported-by: Arnd Bergmann Signed-off-by: Rohan McLure Reviewed-by: Arnd Bergmann Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220921065605.1051927-11-rmclure@linux.ibm.com --- arch/powerpc/include/asm/syscalls.h | 2 -- arch/powerpc/include/asm/unistd.h | 1 + arch/powerpc/kernel/sys_ppc32.c | 7 ------- 3 files changed, 1 insertion(+), 9 deletions(-) diff --git a/arch/powerpc/include/asm/syscalls.h b/arch/powerpc/include/asm/syscalls.h index ae7ca59f6267..52f5e1985989 100644 --- a/arch/powerpc/include/asm/syscalls.h +++ b/arch/powerpc/include/asm/syscalls.h @@ -51,8 +51,6 @@ compat_ssize_t compat_sys_readahead(int fd, u32 r4, u32 offset1, u32 offset2, u3 int compat_sys_truncate64(const char __user *path, u32 reg4, unsigned long len1, unsigned long len2); -long compat_sys_fallocate(int fd, int mode, u32 offset1, u32 offset2, u32 len1, u32 len2); - int compat_sys_ftruncate64(unsigned int fd, u32 reg4, unsigned long len1, unsigned long len2); diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h index b1129b4ef57d..659a996c75aa 100644 --- a/arch/powerpc/include/asm/unistd.h +++ b/arch/powerpc/include/asm/unistd.h @@ -45,6 +45,7 @@ #define __ARCH_WANT_SYS_UTIME #define __ARCH_WANT_SYS_NEWFSTATAT #define __ARCH_WANT_COMPAT_STAT +#define __ARCH_WANT_COMPAT_FALLOCATE #define __ARCH_WANT_COMPAT_SYS_SENDFILE #endif #define __ARCH_WANT_SYS_FORK diff --git a/arch/powerpc/kernel/sys_ppc32.c b/arch/powerpc/kernel/sys_ppc32.c index ba363328da2b..d961634976d8 100644 --- a/arch/powerpc/kernel/sys_ppc32.c +++ b/arch/powerpc/kernel/sys_ppc32.c @@ -79,13 +79,6 @@ int compat_sys_truncate64(const char __user * path, u32 reg4, return ksys_truncate(path, merge_64(len1, len2)); } -long compat_sys_fallocate(int fd, int mode, u32 offset1, u32 offset2, - u32 len1, u32 len2) -{ - return ksys_fallocate(fd, mode, merge_64(offset1, offset2), - merge_64(len1, len2)); -} - int compat_sys_ftruncate64(unsigned int fd, u32 reg4, unsigned long len1, unsigned long len2) { From b6b1334c9510e162bd8ca0ae58403cafad9572f1 Mon Sep 17 00:00:00 2001 From: Rohan McLure Date: Wed, 21 Sep 2022 16:55:51 +1000 Subject: [PATCH 133/214] powerpc/32: Remove powerpc select specialisation Syscall #82 has been implemented for 32-bit platforms in a unique way on powerpc systems. This hack will in effect guess whether the caller is expecting new select semantics or old select semantics. It does so based on the first parameter. In new select, this parameter represents the length of a user-memory array of file descriptors, and in old select this is a pointer to an arguments structure. The heuristic simply interprets sufficiently large values of its first parameter as being a call to old select. How this syscall should be handled was discussed in the thread linked below. As discussed in that thread, the existence of such a hack suggests that whatever powerpc binaries may predate glibc most likely made use of the old select semantics. x86 and arm64 both implement this syscall with old select semantics. Remove the powerpc implementation, and update syscall.tbl to emit a reference to sys_old_select and compat_sys_old_select for 32-bit binaries, in keeping with how other architectures support syscall #82.
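For background (an illustrative sketch, not part of this change): old select receives a single pointer to an argument block rather than five separate arguments, which is why the removed heuristic treated sufficiently large first parameters as pointers. The layout consumed by sys_old_select is roughly:

struct sel_arg_struct {
	unsigned long n;
	fd_set __user *inp, *outp, *exp;
	struct __kernel_old_timeval __user *tvp;
};

/* old ABI: select(&args);   new ABI: select(n, inp, outp, exp, tvp) */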
Signed-off-by: Rohan McLure Reviewed-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/lkml/13737de5-0eb7-e881-9af0-163b0d29a1a0@csgroup.eu/ Link: https://lore.kernel.org/r/20220921065605.1051927-12-rmclure@linux.ibm.com --- arch/powerpc/include/asm/syscalls.h | 2 -- arch/powerpc/kernel/syscalls.c | 17 ----------------- arch/powerpc/kernel/syscalls/syscall.tbl | 2 +- .../arch/powerpc/entry/syscalls/syscall.tbl | 2 +- 4 files changed, 2 insertions(+), 21 deletions(-) diff --git a/arch/powerpc/include/asm/syscalls.h b/arch/powerpc/include/asm/syscalls.h index 52f5e1985989..565b4b1d7d41 100644 --- a/arch/powerpc/include/asm/syscalls.h +++ b/arch/powerpc/include/asm/syscalls.h @@ -30,8 +30,6 @@ long sys_mmap2(unsigned long addr, size_t len, unsigned long fd, unsigned long pgoff); long ppc64_personality(unsigned long personality); long sys_rtas(struct rtas_args __user *uargs); -int ppc_select(int n, fd_set __user *inp, fd_set __user *outp, - fd_set __user *exp, struct __kernel_old_timeval __user *tvp); long ppc_fadvise64_64(int fd, int advice, u32 offset_high, u32 offset_low, u32 len_high, u32 len_low); diff --git a/arch/powerpc/kernel/syscalls.c b/arch/powerpc/kernel/syscalls.c index abc3fbb3c490..34e1ae88e15b 100644 --- a/arch/powerpc/kernel/syscalls.c +++ b/arch/powerpc/kernel/syscalls.c @@ -63,23 +63,6 @@ SYSCALL_DEFINE6(mmap, unsigned long, addr, size_t, len, return do_mmap2(addr, len, prot, flags, fd, offset, PAGE_SHIFT); } -#ifdef CONFIG_PPC32 -/* - * Due to some executables calling the wrong select we sometimes - * get wrong args. This determines how the args are being passed - * (a single ptr to them all args passed) then calls - * sys_select() with the appropriate args. -- Cort - */ -int -ppc_select(int n, fd_set __user *inp, fd_set __user *outp, fd_set __user *exp, struct __kernel_old_timeval __user *tvp) -{ - if ((unsigned long)n >= 4096) - return sys_old_select((void __user *)n); - - return sys_select(n, inp, outp, exp, tvp); -} -#endif - #ifdef CONFIG_PPC64 long ppc64_personality(unsigned long personality) { diff --git a/arch/powerpc/kernel/syscalls/syscall.tbl b/arch/powerpc/kernel/syscalls/syscall.tbl index 2600b4237292..64f27cbbdd2c 100644 --- a/arch/powerpc/kernel/syscalls/syscall.tbl +++ b/arch/powerpc/kernel/syscalls/syscall.tbl @@ -110,7 +110,7 @@ 79 common settimeofday sys_settimeofday compat_sys_settimeofday 80 common getgroups sys_getgroups 81 common setgroups sys_setgroups -82 32 select ppc_select sys_ni_syscall +82 32 select sys_old_select compat_sys_old_select 82 64 select sys_ni_syscall 82 spu select sys_ni_syscall 83 common symlink sys_symlink diff --git a/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl b/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl index 2600b4237292..64f27cbbdd2c 100644 --- a/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl +++ b/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl @@ -110,7 +110,7 @@ 79 common settimeofday sys_settimeofday compat_sys_settimeofday 80 common getgroups sys_getgroups 81 common setgroups sys_setgroups -82 32 select ppc_select sys_ni_syscall +82 32 select sys_old_select compat_sys_old_select 82 64 select sys_ni_syscall 82 spu select sys_ni_syscall 83 common symlink sys_symlink From 4df0221f9ded8c39aecfb1a80cef346026671cb7 Mon Sep 17 00:00:00 2001 From: Rohan McLure Date: Wed, 21 Sep 2022 16:55:52 +1000 Subject: [PATCH 134/214] powerpc: Remove direct call to personality syscall handler Syscall handlers should not be invoked internally by their symbol names, as these symbols 
defined by the architecture-defined SYSCALL_DEFINE macro. Fortunately, in the case of ppc64_personality, its call to sys_personality can be replaced with an invocation to the equivalent ksys_personality inline helper in . Signed-off-by: Rohan McLure Reviewed-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220921065605.1051927-13-rmclure@linux.ibm.com --- arch/powerpc/kernel/syscalls.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/powerpc/kernel/syscalls.c b/arch/powerpc/kernel/syscalls.c index 34e1ae88e15b..a04c97faa21a 100644 --- a/arch/powerpc/kernel/syscalls.c +++ b/arch/powerpc/kernel/syscalls.c @@ -71,7 +71,7 @@ long ppc64_personality(unsigned long personality) if (personality(current->personality) == PER_LINUX32 && personality(personality) == PER_LINUX) personality = (personality & ~PER_MASK) | PER_LINUX32; - ret = sys_personality(personality); + ret = ksys_personality(personality); if (personality(ret) == PER_LINUX32) ret = (ret & ~PER_MASK) | PER_LINUX; return ret; From 0a5bfb824a6ea35e54b7e5ac6f881beea5e309d2 Mon Sep 17 00:00:00 2001 From: Fabiano Rosas Date: Tue, 16 Aug 2022 19:25:17 -0300 Subject: [PATCH 135/214] KVM: PPC: Book3S HV: Fix decrementer migration We used to have a workaround[1] for a hang during migration that was made ineffective when we converted the decrementer expiry to be relative to guest timebase. The point of the workaround was that in the absence of an explicit decrementer expiry value provided by userspace during migration, KVM needs to initialize dec_expires to a value that will result in an expired decrementer after subtracting the current guest timebase. That stops the vcpu from hanging after migration due to a decrementer that's too large. If the dec_expires is now relative to guest timebase, its initialization needs to be guest timebase-relative as well, otherwise we end up with a decrementer expiry that is still larger than the guest timebase. 1- https://git.kernel.org/torvalds/c/5855564c8ab2 Fixes: 3c1a4322bba7 ("KVM: PPC: Book3S HV: Change dec_expires to be relative to guest timebase") Signed-off-by: Fabiano Rosas Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220816222517.1916391-1-farosas@linux.ibm.com --- arch/powerpc/kvm/book3s_hv.c | 18 ++++++++++++++++-- arch/powerpc/kvm/powerpc.c | 1 - 2 files changed, 16 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 57d0835e56fd..917abda9e5ce 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -2517,10 +2517,24 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, r = set_vpa(vcpu, &vcpu->arch.dtl, addr, len); break; case KVM_REG_PPC_TB_OFFSET: + { /* round up to multiple of 2^24 */ - vcpu->arch.vcore->tb_offset = - ALIGN(set_reg_val(id, *val), 1UL << 24); + u64 tb_offset = ALIGN(set_reg_val(id, *val), 1UL << 24); + + /* + * Now that we know the timebase offset, update the + * decrementer expiry with a guest timebase value. If + * the userspace does not set DEC_EXPIRY, this ensures + * a migrated vcpu at least starts with an expired + * decrementer, which is better than a large one that + * causes a hang. 
+ */ + if (!vcpu->arch.dec_expires && tb_offset) + vcpu->arch.dec_expires = get_tb() + tb_offset; + + vcpu->arch.vcore->tb_offset = tb_offset; break; + } case KVM_REG_PPC_LPCR: kvmppc_set_lpcr(vcpu, set_reg_val(id, *val), true); break; diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index fb1490761c87..757491dd6b7b 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -786,7 +786,6 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS); vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup; - vcpu->arch.dec_expires = get_tb(); #ifdef CONFIG_KVM_EXIT_TIMING mutex_init(&vcpu->arch.exit_timing_lock); From bc91c04bfff7cdf676011b97bb21b2861d7b21c9 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Thu, 8 Sep 2022 23:25:41 +1000 Subject: [PATCH 136/214] KVM: PPC: Book3S HV P9: Clear vcpu cpu fields before enabling host irqs On guest entry, vcpu->cpu and vcpu->arch.thread_cpu are set after disabling host irqs. On guest exit there is a window where tick time accounting briefly enables irqs before these fields are cleared. Move them up to ensure they are cleared before host irqs are run. This is possibly not a problem, but is more symmetric and makes the fields less surprising. Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220908132545.4085849-1-npiggin@gmail.com --- arch/powerpc/kvm/book3s_hv.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 917abda9e5ce..0868c015c6b0 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -4629,6 +4629,9 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit, set_irq_happened(trap); + vcpu->cpu = -1; + vcpu->arch.thread_cpu = -1; + context_tracking_guest_exit(); if (!vtime_accounting_enabled_this_cpu()) { local_irq_enable(); @@ -4644,9 +4647,6 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit, } vtime_account_guest_exit(); - vcpu->cpu = -1; - vcpu->arch.thread_cpu = -1; - powerpc_local_irq_pmu_restore(flags); preempt_enable(); From c953f7500b65f2b157d1eb468ca8b86328834cce Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Thu, 8 Sep 2022 23:25:42 +1000 Subject: [PATCH 137/214] KVM: PPC: Book3S HV P9: Fix irq disabling in tick accounting kvmhv_run_single_vcpu() disables PMIs as well as Linux irqs, however the tick time accounting code enables and disables irqs and not PMIs within this region. By chance this might not actually cause a bug, but it is clearly an incorrect use of the APIs.
Fixes: 2251fbe76395e ("KVM: PPC: Book3S HV P9: Improve mtmsrd scheduling by delaying MSR[EE] disable") Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220908132545.4085849-2-npiggin@gmail.com --- arch/powerpc/kvm/book3s_hv.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 0868c015c6b0..0f8dee657336 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -4634,7 +4634,7 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit, context_tracking_guest_exit(); if (!vtime_accounting_enabled_this_cpu()) { - local_irq_enable(); + powerpc_local_irq_pmu_restore(flags); /* * Service IRQs here before vtime_account_guest_exit() so any * ticks that occurred while running the guest are accounted to @@ -4643,7 +4643,7 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit, * interrupts here, which has the problem that it accounts * interrupt processing overhead to the host. */ - local_irq_disable(); + powerpc_local_irq_pmu_save(flags); } vtime_account_guest_exit(); From b31bc24a49037aad7aa00d2b0354e9704d8134dc Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Thu, 8 Sep 2022 23:25:43 +1000 Subject: [PATCH 138/214] KVM: PPC: Book3S HV: Update guest state entry/exit accounting to new API Update the guest state and timing entry/exit accounting to use the new API, which was introduced following issues found[1]. KVM HV does possibly call instrumented code inside the guest context, and it does call srcu inside the guest context which is fragile at best. Switch to the new API, moving the guest context inside the srcu_read_lock/unlock region. [1] https://lore.kernel.org/lkml/20220201132926.3301912-1-mark.rutland@arm.com/ Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220908132545.4085849-3-npiggin@gmail.com --- arch/powerpc/kvm/book3s_hv.c | 31 ++++++++++--------------------- 1 file changed, 10 insertions(+), 21 deletions(-) diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 0f8dee657336..23bcda3585fe 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -3854,23 +3854,17 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc) for (sub = 0; sub < core_info.n_subcores; ++sub) spin_unlock(&core_info.vc[sub]->lock); - guest_enter_irqoff(); + guest_timing_enter_irqoff(); srcu_idx = srcu_read_lock(&vc->kvm->srcu); + guest_state_enter_irqoff(); this_cpu_disable_ftrace(); - /* - * Interrupts will be enabled once we get into the guest, - * so tell lockdep that we're about to enable interrupts. - */ - trace_hardirqs_on(); - trap = __kvmppc_vcore_entry(); - trace_hardirqs_off(); - this_cpu_enable_ftrace(); + guest_state_exit_irqoff(); srcu_read_unlock(&vc->kvm->srcu, srcu_idx); @@ -3905,11 +3899,10 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc) kvmppc_set_host_core(pcpu); - context_tracking_guest_exit(); if (!vtime_accounting_enabled_this_cpu()) { local_irq_enable(); /* - * Service IRQs here before vtime_account_guest_exit() so any + * Service IRQs here before guest_timing_exit_irqoff() so any * ticks that occurred while running the guest are accounted to * the guest. 
If vtime accounting is enabled, accounting uses * TB rather than ticks, so it can be done without enabling @@ -3918,7 +3911,7 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc) */ local_irq_disable(); } - vtime_account_guest_exit(); + guest_timing_exit_irqoff(); local_irq_enable(); @@ -4609,21 +4602,18 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit, trace_kvm_guest_enter(vcpu); - guest_enter_irqoff(); + guest_timing_enter_irqoff(); srcu_idx = srcu_read_lock(&kvm->srcu); + guest_state_enter_irqoff(); this_cpu_disable_ftrace(); - /* Tell lockdep that we're about to enable interrupts */ - trace_hardirqs_on(); - trap = kvmhv_p9_guest_entry(vcpu, time_limit, lpcr, &tb); vcpu->arch.trap = trap; - trace_hardirqs_off(); - this_cpu_enable_ftrace(); + guest_state_exit_irqoff(); srcu_read_unlock(&kvm->srcu, srcu_idx); @@ -4632,11 +4622,10 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit, vcpu->cpu = -1; vcpu->arch.thread_cpu = -1; - context_tracking_guest_exit(); if (!vtime_accounting_enabled_this_cpu()) { powerpc_local_irq_pmu_restore(flags); /* - * Service IRQs here before vtime_account_guest_exit() so any + * Service IRQs here before guest_timing_exit_irqoff() so any * ticks that occurred while running the guest are accounted to * the guest. If vtime accounting is enabled, accounting uses * TB rather than ticks, so it can be done without enabling @@ -4645,7 +4634,7 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit, */ powerpc_local_irq_pmu_save(flags); } - vtime_account_guest_exit(); + guest_timing_exit_irqoff(); powerpc_local_irq_pmu_restore(flags); From 1a5486b3c3517aa1f608a10003ade4da122cb175 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Thu, 8 Sep 2022 23:25:44 +1000 Subject: [PATCH 139/214] KVM: PPC: Book3S HV P9: Restore stolen time logging in dtl Stolen time logging in dtl was removed from the P9 path, so guests had no stolen time accounting. Add it back in a simpler way that still avoids locks and per-core accounting code. Fixes: ecb6a7207f92 ("KVM: PPC: Book3S HV P9: Remove most of the vcore logic") Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220908132545.4085849-4-npiggin@gmail.com --- arch/powerpc/kvm/book3s_hv.c | 49 +++++++++++++++++++++++++++++++++--- 1 file changed, 45 insertions(+), 4 deletions(-) diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 23bcda3585fe..ecce10a4486d 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -249,6 +249,7 @@ static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu) /* * We use the vcpu_load/put functions to measure stolen time. + * * Stolen time is counted as time when either the vcpu is able to * run as part of a virtual core, but the task running the vcore * is preempted or sleeping, or when the vcpu needs something done @@ -278,6 +279,12 @@ static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu) * lock. The stolen times are measured in units of timebase ticks. * (Note that the != TB_NIL checks below are purely defensive; * they should never fail.) + * + * The POWER9 path is simpler, one vcpu per virtual core so the + * former case does not exist. If a vcpu is preempted when it is + * BUSY_IN_HOST and not ceded or otherwise blocked, then accumulate + * the stolen cycles in busy_stolen. RUNNING is not a preemptible + * state in the P9 path. 
*/ static void kvmppc_core_start_stolen(struct kvmppc_vcore *vc, u64 tb) @@ -311,8 +318,14 @@ static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu) unsigned long flags; u64 now; - if (cpu_has_feature(CPU_FTR_ARCH_300)) + if (cpu_has_feature(CPU_FTR_ARCH_300)) { + if (vcpu->arch.busy_preempt != TB_NIL) { + WARN_ON_ONCE(vcpu->arch.state != KVMPPC_VCPU_BUSY_IN_HOST); + vc->stolen_tb += mftb() - vcpu->arch.busy_preempt; + vcpu->arch.busy_preempt = TB_NIL; + } return; + } now = mftb(); @@ -340,8 +353,21 @@ static void kvmppc_core_vcpu_put_hv(struct kvm_vcpu *vcpu) unsigned long flags; u64 now; - if (cpu_has_feature(CPU_FTR_ARCH_300)) + if (cpu_has_feature(CPU_FTR_ARCH_300)) { + /* + * In the P9 path, RUNNABLE is not preemptible + * (nor takes host interrupts) + */ + WARN_ON_ONCE(vcpu->arch.state == KVMPPC_VCPU_RUNNABLE); + /* + * Account stolen time when preempted while the vcpu task is + * running in the kernel (but not in qemu, which is INACTIVE). + */ + if (task_is_running(current) && + vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST) + vcpu->arch.busy_preempt = mftb(); return; + } now = mftb(); @@ -740,6 +766,18 @@ static void __kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu, vcpu->arch.dtl.dirty = true; } +static void kvmppc_create_dtl_entry_p9(struct kvm_vcpu *vcpu, + struct kvmppc_vcore *vc, + u64 now) +{ + unsigned long stolen; + + stolen = vc->stolen_tb - vcpu->arch.stolen_logged; + vcpu->arch.stolen_logged = vc->stolen_tb; + + __kvmppc_create_dtl_entry(vcpu, vc->pcpu, now, stolen); +} + static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu, struct kvmppc_vcore *vc) { @@ -4527,7 +4565,6 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit, vc = vcpu->arch.vcore; vcpu->arch.ceded = 0; vcpu->arch.run_task = current; - vcpu->arch.state = KVMPPC_VCPU_RUNNABLE; vcpu->arch.last_inst = KVM_INST_FETCH_FAILED; /* See if the MMU is ready to go */ @@ -4554,6 +4591,8 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit, /* flags save not required, but irq_pmu has no disable/enable API */ powerpc_local_irq_pmu_save(flags); + vcpu->arch.state = KVMPPC_VCPU_RUNNABLE; + if (signal_pending(current)) goto sigpend; if (need_resched() || !kvm->arch.mmu_ready) @@ -4598,7 +4637,7 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit, tb = mftb(); - __kvmppc_create_dtl_entry(vcpu, pcpu, tb + vc->tb_offset, 0); + kvmppc_create_dtl_entry_p9(vcpu, vc, tb + vc->tb_offset); trace_kvm_guest_enter(vcpu); @@ -4621,6 +4660,7 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit, vcpu->cpu = -1; vcpu->arch.thread_cpu = -1; + vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST; if (!vtime_accounting_enabled_this_cpu()) { powerpc_local_irq_pmu_restore(flags); @@ -4697,6 +4737,7 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit, out: vcpu->cpu = -1; vcpu->arch.thread_cpu = -1; + vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST; powerpc_local_irq_pmu_restore(flags); preempt_enable(); goto done; From b7fa9ce86d32baf2a3a8bf8fdaa44870084edd85 Mon Sep 17 00:00:00 2001 From: Rohan McLure Date: Wed, 21 Sep 2022 16:55:53 +1000 Subject: [PATCH 140/214] powerpc: Remove direct call to mmap2 syscall handlers Syscall handlers should not be invoked internally by their symbol names, as these symbols defined by the architecture-defined SYSCALL_DEFINE macro. Move the compatibility syscall definition for mmap2 to syscalls.c, so that all mmap implementations can share a helper function. Remove 'inline' on static mmap helper. 
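To make the offset handling concrete (a worked sketch, not part of the patch): mmap2 takes its file offset in 4 KiB units, and the shift argument converts those units into page units, so one helper can serve both entry points.

/* Illustrative only; assumes 64 KiB pages (PAGE_SHIFT == 16).
 * mmap2 offsets are given in 4 KiB units:
 *   off_4k = 0x10               -> 16 * 4 KiB = 64 KiB into the file
 *   pgoff  = off_4k >> (16 - 12) -> 1, i.e. one 64 KiB page
 * sys_mmap, by contrast, passes a byte offset and uses shift == PAGE_SHIFT.
 */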
Signed-off-by: Rohan McLure Reviewed-by: Nicholas Piggin [mpe: Fix compat_sys_mmap2() prototype and offset handling] Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220921065605.1051927-14-rmclure@linux.ibm.com --- arch/powerpc/include/asm/syscalls.h | 2 +- arch/powerpc/kernel/sys_ppc32.c | 9 --------- arch/powerpc/kernel/syscalls.c | 16 +++++++++++++--- 3 files changed, 14 insertions(+), 13 deletions(-) diff --git a/arch/powerpc/include/asm/syscalls.h b/arch/powerpc/include/asm/syscalls.h index 565b4b1d7d41..6610dc8d078a 100644 --- a/arch/powerpc/include/asm/syscalls.h +++ b/arch/powerpc/include/asm/syscalls.h @@ -34,7 +34,7 @@ long ppc_fadvise64_64(int fd, int advice, u32 offset_high, u32 offset_low, u32 len_high, u32 len_low); #ifdef CONFIG_COMPAT -unsigned long compat_sys_mmap2(unsigned long addr, size_t len, +long compat_sys_mmap2(unsigned long addr, size_t len, unsigned long prot, unsigned long flags, unsigned long fd, unsigned long pgoff); diff --git a/arch/powerpc/kernel/sys_ppc32.c b/arch/powerpc/kernel/sys_ppc32.c index d961634976d8..776ae7565fc5 100644 --- a/arch/powerpc/kernel/sys_ppc32.c +++ b/arch/powerpc/kernel/sys_ppc32.c @@ -25,7 +25,6 @@ #include #include #include -#include #include #include #include @@ -48,14 +47,6 @@ #include #include -unsigned long compat_sys_mmap2(unsigned long addr, size_t len, - unsigned long prot, unsigned long flags, - unsigned long fd, unsigned long pgoff) -{ - /* This should remain 12 even if PAGE_SIZE changes */ - return sys_mmap(addr, len, prot, flags, fd, pgoff << 12); -} - compat_ssize_t compat_sys_pread64(unsigned int fd, char __user *ubuf, compat_size_t count, u32 reg6, u32 pos1, u32 pos2) { diff --git a/arch/powerpc/kernel/syscalls.c b/arch/powerpc/kernel/syscalls.c index a04c97faa21a..e84a523cd65e 100644 --- a/arch/powerpc/kernel/syscalls.c +++ b/arch/powerpc/kernel/syscalls.c @@ -36,9 +36,9 @@ #include #include -static inline long do_mmap2(unsigned long addr, size_t len, - unsigned long prot, unsigned long flags, - unsigned long fd, unsigned long off, int shift) +static long do_mmap2(unsigned long addr, size_t len, + unsigned long prot, unsigned long flags, + unsigned long fd, unsigned long off, int shift) { if (!arch_validate_prot(prot, addr)) return -EINVAL; @@ -56,6 +56,16 @@ SYSCALL_DEFINE6(mmap2, unsigned long, addr, size_t, len, return do_mmap2(addr, len, prot, flags, fd, pgoff, PAGE_SHIFT-12); } +#ifdef CONFIG_COMPAT +COMPAT_SYSCALL_DEFINE6(mmap2, + unsigned long, addr, size_t, len, + unsigned long, prot, unsigned long, flags, + unsigned long, fd, unsigned long, off_4k) +{ + return do_mmap2(addr, len, prot, flags, fd, off_4k, PAGE_SHIFT-12); +} +#endif + SYSCALL_DEFINE6(mmap, unsigned long, addr, size_t, len, unsigned long, prot, unsigned long, flags, unsigned long, fd, off_t, offset) From ac17defbeb4e8285c5b9752164b1d68b13bf3e3b Mon Sep 17 00:00:00 2001 From: Rohan McLure Date: Wed, 21 Sep 2022 16:55:54 +1000 Subject: [PATCH 141/214] powerpc: Provide do_ppc64_personality helper Avoid duplication in future patch that will define the ppc64_personality syscall handler in terms of the SYSCALL_DEFINE and COMPAT_SYSCALL_DEFINE macros, by extracting the common body of ppc64_personality into a helper function. 
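The shape this enables in the following patch is roughly the sketch below (illustrative; see the next patch for the actual definitions):

SYSCALL_DEFINE1(ppc64_personality, unsigned long, personality)
{
	return do_ppc64_personality(personality);
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE1(ppc64_personality, unsigned long, personality)
{
	return do_ppc64_personality(personality);
}
#endif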
Signed-off-by: Rohan McLure Reviewed-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220921065605.1051927-15-rmclure@linux.ibm.com --- arch/powerpc/kernel/syscalls.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/arch/powerpc/kernel/syscalls.c b/arch/powerpc/kernel/syscalls.c index e84a523cd65e..8d4b7f6d7cf3 100644 --- a/arch/powerpc/kernel/syscalls.c +++ b/arch/powerpc/kernel/syscalls.c @@ -74,7 +74,7 @@ SYSCALL_DEFINE6(mmap, unsigned long, addr, size_t, len, } #ifdef CONFIG_PPC64 -long ppc64_personality(unsigned long personality) +static long do_ppc64_personality(unsigned long personality) { long ret; @@ -86,6 +86,10 @@ long ppc64_personality(unsigned long personality) ret = (ret & ~PER_MASK) | PER_LINUX; return ret; } +long ppc64_personality(unsigned long personality) +{ + return do_ppc64_personality(personality); +} #endif long ppc_fadvise64_64(int fd, int advice, u32 offset_high, u32 offset_low, From dec20c50df79cadaff17e964ef7f622491a52134 Mon Sep 17 00:00:00 2001 From: Rohan McLure Date: Wed, 21 Sep 2022 16:55:55 +1000 Subject: [PATCH 142/214] powerpc: Adopt SYSCALL_DEFINE for arch-specific syscall handlers Arch-specific implementations of syscall handlers are currently used over generic implementations for the following reasons: 1. Semantics unique to powerpc 2. Compatibility syscalls require 'argument padding' to comply with 64-bit argument convention in ELF32 abi. 3. Parameter types or order is different in other architectures. These syscall handlers have been defined prior to this patch series without invoking the SYSCALL_DEFINE or COMPAT_SYSCALL_DEFINE macros with custom input and output types. We remove every such direct definition in favour of the aforementioned macros. Also update syscalls.tbl in order to refer to the symbol names generated by each of these macros. Since ppc64_personality can be called by both 64 bit and 32 bit binaries through compatibility, we must generate both both compat_sys_ and sys_ symbols for this handler. As an aside: A number of architectures including arm and powerpc agree on an alternative argument order and numbering for most of these arch-specific handlers. A future patch series may allow for asm/unistd.h to signal through its defines that a generic implementation of these syscall handlers with the correct calling convention be emitted, through the __ARCH_WANT_COMPAT_SYS_... convention. 
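To make the 'argument padding' mentioned above concrete (an illustrative sketch, not part of the diff), consider pread64(fd, buf, count, pos) from a 32-bit caller. The 32-bit ABI places a long long in an aligned register pair, so a dummy slot appears whenever the preceding arguments leave the next free register unaligned:

/* 32-bit caller: pread64(fd, buf, count, pos)
 *   r3 = fd, r4 = buf, r5 = count
 *   r6 is skipped so that the 64-bit pos starts in an aligned register pair
 *   r7/r8 = the two 32-bit halves of pos (which half is high depends on endianness)
 * The compat handler below therefore carries an unused reg6 parameter and
 * reassembles the offset with merge_64(pos1, pos2).
 */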
Signed-off-by: Rohan McLure Reviewed-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220921065605.1051927-16-rmclure@linux.ibm.com --- arch/powerpc/include/asm/syscalls.h | 10 ++--- arch/powerpc/kernel/sys_ppc32.c | 38 ++++++++++++------- arch/powerpc/kernel/syscalls.c | 17 +++++++-- arch/powerpc/kernel/syscalls/syscall.tbl | 22 +++++------ .../arch/powerpc/entry/syscalls/syscall.tbl | 22 +++++------ 5 files changed, 64 insertions(+), 45 deletions(-) diff --git a/arch/powerpc/include/asm/syscalls.h b/arch/powerpc/include/asm/syscalls.h index 6610dc8d078a..8e1787ff6c93 100644 --- a/arch/powerpc/include/asm/syscalls.h +++ b/arch/powerpc/include/asm/syscalls.h @@ -28,10 +28,10 @@ long sys_mmap(unsigned long addr, size_t len, long sys_mmap2(unsigned long addr, size_t len, unsigned long prot, unsigned long flags, unsigned long fd, unsigned long pgoff); -long ppc64_personality(unsigned long personality); +long sys_ppc64_personality(unsigned long personality); long sys_rtas(struct rtas_args __user *uargs); -long ppc_fadvise64_64(int fd, int advice, u32 offset_high, u32 offset_low, - u32 len_high, u32 len_low); +long sys_ppc_fadvise64_64(int fd, int advice, u32 offset_high, u32 offset_low, + u32 len_high, u32 len_low); #ifdef CONFIG_COMPAT long compat_sys_mmap2(unsigned long addr, size_t len, @@ -52,8 +52,8 @@ int compat_sys_truncate64(const char __user *path, u32 reg4, int compat_sys_ftruncate64(unsigned int fd, u32 reg4, unsigned long len1, unsigned long len2); -long ppc32_fadvise64(int fd, u32 unused, u32 offset1, u32 offset2, - size_t len, int advice); +long compat_sys_ppc32_fadvise64(int fd, u32 unused, u32 offset1, u32 offset2, + size_t len, int advice); long compat_sys_sync_file_range2(int fd, unsigned int flags, unsigned int offset1, unsigned int offset2, diff --git a/arch/powerpc/kernel/sys_ppc32.c b/arch/powerpc/kernel/sys_ppc32.c index 776ae7565fc5..dcc3c9fd4cfd 100644 --- a/arch/powerpc/kernel/sys_ppc32.c +++ b/arch/powerpc/kernel/sys_ppc32.c @@ -47,45 +47,55 @@ #include #include -compat_ssize_t compat_sys_pread64(unsigned int fd, char __user *ubuf, compat_size_t count, - u32 reg6, u32 pos1, u32 pos2) +COMPAT_SYSCALL_DEFINE6(ppc_pread64, + unsigned int, fd, + char __user *, ubuf, compat_size_t, count, + u32, reg6, u32, pos1, u32, pos2) { return ksys_pread64(fd, ubuf, count, merge_64(pos1, pos2)); } -compat_ssize_t compat_sys_pwrite64(unsigned int fd, const char __user *ubuf, compat_size_t count, - u32 reg6, u32 pos1, u32 pos2) +COMPAT_SYSCALL_DEFINE6(ppc_pwrite64, + unsigned int, fd, + const char __user *, ubuf, compat_size_t, count, + u32, reg6, u32, pos1, u32, pos2) { return ksys_pwrite64(fd, ubuf, count, merge_64(pos1, pos2)); } -compat_ssize_t compat_sys_readahead(int fd, u32 r4, u32 offset1, u32 offset2, u32 count) +COMPAT_SYSCALL_DEFINE5(ppc_readahead, + int, fd, u32, r4, + u32, offset1, u32, offset2, u32, count) { return ksys_readahead(fd, merge_64(offset1, offset2), count); } -int compat_sys_truncate64(const char __user * path, u32 reg4, - unsigned long len1, unsigned long len2) +COMPAT_SYSCALL_DEFINE4(ppc_truncate64, + const char __user *, path, u32, reg4, + unsigned long, len1, unsigned long, len2) { return ksys_truncate(path, merge_64(len1, len2)); } -int compat_sys_ftruncate64(unsigned int fd, u32 reg4, unsigned long len1, - unsigned long len2) +COMPAT_SYSCALL_DEFINE4(ppc_ftruncate64, + unsigned int, fd, u32, reg4, + unsigned long, len1, unsigned long, len2) { return ksys_ftruncate(fd, merge_64(len1, len2)); } -long ppc32_fadvise64(int 
fd, u32 unused, u32 offset1, u32 offset2, - size_t len, int advice) +COMPAT_SYSCALL_DEFINE6(ppc32_fadvise64, + int, fd, u32, unused, u32, offset1, u32, offset2, + size_t, len, int, advice) { return ksys_fadvise64_64(fd, merge_64(offset1, offset2), len, advice); } -long compat_sys_sync_file_range2(int fd, unsigned int flags, - unsigned offset1, unsigned offset2, - unsigned nbytes1, unsigned nbytes2) +COMPAT_SYSCALL_DEFINE6(ppc_sync_file_range2, + int, fd, unsigned int, flags, + unsigned int, offset1, unsigned int, offset2, + unsigned int, nbytes1, unsigned int, nbytes2) { loff_t offset = merge_64(offset1, offset2); loff_t nbytes = merge_64(nbytes1, nbytes2); diff --git a/arch/powerpc/kernel/syscalls.c b/arch/powerpc/kernel/syscalls.c index 8d4b7f6d7cf3..68ebb23a5af4 100644 --- a/arch/powerpc/kernel/syscalls.c +++ b/arch/powerpc/kernel/syscalls.c @@ -86,14 +86,23 @@ static long do_ppc64_personality(unsigned long personality) ret = (ret & ~PER_MASK) | PER_LINUX; return ret; } -long ppc64_personality(unsigned long personality) + +SYSCALL_DEFINE1(ppc64_personality, unsigned long, personality) { return do_ppc64_personality(personality); } -#endif -long ppc_fadvise64_64(int fd, int advice, u32 offset_high, u32 offset_low, - u32 len_high, u32 len_low) +#ifdef CONFIG_COMPAT +COMPAT_SYSCALL_DEFINE1(ppc64_personality, unsigned long, personality) +{ + return do_ppc64_personality(personality); +} +#endif /* CONFIG_COMPAT */ +#endif /* CONFIG_PPC64 */ + +SYSCALL_DEFINE6(ppc_fadvise64_64, + int, fd, int, advice, u32, offset_high, u32, offset_low, + u32, len_high, u32, len_low) { return ksys_fadvise64_64(fd, merge_64(offset_high, offset_low), merge_64(len_high, len_low), advice); diff --git a/arch/powerpc/kernel/syscalls/syscall.tbl b/arch/powerpc/kernel/syscalls/syscall.tbl index 64f27cbbdd2c..2bca64f96164 100644 --- a/arch/powerpc/kernel/syscalls/syscall.tbl +++ b/arch/powerpc/kernel/syscalls/syscall.tbl @@ -178,9 +178,9 @@ 133 common fchdir sys_fchdir 134 common bdflush sys_ni_syscall 135 common sysfs sys_sysfs -136 32 personality sys_personality ppc64_personality -136 64 personality ppc64_personality -136 spu personality ppc64_personality +136 32 personality sys_personality compat_sys_ppc64_personality +136 64 personality sys_ppc64_personality +136 spu personality sys_ppc64_personality 137 common afs_syscall sys_ni_syscall 138 common setfsuid sys_setfsuid 139 common setfsgid sys_setfsgid @@ -228,8 +228,8 @@ 176 64 rt_sigtimedwait sys_rt_sigtimedwait 177 nospu rt_sigqueueinfo sys_rt_sigqueueinfo compat_sys_rt_sigqueueinfo 178 nospu rt_sigsuspend sys_rt_sigsuspend compat_sys_rt_sigsuspend -179 common pread64 sys_pread64 compat_sys_pread64 -180 common pwrite64 sys_pwrite64 compat_sys_pwrite64 +179 common pread64 sys_pread64 compat_sys_ppc_pread64 +180 common pwrite64 sys_pwrite64 compat_sys_ppc_pwrite64 181 common chown sys_chown 182 common getcwd sys_getcwd 183 common capget sys_capget @@ -242,10 +242,10 @@ 188 common putpmsg sys_ni_syscall 189 nospu vfork sys_vfork 190 common ugetrlimit sys_getrlimit compat_sys_getrlimit -191 common readahead sys_readahead compat_sys_readahead +191 common readahead sys_readahead compat_sys_ppc_readahead 192 32 mmap2 sys_mmap2 compat_sys_mmap2 -193 32 truncate64 sys_truncate64 compat_sys_truncate64 -194 32 ftruncate64 sys_ftruncate64 compat_sys_ftruncate64 +193 32 truncate64 sys_truncate64 compat_sys_ppc_truncate64 +194 32 ftruncate64 sys_ftruncate64 compat_sys_ppc_ftruncate64 195 32 stat64 sys_stat64 196 32 lstat64 sys_lstat64 197 32 fstat64 sys_fstat64 @@ -288,7 
+288,7 @@ 230 common io_submit sys_io_submit compat_sys_io_submit 231 common io_cancel sys_io_cancel 232 nospu set_tid_address sys_set_tid_address -233 common fadvise64 sys_fadvise64 ppc32_fadvise64 +233 common fadvise64 sys_fadvise64 compat_sys_ppc32_fadvise64 234 nospu exit_group sys_exit_group 235 nospu lookup_dcookie sys_lookup_dcookie compat_sys_lookup_dcookie 236 common epoll_create sys_epoll_create @@ -323,7 +323,7 @@ 251 spu utimes sys_utimes 252 common statfs64 sys_statfs64 compat_sys_statfs64 253 common fstatfs64 sys_fstatfs64 compat_sys_fstatfs64 -254 32 fadvise64_64 ppc_fadvise64_64 +254 32 fadvise64_64 sys_ppc_fadvise64_64 254 spu fadvise64_64 sys_ni_syscall 255 common rtas sys_rtas 256 32 sys_debug_setcontext sys_debug_setcontext sys_ni_syscall @@ -390,7 +390,7 @@ 305 common signalfd sys_signalfd compat_sys_signalfd 306 common timerfd_create sys_timerfd_create 307 common eventfd sys_eventfd -308 common sync_file_range2 sys_sync_file_range2 compat_sys_sync_file_range2 +308 common sync_file_range2 sys_sync_file_range2 compat_sys_ppc_sync_file_range2 309 nospu fallocate sys_fallocate compat_sys_fallocate 310 nospu subpage_prot sys_subpage_prot 311 32 timerfd_settime sys_timerfd_settime32 diff --git a/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl b/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl index 64f27cbbdd2c..2bca64f96164 100644 --- a/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl +++ b/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl @@ -178,9 +178,9 @@ 133 common fchdir sys_fchdir 134 common bdflush sys_ni_syscall 135 common sysfs sys_sysfs -136 32 personality sys_personality ppc64_personality -136 64 personality ppc64_personality -136 spu personality ppc64_personality +136 32 personality sys_personality compat_sys_ppc64_personality +136 64 personality sys_ppc64_personality +136 spu personality sys_ppc64_personality 137 common afs_syscall sys_ni_syscall 138 common setfsuid sys_setfsuid 139 common setfsgid sys_setfsgid @@ -228,8 +228,8 @@ 176 64 rt_sigtimedwait sys_rt_sigtimedwait 177 nospu rt_sigqueueinfo sys_rt_sigqueueinfo compat_sys_rt_sigqueueinfo 178 nospu rt_sigsuspend sys_rt_sigsuspend compat_sys_rt_sigsuspend -179 common pread64 sys_pread64 compat_sys_pread64 -180 common pwrite64 sys_pwrite64 compat_sys_pwrite64 +179 common pread64 sys_pread64 compat_sys_ppc_pread64 +180 common pwrite64 sys_pwrite64 compat_sys_ppc_pwrite64 181 common chown sys_chown 182 common getcwd sys_getcwd 183 common capget sys_capget @@ -242,10 +242,10 @@ 188 common putpmsg sys_ni_syscall 189 nospu vfork sys_vfork 190 common ugetrlimit sys_getrlimit compat_sys_getrlimit -191 common readahead sys_readahead compat_sys_readahead +191 common readahead sys_readahead compat_sys_ppc_readahead 192 32 mmap2 sys_mmap2 compat_sys_mmap2 -193 32 truncate64 sys_truncate64 compat_sys_truncate64 -194 32 ftruncate64 sys_ftruncate64 compat_sys_ftruncate64 +193 32 truncate64 sys_truncate64 compat_sys_ppc_truncate64 +194 32 ftruncate64 sys_ftruncate64 compat_sys_ppc_ftruncate64 195 32 stat64 sys_stat64 196 32 lstat64 sys_lstat64 197 32 fstat64 sys_fstat64 @@ -288,7 +288,7 @@ 230 common io_submit sys_io_submit compat_sys_io_submit 231 common io_cancel sys_io_cancel 232 nospu set_tid_address sys_set_tid_address -233 common fadvise64 sys_fadvise64 ppc32_fadvise64 +233 common fadvise64 sys_fadvise64 compat_sys_ppc32_fadvise64 234 nospu exit_group sys_exit_group 235 nospu lookup_dcookie sys_lookup_dcookie compat_sys_lookup_dcookie 236 common epoll_create sys_epoll_create @@ -323,7 +323,7 @@ 251 spu 
utimes sys_utimes 252 common statfs64 sys_statfs64 compat_sys_statfs64 253 common fstatfs64 sys_fstatfs64 compat_sys_fstatfs64 -254 32 fadvise64_64 ppc_fadvise64_64 +254 32 fadvise64_64 sys_ppc_fadvise64_64 254 spu fadvise64_64 sys_ni_syscall 255 common rtas sys_rtas 256 32 sys_debug_setcontext sys_debug_setcontext sys_ni_syscall @@ -390,7 +390,7 @@ 305 common signalfd sys_signalfd compat_sys_signalfd 306 common timerfd_create sys_timerfd_create 307 common eventfd sys_eventfd -308 common sync_file_range2 sys_sync_file_range2 compat_sys_sync_file_range2 +308 common sync_file_range2 sys_sync_file_range2 compat_sys_ppc_sync_file_range2 309 nospu fallocate sys_fallocate compat_sys_fallocate 310 nospu subpage_prot sys_subpage_prot 311 32 timerfd_settime sys_timerfd_settime32 From 8cd1def4b8e4a592949509fac443e850da8428d0 Mon Sep 17 00:00:00 2001 From: Rohan McLure Date: Wed, 21 Sep 2022 16:55:56 +1000 Subject: [PATCH 143/214] powerpc: Include all arch-specific syscall prototypes Forward declare all syscall handler prototypes where a generic prototype is not provided in either linux/syscalls.h or linux/compat.h in asm/syscalls.h. This is required for compile-time type-checking for syscall handlers, which is implemented later in this series. 32-bit compatibility syscall handlers are expressed in terms of types in ppc32.h. Expose this header globally. Signed-off-by: Rohan McLure Acked-by: Nicholas Piggin [mpe: Use standard include guard naming for syscalls_32.h] Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220921065605.1051927-17-rmclure@linux.ibm.com --- arch/powerpc/include/asm/syscalls.h | 103 +++++++++++++----- .../ppc32.h => include/asm/syscalls_32.h} | 6 +- arch/powerpc/kernel/signal_32.c | 2 +- arch/powerpc/perf/callchain_32.c | 2 +- 4 files changed, 83 insertions(+), 30 deletions(-) rename arch/powerpc/{kernel/ppc32.h => include/asm/syscalls_32.h} (93%) diff --git a/arch/powerpc/include/asm/syscalls.h b/arch/powerpc/include/asm/syscalls.h index 8e1787ff6c93..7911738ad4a9 100644 --- a/arch/powerpc/include/asm/syscalls.h +++ b/arch/powerpc/include/asm/syscalls.h @@ -8,6 +8,14 @@ #include #include +#ifdef CONFIG_PPC64 +#include +#endif +#include +#include + +struct rtas_args; + /* * long long munging: * The 32 bit ABI passes long longs in an odd even register pair. 
@@ -20,44 +28,89 @@ #define merge_64(high, low) (((u64)high << 32) | low) #endif -struct rtas_args; +long sys_ni_syscall(void); +/* + * PowerPC architecture-specific syscalls + */ + +long sys_rtas(struct rtas_args __user *uargs); + +#ifdef CONFIG_PPC64 +long sys_ppc64_personality(unsigned long personality); +#ifdef CONFIG_COMPAT +long compat_sys_ppc64_personality(unsigned long personality); +#endif /* CONFIG_COMPAT */ +#endif /* CONFIG_PPC64 */ + +long sys_swapcontext(struct ucontext __user *old_ctx, + struct ucontext __user *new_ctx, long ctx_size); long sys_mmap(unsigned long addr, size_t len, unsigned long prot, unsigned long flags, unsigned long fd, off_t offset); long sys_mmap2(unsigned long addr, size_t len, unsigned long prot, unsigned long flags, unsigned long fd, unsigned long pgoff); -long sys_ppc64_personality(unsigned long personality); -long sys_rtas(struct rtas_args __user *uargs); -long sys_ppc_fadvise64_64(int fd, int advice, u32 offset_high, u32 offset_low, - u32 len_high, u32 len_low); +long sys_switch_endian(void); + +#ifdef CONFIG_PPC32 +long sys_sigreturn(void); +long sys_debug_setcontext(struct ucontext __user *ctx, int ndbg, + struct sig_dbg_op __user *dbg); +#endif + +long sys_rt_sigreturn(void); + +long sys_subpage_prot(unsigned long addr, + unsigned long len, u32 __user *map); + +#ifdef CONFIG_COMPAT +long compat_sys_swapcontext(struct ucontext32 __user *old_ctx, + struct ucontext32 __user *new_ctx, + int ctx_size); +long compat_sys_old_getrlimit(unsigned int resource, + struct compat_rlimit __user *rlim); +long compat_sys_sigreturn(void); +long compat_sys_rt_sigreturn(void); +#endif /* CONFIG_COMPAT */ + +/* + * Architecture specific signatures required by long long munging: + * The 32 bit ABI passes long longs in an odd even register pair. + * The following signatures provide a machine long parameter for + * each register that will be supplied. The implementation is + * responsible for combining parameter pairs. 
+ */ #ifdef CONFIG_COMPAT long compat_sys_mmap2(unsigned long addr, size_t len, - unsigned long prot, unsigned long flags, - unsigned long fd, unsigned long pgoff); - -compat_ssize_t compat_sys_pread64(unsigned int fd, char __user *ubuf, compat_size_t count, - u32 reg6, u32 pos1, u32 pos2); - -compat_ssize_t compat_sys_pwrite64(unsigned int fd, const char __user *ubuf, compat_size_t count, - u32 reg6, u32 pos1, u32 pos2); - -compat_ssize_t compat_sys_readahead(int fd, u32 r4, u32 offset1, u32 offset2, u32 count); - -int compat_sys_truncate64(const char __user *path, u32 reg4, - unsigned long len1, unsigned long len2); - -int compat_sys_ftruncate64(unsigned int fd, u32 reg4, unsigned long len1, - unsigned long len2); - + unsigned long prot, unsigned long flags, + unsigned long fd, unsigned long pgoff); +long compat_sys_ppc_pread64(unsigned int fd, + char __user *ubuf, compat_size_t count, + u32 reg6, u32 pos1, u32 pos2); +long compat_sys_ppc_pwrite64(unsigned int fd, + const char __user *ubuf, compat_size_t count, + u32 reg6, u32 pos1, u32 pos2); +long compat_sys_ppc_readahead(int fd, u32 r4, + u32 offset1, u32 offset2, u32 count); +long compat_sys_ppc_truncate64(const char __user *path, u32 reg4, + unsigned long len1, unsigned long len2); +long compat_sys_ppc_ftruncate64(unsigned int fd, u32 reg4, + unsigned long len1, unsigned long len2); long compat_sys_ppc32_fadvise64(int fd, u32 unused, u32 offset1, u32 offset2, size_t len, int advice); +long compat_sys_ppc_sync_file_range2(int fd, unsigned int flags, + unsigned int offset1, + unsigned int offset2, + unsigned int nbytes1, + unsigned int nbytes2); +#endif /* CONFIG_COMPAT */ -long compat_sys_sync_file_range2(int fd, unsigned int flags, - unsigned int offset1, unsigned int offset2, - unsigned int nbytes1, unsigned int nbytes2); +#if defined(CONFIG_PPC32) || defined(CONFIG_COMPAT) +long sys_ppc_fadvise64_64(int fd, int advice, + u32 offset_high, u32 offset_low, + u32 len_high, u32 len_low); #endif #endif /* __KERNEL__ */ diff --git a/arch/powerpc/kernel/ppc32.h b/arch/powerpc/include/asm/syscalls_32.h similarity index 93% rename from arch/powerpc/kernel/ppc32.h rename to arch/powerpc/include/asm/syscalls_32.h index 2346f8c7ff2e..749255568be9 100644 --- a/arch/powerpc/kernel/ppc32.h +++ b/arch/powerpc/include/asm/syscalls_32.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-or-later */ -#ifndef _PPC64_PPC32_H -#define _PPC64_PPC32_H +#ifndef _ASM_POWERPC_SYSCALLS_32_H +#define _ASM_POWERPC_SYSCALLS_32_H #include #include @@ -57,4 +57,4 @@ struct ucontext32 { struct mcontext32 uc_mcontext; }; -#endif /* _PPC64_PPC32_H */ +#endif // _ASM_POWERPC_SYSCALLS_32_H diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c index 157a7403e3eb..c114c7f25645 100644 --- a/arch/powerpc/kernel/signal_32.c +++ b/arch/powerpc/kernel/signal_32.c @@ -43,7 +43,7 @@ #include #include #ifdef CONFIG_PPC64 -#include "ppc32.h" +#include #include #else #include diff --git a/arch/powerpc/perf/callchain_32.c b/arch/powerpc/perf/callchain_32.c index b83c47b7947f..ea8cfe3806dc 100644 --- a/arch/powerpc/perf/callchain_32.c +++ b/arch/powerpc/perf/callchain_32.c @@ -19,7 +19,7 @@ #include "callchain.h" #ifdef CONFIG_PPC64 -#include "../kernel/ppc32.h" +#include #else /* CONFIG_PPC64 */ #define __SIGNAL_FRAMESIZE32 __SIGNAL_FRAMESIZE From 39859aea411b1696c6bc0c04bd2b5095ddba6196 Mon Sep 17 00:00:00 2001 From: Rohan McLure Date: Wed, 21 Sep 2022 16:55:57 +1000 Subject: [PATCH 144/214] powerpc: Enable compile-time check for syscall handlers The 
table of syscall handlers and registered compatibility syscall handlers has in past been produced using assembly, with function references resolved at link time. This moves link-time errors to compile-time, by rewriting systbl.S in C, and including the linux/syscalls.h, linux/compat.h and asm/syscalls.h headers for prototypes. Reported-by: Arnd Bergmann Signed-off-by: Rohan McLure Reviewed-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220921065605.1051927-18-rmclure@linux.ibm.com --- arch/powerpc/kernel/{systbl.S => systbl.c} | 32 ++++++++-------------- 1 file changed, 11 insertions(+), 21 deletions(-) rename arch/powerpc/kernel/{systbl.S => systbl.c} (57%) diff --git a/arch/powerpc/kernel/systbl.S b/arch/powerpc/kernel/systbl.c similarity index 57% rename from arch/powerpc/kernel/systbl.S rename to arch/powerpc/kernel/systbl.c index 280d6b6955e2..ce52bd2ec292 100644 --- a/arch/powerpc/kernel/systbl.S +++ b/arch/powerpc/kernel/systbl.c @@ -10,36 +10,26 @@ * PPC64 updates by Dave Engebretsen (engebret@us.ibm.com) */ -#include +#include +#include +#include +#include -#ifdef CONFIG_RELOCATABLE -.section .data.rel.ro,"aw" -#else -.section .rodata,"a" -#endif +#define __SYSCALL_WITH_COMPAT(nr, entry, compat) __SYSCALL(nr, entry) +#define __SYSCALL(nr, entry) [nr] = (unsigned long) &entry, -#ifdef CONFIG_PPC64 - .p2align 3 -#define __SYSCALL(nr, entry) .8byte entry -#else - .p2align 2 -#define __SYSCALL(nr, entry) .long entry -#endif - -#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, native) -.globl sys_call_table -sys_call_table: +const unsigned long sys_call_table[] = { #ifdef CONFIG_PPC64 #include #else #include #endif +}; #ifdef CONFIG_COMPAT #undef __SYSCALL_WITH_COMPAT #define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, compat) -.globl compat_sys_call_table -compat_sys_call_table: -#define compat_sys_sigsuspend sys_sigsuspend +const unsigned long compat_sys_call_table[] = { #include -#endif +}; +#endif /* CONFIG_COMPAT */ From 8640de0dee49cec50040d9845a2bc96fd15adc9e Mon Sep 17 00:00:00 2001 From: Rohan McLure Date: Wed, 21 Sep 2022 16:55:58 +1000 Subject: [PATCH 145/214] powerpc: Use common syscall handler type Cause syscall handlers to be typed as follows when called indirectly throughout the kernel. This is to allow for better type checking. typedef long (*syscall_fn)(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long); Since both 32 and 64-bit abis allow for at least the first six machine-word length parameters to a function to be passed by registers, even handlers which admit fewer than six parameters may be viewed as having the above type. Coercing syscalls to syscall_fn requires a cast to void* to avoid -Wcast-function-type. Fixup comparisons in VDSO to avoid pointer-integer comparison. Introduce explicit cast on systems with SPUs. 
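Concretely (an illustrative sketch, condensed from the diffs below), dispatch through the common handler type looks like:

typedef long (*syscall_fn)(unsigned long, unsigned long, unsigned long,
			   unsigned long, unsigned long, unsigned long);

/* table entry lookup; the void * cast avoids -Wcast-function-type */
syscall_fn f = (void *)sys_call_table[r0];
long ret = f(regs->gpr[3], regs->gpr[4], regs->gpr[5],
	     regs->gpr[6], regs->gpr[7], regs->gpr[8]);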
Signed-off-by: Rohan McLure Reviewed-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220921065605.1051927-19-rmclure@linux.ibm.com --- arch/powerpc/include/asm/syscall.h | 7 +++++-- arch/powerpc/include/asm/syscalls.h | 1 + arch/powerpc/kernel/syscall.c | 2 -- arch/powerpc/kernel/systbl.c | 11 ++++++++--- arch/powerpc/kernel/vdso.c | 4 ++-- arch/powerpc/platforms/cell/spu_callbacks.c | 6 +++--- 6 files changed, 19 insertions(+), 12 deletions(-) diff --git a/arch/powerpc/include/asm/syscall.h b/arch/powerpc/include/asm/syscall.h index 25fc8ad9a27a..d2a8dfd5de33 100644 --- a/arch/powerpc/include/asm/syscall.h +++ b/arch/powerpc/include/asm/syscall.h @@ -14,9 +14,12 @@ #include #include +typedef long (*syscall_fn)(unsigned long, unsigned long, unsigned long, + unsigned long, unsigned long, unsigned long); + /* ftrace syscalls requires exporting the sys_call_table */ -extern const unsigned long sys_call_table[]; -extern const unsigned long compat_sys_call_table[]; +extern const syscall_fn sys_call_table[]; +extern const syscall_fn compat_sys_call_table[]; static inline int syscall_get_nr(struct task_struct *task, struct pt_regs *regs) { diff --git a/arch/powerpc/include/asm/syscalls.h b/arch/powerpc/include/asm/syscalls.h index 7911738ad4a9..211bb8393dee 100644 --- a/arch/powerpc/include/asm/syscalls.h +++ b/arch/powerpc/include/asm/syscalls.h @@ -8,6 +8,7 @@ #include #include +#include #ifdef CONFIG_PPC64 #include #endif diff --git a/arch/powerpc/kernel/syscall.c b/arch/powerpc/kernel/syscall.c index 64102a64fd84..9875486f6168 100644 --- a/arch/powerpc/kernel/syscall.c +++ b/arch/powerpc/kernel/syscall.c @@ -12,8 +12,6 @@ #include -typedef long (*syscall_fn)(long, long, long, long, long, long); - /* Has to run notrace because it is entered not completely "reconciled" */ notrace long system_call_exception(long r3, long r4, long r5, long r6, long r7, long r8, diff --git a/arch/powerpc/kernel/systbl.c b/arch/powerpc/kernel/systbl.c index ce52bd2ec292..d64dfafc2e5e 100644 --- a/arch/powerpc/kernel/systbl.c +++ b/arch/powerpc/kernel/systbl.c @@ -16,9 +16,14 @@ #include #define __SYSCALL_WITH_COMPAT(nr, entry, compat) __SYSCALL(nr, entry) -#define __SYSCALL(nr, entry) [nr] = (unsigned long) &entry, -const unsigned long sys_call_table[] = { +/* + * Coerce syscall handlers with arbitrary parameters to common type + * requires cast to void* to avoid -Wcast-function-type. 
+ */ +#define __SYSCALL(nr, entry) [nr] = (void *) entry, + +const syscall_fn sys_call_table[] = { #ifdef CONFIG_PPC64 #include #else @@ -29,7 +34,7 @@ const unsigned long sys_call_table[] = { #ifdef CONFIG_COMPAT #undef __SYSCALL_WITH_COMPAT #define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, compat) -const unsigned long compat_sys_call_table[] = { +const syscall_fn compat_sys_call_table[] = { #include }; #endif /* CONFIG_COMPAT */ diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c index bf9574ec26ce..fcca06d200d3 100644 --- a/arch/powerpc/kernel/vdso.c +++ b/arch/powerpc/kernel/vdso.c @@ -304,10 +304,10 @@ static void __init vdso_setup_syscall_map(void) unsigned int i; for (i = 0; i < NR_syscalls; i++) { - if (sys_call_table[i] != (unsigned long)&sys_ni_syscall) + if (sys_call_table[i] != (void *)&sys_ni_syscall) vdso_data->syscall_map[i >> 5] |= 0x80000000UL >> (i & 0x1f); if (IS_ENABLED(CONFIG_COMPAT) && - compat_sys_call_table[i] != (unsigned long)&sys_ni_syscall) + compat_sys_call_table[i] != (void *)&sys_ni_syscall) vdso_data->compat_syscall_map[i >> 5] |= 0x80000000UL >> (i & 0x1f); } } diff --git a/arch/powerpc/platforms/cell/spu_callbacks.c b/arch/powerpc/platforms/cell/spu_callbacks.c index fe0d8797a00a..e780c14c5733 100644 --- a/arch/powerpc/platforms/cell/spu_callbacks.c +++ b/arch/powerpc/platforms/cell/spu_callbacks.c @@ -34,15 +34,15 @@ * mbind, mq_open, ipc, ... */ -static void *spu_syscall_table[] = { +static const syscall_fn spu_syscall_table[] = { #define __SYSCALL_WITH_COMPAT(nr, entry, compat) __SYSCALL(nr, entry) -#define __SYSCALL(nr, entry) [nr] = entry, +#define __SYSCALL(nr, entry) [nr] = (void *) entry, #include }; long spu_sys_callback(struct spu_syscall_block *s) { - long (*syscall)(u64 a1, u64 a2, u64 a3, u64 a4, u64 a5, u64 a6); + syscall_fn syscall; if (s->nr_ret >= ARRAY_SIZE(spu_syscall_table)) { pr_debug("%s: invalid syscall #%lld", __func__, s->nr_ret); From f8971c627b14040e533768985a99f4fd6ffa420f Mon Sep 17 00:00:00 2001 From: Rohan McLure Date: Wed, 21 Sep 2022 16:56:00 +1000 Subject: [PATCH 146/214] powerpc: Change system_call_exception calling convention Change system_call_exception arguments to pass a pointer to a stack frame containing caller state, as well as the original r0, which determines the number of the syscall. This has been observed to yield improved performance compared to passing them by registers, circumventing the need to allocate a stack frame.
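In other words (a sketch of the prototype change; the details are in the diffs below):

/* before: argument registers and regs pointer all passed by the asm caller */
long system_call_exception(long r3, long r4, long r5, long r6, long r7, long r8,
			   unsigned long r0, struct pt_regs *regs);

/* after: the asm caller only sets up r3 = regs, r4 = orig r0 */
long system_call_exception(struct pt_regs *regs, unsigned long r0);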
Signed-off-by: Rohan McLure Reviewed-by: Nicholas Piggin [mpe: Retain clearing of high bits of args for compat tasks] Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220921065605.1051927-21-rmclure@linux.ibm.com --- arch/powerpc/include/asm/interrupt.h | 3 +-- arch/powerpc/kernel/entry_32.S | 6 +++--- arch/powerpc/kernel/interrupt_64.S | 20 ++++++++++---------- arch/powerpc/kernel/syscall.c | 18 +++++++++--------- 4 files changed, 23 insertions(+), 24 deletions(-) diff --git a/arch/powerpc/include/asm/interrupt.h b/arch/powerpc/include/asm/interrupt.h index 84a1cdc3204c..0d3405bd55c4 100644 --- a/arch/powerpc/include/asm/interrupt.h +++ b/arch/powerpc/include/asm/interrupt.h @@ -665,8 +665,7 @@ static inline void interrupt_cond_local_irq_enable(struct pt_regs *regs) local_irq_enable(); } -long system_call_exception(long r3, long r4, long r5, long r6, long r7, long r8, - unsigned long r0, struct pt_regs *regs); +long system_call_exception(struct pt_regs *regs, unsigned long r0); notrace unsigned long syscall_exit_prepare(unsigned long r3, struct pt_regs *regs, long scv); notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs); notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs); diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S index e3d43b6e3197..55fcc70f2cc0 100644 --- a/arch/powerpc/kernel/entry_32.S +++ b/arch/powerpc/kernel/entry_32.S @@ -122,9 +122,9 @@ transfer_to_syscall: SAVE_NVGPRS(r1) kuep_lock - /* Calling convention has r9 = orig r0, r10 = regs */ - addi r10,r1,STACK_FRAME_OVERHEAD - mr r9,r0 + /* Calling convention has r3 = regs, r4 = orig r0 */ + addi r3,r1,STACK_FRAME_OVERHEAD + mr r4,r0 bl system_call_exception ret_from_syscall: diff --git a/arch/powerpc/kernel/interrupt_64.S b/arch/powerpc/kernel/interrupt_64.S index 931f984b02b1..90dbd6a8d4da 100644 --- a/arch/powerpc/kernel/interrupt_64.S +++ b/arch/powerpc/kernel/interrupt_64.S @@ -60,7 +60,7 @@ _ASM_NOKPROBE_SYMBOL(system_call_vectored_\name) ld r2,PACATOC(r13) mfcr r12 li r11,0 - /* Can we avoid saving r3-r8 in common case? */ + /* Save syscall parameters in r3-r8 */ SAVE_GPRS(3, 8, r1) /* Zero r9-r12, this should only be required when restoring all GPRs */ std r11,GPR9(r1) @@ -77,9 +77,11 @@ _ASM_NOKPROBE_SYMBOL(system_call_vectored_\name) std r11,_TRAP(r1) std r12,_CCR(r1) std r3,ORIG_GPR3(r1) - addi r10,r1,STACK_FRAME_OVERHEAD + /* Calling convention has r3 = regs, r4 = orig r0 */ + addi r3,r1,STACK_FRAME_OVERHEAD + mr r4,r0 ld r11,exception_marker@toc(r2) - std r11,-16(r10) /* "regshere" marker */ + std r11,-16(r3) /* "regshere" marker */ BEGIN_FTR_SECTION HMT_MEDIUM @@ -94,8 +96,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) * but this is the best we can do. */ - /* Calling convention has r9 = orig r0, r10 = regs */ - mr r9,r0 bl system_call_exception .Lsyscall_vectored_\name\()_exit: @@ -227,7 +227,7 @@ END_BTB_FLUSH_SECTION ld r2,PACATOC(r13) mfcr r12 li r11,0 - /* Can we avoid saving r3-r8 in common case? 
*/ + /* Save syscall parameters in r3-r8 */ SAVE_GPRS(3, 8, r1) /* Zero r9-r12, this should only be required when restoring all GPRs */ std r11,GPR9(r1) @@ -250,9 +250,11 @@ END_BTB_FLUSH_SECTION std r11,_TRAP(r1) std r12,_CCR(r1) std r3,ORIG_GPR3(r1) - addi r10,r1,STACK_FRAME_OVERHEAD + /* Calling convention has r3 = regs, r4 = orig r0 */ + addi r3,r1,STACK_FRAME_OVERHEAD + mr r4,r0 ld r11,exception_marker@toc(r2) - std r11,-16(r10) /* "regshere" marker */ + std r11,-16(r3) /* "regshere" marker */ #ifdef CONFIG_PPC_BOOK3S li r11,1 @@ -273,8 +275,6 @@ END_BTB_FLUSH_SECTION wrteei 1 #endif - /* Calling convention has r9 = orig r0, r10 = regs */ - mr r9,r0 bl system_call_exception .Lsyscall_exit: diff --git a/arch/powerpc/kernel/syscall.c b/arch/powerpc/kernel/syscall.c index 9875486f6168..2c002cbbc676 100644 --- a/arch/powerpc/kernel/syscall.c +++ b/arch/powerpc/kernel/syscall.c @@ -13,10 +13,9 @@ /* Has to run notrace because it is entered not completely "reconciled" */ -notrace long system_call_exception(long r3, long r4, long r5, - long r6, long r7, long r8, - unsigned long r0, struct pt_regs *regs) +notrace long system_call_exception(struct pt_regs *regs, unsigned long r0) { + unsigned long r3, r4, r5, r6, r7, r8; long ret; syscall_fn f; @@ -136,12 +135,6 @@ notrace long system_call_exception(long r3, long r4, long r5, r0 = do_syscall_trace_enter(regs); if (unlikely(r0 >= NR_syscalls)) return regs->gpr[3]; - r3 = regs->gpr[3]; - r4 = regs->gpr[4]; - r5 = regs->gpr[5]; - r6 = regs->gpr[6]; - r7 = regs->gpr[7]; - r8 = regs->gpr[8]; } else if (unlikely(r0 >= NR_syscalls)) { if (unlikely(trap_is_unsupported_scv(regs))) { @@ -152,6 +145,13 @@ notrace long system_call_exception(long r3, long r4, long r5, return -ENOSYS; } + r3 = regs->gpr[3]; + r4 = regs->gpr[4]; + r5 = regs->gpr[5]; + r6 = regs->gpr[6]; + r7 = regs->gpr[7]; + r8 = regs->gpr[8]; + /* May be faster to do array_index_nospec? */ barrier_nospec(); From 7e92e01b724526b98cbc7f03dd4afa0295780d56 Mon Sep 17 00:00:00 2001 From: Rohan McLure Date: Wed, 21 Sep 2022 16:56:01 +1000 Subject: [PATCH 147/214] powerpc: Provide syscall wrapper Implement syscall wrapper as per s390, x86, arm64. When enabled cause handlers to accept parameters from a stack frame rather than from user scratch register state. This allows for user registers to be safely cleared in order to reduce caller influence on speculation within syscall routine. The wrapper is a macro that emits syscall handler symbols that call into the target handler, obtaining its parameters from a struct pt_regs on the stack. As registers are already saved to the stack prior to calling system_call_exception, it appears that this function is executed more efficiently with the new stack-pointer convention than with parameters passed by registers, avoiding the allocation of a stack frame for this method. On a 32-bit system, we see >20% performance increases on the null_syscall microbenchmark, and on a Power 8 the performance gains amortise the cost of clearing and restoring registers which is implemented at the end of this series, seeing final result of ~5.6% performance improvement on null_syscall. Syscalls are wrapped in this fashion on all platforms except for the Cell processor as this commit does not provide SPU support. This can be quickly fixed in a successive patch, but requires spu_sys_callback to allocate a pt_regs structure to satisfy the wrapped calling convention. 
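As a simplified sketch of what the wrapper emits (the handler name 'foo' and its arguments are hypothetical; the real expansion is the __SYSCALL_DEFINEx macro in the new syscall_wrapper.h below), SYSCALL_DEFINE2(foo, unsigned int, fd, unsigned long, arg) roughly becomes:

static long __se_sys_foo(unsigned long fd, unsigned long arg);
static inline long __do_sys_foo(unsigned int fd, unsigned long arg);

long __powerpc_sys_foo(const struct pt_regs *regs)
{
	/* arguments are taken from the saved user registers on the stack */
	return __se_sys_foo(regs->gpr[3], regs->gpr[4]);
}

static long __se_sys_foo(unsigned long fd, unsigned long arg)
{
	return __do_sys_foo((unsigned int)fd, arg);
}

static inline long __do_sys_foo(unsigned int fd, unsigned long arg)
{
	/* original handler body */
	return 0;
}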
Co-developed-by: Andrew Donnellan Signed-off-by: Andrew Donnellan Signed-off-by: Rohan McLure Reviewed-by: Nicholas Piggin [mpe: Make incompatible with COMPAT to retain clearing of high bits of args] Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220921065605.1051927-22-rmclure@linux.ibm.com --- arch/powerpc/Kconfig | 1 + arch/powerpc/include/asm/syscall.h | 4 ++ arch/powerpc/include/asm/syscall_wrapper.h | 51 ++++++++++++++++++++++ arch/powerpc/include/asm/syscalls.h | 24 +++++++++- arch/powerpc/kernel/syscall.c | 34 ++++++++------- arch/powerpc/kernel/systbl.c | 7 +++ arch/powerpc/kernel/vdso.c | 2 + 7 files changed, 105 insertions(+), 18 deletions(-) create mode 100644 arch/powerpc/include/asm/syscall_wrapper.h diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 44d98d32e3bf..237c0e071e5e 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -137,6 +137,7 @@ config PPC select ARCH_HAS_STRICT_KERNEL_RWX if (PPC_BOOK3S || PPC_8xx || 40x) && !HIBERNATION select ARCH_HAS_STRICT_KERNEL_RWX if PPC_85xx && !HIBERNATION && !RANDOMIZE_BASE select ARCH_HAS_STRICT_MODULE_RWX if ARCH_HAS_STRICT_KERNEL_RWX + select ARCH_HAS_SYSCALL_WRAPPER if !SPU_BASE && !COMPAT select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST select ARCH_HAS_UACCESS_FLUSHCACHE select ARCH_HAS_UBSAN_SANITIZE_ALL diff --git a/arch/powerpc/include/asm/syscall.h b/arch/powerpc/include/asm/syscall.h index d2a8dfd5de33..3dd36c5e334a 100644 --- a/arch/powerpc/include/asm/syscall.h +++ b/arch/powerpc/include/asm/syscall.h @@ -14,8 +14,12 @@ #include #include +#ifdef CONFIG_ARCH_HAS_SYSCALL_WRAPPER +typedef long (*syscall_fn)(const struct pt_regs *); +#else typedef long (*syscall_fn)(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long); +#endif /* ftrace syscalls requires exporting the sys_call_table */ extern const syscall_fn sys_call_table[]; diff --git a/arch/powerpc/include/asm/syscall_wrapper.h b/arch/powerpc/include/asm/syscall_wrapper.h new file mode 100644 index 000000000000..75b41b173f7a --- /dev/null +++ b/arch/powerpc/include/asm/syscall_wrapper.h @@ -0,0 +1,51 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * syscall_wrapper.h - powerpc specific wrappers to syscall definitions + * + * Based on arch/{x86,arm64}/include/asm/syscall_wrapper.h + */ + +#ifndef __ASM_POWERPC_SYSCALL_WRAPPER_H +#define __ASM_POWERPC_SYSCALL_WRAPPER_H + +struct pt_regs; + +#define SC_POWERPC_REGS_TO_ARGS(x, ...) \ + __MAP(x,__SC_ARGS \ + ,,regs->gpr[3],,regs->gpr[4],,regs->gpr[5] \ + ,,regs->gpr[6],,regs->gpr[7],,regs->gpr[8]) + +#define __SYSCALL_DEFINEx(x, name, ...) 
\ + long __powerpc_sys##name(const struct pt_regs *regs); \ + ALLOW_ERROR_INJECTION(__powerpc_sys##name, ERRNO); \ + static long __se_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \ + static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)); \ + long __powerpc_sys##name(const struct pt_regs *regs) \ + { \ + return __se_sys##name(SC_POWERPC_REGS_TO_ARGS(x,__VA_ARGS__)); \ + } \ + static long __se_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)) \ + { \ + long ret = __do_sys##name(__MAP(x,__SC_CAST,__VA_ARGS__)); \ + __MAP(x,__SC_TEST,__VA_ARGS__); \ + __PROTECT(x, ret,__MAP(x,__SC_ARGS,__VA_ARGS__)); \ + return ret; \ + } \ + static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)) + +#define SYSCALL_DEFINE0(sname) \ + SYSCALL_METADATA(_##sname, 0); \ + long __powerpc_sys_##sname(const struct pt_regs *__unused); \ + ALLOW_ERROR_INJECTION(__powerpc_sys_##sname, ERRNO); \ + long __powerpc_sys_##sname(const struct pt_regs *__unused) + +#define COND_SYSCALL(name) \ + long __powerpc_sys_##name(const struct pt_regs *regs); \ + long __weak __powerpc_sys_##name(const struct pt_regs *regs) \ + { \ + return sys_ni_syscall(); \ + } + +#define SYS_NI(name) SYSCALL_ALIAS(__powerpc_sys_##name, sys_ni_posix_timers); + +#endif // __ASM_POWERPC_SYSCALL_WRAPPER_H diff --git a/arch/powerpc/include/asm/syscalls.h b/arch/powerpc/include/asm/syscalls.h index 211bb8393dee..49bbc3e0733d 100644 --- a/arch/powerpc/include/asm/syscalls.h +++ b/arch/powerpc/include/asm/syscalls.h @@ -15,6 +15,12 @@ #include #include +#ifndef CONFIG_ARCH_HAS_SYSCALL_WRAPPER +long sys_ni_syscall(void); +#else +long sys_ni_syscall(const struct pt_regs *regs); +#endif + struct rtas_args; /* @@ -29,12 +35,12 @@ struct rtas_args; #define merge_64(high, low) (((u64)high << 32) | low) #endif -long sys_ni_syscall(void); - /* * PowerPC architecture-specific syscalls */ +#ifndef CONFIG_ARCH_HAS_SYSCALL_WRAPPER + long sys_rtas(struct rtas_args __user *uargs); #ifdef CONFIG_PPC64 @@ -114,5 +120,19 @@ long sys_ppc_fadvise64_64(int fd, int advice, u32 len_high, u32 len_low); #endif +#else + +#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, native) +#define __SYSCALL(nr, entry) \ + long __powerpc_##entry(const struct pt_regs *regs); + +#ifdef CONFIG_PPC64 +#include +#else +#include +#endif /* CONFIG_PPC64 */ + +#endif /* CONFIG_ARCH_HAS_SYSCALL_WRAPPER */ + #endif /* __KERNEL__ */ #endif /* __ASM_POWERPC_SYSCALLS_H */ diff --git a/arch/powerpc/kernel/syscall.c b/arch/powerpc/kernel/syscall.c index 2c002cbbc676..18b9d325395f 100644 --- a/arch/powerpc/kernel/syscall.c +++ b/arch/powerpc/kernel/syscall.c @@ -15,7 +15,6 @@ /* Has to run notrace because it is entered not completely "reconciled" */ notrace long system_call_exception(struct pt_regs *regs, unsigned long r0) { - unsigned long r3, r4, r5, r6, r7, r8; long ret; syscall_fn f; @@ -145,31 +144,34 @@ notrace long system_call_exception(struct pt_regs *regs, unsigned long r0) return -ENOSYS; } - r3 = regs->gpr[3]; - r4 = regs->gpr[4]; - r5 = regs->gpr[5]; - r6 = regs->gpr[6]; - r7 = regs->gpr[7]; - r8 = regs->gpr[8]; - /* May be faster to do array_index_nospec? 
*/ barrier_nospec(); +#ifdef CONFIG_ARCH_HAS_SYSCALL_WRAPPER + // No COMPAT if we have SYSCALL_WRAPPER, see Kconfig + f = (void *)sys_call_table[r0]; + ret = f(regs); +#else if (unlikely(is_compat_task())) { + unsigned long r3, r4, r5, r6, r7, r8; + f = (void *)compat_sys_call_table[r0]; - r3 &= 0x00000000ffffffffULL; - r4 &= 0x00000000ffffffffULL; - r5 &= 0x00000000ffffffffULL; - r6 &= 0x00000000ffffffffULL; - r7 &= 0x00000000ffffffffULL; - r8 &= 0x00000000ffffffffULL; + r3 = regs->gpr[3] & 0x00000000ffffffffULL; + r4 = regs->gpr[4] & 0x00000000ffffffffULL; + r5 = regs->gpr[5] & 0x00000000ffffffffULL; + r6 = regs->gpr[6] & 0x00000000ffffffffULL; + r7 = regs->gpr[7] & 0x00000000ffffffffULL; + r8 = regs->gpr[8] & 0x00000000ffffffffULL; + ret = f(r3, r4, r5, r6, r7, r8); } else { f = (void *)sys_call_table[r0]; - } - ret = f(r3, r4, r5, r6, r7, r8); + ret = f(regs->gpr[3], regs->gpr[4], regs->gpr[5], + regs->gpr[6], regs->gpr[7], regs->gpr[8]); + } +#endif /* * Ultimately, this value will get limited by KSTACK_OFFSET_MAX(), diff --git a/arch/powerpc/kernel/systbl.c b/arch/powerpc/kernel/systbl.c index d64dfafc2e5e..9d7c5a596171 100644 --- a/arch/powerpc/kernel/systbl.c +++ b/arch/powerpc/kernel/systbl.c @@ -15,13 +15,20 @@ #include #include +#undef __SYSCALL_WITH_COMPAT #define __SYSCALL_WITH_COMPAT(nr, entry, compat) __SYSCALL(nr, entry) +#undef __SYSCALL +#ifdef CONFIG_ARCH_HAS_SYSCALL_WRAPPER +#define __SYSCALL(nr, entry) [nr] = __powerpc_##entry, +#define __powerpc_sys_ni_syscall sys_ni_syscall +#else /* * Coerce syscall handlers with arbitrary parameters to common type * requires cast to void* to avoid -Wcast-function-type. */ #define __SYSCALL(nr, entry) [nr] = (void *) entry, +#endif const syscall_fn sys_call_table[] = { #ifdef CONFIG_PPC64 diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c index fcca06d200d3..e1f36fd61db3 100644 --- a/arch/powerpc/kernel/vdso.c +++ b/arch/powerpc/kernel/vdso.c @@ -39,6 +39,8 @@ extern char vdso32_start, vdso32_end; extern char vdso64_start, vdso64_end; +long sys_ni_syscall(void); + /* * The vdso data page (aka. systemcfg for old ppc64 fans) is here. * Once the early boot kernel code no longer needs to muck around From bd7dc90e52e8db7ee0f38c51bc9047bafb54fe43 Mon Sep 17 00:00:00 2001 From: Hari Bathini Date: Mon, 12 Sep 2022 12:20:31 +0530 Subject: [PATCH 148/214] powerpc/64/kdump: Limit kdump base to 512MB Since commit e641eb03ab2b0 ("powerpc: Fix up the kdump base cap to 128M") memory for the kdump kernel has been reserved at an offset of 128MB. This held up well for a long time before running into boot failures on LPARs with a lot of cores. Commit 7c5ed82b800d8 ("powerpc: Set crashkernel offset to mid of RMA region") fixed this boot failure by moving the offset to the middle of the RMA region. This change meant the offset is either 256MB or 512MB on LPARs as ppc64_rma_size was 512MB or 1024MB owing to commit 103a8542cb35b ("powerpc/book3s64/radix: Fix boot failure with large amount of guest memory"). But ppc64_rma_size can be larger as well with newer f/w. So, limit the crashkernel reservation offset to 512MB to avoid running into boot failures during kdump kernel boot, due to RTAS or other allocation restrictions. Also, while here, use SZ_128M instead of open coding it.
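For a concrete picture of the resulting LPAR offset (illustrative RMA sizes only):

/* crashk_res.start = min_t(u64, ppc64_rma_size / 2, SZ_512M); */
/* ppc64_rma_size =  512MB -> offset 256MB (unchanged)          */
/* ppc64_rma_size = 1024MB -> offset 512MB (unchanged)          */
/* ppc64_rma_size =    4GB -> offset 512MB (was 2GB previously) */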
Signed-off-by: Hari Bathini Tested-by: Sachin Sant Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220912065031.57416-1-hbathini@linux.ibm.com --- arch/powerpc/kexec/core.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/kexec/core.c b/arch/powerpc/kexec/core.c index cf84bfe9e27e..de64c7962991 100644 --- a/arch/powerpc/kexec/core.c +++ b/arch/powerpc/kexec/core.c @@ -136,7 +136,7 @@ void __init reserve_crashkernel(void) #ifdef CONFIG_PPC64 /* * On the LPAR platform place the crash kernel to mid of - * RMA size (512MB or more) to ensure the crash kernel + * RMA size (max. of 512MB) to ensure the crash kernel * gets enough space to place itself and some stack to be * in the first segment. At the same time normal kernel * also get enough space to allocate memory for essential @@ -144,9 +144,9 @@ void __init reserve_crashkernel(void) * kernel starts at 128MB offset on other platforms. */ if (firmware_has_feature(FW_FEATURE_LPAR)) - crashk_res.start = ppc64_rma_size / 2; + crashk_res.start = min_t(u64, ppc64_rma_size / 2, SZ_512M); else - crashk_res.start = min(0x8000000ULL, (ppc64_rma_size / 2)); + crashk_res.start = min_t(u64, ppc64_rma_size / 2, SZ_128M); #else crashk_res.start = KDUMP_KERNELBASE; #endif From b19448fe846baad689ff51a991ebfc74b4b5e0a8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pali=20Roh=C3=A1r?= Date: Tue, 23 Aug 2022 01:15:01 +0200 Subject: [PATCH 149/214] powerpc: Add support for early debugging via Serial 16550 console MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Currently powerpc early debugging contains a lot of platform-specific options, but does not support a standard UART / serial 16550 console. The later legacy_serial.c code supports registering a UART as the early debug console from the device tree, but that does not happen early in boot, only after the machine description code has finished. So the current code is unsuitable for real early debugging via UART. Add support for a new early debugging option CONFIG_PPC_EARLY_DEBUG_16550 which enables a Serial 16550 console at the address defined by the new option CONFIG_PPC_EARLY_DEBUG_16550_PHYSADDR, with the stride given by the option CONFIG_PPC_EARLY_DEBUG_16550_STRIDE. With this change it is possible to debug powerpc machine description code. For example this early debugging code can also print the "No suitable machine description found" error on the serial console, which is emitted before the legacy_serial.c code runs. Signed-off-by: Pali Rohár Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220822231501.16827-1-pali@kernel.org --- arch/powerpc/Kconfig.debug | 15 +++++++++++++++ arch/powerpc/include/asm/udbg.h | 1 + arch/powerpc/kernel/udbg.c | 2 ++ arch/powerpc/kernel/udbg_16550.c | 33 ++++++++++++++++++++++++++++++++ 4 files changed, 51 insertions(+) diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug index ae727d4218b9..6aaf8dc60610 100644 --- a/arch/powerpc/Kconfig.debug +++ b/arch/powerpc/Kconfig.debug @@ -283,6 +283,12 @@ config PPC_EARLY_DEBUG_MEMCONS This console provides input and output buffers stored within the kernel BSS and should be safe to select on any system. A debugger can then be used to read kernel output or send input to the console.
+ +config PPC_EARLY_DEBUG_16550 + bool "Serial 16550" + depends on PPC_UDBG_16550 + help + Select this to enable early debugging via Serial 16550 console endchoice config PPC_MEMCONS_OUTPUT_SIZE @@ -354,6 +360,15 @@ config PPC_EARLY_DEBUG_CPM_ADDR platform probing is done, all platforms selected must share the same address. +config PPC_EARLY_DEBUG_16550_PHYSADDR + hex "Early debug Serial 16550 physical address" + depends on PPC_EARLY_DEBUG_16550 + +config PPC_EARLY_DEBUG_16550_STRIDE + int "Early debug Serial 16550 stride" + depends on PPC_EARLY_DEBUG_16550 + default 1 + config FAIL_IOMMU bool "Fault-injection capability for IOMMU" depends on FAULT_INJECTION diff --git a/arch/powerpc/include/asm/udbg.h b/arch/powerpc/include/asm/udbg.h index 524c2085070f..88add5593e6f 100644 --- a/arch/powerpc/include/asm/udbg.h +++ b/arch/powerpc/include/asm/udbg.h @@ -52,6 +52,7 @@ extern void __init udbg_init_ehv_bc(void); extern void __init udbg_init_ps3gelic(void); extern void __init udbg_init_debug_opal_raw(void); extern void __init udbg_init_debug_opal_hvsi(void); +extern void __init udbg_init_debug_16550(void); #endif /* __KERNEL__ */ #endif /* _ASM_POWERPC_UDBG_H */ diff --git a/arch/powerpc/kernel/udbg.c b/arch/powerpc/kernel/udbg.c index b1544b2f6321..92b3fc258d11 100644 --- a/arch/powerpc/kernel/udbg.c +++ b/arch/powerpc/kernel/udbg.c @@ -67,6 +67,8 @@ void __init udbg_early_init(void) udbg_init_debug_opal_raw(); #elif defined(CONFIG_PPC_EARLY_DEBUG_OPAL_HVSI) udbg_init_debug_opal_hvsi(); +#elif defined(CONFIG_PPC_EARLY_DEBUG_16550) + udbg_init_debug_16550(); #endif #ifdef CONFIG_PPC_EARLY_DEBUG diff --git a/arch/powerpc/kernel/udbg_16550.c b/arch/powerpc/kernel/udbg_16550.c index ddfbc74bf85f..74ddf836f7a2 100644 --- a/arch/powerpc/kernel/udbg_16550.c +++ b/arch/powerpc/kernel/udbg_16550.c @@ -8,6 +8,7 @@ #include #include #include +#include extern u8 real_readb(volatile u8 __iomem *addr); extern void real_writeb(u8 data, volatile u8 __iomem *addr); @@ -296,3 +297,35 @@ void __init udbg_init_40x_realmode(void) } #endif /* CONFIG_PPC_EARLY_DEBUG_40x */ + +#ifdef CONFIG_PPC_EARLY_DEBUG_16550 + +static void __iomem *udbg_uart_early_addr; + +void __init udbg_init_debug_16550(void) +{ + udbg_uart_early_addr = early_ioremap(CONFIG_PPC_EARLY_DEBUG_16550_PHYSADDR, 0x1000); + udbg_uart_init_mmio(udbg_uart_early_addr, CONFIG_PPC_EARLY_DEBUG_16550_STRIDE); +} + +static int __init udbg_init_debug_16550_ioremap(void) +{ + void __iomem *addr; + + if (!udbg_uart_early_addr) + return 0; + + addr = ioremap(CONFIG_PPC_EARLY_DEBUG_16550_PHYSADDR, 0x1000); + if (WARN_ON(!addr)) + return -ENOMEM; + + udbg_uart_init_mmio(addr, CONFIG_PPC_EARLY_DEBUG_16550_STRIDE); + early_iounmap(udbg_uart_early_addr, 0x1000); + udbg_uart_early_addr = NULL; + + return 0; +} + +early_initcall(udbg_init_debug_16550_ioremap); + +#endif /* CONFIG_PPC_EARLY_DEBUG_16550 */ From c84550203b3173511e8cdbe94bc2e33175ba1d72 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Sat, 10 Sep 2022 00:24:57 +1000 Subject: [PATCH 150/214] powerpc/time: avoid programming DEC at the start of the timer interrupt Setting DEC to maximum at the start of the timer interrupt is not necessary and can be avoided for performance when MSR[EE] is not enabled during the handler as explained in commit 0faf20a1ad16 ("powerpc/64s/interrupt: Don't enable MSR[EE] in irq handlers unless perf is in use"), where this change was first attempted. 
The idea is that the timer interrupt runs with MSR[EE]=0, and at the end of the interrupt DEC is programmed to the next timer interval, so there is no need to clear the decrementer exception before then. When the above commit was merged, that was not quite true. The low res timer subsystem had some cases in the oneshot timer code where if the tick was to be stopped and no timers active, the clock device would not get the ->set_state_oneshot_stopped() call, so DEC would not get reprogrammed, and this would hang taking continual timer interrupts. So this was reverted in commit d2b9be1f4af5 ("powerpc/time: Always set decrementer in timer_interrupt()"), which was a partial revert of the above commit. Commit 62c1256d5447 ("timers/nohz: Switch to ONESHOT_STOPPED in the low-res handler when the tick is stopped") was later merged to fix this missing case in the timer subsystem, so now the behaviour can be restored. Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220909142457.278032-1-npiggin@gmail.com --- arch/powerpc/kernel/time.c | 29 +++++++++++++++-------------- 1 file changed, 15 insertions(+), 14 deletions(-) diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index ae3e33b4ef95..a2ab397065c6 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -532,22 +532,23 @@ DEFINE_INTERRUPT_HANDLER_ASYNC(timer_interrupt) return; } - /* - * Ensure a positive value is written to the decrementer, or - * else some CPUs will continue to take decrementer exceptions. - * When the PPC_WATCHDOG (decrementer based) is configured, - * keep this at most 31 bits, which is about 4 seconds on most - * systems, which gives the watchdog a chance of catching timer - * interrupt hard lockups. - */ - if (IS_ENABLED(CONFIG_PPC_WATCHDOG)) - set_dec(0x7fffffff); - else - set_dec(decrementer_max); - /* Conditionally hard-enable interrupts. */ - if (should_hard_irq_enable()) + if (should_hard_irq_enable()) { + /* + * Ensure a positive value is written to the decrementer, or + * else some CPUs will continue to take decrementer exceptions. + * When the PPC_WATCHDOG (decrementer based) is configured, + * keep this at most 31 bits, which is about 4 seconds on most + * systems, which gives the watchdog a chance of catching timer + * interrupt hard lockups. + */ + if (IS_ENABLED(CONFIG_PPC_WATCHDOG)) + set_dec(0x7fffffff); + else + set_dec(decrementer_max); + do_hard_irq_enable(); + } #if defined(CONFIG_PPC32) && defined(CONFIG_PPC_PMAC) if (atomic_read(&ppc_n_lost_interrupts) != 0) From dabeb572adf24bbd7cb21d1cc4d118bdf2c2ab74 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Tue, 20 Sep 2022 22:22:58 +1000 Subject: [PATCH 151/214] powerpc: add ISA v3.0 / v3.1 wait opcode macro The wait instruction encoding changed between ISA v2.07 and ISA v3.0. In v3.1 the instruction gained a new field. Update the PPC_WAIT macro to the current encoding. Rename the older incompatible one with a _v203 suffix as it was introduced in v2.03 (the WC field was introduced in v2.07 but the kernel only uses WC=0). 
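For reference, a worked example of the two encodings (derived from the macros added in the diff below):

#define __PPC_WC(w)		(((w) & 0x3) << 21)
#define __PPC_PL(p)		(((p) & 0x3) << 16)
#define PPC_RAW_WAIT_v203	(0x7c00007c)	/* older encoding; the kernel only uses WC=0 */
#define PPC_RAW_WAIT(w, p)	(0x7c00003c | __PPC_WC(w) | __PPC_PL(p))
/* e.g. PPC_RAW_WAIT(2, 0) == 0x7c40003c, the "wait 2,0" (pause_short) form */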
Reviewed-by: Segher Boessenkool Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220920122259.363092-1-npiggin@gmail.com --- arch/powerpc/include/asm/ppc-opcode.h | 7 +++++-- arch/powerpc/kernel/idle_64e.S | 2 +- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h index c6d724104ed1..21e33e46f4b8 100644 --- a/arch/powerpc/include/asm/ppc-opcode.h +++ b/arch/powerpc/include/asm/ppc-opcode.h @@ -330,6 +330,7 @@ #define __PPC_XSP(s) ((((s) & 0x1e) | (((s) >> 5) & 0x1)) << 21) #define __PPC_XTP(s) __PPC_XSP(s) #define __PPC_T_TLB(t) (((t) & 0x3) << 21) +#define __PPC_PL(p) (((p) & 0x3) << 16) #define __PPC_WC(w) (((w) & 0x3) << 21) #define __PPC_WS(w) (((w) & 0x1f) << 11) #define __PPC_SH(s) __PPC_WS(s) @@ -388,7 +389,8 @@ #define PPC_RAW_RFDI (0x4c00004e) #define PPC_RAW_RFMCI (0x4c00004c) #define PPC_RAW_TLBILX(t, a, b) (0x7c000024 | __PPC_T_TLB(t) | __PPC_RA0(a) | __PPC_RB(b)) -#define PPC_RAW_WAIT(w) (0x7c00007c | __PPC_WC(w)) +#define PPC_RAW_WAIT_v203 (0x7c00007c) +#define PPC_RAW_WAIT(w, p) (0x7c00003c | __PPC_WC(w) | __PPC_PL(p)) #define PPC_RAW_TLBIE(lp, a) (0x7c000264 | ___PPC_RB(a) | ___PPC_RS(lp)) #define PPC_RAW_TLBIE_5(rb, rs, ric, prs, r) \ (0x7c000264 | ___PPC_RB(rb) | ___PPC_RS(rs) | ___PPC_RIC(ric) | ___PPC_PRS(prs) | ___PPC_R(r)) @@ -606,7 +608,8 @@ #define PPC_TLBILX_ALL(a, b) PPC_TLBILX(0, a, b) #define PPC_TLBILX_PID(a, b) PPC_TLBILX(1, a, b) #define PPC_TLBILX_VA(a, b) PPC_TLBILX(3, a, b) -#define PPC_WAIT(w) stringify_in_c(.long PPC_RAW_WAIT(w)) +#define PPC_WAIT_v203 stringify_in_c(.long PPC_RAW_WAIT_v203) +#define PPC_WAIT(w, p) stringify_in_c(.long PPC_RAW_WAIT(w, p)) #define PPC_TLBIE(lp, a) stringify_in_c(.long PPC_RAW_TLBIE(lp, a)) #define PPC_TLBIE_5(rb, rs, ric, prs, r) \ stringify_in_c(.long PPC_RAW_TLBIE_5(rb, rs, ric, prs, r)) diff --git a/arch/powerpc/kernel/idle_64e.S b/arch/powerpc/kernel/idle_64e.S index 1736aad2afe9..0fc680e03dee 100644 --- a/arch/powerpc/kernel/idle_64e.S +++ b/arch/powerpc/kernel/idle_64e.S @@ -75,7 +75,7 @@ _GLOBAL(\name) .macro BOOK3E_IDLE_LOOP 1: - PPC_WAIT(0) + PPC_WAIT_v203 b 1b .endm From 9c7bfc2dc21e737e8e4a753630bce675e1e7c0ad Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Tue, 20 Sep 2022 22:22:59 +1000 Subject: [PATCH 152/214] powerpc/64s: Make POWER10 and later use pause_short in cpu_relax loops We want to move away from using SMT priority updates for cpu_relax, and use a 'wait' instruction which is similar to x86. As well as being a much better fit for what everybody else uses and tests with, priority nops are stateful which is nasty (interrupts have to consider they might be taken at a different priority), and they're expensive to execute, similar to a mtSPR which can affect other threads in the pipe. This has been shown to give results that are less affected by code alignment on benchmarks that cause a lot of spin waiting (e.g., rwsem contention on unixbench filesystem benchmarks) on POWER10. QEMU TCG only supports this instruction correctly since v7.1, versions without the fix may cause hangs when running POWER10 CPUs.
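For context, a sketch of how these primitives are typically used (the surrounding function and the 'done' flag are hypothetical, only the spin_begin/spin_cpu_relax/spin_end calls come from the patch):

static void wait_for_flag(volatile int *done)
{
	spin_begin();		/* pre-v3.1: HMT_low(); v3.1: nop */
	while (!*done)
		spin_cpu_relax();	/* pre-v3.1: nop; v3.1: pause_short */
	spin_end();		/* pre-v3.1: HMT_medium(); v3.1: nop */
}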
Reviewed-by: Segher Boessenkool Signed-off-by: Nicholas Piggin [mpe: Fix checkpatch warnings RE the macros] Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220920122259.363092-2-npiggin@gmail.com --- arch/powerpc/include/asm/processor.h | 18 +++++++++++++++--- arch/powerpc/include/asm/vdso/processor.h | 8 +++++++- 2 files changed, 22 insertions(+), 4 deletions(-) diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h index 97a77b37daa3..9bff4ab8242d 100644 --- a/arch/powerpc/include/asm/processor.h +++ b/arch/powerpc/include/asm/processor.h @@ -355,11 +355,23 @@ static inline unsigned long __pack_fe01(unsigned int fpmode) #ifdef CONFIG_PPC64 -#define spin_begin() HMT_low() +#define spin_begin() \ + asm volatile(ASM_FTR_IFCLR( \ + "or 1,1,1", /* HMT_LOW */ \ + "nop", /* v3.1 uses pause_short in cpu_relax instead */ \ + %0) :: "i" (CPU_FTR_ARCH_31) : "memory") -#define spin_cpu_relax() barrier() +#define spin_cpu_relax() \ + asm volatile(ASM_FTR_IFCLR( \ + "nop", /* Before v3.1 use priority nops in spin_begin/end */ \ + PPC_WAIT(2, 0), /* aka pause_short */ \ + %0) :: "i" (CPU_FTR_ARCH_31) : "memory") -#define spin_end() HMT_medium() +#define spin_end() \ + asm volatile(ASM_FTR_IFCLR( \ + "or 2,2,2", /* HMT_MEDIUM */ \ + "nop", \ + %0) :: "i" (CPU_FTR_ARCH_31) : "memory") #endif diff --git a/arch/powerpc/include/asm/vdso/processor.h b/arch/powerpc/include/asm/vdso/processor.h index 8d79f994b4aa..80d13207c568 100644 --- a/arch/powerpc/include/asm/vdso/processor.h +++ b/arch/powerpc/include/asm/vdso/processor.h @@ -22,7 +22,13 @@ #endif #ifdef CONFIG_PPC64 -#define cpu_relax() do { HMT_low(); HMT_medium(); barrier(); } while (0) +#define cpu_relax() \ + asm volatile(ASM_FTR_IFCLR( \ + /* Pre-POWER10 uses low ; medium priority nops */ \ + "or 1,1,1 ; or 2,2,2", \ + /* POWER10 onward uses pause_short (wait 2,0) */ \ + PPC_WAIT(2, 0), \ + %0) :: "i" (CPU_FTR_ARCH_31) : "memory") #else #define cpu_relax() barrier() #endif From 58ec7f06b74e0d6e76c4110afce367c8b5f0837d Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Wed, 21 Sep 2022 11:41:02 +1000 Subject: [PATCH 153/214] powerpc/64s: Fix GENERIC_CPU build flags for PPC970 / G5 Big-endian GENERIC_CPU supports 970, but builds with -mcpu=power5. POWER5 is ISA v2.02 whereas 970 is v2.01 plus Altivec. 2.02 added the popcntb instruction which a compiler might use. Use -mcpu=power4. 
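To illustrate the hazard (a hedged example; exact code generation depends on the compiler version and flags), even a simple population count can lead the compiler to emit popcntb when told the target is POWER5:

/* With -mcpu=power5 the compiler may expand this using popcntb (ISA v2.02);
 * a 970 (ISA v2.01 plus Altivec) has no such instruction. */
static inline unsigned int bits_set(unsigned long x)
{
	return __builtin_popcountl(x);
}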
Fixes: 471d7ff8b51b ("powerpc/64s: Remove POWER4 support") Signed-off-by: Nicholas Piggin Reviewed-by: Segher Boessenkool Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220921014103.587954-1-npiggin@gmail.com --- arch/powerpc/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile index cb01832385d0..3bed2c6dcf12 100644 --- a/arch/powerpc/Makefile +++ b/arch/powerpc/Makefile @@ -152,7 +152,7 @@ CFLAGS-$(CONFIG_GENERIC_CPU) += -mcpu=power8 CFLAGS-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=power9,-mtune=power8) else CFLAGS-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=power7,$(call cc-option,-mtune=power5)) -CFLAGS-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mcpu=power5,-mcpu=power4) +CFLAGS-$(CONFIG_GENERIC_CPU) += -mcpu=power4 endif else ifdef CONFIG_PPC_BOOK3E_64 CFLAGS-$(CONFIG_GENERIC_CPU) += -mcpu=powerpc64 From 7fd123e544886bf04fa853869efe55cb3f22d0c0 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Wed, 21 Sep 2022 11:41:03 +1000 Subject: [PATCH 154/214] powerpc/64s: update cpu selection options Update the 64s GENERIC_CPU option. POWER4 support has been dropped, so make that clear in the option name. The POWER5_CPU option is dropped because it's uncommon, and GENERIC_CPU covers it. -mtune= before power8 is dropped because the minimum gcc version supports power8, and tuning is made consistent between big and little endian. A 970 option is added for PowerPC 970 / G5 because they still have a user base, and -mtune=power8 does not generate good code for the 970. This also updates the ISA versions document to add Power4/Power4+ because I didn't realise Power4+ used 2.01. Signed-off-by: Nicholas Piggin Reviewed-by: Segher Boessenkool Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220921014103.587954-2-npiggin@gmail.com --- Documentation/powerpc/isa-versions.rst | 14 ++++++++++++++ arch/powerpc/Makefile | 5 +++-- arch/powerpc/platforms/Kconfig.cputype | 8 ++++---- 3 files changed, 21 insertions(+), 6 deletions(-) diff --git a/Documentation/powerpc/isa-versions.rst b/Documentation/powerpc/isa-versions.rst index 5592b8899a48..a8d6b6028b3e 100644 --- a/Documentation/powerpc/isa-versions.rst +++ b/Documentation/powerpc/isa-versions.rst @@ -4,6 +4,8 @@ CPU to ISA Version Mapping Mapping of some CPU versions to relevant ISA versions. +Note Power4 and Power4+ are not supported. 
+ ========= ==================================================================== CPU Architecture version ========= ==================================================================== @@ -26,6 +28,12 @@ PPC970 - PowerPC User Instruction Set Architecture Book I v2.01 - PowerPC Virtual Environment Architecture Book II v2.01 - PowerPC Operating Environment Architecture Book III v2.01 - Plus Altivec/VMX ~= 2.03 +Power4+ - PowerPC User Instruction Set Architecture Book I v2.01 + - PowerPC Virtual Environment Architecture Book II v2.01 + - PowerPC Operating Environment Architecture Book III v2.01 +Power4 - PowerPC User Instruction Set Architecture Book I v2.00 + - PowerPC Virtual Environment Architecture Book II v2.00 + - PowerPC Operating Environment Architecture Book III v2.00 ========= ==================================================================== @@ -48,6 +56,8 @@ Power5++ No Power5+ No Power5 No PPC970 Yes +Power4+ No +Power4 No ========== ================== ========== ==== @@ -66,6 +76,8 @@ Power5++ No Power5+ No Power5 No PPC970 No +Power4+ No +Power4 No ========== ==== ========== ==================================== @@ -84,4 +96,6 @@ Power5++ No Power5+ No Power5 No PPC970 No +Power4+ No +Power4 No ========== ==================================== diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile index 3bed2c6dcf12..aeabcc945110 100644 --- a/arch/powerpc/Makefile +++ b/arch/powerpc/Makefile @@ -149,11 +149,12 @@ CFLAGS-$(CONFIG_PPC32) += $(call cc-option,-mno-readonly-in-sdata) ifdef CONFIG_PPC_BOOK3S_64 ifdef CONFIG_CPU_LITTLE_ENDIAN CFLAGS-$(CONFIG_GENERIC_CPU) += -mcpu=power8 -CFLAGS-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=power9,-mtune=power8) else -CFLAGS-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=power7,$(call cc-option,-mtune=power5)) CFLAGS-$(CONFIG_GENERIC_CPU) += -mcpu=power4 endif +CFLAGS-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=power10, \ + $(call cc-option,-mtune=power9, \ + $(call cc-option,-mtune=power8))) else ifdef CONFIG_PPC_BOOK3E_64 CFLAGS-$(CONFIG_GENERIC_CPU) += -mcpu=powerpc64 endif diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype index 51059af63856..8333c558d932 100644 --- a/arch/powerpc/platforms/Kconfig.cputype +++ b/arch/powerpc/platforms/Kconfig.cputype @@ -126,7 +126,7 @@ choice If unsure, select Generic. config GENERIC_CPU - bool "Generic (POWER4 and above)" + bool "Generic (POWER5 and PowerPC 970 and above)" depends on PPC_BOOK3S_64 && !CPU_LITTLE_ENDIAN select PPC_64S_HASH_MMU @@ -145,8 +145,8 @@ config CELL_CPU depends on PPC_BOOK3S_64 && !CPU_LITTLE_ENDIAN select PPC_64S_HASH_MMU -config POWER5_CPU - bool "POWER5" +config PPC_970_CPU + bool "PowerPC 970 (including PowerPC G5)" depends on PPC_BOOK3S_64 && !CPU_LITTLE_ENDIAN select PPC_64S_HASH_MMU @@ -235,7 +235,7 @@ config TARGET_CPU string depends on TARGET_CPU_BOOL default "cell" if CELL_CPU - default "power5" if POWER5_CPU + default "970" if PPC_970_CPU default "power6" if POWER6_CPU default "power7" if POWER7_CPU default "power8" if POWER8_CPU From 5e8b2c4dd3a0a4a2966e61d60dbeafab441cff28 Mon Sep 17 00:00:00 2001 From: Nicholas Miehlbradt Date: Mon, 26 Sep 2022 07:57:23 +0000 Subject: [PATCH 155/214] powerpc/64s: Add DEBUG_PAGEALLOC for radix There is support for DEBUG_PAGEALLOC on hash but not on radix. Add support on radix. 
Signed-off-by: Nicholas Miehlbradt Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220926075726.2846-1-nicholas@linux.ibm.com --- arch/powerpc/mm/book3s64/radix_pgtable.c | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c index 60c277a9b043..94a05c66be95 100644 --- a/arch/powerpc/mm/book3s64/radix_pgtable.c +++ b/arch/powerpc/mm/book3s64/radix_pgtable.c @@ -30,6 +30,7 @@ #include #include #include +#include #include @@ -267,13 +268,16 @@ static unsigned long next_boundary(unsigned long addr, unsigned long end) static int __meminit create_physical_mapping(unsigned long start, unsigned long end, - unsigned long max_mapping_size, int nid, pgprot_t _prot) { unsigned long vaddr, addr, mapping_size = 0; bool prev_exec, exec = false; pgprot_t prot; int psize; + unsigned long max_mapping_size = radix_mem_block_size; + + if (debug_pagealloc_enabled()) + max_mapping_size = PAGE_SIZE; start = ALIGN(start, PAGE_SIZE); end = ALIGN_DOWN(end, PAGE_SIZE); @@ -352,7 +356,6 @@ static void __init radix_init_pgtable(void) } WARN_ON(create_physical_mapping(start, end, - radix_mem_block_size, -1, PAGE_KERNEL)); } @@ -850,7 +853,7 @@ int __meminit radix__create_section_mapping(unsigned long start, } return create_physical_mapping(__pa(start), __pa(end), - radix_mem_block_size, nid, prot); + nid, prot); } int __meminit radix__remove_section_mapping(unsigned long start, unsigned long end) @@ -899,7 +902,14 @@ void __meminit radix__vmemmap_remove_mapping(unsigned long start, unsigned long #ifdef CONFIG_DEBUG_PAGEALLOC void radix__kernel_map_pages(struct page *page, int numpages, int enable) { - pr_warn_once("DEBUG_PAGEALLOC not supported in radix mode\n"); + unsigned long addr; + + addr = (unsigned long)page_address(page); + + if (enable) + set_memory_p(addr, numpages); + else + set_memory_np(addr, numpages); } #endif From 3e791d0f32b10eff9437822c6099c7a158560151 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Mon, 26 Sep 2022 07:57:24 +0000 Subject: [PATCH 156/214] powerpc/64s: Remove unneeded #ifdef CONFIG_DEBUG_PAGEALLOC in hash_utils debug_pagealloc_enabled() is always defined and constant folds to 'false' when CONFIG_DEBUG_PAGEALLOC is not enabled. Remove the #ifdefs, the code and associated static variables will be optimised out by the compiler when CONFIG_DEBUG_PAGEALLOC is not defined. 
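The cleanup leans on a standard kernel idiom: keep the predicate always defined, let it constant-fold when the feature is compiled out, and let dead-code elimination drop the guarded code and the objects only it references. A small user-space sketch of the same idiom (illustrative names, not the kernel's):

  #include <stdbool.h>
  #include <stdio.h>
  #include <stdlib.h>

  /*
   * Stand-in for debug_pagealloc_enabled(): always defined, but a constant
   * 'false' when the feature is configured out.
   */
  #ifdef FEATURE_DEBUG_PAGEALLOC
  static inline bool feature_enabled(void) { return true; }
  #else
  static inline bool feature_enabled(void) { return false; }
  #endif

  static char *shadow_table;      /* only used behind feature_enabled() */

  static void init_feature(void)
  {
          /*
           * No #ifdef needed: when feature_enabled() folds to false the
           * compiler can eliminate this block, and shadow_table with it.
           */
          if (feature_enabled()) {
                  shadow_table = calloc(1024, 1);
                  printf("shadow table at %p\n", shadow_table);
          }
  }

  int main(void)
  {
          init_feature();
          return 0;
  }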
Signed-off-by: Christophe Leroy Signed-off-by: Nicholas Miehlbradt Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220926075726.2846-2-nicholas@linux.ibm.com --- arch/powerpc/mm/book3s64/hash_utils.c | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c index ced1107b1677..4ec003d9e809 100644 --- a/arch/powerpc/mm/book3s64/hash_utils.c +++ b/arch/powerpc/mm/book3s64/hash_utils.c @@ -123,11 +123,8 @@ EXPORT_SYMBOL_GPL(mmu_slb_size); #ifdef CONFIG_PPC_64K_PAGES int mmu_ci_restrictions; #endif -#ifdef CONFIG_DEBUG_PAGEALLOC static u8 *linear_map_hash_slots; static unsigned long linear_map_hash_count; -static DEFINE_SPINLOCK(linear_map_hash_lock); -#endif /* CONFIG_DEBUG_PAGEALLOC */ struct mmu_hash_ops mmu_hash_ops; EXPORT_SYMBOL(mmu_hash_ops); @@ -427,11 +424,9 @@ repeat: break; cond_resched(); -#ifdef CONFIG_DEBUG_PAGEALLOC if (debug_pagealloc_enabled() && (paddr >> PAGE_SHIFT) < linear_map_hash_count) linear_map_hash_slots[paddr >> PAGE_SHIFT] = ret | 0x80; -#endif /* CONFIG_DEBUG_PAGEALLOC */ } return ret < 0 ? ret : 0; } @@ -1066,7 +1061,6 @@ static void __init htab_initialize(void) prot = pgprot_val(PAGE_KERNEL); -#ifdef CONFIG_DEBUG_PAGEALLOC if (debug_pagealloc_enabled()) { linear_map_hash_count = memblock_end_of_DRAM() >> PAGE_SHIFT; linear_map_hash_slots = memblock_alloc_try_nid( @@ -1076,7 +1070,6 @@ static void __init htab_initialize(void) panic("%s: Failed to allocate %lu bytes max_addr=%pa\n", __func__, linear_map_hash_count, &ppc64_rma_size); } -#endif /* CONFIG_DEBUG_PAGEALLOC */ /* create bolted the linear mapping in the hash table */ for_each_mem_range(i, &base, &end) { @@ -1988,6 +1981,8 @@ repeat: } #ifdef CONFIG_DEBUG_PAGEALLOC +static DEFINE_SPINLOCK(linear_map_hash_lock); + static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi) { unsigned long hash; From d7902d31cbc3bf72722768831a684b0286ccd523 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Mon, 26 Sep 2022 07:57:25 +0000 Subject: [PATCH 157/214] powerpc/64s: Allow double call of kernel_[un]map_linear_page() If the page is already mapped resp. already unmapped, bail out. 
Signed-off-by: Christophe Leroy Signed-off-by: Nicholas Miehlbradt Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220926075726.2846-3-nicholas@linux.ibm.com --- arch/powerpc/mm/book3s64/hash_utils.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c index 4ec003d9e809..6d985acb1d39 100644 --- a/arch/powerpc/mm/book3s64/hash_utils.c +++ b/arch/powerpc/mm/book3s64/hash_utils.c @@ -1997,6 +1997,9 @@ static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi) if (!vsid) return; + if (linear_map_hash_slots[lmi] & 0x80) + return; + ret = hpte_insert_repeating(hash, vpn, __pa(vaddr), mode, HPTE_V_BOLTED, mmu_linear_psize, mmu_kernel_ssize); @@ -2016,7 +2019,10 @@ static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi) hash = hpt_hash(vpn, PAGE_SHIFT, mmu_kernel_ssize); spin_lock(&linear_map_hash_lock); - BUG_ON(!(linear_map_hash_slots[lmi] & 0x80)); + if (!(linear_map_hash_slots[lmi] & 0x80)) { + spin_unlock(&linear_map_hash_lock); + return; + } hidx = linear_map_hash_slots[lmi] & 0x7f; linear_map_hash_slots[lmi] = 0; spin_unlock(&linear_map_hash_lock); From a5edf9815dd739fce660b4c8658f61b7d2517042 Mon Sep 17 00:00:00 2001 From: Nicholas Miehlbradt Date: Mon, 26 Sep 2022 07:57:26 +0000 Subject: [PATCH 158/214] powerpc/64s: Enable KFENCE on book3s64 KFENCE support was added for ppc32 in commit 90cbac0e995d ("powerpc: Enable KFENCE for PPC32"). Enable KFENCE on ppc64 architecture with hash and radix MMUs. It uses the same mechanism as debug pagealloc to protect/unprotect pages. All KFENCE kunit tests pass on both MMUs. KFENCE memory is initially allocated using memblock but is later marked as SLAB allocated. This necessitates the change to __pud_free to ensure that the KFENCE pages are freed appropriately. Based on previous work by Christophe Leroy and Jordan Niethe. Signed-off-by: Nicholas Miehlbradt Reviewed-by: Russell Currey Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220926075726.2846-4-nicholas@linux.ibm.com --- arch/powerpc/Kconfig | 2 +- arch/powerpc/include/asm/book3s/64/pgalloc.h | 6 ++++-- arch/powerpc/include/asm/book3s/64/pgtable.h | 2 +- arch/powerpc/include/asm/kfence.h | 15 +++++++++++++++ arch/powerpc/mm/book3s64/hash_utils.c | 10 +++++----- arch/powerpc/mm/book3s64/radix_pgtable.c | 6 ++++-- 6 files changed, 30 insertions(+), 11 deletions(-) diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 237c0e071e5e..81c9f895d690 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -195,7 +195,7 @@ config PPC select HAVE_ARCH_KASAN if PPC_RADIX_MMU select HAVE_ARCH_KASAN if PPC_BOOK3E_64 select HAVE_ARCH_KASAN_VMALLOC if HAVE_ARCH_KASAN - select HAVE_ARCH_KFENCE if PPC_BOOK3S_32 || PPC_8xx || 40x + select HAVE_ARCH_KFENCE if ARCH_SUPPORTS_DEBUG_PAGEALLOC select HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET select HAVE_ARCH_KGDB select HAVE_ARCH_MMAP_RND_BITS diff --git a/arch/powerpc/include/asm/book3s/64/pgalloc.h b/arch/powerpc/include/asm/book3s/64/pgalloc.h index e1af0b394ceb..dd2cff53a111 100644 --- a/arch/powerpc/include/asm/book3s/64/pgalloc.h +++ b/arch/powerpc/include/asm/book3s/64/pgalloc.h @@ -113,9 +113,11 @@ static inline void __pud_free(pud_t *pud) /* * Early pud pages allocated via memblock allocator - * can't be directly freed to slab + * can't be directly freed to slab. KFENCE pages have + * both reserved and slab flags set so need to be freed + * kmem_cache_free. 
*/ - if (PageReserved(page)) + if (PageReserved(page) && !PageSlab(page)) free_reserved_page(page); else kmem_cache_free(PGT_CACHE(PUD_CACHE_INDEX), pud); diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h index b5f8264cc980..c436d8422654 100644 --- a/arch/powerpc/include/asm/book3s/64/pgtable.h +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h @@ -1106,7 +1106,7 @@ static inline void vmemmap_remove_mapping(unsigned long start, } #endif -#ifdef CONFIG_DEBUG_PAGEALLOC +#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE) static inline void __kernel_map_pages(struct page *page, int numpages, int enable) { if (radix_enabled()) diff --git a/arch/powerpc/include/asm/kfence.h b/arch/powerpc/include/asm/kfence.h index a9846b68c6b9..6fd2b4d486c5 100644 --- a/arch/powerpc/include/asm/kfence.h +++ b/arch/powerpc/include/asm/kfence.h @@ -11,11 +11,25 @@ #include #include +#ifdef CONFIG_PPC64_ELF_ABI_V1 +#define ARCH_FUNC_PREFIX "." +#endif + static inline bool arch_kfence_init_pool(void) { return true; } +#ifdef CONFIG_PPC64 +static inline bool kfence_protect_page(unsigned long addr, bool protect) +{ + struct page *page = virt_to_page(addr); + + __kernel_map_pages(page, 1, !protect); + + return true; +} +#else static inline bool kfence_protect_page(unsigned long addr, bool protect) { pte_t *kpte = virt_to_kpte(addr); @@ -29,5 +43,6 @@ static inline bool kfence_protect_page(unsigned long addr, bool protect) return true; } +#endif #endif /* __ASM_POWERPC_KFENCE_H */ diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c index 6d985acb1d39..df008edf7be0 100644 --- a/arch/powerpc/mm/book3s64/hash_utils.c +++ b/arch/powerpc/mm/book3s64/hash_utils.c @@ -424,7 +424,7 @@ repeat: break; cond_resched(); - if (debug_pagealloc_enabled() && + if (debug_pagealloc_enabled_or_kfence() && (paddr >> PAGE_SHIFT) < linear_map_hash_count) linear_map_hash_slots[paddr >> PAGE_SHIFT] = ret | 0x80; } @@ -773,7 +773,7 @@ static void __init htab_init_page_sizes(void) bool aligned = true; init_hpte_page_sizes(); - if (!debug_pagealloc_enabled()) { + if (!debug_pagealloc_enabled_or_kfence()) { /* * Pick a size for the linear mapping. 
Currently, we only * support 16M, 1M and 4K which is the default @@ -1061,7 +1061,7 @@ static void __init htab_initialize(void) prot = pgprot_val(PAGE_KERNEL); - if (debug_pagealloc_enabled()) { + if (debug_pagealloc_enabled_or_kfence()) { linear_map_hash_count = memblock_end_of_DRAM() >> PAGE_SHIFT; linear_map_hash_slots = memblock_alloc_try_nid( linear_map_hash_count, 1, MEMBLOCK_LOW_LIMIT, @@ -1980,7 +1980,7 @@ repeat: return slot; } -#ifdef CONFIG_DEBUG_PAGEALLOC +#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE) static DEFINE_SPINLOCK(linear_map_hash_lock); static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi) @@ -2053,7 +2053,7 @@ void hash__kernel_map_pages(struct page *page, int numpages, int enable) } local_irq_restore(flags); } -#endif /* CONFIG_DEBUG_PAGEALLOC */ +#endif /* CONFIG_DEBUG_PAGEALLOC || CONFIG_KFENCE */ void hash__setup_initial_memory_limit(phys_addr_t first_memblock_base, phys_addr_t first_memblock_size) diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c index 94a05c66be95..70f1580afc3a 100644 --- a/arch/powerpc/mm/book3s64/radix_pgtable.c +++ b/arch/powerpc/mm/book3s64/radix_pgtable.c @@ -34,6 +34,8 @@ #include +#include + unsigned int mmu_base_pid; unsigned long radix_mem_block_size __ro_after_init; @@ -276,7 +278,7 @@ static int __meminit create_physical_mapping(unsigned long start, int psize; unsigned long max_mapping_size = radix_mem_block_size; - if (debug_pagealloc_enabled()) + if (debug_pagealloc_enabled_or_kfence()) max_mapping_size = PAGE_SIZE; start = ALIGN(start, PAGE_SIZE); @@ -899,7 +901,7 @@ void __meminit radix__vmemmap_remove_mapping(unsigned long start, unsigned long #endif #endif -#ifdef CONFIG_DEBUG_PAGEALLOC +#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE) void radix__kernel_map_pages(struct page *page, int numpages, int enable) { unsigned long addr; From 56adbb7a8b6cc7fc9b940829c38494e53c9e57d1 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Mon, 26 Sep 2022 15:42:59 +1000 Subject: [PATCH 159/214] powerpc/64/interrupt: Fix false warning in context tracking due to idle state Commit 171476775d32 ("context_tracking: Convert state to atomic_t") added a CONTEXT_IDLE state which can be encountered by interrupts from kernel mode in the idle thread, causing a false positive warning. Fixes: 171476775d32 ("context_tracking: Convert state to atomic_t") Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220926054305.2671436-2-npiggin@gmail.com --- arch/powerpc/include/asm/interrupt.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/powerpc/include/asm/interrupt.h b/arch/powerpc/include/asm/interrupt.h index 0d3405bd55c4..73031783bb1e 100644 --- a/arch/powerpc/include/asm/interrupt.h +++ b/arch/powerpc/include/asm/interrupt.h @@ -195,7 +195,8 @@ static inline void interrupt_enter_prepare(struct pt_regs *regs) * so avoid recursion. 
*/ if (TRAP(regs) != INTERRUPT_PROGRAM) { - CT_WARN_ON(ct_state() != CONTEXT_KERNEL); + CT_WARN_ON(ct_state() != CONTEXT_KERNEL && + ct_state() != CONTEXT_IDLE); if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) BUG_ON(is_implicit_soft_masked(regs)); } From 799f7063c7645f9a751d17f5dfd73b952f962cd2 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Mon, 26 Sep 2022 15:43:00 +1000 Subject: [PATCH 160/214] powerpc/64: mark irqs hard disabled in boot paca This prevents interrupts in early boot (e.g., program check) from enabling MSR[EE], potentially causing endian mismatch or other crashes when reporting early boot traps. Fixes: 4423eb5ae32ec ("powerpc/64/interrupt: make normal synchronous interrupts enable MSR[EE] if possible") Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220926054305.2671436-3-npiggin@gmail.com --- arch/powerpc/kernel/setup_64.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index 6434a3f6acb5..e1ac803c9046 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -182,8 +182,10 @@ static void __init fixup_boot_paca(void) get_paca()->cpu_start = 1; /* Allow percpu accesses to work until we setup percpu data */ get_paca()->data_offset = 0; - /* Mark interrupts disabled in PACA */ + /* Mark interrupts soft and hard disabled in PACA */ irq_soft_mask_set(IRQS_DISABLED); + get_paca()->irq_happened = PACA_IRQ_HARD_DIS; + WARN_ON(mfmsr() & MSR_EE); } static void __init configure_exceptions(void) From e485f6c751e0a969327336c635ca602feea117f0 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Mon, 26 Sep 2022 15:43:01 +1000 Subject: [PATCH 161/214] powerpc/64/interrupt: Fix return to masked context after hard-mask irq becomes pending If a synchronous interrupt (e.g., hash fault) is taken inside an irqs-disabled region which has MSR[EE]=1, then an asynchronous interrupt that is PACA_IRQ_MUST_HARD_MASK (e.g., PMI) is taken inside the synchronous interrupt handler, then the synchronous interrupt will return with MSR[EE]=1 and the asynchronous interrupt fires again. If the asynchronous interrupt is a PMI and the original context does not have PMIs disabled (only Linux IRQs), the asynchronous interrupt will fire despite having the PMI marked soft pending. This can confuse the perf code and cause warnings. This patch changes the interrupt return so that irqs-disabled MSR[EE]=1 contexts will be returned to with MSR[EE]=0 if a PACA_IRQ_MUST_HARD_MASK interrupt has become pending in the meantime. The longer explanation for what happens: 1. local_irq_disable() 2. Hash fault interrupt fires, do_hash_fault handler runs 3. interrupt_enter_prepare() sets IRQS_ALL_DISABLED 4. interrupt_enter_prepare() sets MSR[EE]=1 5. PMU interrupt fires, masked handler runs 6. Masked handler marks PMI pending 7. Masked handler returns with PACA_IRQ_HARD_DIS set, MSR[EE]=0 8. do_hash_fault interrupt return handler runs 9. interrupt_exit_kernel_prepare() clears PACA_IRQ_HARD_DIS 10. interrupt returns with MSR[EE]=1 11. 
PMU interrupt fires, perf handler runs Fixes: 4423eb5ae32e ("powerpc/64/interrupt: make normal synchronous interrupts enable MSR[EE] if possible") Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220926054305.2671436-4-npiggin@gmail.com --- arch/powerpc/kernel/interrupt.c | 10 --------- arch/powerpc/kernel/interrupt_64.S | 34 +++++++++++++++++++++++++++--- 2 files changed, 31 insertions(+), 13 deletions(-) diff --git a/arch/powerpc/kernel/interrupt.c b/arch/powerpc/kernel/interrupt.c index 0e75cb03244a..f9db0a172401 100644 --- a/arch/powerpc/kernel/interrupt.c +++ b/arch/powerpc/kernel/interrupt.c @@ -431,16 +431,6 @@ again: if (unlikely(stack_store)) __hard_EE_RI_disable(); - /* - * Returning to a kernel context with local irqs disabled. - * Here, if EE was enabled in the interrupted context, enable - * it on return as well. A problem exists here where a soft - * masked interrupt may have cleared MSR[EE] and set HARD_DIS - * here, and it will still exist on return to the caller. This - * will be resolved by the masked interrupt firing again. - */ - if (regs->msr & MSR_EE) - local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS; #endif /* CONFIG_PPC64 */ } diff --git a/arch/powerpc/kernel/interrupt_64.S b/arch/powerpc/kernel/interrupt_64.S index 90dbd6a8d4da..1dfa67e9199a 100644 --- a/arch/powerpc/kernel/interrupt_64.S +++ b/arch/powerpc/kernel/interrupt_64.S @@ -526,15 +526,43 @@ _ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_kernel) ld r11,SOFTE(r1) cmpwi r11,IRQS_ENABLED stb r11,PACAIRQSOFTMASK(r13) - bne 1f + beq .Linterrupt_return_\srr\()_soft_enabled + + /* + * Returning to soft-disabled context. + * Check if a MUST_HARD_MASK interrupt has become pending, in which + * case we need to disable MSR[EE] in the return context. + */ + ld r12,_MSR(r1) + andi. r10,r12,MSR_EE + beq .Lfast_kernel_interrupt_return_\srr\() // EE already disabled + lbz r11,PACAIRQHAPPENED(r13) + andi. r10,r11,PACA_IRQ_MUST_HARD_MASK + beq 1f // No HARD_MASK pending + + /* Must clear MSR_EE from _MSR */ +#ifdef CONFIG_PPC_BOOK3S + li r10,0 + /* Clear valid before changing _MSR */ + .ifc \srr,srr + stb r10,PACASRR_VALID(r13) + .else + stb r10,PACAHSRR_VALID(r13) + .endif +#endif + xori r12,r12,MSR_EE + std r12,_MSR(r1) + b .Lfast_kernel_interrupt_return_\srr\() + +.Linterrupt_return_\srr\()_soft_enabled: #ifdef CONFIG_PPC_BOOK3S lbz r11,PACAIRQHAPPENED(r13) andi. r11,r11,(~PACA_IRQ_HARD_DIS)@l bne- interrupt_return_\srr\()_kernel_restart #endif - li r11,0 - stb r11,PACAIRQHAPPENED(r13) # clear out possible HARD_DIS 1: + li r11,0 + stb r11,PACAIRQHAPPENED(r13) // clear the possible HARD_DIS .Lfast_kernel_interrupt_return_\srr\(): cmpdi cr1,r3,0 From 9524f2278f2e6925f147d9140c83f658e7a7c84f Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Mon, 26 Sep 2022 15:43:02 +1000 Subject: [PATCH 162/214] powerpc/64s: Fix irq state management in runlatch functions When irqs are soft-disabled, MSR[EE] is volatile and can change from 1 to 0 asynchronously (if a PACA_IRQ_MUST_HARD_MASK interrupt hits). So it can not be used to check hard IRQ enabled status, except to confirm it is disabled. ppc64_runlatch_on/off functions use MSR this way to decide whether to re-enable MSR[EE] after disabling it, which leads to MSR[EE] being enabled when it shouldn't be (when a PACA_IRQ_MUST_HARD_MASK had disabled it between reading the MSR and clearing EE). 
This has been tolerated in the kernel previously, and it doesn't seem to cause a problem, but it is unexpected and may trip warnings or cause other problems as we tighten up this state management. Fix this by only re-enabling if PACA_IRQ_HARD_DIS is clear. Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220926054305.2671436-5-npiggin@gmail.com --- arch/powerpc/include/asm/runlatch.h | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/arch/powerpc/include/asm/runlatch.h b/arch/powerpc/include/asm/runlatch.h index cfb390edf7d0..ceb66d761fe1 100644 --- a/arch/powerpc/include/asm/runlatch.h +++ b/arch/powerpc/include/asm/runlatch.h @@ -19,10 +19,9 @@ extern void __ppc64_runlatch_off(void); do { \ if (cpu_has_feature(CPU_FTR_CTRL) && \ test_thread_local_flags(_TLF_RUNLATCH)) { \ - unsigned long msr = mfmsr(); \ __hard_irq_disable(); \ __ppc64_runlatch_off(); \ - if (msr & MSR_EE) \ + if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS)) \ __hard_irq_enable(); \ } \ } while (0) @@ -31,10 +30,9 @@ extern void __ppc64_runlatch_off(void); do { \ if (cpu_has_feature(CPU_FTR_CTRL) && \ !test_thread_local_flags(_TLF_RUNLATCH)) { \ - unsigned long msr = mfmsr(); \ __hard_irq_disable(); \ __ppc64_runlatch_on(); \ - if (msr & MSR_EE) \ + if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS)) \ __hard_irq_enable(); \ } \ } while (0) From c39fb71a54f09977eba7584ef0eebb25047097c6 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Mon, 26 Sep 2022 15:43:03 +1000 Subject: [PATCH 163/214] powerpc/64s/interrupt: masked handler debug check for previous hard disable Prior changes eliminated cases of masked PACA_IRQ_MUST_HARD_MASK interrupts that re-fire due to MSR[EE] being enabled while they are pending. Add a debug check in the masked interrupt handler to catch if this occurs. Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220926054305.2671436-6-npiggin@gmail.com --- arch/powerpc/kernel/exceptions-64s.S | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index a3b51441b039..43e538502f52 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -2794,6 +2794,16 @@ masked_Hinterrupt: masked_interrupt: .endif stw r9,PACA_EXGEN+EX_CCR(r13) +#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG + /* + * Ensure there was no previous MUST_HARD_MASK interrupt or + * HARD_DIS setting. + */ + lbz r9,PACAIRQHAPPENED(r13) + andi. r9,r9,(PACA_IRQ_MUST_HARD_MASK|PACA_IRQ_HARD_DIS) +0: tdnei r9,0 + EMIT_BUG_ENTRY 0b,__FILE__,__LINE__,0 +#endif lbz r9,PACAIRQHAPPENED(r13) or r9,r9,r10 stb r9,PACAIRQHAPPENED(r13) From f7bff6e7759b1abb59334f6448f9ef3172c4c04a Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Mon, 26 Sep 2022 15:43:04 +1000 Subject: [PATCH 164/214] powerpc/64/interrupt: avoid BUG/WARN recursion in interrupt entry BUG/WARN are handled with a program interrupt which can turn into an infinite recursion when there are bugs in interrupt handler entry (which can be irritated by bugs in other parts of the code). There is one feeble attempt to avoid this recursion, but it misses several cases. Make a tidier macro for this and switch most bugs in the interrupt entry wrapper over to use it. 
Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220926054305.2671436-7-npiggin@gmail.com --- arch/powerpc/include/asm/interrupt.h | 33 +++++++++++++++++----------- 1 file changed, 20 insertions(+), 13 deletions(-) diff --git a/arch/powerpc/include/asm/interrupt.h b/arch/powerpc/include/asm/interrupt.h index 73031783bb1e..4745bb9998bd 100644 --- a/arch/powerpc/include/asm/interrupt.h +++ b/arch/powerpc/include/asm/interrupt.h @@ -74,6 +74,19 @@ #include #include +#ifdef CONFIG_PPC64 +/* + * WARN/BUG is handled with a program interrupt so minimise checks here to + * avoid recursion and maximise the chance of getting the first oops handled. + */ +#define INT_SOFT_MASK_BUG_ON(regs, cond) \ +do { \ + if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG) && \ + (user_mode(regs) || (TRAP(regs) != INTERRUPT_PROGRAM))) \ + BUG_ON(cond); \ +} while (0) +#endif + #ifdef CONFIG_PPC_BOOK3S_64 extern char __end_soft_masked[]; bool search_kernel_soft_mask_table(unsigned long addr); @@ -170,8 +183,7 @@ static inline void interrupt_enter_prepare(struct pt_regs *regs) * context. */ if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS)) { - if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) - BUG_ON(!(regs->msr & MSR_EE)); + INT_SOFT_MASK_BUG_ON(regs, !(regs->msr & MSR_EE)); __hard_irq_enable(); } else { __hard_RI_enable(); @@ -194,20 +206,15 @@ static inline void interrupt_enter_prepare(struct pt_regs *regs) * CT_WARN_ON comes here via program_check_exception, * so avoid recursion. */ - if (TRAP(regs) != INTERRUPT_PROGRAM) { + if (TRAP(regs) != INTERRUPT_PROGRAM) CT_WARN_ON(ct_state() != CONTEXT_KERNEL && ct_state() != CONTEXT_IDLE); - if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) - BUG_ON(is_implicit_soft_masked(regs)); - } - - /* Move this under a debugging check */ - if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG) && - arch_irq_disabled_regs(regs)) - BUG_ON(search_kernel_restart_table(regs->nip)); + INT_SOFT_MASK_BUG_ON(regs, is_implicit_soft_masked(regs)); + INT_SOFT_MASK_BUG_ON(regs, arch_irq_disabled_regs(regs) && + search_kernel_restart_table(regs->nip)); } - if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) - BUG_ON(!arch_irq_disabled_regs(regs) && !(regs->msr & MSR_EE)); + INT_SOFT_MASK_BUG_ON(regs, !arch_irq_disabled_regs(regs) && + !(regs->msr & MSR_EE)); #endif booke_restore_dbcr0(); From 1da5351f9eb9b72a7d25316b4d38bf10b6e671b1 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Mon, 26 Sep 2022 15:43:05 +1000 Subject: [PATCH 165/214] powerpc/64/irq: tidy soft-masked irq replay and improve documentation irq replay is quite complicated because of softirq processing which itself enables and disables irqs. Several considerations need to be accounted for due to this, and they are not clearly documented. Refactor the irq replay code a bit to tidy and deduplicate some common functions. Add comments, debug checks. This has a minor functional change that irq tracing enable/disable is done after each interrupt replayed, rather than after a batch. It also re-sets state to IRQS_ALL_DISABLED after an interrupt, which doesn't matter much because interrupts are hard disabled at this point, but it is more consistent with how interrupt handlers are called. 
Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220926054305.2671436-8-npiggin@gmail.com --- arch/powerpc/kernel/irq_64.c | 93 +++++++++++++++++++++++------------- 1 file changed, 61 insertions(+), 32 deletions(-) diff --git a/arch/powerpc/kernel/irq_64.c b/arch/powerpc/kernel/irq_64.c index 01645e03e9f0..eb2b380e52a0 100644 --- a/arch/powerpc/kernel/irq_64.c +++ b/arch/powerpc/kernel/irq_64.c @@ -68,6 +68,35 @@ int distribute_irqs = 1; +static inline void next_interrupt(struct pt_regs *regs) +{ + /* + * Softirq processing can enable/disable irqs, which will leave + * MSR[EE] enabled and the soft mask set to IRQS_DISABLED. Fix + * this up. + */ + if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS)) + hard_irq_disable(); + else + irq_soft_mask_set(IRQS_ALL_DISABLED); + + /* + * We are responding to the next interrupt, so interrupt-off + * latencies should be reset here. + */ + trace_hardirqs_on(); + trace_hardirqs_off(); +} + +static inline bool irq_happened_test_and_clear(u8 irq) +{ + if (local_paca->irq_happened & irq) { + local_paca->irq_happened &= ~irq; + return true; + } + return false; +} + void replay_soft_interrupts(void) { struct pt_regs regs; @@ -79,18 +108,25 @@ void replay_soft_interrupts(void) * recurse into this function. Don't keep any state across * interrupt handler calls which may change underneath us. * + * Softirqs can not be disabled over replay to stop this recursion + * because interrupts taken in idle code may require RCU softirq + * to run in the irq RCU tracking context. This is a hard problem + * to fix without changes to the softirq or idle layer. + * * We use local_paca rather than get_paca() to avoid all the * debug_smp_processor_id() business in this low level function. */ + if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) { + WARN_ON_ONCE(mfmsr() & MSR_EE); + WARN_ON(!(local_paca->irq_happened & PACA_IRQ_HARD_DIS)); + } + ppc_save_regs(®s); regs.softe = IRQS_ENABLED; regs.msr |= MSR_EE; again: - if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) - WARN_ON_ONCE(mfmsr() & MSR_EE); - /* * Force the delivery of pending soft-disabled interrupts on PS3. * Any HV call will have this side effect. @@ -105,56 +141,47 @@ again: * This is a higher priority interrupt than the others, so * replay it first. 
*/ - if (IS_ENABLED(CONFIG_PPC_BOOK3S) && (local_paca->irq_happened & PACA_IRQ_HMI)) { - local_paca->irq_happened &= ~PACA_IRQ_HMI; + if (IS_ENABLED(CONFIG_PPC_BOOK3S) && + irq_happened_test_and_clear(PACA_IRQ_HMI)) { regs.trap = INTERRUPT_HMI; handle_hmi_exception(®s); - if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS)) - hard_irq_disable(); + next_interrupt(®s); } - if (local_paca->irq_happened & PACA_IRQ_DEC) { - local_paca->irq_happened &= ~PACA_IRQ_DEC; + if (irq_happened_test_and_clear(PACA_IRQ_DEC)) { regs.trap = INTERRUPT_DECREMENTER; timer_interrupt(®s); - if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS)) - hard_irq_disable(); + next_interrupt(®s); } - if (local_paca->irq_happened & PACA_IRQ_EE) { - local_paca->irq_happened &= ~PACA_IRQ_EE; + if (irq_happened_test_and_clear(PACA_IRQ_EE)) { regs.trap = INTERRUPT_EXTERNAL; do_IRQ(®s); - if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS)) - hard_irq_disable(); + next_interrupt(®s); } - if (IS_ENABLED(CONFIG_PPC_DOORBELL) && (local_paca->irq_happened & PACA_IRQ_DBELL)) { - local_paca->irq_happened &= ~PACA_IRQ_DBELL; + if (IS_ENABLED(CONFIG_PPC_DOORBELL) && + irq_happened_test_and_clear(PACA_IRQ_DBELL)) { regs.trap = INTERRUPT_DOORBELL; doorbell_exception(®s); - if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS)) - hard_irq_disable(); + next_interrupt(®s); } /* Book3E does not support soft-masking PMI interrupts */ - if (IS_ENABLED(CONFIG_PPC_BOOK3S) && (local_paca->irq_happened & PACA_IRQ_PMI)) { - local_paca->irq_happened &= ~PACA_IRQ_PMI; + if (IS_ENABLED(CONFIG_PPC_BOOK3S) && + irq_happened_test_and_clear(PACA_IRQ_PMI)) { regs.trap = INTERRUPT_PERFMON; performance_monitor_exception(®s); - if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS)) - hard_irq_disable(); + next_interrupt(®s); } - if (local_paca->irq_happened & ~PACA_IRQ_HARD_DIS) { - /* - * We are responding to the next interrupt, so interrupt-off - * latencies should be reset here. - */ - trace_hardirqs_on(); - trace_hardirqs_off(); + /* + * Softirq processing can enable and disable interrupts, which can + * result in new irqs becoming pending. Must keep looping until we + * have cleared out all pending interrupts. + */ + if (local_paca->irq_happened & ~PACA_IRQ_HARD_DIS) goto again; - } } #if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_PPC_KUAP) @@ -270,10 +297,12 @@ happened: trace_hardirqs_off(); replay_soft_interrupts_irqrestore(); - local_paca->irq_happened = 0; trace_hardirqs_on(); irq_soft_mask_set(IRQS_ENABLED); + if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) + WARN_ON(local_paca->irq_happened != PACA_IRQ_HARD_DIS); + local_paca->irq_happened = 0; __hard_irq_enable(); preempt_enable(); } From 465dda9d320d1cb9424f1015b0520ec4c4f0d279 Mon Sep 17 00:00:00 2001 From: Haren Myneni Date: Thu, 22 Sep 2022 01:27:07 -0700 Subject: [PATCH 166/214] powerpc/pseries: Move vas_migration_handler early during migration When the migration is initiated, the hypervisor changes VAS mappings as part of pre-migration event. Then the OS gets the migration event which closes all VAS windows before the migration starts. NX generates continuous faults until windows are closed and the user space can not differentiate these NX faults coming from the actual migration. So to reduce this time window, close VAS windows first in pseries_migrate_partition(). 
Signed-off-by: Haren Myneni Reviewed-by: Nathan Lynch Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/d8efade91dda831c9ed4abb226dab627da594c5f.camel@linux.ibm.com --- arch/powerpc/platforms/pseries/mobility.c | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c index c92c78332303..634fac5db3f9 100644 --- a/arch/powerpc/platforms/pseries/mobility.c +++ b/arch/powerpc/platforms/pseries/mobility.c @@ -740,11 +740,19 @@ static int pseries_migrate_partition(u64 handle) #ifdef CONFIG_PPC_WATCHDOG factor = nmi_wd_lpm_factor; #endif + /* + * When the migration is initiated, the hypervisor changes VAS + * mappings to prepare before OS gets the notification and + * closes all VAS windows. NX generates continuous faults during + * this time and the user space can not differentiate these + * faults from the migration event. So reduce this time window + * by closing VAS windows at the beginning of this function. + */ + vas_migration_handler(VAS_SUSPEND); + ret = wait_for_vasi_session_suspending(handle); if (ret) - return ret; - - vas_migration_handler(VAS_SUSPEND); + goto out; if (factor) watchdog_nmi_set_timeout_pct(factor); @@ -765,6 +773,7 @@ static int pseries_migrate_partition(u64 handle) if (factor) watchdog_nmi_set_timeout_pct(0); +out: vas_migration_handler(VAS_RESUME); return ret; From 4b2a9315f20d98576e25c9e4572e9a8e028d7aa2 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Fri, 23 Sep 2022 13:30:04 +1000 Subject: [PATCH 167/214] powerpc/64s: POWER10 CPU Kconfig build option This adds basic POWER10_CPU option, which builds with -mcpu=power10. Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220923033004.536127-1-npiggin@gmail.com --- arch/powerpc/Makefile | 7 ++++++- arch/powerpc/platforms/Kconfig.cputype | 6 ++++++ 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile index aeabcc945110..19470d29de16 100644 --- a/arch/powerpc/Makefile +++ b/arch/powerpc/Makefile @@ -192,9 +192,14 @@ ifdef CONFIG_476FPE_ERR46 -T $(srctree)/arch/powerpc/platforms/44x/ppc476_modules.lds endif -# No AltiVec or VSX instructions when building kernel +# No prefix or pcrel +KBUILD_CFLAGS += $(call cc-option,-mno-prefixed) +KBUILD_CFLAGS += $(call cc-option,-mno-pcrel) + +# No AltiVec or VSX or MMA instructions when building kernel KBUILD_CFLAGS += $(call cc-option,-mno-altivec) KBUILD_CFLAGS += $(call cc-option,-mno-vsx) +KBUILD_CFLAGS += $(call cc-option,-mno-mma) # No SPE instruction when building kernel # (We use all available options to help semi-broken compilers) diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype index 8333c558d932..0c4eed9aea80 100644 --- a/arch/powerpc/platforms/Kconfig.cputype +++ b/arch/powerpc/platforms/Kconfig.cputype @@ -172,6 +172,11 @@ config POWER9_CPU depends on PPC_BOOK3S_64 select ARCH_HAS_FAST_MULTIPLIER +config POWER10_CPU + bool "POWER10" + depends on PPC_BOOK3S_64 + select ARCH_HAS_FAST_MULTIPLIER + config E5500_CPU bool "Freescale e5500" depends on PPC64 && PPC_E500 @@ -240,6 +245,7 @@ config TARGET_CPU default "power7" if POWER7_CPU default "power8" if POWER8_CPU default "power9" if POWER9_CPU + default "power10" if POWER10_CPU default "405" if 405_CPU default "440" if 440_CPU default "464" if 464_CPU From 17773afdcd1589c5925a984f512330410cb2ba4f Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: 
Mon, 26 Sep 2022 13:40:53 +1000 Subject: [PATCH 168/214] powerpc/64: use 32-bit immediate for STACK_FRAME_REGS_MARKER Using a 32-bit constant for this marker allows it to be loaded with two ALU instructions, like 32-bit. This avoids a TOC entry and a TOC load that depends on the r2 value that has just been loaded from the PACA. This changes the value for 32-bit as well, so both have the same value in the low 4 bytes and 64-bit has 0 in the top bytes. Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220926034057.2360083-2-npiggin@gmail.com --- arch/powerpc/include/asm/ptrace.h | 4 ++-- arch/powerpc/kernel/entry_32.S | 6 +++--- arch/powerpc/kernel/exceptions-64e.S | 8 +------- arch/powerpc/kernel/exceptions-64s.S | 2 +- arch/powerpc/kernel/head_64.S | 7 ------- arch/powerpc/kernel/interrupt_64.S | 6 +++--- 6 files changed, 10 insertions(+), 23 deletions(-) diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h index a03403695cd4..5b496e589d54 100644 --- a/arch/powerpc/include/asm/ptrace.h +++ b/arch/powerpc/include/asm/ptrace.h @@ -99,6 +99,8 @@ struct pt_regs #define STACK_FRAME_WITH_PT_REGS (STACK_FRAME_OVERHEAD + sizeof(struct pt_regs)) +#define STACK_FRAME_REGS_MARKER ASM_CONST(0x72656773) + #ifdef __powerpc64__ /* @@ -115,7 +117,6 @@ struct pt_regs #define STACK_FRAME_OVERHEAD 112 /* size of minimum stack frame */ #define STACK_FRAME_LR_SAVE 2 /* Location of LR in stack frame */ -#define STACK_FRAME_REGS_MARKER ASM_CONST(0x7265677368657265) #define STACK_INT_FRAME_SIZE (sizeof(struct pt_regs) + \ STACK_FRAME_OVERHEAD + KERNEL_REDZONE_SIZE) #define STACK_FRAME_MARKER 12 @@ -136,7 +137,6 @@ struct pt_regs #define KERNEL_REDZONE_SIZE 0 #define STACK_FRAME_OVERHEAD 16 /* size of minimum stack frame */ #define STACK_FRAME_LR_SAVE 1 /* Location of LR in stack frame */ -#define STACK_FRAME_REGS_MARKER ASM_CONST(0x72656773) #define STACK_INT_FRAME_SIZE (sizeof(struct pt_regs) + STACK_FRAME_OVERHEAD) #define STACK_FRAME_MARKER 2 #define STACK_FRAME_MIN_SIZE STACK_FRAME_OVERHEAD diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S index 55fcc70f2cc0..3fc7c9886bb7 100644 --- a/arch/powerpc/kernel/entry_32.S +++ b/arch/powerpc/kernel/entry_32.S @@ -263,7 +263,7 @@ fast_exception_return: mtcr r10 lwz r10,_LINK(r11) mtlr r10 - /* Clear the exception_marker on the stack to avoid confusing stacktrace */ + /* Clear the exception marker on the stack to avoid confusing stacktrace */ li r10, 0 stw r10, 8(r11) REST_GPR(10, r11) @@ -320,7 +320,7 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS) li r0,0 /* - * Leaving a stale exception_marker on the stack can confuse + * Leaving a stale exception marker on the stack can confuse * the reliable stack unwinder later on. Clear it. */ stw r0,8(r1) @@ -372,7 +372,7 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS) mtspr SPRN_XER,r5 /* - * Leaving a stale exception_marker on the stack can confuse + * Leaving a stale exception marker on the stack can confuse * the reliable stack unwinder later on. Clear it. 
*/ stw r0,8(r1) diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S index d6b7aa0229be..478127153529 100644 --- a/arch/powerpc/kernel/exceptions-64e.S +++ b/arch/powerpc/kernel/exceptions-64e.S @@ -378,7 +378,7 @@ exc_##n##_common: \ ld r9,excf+EX_R1(r13); /* load orig r1 back from PACA */ \ lwz r10,excf+EX_CR(r13); /* load orig CR back from PACA */ \ lbz r11,PACAIRQSOFTMASK(r13); /* get current IRQ softe */ \ - ld r12,exception_marker@toc(r2); \ + LOAD_REG_IMMEDIATE(r12, STACK_FRAME_REGS_MARKER); \ ZEROIZE_GPR(0); \ std r3,GPR10(r1); /* save r10 to stackframe */ \ std r4,GPR11(r1); /* save r11 to stackframe */ \ @@ -459,12 +459,6 @@ exc_##n##_bad_stack: \ bl hdlr; \ b interrupt_return -/* This value is used to mark exception frames on the stack. */ - .section ".toc","aw" -exception_marker: - .tc ID_EXC_MARKER[TC],STACK_FRAME_REGS_MARKER - - /* * And here we have the exception vectors ! */ diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index 43e538502f52..3b56fc689a04 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -589,7 +589,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR) li r9,IVEC std r9,_TRAP(r1) /* set trap number */ li r10,0 - ld r11,exception_marker@toc(r2) + LOAD_REG_IMMEDIATE(r11, STACK_FRAME_REGS_MARKER) std r10,RESULT(r1) /* clear regs->result */ std r11,STACK_FRAME_OVERHEAD-16(r1) /* mark the frame */ .endm diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S index da544ea0ce01..c79da95dfd2e 100644 --- a/arch/powerpc/kernel/head_64.S +++ b/arch/powerpc/kernel/head_64.S @@ -192,13 +192,6 @@ __secondary_hold: #endif CLOSE_FIXED_SECTION(first_256B) -/* This value is used to mark exception frames on the stack. */ - .section ".toc","aw" -/* This value is used to mark exception frames on the stack. */ -exception_marker: - .tc ID_EXC_MARKER[TC],STACK_FRAME_REGS_MARKER - .previous - /* * On server, we include the exception vectors code here as it * relies on absolute addressing which is only possible within diff --git a/arch/powerpc/kernel/interrupt_64.S b/arch/powerpc/kernel/interrupt_64.S index 1dfa67e9199a..f02c55930b5c 100644 --- a/arch/powerpc/kernel/interrupt_64.S +++ b/arch/powerpc/kernel/interrupt_64.S @@ -80,7 +80,7 @@ _ASM_NOKPROBE_SYMBOL(system_call_vectored_\name) /* Calling convention has r3 = regs, r4 = orig r0 */ addi r3,r1,STACK_FRAME_OVERHEAD mr r4,r0 - ld r11,exception_marker@toc(r2) + LOAD_REG_IMMEDIATE(r11, STACK_FRAME_REGS_MARKER) std r11,-16(r3) /* "regshere" marker */ BEGIN_FTR_SECTION @@ -253,7 +253,7 @@ END_BTB_FLUSH_SECTION /* Calling convention has r3 = regs, r4 = orig r0 */ addi r3,r1,STACK_FRAME_OVERHEAD mr r4,r0 - ld r11,exception_marker@toc(r2) + LOAD_REG_IMMEDIATE(r11, STACK_FRAME_REGS_MARKER) std r11,-16(r3) /* "regshere" marker */ #ifdef CONFIG_PPC_BOOK3S @@ -614,7 +614,7 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS) mtspr SPRN_XER,r5 /* - * Leaving a stale exception_marker on the stack can confuse + * Leaving a stale STACK_FRAME_REGS_MARKER on the stack can confuse * the reliable stack unwinder later on. Clear it. */ std r0,STACK_FRAME_OVERHEAD-16(r1) From dab3b8f4fd09c22e8dbb2d9608194c7d52252f33 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Mon, 26 Sep 2022 13:40:54 +1000 Subject: [PATCH 169/214] powerpc/64: asm use consistent global variable declaration and access Use helper macros to access global variables, and place them in .data sections rather than in .toc. 
Putting addresses in TOC is not required because the kernel is linked with a single TOC. Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220926034057.2360083-3-npiggin@gmail.com --- arch/powerpc/boot/opal-calls.S | 6 +++--- arch/powerpc/boot/ppc_asm.h | 9 +++++++++ arch/powerpc/kernel/swsusp_asm64.S | 16 +++++----------- arch/powerpc/kernel/trace/ftrace_mprofile.S | 3 +-- arch/powerpc/kernel/vector.S | 15 +++++++-------- arch/powerpc/lib/copypage_64.S | 7 +------ arch/powerpc/lib/string_64.S | 7 +------ arch/powerpc/perf/bhrb.S | 2 +- arch/powerpc/xmon/spr_access.S | 4 ++-- 9 files changed, 30 insertions(+), 39 deletions(-) diff --git a/arch/powerpc/boot/opal-calls.S b/arch/powerpc/boot/opal-calls.S index ad0e15d930c4..1f2f330a459e 100644 --- a/arch/powerpc/boot/opal-calls.S +++ b/arch/powerpc/boot/opal-calls.S @@ -16,7 +16,7 @@ opal_kentry: li r5, 0 li r6, 0 li r7, 0 - ld r11,opal@got(r2) + LOAD_REG_ADDR(r11, opal) ld r8,0(r11) ld r9,8(r11) bctr @@ -35,7 +35,7 @@ opal_call: mr r13,r2 /* Set opal return address */ - ld r11,opal_return@got(r2) + LOAD_REG_ADDR(r11, opal_return) mtlr r11 mfmsr r12 @@ -45,7 +45,7 @@ opal_call: mtspr SPRN_HSRR1,r12 /* load the opal call entry point and base */ - ld r11,opal@got(r2) + LOAD_REG_ADDR(r11, opal) ld r12,8(r11) ld r2,0(r11) mtspr SPRN_HSRR0,r12 diff --git a/arch/powerpc/boot/ppc_asm.h b/arch/powerpc/boot/ppc_asm.h index 192b97523b05..2824a3e32aab 100644 --- a/arch/powerpc/boot/ppc_asm.h +++ b/arch/powerpc/boot/ppc_asm.h @@ -84,4 +84,13 @@ #define MFTBU(dest) mfspr dest, SPRN_TBRU #endif +#ifdef CONFIG_PPC64_BOOT_WRAPPER +#define LOAD_REG_ADDR(reg,name) \ + ld reg,name@got(r2) +#else +#define LOAD_REG_ADDR(reg,name) \ + lis reg,name@ha; \ + addi reg,reg,name@l +#endif + #endif /* _PPC64_PPC_ASM_H */ diff --git a/arch/powerpc/kernel/swsusp_asm64.S b/arch/powerpc/kernel/swsusp_asm64.S index 9f1903c7f540..f645652c2654 100644 --- a/arch/powerpc/kernel/swsusp_asm64.S +++ b/arch/powerpc/kernel/swsusp_asm64.S @@ -76,16 +76,10 @@ swsusp_save_area: .space SL_SIZE - .section ".toc","aw" -swsusp_save_area_ptr: - .tc swsusp_save_area[TC],swsusp_save_area -restore_pblist_ptr: - .tc restore_pblist[TC],restore_pblist - .section .text .align 5 _GLOBAL(swsusp_arch_suspend) - ld r11,swsusp_save_area_ptr@toc(r2) + LOAD_REG_ADDR(r11, swsusp_save_area) SAVE_SPECIAL(LR) SAVE_REGISTER(r1) SAVE_SPECIAL(CR) @@ -131,7 +125,7 @@ END_FW_FTR_SECTION_IFCLR(FW_FEATURE_LPAR) bl swsusp_save /* restore LR */ - ld r11,swsusp_save_area_ptr@toc(r2) + LOAD_REG_ADDR(r11, swsusp_save_area) RESTORE_SPECIAL(LR) addi r1,r1,128 @@ -145,7 +139,7 @@ BEGIN_FTR_SECTION END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) sync - ld r12,restore_pblist_ptr@toc(r2) + LOAD_REG_ADDR(r11, restore_pblist) ld r12,0(r12) cmpdi r12,0 @@ -187,7 +181,7 @@ nothing_to_copy: tlbia #endif - ld r11,swsusp_save_area_ptr@toc(r2) + LOAD_REG_ADDR(r11, swsusp_save_area) RESTORE_SPECIAL(CR) @@ -265,7 +259,7 @@ END_FW_FTR_SECTION_IFCLR(FW_FEATURE_LPAR) bl do_after_copyback addi r1,r1,128 - ld r11,swsusp_save_area_ptr@toc(r2) + LOAD_REG_ADDR(r11, swsusp_save_area) RESTORE_SPECIAL(LR) li r3, 0 diff --git a/arch/powerpc/kernel/trace/ftrace_mprofile.S b/arch/powerpc/kernel/trace/ftrace_mprofile.S index 4fa23e260cab..33fcfb2eaded 100644 --- a/arch/powerpc/kernel/trace/ftrace_mprofile.S +++ b/arch/powerpc/kernel/trace/ftrace_mprofile.S @@ -85,8 +85,7 @@ std r2, STK_GOT(r1) ld r2,PACATOC(r13) /* get kernel TOC in r2 */ - addis r3,r2,function_trace_op@toc@ha - addi r3,r3,function_trace_op@toc@l + 
LOAD_REG_ADDR(r3, function_trace_op) ld r5,0(r3) #else lis r3,function_trace_op@ha diff --git a/arch/powerpc/kernel/vector.S b/arch/powerpc/kernel/vector.S index 5cc24d8cce94..5cf64740edb8 100644 --- a/arch/powerpc/kernel/vector.S +++ b/arch/powerpc/kernel/vector.S @@ -155,8 +155,8 @@ _GLOBAL(load_up_vsx) * usage of floating-point registers. These routines must be called * with preempt disabled. */ -#ifdef CONFIG_PPC32 .data +#ifdef CONFIG_PPC32 fpzero: .long 0 fpone: @@ -169,18 +169,17 @@ fphalf: lfs fr,name@l(r11) #else - .section ".toc","aw" fpzero: - .tc FD_0_0[TC],0 + .quad 0 fpone: - .tc FD_3ff00000_0[TC],0x3ff0000000000000 /* 1.0 */ + .quad 0x3ff0000000000000 /* 1.0 */ fphalf: - .tc FD_3fe00000_0[TC],0x3fe0000000000000 /* 0.5 */ + .quad 0x3fe0000000000000 /* 0.5 */ -#define LDCONST(fr, name) \ - lfd fr,name@toc(r2) +#define LDCONST(fr, name) \ + addis r11,r2,name@toc@ha; \ + lfd fr,name@toc@l(r11) #endif - .text /* * Internal routine to enable floating point and set FPSCR to 0. diff --git a/arch/powerpc/lib/copypage_64.S b/arch/powerpc/lib/copypage_64.S index d1091b5ee5da..6812cb19d04a 100644 --- a/arch/powerpc/lib/copypage_64.S +++ b/arch/powerpc/lib/copypage_64.S @@ -9,11 +9,6 @@ #include #include - .section ".toc","aw" -PPC64_CACHES: - .tc ppc64_caches[TC],ppc64_caches - .section ".text" - _GLOBAL_TOC(copy_page) BEGIN_FTR_SECTION lis r5,PAGE_SIZE@h @@ -24,7 +19,7 @@ FTR_SECTION_ELSE ALT_FTR_SECTION_END_IFCLR(CPU_FTR_VMX_COPY) ori r5,r5,PAGE_SIZE@l BEGIN_FTR_SECTION - ld r10,PPC64_CACHES@toc(r2) + LOAD_REG_ADDR(r10, ppc64_caches) lwz r11,DCACHEL1LOGBLOCKSIZE(r10) /* log2 of cache block size */ lwz r12,DCACHEL1BLOCKSIZE(r10) /* get cache block size */ li r9,0 diff --git a/arch/powerpc/lib/string_64.S b/arch/powerpc/lib/string_64.S index 169872bc0892..df41ce06f86b 100644 --- a/arch/powerpc/lib/string_64.S +++ b/arch/powerpc/lib/string_64.S @@ -11,11 +11,6 @@ #include #include - .section ".toc","aw" -PPC64_CACHES: - .tc ppc64_caches[TC],ppc64_caches - .section ".text" - /** * __arch_clear_user: - Zero a block of memory in user space, with less checking. * @to: Destination address, in user space. @@ -133,7 +128,7 @@ err1; stb r0,0(r3) blr .Llong_clear: - ld r5,PPC64_CACHES@toc(r2) + LOAD_REG_ADDR(r5, ppc64_caches) bf cr7*4+0,11f err2; std r0,0(r3) diff --git a/arch/powerpc/perf/bhrb.S b/arch/powerpc/perf/bhrb.S index 1aa3259716b8..47ba05d5ae76 100644 --- a/arch/powerpc/perf/bhrb.S +++ b/arch/powerpc/perf/bhrb.S @@ -21,7 +21,7 @@ _GLOBAL(read_bhrb) cmpldi r3,31 bgt 1f - ld r4,bhrb_table@got(r2) + LOAD_REG_ADDR(r4, bhrb_table) sldi r3,r3,3 add r3,r4,r3 mtctr r3 diff --git a/arch/powerpc/xmon/spr_access.S b/arch/powerpc/xmon/spr_access.S index 720a52afdd58..c308ddf268fb 100644 --- a/arch/powerpc/xmon/spr_access.S +++ b/arch/powerpc/xmon/spr_access.S @@ -4,12 +4,12 @@ /* unsigned long xmon_mfspr(sprn, default_value) */ _GLOBAL(xmon_mfspr) - PPC_LL r5, .Lmfspr_table@got(r2) + LOAD_REG_ADDR(r5, .Lmfspr_table) b xmon_mxspr /* void xmon_mtspr(sprn, new_value) */ _GLOBAL(xmon_mtspr) - PPC_LL r5, .Lmtspr_table@got(r2) + LOAD_REG_ADDR(r5, .Lmtspr_table) b xmon_mxspr /* From 754f611774e4b9357a944f5b703dd291c85161cf Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Mon, 26 Sep 2022 13:40:55 +1000 Subject: [PATCH 170/214] powerpc/64: switch asm helpers from GOT to TOC relative addressing There is no need to use GOT addressing within the kernel. 
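The difference is visible in ordinary compiler output as well: a GOT reference loads the symbol's address from a table in memory, while a TOC-relative reference computes it with an addis/addi pair off r2. A tiny sketch (not from the patch) that can be inspected with gcc -O2 -S on ppc64le, where recent GCC typically emits the @toc@ha/@toc@l pair for a locally defined global rather than a @got load:

  /* toc_addr.c - build with: gcc -O2 -S toc_addr.c (on ppc64le) */
  long counter;

  long *counter_address(void)
  {
          /*
           * Typically compiled as:
           *   addis r3,r2,counter@toc@ha
           *   addi  r3,r3,counter@toc@l
           * i.e. pure address arithmetic off r2, no load through the GOT.
           */
          return &counter;
  }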
Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220926034057.2360083-4-npiggin@gmail.com --- arch/powerpc/boot/ppc_asm.h | 3 ++- arch/powerpc/include/asm/ppc_asm.h | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/boot/ppc_asm.h b/arch/powerpc/boot/ppc_asm.h index 2824a3e32aab..a66cfd76fa4d 100644 --- a/arch/powerpc/boot/ppc_asm.h +++ b/arch/powerpc/boot/ppc_asm.h @@ -86,7 +86,8 @@ #ifdef CONFIG_PPC64_BOOT_WRAPPER #define LOAD_REG_ADDR(reg,name) \ - ld reg,name@got(r2) + addis reg,r2,name@toc@ha; \ + addi reg,reg,name@toc@l #else #define LOAD_REG_ADDR(reg,name) \ lis reg,name@ha; \ diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h index eeb7dc8cd45f..9972f3e90c9d 100644 --- a/arch/powerpc/include/asm/ppc_asm.h +++ b/arch/powerpc/include/asm/ppc_asm.h @@ -337,7 +337,8 @@ n: rldimi reg, tmp, 32, 0 #define LOAD_REG_ADDR(reg,name) \ - ld reg,name@got(r2) + addis reg,r2,name@toc@ha; \ + addi reg,reg,name@toc@l #define LOAD_REG_ADDRBASE(reg,name) LOAD_REG_ADDR(reg,name) #define ADDROFF(name) 0 From 8e93fb33c84f68db20c0bc2821334a4c54c3e251 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Mon, 26 Sep 2022 13:40:56 +1000 Subject: [PATCH 171/214] powerpc/64: provide a helper macro to load r2 with the kernel TOC A later change stops the kernel using r2 and loads it with a poison value. Provide a PACATOC loading abstraction which can hide this detail. Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220926034057.2360083-5-npiggin@gmail.com --- arch/powerpc/include/asm/ppc_asm.h | 6 ++++++ arch/powerpc/kernel/exceptions-64e.S | 12 ++++++------ arch/powerpc/kernel/exceptions-64s.S | 6 +++--- arch/powerpc/kernel/head_64.S | 4 ++-- arch/powerpc/kernel/interrupt_64.S | 12 ++++++------ arch/powerpc/kernel/optprobes_head.S | 2 +- arch/powerpc/kernel/trace/ftrace_low.S | 2 +- arch/powerpc/kernel/trace/ftrace_mprofile.S | 3 +-- arch/powerpc/kvm/book3s_64_entry.S | 2 +- arch/powerpc/kvm/book3s_hv_rmhandlers.S | 4 ++-- arch/powerpc/kvm/tm.S | 2 +- arch/powerpc/mm/nohash/tlb_low_64e.S | 2 +- arch/powerpc/platforms/powernv/opal-wrappers.S | 2 +- 13 files changed, 32 insertions(+), 27 deletions(-) diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h index 9972f3e90c9d..cf6bec9770d6 100644 --- a/arch/powerpc/include/asm/ppc_asm.h +++ b/arch/powerpc/include/asm/ppc_asm.h @@ -327,6 +327,12 @@ n: #ifdef __powerpc64__ +#define __LOAD_PACA_TOC(reg) \ + ld reg,PACATOC(r13) + +#define LOAD_PACA_TOC() \ + __LOAD_PACA_TOC(r2) + #define LOAD_REG_IMMEDIATE(reg, expr) __LOAD_REG_IMMEDIATE reg, expr #define LOAD_REG_IMMEDIATE_SYM(reg, tmp, expr) \ diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S index 478127153529..ece1bd2a4a39 100644 --- a/arch/powerpc/kernel/exceptions-64e.S +++ b/arch/powerpc/kernel/exceptions-64e.S @@ -371,7 +371,7 @@ exc_##n##_common: \ ld r4,excf+EX_R11(r13); /* get back r11 */ \ mfspr r5,scratch; /* get back r13 */ \ SAVE_GPR(12, r1); /* save r12 in stackframe */ \ - ld r2,PACATOC(r13); /* get kernel TOC into r2 */ \ + LOAD_PACA_TOC(); /* get kernel TOC into r2 */ \ mflr r6; /* save LR in stackframe */ \ mfctr r7; /* save CTR in stackframe */ \ mfspr r8,SPRN_XER; /* save XER in stackframe */ \ @@ -687,7 +687,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) beq+ 1f #ifdef CONFIG_RELOCATABLE - ld r15,PACATOC(r13) + __LOAD_PACA_TOC(r15) ld r14,interrupt_base_book3e@got(r15) ld 
r15,__end_interrupts@got(r15) cmpld cr0,r10,r14 @@ -758,7 +758,7 @@ kernel_dbg_exc: beq+ 1f #ifdef CONFIG_RELOCATABLE - ld r15,PACATOC(r13) + __LOAD_PACA_TOC(r15) ld r14,interrupt_base_book3e@got(r15) ld r15,__end_interrupts@got(r15) cmpld cr0,r10,r14 @@ -883,7 +883,7 @@ kernel_dbg_exc: .macro SEARCH_RESTART_TABLE #ifdef CONFIG_RELOCATABLE - ld r11,PACATOC(r13) + __LOAD_PACA_TOC(r11) ld r14,__start___restart_table@got(r11) ld r15,__stop___restart_table@got(r11) #else @@ -1061,7 +1061,7 @@ bad_stack_book3e: std r11,0(r1) ZEROIZE_GPR(12) std r12,0(r11) - ld r2,PACATOC(r13) + LOAD_PACA_TOC() 1: addi r3,r1,STACK_FRAME_OVERHEAD bl kernel_bad_stack b 1b @@ -1302,7 +1302,7 @@ a2_tlbinit_after_linear_map: /* Now we branch the new virtual address mapped by this entry */ #ifdef CONFIG_RELOCATABLE - ld r5,PACATOC(r13) + __LOAD_PACA_TOC(r5) ld r3,1f@got(r5) #else LOAD_REG_IMMEDIATE_SYM(r3, r5, 1f) diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index 3b56fc689a04..5956ad47a8a0 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -580,7 +580,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR) std r2,GPR2(r1) /* save r2 in stackframe */ SAVE_GPRS(3, 8, r1) /* save r3 - r8 in stackframe */ mflr r9 /* Get LR, later save to stack */ - ld r2,PACATOC(r13) /* get kernel TOC into r2 */ + LOAD_PACA_TOC() /* get kernel TOC into r2 */ std r9,_LINK(r1) lbz r10,PACAIRQSOFTMASK(r13) mfspr r11,SPRN_XER /* save XER in stackframe */ @@ -610,7 +610,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR) .macro SEARCH_RESTART_TABLE #ifdef CONFIG_RELOCATABLE mr r12,r2 - ld r2,PACATOC(r13) + LOAD_PACA_TOC() LOAD_REG_ADDR(r9, __start___restart_table) LOAD_REG_ADDR(r10, __stop___restart_table) mr r2,r12 @@ -640,7 +640,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR) .macro SEARCH_SOFT_MASK_TABLE #ifdef CONFIG_RELOCATABLE mr r12,r2 - ld r2,PACATOC(r13) + LOAD_PACA_TOC() LOAD_REG_ADDR(r9, __start___soft_mask_table) LOAD_REG_ADDR(r10, __stop___soft_mask_table) mr r2,r12 diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S index c79da95dfd2e..c63e0c086f03 100644 --- a/arch/powerpc/kernel/head_64.S +++ b/arch/powerpc/kernel/head_64.S @@ -841,7 +841,7 @@ __secondary_start: * before going into C code. 
*/ start_secondary_prolog: - ld r2,PACATOC(r13) + LOAD_PACA_TOC() li r3,0 std r3,0(r1) /* Zero the stack frame pointer */ bl start_secondary @@ -981,7 +981,7 @@ start_here_common: std r1,PACAKSAVE(r13) /* Load the TOC (virtual address) */ - ld r2,PACATOC(r13) + LOAD_PACA_TOC() /* Mark interrupts soft and hard disabled (they might be enabled * in the PACA when doing hotplug) diff --git a/arch/powerpc/kernel/interrupt_64.S b/arch/powerpc/kernel/interrupt_64.S index f02c55930b5c..904a5608cbe3 100644 --- a/arch/powerpc/kernel/interrupt_64.S +++ b/arch/powerpc/kernel/interrupt_64.S @@ -57,7 +57,7 @@ _ASM_NOKPROBE_SYMBOL(system_call_vectored_\name) std r0,GPR0(r1) std r10,GPR1(r1) std r2,GPR2(r1) - ld r2,PACATOC(r13) + LOAD_PACA_TOC() mfcr r12 li r11,0 /* Save syscall parameters in r3-r8 */ @@ -174,7 +174,7 @@ syscall_vectored_\name\()_restart: _ASM_NOKPROBE_SYMBOL(syscall_vectored_\name\()_restart) GET_PACA(r13) ld r1,PACA_EXIT_SAVE_R1(r13) - ld r2,PACATOC(r13) + LOAD_PACA_TOC() ld r3,RESULT(r1) addi r4,r1,STACK_FRAME_OVERHEAD li r11,IRQS_ALL_DISABLED @@ -224,7 +224,7 @@ START_BTB_FLUSH_SECTION BTB_FLUSH(r10) END_BTB_FLUSH_SECTION #endif - ld r2,PACATOC(r13) + LOAD_PACA_TOC() mfcr r12 li r11,0 /* Save syscall parameters in r3-r8 */ @@ -355,7 +355,7 @@ syscall_restart: _ASM_NOKPROBE_SYMBOL(syscall_restart) GET_PACA(r13) ld r1,PACA_EXIT_SAVE_R1(r13) - ld r2,PACATOC(r13) + LOAD_PACA_TOC() ld r3,RESULT(r1) addi r4,r1,STACK_FRAME_OVERHEAD li r11,IRQS_ALL_DISABLED @@ -502,7 +502,7 @@ interrupt_return_\srr\()_user_restart: _ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_user_restart) GET_PACA(r13) ld r1,PACA_EXIT_SAVE_R1(r13) - ld r2,PACATOC(r13) + LOAD_PACA_TOC() addi r3,r1,STACK_FRAME_OVERHEAD li r11,IRQS_ALL_DISABLED stb r11,PACAIRQSOFTMASK(r13) @@ -663,7 +663,7 @@ interrupt_return_\srr\()_kernel_restart: _ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_kernel_restart) GET_PACA(r13) ld r1,PACA_EXIT_SAVE_R1(r13) - ld r2,PACATOC(r13) + LOAD_PACA_TOC() addi r3,r1,STACK_FRAME_OVERHEAD li r11,IRQS_ALL_DISABLED stb r11,PACAIRQSOFTMASK(r13) diff --git a/arch/powerpc/kernel/optprobes_head.S b/arch/powerpc/kernel/optprobes_head.S index 5c7f0b4b784b..cd4e7bc32609 100644 --- a/arch/powerpc/kernel/optprobes_head.S +++ b/arch/powerpc/kernel/optprobes_head.S @@ -73,7 +73,7 @@ optprobe_template_entry: * further below. */ #ifdef CONFIG_PPC64 - ld r2,PACATOC(r13) + LOAD_PACA_TOC() #endif .global optprobe_template_op_address diff --git a/arch/powerpc/kernel/trace/ftrace_low.S b/arch/powerpc/kernel/trace/ftrace_low.S index 0bddf1fa6636..294d1e05958a 100644 --- a/arch/powerpc/kernel/trace/ftrace_low.S +++ b/arch/powerpc/kernel/trace/ftrace_low.S @@ -48,7 +48,7 @@ _GLOBAL(return_to_handler) * We might be called from a module. * Switch to our TOC to run inside the core kernel. 
*/ - ld r2, PACATOC(r13) + LOAD_PACA_TOC() #else stwu r1, -16(r1) stw r3, 8(r1) diff --git a/arch/powerpc/kernel/trace/ftrace_mprofile.S b/arch/powerpc/kernel/trace/ftrace_mprofile.S index 33fcfb2eaded..d031093bc436 100644 --- a/arch/powerpc/kernel/trace/ftrace_mprofile.S +++ b/arch/powerpc/kernel/trace/ftrace_mprofile.S @@ -83,8 +83,7 @@ #ifdef CONFIG_PPC64 /* Save callee's TOC in the ABI compliant location */ std r2, STK_GOT(r1) - ld r2,PACATOC(r13) /* get kernel TOC in r2 */ - + LOAD_PACA_TOC() /* get kernel TOC in r2 */ LOAD_REG_ADDR(r3, function_trace_op) ld r5,0(r3) #else diff --git a/arch/powerpc/kvm/book3s_64_entry.S b/arch/powerpc/kvm/book3s_64_entry.S index e43704547a1e..6c2b1d17cb63 100644 --- a/arch/powerpc/kvm/book3s_64_entry.S +++ b/arch/powerpc/kvm/book3s_64_entry.S @@ -315,7 +315,7 @@ kvmppc_p9_exit_interrupt: reg = reg + 1 .endr - ld r2,PACATOC(r13) + LOAD_PACA_TOC() mflr r4 std r4,VCPU_LR(r3) diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index 7ded202bf995..c984021e62c8 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -1024,7 +1024,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) /* Restore R1/R2 so we can handle faults */ ld r1, HSTATE_HOST_R1(r13) - ld r2, PACATOC(r13) + LOAD_PACA_TOC() mfspr r10, SPRN_SRR0 mfspr r11, SPRN_SRR1 @@ -2727,7 +2727,7 @@ kvmppc_bad_host_intr: std r4, _CTR(r1) std r5, _XER(r1) std r6, SOFTE(r1) - ld r2, PACATOC(r13) + LOAD_PACA_TOC() LOAD_REG_IMMEDIATE(3, 0x7265677368657265) std r3, STACK_FRAME_OVERHEAD-16(r1) diff --git a/arch/powerpc/kvm/tm.S b/arch/powerpc/kvm/tm.S index 3bf17c854be4..2158f61e317f 100644 --- a/arch/powerpc/kvm/tm.S +++ b/arch/powerpc/kvm/tm.S @@ -110,7 +110,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_HV_ASSIST) mtmsrd r2, 1 /* Reload TOC pointer. */ - ld r2, PACATOC(r13) + LOAD_PACA_TOC() /* Save all but r0-r2, r9 & r13 */ reg = 3 diff --git a/arch/powerpc/mm/nohash/tlb_low_64e.S b/arch/powerpc/mm/nohash/tlb_low_64e.S index be26f33a6ac0..b3b3dfeec8f5 100644 --- a/arch/powerpc/mm/nohash/tlb_low_64e.S +++ b/arch/powerpc/mm/nohash/tlb_low_64e.S @@ -1118,7 +1118,7 @@ tlb_load_linear: * we only use 1G pages for now. That might have to be changed in a * final implementation, especially when dealing with hypervisors */ - ld r11,PACATOC(r13) + __LOAD_PACA_TOC(r11) ld r11,linear_map_top@got(r11) ld r10,0(r11) tovirt(10,10) diff --git a/arch/powerpc/platforms/powernv/opal-wrappers.S b/arch/powerpc/platforms/powernv/opal-wrappers.S index e5acc33b3b20..0ed95f753416 100644 --- a/arch/powerpc/platforms/powernv/opal-wrappers.S +++ b/arch/powerpc/platforms/powernv/opal-wrappers.S @@ -57,7 +57,7 @@ opal_return: .long 0xa64b7b7d /* mthsrr1 r11 */ .long 0x2402004c /* hrfid */ #endif - ld r2,PACATOC(r13) + LOAD_PACA_TOC() ld r0,PPC_LR_STKOFF(r1) mtlr r0 blr From 3569d84bb26f6f07d426446da3d2c836180f1565 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Mon, 26 Sep 2022 13:40:57 +1000 Subject: [PATCH 172/214] powerpc/64e: provide an addressing macro for use with TOC in alternate register The interrupt entry code carefully saves a minimal number of registers, so in some places the TOC is required, it is loaded into a different register, so provide a macro that can supply an alternate TOC register. This continues to use got addressing because TOC-relative results in "got/toc optimization is not supported" messages by the linker. 
Having r2 be one of the saved registers and using that for TOC addressing may be the best way to avoid that and switch this to TOC addressing. Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220926034057.2360083-6-npiggin@gmail.com --- arch/powerpc/include/asm/ppc_asm.h | 11 +++++++++++ arch/powerpc/kernel/exceptions-64e.S | 14 +++++++------- arch/powerpc/mm/nohash/tlb_low_64e.S | 2 +- 3 files changed, 19 insertions(+), 8 deletions(-) diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h index cf6bec9770d6..753a2757bcd4 100644 --- a/arch/powerpc/include/asm/ppc_asm.h +++ b/arch/powerpc/include/asm/ppc_asm.h @@ -346,6 +346,17 @@ n: addis reg,r2,name@toc@ha; \ addi reg,reg,name@toc@l +#ifdef CONFIG_PPC_BOOK3E_64 +/* + * This is used in register-constrained interrupt handlers. Not to be used + * by BOOK3S. ld complains with "got/toc optimization is not supported" if r2 + * is not used for the TOC offset, so use @got(tocreg). If the interrupt + * handlers saved r2 instead, LOAD_REG_ADDR could be used. + */ +#define LOAD_REG_ADDR_ALTTOC(reg,tocreg,name) \ + ld reg,name@got(tocreg) +#endif + #define LOAD_REG_ADDRBASE(reg,name) LOAD_REG_ADDR(reg,name) #define ADDROFF(name) 0 diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S index ece1bd2a4a39..930e36099015 100644 --- a/arch/powerpc/kernel/exceptions-64e.S +++ b/arch/powerpc/kernel/exceptions-64e.S @@ -688,8 +688,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) #ifdef CONFIG_RELOCATABLE __LOAD_PACA_TOC(r15) - ld r14,interrupt_base_book3e@got(r15) - ld r15,__end_interrupts@got(r15) + LOAD_REG_ADDR_ALTTOC(r14, r15, interrupt_base_book3e) + LOAD_REG_ADDR_ALTTOC(r15, r15, __end_interrupts) cmpld cr0,r10,r14 cmpld cr1,r10,r15 #else @@ -759,8 +759,8 @@ kernel_dbg_exc: #ifdef CONFIG_RELOCATABLE __LOAD_PACA_TOC(r15) - ld r14,interrupt_base_book3e@got(r15) - ld r15,__end_interrupts@got(r15) + LOAD_REG_ADDR_ALTTOC(r14, r15, interrupt_base_book3e) + LOAD_REG_ADDR_ALTTOC(r15, r15, __end_interrupts) cmpld cr0,r10,r14 cmpld cr1,r10,r15 #else @@ -884,8 +884,8 @@ kernel_dbg_exc: .macro SEARCH_RESTART_TABLE #ifdef CONFIG_RELOCATABLE __LOAD_PACA_TOC(r11) - ld r14,__start___restart_table@got(r11) - ld r15,__stop___restart_table@got(r11) + LOAD_REG_ADDR_ALTTOC(r14, r11, __start___restart_table) + LOAD_REG_ADDR_ALTTOC(r15, r11, __stop___restart_table) #else LOAD_REG_IMMEDIATE_SYM(r14, r11, __start___restart_table) LOAD_REG_IMMEDIATE_SYM(r15, r11, __stop___restart_table) @@ -1303,7 +1303,7 @@ a2_tlbinit_after_linear_map: /* Now we branch the new virtual address mapped by this entry */ #ifdef CONFIG_RELOCATABLE __LOAD_PACA_TOC(r5) - ld r3,1f@got(r5) + LOAD_REG_ADDR_ALTTOC(r3, r5, 1f) #else LOAD_REG_IMMEDIATE_SYM(r3, r5, 1f) #endif diff --git a/arch/powerpc/mm/nohash/tlb_low_64e.S b/arch/powerpc/mm/nohash/tlb_low_64e.S index b3b3dfeec8f5..76cf456d7976 100644 --- a/arch/powerpc/mm/nohash/tlb_low_64e.S +++ b/arch/powerpc/mm/nohash/tlb_low_64e.S @@ -1119,7 +1119,7 @@ tlb_load_linear: * final implementation, especially when dealing with hypervisors */ __LOAD_PACA_TOC(r11) - ld r11,linear_map_top@got(r11) + LOAD_REG_ADDR_ALTTOC(r11, r11, linear_map_top) ld r10,0(r11) tovirt(10,10) cmpld cr0,r16,r10 From bf75a3258a40327b73c5b4458ae8102cfa921b40 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Mon, 26 Sep 2022 15:56:16 +1000 Subject: [PATCH 173/214] powerpc/64s/interrupt: move early boot ILE fixup into a macro In preparation for using this sequence in machine 
check interrupt, move it into a macro, with a small change to make it position independent. Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220926055620.2676869-2-npiggin@gmail.com --- arch/powerpc/kernel/exceptions-64s.S | 101 +++++++++++++++------------ 1 file changed, 56 insertions(+), 45 deletions(-) diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index 5956ad47a8a0..c1ce7deeeb6e 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -590,6 +590,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR) std r9,_TRAP(r1) /* set trap number */ li r10,0 LOAD_REG_IMMEDIATE(r11, STACK_FRAME_REGS_MARKER) + rldimi r11, r11, 32, 0 std r10,RESULT(r1) /* clear regs->result */ std r11,STACK_FRAME_OVERHEAD-16(r1) /* mark the frame */ .endm @@ -702,6 +703,60 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR) ld r1,GPR1(r1) .endm +/* + * EARLY_BOOT_FIXUP - Fix real-mode interrupt with wrong endian in early boot. + * + * There's a short window during boot where although the kernel is running + * little endian, any exceptions will cause the CPU to switch back to big + * endian. For example a WARN() boils down to a trap instruction, which will + * cause a program check, and we end up here but with the CPU in big endian + * mode. The first instruction of the program check handler (in GEN_INT_ENTRY + * below) is an mtsprg, which when executed in the wrong endian is an lhzu with + * a ~3GB displacement from r3. The content of r3 is random, so that is a load + * from some random location, and depending on the system can easily lead to a + * checkstop, or an infinitely recursive page fault. + * + * So to handle that case we have a trampoline here that can detect we are in + * the wrong endian and flip us back to the correct endian. We can't flip + * MSR[LE] using mtmsr, so we have to use rfid. That requires backing up SRR0/1 + * as well as a GPR. To do that we use SPRG0/2/3, as SPRG1 is already used for + * the paca. SPRG3 is user readable, but this trampoline is only active very + * early in boot, and SPRG3 will be reinitialised in vdso_getcpu_init() before + * userspace starts. + */ +.macro EARLY_BOOT_FIXUP +#ifdef CONFIG_CPU_LITTLE_ENDIAN +BEGIN_FTR_SECTION + tdi 0,0,0x48 // Trap never, or in reverse endian: b . + 8 + b 2f // Skip trampoline if endian is correct + .long 0xa643707d // mtsprg 0, r11 Backup r11 + .long 0xa6027a7d // mfsrr0 r11 + .long 0xa643727d // mtsprg 2, r11 Backup SRR0 in SPRG2 + .long 0xa6027b7d // mfsrr1 r11 + .long 0xa643737d // mtsprg 3, r11 Backup SRR1 in SPRG3 + .long 0xa600607d // mfmsr r11 + .long 0x01006b69 // xori r11, r11, 1 Invert MSR[LE] + .long 0xa6037b7d // mtsrr1 r11 + /* + * This is 'li r11,1f' where 1f is the absolute address of that + * label, byteswapped into the SI field of the instruction. + */ + .long 0x00006039 | \ + ((ABS_ADDR(1f, real_vectors) & 0x00ff) << 24) | \ + ((ABS_ADDR(1f, real_vectors) & 0xff00) << 8) + .long 0xa6037a7d // mtsrr0 r11 + .long 0x2400004c // rfid +1: + mfsprg r11, 3 + mtsrr1 r11 // Restore SRR1 + mfsprg r11, 2 + mtsrr0 r11 // Restore SRR0 + mfsprg r11, 0 // Restore r11 +2: +END_FTR_SECTION(0, 1) // nop out after boot +#endif +.endm + /* * There are a few constraints to be concerned with. * - Real mode exceptions code/data must be located at their physical location. 
@@ -1619,51 +1674,7 @@ INT_DEFINE_BEGIN(program_check) INT_DEFINE_END(program_check) EXC_REAL_BEGIN(program_check, 0x700, 0x100) - -#ifdef CONFIG_CPU_LITTLE_ENDIAN - /* - * There's a short window during boot where although the kernel is - * running little endian, any exceptions will cause the CPU to switch - * back to big endian. For example a WARN() boils down to a trap - * instruction, which will cause a program check, and we end up here but - * with the CPU in big endian mode. The first instruction of the program - * check handler (in GEN_INT_ENTRY below) is an mtsprg, which when - * executed in the wrong endian is an lhzu with a ~3GB displacement from - * r3. The content of r3 is random, so that is a load from some random - * location, and depending on the system can easily lead to a checkstop, - * or an infinitely recursive page fault. - * - * So to handle that case we have a trampoline here that can detect we - * are in the wrong endian and flip us back to the correct endian. We - * can't flip MSR[LE] using mtmsr, so we have to use rfid. That requires - * backing up SRR0/1 as well as a GPR. To do that we use SPRG0/2/3, as - * SPRG1 is already used for the paca. SPRG3 is user readable, but this - * trampoline is only active very early in boot, and SPRG3 will be - * reinitialised in vdso_getcpu_init() before userspace starts. - */ -BEGIN_FTR_SECTION - tdi 0,0,0x48 // Trap never, or in reverse endian: b . + 8 - b 1f // Skip trampoline if endian is correct - .long 0xa643707d // mtsprg 0, r11 Backup r11 - .long 0xa6027a7d // mfsrr0 r11 - .long 0xa643727d // mtsprg 2, r11 Backup SRR0 in SPRG2 - .long 0xa6027b7d // mfsrr1 r11 - .long 0xa643737d // mtsprg 3, r11 Backup SRR1 in SPRG3 - .long 0xa600607d // mfmsr r11 - .long 0x01006b69 // xori r11, r11, 1 Invert MSR[LE] - .long 0xa6037b7d // mtsrr1 r11 - .long 0x34076039 // li r11, 0x734 - .long 0xa6037a7d // mtsrr0 r11 - .long 0x2400004c // rfid - mfsprg r11, 3 - mtsrr1 r11 // Restore SRR1 - mfsprg r11, 2 - mtsrr0 r11 // Restore SRR0 - mfsprg r11, 0 // Restore r11 -1: -END_FTR_SECTION(0, 1) // nop out after boot -#endif /* CONFIG_CPU_LITTLE_ENDIAN */ - + EARLY_BOOT_FIXUP GEN_INT_ENTRY program_check, virt=0 EXC_REAL_END(program_check, 0x700, 0x100) EXC_VIRT_BEGIN(program_check, 0x4700, 0x100) From 2f5182cffa43f31c241131a2c10a4ecd8e90fb3e Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Mon, 26 Sep 2022 15:56:17 +1000 Subject: [PATCH 174/214] powerpc/64s: early boot machine check handler Use the early boot interrupt fixup in the machine check handler to allow the machine check handler to run before interrupt endian is set up. Branch to an early boot handler that just does a basic crash, which allows it to run before ppc_md is set up. MSR[ME] is enabled on the boot CPU earlier, and the machine check stack is temporarily set to the middle of the init task stack. This allows machine checks (e.g., due to invalid data access in real mode) to print something useful earlier in boot (as soon as udbg is set up, if CONFIG_PPC_EARLY_DEBUG=y). 
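As a rough illustration (all register values below are invented), a machine
check taken this early now reports something like the following on the udbg
console before dying, instead of silently checkstopping or recursing:

  Machine check (early boot)
  SRR0=0xc000000000a1b2c0 SRR1=0x9000000000201000
   DAR=0x0000000000001000 DSISR=0x00000008
   LR=0xc0000000000543a0 R1=0xc0000000023f7b00
  ------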
Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220926055620.2676869-3-npiggin@gmail.com --- arch/powerpc/include/asm/asm-prototypes.h | 1 + arch/powerpc/kernel/exceptions-64s.S | 6 +++++- arch/powerpc/kernel/setup_64.c | 14 ++++++++++++++ arch/powerpc/kernel/traps.c | 14 ++++++++++++++ 4 files changed, 34 insertions(+), 1 deletion(-) diff --git a/arch/powerpc/include/asm/asm-prototypes.h b/arch/powerpc/include/asm/asm-prototypes.h index e1b3e90eec94..274bce76f5da 100644 --- a/arch/powerpc/include/asm/asm-prototypes.h +++ b/arch/powerpc/include/asm/asm-prototypes.h @@ -36,6 +36,7 @@ int64_t __opal_call(int64_t a0, int64_t a1, int64_t a2, int64_t a3, int64_t opcode, uint64_t msr); /* misc runtime */ +void enable_machine_check(void); extern u64 __bswapdi2(u64); extern s64 __lshrdi3(s64, int); extern s64 __ashldi3(s64, int); diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index c1ce7deeeb6e..8d658c740db8 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -1134,6 +1134,7 @@ INT_DEFINE_BEGIN(machine_check) INT_DEFINE_END(machine_check) EXC_REAL_BEGIN(machine_check, 0x200, 0x100) + EARLY_BOOT_FIXUP GEN_INT_ENTRY machine_check_early, virt=0 EXC_REAL_END(machine_check, 0x200, 0x100) EXC_VIRT_NONE(0x4200, 0x100) @@ -1198,6 +1199,9 @@ BEGIN_FTR_SECTION bl enable_machine_check END_FTR_SECTION_IFSET(CPU_FTR_HVMODE) addi r3,r1,STACK_FRAME_OVERHEAD +BEGIN_FTR_SECTION + bl machine_check_early_boot +END_FTR_SECTION(0, 1) // nop out after boot bl machine_check_early std r3,RESULT(r1) /* Save result */ ld r12,_MSR(r1) @@ -3096,7 +3100,7 @@ CLOSE_FIXED_SECTION(virt_trampolines); USE_TEXT_SECTION() /* MSR[RI] should be clear because this uses SRR[01] */ -enable_machine_check: +_GLOBAL(enable_machine_check) mflr r0 bcl 20,31,$+4 0: mflr r3 diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index e1ac803c9046..f50476da2f2c 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -34,6 +34,7 @@ #include #include +#include #include #include #include @@ -180,6 +181,16 @@ static void __init fixup_boot_paca(void) { /* The boot cpu is started */ get_paca()->cpu_start = 1; +#ifdef CONFIG_PPC_BOOK3S_64 + /* + * Give the early boot machine check stack somewhere to use, use + * half of the init stack. This is a bit hacky but there should not be + * deep stack usage in early init so shouldn't overflow it or overwrite + * things. + */ + get_paca()->mc_emergency_sp = (void *)&init_thread_union + + (THREAD_SIZE/2); +#endif /* Allow percpu accesses to work until we setup percpu data */ get_paca()->data_offset = 0; /* Mark interrupts soft and hard disabled in PACA */ @@ -357,6 +368,9 @@ void __init early_setup(unsigned long dt_ptr) /* -------- printk is now safe to use ------- */ + if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && (mfmsr() & MSR_HV)) + enable_machine_check(); + /* Try new device tree based feature discovery ... 
*/ if (!dt_cpu_ftrs_init(__va(dt_ptr))) /* Otherwise use the old style CPU table */ diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index 62ec50a7a8ef..9bdd79aa51cf 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -68,6 +68,7 @@ #include #include #include +#include #if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC_CORE) int (*__debugger)(struct pt_regs *regs) __read_mostly; @@ -850,6 +851,19 @@ bail: } #ifdef CONFIG_PPC_BOOK3S_64 +DEFINE_INTERRUPT_HANDLER_RAW(machine_check_early_boot) +{ + udbg_printf("Machine check (early boot)\n"); + udbg_printf("SRR0=0x%016lx SRR1=0x%016lx\n", regs->nip, regs->msr); + udbg_printf(" DAR=0x%016lx DSISR=0x%08lx\n", regs->dar, regs->dsisr); + udbg_printf(" LR=0x%016lx R1=0x%08lx\n", regs->link, regs->gpr[1]); + udbg_printf("------\n"); + die("Machine check (early boot)", regs, SIGBUS); + for (;;) + ; + return 0; +} + DEFINE_INTERRUPT_HANDLER_ASYNC(machine_check_exception_async) { __machine_check_exception(regs); From b830c8754e046f96e84da9d3b3e028c4ceef2b18 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Mon, 26 Sep 2022 15:56:18 +1000 Subject: [PATCH 175/214] powerpc/64: avoid using r13 in relocate relocate() uses r13 in early boot before it is used for the paca. Use a different register for this so r13 is kept unchanged until it is set to the paca pointer. Avoid r14 as well while we're here, there's no reason not to use the volatile registers which is a bit less surprising, and r14 could be used as another fixed reg one day. Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220926055620.2676869-4-npiggin@gmail.com --- arch/powerpc/kernel/reloc_64.S | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/arch/powerpc/kernel/reloc_64.S b/arch/powerpc/kernel/reloc_64.S index 232e4549defe..efd52f2e7033 100644 --- a/arch/powerpc/kernel/reloc_64.S +++ b/arch/powerpc/kernel/reloc_64.S @@ -27,8 +27,8 @@ _GLOBAL(relocate) add r9,r9,r12 /* r9 has runtime addr of .rela.dyn section */ ld r10,(p_st - 0b)(r12) add r10,r10,r12 /* r10 has runtime addr of _stext */ - ld r13,(p_sym - 0b)(r12) - add r13,r13,r12 /* r13 has runtime addr of .dynsym */ + ld r4,(p_sym - 0b)(r12) + add r4,r4,r12 /* r4 has runtime addr of .dynsym */ /* * Scan the dynamic section for the RELA, RELASZ and RELAENT entries. @@ -84,16 +84,16 @@ _GLOBAL(relocate) ld r0,16(r9) /* reloc->r_addend */ b .Lstore .Luaddr64: - srdi r14,r0,32 /* ELF64_R_SYM(reloc->r_info) */ + srdi r5,r0,32 /* ELF64_R_SYM(reloc->r_info) */ clrldi r0,r0,32 cmpdi r0,R_PPC64_UADDR64 bne .Lnext ld r6,0(r9) ld r0,16(r9) - mulli r14,r14,24 /* 24 == sizeof(elf64_sym) */ - add r14,r14,r13 /* elf64_sym[ELF64_R_SYM] */ - ld r14,8(r14) - add r0,r0,r14 + mulli r5,r5,24 /* 24 == sizeof(elf64_sym) */ + add r5,r5,r4 /* elf64_sym[ELF64_R_SYM] */ + ld r5,8(r5) + add r0,r0,r5 .Lstore: add r0,r0,r3 stdx r0,r7,r6 From 519b2e317e39ac99ce589a7c8480c47a17d62638 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Mon, 26 Sep 2022 15:56:19 +1000 Subject: [PATCH 176/214] powerpc/64: don't set boot CPU's r13 to paca until the structure is set up The idea is to get to the point where if r13 is non-zero, then it should contain a reasonable paca. This can be used in early boot program check and machine check handlers to avoid running off into the weeds if they hit before r13 has a paca. 
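The resulting boot-CPU ordering in early_setup() is, in simplified form:

	initialise_paca(&boot_paca, 0);
	fixup_boot_paca(&boot_paca);	/* finish filling in the paca ... */
	setup_paca(&boot_paca);		/* ... before r13 ever points at it */

so the paca only becomes visible through r13 once it is fully set up.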
Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220926055620.2676869-5-npiggin@gmail.com --- arch/powerpc/kernel/setup_64.c | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index f50476da2f2c..6312a3fe8eff 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -177,10 +177,10 @@ early_param("smt-enabled", early_smt_enabled); #endif /* CONFIG_SMP */ /** Fix up paca fields required for the boot cpu */ -static void __init fixup_boot_paca(void) +static void __init fixup_boot_paca(struct paca_struct *boot_paca) { /* The boot cpu is started */ - get_paca()->cpu_start = 1; + boot_paca->cpu_start = 1; #ifdef CONFIG_PPC_BOOK3S_64 /* * Give the early boot machine check stack somewhere to use, use @@ -188,14 +188,14 @@ static void __init fixup_boot_paca(void) * deep stack usage in early init so shouldn't overflow it or overwrite * things. */ - get_paca()->mc_emergency_sp = (void *)&init_thread_union + + boot_paca->mc_emergency_sp = (void *)&init_thread_union + (THREAD_SIZE/2); #endif /* Allow percpu accesses to work until we setup percpu data */ - get_paca()->data_offset = 0; + boot_paca->data_offset = 0; /* Mark interrupts soft and hard disabled in PACA */ - irq_soft_mask_set(IRQS_DISABLED); - get_paca()->irq_happened = PACA_IRQ_HARD_DIS; + boot_paca->irq_soft_mask = IRQS_DISABLED; + boot_paca->irq_happened = PACA_IRQ_HARD_DIS; WARN_ON(mfmsr() & MSR_EE); } @@ -363,8 +363,8 @@ void __init early_setup(unsigned long dt_ptr) * what CPU we are on. */ initialise_paca(&boot_paca, 0); - setup_paca(&boot_paca); - fixup_boot_paca(); + fixup_boot_paca(&boot_paca); + setup_paca(&boot_paca); /* install the paca into registers */ /* -------- printk is now safe to use ------- */ @@ -393,8 +393,8 @@ void __init early_setup(unsigned long dt_ptr) /* Poison paca_ptrs[0] again if it's not the boot cpu */ memset(&paca_ptrs[0], 0x88, sizeof(paca_ptrs[0])); } - setup_paca(paca_ptrs[boot_cpuid]); - fixup_boot_paca(); + fixup_boot_paca(paca_ptrs[boot_cpuid]); + setup_paca(paca_ptrs[boot_cpuid]); /* install the paca into registers */ /* * Configure exception handlers. This include setting up trampolines From e1100cee059ad0bea6a668177e835baa087a0c65 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Mon, 26 Sep 2022 15:56:20 +1000 Subject: [PATCH 177/214] powerpc/64s/interrupt: halt early boot interrupts if paca is not set up Ensure r13 is zero from very early in boot until it gets set to the boot paca pointer. This allows early program and mce handlers to halt if there is no valid paca, rather than potentially run off into the weeds. This preserves register and memory contents for low level debugging tools. Nothing could be printed to console at this point in any case because even udbg is only set up after the boot paca is set, so this shouldn't be missed. 
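In pseudo-C, the check added to the entry trampoline is simply:

	if (r13 == 0)		/* no paca set up yet */
		for (;;)
			;	/* spin in place, state preserved for a debugger */

implemented as a cmpdi plus a branch-to-self, saving r11 in an SPRG and the
CR in r11 around the compare, so that no memory is touched.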
Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220926055620.2676869-6-npiggin@gmail.com --- arch/powerpc/kernel/exceptions-64s.S | 15 +++++++++++++-- arch/powerpc/kernel/head_64.S | 3 +++ arch/powerpc/kernel/setup_64.c | 1 + 3 files changed, 17 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index 8d658c740db8..c3b803d6d805 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -725,8 +725,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR) * userspace starts. */ .macro EARLY_BOOT_FIXUP -#ifdef CONFIG_CPU_LITTLE_ENDIAN BEGIN_FTR_SECTION +#ifdef CONFIG_CPU_LITTLE_ENDIAN tdi 0,0,0x48 // Trap never, or in reverse endian: b . + 8 b 2f // Skip trampoline if endian is correct .long 0xa643707d // mtsprg 0, r11 Backup r11 @@ -753,8 +753,19 @@ BEGIN_FTR_SECTION mtsrr0 r11 // Restore SRR0 mfsprg r11, 0 // Restore r11 2: -END_FTR_SECTION(0, 1) // nop out after boot #endif + /* + * program check could hit at any time, and pseries can not block + * MSR[ME] in early boot. So check if there is anything useful in r13 + * yet, and spin forever if not. + */ + mtsprg 0, r11 + mfcr r11 + cmpdi r13, 0 + beq . + mtcr r11 + mfsprg r11, 0 +END_FTR_SECTION(0, 1) // nop out after boot .endm /* diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S index c63e0c086f03..cfc09a96a420 100644 --- a/arch/powerpc/kernel/head_64.S +++ b/arch/powerpc/kernel/head_64.S @@ -487,6 +487,9 @@ __start_initialization_multiplatform: /* Make sure we are running in 64 bits mode */ bl enable_64b_mode + /* Zero r13 (paca) so early program check / mce don't use it */ + li r13,0 + /* Get TOC pointer (current runtime address) */ bl relative_toc diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index 6312a3fe8eff..a0dee7354fe6 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -364,6 +364,7 @@ void __init early_setup(unsigned long dt_ptr) */ initialise_paca(&boot_paca, 0); fixup_boot_paca(&boot_paca); + WARN_ON(local_paca != 0); setup_paca(&boot_paca); /* install the paca into registers */ /* -------- printk is now safe to use ------- */ From b9c001276d4a756f98cc7dc4672eff5343949203 Mon Sep 17 00:00:00 2001 From: Athira Rajeev Date: Wed, 21 Sep 2022 20:22:53 +0530 Subject: [PATCH 178/214] powerpc/perf: Fix branch_filter support for multiple filters For PERF_SAMPLE_BRANCH_STACK sample type, different branch_sample_type ie branch filters are supported. The branch filters are requested via event attribute "branch_sample_type". Multiple branch filters can be passed in event attribute. eg: $ perf record -b -o- -B --branch-filter any,ind_call true None of the Power PMUs support having multiple branch filters at the same time. Branch filters for branch stack sampling is set via MMCRA IFM bits [32:33]. But currently when requesting for multiple filter types, the "perf record" command does not report any error. eg: $ perf record -b -o- -B --branch-filter any,save_type true $ perf record -b -o- -B --branch-filter any,ind_call true The "bhrb_filter_map" function in PMU driver code does the validity check for supported branch filters. But this check is done for single filter. Hence "perf record" will proceed here without reporting any error. Fix power_pmu_event_init() to return EOPNOTSUPP when multiple branch filters are requested in the event attr. 
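For example, a caller setting up an attr roughly like this (illustrative
snippet, not a complete program) is now rejected with EOPNOTSUPP by
perf_event_open():

	struct perf_event_attr attr = {
		.type			= PERF_TYPE_HARDWARE,
		.config			= PERF_COUNT_HW_CPU_CYCLES,
		.size			= sizeof(attr),
		.sample_period		= 10000,
		.sample_type		= PERF_SAMPLE_BRANCH_STACK,
		/* two branch filters at once - not supported by the PMU */
		.branch_sample_type	= PERF_SAMPLE_BRANCH_ANY |
					  PERF_SAMPLE_BRANCH_IND_CALL,
	};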
After the fix: $ perf record --branch-filter any,ind_call -- ls Error: cycles: PMU Hardware doesn't support sampling/overflow-interrupts. Try 'perf stat' Reported-by: Disha Goel Signed-off-by: Athira Rajeev Tested-by: Disha Goel Reviewed-by: Madhavan Srinivasan Reviewed-by: Kajol Jain [mpe: Tweak comment and change log wording] Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220921145255.20972-1-atrajeev@linux.vnet.ibm.com --- arch/powerpc/perf/core-book3s.c | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c index 13919eb96931..03e31ae97741 100644 --- a/arch/powerpc/perf/core-book3s.c +++ b/arch/powerpc/perf/core-book3s.c @@ -2131,6 +2131,23 @@ static int power_pmu_event_init(struct perf_event *event) if (has_branch_stack(event)) { u64 bhrb_filter = -1; + /* + * Currently no PMU supports having multiple branch filters + * at the same time. Branch filters are set via MMCRA IFM[32:33] + * bits for Power8 and above. Return EOPNOTSUPP when multiple + * branch filters are requested in the event attr. + * + * When opening event via perf_event_open(), branch_sample_type + * gets adjusted in perf_copy_attr(). Kernel will automatically + * adjust the branch_sample_type based on the event modifier + * settings to include PERF_SAMPLE_BRANCH_PLM_ALL. Hence drop + * the check for PERF_SAMPLE_BRANCH_PLM_ALL. + */ + if (hweight64(event->attr.branch_sample_type & ~PERF_SAMPLE_BRANCH_PLM_ALL) > 1) { + local_irq_restore(irq_flags); + return -EOPNOTSUPP; + } + if (ppmu->bhrb_filter_map) bhrb_filter = ppmu->bhrb_filter_map( event->attr.branch_sample_type); From 18213532de7156af689cb0511d2f95bcbe3c98a0 Mon Sep 17 00:00:00 2001 From: Athira Rajeev Date: Wed, 21 Sep 2022 20:22:55 +0530 Subject: [PATCH 179/214] selftests/powerpc: Update bhrb filter sampling test for multiple branch filters For PERF_SAMPLE_BRANCH_STACK sample type, different branch_sample_type, ie branch filters are supported. The testcase "bhrb_filter_map_test" tests the valid and invalid filter maps in different powerpc platforms. Update this testcase to include scenario to cover multiple branch filters at sametime. Since powerpc doesn't support multiple filters at sametime, expect failure during perf_event_open. Reported-by: Disha Goel Signed-off-by: Athira Rajeev Reviewed-by: Kajol Jain Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220921145255.20972-3-atrajeev@linux.vnet.ibm.com --- .../powerpc/pmu/sampling_tests/bhrb_filter_map_test.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/tools/testing/selftests/powerpc/pmu/sampling_tests/bhrb_filter_map_test.c b/tools/testing/selftests/powerpc/pmu/sampling_tests/bhrb_filter_map_test.c index 8182647c63c8..3f43c315c666 100644 --- a/tools/testing/selftests/powerpc/pmu/sampling_tests/bhrb_filter_map_test.c +++ b/tools/testing/selftests/powerpc/pmu/sampling_tests/bhrb_filter_map_test.c @@ -96,6 +96,15 @@ static int bhrb_filter_map_test(void) } } + /* + * Combine filter maps which includes a valid branch filter and an invalid branch + * filter. Example: any ( PERF_SAMPLE_BRANCH_ANY) and any_call + * (PERF_SAMPLE_BRANCH_ANY_CALL). + * The perf_event_open should fail in this case. 
+ */ + event.attr.branch_sample_type = PERF_SAMPLE_BRANCH_ANY | PERF_SAMPLE_BRANCH_ANY_CALL; + FAIL_IF(!event_open(&event)); + return 0; } From 37b9345ce7f4ab17538ea62def6f6d430f091355 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pali=20Roh=C3=A1r?= Date: Fri, 2 Sep 2022 23:21:02 +0200 Subject: [PATCH 180/214] powerpc: Fix SPE Power ISA properties for e500v1 platforms MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Commit 2eb28006431c ("powerpc/e500v2: Add Power ISA properties to comply with ePAPR 1.1") introduced new include file e500v2_power_isa.dtsi and should have used it for all e500v2 platforms. But apparently it was used also for e500v1 platforms mpc8540, mpc8541, mpc8555 and mpc8560. e500v1 cores compared to e500v2 do not support double precision floating point SPE instructions. Hence power-isa-sp.fd should not be set on e500v1 platforms, which is in e500v2_power_isa.dtsi include file. Fix this issue by introducing a new e500v1_power_isa.dtsi include file and use it in all e500v1 device tree files. Fixes: 2eb28006431c ("powerpc/e500v2: Add Power ISA properties to comply with ePAPR 1.1") Signed-off-by: Pali Rohár Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220902212103.22534-1-pali@kernel.org --- .../boot/dts/fsl/e500v1_power_isa.dtsi | 51 +++++++++++++++++++ arch/powerpc/boot/dts/fsl/mpc8540ads.dts | 2 +- arch/powerpc/boot/dts/fsl/mpc8541cds.dts | 2 +- arch/powerpc/boot/dts/fsl/mpc8555cds.dts | 2 +- arch/powerpc/boot/dts/fsl/mpc8560ads.dts | 2 +- 5 files changed, 55 insertions(+), 4 deletions(-) create mode 100644 arch/powerpc/boot/dts/fsl/e500v1_power_isa.dtsi diff --git a/arch/powerpc/boot/dts/fsl/e500v1_power_isa.dtsi b/arch/powerpc/boot/dts/fsl/e500v1_power_isa.dtsi new file mode 100644 index 000000000000..7e2a90cde72e --- /dev/null +++ b/arch/powerpc/boot/dts/fsl/e500v1_power_isa.dtsi @@ -0,0 +1,51 @@ +/* + * e500v1 Power ISA Device Tree Source (include) + * + * Copyright 2012 Freescale Semiconductor Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor "AS IS" AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/ { + cpus { + power-isa-version = "2.03"; + power-isa-b; // Base + power-isa-e; // Embedded + power-isa-atb; // Alternate Time Base + power-isa-cs; // Cache Specification + power-isa-e.le; // Embedded.Little-Endian + power-isa-e.pm; // Embedded.Performance Monitor + power-isa-ecl; // Embedded Cache Locking + power-isa-mmc; // Memory Coherence + power-isa-sp; // Signal Processing Engine + power-isa-sp.fs; // SPE.Embedded Float Scalar Single + power-isa-sp.fv; // SPE.Embedded Float Vector + mmu-type = "power-embedded"; + }; +}; diff --git a/arch/powerpc/boot/dts/fsl/mpc8540ads.dts b/arch/powerpc/boot/dts/fsl/mpc8540ads.dts index 18a885130538..e03ae130162b 100644 --- a/arch/powerpc/boot/dts/fsl/mpc8540ads.dts +++ b/arch/powerpc/boot/dts/fsl/mpc8540ads.dts @@ -7,7 +7,7 @@ /dts-v1/; -/include/ "e500v2_power_isa.dtsi" +/include/ "e500v1_power_isa.dtsi" / { model = "MPC8540ADS"; diff --git a/arch/powerpc/boot/dts/fsl/mpc8541cds.dts b/arch/powerpc/boot/dts/fsl/mpc8541cds.dts index ac381e7b1c60..a2a6c5cf852e 100644 --- a/arch/powerpc/boot/dts/fsl/mpc8541cds.dts +++ b/arch/powerpc/boot/dts/fsl/mpc8541cds.dts @@ -7,7 +7,7 @@ /dts-v1/; -/include/ "e500v2_power_isa.dtsi" +/include/ "e500v1_power_isa.dtsi" / { model = "MPC8541CDS"; diff --git a/arch/powerpc/boot/dts/fsl/mpc8555cds.dts b/arch/powerpc/boot/dts/fsl/mpc8555cds.dts index 9f58db2a7e66..901b6ff06dfb 100644 --- a/arch/powerpc/boot/dts/fsl/mpc8555cds.dts +++ b/arch/powerpc/boot/dts/fsl/mpc8555cds.dts @@ -7,7 +7,7 @@ /dts-v1/; -/include/ "e500v2_power_isa.dtsi" +/include/ "e500v1_power_isa.dtsi" / { model = "MPC8555CDS"; diff --git a/arch/powerpc/boot/dts/fsl/mpc8560ads.dts b/arch/powerpc/boot/dts/fsl/mpc8560ads.dts index a24722ccaebf..c2f9aea78b29 100644 --- a/arch/powerpc/boot/dts/fsl/mpc8560ads.dts +++ b/arch/powerpc/boot/dts/fsl/mpc8560ads.dts @@ -7,7 +7,7 @@ /dts-v1/; -/include/ "e500v2_power_isa.dtsi" +/include/ "e500v1_power_isa.dtsi" / { model = "MPC8560ADS"; From c102432005e8811b80b25641e12c4577970b5558 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pali=20Roh=C3=A1r?= Date: Fri, 2 Sep 2022 23:21:03 +0200 Subject: [PATCH 181/214] powerpc: Include e500v1_power_isa.dtsi for remaining e500v1 platforms MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit There are still some board device tree files without Power ISA properties which have Freescale e500v1 cores, namely those which are based on Freescale mpc8540, mpc8541, mpc8555 and mpc8560 processors. So include newly introduced e500v1_power_isa.dtsi file in devices tree files with those processors. 
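One way to double-check the result on a built tree (illustrative only, the
dtb path depends on how the device trees are built):

  $ dtc -I dtb -O dts arch/powerpc/boot/tqm8540.dtb | grep power-isa

which should now list the e500v1 properties such as power-isa-sp.fs and
power-isa-sp.fv, with no power-isa-sp.fd.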
Signed-off-by: Pali Rohár Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220902212103.22534-2-pali@kernel.org --- arch/powerpc/boot/dts/ksi8560.dts | 2 ++ arch/powerpc/boot/dts/stx_gp3_8560.dts | 2 ++ arch/powerpc/boot/dts/stxssa8555.dts | 2 ++ arch/powerpc/boot/dts/tqm8540.dts | 2 ++ arch/powerpc/boot/dts/tqm8541.dts | 2 ++ arch/powerpc/boot/dts/tqm8555.dts | 2 ++ arch/powerpc/boot/dts/tqm8560.dts | 2 ++ 7 files changed, 14 insertions(+) diff --git a/arch/powerpc/boot/dts/ksi8560.dts b/arch/powerpc/boot/dts/ksi8560.dts index fe6c17c8812a..37a7eb576d02 100644 --- a/arch/powerpc/boot/dts/ksi8560.dts +++ b/arch/powerpc/boot/dts/ksi8560.dts @@ -14,6 +14,8 @@ /dts-v1/; +/include/ "fsl/e500v1_power_isa.dtsi" + / { model = "KSI8560"; compatible = "emerson,KSI8560"; diff --git a/arch/powerpc/boot/dts/stx_gp3_8560.dts b/arch/powerpc/boot/dts/stx_gp3_8560.dts index d1ab698eef36..e73f7e75b0b4 100644 --- a/arch/powerpc/boot/dts/stx_gp3_8560.dts +++ b/arch/powerpc/boot/dts/stx_gp3_8560.dts @@ -7,6 +7,8 @@ /dts-v1/; +/include/ "fsl/e500v1_power_isa.dtsi" + / { model = "stx,gp3"; compatible = "stx,gp3-8560", "stx,gp3"; diff --git a/arch/powerpc/boot/dts/stxssa8555.dts b/arch/powerpc/boot/dts/stxssa8555.dts index 5dca2a91c41f..96add25c904b 100644 --- a/arch/powerpc/boot/dts/stxssa8555.dts +++ b/arch/powerpc/boot/dts/stxssa8555.dts @@ -9,6 +9,8 @@ /dts-v1/; +/include/ "fsl/e500v1_power_isa.dtsi" + / { model = "stx,gp3"; compatible = "stx,gp3-8560", "stx,gp3"; diff --git a/arch/powerpc/boot/dts/tqm8540.dts b/arch/powerpc/boot/dts/tqm8540.dts index 9c1eb9779108..eb4d8fd3f7aa 100644 --- a/arch/powerpc/boot/dts/tqm8540.dts +++ b/arch/powerpc/boot/dts/tqm8540.dts @@ -7,6 +7,8 @@ /dts-v1/; +/include/ "fsl/e500v1_power_isa.dtsi" + / { model = "tqc,tqm8540"; compatible = "tqc,tqm8540"; diff --git a/arch/powerpc/boot/dts/tqm8541.dts b/arch/powerpc/boot/dts/tqm8541.dts index 44595cf675d0..fe5d3d873ec9 100644 --- a/arch/powerpc/boot/dts/tqm8541.dts +++ b/arch/powerpc/boot/dts/tqm8541.dts @@ -7,6 +7,8 @@ /dts-v1/; +/include/ "fsl/e500v1_power_isa.dtsi" + / { model = "tqc,tqm8541"; compatible = "tqc,tqm8541"; diff --git a/arch/powerpc/boot/dts/tqm8555.dts b/arch/powerpc/boot/dts/tqm8555.dts index 54f3e82907d6..4be05b7d225d 100644 --- a/arch/powerpc/boot/dts/tqm8555.dts +++ b/arch/powerpc/boot/dts/tqm8555.dts @@ -7,6 +7,8 @@ /dts-v1/; +/include/ "fsl/e500v1_power_isa.dtsi" + / { model = "tqc,tqm8555"; compatible = "tqc,tqm8555"; diff --git a/arch/powerpc/boot/dts/tqm8560.dts b/arch/powerpc/boot/dts/tqm8560.dts index 7415cb69f60d..8ea48502420b 100644 --- a/arch/powerpc/boot/dts/tqm8560.dts +++ b/arch/powerpc/boot/dts/tqm8560.dts @@ -8,6 +8,8 @@ /dts-v1/; +/include/ "fsl/e500v1_power_isa.dtsi" + / { model = "tqc,tqm8560"; compatible = "tqc,tqm8560"; From 110a58b9f91c66f743c01a2c217243d94c899c23 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pali=20Roh=C3=A1r?= Date: Sat, 27 Aug 2022 15:44:54 +0200 Subject: [PATCH 182/214] powerpc/boot: Explicitly disable usage of SPE instructions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit uImage boot wrapper should not use SPE instructions, like kernel itself. Boot wrapper has already disabled Altivec and VSX instructions but not SPE. Options -mno-spe and -mspe=no already set when compilation of kernel, but not when compiling uImage wrapper yet. Fix it. 
Cc: stable@vger.kernel.org Signed-off-by: Pali Rohár Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220827134454.17365-1-pali@kernel.org --- arch/powerpc/boot/Makefile | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile index a9cd2ea4a861..d32d95aea5d6 100644 --- a/arch/powerpc/boot/Makefile +++ b/arch/powerpc/boot/Makefile @@ -34,6 +34,7 @@ endif BOOTCFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \ -fno-strict-aliasing -O2 -msoft-float -mno-altivec -mno-vsx \ + $(call cc-option,-mno-spe) $(call cc-option,-mspe=no) \ -pipe -fomit-frame-pointer -fno-builtin -fPIC -nostdinc \ $(LINUXINCLUDE) From 6bd7ff497b4af13ea3d53781ffca7dc744dbb4da Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pali=20Roh=C3=A1r?= Date: Tue, 23 Aug 2022 01:17:51 +0200 Subject: [PATCH 183/214] powerpc/udbg: Remove extern function prototypes MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 'extern' keyword is pointless and deprecated for function prototypes. Signed-off-by: Pali Rohár Suggested-by: Gabriel Paubert Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220822231751.16973-1-pali@kernel.org --- arch/powerpc/include/asm/udbg.h | 52 ++++++++++++++++----------------- 1 file changed, 26 insertions(+), 26 deletions(-) diff --git a/arch/powerpc/include/asm/udbg.h b/arch/powerpc/include/asm/udbg.h index 88add5593e6f..b1f094728b35 100644 --- a/arch/powerpc/include/asm/udbg.h +++ b/arch/powerpc/include/asm/udbg.h @@ -15,13 +15,13 @@ extern void (*udbg_flush)(void); extern int (*udbg_getc)(void); extern int (*udbg_getc_poll)(void); -extern void udbg_puts(const char *s); -extern int udbg_write(const char *s, int n); +void udbg_puts(const char *s); +int udbg_write(const char *s, int n); -extern void register_early_udbg_console(void); -extern void udbg_printf(const char *fmt, ...) +void register_early_udbg_console(void); +void udbg_printf(const char *fmt, ...) 
__attribute__ ((format (printf, 1, 2))); -extern void udbg_progress(char *s, unsigned short hex); +void udbg_progress(char *s, unsigned short hex); void __init udbg_uart_init_mmio(void __iomem *addr, unsigned int stride); void __init udbg_uart_init_pio(unsigned long port, unsigned int stride); @@ -31,28 +31,28 @@ unsigned int __init udbg_probe_uart_speed(unsigned int clock); struct device_node; void __init udbg_scc_init(int force_scc); -extern int udbg_adb_init(int force_btext); -extern void udbg_adb_init_early(void); +int udbg_adb_init(int force_btext); +void udbg_adb_init_early(void); -extern void __init udbg_early_init(void); -extern void __init udbg_init_debug_lpar(void); -extern void __init udbg_init_debug_lpar_hvsi(void); -extern void __init udbg_init_pmac_realmode(void); -extern void __init udbg_init_maple_realmode(void); -extern void __init udbg_init_pas_realmode(void); -extern void __init udbg_init_rtas_panel(void); -extern void __init udbg_init_rtas_console(void); -extern void __init udbg_init_btext(void); -extern void __init udbg_init_44x_as1(void); -extern void __init udbg_init_40x_realmode(void); -extern void __init udbg_init_cpm(void); -extern void __init udbg_init_usbgecko(void); -extern void __init udbg_init_memcons(void); -extern void __init udbg_init_ehv_bc(void); -extern void __init udbg_init_ps3gelic(void); -extern void __init udbg_init_debug_opal_raw(void); -extern void __init udbg_init_debug_opal_hvsi(void); -extern void __init udbg_init_debug_16550(void); +void __init udbg_early_init(void); +void __init udbg_init_debug_lpar(void); +void __init udbg_init_debug_lpar_hvsi(void); +void __init udbg_init_pmac_realmode(void); +void __init udbg_init_maple_realmode(void); +void __init udbg_init_pas_realmode(void); +void __init udbg_init_rtas_panel(void); +void __init udbg_init_rtas_console(void); +void __init udbg_init_btext(void); +void __init udbg_init_44x_as1(void); +void __init udbg_init_40x_realmode(void); +void __init udbg_init_cpm(void); +void __init udbg_init_usbgecko(void); +void __init udbg_init_memcons(void); +void __init udbg_init_ehv_bc(void); +void __init udbg_init_ps3gelic(void); +void __init udbg_init_debug_opal_raw(void); +void __init udbg_init_debug_opal_hvsi(void); +void __init udbg_init_debug_16550(void); #endif /* __KERNEL__ */ #endif /* _ASM_POWERPC_UDBG_H */ From 99df7a2810b6d24651d4887ab61a142e042fb235 Mon Sep 17 00:00:00 2001 From: Nathan Lynch Date: Mon, 26 Sep 2022 08:16:42 -0500 Subject: [PATCH 184/214] powerpc/pseries: block untrusted device tree changes when locked down The /proc/powerpc/ofdt interface allows the root user to freely alter the in-kernel device tree, enabling arbitrary physical address writes via drivers that could bind to malicious device nodes, thus making it possible to disable lockdown. Historically this interface has been used on the pseries platform to facilitate the runtime addition and removal of processor, memory, and device resources (aka Dynamic Logical Partitioning or DLPAR). Years ago, the processor and memory use cases were migrated to designs that happen to be lockdown-friendly: device tree updates are communicated directly to the kernel from firmware without passing through untrusted user space. I/O device DLPAR via the "drmgr" command in powerpc-utils remains the sole legitimate user of /proc/powerpc/ofdt, but it is already broken in lockdown since it uses /dev/mem to allocate argument buffers for the rtas syscall. 
So only illegitimate uses of the interface should see a behavior change when running on a locked down kernel. Signed-off-by: Nathan Lynch Acked-by: Paul Moore (LSM) Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220926131643.146502-2-nathanl@linux.ibm.com --- arch/powerpc/platforms/pseries/reconfig.c | 5 +++++ include/linux/security.h | 1 + security/security.c | 1 + 3 files changed, 7 insertions(+) diff --git a/arch/powerpc/platforms/pseries/reconfig.c b/arch/powerpc/platforms/pseries/reconfig.c index cad7a0c93117..599bd2c78514 100644 --- a/arch/powerpc/platforms/pseries/reconfig.c +++ b/arch/powerpc/platforms/pseries/reconfig.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include @@ -361,6 +362,10 @@ static ssize_t ofdt_write(struct file *file, const char __user *buf, size_t coun char *kbuf; char *tmp; + rv = security_locked_down(LOCKDOWN_DEVICE_TREE); + if (rv) + return rv; + kbuf = memdup_user_nul(buf, count); if (IS_ERR(kbuf)) return PTR_ERR(kbuf); diff --git a/include/linux/security.h b/include/linux/security.h index 1bc362cb413f..7da801ceb5a4 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -114,6 +114,7 @@ enum lockdown_reason { LOCKDOWN_IOPORT, LOCKDOWN_MSR, LOCKDOWN_ACPI_TABLES, + LOCKDOWN_DEVICE_TREE, LOCKDOWN_PCMCIA_CIS, LOCKDOWN_TIOCSSERIAL, LOCKDOWN_MODULE_PARAMETERS, diff --git a/security/security.c b/security/security.c index 14d30fec8a00..400ab5de631e 100644 --- a/security/security.c +++ b/security/security.c @@ -52,6 +52,7 @@ const char *const lockdown_reasons[LOCKDOWN_CONFIDENTIALITY_MAX+1] = { [LOCKDOWN_IOPORT] = "raw io port access", [LOCKDOWN_MSR] = "raw MSR access", [LOCKDOWN_ACPI_TABLES] = "modifying ACPI tables", + [LOCKDOWN_DEVICE_TREE] = "modifying device tree contents", [LOCKDOWN_PCMCIA_CIS] = "direct PCMCIA CIS storage", [LOCKDOWN_TIOCSSERIAL] = "reconfiguration of serial port IO", [LOCKDOWN_MODULE_PARAMETERS] = "unsafe module parameters", From b8f3e48834fe8c86b4f21739c6effd160e2c2c19 Mon Sep 17 00:00:00 2001 From: Nathan Lynch Date: Mon, 26 Sep 2022 08:16:43 -0500 Subject: [PATCH 185/214] powerpc/rtas: block error injection when locked down The error injection facility on pseries VMs allows corruption of arbitrary guest memory, potentially enabling a sufficiently privileged user to disable lockdown or perform other modifications of the running kernel via the rtas syscall. Block the PAPR error injection facility from being opened or called when locked down. Signed-off-by: Nathan Lynch Acked-by: Paul Moore (LSM) Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220926131643.146502-3-nathanl@linux.ibm.com --- arch/powerpc/kernel/rtas.c | 25 ++++++++++++++++++++++++- include/linux/security.h | 1 + security/security.c | 1 + 3 files changed, 26 insertions(+), 1 deletion(-) diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c index 0b8a858aa847..e847f9b1c5b9 100644 --- a/arch/powerpc/kernel/rtas.c +++ b/arch/powerpc/kernel/rtas.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include #include @@ -463,6 +464,9 @@ void rtas_call_unlocked(struct rtas_args *args, int token, int nargs, int nret, va_end(list); } +static int ibm_open_errinjct_token; +static int ibm_errinjct_token; + int rtas_call(int token, int nargs, int nret, int *outputs, ...) { va_list list; @@ -475,6 +479,16 @@ int rtas_call(int token, int nargs, int nret, int *outputs, ...) 
if (!rtas.entry || token == RTAS_UNKNOWN_SERVICE) return -1; + if (token == ibm_open_errinjct_token || token == ibm_errinjct_token) { + /* + * It would be nicer to not discard the error value + * from security_locked_down(), but callers expect an + * RTAS status, not an errno. + */ + if (security_locked_down(LOCKDOWN_RTAS_ERROR_INJECTION)) + return -1; + } + if ((mfmsr() & (MSR_IR|MSR_DR)) != (MSR_IR|MSR_DR)) { WARN_ON_ONCE(1); return -1; @@ -1173,6 +1187,14 @@ SYSCALL_DEFINE1(rtas, struct rtas_args __user *, uargs) if (block_rtas_call(token, nargs, &args)) return -EINVAL; + if (token == ibm_open_errinjct_token || token == ibm_errinjct_token) { + int err; + + err = security_locked_down(LOCKDOWN_RTAS_ERROR_INJECTION); + if (err) + return err; + } + /* Need to handle ibm,suspend_me call specially */ if (token == rtas_token("ibm,suspend-me")) { @@ -1271,7 +1293,8 @@ void __init rtas_initialize(void) #ifdef CONFIG_RTAS_ERROR_LOGGING rtas_last_error_token = rtas_token("rtas-last-error"); #endif - + ibm_open_errinjct_token = rtas_token("ibm,open-errinjct"); + ibm_errinjct_token = rtas_token("ibm,errinjct"); rtas_syscall_filter_init(); } diff --git a/include/linux/security.h b/include/linux/security.h index 7da801ceb5a4..a6d67600759e 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -123,6 +123,7 @@ enum lockdown_reason { LOCKDOWN_XMON_WR, LOCKDOWN_BPF_WRITE_USER, LOCKDOWN_DBG_WRITE_KERNEL, + LOCKDOWN_RTAS_ERROR_INJECTION, LOCKDOWN_INTEGRITY_MAX, LOCKDOWN_KCORE, LOCKDOWN_KPROBES, diff --git a/security/security.c b/security/security.c index 400ab5de631e..3f5aa9d64aa7 100644 --- a/security/security.c +++ b/security/security.c @@ -61,6 +61,7 @@ const char *const lockdown_reasons[LOCKDOWN_CONFIDENTIALITY_MAX+1] = { [LOCKDOWN_XMON_WR] = "xmon write access", [LOCKDOWN_BPF_WRITE_USER] = "use of bpf to write user RAM", [LOCKDOWN_DBG_WRITE_KERNEL] = "use of kgdb/kdb to write kernel RAM", + [LOCKDOWN_RTAS_ERROR_INJECTION] = "RTAS error injection", [LOCKDOWN_INTEGRITY_MAX] = "integrity", [LOCKDOWN_KCORE] = "/proc/kcore access", [LOCKDOWN_KPROBES] = "use of kprobes", From b37ac1894ac3c014863986d6b8ed880195213e78 Mon Sep 17 00:00:00 2001 From: Nathan Lynch Date: Mon, 26 Sep 2022 17:02:50 -0500 Subject: [PATCH 186/214] powerpc/smp: poll cpu_callin_map more aggressively in __cpu_up() At boot time, it is not necessary to delay between polls of cpu_callin_map when waiting for a kicked CPU to come up. Remove the delay intervals, but preserve the overall deadline (five seconds). At run time, the first poll result is usually negative and we incur a sleeping wait. 
If we spin on the callin word for a short time first, we can reduce __cpu_up() from dozens of milliseconds to under 1ms in the common case on a P9 LPAR: $ ppc64_cpu --smt=off $ bpftrace -e 'kprobe:__cpu_up { @start[tid] = nsecs; } kretprobe:__cpu_up /@start[tid]/ { @us = hist((nsecs - @start[tid]) / 1000); delete(@start[tid]); }' -c 'ppc64_cpu --smt=on' Before: @us: [16K, 32K) 85 |@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@| [32K, 64K) 13 |@@@@@@@ | After: @us: [128, 256) 95 |@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@| [256, 512) 3 |@ | Signed-off-by: Nathan Lynch Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220926220250.157022-1-nathanl@linux.ibm.com --- arch/powerpc/kernel/smp.c | 38 ++++++++++++++++++++++---------------- 1 file changed, 22 insertions(+), 16 deletions(-) diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index 11ded19186b9..0da6e59161cd 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c @@ -1257,7 +1257,12 @@ static void cpu_idle_thread_init(unsigned int cpu, struct task_struct *idle) int __cpu_up(unsigned int cpu, struct task_struct *tidle) { - int rc, c; + const unsigned long boot_spin_ms = 5 * MSEC_PER_SEC; + const bool booting = system_state < SYSTEM_RUNNING; + const unsigned long hp_spin_ms = 1; + unsigned long deadline; + int rc; + const unsigned long spin_wait_ms = booting ? boot_spin_ms : hp_spin_ms; /* * Don't allow secondary threads to come online if inhibited @@ -1302,22 +1307,23 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle) } /* - * wait to see if the cpu made a callin (is actually up). - * use this value that I found through experimentation. - * -- Cort + * At boot time, simply spin on the callin word until the + * deadline passes. + * + * At run time, spin for an optimistic amount of time to avoid + * sleeping in the common case. */ - if (system_state < SYSTEM_RUNNING) - for (c = 50000; c && !cpu_callin_map[cpu]; c--) - udelay(100); -#ifdef CONFIG_HOTPLUG_CPU - else - /* - * CPUs can take much longer to come up in the - * hotplug case. Wait five seconds. - */ - for (c = 5000; c && !cpu_callin_map[cpu]; c--) - msleep(1); -#endif + deadline = jiffies + msecs_to_jiffies(spin_wait_ms); + spin_until_cond(cpu_callin_map[cpu] || time_is_before_jiffies(deadline)); + + if (!cpu_callin_map[cpu] && system_state >= SYSTEM_RUNNING) { + const unsigned long sleep_interval_us = 10 * USEC_PER_MSEC; + const unsigned long sleep_wait_ms = 100 * MSEC_PER_SEC; + + deadline = jiffies + msecs_to_jiffies(sleep_wait_ms); + while (!cpu_callin_map[cpu] && time_is_after_jiffies(deadline)) + fsleep(sleep_interval_us); + } if (!cpu_callin_map[cpu]) { printk(KERN_ERR "Processor %u is stuck.\n", cpu); From 91986d7f0300c2c01722e0eac5119bb0946fe9b5 Mon Sep 17 00:00:00 2001 From: ye xingchen Date: Thu, 25 Aug 2022 07:26:57 +0000 Subject: [PATCH 187/214] powerpc/pseries/vas: Remove the unneeded result variable Return the value vas_register_coproc_api() directly instead of storing it in another redundant variable. 
Reported-by: Zeal Robot Signed-off-by: ye xingchen Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220825072657.229168-1-ye.xingchen@zte.com.cn --- arch/powerpc/platforms/pseries/vas.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/arch/powerpc/platforms/pseries/vas.c b/arch/powerpc/platforms/pseries/vas.c index 7e6e6dd2e33e..46ea4e252f97 100644 --- a/arch/powerpc/platforms/pseries/vas.c +++ b/arch/powerpc/platforms/pseries/vas.c @@ -501,14 +501,10 @@ static const struct vas_user_win_ops vops_pseries = { int vas_register_api_pseries(struct module *mod, enum vas_cop_type cop_type, const char *name) { - int rc; - if (!copypaste_feat) return -ENOTSUPP; - rc = vas_register_coproc_api(mod, cop_type, name, &vops_pseries); - - return rc; + return vas_register_coproc_api(mod, cop_type, name, &vops_pseries); } EXPORT_SYMBOL_GPL(vas_register_api_pseries); From 5e4952656bca1b5d8c2be36682dc66d844797ad2 Mon Sep 17 00:00:00 2001 From: ye xingchen Date: Tue, 6 Sep 2022 07:20:06 +0000 Subject: [PATCH 188/214] ocxl: Remove the unneeded result variable Return the value opal_npu_spa_clear_cache() directly instead of storing it in another redundant variable. Reported-by: Zeal Robot Signed-off-by: ye xingchen Acked-by: Andrew Donnellan Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220906072006.337099-1-ye.xingchen@zte.com.cn --- arch/powerpc/platforms/powernv/ocxl.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/arch/powerpc/platforms/powernv/ocxl.c b/arch/powerpc/platforms/powernv/ocxl.c index 27c936075031..629067781cec 100644 --- a/arch/powerpc/platforms/powernv/ocxl.c +++ b/arch/powerpc/platforms/powernv/ocxl.c @@ -478,10 +478,8 @@ EXPORT_SYMBOL_GPL(pnv_ocxl_spa_release); int pnv_ocxl_spa_remove_pe_from_cache(void *platform_data, int pe_handle) { struct spa_data *data = (struct spa_data *) platform_data; - int rc; - rc = opal_npu_spa_clear_cache(data->phb_opal_id, data->bdfn, pe_handle); - return rc; + return opal_npu_spa_clear_cache(data->phb_opal_id, data->bdfn, pe_handle); } EXPORT_SYMBOL_GPL(pnv_ocxl_spa_remove_pe_from_cache); From 97f88a3d723162781d6cbfdc7b9617eefab55b19 Mon Sep 17 00:00:00 2001 From: Li Huafei Date: Fri, 23 Sep 2022 17:32:53 +0800 Subject: [PATCH 189/214] powerpc/kprobes: Fix null pointer reference in arch_prepare_kprobe() I found a null pointer reference in arch_prepare_kprobe(): # echo 'p cmdline_proc_show' > kprobe_events # echo 'p cmdline_proc_show+16' >> kprobe_events Kernel attempted to read user page (0) - exploit attempt? (uid: 0) BUG: Kernel NULL pointer dereference on read at 0x00000000 Faulting instruction address: 0xc000000000050bfc Oops: Kernel access of bad area, sig: 11 [#1] LE PAGE_SIZE=64K MMU=Radix SMP NR_CPUS=2048 NUMA PowerNV Modules linked in: CPU: 0 PID: 122 Comm: sh Not tainted 6.0.0-rc3-00007-gdcf8e5633e2e #10 NIP: c000000000050bfc LR: c000000000050bec CTR: 0000000000005bdc REGS: c0000000348475b0 TRAP: 0300 Not tainted (6.0.0-rc3-00007-gdcf8e5633e2e) MSR: 9000000000009033 CR: 88002444 XER: 20040006 CFAR: c00000000022d100 DAR: 0000000000000000 DSISR: 40000000 IRQMASK: 0 ... 
NIP arch_prepare_kprobe+0x10c/0x2d0 LR arch_prepare_kprobe+0xfc/0x2d0 Call Trace: 0xc0000000012f77a0 (unreliable) register_kprobe+0x3c0/0x7a0 __register_trace_kprobe+0x140/0x1a0 __trace_kprobe_create+0x794/0x1040 trace_probe_create+0xc4/0xe0 create_or_delete_trace_kprobe+0x2c/0x80 trace_parse_run_command+0xf0/0x210 probes_write+0x20/0x40 vfs_write+0xfc/0x450 ksys_write+0x84/0x140 system_call_exception+0x17c/0x3a0 system_call_vectored_common+0xe8/0x278 --- interrupt: 3000 at 0x7fffa5682de0 NIP: 00007fffa5682de0 LR: 0000000000000000 CTR: 0000000000000000 REGS: c000000034847e80 TRAP: 3000 Not tainted (6.0.0-rc3-00007-gdcf8e5633e2e) MSR: 900000000280f033 CR: 44002408 XER: 00000000 The address being probed has some special: cmdline_proc_show: Probe based on ftrace cmdline_proc_show+16: Probe for the next instruction at the ftrace location The ftrace-based kprobe does not generate kprobe::ainsn::insn, it gets set to NULL. In arch_prepare_kprobe() it will check for: ... prev = get_kprobe(p->addr - 1); preempt_enable_no_resched(); if (prev && ppc_inst_prefixed(ppc_inst_read(prev->ainsn.insn))) { ... If prev is based on ftrace, 'ppc_inst_read(prev->ainsn.insn)' will occur with a null pointer reference. At this point prev->addr will not be a prefixed instruction, so the check can be skipped. Check if prev is ftrace-based kprobe before reading 'prev->ainsn.insn' to fix this problem. Fixes: b4657f7650ba ("powerpc/kprobes: Don't allow breakpoints on suffixes") Signed-off-by: Li Huafei [mpe: Trim oops] Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220923093253.177298-1-lihuafei1@huawei.com --- arch/powerpc/kernel/kprobes.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c index 912d4f8a13be..bd7b1a035459 100644 --- a/arch/powerpc/kernel/kprobes.c +++ b/arch/powerpc/kernel/kprobes.c @@ -161,7 +161,13 @@ int arch_prepare_kprobe(struct kprobe *p) preempt_disable(); prev = get_kprobe(p->addr - 1); preempt_enable_no_resched(); - if (prev && ppc_inst_prefixed(ppc_inst_read(prev->ainsn.insn))) { + + /* + * When prev is a ftrace-based kprobe, we don't have an insn, and it + * doesn't probe for prefixed instruction. + */ + if (prev && !kprobe_ftrace(prev) && + ppc_inst_prefixed(ppc_inst_read(prev->ainsn.insn))) { printk("Cannot register a kprobe on the second word of prefixed instruction\n"); ret = -EINVAL; } From bbd71709087a9d486d1da42399eec14e106072f2 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Wed, 28 Sep 2022 01:04:18 +1000 Subject: [PATCH 190/214] powerpc: Make stack frame marker upper case Now that the stack frame regs marker is only 32-bits it is not as obvious in memory dumps and easier to miss, eg: c000000004733e40 0000000000000000 0000000000000000 |................| c000000004733e50 0000000000000000 0000000000000000 |................| c000000004733e60 0000000000000000 0000000000000000 |................| c000000004733e70 7367657200000000 0000000000000000 |sger............| c000000004733e80 a700000000000000 708897f7ff7f0000 |........p.......| c000000004733e90 0073428fff7f0000 208997f7ff7f0000 |.sB..... 
.......| c000000004733ea0 0100000000000000 ffffffffffffffff |................| c000000004733eb0 0000000000000000 0000000000000000 |................| So make it upper case to make it stand out a bit more: c000000004733e70 5347455200000000 0000000000000000 |SGER............| Acked-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220927150419.1503001-1-mpe@ellerman.id.au --- arch/powerpc/include/asm/ptrace.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h index 5b496e589d54..6c23d1d25dc7 100644 --- a/arch/powerpc/include/asm/ptrace.h +++ b/arch/powerpc/include/asm/ptrace.h @@ -99,7 +99,7 @@ struct pt_regs #define STACK_FRAME_WITH_PT_REGS (STACK_FRAME_OVERHEAD + sizeof(struct pt_regs)) -#define STACK_FRAME_REGS_MARKER ASM_CONST(0x72656773) +#define STACK_FRAME_REGS_MARKER ASM_CONST(0x52454753) #ifdef __powerpc64__ From 19c95df1277c48e3ef8cc7d9f1d315dce949f203 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Wed, 28 Sep 2022 01:04:19 +1000 Subject: [PATCH 191/214] powerpc: Reverse stack frame marker on little endian On little endian the stack frame marker appears reversed when dumping memory sequentially, as is typical in xmon or gdb, eg: c000000004733e40 0000000000000000 0000000000000000 |................| c000000004733e50 0000000000000000 0000000000000000 |................| c000000004733e60 0000000000000000 0000000000000000 |................| c000000004733e70 5347455200000000 0000000000000000 |SGER............| c000000004733e80 a700000000000000 708897f7ff7f0000 |........p.......| c000000004733e90 0073428fff7f0000 208997f7ff7f0000 |.sB..... .......| c000000004733ea0 0100000000000000 ffffffffffffffff |................| c000000004733eb0 0000000000000000 0000000000000000 |................| To make it easier to recognise, reverse the value on little endian, so it always appears as "REGS", eg: c000000004733e70 5245475300000000 0000000000000000 |REGS............| Acked-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220927150419.1503001-2-mpe@ellerman.id.au --- arch/powerpc/include/asm/ptrace.h | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h index 6c23d1d25dc7..2efec6d87049 100644 --- a/arch/powerpc/include/asm/ptrace.h +++ b/arch/powerpc/include/asm/ptrace.h @@ -99,7 +99,12 @@ struct pt_regs #define STACK_FRAME_WITH_PT_REGS (STACK_FRAME_OVERHEAD + sizeof(struct pt_regs)) +// Always displays as "REGS" in memory dumps +#ifdef CONFIG_CPU_BIG_ENDIAN #define STACK_FRAME_REGS_MARKER ASM_CONST(0x52454753) +#else +#define STACK_FRAME_REGS_MARKER ASM_CONST(0x53474552) +#endif #ifdef __powerpc64__ From 335e1a91042764629fbbcd8c7e40379fa3762d35 Mon Sep 17 00:00:00 2001 From: Haren Myneni Date: Tue, 27 Sep 2022 18:29:27 -0700 Subject: [PATCH 192/214] powerpc: Ignore DSI error caused by the copy/paste instruction MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The data storage interrupt (DSI) error will be generated when the paste operation is issued on the suspended Nest Accelerator (NX) window due to NX state changes. The hypervisor expects the partition to ignore this error during page fault handling. To differentiate DSI caused by an actual HW configuration or by the NX window, a new “ibm,pi-features” type value is defined. Byte 0, bit 3 of pi-attribute-specifier-type is now defined to indicate this DSI error. 
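For reference, bit numbers in these properties are big-endian within each attribute byte (bit 0 is the most significant bit), matching the existing ibm,pa-features convention in prom.c. A simplified sketch of how "byte 0, bit 3" would be tested (hypothetical helper; the real scan_features() additionally handles the two-byte property header, length checks and the invert flag):

	/* attrs points at the attribute bytes following the 2-byte header */
	static bool has_nx_dsi_bit(const unsigned char *attrs)
	{
		/* byte 0, bit 3 in big-endian bit numbering */
		return (attrs[0] >> (7 - 3)) & 1;
	}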
If this error is not ignored, the user space can get SIGBUS when the NX request is issued. This patch adds changes to read ibm,pi-features property and ignore DSI error during page fault handling if MMU_FTR_NX_DSI is defined. Signed-off-by: Haren Myneni [mpe: Mention PAPR version in comment] Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/b9cd844b85eb8f70459109ce1b14e44c4cc85fa7.camel@linux.ibm.com --- arch/powerpc/include/asm/mmu.h | 5 ++++- arch/powerpc/kernel/prom.c | 36 ++++++++++++++++++++++++---------- arch/powerpc/mm/fault.c | 17 +++++++++++++++- 3 files changed, 46 insertions(+), 12 deletions(-) diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h index 39057320e436..94b981152667 100644 --- a/arch/powerpc/include/asm/mmu.h +++ b/arch/powerpc/include/asm/mmu.h @@ -120,6 +120,9 @@ */ #define MMU_FTR_1T_SEGMENT ASM_CONST(0x40000000) +// NX paste RMA reject in DSI +#define MMU_FTR_NX_DSI ASM_CONST(0x80000000) + /* MMU feature bit sets for various CPUs */ #define MMU_FTRS_DEFAULT_HPTE_ARCH_V2 (MMU_FTR_HPTE_TABLE | MMU_FTR_TLBIEL | MMU_FTR_16M_PAGE) #define MMU_FTRS_POWER MMU_FTRS_DEFAULT_HPTE_ARCH_V2 @@ -181,7 +184,7 @@ enum { #endif #ifdef CONFIG_PPC_RADIX_MMU MMU_FTR_TYPE_RADIX | - MMU_FTR_GTSE | + MMU_FTR_GTSE | MMU_FTR_NX_DSI | #endif /* CONFIG_PPC_RADIX_MMU */ #endif #ifdef CONFIG_PPC_KUAP diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index a730b951b64b..2e7a04dab2f7 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c @@ -137,7 +137,7 @@ static void __init move_device_tree(void) } /* - * ibm,pa-features is a per-cpu property that contains a string of + * ibm,pa/pi-features is a per-cpu property that contains a string of * attribute descriptors, each of which has a 2 byte header plus up * to 254 bytes worth of processor attribute bits. First header * byte specifies the number of bytes following the header. @@ -149,15 +149,17 @@ static void __init move_device_tree(void) * is supported/not supported. Note that the bit numbers are * big-endian to match the definition in PAPR. */ -static struct ibm_pa_feature { +struct ibm_feature { unsigned long cpu_features; /* CPU_FTR_xxx bit */ unsigned long mmu_features; /* MMU_FTR_xxx bit */ unsigned int cpu_user_ftrs; /* PPC_FEATURE_xxx bit */ unsigned int cpu_user_ftrs2; /* PPC_FEATURE2_xxx bit */ - unsigned char pabyte; /* byte number in ibm,pa-features */ + unsigned char pabyte; /* byte number in ibm,pa/pi-features */ unsigned char pabit; /* bit number (big-endian) */ unsigned char invert; /* if 1, pa bit set => clear feature */ -} ibm_pa_features[] __initdata = { +}; + +static struct ibm_feature ibm_pa_features[] __initdata = { { .pabyte = 0, .pabit = 0, .cpu_user_ftrs = PPC_FEATURE_HAS_MMU }, { .pabyte = 0, .pabit = 1, .cpu_user_ftrs = PPC_FEATURE_HAS_FPU }, { .pabyte = 0, .pabit = 3, .cpu_features = CPU_FTR_CTRL }, @@ -179,9 +181,19 @@ static struct ibm_pa_feature { { .pabyte = 64, .pabit = 0, .cpu_features = CPU_FTR_DAWR1 }, }; +/* + * ibm,pi-features property provides the support of processor specific + * options not described in ibm,pa-features. Right now use byte 0, bit 3 + * which indicates the occurrence of DSI interrupt when the paste operation + * on the suspended NX window. 
+ */ +static struct ibm_feature ibm_pi_features[] __initdata = { + { .pabyte = 0, .pabit = 3, .mmu_features = MMU_FTR_NX_DSI }, +}; + static void __init scan_features(unsigned long node, const unsigned char *ftrs, unsigned long tablelen, - struct ibm_pa_feature *fp, + struct ibm_feature *fp, unsigned long ft_size) { unsigned long i, len, bit; @@ -218,17 +230,18 @@ static void __init scan_features(unsigned long node, const unsigned char *ftrs, } } -static void __init check_cpu_pa_features(unsigned long node) +static void __init check_cpu_features(unsigned long node, char *name, + struct ibm_feature *fp, + unsigned long size) { const unsigned char *pa_ftrs; int tablelen; - pa_ftrs = of_get_flat_dt_prop(node, "ibm,pa-features", &tablelen); + pa_ftrs = of_get_flat_dt_prop(node, name, &tablelen); if (pa_ftrs == NULL) return; - scan_features(node, pa_ftrs, tablelen, - ibm_pa_features, ARRAY_SIZE(ibm_pa_features)); + scan_features(node, pa_ftrs, tablelen, fp, size); } #ifdef CONFIG_PPC_64S_HASH_MMU @@ -380,7 +393,10 @@ static int __init early_init_dt_scan_cpus(unsigned long node, identify_cpu(0, be32_to_cpup(prop)); check_cpu_feature_properties(node); - check_cpu_pa_features(node); + check_cpu_features(node, "ibm,pa-features", ibm_pa_features, + ARRAY_SIZE(ibm_pa_features)); + check_cpu_features(node, "ibm,pi-features", ibm_pi_features, + ARRAY_SIZE(ibm_pi_features)); } identical_pvr_fixup(node); diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c index 1566804e4b3d..2bef19cc1b98 100644 --- a/arch/powerpc/mm/fault.c +++ b/arch/powerpc/mm/fault.c @@ -371,7 +371,22 @@ static void sanity_check_fault(bool is_write, bool is_user, #elif defined(CONFIG_PPC_8xx) #define page_fault_is_bad(__err) ((__err) & DSISR_NOEXEC_OR_G) #elif defined(CONFIG_PPC64) -#define page_fault_is_bad(__err) ((__err) & DSISR_BAD_FAULT_64S) +static int page_fault_is_bad(unsigned long err) +{ + unsigned long flag = DSISR_BAD_FAULT_64S; + + /* + * PAPR+ v2.11 § 14.15.3.4.1 (unreleased) + * If byte 0, bit 3 of pi-attribute-specifier-type in + * ibm,pi-features property is defined, ignore the DSI error + * which is caused by the paste instruction on the + * suspended NX window. + */ + if (mmu_has_feature(MMU_FTR_NX_DSI)) + flag &= ~DSISR_BAD_COPYPASTE; + + return err & flag; +} #else #define page_fault_is_bad(__err) ((__err) & DSISR_BAD_FAULT_32S) #endif From e4335f53198fa0c0aefb2a38bb5518e94253412c Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Thu, 8 Sep 2022 23:25:45 +1000 Subject: [PATCH 193/214] KVM: PPC: Book3S HV: Implement scheduling wait interval counters in the VPA PAPR specifies accumulated virtual processor wait intervals that relate to partition scheduling interval times. Implement these counters in the same way as they are repoted by dtl. 
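In the kvmppc_update_vpa_dispatch() path below, the accumulated wait interval lives in a big-endian field of the shared VPA, so the update is a byte-swapping read-modify-write. A minimal sketch of that update (hypothetical helper name; the field name is taken from the patch and the usual kernel byteorder helpers are assumed):

	/* accumulate a stolen-time delta into the big-endian VPA counter */
	static inline void vpa_add_wait_tb(struct lppaca *vpa, u64 delta)
	{
		u64 cur = be64_to_cpu(vpa->enqueue_dispatch_tb);

		vpa->enqueue_dispatch_tb = cpu_to_be64(cur + delta);
	}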
Reviewed-by: Fabiano Rosas Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220908132545.4085849-5-npiggin@gmail.com --- arch/powerpc/kvm/book3s_hv.c | 64 +++++++++++++++++++++++------------- 1 file changed, 42 insertions(+), 22 deletions(-) diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index ecce10a4486d..6ba68dd6190b 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -733,16 +733,15 @@ static u64 vcore_stolen_time(struct kvmppc_vcore *vc, u64 now) } static void __kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu, + struct lppaca *vpa, unsigned int pcpu, u64 now, unsigned long stolen) { struct dtl_entry *dt; - struct lppaca *vpa; dt = vcpu->arch.dtl_ptr; - vpa = vcpu->arch.vpa.pinned_addr; - if (!dt || !vpa) + if (!dt) return; dt->dispatch_reason = 7; @@ -763,29 +762,23 @@ static void __kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu, /* order writing *dt vs. writing vpa->dtl_idx */ smp_wmb(); vpa->dtl_idx = cpu_to_be64(++vcpu->arch.dtl_index); - vcpu->arch.dtl.dirty = true; + + /* vcpu->arch.dtl.dirty is set by the caller */ } -static void kvmppc_create_dtl_entry_p9(struct kvm_vcpu *vcpu, - struct kvmppc_vcore *vc, - u64 now) -{ - unsigned long stolen; - - stolen = vc->stolen_tb - vcpu->arch.stolen_logged; - vcpu->arch.stolen_logged = vc->stolen_tb; - - __kvmppc_create_dtl_entry(vcpu, vc->pcpu, now, stolen); -} - -static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu, - struct kvmppc_vcore *vc) +static void kvmppc_update_vpa_dispatch(struct kvm_vcpu *vcpu, + struct kvmppc_vcore *vc) { + struct lppaca *vpa; unsigned long stolen; unsigned long core_stolen; u64 now; unsigned long flags; + vpa = vcpu->arch.vpa.pinned_addr; + if (!vpa) + return; + now = mftb(); core_stolen = vcore_stolen_time(vc, now); @@ -796,7 +789,34 @@ static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu, vcpu->arch.busy_stolen = 0; spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags); - __kvmppc_create_dtl_entry(vcpu, vc->pcpu, now + vc->tb_offset, stolen); + vpa->enqueue_dispatch_tb = cpu_to_be64(be64_to_cpu(vpa->enqueue_dispatch_tb) + stolen); + + __kvmppc_create_dtl_entry(vcpu, vpa, vc->pcpu, now + vc->tb_offset, stolen); + + vcpu->arch.vpa.dirty = true; +} + +static void kvmppc_update_vpa_dispatch_p9(struct kvm_vcpu *vcpu, + struct kvmppc_vcore *vc, + u64 now) +{ + struct lppaca *vpa; + unsigned long stolen; + unsigned long stolen_delta; + + vpa = vcpu->arch.vpa.pinned_addr; + if (!vpa) + return; + + stolen = vc->stolen_tb; + stolen_delta = stolen - vcpu->arch.stolen_logged; + vcpu->arch.stolen_logged = stolen; + + vpa->enqueue_dispatch_tb = cpu_to_be64(stolen); + + __kvmppc_create_dtl_entry(vcpu, vpa, vc->pcpu, now, stolen_delta); + + vcpu->arch.vpa.dirty = true; } /* See if there is a doorbell interrupt pending for a vcpu */ @@ -3852,7 +3872,7 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc) * kvmppc_core_prepare_to_enter. 
*/ kvmppc_start_thread(vcpu, pvc); - kvmppc_create_dtl_entry(vcpu, pvc); + kvmppc_update_vpa_dispatch(vcpu, pvc); trace_kvm_guest_enter(vcpu); if (!vcpu->arch.ptid) thr0_done = true; @@ -4449,7 +4469,7 @@ static int kvmppc_run_vcpu(struct kvm_vcpu *vcpu) if ((vc->vcore_state == VCORE_PIGGYBACK || vc->vcore_state == VCORE_RUNNING) && !VCORE_IS_EXITING(vc)) { - kvmppc_create_dtl_entry(vcpu, vc); + kvmppc_update_vpa_dispatch(vcpu, vc); kvmppc_start_thread(vcpu, vc); trace_kvm_guest_enter(vcpu); } else if (vc->vcore_state == VCORE_SLEEPING) { @@ -4637,7 +4657,7 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit, tb = mftb(); - kvmppc_create_dtl_entry_p9(vcpu, vc, tb + vc->tb_offset); + kvmppc_update_vpa_dispatch_p9(vcpu, vc, tb + vc->tb_offset); trace_kvm_guest_enter(vcpu); From f3e5d9e53e74d77e711a2c90a91a8b0836a9e0b3 Mon Sep 17 00:00:00 2001 From: Haren Myneni Date: Wed, 28 Sep 2022 18:57:33 -0700 Subject: [PATCH 194/214] powerpc/pseries/vas: Pass hw_cpu_id to node associativity HCALL Generally the hypervisor decides to allocate a window on different VAS instances. But if user space wishes to allocate on the current VAS instance where the process is executing, the kernel has to pass associativity domain IDs to allocate VAS window HCALL. To determine the associativity domain IDs for the current CPU, smp_processor_id() is passed to node associativity HCALL which may return H_P2 (-55) error during DLPAR CPU event. This is because Linux CPU numbers (smp_processor_id()) are not the same as the hypervisor's view of CPU numbers. Fix the issue by passing hard_smp_processor_id() with VPHN_FLAG_VCPU flag (PAPR 14.11.6.1 H_HOME_NODE_ASSOCIATIVITY). Fixes: b22f2d88e435 ("powerpc/pseries/vas: Integrate API with open/close windows") Reviewed-by: Nathan Lynch Signed-off-by: Haren Myneni [mpe: Update change log to mention Linux vs HV CPU numbers] Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/55380253ea0c11341824cd4c0fc6bbcfc5752689.camel@linux.ibm.com --- arch/powerpc/platforms/pseries/vas.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/powerpc/platforms/pseries/vas.c b/arch/powerpc/platforms/pseries/vas.c index 46ea4e252f97..0e0524cbe20c 100644 --- a/arch/powerpc/platforms/pseries/vas.c +++ b/arch/powerpc/platforms/pseries/vas.c @@ -333,7 +333,7 @@ static struct vas_window *vas_allocate_window(int vas_id, u64 flags, * So no unpacking needs to be done. */ rc = plpar_hcall9(H_HOME_NODE_ASSOCIATIVITY, domain, - VPHN_FLAG_VCPU, smp_processor_id()); + VPHN_FLAG_VCPU, hard_smp_processor_id()); if (rc != H_SUCCESS) { pr_err("H_HOME_NODE_ASSOCIATIVITY error: %d\n", rc); goto out; From a08661af4c52068972c552deb940b3b13635eb3e Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Thu, 29 Sep 2022 13:21:20 +1000 Subject: [PATCH 195/214] powerpc: remove orphan systbl_chk.sh arch/powerpc/kernel/systbl_chk.sh has not been referenced since commit ab66dcc76d6a ("powerpc: generate uapi header and system call table files"). Remove it. 
Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220929032120.3592593-1-npiggin@gmail.com --- arch/powerpc/kernel/systbl_chk.sh | 30 ------------------------------ 1 file changed, 30 deletions(-) delete mode 100644 arch/powerpc/kernel/systbl_chk.sh diff --git a/arch/powerpc/kernel/systbl_chk.sh b/arch/powerpc/kernel/systbl_chk.sh deleted file mode 100644 index c7ac3ed657c4..000000000000 --- a/arch/powerpc/kernel/systbl_chk.sh +++ /dev/null @@ -1,30 +0,0 @@ -#!/bin/sh -# SPDX-License-Identifier: GPL-2.0-or-later -# -# Just process the CPP output from systbl_chk.c and complain -# if anything is out of order. -# -# Copyright © 2008 IBM Corporation -# - -awk 'BEGIN { num = -1; } # Ignore the beginning of the file - /^#/ { next; } - /^[ \t]*$/ { next; } - /^START_TABLE/ { num = 0; next; } - /^END_TABLE/ { - if (num != $2) { - printf "Error: NR_syscalls (%s) is not one more than the last syscall (%s)\n", - $2, num - 1; - exit(1); - } - num = -1; # Ignore the rest of the file - } - { - if (num == -1) next; - if (($1 != -1) && ($1 != num)) { - printf "Error: Syscall %s out of order (expected %s)\n", - $1, num; - exit(1); - }; - num++; - }' "$1" From 57a8e4b26eaa8f30aa8bc737255d192915a53023 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Wed, 28 Sep 2022 23:09:12 +1000 Subject: [PATCH 196/214] powerpc/64s: Remove old STAB comment This used to be about the 0x4300 handler, but that was moved in commit da2bc4644c75 ("powerpc/64s: Add new exception vector macros"). Note that "STAB" here refers to "Segment Table" not the debug format. Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220928130912.1732466-1-mpe@ellerman.id.au --- arch/powerpc/kernel/exceptions-64s.S | 6 ------ 1 file changed, 6 deletions(-) diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index c3b803d6d805..e7a4f56fbf26 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -3080,12 +3080,6 @@ MASKED_INTERRUPT hsrr=1 * to HV=1 from HV=0 is delivered via real mode handlers. */ - /* - * This uses the standard macro, since the original 0x300 vector - * only has extra guff for STAB-based processors -- which never - * come here. - */ - USE_FIXED_SECTION(virt_trampolines) /* * All code below __end_soft_masked is treated as soft-masked. If From 0c360996425e36945c10479e2bc6ad5992c57794 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Wed, 28 Sep 2022 23:09:41 +1000 Subject: [PATCH 197/214] powerpc/64s: Remove lost/old comment The bulk of this was moved/reworded in: 57f266497d81 ("powerpc: Use gas sections for arranging exception vectors") And now appears around line 700 in arch/powerpc/kernel/exceptions-64s.S. Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220928130941.1732818-1-mpe@ellerman.id.au --- arch/powerpc/kernel/exceptions-64s.S | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index e7a4f56fbf26..fed983cc7ee0 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -3070,16 +3070,6 @@ EXPORT_SYMBOL(do_uaccess_flush) MASKED_INTERRUPT MASKED_INTERRUPT hsrr=1 - /* - * Relocation-on interrupts: A subset of the interrupts can be delivered - * with IR=1/DR=1, if AIL==2 and MSR.HV won't be changed by delivering - * it. Addresses are the same as the original interrupt addresses, but - * offset by 0xc000000000004000. 
- * It's impossible to receive interrupts below 0x300 via this mechanism. - * KVM: None of these traps are from the guest ; anything that escalated - * to HV=1 from HV=0 is delivered via real mode handlers. - */ - USE_FIXED_SECTION(virt_trampolines) /* * All code below __end_soft_masked is treated as soft-masked. If From 7673335e2a0b8e68a2a238773a34e287a089a8fe Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Wed, 28 Sep 2022 23:09:51 +1000 Subject: [PATCH 198/214] powerpc: Drops STABS_DEBUG from linker scripts No toolchain we support should be generating stabs debug information anymore. Drop the sections entirely from our linker scripts. We removed all the manual stabs annotations in commit 12318163737c ("powerpc/32: Remove remaining .stabs annotations"). Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220928130951.1732983-1-mpe@ellerman.id.au --- arch/powerpc/kernel/vdso/vdso32.lds.S | 1 - arch/powerpc/kernel/vdso/vdso64.lds.S | 1 - arch/powerpc/kernel/vmlinux.lds.S | 1 - 3 files changed, 3 deletions(-) diff --git a/arch/powerpc/kernel/vdso/vdso32.lds.S b/arch/powerpc/kernel/vdso/vdso32.lds.S index e0d19d74455f..bc0be274a9ac 100644 --- a/arch/powerpc/kernel/vdso/vdso32.lds.S +++ b/arch/powerpc/kernel/vdso/vdso32.lds.S @@ -78,7 +78,6 @@ SECTIONS __end = .; PROVIDE(end = .); - STABS_DEBUG DWARF_DEBUG ELF_DETAILS diff --git a/arch/powerpc/kernel/vdso/vdso64.lds.S b/arch/powerpc/kernel/vdso/vdso64.lds.S index 1a4a7bc4c815..744ae5363e6c 100644 --- a/arch/powerpc/kernel/vdso/vdso64.lds.S +++ b/arch/powerpc/kernel/vdso/vdso64.lds.S @@ -76,7 +76,6 @@ SECTIONS _end = .; PROVIDE(end = .); - STABS_DEBUG DWARF_DEBUG ELF_DETAILS diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S index c025c83dfdc3..7786e3ac7611 100644 --- a/arch/powerpc/kernel/vmlinux.lds.S +++ b/arch/powerpc/kernel/vmlinux.lds.S @@ -404,7 +404,6 @@ SECTIONS _end = . ; PROVIDE32 (end = .); - STABS_DEBUG DWARF_DEBUG ELF_DETAILS From d368e0c478a628f36680650f8d1d1634037b046e Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Wed, 7 Sep 2022 13:49:41 +0530 Subject: [PATCH 199/214] powerpc/mm/book3s/hash: Rename flush_tlb_pmd_range This function does the hash page table update. 
Hence rename it to indicate this better to avoid confusion with flush_pmd_tlb_range() Signed-off-by: Aneesh Kumar K.V [mpe: Drop unnecessary extern] Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220907081941.209501-1-aneesh.kumar@linux.ibm.com --- arch/powerpc/include/asm/book3s/64/tlbflush-hash.h | 4 +--- arch/powerpc/mm/book3s64/hash_pgtable.c | 2 +- arch/powerpc/mm/book3s64/hash_tlb.c | 2 +- 3 files changed, 3 insertions(+), 5 deletions(-) diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h b/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h index 8b762f282190..fab8332fe1ad 100644 --- a/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h +++ b/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h @@ -112,13 +112,11 @@ static inline void hash__flush_tlb_kernel_range(unsigned long start, struct mmu_gather; extern void hash__tlb_flush(struct mmu_gather *tlb); -void flush_tlb_pmd_range(struct mm_struct *mm, pmd_t *pmd, unsigned long addr); #ifdef CONFIG_PPC_64S_HASH_MMU /* Private function for use by PCI IO mapping code */ extern void __flush_hash_table_range(unsigned long start, unsigned long end); -extern void flush_tlb_pmd_range(struct mm_struct *mm, pmd_t *pmd, - unsigned long addr); +void flush_hash_table_pmd_range(struct mm_struct *mm, pmd_t *pmd, unsigned long addr); #else static inline void __flush_hash_table_range(unsigned long start, unsigned long end) { } #endif diff --git a/arch/powerpc/mm/book3s64/hash_pgtable.c b/arch/powerpc/mm/book3s64/hash_pgtable.c index 28332001bd87..747492edb75a 100644 --- a/arch/powerpc/mm/book3s64/hash_pgtable.c +++ b/arch/powerpc/mm/book3s64/hash_pgtable.c @@ -256,7 +256,7 @@ pmd_t hash__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long addres * the __collapse_huge_page_copy can result in copying * the old content. */ - flush_tlb_pmd_range(vma->vm_mm, &pmd, address); + flush_hash_table_pmd_range(vma->vm_mm, &pmd, address); return pmd; } diff --git a/arch/powerpc/mm/book3s64/hash_tlb.c b/arch/powerpc/mm/book3s64/hash_tlb.c index eb0bccaf221e..a64ea0a7ef96 100644 --- a/arch/powerpc/mm/book3s64/hash_tlb.c +++ b/arch/powerpc/mm/book3s64/hash_tlb.c @@ -221,7 +221,7 @@ void __flush_hash_table_range(unsigned long start, unsigned long end) local_irq_restore(flags); } -void flush_tlb_pmd_range(struct mm_struct *mm, pmd_t *pmd, unsigned long addr) +void flush_hash_table_pmd_range(struct mm_struct *mm, pmd_t *pmd, unsigned long addr) { pte_t *pte; pte_t *start_pte; From 7b31f7dadd7074fa70bb14a53bd286ffdfc98b04 Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Mon, 4 Jul 2022 12:08:51 +0530 Subject: [PATCH 200/214] powerpc/mm: Always update max/min_low_pfn in mem_topology_setup() For both CONFIG_NUMA enabled/disabled use mem_topology_setup() to update max/min_low_pfn. This also adds min_low_pfn update to CONFIG_NUMA which was initialized to zero before. 
(mpe: Though MEMORY_START is == 0 for PPC64=y which is all possible NUMA=y systems) Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220704063851.295482-1-aneesh.kumar@linux.ibm.com --- arch/powerpc/mm/numa.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c index 0801b2ce9b7d..b44ce71917d7 100644 --- a/arch/powerpc/mm/numa.c +++ b/arch/powerpc/mm/numa.c @@ -1160,6 +1160,9 @@ void __init mem_topology_setup(void) { int cpu; + max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT; + min_low_pfn = MEMORY_START >> PAGE_SHIFT; + /* * Linux/mm assumes node 0 to be online at boot. However this is not * true on PowerPC, where node 0 is similar to any other node, it @@ -1204,9 +1207,6 @@ void __init initmem_init(void) { int nid; - max_low_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT; - max_pfn = max_low_pfn; - memblock_dump_all(); for_each_online_node(nid) { From 7dd3a7b90bca2c12e2146a47d63cf69a2f5d7e89 Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Thu, 8 Sep 2022 12:54:40 +0530 Subject: [PATCH 201/214] powerpc/mm: Fix UBSAN warning reported on hugetlb Powerpc architecture supports 16GB hugetlb pages with hash translation. For 4K page size, this is implemented as a hugepage directory entry at PGD level and for 64K it is implemented as a huge page pte at PUD level With 16GB hugetlb size, offset within a page is greater than 32 bits. Hence switch to use unsigned long type when using hugepd_shift. In order to keep things simpler, we make sure we always use unsigned long type when using hugepd_shift() even though all the hugetlb page size won't require that. The hugetlb_free_p*d_range changes are all related to nohash usage where we can have multiple pgd entries pointing to the same hugepd entries. Hence on book3s64 where we can have > 4GB hugetlb page size we will always find more < next even if we compute the value of more correctly. Hence there is no functional change in this patch except that it fixes the below warning. UBSAN: shift-out-of-bounds in arch/powerpc/mm/hugetlbpage.c:499:21 shift exponent 34 is too large for 32-bit type 'int' CPU: 39 PID: 1673 Comm: a.out Not tainted 6.0.0-rc2-00327-gee88a56e8517-dirty #1 Call Trace: dump_stack_lvl+0x98/0xe0 (unreliable) ubsan_epilogue+0x18/0x70 __ubsan_handle_shift_out_of_bounds+0x1bc/0x390 hugetlb_free_pgd_range+0x5d8/0x600 free_pgtables+0x114/0x290 exit_mmap+0x150/0x550 mmput+0xcc/0x210 do_exit+0x420/0xdd0 do_group_exit+0x4c/0xd0 sys_exit_group+0x24/0x30 system_call_exception+0x250/0x600 system_call_common+0xec/0x250 Signed-off-by: Aneesh Kumar K.V [mpe: Drop generic change to be sent separately, change 1ULL to 1UL] Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220908072440.258301-1-aneesh.kumar@linux.ibm.com --- arch/powerpc/mm/hugetlbpage.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c index 8c3ea5300ac3..5852a86d990d 100644 --- a/arch/powerpc/mm/hugetlbpage.c +++ b/arch/powerpc/mm/hugetlbpage.c @@ -392,7 +392,7 @@ static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud, * single hugepage, but all of them point to * the same kmem cache that holds the hugepte. 
*/ - more = addr + (1 << hugepd_shift(*(hugepd_t *)pmd)); + more = addr + (1UL << hugepd_shift(*(hugepd_t *)pmd)); if (more > next) next = more; @@ -434,7 +434,7 @@ static void hugetlb_free_pud_range(struct mmu_gather *tlb, p4d_t *p4d, * single hugepage, but all of them point to * the same kmem cache that holds the hugepte. */ - more = addr + (1 << hugepd_shift(*(hugepd_t *)pud)); + more = addr + (1UL << hugepd_shift(*(hugepd_t *)pud)); if (more > next) next = more; @@ -496,7 +496,7 @@ void hugetlb_free_pgd_range(struct mmu_gather *tlb, * for a single hugepage, but all of them point to the * same kmem cache that holds the hugepte. */ - more = addr + (1 << hugepd_shift(*(hugepd_t *)pgd)); + more = addr + (1UL << hugepd_shift(*(hugepd_t *)pgd)); if (more > next) next = more; From d210ee3fdfe8584f84f8fdd0ac4a9895d023325b Mon Sep 17 00:00:00 2001 From: Lukas Bulwahn Date: Thu, 29 Sep 2022 12:15:02 +0200 Subject: [PATCH 202/214] powerpc/configs: Update config files for removed/renamed symbols Clean up config files by: - removing configs that were deleted in the past - removing configs not in tree and without recently pending patches - adding new configs that are replacements for old configs in the file For some detailed information, see: https://lore.kernel.org/kernel-janitors/20220929090645.1389-1-lukas.bulwahn@gmail.com/ Renamed: - CONFIG_PPC_PTDUMP -> CONFIG_GENERIC_PTDUMP e084728393a5 ("powerpc/ptdump: Convert powerpc to GENERIC_PTDUMP") Removed: - CONFIG_BLK_DEV_CRYPTOLOOP 47e9624616c8 ("block: remove support for cryptoloop and the xor transfer") - CONFIG_CRYPTO_RMD128 b21b9a5e0aef ("crypto: rmd128 - remove RIPE-MD 128 hash algorithm") - CONFIG_CRYPTO_RMD256 c15d4167f0b0 ("crypto: rmd256 - remove RIPE-MD 256 hash algorithm") - CONFIG_CRYPTO_RMD320 93f64202926f ("crypto: rmd320 - remove RIPE-MD 320 hash algorithm") - CONFIG_CRYPTO_SALSA20 663f63ee6d9c ("crypto: salsa20 - remove Salsa20 stream cipher algorithm") - CONFIG_CRYPTO_TGR192 87cd723f8978 ("crypto: tgr192 - remove Tiger 128/160/192 hash algorithms") - CONFIG_HARDENED_USERCOPY_PAGESPAN 1109a5d90701 ("usercopy: Remove HARDENED_USERCOPY_PAGESPAN") - CONFIG_RAPIDIO_TSI568, CONFIG_RAPIDIO_TSI57X 612d4904191f ("rapidio: remove not used code about RIO_VID_TUNDRA") - CONFIG_RAW_DRIVER 603e4922f1c8 ("remove the raw driver") - CONFIG_ROCKETPORT 3b00b6af7a5b ("tty: rocket, remove the driver") - CONFIG_ENABLE_MUST_CHECK 196793946264 ("Compiler Attributes: remove CONFIG_ENABLE_MUST_CHECK") Signed-off-by: Lukas Bulwahn [mpe: Add documentation of relevant commit for each symbol change] Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220929101502.32527-1-lukas.bulwahn@gmail.com --- arch/powerpc/configs/83xx/mpc837x_rdb_defconfig | 1 - arch/powerpc/configs/85xx/ge_imp3a_defconfig | 1 - arch/powerpc/configs/85xx/ppa8548_defconfig | 2 -- arch/powerpc/configs/cell_defconfig | 1 - arch/powerpc/configs/g5_defconfig | 1 - arch/powerpc/configs/mpc512x_defconfig | 1 - arch/powerpc/configs/mpc885_ads_defconfig | 2 +- arch/powerpc/configs/pasemi_defconfig | 1 - arch/powerpc/configs/pmac32_defconfig | 1 - arch/powerpc/configs/powernv_defconfig | 3 --- arch/powerpc/configs/ppc64_defconfig | 3 --- arch/powerpc/configs/ppc64e_defconfig | 3 --- arch/powerpc/configs/ppc6xx_defconfig | 7 ------- arch/powerpc/configs/ps3_defconfig | 1 - arch/powerpc/configs/pseries_defconfig | 3 --- arch/powerpc/configs/skiroot_defconfig | 2 -- arch/powerpc/configs/storcenter_defconfig | 1 - 17 files changed, 1 insertion(+), 33 deletions(-) diff --git 
a/arch/powerpc/configs/83xx/mpc837x_rdb_defconfig b/arch/powerpc/configs/83xx/mpc837x_rdb_defconfig index cbcae2a927e9..4e3373381ab6 100644 --- a/arch/powerpc/configs/83xx/mpc837x_rdb_defconfig +++ b/arch/powerpc/configs/83xx/mpc837x_rdb_defconfig @@ -77,6 +77,5 @@ CONFIG_NFS_FS=y CONFIG_NFS_V4=y CONFIG_ROOT_NFS=y CONFIG_CRC_T10DIF=y -# CONFIG_ENABLE_MUST_CHECK is not set CONFIG_CRYPTO_ECB=m CONFIG_CRYPTO_PCBC=m diff --git a/arch/powerpc/configs/85xx/ge_imp3a_defconfig b/arch/powerpc/configs/85xx/ge_imp3a_defconfig index f29c166998af..e6d878a44d33 100644 --- a/arch/powerpc/configs/85xx/ge_imp3a_defconfig +++ b/arch/powerpc/configs/85xx/ge_imp3a_defconfig @@ -74,7 +74,6 @@ CONFIG_MTD_PHYSMAP_OF=y CONFIG_MTD_RAW_NAND=y CONFIG_MTD_NAND_FSL_ELBC=y CONFIG_BLK_DEV_LOOP=m -CONFIG_BLK_DEV_CRYPTOLOOP=m CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_SIZE=131072 diff --git a/arch/powerpc/configs/85xx/ppa8548_defconfig b/arch/powerpc/configs/85xx/ppa8548_defconfig index 190978a5b7d5..4bd5f993d26a 100644 --- a/arch/powerpc/configs/85xx/ppa8548_defconfig +++ b/arch/powerpc/configs/85xx/ppa8548_defconfig @@ -7,9 +7,7 @@ CONFIG_RAPIDIO=y CONFIG_FSL_RIO=y CONFIG_RAPIDIO_DMA_ENGINE=y CONFIG_RAPIDIO_ENUM_BASIC=y -CONFIG_RAPIDIO_TSI57X=y CONFIG_RAPIDIO_CPS_XX=y -CONFIG_RAPIDIO_TSI568=y CONFIG_RAPIDIO_CPS_GEN2=y CONFIG_ADVANCED_OPTIONS=y CONFIG_LOWMEM_SIZE_BOOL=y diff --git a/arch/powerpc/configs/cell_defconfig b/arch/powerpc/configs/cell_defconfig index 7fd9e596ea33..06391cc2af3a 100644 --- a/arch/powerpc/configs/cell_defconfig +++ b/arch/powerpc/configs/cell_defconfig @@ -195,7 +195,6 @@ CONFIG_NLS_ISO8859_9=m CONFIG_NLS_ISO8859_13=m CONFIG_NLS_ISO8859_14=m CONFIG_NLS_ISO8859_15=m -# CONFIG_ENABLE_MUST_CHECK is not set CONFIG_MAGIC_SYSRQ=y CONFIG_DEBUG_KERNEL=y CONFIG_DEBUG_MUTEXES=y diff --git a/arch/powerpc/configs/g5_defconfig b/arch/powerpc/configs/g5_defconfig index 9d6212a8b195..71d9d112c0b6 100644 --- a/arch/powerpc/configs/g5_defconfig +++ b/arch/powerpc/configs/g5_defconfig @@ -119,7 +119,6 @@ CONFIG_INPUT_EVDEV=y # CONFIG_SERIO_I8042 is not set # CONFIG_SERIO_SERPORT is not set # CONFIG_HW_RANDOM is not set -CONFIG_RAW_DRIVER=y CONFIG_I2C_CHARDEV=y CONFIG_AGP=m CONFIG_AGP_UNINORTH=m diff --git a/arch/powerpc/configs/mpc512x_defconfig b/arch/powerpc/configs/mpc512x_defconfig index e75d3f3060c9..10fe061c5e6d 100644 --- a/arch/powerpc/configs/mpc512x_defconfig +++ b/arch/powerpc/configs/mpc512x_defconfig @@ -114,5 +114,4 @@ CONFIG_NFS_FS=y CONFIG_ROOT_NFS=y CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_ISO8859_1=y -# CONFIG_ENABLE_MUST_CHECK is not set # CONFIG_CRYPTO_HW is not set diff --git a/arch/powerpc/configs/mpc885_ads_defconfig b/arch/powerpc/configs/mpc885_ads_defconfig index 700115d85d6f..56b876e418e9 100644 --- a/arch/powerpc/configs/mpc885_ads_defconfig +++ b/arch/powerpc/configs/mpc885_ads_defconfig @@ -78,4 +78,4 @@ CONFIG_DEBUG_VM_PGTABLE=y CONFIG_DETECT_HUNG_TASK=y CONFIG_BDI_SWITCH=y CONFIG_PPC_EARLY_DEBUG=y -CONFIG_PPC_PTDUMP=y +CONFIG_GENERIC_PTDUMP=y diff --git a/arch/powerpc/configs/pasemi_defconfig b/arch/powerpc/configs/pasemi_defconfig index e00a703581c3..96aa5355911f 100644 --- a/arch/powerpc/configs/pasemi_defconfig +++ b/arch/powerpc/configs/pasemi_defconfig @@ -92,7 +92,6 @@ CONFIG_LEGACY_PTY_COUNT=4 CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y CONFIG_HW_RANDOM=y -CONFIG_RAW_DRIVER=y CONFIG_I2C_CHARDEV=y CONFIG_I2C_PASEMI=y CONFIG_SENSORS_LM85=y diff --git a/arch/powerpc/configs/pmac32_defconfig b/arch/powerpc/configs/pmac32_defconfig index 
13885ec563d1..019163c2571e 100644 --- a/arch/powerpc/configs/pmac32_defconfig +++ b/arch/powerpc/configs/pmac32_defconfig @@ -284,7 +284,6 @@ CONFIG_BOOTX_TEXT=y CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_MD4=m CONFIG_CRYPTO_SHA512=m -CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m CONFIG_CRYPTO_ANUBIS=m CONFIG_CRYPTO_BLOWFISH=m diff --git a/arch/powerpc/configs/powernv_defconfig b/arch/powerpc/configs/powernv_defconfig index 49f49c263935..c6910f5a6182 100644 --- a/arch/powerpc/configs/powernv_defconfig +++ b/arch/powerpc/configs/powernv_defconfig @@ -252,7 +252,6 @@ CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_GENERIC=y # CONFIG_VIRTIO_MENU is not set CONFIG_LIBNVDIMM=y -# CONFIG_ND_BLK is not set CONFIG_EXT2_FS=y CONFIG_EXT2_FS_XATTR=y CONFIG_EXT2_FS_POSIX_ACL=y @@ -325,13 +324,11 @@ CONFIG_CRYPTO_MD5_PPC=m CONFIG_CRYPTO_MICHAEL_MIC=m CONFIG_CRYPTO_SHA1_PPC=m CONFIG_CRYPTO_SHA256=y -CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m CONFIG_CRYPTO_ANUBIS=m CONFIG_CRYPTO_BLOWFISH=m CONFIG_CRYPTO_CAST6=m CONFIG_CRYPTO_KHAZAD=m -CONFIG_CRYPTO_SALSA20=m CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig index 6be0c43397b4..d6949a6c5b2b 100644 --- a/arch/powerpc/configs/ppc64_defconfig +++ b/arch/powerpc/configs/ppc64_defconfig @@ -215,7 +215,6 @@ CONFIG_HVC_RTAS=y CONFIG_HVCS=m CONFIG_VIRTIO_CONSOLE=m CONFIG_IBM_BSR=m -CONFIG_RAW_DRIVER=y CONFIG_I2C_CHARDEV=y CONFIG_I2C_AMD8111=y CONFIG_I2C_PASEMI=y @@ -344,13 +343,11 @@ CONFIG_CRYPTO_MD5_PPC=m CONFIG_CRYPTO_MICHAEL_MIC=m CONFIG_CRYPTO_SHA1_PPC=m CONFIG_CRYPTO_SHA256=y -CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m CONFIG_CRYPTO_ANUBIS=m CONFIG_CRYPTO_BLOWFISH=m CONFIG_CRYPTO_CAST6=m CONFIG_CRYPTO_KHAZAD=m -CONFIG_CRYPTO_SALSA20=m CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m diff --git a/arch/powerpc/configs/ppc64e_defconfig b/arch/powerpc/configs/ppc64e_defconfig index 5cf49a515f8e..f97a2d31bbf7 100644 --- a/arch/powerpc/configs/ppc64e_defconfig +++ b/arch/powerpc/configs/ppc64e_defconfig @@ -118,7 +118,6 @@ CONFIG_INPUT_MISC=y CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y # CONFIG_HW_RANDOM is not set -CONFIG_RAW_DRIVER=y CONFIG_I2C_CHARDEV=y CONFIG_I2C_AMD8111=y CONFIG_FB=y @@ -234,13 +233,11 @@ CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_HMAC=y CONFIG_CRYPTO_MICHAEL_MIC=m CONFIG_CRYPTO_SHA512=m -CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m CONFIG_CRYPTO_ANUBIS=m CONFIG_CRYPTO_BLOWFISH=m CONFIG_CRYPTO_CAST6=m CONFIG_CRYPTO_KHAZAD=m -CONFIG_CRYPTO_SALSA20=m CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m diff --git a/arch/powerpc/configs/ppc6xx_defconfig b/arch/powerpc/configs/ppc6xx_defconfig index 91967824272e..6dce852caaa5 100644 --- a/arch/powerpc/configs/ppc6xx_defconfig +++ b/arch/powerpc/configs/ppc6xx_defconfig @@ -323,7 +323,6 @@ CONFIG_PNP=y CONFIG_ISAPNP=y CONFIG_MAC_FLOPPY=m CONFIG_BLK_DEV_LOOP=m -CONFIG_BLK_DEV_CRYPTOLOOP=m CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_SIZE=16384 @@ -592,7 +591,6 @@ CONFIG_GAMEPORT_EMU10K1=m CONFIG_GAMEPORT_FM801=m # CONFIG_LEGACY_PTYS is not set CONFIG_SERIAL_NONSTANDARD=y -CONFIG_ROCKETPORT=m CONFIG_SYNCLINK_GT=m CONFIG_NOZOMI=m CONFIG_N_HDLC=m @@ -1109,13 +1107,9 @@ CONFIG_CRYPTO_XTS=m CONFIG_CRYPTO_HMAC=y CONFIG_CRYPTO_XCBC=m CONFIG_CRYPTO_MICHAEL_MIC=m -CONFIG_CRYPTO_RMD128=m CONFIG_CRYPTO_RMD160=m -CONFIG_CRYPTO_RMD256=m -CONFIG_CRYPTO_RMD320=m CONFIG_CRYPTO_SHA1=y CONFIG_CRYPTO_SHA512=m -CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m CONFIG_CRYPTO_ANUBIS=m 
CONFIG_CRYPTO_BLOWFISH=m @@ -1123,7 +1117,6 @@ CONFIG_CRYPTO_CAMELLIA=m CONFIG_CRYPTO_CAST6=m CONFIG_CRYPTO_FCRYPT=m CONFIG_CRYPTO_KHAZAD=m -CONFIG_CRYPTO_SALSA20=m CONFIG_CRYPTO_SEED=m CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_TEA=m diff --git a/arch/powerpc/configs/ps3_defconfig b/arch/powerpc/configs/ps3_defconfig index 2d9ac233da68..0a1b42c4f26a 100644 --- a/arch/powerpc/configs/ps3_defconfig +++ b/arch/powerpc/configs/ps3_defconfig @@ -165,6 +165,5 @@ CONFIG_RCU_CPU_STALL_TIMEOUT=60 # CONFIG_FTRACE is not set CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_MICHAEL_MIC=m -CONFIG_CRYPTO_SALSA20=m CONFIG_CRYPTO_LZO=m CONFIG_PRINTK_TIME=y diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig index 117643d20281..a25cf2ca5c7a 100644 --- a/arch/powerpc/configs/pseries_defconfig +++ b/arch/powerpc/configs/pseries_defconfig @@ -190,7 +190,6 @@ CONFIG_HVC_RTAS=y CONFIG_HVCS=m CONFIG_VIRTIO_CONSOLE=m CONFIG_IBM_BSR=m -CONFIG_RAW_DRIVER=y CONFIG_I2C_CHARDEV=y CONFIG_FB=y CONFIG_FIRMWARE_EDID=y @@ -305,13 +304,11 @@ CONFIG_CRYPTO_MD5_PPC=m CONFIG_CRYPTO_MICHAEL_MIC=m CONFIG_CRYPTO_SHA1_PPC=m CONFIG_CRYPTO_SHA256=y -CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m CONFIG_CRYPTO_ANUBIS=m CONFIG_CRYPTO_BLOWFISH=m CONFIG_CRYPTO_CAST6=m CONFIG_CRYPTO_KHAZAD=m -CONFIG_CRYPTO_SALSA20=m CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m diff --git a/arch/powerpc/configs/skiroot_defconfig b/arch/powerpc/configs/skiroot_defconfig index f491875700e8..e0964210f259 100644 --- a/arch/powerpc/configs/skiroot_defconfig +++ b/arch/powerpc/configs/skiroot_defconfig @@ -133,7 +133,6 @@ CONFIG_ACENIC_OMIT_TIGON_I=y # CONFIG_NET_VENDOR_AQUANTIA is not set # CONFIG_NET_VENDOR_ARC is not set # CONFIG_NET_VENDOR_ATHEROS is not set -# CONFIG_NET_VENDOR_AURORA is not set CONFIG_TIGON3=m CONFIG_BNX2X=m # CONFIG_NET_VENDOR_BROCADE is not set @@ -274,7 +273,6 @@ CONFIG_NLS_UTF8=y CONFIG_ENCRYPTED_KEYS=y CONFIG_SECURITY=y CONFIG_HARDENED_USERCOPY=y -CONFIG_HARDENED_USERCOPY_PAGESPAN=y CONFIG_FORTIFY_SOURCE=y CONFIG_SECURITY_LOCKDOWN_LSM=y CONFIG_SECURITY_LOCKDOWN_LSM_EARLY=y diff --git a/arch/powerpc/configs/storcenter_defconfig b/arch/powerpc/configs/storcenter_defconfig index 47dcfaddc1ac..7a978d396991 100644 --- a/arch/powerpc/configs/storcenter_defconfig +++ b/arch/powerpc/configs/storcenter_defconfig @@ -76,4 +76,3 @@ CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_ISO8859_1=y CONFIG_NLS_UTF8=y CONFIG_CRC_T10DIF=y -# CONFIG_ENABLE_MUST_CHECK is not set From d91c3f15fcaf90723ebdcd1c9172f9bb8ea4f09b Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Thu, 29 Sep 2022 15:15:17 +1000 Subject: [PATCH 203/214] powerpc/configs: Enable PPC_UV in powernv_defconfig Make sure the ultravisor code at least gets some build testing by enabling it in powernv_defconfig. 
Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220929051517.1903079-1-mpe@ellerman.id.au --- arch/powerpc/configs/powernv_defconfig | 3 +++ 1 file changed, 3 insertions(+) diff --git a/arch/powerpc/configs/powernv_defconfig b/arch/powerpc/configs/powernv_defconfig index c6910f5a6182..43e38f0fa5a7 100644 --- a/arch/powerpc/configs/powernv_defconfig +++ b/arch/powerpc/configs/powernv_defconfig @@ -50,6 +50,7 @@ CONFIG_CPU_IDLE=y CONFIG_HZ_100=y CONFIG_BINFMT_MISC=m CONFIG_PPC_TRANSACTIONAL_MEM=y +CONFIG_PPC_UV=y CONFIG_HOTPLUG_CPU=y CONFIG_KEXEC=y CONFIG_KEXEC_FILE=y @@ -65,6 +66,8 @@ CONFIG_DEFERRED_STRUCT_PAGE_INIT=y CONFIG_SCHED_SMT=y CONFIG_PM=y CONFIG_HOTPLUG_PCI=y +CONFIG_ZONE_DEVICE=y +CONFIG_DEVICE_PRIVATE=y CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y From 41dc056391b334fae646b55ee020bfa8f67b60c8 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Fri, 30 Sep 2022 18:27:04 +1000 Subject: [PATCH 204/214] powerpc: Add hardware description string Create a hardware description string, which we will use to record various details of the hardware platform we are running on. Print the accumulated description at boot, and use it to set the generic description which is printed in oopses. To begin with add ppc_md.name, aka the "machine description". Example output at boot with the full series applied: Linux version 6.0.0-rc2-gcc-11.1.0-00199-g893f9007a5ce-dirty (michael@alpine1-p1) (powerpc64-linux-gcc (GCC) 11.1.0, GNU ld (GNU Binutils) 2.36.1) #844 SMP Thu Sep 29 22:29:53 AEST 2022 Hardware name: IBM pSeries (emulated by qemu) POWER9 (raw) 0x4e1200 0xf000005 of:SLOF,git-5b4c5a pSeries printk: bootconsole [udbg0] enabled Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220930082709.55830-1-mpe@ellerman.id.au --- arch/powerpc/include/asm/setup.h | 2 ++ arch/powerpc/kernel/setup-common.c | 19 ++++++++++++++++++- 2 files changed, 20 insertions(+), 1 deletion(-) diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h index 85143849a586..e29e83f8a89c 100644 --- a/arch/powerpc/include/asm/setup.h +++ b/arch/powerpc/include/asm/setup.h @@ -88,6 +88,8 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4, unsigned long pp, unsigned long r6, unsigned long r7, unsigned long kbase); +extern struct seq_buf ppc_hw_desc; + #endif /* !__ASSEMBLY__ */ #endif /* _ASM_POWERPC_SETUP_H */ diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c index dd98f43bd685..6d041993a45d 100644 --- a/arch/powerpc/kernel/setup-common.c +++ b/arch/powerpc/kernel/setup-common.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include #include @@ -25,6 +26,7 @@ #include #include #include +#include #include #include #include @@ -588,6 +590,15 @@ static __init int add_pcspkr(void) device_initcall(add_pcspkr); #endif /* CONFIG_PCSPKR_PLATFORM */ +static char ppc_hw_desc_buf[128] __initdata; + +struct seq_buf ppc_hw_desc __initdata = { + .buffer = ppc_hw_desc_buf, + .size = sizeof(ppc_hw_desc_buf), + .len = 0, + .readpos = 0, +}; + static __init void probe_machine(void) { extern struct machdep_calls __machine_desc_start; @@ -628,7 +639,13 @@ static __init void probe_machine(void) for (;;); } - printk(KERN_INFO "Using %s machine description\n", ppc_md.name); + // Append the machine name to other info we've gathered + seq_buf_puts(&ppc_hw_desc, ppc_md.name); + + // Set the generic hardware description shown in oopses + dump_stack_set_arch_desc(ppc_hw_desc.buffer); + + pr_info("Hardware name: %s\n", 
ppc_hw_desc.buffer); } /* Match a class of boards, not a specific device configuration. */ From bd649d40e0f2ffa1e16b4dbb93dc627177410e78 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Fri, 30 Sep 2022 18:27:05 +1000 Subject: [PATCH 205/214] powerpc: Add PVR & CPU name to hardware description Add the PVR and CPU name to the hardware description, which is printed at boot and in case of an oops. eg: Hardware name: ... POWER8E (raw) 0x4b0201 Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220930082709.55830-2-mpe@ellerman.id.au --- arch/powerpc/kernel/prom.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index 2e7a04dab2f7..09f07cc57216 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c @@ -30,6 +30,7 @@ #include #include #include +#include #include #include @@ -819,6 +820,9 @@ void __init early_init_devtree(void *params) dt_cpu_ftrs_scan(); + // We can now add the CPU name & PVR to the hardware description + seq_buf_printf(&ppc_hw_desc, "%s 0x%04lx ", cur_cpu_spec->cpu_name, mfspr(SPRN_PVR)); + /* Retrieve CPU related informations from the flat tree * (altivec support, boot CPU ID, ...) */ From 48b7019b6abd029d3800620bb53f0ae3ca052441 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Fri, 30 Sep 2022 18:27:06 +1000 Subject: [PATCH 206/214] powerpc/64: Add logical PVR to the hardware description If we detect a logical PVR add that to the hardware description, which is printed at boot and in case of an oops. eg: Hardware name: ... 0xf000004 Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220930082709.55830-3-mpe@ellerman.id.au --- arch/powerpc/kernel/prom.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index 09f07cc57216..83fc72202838 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c @@ -390,8 +390,10 @@ static int __init early_init_dt_scan_cpus(unsigned long node, */ if (!dt_cpu_ftrs_in_use()) { prop = of_get_flat_dt_prop(node, "cpu-version", NULL); - if (prop && (be32_to_cpup(prop) & 0xff000000) == 0x0f000000) + if (prop && (be32_to_cpup(prop) & 0xff000000) == 0x0f000000) { identify_cpu(0, be32_to_cpup(prop)); + seq_buf_printf(&ppc_hw_desc, "0x%04x ", be32_to_cpup(prop)); + } check_cpu_feature_properties(node); check_cpu_features(node, "ibm,pa-features", ibm_pa_features, From 541229707970ff2ad3f7705b1dbd025d7cc9bc48 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Fri, 30 Sep 2022 18:27:07 +1000 Subject: [PATCH 207/214] powerpc: Add device-tree model to the hardware description Add the model of the machine we're on to the hardware description, which is printed at boot and in case of an oops. 
eg: Hardware name: IBM,8247-22L Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220930082709.55830-4-mpe@ellerman.id.au --- arch/powerpc/kernel/prom.c | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index 83fc72202838..1eed87d954ba 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c @@ -715,6 +715,23 @@ static void __init tm_init(void) static void tm_init(void) { } #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ +static int __init +early_init_dt_scan_model(unsigned long node, const char *uname, + int depth, void *data) +{ + const char *prop; + + if (depth != 0) + return 0; + + prop = of_get_flat_dt_prop(node, "model", NULL); + if (prop) + seq_buf_printf(&ppc_hw_desc, "%s ", prop); + + /* break now */ + return 1; +} + #ifdef CONFIG_PPC64 static void __init save_fscr_to_task(void) { @@ -743,6 +760,8 @@ void __init early_init_devtree(void *params) if (!early_init_dt_verify(params)) panic("BUG: Failed verifying flat device tree, bad version?"); + of_scan_flat_dt(early_init_dt_scan_model, NULL); + #ifdef CONFIG_PPC_RTAS /* Some machines might need RTAS info for debugging, grab it now. */ of_scan_flat_dt(early_init_dt_scan_rtas, NULL); From 37576cb0961fe9d3318c17e4e4bc5ecebf38e9bb Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Fri, 30 Sep 2022 18:27:08 +1000 Subject: [PATCH 208/214] powerpc/powernv: Add opal details to the hardware description Add OPAL version details to the hardware description, which is printed at boot and in case of an oops. eg: Hardware name: ... opal:v6.2 Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220930082709.55830-5-mpe@ellerman.id.au --- arch/powerpc/platforms/powernv/setup.c | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c index dac545aa0308..61ab2d38ff4b 100644 --- a/arch/powerpc/platforms/powernv/setup.c +++ b/arch/powerpc/platforms/powernv/setup.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include #include @@ -207,8 +208,29 @@ static void __init pnv_setup_arch(void) pnv_rng_init(); } +static void __init pnv_add_hw_description(void) +{ + struct device_node *dn; + const char *s; + + dn = of_find_node_by_path("/ibm,opal/firmware"); + if (!dn) + return; + + if (of_property_read_string(dn, "version", &s) == 0 || + of_property_read_string(dn, "git-id", &s) == 0) + seq_buf_printf(&ppc_hw_desc, "opal:%s ", s); + + if (of_property_read_string(dn, "mi-version", &s) == 0) + seq_buf_printf(&ppc_hw_desc, "mi:%s ", s); + + of_node_put(dn); +} + static void __init pnv_init(void) { + pnv_add_hw_description(); + /* * Initialize the LPC bus now so that legacy serial * ports can be found on it From 8535a1afff0f4f568eb589f3795a930ef3d483b0 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Fri, 30 Sep 2022 18:27:09 +1000 Subject: [PATCH 209/214] powerpc/pseries: Add firmware details to the hardware description Add firmware version details to the hardware description, which is printed at boot and in case of an oops. Use /hypervisor if we find it, though currently it only exists if we're running under qemu. Look for "ibm,powervm-partition" which is specified in PAPR+ v2.11 and tells us we're running under PowerVM. Failing that look for "ibm,fw-net-version" which is seen on PowerVM going back to at least Power6. eg: Hardware name: ... 
of:IBM,FW860.42 (SV860_138) hv:phyp Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20220930082709.55830-6-mpe@ellerman.id.au --- arch/powerpc/platforms/pseries/setup.c | 30 ++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c index 5e44c65a032c..8ef3270515a9 100644 --- a/arch/powerpc/platforms/pseries/setup.c +++ b/arch/powerpc/platforms/pseries/setup.c @@ -41,6 +41,7 @@ #include #include #include +#include #include #include @@ -1011,6 +1012,33 @@ static void __init pSeries_cmo_feature_init(void) pr_debug(" <- fw_cmo_feature_init()\n"); } +static void __init pseries_add_hw_description(void) +{ + struct device_node *dn; + const char *s; + + dn = of_find_node_by_path("/openprom"); + if (dn) { + if (of_property_read_string(dn, "model", &s) == 0) + seq_buf_printf(&ppc_hw_desc, "of:%s ", s); + + of_node_put(dn); + } + + dn = of_find_node_by_path("/hypervisor"); + if (dn) { + if (of_property_read_string(dn, "compatible", &s) == 0) + seq_buf_printf(&ppc_hw_desc, "hv:%s ", s); + + of_node_put(dn); + return; + } + + if (of_property_read_bool(of_root, "ibm,powervm-partition") || + of_property_read_bool(of_root, "ibm,fw-net-version")) + seq_buf_printf(&ppc_hw_desc, "hv:phyp "); +} + /* * Early initialization. Relocation is on but do not reference unbolted pages */ @@ -1018,6 +1046,8 @@ static void __init pseries_init(void) { pr_debug(" -> pseries_init()\n"); + pseries_add_hw_description(); + #ifdef CONFIG_HVC_CONSOLE if (firmware_has_feature(FW_FEATURE_LPAR)) hvc_vio_init_early(); From 8154850b28bd57a35ea73a7518ffcb9ccd5e43bc Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Tue, 4 Oct 2022 15:11:56 +1000 Subject: [PATCH 210/214] powerpc/64s/interrupt: Change must-hard-mask interrupt check from BUG to WARN This new assertion added is generally harmless and gets fixed up naturally, but it does indicate a problem with MSR manipulation somewhere. Fixes: c39fb71a54f0 ("powerpc/64s/interrupt: masked handler debug check for previous hard disable") Reported-by: Sachin Sant Signed-off-by: Nicholas Piggin Tested-by: Sachin Sant Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20221004051157.308999-1-npiggin@gmail.com --- arch/powerpc/kernel/exceptions-64s.S | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index fed983cc7ee0..87ce00766f52 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -2823,12 +2823,16 @@ masked_interrupt: #ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG /* * Ensure there was no previous MUST_HARD_MASK interrupt or - * HARD_DIS setting. + * HARD_DIS setting. If this does fire, the interrupt is still + * masked and MSR[EE] will be cleared on return, so no need to + * panic, but somebody probably enabled MSR[EE] under + * PACA_IRQ_HARD_DIS, mtmsr(mfmsr() | MSR_x) being a common + * cause. */ lbz r9,PACAIRQHAPPENED(r13) andi. 
r9,r9,(PACA_IRQ_MUST_HARD_MASK|PACA_IRQ_HARD_DIS) 0: tdnei r9,0 - EMIT_BUG_ENTRY 0b,__FILE__,__LINE__,0 + EMIT_WARN_ENTRY 0b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE) #endif lbz r9,PACAIRQHAPPENED(r13) or r9,r9,r10 From 0fa6831811f62cfc10415d731bcf9fde2647ad81 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Tue, 4 Oct 2022 15:11:57 +1000 Subject: [PATCH 211/214] powerpc/64: Fix msr_check_and_set/clear MSR[EE] race irq soft-masking means that when Linux irqs are disabled, the MSR[EE] value can change from 1 to 0 asynchronously: if a masked interrupt of the PACA_IRQ_MUST_HARD_MASK variety fires while irqs are disabled, the masked handler will return with MSR[EE]=0. This means a sequence like mtmsr(mfmsr() | MSR_FP) is racy if it can be called with local irqs disabled, unless a hard_irq_disable has been done. Reported-by: Sachin Sant Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20221004051157.308999-2-npiggin@gmail.com --- arch/powerpc/include/asm/hw_irq.h | 24 ++++++++++++++++++++++++ arch/powerpc/kernel/process.c | 4 ++-- 2 files changed, 26 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h index e8de249339d8..77fa88c2aed0 100644 --- a/arch/powerpc/include/asm/hw_irq.h +++ b/arch/powerpc/include/asm/hw_irq.h @@ -471,6 +471,30 @@ static inline void irq_soft_mask_regs_set_state(struct pt_regs *regs, unsigned l } #endif /* CONFIG_PPC64 */ +static inline unsigned long mtmsr_isync_irqsafe(unsigned long msr) +{ +#ifdef CONFIG_PPC64 + if (arch_irqs_disabled()) { + /* + * With soft-masking, MSR[EE] can change from 1 to 0 + * asynchronously when irqs are disabled, and we don't want to + * set MSR[EE] back to 1 here if that has happened. A race-free + * way to do this is ensure EE is already 0. Another way it + * could be done is with a RESTART_TABLE handler, but that's + * probably overkill here. + */ + msr &= ~MSR_EE; + mtmsr_isync(msr); + irq_soft_mask_set(IRQS_ALL_DISABLED); + local_paca->irq_happened |= PACA_IRQ_HARD_DIS; + } else +#endif + mtmsr_isync(msr); + + return msr; +} + + #define ARCH_IRQ_INIT_FLAGS IRQ_NOREQUEST #endif /* __ASSEMBLY__ */ diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 0fbda89cd1bb..37df0428e4fb 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -127,7 +127,7 @@ unsigned long notrace msr_check_and_set(unsigned long bits) newmsr |= MSR_VSX; if (oldmsr != newmsr) - mtmsr_isync(newmsr); + newmsr = mtmsr_isync_irqsafe(newmsr); return newmsr; } @@ -145,7 +145,7 @@ void notrace __msr_check_and_clear(unsigned long bits) newmsr &= ~MSR_VSX; if (oldmsr != newmsr) - mtmsr_isync(newmsr); + mtmsr_isync_irqsafe(newmsr); } EXPORT_SYMBOL(__msr_check_and_clear); From b2e82e495a528eed77c15f3923c2b049a21d7280 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Tue, 4 Oct 2022 23:29:52 +1000 Subject: [PATCH 212/214] powerpc/64s/interrupt: Fix stack frame regs marker The value of the stack frame regs marker that gets saved on the stack in interrupt entry code does not match the regs marker value, which breaks stack frame marker matching. This stray instruction looks to have been introduced in a mismerge. 
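For illustration only (a sketch, not part of the code change itself): the mismatch can be seen by computing what the stray rldimi leaves behind, assuming the current 32-bit marker value is ASCII "regs" (0x72656773); the exact constant is an assumption here, only the 32-bit width is taken from the series.

  #include <stdio.h>

  int main(void)
  {
          /* Assumed 32-bit STACK_FRAME_REGS_MARKER value: ASCII "regs" */
          unsigned long long marker = 0x72656773ULL;

          /* rldimi r11,r11,32,0 copies the low word into the high word */
          unsigned long long stored = (marker << 32) | marker;

          printf("stored = 0x%016llx\n", stored);  /* 0x7265677372656773 */
          printf("marker = 0x%016llx\n", marker);  /* 0x0000000072656773 */
          printf("match  = %d\n", stored == marker);
          return 0;
  }

The stored value no longer equals the marker the unwinder compares against, which is why stack frame marker matching breaks.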
Fixes: bf75a3258a403 ("powerpc/64s/interrupt: move early boot ILE fixup into a macro") Signed-off-by: Nicholas Piggin [mpe: Mismerge by yours truly -_-] Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20221004132952.984341-1-npiggin@gmail.com --- arch/powerpc/kernel/exceptions-64s.S | 1 - 1 file changed, 1 deletion(-) diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index 87ce00766f52..5381a43e50fe 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -590,7 +590,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR) std r9,_TRAP(r1) /* set trap number */ li r10,0 LOAD_REG_IMMEDIATE(r11, STACK_FRAME_REGS_MARKER) - rldimi r11, r11, 32, 0 std r10,RESULT(r1) /* clear regs->result */ std r11,STACK_FRAME_OVERHEAD-16(r1) /* mark the frame */ .endm From 94746890202cf18e5266b4de77895243e55b0a79 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Thu, 6 Oct 2022 23:34:17 +1100 Subject: [PATCH 213/214] powerpc: Don't add __powerpc_ prefix to syscall entry points When using syscall wrappers the __SYSCALL_DEFINEx() and related macros add a "__powerpc_" prefix to all syscall entry points. So for example sys_mmap becomes __powerpc_sys_mmap. This risks breaking workflows and tools that expect the old naming scheme. At a minimum setting a breakpoint on eg. sys_mmap with gdb no longer works. There seems to be no compelling reason to add the "__powerpc_" prefix, other than that it follows what some other arches do (x86, arm64, s390). But unlike other arches powerpc doesn't always enable syscall wrappers, so the syscall entry points can change name depending on CONFIG options. For those reasons drop the "__powerpc_" prefix, reverting to the existing naming. Doing so reveals two prototypes in signal.h that have the incorrect type when syscall wrappers are enabled. There are already prototypes for both functions in syscalls.h, so drop the ones from signal.h. Fixes: 7e92e01b7245 ("powerpc: Provide syscall wrapper") Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20221006135940.1223988-1-mpe@ellerman.id.au --- arch/powerpc/include/asm/syscall_wrapper.h | 18 ++++++++---------- arch/powerpc/include/asm/syscalls.h | 2 +- arch/powerpc/kernel/signal.h | 3 --- arch/powerpc/kernel/systbl.c | 3 +-- 4 files changed, 10 insertions(+), 16 deletions(-) diff --git a/arch/powerpc/include/asm/syscall_wrapper.h b/arch/powerpc/include/asm/syscall_wrapper.h index 75b41b173f7a..67486c67e8a2 100644 --- a/arch/powerpc/include/asm/syscall_wrapper.h +++ b/arch/powerpc/include/asm/syscall_wrapper.h @@ -16,11 +16,11 @@ struct pt_regs; ,,regs->gpr[6],,regs->gpr[7],,regs->gpr[8]) #define __SYSCALL_DEFINEx(x, name, ...) 
\ - long __powerpc_sys##name(const struct pt_regs *regs); \ - ALLOW_ERROR_INJECTION(__powerpc_sys##name, ERRNO); \ + long sys##name(const struct pt_regs *regs); \ + ALLOW_ERROR_INJECTION(sys##name, ERRNO); \ static long __se_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \ static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)); \ - long __powerpc_sys##name(const struct pt_regs *regs) \ + long sys##name(const struct pt_regs *regs) \ { \ return __se_sys##name(SC_POWERPC_REGS_TO_ARGS(x,__VA_ARGS__)); \ } \ @@ -35,17 +35,15 @@ struct pt_regs; #define SYSCALL_DEFINE0(sname) \ SYSCALL_METADATA(_##sname, 0); \ - long __powerpc_sys_##sname(const struct pt_regs *__unused); \ - ALLOW_ERROR_INJECTION(__powerpc_sys_##sname, ERRNO); \ - long __powerpc_sys_##sname(const struct pt_regs *__unused) + long sys_##sname(const struct pt_regs *__unused); \ + ALLOW_ERROR_INJECTION(sys_##sname, ERRNO); \ + long sys_##sname(const struct pt_regs *__unused) #define COND_SYSCALL(name) \ - long __powerpc_sys_##name(const struct pt_regs *regs); \ - long __weak __powerpc_sys_##name(const struct pt_regs *regs) \ + long sys_##name(const struct pt_regs *regs); \ + long __weak sys_##name(const struct pt_regs *regs) \ { \ return sys_ni_syscall(); \ } -#define SYS_NI(name) SYSCALL_ALIAS(__powerpc_sys_##name, sys_ni_posix_timers); - #endif // __ASM_POWERPC_SYSCALL_WRAPPER_H diff --git a/arch/powerpc/include/asm/syscalls.h b/arch/powerpc/include/asm/syscalls.h index 49bbc3e0733d..9840d572da55 100644 --- a/arch/powerpc/include/asm/syscalls.h +++ b/arch/powerpc/include/asm/syscalls.h @@ -124,7 +124,7 @@ long sys_ppc_fadvise64_64(int fd, int advice, #define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, native) #define __SYSCALL(nr, entry) \ - long __powerpc_##entry(const struct pt_regs *regs); + long entry(const struct pt_regs *regs); #ifdef CONFIG_PPC64 #include diff --git a/arch/powerpc/kernel/signal.h b/arch/powerpc/kernel/signal.h index 618aeccdf691..a429c57ed433 100644 --- a/arch/powerpc/kernel/signal.h +++ b/arch/powerpc/kernel/signal.h @@ -196,9 +196,6 @@ extern int handle_rt_signal64(struct ksignal *ksig, sigset_t *set, #else /* CONFIG_PPC64 */ -extern long sys_rt_sigreturn(void); -extern long sys_sigreturn(void); - static inline int handle_rt_signal64(struct ksignal *ksig, sigset_t *set, struct task_struct *tsk) { diff --git a/arch/powerpc/kernel/systbl.c b/arch/powerpc/kernel/systbl.c index 9d7c5a596171..4305f2a2162f 100644 --- a/arch/powerpc/kernel/systbl.c +++ b/arch/powerpc/kernel/systbl.c @@ -20,8 +20,7 @@ #undef __SYSCALL #ifdef CONFIG_ARCH_HAS_SYSCALL_WRAPPER -#define __SYSCALL(nr, entry) [nr] = __powerpc_##entry, -#define __powerpc_sys_ni_syscall sys_ni_syscall +#define __SYSCALL(nr, entry) [nr] = entry, #else /* * Coerce syscall handlers with arbitrary parameters to common type From 376b3275c19f83d373e841e9af2d7658693190b9 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Fri, 7 Oct 2022 00:33:45 +1000 Subject: [PATCH 214/214] KVM: PPC: Book3S HV: Fix stack frame regs marker The hard-coded marker is out of date now, fix it using the nice define. 
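For illustration only (a sketch, not part of the code change itself): the hard-coded literal in the diff below is simply the ASCII string "regshere", the old 64-bit marker value, which can be checked by decoding its bytes.

  #include <stdio.h>

  int main(void)
  {
          unsigned long long old_marker = 0x7265677368657265ULL;

          /* Bytes 72 65 67 73 68 65 72 65 spell out "regshere" */
          for (int shift = 56; shift >= 0; shift -= 8)
                  putchar((old_marker >> shift) & 0xff);
          putchar('\n');

          return 0;
  }

Since the commit referenced in the Fixes tag switched STACK_FRAME_REGS_MARKER to a 32-bit immediate, the stale 64-bit literal presumably no longer matches the marker checked elsewhere, hence switching to the define.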
Fixes: 17773afdcd15 ("powerpc/64: use 32-bit immediate for STACK_FRAME_REGS_MARKER") Reported-by: Joel Stanley Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20221006143345.129077-1-npiggin@gmail.com --- arch/powerpc/kvm/book3s_hv_rmhandlers.S | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index c984021e62c8..37f50861dd98 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -2728,7 +2728,7 @@ kvmppc_bad_host_intr: std r5, _XER(r1) std r6, SOFTE(r1) LOAD_PACA_TOC() - LOAD_REG_IMMEDIATE(3, 0x7265677368657265) + LOAD_REG_IMMEDIATE(3, STACK_FRAME_REGS_MARKER) std r3, STACK_FRAME_OVERHEAD-16(r1) /*