powerpc updates for 6.12
Merge tag 'powerpc-6.12-1' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux

Pull powerpc updates from Michael Ellerman:

 - Reduce alignment constraints on STRICT_KERNEL_RWX and speed-up TLB
   misses on 8xx and 603

 - Replace kretprobe code with rethook and enable fprobe

 - Remove the "fast endian switch" syscall

 - Handle DLPAR device tree updates in kernel, allowing the deprecation
   of the binary /proc/powerpc/ofdt interface

Thanks to Abhishek Dubey, Alex Shi, Benjamin Gray, Christophe Leroy,
Gaosheng Cui, Gautam Menghani, Geert Uytterhoeven, Haren Myneni,
Hari Bathini, Huang Xiaojia, Jinjie Ruan, Madhavan Srinivasan,
Miguel Ojeda, Mina Almasry, Narayana Murty N, Naveen Rao,
Rob Herring (Arm), Scott Cheloha, Segher Boessenkool, Stephen Rothwell,
Thomas Zimmermann, Uwe Kleine-König, Vaibhav Jain, and Zhang Zekun.

* tag 'powerpc-6.12-1' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux: (59 commits)
  powerpc/atomic: Use YZ constraints for DS-form instructions
  MAINTAINERS: powerpc: Add Maddy
  powerpc: Switch back to struct platform_driver::remove()
  powerpc/pseries/eeh: Fix pseries_eeh_err_inject
  selftests/powerpc: Allow building without static libc
  macintosh/via-pmu: register_pmu_pm_ops() can be __init
  powerpc: Stop using no_llseek
  powerpc/64s: Remove the "fast endian switch" syscall
  powerpc/mm/64s: Restrict THP to Radix or HPT w/64K pages
  powerpc/mm/64s: Move THP reqs into a separate symbol
  powerpc/64s: Make mmu_hash_ops __ro_after_init
  powerpc: Replace kretprobe code with rethook on powerpc
  powerpc: pseries: Constify struct kobj_type
  powerpc: powernv: Constify struct kobj_type
  powerpc: Constify struct kobj_type
  powerpc/pseries/dlpar: Add device tree nodes for DLPAR IO add
  powerpc/pseries/dlpar: Remove device tree node for DLPAR IO remove
  powerpc/pseries: Use correct data types from pseries_hp_errorlog struct
  powerpc/vdso: Inconditionally use CFUNC macro
  powerpc/32: Implement validation of emergency stack
  ...
commit 3a7101e9b2

CREDITS | 6
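One recurring theme in the diff below is the code-patching rework: alongside patch_instruction(), the series adds data-patching helpers patch_uint() and patch_ulong() (with patch_u32/patch_u64 aliases) that refuse unaligned addresses. As a rough, hedged illustration of the intended use, modeled on the self-test added later in this diff (the function and variable names here are made up, not part of the patch):

/*
 * Illustrative sketch only, not part of this merge. It shows how the
 * patch_uint()/patch_ulong() helpers introduced below are meant to be
 * called; example_patch_data(), word and dword are hypothetical names.
 */
#include <asm/code-patching.h>

static int example_patch_data(unsigned int *word, unsigned long *dword)
{
	int err;

	/* Patch a 32-bit quantity; the address must be 4-byte aligned,
	 * otherwise patch_uint() returns -EINVAL. */
	err = patch_uint(word, 0xC0C1C2C3);
	if (err)
		return err;

	/* Patch an unsigned long (8 bytes on 64-bit); it too must be
	 * naturally aligned. */
	return patch_ulong(dword, 0xD0D1D2D3UL);
}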
@@ -378,6 +378,9 @@ S: 1549 Hiironen Rd.
 S: Brimson, MN 55602
 S: USA

+N: Arnd Bergmann
+D: Maintainer of Cell Broadband Engine Architecture
+
 N: Hennus Bergman
 P: 1024/77D50909 76 99 FD 31 91 E1 96 1C 90 BB 22 80 62 F6 BD 63
 D: Author and maintainer of the QIC-02 tape driver
@@ -1869,6 +1872,9 @@ S: K osmidomkum 723
 S: 160 00 Praha 6
 S: Czech Republic

+N: Jeremy Kerr
+D: Maintainer of SPU File System
+
 N: Michael Kerrisk
 E: mtk.manpages@gmail.com
 W: https://man7.org/
@@ -5145,10 +5145,8 @@ F: Documentation/devicetree/bindings/media/cec/cec-gpio.yaml
 F: drivers/media/cec/platform/cec-gpio/

 CELL BROADBAND ENGINE ARCHITECTURE
-M: Arnd Bergmann <arnd@arndb.de>
 L: linuxppc-dev@lists.ozlabs.org
-S: Supported
-W: http://www.ibm.com/developerworks/power/cell/
+S: Orphan
 F: arch/powerpc/include/asm/cell*.h
 F: arch/powerpc/include/asm/spu*.h
 F: arch/powerpc/include/uapi/asm/spu*.h
@@ -12995,6 +12993,7 @@ M: Michael Ellerman <mpe@ellerman.id.au>
 R: Nicholas Piggin <npiggin@gmail.com>
 R: Christophe Leroy <christophe.leroy@csgroup.eu>
 R: Naveen N Rao <naveen@kernel.org>
+R: Madhavan Srinivasan <maddy@linux.ibm.com>
 L: linuxppc-dev@lists.ozlabs.org
 S: Supported
 W: https://github.com/linuxppc/wiki/wiki
@@ -21672,10 +21671,8 @@ F: include/linux/spmi.h
 F: include/trace/events/spmi.h

 SPU FILE SYSTEM
-M: Jeremy Kerr <jk@ozlabs.org>
 L: linuxppc-dev@lists.ozlabs.org
-S: Supported
-W: http://www.ibm.com/developerworks/power/cell/
+S: Orphan
 F: Documentation/filesystems/spufs/spufs.rst
 F: arch/powerpc/platforms/cell/spufs/
@@ -269,6 +269,7 @@ config PPC
 	select HAVE_PERF_EVENTS_NMI if PPC64
 	select HAVE_PERF_REGS
 	select HAVE_PERF_USER_STACK_DUMP
+	select HAVE_RETHOOK if KPROBES
 	select HAVE_REGS_AND_STACK_ACCESS_API
 	select HAVE_RELIABLE_STACKTRACE
 	select HAVE_RSEQ
@@ -854,8 +855,8 @@ config DATA_SHIFT_BOOL
 	bool "Set custom data alignment"
 	depends on ADVANCED_OPTIONS
 	depends on STRICT_KERNEL_RWX || DEBUG_PAGEALLOC || KFENCE
-	depends on PPC_BOOK3S_32 || (PPC_8xx && !PIN_TLB_DATA && !STRICT_KERNEL_RWX) || \
-		   PPC_85xx
+	depends on (PPC_8xx && !PIN_TLB_DATA && (!STRICT_KERNEL_RWX || !PIN_TLB_TEXT)) || \
+		   PPC_BOOK3S_32 || PPC_85xx
 	help
 	  This option allows you to set the kernel data alignment. When
 	  RAM is mapped by blocks, the alignment needs to fit the size and
@@ -871,9 +872,9 @@ config DATA_SHIFT
 	range 20 24 if (STRICT_KERNEL_RWX || DEBUG_PAGEALLOC || KFENCE) && PPC_85xx
 	default 22 if STRICT_KERNEL_RWX && PPC_BOOK3S_32
 	default 18 if (DEBUG_PAGEALLOC || KFENCE) && PPC_BOOK3S_32
-	default 23 if STRICT_KERNEL_RWX && PPC_8xx
-	default 23 if (DEBUG_PAGEALLOC || KFENCE) && PPC_8xx && PIN_TLB_DATA
-	default 19 if (DEBUG_PAGEALLOC || KFENCE) && PPC_8xx
+	default 23 if (STRICT_KERNEL_RWX || DEBUG_PAGEALLOC || KFENCE) && PPC_8xx && \
+		   (PIN_TLB_DATA || PIN_TLB_TEXT)
+	default 19 if (STRICT_KERNEL_RWX || DEBUG_PAGEALLOC || KFENCE) && PPC_8xx
 	default 24 if STRICT_KERNEL_RWX && PPC_85xx
 	default PAGE_SHIFT
 	help
@@ -1274,8 +1275,27 @@ config TASK_SIZE_BOOL
 config TASK_SIZE
 	hex "Size of user task space" if TASK_SIZE_BOOL
 	default "0x80000000" if PPC_8xx
-	default "0xb0000000" if PPC_BOOK3S_32
+	default "0xb0000000" if PPC_BOOK3S_32 && EXECMEM
 	default "0xc0000000"
+
+config MODULES_SIZE_BOOL
+	bool "Set custom size for modules/execmem area"
+	depends on EXECMEM && ADVANCED_OPTIONS
+	help
+	  This option allows you to set the size of kernel virtual address
+	  space dedicated for modules/execmem.
+	  For the time being it is only for 8xx and book3s/32. Other
+	  platform share it with vmalloc space.
+
+	  Say N here unless you know what you are doing.
+
+config MODULES_SIZE
+	int "Size of modules/execmem area (In Mbytes)" if MODULES_SIZE_BOOL
+	range 1 256 if EXECMEM
+	default 64 if EXECMEM && PPC_BOOK3S_32
+	default 32 if EXECMEM && PPC_8xx
+	default 0
+
 endmenu

 if PPC64
@@ -379,12 +379,6 @@ config FAIL_IOMMU

 	  If you are unsure, say N.

-config PPC_FAST_ENDIAN_SWITCH
-	bool "Deprecated fast endian-switch syscall"
-	depends on DEBUG_KERNEL && PPC_BOOK3S_64
-	help
-	  If you're unsure what this is, say N.
-
 config KASAN_SHADOW_OFFSET
 	hex
 	depends on KASAN
@ -93,6 +93,7 @@ CONFIG_MEMORY_HOTREMOVE=y
|
||||
CONFIG_KSM=y
|
||||
CONFIG_TRANSPARENT_HUGEPAGE=y
|
||||
CONFIG_MEM_SOFT_DIRTY=y
|
||||
CONFIG_DEFERRED_STRUCT_PAGE_INIT=y
|
||||
CONFIG_ZONE_DEVICE=y
|
||||
CONFIG_NET=y
|
||||
CONFIG_PACKET=y
|
||||
|
@ -26,19 +26,23 @@
|
||||
#define PPC_MIN_STKFRM 112
|
||||
|
||||
#ifdef __BIG_ENDIAN__
|
||||
#define LHZX_BE stringify_in_c(lhzx)
|
||||
#define LWZX_BE stringify_in_c(lwzx)
|
||||
#define LDX_BE stringify_in_c(ldx)
|
||||
#define STWX_BE stringify_in_c(stwx)
|
||||
#define STDX_BE stringify_in_c(stdx)
|
||||
#else
|
||||
#define LHZX_BE stringify_in_c(lhbrx)
|
||||
#define LWZX_BE stringify_in_c(lwbrx)
|
||||
#define LDX_BE stringify_in_c(ldbrx)
|
||||
#define STWX_BE stringify_in_c(stwbrx)
|
||||
#define STDX_BE stringify_in_c(stdbrx)
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_CC_IS_CLANG
|
||||
#define DS_FORM_CONSTRAINT "Z<>"
|
||||
#else
|
||||
#define DS_FORM_CONSTRAINT "YZ<>"
|
||||
#endif
|
||||
|
||||
#else /* 32-bit */
|
||||
|
||||
/* operations for longs and pointers */
|
||||
|
@ -11,6 +11,7 @@
|
||||
#include <asm/cmpxchg.h>
|
||||
#include <asm/barrier.h>
|
||||
#include <asm/asm-const.h>
|
||||
#include <asm/asm-compat.h>
|
||||
|
||||
/*
|
||||
* Since *_return_relaxed and {cmp}xchg_relaxed are implemented with
|
||||
@ -197,7 +198,7 @@ static __inline__ s64 arch_atomic64_read(const atomic64_t *v)
|
||||
if (IS_ENABLED(CONFIG_PPC_KERNEL_PREFIXED))
|
||||
__asm__ __volatile__("ld %0,0(%1)" : "=r"(t) : "b"(&v->counter));
|
||||
else
|
||||
__asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m<>"(v->counter));
|
||||
__asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : DS_FORM_CONSTRAINT (v->counter));
|
||||
|
||||
return t;
|
||||
}
|
||||
@ -208,7 +209,7 @@ static __inline__ void arch_atomic64_set(atomic64_t *v, s64 i)
|
||||
if (IS_ENABLED(CONFIG_PPC_KERNEL_PREFIXED))
|
||||
__asm__ __volatile__("std %1,0(%2)" : "=m"(v->counter) : "r"(i), "b"(&v->counter));
|
||||
else
|
||||
__asm__ __volatile__("std%U0%X0 %1,%0" : "=m<>"(v->counter) : "r"(i));
|
||||
__asm__ __volatile__("std%U0%X0 %1,%0" : "=" DS_FORM_CONSTRAINT (v->counter) : "r"(i));
|
||||
}
|
||||
|
||||
#define ATOMIC64_OP(op, asm_op) \
|
||||
|
@ -196,7 +196,8 @@ void unmap_kernel_page(unsigned long va);
|
||||
#endif
|
||||
|
||||
#define MODULES_END ALIGN_DOWN(PAGE_OFFSET, SZ_256M)
|
||||
#define MODULES_VADDR (MODULES_END - SZ_256M)
|
||||
#define MODULES_SIZE (CONFIG_MODULES_SIZE * SZ_1M)
|
||||
#define MODULES_VADDR (MODULES_END - MODULES_SIZE)
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
#include <linux/sched.h>
|
||||
|
@ -74,6 +74,26 @@
|
||||
#define remap_4k_pfn(vma, addr, pfn, prot) \
|
||||
remap_pfn_range((vma), (addr), (pfn), PAGE_SIZE, (prot))
|
||||
|
||||
/*
|
||||
* With 4K page size the real_pte machinery is all nops.
|
||||
*/
|
||||
#define __real_pte(e, p, o) ((real_pte_t){(e)})
|
||||
#define __rpte_to_pte(r) ((r).pte)
|
||||
#define __rpte_to_hidx(r,index) (pte_val(__rpte_to_pte(r)) >> H_PAGE_F_GIX_SHIFT)
|
||||
|
||||
#define pte_iterate_hashed_subpages(rpte, psize, va, index, shift) \
|
||||
do { \
|
||||
index = 0; \
|
||||
shift = mmu_psize_defs[psize].shift; \
|
||||
|
||||
#define pte_iterate_hashed_end() } while(0)
|
||||
|
||||
/*
|
||||
* We expect this to be called only for user addresses or kernel virtual
|
||||
* addresses other than the linear mapping.
|
||||
*/
|
||||
#define pte_pagesize_index(mm, addr, pte) MMU_PAGE_4K
|
||||
|
||||
/*
|
||||
* 4K PTE format is different from 64K PTE format. Saving the hash_slot is just
|
||||
* a matter of returning the PTE bits that need to be modified. On 64K PTE,
|
||||
|
@ -330,32 +330,6 @@ static inline unsigned long pud_leaf_size(pud_t pud)
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
|
||||
/*
|
||||
* This is the default implementation of various PTE accessors, it's
|
||||
* used in all cases except Book3S with 64K pages where we have a
|
||||
* concept of sub-pages
|
||||
*/
|
||||
#ifndef __real_pte
|
||||
|
||||
#define __real_pte(e, p, o) ((real_pte_t){(e)})
|
||||
#define __rpte_to_pte(r) ((r).pte)
|
||||
#define __rpte_to_hidx(r,index) (pte_val(__rpte_to_pte(r)) >> H_PAGE_F_GIX_SHIFT)
|
||||
|
||||
#define pte_iterate_hashed_subpages(rpte, psize, va, index, shift) \
|
||||
do { \
|
||||
index = 0; \
|
||||
shift = mmu_psize_defs[psize].shift; \
|
||||
|
||||
#define pte_iterate_hashed_end() } while(0)
|
||||
|
||||
/*
|
||||
* We expect this to be called only for user addresses or kernel virtual
|
||||
* addresses other than the linear mapping.
|
||||
*/
|
||||
#define pte_pagesize_index(mm, addr, pte) MMU_PAGE_4K
|
||||
|
||||
#endif /* __real_pte */
|
||||
|
||||
static inline unsigned long pte_update(struct mm_struct *mm, unsigned long addr,
|
||||
pte_t *ptep, unsigned long clr,
|
||||
unsigned long set, int huge)
|
||||
|
@ -76,6 +76,43 @@ int patch_instruction(u32 *addr, ppc_inst_t instr);
|
||||
int raw_patch_instruction(u32 *addr, ppc_inst_t instr);
|
||||
int patch_instructions(u32 *addr, u32 *code, size_t len, bool repeat_instr);
|
||||
|
||||
/*
|
||||
* The data patching functions patch_uint() and patch_ulong(), etc., must be
|
||||
* called on aligned addresses.
|
||||
*
|
||||
* The instruction patching functions patch_instruction() and similar must be
|
||||
* called on addresses satisfying instruction alignment requirements.
|
||||
*/
|
||||
|
||||
#ifdef CONFIG_PPC64
|
||||
|
||||
int patch_uint(void *addr, unsigned int val);
|
||||
int patch_ulong(void *addr, unsigned long val);
|
||||
|
||||
#define patch_u64 patch_ulong
|
||||
|
||||
#else
|
||||
|
||||
static inline int patch_uint(void *addr, unsigned int val)
|
||||
{
|
||||
if (!IS_ALIGNED((unsigned long)addr, sizeof(unsigned int)))
|
||||
return -EINVAL;
|
||||
|
||||
return patch_instruction(addr, ppc_inst(val));
|
||||
}
|
||||
|
||||
static inline int patch_ulong(void *addr, unsigned long val)
|
||||
{
|
||||
if (!IS_ALIGNED((unsigned long)addr, sizeof(unsigned long)))
|
||||
return -EINVAL;
|
||||
|
||||
return patch_instruction(addr, ppc_inst(val));
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
#define patch_u32 patch_uint
|
||||
|
||||
static inline unsigned long patch_site_addr(s32 *site)
|
||||
{
|
||||
return (unsigned long)site + *site;
|
||||
|
@ -308,6 +308,7 @@ int eeh_pe_reset(struct eeh_pe *pe, int option, bool include_passed);
|
||||
int eeh_pe_configure(struct eeh_pe *pe);
|
||||
int eeh_pe_inject_err(struct eeh_pe *pe, int type, int func,
|
||||
unsigned long addr, unsigned long mask);
|
||||
int eeh_pe_inject_mmio_error(struct pci_dev *pdev);
|
||||
|
||||
/**
|
||||
* EEH_POSSIBLE_ERROR() -- test for possible MMIO failure.
|
||||
|
@ -116,9 +116,6 @@ static inline bool need_extra_context(struct mm_struct *mm, unsigned long ea)
|
||||
}
|
||||
#endif
|
||||
|
||||
extern int use_cop(unsigned long acop, struct mm_struct *mm);
|
||||
extern void drop_cop(unsigned long acop, struct mm_struct *mm);
|
||||
|
||||
#ifdef CONFIG_PPC_BOOK3S_64
|
||||
static inline void inc_mm_active_cpus(struct mm_struct *mm)
|
||||
{
|
||||
|
@ -170,8 +170,9 @@
|
||||
|
||||
#define mmu_linear_psize MMU_PAGE_8M
|
||||
|
||||
#define MODULES_VADDR (PAGE_OFFSET - SZ_256M)
|
||||
#define MODULES_END PAGE_OFFSET
|
||||
#define MODULES_SIZE (CONFIG_MODULES_SIZE * SZ_1M)
|
||||
#define MODULES_VADDR (MODULES_END - MODULES_SIZE)
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
|
||||
|
@ -19,8 +19,14 @@ static inline void tlb_flush_pgtable(struct mmu_gather *tlb,
|
||||
|
||||
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
|
||||
{
|
||||
return kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE),
|
||||
pgd_t *pgd = kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE),
|
||||
pgtable_gfp_flags(mm, GFP_KERNEL));
|
||||
|
||||
#if defined(CONFIG_PPC_8xx) || defined(CONFIG_PPC_BOOK3S_603)
|
||||
memcpy(pgd + USER_PTRS_PER_PGD, swapper_pg_dir + USER_PTRS_PER_PGD,
|
||||
(MAX_PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
|
||||
#endif
|
||||
return pgd;
|
||||
}
|
||||
|
||||
static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
|
||||
|
@ -23,7 +23,7 @@ DECLARE_STATIC_KEY_FALSE(__percpu_first_chunk_is_paged);
|
||||
(static_key_enabled(&__percpu_first_chunk_is_paged.key))
|
||||
#else
|
||||
#define percpu_first_chunk_is_paged false
|
||||
#endif /* CONFIG_PPC64 && CONFIG_SMP */
|
||||
#endif
|
||||
|
||||
#include <asm-generic/percpu.h>
|
||||
|
||||
|
@ -397,6 +397,7 @@ inline uint16_t pseries_errorlog_length(struct pseries_errorlog *sect)
|
||||
#define PSERIES_HP_ELOG_RESOURCE_SLOT 3
|
||||
#define PSERIES_HP_ELOG_RESOURCE_PHB 4
|
||||
#define PSERIES_HP_ELOG_RESOURCE_PMEM 6
|
||||
#define PSERIES_HP_ELOG_RESOURCE_DT 7
|
||||
|
||||
#define PSERIES_HP_ELOG_ACTION_ADD 1
|
||||
#define PSERIES_HP_ELOG_ACTION_REMOVE 2
|
||||
|
@ -226,6 +226,10 @@ static inline int arch_within_stack_frames(const void * const stack,
|
||||
return BAD_STACK;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_PPC32
|
||||
extern void *emergency_ctx[];
|
||||
#endif
|
||||
|
||||
#endif /* !__ASSEMBLY__ */
|
||||
|
||||
#endif /* __KERNEL__ */
|
||||
|
@ -6,6 +6,7 @@
|
||||
#include <asm/page.h>
|
||||
#include <asm/extable.h>
|
||||
#include <asm/kup.h>
|
||||
#include <asm/asm-compat.h>
|
||||
|
||||
#ifdef __powerpc64__
|
||||
/* We use TASK_SIZE_USER64 as TASK_SIZE is not constant */
|
||||
@ -92,12 +93,6 @@ __pu_failed: \
|
||||
: label)
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_CC_IS_CLANG
|
||||
#define DS_FORM_CONSTRAINT "Z<>"
|
||||
#else
|
||||
#define DS_FORM_CONSTRAINT "YZ<>"
|
||||
#endif
|
||||
|
||||
#ifdef __powerpc64__
|
||||
#ifdef CONFIG_PPC_KERNEL_PREFIXED
|
||||
#define __put_user_asm2_goto(x, ptr, label) \
|
||||
|
@ -139,6 +139,7 @@ obj-$(CONFIG_KPROBES) += kprobes.o
|
||||
obj-$(CONFIG_OPTPROBES) += optprobes.o optprobes_head.o
|
||||
obj-$(CONFIG_KPROBES_ON_FTRACE) += kprobes-ftrace.o
|
||||
obj-$(CONFIG_UPROBES) += uprobes.o
|
||||
obj-$(CONFIG_RETHOOK) += rethook.o
|
||||
obj-$(CONFIG_PPC_UDBG_16550) += legacy_serial.o udbg_16550.o
|
||||
obj-$(CONFIG_SWIOTLB) += dma-swiotlb.o
|
||||
obj-$(CONFIG_ARCH_HAS_DMA_SET_MASK) += dma-mask.o
|
||||
|
@ -735,7 +735,7 @@ static const struct sysfs_ops cache_index_ops = {
|
||||
.show = cache_index_show,
|
||||
};
|
||||
|
||||
static struct kobj_type cache_index_type = {
|
||||
static const struct kobj_type cache_index_type = {
|
||||
.release = cache_index_release,
|
||||
.sysfs_ops = &cache_index_ops,
|
||||
.default_groups = cache_index_default_groups,
|
||||
|
@ -1537,10 +1537,6 @@ int eeh_pe_inject_err(struct eeh_pe *pe, int type, int func,
|
||||
if (!eeh_ops || !eeh_ops->err_inject)
|
||||
return -ENOENT;
|
||||
|
||||
/* Check on PCI error type */
|
||||
if (type != EEH_ERR_TYPE_32 && type != EEH_ERR_TYPE_64)
|
||||
return -EINVAL;
|
||||
|
||||
/* Check on PCI error function */
|
||||
if (func < EEH_ERR_FUNC_MIN || func > EEH_ERR_FUNC_MAX)
|
||||
return -EINVAL;
|
||||
@ -1682,7 +1678,6 @@ static ssize_t eeh_force_recover_write(struct file *filp,
|
||||
|
||||
static const struct file_operations eeh_force_recover_fops = {
|
||||
.open = simple_open,
|
||||
.llseek = no_llseek,
|
||||
.write = eeh_force_recover_write,
|
||||
};
|
||||
|
||||
@ -1726,7 +1721,6 @@ static ssize_t eeh_dev_check_write(struct file *filp,
|
||||
|
||||
static const struct file_operations eeh_dev_check_fops = {
|
||||
.open = simple_open,
|
||||
.llseek = no_llseek,
|
||||
.write = eeh_dev_check_write,
|
||||
.read = eeh_debugfs_dev_usage,
|
||||
};
|
||||
@ -1846,11 +1840,15 @@ static ssize_t eeh_dev_break_write(struct file *filp,
|
||||
|
||||
static const struct file_operations eeh_dev_break_fops = {
|
||||
.open = simple_open,
|
||||
.llseek = no_llseek,
|
||||
.write = eeh_dev_break_write,
|
||||
.read = eeh_debugfs_dev_usage,
|
||||
};
|
||||
|
||||
int eeh_pe_inject_mmio_error(struct pci_dev *pdev)
|
||||
{
|
||||
return eeh_debugfs_break_device(pdev);
|
||||
}
|
||||
|
||||
static ssize_t eeh_dev_can_recover(struct file *filp,
|
||||
const char __user *user_buf,
|
||||
size_t count, loff_t *ppos)
|
||||
@ -1893,7 +1891,6 @@ static ssize_t eeh_dev_can_recover(struct file *filp,
|
||||
|
||||
static const struct file_operations eeh_dev_can_recover_fops = {
|
||||
.open = simple_open,
|
||||
.llseek = no_llseek,
|
||||
.write = eeh_dev_can_recover,
|
||||
.read = eeh_debugfs_dev_usage,
|
||||
};
|
||||
|
@ -1989,13 +1989,6 @@ INT_DEFINE_END(system_call)
|
||||
INTERRUPT_TO_KERNEL
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_PPC_FAST_ENDIAN_SWITCH
|
||||
BEGIN_FTR_SECTION
|
||||
cmpdi r0,0x1ebe
|
||||
beq- 1f
|
||||
END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
|
||||
#endif
|
||||
|
||||
/* We reach here with PACA in r13, r13 in r9. */
|
||||
mfspr r11,SPRN_SRR0
|
||||
mfspr r12,SPRN_SRR1
|
||||
@ -2015,16 +2008,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
|
||||
b system_call_common
|
||||
#endif
|
||||
.endif
|
||||
|
||||
#ifdef CONFIG_PPC_FAST_ENDIAN_SWITCH
|
||||
/* Fast LE/BE switch system call */
|
||||
1: mfspr r12,SPRN_SRR1
|
||||
xori r12,r12,MSR_LE
|
||||
mtspr SPRN_SRR1,r12
|
||||
mr r13,r9
|
||||
RFI_TO_USER /* return to userspace */
|
||||
b . /* prevent speculative execution */
|
||||
#endif
|
||||
.endm
|
||||
|
||||
EXC_REAL_BEGIN(system_call, 0xc00, 0x100)
|
||||
|
@ -40,16 +40,6 @@
|
||||
|
||||
#include "head_32.h"
|
||||
|
||||
.macro compare_to_kernel_boundary scratch, addr
|
||||
#if CONFIG_TASK_SIZE <= 0x80000000 && CONFIG_PAGE_OFFSET >= 0x80000000
|
||||
/* By simply checking Address >= 0x80000000, we know if its a kernel address */
|
||||
not. \scratch, \addr
|
||||
#else
|
||||
rlwinm \scratch, \addr, 16, 0xfff8
|
||||
cmpli cr0, \scratch, PAGE_OFFSET@h
|
||||
#endif
|
||||
.endm
|
||||
|
||||
#define PAGE_SHIFT_512K 19
|
||||
#define PAGE_SHIFT_8M 23
|
||||
|
||||
@ -199,18 +189,7 @@ instruction_counter:
|
||||
mfspr r10, SPRN_SRR0 /* Get effective address of fault */
|
||||
INVALIDATE_ADJACENT_PAGES_CPU15(r10, r11)
|
||||
mtspr SPRN_MD_EPN, r10
|
||||
#ifdef CONFIG_EXECMEM
|
||||
mfcr r11
|
||||
compare_to_kernel_boundary r10, r10
|
||||
#endif
|
||||
mfspr r10, SPRN_M_TWB /* Get level 1 table */
|
||||
#ifdef CONFIG_EXECMEM
|
||||
blt+ 3f
|
||||
rlwinm r10, r10, 0, 20, 31
|
||||
oris r10, r10, (swapper_pg_dir - PAGE_OFFSET)@ha
|
||||
3:
|
||||
mtcr r11
|
||||
#endif
|
||||
lwz r11, (swapper_pg_dir-PAGE_OFFSET)@l(r10) /* Get level 1 entry */
|
||||
mtspr SPRN_MD_TWC, r11
|
||||
mfspr r10, SPRN_MD_TWC
|
||||
@ -248,19 +227,12 @@ instruction_counter:
|
||||
START_EXCEPTION(INTERRUPT_DATA_TLB_MISS_8xx, DataStoreTLBMiss)
|
||||
mtspr SPRN_SPRG_SCRATCH2, r10
|
||||
mtspr SPRN_M_TW, r11
|
||||
mfcr r11
|
||||
|
||||
/* If we are faulting a kernel address, we have to use the
|
||||
* kernel page tables.
|
||||
*/
|
||||
mfspr r10, SPRN_MD_EPN
|
||||
compare_to_kernel_boundary r10, r10
|
||||
mfspr r10, SPRN_M_TWB /* Get level 1 table */
|
||||
blt+ 3f
|
||||
rlwinm r10, r10, 0, 20, 31
|
||||
oris r10, r10, (swapper_pg_dir - PAGE_OFFSET)@ha
|
||||
3:
|
||||
mtcr r11
|
||||
lwz r11, (swapper_pg_dir-PAGE_OFFSET)@l(r10) /* Get level 1 entry */
|
||||
|
||||
mtspr SPRN_MD_TWC, r11
|
||||
@ -332,15 +304,19 @@ instruction_counter:
|
||||
cmpwi cr1, r11, RPN_PATTERN
|
||||
beq- cr1, FixupDAR /* must be a buggy dcbX, icbi insn. */
|
||||
DARFixed:/* Return from dcbx instruction bug workaround */
|
||||
mfspr r11, SPRN_DSISR
|
||||
rlwinm r11, r11, 0, DSISR_NOHPTE
|
||||
cmpwi cr1, r11, 0
|
||||
beq+ cr1, .Ldtlbie
|
||||
mfspr r11, SPRN_DAR
|
||||
tlbie r11
|
||||
rlwinm r11, r11, 16, 0xffff
|
||||
cmplwi cr1, r11, TASK_SIZE@h
|
||||
bge- cr1, FixupPGD
|
||||
.Ldtlbie:
|
||||
EXCEPTION_PROLOG_1
|
||||
/* 0x300 is DataAccess exception, needed by bad_page_fault() */
|
||||
EXCEPTION_PROLOG_2 INTERRUPT_DATA_STORAGE DataTLBError handle_dar_dsisr=1
|
||||
lwz r4, _DAR(r11)
|
||||
lwz r5, _DSISR(r11)
|
||||
andis. r10,r5,DSISR_NOHPTE@h
|
||||
beq+ .Ldtlbie
|
||||
tlbie r4
|
||||
.Ldtlbie:
|
||||
prepare_transfer_to_handler
|
||||
bl do_page_fault
|
||||
b interrupt_return
|
||||
@ -394,6 +370,30 @@ DARFixed:/* Return from dcbx instruction bug workaround */
|
||||
__HEAD
|
||||
. = 0x2000
|
||||
|
||||
FixupPGD:
|
||||
mtspr SPRN_M_TW, r10
|
||||
mfspr r10, SPRN_DAR
|
||||
mtspr SPRN_MD_EPN, r10
|
||||
mfspr r11, SPRN_M_TWB /* Get level 1 table */
|
||||
lwz r10, (swapper_pg_dir - PAGE_OFFSET)@l(r11) /* Get the level 1 entry */
|
||||
cmpwi cr1, r10, 0
|
||||
bne cr1, 1f
|
||||
|
||||
rlwinm r10, r11, 0, 20, 31
|
||||
oris r10, r10, (swapper_pg_dir - PAGE_OFFSET)@ha
|
||||
lwz r10, (swapper_pg_dir - PAGE_OFFSET)@l(r10) /* Get the level 1 entry */
|
||||
cmpwi cr1, r10, 0
|
||||
beq cr1, 1f
|
||||
stw r10, (swapper_pg_dir - PAGE_OFFSET)@l(r11) /* Set the level 1 entry */
|
||||
mfspr r10, SPRN_M_TW
|
||||
mtcr r10
|
||||
mfspr r10, SPRN_SPRG_SCRATCH0
|
||||
mfspr r11, SPRN_SPRG_SCRATCH1
|
||||
rfi
|
||||
1:
|
||||
mfspr r10, SPRN_M_TW
|
||||
b .Ldtlbie
|
||||
|
||||
/* This is the procedure to calculate the data EA for buggy dcbx,dcbi instructions
|
||||
* by decoding the registers used by the dcbx instruction and adding them.
|
||||
* DAR is set to the calculated address.
|
||||
@ -404,7 +404,7 @@ FixupDAR:/* Entry point for dcbx workaround. */
|
||||
mfspr r10, SPRN_SRR0
|
||||
mtspr SPRN_MD_EPN, r10
|
||||
rlwinm r11, r10, 16, 0xfff8
|
||||
cmpli cr1, r11, PAGE_OFFSET@h
|
||||
cmpli cr1, r11, TASK_SIZE@h
|
||||
mfspr r11, SPRN_M_TWB /* Get level 1 table */
|
||||
blt+ cr1, 3f
|
||||
|
||||
@ -587,6 +587,10 @@ start_here:
|
||||
lis r0, (MD_TWAM | MD_RSV4I)@h
|
||||
mtspr SPRN_MD_CTR, r0
|
||||
#endif
|
||||
#ifndef CONFIG_PIN_TLB_TEXT
|
||||
li r0, 0
|
||||
mtspr SPRN_MI_CTR, r0
|
||||
#endif
|
||||
#if !defined(CONFIG_PIN_TLB_DATA) && !defined(CONFIG_PIN_TLB_IMMR)
|
||||
lis r0, MD_TWAM@h
|
||||
mtspr SPRN_MD_CTR, r0
|
||||
@ -683,6 +687,7 @@ SYM_FUNC_START_LOCAL(initial_mmu)
|
||||
blr
|
||||
SYM_FUNC_END(initial_mmu)
|
||||
|
||||
#ifdef CONFIG_PIN_TLB
|
||||
_GLOBAL(mmu_pin_tlb)
|
||||
lis r9, (1f - PAGE_OFFSET)@h
|
||||
ori r9, r9, (1f - PAGE_OFFSET)@l
|
||||
@ -704,6 +709,7 @@ _GLOBAL(mmu_pin_tlb)
|
||||
mtspr SPRN_MD_CTR, r6
|
||||
tlbia
|
||||
|
||||
#ifdef CONFIG_PIN_TLB_TEXT
|
||||
LOAD_REG_IMMEDIATE(r5, 28 << 8)
|
||||
LOAD_REG_IMMEDIATE(r6, PAGE_OFFSET)
|
||||
LOAD_REG_IMMEDIATE(r7, MI_SVALID | MI_PS8MEG | _PMD_ACCESSED)
|
||||
@ -724,6 +730,7 @@ _GLOBAL(mmu_pin_tlb)
|
||||
bdnzt lt, 2b
|
||||
lis r0, MI_RSV4I@h
|
||||
mtspr SPRN_MI_CTR, r0
|
||||
#endif
|
||||
|
||||
LOAD_REG_IMMEDIATE(r5, 28 << 8 | MD_TWAM)
|
||||
#ifdef CONFIG_PIN_TLB_DATA
|
||||
@ -783,3 +790,4 @@ _GLOBAL(mmu_pin_tlb)
|
||||
mtspr SPRN_SRR1, r10
|
||||
mtspr SPRN_SRR0, r11
|
||||
rfi
|
||||
#endif
|
||||
|
@ -411,39 +411,26 @@ END_FTR_SECTION_IFSET(CPU_FTR_FPU_UNAVAILABLE)
|
||||
*/
|
||||
. = INTERRUPT_INST_TLB_MISS_603
|
||||
InstructionTLBMiss:
|
||||
/*
|
||||
* r0: userspace flag (later scratch)
|
||||
* r1: linux style pte ( later becomes ppc hardware pte )
|
||||
* r2: ptr to linux-style pte
|
||||
* r3: fault address
|
||||
*/
|
||||
/* Get PTE (linux-style) and check access */
|
||||
mfspr r3,SPRN_IMISS
|
||||
#ifdef CONFIG_EXECMEM
|
||||
lis r1, TASK_SIZE@h /* check if kernel address */
|
||||
cmplw 0,r1,r3
|
||||
#endif
|
||||
mfspr r0,SPRN_IMISS
|
||||
mfspr r2, SPRN_SDR1
|
||||
li r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
|
||||
rlwinm r2, r2, 28, 0xfffff000
|
||||
#ifdef CONFIG_EXECMEM
|
||||
li r0, 3
|
||||
bgt- 112f
|
||||
lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */
|
||||
li r0, 0
|
||||
addi r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */
|
||||
#endif
|
||||
112: rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */
|
||||
rlwimi r2,r0,12,20,29 /* insert top 10 bits of address */
|
||||
lwz r2,0(r2) /* get pmd entry */
|
||||
#ifdef CONFIG_EXECMEM
|
||||
rlwinm r3, r0, 4, 0xf
|
||||
subi r3, r3, (TASK_SIZE >> 28) & 0xf
|
||||
#endif
|
||||
rlwinm. r2,r2,0,0,19 /* extract address of pte page */
|
||||
beq- InstructionAddressInvalid /* return if no mapping */
|
||||
rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */
|
||||
rlwimi r2,r0,22,20,29 /* insert next 10 bits of address */
|
||||
lwz r2,0(r2) /* get linux-style pte */
|
||||
andc. r1,r1,r2 /* check access & ~permission */
|
||||
bne- InstructionAddressInvalid /* return if access not permitted */
|
||||
/* Convert linux-style PTE to low word of PPC-style PTE */
|
||||
#ifdef CONFIG_EXECMEM
|
||||
rlwimi r2, r0, 0, 31, 31 /* userspace ? -> PP lsb */
|
||||
rlwimi r2, r3, 1, 31, 31 /* userspace ? -> PP lsb */
|
||||
#endif
|
||||
ori r1, r1, 0xe06 /* clear out reserved bits */
|
||||
andc r1, r2, r1 /* PP = user? 1 : 0 */
|
||||
@ -451,7 +438,7 @@ BEGIN_FTR_SECTION
|
||||
rlwinm r1,r1,0,~_PAGE_COHERENT /* clear M (coherence not required) */
|
||||
END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
|
||||
mtspr SPRN_RPA,r1
|
||||
tlbli r3
|
||||
tlbli r0
|
||||
mfspr r3,SPRN_SRR1 /* Need to restore CR0 */
|
||||
mtcrf 0x80,r3
|
||||
rfi
|
||||
@ -480,35 +467,24 @@ InstructionAddressInvalid:
|
||||
*/
|
||||
. = INTERRUPT_DATA_LOAD_TLB_MISS_603
|
||||
DataLoadTLBMiss:
|
||||
/*
|
||||
* r0: userspace flag (later scratch)
|
||||
* r1: linux style pte ( later becomes ppc hardware pte )
|
||||
* r2: ptr to linux-style pte
|
||||
* r3: fault address
|
||||
*/
|
||||
/* Get PTE (linux-style) and check access */
|
||||
mfspr r3,SPRN_DMISS
|
||||
lis r1, TASK_SIZE@h /* check if kernel address */
|
||||
cmplw 0,r1,r3
|
||||
mfspr r0,SPRN_DMISS
|
||||
mfspr r2, SPRN_SDR1
|
||||
li r1, _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_READ
|
||||
rlwinm r2, r2, 28, 0xfffff000
|
||||
li r0, 3
|
||||
bgt- 112f
|
||||
lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */
|
||||
li r0, 0
|
||||
addi r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */
|
||||
112: rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */
|
||||
lwz r2,0(r2) /* get pmd entry */
|
||||
rlwinm r1, r2, 28, 0xfffff000
|
||||
rlwimi r1,r0,12,20,29 /* insert top 10 bits of address */
|
||||
lwz r2,0(r1) /* get pmd entry */
|
||||
rlwinm r3, r0, 4, 0xf
|
||||
rlwinm. r2,r2,0,0,19 /* extract address of pte page */
|
||||
beq- DataAddressInvalid /* return if no mapping */
|
||||
rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */
|
||||
subi r3, r3, (TASK_SIZE >> 28) & 0xf
|
||||
beq- 2f /* bail if no mapping */
|
||||
1: rlwimi r2,r0,22,20,29 /* insert next 10 bits of address */
|
||||
lwz r2,0(r2) /* get linux-style pte */
|
||||
li r1, _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_READ
|
||||
andc. r1,r1,r2 /* check access & ~permission */
|
||||
bne- DataAddressInvalid /* return if access not permitted */
|
||||
/* Convert linux-style PTE to low word of PPC-style PTE */
|
||||
rlwinm r1,r2,32-9,30,30 /* _PAGE_WRITE -> PP msb */
|
||||
rlwimi r2,r0,0,30,31 /* userspace ? -> PP */
|
||||
rlwimi r2,r3,2,30,31 /* userspace ? -> PP */
|
||||
rlwimi r1,r2,32-3,24,24 /* _PAGE_WRITE -> _PAGE_DIRTY */
|
||||
xori r1,r1,_PAGE_DIRTY /* clear dirty when not rw */
|
||||
ori r1,r1,0xe04 /* clear out reserved bits */
|
||||
@ -518,25 +494,35 @@ BEGIN_FTR_SECTION
|
||||
END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
|
||||
mtspr SPRN_RPA,r1
|
||||
BEGIN_MMU_FTR_SECTION
|
||||
li r0,1
|
||||
li r3,1
|
||||
mfspr r1,SPRN_SPRG_603_LRU
|
||||
rlwinm r2,r3,20,27,31 /* Get Address bits 15:19 */
|
||||
slw r0,r0,r2
|
||||
xor r1,r0,r1
|
||||
srw r0,r1,r2
|
||||
rlwinm r2,r0,20,27,31 /* Get Address bits 15:19 */
|
||||
slw r3,r3,r2
|
||||
xor r1,r3,r1
|
||||
srw r3,r1,r2
|
||||
mtspr SPRN_SPRG_603_LRU,r1
|
||||
mfspr r2,SPRN_SRR1
|
||||
rlwimi r2,r0,31-14,14,14
|
||||
rlwimi r2,r3,31-14,14,14
|
||||
mtspr SPRN_SRR1,r2
|
||||
mtcrf 0x80,r2
|
||||
tlbld r3
|
||||
tlbld r0
|
||||
rfi
|
||||
MMU_FTR_SECTION_ELSE
|
||||
mfspr r2,SPRN_SRR1 /* Need to restore CR0 */
|
||||
mtcrf 0x80,r2
|
||||
tlbld r3
|
||||
tlbld r0
|
||||
rfi
|
||||
ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_NEED_DTLB_SW_LRU)
|
||||
|
||||
2: lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha
|
||||
addi r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */
|
||||
rlwimi r2,r0,12,20,29 /* insert top 10 bits of address */
|
||||
lwz r2,0(r2) /* get pmd entry */
|
||||
cmpwi cr0,r2,0
|
||||
beq- DataAddressInvalid /* return if no mapping */
|
||||
stw r2,0(r1)
|
||||
rlwinm. r2,r2,0,0,19 /* extract address of pte page */
|
||||
b 1b
|
||||
DataAddressInvalid:
|
||||
mfspr r3,SPRN_SRR1
|
||||
rlwinm r1,r3,9,6,6 /* Get load/store bit */
|
||||
@ -560,34 +546,24 @@ DataAddressInvalid:
|
||||
*/
|
||||
. = INTERRUPT_DATA_STORE_TLB_MISS_603
|
||||
DataStoreTLBMiss:
|
||||
/*
|
||||
* r0: userspace flag (later scratch)
|
||||
* r1: linux style pte ( later becomes ppc hardware pte )
|
||||
* r2: ptr to linux-style pte
|
||||
* r3: fault address
|
||||
*/
|
||||
/* Get PTE (linux-style) and check access */
|
||||
mfspr r3,SPRN_DMISS
|
||||
lis r1, TASK_SIZE@h /* check if kernel address */
|
||||
cmplw 0,r1,r3
|
||||
mfspr r0,SPRN_DMISS
|
||||
mfspr r2, SPRN_SDR1
|
||||
li r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT | _PAGE_ACCESSED
|
||||
rlwinm r2, r2, 28, 0xfffff000
|
||||
li r0, 3
|
||||
bgt- 112f
|
||||
lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */
|
||||
li r0, 0
|
||||
addi r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */
|
||||
112: rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */
|
||||
lwz r2,0(r2) /* get pmd entry */
|
||||
rlwinm r1, r2, 28, 0xfffff000
|
||||
rlwimi r1,r0,12,20,29 /* insert top 10 bits of address */
|
||||
lwz r2,0(r1) /* get pmd entry */
|
||||
rlwinm r3, r0, 4, 0xf
|
||||
rlwinm. r2,r2,0,0,19 /* extract address of pte page */
|
||||
beq- DataAddressInvalid /* return if no mapping */
|
||||
rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */
|
||||
subi r3, r3, (TASK_SIZE >> 28) & 0xf
|
||||
beq- 2f /* bail if no mapping */
|
||||
1:
|
||||
rlwimi r2,r0,22,20,29 /* insert next 10 bits of address */
|
||||
lwz r2,0(r2) /* get linux-style pte */
|
||||
li r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT | _PAGE_ACCESSED
|
||||
andc. r1,r1,r2 /* check access & ~permission */
|
||||
bne- DataAddressInvalid /* return if access not permitted */
|
||||
/* Convert linux-style PTE to low word of PPC-style PTE */
|
||||
rlwimi r2,r0,0,31,31 /* userspace ? -> PP lsb */
|
||||
rlwimi r2,r3,1,31,31 /* userspace ? -> PP lsb */
|
||||
li r1,0xe06 /* clear out reserved bits & PP msb */
|
||||
andc r1,r2,r1 /* PP = user? 1: 0 */
|
||||
BEGIN_FTR_SECTION
|
||||
@ -597,26 +573,36 @@ END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
|
||||
mfspr r2,SPRN_SRR1 /* Need to restore CR0 */
|
||||
mtcrf 0x80,r2
|
||||
BEGIN_MMU_FTR_SECTION
|
||||
li r0,1
|
||||
li r3,1
|
||||
mfspr r1,SPRN_SPRG_603_LRU
|
||||
rlwinm r2,r3,20,27,31 /* Get Address bits 15:19 */
|
||||
slw r0,r0,r2
|
||||
xor r1,r0,r1
|
||||
srw r0,r1,r2
|
||||
rlwinm r2,r0,20,27,31 /* Get Address bits 15:19 */
|
||||
slw r3,r3,r2
|
||||
xor r1,r3,r1
|
||||
srw r3,r1,r2
|
||||
mtspr SPRN_SPRG_603_LRU,r1
|
||||
mfspr r2,SPRN_SRR1
|
||||
rlwimi r2,r0,31-14,14,14
|
||||
rlwimi r2,r3,31-14,14,14
|
||||
mtspr SPRN_SRR1,r2
|
||||
mtcrf 0x80,r2
|
||||
tlbld r3
|
||||
tlbld r0
|
||||
rfi
|
||||
MMU_FTR_SECTION_ELSE
|
||||
mfspr r2,SPRN_SRR1 /* Need to restore CR0 */
|
||||
mtcrf 0x80,r2
|
||||
tlbld r3
|
||||
tlbld r0
|
||||
rfi
|
||||
ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_NEED_DTLB_SW_LRU)
|
||||
|
||||
2: lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha
|
||||
addi r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */
|
||||
rlwimi r2,r0,12,20,29 /* insert top 10 bits of address */
|
||||
lwz r2,0(r2) /* get pmd entry */
|
||||
cmpwi cr0,r2,0
|
||||
beq- DataAddressInvalid /* return if no mapping */
|
||||
stw r2,0(r1)
|
||||
rlwinm r2,r2,0,0,19 /* extract address of pte page */
|
||||
b 1b
|
||||
|
||||
#ifndef CONFIG_ALTIVEC
|
||||
#define altivec_assist_exception unknown_exception
|
||||
#endif
|
||||
|
@ -228,16 +228,6 @@ static nokprobe_inline void set_current_kprobe(struct kprobe *p, struct pt_regs
|
||||
kcb->kprobe_saved_msr = regs->msr;
|
||||
}
|
||||
|
||||
void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs)
|
||||
{
|
||||
ri->ret_addr = (kprobe_opcode_t *)regs->link;
|
||||
ri->fp = NULL;
|
||||
|
||||
/* Replace the return addr with trampoline addr */
|
||||
regs->link = (unsigned long)__kretprobe_trampoline;
|
||||
}
|
||||
NOKPROBE_SYMBOL(arch_prepare_kretprobe);
|
||||
|
||||
static int try_to_emulate(struct kprobe *p, struct pt_regs *regs)
|
||||
{
|
||||
int ret;
|
||||
@ -394,49 +384,6 @@ no_kprobe:
|
||||
}
|
||||
NOKPROBE_SYMBOL(kprobe_handler);
|
||||
|
||||
/*
|
||||
* Function return probe trampoline:
|
||||
* - init_kprobes() establishes a probepoint here
|
||||
* - When the probed function returns, this probe
|
||||
* causes the handlers to fire
|
||||
*/
|
||||
asm(".global __kretprobe_trampoline\n"
|
||||
".type __kretprobe_trampoline, @function\n"
|
||||
"__kretprobe_trampoline:\n"
|
||||
"nop\n"
|
||||
"blr\n"
|
||||
".size __kretprobe_trampoline, .-__kretprobe_trampoline\n");
|
||||
|
||||
/*
|
||||
* Called when the probe at kretprobe trampoline is hit
|
||||
*/
|
||||
static int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
|
||||
{
|
||||
unsigned long orig_ret_address;
|
||||
|
||||
orig_ret_address = __kretprobe_trampoline_handler(regs, NULL);
|
||||
/*
|
||||
* We get here through one of two paths:
|
||||
* 1. by taking a trap -> kprobe_handler() -> here
|
||||
* 2. by optprobe branch -> optimized_callback() -> opt_pre_handler() -> here
|
||||
*
|
||||
* When going back through (1), we need regs->nip to be setup properly
|
||||
* as it is used to determine the return address from the trap.
|
||||
* For (2), since nip is not honoured with optprobes, we instead setup
|
||||
* the link register properly so that the subsequent 'blr' in
|
||||
* __kretprobe_trampoline jumps back to the right instruction.
|
||||
*
|
||||
* For nip, we should set the address to the previous instruction since
|
||||
* we end up emulating it in kprobe_handler(), which increments the nip
|
||||
* again.
|
||||
*/
|
||||
regs_set_return_ip(regs, orig_ret_address - 4);
|
||||
regs->link = orig_ret_address;
|
||||
|
||||
return 0;
|
||||
}
|
||||
NOKPROBE_SYMBOL(trampoline_probe_handler);
|
||||
|
||||
/*
|
||||
* Called after single-stepping. p->addr is the address of the
|
||||
* instruction whose first byte has been replaced by the "breakpoint"
|
||||
@ -539,19 +486,9 @@ int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
|
||||
}
|
||||
NOKPROBE_SYMBOL(kprobe_fault_handler);
|
||||
|
||||
static struct kprobe trampoline_p = {
|
||||
.addr = (kprobe_opcode_t *) &__kretprobe_trampoline,
|
||||
.pre_handler = trampoline_probe_handler
|
||||
};
|
||||
|
||||
int __init arch_init_kprobes(void)
|
||||
{
|
||||
return register_kprobe(&trampoline_p);
|
||||
}
|
||||
|
||||
int arch_trampoline_kprobe(struct kprobe *p)
|
||||
{
|
||||
if (p->addr == (kprobe_opcode_t *)&__kretprobe_trampoline)
|
||||
if (p->addr == (kprobe_opcode_t *)&arch_rethook_trampoline)
|
||||
return 1;
|
||||
|
||||
return 0;
|
||||
|
@ -651,12 +651,11 @@ static inline int create_stub(const Elf64_Shdr *sechdrs,
|
||||
// func_desc_t is 8 bytes if ABIv2, else 16 bytes
|
||||
desc = func_desc(addr);
|
||||
for (i = 0; i < sizeof(func_desc_t) / sizeof(u32); i++) {
|
||||
if (patch_instruction(((u32 *)&entry->funcdata) + i,
|
||||
ppc_inst(((u32 *)(&desc))[i])))
|
||||
if (patch_u32(((u32 *)&entry->funcdata) + i, ((u32 *)&desc)[i]))
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (patch_instruction(&entry->magic, ppc_inst(STUB_MAGIC)))
|
||||
if (patch_u32(&entry->magic, STUB_MAGIC))
|
||||
return 0;
|
||||
|
||||
return 1;
|
||||
|
@ -56,7 +56,7 @@ static unsigned long can_optimize(struct kprobe *p)
|
||||
* has a 'nop' instruction, which can be emulated.
|
||||
* So further checks can be skipped.
|
||||
*/
|
||||
if (p->addr == (kprobe_opcode_t *)&__kretprobe_trampoline)
|
||||
if (p->addr == (kprobe_opcode_t *)&arch_rethook_trampoline)
|
||||
return addr + sizeof(kprobe_opcode_t);
|
||||
|
||||
/*
|
||||
|
@ -72,8 +72,6 @@
|
||||
#define TM_DEBUG(x...) do { } while(0)
|
||||
#endif
|
||||
|
||||
extern unsigned long _get_SP(void);
|
||||
|
||||
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
|
||||
/*
|
||||
* Are we running in "Suspend disabled" mode? If so we have to block any
|
||||
@ -2177,10 +2175,10 @@ static inline int valid_irq_stack(unsigned long sp, struct task_struct *p,
|
||||
return 0;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_PPC64
|
||||
static inline int valid_emergency_stack(unsigned long sp, struct task_struct *p,
|
||||
unsigned long nbytes)
|
||||
{
|
||||
#ifdef CONFIG_PPC64
|
||||
unsigned long stack_page;
|
||||
unsigned long cpu = task_cpu(p);
|
||||
|
||||
@ -2208,10 +2206,26 @@ static inline int valid_emergency_stack(unsigned long sp, struct task_struct *p,
|
||||
if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
|
||||
return 1;
|
||||
# endif
|
||||
#endif
|
||||
|
||||
return 0;
|
||||
}
|
||||
#else
|
||||
static inline int valid_emergency_stack(unsigned long sp, struct task_struct *p,
|
||||
unsigned long nbytes)
|
||||
{
|
||||
unsigned long stack_page;
|
||||
unsigned long cpu = task_cpu(p);
|
||||
|
||||
if (!IS_ENABLED(CONFIG_VMAP_STACK))
|
||||
return 0;
|
||||
|
||||
stack_page = (unsigned long)emergency_ctx[cpu] - THREAD_SIZE;
|
||||
if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
|
||||
return 1;
|
||||
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
/*
|
||||
* validate the stack frame of a particular minimum size, used for when we are
|
||||
|
73
arch/powerpc/kernel/rethook.c
Normal file
73
arch/powerpc/kernel/rethook.c
Normal file
@ -0,0 +1,73 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/*
|
||||
* PowerPC implementation of rethook. This depends on kprobes.
|
||||
*/
|
||||
|
||||
#include <linux/kprobes.h>
|
||||
#include <linux/rethook.h>
|
||||
|
||||
/*
|
||||
* Function return trampoline:
|
||||
* - init_kprobes() establishes a probepoint here
|
||||
* - When the probed function returns, this probe
|
||||
* causes the handlers to fire
|
||||
*/
|
||||
asm(".global arch_rethook_trampoline\n"
|
||||
".type arch_rethook_trampoline, @function\n"
|
||||
"arch_rethook_trampoline:\n"
|
||||
"nop\n"
|
||||
"blr\n"
|
||||
".size arch_rethook_trampoline, .-arch_rethook_trampoline\n");
|
||||
|
||||
/*
|
||||
* Called when the probe at kretprobe trampoline is hit
|
||||
*/
|
||||
static int trampoline_rethook_handler(struct kprobe *p, struct pt_regs *regs)
|
||||
{
|
||||
return !rethook_trampoline_handler(regs, regs->gpr[1]);
|
||||
}
|
||||
NOKPROBE_SYMBOL(trampoline_rethook_handler);
|
||||
|
||||
void arch_rethook_prepare(struct rethook_node *rh, struct pt_regs *regs, bool mcount)
|
||||
{
|
||||
rh->ret_addr = regs->link;
|
||||
rh->frame = regs->gpr[1];
|
||||
|
||||
/* Replace the return addr with trampoline addr */
|
||||
regs->link = (unsigned long)arch_rethook_trampoline;
|
||||
}
|
||||
NOKPROBE_SYMBOL(arch_rethook_prepare);
|
||||
|
||||
/* This is called from rethook_trampoline_handler(). */
|
||||
void arch_rethook_fixup_return(struct pt_regs *regs, unsigned long orig_ret_address)
|
||||
{
|
||||
/*
|
||||
* We get here through one of two paths:
|
||||
* 1. by taking a trap -> kprobe_handler() -> here
|
||||
* 2. by optprobe branch -> optimized_callback() -> opt_pre_handler() -> here
|
||||
*
|
||||
* When going back through (1), we need regs->nip to be setup properly
|
||||
* as it is used to determine the return address from the trap.
|
||||
* For (2), since nip is not honoured with optprobes, we instead setup
|
||||
* the link register properly so that the subsequent 'blr' in
|
||||
* arch_rethook_trampoline jumps back to the right instruction.
|
||||
*
|
||||
* For nip, we should set the address to the previous instruction since
|
||||
* we end up emulating it in kprobe_handler(), which increments the nip
|
||||
* again.
|
||||
*/
|
||||
regs_set_return_ip(regs, orig_ret_address - 4);
|
||||
regs->link = orig_ret_address;
|
||||
}
|
||||
NOKPROBE_SYMBOL(arch_rethook_fixup_return);
|
||||
|
||||
static struct kprobe trampoline_p = {
|
||||
.addr = (kprobe_opcode_t *) &arch_rethook_trampoline,
|
||||
.pre_handler = trampoline_rethook_handler
|
||||
};
|
||||
|
||||
/* rethook initializer */
|
||||
int __init arch_init_kprobes(void)
|
||||
{
|
||||
return register_kprobe(&trampoline_p);
|
||||
}
|
@ -125,7 +125,7 @@ static const struct attribute_group secvar_attr_group = {
|
||||
};
|
||||
__ATTRIBUTE_GROUPS(secvar_attr);
|
||||
|
||||
static struct kobj_type secvar_ktype = {
|
||||
static const struct kobj_type secvar_ktype = {
|
||||
.sysfs_ops = &kobj_sysfs_ops,
|
||||
.default_groups = secvar_attr_groups,
|
||||
};
|
||||
|
@ -21,6 +21,7 @@
|
||||
#include <asm/processor.h>
|
||||
#include <linux/ftrace.h>
|
||||
#include <asm/kprobes.h>
|
||||
#include <linux/rethook.h>
|
||||
|
||||
#include <asm/paca.h>
|
||||
|
||||
@ -133,12 +134,13 @@ int __no_sanitize_address arch_stack_walk_reliable(stack_trace_consume_fn consum
|
||||
* arch-dependent code, they are generic.
|
||||
*/
|
||||
ip = ftrace_graph_ret_addr(task, &graph_idx, ip, stack);
|
||||
#ifdef CONFIG_KPROBES
|
||||
|
||||
/*
|
||||
* Mark stacktraces with kretprobed functions on them
|
||||
* as unreliable.
|
||||
*/
|
||||
if (ip == (unsigned long)__kretprobe_trampoline)
|
||||
#ifdef CONFIG_RETHOOK
|
||||
if (ip == (unsigned long)arch_rethook_trampoline)
|
||||
return -EINVAL;
|
||||
#endif
|
||||
|
||||
|
@ -17,7 +17,7 @@ void arch_static_call_transform(void *site, void *tramp, void *func, bool tail)
|
||||
mutex_lock(&text_mutex);
|
||||
|
||||
if (func && !is_short) {
|
||||
err = patch_instruction(tramp + PPC_SCT_DATA, ppc_inst(target));
|
||||
err = patch_ulong(tramp + PPC_SCT_DATA, target);
|
||||
if (err)
|
||||
goto out;
|
||||
}
|
||||
|
@ -121,7 +121,7 @@ static void pmac_backlight_unblank(void)
|
||||
|
||||
props = &pmac_backlight->props;
|
||||
props->brightness = props->max_brightness;
|
||||
props->power = FB_BLANK_UNBLANK;
|
||||
props->power = BACKLIGHT_POWER_ON;
|
||||
backlight_update_status(pmac_backlight);
|
||||
}
|
||||
mutex_unlock(&pmac_backlight_mutex);
|
||||
|
@ -38,11 +38,7 @@
|
||||
.else
|
||||
addi r4, r5, VDSO_DATA_OFFSET
|
||||
.endif
|
||||
#ifdef __powerpc64__
|
||||
bl CFUNC(DOTSYM(\funct))
|
||||
#else
|
||||
bl \funct
|
||||
#endif
|
||||
PPC_LL r0, PPC_MIN_STKFRM + PPC_LR_STKOFF(r1)
|
||||
#ifdef __powerpc64__
|
||||
PPC_LL r2, PPC_MIN_STKFRM + STK_GOT(r1)
|
||||
|
@ -1922,14 +1922,22 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
|
||||
|
||||
r = EMULATE_FAIL;
|
||||
if (cpu_has_feature(CPU_FTR_ARCH_300)) {
|
||||
if (cause == FSCR_MSGP_LG)
|
||||
switch (cause) {
|
||||
case FSCR_MSGP_LG:
|
||||
r = kvmppc_emulate_doorbell_instr(vcpu);
|
||||
if (cause == FSCR_PM_LG)
|
||||
break;
|
||||
case FSCR_PM_LG:
|
||||
r = kvmppc_pmu_unavailable(vcpu);
|
||||
if (cause == FSCR_EBB_LG)
|
||||
break;
|
||||
case FSCR_EBB_LG:
|
||||
r = kvmppc_ebb_unavailable(vcpu);
|
||||
if (cause == FSCR_TM_LG)
|
||||
break;
|
||||
case FSCR_TM_LG:
|
||||
r = kvmppc_tm_unavailable(vcpu);
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (r == EMULATE_FAIL) {
|
||||
kvmppc_core_queue_program(vcpu, SRR1_PROGILL |
|
||||
@ -4049,7 +4057,6 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
|
||||
/* Return to whole-core mode if we split the core earlier */
|
||||
if (cmd_bit) {
|
||||
unsigned long hid0 = mfspr(SPRN_HID0);
|
||||
unsigned long loops = 0;
|
||||
|
||||
hid0 &= ~HID0_POWER8_DYNLPARDIS;
|
||||
stat_bit = HID0_POWER8_2LPARMODE | HID0_POWER8_4LPARMODE;
|
||||
@ -4061,7 +4068,6 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
|
||||
if (!(hid0 & stat_bit))
|
||||
break;
|
||||
cpu_relax();
|
||||
++loops;
|
||||
}
|
||||
split_info.do_nap = 0;
|
||||
}
|
||||
|
@ -20,15 +20,14 @@
|
||||
#include <asm/code-patching.h>
|
||||
#include <asm/inst.h>
|
||||
|
||||
static int __patch_instruction(u32 *exec_addr, ppc_inst_t instr, u32 *patch_addr)
|
||||
static int __patch_mem(void *exec_addr, unsigned long val, void *patch_addr, bool is_dword)
|
||||
{
|
||||
if (!ppc_inst_prefixed(instr)) {
|
||||
u32 val = ppc_inst_val(instr);
|
||||
if (!IS_ENABLED(CONFIG_PPC64) || likely(!is_dword)) {
|
||||
/* For big endian correctness: plain address would use the wrong half */
|
||||
u32 val32 = val;
|
||||
|
||||
__put_kernel_nofault(patch_addr, &val, u32, failed);
|
||||
__put_kernel_nofault(patch_addr, &val32, u32, failed);
|
||||
} else {
|
||||
u64 val = ppc_inst_as_ulong(instr);
|
||||
|
||||
__put_kernel_nofault(patch_addr, &val, u64, failed);
|
||||
}
|
||||
|
||||
@ -44,7 +43,10 @@ failed:
|
||||
|
||||
int raw_patch_instruction(u32 *addr, ppc_inst_t instr)
|
||||
{
|
||||
return __patch_instruction(addr, instr, addr);
|
||||
if (ppc_inst_prefixed(instr))
|
||||
return __patch_mem(addr, ppc_inst_as_ulong(instr), addr, true);
|
||||
else
|
||||
return __patch_mem(addr, ppc_inst_val(instr), addr, false);
|
||||
}
|
||||
|
||||
struct patch_context {
|
||||
@ -276,7 +278,7 @@ static void unmap_patch_area(unsigned long addr)
|
||||
flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
|
||||
}
|
||||
|
||||
static int __do_patch_instruction_mm(u32 *addr, ppc_inst_t instr)
|
||||
static int __do_patch_mem_mm(void *addr, unsigned long val, bool is_dword)
|
||||
{
|
||||
int err;
|
||||
u32 *patch_addr;
|
||||
@ -305,7 +307,7 @@ static int __do_patch_instruction_mm(u32 *addr, ppc_inst_t instr)
|
||||
|
||||
orig_mm = start_using_temp_mm(patching_mm);
|
||||
|
||||
err = __patch_instruction(addr, instr, patch_addr);
|
||||
err = __patch_mem(addr, val, patch_addr, is_dword);
|
||||
|
||||
/* context synchronisation performed by __patch_instruction (isync or exception) */
|
||||
stop_using_temp_mm(patching_mm, orig_mm);
|
||||
@ -322,7 +324,7 @@ static int __do_patch_instruction_mm(u32 *addr, ppc_inst_t instr)
|
||||
return err;
|
||||
}
|
||||
|
||||
static int __do_patch_instruction(u32 *addr, ppc_inst_t instr)
|
||||
static int __do_patch_mem(void *addr, unsigned long val, bool is_dword)
|
||||
{
|
||||
int err;
|
||||
u32 *patch_addr;
|
||||
@ -339,7 +341,7 @@ static int __do_patch_instruction(u32 *addr, ppc_inst_t instr)
|
||||
if (radix_enabled())
|
||||
asm volatile("ptesync": : :"memory");
|
||||
|
||||
err = __patch_instruction(addr, instr, patch_addr);
|
||||
err = __patch_mem(addr, val, patch_addr, is_dword);
|
||||
|
||||
pte_clear(&init_mm, text_poke_addr, pte);
|
||||
flush_tlb_kernel_range(text_poke_addr, text_poke_addr + PAGE_SIZE);
|
||||
@ -347,7 +349,7 @@ static int __do_patch_instruction(u32 *addr, ppc_inst_t instr)
|
||||
return err;
|
||||
}
|
||||
|
||||
int patch_instruction(u32 *addr, ppc_inst_t instr)
|
||||
static int patch_mem(void *addr, unsigned long val, bool is_dword)
|
||||
{
|
||||
int err;
|
||||
unsigned long flags;
|
||||
@ -359,19 +361,57 @@ int patch_instruction(u32 *addr, ppc_inst_t instr)
|
||||
*/
|
||||
if (!IS_ENABLED(CONFIG_STRICT_KERNEL_RWX) ||
|
||||
!static_branch_likely(&poking_init_done))
|
||||
return raw_patch_instruction(addr, instr);
|
||||
return __patch_mem(addr, val, addr, is_dword);
|
||||
|
||||
local_irq_save(flags);
|
||||
if (mm_patch_enabled())
|
||||
err = __do_patch_instruction_mm(addr, instr);
|
||||
err = __do_patch_mem_mm(addr, val, is_dword);
|
||||
else
|
||||
err = __do_patch_instruction(addr, instr);
|
||||
err = __do_patch_mem(addr, val, is_dword);
|
||||
local_irq_restore(flags);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_PPC64
|
||||
|
||||
int patch_instruction(u32 *addr, ppc_inst_t instr)
|
||||
{
|
||||
if (ppc_inst_prefixed(instr))
|
||||
return patch_mem(addr, ppc_inst_as_ulong(instr), true);
|
||||
else
|
||||
return patch_mem(addr, ppc_inst_val(instr), false);
|
||||
}
|
||||
NOKPROBE_SYMBOL(patch_instruction);
|
||||
|
||||
int patch_uint(void *addr, unsigned int val)
|
||||
{
|
||||
if (!IS_ALIGNED((unsigned long)addr, sizeof(unsigned int)))
|
||||
return -EINVAL;
|
||||
|
||||
return patch_mem(addr, val, false);
|
||||
}
|
||||
NOKPROBE_SYMBOL(patch_uint);
|
||||
|
||||
int patch_ulong(void *addr, unsigned long val)
|
||||
{
|
||||
if (!IS_ALIGNED((unsigned long)addr, sizeof(unsigned long)))
|
||||
return -EINVAL;
|
||||
|
||||
return patch_mem(addr, val, true);
|
||||
}
|
||||
NOKPROBE_SYMBOL(patch_ulong);
|
||||
|
||||
#else
|
||||
|
||||
int patch_instruction(u32 *addr, ppc_inst_t instr)
|
||||
{
|
||||
return patch_mem(addr, ppc_inst_val(instr), false);
|
||||
}
|
||||
NOKPROBE_SYMBOL(patch_instruction)
|
||||
|
||||
#endif
|
||||
|
||||
static int patch_memset64(u64 *addr, u64 val, size_t count)
|
||||
{
|
||||
for (u64 *end = addr + count; addr < end; addr++)
|
||||
|
@ -438,6 +438,46 @@ static void __init test_multi_instruction_patching(void)
|
||||
vfree(buf);
|
||||
}
|
||||
|
||||
static void __init test_data_patching(void)
|
||||
{
|
||||
void *buf;
|
||||
u32 *addr32;
|
||||
|
||||
buf = vzalloc(PAGE_SIZE);
|
||||
check(buf);
|
||||
if (!buf)
|
||||
return;
|
||||
|
||||
addr32 = buf + 128;
|
||||
|
||||
addr32[1] = 0xA0A1A2A3;
|
||||
addr32[2] = 0xB0B1B2B3;
|
||||
|
||||
check(!patch_uint(&addr32[1], 0xC0C1C2C3));
|
||||
|
||||
check(addr32[0] == 0);
|
||||
check(addr32[1] == 0xC0C1C2C3);
|
||||
check(addr32[2] == 0xB0B1B2B3);
|
||||
check(addr32[3] == 0);
|
||||
|
||||
/* Unaligned patch_ulong() should fail */
|
||||
if (IS_ENABLED(CONFIG_PPC64))
|
||||
check(patch_ulong(&addr32[1], 0xD0D1D2D3) == -EINVAL);
|
||||
|
||||
check(!patch_ulong(&addr32[2], 0xD0D1D2D3));
|
||||
|
||||
check(addr32[0] == 0);
|
||||
check(addr32[1] == 0xC0C1C2C3);
|
||||
check(*(unsigned long *)(&addr32[2]) == 0xD0D1D2D3);
|
||||
|
||||
if (!IS_ENABLED(CONFIG_PPC64))
|
||||
check(addr32[3] == 0);
|
||||
|
||||
check(addr32[4] == 0);
|
||||
|
||||
vfree(buf);
|
||||
}
|
||||
|
||||
static int __init test_code_patching(void)
|
||||
{
|
||||
pr_info("Running code patching self-tests ...\n");
|
||||
@ -448,6 +488,7 @@ static int __init test_code_patching(void)
|
||||
test_translate_branch();
|
||||
test_prefixed_patching();
|
||||
test_multi_instruction_patching();
|
||||
test_data_patching();
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -223,6 +223,8 @@ int mmu_mark_initmem_nx(void)
|
||||
|
||||
update_bats();
|
||||
|
||||
BUILD_BUG_ON(ALIGN_DOWN(MODULES_VADDR, SZ_256M) < TASK_SIZE);
|
||||
|
||||
for (i = TASK_SIZE >> 28; i < 16; i++) {
|
||||
/* Do not set NX on VM space for modules */
|
||||
if (is_module_segment(i << 28))
|
||||
|
@ -125,7 +125,7 @@ int mmu_ci_restrictions;
|
||||
#endif
|
||||
static u8 *linear_map_hash_slots;
|
||||
static unsigned long linear_map_hash_count;
|
||||
struct mmu_hash_ops mmu_hash_ops;
|
||||
struct mmu_hash_ops mmu_hash_ops __ro_after_init;
|
||||
EXPORT_SYMBOL(mmu_hash_ops);
|
||||
|
||||
/*
|
||||
|
@@ -410,6 +410,18 @@ EXPORT_SYMBOL_GPL(walk_system_ram_range);
#ifdef CONFIG_EXECMEM
static struct execmem_info execmem_info __ro_after_init;

#if defined(CONFIG_PPC_8xx) || defined(CONFIG_PPC_BOOK3S_603)
static void prealloc_execmem_pgtable(void)
{
unsigned long va;

for (va = ALIGN_DOWN(MODULES_VADDR, PGDIR_SIZE); va < MODULES_END; va += PGDIR_SIZE)
pte_alloc_kernel(pmd_off_k(va), va);
}
#else
static void prealloc_execmem_pgtable(void) { }
#endif

struct execmem_info __init *execmem_arch_setup(void)
{
pgprot_t kprobes_prot = strict_module_rwx_enabled() ? PAGE_KERNEL_ROX : PAGE_KERNEL_EXEC;

@@ -441,6 +453,8 @@ struct execmem_info __init *execmem_arch_setup(void)
end = VMALLOC_END;
#endif

prealloc_execmem_pgtable();

execmem_info = (struct execmem_info){
.ranges = {
[EXECMEM_DEFAULT] = {

@@ -150,11 +150,11 @@ unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)

mmu_mapin_immr();

mmu_mapin_ram_chunk(0, boundary, PAGE_KERNEL_TEXT, true);
mmu_mapin_ram_chunk(0, boundary, PAGE_KERNEL_X, true);
if (debug_pagealloc_enabled_or_kfence()) {
top = boundary;
} else {
mmu_mapin_ram_chunk(boundary, einittext8, PAGE_KERNEL_TEXT, true);
mmu_mapin_ram_chunk(boundary, einittext8, PAGE_KERNEL_X, true);
mmu_mapin_ram_chunk(einittext8, top, PAGE_KERNEL, true);
}

@@ -177,7 +177,8 @@ int mmu_mark_initmem_nx(void)
if (!debug_pagealloc_enabled_or_kfence())
err = mmu_mapin_ram_chunk(boundary, einittext8, PAGE_KERNEL, false);

mmu_pin_tlb(block_mapped_ram, false);
if (IS_ENABLED(CONFIG_PIN_TLB_TEXT))
mmu_pin_tlb(block_mapped_ram, false);

return err;
}

@@ -206,6 +207,8 @@ void __init setup_initial_memory_limit(phys_addr_t first_memblock_base,

/* 8xx can only access 32MB at the moment */
memblock_set_current_limit(min_t(u64, first_memblock_size, SZ_32M));

BUILD_BUG_ON(ALIGN_DOWN(MODULES_VADDR, PGDIR_SIZE) < TASK_SIZE);
}

int pud_clear_huge(pud_t *pud)

@@ -95,7 +95,7 @@ static int avr_probe(struct i2c_client *client)
}

static const struct i2c_device_id avr_id[] = {
{ "akebono-avr", 0 },
{ "akebono-avr" },
{ }
};
@@ -504,7 +504,7 @@ MODULE_DEVICE_TABLE(of, mpc512x_lpbfifo_match);

static struct platform_driver mpc512x_lpbfifo_driver = {
.probe = mpc512x_lpbfifo_probe,
.remove_new = mpc512x_lpbfifo_remove,
.remove = mpc512x_lpbfifo_remove,
.driver = {
.name = DRV_NAME,
.of_match_table = mpc512x_lpbfifo_match,

@@ -644,7 +644,6 @@ static int mpc52xx_wdt_release(struct inode *inode, struct file *file)

static const struct file_operations mpc52xx_wdt_fops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
.write = mpc52xx_wdt_write,
.unlocked_ioctl = mpc52xx_wdt_ioctl,
.compat_ioctl = compat_ptr_ioctl,

@@ -143,7 +143,7 @@ static struct platform_driver gpio_halt_driver = {
.of_match_table = gpio_halt_match,
},
.probe = gpio_halt_probe,
.remove_new = gpio_halt_remove,
.remove = gpio_halt_remove,
};

module_platform_driver(gpio_halt_driver);

@@ -195,6 +195,13 @@ config PIN_TLB_IMMR
CONFIG_PIN_TLB_DATA is also selected, it will reduce
CONFIG_PIN_TLB_DATA to 24 Mbytes.

config PIN_TLB_TEXT
bool "Pinned TLB for TEXT"
depends on PIN_TLB
default y
help
This pins kernel text with 8M pages.

endmenu

endmenu
@@ -84,11 +84,8 @@ config PPC_BOOK3S_64
bool "Server processors"
select PPC_FPU
select PPC_HAVE_PMU_SUPPORT
select HAVE_ARCH_TRANSPARENT_HUGEPAGE
select HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
select ARCH_ENABLE_HUGEPAGE_MIGRATION if HUGETLB_PAGE && MIGRATION
select ARCH_ENABLE_SPLIT_PMD_PTLOCK
select ARCH_ENABLE_THP_MIGRATION if TRANSPARENT_HUGEPAGE
select ARCH_SUPPORTS_HUGETLBFS
select ARCH_SUPPORTS_NUMA_BALANCING
select HAVE_MOVE_PMD

@@ -108,6 +105,14 @@ config PPC_BOOK3E_64

endchoice

config PPC_THP
def_bool y
depends on PPC_BOOK3S_64
depends on PPC_RADIX_MMU || (PPC_64S_HASH_MMU && PAGE_SIZE_64KB)
select HAVE_ARCH_TRANSPARENT_HUGEPAGE
select HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
select ARCH_ENABLE_THP_MIGRATION if TRANSPARENT_HUGEPAGE

choice
prompt "CPU selection"
help
@@ -453,7 +453,6 @@ static const struct file_operations spufs_cntl_fops = {
.release = spufs_cntl_release,
.read = simple_attr_read,
.write = simple_attr_write,
.llseek = no_llseek,
.mmap = spufs_cntl_mmap,
};

@@ -634,7 +633,6 @@ static ssize_t spufs_mbox_read(struct file *file, char __user *buf,
static const struct file_operations spufs_mbox_fops = {
.open = spufs_pipe_open,
.read = spufs_mbox_read,
.llseek = no_llseek,
};

static ssize_t spufs_mbox_stat_read(struct file *file, char __user *buf,

@@ -664,7 +662,6 @@ static ssize_t spufs_mbox_stat_read(struct file *file, char __user *buf,
static const struct file_operations spufs_mbox_stat_fops = {
.open = spufs_pipe_open,
.read = spufs_mbox_stat_read,
.llseek = no_llseek,
};

/* low-level ibox access function */

@@ -769,7 +766,6 @@ static const struct file_operations spufs_ibox_fops = {
.open = spufs_pipe_open,
.read = spufs_ibox_read,
.poll = spufs_ibox_poll,
.llseek = no_llseek,
};

static ssize_t spufs_ibox_stat_read(struct file *file, char __user *buf,

@@ -797,7 +793,6 @@ static ssize_t spufs_ibox_stat_read(struct file *file, char __user *buf,
static const struct file_operations spufs_ibox_stat_fops = {
.open = spufs_pipe_open,
.read = spufs_ibox_stat_read,
.llseek = no_llseek,
};

/* low-level mailbox write */

@@ -901,7 +896,6 @@ static const struct file_operations spufs_wbox_fops = {
.open = spufs_pipe_open,
.write = spufs_wbox_write,
.poll = spufs_wbox_poll,
.llseek = no_llseek,
};

static ssize_t spufs_wbox_stat_read(struct file *file, char __user *buf,

@@ -929,7 +923,6 @@ static ssize_t spufs_wbox_stat_read(struct file *file, char __user *buf,
static const struct file_operations spufs_wbox_stat_fops = {
.open = spufs_pipe_open,
.read = spufs_wbox_stat_read,
.llseek = no_llseek,
};

static int spufs_signal1_open(struct inode *inode, struct file *file)

@@ -1056,7 +1049,6 @@ static const struct file_operations spufs_signal1_fops = {
.read = spufs_signal1_read,
.write = spufs_signal1_write,
.mmap = spufs_signal1_mmap,
.llseek = no_llseek,
};

static const struct file_operations spufs_signal1_nosched_fops = {

@@ -1064,7 +1056,6 @@ static const struct file_operations spufs_signal1_nosched_fops = {
.release = spufs_signal1_release,
.write = spufs_signal1_write,
.mmap = spufs_signal1_mmap,
.llseek = no_llseek,
};

static int spufs_signal2_open(struct inode *inode, struct file *file)

@@ -1195,7 +1186,6 @@ static const struct file_operations spufs_signal2_fops = {
.read = spufs_signal2_read,
.write = spufs_signal2_write,
.mmap = spufs_signal2_mmap,
.llseek = no_llseek,
};

static const struct file_operations spufs_signal2_nosched_fops = {

@@ -1203,7 +1193,6 @@ static const struct file_operations spufs_signal2_nosched_fops = {
.release = spufs_signal2_release,
.write = spufs_signal2_write,
.mmap = spufs_signal2_mmap,
.llseek = no_llseek,
};

/*

@@ -1343,7 +1332,6 @@ static const struct file_operations spufs_mss_fops = {
.open = spufs_mss_open,
.release = spufs_mss_release,
.mmap = spufs_mss_mmap,
.llseek = no_llseek,
};

static vm_fault_t

@@ -1401,7 +1389,6 @@ static const struct file_operations spufs_psmap_fops = {
.open = spufs_psmap_open,
.release = spufs_psmap_release,
.mmap = spufs_psmap_mmap,
.llseek = no_llseek,
};

@@ -1732,7 +1719,6 @@ static const struct file_operations spufs_mfc_fops = {
.flush = spufs_mfc_flush,
.fsync = spufs_mfc_fsync,
.mmap = spufs_mfc_mmap,
.llseek = no_llseek,
};

static int spufs_npc_set(void *data, u64 val)

@@ -2102,7 +2088,6 @@ static ssize_t spufs_dma_info_read(struct file *file, char __user *buf,
static const struct file_operations spufs_dma_info_fops = {
.open = spufs_info_open,
.read = spufs_dma_info_read,
.llseek = no_llseek,
};

static void spufs_get_proxydma_info(struct spu_context *ctx,

@@ -2159,7 +2144,6 @@ static ssize_t spufs_proxydma_info_read(struct file *file, char __user *buf,
static const struct file_operations spufs_proxydma_info_fops = {
.open = spufs_info_open,
.read = spufs_proxydma_info_read,
.llseek = no_llseek,
};

static int spufs_show_tid(struct seq_file *s, void *private)

@@ -2442,7 +2426,6 @@ static const struct file_operations spufs_switch_log_fops = {
.read = spufs_switch_log_read,
.poll = spufs_switch_log_poll,
.release = spufs_switch_log_release,
.llseek = no_llseek,
};

/**
@@ -7,7 +7,6 @@
extern int maple_set_rtc_time(struct rtc_time *tm);
extern void maple_get_rtc_time(struct rtc_time *tm);
extern time64_t maple_get_boot_time(void);
extern void maple_calibrate_decr(void);
extern void maple_pci_init(void);
extern void maple_pci_irq_fixup(struct pci_dev *dev);
extern int maple_pci_get_legacy_ide_irq(struct pci_dev *dev, int channel);

@@ -285,7 +285,7 @@ MODULE_DEVICE_TABLE(of, gpio_mdio_match);
static struct platform_driver gpio_mdio_driver =
{
.probe = gpio_mdio_probe,
.remove_new = gpio_mdio_remove,
.remove = gpio_mdio_remove,
.driver = {
.name = "gpio-mdio-bitbang",
.of_match_table = gpio_mdio_match,

@@ -5,7 +5,6 @@
extern time64_t pas_get_boot_time(void);
extern void pas_pci_init(void);
struct pci_dev;
extern void pas_pci_irq_fixup(struct pci_dev *dev);
extern void pas_pci_dma_dev_setup(struct pci_dev *dev);

void __iomem *__init pasemi_pci_getcfgaddr(struct pci_dev *dev, int offset);

@@ -313,7 +313,7 @@ static void __init uninorth_install_pfunc(void)
/*
* Install handlers for the hwclock child if any
*/
for (np = NULL; (np = of_get_next_child(uninorth_node, np)) != NULL;)
for_each_child_of_node(uninorth_node, np)
if (of_node_name_eq(np, "hw-clock")) {
unin_hwclock = np;
break;

@@ -827,7 +827,7 @@ static int smp_core99_kick_cpu(int nr)
mdelay(1);

/* Restore our exception vector */
patch_instruction(vector, ppc_inst(save_vector));
patch_uint(vector, save_vector);

local_irq_restore(flags);
if (ppc_md.progress) ppc_md.progress("smp_core99_kick_cpu done", 0x347);
@@ -99,7 +99,6 @@ static ssize_t pnv_eeh_ei_write(struct file *filp,

static const struct file_operations pnv_eeh_ei_fops = {
.open = simple_open,
.llseek = no_llseek,
.write = pnv_eeh_ei_write,
};

@@ -860,7 +859,7 @@ static int pnv_eeh_bridge_reset(struct pci_dev *pdev, int option)
int64_t rc;

/* Hot reset to the bus if firmware cannot handle */
if (!dn || !of_get_property(dn, "ibm,reset-by-firmware", NULL))
if (!dn || !of_property_present(dn, "ibm,reset-by-firmware"))
return __pnv_eeh_bridge_reset(pdev, option);

pr_debug("%s: FW reset PCI bus %04x:%02x with option %d\n",

@@ -210,7 +210,7 @@ static struct attribute *dump_default_attrs[] = {
};
ATTRIBUTE_GROUPS(dump_default);

static struct kobj_type dump_ktype = {
static const struct kobj_type dump_ktype = {
.sysfs_ops = &dump_sysfs_ops,
.release = &dump_release,
.default_groups = dump_default_groups,

@@ -146,7 +146,7 @@ static struct attribute *elog_default_attrs[] = {
};
ATTRIBUTE_GROUPS(elog_default);

static struct kobj_type elog_ktype = {
static const struct kobj_type elog_ktype = {
.sysfs_ops = &elog_sysfs_ops,
.release = &elog_release,
.default_groups = elog_default_groups,

@@ -393,7 +393,7 @@ void __init opal_lpc_init(void)
for_each_compatible_node(np, NULL, "ibm,power8-lpc") {
if (!of_device_is_available(np))
continue;
if (!of_get_property(np, "primary", NULL))
if (!of_property_present(np, "primary"))
continue;
opal_lpc_chip_id = of_get_ibm_chip_id(np);
of_node_put(np);

@@ -443,7 +443,7 @@ static struct platform_driver opal_prd_driver = {
.of_match_table = opal_prd_match,
},
.probe = opal_prd_probe,
.remove_new = opal_prd_remove,
.remove = opal_prd_remove,
};

module_platform_driver(opal_prd_driver);

@@ -274,7 +274,6 @@ int pnv_pci_cfg_write(struct pci_dn *pdn,
int where, int size, u32 val);
extern struct iommu_table *pnv_pci_table_alloc(int nid);

extern void pnv_pci_init_ioda_hub(struct device_node *np);
extern void pnv_pci_init_ioda2_phb(struct device_node *np);
extern void pnv_pci_init_npu2_opencapi_phb(struct device_node *np);
extern void pnv_pci_reset_secondary_bus(struct pci_dev *dev);
@@ -23,6 +23,7 @@
#include <linux/uaccess.h>
#include <asm/rtas.h>
#include <asm/rtas-work-area.h>
#include <asm/prom.h>

static struct workqueue_struct *pseries_hp_wq;

@@ -250,11 +251,8 @@ int dlpar_detach_node(struct device_node *dn)
struct device_node *child;
int rc;

child = of_get_next_child(dn, NULL);
while (child) {
for_each_child_of_node(dn, child)
dlpar_detach_node(child);
child = of_get_next_child(dn, child);
}

rc = of_detach_node(dn);
if (rc)

@@ -264,6 +262,20 @@ int dlpar_detach_node(struct device_node *dn)

return 0;
}
static int dlpar_changeset_attach_cc_nodes(struct of_changeset *ocs,
struct device_node *dn)
{
int rc;

rc = of_changeset_attach_node(ocs, dn);

if (!rc && dn->child)
rc = dlpar_changeset_attach_cc_nodes(ocs, dn->child);
if (!rc && dn->sibling)
rc = dlpar_changeset_attach_cc_nodes(ocs, dn->sibling);

return rc;
}

#define DR_ENTITY_SENSE 9003
#define DR_ENTITY_PRESENT 1
@@ -330,27 +342,206 @@ int dlpar_unisolate_drc(u32 drc_index)
return 0;
}

static struct device_node *
get_device_node_with_drc_index(u32 index)
{
struct device_node *np = NULL;
u32 node_index;
int rc;

for_each_node_with_property(np, "ibm,my-drc-index") {
rc = of_property_read_u32(np, "ibm,my-drc-index",
&node_index);
if (rc) {
pr_err("%s: %pOF: of_property_read_u32 %s: %d\n",
__func__, np, "ibm,my-drc-index", rc);
of_node_put(np);
return NULL;
}

if (index == node_index)
break;
}

return np;
}

static struct device_node *
get_device_node_with_drc_info(u32 index)
{
struct device_node *np = NULL;
struct of_drc_info drc;
struct property *info;
const __be32 *value;
u32 node_index;
int i, j, count;

for_each_node_with_property(np, "ibm,drc-info") {
info = of_find_property(np, "ibm,drc-info", NULL);
if (info == NULL) {
/* XXX can this happen? */
of_node_put(np);
return NULL;
}
value = of_prop_next_u32(info, NULL, &count);
if (value == NULL)
continue;
value++;
for (i = 0; i < count; i++) {
if (of_read_drc_info_cell(&info, &value, &drc))
break;
if (index > drc.last_drc_index)
continue;
node_index = drc.drc_index_start;
for (j = 0; j < drc.num_sequential_elems; j++) {
if (index == node_index)
return np;
node_index += drc.sequential_inc;
}
}
}

return NULL;
}
static int dlpar_hp_dt_add(u32 index)
{
struct device_node *np, *nodes;
struct of_changeset ocs;
int rc;

/*
* Do not add device node(s) if already exists in the
* device tree.
*/
np = get_device_node_with_drc_index(index);
if (np) {
pr_err("%s: Adding device node for index (%d), but "
"already exists in the device tree\n",
__func__, index);
rc = -EINVAL;
goto out;
}

np = get_device_node_with_drc_info(index);

if (!np)
return -EIO;

/* Next, configure the connector. */
nodes = dlpar_configure_connector(cpu_to_be32(index), np);
if (!nodes) {
rc = -EIO;
goto out;
}

/*
* Add the new nodes from dlpar_configure_connector() onto
* the device-tree.
*/
of_changeset_init(&ocs);
rc = dlpar_changeset_attach_cc_nodes(&ocs, nodes);

if (!rc)
rc = of_changeset_apply(&ocs);
else
dlpar_free_cc_nodes(nodes);

of_changeset_destroy(&ocs);

out:
of_node_put(np);
return rc;
}

static int changeset_detach_node_recursive(struct of_changeset *ocs,
struct device_node *node)
{
struct device_node *child;
int rc;

for_each_child_of_node(node, child) {
rc = changeset_detach_node_recursive(ocs, child);
if (rc) {
of_node_put(child);
return rc;
}
}

return of_changeset_detach_node(ocs, node);
}

static int dlpar_hp_dt_remove(u32 drc_index)
{
struct device_node *np;
struct of_changeset ocs;
u32 index;
int rc = 0;

/*
* Prune all nodes with a matching index.
*/
of_changeset_init(&ocs);

for_each_node_with_property(np, "ibm,my-drc-index") {
rc = of_property_read_u32(np, "ibm,my-drc-index", &index);
if (rc) {
pr_err("%s: %pOF: of_property_read_u32 %s: %d\n",
__func__, np, "ibm,my-drc-index", rc);
of_node_put(np);
goto out;
}

if (index == drc_index) {
rc = changeset_detach_node_recursive(&ocs, np);
if (rc) {
of_node_put(np);
goto out;
}
}
}

rc = of_changeset_apply(&ocs);

out:
of_changeset_destroy(&ocs);
return rc;
}
static int dlpar_hp_dt(struct pseries_hp_errorlog *phpe)
{
u32 drc_index;
int rc;

if (phpe->id_type != PSERIES_HP_ELOG_ID_DRC_INDEX)
return -EINVAL;

drc_index = be32_to_cpu(phpe->_drc_u.drc_index);

lock_device_hotplug();

switch (phpe->action) {
case PSERIES_HP_ELOG_ACTION_ADD:
rc = dlpar_hp_dt_add(drc_index);
break;
case PSERIES_HP_ELOG_ACTION_REMOVE:
rc = dlpar_hp_dt_remove(drc_index);
break;
default:
pr_err("Invalid action (%d) specified\n", phpe->action);
rc = -EINVAL;
break;
}

unlock_device_hotplug();

return rc;
}

int handle_dlpar_errorlog(struct pseries_hp_errorlog *hp_elog)
{
int rc;

/* pseries error logs are in BE format, convert to cpu type */
switch (hp_elog->id_type) {
case PSERIES_HP_ELOG_ID_DRC_COUNT:
hp_elog->_drc_u.drc_count =
be32_to_cpu(hp_elog->_drc_u.drc_count);
break;
case PSERIES_HP_ELOG_ID_DRC_INDEX:
hp_elog->_drc_u.drc_index =
be32_to_cpu(hp_elog->_drc_u.drc_index);
break;
case PSERIES_HP_ELOG_ID_DRC_IC:
hp_elog->_drc_u.ic.count =
be32_to_cpu(hp_elog->_drc_u.ic.count);
hp_elog->_drc_u.ic.index =
be32_to_cpu(hp_elog->_drc_u.ic.index);
}

switch (hp_elog->resource) {
case PSERIES_HP_ELOG_RESOURCE_MEM:
rc = dlpar_memory(hp_elog);
@@ -361,6 +552,9 @@ int handle_dlpar_errorlog(struct pseries_hp_errorlog *hp_elog)
case PSERIES_HP_ELOG_RESOURCE_PMEM:
rc = dlpar_hp_pmem(hp_elog);
break;
case PSERIES_HP_ELOG_RESOURCE_DT:
rc = dlpar_hp_dt(hp_elog);
break;

default:
pr_warn_ratelimited("Invalid resource (%d) specified\n",

@@ -413,6 +607,8 @@ static int dlpar_parse_resource(char **cmd, struct pseries_hp_errorlog *hp_elog)
hp_elog->resource = PSERIES_HP_ELOG_RESOURCE_MEM;
} else if (sysfs_streq(arg, "cpu")) {
hp_elog->resource = PSERIES_HP_ELOG_RESOURCE_CPU;
} else if (sysfs_streq(arg, "dt")) {
hp_elog->resource = PSERIES_HP_ELOG_RESOURCE_DT;
} else {
pr_err("Invalid resource specified.\n");
return -EINVAL;

@@ -554,7 +750,7 @@ dlpar_store_out:
static ssize_t dlpar_show(const struct class *class, const struct class_attribute *attr,
char *buf)
{
return sprintf(buf, "%s\n", "memory,cpu");
return sprintf(buf, "%s\n", "memory,cpu,dt");
}

static CLASS_ATTR_RW(dlpar);

@@ -325,7 +325,6 @@ static const struct file_operations dtl_fops = {
.open = dtl_file_open,
.release = dtl_file_release,
.read = dtl_file_read,
.llseek = no_llseek,
};

static struct dentry *dtl_dir;
@@ -784,6 +784,43 @@ static int pseries_notify_resume(struct eeh_dev *edev)
}
#endif

/**
* pseries_eeh_err_inject - Inject specified error to the indicated PE
* @pe: the indicated PE
* @type: error type
* @func: specific error type
* @addr: address
* @mask: address mask
* The routine is called to inject specified error, which is
* determined by @type and @func, to the indicated PE
*/
static int pseries_eeh_err_inject(struct eeh_pe *pe, int type, int func,
unsigned long addr, unsigned long mask)
{
struct eeh_dev *pdev;

/* Check on PCI error type */
if (type != EEH_ERR_TYPE_32 && type != EEH_ERR_TYPE_64)
return -EINVAL;

switch (func) {
case EEH_ERR_FUNC_LD_MEM_ADDR:
case EEH_ERR_FUNC_LD_MEM_DATA:
case EEH_ERR_FUNC_ST_MEM_ADDR:
case EEH_ERR_FUNC_ST_MEM_DATA:
/* injects a MMIO error for all pdev's belonging to PE */
pci_lock_rescan_remove();
list_for_each_entry(pdev, &pe->edevs, entry)
eeh_pe_inject_mmio_error(pdev->pdev);
pci_unlock_rescan_remove();
break;
default:
return -ERANGE;
}

return 0;
}

static struct eeh_ops pseries_eeh_ops = {
.name = "pseries",
.probe = pseries_eeh_probe,

@@ -792,7 +829,7 @@ static struct eeh_ops pseries_eeh_ops = {
.reset = pseries_eeh_reset,
.get_log = pseries_eeh_get_log,
.configure_bridge = pseries_eeh_configure_bridge,
.err_inject = NULL,
.err_inject = pseries_eeh_err_inject,
.read_config = pseries_eeh_read_config,
.write_config = pseries_eeh_write_config,
.next_error = NULL,
@@ -757,7 +757,7 @@ int dlpar_cpu(struct pseries_hp_errorlog *hp_elog)
u32 drc_index;
int rc;

drc_index = hp_elog->_drc_u.drc_index;
drc_index = be32_to_cpu(hp_elog->_drc_u.drc_index);

lock_device_hotplug();

@@ -817,16 +817,16 @@ int dlpar_memory(struct pseries_hp_errorlog *hp_elog)
case PSERIES_HP_ELOG_ACTION_ADD:
switch (hp_elog->id_type) {
case PSERIES_HP_ELOG_ID_DRC_COUNT:
count = hp_elog->_drc_u.drc_count;
count = be32_to_cpu(hp_elog->_drc_u.drc_count);
rc = dlpar_memory_add_by_count(count);
break;
case PSERIES_HP_ELOG_ID_DRC_INDEX:
drc_index = hp_elog->_drc_u.drc_index;
drc_index = be32_to_cpu(hp_elog->_drc_u.drc_index);
rc = dlpar_memory_add_by_index(drc_index);
break;
case PSERIES_HP_ELOG_ID_DRC_IC:
count = hp_elog->_drc_u.ic.count;
drc_index = hp_elog->_drc_u.ic.index;
count = be32_to_cpu(hp_elog->_drc_u.ic.count);
drc_index = be32_to_cpu(hp_elog->_drc_u.ic.index);
rc = dlpar_memory_add_by_ic(count, drc_index);
break;
default:

@@ -838,16 +838,16 @@ int dlpar_memory(struct pseries_hp_errorlog *hp_elog)
case PSERIES_HP_ELOG_ACTION_REMOVE:
switch (hp_elog->id_type) {
case PSERIES_HP_ELOG_ID_DRC_COUNT:
count = hp_elog->_drc_u.drc_count;
count = be32_to_cpu(hp_elog->_drc_u.drc_count);
rc = dlpar_memory_remove_by_count(count);
break;
case PSERIES_HP_ELOG_ID_DRC_INDEX:
drc_index = hp_elog->_drc_u.drc_index;
drc_index = be32_to_cpu(hp_elog->_drc_u.drc_index);
rc = dlpar_memory_remove_by_index(drc_index);
break;
case PSERIES_HP_ELOG_ID_DRC_IC:
count = hp_elog->_drc_u.ic.count;
drc_index = hp_elog->_drc_u.ic.index;
count = be32_to_cpu(hp_elog->_drc_u.ic.count);
drc_index = be32_to_cpu(hp_elog->_drc_u.ic.index);
rc = dlpar_memory_remove_by_ic(count, drc_index);
break;
default:
@@ -1509,7 +1509,7 @@ static const struct of_device_id papr_scm_match[] = {

static struct platform_driver papr_scm_driver = {
.probe = papr_scm_probe,
.remove_new = papr_scm_remove,
.remove = papr_scm_remove,
.driver = {
.name = "papr_scm",
.of_match_table = papr_scm_match,

@@ -121,7 +121,7 @@ int dlpar_hp_pmem(struct pseries_hp_errorlog *hp_elog)
return -EINVAL;
}

drc_index = hp_elog->_drc_u.drc_index;
drc_index = be32_to_cpu(hp_elog->_drc_u.drc_index);

lock_device_hotplug();

@@ -162,13 +162,13 @@ static const struct sysfs_ops vas_sysfs_ops = {
.store = vas_type_store,
};

static struct kobj_type vas_def_attr_type = {
static const struct kobj_type vas_def_attr_type = {
.release = vas_type_release,
.sysfs_ops = &vas_sysfs_ops,
.default_groups = vas_def_capab_groups,
};

static struct kobj_type vas_qos_attr_type = {
static const struct kobj_type vas_qos_attr_type = {
.release = vas_type_release,
.sysfs_ops = &vas_sysfs_ops,
.default_groups = vas_qos_capab_groups,

@@ -603,7 +603,7 @@ static struct platform_driver fsl_of_msi_driver = {
.of_match_table = fsl_of_msi_ids,
},
.probe = fsl_of_msi_probe,
.remove_new = fsl_of_msi_remove,
.remove = fsl_of_msi_remove,
};

static __init int fsl_of_msi_init(void)
@@ -193,7 +193,7 @@ static void pmi_of_remove(struct platform_device *dev)

static struct platform_driver pmi_of_platform_driver = {
.probe = pmi_of_probe,
.remove_new = pmi_of_remove,
.remove = pmi_of_remove,
.driver = {
.name = "pmi",
.of_match_table = pmi_match,

@@ -3543,7 +3543,7 @@ scanhex(unsigned long *vp)
}
} else if (c == '$') {
int i;
for (i=0; i<63; i++) {
for (i = 0; i < (KSYM_NAME_LEN - 1); i++) {
c = inchar();
if (isspace(c) || c == '\0') {
termch = c;

@@ -178,7 +178,7 @@ void __init pmu_backlight_init(void)
}

bd->props.brightness = level;
bd->props.power = FB_BLANK_UNBLANK;
bd->props.power = BACKLIGHT_POWER_ON;
backlight_update_status(bd);

printk(KERN_INFO "PMU Backlight initialized (%s)\n", name);

@@ -2334,7 +2334,7 @@ static const struct platform_suspend_ops pmu_pm_ops = {
.valid = pmu_sleep_valid,
};

static int register_pmu_pm_ops(void)
static int __init register_pmu_pm_ops(void)
{
if (pmu_kind == PMU_OHARE_BASED)
powerbook_sleep_init_3400();

@@ -18,4 +18,4 @@ $(OUTPUT)/context_switch: LDLIBS += -lpthread

$(OUTPUT)/fork: LDLIBS += -lpthread

$(OUTPUT)/exec_target: CFLAGS += -static -nostartfiles
$(OUTPUT)/exec_target: CFLAGS += -nostartfiles
@@ -7,10 +7,22 @@
*/

#define _GNU_SOURCE
#include <unistd.h>
#include <sys/syscall.h>

void _start(void)
{
syscall(SYS_exit, 0);
asm volatile (
"li %%r0, %[sys_exit];"
"li %%r3, 0;"
"sc;"
:
: [sys_exit] "i" (SYS_exit)
/*
* "sc" will clobber r0, r3-r13, cr0, ctr, xer and memory.
* Even though sys_exit never returns, handle clobber
* registers.
*/
: "r0", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10",
"r11", "r12", "r13", "cr0", "ctr", "xer", "memory"
);
}