mirror of https://github.com/torvalds/linux.git
s390 updates for the 5.16 merge window
Merge tag 's390-5.16-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull s390 updates from Vasily Gorbik:

 - Add support for ftrace with direct call and ftrace direct call
   samples.

 - Add support for kernel command lines longer than the current 896
   bytes and make their maximum length configurable.

 - Add support for the BEAR enhancement facility to improve last
   breaking event instruction tracking.

 - Add kprobes sanity checks and test cases to prevent placing a kprobe
   in the middle of an instruction.

 - Allow concurrent access to /dev/hwc for CPUMF users.

 - Various ftrace / jump label improvements.

 - Convert unwinder tests to KUnit.

 - Add the s390_iommu_aperture kernel parameter to tweak the limits on
   concurrently usable DMA mappings.

 - Add the ap.useirq AP module option, which can be used to disable
   interrupt use.

 - Add add_disk() error handling support to block device drivers.

 - Drop the arch-specific implementations of strlcpy and strrchr and
   use the generic ones.

 - Several __pa/__va usage fixes.

 - Various cio, crypto, pci, kernel doc and other small fixes and
   improvements all over the code.
[ Merge fixup as per https://lore.kernel.org/all/YXAqZ%2FEszRisunQw@osiris/ ]

* tag 's390-5.16-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (63 commits)
  s390: make command line configurable
  s390: support command lines longer than 896 bytes
  s390/kexec_file: move kernel image size check
  s390/pci: add s390_iommu_aperture kernel parameter
  s390/spinlock: remove incorrect kernel doc indicator
  s390/string: use generic strlcpy
  s390/string: use generic strrchr
  s390/ap: function rework based on compiler warning
  s390/cio: make ccw_device_dma_* more robust
  s390/vfio-ap: s390/crypto: fix all kernel-doc warnings
  s390/hmcdrv: fix kernel doc comments
  s390/ap: new module option ap.useirq
  s390/cpumf: Allow multiple processes to access /dev/hwc
  s390/bitops: return true/false (not 1/0) from bool functions
  s390: add support for BEAR enhancement facility
  s390: introduce nospec_uses_trampoline()
  s390: rename last_break to pgm_last_break
  s390/ptrace: add last_break member to pt_regs
  s390/sclp: sort out physical vs virtual pointers usage
  s390/setup: convert start and end initrd pointers to virtual
  ...
commit 0b707e572a
@@ -4992,6 +4992,18 @@
			an IOTLB flush. Default is lazy flushing before reuse,
			which is faster.

	s390_iommu_aperture=	[KNL,S390]
			Specifies the size of the per device DMA address space
			accessible through the DMA and IOMMU APIs as a decimal
			factor of the size of main memory.
			The default is 1 meaning that one can concurrently use
			as many DMA addresses as physical memory is installed,
			if supported by hardware, and thus map all of memory
			once. With a value of 2 one can map all of memory twice
			and so on. As a special case a factor of 0 imposes no
			restrictions other than those given by hardware at the
			cost of significant additional memory use for tables.
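
			For example, a system that needs to keep more
			concurrent DMA mappings live than it has installed
			memory could boot with a larger factor (illustrative
			value, not prescribed by this change):

				s390_iommu_aperture=2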

	sa1100ir	[NET]
			See drivers/net/irda/sa1100_ir.c.

@@ -153,12 +153,15 @@ config S390
	select HAVE_DEBUG_KMEMLEAK
	select HAVE_DMA_CONTIGUOUS
	select HAVE_DYNAMIC_FTRACE
	select HAVE_DYNAMIC_FTRACE_WITH_ARGS
	select HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
	select HAVE_DYNAMIC_FTRACE_WITH_REGS
	select HAVE_EBPF_JIT if PACK_STACK && HAVE_MARCH_Z196_FEATURES
	select HAVE_EFFICIENT_UNALIGNED_ACCESS
	select HAVE_FAST_GUP
	select HAVE_FENTRY
	select HAVE_FTRACE_MCOUNT_RECORD
	select HAVE_FUNCTION_ARG_ACCESS_API
	select HAVE_FUNCTION_ERROR_INJECTION
	select HAVE_FUNCTION_GRAPH_TRACER
	select HAVE_FUNCTION_TRACER
@@ -190,6 +193,7 @@ config S390
	select HAVE_REGS_AND_STACK_ACCESS_API
	select HAVE_RELIABLE_STACKTRACE
	select HAVE_RSEQ
	select HAVE_SAMPLE_FTRACE_DIRECT
	select HAVE_SOFTIRQ_ON_OWN_STACK
	select HAVE_SYSCALL_TRACEPOINTS
	select HAVE_VIRT_CPU_ACCOUNTING
@@ -434,6 +438,14 @@ endchoice
config 64BIT
	def_bool y

config COMMAND_LINE_SIZE
	int "Maximum size of kernel command line"
	default 4096
	range 896 1048576
	help
	  This allows you to specify the maximum length of the kernel command
	  line.

config COMPAT
	def_bool y
	prompt "Kernel support for 31 bit emulation"
@@ -938,6 +950,8 @@ menu "Selftests"

config S390_UNWIND_SELFTEST
	def_tristate n
	depends on KUNIT
	default KUNIT_ALL_TESTS
	prompt "Test unwind functions"
	help
	  This option enables s390 specific stack unwinder testing kernel
@@ -946,4 +960,16 @@ config S390_UNWIND_SELFTEST

	  Say N if you are unsure.

config S390_KPROBES_SANITY_TEST
	def_tristate n
	prompt "Enable s390 specific kprobes tests"
	depends on KPROBES
	depends on KUNIT
	help
	  This option enables an s390 specific kprobes test module. This option
	  is not useful for distributions or general kernels, but only for kernel
	  developers working on architecture code.

	  Say N if you are unsure.

endmenu
|
||||
|
@@ -24,6 +24,7 @@ struct vmlinux_info {
	unsigned long dynsym_start;
	unsigned long rela_dyn_start;
	unsigned long rela_dyn_end;
	unsigned long amode31_size;
};

/* Symbols defined by linker scripts */
|
||||
|
@ -184,35 +184,23 @@ iplstart:
|
||||
bas %r14,.Lloader # load parameter file
|
||||
ltr %r2,%r2 # got anything ?
|
||||
bz .Lnopf
|
||||
chi %r2,895
|
||||
bnh .Lnotrunc
|
||||
la %r2,895
|
||||
l %r3,MAX_COMMAND_LINE_SIZE+ARCH_OFFSET-PARMAREA(%r12)
|
||||
ahi %r3,-1
|
||||
clr %r2,%r3
|
||||
bl .Lnotrunc
|
||||
lr %r2,%r3
|
||||
.Lnotrunc:
|
||||
l %r4,.Linitrd
|
||||
clc 0(3,%r4),.L_hdr # if it is HDRx
|
||||
bz .Lagain1 # skip dataset header
|
||||
clc 0(3,%r4),.L_eof # if it is EOFx
|
||||
bz .Lagain1 # skip dateset trailer
|
||||
la %r5,0(%r4,%r2)
|
||||
lr %r3,%r2
|
||||
la %r3,COMMAND_LINE-PARMAREA(%r12) # load adr. of command line
|
||||
mvc 0(256,%r3),0(%r4)
|
||||
mvc 256(256,%r3),256(%r4)
|
||||
mvc 512(256,%r3),512(%r4)
|
||||
mvc 768(122,%r3),768(%r4)
|
||||
slr %r0,%r0
|
||||
b .Lcntlp
|
||||
.Ldelspc:
|
||||
ic %r0,0(%r2,%r3)
|
||||
chi %r0,0x20 # is it a space ?
|
||||
be .Lcntlp
|
||||
ahi %r2,1
|
||||
b .Leolp
|
||||
.Lcntlp:
|
||||
brct %r2,.Ldelspc
|
||||
.Leolp:
|
||||
slr %r0,%r0
|
||||
stc %r0,0(%r2,%r3) # terminate buffer
|
||||
|
||||
lr %r5,%r2
|
||||
la %r6,COMMAND_LINE-PARMAREA(%r12)
|
||||
lr %r7,%r2
|
||||
ahi %r7,1
|
||||
mvcl %r6,%r4
|
||||
.Lnopf:
|
||||
|
||||
#
|
||||
@ -317,6 +305,7 @@ SYM_CODE_START_LOCAL(startup_normal)
|
||||
xc 0x300(256),0x300
|
||||
xc 0xe00(256),0xe00
|
||||
xc 0xf00(256),0xf00
|
||||
lctlg %c0,%c15,.Lctl-.LPG0(%r13) # load control registers
|
||||
stcke __LC_BOOT_CLOCK
|
||||
mvc __LC_LAST_UPDATE_CLOCK(8),__LC_BOOT_CLOCK+1
|
||||
spt 6f-.LPG0(%r13)
|
||||
@ -335,6 +324,22 @@ SYM_CODE_END(startup_normal)
|
||||
.quad 0x0000000180000000,startup_pgm_check_handler
|
||||
.Lio_new_psw:
|
||||
.quad 0x0002000180000000,0x1f0 # disabled wait
|
||||
.Lctl: .quad 0x04040000 # cr0: AFP registers & secondary space
|
||||
.quad 0 # cr1: primary space segment table
|
||||
.quad 0 # cr2: dispatchable unit control table
|
||||
.quad 0 # cr3: instruction authorization
|
||||
.quad 0xffff # cr4: instruction authorization
|
||||
.quad 0 # cr5: primary-aste origin
|
||||
.quad 0 # cr6: I/O interrupts
|
||||
.quad 0 # cr7: secondary space segment table
|
||||
.quad 0x0000000000008000 # cr8: access registers translation
|
||||
.quad 0 # cr9: tracing off
|
||||
.quad 0 # cr10: tracing off
|
||||
.quad 0 # cr11: tracing off
|
||||
.quad 0 # cr12: tracing off
|
||||
.quad 0 # cr13: home space segment table
|
||||
.quad 0xc0000000 # cr14: machine check handling off
|
||||
.quad 0 # cr15: linkage stack operations
|
||||
|
||||
#include "head_kdump.S"
|
||||
|
||||
@ -377,11 +382,10 @@ SYM_DATA_START(parmarea)
|
||||
.quad 0 # OLDMEM_BASE
|
||||
.quad 0 # OLDMEM_SIZE
|
||||
.quad kernel_version # points to kernel version string
|
||||
.quad COMMAND_LINE_SIZE
|
||||
|
||||
.org COMMAND_LINE
|
||||
.byte "root=/dev/ram0 ro"
|
||||
.byte 0
|
||||
.org PARMAREA+__PARMAREA_SIZE
|
||||
SYM_DATA_END(parmarea)
|
||||
|
||||
.org HEAD_END
|
||||
|
@ -170,10 +170,10 @@ static inline int has_ebcdic_char(const char *str)
|
||||
|
||||
void setup_boot_command_line(void)
|
||||
{
|
||||
parmarea.command_line[ARCH_COMMAND_LINE_SIZE - 1] = 0;
|
||||
parmarea.command_line[COMMAND_LINE_SIZE - 1] = 0;
|
||||
/* convert arch command line to ascii if necessary */
|
||||
if (has_ebcdic_char(parmarea.command_line))
|
||||
EBCASC(parmarea.command_line, ARCH_COMMAND_LINE_SIZE);
|
||||
EBCASC(parmarea.command_line, COMMAND_LINE_SIZE);
|
||||
/* copy arch command line */
|
||||
strcpy(early_command_line, strim(parmarea.command_line));
|
||||
|
||||
|
@ -175,6 +175,6 @@ void print_pgm_check_info(void)
|
||||
gpregs[12], gpregs[13], gpregs[14], gpregs[15]);
|
||||
print_stacktrace();
|
||||
decompressor_printk("Last Breaking-Event-Address:\n");
|
||||
decompressor_printk(" [<%016lx>] %pS\n", (unsigned long)S390_lowcore.breaking_event_addr,
|
||||
(void *)S390_lowcore.breaking_event_addr);
|
||||
decompressor_printk(" [<%016lx>] %pS\n", (unsigned long)S390_lowcore.pgm_last_break,
|
||||
(void *)S390_lowcore.pgm_last_break);
|
||||
}
|
||||
|
@ -15,6 +15,7 @@
|
||||
#include "uv.h"
|
||||
|
||||
unsigned long __bootdata_preserved(__kaslr_offset);
|
||||
unsigned long __bootdata(__amode31_base);
|
||||
unsigned long __bootdata_preserved(VMALLOC_START);
|
||||
unsigned long __bootdata_preserved(VMALLOC_END);
|
||||
struct page *__bootdata_preserved(vmemmap);
|
||||
@ -259,6 +260,12 @@ static void offset_vmlinux_info(unsigned long offset)
|
||||
vmlinux.dynsym_start += offset;
|
||||
}
|
||||
|
||||
static unsigned long reserve_amode31(unsigned long safe_addr)
|
||||
{
|
||||
__amode31_base = PAGE_ALIGN(safe_addr);
|
||||
return safe_addr + vmlinux.amode31_size;
|
||||
}
|
||||
|
||||
void startup_kernel(void)
|
||||
{
|
||||
unsigned long random_lma;
|
||||
@ -273,6 +280,7 @@ void startup_kernel(void)
|
||||
setup_lpp();
|
||||
store_ipl_parmblock();
|
||||
safe_addr = mem_safe_offset();
|
||||
safe_addr = reserve_amode31(safe_addr);
|
||||
safe_addr = read_ipl_report(safe_addr);
|
||||
uv_query_info();
|
||||
rescue_initrd(safe_addr);
|
||||
|
@ -61,7 +61,8 @@ CONFIG_PROTECTED_VIRTUALIZATION_GUEST=y
|
||||
CONFIG_CMM=m
|
||||
CONFIG_APPLDATA_BASE=y
|
||||
CONFIG_KVM=m
|
||||
CONFIG_S390_UNWIND_SELFTEST=y
|
||||
CONFIG_S390_UNWIND_SELFTEST=m
|
||||
CONFIG_S390_KPROBES_SANITY_TEST=m
|
||||
CONFIG_KPROBES=y
|
||||
CONFIG_JUMP_LABEL=y
|
||||
CONFIG_STATIC_KEYS_SELFTEST=y
|
||||
@ -776,7 +777,6 @@ CONFIG_CRC8=m
|
||||
CONFIG_RANDOM32_SELFTEST=y
|
||||
CONFIG_DMA_CMA=y
|
||||
CONFIG_CMA_SIZE_MBYTES=0
|
||||
CONFIG_DMA_API_DEBUG=y
|
||||
CONFIG_PRINTK_TIME=y
|
||||
CONFIG_DYNAMIC_DEBUG=y
|
||||
CONFIG_DEBUG_INFO=y
|
||||
@ -839,8 +839,13 @@ CONFIG_BPF_KPROBE_OVERRIDE=y
|
||||
CONFIG_HIST_TRIGGERS=y
|
||||
CONFIG_FTRACE_STARTUP_TEST=y
|
||||
# CONFIG_EVENT_TRACE_STARTUP_TEST is not set
|
||||
CONFIG_SAMPLES=y
|
||||
CONFIG_SAMPLE_TRACE_PRINTK=m
|
||||
CONFIG_SAMPLE_FTRACE_DIRECT=m
|
||||
CONFIG_DEBUG_ENTRY=y
|
||||
CONFIG_CIO_INJECT=y
|
||||
CONFIG_KUNIT=m
|
||||
CONFIG_KUNIT_DEBUGFS=y
|
||||
CONFIG_NOTIFIER_ERROR_INJECTION=m
|
||||
CONFIG_NETDEV_NOTIFIER_ERROR_INJECT=m
|
||||
CONFIG_FAULT_INJECTION=y
|
||||
|
@ -60,6 +60,7 @@ CONFIG_CMM=m
|
||||
CONFIG_APPLDATA_BASE=y
|
||||
CONFIG_KVM=m
|
||||
CONFIG_S390_UNWIND_SELFTEST=m
|
||||
CONFIG_S390_KPROBES_SANITY_TEST=m
|
||||
CONFIG_KPROBES=y
|
||||
CONFIG_JUMP_LABEL=y
|
||||
# CONFIG_GCC_PLUGINS is not set
|
||||
@ -788,6 +789,11 @@ CONFIG_FTRACE_SYSCALLS=y
|
||||
CONFIG_BLK_DEV_IO_TRACE=y
|
||||
CONFIG_BPF_KPROBE_OVERRIDE=y
|
||||
CONFIG_HIST_TRIGGERS=y
|
||||
CONFIG_SAMPLES=y
|
||||
CONFIG_SAMPLE_TRACE_PRINTK=m
|
||||
CONFIG_SAMPLE_FTRACE_DIRECT=m
|
||||
CONFIG_KUNIT=m
|
||||
CONFIG_KUNIT_DEBUGFS=y
|
||||
CONFIG_LKDTM=m
|
||||
CONFIG_PERCPU_TEST=m
|
||||
CONFIG_ATOMIC64_SELFTEST=y
|
||||
|
@ -16,20 +16,24 @@
|
||||
|
||||
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
|
||||
/* Fast-BCR without checkpoint synchronization */
|
||||
#define __ASM_BARRIER "bcr 14,0\n"
|
||||
#define __ASM_BCR_SERIALIZE "bcr 14,0\n"
|
||||
#else
|
||||
#define __ASM_BARRIER "bcr 15,0\n"
|
||||
#define __ASM_BCR_SERIALIZE "bcr 15,0\n"
|
||||
#endif
|
||||
|
||||
#define mb() do { asm volatile(__ASM_BARRIER : : : "memory"); } while (0)
|
||||
static __always_inline void bcr_serialize(void)
|
||||
{
|
||||
asm volatile(__ASM_BCR_SERIALIZE : : : "memory");
|
||||
}
|
||||
|
||||
#define rmb() barrier()
|
||||
#define wmb() barrier()
|
||||
#define dma_rmb() mb()
|
||||
#define dma_wmb() mb()
|
||||
#define __smp_mb() mb()
|
||||
#define __smp_rmb() rmb()
|
||||
#define __smp_wmb() wmb()
|
||||
#define mb() bcr_serialize()
|
||||
#define rmb() barrier()
|
||||
#define wmb() barrier()
|
||||
#define dma_rmb() mb()
|
||||
#define dma_wmb() mb()
|
||||
#define __smp_mb() mb()
|
||||
#define __smp_rmb() rmb()
|
||||
#define __smp_wmb() wmb()
|
||||
|
||||
#define __smp_store_release(p, v) \
|
||||
do { \
|
||||
|
@ -188,7 +188,7 @@ static inline bool arch_test_and_set_bit_lock(unsigned long nr,
|
||||
volatile unsigned long *ptr)
|
||||
{
|
||||
if (arch_test_bit(nr, ptr))
|
||||
return 1;
|
||||
return true;
|
||||
return arch_test_and_set_bit(nr, ptr);
|
||||
}
|
||||
|
||||
|
@@ -12,6 +12,7 @@
#ifndef __ASSEMBLY__

#include <linux/types.h>
#include <linux/jump_label.h>

struct cpuid
{
@@ -21,5 +22,7 @@ struct cpuid
	unsigned int unused : 16;
} __attribute__ ((packed, aligned(8)));

DECLARE_STATIC_KEY_FALSE(cpu_has_bear);
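
The code that actually enables this key is not part of the hunks shown here; as a rough sketch of how such a facility-dependent static key is typically wired up (facility number 193 is inferred from the ALTERNATIVE "...",193 sites in the entry code further down and is an assumption here):

	/* Illustrative sketch only, not taken verbatim from this series. */
	DEFINE_STATIC_KEY_FALSE(cpu_has_bear);

	static void __init detect_bear_facility(void)
	{
		if (test_facility(193))		/* BEAR enhancement facility */
			static_branch_enable(&cpu_has_bear);
	}

Consumers, such as the interrupt handlers later in this diff, then test it with static_branch_likely(&cpu_has_bear), which compiles down to a patched branch rather than a runtime load and compare.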

#endif /* __ASSEMBLY__ */
#endif /* _ASM_S390_CPU_H */
|
||||
|
@@ -462,7 +462,7 @@ arch_initcall(VNAME(var, reg))
 *
 * @var: Name of debug_info_t variable
 * @name: Name of debug log (e.g. used for debugfs entry)
 * @pages_per_area: Number of pages per area
 * @pages: Number of pages per area
 * @nr_areas: Number of debug areas
 * @buf_size: Size of data area in each debug entry
 * @view: Pointer to debug view struct
@@ -17,7 +17,6 @@

void ftrace_caller(void);

extern char ftrace_graph_caller_end;
extern void *ftrace_func;

struct dyn_arch_ftrace { };
@@ -42,6 +41,35 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr)
	return addr;
}

struct ftrace_regs {
	struct pt_regs regs;
};

static __always_inline struct pt_regs *arch_ftrace_get_regs(struct ftrace_regs *fregs)
{
	return &fregs->regs;
}

static __always_inline void ftrace_instruction_pointer_set(struct ftrace_regs *fregs,
							    unsigned long ip)
{
	struct pt_regs *regs = arch_ftrace_get_regs(fregs);

	regs->psw.addr = ip;
}
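
As a rough illustration of how these accessors are meant to be consumed (the livepatch hunk later in this series does exactly this via klp_arch_set_pc()), an ftrace ops callback that redirects a traced function could look like the sketch below; my_redirect and my_replacement are hypothetical names, and depending on configuration additional flags such as FTRACE_OPS_FL_SAVE_REGS may also be needed:

	static void my_redirect(unsigned long ip, unsigned long parent_ip,
				struct ftrace_ops *ops, struct ftrace_regs *fregs)
	{
		/* Resume execution in the replacement instead of the traced function. */
		ftrace_instruction_pointer_set(fregs, (unsigned long)my_replacement);
	}

	static struct ftrace_ops my_ops = {
		.func	= my_redirect,
		.flags	= FTRACE_OPS_FL_IPMODIFY,
	};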

/*
 * When an ftrace registered caller is tracing a function that is
 * also set by a register_ftrace_direct() call, it needs to be
 * differentiated in the ftrace_caller trampoline. To do this,
 * place the direct caller in the ORIG_GPR2 part of pt_regs. This
 * tells the ftrace_caller that there's a direct caller.
 */
static inline void arch_ftrace_set_direct_caller(struct pt_regs *regs, unsigned long addr)
{
	regs->orig_gpr2 = addr;
}
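
In C terms, the decision the ftrace_caller trampoline (arch/s390/kernel/mcount.S further down in this diff) makes with this value is roughly the following sketch:

	static unsigned long ftrace_return_target(struct pt_regs *regs)
	{
		/* A direct caller was registered: branch there ... */
		if (regs->orig_gpr2)
			return regs->orig_gpr2;
		/* ... otherwise return to the traced function as usual. */
		return regs->psw.addr;
	}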
|
||||
|
||||
/*
|
||||
* Even though the system call numbers are identical for s390/s390x a
|
||||
* different system call table is used for compat tasks. This may lead
|
||||
@ -68,4 +96,32 @@ static inline bool arch_syscall_match_sym_name(const char *sym,
|
||||
}
|
||||
|
||||
#endif /* __ASSEMBLY__ */
|
||||
|
||||
#ifdef CONFIG_FUNCTION_TRACER
|
||||
|
||||
#define FTRACE_NOP_INSN .word 0xc004, 0x0000, 0x0000 /* brcl 0,0 */
|
||||
|
||||
#ifndef CC_USING_HOTPATCH
|
||||
|
||||
#define FTRACE_GEN_MCOUNT_RECORD(name) \
|
||||
.section __mcount_loc, "a", @progbits; \
|
||||
.quad name; \
|
||||
.previous;
|
||||
|
||||
#else /* !CC_USING_HOTPATCH */
|
||||
|
||||
#define FTRACE_GEN_MCOUNT_RECORD(name)
|
||||
|
||||
#endif /* !CC_USING_HOTPATCH */
|
||||
|
||||
#define FTRACE_GEN_NOP_ASM(name) \
|
||||
FTRACE_GEN_MCOUNT_RECORD(name) \
|
||||
FTRACE_NOP_INSN
|
||||
|
||||
#else /* CONFIG_FUNCTION_TRACER */
|
||||
|
||||
#define FTRACE_GEN_NOP_ASM(name)
|
||||
|
||||
#endif /* CONFIG_FUNCTION_TRACER */
|
||||
|
||||
#endif /* _ASM_S390_FTRACE_H */
|
||||
|
@ -2,6 +2,8 @@
|
||||
#ifndef _ASM_S390_JUMP_LABEL_H
|
||||
#define _ASM_S390_JUMP_LABEL_H
|
||||
|
||||
#define HAVE_JUMP_LABEL_BATCH
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
|
||||
#include <linux/types.h>
|
||||
|
@ -16,9 +16,7 @@
|
||||
|
||||
static inline void klp_arch_set_pc(struct ftrace_regs *fregs, unsigned long ip)
|
||||
{
|
||||
struct pt_regs *regs = ftrace_get_regs(fregs);
|
||||
|
||||
regs->psw.addr = ip;
|
||||
ftrace_instruction_pointer_set(fregs, ip);
|
||||
}
|
||||
|
||||
#endif
|
||||
|
@ -65,7 +65,7 @@ struct lowcore {
|
||||
__u32 external_damage_code; /* 0x00f4 */
|
||||
__u64 failing_storage_address; /* 0x00f8 */
|
||||
__u8 pad_0x0100[0x0110-0x0100]; /* 0x0100 */
|
||||
__u64 breaking_event_addr; /* 0x0110 */
|
||||
__u64 pgm_last_break; /* 0x0110 */
|
||||
__u8 pad_0x0118[0x0120-0x0118]; /* 0x0118 */
|
||||
psw_t restart_old_psw; /* 0x0120 */
|
||||
psw_t external_old_psw; /* 0x0130 */
|
||||
@ -93,9 +93,10 @@ struct lowcore {
|
||||
psw_t return_psw; /* 0x0290 */
|
||||
psw_t return_mcck_psw; /* 0x02a0 */
|
||||
|
||||
__u64 last_break; /* 0x02b0 */
|
||||
|
||||
/* CPU accounting and timing values. */
|
||||
__u64 sys_enter_timer; /* 0x02b0 */
|
||||
__u8 pad_0x02b8[0x02c0-0x02b8]; /* 0x02b8 */
|
||||
__u64 sys_enter_timer; /* 0x02b8 */
|
||||
__u64 mcck_enter_timer; /* 0x02c0 */
|
||||
__u64 exit_timer; /* 0x02c8 */
|
||||
__u64 user_timer; /* 0x02d0 */
|
||||
@ -188,7 +189,7 @@ struct lowcore {
|
||||
__u32 tod_progreg_save_area; /* 0x1324 */
|
||||
__u32 cpu_timer_save_area[2]; /* 0x1328 */
|
||||
__u32 clock_comp_save_area[2]; /* 0x1330 */
|
||||
__u8 pad_0x1338[0x1340-0x1338]; /* 0x1338 */
|
||||
__u64 last_break_save_area; /* 0x1338 */
|
||||
__u32 access_regs_save_area[16]; /* 0x1340 */
|
||||
__u64 cregs_save_area[16]; /* 0x1380 */
|
||||
__u8 pad_0x1400[0x1800-0x1400]; /* 0x1400 */
|
||||
|
@ -12,6 +12,11 @@ void nospec_init_branches(void);
|
||||
void nospec_auto_detect(void);
|
||||
void nospec_revert(s32 *start, s32 *end);
|
||||
|
||||
static inline bool nospec_uses_trampoline(void)
|
||||
{
|
||||
return __is_defined(CC_USING_EXPOLINE) && !nospec_disable;
|
||||
}
|
||||
|
||||
#endif /* __ASSEMBLY__ */
|
||||
|
||||
#endif /* _ASM_S390_EXPOLINE_H */
|
||||
|
@ -583,11 +583,11 @@ static inline void cspg(unsigned long *ptr, unsigned long old, unsigned long new
|
||||
#define CRDTE_DTT_REGION1 0x1cUL
|
||||
|
||||
static inline void crdte(unsigned long old, unsigned long new,
|
||||
unsigned long table, unsigned long dtt,
|
||||
unsigned long *table, unsigned long dtt,
|
||||
unsigned long address, unsigned long asce)
|
||||
{
|
||||
union register_pair r1 = { .even = old, .odd = new, };
|
||||
union register_pair r2 = { .even = table | dtt, .odd = address, };
|
||||
union register_pair r2 = { .even = __pa(table) | dtt, .odd = address, };
|
||||
|
||||
asm volatile(".insn rrf,0xb98f0000,%[r1],%[r2],%[asce],0"
|
||||
: [r1] "+&d" (r1.pair)
|
||||
@ -1001,7 +1001,7 @@ static __always_inline void __ptep_ipte(unsigned long address, pte_t *ptep,
|
||||
unsigned long opt, unsigned long asce,
|
||||
int local)
|
||||
{
|
||||
unsigned long pto = (unsigned long) ptep;
|
||||
unsigned long pto = __pa(ptep);
|
||||
|
||||
if (__builtin_constant_p(opt) && opt == 0) {
|
||||
/* Invalidation + TLB flush for the pte */
|
||||
@ -1023,7 +1023,7 @@ static __always_inline void __ptep_ipte(unsigned long address, pte_t *ptep,
|
||||
static __always_inline void __ptep_ipte_range(unsigned long address, int nr,
|
||||
pte_t *ptep, int local)
|
||||
{
|
||||
unsigned long pto = (unsigned long) ptep;
|
||||
unsigned long pto = __pa(ptep);
|
||||
|
||||
/* Invalidate a range of ptes + TLB flush of the ptes */
|
||||
do {
|
||||
@ -1487,7 +1487,7 @@ static __always_inline void __pmdp_idte(unsigned long addr, pmd_t *pmdp,
|
||||
{
|
||||
unsigned long sto;
|
||||
|
||||
sto = (unsigned long) pmdp - pmd_index(addr) * sizeof(pmd_t);
|
||||
sto = __pa(pmdp) - pmd_index(addr) * sizeof(pmd_t);
|
||||
if (__builtin_constant_p(opt) && opt == 0) {
|
||||
/* flush without guest asce */
|
||||
asm volatile(
|
||||
@ -1513,7 +1513,7 @@ static __always_inline void __pudp_idte(unsigned long addr, pud_t *pudp,
|
||||
{
|
||||
unsigned long r3o;
|
||||
|
||||
r3o = (unsigned long) pudp - pud_index(addr) * sizeof(pud_t);
|
||||
r3o = __pa(pudp) - pud_index(addr) * sizeof(pud_t);
|
||||
r3o |= _ASCE_TYPE_REGION3;
|
||||
if (__builtin_constant_p(opt) && opt == 0) {
|
||||
/* flush without guest asce */
|
||||
|
@ -76,8 +76,7 @@ enum {
|
||||
* The pt_regs struct defines the way the registers are stored on
|
||||
* the stack during a system call.
|
||||
*/
|
||||
struct pt_regs
|
||||
{
|
||||
struct pt_regs {
|
||||
union {
|
||||
user_pt_regs user_regs;
|
||||
struct {
|
||||
@ -97,6 +96,7 @@ struct pt_regs
|
||||
};
|
||||
unsigned long flags;
|
||||
unsigned long cr1;
|
||||
unsigned long last_break;
|
||||
};
|
||||
|
||||
/*
|
||||
@ -197,6 +197,25 @@ const char *regs_query_register_name(unsigned int offset);
|
||||
unsigned long regs_get_register(struct pt_regs *regs, unsigned int offset);
|
||||
unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n);
|
||||
|
||||
/**
|
||||
* regs_get_kernel_argument() - get Nth function argument in kernel
|
||||
* @regs: pt_regs of that context
|
||||
* @n: function argument number (start from 0)
|
||||
*
|
||||
* regs_get_kernel_argument() returns @n th argument of the function call.
|
||||
*/
|
||||
static inline unsigned long regs_get_kernel_argument(struct pt_regs *regs,
|
||||
unsigned int n)
|
||||
{
|
||||
unsigned int argoffset = STACK_FRAME_OVERHEAD / sizeof(long);
|
||||
|
||||
#define NR_REG_ARGUMENTS 5
|
||||
if (n < NR_REG_ARGUMENTS)
|
||||
return regs_get_register(regs, 2 + n);
|
||||
n -= NR_REG_ARGUMENTS;
|
||||
return regs_get_kernel_stack_nth(regs, argoffset + n);
|
||||
}
|
||||
|
||||
static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
|
||||
{
|
||||
return regs->gprs[15];
|
||||
|
@ -117,6 +117,7 @@ struct zpci_report_error_header {
|
||||
|
||||
extern char *sclp_early_sccb;
|
||||
|
||||
void sclp_early_adjust_va(void);
|
||||
void sclp_early_set_buffer(void *sccb);
|
||||
int sclp_early_read_info(void);
|
||||
int sclp_early_read_storage_info(void);
|
||||
|
@ -11,8 +11,8 @@
|
||||
#include <linux/build_bug.h>
|
||||
|
||||
#define PARMAREA 0x10400
|
||||
#define HEAD_END 0x11000
|
||||
|
||||
#define COMMAND_LINE_SIZE CONFIG_COMMAND_LINE_SIZE
|
||||
/*
|
||||
* Machine features detected in early.c
|
||||
*/
|
||||
@ -43,6 +43,8 @@
|
||||
#define STARTUP_NORMAL_OFFSET 0x10000
|
||||
#define STARTUP_KDUMP_OFFSET 0x10010
|
||||
|
||||
#define LEGACY_COMMAND_LINE_SIZE 896
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
|
||||
#include <asm/lowcore.h>
|
||||
@ -55,8 +57,9 @@ struct parmarea {
|
||||
unsigned long oldmem_base; /* 0x10418 */
|
||||
unsigned long oldmem_size; /* 0x10420 */
|
||||
unsigned long kernel_version; /* 0x10428 */
|
||||
char pad1[0x10480 - 0x10430]; /* 0x10430 - 0x10480 */
|
||||
char command_line[ARCH_COMMAND_LINE_SIZE]; /* 0x10480 */
|
||||
unsigned long max_command_line_size; /* 0x10430 */
|
||||
char pad1[0x10480-0x10438]; /* 0x10438 - 0x10480 */
|
||||
char command_line[COMMAND_LINE_SIZE]; /* 0x10480 */
|
||||
};
|
||||
|
||||
extern struct parmarea parmarea;
|
||||
|
@ -31,22 +31,18 @@ void *memmove(void *dest, const void *src, size_t n);
|
||||
#define __HAVE_ARCH_STRCMP /* arch function */
|
||||
#define __HAVE_ARCH_STRCPY /* inline & arch function */
|
||||
#define __HAVE_ARCH_STRLCAT /* arch function */
|
||||
#define __HAVE_ARCH_STRLCPY /* arch function */
|
||||
#define __HAVE_ARCH_STRLEN /* inline & arch function */
|
||||
#define __HAVE_ARCH_STRNCAT /* arch function */
|
||||
#define __HAVE_ARCH_STRNCPY /* arch function */
|
||||
#define __HAVE_ARCH_STRNLEN /* inline & arch function */
|
||||
#define __HAVE_ARCH_STRRCHR /* arch function */
|
||||
#define __HAVE_ARCH_STRSTR /* arch function */
|
||||
|
||||
/* Prototypes for non-inlined arch strings functions. */
|
||||
int memcmp(const void *s1, const void *s2, size_t n);
|
||||
int strcmp(const char *s1, const char *s2);
|
||||
size_t strlcat(char *dest, const char *src, size_t n);
|
||||
size_t strlcpy(char *dest, const char *src, size_t size);
|
||||
char *strncat(char *dest, const char *src, size_t n);
|
||||
char *strncpy(char *dest, const char *src, size_t n);
|
||||
char *strrchr(const char *s, int c);
|
||||
char *strstr(const char *s1, const char *s2);
|
||||
#endif /* !CONFIG_KASAN */
|
||||
|
||||
|
arch/s390/include/asm/text-patching.h (new file, 16 lines)
@@ -0,0 +1,16 @@
/* SPDX-License-Identifier: GPL-2.0 */

#ifndef _ASM_S390_TEXT_PATCHING_H
#define _ASM_S390_TEXT_PATCHING_H

#include <asm/barrier.h>

static __always_inline void sync_core(void)
{
	bcr_serialize();
}

void text_poke_sync(void);
void text_poke_sync_lock(void);

#endif /* _ASM_S390_TEXT_PATCHING_H */
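
These helpers exist so that code patching sites can make freshly written instructions visible on all CPUs. A minimal sketch of the pattern, assuming a one-byte update of a brcl mask field as done by the ftrace and jump label changes further down (the function name is illustrative):

	static void patch_brcl_mask(u8 *brcl, u8 mask)
	{
		/* Rewrite the mask field of the brcl instruction ... */
		s390_kernel_write(brcl + 1, &mask, 1);
		/* ... then serialize all CPUs so they observe the new code. */
		text_poke_sync_lock();
	}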

@@ -1,14 +1 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
 * S390 version
 * Copyright IBM Corp. 1999, 2010
 */

#ifndef _UAPI_ASM_S390_SETUP_H
#define _UAPI_ASM_S390_SETUP_H

#define COMMAND_LINE_SIZE 4096

#define ARCH_COMMAND_LINE_SIZE 896

#endif /* _UAPI_ASM_S390_SETUP_H */
|
@@ -1,5 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <asm/text-patching.h>
#include <asm/alternative.h>
#include <asm/facility.h>
#include <asm/nospec-branch.h>
@@ -110,3 +113,20 @@ void __init apply_alternative_instructions(void)
{
	apply_alternatives(__alt_instructions, __alt_instructions_end);
}

static void do_sync_core(void *info)
{
	sync_core();
}

void text_poke_sync(void)
{
	on_each_cpu(do_sync_core, NULL, 1);
}

void text_poke_sync_lock(void)
{
	cpus_read_lock();
	text_poke_sync();
	cpus_read_unlock();
}
|
@ -35,6 +35,7 @@ int main(void)
|
||||
OFFSET(__PT_ORIG_GPR2, pt_regs, orig_gpr2);
|
||||
OFFSET(__PT_FLAGS, pt_regs, flags);
|
||||
OFFSET(__PT_CR1, pt_regs, cr1);
|
||||
OFFSET(__PT_LAST_BREAK, pt_regs, last_break);
|
||||
DEFINE(__PT_SIZE, sizeof(struct pt_regs));
|
||||
BLANK();
|
||||
/* stack_frame offsets */
|
||||
@ -45,6 +46,7 @@ int main(void)
|
||||
OFFSET(__SF_SIE_SAVEAREA, stack_frame, empty1[2]);
|
||||
OFFSET(__SF_SIE_REASON, stack_frame, empty1[3]);
|
||||
OFFSET(__SF_SIE_FLAGS, stack_frame, empty1[4]);
|
||||
DEFINE(STACK_FRAME_OVERHEAD, sizeof(struct stack_frame));
|
||||
BLANK();
|
||||
/* idle data offsets */
|
||||
OFFSET(__CLOCK_IDLE_ENTER, s390_idle_data, clock_idle_enter);
|
||||
@ -77,7 +79,7 @@ int main(void)
|
||||
OFFSET(__LC_MCCK_CODE, lowcore, mcck_interruption_code);
|
||||
OFFSET(__LC_EXT_DAMAGE_CODE, lowcore, external_damage_code);
|
||||
OFFSET(__LC_MCCK_FAIL_STOR_ADDR, lowcore, failing_storage_address);
|
||||
OFFSET(__LC_LAST_BREAK, lowcore, breaking_event_addr);
|
||||
OFFSET(__LC_PGM_LAST_BREAK, lowcore, pgm_last_break);
|
||||
OFFSET(__LC_RETURN_LPSWE, lowcore, return_lpswe);
|
||||
OFFSET(__LC_RETURN_MCCK_LPSWE, lowcore, return_mcck_lpswe);
|
||||
OFFSET(__LC_RST_OLD_PSW, lowcore, restart_old_psw);
|
||||
@ -126,6 +128,7 @@ int main(void)
|
||||
OFFSET(__LC_PREEMPT_COUNT, lowcore, preempt_count);
|
||||
OFFSET(__LC_GMAP, lowcore, gmap);
|
||||
OFFSET(__LC_BR_R1, lowcore, br_r1_trampoline);
|
||||
OFFSET(__LC_LAST_BREAK, lowcore, last_break);
|
||||
/* software defined ABI-relevant lowcore locations 0xe00 - 0xe20 */
|
||||
OFFSET(__LC_DUMP_REIPL, lowcore, ipib);
|
||||
/* hardware defined lowcore locations 0x1000 - 0x18ff */
|
||||
@ -139,6 +142,7 @@ int main(void)
|
||||
OFFSET(__LC_TOD_PROGREG_SAVE_AREA, lowcore, tod_progreg_save_area);
|
||||
OFFSET(__LC_CPU_TIMER_SAVE_AREA, lowcore, cpu_timer_save_area);
|
||||
OFFSET(__LC_CLOCK_COMP_SAVE_AREA, lowcore, clock_comp_save_area);
|
||||
OFFSET(__LC_LAST_BREAK_SAVE_AREA, lowcore, last_break_save_area);
|
||||
OFFSET(__LC_AREGS_SAVE_AREA, lowcore, access_regs_save_area);
|
||||
OFFSET(__LC_CREGS_SAVE_AREA, lowcore, cregs_save_area);
|
||||
OFFSET(__LC_PGM_TDB, lowcore, pgm_tdb);
|
||||
@ -160,5 +164,6 @@ int main(void)
|
||||
DEFINE(OLDMEM_BASE, PARMAREA + offsetof(struct parmarea, oldmem_base));
|
||||
DEFINE(OLDMEM_SIZE, PARMAREA + offsetof(struct parmarea, oldmem_size));
|
||||
DEFINE(COMMAND_LINE, PARMAREA + offsetof(struct parmarea, command_line));
|
||||
DEFINE(MAX_COMMAND_LINE_SIZE, PARMAREA + offsetof(struct parmarea, max_command_line_size));
|
||||
return 0;
|
||||
}
|
||||
|
@ -29,7 +29,7 @@ static int diag8_noresponse(int cmdlen)
|
||||
asm volatile(
|
||||
" diag %[rx],%[ry],0x8\n"
|
||||
: [ry] "+&d" (cmdlen)
|
||||
: [rx] "d" ((addr_t) cpcmd_buf)
|
||||
: [rx] "d" (__pa(cpcmd_buf))
|
||||
: "cc");
|
||||
return cmdlen;
|
||||
}
|
||||
@ -39,8 +39,8 @@ static int diag8_response(int cmdlen, char *response, int *rlen)
|
||||
union register_pair rx, ry;
|
||||
int cc;
|
||||
|
||||
rx.even = (addr_t) cpcmd_buf;
|
||||
rx.odd = (addr_t) response;
|
||||
rx.even = __pa(cpcmd_buf);
|
||||
rx.odd = __pa(response);
|
||||
ry.even = cmdlen | 0x40000000L;
|
||||
ry.odd = *rlen;
|
||||
asm volatile(
|
||||
|
@ -152,7 +152,7 @@ void show_stack(struct task_struct *task, unsigned long *stack,
|
||||
static void show_last_breaking_event(struct pt_regs *regs)
|
||||
{
|
||||
printk("Last Breaking-Event-Address:\n");
|
||||
printk(" [<%016lx>] %pSR\n", regs->args[0], (void *)regs->args[0]);
|
||||
printk(" [<%016lx>] %pSR\n", regs->last_break, (void *)regs->last_break);
|
||||
}
|
||||
|
||||
void show_registers(struct pt_regs *regs)
|
||||
|
@ -280,7 +280,7 @@ char __bootdata(early_command_line)[COMMAND_LINE_SIZE];
|
||||
static void __init setup_boot_command_line(void)
|
||||
{
|
||||
/* copy arch command line */
|
||||
strlcpy(boot_command_line, early_command_line, ARCH_COMMAND_LINE_SIZE);
|
||||
strlcpy(boot_command_line, early_command_line, COMMAND_LINE_SIZE);
|
||||
}
|
||||
|
||||
static void __init check_image_bootable(void)
|
||||
@ -296,6 +296,7 @@ static void __init check_image_bootable(void)
|
||||
|
||||
void __init startup_init(void)
|
||||
{
|
||||
sclp_early_adjust_va();
|
||||
reset_tod_clock();
|
||||
check_image_bootable();
|
||||
time_early_init();
|
||||
|
@ -52,6 +52,22 @@ STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE
|
||||
|
||||
_LPP_OFFSET = __LC_LPP
|
||||
|
||||
.macro STBEAR address
|
||||
ALTERNATIVE "", ".insn s,0xb2010000,\address", 193
|
||||
.endm
|
||||
|
||||
.macro LBEAR address
|
||||
ALTERNATIVE "", ".insn s,0xb2000000,\address", 193
|
||||
.endm
|
||||
|
||||
.macro LPSWEY address,lpswe
|
||||
ALTERNATIVE "b \lpswe", ".insn siy,0xeb0000000071,\address,0", 193
|
||||
.endm
|
||||
|
||||
.macro MBEAR reg
|
||||
ALTERNATIVE "", __stringify(mvc __PT_LAST_BREAK(8,\reg),__LC_LAST_BREAK), 193
|
||||
.endm
|
||||
|
||||
.macro CHECK_STACK savearea
|
||||
#ifdef CONFIG_CHECK_STACK
|
||||
tml %r15,STACK_SIZE - CONFIG_STACK_GUARD
|
||||
@ -302,6 +318,7 @@ ENTRY(system_call)
|
||||
BPOFF
|
||||
lghi %r14,0
|
||||
.Lsysc_per:
|
||||
STBEAR __LC_LAST_BREAK
|
||||
lctlg %c1,%c1,__LC_KERNEL_ASCE
|
||||
lg %r12,__LC_CURRENT
|
||||
lg %r15,__LC_KERNEL_STACK
|
||||
@ -321,14 +338,16 @@ ENTRY(system_call)
|
||||
xgr %r11,%r11
|
||||
la %r2,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs
|
||||
mvc __PT_R8(64,%r2),__LC_SAVE_AREA_SYNC
|
||||
MBEAR %r2
|
||||
lgr %r3,%r14
|
||||
brasl %r14,__do_syscall
|
||||
lctlg %c1,%c1,__LC_USER_ASCE
|
||||
mvc __LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
|
||||
BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
|
||||
LBEAR STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15)
|
||||
lmg %r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
|
||||
stpt __LC_EXIT_TIMER
|
||||
b __LC_RETURN_LPSWE
|
||||
LPSWEY __LC_RETURN_PSW,__LC_RETURN_LPSWE
|
||||
ENDPROC(system_call)
|
||||
|
||||
#
|
||||
@ -340,9 +359,10 @@ ENTRY(ret_from_fork)
|
||||
lctlg %c1,%c1,__LC_USER_ASCE
|
||||
mvc __LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
|
||||
BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
|
||||
LBEAR STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15)
|
||||
lmg %r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
|
||||
stpt __LC_EXIT_TIMER
|
||||
b __LC_RETURN_LPSWE
|
||||
LPSWEY __LC_RETURN_PSW,__LC_RETURN_LPSWE
|
||||
ENDPROC(ret_from_fork)
|
||||
|
||||
/*
|
||||
@ -382,6 +402,7 @@ ENTRY(pgm_check_handler)
|
||||
xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
|
||||
stmg %r0,%r7,__PT_R0(%r11)
|
||||
mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
|
||||
mvc __PT_LAST_BREAK(8,%r11),__LC_PGM_LAST_BREAK
|
||||
stmg %r8,%r9,__PT_PSW(%r11)
|
||||
|
||||
# clear user controlled registers to prevent speculative use
|
||||
@ -401,8 +422,9 @@ ENTRY(pgm_check_handler)
|
||||
stpt __LC_EXIT_TIMER
|
||||
.Lpgm_exit_kernel:
|
||||
mvc __LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
|
||||
LBEAR STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15)
|
||||
lmg %r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
|
||||
b __LC_RETURN_LPSWE
|
||||
LPSWEY __LC_RETURN_PSW,__LC_RETURN_LPSWE
|
||||
|
||||
#
|
||||
# single stepped system call
|
||||
@ -412,7 +434,8 @@ ENTRY(pgm_check_handler)
|
||||
larl %r14,.Lsysc_per
|
||||
stg %r14,__LC_RETURN_PSW+8
|
||||
lghi %r14,1
|
||||
lpswe __LC_RETURN_PSW # branch to .Lsysc_per
|
||||
LBEAR __LC_PGM_LAST_BREAK
|
||||
LPSWEY __LC_RETURN_PSW,__LC_RETURN_LPSWE # branch to .Lsysc_per
|
||||
ENDPROC(pgm_check_handler)
|
||||
|
||||
/*
|
||||
@ -422,6 +445,7 @@ ENDPROC(pgm_check_handler)
|
||||
ENTRY(\name)
|
||||
STCK __LC_INT_CLOCK
|
||||
stpt __LC_SYS_ENTER_TIMER
|
||||
STBEAR __LC_LAST_BREAK
|
||||
BPOFF
|
||||
stmg %r8,%r15,__LC_SAVE_AREA_ASYNC
|
||||
lg %r12,__LC_CURRENT
|
||||
@ -453,6 +477,7 @@ ENTRY(\name)
|
||||
xgr %r10,%r10
|
||||
xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
|
||||
mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
|
||||
MBEAR %r11
|
||||
stmg %r8,%r9,__PT_PSW(%r11)
|
||||
tm %r8,0x0001 # coming from user space?
|
||||
jno 1f
|
||||
@ -465,8 +490,9 @@ ENTRY(\name)
|
||||
lctlg %c1,%c1,__LC_USER_ASCE
|
||||
BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
|
||||
stpt __LC_EXIT_TIMER
|
||||
2: lmg %r0,%r15,__PT_R0(%r11)
|
||||
b __LC_RETURN_LPSWE
|
||||
2: LBEAR __PT_LAST_BREAK(%r11)
|
||||
lmg %r0,%r15,__PT_R0(%r11)
|
||||
LPSWEY __LC_RETURN_PSW,__LC_RETURN_LPSWE
|
||||
ENDPROC(\name)
|
||||
.endm
|
||||
|
||||
@ -505,6 +531,7 @@ ENTRY(mcck_int_handler)
|
||||
BPOFF
|
||||
la %r1,4095 # validate r1
|
||||
spt __LC_CPU_TIMER_SAVE_AREA-4095(%r1) # validate cpu timer
|
||||
LBEAR __LC_LAST_BREAK_SAVE_AREA-4095(%r1) # validate bear
|
||||
lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# validate gprs
|
||||
lg %r12,__LC_CURRENT
|
||||
lmg %r8,%r9,__LC_MCK_OLD_PSW
|
||||
@ -591,8 +618,10 @@ ENTRY(mcck_int_handler)
|
||||
jno 0f
|
||||
BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
|
||||
stpt __LC_EXIT_TIMER
|
||||
0: lmg %r11,%r15,__PT_R11(%r11)
|
||||
b __LC_RETURN_MCCK_LPSWE
|
||||
0: ALTERNATIVE "", __stringify(lghi %r12,__LC_LAST_BREAK_SAVE_AREA),193
|
||||
LBEAR 0(%r12)
|
||||
lmg %r11,%r15,__PT_R11(%r11)
|
||||
LPSWEY __LC_RETURN_MCCK_PSW,__LC_RETURN_MCCK_LPSWE
|
||||
|
||||
.Lmcck_panic:
|
||||
/*
|
||||
|
@ -70,5 +70,6 @@ extern struct exception_table_entry _stop_amode31_ex_table[];
|
||||
#define __amode31_data __section(".amode31.data")
|
||||
#define __amode31_ref __section(".amode31.refs")
|
||||
extern long _start_amode31_refs[], _end_amode31_refs[];
|
||||
extern unsigned long __amode31_base;
|
||||
|
||||
#endif /* _ENTRY_H */
|
||||
|
@ -17,6 +17,7 @@
|
||||
#include <linux/kprobes.h>
|
||||
#include <trace/syscall.h>
|
||||
#include <asm/asm-offsets.h>
|
||||
#include <asm/text-patching.h>
|
||||
#include <asm/cacheflush.h>
|
||||
#include <asm/ftrace.lds.h>
|
||||
#include <asm/nospec-branch.h>
|
||||
@ -80,17 +81,6 @@ asm(
|
||||
|
||||
#ifdef CONFIG_MODULES
|
||||
static char *ftrace_plt;
|
||||
|
||||
asm(
|
||||
" .data\n"
|
||||
"ftrace_plt_template:\n"
|
||||
" basr %r1,%r0\n"
|
||||
" lg %r1,0f-.(%r1)\n"
|
||||
" br %r1\n"
|
||||
"0: .quad ftrace_caller\n"
|
||||
"ftrace_plt_template_end:\n"
|
||||
" .previous\n"
|
||||
);
|
||||
#endif /* CONFIG_MODULES */
|
||||
|
||||
static const char *ftrace_shared_hotpatch_trampoline(const char **end)
|
||||
@ -116,7 +106,7 @@ static const char *ftrace_shared_hotpatch_trampoline(const char **end)
|
||||
|
||||
bool ftrace_need_init_nop(void)
|
||||
{
|
||||
return ftrace_shared_hotpatch_trampoline(NULL);
|
||||
return true;
|
||||
}
|
||||
|
||||
int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
|
||||
@ -175,28 +165,6 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void ftrace_generate_nop_insn(struct ftrace_insn *insn)
|
||||
{
|
||||
/* brcl 0,0 */
|
||||
insn->opc = 0xc004;
|
||||
insn->disp = 0;
|
||||
}
|
||||
|
||||
static void ftrace_generate_call_insn(struct ftrace_insn *insn,
|
||||
unsigned long ip)
|
||||
{
|
||||
unsigned long target;
|
||||
|
||||
/* brasl r0,ftrace_caller */
|
||||
target = FTRACE_ADDR;
|
||||
#ifdef CONFIG_MODULES
|
||||
if (is_module_addr((void *)ip))
|
||||
target = (unsigned long)ftrace_plt;
|
||||
#endif /* CONFIG_MODULES */
|
||||
insn->opc = 0xc005;
|
||||
insn->disp = (target - ip) / 2;
|
||||
}
|
||||
|
||||
static void brcl_disable(void *brcl)
|
||||
{
|
||||
u8 op = 0x04; /* set mask field to zero */
|
||||
@ -207,23 +175,7 @@ static void brcl_disable(void *brcl)
|
||||
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
|
||||
unsigned long addr)
|
||||
{
|
||||
struct ftrace_insn orig, new, old;
|
||||
|
||||
if (ftrace_shared_hotpatch_trampoline(NULL)) {
|
||||
brcl_disable((void *)rec->ip);
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (copy_from_kernel_nofault(&old, (void *) rec->ip, sizeof(old)))
|
||||
return -EFAULT;
|
||||
/* Replace ftrace call with a nop. */
|
||||
ftrace_generate_call_insn(&orig, rec->ip);
|
||||
ftrace_generate_nop_insn(&new);
|
||||
|
||||
/* Verify that the to be replaced code matches what we expect. */
|
||||
if (memcmp(&orig, &old, sizeof(old)))
|
||||
return -EINVAL;
|
||||
s390_kernel_write((void *) rec->ip, &new, sizeof(new));
|
||||
brcl_disable((void *)rec->ip);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -236,23 +188,7 @@ static void brcl_enable(void *brcl)
|
||||
|
||||
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
|
||||
{
|
||||
struct ftrace_insn orig, new, old;
|
||||
|
||||
if (ftrace_shared_hotpatch_trampoline(NULL)) {
|
||||
brcl_enable((void *)rec->ip);
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (copy_from_kernel_nofault(&old, (void *) rec->ip, sizeof(old)))
|
||||
return -EFAULT;
|
||||
/* Replace nop with an ftrace call. */
|
||||
ftrace_generate_nop_insn(&orig);
|
||||
ftrace_generate_call_insn(&new, rec->ip);
|
||||
|
||||
/* Verify that the to be replaced code matches what we expect. */
|
||||
if (memcmp(&orig, &old, sizeof(old)))
|
||||
return -EINVAL;
|
||||
s390_kernel_write((void *) rec->ip, &new, sizeof(new));
|
||||
brcl_enable((void *)rec->ip);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -264,22 +200,16 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
|
||||
|
||||
void arch_ftrace_update_code(int command)
|
||||
{
|
||||
if (ftrace_shared_hotpatch_trampoline(NULL))
|
||||
ftrace_modify_all_code(command);
|
||||
else
|
||||
ftrace_run_stop_machine(command);
|
||||
}
|
||||
|
||||
static void __ftrace_sync(void *dummy)
|
||||
{
|
||||
ftrace_modify_all_code(command);
|
||||
}
|
||||
|
||||
int ftrace_arch_code_modify_post_process(void)
|
||||
{
|
||||
if (ftrace_shared_hotpatch_trampoline(NULL)) {
|
||||
/* Send SIGP to the other CPUs, so they see the new code. */
|
||||
smp_call_function(__ftrace_sync, NULL, 1);
|
||||
}
|
||||
/*
|
||||
* Flush any pre-fetched instructions on all
|
||||
* CPUs to make the new code visible.
|
||||
*/
|
||||
text_poke_sync_lock();
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -294,10 +224,6 @@ static int __init ftrace_plt_init(void)
|
||||
panic("cannot allocate ftrace plt\n");
|
||||
|
||||
start = ftrace_shared_hotpatch_trampoline(&end);
|
||||
if (!start) {
|
||||
start = ftrace_plt_template;
|
||||
end = ftrace_plt_template_end;
|
||||
}
|
||||
memcpy(ftrace_plt, start, end - start);
|
||||
set_memory_ro((unsigned long)ftrace_plt, 1);
|
||||
return 0;
|
||||
@ -337,12 +263,14 @@ NOKPROBE_SYMBOL(prepare_ftrace_return);
|
||||
int ftrace_enable_ftrace_graph_caller(void)
|
||||
{
|
||||
brcl_disable(ftrace_graph_caller);
|
||||
text_poke_sync_lock();
|
||||
return 0;
|
||||
}
|
||||
|
||||
int ftrace_disable_ftrace_graph_caller(void)
|
||||
{
|
||||
brcl_enable(ftrace_graph_caller);
|
||||
text_poke_sync_lock();
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -20,8 +20,6 @@ __HEAD
|
||||
ENTRY(startup_continue)
|
||||
larl %r1,tod_clock_base
|
||||
mvc 0(16,%r1),__LC_BOOT_CLOCK
|
||||
larl %r13,.LPG1 # get base
|
||||
lctlg %c0,%c15,.Lctl-.LPG1(%r13) # load control registers
|
||||
#
|
||||
# Setup stack
|
||||
#
|
||||
@ -42,19 +40,3 @@ ENTRY(startup_continue)
|
||||
.align 16
|
||||
.LPG1:
|
||||
.Ldw: .quad 0x0002000180000000,0x0000000000000000
|
||||
.Lctl: .quad 0x04040000 # cr0: AFP registers & secondary space
|
||||
.quad 0 # cr1: primary space segment table
|
||||
.quad 0 # cr2: dispatchable unit control table
|
||||
.quad 0 # cr3: instruction authorization
|
||||
.quad 0xffff # cr4: instruction authorization
|
||||
.quad 0 # cr5: primary-aste origin
|
||||
.quad 0 # cr6: I/O interrupts
|
||||
.quad 0 # cr7: secondary space segment table
|
||||
.quad 0x0000000000008000 # cr8: access registers translation
|
||||
.quad 0 # cr9: tracing off
|
||||
.quad 0 # cr10: tracing off
|
||||
.quad 0 # cr11: tracing off
|
||||
.quad 0 # cr12: tracing off
|
||||
.quad 0 # cr13: home space segment table
|
||||
.quad 0xc0000000 # cr14: machine check handling off
|
||||
.quad 0 # cr15: linkage stack operations
|
||||
|
@ -140,8 +140,11 @@ void noinstr do_io_irq(struct pt_regs *regs)
|
||||
|
||||
irq_enter();
|
||||
|
||||
if (user_mode(regs))
|
||||
if (user_mode(regs)) {
|
||||
update_timer_sys();
|
||||
if (static_branch_likely(&cpu_has_bear))
|
||||
current->thread.last_break = regs->last_break;
|
||||
}
|
||||
|
||||
from_idle = !user_mode(regs) && regs->psw.addr == (unsigned long)psw_idle_exit;
|
||||
if (from_idle)
|
||||
@ -171,8 +174,11 @@ void noinstr do_ext_irq(struct pt_regs *regs)
|
||||
|
||||
irq_enter();
|
||||
|
||||
if (user_mode(regs))
|
||||
if (user_mode(regs)) {
|
||||
update_timer_sys();
|
||||
if (static_branch_likely(&cpu_has_bear))
|
||||
current->thread.last_break = regs->last_break;
|
||||
}
|
||||
|
||||
regs->int_code = S390_lowcore.ext_int_code_addr;
|
||||
regs->int_parm = S390_lowcore.ext_params;
|
||||
|
@ -6,8 +6,9 @@
|
||||
* Author(s): Jan Glauber <jang@linux.vnet.ibm.com>
|
||||
*/
|
||||
#include <linux/uaccess.h>
|
||||
#include <linux/stop_machine.h>
|
||||
#include <linux/jump_label.h>
|
||||
#include <linux/module.h>
|
||||
#include <asm/text-patching.h>
|
||||
#include <asm/ipl.h>
|
||||
|
||||
struct insn {
|
||||
@ -48,9 +49,9 @@ static struct insn orignop = {
|
||||
.offset = JUMP_LABEL_NOP_OFFSET >> 1,
|
||||
};
|
||||
|
||||
static void __jump_label_transform(struct jump_entry *entry,
|
||||
enum jump_label_type type,
|
||||
int init)
|
||||
static void jump_label_transform(struct jump_entry *entry,
|
||||
enum jump_label_type type,
|
||||
int init)
|
||||
{
|
||||
void *code = (void *)jump_entry_code(entry);
|
||||
struct insn old, new;
|
||||
@ -72,19 +73,28 @@ static void __jump_label_transform(struct jump_entry *entry,
|
||||
s390_kernel_write(code, &new, sizeof(new));
|
||||
}
|
||||
|
||||
static void __jump_label_sync(void *dummy)
|
||||
{
|
||||
}
|
||||
|
||||
void arch_jump_label_transform(struct jump_entry *entry,
|
||||
enum jump_label_type type)
|
||||
{
|
||||
__jump_label_transform(entry, type, 0);
|
||||
smp_call_function(__jump_label_sync, NULL, 1);
|
||||
jump_label_transform(entry, type, 0);
|
||||
text_poke_sync();
|
||||
}
|
||||
|
||||
void arch_jump_label_transform_static(struct jump_entry *entry,
|
||||
enum jump_label_type type)
|
||||
bool arch_jump_label_transform_queue(struct jump_entry *entry,
|
||||
enum jump_label_type type)
|
||||
{
|
||||
__jump_label_transform(entry, type, 1);
|
||||
jump_label_transform(entry, type, 0);
|
||||
return true;
|
||||
}
|
||||
|
||||
void arch_jump_label_transform_apply(void)
|
||||
{
|
||||
text_poke_sync();
|
||||
}
|
||||
|
||||
void __init_or_module arch_jump_label_transform_static(struct jump_entry *entry,
|
||||
enum jump_label_type type)
|
||||
{
|
||||
jump_label_transform(entry, type, 1);
|
||||
text_poke_sync();
|
||||
}
|
||||
|
@ -122,9 +122,55 @@ static void s390_free_insn_slot(struct kprobe *p)
|
||||
}
|
||||
NOKPROBE_SYMBOL(s390_free_insn_slot);
|
||||
|
||||
/* Check if paddr is at an instruction boundary */
|
||||
static bool can_probe(unsigned long paddr)
|
||||
{
|
||||
unsigned long addr, offset = 0;
|
||||
kprobe_opcode_t insn;
|
||||
struct kprobe *kp;
|
||||
|
||||
if (paddr & 0x01)
|
||||
return false;
|
||||
|
||||
if (!kallsyms_lookup_size_offset(paddr, NULL, &offset))
|
||||
return false;
|
||||
|
||||
/* Decode instructions */
|
||||
addr = paddr - offset;
|
||||
while (addr < paddr) {
|
||||
if (copy_from_kernel_nofault(&insn, (void *)addr, sizeof(insn)))
|
||||
return false;
|
||||
|
||||
if (insn >> 8 == 0) {
|
||||
if (insn != BREAKPOINT_INSTRUCTION) {
|
||||
/*
|
||||
* Note that QEMU inserts opcode 0x0000 to implement
|
||||
* software breakpoints for guests. Since the size of
|
||||
* the original instruction is unknown, stop following
|
||||
* instructions and prevent setting a kprobe.
|
||||
*/
|
||||
return false;
|
||||
}
|
||||
/*
|
||||
* Check if the instruction has been modified by another
|
||||
* kprobe, in which case the original instruction is
|
||||
* decoded.
|
||||
*/
|
||||
kp = get_kprobe((void *)addr);
|
||||
if (!kp) {
|
||||
/* not a kprobe */
|
||||
return false;
|
||||
}
|
||||
insn = kp->opcode;
|
||||
}
|
||||
addr += insn_length(insn >> 8);
|
||||
}
|
||||
return addr == paddr;
|
||||
}
|
||||
|
||||
int arch_prepare_kprobe(struct kprobe *p)
|
||||
{
|
||||
if ((unsigned long) p->addr & 0x01)
|
||||
if (!can_probe((unsigned long)p->addr))
|
||||
return -EINVAL;
|
||||
/* Make sure the probe isn't going on a difficult instruction */
|
||||
if (probe_is_prohibited_opcode(p->addr))
|
||||
|
@ -216,7 +216,9 @@ void *kexec_file_add_components(struct kimage *image,
|
||||
int (*add_kernel)(struct kimage *image,
|
||||
struct s390_load_data *data))
|
||||
{
|
||||
unsigned long max_command_line_size = LEGACY_COMMAND_LINE_SIZE;
|
||||
struct s390_load_data data = {0};
|
||||
unsigned long minsize;
|
||||
int ret;
|
||||
|
||||
data.report = ipl_report_init(&ipl_block);
|
||||
@ -227,10 +229,23 @@ void *kexec_file_add_components(struct kimage *image,
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
if (image->cmdline_buf_len >= ARCH_COMMAND_LINE_SIZE) {
|
||||
ret = -EINVAL;
|
||||
ret = -EINVAL;
|
||||
minsize = PARMAREA + offsetof(struct parmarea, command_line);
|
||||
if (image->kernel_buf_len < minsize)
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (data.parm->max_command_line_size)
|
||||
max_command_line_size = data.parm->max_command_line_size;
|
||||
|
||||
if (minsize + max_command_line_size < minsize)
|
||||
goto out;
|
||||
|
||||
if (image->kernel_buf_len < minsize + max_command_line_size)
|
||||
goto out;
|
||||
|
||||
if (image->cmdline_buf_len >= max_command_line_size)
|
||||
goto out;
|
||||
|
||||
memcpy(data.parm->command_line, image->cmdline_buf,
|
||||
image->cmdline_buf_len);
|
||||
|
||||
@ -307,17 +322,3 @@ int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
int arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
|
||||
unsigned long buf_len)
|
||||
{
|
||||
/* A kernel must be at least large enough to contain head.S. During
|
||||
* load memory in head.S will be accessed, e.g. to register the next
|
||||
* command line. If the next kernel were smaller the current kernel
|
||||
* will panic at load.
|
||||
*/
|
||||
if (buf_len < HEAD_END)
|
||||
return -ENOEXEC;
|
||||
|
||||
return kexec_image_probe_default(image, buf, buf_len);
|
||||
}
|
||||
|
@ -22,10 +22,11 @@ ENTRY(ftrace_stub)
|
||||
BR_EX %r14
|
||||
ENDPROC(ftrace_stub)
|
||||
|
||||
#define STACK_FRAME_SIZE (STACK_FRAME_OVERHEAD + __PT_SIZE)
|
||||
#define STACK_PTREGS (STACK_FRAME_OVERHEAD)
|
||||
#define STACK_PTREGS_GPRS (STACK_PTREGS + __PT_GPRS)
|
||||
#define STACK_PTREGS_PSW (STACK_PTREGS + __PT_PSW)
|
||||
#define STACK_FRAME_SIZE (STACK_FRAME_OVERHEAD + __PT_SIZE)
|
||||
#define STACK_PTREGS (STACK_FRAME_OVERHEAD)
|
||||
#define STACK_PTREGS_GPRS (STACK_PTREGS + __PT_GPRS)
|
||||
#define STACK_PTREGS_PSW (STACK_PTREGS + __PT_PSW)
|
||||
#define STACK_PTREGS_ORIG_GPR2 (STACK_PTREGS + __PT_ORIG_GPR2)
|
||||
#ifdef __PACK_STACK
|
||||
/* allocate just enough for r14, r15 and backchain */
|
||||
#define TRACED_FUNC_FRAME_SIZE 24
|
||||
@ -33,13 +34,15 @@ ENDPROC(ftrace_stub)
|
||||
#define TRACED_FUNC_FRAME_SIZE STACK_FRAME_OVERHEAD
|
||||
#endif
|
||||
|
||||
ENTRY(ftrace_caller)
|
||||
.globl ftrace_regs_caller
|
||||
.set ftrace_regs_caller,ftrace_caller
|
||||
.macro ftrace_regs_entry, allregs=0
|
||||
stg %r14,(__SF_GPRS+8*8)(%r15) # save traced function caller
|
||||
|
||||
.if \allregs == 1
|
||||
lghi %r14,0 # save condition code
|
||||
ipm %r14 # don't put any instructions
|
||||
sllg %r14,%r14,16 # clobbering CC before this point
|
||||
.endif
|
||||
|
||||
lgr %r1,%r15
|
||||
# allocate stack frame for ftrace_caller to contain traced function
|
||||
aghi %r15,-TRACED_FUNC_FRAME_SIZE
|
||||
@ -49,13 +52,31 @@ ENTRY(ftrace_caller)
|
||||
# allocate pt_regs and stack frame for ftrace_trace_function
|
||||
aghi %r15,-STACK_FRAME_SIZE
|
||||
stg %r1,(STACK_PTREGS_GPRS+15*8)(%r15)
|
||||
xc STACK_PTREGS_ORIG_GPR2(8,%r15),STACK_PTREGS_ORIG_GPR2(%r15)
|
||||
|
||||
.if \allregs == 1
|
||||
stg %r14,(STACK_PTREGS_PSW)(%r15)
|
||||
lg %r14,(__SF_GPRS+8*8)(%r1) # restore original return address
|
||||
stosm (STACK_PTREGS_PSW)(%r15),0
|
||||
.endif
|
||||
|
||||
lg %r14,(__SF_GPRS+8*8)(%r1) # restore original return address
|
||||
aghi %r1,-TRACED_FUNC_FRAME_SIZE
|
||||
stg %r1,__SF_BACKCHAIN(%r15)
|
||||
stg %r0,(STACK_PTREGS_PSW+8)(%r15)
|
||||
stmg %r2,%r14,(STACK_PTREGS_GPRS+2*8)(%r15)
|
||||
.endm
|
||||
|
||||
SYM_CODE_START(ftrace_regs_caller)
|
||||
ftrace_regs_entry 1
|
||||
j ftrace_common
|
||||
SYM_CODE_END(ftrace_regs_caller)
|
||||
|
||||
SYM_CODE_START(ftrace_caller)
|
||||
ftrace_regs_entry 0
|
||||
j ftrace_common
|
||||
SYM_CODE_END(ftrace_caller)
|
||||
|
||||
SYM_CODE_START(ftrace_common)
|
||||
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
|
||||
aghik %r2,%r0,-MCOUNT_INSN_SIZE
|
||||
lgrl %r4,function_trace_op
|
||||
@ -74,24 +95,31 @@ ENTRY(ftrace_caller)
|
||||
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
|
||||
# The j instruction gets runtime patched to a nop instruction.
|
||||
# See ftrace_enable_ftrace_graph_caller.
|
||||
.globl ftrace_graph_caller
|
||||
ftrace_graph_caller:
|
||||
j ftrace_graph_caller_end
|
||||
SYM_INNER_LABEL(ftrace_graph_caller, SYM_L_GLOBAL)
|
||||
j .Lftrace_graph_caller_end
|
||||
lmg %r2,%r3,(STACK_PTREGS_GPRS+14*8)(%r15)
|
||||
lg %r4,(STACK_PTREGS_PSW+8)(%r15)
|
||||
brasl %r14,prepare_ftrace_return
|
||||
stg %r2,(STACK_PTREGS_GPRS+14*8)(%r15)
|
||||
ftrace_graph_caller_end:
|
||||
.globl ftrace_graph_caller_end
|
||||
.Lftrace_graph_caller_end:
|
||||
#endif
|
||||
lg %r1,(STACK_PTREGS_PSW+8)(%r15)
|
||||
lmg %r2,%r15,(STACK_PTREGS_GPRS+2*8)(%r15)
|
||||
lg %r0,(STACK_PTREGS_PSW+8)(%r15)
|
||||
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
|
||||
ltg %r1,STACK_PTREGS_ORIG_GPR2(%r15)
|
||||
locgrz %r1,%r0
|
||||
#else
|
||||
lg %r1,STACK_PTREGS_ORIG_GPR2(%r15)
|
||||
ltgr %r1,%r1
|
||||
jnz 0f
|
||||
lgr %r1,%r0
|
||||
#endif
|
||||
0: lmg %r2,%r15,(STACK_PTREGS_GPRS+2*8)(%r15)
|
||||
BR_EX %r1
|
||||
ENDPROC(ftrace_caller)
|
||||
SYM_CODE_END(ftrace_common)
|
||||
|
||||
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
|
||||
|
||||
ENTRY(return_to_handler)
|
||||
SYM_FUNC_START(return_to_handler)
|
||||
stmg %r2,%r5,32(%r15)
|
||||
lgr %r1,%r15
|
||||
aghi %r15,-STACK_FRAME_OVERHEAD
|
||||
@ -101,6 +129,6 @@ ENTRY(return_to_handler)
|
||||
lgr %r14,%r2
|
||||
lmg %r2,%r5,32(%r15)
|
||||
BR_EX %r14
|
||||
ENDPROC(return_to_handler)
|
||||
SYM_FUNC_END(return_to_handler)
|
||||
|
||||
#endif
|
||||
|
@ -38,7 +38,7 @@ static int __init nospec_report(void)
|
||||
{
|
||||
if (test_facility(156))
|
||||
pr_info("Spectre V2 mitigation: etokens\n");
|
||||
if (__is_defined(CC_USING_EXPOLINE) && !nospec_disable)
|
||||
if (nospec_uses_trampoline())
|
||||
pr_info("Spectre V2 mitigation: execute trampolines\n");
|
||||
if (__test_facility(82, alt_stfle_fac_list))
|
||||
pr_info("Spectre V2 mitigation: limited branch prediction\n");
|
||||
|
@ -15,7 +15,7 @@ ssize_t cpu_show_spectre_v2(struct device *dev,
|
||||
{
|
||||
if (test_facility(156))
|
||||
return sprintf(buf, "Mitigation: etokens\n");
|
||||
if (__is_defined(CC_USING_EXPOLINE) && !nospec_disable)
|
||||
if (nospec_uses_trampoline())
|
||||
return sprintf(buf, "Mitigation: execute trampolines\n");
|
||||
if (__test_facility(82, alt_stfle_fac_list))
|
||||
return sprintf(buf, "Mitigation: limited branch prediction\n");
|
||||
|
@ -773,22 +773,46 @@ static int __init cpumf_pmu_init(void)
 * counter set via normal file operations.
 */

static atomic_t cfset_opencnt = ATOMIC_INIT(0);		/* Excl. access */
static atomic_t cfset_opencnt = ATOMIC_INIT(0);		/* Access count */
static DEFINE_MUTEX(cfset_ctrset_mutex);/* Synchronize access to hardware */
struct cfset_call_on_cpu_parm {		/* Parm struct for smp_call_on_cpu */
	unsigned int sets;		/* Counter set bit mask */
	atomic_t cpus_ack;		/* # CPUs successfully executed func */
};

static struct cfset_request {		/* CPUs and counter set bit mask */
static struct cfset_session {		/* CPUs and counter set bit mask */
	struct list_head head;		/* Head of list of active processes */
} cfset_session = {
	.head = LIST_HEAD_INIT(cfset_session.head)
};

struct cfset_request {			/* CPUs and counter set bit mask */
	unsigned long ctrset;		/* Bit mask of counter set to read */
	cpumask_t mask;			/* CPU mask to read from */
} cfset_request;
	struct list_head node;		/* Chain to cfset_session.head */
};

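The exclusive-open model gives way to one cfset_request per open file descriptor, chained into the global cfset_session list so the hotplug callbacks further down can still walk every active request (see the cfset_session_add()/cfset_session_del() helpers that follow). Reduced to the bookkeeping alone, the pattern is roughly the sketch below; the request_ctx/ctx_* names are invented for illustration and are not the driver's identifiers.

/* Sketch of the per-open bookkeeping only; names are invented, not the
 * driver's identifiers. Error handling is trimmed to the essentials. */
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>

static LIST_HEAD(session_head);			/* all requests of all open fds */
static DEFINE_MUTEX(session_mutex);

struct request_ctx {
	struct list_head node;			/* chained into session_head */
	unsigned long ctrset;			/* per-descriptor state */
};

static int ctx_start(struct file *file, unsigned long ctrset)
{
	struct request_ctx *ctx;

	if (file->private_data)			/* START without a prior STOP */
		return -EBUSY;
	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	ctx->ctrset = ctrset;
	mutex_lock(&session_mutex);
	list_add(&ctx->node, &session_head);	/* visible to hotplug walkers */
	mutex_unlock(&session_mutex);
	file->private_data = ctx;
	return 0;
}

static void ctx_stop(struct file *file)
{
	struct request_ctx *ctx = file->private_data;

	if (!ctx)
		return;
	mutex_lock(&session_mutex);
	list_del(&ctx->node);
	mutex_unlock(&session_mutex);
	kfree(ctx);
	file->private_data = NULL;
}
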
static void cfset_ctrset_clear(void)
|
||||
static void cfset_session_init(void)
|
||||
{
|
||||
cpumask_clear(&cfset_request.mask);
|
||||
cfset_request.ctrset = 0;
|
||||
INIT_LIST_HEAD(&cfset_session.head);
|
||||
}
|
||||
|
||||
/* Remove current request from global bookkeeping. Maintain a counter set bit
|
||||
* mask on a per CPU basis.
|
||||
* Done in process context under mutex protection.
|
||||
*/
|
||||
static void cfset_session_del(struct cfset_request *p)
|
||||
{
|
||||
list_del(&p->node);
|
||||
}
|
||||
|
||||
/* Add current request to global bookkeeping. Maintain a counter set bit mask
|
||||
* on a per CPU basis.
|
||||
* Done in process context under mutex protection.
|
||||
*/
|
||||
static void cfset_session_add(struct cfset_request *p)
|
||||
{
|
||||
list_add(&p->node, &cfset_session.head);
|
||||
}
|
||||
|
||||
/* The /dev/hwctr device access uses PMU_F_IN_USE to mark the device access
|
||||
@ -827,15 +851,23 @@ static void cfset_ioctl_off(void *parm)
|
||||
struct cfset_call_on_cpu_parm *p = parm;
|
||||
int rc;
|
||||
|
||||
cpuhw->dev_state = 0;
|
||||
/* Check if any counter set used by /dev/hwc */
|
||||
for (rc = CPUMF_CTR_SET_BASIC; rc < CPUMF_CTR_SET_MAX; ++rc)
|
||||
if ((p->sets & cpumf_ctr_ctl[rc]))
|
||||
atomic_dec(&cpuhw->ctr_set[rc]);
|
||||
rc = lcctl(cpuhw->state); /* Keep perf_event_open counter sets */
|
||||
if ((p->sets & cpumf_ctr_ctl[rc])) {
|
||||
if (!atomic_dec_return(&cpuhw->ctr_set[rc])) {
|
||||
ctr_set_disable(&cpuhw->dev_state,
|
||||
cpumf_ctr_ctl[rc]);
|
||||
ctr_set_stop(&cpuhw->dev_state,
|
||||
cpumf_ctr_ctl[rc]);
|
||||
}
|
||||
}
|
||||
/* Keep perf_event_open counter sets */
|
||||
rc = lcctl(cpuhw->dev_state | cpuhw->state);
|
||||
if (rc)
|
||||
pr_err("Counter set stop %#llx of /dev/%s failed rc=%i\n",
|
||||
cpuhw->state, S390_HWCTR_DEVICE, rc);
|
||||
cpuhw->flags &= ~PMU_F_IN_USE;
|
||||
if (!cpuhw->dev_state)
|
||||
cpuhw->flags &= ~PMU_F_IN_USE;
|
||||
debug_sprintf_event(cf_dbg, 4, "%s rc %d state %#llx dev_state %#llx\n",
|
||||
__func__, rc, cpuhw->state, cpuhw->dev_state);
|
||||
}
|
||||
@ -870,11 +902,26 @@ static void cfset_release_cpu(void *p)
|
||||
|
||||
debug_sprintf_event(cf_dbg, 4, "%s state %#llx dev_state %#llx\n",
|
||||
__func__, cpuhw->state, cpuhw->dev_state);
|
||||
cpuhw->dev_state = 0;
|
||||
rc = lcctl(cpuhw->state); /* Keep perf_event_open counter sets */
|
||||
if (rc)
|
||||
pr_err("Counter set release %#llx of /dev/%s failed rc=%i\n",
|
||||
cpuhw->state, S390_HWCTR_DEVICE, rc);
|
||||
cpuhw->dev_state = 0;
|
||||
}
|
||||
|
||||
/* This modifies the process CPU mask to adapt it to the currently online
 * CPUs. Offline CPUs can not be addressed. This call terminates the access
 * and is usually followed by close() or a new ioctl(..., START, ...) which
 * creates a new request structure.
 */
static void cfset_all_stop(struct cfset_request *req)
|
||||
{
|
||||
struct cfset_call_on_cpu_parm p = {
|
||||
.sets = req->ctrset,
|
||||
};
|
||||
|
||||
cpumask_and(&req->mask, &req->mask, cpu_online_mask);
|
||||
on_each_cpu_mask(&req->mask, cfset_ioctl_off, &p, 1);
|
||||
}
|
||||
|
||||
/* Release function is also called when application gets terminated without
|
||||
@ -882,10 +929,19 @@ static void cfset_release_cpu(void *p)
|
||||
*/
|
||||
static int cfset_release(struct inode *inode, struct file *file)
|
||||
{
|
||||
on_each_cpu(cfset_release_cpu, NULL, 1);
|
||||
mutex_lock(&cfset_ctrset_mutex);
|
||||
/* Open followed by close/exit has no private_data */
|
||||
if (file->private_data) {
|
||||
cfset_all_stop(file->private_data);
|
||||
cfset_session_del(file->private_data);
|
||||
kfree(file->private_data);
|
||||
file->private_data = NULL;
|
||||
}
|
||||
if (!atomic_dec_return(&cfset_opencnt))
|
||||
on_each_cpu(cfset_release_cpu, NULL, 1);
|
||||
mutex_unlock(&cfset_ctrset_mutex);
|
||||
|
||||
hw_perf_event_destroy(NULL);
|
||||
cfset_ctrset_clear();
|
||||
atomic_set(&cfset_opencnt, 0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -893,9 +949,10 @@ static int cfset_open(struct inode *inode, struct file *file)
|
||||
{
|
||||
if (!capable(CAP_SYS_ADMIN))
|
||||
return -EPERM;
|
||||
/* Only one user space program can open /dev/hwctr */
|
||||
if (atomic_xchg(&cfset_opencnt, 1))
|
||||
return -EBUSY;
|
||||
mutex_lock(&cfset_ctrset_mutex);
|
||||
if (atomic_inc_return(&cfset_opencnt) == 1)
|
||||
cfset_session_init();
|
||||
mutex_unlock(&cfset_ctrset_mutex);
|
||||
|
||||
cpumf_hw_inuse();
|
||||
file->private_data = NULL;
|
||||
@ -903,25 +960,10 @@ static int cfset_open(struct inode *inode, struct file *file)
|
||||
return nonseekable_open(inode, file);
|
||||
}
|
||||
|
||||
static int cfset_all_stop(void)
|
||||
static int cfset_all_start(struct cfset_request *req)
|
||||
{
|
||||
struct cfset_call_on_cpu_parm p = {
|
||||
.sets = cfset_request.ctrset,
|
||||
};
|
||||
cpumask_var_t mask;
|
||||
|
||||
if (!alloc_cpumask_var(&mask, GFP_KERNEL))
|
||||
return -ENOMEM;
|
||||
cpumask_and(mask, &cfset_request.mask, cpu_online_mask);
|
||||
on_each_cpu_mask(mask, cfset_ioctl_off, &p, 1);
|
||||
free_cpumask_var(mask);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int cfset_all_start(void)
|
||||
{
|
||||
struct cfset_call_on_cpu_parm p = {
|
||||
.sets = cfset_request.ctrset,
|
||||
.sets = req->ctrset,
|
||||
.cpus_ack = ATOMIC_INIT(0),
|
||||
};
|
||||
cpumask_var_t mask;
|
||||
@ -929,7 +971,7 @@ static int cfset_all_start(void)
|
||||
|
||||
if (!alloc_cpumask_var(&mask, GFP_KERNEL))
|
||||
return -ENOMEM;
|
||||
cpumask_and(mask, &cfset_request.mask, cpu_online_mask);
|
||||
cpumask_and(mask, &req->mask, cpu_online_mask);
|
||||
on_each_cpu_mask(mask, cfset_ioctl_on, &p, 1);
|
||||
if (atomic_read(&p.cpus_ack) != cpumask_weight(mask)) {
|
||||
on_each_cpu_mask(mask, cfset_ioctl_off, &p, 1);
|
||||
@ -1045,7 +1087,7 @@ static void cfset_cpu_read(void *parm)
|
||||
cpuhw->sets, cpuhw->used);
|
||||
}
|
||||
|
||||
static int cfset_all_read(unsigned long arg)
|
||||
static int cfset_all_read(unsigned long arg, struct cfset_request *req)
|
||||
{
|
||||
struct cfset_call_on_cpu_parm p;
|
||||
cpumask_var_t mask;
|
||||
@ -1054,46 +1096,53 @@ static int cfset_all_read(unsigned long arg)
|
||||
if (!alloc_cpumask_var(&mask, GFP_KERNEL))
|
||||
return -ENOMEM;
|
||||
|
||||
p.sets = cfset_request.ctrset;
|
||||
cpumask_and(mask, &cfset_request.mask, cpu_online_mask);
|
||||
p.sets = req->ctrset;
|
||||
cpumask_and(mask, &req->mask, cpu_online_mask);
|
||||
on_each_cpu_mask(mask, cfset_cpu_read, &p, 1);
|
||||
rc = cfset_all_copy(arg, mask);
|
||||
free_cpumask_var(mask);
|
||||
return rc;
|
||||
}
|
||||
|
||||
static long cfset_ioctl_read(unsigned long arg)
|
||||
static long cfset_ioctl_read(unsigned long arg, struct cfset_request *req)
|
||||
{
|
||||
struct s390_ctrset_read read;
|
||||
int ret = 0;
|
||||
int ret = -ENODATA;
|
||||
|
||||
if (copy_from_user(&read, (char __user *)arg, sizeof(read)))
|
||||
return -EFAULT;
|
||||
ret = cfset_all_read(arg);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static long cfset_ioctl_stop(void)
|
||||
{
|
||||
int ret = ENXIO;
|
||||
|
||||
if (cfset_request.ctrset) {
|
||||
ret = cfset_all_stop();
|
||||
cfset_ctrset_clear();
|
||||
if (req && req->ctrset) {
|
||||
if (copy_from_user(&read, (char __user *)arg, sizeof(read)))
|
||||
return -EFAULT;
|
||||
ret = cfset_all_read(arg, req);
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
static long cfset_ioctl_start(unsigned long arg)
|
||||
static long cfset_ioctl_stop(struct file *file)
|
||||
{
|
||||
struct cfset_request *req = file->private_data;
|
||||
int ret = -ENXIO;
|
||||
|
||||
if (req) {
|
||||
cfset_all_stop(req);
|
||||
cfset_session_del(req);
|
||||
kfree(req);
|
||||
file->private_data = NULL;
|
||||
ret = 0;
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
static long cfset_ioctl_start(unsigned long arg, struct file *file)
|
||||
{
|
||||
struct s390_ctrset_start __user *ustart;
|
||||
struct s390_ctrset_start start;
|
||||
struct cfset_request *preq;
|
||||
void __user *umask;
|
||||
unsigned int len;
|
||||
int ret = 0;
|
||||
size_t need;
|
||||
|
||||
if (cfset_request.ctrset)
|
||||
if (file->private_data)
|
||||
return -EBUSY;
|
||||
ustart = (struct s390_ctrset_start __user *)arg;
|
||||
if (copy_from_user(&start, ustart, sizeof(start)))
|
||||
@ -1108,25 +1157,36 @@ static long cfset_ioctl_start(unsigned long arg)
|
||||
return -EINVAL; /* Invalid counter set */
|
||||
if (!start.counter_sets)
|
||||
return -EINVAL; /* No counter set at all? */
|
||||
cpumask_clear(&cfset_request.mask);
|
||||
|
||||
preq = kzalloc(sizeof(*preq), GFP_KERNEL);
|
||||
if (!preq)
|
||||
return -ENOMEM;
|
||||
cpumask_clear(&preq->mask);
|
||||
len = min_t(u64, start.cpumask_len, cpumask_size());
|
||||
umask = (void __user *)start.cpumask;
|
||||
if (copy_from_user(&cfset_request.mask, umask, len))
|
||||
if (copy_from_user(&preq->mask, umask, len)) {
|
||||
kfree(preq);
|
||||
return -EFAULT;
|
||||
if (cpumask_empty(&cfset_request.mask))
|
||||
}
|
||||
if (cpumask_empty(&preq->mask)) {
|
||||
kfree(preq);
|
||||
return -EINVAL;
|
||||
}
|
||||
need = cfset_needspace(start.counter_sets);
|
||||
if (put_user(need, &ustart->data_bytes))
|
||||
ret = -EFAULT;
|
||||
if (ret)
|
||||
goto out;
|
||||
cfset_request.ctrset = start.counter_sets;
|
||||
ret = cfset_all_start();
|
||||
out:
|
||||
if (ret)
|
||||
cfset_ctrset_clear();
|
||||
debug_sprintf_event(cf_dbg, 4, "%s sets %#lx need %ld ret %d\n",
|
||||
__func__, cfset_request.ctrset, need, ret);
|
||||
if (put_user(need, &ustart->data_bytes)) {
|
||||
kfree(preq);
|
||||
return -EFAULT;
|
||||
}
|
||||
preq->ctrset = start.counter_sets;
|
||||
ret = cfset_all_start(preq);
|
||||
if (!ret) {
|
||||
cfset_session_add(preq);
|
||||
file->private_data = preq;
|
||||
debug_sprintf_event(cf_dbg, 4, "%s set %#lx need %ld ret %d\n",
|
||||
__func__, preq->ctrset, need, ret);
|
||||
} else {
|
||||
kfree(preq);
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -1136,7 +1196,7 @@ out:
 * counter set keeps running until explicitly stopped. Returns the number
 * of bytes needed to store the counter values. If another S390_HWCTR_START
 * ioctl subcommand is called without a previous S390_HWCTR_STOP stop
 * command, -EBUSY is returned.
 * command on the same file descriptor, -EBUSY is returned.
 * S390_HWCTR_READ: Read the counter set values from specified CPU list given
 * with the S390_HWCTR_START command.
 * S390_HWCTR_STOP: Stops the counter sets on the CPU list given with the
@ -1150,13 +1210,13 @@ static long cfset_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
	mutex_lock(&cfset_ctrset_mutex);
	switch (cmd) {
	case S390_HWCTR_START:
		ret = cfset_ioctl_start(arg);
		ret = cfset_ioctl_start(arg, file);
		break;
	case S390_HWCTR_STOP:
		ret = cfset_ioctl_stop();
		ret = cfset_ioctl_stop(file);
		break;
	case S390_HWCTR_READ:
		ret = cfset_ioctl_read(arg);
		ret = cfset_ioctl_read(arg, file->private_data);
		break;
	default:
		ret = -ENOTTY;
@ -1182,29 +1242,41 @@ static struct miscdevice cfset_dev = {
|
||||
.fops = &cfset_fops,
|
||||
};
|
||||
|
||||
/* Hotplug add of a CPU. Scan through all active processes and add
|
||||
* that CPU to the list of CPUs supplied with ioctl(..., START, ...).
|
||||
*/
|
||||
int cfset_online_cpu(unsigned int cpu)
|
||||
{
|
||||
struct cfset_call_on_cpu_parm p;
|
||||
struct cfset_request *rp;
|
||||
|
||||
mutex_lock(&cfset_ctrset_mutex);
|
||||
if (cfset_request.ctrset) {
|
||||
p.sets = cfset_request.ctrset;
|
||||
cfset_ioctl_on(&p);
|
||||
cpumask_set_cpu(cpu, &cfset_request.mask);
|
||||
if (!list_empty(&cfset_session.head)) {
|
||||
list_for_each_entry(rp, &cfset_session.head, node) {
|
||||
p.sets = rp->ctrset;
|
||||
cfset_ioctl_on(&p);
|
||||
cpumask_set_cpu(cpu, &rp->mask);
|
||||
}
|
||||
}
|
||||
mutex_unlock(&cfset_ctrset_mutex);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Hotplug remove of a CPU. Scan through all active processes and clear
|
||||
* that CPU from the list of CPUs supplied with ioctl(..., START, ...).
|
||||
*/
|
||||
int cfset_offline_cpu(unsigned int cpu)
|
||||
{
|
||||
struct cfset_call_on_cpu_parm p;
|
||||
struct cfset_request *rp;
|
||||
|
||||
mutex_lock(&cfset_ctrset_mutex);
|
||||
if (cfset_request.ctrset) {
|
||||
p.sets = cfset_request.ctrset;
|
||||
cfset_ioctl_off(&p);
|
||||
cpumask_clear_cpu(cpu, &cfset_request.mask);
|
||||
if (!list_empty(&cfset_session.head)) {
|
||||
list_for_each_entry(rp, &cfset_session.head, node) {
|
||||
p.sets = rp->ctrset;
|
||||
cfset_ioctl_off(&p);
|
||||
cpumask_clear_cpu(cpu, &rp->mask);
|
||||
}
|
||||
}
|
||||
mutex_unlock(&cfset_ctrset_mutex);
|
||||
return 0;
|
||||
|
@ -141,7 +141,7 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
|
||||
frame->childregs.gprs[10] = arg;
|
||||
frame->childregs.gprs[11] = (unsigned long)do_exit;
|
||||
frame->childregs.orig_gpr2 = -1;
|
||||
|
||||
frame->childregs.last_break = 1;
|
||||
return 0;
|
||||
}
|
||||
frame->childregs = *current_pt_regs();
|
||||
|
@ -95,10 +95,10 @@ EXPORT_SYMBOL(console_irq);
|
||||
* relocated above 2 GB, because it has to use 31 bit addresses.
|
||||
* Such code and data is part of the .amode31 section.
|
||||
*/
|
||||
unsigned long __amode31_ref __samode31 = __pa(&_samode31);
|
||||
unsigned long __amode31_ref __eamode31 = __pa(&_eamode31);
|
||||
unsigned long __amode31_ref __stext_amode31 = __pa(&_stext_amode31);
|
||||
unsigned long __amode31_ref __etext_amode31 = __pa(&_etext_amode31);
|
||||
unsigned long __amode31_ref __samode31 = (unsigned long)&_samode31;
|
||||
unsigned long __amode31_ref __eamode31 = (unsigned long)&_eamode31;
|
||||
unsigned long __amode31_ref __stext_amode31 = (unsigned long)&_stext_amode31;
|
||||
unsigned long __amode31_ref __etext_amode31 = (unsigned long)&_etext_amode31;
|
||||
struct exception_table_entry __amode31_ref *__start_amode31_ex_table = _start_amode31_ex_table;
|
||||
struct exception_table_entry __amode31_ref *__stop_amode31_ex_table = _stop_amode31_ex_table;
|
||||
|
||||
@ -149,6 +149,7 @@ struct mem_detect_info __bootdata(mem_detect);
|
||||
struct initrd_data __bootdata(initrd_data);
|
||||
|
||||
unsigned long __bootdata_preserved(__kaslr_offset);
|
||||
unsigned long __bootdata(__amode31_base);
|
||||
unsigned int __bootdata_preserved(zlib_dfltcc_support);
|
||||
EXPORT_SYMBOL(zlib_dfltcc_support);
|
||||
u64 __bootdata_preserved(stfle_fac_list[16]);
|
||||
@ -173,6 +174,8 @@ unsigned long MODULES_END;
|
||||
struct lowcore *lowcore_ptr[NR_CPUS];
|
||||
EXPORT_SYMBOL(lowcore_ptr);
|
||||
|
||||
DEFINE_STATIC_KEY_FALSE(cpu_has_bear);
|
||||
|
||||
/*
|
||||
* The Write Back bit position in the physaddr is given by the SLPC PCI.
|
||||
* Leaving the mask zero always uses write through which is safe
|
||||
@ -719,7 +722,7 @@ static void __init reserve_initrd(void)
|
||||
#ifdef CONFIG_BLK_DEV_INITRD
|
||||
if (!initrd_data.start || !initrd_data.size)
|
||||
return;
|
||||
initrd_start = initrd_data.start;
|
||||
initrd_start = (unsigned long)__va(initrd_data.start);
|
||||
initrd_end = initrd_start + initrd_data.size;
|
||||
memblock_reserve(initrd_data.start, initrd_data.size);
|
||||
#endif
|
||||
@ -805,12 +808,10 @@ static void __init check_initrd(void)
|
||||
*/
|
||||
static void __init reserve_kernel(void)
|
||||
{
|
||||
unsigned long start_pfn = PFN_UP(__pa(_end));
|
||||
|
||||
memblock_reserve(0, STARTUP_NORMAL_OFFSET);
|
||||
memblock_reserve((unsigned long)sclp_early_sccb, EXT_SCCB_READ_SCP);
|
||||
memblock_reserve((unsigned long)_stext, PFN_PHYS(start_pfn)
|
||||
- (unsigned long)_stext);
|
||||
memblock_reserve(__amode31_base, __eamode31 - __samode31);
|
||||
memblock_reserve(__pa(sclp_early_sccb), EXT_SCCB_READ_SCP);
|
||||
memblock_reserve(__pa(_stext), _end - _stext);
|
||||
}
|
||||
|
||||
static void __init setup_memory(void)
|
||||
@ -832,20 +833,14 @@ static void __init setup_memory(void)
|
||||
|
||||
static void __init relocate_amode31_section(void)
|
||||
{
|
||||
unsigned long amode31_addr, amode31_size;
|
||||
long amode31_offset;
|
||||
unsigned long amode31_size = __eamode31 - __samode31;
|
||||
long amode31_offset = __amode31_base - __samode31;
|
||||
long *ptr;
|
||||
|
||||
/* Allocate a new AMODE31 capable memory region */
|
||||
amode31_size = __eamode31 - __samode31;
|
||||
pr_info("Relocating AMODE31 section of size 0x%08lx\n", amode31_size);
|
||||
amode31_addr = (unsigned long)memblock_alloc_low(amode31_size, PAGE_SIZE);
|
||||
if (!amode31_addr)
|
||||
panic("Failed to allocate memory for AMODE31 section\n");
|
||||
amode31_offset = amode31_addr - __samode31;
|
||||
|
||||
/* Move original AMODE31 section to the new one */
|
||||
memmove((void *)amode31_addr, (void *)__samode31, amode31_size);
|
||||
memmove((void *)__amode31_base, (void *)__samode31, amode31_size);
|
||||
/* Zero out the old AMODE31 section to catch invalid accesses within it */
|
||||
memset((void *)__samode31, 0, amode31_size);
|
||||
|
||||
@ -884,14 +879,12 @@ static void __init setup_randomness(void)
|
||||
{
|
||||
struct sysinfo_3_2_2 *vmms;
|
||||
|
||||
vmms = (struct sysinfo_3_2_2 *) memblock_phys_alloc(PAGE_SIZE,
|
||||
PAGE_SIZE);
|
||||
vmms = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
|
||||
if (!vmms)
|
||||
panic("Failed to allocate memory for sysinfo structure\n");
|
||||
|
||||
if (stsi(vmms, 3, 2, 2) == 0 && vmms->count)
|
||||
add_device_randomness(&vmms->vm, sizeof(vmms->vm[0]) * vmms->count);
|
||||
memblock_phys_free((unsigned long)vmms, PAGE_SIZE);
|
||||
memblock_free(vmms, PAGE_SIZE);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -1048,6 +1041,9 @@ void __init setup_arch(char **cmdline_p)
|
||||
smp_detect_cpus();
|
||||
topology_init_early();
|
||||
|
||||
if (test_facility(193))
|
||||
static_branch_enable(&cpu_has_bear);
|
||||
|
||||
/*
|
||||
* Create kernel page tables and switch to virtual addressing.
|
||||
*/
|
||||
|
@ -154,6 +154,8 @@ void noinstr __do_syscall(struct pt_regs *regs, int per_trap)
|
||||
regs->psw = S390_lowcore.svc_old_psw;
|
||||
regs->int_code = S390_lowcore.svc_int_code;
|
||||
update_timer_sys();
|
||||
if (static_branch_likely(&cpu_has_bear))
|
||||
current->thread.last_break = regs->last_break;
|
||||
|
||||
local_irq_enable();
|
||||
regs->orig_gpr2 = regs->gprs[2];
|
||||
|
@ -300,7 +300,6 @@ static void (*pgm_check_table[128])(struct pt_regs *regs);
|
||||
|
||||
void noinstr __do_pgm_check(struct pt_regs *regs)
|
||||
{
|
||||
unsigned long last_break = S390_lowcore.breaking_event_addr;
|
||||
unsigned int trapnr;
|
||||
irqentry_state_t state;
|
||||
|
||||
@ -311,10 +310,11 @@ void noinstr __do_pgm_check(struct pt_regs *regs)
|
||||
|
||||
if (user_mode(regs)) {
|
||||
update_timer_sys();
|
||||
if (last_break < 4096)
|
||||
last_break = 1;
|
||||
current->thread.last_break = last_break;
|
||||
regs->args[0] = last_break;
|
||||
if (!static_branch_likely(&cpu_has_bear)) {
|
||||
if (regs->last_break < 4096)
|
||||
regs->last_break = 1;
|
||||
}
|
||||
current->thread.last_break = regs->last_break;
|
||||
}
|
||||
|
||||
if (S390_lowcore.pgm_code & 0x0200) {
|
||||
|
@ -212,6 +212,7 @@ SECTIONS
|
||||
QUAD(__dynsym_start) /* dynsym_start */
|
||||
QUAD(__rela_dyn_start) /* rela_dyn_start */
|
||||
QUAD(__rela_dyn_end) /* rela_dyn_end */
|
||||
QUAD(_eamode31 - _samode31) /* amode31_size */
|
||||
} :NONE
|
||||
|
||||
/* Debugging sections. */
|
||||
|
@ -960,7 +960,7 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
|
||||
/* bit 1+2 of the target are the ilc, so we can directly use ilen */
|
||||
rc |= put_guest_lc(vcpu, ilen, (u16 *) __LC_PGM_ILC);
|
||||
rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->gbea,
|
||||
(u64 *) __LC_LAST_BREAK);
|
||||
(u64 *) __LC_PGM_LAST_BREAK);
|
||||
rc |= put_guest_lc(vcpu, pgm_info.code,
|
||||
(u16 *)__LC_PGM_INT_CODE);
|
||||
rc |= write_guest_lc(vcpu, __LC_PGM_OLD_PSW,
|
||||
|
@ -7,6 +7,8 @@ lib-y += delay.o string.o uaccess.o find.o spinlock.o
|
||||
obj-y += mem.o xor.o
|
||||
lib-$(CONFIG_KPROBES) += probes.o
|
||||
lib-$(CONFIG_UPROBES) += probes.o
|
||||
obj-$(CONFIG_S390_KPROBES_SANITY_TEST) += test_kprobes_s390.o
|
||||
test_kprobes_s390-objs += test_kprobes_asm.o test_kprobes.o
|
||||
|
||||
# Instrumenting memory accesses to __user data (in different address space)
|
||||
# produce false positives
|
||||
|
@ -26,7 +26,7 @@ static int __init spin_retry_init(void)
|
||||
}
|
||||
early_initcall(spin_retry_init);
|
||||
|
||||
/**
|
||||
/*
|
||||
* spin_retry= parameter
|
||||
*/
|
||||
static int __init spin_retry_setup(char *str)
|
||||
|
@ -100,32 +100,6 @@ char *strcpy(char *dest, const char *src)
|
||||
EXPORT_SYMBOL(strcpy);
|
||||
#endif
|
||||
|
||||
/**
|
||||
* strlcpy - Copy a %NUL terminated string into a sized buffer
|
||||
* @dest: Where to copy the string to
|
||||
* @src: Where to copy the string from
|
||||
* @size: size of destination buffer
|
||||
*
|
||||
* Compatible with *BSD: the result is always a valid
|
||||
* NUL-terminated string that fits in the buffer (unless,
|
||||
* of course, the buffer size is zero). It does not pad
|
||||
* out the result like strncpy() does.
|
||||
*/
|
||||
#ifdef __HAVE_ARCH_STRLCPY
|
||||
size_t strlcpy(char *dest, const char *src, size_t size)
|
||||
{
|
||||
size_t ret = __strend(src) - src;
|
||||
|
||||
if (size) {
|
||||
size_t len = (ret >= size) ? size-1 : ret;
|
||||
dest[len] = '\0';
|
||||
memcpy(dest, src, len);
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL(strlcpy);
|
||||
#endif
|
||||
|
||||
/**
|
||||
* strncpy - Copy a length-limited, %NUL-terminated string
|
||||
* @dest: Where to copy the string to
|
||||
@ -254,25 +228,6 @@ int strcmp(const char *s1, const char *s2)
|
||||
EXPORT_SYMBOL(strcmp);
|
||||
#endif
|
||||
|
||||
/**
|
||||
* strrchr - Find the last occurrence of a character in a string
|
||||
* @s: The string to be searched
|
||||
* @c: The character to search for
|
||||
*/
|
||||
#ifdef __HAVE_ARCH_STRRCHR
|
||||
char *strrchr(const char *s, int c)
|
||||
{
|
||||
ssize_t len = __strend(s) - s;
|
||||
|
||||
do {
|
||||
if (s[len] == (char)c)
|
||||
return (char *)s + len;
|
||||
} while (--len >= 0);
|
||||
return NULL;
|
||||
}
|
||||
EXPORT_SYMBOL(strrchr);
|
||||
#endif
|
||||
|
||||
static inline int clcle(const char *s1, unsigned long l1,
|
||||
const char *s2, unsigned long l2)
|
||||
{
|
||||
|
arch/s390/lib/test_kprobes.c (new file, 75 lines)
@ -0,0 +1,75 @@
|
||||
// SPDX-License-Identifier: GPL-2.0+
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/kprobes.h>
|
||||
#include <linux/random.h>
|
||||
#include <kunit/test.h>
|
||||
#include "test_kprobes.h"
|
||||
|
||||
static struct kprobe kp;
|
||||
|
||||
static void setup_kprobe(struct kunit *test, struct kprobe *kp,
|
||||
const char *symbol, int offset)
|
||||
{
|
||||
kp->offset = offset;
|
||||
kp->addr = NULL;
|
||||
kp->symbol_name = symbol;
|
||||
}
|
||||
|
||||
static void test_kprobe_offset(struct kunit *test, struct kprobe *kp,
|
||||
const char *target, int offset)
|
||||
{
|
||||
int ret;
|
||||
|
||||
setup_kprobe(test, kp, target, 0);
|
||||
ret = register_kprobe(kp);
|
||||
if (!ret)
|
||||
unregister_kprobe(kp);
|
||||
KUNIT_EXPECT_EQ(test, 0, ret);
|
||||
setup_kprobe(test, kp, target, offset);
|
||||
ret = register_kprobe(kp);
|
||||
KUNIT_EXPECT_EQ(test, -EINVAL, ret);
|
||||
if (!ret)
|
||||
unregister_kprobe(kp);
|
||||
}
|
||||
|
||||
static void test_kprobe_odd(struct kunit *test)
|
||||
{
|
||||
test_kprobe_offset(test, &kp, "kprobes_target_odd",
|
||||
kprobes_target_odd_offs);
|
||||
}
|
||||
|
||||
static void test_kprobe_in_insn4(struct kunit *test)
|
||||
{
|
||||
test_kprobe_offset(test, &kp, "kprobes_target_in_insn4",
|
||||
kprobes_target_in_insn4_offs);
|
||||
}
|
||||
|
||||
static void test_kprobe_in_insn6_lo(struct kunit *test)
|
||||
{
|
||||
test_kprobe_offset(test, &kp, "kprobes_target_in_insn6_lo",
|
||||
kprobes_target_in_insn6_lo_offs);
|
||||
}
|
||||
|
||||
static void test_kprobe_in_insn6_hi(struct kunit *test)
|
||||
{
|
||||
test_kprobe_offset(test, &kp, "kprobes_target_in_insn6_hi",
|
||||
kprobes_target_in_insn6_hi_offs);
|
||||
}
|
||||
|
||||
static struct kunit_case kprobes_testcases[] = {
|
||||
KUNIT_CASE(test_kprobe_odd),
|
||||
KUNIT_CASE(test_kprobe_in_insn4),
|
||||
KUNIT_CASE(test_kprobe_in_insn6_lo),
|
||||
KUNIT_CASE(test_kprobe_in_insn6_hi),
|
||||
{}
|
||||
};
|
||||
|
||||
static struct kunit_suite kprobes_test_suite = {
|
||||
.name = "kprobes_test_s390",
|
||||
.test_cases = kprobes_testcases,
|
||||
};
|
||||
|
||||
kunit_test_suites(&kprobes_test_suite);
|
||||
|
||||
MODULE_LICENSE("GPL");
|
arch/s390/lib/test_kprobes.h (new file, 10 lines)
@ -0,0 +1,10 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0+ */
|
||||
#ifndef TEST_KPROBES_H
|
||||
#define TEST_KPROBES_H
|
||||
|
||||
extern unsigned long kprobes_target_odd_offs;
|
||||
extern unsigned long kprobes_target_in_insn4_offs;
|
||||
extern unsigned long kprobes_target_in_insn6_lo_offs;
|
||||
extern unsigned long kprobes_target_in_insn6_hi_offs;
|
||||
|
||||
#endif
|
arch/s390/lib/test_kprobes_asm.S (new file, 45 lines)
@ -0,0 +1,45 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0+ */
|
||||
|
||||
#include <linux/linkage.h>
|
||||
#include <asm/ftrace.h>
|
||||
|
||||
#define KPROBES_TARGET_START(name) \
|
||||
SYM_FUNC_START(name); \
|
||||
FTRACE_GEN_NOP_ASM(name)
|
||||
|
||||
#define KPROBES_TARGET_END(name) \
|
||||
SYM_FUNC_END(name); \
|
||||
SYM_DATA(name##_offs, .quad 1b - name)
|
||||
|
||||
KPROBES_TARGET_START(kprobes_target_in_insn4)
|
||||
.word 0x4700 // bc 0,0
|
||||
1: .word 0x0000
|
||||
br %r14
|
||||
KPROBES_TARGET_END(kprobes_target_in_insn4)
|
||||
|
||||
KPROBES_TARGET_START(kprobes_target_in_insn6_lo)
|
||||
.word 0xe310 // ly 1,0
|
||||
1: .word 0x0000
|
||||
.word 0x0058
|
||||
br %r14
|
||||
KPROBES_TARGET_END(kprobes_target_in_insn6_lo)
|
||||
|
||||
KPROBES_TARGET_START(kprobes_target_in_insn6_hi)
|
||||
.word 0xe310 // ly 1,0
|
||||
.word 0x0000
|
||||
1: .word 0x0058
|
||||
br %r14
|
||||
KPROBES_TARGET_END(kprobes_target_in_insn6_hi)
|
||||
|
||||
KPROBES_TARGET_START(kprobes_target_bp)
|
||||
nop
|
||||
.word 0x0000
|
||||
nop
|
||||
1: br %r14
|
||||
KPROBES_TARGET_END(kprobes_target_bp)
|
||||
|
||||
KPROBES_TARGET_START(kprobes_target_odd)
|
||||
.byte 0x07
|
||||
1: .byte 0x07
|
||||
br %r14
|
||||
KPROBES_TARGET_END(kprobes_target_odd)
|
@ -3,7 +3,7 @@
|
||||
* Test module for unwind_for_each_frame
|
||||
*/
|
||||
|
||||
#define pr_fmt(fmt) "test_unwind: " fmt
|
||||
#include <kunit/test.h>
|
||||
#include <asm/unwind.h>
|
||||
#include <linux/completion.h>
|
||||
#include <linux/kallsyms.h>
|
||||
@ -16,6 +16,8 @@
|
||||
#include <linux/wait.h>
|
||||
#include <asm/irq.h>
|
||||
|
||||
struct kunit *current_test;
|
||||
|
||||
#define BT_BUF_SIZE (PAGE_SIZE * 4)
|
||||
|
||||
/*
|
||||
@ -29,7 +31,7 @@ static void print_backtrace(char *bt)
|
||||
p = strsep(&bt, "\n");
|
||||
if (!p)
|
||||
break;
|
||||
pr_err("%s\n", p);
|
||||
kunit_err(current_test, "%s\n", p);
|
||||
}
|
||||
}
|
||||
|
||||
@ -49,7 +51,7 @@ static noinline int test_unwind(struct task_struct *task, struct pt_regs *regs,
|
||||
|
||||
bt = kmalloc(BT_BUF_SIZE, GFP_ATOMIC);
|
||||
if (!bt) {
|
||||
pr_err("failed to allocate backtrace buffer\n");
|
||||
kunit_err(current_test, "failed to allocate backtrace buffer\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
/* Unwind. */
|
||||
@ -63,7 +65,7 @@ static noinline int test_unwind(struct task_struct *task, struct pt_regs *regs,
|
||||
if (frame_count++ == max_frames)
|
||||
break;
|
||||
if (state.reliable && !addr) {
|
||||
pr_err("unwind state reliable but addr is 0\n");
|
||||
kunit_err(current_test, "unwind state reliable but addr is 0\n");
|
||||
ret = -EINVAL;
|
||||
break;
|
||||
}
|
||||
@ -75,7 +77,7 @@ static noinline int test_unwind(struct task_struct *task, struct pt_regs *regs,
|
||||
stack_type_name(state.stack_info.type),
|
||||
(void *)state.sp, (void *)state.ip);
|
||||
if (bt_pos >= BT_BUF_SIZE)
|
||||
pr_err("backtrace buffer is too small\n");
|
||||
kunit_err(current_test, "backtrace buffer is too small\n");
|
||||
}
|
||||
frame_count += 1;
|
||||
if (prev_is_func2 && str_has_prefix(sym, "unwindme_func1"))
|
||||
@ -85,15 +87,15 @@ static noinline int test_unwind(struct task_struct *task, struct pt_regs *regs,
|
||||
|
||||
/* Check the results. */
|
||||
if (unwind_error(&state)) {
|
||||
pr_err("unwind error\n");
|
||||
kunit_err(current_test, "unwind error\n");
|
||||
ret = -EINVAL;
|
||||
}
|
||||
if (!seen_func2_func1) {
|
||||
pr_err("unwindme_func2 and unwindme_func1 not found\n");
|
||||
kunit_err(current_test, "unwindme_func2 and unwindme_func1 not found\n");
|
||||
ret = -EINVAL;
|
||||
}
|
||||
if (frame_count == max_frames) {
|
||||
pr_err("Maximum number of frames exceeded\n");
|
||||
kunit_err(current_test, "Maximum number of frames exceeded\n");
|
||||
ret = -EINVAL;
|
||||
}
|
||||
if (ret)
|
||||
@ -166,7 +168,7 @@ static noinline int unwindme_func4(struct unwindme *u)
|
||||
kp.pre_handler = pgm_pre_handler;
|
||||
ret = register_kprobe(&kp);
|
||||
if (ret < 0) {
|
||||
pr_err("register_kprobe failed %d\n", ret);
|
||||
kunit_err(current_test, "register_kprobe failed %d\n", ret);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
@ -252,7 +254,7 @@ static int test_unwind_irq(struct unwindme *u)
|
||||
}
|
||||
|
||||
/* Spawns a task and passes it to test_unwind(). */
|
||||
static int test_unwind_task(struct unwindme *u)
|
||||
static int test_unwind_task(struct kunit *test, struct unwindme *u)
|
||||
{
|
||||
struct task_struct *task;
|
||||
int ret;
|
||||
@ -267,7 +269,7 @@ static int test_unwind_task(struct unwindme *u)
|
||||
*/
|
||||
task = kthread_run(unwindme_func1, u, "%s", __func__);
|
||||
if (IS_ERR(task)) {
|
||||
pr_err("kthread_run() failed\n");
|
||||
kunit_err(test, "kthread_run() failed\n");
|
||||
return PTR_ERR(task);
|
||||
}
|
||||
/*
|
||||
@ -282,77 +284,98 @@ static int test_unwind_task(struct unwindme *u)
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int test_unwind_flags(int flags)
|
||||
struct test_params {
|
||||
int flags;
|
||||
char *name;
|
||||
};
|
||||
|
||||
/*
|
||||
* Create required parameter list for tests
|
||||
*/
|
||||
static const struct test_params param_list[] = {
|
||||
{.flags = UWM_DEFAULT, .name = "UWM_DEFAULT"},
|
||||
{.flags = UWM_SP, .name = "UWM_SP"},
|
||||
{.flags = UWM_REGS, .name = "UWM_REGS"},
|
||||
{.flags = UWM_SWITCH_STACK,
|
||||
.name = "UWM_SWITCH_STACK"},
|
||||
{.flags = UWM_SP | UWM_REGS,
|
||||
.name = "UWM_SP | UWM_REGS"},
|
||||
{.flags = UWM_CALLER | UWM_SP,
|
||||
.name = "WM_CALLER | UWM_SP"},
|
||||
{.flags = UWM_CALLER | UWM_SP | UWM_REGS,
|
||||
.name = "UWM_CALLER | UWM_SP | UWM_REGS"},
|
||||
{.flags = UWM_CALLER | UWM_SP | UWM_REGS | UWM_SWITCH_STACK,
|
||||
.name = "UWM_CALLER | UWM_SP | UWM_REGS | UWM_SWITCH_STACK"},
|
||||
{.flags = UWM_THREAD, .name = "UWM_THREAD"},
|
||||
{.flags = UWM_THREAD | UWM_SP,
|
||||
.name = "UWM_THREAD | UWM_SP"},
|
||||
{.flags = UWM_THREAD | UWM_CALLER | UWM_SP,
|
||||
.name = "UWM_THREAD | UWM_CALLER | UWM_SP"},
|
||||
{.flags = UWM_IRQ, .name = "UWM_IRQ"},
|
||||
{.flags = UWM_IRQ | UWM_SWITCH_STACK,
|
||||
.name = "UWM_IRQ | UWM_SWITCH_STACK"},
|
||||
{.flags = UWM_IRQ | UWM_SP,
|
||||
.name = "UWM_IRQ | UWM_SP"},
|
||||
{.flags = UWM_IRQ | UWM_REGS,
|
||||
.name = "UWM_IRQ | UWM_REGS"},
|
||||
{.flags = UWM_IRQ | UWM_SP | UWM_REGS,
|
||||
.name = "UWM_IRQ | UWM_SP | UWM_REGS"},
|
||||
{.flags = UWM_IRQ | UWM_CALLER | UWM_SP,
|
||||
.name = "UWM_IRQ | UWM_CALLER | UWM_SP"},
|
||||
{.flags = UWM_IRQ | UWM_CALLER | UWM_SP | UWM_REGS,
|
||||
.name = "UWM_IRQ | UWM_CALLER | UWM_SP | UWM_REGS"},
|
||||
{.flags = UWM_IRQ | UWM_CALLER | UWM_SP | UWM_REGS | UWM_SWITCH_STACK,
|
||||
.name = "UWM_IRQ | UWM_CALLER | UWM_SP | UWM_REGS | UWM_SWITCH_STACK"},
|
||||
#ifdef CONFIG_KPROBES
|
||||
{.flags = UWM_PGM, .name = "UWM_PGM"},
|
||||
{.flags = UWM_PGM | UWM_SP,
|
||||
.name = "UWM_PGM | UWM_SP"},
|
||||
{.flags = UWM_PGM | UWM_REGS,
|
||||
.name = "UWM_PGM | UWM_REGS"},
|
||||
{.flags = UWM_PGM | UWM_SP | UWM_REGS,
|
||||
.name = "UWM_PGM | UWM_SP | UWM_REGS"},
|
||||
#endif
|
||||
};
|
||||
|
||||
/*
|
||||
* Parameter description generator: required for KUNIT_ARRAY_PARAM()
|
||||
*/
|
||||
static void get_desc(const struct test_params *params, char *desc)
|
||||
{
|
||||
strscpy(desc, params->name, KUNIT_PARAM_DESC_SIZE);
|
||||
}
|
||||
|
||||
/*
|
||||
* Create test_unwind_gen_params
|
||||
*/
|
||||
KUNIT_ARRAY_PARAM(test_unwind, param_list, get_desc);
|
||||
|
||||
static void test_unwind_flags(struct kunit *test)
|
||||
{
|
||||
struct unwindme u;
|
||||
const struct test_params *params;
|
||||
|
||||
u.flags = flags;
|
||||
current_test = test;
|
||||
params = (const struct test_params *)test->param_value;
|
||||
u.flags = params->flags;
|
||||
if (u.flags & UWM_THREAD)
|
||||
return test_unwind_task(&u);
|
||||
KUNIT_EXPECT_EQ(test, 0, test_unwind_task(test, &u));
|
||||
else if (u.flags & UWM_IRQ)
|
||||
return test_unwind_irq(&u);
|
||||
KUNIT_EXPECT_EQ(test, 0, test_unwind_irq(&u));
|
||||
else
|
||||
return unwindme_func1(&u);
|
||||
KUNIT_EXPECT_EQ(test, 0, unwindme_func1(&u));
|
||||
}
|
||||
|
||||
static int test_unwind_init(void)
|
||||
{
|
||||
int failed = 0;
|
||||
int total = 0;
|
||||
static struct kunit_case unwind_test_cases[] = {
|
||||
KUNIT_CASE_PARAM(test_unwind_flags, test_unwind_gen_params),
|
||||
{}
|
||||
};
|
||||
|
||||
#define TEST(flags) \
|
||||
do { \
|
||||
pr_info("[ RUN ] " #flags "\n"); \
|
||||
total++; \
|
||||
if (!test_unwind_flags((flags))) { \
|
||||
pr_info("[ OK ] " #flags "\n"); \
|
||||
} else { \
|
||||
pr_err("[ FAILED ] " #flags "\n"); \
|
||||
failed++; \
|
||||
} \
|
||||
} while (0)
|
||||
static struct kunit_suite test_unwind_suite = {
|
||||
.name = "test_unwind",
|
||||
.test_cases = unwind_test_cases,
|
||||
};
|
||||
|
||||
pr_info("running stack unwinder tests");
|
||||
TEST(UWM_DEFAULT);
|
||||
TEST(UWM_SP);
|
||||
TEST(UWM_REGS);
|
||||
TEST(UWM_SWITCH_STACK);
|
||||
TEST(UWM_SP | UWM_REGS);
|
||||
TEST(UWM_CALLER | UWM_SP);
|
||||
TEST(UWM_CALLER | UWM_SP | UWM_REGS);
|
||||
TEST(UWM_CALLER | UWM_SP | UWM_REGS | UWM_SWITCH_STACK);
|
||||
TEST(UWM_THREAD);
|
||||
TEST(UWM_THREAD | UWM_SP);
|
||||
TEST(UWM_THREAD | UWM_CALLER | UWM_SP);
|
||||
TEST(UWM_IRQ);
|
||||
TEST(UWM_IRQ | UWM_SWITCH_STACK);
|
||||
TEST(UWM_IRQ | UWM_SP);
|
||||
TEST(UWM_IRQ | UWM_REGS);
|
||||
TEST(UWM_IRQ | UWM_SP | UWM_REGS);
|
||||
TEST(UWM_IRQ | UWM_CALLER | UWM_SP);
|
||||
TEST(UWM_IRQ | UWM_CALLER | UWM_SP | UWM_REGS);
|
||||
TEST(UWM_IRQ | UWM_CALLER | UWM_SP | UWM_REGS | UWM_SWITCH_STACK);
|
||||
#ifdef CONFIG_KPROBES
|
||||
TEST(UWM_PGM);
|
||||
TEST(UWM_PGM | UWM_SP);
|
||||
TEST(UWM_PGM | UWM_REGS);
|
||||
TEST(UWM_PGM | UWM_SP | UWM_REGS);
|
||||
#endif
|
||||
#undef TEST
|
||||
if (failed) {
|
||||
pr_err("%d of %d stack unwinder tests failed", failed, total);
|
||||
WARN(1, "%d of %d stack unwinder tests failed", failed, total);
|
||||
} else {
|
||||
pr_info("all %d stack unwinder tests passed", total);
|
||||
}
|
||||
kunit_test_suites(&test_unwind_suite);
|
||||
|
||||
return failed ? -EINVAL : 0;
|
||||
}
|
||||
|
||||
static void test_unwind_exit(void)
|
||||
{
|
||||
}
|
||||
|
||||
module_init(test_unwind_init);
|
||||
module_exit(test_unwind_exit);
|
||||
MODULE_LICENSE("GPL");
|
||||
|
@ -14,8 +14,8 @@
|
||||
#include <linux/moduleparam.h>
|
||||
#include <linux/gfp.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/string_helpers.h>
|
||||
#include <linux/sysctl.h>
|
||||
#include <linux/ctype.h>
|
||||
#include <linux/swap.h>
|
||||
#include <linux/kthread.h>
|
||||
#include <linux/oom.h>
|
||||
@ -394,13 +394,10 @@ static int __init cmm_init(void)
|
||||
goto out_sysctl;
|
||||
#ifdef CONFIG_CMM_IUCV
|
||||
/* convert sender to uppercase characters */
|
||||
if (sender) {
|
||||
int len = strlen(sender);
|
||||
while (len--)
|
||||
sender[len] = toupper(sender[len]);
|
||||
} else {
|
||||
if (sender)
|
||||
string_upper(sender, sender);
|
||||
else
|
||||
sender = cmm_default_sender;
|
||||
}
|
||||
|
||||
rc = smsg_register_callback(SMSG_PREFIX, cmm_smsg_target);
|
||||
if (rc < 0)
|
||||
|
@ -8,6 +8,7 @@
|
||||
#include <linux/kasan.h>
|
||||
#include <asm/ptdump.h>
|
||||
#include <asm/kasan.h>
|
||||
#include <asm/nospec-branch.h>
|
||||
#include <asm/sections.h>
|
||||
|
||||
static unsigned long max_addr;
|
||||
@ -116,8 +117,13 @@ static void note_prot_wx(struct pg_state *st, unsigned long addr)
|
||||
return;
|
||||
if (st->current_prot & _PAGE_NOEXEC)
|
||||
return;
|
||||
	/* The first lowcore page is currently still W+X. */
	if (addr == PAGE_SIZE)
	/*
	 * The first lowcore page is W+X if spectre mitigations are using
	 * trampolines or the BEAR enhancement facility is not installed,
	 * in which case we have two lpswe instructions in lowcore that need
	 * to be executable.
	 */
	if (addr == PAGE_SIZE && (nospec_uses_trampoline() || !static_key_enabled(&cpu_has_bear)))
		return;
	WARN_ONCE(1, "s390/mm: Found insecure W+X mapping at address %pS\n",
		  (void *)st->start_address);
@ -203,7 +209,9 @@ void ptdump_check_wx(void)
	if (st.wx_pages)
		pr_warn("Checked W+X mappings: FAILED, %lu W+X pages found\n", st.wx_pages);
	else
		pr_info("Checked W+X mappings: passed, no unexpected W+X pages found\n");
		pr_info("Checked W+X mappings: passed, no %sW+X pages found\n",
			(nospec_uses_trampoline() || !static_key_enabled(&cpu_has_bear)) ?
			"unexpected " : "");
}
#endif /* CONFIG_DEBUG_WX */

@ -57,7 +57,7 @@ void arch_report_meminfo(struct seq_file *m)
|
||||
static void pgt_set(unsigned long *old, unsigned long new, unsigned long addr,
|
||||
unsigned long dtt)
|
||||
{
|
||||
unsigned long table, mask;
|
||||
unsigned long *table, mask;
|
||||
|
||||
mask = 0;
|
||||
if (MACHINE_HAS_EDAT2) {
|
||||
@ -72,7 +72,7 @@ static void pgt_set(unsigned long *old, unsigned long new, unsigned long addr,
|
||||
mask = ~(PTRS_PER_PTE * sizeof(pte_t) - 1);
|
||||
break;
|
||||
}
|
||||
table = (unsigned long)old & mask;
|
||||
table = (unsigned long *)((unsigned long)old & mask);
|
||||
crdte(*old, new, table, dtt, addr, S390_lowcore.kernel_asce);
|
||||
} else if (MACHINE_HAS_IDTE) {
|
||||
cspg(old, *old, new);
|
||||
|
@ -13,6 +13,7 @@
|
||||
#include <linux/hugetlb.h>
|
||||
#include <linux/slab.h>
|
||||
#include <asm/cacheflush.h>
|
||||
#include <asm/nospec-branch.h>
|
||||
#include <asm/pgalloc.h>
|
||||
#include <asm/setup.h>
|
||||
#include <asm/tlbflush.h>
|
||||
@ -584,8 +585,13 @@ void __init vmem_map_init(void)
	__set_memory(__stext_amode31, (__etext_amode31 - __stext_amode31) >> PAGE_SHIFT,
		     SET_MEMORY_RO | SET_MEMORY_X);

	/* we need lowcore executable for our LPSWE instructions */
	set_memory_x(0, 1);
	if (nospec_uses_trampoline() || !static_key_enabled(&cpu_has_bear)) {
		/*
		 * Lowcore must be executable for LPSWE
		 * and expoline trampoline branch instructions.
		 */
		set_memory_x(0, 1);
	}

	pr_info("Write protected kernel read-only data: %luk\n",
		(unsigned long)(__end_rodata - _stext) >> 10);

@ -567,7 +567,7 @@ static void bpf_jit_epilogue(struct bpf_jit *jit, u32 stack_depth)
|
||||
EMIT4(0xb9040000, REG_2, BPF_REG_0);
|
||||
/* Restore registers */
|
||||
save_restore_regs(jit, REGS_RESTORE, stack_depth);
|
||||
if (__is_defined(CC_USING_EXPOLINE) && !nospec_disable) {
|
||||
if (nospec_uses_trampoline()) {
|
||||
jit->r14_thunk_ip = jit->prg;
|
||||
/* Generate __s390_indirect_jump_r14 thunk */
|
||||
if (test_facility(35)) {
|
||||
@ -585,7 +585,7 @@ static void bpf_jit_epilogue(struct bpf_jit *jit, u32 stack_depth)
|
||||
/* br %r14 */
|
||||
_EMIT2(0x07fe);
|
||||
|
||||
if (__is_defined(CC_USING_EXPOLINE) && !nospec_disable &&
|
||||
if ((nospec_uses_trampoline()) &&
|
||||
(is_first_pass(jit) || (jit->seen & SEEN_FUNC))) {
|
||||
jit->r1_thunk_ip = jit->prg;
|
||||
/* Generate __s390_indirect_jump_r1 thunk */
|
||||
@ -1332,7 +1332,7 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
|
||||
jit->seen |= SEEN_FUNC;
|
||||
/* lgrl %w1,func */
|
||||
EMIT6_PCREL_RILB(0xc4080000, REG_W1, _EMIT_CONST_U64(func));
|
||||
if (__is_defined(CC_USING_EXPOLINE) && !nospec_disable) {
|
||||
if (nospec_uses_trampoline()) {
|
||||
/* brasl %r14,__s390_indirect_jump_r1 */
|
||||
EMIT6_PCREL_RILB(0xc0050000, REG_14, jit->r1_thunk_ip);
|
||||
} else {
|
||||
|
@ -18,6 +18,8 @@
|
||||
static struct kmem_cache *dma_region_table_cache;
|
||||
static struct kmem_cache *dma_page_table_cache;
|
||||
static int s390_iommu_strict;
|
||||
static u64 s390_iommu_aperture;
|
||||
static u32 s390_iommu_aperture_factor = 1;
|
||||
|
||||
static int zpci_refresh_global(struct zpci_dev *zdev)
|
||||
{
|
||||
@ -565,15 +567,19 @@ int zpci_dma_init_device(struct zpci_dev *zdev)

	/*
	 * Restrict the iommu bitmap size to the minimum of the following:
	 * - main memory size
	 * - s390_iommu_aperture which defaults to high_memory
	 * - 3-level pagetable address limit minus start_dma offset
	 * - DMA address range allowed by the hardware (clp query pci fn)
	 *
	 * Also set zdev->end_dma to the actual end address of the usable
	 * range, instead of the theoretical maximum as reported by hardware.
	 *
	 * This limits the number of concurrently usable DMA mappings since
	 * for each DMA mapped memory address we need a DMA address including
	 * extra DMA addresses for multiple mappings of the same memory address.
	 */
	zdev->start_dma = PAGE_ALIGN(zdev->start_dma);
	zdev->iommu_size = min3((u64) high_memory,
	zdev->iommu_size = min3(s390_iommu_aperture,
				ZPCI_TABLE_SIZE_RT - zdev->start_dma,
				zdev->end_dma - zdev->start_dma + 1);
	zdev->end_dma = zdev->start_dma + zdev->iommu_size - 1;
@ -660,6 +666,12 @@ static int __init dma_alloc_cpu_table_caches(void)

int __init zpci_dma_init(void)
{
	s390_iommu_aperture = (u64)high_memory;
	if (!s390_iommu_aperture_factor)
		s390_iommu_aperture = ULONG_MAX;
	else
		s390_iommu_aperture *= s390_iommu_aperture_factor;

	return dma_alloc_cpu_table_caches();
}

@ -692,3 +704,12 @@ static int __init s390_iommu_setup(char *str)
}

__setup("s390_iommu=", s390_iommu_setup);

static int __init s390_iommu_aperture_setup(char *str)
{
	if (kstrtou32(str, 10, &s390_iommu_aperture_factor))
		s390_iommu_aperture_factor = 1;
	return 1;
}

__setup("s390_iommu_aperture=", s390_iommu_aperture_setup);

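The effect of the new s390_iommu_aperture= parameter is easiest to see as plain arithmetic: the aperture defaults to high_memory, is multiplied by the given factor (0 meaning effectively unlimited), and the per-device bitmap is then clamped by the page-table and hardware limits via min3(). A self-contained sketch of that calculation, with invented helper and parameter names:

#include <stdint.h>

/* Illustration only; mirrors the min3() clamping above with invented names. */
static uint64_t effective_iommu_size(uint64_t high_memory, uint32_t factor,
				     uint64_t table_limit, uint64_t start_dma,
				     uint64_t end_dma)
{
	uint64_t aperture = factor ? high_memory * factor : UINT64_MAX;
	uint64_t table_room = table_limit - start_dma;	/* 3-level table limit */
	uint64_t hw_room = end_dma - start_dma + 1;	/* hardware-reported range */
	uint64_t size = aperture;

	if (table_room < size)
		size = table_room;
	if (hw_room < size)
		size = hw_room;
	return size;	/* e.g. s390_iommu_aperture=4 quadruples the default cap */
}
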
@ -52,6 +52,8 @@ static void __zpci_event_error(struct zpci_ccdf_err *ccdf)
|
||||
struct zpci_dev *zdev = get_zdev_by_fid(ccdf->fid);
|
||||
struct pci_dev *pdev = NULL;
|
||||
|
||||
zpci_dbg(3, "err fid:%x, fh:%x, pec:%x\n",
|
||||
ccdf->fid, ccdf->fh, ccdf->pec);
|
||||
zpci_err("error CCDF:\n");
|
||||
zpci_err_hex(ccdf, sizeof(*ccdf));
|
||||
|
||||
@ -96,6 +98,8 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
|
||||
struct zpci_dev *zdev = get_zdev_by_fid(ccdf->fid);
|
||||
enum zpci_state state;
|
||||
|
||||
zpci_dbg(3, "avl fid:%x, fh:%x, pec:%x\n",
|
||||
ccdf->fid, ccdf->fh, ccdf->pec);
|
||||
zpci_err("avail CCDF:\n");
|
||||
zpci_err_hex(ccdf, sizeof(*ccdf));
|
||||
|
||||
|
@ -90,6 +90,14 @@ static ssize_t recover_store(struct device *dev, struct device_attribute *attr,
|
||||
|
||||
if (zdev_enabled(zdev)) {
|
||||
ret = zpci_disable_device(zdev);
|
||||
/*
|
||||
* Due to a z/VM vs LPAR inconsistency in the error
|
||||
* state the FH may indicate an enabled device but
|
||||
* disable says the device is already disabled don't
|
||||
* treat it as an error here.
|
||||
*/
|
||||
if (ret == -EINVAL)
|
||||
ret = 0;
|
||||
if (ret)
|
||||
goto out;
|
||||
}
|
||||
|
@ -192,6 +192,8 @@ config X86
|
||||
select HAVE_DYNAMIC_FTRACE_WITH_REGS
|
||||
select HAVE_DYNAMIC_FTRACE_WITH_ARGS if X86_64
|
||||
select HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
|
||||
select HAVE_SAMPLE_FTRACE_DIRECT if X86_64
|
||||
select HAVE_SAMPLE_FTRACE_MULTI_DIRECT if X86_64
|
||||
select HAVE_EBPF_JIT
|
||||
select HAVE_EFFICIENT_UNALIGNED_ACCESS
|
||||
select HAVE_EISA
|
||||
|
@ -34,7 +34,7 @@ int dasd_gendisk_alloc(struct dasd_block *block)
{
	struct gendisk *gdp;
	struct dasd_device *base;
	int len;
	int len, rc;

	/* Make sure the minor for this device exists. */
	base = block->base;
@ -80,7 +80,13 @@ int dasd_gendisk_alloc(struct dasd_block *block)
	dasd_add_link_to_gendisk(gdp, base);
	block->gdp = gdp;
	set_capacity(block->gdp, 0);
	device_add_disk(&base->cdev->dev, block->gdp, NULL);

	rc = device_add_disk(&base->cdev->dev, block->gdp, NULL);
	if (rc) {
		dasd_gendisk_free(block);
		return rc;
	}

	return 0;
}

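This is one of the add_disk() error handling conversions mentioned in the merge summary; the dcssblk and scm_blk hunks below follow the same pattern. Reduced to a sketch with invented foo_* names (device_add_disk() returning int is the real interface change, the surrounding structure is illustrative):

#include <linux/blkdev.h>

struct foo_device {			/* invented container for the sketch */
	struct device dev;
	struct gendisk *gd;
};

static int foo_probe(struct foo_device *fdev)
{
	int rc;

	fdev->gd = blk_alloc_disk(NUMA_NO_NODE);
	if (!fdev->gd)
		return -ENOMEM;
	set_capacity(fdev->gd, 0);

	rc = device_add_disk(&fdev->dev, fdev->gd, NULL);	/* now returns int */
	if (rc) {
		blk_cleanup_disk(fdev->gd);	/* undo the allocation on failure */
		return rc;
	}
	return 0;
}
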
@ -696,7 +696,9 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
|
||||
}
|
||||
|
||||
get_device(&dev_info->dev);
|
||||
device_add_disk(&dev_info->dev, dev_info->gd, NULL);
|
||||
rc = device_add_disk(&dev_info->dev, dev_info->gd, NULL);
|
||||
if (rc)
|
||||
goto out_dax;
|
||||
|
||||
switch (dev_info->segment_type) {
|
||||
case SEG_TYPE_SR:
|
||||
@ -712,6 +714,10 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
|
||||
rc = count;
|
||||
goto out;
|
||||
|
||||
out_dax:
|
||||
put_device(&dev_info->dev);
|
||||
kill_dax(dev_info->dax_dev);
|
||||
put_dax(dev_info->dax_dev);
|
||||
put_dev:
|
||||
list_del(&dev_info->lh);
|
||||
blk_cleanup_disk(dev_info->gd);
|
||||
|
@ -495,9 +495,14 @@ int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
|
||||
|
||||
/* 512 byte sectors */
|
||||
set_capacity(bdev->gendisk, scmdev->size >> 9);
|
||||
device_add_disk(&scmdev->dev, bdev->gendisk, NULL);
|
||||
ret = device_add_disk(&scmdev->dev, bdev->gendisk, NULL);
|
||||
if (ret)
|
||||
goto out_cleanup_disk;
|
||||
|
||||
return 0;
|
||||
|
||||
out_cleanup_disk:
|
||||
blk_cleanup_disk(bdev->gendisk);
|
||||
out_tag:
|
||||
blk_mq_free_tag_set(&bdev->tag_set);
|
||||
out:
|
||||
|
@ -163,7 +163,7 @@ static inline void sclp_trace_req(int prio, char *id, struct sclp_req *req,
|
||||
summary.timeout = (u16)req->queue_timeout;
|
||||
summary.start_count = (u16)req->start_count;
|
||||
|
||||
sclp_trace(prio, id, (u32)(addr_t)sccb, summary.b, err);
|
||||
sclp_trace(prio, id, __pa(sccb), summary.b, err);
|
||||
}
|
||||
|
||||
static inline void sclp_trace_register(int prio, char *id, u32 a, u64 b,
|
||||
@ -502,7 +502,7 @@ sclp_add_request(struct sclp_req *req)
|
||||
}
|
||||
|
||||
/* RQAD: Request was added (a=sccb, b=caller) */
|
||||
sclp_trace(2, "RQAD", (u32)(addr_t)req->sccb, _RET_IP_, false);
|
||||
sclp_trace(2, "RQAD", __pa(req->sccb), _RET_IP_, false);
|
||||
|
||||
req->status = SCLP_REQ_QUEUED;
|
||||
req->start_count = 0;
|
||||
@ -617,15 +617,15 @@ __sclp_find_req(u32 sccb)
|
||||
|
||||
list_for_each(l, &sclp_req_queue) {
|
||||
req = list_entry(l, struct sclp_req, list);
|
||||
if (sccb == (u32) (addr_t) req->sccb)
|
||||
return req;
|
||||
if (sccb == __pa(req->sccb))
|
||||
return req;
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static bool ok_response(u32 sccb_int, sclp_cmdw_t cmd)
|
||||
{
|
||||
struct sccb_header *sccb = (struct sccb_header *)(addr_t)sccb_int;
|
||||
struct sccb_header *sccb = (struct sccb_header *)__va(sccb_int);
|
||||
struct evbuf_header *evbuf;
|
||||
u16 response;
|
||||
|
||||
@ -664,7 +664,7 @@ static void sclp_interrupt_handler(struct ext_code ext_code,
|
||||
|
||||
/* INT: Interrupt received (a=intparm, b=cmd) */
|
||||
sclp_trace_sccb(0, "INT", param32, active_cmd, active_cmd,
|
||||
(struct sccb_header *)(addr_t)finished_sccb,
|
||||
(struct sccb_header *)__va(finished_sccb),
|
||||
!ok_response(finished_sccb, active_cmd));
|
||||
|
||||
if (finished_sccb) {
|
||||
@ -1110,7 +1110,7 @@ static void sclp_check_handler(struct ext_code ext_code,
|
||||
/* Is this the interrupt we are waiting for? */
|
||||
if (finished_sccb == 0)
|
||||
return;
|
||||
if (finished_sccb != (u32) (addr_t) sclp_init_sccb)
|
||||
if (finished_sccb != __pa(sclp_init_sccb))
|
||||
panic("sclp: unsolicited interrupt for buffer at 0x%x\n",
|
||||
finished_sccb);
|
||||
spin_lock(&sclp_lock);
|
||||
|
@ -333,7 +333,7 @@ static inline int sclp_service_call(sclp_cmdw_t command, void *sccb)
|
||||
"2:\n"
|
||||
EX_TABLE(0b, 2b)
|
||||
EX_TABLE(1b, 2b)
|
||||
: "+&d" (cc) : "d" (command), "a" ((unsigned long)sccb)
|
||||
: "+&d" (cc) : "d" (command), "a" (__pa(sccb))
|
||||
: "cc", "memory");
|
||||
if (cc == 4)
|
||||
return -EINVAL;
|
||||
|
@ -155,6 +155,11 @@ static void __init sclp_early_console_detect(struct init_sccb *sccb)
|
||||
sclp.has_linemode = 1;
|
||||
}
|
||||
|
||||
void __init sclp_early_adjust_va(void)
|
||||
{
|
||||
sclp_early_sccb = __va((unsigned long)sclp_early_sccb);
|
||||
}
|
||||
|
||||
void __init sclp_early_detect(void)
|
||||
{
|
||||
void *sccb = sclp_early_sccb;
|
||||
|
@ -31,6 +31,8 @@ static u64 sclp_ftp_length;
|
||||
|
||||
/**
|
||||
* sclp_ftp_txcb() - Diagnostic Test FTP services SCLP command callback
|
||||
* @req: sclp request
|
||||
* @data: pointer to struct completion
|
||||
*/
|
||||
static void sclp_ftp_txcb(struct sclp_req *req, void *data)
|
||||
{
|
||||
@ -45,6 +47,7 @@ static void sclp_ftp_txcb(struct sclp_req *req, void *data)
|
||||
|
||||
/**
|
||||
* sclp_ftp_rxcb() - Diagnostic Test FTP services receiver event callback
|
||||
* @evbuf: pointer to Diagnostic Test (ET7) event buffer
|
||||
*/
|
||||
static void sclp_ftp_rxcb(struct evbuf_header *evbuf)
|
||||
{
|
||||
|
@ -122,6 +122,7 @@ static void sclp_sd_listener_remove(struct sclp_sd_listener *listener)
|
||||
|
||||
/**
|
||||
* sclp_sd_listener_init() - Initialize a Store Data response listener
|
||||
* @listener: Response listener to initialize
|
||||
* @id: Event ID to listen for
|
||||
*
|
||||
* Initialize a listener for asynchronous Store Data responses. This listener
|
||||
@ -193,7 +194,7 @@ static int sclp_sd_sync(unsigned long page, u8 eq, u8 di, u64 sat, u64 sa,
|
||||
struct sclp_sd_evbuf *evbuf;
|
||||
int rc;
|
||||
|
||||
sclp_sd_listener_init(&listener, (u32) (addr_t) sccb);
|
||||
sclp_sd_listener_init(&listener, __pa(sccb));
|
||||
sclp_sd_listener_add(&listener);
|
||||
|
||||
/* Prepare SCCB */
|
||||
@ -403,6 +404,7 @@ static int sclp_sd_file_update(struct sclp_sd_file *sd_file)
|
||||
/**
|
||||
* sclp_sd_file_update_async() - Wrapper for asynchronous update call
|
||||
* @data: Object to update
|
||||
* @cookie: Unused
|
||||
*/
|
||||
static void sclp_sd_file_update_async(void *data, async_cookie_t cookie)
|
||||
{
|
||||
@ -414,6 +416,9 @@ static void sclp_sd_file_update_async(void *data, async_cookie_t cookie)
|
||||
/**
|
||||
* reload_store() - Store function for "reload" sysfs attribute
|
||||
* @kobj: Kobject of sclp_sd_file object
|
||||
* @attr: Reload attribute
|
||||
* @buf: Data written to sysfs attribute
|
||||
* @count: Count of bytes written
|
||||
*
|
||||
* Initiate a reload of the data associated with an sclp_sd_file object.
|
||||
*/
|
||||
@ -441,8 +446,10 @@ static struct kobj_type sclp_sd_file_ktype = {
|
||||
};
|
||||
|
||||
/**
|
||||
* data_read() - Read function for "read" sysfs attribute
|
||||
* data_read() - Read function for "data" sysfs attribute
|
||||
* @file: Open file pointer
|
||||
* @kobj: Kobject of sclp_sd_file object
|
||||
* @attr: Data attribute
|
||||
* @buffer: Target buffer
|
||||
* @off: Requested file offset
|
||||
* @size: Requested number of bytes
|
||||
|
@ -768,6 +768,8 @@ out_driver:
|
||||
}
|
||||
__initcall(sclp_vt220_tty_init);
|
||||
|
||||
#ifdef CONFIG_SCLP_VT220_CONSOLE
|
||||
|
||||
static void __sclp_vt220_flush_buffer(void)
|
||||
{
|
||||
unsigned long flags;
|
||||
@ -784,8 +786,6 @@ static void __sclp_vt220_flush_buffer(void)
|
||||
spin_unlock_irqrestore(&sclp_vt220_lock, flags);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_SCLP_VT220_CONSOLE
|
||||
|
||||
static void
|
||||
sclp_vt220_con_write(struct console *con, const char *buf, unsigned int count)
|
||||
{
|
||||
|
@ -792,10 +792,13 @@ static int __unset_online(struct device *dev, void *data)
|
||||
{
|
||||
struct idset *set = data;
|
||||
struct subchannel *sch = to_subchannel(dev);
|
||||
struct ccw_device *cdev = sch_get_cdev(sch);
|
||||
struct ccw_device *cdev;
|
||||
|
||||
if (cdev && cdev->online)
|
||||
idset_sch_del(set, sch->schid);
|
||||
if (sch->st == SUBCHANNEL_TYPE_IO) {
|
||||
cdev = sch_get_cdev(sch);
|
||||
if (cdev && cdev->online)
|
||||
idset_sch_del(set, sch->schid);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -1322,6 +1322,7 @@ static int purge_fn(struct device *dev, void *data)
|
||||
{
|
||||
struct ccw_device *cdev = to_ccwdev(dev);
|
||||
struct ccw_dev_id *id = &cdev->private->dev_id;
|
||||
struct subchannel *sch = to_subchannel(cdev->dev.parent);
|
||||
|
||||
spin_lock_irq(cdev->ccwlock);
|
||||
if (is_blacklisted(id->ssid, id->devno) &&
|
||||
@ -1330,6 +1331,7 @@ static int purge_fn(struct device *dev, void *data)
|
||||
CIO_MSG_EVENT(3, "ccw: purging 0.%x.%04x\n", id->ssid,
|
||||
id->devno);
|
||||
ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
|
||||
css_sched_sch_todo(sch, SCH_TODO_UNREG);
|
||||
atomic_set(&cdev->private->onoff, 0);
|
||||
}
|
||||
spin_unlock_irq(cdev->ccwlock);
|
||||
|
@ -825,13 +825,23 @@ EXPORT_SYMBOL_GPL(ccw_device_get_chid);
 */
void *ccw_device_dma_zalloc(struct ccw_device *cdev, size_t size)
{
	return cio_gp_dma_zalloc(cdev->private->dma_pool, &cdev->dev, size);
	void *addr;

	if (!get_device(&cdev->dev))
		return NULL;
	addr = cio_gp_dma_zalloc(cdev->private->dma_pool, &cdev->dev, size);
	if (IS_ERR_OR_NULL(addr))
		put_device(&cdev->dev);
	return addr;
}
EXPORT_SYMBOL(ccw_device_dma_zalloc);

void ccw_device_dma_free(struct ccw_device *cdev, void *cpu_addr, size_t size)
{
	if (!cpu_addr)
		return;
	cio_gp_dma_free(cdev->private->dma_pool, cpu_addr, size);
	put_device(&cdev->dev);
}
EXPORT_SYMBOL(ccw_device_dma_free);

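ccw_device_dma_zalloc() now pins the device with get_device() for as long as the buffer exists, and ccw_device_dma_free() drops that reference again. The same reference-per-allocation idea applies to any resource handed out on behalf of a struct device; a generic sketch follows (the foo_* names are placeholders, only get_device(), put_device(), kzalloc() and kfree() are real APIs):

#include <linux/device.h>
#include <linux/slab.h>

/* Sketch of the reference-per-allocation pattern used above. */
static void *foo_buffer_get(struct device *dev, size_t size)
{
	void *buf;

	if (!get_device(dev))		/* pin the device while the buffer lives */
		return NULL;
	buf = kzalloc(size, GFP_KERNEL);
	if (!buf)
		put_device(dev);	/* drop the pin again on failure */
	return buf;
}

static void foo_buffer_put(struct device *dev, void *buf)
{
	if (!buf)
		return;
	kfree(buf);
	put_device(dev);		/* matches the get in foo_buffer_get() */
}
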
@@ -61,6 +61,10 @@ static char *aqm_str;
 module_param_named(aqmask, aqm_str, charp, 0440);
 MODULE_PARM_DESC(aqmask, "AP bus domain mask.");

+static int ap_useirq = 1;
+module_param_named(useirq, ap_useirq, int, 0440);
+MODULE_PARM_DESC(useirq, "Use interrupt if available, default is 1 (on).");
+
 atomic_t ap_max_msg_size = ATOMIC_INIT(AP_DEFAULT_MAX_MSG_SIZE);
 EXPORT_SYMBOL(ap_max_msg_size);

@@ -725,7 +729,7 @@ static void ap_check_bindings_complete(void)
 	if (bound == apqns) {
 		if (!completion_done(&ap_init_apqn_bindings_complete)) {
 			complete_all(&ap_init_apqn_bindings_complete);
-			AP_DBF(DBF_INFO, "%s complete\n", __func__);
+			AP_DBF_INFO("%s complete\n", __func__);
 		}
 		ap_send_bindings_complete_uevent();
 	}
@@ -786,9 +790,12 @@ static int __ap_revise_reserved(struct device *dev, void *dummy)
 		drvres = to_ap_drv(dev->driver)->flags
 			& AP_DRIVER_FLAG_DEFAULT;
 		if (!!devres != !!drvres) {
-			AP_DBF_DBG("reprobing queue=%02x.%04x\n",
-				   card, queue);
+			AP_DBF_DBG("%s reprobing queue=%02x.%04x\n",
+				   __func__, card, queue);
 			rc = device_reprobe(dev);
+			if (rc)
+				AP_DBF_WARN("%s reprobing queue=%02x.%04x failed\n",
+					    __func__, card, queue);
 		}
 	}

@@ -1118,7 +1125,8 @@ static ssize_t ap_domain_store(struct bus_type *bus,
 	ap_domain_index = domain;
 	spin_unlock_bh(&ap_domain_lock);

-	AP_DBF_INFO("stored new default domain=%d\n", domain);
+	AP_DBF_INFO("%s stored new default domain=%d\n",
+		    __func__, domain);

 	return count;
 }
@@ -1433,8 +1441,9 @@ static int ap_get_compatible_type(ap_qid_t qid, int rawtype, unsigned int func)

 	/* < CEX2A is not supported */
 	if (rawtype < AP_DEVICE_TYPE_CEX2A) {
-		AP_DBF_WARN("get_comp_type queue=%02x.%04x unsupported type %d\n",
-			    AP_QID_CARD(qid), AP_QID_QUEUE(qid), rawtype);
+		AP_DBF_WARN("%s queue=%02x.%04x unsupported type %d\n",
+			    __func__, AP_QID_CARD(qid),
+			    AP_QID_QUEUE(qid), rawtype);
 		return 0;
 	}
 	/* up to CEX7 known and fully supported */
@@ -1458,11 +1467,12 @@ static int ap_get_compatible_type(ap_qid_t qid, int rawtype, unsigned int func)
 		comp_type = apinfo.cat;
 	}
 	if (!comp_type)
-		AP_DBF_WARN("get_comp_type queue=%02x.%04x unable to map type %d\n",
-			    AP_QID_CARD(qid), AP_QID_QUEUE(qid), rawtype);
+		AP_DBF_WARN("%s queue=%02x.%04x unable to map type %d\n",
+			    __func__, AP_QID_CARD(qid),
+			    AP_QID_QUEUE(qid), rawtype);
 	else if (comp_type != rawtype)
-		AP_DBF_INFO("get_comp_type queue=%02x.%04x map type %d to %d\n",
-			    AP_QID_CARD(qid), AP_QID_QUEUE(qid),
+		AP_DBF_INFO("%s queue=%02x.%04x map type %d to %d\n",
+			    __func__, AP_QID_CARD(qid), AP_QID_QUEUE(qid),
 			    rawtype, comp_type);
 	return comp_type;
 }
@@ -1535,7 +1545,7 @@ static inline void ap_scan_domains(struct ap_card *ac)
 		aq = dev ? to_ap_queue(dev) : NULL;
 		if (!ap_test_config_usage_domain(dom)) {
 			if (dev) {
-				AP_DBF_INFO("%s(%d,%d) not in config any more, rm queue device\n",
+				AP_DBF_INFO("%s(%d,%d) not in config anymore, rm queue dev\n",
 					    __func__, ac->id, dom);
 				device_unregister(dev);
 				put_device(dev);
@@ -1545,9 +1555,8 @@ static inline void ap_scan_domains(struct ap_card *ac)
 		/* domain is valid, get info from this APQN */
 		if (!ap_queue_info(qid, &type, &func, &depth, &ml, &decfg)) {
 			if (aq) {
-				AP_DBF_INFO(
-					"%s(%d,%d) ap_queue_info() not successful, rm queue device\n",
-					__func__, ac->id, dom);
+				AP_DBF_INFO("%s(%d,%d) queue_info() failed, rm queue dev\n",
+					    __func__, ac->id, dom);
 				device_unregister(dev);
 				put_device(dev);
 			}
@@ -1577,10 +1586,10 @@ static inline void ap_scan_domains(struct ap_card *ac)
 			/* get it and thus adjust reference counter */
 			get_device(dev);
 			if (decfg)
-				AP_DBF_INFO("%s(%d,%d) new (decfg) queue device created\n",
+				AP_DBF_INFO("%s(%d,%d) new (decfg) queue dev created\n",
 					    __func__, ac->id, dom);
 			else
-				AP_DBF_INFO("%s(%d,%d) new queue device created\n",
+				AP_DBF_INFO("%s(%d,%d) new queue dev created\n",
 					    __func__, ac->id, dom);
 			goto put_dev_and_continue;
 		}
@@ -1594,7 +1603,7 @@ static inline void ap_scan_domains(struct ap_card *ac)
 				aq->last_err_rc = AP_RESPONSE_DECONFIGURED;
 			}
 			spin_unlock_bh(&aq->lock);
-			AP_DBF_INFO("%s(%d,%d) queue device config off\n",
+			AP_DBF_INFO("%s(%d,%d) queue dev config off\n",
 				    __func__, ac->id, dom);
 			ap_send_config_uevent(&aq->ap_dev, aq->config);
 			/* 'receive' pending messages with -EAGAIN */
@@ -1609,7 +1618,7 @@ static inline void ap_scan_domains(struct ap_card *ac)
 				aq->sm_state = AP_SM_STATE_RESET_START;
 			}
 			spin_unlock_bh(&aq->lock);
-			AP_DBF_INFO("%s(%d,%d) queue device config on\n",
+			AP_DBF_INFO("%s(%d,%d) queue dev config on\n",
 				    __func__, ac->id, dom);
 			ap_send_config_uevent(&aq->ap_dev, aq->config);
 			goto put_dev_and_continue;
@@ -1621,7 +1630,7 @@ static inline void ap_scan_domains(struct ap_card *ac)
 			ap_flush_queue(aq);
 			/* re-init (with reset) the queue device */
 			ap_queue_init_state(aq);
-			AP_DBF_INFO("%s(%d,%d) queue device reinit enforced\n",
+			AP_DBF_INFO("%s(%d,%d) queue dev reinit enforced\n",
 				    __func__, ac->id, dom);
 			goto put_dev_and_continue;
 		}
@@ -1653,7 +1662,7 @@ static inline void ap_scan_adapter(int ap)
 	/* Adapter not in configuration ? */
 	if (!ap_test_config_card_id(ap)) {
 		if (ac) {
-			AP_DBF_INFO("%s(%d) ap not in config any more, rm card and queue devices\n",
+			AP_DBF_INFO("%s(%d) ap not in config any more, rm card and queue devs\n",
 				    __func__, ap);
 			ap_scan_rm_card_dev_and_queue_devs(ac);
 			put_device(dev);
@@ -1678,9 +1687,8 @@ static inline void ap_scan_adapter(int ap)
 	if (dom > ap_max_domain_id) {
 		/* Could not find a valid APQN for this adapter */
 		if (ac) {
-			AP_DBF_INFO(
-				"%s(%d) no type info (no APQN found), rm card and queue devices\n",
-				__func__, ap);
+			AP_DBF_INFO("%s(%d) no type info (no APQN found), rm card and queue devs\n",
+				    __func__, ap);
 			ap_scan_rm_card_dev_and_queue_devs(ac);
 			put_device(dev);
 		} else {
@@ -1692,7 +1700,7 @@ static inline void ap_scan_adapter(int ap)
 	if (!type) {
 		/* No apdater type info available, an unusable adapter */
 		if (ac) {
-			AP_DBF_INFO("%s(%d) no valid type (0) info, rm card and queue devices\n",
+			AP_DBF_INFO("%s(%d) no valid type (0) info, rm card and queue devs\n",
 				    __func__, ap);
 			ap_scan_rm_card_dev_and_queue_devs(ac);
 			put_device(dev);
@@ -1706,13 +1714,13 @@ static inline void ap_scan_adapter(int ap)
 	if (ac) {
 		/* Check APQN against existing card device for changes */
 		if (ac->raw_hwtype != type) {
-			AP_DBF_INFO("%s(%d) hwtype %d changed, rm card and queue devices\n",
+			AP_DBF_INFO("%s(%d) hwtype %d changed, rm card and queue devs\n",
 				    __func__, ap, type);
 			ap_scan_rm_card_dev_and_queue_devs(ac);
 			put_device(dev);
 			ac = NULL;
 		} else if (ac->functions != func) {
-			AP_DBF_INFO("%s(%d) functions 0x%08x changed, rm card and queue devices\n",
+			AP_DBF_INFO("%s(%d) functions 0x%08x changed, rm card and queue devs\n",
 				    __func__, ap, type);
 			ap_scan_rm_card_dev_and_queue_devs(ac);
 			put_device(dev);
@@ -1720,13 +1728,13 @@ static inline void ap_scan_adapter(int ap)
 		} else {
 			if (decfg && ac->config) {
 				ac->config = false;
-				AP_DBF_INFO("%s(%d) card device config off\n",
+				AP_DBF_INFO("%s(%d) card dev config off\n",
 					    __func__, ap);
 				ap_send_config_uevent(&ac->ap_dev, ac->config);
 			}
 			if (!decfg && !ac->config) {
 				ac->config = true;
-				AP_DBF_INFO("%s(%d) card device config on\n",
+				AP_DBF_INFO("%s(%d) card dev config on\n",
 					    __func__, ap);
 				ap_send_config_uevent(&ac->ap_dev, ac->config);
 			}
@@ -1756,7 +1764,8 @@ static inline void ap_scan_adapter(int ap)
 		if (ac->maxmsgsize > atomic_read(&ap_max_msg_size)) {
 			atomic_set(&ap_max_msg_size, ac->maxmsgsize);
 			AP_DBF_INFO("%s(%d) ap_max_msg_size update to %d byte\n",
-				    __func__, ap, atomic_read(&ap_max_msg_size));
+				    __func__, ap,
+				    atomic_read(&ap_max_msg_size));
 		}
 		/* Register the new card device with AP bus */
 		rc = device_register(dev);
@@ -1769,10 +1778,10 @@ static inline void ap_scan_adapter(int ap)
 		/* get it and thus adjust reference counter */
 		get_device(dev);
 		if (decfg)
-			AP_DBF_INFO("%s(%d) new (decfg) card device type=%d func=0x%08x created\n",
+			AP_DBF_INFO("%s(%d) new (decfg) card dev type=%d func=0x%08x created\n",
 				    __func__, ap, type, func);
 		else
-			AP_DBF_INFO("%s(%d) new card device type=%d func=0x%08x created\n",
+			AP_DBF_INFO("%s(%d) new card dev type=%d func=0x%08x created\n",
 				    __func__, ap, type, func);
 	}

@@ -1810,12 +1819,12 @@ static void ap_scan_bus(struct work_struct *unused)
 		if (dev)
 			put_device(dev);
 		else
-			AP_DBF_INFO("no queue device with default domain %d available\n",
-				    ap_domain_index);
+			AP_DBF_INFO("%s no queue device with default domain %d available\n",
+				    __func__, ap_domain_index);
 	}

 	if (atomic64_inc_return(&ap_scan_bus_count) == 1) {
-		AP_DBF(DBF_DEBUG, "%s init scan complete\n", __func__);
+		AP_DBF_DBG("%s init scan complete\n", __func__);
 		ap_send_init_scan_done_uevent();
 		ap_check_bindings_complete();
 	}
@@ -1830,7 +1839,7 @@ static void ap_config_timeout(struct timer_list *unused)

 static int __init ap_debug_init(void)
 {
-	ap_dbf_info = debug_register("ap", 1, 1,
+	ap_dbf_info = debug_register("ap", 2, 1,
 				     DBF_MAX_SPRINTF_ARGS * sizeof(long));
 	debug_register_view(ap_dbf_info, &debug_sprintf_view);
 	debug_set_level(ap_dbf_info, DBF_ERR);
@@ -1897,7 +1906,7 @@ static int __init ap_module_init(void)
 	}

 	/* enable interrupts if available */
-	if (ap_interrupts_available()) {
+	if (ap_interrupts_available() && ap_useirq) {
 		rc = register_adapter_interrupt(&ap_airq);
 		ap_irq_flag = (rc == 0);
 	}
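Taken together, the first and last hunks above amount to the following behaviour; this condensed sketch only restates what the patch shows (ap_airq and the helper functions are AP bus internals from the surrounding file, not defined here):

/* Condensed, illustrative view of the new ap.useirq handling. */
static int ap_useirq = 1;
module_param_named(useirq, ap_useirq, int, 0440);
MODULE_PARM_DESC(useirq, "Use interrupt if available, default is 1 (on).");

static void ap_init_irq_sketch(void)
{
	int rc;

	/* Interrupts are now used only if the hardware offers them AND the
	 * administrator did not load the module with ap.useirq=0; otherwise
	 * the AP bus keeps running in its polling mode. */
	if (ap_interrupts_available() && ap_useirq) {
		rc = register_adapter_interrupt(&ap_airq);
		ap_irq_flag = (rc == 0);
	}
}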
@@ -16,7 +16,7 @@
 #define RC2ERR(rc) ((rc) ? DBF_ERR : DBF_INFO)
 #define RC2WARN(rc) ((rc) ? DBF_WARN : DBF_INFO)

-#define DBF_MAX_SPRINTF_ARGS 5
+#define DBF_MAX_SPRINTF_ARGS 6

 #define AP_DBF(...) \
 	debug_sprintf_event(ap_dbf_info, ##__VA_ARGS__)
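The bump of DBF_MAX_SPRINTF_ARGS from 5 to 6 matches the message rework in the previous hunks: once __func__ is prepended, the largest trace entries carry one more argument. A rough sketch of the relationship; the wrapper definition below is an assumption about the AP debug header, not shown in this diff:

/* Assumed wrapper shape: debug_sprintf_event() stores the format pointer
 * plus its arguments in a record of DBF_MAX_SPRINTF_ARGS longs. */
#define DBF_MAX_SPRINTF_ARGS 6
#define AP_DBF_INFO(...) \
	debug_sprintf_event(ap_dbf_info, DBF_INFO, ##__VA_ARGS__)

/* Worst case after the rework, e.g. from ap_get_compatible_type():
 *
 *   AP_DBF_INFO("%s queue=%02x.%04x map type %d to %d\n",
 *               __func__, AP_QID_CARD(qid), AP_QID_QUEUE(qid),
 *               rawtype, comp_type);
 *
 * five arguments plus the format pointer no longer fit into five longs,
 * hence the new limit of six. */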
@@ -157,6 +157,8 @@ static struct ap_queue_status ap_sm_recv(struct ap_queue *aq)
 	switch (status.response_code) {
 	case AP_RESPONSE_NORMAL:
 		aq->queue_count = max_t(int, 0, aq->queue_count - 1);
+		if (!status.queue_empty && !aq->queue_count)
+			aq->queue_count++;
 		if (aq->queue_count > 0)
 			mod_timer(&aq->timeout,
 				  jiffies + aq->request_timeout);
@@ -246,6 +248,7 @@ static enum ap_sm_wait ap_sm_write(struct ap_queue *aq)

 	if (aq->requestq_count <= 0)
 		return AP_SM_WAIT_NONE;

+	/* Start the next request on the queue. */
 	ap_msg = list_entry(aq->requestq.next, struct ap_message, list);
 #ifdef CONFIG_ZCRYPT_DEBUG
@@ -279,7 +282,7 @@ static enum ap_sm_wait ap_sm_write(struct ap_queue *aq)
 		aq->sm_state = AP_SM_STATE_RESET_WAIT;
 		return AP_SM_WAIT_TIMEOUT;
 	case AP_RESPONSE_INVALID_DOMAIN:
-		AP_DBF(DBF_WARN, "AP_RESPONSE_INVALID_DOMAIN on NQAP\n");
+		AP_DBF_WARN("%s RESPONSE_INVALID_DOMAIN on NQAP\n", __func__);
 		fallthrough;
 	case AP_RESPONSE_MESSAGE_TOO_BIG:
 	case AP_RESPONSE_REQ_FAC_NOT_INST:
@@ -571,8 +574,8 @@ static ssize_t reset_store(struct device *dev,
 	ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
 	spin_unlock_bh(&aq->lock);

-	AP_DBF(DBF_INFO, "reset queue=%02x.%04x triggered by user\n",
-	       AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
+	AP_DBF_INFO("%s reset queue=%02x.%04x triggered by user\n",
+		    __func__, AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));

 	return count;
 }
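The two lines added in ap_sm_recv() are easy to misread, so here is the intent restated as a stand-alone sketch with comments. Names follow the hunk; the surrounding state machine and the ap_queue/ap_queue_status types from the AP bus headers are assumed, and the rationale is my reading of the change rather than text from the patch:

/* Illustrative restatement of the reply-accounting fix in ap_sm_recv(). */
static void account_reply(struct ap_queue *aq, struct ap_queue_status status)
{
	/* One reply was dequeued, so drop the outstanding-message counter,
	 * but never let it go negative. */
	aq->queue_count = max_t(int, 0, aq->queue_count - 1);

	/* If the hardware still reports pending replies although the
	 * counter already reached zero, keep it at 1 so the state machine
	 * continues to poll instead of going idle and leaving a reply
	 * stuck on the queue. */
	if (!status.queue_empty && !aq->queue_count)
		aq->queue_count++;
}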
@@ -42,10 +42,13 @@ static struct ap_device_id ap_queue_ids[] = {
 MODULE_DEVICE_TABLE(vfio_ap, ap_queue_ids);

 /**
- * vfio_ap_queue_dev_probe:
+ * vfio_ap_queue_dev_probe: Allocate a vfio_ap_queue structure and associate it
+ * with the device as driver_data.
  *
- * Allocate a vfio_ap_queue structure and associate it
- * with the device as driver_data.
+ * @apdev: the AP device being probed
+ *
+ * Return: returns 0 if the probe succeeded; otherwise, returns -ENOMEM if
+ * storage could not be allocated for a vfio_ap_queue object.
  */
 static int vfio_ap_queue_dev_probe(struct ap_device *apdev)
 {
@@ -61,10 +64,11 @@ static int vfio_ap_queue_dev_probe(struct ap_device *apdev)
 }

 /**
- * vfio_ap_queue_dev_remove:
+ * vfio_ap_queue_dev_remove: Free the associated vfio_ap_queue structure.
  *
- * Takes the matrix lock to avoid actions on this device while removing
- * Free the associated vfio_ap_queue structure
+ * @apdev: the AP device being removed
+ *
+ * Takes the matrix lock to avoid actions on this device while doing the remove.
  */
 static void vfio_ap_queue_dev_remove(struct ap_device *apdev)
 {
@@ -187,6 +187,8 @@ end_free:
  * vfio_ap_irq_enable - Enable Interruption for a APQN
  *
  * @q: the vfio_ap_queue holding AQIC parameters
+ * @isc: the guest ISC to register with the GIB interface
+ * @nib: the notification indicator byte to pin.
  *
  * Pin the NIB saved in *q
  * Register the guest ISC to GIB interface and retrieve the
@@ -738,7 +740,6 @@ vfio_ap_mdev_verify_queues_reserved_for_apqi(struct ap_matrix_mdev *matrix_mdev,
  * assign_domain_store - parses the APQI from @buf and sets the
  * corresponding bit in the mediated matrix device's AQM
  *
- *
  * @dev: the matrix device
  * @attr: the mediated matrix device's assign_domain attribute
  * @buf: a buffer containing the AP queue index (APQI) of the domain to
@@ -866,7 +867,6 @@ static DEVICE_ATTR_WO(unassign_domain);
  * assign_control_domain_store - parses the domain ID from @buf and sets
  * the corresponding bit in the mediated matrix device's ADM
  *
- *
  * @dev: the matrix device
  * @attr: the mediated matrix device's assign_control_domain attribute
  * @buf: a buffer containing the domain ID to be assigned
@@ -1142,6 +1142,7 @@ static int vfio_ap_mdev_iommu_notifier(struct notifier_block *nb,
  * by @matrix_mdev.
  *
  * @matrix_mdev: a matrix mediated device
+ * @kvm: the pointer to the kvm structure being unset.
  *
  * Note: The matrix_dev->lock must be taken prior to calling
  * this function; however, the lock will be temporarily released while the
@@ -26,16 +26,18 @@
 #define VFIO_AP_DRV_NAME "vfio_ap"

 /**
- * ap_matrix_dev - the AP matrix device structure
+ * struct ap_matrix_dev - Contains the data for the matrix device.
+ *
  * @device: generic device structure associated with the AP matrix device
  * @available_instances: number of mediated matrix devices that can be created
  * @info: the struct containing the output from the PQAP(QCI) instruction
- * mdev_list: the list of mediated matrix devices created
- * lock: mutex for locking the AP matrix device. This lock will be
+ * @mdev_list: the list of mediated matrix devices created
+ * @lock: mutex for locking the AP matrix device. This lock will be
  *	taken every time we fiddle with state managed by the vfio_ap
  *	driver, be it using @mdev_list or writing the state of a
  *	single ap_matrix_mdev device. It's quite coarse but we don't
  *	expect much contention.
+ * @vfio_ap_drv: the vfio_ap device driver
  */
 struct ap_matrix_dev {
 	struct device device;
@@ -49,17 +51,19 @@ struct ap_matrix_dev {
 extern struct ap_matrix_dev *matrix_dev;

 /**
- * The AP matrix is comprised of three bit masks identifying the adapters,
- * queues (domains) and control domains that belong to an AP matrix. The bits in
- * each mask, from least significant to most significant bit, correspond to IDs
- * 0 to 255. When a bit is set, the corresponding ID belongs to the matrix.
+ * struct ap_matrix - matrix of adapters, domains and control domains
  *
  * @apm_max: max adapter number in @apm
- * @apm identifies the AP adapters in the matrix
+ * @apm: identifies the AP adapters in the matrix
  * @aqm_max: max domain number in @aqm
- * @aqm identifies the AP queues (domains) in the matrix
+ * @aqm: identifies the AP queues (domains) in the matrix
  * @adm_max: max domain number in @adm
- * @adm identifies the AP control domains in the matrix
+ * @adm: identifies the AP control domains in the matrix
+ *
+ * The AP matrix is comprised of three bit masks identifying the adapters,
+ * queues (domains) and control domains that belong to an AP matrix. The bits in
+ * each mask, from left to right, correspond to IDs 0 to 255. When a bit is set
+ * the corresponding ID belongs to the matrix.
  */
 struct ap_matrix {
 	unsigned long apm_max;
@@ -71,13 +75,20 @@ struct ap_matrix {
 };

 /**
- * struct ap_matrix_mdev - the mediated matrix device structure
- * @list: allows the ap_matrix_mdev struct to be added to a list
+ * struct ap_matrix_mdev - Contains the data associated with a matrix mediated
+ *			   device.
+ * @vdev: the vfio device
+ * @node: allows the ap_matrix_mdev struct to be added to a list
  * @matrix: the adapters, usage domains and control domains assigned to the
  *	mediated matrix device.
  * @group_notifier: notifier block used for specifying callback function for
  *	handling the VFIO_GROUP_NOTIFY_SET_KVM event
+ * @iommu_notifier: notifier block used for specifying callback function for
+ *	handling the VFIO_IOMMU_NOTIFY_DMA_UNMAP event
  * @kvm: the struct holding guest's state
+ * @pqap_hook: the function pointer to the interception handler for the
+ *	PQAP(AQIC) instruction.
+ * @mdev: the mediated device
  */
 struct ap_matrix_mdev {
 	struct vfio_device vdev;
@@ -90,6 +101,14 @@ struct ap_matrix_mdev {
 	struct mdev_device *mdev;
 };

+/**
+ * struct vfio_ap_queue - contains the data associated with a queue bound to the
+ *			  vfio_ap device driver
+ * @matrix_mdev: the matrix mediated device
+ * @saved_pfn: the guest PFN pinned for the guest
+ * @apqn: the APQN of the AP queue device
+ * @saved_isc: the guest ISC registered with the GIB interface
+ */
 struct vfio_ap_queue {
 	struct ap_matrix_mdev *matrix_mdev;
 	unsigned long saved_pfn;
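A small sketch of how the bit convention documented above ("from left to right, IDs 0 to 255") is typically exercised. The use of the s390 MSB-0 bit helpers set_bit_inv()/test_bit_inv() is an assumption for illustration; it is not shown in this hunk:

/* Illustrative only: marking adapter 5 and usage domain 17 as assigned
 * in an ap_matrix, following the MSB-0 bit numbering described above. */
#include <linux/bitops.h>
#include <asm/bitops.h>		/* set_bit_inv()/test_bit_inv() on s390 */

static void example_assign(struct ap_matrix *m)
{
	set_bit_inv(5, m->apm);		/* adapter ID 5 */
	set_bit_inv(17, m->aqm);	/* usage domain (APQI) 17 */

	if (test_bit_inv(5, m->apm) && test_bit_inv(17, m->aqm))
		;	/* APQN (5,17) now belongs to the matrix */
}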
@@ -82,8 +82,8 @@ static inline int zcrypt_process_rescan(void)
 		atomic_set(&zcrypt_rescan_req, 0);
 		atomic_inc(&zcrypt_rescan_count);
 		ap_bus_force_rescan();
-		ZCRYPT_DBF(DBF_INFO, "rescan count=%07d\n",
-			   atomic_inc_return(&zcrypt_rescan_count));
+		ZCRYPT_DBF_INFO("%s rescan count=%07d\n", __func__,
+				atomic_inc_return(&zcrypt_rescan_count));
 		return 1;
 	}
 	return 0;
@@ -341,8 +341,8 @@ static void zcdn_device_release(struct device *dev)
 {
 	struct zcdn_device *zcdndev = to_zcdn_dev(dev);

-	ZCRYPT_DBF(DBF_INFO, "releasing zcdn device %d:%d\n",
-		   MAJOR(dev->devt), MINOR(dev->devt));
+	ZCRYPT_DBF_INFO("%s releasing zcdn device %d:%d\n",
+			__func__, MAJOR(dev->devt), MINOR(dev->devt));

 	kfree(zcdndev);
 }
@@ -407,8 +407,8 @@ static int zcdn_create(const char *name)
 		goto unlockout;
 	}

-	ZCRYPT_DBF(DBF_INFO, "created zcdn device %d:%d\n",
-		   MAJOR(devt), MINOR(devt));
+	ZCRYPT_DBF_INFO("%s created zcdn device %d:%d\n",
+			__func__, MAJOR(devt), MINOR(devt));

 unlockout:
 	mutex_unlock(&ap_perms_mutex);
@@ -550,9 +550,8 @@ static inline int zcrypt_check_ioctl(struct ap_perms *perms,
 	}

 	if (rc)
-		ZCRYPT_DBF(DBF_WARN,
-			   "ioctl check failed: ioctlnr=0x%04x rc=%d\n",
-			   ioctlnr, rc);
+		ZCRYPT_DBF_WARN("%s ioctl check failed: ioctlnr=0x%04x rc=%d\n",
+				__func__, ioctlnr, rc);

 	return rc;
 }
@@ -1446,7 +1445,7 @@ static int icarsamodexpo_ioctl(struct ap_perms *perms, unsigned long arg)
 	if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
 		rc = -EIO;
 	if (rc) {
-		ZCRYPT_DBF(DBF_DEBUG, "ioctl ICARSAMODEXPO rc=%d\n", rc);
+		ZCRYPT_DBF_DBG("ioctl ICARSAMODEXPO rc=%d\n", rc);
 		return rc;
 	}
 	return put_user(mex.outputdatalength, &umex->outputdatalength);
@@ -1491,7 +1490,7 @@ static int icarsacrt_ioctl(struct ap_perms *perms, unsigned long arg)
 	if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
 		rc = -EIO;
 	if (rc) {
-		ZCRYPT_DBF(DBF_DEBUG, "ioctl ICARSACRT rc=%d\n", rc);
+		ZCRYPT_DBF_DBG("ioctl ICARSACRT rc=%d\n", rc);
 		return rc;
 	}
 	return put_user(crt.outputdatalength, &ucrt->outputdatalength);
@@ -1509,12 +1508,12 @@ static int zsecsendcprb_ioctl(struct ap_perms *perms, unsigned long arg)
 		return -EFAULT;

 #ifdef CONFIG_ZCRYPT_DEBUG
-	if (xcRB.status & (1U << 31)) {
+	if ((xcRB.status & 0x8000FFFF) == 0x80004649 /* 'FI' */) {
 		if (!capable(CAP_SYS_ADMIN))
 			return -EPERM;
 		tr.fi.cmd = (u16)(xcRB.status >> 16);
 	}
-	xcRB.status &= 0x0000FFFF;
+	xcRB.status = 0;
 #endif

 	do {
@@ -1536,8 +1535,8 @@ static int zsecsendcprb_ioctl(struct ap_perms *perms, unsigned long arg)
 	if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
 		rc = -EIO;
 	if (rc)
-		ZCRYPT_DBF(DBF_DEBUG, "ioctl ZSENDCPRB rc=%d status=0x%x\n",
-			   rc, xcRB.status);
+		ZCRYPT_DBF_DBG("ioctl ZSENDCPRB rc=%d status=0x%x\n",
+			       rc, xcRB.status);
 	if (copy_to_user(uxcRB, &xcRB, sizeof(xcRB)))
 		return -EFAULT;
 	return rc;
@@ -1582,7 +1581,7 @@ static int zsendep11cprb_ioctl(struct ap_perms *perms, unsigned long arg)
 	if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
 		rc = -EIO;
 	if (rc)
-		ZCRYPT_DBF(DBF_DEBUG, "ioctl ZSENDEP11CPRB rc=%d\n", rc);
+		ZCRYPT_DBF_DBG("ioctl ZSENDEP11CPRB rc=%d\n", rc);
 	if (copy_to_user(uxcrb, &xcrb, sizeof(xcrb)))
 		return -EFAULT;
 	return rc;
@@ -1709,7 +1708,7 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
 	}
 	/* unknown ioctl number */
 	default:
-		ZCRYPT_DBF(DBF_DEBUG, "unknown ioctl 0x%08x\n", cmd);
+		ZCRYPT_DBF_DBG("unknown ioctl 0x%08x\n", cmd);
 		return -ENOIOCTLCMD;
 	}
 }
@@ -2048,16 +2047,14 @@ int zcrypt_wait_api_operational(void)
 			break;
 		case -ETIME:
 			/* timeout */
-			ZCRYPT_DBF(DBF_WARN,
-				   "%s ap_wait_init_apqn_bindings_complete() returned with ETIME\n",
-				   __func__);
+			ZCRYPT_DBF_WARN("%s ap_wait_init_apqn_bindings_complete()=ETIME\n",
+					__func__);
 			zcrypt_wait_api_state = -ETIME;
 			break;
 		default:
 			/* other failure */
-			ZCRYPT_DBF(DBF_DEBUG,
-				   "%s ap_wait_init_apqn_bindings_complete() failure rc=%d\n",
-				   __func__, rc);
+			ZCRYPT_DBF_DBG("%s ap_wait_init_apqn_bindings_complete()=%d\n",
+				       __func__, rc);
 			break;
 		}
 		break;
@@ -2079,7 +2076,7 @@ EXPORT_SYMBOL(zcrypt_wait_api_operational);

 int __init zcrypt_debug_init(void)
 {
-	zcrypt_dbf_info = debug_register("zcrypt", 1, 1,
+	zcrypt_dbf_info = debug_register("zcrypt", 2, 1,
 					 DBF_MAX_SPRINTF_ARGS * sizeof(long));
 	debug_register_view(zcrypt_dbf_info, &debug_sprintf_view);
 	debug_set_level(zcrypt_dbf_info, DBF_ERR);
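The only functional change in zcrypt_debug_init() is the second argument of debug_register(), which is the number of pages per debug area, so the trace buffer roughly doubles. A hedged sketch of the s390 debug-feature calls involved, with the argument meanings spelled out (the wrapper around it is illustrative, not part of this patch; DBF_ERR is assumed to come from the zcrypt debug header):

/* Sketch: registering an s390 debug feature area the way
 * zcrypt_debug_init() does after this change. */
#include <asm/debug.h>

#define DBF_MAX_SPRINTF_ARGS 6

static debug_info_t *my_dbf_info;

static int __init my_debug_init(void)
{
	/* name, pages per area (now 2), number of areas,
	 * size of one trace entry in bytes */
	my_dbf_info = debug_register("zcrypt", 2, 1,
				     DBF_MAX_SPRINTF_ARGS * sizeof(long));
	debug_register_view(my_dbf_info, &debug_sprintf_view);
	debug_set_level(my_dbf_info, DBF_ERR);	/* only errors by default */
	return 0;
}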
@@ -76,7 +76,7 @@ static ssize_t online_store(struct device *dev,
 	zc->online = online;
 	id = zc->card->id;

-	ZCRYPT_DBF(DBF_INFO, "card=%02x online=%d\n", id, online);
+	ZCRYPT_DBF_INFO("%s card=%02x online=%d\n", __func__, id, online);

 	ap_send_online_uevent(&ac->ap_dev, online);

@@ -189,7 +189,8 @@ int zcrypt_card_register(struct zcrypt_card *zc)

 	zc->online = 1;

-	ZCRYPT_DBF(DBF_INFO, "card=%02x register online=1\n", zc->card->id);
+	ZCRYPT_DBF_INFO("%s card=%02x register online=1\n",
+			__func__, zc->card->id);

 	rc = sysfs_create_group(&zc->card->ap_dev.device.kobj,
 				&zcrypt_card_attr_group);
@@ -211,7 +212,8 @@ EXPORT_SYMBOL(zcrypt_card_register);
  */
 void zcrypt_card_unregister(struct zcrypt_card *zc)
 {
-	ZCRYPT_DBF(DBF_INFO, "card=%02x unregister\n", zc->card->id);
+	ZCRYPT_DBF_INFO("%s card=%02x unregister\n",
+			__func__, zc->card->id);

 	spin_lock(&zcrypt_list_lock);
 	list_del_init(&zc->list);
@@ -17,7 +17,7 @@
 #define RC2ERR(rc) ((rc) ? DBF_ERR : DBF_INFO)
 #define RC2WARN(rc) ((rc) ? DBF_WARN : DBF_INFO)

-#define DBF_MAX_SPRINTF_ARGS 5
+#define DBF_MAX_SPRINTF_ARGS 6

 #define ZCRYPT_DBF(...) \
 	debug_sprintf_event(zcrypt_dbf_info, ##__VA_ARGS__)
@@ -98,9 +98,8 @@ static inline int convert_error(struct zcrypt_queue *zq,
 	case REP88_ERROR_MESSAGE_MALFORMD: /* 0x22 */
 	case REP88_ERROR_KEY_TYPE: /* 0x34 */
 		/* RY indicates malformed request */
-		ZCRYPT_DBF(DBF_WARN,
-			   "dev=%02x.%04x RY=0x%02x => rc=EINVAL\n",
-			   card, queue, ehdr->reply_code);
+		ZCRYPT_DBF_WARN("%s dev=%02x.%04x RY=0x%02x => rc=EINVAL\n",
+				__func__, card, queue, ehdr->reply_code);
 		return -EINVAL;
 	case REP82_ERROR_MACHINE_FAILURE: /* 0x10 */
 	case REP82_ERROR_MESSAGE_TYPE: /* 0x20 */
@@ -119,19 +118,18 @@ static inline int convert_error(struct zcrypt_queue *zq,
 			} __packed * head = reply->msg;
 			unsigned int apfs = *((u32 *)head->fmt2.apfs);

-			ZCRYPT_DBF(DBF_WARN,
-				   "dev=%02x.%04x RY=0x%02x apfs=0x%x => bus rescan, rc=EAGAIN\n",
-				   card, queue, ehdr->reply_code, apfs);
+			ZCRYPT_DBF_WARN(
+				"%s dev=%02x.%04x RY=0x%02x apfs=0x%x => bus rescan, rc=EAGAIN\n",
+				__func__, card, queue, ehdr->reply_code, apfs);
 		} else
-			ZCRYPT_DBF(DBF_WARN,
-				   "dev=%02x.%04x RY=0x%02x => bus rescan, rc=EAGAIN\n",
-				   card, queue, ehdr->reply_code);
+			ZCRYPT_DBF_WARN("%s dev=%02x.%04x RY=0x%02x => bus rescan, rc=EAGAIN\n",
+					__func__, card, queue,
+					ehdr->reply_code);
 		return -EAGAIN;
 	default:
 		/* Assume request is valid and a retry will be worth it */
-		ZCRYPT_DBF(DBF_WARN,
-			   "dev=%02x.%04x RY=0x%02x => rc=EAGAIN\n",
-			   card, queue, ehdr->reply_code);
+		ZCRYPT_DBF_WARN("%s dev=%02x.%04x RY=0x%02x => rc=EAGAIN\n",
+				__func__, card, queue, ehdr->reply_code);
 		return -EAGAIN;
 	}
 }
@@ -369,12 +369,10 @@ static int convert_type80(struct zcrypt_queue *zq,
 		zq->online = 0;
 		pr_err("Crypto dev=%02x.%04x code=0x%02x => online=0 rc=EAGAIN\n",
 		       AP_QID_CARD(zq->queue->qid),
-		       AP_QID_QUEUE(zq->queue->qid),
-		       t80h->code);
-		ZCRYPT_DBF_ERR("dev=%02x.%04x code=0x%02x => online=0 rc=EAGAIN\n",
-			       AP_QID_CARD(zq->queue->qid),
-			       AP_QID_QUEUE(zq->queue->qid),
-			       t80h->code);
+		       AP_QID_QUEUE(zq->queue->qid), t80h->code);
+		ZCRYPT_DBF_ERR("%s dev=%02x.%04x code=0x%02x => online=0 rc=EAGAIN\n",
+			       __func__, AP_QID_CARD(zq->queue->qid),
+			       AP_QID_QUEUE(zq->queue->qid), t80h->code);
 		ap_send_online_uevent(&zq->queue->ap_dev, zq->online);
 		return -EAGAIN;
 	}
@@ -409,10 +407,10 @@ static int convert_response_cex2a(struct zcrypt_queue *zq,
 		       AP_QID_CARD(zq->queue->qid),
 		       AP_QID_QUEUE(zq->queue->qid),
 		       (int) rtype);
-		ZCRYPT_DBF_ERR("dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
-			       AP_QID_CARD(zq->queue->qid),
-			       AP_QID_QUEUE(zq->queue->qid),
-			       (int) rtype);
+		ZCRYPT_DBF_ERR(
+			"%s dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
+			__func__, AP_QID_CARD(zq->queue->qid),
+			AP_QID_QUEUE(zq->queue->qid), (int) rtype);
 		ap_send_online_uevent(&zq->queue->ap_dev, zq->online);
 		return -EAGAIN;
 	}
@@ -649,8 +649,8 @@ static int convert_type86_ica(struct zcrypt_queue *zq,
 		    (service_rc == 8 && service_rs == 72) ||
 		    (service_rc == 8 && service_rs == 770) ||
 		    (service_rc == 12 && service_rs == 769)) {
-			ZCRYPT_DBF_WARN("dev=%02x.%04x rc/rs=%d/%d => rc=EINVAL\n",
-					AP_QID_CARD(zq->queue->qid),
+			ZCRYPT_DBF_WARN("%s dev=%02x.%04x rc/rs=%d/%d => rc=EINVAL\n",
+					__func__, AP_QID_CARD(zq->queue->qid),
 					AP_QID_QUEUE(zq->queue->qid),
 					(int) service_rc, (int) service_rs);
 			return -EINVAL;
@@ -660,8 +660,8 @@ static int convert_type86_ica(struct zcrypt_queue *zq,
 		       AP_QID_CARD(zq->queue->qid),
 		       AP_QID_QUEUE(zq->queue->qid),
 		       (int) service_rc, (int) service_rs);
-		ZCRYPT_DBF_ERR("dev=%02x.%04x rc/rs=%d/%d => online=0 rc=EAGAIN\n",
-			       AP_QID_CARD(zq->queue->qid),
+		ZCRYPT_DBF_ERR("%s dev=%02x.%04x rc/rs=%d/%d => online=0 rc=EAGAIN\n",
+			       __func__, AP_QID_CARD(zq->queue->qid),
 			       AP_QID_QUEUE(zq->queue->qid),
 			       (int) service_rc, (int) service_rs);
 		ap_send_online_uevent(&zq->queue->ap_dev, zq->online);
@@ -806,10 +806,10 @@ static int convert_response_ica(struct zcrypt_queue *zq,
 		       AP_QID_CARD(zq->queue->qid),
 		       AP_QID_QUEUE(zq->queue->qid),
 		       (int) msg->hdr.type);
-		ZCRYPT_DBF_ERR("dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
-			       AP_QID_CARD(zq->queue->qid),
-			       AP_QID_QUEUE(zq->queue->qid),
-			       (int) msg->hdr.type);
+		ZCRYPT_DBF_ERR(
+			"%s dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
+			__func__, AP_QID_CARD(zq->queue->qid),
+			AP_QID_QUEUE(zq->queue->qid), (int) msg->hdr.type);
 		ap_send_online_uevent(&zq->queue->ap_dev, zq->online);
 		return -EAGAIN;
 	}
@@ -841,10 +841,10 @@ static int convert_response_xcrb(bool userspace, struct zcrypt_queue *zq,
 		       AP_QID_CARD(zq->queue->qid),
 		       AP_QID_QUEUE(zq->queue->qid),
 		       (int) msg->hdr.type);
-		ZCRYPT_DBF_ERR("dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
-			       AP_QID_CARD(zq->queue->qid),
-			       AP_QID_QUEUE(zq->queue->qid),
-			       (int) msg->hdr.type);
+		ZCRYPT_DBF_ERR(
+			"%s dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
+			__func__, AP_QID_CARD(zq->queue->qid),
+			AP_QID_QUEUE(zq->queue->qid), (int) msg->hdr.type);
 		ap_send_online_uevent(&zq->queue->ap_dev, zq->online);
 		return -EAGAIN;
 	}
@@ -871,10 +871,10 @@ static int convert_response_ep11_xcrb(bool userspace, struct zcrypt_queue *zq,
 		       AP_QID_CARD(zq->queue->qid),
 		       AP_QID_QUEUE(zq->queue->qid),
 		       (int) msg->hdr.type);
-		ZCRYPT_DBF_ERR("dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
-			       AP_QID_CARD(zq->queue->qid),
-			       AP_QID_QUEUE(zq->queue->qid),
-			       (int) msg->hdr.type);
+		ZCRYPT_DBF_ERR(
+			"%s dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
+			__func__, AP_QID_CARD(zq->queue->qid),
+			AP_QID_QUEUE(zq->queue->qid), (int) msg->hdr.type);
 		ap_send_online_uevent(&zq->queue->ap_dev, zq->online);
 		return -EAGAIN;
 	}
@@ -902,10 +902,10 @@ static int convert_response_rng(struct zcrypt_queue *zq,
 		       AP_QID_CARD(zq->queue->qid),
 		       AP_QID_QUEUE(zq->queue->qid),
 		       (int) msg->hdr.type);
-		ZCRYPT_DBF_ERR("dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
-			       AP_QID_CARD(zq->queue->qid),
-			       AP_QID_QUEUE(zq->queue->qid),
-			       (int) msg->hdr.type);
+		ZCRYPT_DBF_ERR(
+			"%s dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
+			__func__, AP_QID_CARD(zq->queue->qid),
+			AP_QID_QUEUE(zq->queue->qid), (int) msg->hdr.type);
 		ap_send_online_uevent(&zq->queue->ap_dev, zq->online);
 		return -EAGAIN;
 	}
@@ -65,10 +65,9 @@ static ssize_t online_store(struct device *dev,
 		return -EINVAL;
 	zq->online = online;

-	ZCRYPT_DBF(DBF_INFO, "queue=%02x.%04x online=%d\n",
-		   AP_QID_CARD(zq->queue->qid),
-		   AP_QID_QUEUE(zq->queue->qid),
-		   online);
+	ZCRYPT_DBF_INFO("%s queue=%02x.%04x online=%d\n",
+			__func__, AP_QID_CARD(zq->queue->qid),
+			AP_QID_QUEUE(zq->queue->qid), online);

 	ap_send_online_uevent(&aq->ap_dev, online);

@@ -175,8 +174,9 @@ int zcrypt_queue_register(struct zcrypt_queue *zq)
 	zq->zcard = zc;
 	zq->online = 1;	/* New devices are online by default. */

-	ZCRYPT_DBF(DBF_INFO, "queue=%02x.%04x register online=1\n",
-		   AP_QID_CARD(zq->queue->qid), AP_QID_QUEUE(zq->queue->qid));
+	ZCRYPT_DBF_INFO("%s queue=%02x.%04x register online=1\n",
+			__func__, AP_QID_CARD(zq->queue->qid),
+			AP_QID_QUEUE(zq->queue->qid));

 	list_add_tail(&zq->list, &zc->zqueues);
 	spin_unlock(&zcrypt_list_lock);
@@ -215,8 +215,9 @@ void zcrypt_queue_unregister(struct zcrypt_queue *zq)
 {
 	struct zcrypt_card *zc;

-	ZCRYPT_DBF(DBF_INFO, "queue=%02x.%04x unregister\n",
-		   AP_QID_CARD(zq->queue->qid), AP_QID_QUEUE(zq->queue->qid));
+	ZCRYPT_DBF_INFO("%s queue=%02x.%04x unregister\n",
+			__func__, AP_QID_CARD(zq->queue->qid),
+			AP_QID_QUEUE(zq->queue->qid));

 	zc = zq->zcard;
 	spin_lock(&zcrypt_list_lock);
@@ -26,7 +26,7 @@ config SAMPLE_TRACE_PRINTK
 config SAMPLE_FTRACE_DIRECT
 	tristate "Build register_ftrace_direct() example"
 	depends on DYNAMIC_FTRACE_WITH_DIRECT_CALLS && m
-	depends on X86_64 # has x86_64 inlined asm
+	depends on HAVE_SAMPLE_FTRACE_DIRECT
 	help
 	  This builds an ftrace direct function example
 	  that hooks to wake_up_process and prints the parameters.
@@ -224,3 +224,9 @@ config SAMPLE_WATCH_QUEUE
 	  sb_notify() syscalls and the KEYCTL_WATCH_KEY keyctl() function.

 endif # SAMPLES
+
+config HAVE_SAMPLE_FTRACE_DIRECT
+	bool
+
+config HAVE_SAMPLE_FTRACE_MULTI_DIRECT
+	bool
@@ -21,6 +21,7 @@ subdir-$(CONFIG_SAMPLE_TIMER) += timers
 obj-$(CONFIG_SAMPLE_TRACE_EVENTS) += trace_events/
 obj-$(CONFIG_SAMPLE_TRACE_PRINTK) += trace_printk/
 obj-$(CONFIG_SAMPLE_FTRACE_DIRECT) += ftrace/
+obj-$(CONFIG_SAMPLE_FTRACE_MULTI_DIRECT) += ftrace/
 obj-$(CONFIG_SAMPLE_TRACE_ARRAY) += ftrace/
 subdir-$(CONFIG_SAMPLE_UHID) += uhid
 obj-$(CONFIG_VIDEO_PCI_SKELETON) += v4l/
@@ -3,7 +3,7 @@
 obj-$(CONFIG_SAMPLE_FTRACE_DIRECT) += ftrace-direct.o
 obj-$(CONFIG_SAMPLE_FTRACE_DIRECT) += ftrace-direct-too.o
 obj-$(CONFIG_SAMPLE_FTRACE_DIRECT) += ftrace-direct-modify.o
-obj-$(CONFIG_SAMPLE_FTRACE_DIRECT) += ftrace-direct-multi.o
+obj-$(CONFIG_SAMPLE_FTRACE_MULTI_DIRECT) += ftrace-direct-multi.o

 CFLAGS_sample-trace-array.o := -I$(src)
 obj-$(CONFIG_SAMPLE_TRACE_ARRAY) += sample-trace-array.o
@@ -2,6 +2,7 @@
 #include <linux/module.h>
 #include <linux/kthread.h>
 #include <linux/ftrace.h>
+#include <asm/asm-offsets.h>

 void my_direct_func1(void)
 {
@@ -18,6 +19,8 @@ extern void my_tramp2(void *);

 static unsigned long my_ip = (unsigned long)schedule;

+#ifdef CONFIG_X86_64
+
 asm (
 "	.pushsection    .text, \"ax\", @progbits\n"
 "	.type		my_tramp1, @function\n"
@@ -41,6 +44,47 @@ asm (
 "	.popsection\n"
 );

+#endif /* CONFIG_X86_64 */
+
+#ifdef CONFIG_S390
+
+asm (
+"	.pushsection	.text, \"ax\", @progbits\n"
+"	.type		my_tramp1, @function\n"
+"	.globl		my_tramp1\n"
+"   my_tramp1:"
+"	lgr		%r1,%r15\n"
+"	stmg		%r0,%r5,"__stringify(__SF_GPRS)"(%r15)\n"
+"	stg		%r14,"__stringify(__SF_GPRS+8*8)"(%r15)\n"
+"	aghi		%r15,"__stringify(-STACK_FRAME_OVERHEAD)"\n"
+"	stg		%r1,"__stringify(__SF_BACKCHAIN)"(%r15)\n"
+"	brasl		%r14,my_direct_func1\n"
+"	aghi		%r15,"__stringify(STACK_FRAME_OVERHEAD)"\n"
+"	lmg		%r0,%r5,"__stringify(__SF_GPRS)"(%r15)\n"
+"	lg		%r14,"__stringify(__SF_GPRS+8*8)"(%r15)\n"
+"	lgr		%r1,%r0\n"
+"	br		%r1\n"
+"	.size		my_tramp1, .-my_tramp1\n"
+"	.type		my_tramp2, @function\n"
+"	.globl		my_tramp2\n"
+"   my_tramp2:"
+"	lgr		%r1,%r15\n"
+"	stmg		%r0,%r5,"__stringify(__SF_GPRS)"(%r15)\n"
+"	stg		%r14,"__stringify(__SF_GPRS+8*8)"(%r15)\n"
+"	aghi		%r15,"__stringify(-STACK_FRAME_OVERHEAD)"\n"
+"	stg		%r1,"__stringify(__SF_BACKCHAIN)"(%r15)\n"
+"	brasl		%r14,my_direct_func2\n"
+"	aghi		%r15,"__stringify(STACK_FRAME_OVERHEAD)"\n"
+"	lmg		%r0,%r5,"__stringify(__SF_GPRS)"(%r15)\n"
+"	lg		%r14,"__stringify(__SF_GPRS+8*8)"(%r15)\n"
+"	lgr		%r1,%r0\n"
+"	br		%r1\n"
+"	.size		my_tramp2, .-my_tramp2\n"
+"	.popsection\n"
+);
+
+#endif /* CONFIG_S390 */
+
 static unsigned long my_tramp = (unsigned long)my_tramp1;
 static unsigned long tramps[2] = {
 	(unsigned long)my_tramp1,
@@ -3,6 +3,7 @@

 #include <linux/mm.h> /* for handle_mm_fault() */
 #include <linux/ftrace.h>
+#include <asm/asm-offsets.h>

 void my_direct_func(struct vm_area_struct *vma,
 		    unsigned long address, unsigned int flags)
@@ -13,6 +14,8 @@ void my_direct_func(struct vm_area_struct *vma,

 extern void my_tramp(void *);

+#ifdef CONFIG_X86_64
+
 asm (
 "	.pushsection    .text, \"ax\", @progbits\n"
 "	.type		my_tramp, @function\n"
@@ -33,6 +36,31 @@ asm (
 "	.popsection\n"
 );

+#endif /* CONFIG_X86_64 */
+
+#ifdef CONFIG_S390
+
+asm (
+"	.pushsection	.text, \"ax\", @progbits\n"
+"	.type		my_tramp, @function\n"
+"	.globl		my_tramp\n"
+"   my_tramp:"
+"	lgr		%r1,%r15\n"
+"	stmg		%r0,%r5,"__stringify(__SF_GPRS)"(%r15)\n"
+"	stg		%r14,"__stringify(__SF_GPRS+8*8)"(%r15)\n"
+"	aghi		%r15,"__stringify(-STACK_FRAME_OVERHEAD)"\n"
+"	stg		%r1,"__stringify(__SF_BACKCHAIN)"(%r15)\n"
+"	brasl		%r14,my_direct_func\n"
+"	aghi		%r15,"__stringify(STACK_FRAME_OVERHEAD)"\n"
+"	lmg		%r0,%r5,"__stringify(__SF_GPRS)"(%r15)\n"
+"	lg		%r14,"__stringify(__SF_GPRS+8*8)"(%r15)\n"
+"	lgr		%r1,%r0\n"
+"	br		%r1\n"
+"	.size		my_tramp, .-my_tramp\n"
+"	.popsection\n"
+);
+
+#endif /* CONFIG_S390 */
+
 static int __init ftrace_direct_init(void)
 {
@@ -3,6 +3,7 @@

 #include <linux/sched.h> /* for wake_up_process() */
 #include <linux/ftrace.h>
+#include <asm/asm-offsets.h>

 void my_direct_func(struct task_struct *p)
 {
@@ -11,6 +12,8 @@ void my_direct_func(struct task_struct *p)

 extern void my_tramp(void *);

+#ifdef CONFIG_X86_64
+
 asm (
 "	.pushsection    .text, \"ax\", @progbits\n"
 "	.type		my_tramp, @function\n"
@@ -27,6 +30,31 @@ asm (
 "	.popsection\n"
 );

+#endif /* CONFIG_X86_64 */
+
+#ifdef CONFIG_S390
+
+asm (
+"	.pushsection	.text, \"ax\", @progbits\n"
+"	.type		my_tramp, @function\n"
+"	.globl		my_tramp\n"
+"   my_tramp:"
+"	lgr		%r1,%r15\n"
+"	stmg		%r0,%r5,"__stringify(__SF_GPRS)"(%r15)\n"
+"	stg		%r14,"__stringify(__SF_GPRS+8*8)"(%r15)\n"
+"	aghi		%r15,"__stringify(-STACK_FRAME_OVERHEAD)"\n"
+"	stg		%r1,"__stringify(__SF_BACKCHAIN)"(%r15)\n"
+"	brasl		%r14,my_direct_func\n"
+"	aghi		%r15,"__stringify(STACK_FRAME_OVERHEAD)"\n"
+"	lmg		%r0,%r5,"__stringify(__SF_GPRS)"(%r15)\n"
+"	lg		%r14,"__stringify(__SF_GPRS+8*8)"(%r15)\n"
+"	lgr		%r1,%r0\n"
+"	br		%r1\n"
+"	.size		my_tramp, .-my_tramp\n"
+"	.popsection\n"
+);
+
+#endif /* CONFIG_S390 */
+
 static int __init ftrace_direct_init(void)
 {
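For completeness, the part of this sample (ftrace-direct.o in the Makefile hunk above) that actually wires the trampoline up is not shown in these hunks. A sketch of what the module init/exit around them looks like, using the register_ftrace_direct()/unregister_ftrace_direct() interface; treat it as an abbreviated illustration rather than the sample's exact code:

/* Sketch of the registration side of the wake_up_process sample. */
#include <linux/module.h>
#include <linux/sched.h>	/* for wake_up_process() */
#include <linux/ftrace.h>

extern void my_tramp(void *);	/* defined in the asm blocks above */

static int __init ftrace_direct_init(void)
{
	/* Attach the architecture-specific trampoline directly to the
	 * ftrace location of wake_up_process(). */
	return register_ftrace_direct((unsigned long)wake_up_process,
				      (unsigned long)my_tramp);
}

static void __exit ftrace_direct_exit(void)
{
	unregister_ftrace_direct((unsigned long)wake_up_process,
				 (unsigned long)my_tramp);
}

module_init(ftrace_direct_init);
module_exit(ftrace_direct_exit);
MODULE_LICENSE("GPL");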
@@ -22,6 +22,9 @@ ppc64*)
 ppc*)
   ARG1=%r3
 ;;
+s390*)
+  ARG1=%r2
+;;
 *)
   echo "Please implement other architecture here"
   exit_untested
@@ -32,6 +32,10 @@ ppc*)
   GOODREG=%r3
   BADREG=%msr
 ;;
+s390*)
+  GOODREG=%r2
+  BADREG=%s2
+;;
 *)
   echo "Please implement other architecture here"
   exit_untested