s390 updates for 6.9 merge window
Merge tag 's390-6.9-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull s390 updates from Heiko Carstens:

 - Various virtual vs physical address usage fixes

 - Fix error handling in the Processor Activity Instrumentation device
   driver, and export the number of counters with a sysfs file

 - Allow for multiple events when Processor Activity Instrumentation
   counters are monitored in system-wide sampling

 - Change multiplier and shift values of the Time-of-Day clock source
   to improve steering precision

 - Remove a couple of unneeded GFP_DMA flags from allocations

 - Disable mmap alignment if randomize_va_space is also disabled, to
   avoid a too small heap

 - Various changes to allow s390 to be compiled with LLVM=1, since
   ld.lld and llvm-objcopy will have proper s390 support with clang 19

 - Add __uninitialized macro to Compiler Attributes. This is helpful
   with s390's FPU code, where some users have up to 520 byte stack
   frames. Clearing such stack frames (if INIT_STACK_ALL_PATTERN or
   INIT_STACK_ALL_ZERO is enabled) before they are used contradicts the
   intention (performance improvement) of such code sections.

 - Convert switch_to() to an out-of-line function, and use the generic
   switch_to header file

 - Replace the usage of s390's debug feature with pr_debug() calls
   within the zcrypt device driver

 - Improve hotplug support of the Adjunct Processor device driver

 - Improve retry handling in the zcrypt device driver

 - Various changes to the in-kernel FPU code:

     - Make in-kernel FPU sections preemptible

     - Convert various larger inline assemblies and assembler files to
       C, mainly by using single-instruction inline assemblies. This
       increases readability, and also makes it easier to add proper
       instrumentation hooks

     - Cleanup of the header files

 - Provide fast variants of csum_partial() and
   csum_partial_copy_nocheck() based on vector instructions

 - Introduce and use a lock to synchronize accesses to zpci device data
   structures to avoid inconsistent states caused by concurrent
   accesses

 - Compile the kernel without -fPIE. This addresses the following
   problems if the kernel is compiled with -fPIE:

     - It uses dynamic symbols (.dynsym), for which the linker refuses
       to allow more than 64k sections. This can break features which
       use '-ffunction-sections' and '-fdata-sections', including
       kpatch-build and function granular KASLR

     - It unnecessarily uses GOT relocations, adding an extra layer of
       indirection for many memory accesses

 - Fix shared_cpu_list for CPU-private L2 caches, which were
   incorrectly reported as globally shared

* tag 's390-6.9-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (117 commits)
  s390/tools: handle rela R_390_GOTPCDBL/R_390_GOTOFF64
  s390/cache: prevent rebuild of shared_cpu_list
  s390/crypto: remove retry loop with sleep from PAES pkey invocation
  s390/pkey: improve pkey retry behavior
  s390/zcrypt: improve zcrypt retry behavior
  s390/zcrypt: introduce retries on in-kernel send CPRB functions
  s390/ap: introduce mutex to lock the AP bus scan
  s390/ap: rework ap_scan_bus() to return true on config change
  s390/ap: clarify AP scan bus related functions and variables
  s390/ap: rearm APQNs bindings complete completion
  s390/configs: increase number of LOCKDEP_BITS
  s390/vfio-ap: handle hardware checkstop state on queue reset operation
  s390/pai: change sampling event assignment for PMU device driver
  s390/boot: fix minor comment style damages
  s390/boot: do not check for zero-termination relocation entry
  s390/boot: make type of __vmlinux_relocs_64_start|end consistent
  s390/boot: sanitize kaslr_adjust_relocs() function prototype
  s390/boot: simplify GOT handling
  s390: vmlinux.lds.S: fix .got.plt assertion
  s390/boot: workaround current 'llvm-objdump -t -j ...' behavior
  ...
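The __uninitialized annotation called out above deserves a quick illustration. Below is a minimal sketch of how such an attribute is typically wired up and used; the fallback guard and the example struct are assumptions for illustration, not code from this merge (the real definition lives in include/linux/compiler_attributes.h):

/*
 * Sketch: let a local variable opt out of automatic stack
 * initialization on compilers that support the attribute.
 */
#ifndef __has_attribute
#define __has_attribute(x) 0	/* assumed fallback for old compilers */
#endif

#if __has_attribute(__uninitialized__)
#define __uninitialized __attribute__((__uninitialized__))
#else
#define __uninitialized
#endif

/* Hypothetical FPU state buffer with a ~520 byte stack footprint. */
struct fpu_state_example {
	unsigned int mask;
	unsigned long vxrs[16][4];
};

void fpu_heavy_example(void)
{
	/*
	 * The buffer is fully written by the register save that follows,
	 * so clearing it up front (INIT_STACK_ALL_PATTERN/_ZERO) would
	 * only cost cycles. __uninitialized skips that clearing.
	 */
	struct fpu_state_example state __uninitialized;

	/* ... save vector registers into state, use them, restore ... */
	(void)state;
}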
commit 691632f0e8
arch/s390/Kconfig

@@ -127,6 +127,7 @@ config S390
	select ARCH_WANT_DEFAULT_BPF_JIT
	select ARCH_WANT_IPC_PARSE_VERSION
	select ARCH_WANT_KERNEL_PMD_MKWRITE
	select ARCH_WANT_LD_ORPHAN_WARN
	select ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP
	select BUILDTIME_TABLE_SORT
	select CLONE_BACKWARDS2
@@ -448,7 +449,7 @@ config COMPAT
	select COMPAT_OLD_SIGACTION
	select HAVE_UID16
	depends on MULTIUSER
	depends on !CC_IS_CLANG
	depends on !CC_IS_CLANG && !LD_IS_LLD
	help
	  Select this option if you want to enable your system kernel to
	  handle system-calls from ELF binaries for 31 bit ESA. This option
@@ -582,14 +583,23 @@ config RELOCATABLE
	help
	  This builds a kernel image that retains relocation information
	  so it can be loaded at an arbitrary address.
	  The kernel is linked as a position-independent executable (PIE)
	  and contains dynamic relocations which are processed early in the
	  bootup process.
	  The relocations make the kernel image about 15% larger (compressed
	  10%), but are discarded at runtime.
	  Note: this option exists only for documentation purposes, please do
	  not remove it.

config PIE_BUILD
	def_bool CC_IS_CLANG && !$(cc-option,-munaligned-symbols)
	help
	  If the compiler is unable to generate code that can manage unaligned
	  symbols, the kernel is linked as a position-independent executable
	  (PIE) and includes dynamic relocations that are processed early
	  during bootup.

	  For kpatch functionality, it is recommended to build the kernel
	  without the PIE_BUILD option. PIE_BUILD is only enabled when the
	  compiler lacks proper support for handling unaligned symbols.

config RANDOMIZE_BASE
	bool "Randomize the address of the kernel image (KASLR)"
	default y
arch/s390/Makefile

@@ -14,8 +14,14 @@ KBUILD_AFLAGS_MODULE += -fPIC
KBUILD_CFLAGS_MODULE += -fPIC
KBUILD_AFLAGS	+= -m64
KBUILD_CFLAGS	+= -m64
ifdef CONFIG_PIE_BUILD
KBUILD_CFLAGS	+= -fPIE
LDFLAGS_vmlinux	:= -pie
LDFLAGS_vmlinux	:= -pie -z notext
else
KBUILD_CFLAGS	+= $(call cc-option,-munaligned-symbols,)
LDFLAGS_vmlinux	:= --emit-relocs --discard-none
extra_tools	:= relocs
endif
aflags_dwarf	:= -Wa,-gdwarf-2
KBUILD_AFLAGS_DECOMPRESSOR := $(CLANG_FLAGS) -m64 -D__ASSEMBLY__
ifndef CONFIG_AS_IS_LLVM

@@ -143,7 +149,7 @@ archheaders:

archprepare:
	$(Q)$(MAKE) $(build)=$(syscalls) kapi
	$(Q)$(MAKE) $(build)=$(tools) kapi
	$(Q)$(MAKE) $(build)=$(tools) kapi $(extra_tools)
ifeq ($(KBUILD_EXTMOD),)
# We need to generate vdso-offsets.h before compiling certain files in kernel/.
# In order to do that, we should use the archprepare target, but we can't since
arch/s390/boot/.gitignore (vendored)

@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
image
bzImage
relocs.S
section_cmp.*
vmlinux
vmlinux.lds
arch/s390/boot/Makefile

@@ -37,7 +37,8 @@ CFLAGS_sclp_early_core.o += -I$(srctree)/drivers/s390/char

obj-y	:= head.o als.o startup.o physmem_info.o ipl_parm.o ipl_report.o vmem.o
obj-y	+= string.o ebcdic.o sclp_early_core.o mem.o ipl_vmparm.o cmdline.o
obj-y	+= version.o pgm_check_info.o ctype.o ipl_data.o machine_kexec_reloc.o
obj-y	+= version.o pgm_check_info.o ctype.o ipl_data.o
obj-y	+= $(if $(CONFIG_PIE_BUILD),machine_kexec_reloc.o,relocs.o)
obj-$(findstring y, $(CONFIG_PROTECTED_VIRTUALIZATION_GUEST) $(CONFIG_PGSTE))	+= uv.o
obj-$(CONFIG_RANDOMIZE_BASE)	+= kaslr.o
obj-y	+= $(if $(CONFIG_KERNEL_UNCOMPRESSED),,decompressor.o) info.o

@@ -48,6 +49,9 @@ targets := bzImage section_cmp.boot.data section_cmp.boot.preserved.data $(obj-y)
targets += vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2
targets += vmlinux.bin.xz vmlinux.bin.lzma vmlinux.bin.lzo vmlinux.bin.lz4
targets += vmlinux.bin.zst info.bin syms.bin vmlinux.syms $(obj-all)
ifndef CONFIG_PIE_BUILD
targets += relocs.S
endif

OBJECTS := $(addprefix $(obj)/,$(obj-y))
OBJECTS_ALL := $(addprefix $(obj)/,$(obj-all))

@@ -56,9 +60,9 @@ clean-files += vmlinux.map

quiet_cmd_section_cmp = SECTCMP $*
define cmd_section_cmp
	s1=`$(OBJDUMP) -t -j "$*" "$<" | sort | \
	s1=`$(OBJDUMP) -t "$<" | grep "\s$*\s\+" | sort | \
		sed -n "/0000000000000000/! s/.*\s$*\s\+//p" | sha256sum`; \
	s2=`$(OBJDUMP) -t -j "$*" "$(word 2,$^)" | sort | \
	s2=`$(OBJDUMP) -t "$(word 2,$^)" | grep "\s$*\s\+" | sort | \
		sed -n "/0000000000000000/! s/.*\s$*\s\+//p" | sha256sum`; \
	if [ "$$s1" != "$$s2" ]; then \
		echo "error: section $* differs between $< and $(word 2,$^)" >&2; \

@@ -73,11 +77,12 @@ $(obj)/bzImage: $(obj)/vmlinux $(obj)/section_cmp.boot.data $(obj)/section_cmp.b
$(obj)/section_cmp%: vmlinux $(obj)/vmlinux FORCE
	$(call if_changed,section_cmp)

LDFLAGS_vmlinux := --oformat $(LD_BFD) -e startup $(if $(CONFIG_VMLINUX_MAP),-Map=$(obj)/vmlinux.map) --build-id=sha1 -T
LDFLAGS_vmlinux-$(CONFIG_LD_ORPHAN_WARN) := --orphan-handling=$(CONFIG_LD_ORPHAN_WARN_LEVEL)
LDFLAGS_vmlinux := $(LDFLAGS_vmlinux-y) --oformat $(LD_BFD) -e startup $(if $(CONFIG_VMLINUX_MAP),-Map=$(obj)/vmlinux.map) --build-id=sha1 -T
$(obj)/vmlinux: $(obj)/vmlinux.lds $(OBJECTS_ALL) FORCE
	$(call if_changed,ld)

LDFLAGS_vmlinux.syms := --oformat $(LD_BFD) -e startup -T
LDFLAGS_vmlinux.syms := $(LDFLAGS_vmlinux-y) --oformat $(LD_BFD) -e startup -T
$(obj)/vmlinux.syms: $(obj)/vmlinux.lds $(OBJECTS) FORCE
	$(call if_changed,ld)

@@ -93,7 +98,7 @@ OBJCOPYFLAGS_syms.o := -I binary -O elf64-s390 -B s390:64-bit --rename-section .
$(obj)/syms.o: $(obj)/syms.bin FORCE
	$(call if_changed,objcopy)

OBJCOPYFLAGS_info.bin := -O binary --only-section=.vmlinux.info --set-section-flags .vmlinux.info=load
OBJCOPYFLAGS_info.bin := -O binary --only-section=.vmlinux.info --set-section-flags .vmlinux.info=alloc,load
$(obj)/info.bin: vmlinux FORCE
	$(call if_changed,objcopy)

@@ -105,6 +110,14 @@ OBJCOPYFLAGS_vmlinux.bin := -O binary --remove-section=.comment --remove-section
$(obj)/vmlinux.bin: vmlinux FORCE
	$(call if_changed,objcopy)

ifndef CONFIG_PIE_BUILD
CMD_RELOCS=arch/s390/tools/relocs
quiet_cmd_relocs = RELOCS  $@
      cmd_relocs = $(CMD_RELOCS) $< > $@
$(obj)/relocs.S: vmlinux FORCE
	$(call if_changed,relocs)
endif

suffix-$(CONFIG_KERNEL_GZIP)  := .gz
suffix-$(CONFIG_KERNEL_BZIP2) := .bz2
suffix-$(CONFIG_KERNEL_LZ4)   := .lz4
arch/s390/boot/boot.h

@@ -25,9 +25,14 @@ struct vmlinux_info {
	unsigned long bootdata_size;
	unsigned long bootdata_preserved_off;
	unsigned long bootdata_preserved_size;
#ifdef CONFIG_PIE_BUILD
	unsigned long dynsym_start;
	unsigned long rela_dyn_start;
	unsigned long rela_dyn_end;
#else
	unsigned long got_start;
	unsigned long got_end;
#endif
	unsigned long amode31_size;
	unsigned long init_mm_off;
	unsigned long swapper_pg_dir_off;

@@ -83,6 +88,7 @@ extern unsigned long vmalloc_size;
extern int vmalloc_size_set;
extern char __boot_data_start[], __boot_data_end[];
extern char __boot_data_preserved_start[], __boot_data_preserved_end[];
extern char __vmlinux_relocs_64_start[], __vmlinux_relocs_64_end[];
extern char _decompressor_syms_start[], _decompressor_syms_end[];
extern char _stack_start[], _stack_end[];
extern char _end[], _decompressor_end[];
arch/s390/boot/startup.c

@@ -141,7 +141,8 @@ static void copy_bootdata(void)
	memcpy((void *)vmlinux.bootdata_preserved_off, __boot_data_preserved_start, vmlinux.bootdata_preserved_size);
}

static void handle_relocs(unsigned long offset)
#ifdef CONFIG_PIE_BUILD
static void kaslr_adjust_relocs(unsigned long min_addr, unsigned long max_addr, unsigned long offset)
{
	Elf64_Rela *rela_start, *rela_end, *rela;
	int r_type, r_sym, rc;

@@ -172,6 +173,54 @@ static void handle_relocs(unsigned long offset)
	}
}

static void kaslr_adjust_got(unsigned long offset) {}
static void rescue_relocs(void) {}
static void free_relocs(void) {}
#else
static int *vmlinux_relocs_64_start;
static int *vmlinux_relocs_64_end;

static void rescue_relocs(void)
{
	unsigned long size = __vmlinux_relocs_64_end - __vmlinux_relocs_64_start;

	vmlinux_relocs_64_start = (void *)physmem_alloc_top_down(RR_RELOC, size, 0);
	vmlinux_relocs_64_end = (void *)vmlinux_relocs_64_start + size;
	memmove(vmlinux_relocs_64_start, __vmlinux_relocs_64_start, size);
}

static void free_relocs(void)
{
	physmem_free(RR_RELOC);
}

static void kaslr_adjust_relocs(unsigned long min_addr, unsigned long max_addr, unsigned long offset)
{
	int *reloc;
	long loc;

	/* Adjust R_390_64 relocations */
	for (reloc = vmlinux_relocs_64_start; reloc < vmlinux_relocs_64_end; reloc++) {
		loc = (long)*reloc + offset;
		if (loc < min_addr || loc > max_addr)
			error("64-bit relocation outside of kernel!\n");
		*(u64 *)loc += offset;
	}
}

static void kaslr_adjust_got(unsigned long offset)
{
	u64 *entry;

	/*
	 * Even without -fPIE, Clang still uses a global offset table for some
	 * reason. Adjust the GOT entries.
	 */
	for (entry = (u64 *)vmlinux.got_start; entry < (u64 *)vmlinux.got_end; entry++)
		*entry += offset;
}
#endif

/*
 * Merge information from several sources into a single ident_map_size value.
 * "ident_map_size" represents the upper limit of physical memory we may ever

@@ -299,14 +348,19 @@ static void setup_vmalloc_size(void)
	vmalloc_size = max(size, vmalloc_size);
}

static void offset_vmlinux_info(unsigned long offset)
static void kaslr_adjust_vmlinux_info(unsigned long offset)
{
	*(unsigned long *)(&vmlinux.entry) += offset;
	vmlinux.bootdata_off += offset;
	vmlinux.bootdata_preserved_off += offset;
#ifdef CONFIG_PIE_BUILD
	vmlinux.rela_dyn_start += offset;
	vmlinux.rela_dyn_end += offset;
	vmlinux.dynsym_start += offset;
#else
	vmlinux.got_start += offset;
	vmlinux.got_end += offset;
#endif
	vmlinux.init_mm_off += offset;
	vmlinux.swapper_pg_dir_off += offset;
	vmlinux.invalid_pg_dir_off += offset;

@@ -361,6 +415,7 @@ void startup_kernel(void)
	detect_physmem_online_ranges(max_physmem_end);
	save_ipl_cert_comp_list();
	rescue_initrd(safe_addr, ident_map_size);
	rescue_relocs();

	if (kaslr_enabled()) {
		vmlinux_lma = randomize_within_range(vmlinux.image_size + vmlinux.bss_size,

@@ -368,7 +423,7 @@ void startup_kernel(void)
						     ident_map_size);
		if (vmlinux_lma) {
			__kaslr_offset = vmlinux_lma - vmlinux.default_lma;
			offset_vmlinux_info(__kaslr_offset);
			kaslr_adjust_vmlinux_info(__kaslr_offset);
		}
	}
	vmlinux_lma = vmlinux_lma ?: vmlinux.default_lma;

@@ -393,18 +448,20 @@ void startup_kernel(void)
	/*
	 * The order of the following operations is important:
	 *
	 * - handle_relocs() must follow clear_bss_section() to establish static
	 *   memory references to data in .bss to be used by setup_vmem()
	 * - kaslr_adjust_relocs() must follow clear_bss_section() to establish
	 *   static memory references to data in .bss to be used by setup_vmem()
	 *   (i.e init_mm.pgd)
	 *
	 * - setup_vmem() must follow handle_relocs() to be able using
	 * - setup_vmem() must follow kaslr_adjust_relocs() to be able using
	 *   static memory references to data in .bss (i.e init_mm.pgd)
	 *
	 * - copy_bootdata() must follow setup_vmem() to propagate changes to
	 *   bootdata made by setup_vmem()
	 * - copy_bootdata() must follow setup_vmem() to propagate changes
	 *   to bootdata made by setup_vmem()
	 */
	clear_bss_section(vmlinux_lma);
	handle_relocs(__kaslr_offset);
	kaslr_adjust_relocs(vmlinux_lma, vmlinux_lma + vmlinux.image_size, __kaslr_offset);
	kaslr_adjust_got(__kaslr_offset);
	free_relocs();
	setup_vmem(asce_limit);
	copy_bootdata();
arch/s390/boot/vmlinux.lds.S

@@ -31,6 +31,7 @@ SECTIONS
		_text = .;	/* Text */
		*(.text)
		*(.text.*)
		INIT_TEXT
		_etext = . ;
	}
	.rodata : {

@@ -39,6 +40,9 @@ SECTIONS
		*(.rodata.*)
		_erodata = . ;
	}
	.got : {
		*(.got)
	}
	NOTES
	.data : {
		_data = . ;

@@ -106,6 +110,24 @@ SECTIONS
		_compressed_end = .;
	}

#ifndef CONFIG_PIE_BUILD
	/*
	 * When the kernel is built with CONFIG_KERNEL_UNCOMPRESSED, the entire
	 * uncompressed vmlinux.bin is positioned in the bzImage decompressor
	 * image at the default kernel LMA of 0x100000, enabling it to be
	 * executed in-place. However, the size of .vmlinux.relocs could be
	 * large enough to cause an overlap with the uncompressed kernel at the
	 * address 0x100000. To address this issue, .vmlinux.relocs is
	 * positioned after the .rodata.compressed.
	 */
	. = ALIGN(4);
	.vmlinux.relocs : {
		__vmlinux_relocs_64_start = .;
		*(.vmlinux.relocs_64)
		__vmlinux_relocs_64_end = .;
	}
#endif

#define SB_TRAILER_SIZE	32
	/* Trailer needed for Secure Boot */
	. += SB_TRAILER_SIZE;	/* make sure .sb.trailer does not overwrite the previous section */

@@ -118,8 +140,34 @@ SECTIONS
	}
	_end = .;

	DWARF_DEBUG
	ELF_DETAILS

	/*
	 * Make sure that the .got.plt is either completely empty or it
	 * contains only the three reserved double words.
	 */
	.got.plt : {
		*(.got.plt)
	}
	ASSERT(SIZEOF(.got.plt) == 0 || SIZEOF(.got.plt) == 0x18, "Unexpected GOT/PLT entries detected!")

	/*
	 * Sections that should stay zero sized, which is safer to
	 * explicitly check instead of blindly discarding.
	 */
	.plt : {
		*(.plt) *(.plt.*) *(.iplt) *(.igot .igot.plt)
	}
	ASSERT(SIZEOF(.plt) == 0, "Unexpected run-time procedure linkages detected!")
	.rela.dyn : {
		*(.rela.*) *(.rela_*)
	}
	ASSERT(SIZEOF(.rela.dyn) == 0, "Unexpected run-time relocations (.rela) detected!")

	/* Sections to be discarded */
	/DISCARD/ : {
		COMMON_DISCARDS
		*(.eh_frame)
		*(__ex_table)
		*(*__ksymtab*)
arch/s390/configs/debug_defconfig

@@ -824,6 +824,8 @@ CONFIG_TEST_LOCKUP=m
CONFIG_DEBUG_PREEMPT=y
CONFIG_PROVE_LOCKING=y
CONFIG_LOCK_STAT=y
CONFIG_LOCKDEP_BITS=16
CONFIG_LOCKDEP_CHAINS_BITS=17
CONFIG_DEBUG_ATOMIC_SLEEP=y
CONFIG_DEBUG_LOCKING_API_SELFTESTS=y
CONFIG_DEBUG_IRQFLAGS=y
arch/s390/crypto/chacha-glue.c

@@ -15,14 +15,14 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sizes.h>
#include <asm/fpu/api.h>
#include <asm/fpu.h>
#include "chacha-s390.h"

static void chacha20_crypt_s390(u32 *state, u8 *dst, const u8 *src,
				unsigned int nbytes, const u32 *key,
				u32 *counter)
{
	struct kernel_fpu vxstate;
	DECLARE_KERNEL_FPU_ONSTACK32(vxstate);

	kernel_fpu_begin(&vxstate, KERNEL_VXR);
	chacha20_vx(dst, src, nbytes, key, counter);
arch/s390/crypto/chacha-s390.S

@@ -8,7 +8,7 @@

#include <linux/linkage.h>
#include <asm/nospec-insn.h>
#include <asm/vx-insn.h>
#include <asm/fpu-insn.h>

#define SP	%r15
#define FRAME	(16 * 8 + 4 * 8)
arch/s390/crypto/crc32-vx.c

@@ -13,8 +13,8 @@
#include <linux/cpufeature.h>
#include <linux/crc32.h>
#include <crypto/internal/hash.h>
#include <asm/fpu/api.h>

#include <asm/fpu.h>
#include "crc32-vx.h"

#define CRC32_BLOCK_SIZE	1
#define CRC32_DIGEST_SIZE	4

@@ -31,11 +31,6 @@ struct crc_desc_ctx {
	u32 crc;
};

/* Prototypes for functions in assembly files */
u32 crc32_le_vgfm_16(u32 crc, unsigned char const *buf, size_t size);
u32 crc32_be_vgfm_16(u32 crc, unsigned char const *buf, size_t size);
u32 crc32c_le_vgfm_16(u32 crc, unsigned char const *buf, size_t size);

/*
 * DEFINE_CRC32_VX() - Define a CRC-32 function using the vector extension
 *

@@ -49,8 +44,8 @@ u32 crc32c_le_vgfm_16(u32 crc, unsigned char const *buf, size_t size);
	static u32 __pure ___fname(u32 crc,				\
			unsigned char const *data, size_t datalen)	\
	{								\
		struct kernel_fpu vxstate;				\
		unsigned long prealign, aligned, remaining;		\
		DECLARE_KERNEL_FPU_ONSTACK16(vxstate);			\
									\
		if (datalen < VX_MIN_LEN + VX_ALIGN_MASK)		\
			return ___crc32_sw(crc, data, datalen);		\
arch/s390/crypto/crc32-vx.h (new file)

@@ -0,0 +1,12 @@
/* SPDX-License-Identifier: GPL-2.0 */

#ifndef _CRC32_VX_S390_H
#define _CRC32_VX_S390_H

#include <linux/types.h>

u32 crc32_be_vgfm_16(u32 crc, unsigned char const *buf, size_t size);
u32 crc32_le_vgfm_16(u32 crc, unsigned char const *buf, size_t size);
u32 crc32c_le_vgfm_16(u32 crc, unsigned char const *buf, size_t size);

#endif /* _CRC32_VX_S390_H */
arch/s390/crypto/crc32be-vx.S → crc32be-vx.c

@@ -12,20 +12,17 @@
 * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
 */

#include <linux/linkage.h>
#include <asm/nospec-insn.h>
#include <asm/vx-insn.h>
#include <linux/types.h>
#include <asm/fpu.h>
#include "crc32-vx.h"

/* Vector register range containing CRC-32 constants */
#define CONST_R1R2		%v9
#define CONST_R3R4		%v10
#define CONST_R5		%v11
#define CONST_R6		%v12
#define CONST_RU_POLY		%v13
#define CONST_CRC_POLY		%v14

	.data
	.balign 8
#define CONST_R1R2		9
#define CONST_R3R4		10
#define CONST_R5		11
#define CONST_R6		12
#define CONST_RU_POLY		13
#define CONST_CRC_POLY		14

/*
 * The CRC-32 constant block contains reduction constants to fold and

@@ -58,105 +55,74 @@
 * P'(x) = 0xEDB88320
 */

SYM_DATA_START_LOCAL(constants_CRC_32_BE)
	.quad 0x08833794c, 0x0e6228b11	# R1, R2
	.quad 0x0c5b9cd4c, 0x0e8a45605	# R3, R4
	.quad 0x0f200aa66, 1 << 32	# R5, x32
	.quad 0x0490d678d, 1		# R6, 1
	.quad 0x104d101df, 0		# u
	.quad 0x104C11DB7, 0		# P(x)
SYM_DATA_END(constants_CRC_32_BE)
static unsigned long constants_CRC_32_BE[] = {
	0x08833794c, 0x0e6228b11,	/* R1, R2 */
	0x0c5b9cd4c, 0x0e8a45605,	/* R3, R4 */
	0x0f200aa66, 1UL << 32,		/* R5, x32 */
	0x0490d678d, 1,			/* R6, 1 */
	0x104d101df, 0,			/* u */
	0x104C11DB7, 0,			/* P(x) */
};

	.previous

	GEN_BR_THUNK %r14

	.text
/*
 * The CRC-32 function(s) use these calling conventions:
 *
 * Parameters:
 *
 * %r2:	Initial CRC value, typically ~0; and final CRC (return) value.
 * %r3:	Input buffer pointer, performance might be improved if the
 *	buffer is on a doubleword boundary.
 * %r4:	Length of the buffer, must be 64 bytes or greater.
/**
 * crc32_be_vgfm_16 - Compute CRC-32 (BE variant) with vector registers
 * @crc: Initial CRC value, typically ~0.
 * @buf: Input buffer pointer, performance might be improved if the
 *	 buffer is on a doubleword boundary.
 * @size: Size of the buffer, must be 64 bytes or greater.
 *
 * Register usage:
 *
 * %r5:	CRC-32 constant pool base pointer.
 * V0:	Initial CRC value and intermediate constants and results.
 * V1..V4: Data for CRC computation.
 * V5..V8: Next data chunks that are fetched from the input buffer.
 *
 * V9..V14: CRC-32 constants.
 */
SYM_FUNC_START(crc32_be_vgfm_16)
u32 crc32_be_vgfm_16(u32 crc, unsigned char const *buf, size_t size)
{
	/* Load CRC-32 constants */
	larl	%r5,constants_CRC_32_BE
	VLM	CONST_R1R2,CONST_CRC_POLY,0,%r5
	fpu_vlm(CONST_R1R2, CONST_CRC_POLY, &constants_CRC_32_BE);
	fpu_vzero(0);

	/* Load the initial CRC value into the leftmost word of V0. */
	VZERO	%v0
	VLVGF	%v0,%r2,0
	fpu_vlvgf(0, crc, 0);

	/* Load a 64-byte data chunk and XOR with CRC */
	VLM	%v1,%v4,0,%r3		/* 64-bytes into V1..V4 */
	VX	%v1,%v0,%v1		/* V1 ^= CRC */
	aghi	%r3,64			/* BUF = BUF + 64 */
	aghi	%r4,-64			/* LEN = LEN - 64 */
	fpu_vlm(1, 4, buf);
	fpu_vx(1, 0, 1);
	buf += 64;
	size -= 64;

	/* Check remaining buffer size and jump to proper folding method */
	cghi	%r4,64
	jl	.Lless_than_64bytes
	while (size >= 64) {
		/* Load the next 64-byte data chunk into V5 to V8 */
		fpu_vlm(5, 8, buf);

.Lfold_64bytes_loop:
	/* Load the next 64-byte data chunk into V5 to V8 */
	VLM	%v5,%v8,0,%r3
		/*
		 * Perform a GF(2) multiplication of the doublewords in V1 with
		 * the reduction constants in V0. The intermediate result is
		 * then folded (accumulated) with the next data chunk in V5 and
		 * stored in V1. Repeat this step for the register contents
		 * in V2, V3, and V4 respectively.
		 */
		fpu_vgfmag(1, CONST_R1R2, 1, 5);
		fpu_vgfmag(2, CONST_R1R2, 2, 6);
		fpu_vgfmag(3, CONST_R1R2, 3, 7);
		fpu_vgfmag(4, CONST_R1R2, 4, 8);
		buf += 64;
		size -= 64;
	}

	/*
	 * Perform a GF(2) multiplication of the doublewords in V1 with
	 * the reduction constants in V0. The intermediate result is
	 * then folded (accumulated) with the next data chunk in V5 and
	 * stored in V1. Repeat this step for the register contents
	 * in V2, V3, and V4 respectively.
	 */
	VGFMAG	%v1,CONST_R1R2,%v1,%v5
	VGFMAG	%v2,CONST_R1R2,%v2,%v6
	VGFMAG	%v3,CONST_R1R2,%v3,%v7
	VGFMAG	%v4,CONST_R1R2,%v4,%v8

	/* Adjust buffer pointer and length for next loop */
	aghi	%r3,64			/* BUF = BUF + 64 */
	aghi	%r4,-64			/* LEN = LEN - 64 */

	cghi	%r4,64
	jnl	.Lfold_64bytes_loop

.Lless_than_64bytes:
	/* Fold V1 to V4 into a single 128-bit value in V1 */
	VGFMAG	%v1,CONST_R3R4,%v1,%v2
	VGFMAG	%v1,CONST_R3R4,%v1,%v3
	VGFMAG	%v1,CONST_R3R4,%v1,%v4
	fpu_vgfmag(1, CONST_R3R4, 1, 2);
	fpu_vgfmag(1, CONST_R3R4, 1, 3);
	fpu_vgfmag(1, CONST_R3R4, 1, 4);

	/* Check whether to continue with 64-bit folding */
	cghi	%r4,16
	jl	.Lfinal_fold
	while (size >= 16) {
		fpu_vl(2, buf);
		fpu_vgfmag(1, CONST_R3R4, 1, 2);
		buf += 16;
		size -= 16;
	}

.Lfold_16bytes_loop:

	VL	%v2,0,,%r3		/* Load next data chunk */
	VGFMAG	%v1,CONST_R3R4,%v1,%v2	/* Fold next data chunk */

	/* Adjust buffer pointer and size for folding next data chunk */
	aghi	%r3,16
	aghi	%r4,-16

	/* Process remaining data chunks */
	cghi	%r4,16
	jnl	.Lfold_16bytes_loop

.Lfinal_fold:
	/*
	 * The R5 constant is used to fold a 128-bit value into an 96-bit value
	 * that is XORed with the next 96-bit input data chunk. To use a single

@@ -164,7 +130,7 @@ SYM_FUNC_START(crc32_be_vgfm_16)
	 * form an intermediate 96-bit value (with appended zeros) which is then
	 * XORed with the intermediate reduction result.
	 */
	VGFMG	%v1,CONST_R5,%v1
	fpu_vgfmg(1, CONST_R5, 1);

	/*
	 * Further reduce the remaining 96-bit value to a 64-bit value using a

@@ -173,7 +139,7 @@ SYM_FUNC_START(crc32_be_vgfm_16)
	 * doubleword with R6. The result is a 64-bit value and is subject to
	 * the Barret reduction.
	 */
	VGFMG	%v1,CONST_R6,%v1
	fpu_vgfmg(1, CONST_R6, 1);

	/*
	 * The input values to the Barret reduction are the degree-63 polynomial

@@ -194,20 +160,15 @@ SYM_FUNC_START(crc32_be_vgfm_16)
	 */

	/* T1(x) = floor( R(x) / x^32 ) GF2MUL u */
	VUPLLF	%v2,%v1
	VGFMG	%v2,CONST_RU_POLY,%v2
	fpu_vupllf(2, 1);
	fpu_vgfmg(2, CONST_RU_POLY, 2);

	/*
	 * Compute the GF(2) product of the CRC polynomial in V0 with T1(x) in
	 * V2 and XOR the intermediate result, T2(x), with the value in V1.
	 * The final result is in the rightmost word of V2.
	 */
	VUPLLF	%v2,%v2
	VGFMAG	%v2,CONST_CRC_POLY,%v2,%v1

.Ldone:
	VLGVF	%r2,%v2,3
	BR_EX	%r14
SYM_FUNC_END(crc32_be_vgfm_16)

	.previous
	fpu_vupllf(2, 2);
	fpu_vgfmag(2, CONST_CRC_POLY, 2, 1);
	return fpu_vlgvf(2, 3);
}
arch/s390/crypto/crc32le-vx.S → crc32le-vx.c

@@ -13,20 +13,17 @@
 * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
 */

#include <linux/linkage.h>
#include <asm/nospec-insn.h>
#include <asm/vx-insn.h>
#include <linux/types.h>
#include <asm/fpu.h>
#include "crc32-vx.h"

/* Vector register range containing CRC-32 constants */
#define CONST_PERM_LE2BE	%v9
#define CONST_R2R1		%v10
#define CONST_R4R3		%v11
#define CONST_R5		%v12
#define CONST_RU_POLY		%v13
#define CONST_CRC_POLY		%v14

	.data
	.balign 8
#define CONST_PERM_LE2BE	9
#define CONST_R2R1		10
#define CONST_R4R3		11
#define CONST_R5		12
#define CONST_RU_POLY		13
#define CONST_CRC_POLY		14

/*
 * The CRC-32 constant block contains reduction constants to fold and

@@ -59,64 +56,43 @@
 * P'(x) = 0x82F63B78
 */

SYM_DATA_START_LOCAL(constants_CRC_32_LE)
	.octa 0x0F0E0D0C0B0A09080706050403020100	# BE->LE mask
	.quad 0x1c6e41596, 0x154442bd4			# R2, R1
	.quad 0x0ccaa009e, 0x1751997d0			# R4, R3
	.octa 0x163cd6124				# R5
	.octa 0x1F7011641				# u'
	.octa 0x1DB710641				# P'(x) << 1
SYM_DATA_END(constants_CRC_32_LE)
static unsigned long constants_CRC_32_LE[] = {
	0x0f0e0d0c0b0a0908, 0x0706050403020100,	/* BE->LE mask */
	0x1c6e41596, 0x154442bd4,		/* R2, R1 */
	0x0ccaa009e, 0x1751997d0,		/* R4, R3 */
	0x0, 0x163cd6124,			/* R5 */
	0x0, 0x1f7011641,			/* u' */
	0x0, 0x1db710641			/* P'(x) << 1 */
};

SYM_DATA_START_LOCAL(constants_CRC_32C_LE)
	.octa 0x0F0E0D0C0B0A09080706050403020100	# BE->LE mask
	.quad 0x09e4addf8, 0x740eef02			# R2, R1
	.quad 0x14cd00bd6, 0xf20c0dfe			# R4, R3
	.octa 0x0dd45aab8				# R5
	.octa 0x0dea713f1				# u'
	.octa 0x105ec76f0				# P'(x) << 1
SYM_DATA_END(constants_CRC_32C_LE)
static unsigned long constants_CRC_32C_LE[] = {
	0x0f0e0d0c0b0a0908, 0x0706050403020100,	/* BE->LE mask */
	0x09e4addf8, 0x740eef02,		/* R2, R1 */
	0x14cd00bd6, 0xf20c0dfe,		/* R4, R3 */
	0x0, 0x0dd45aab8,			/* R5 */
	0x0, 0x0dea713f1,			/* u' */
	0x0, 0x105ec76f0			/* P'(x) << 1 */
};

	.previous

	GEN_BR_THUNK %r14

	.text

/*
 * The CRC-32 functions use these calling conventions:
 *
 * Parameters:
 *
 * %r2:	Initial CRC value, typically ~0; and final CRC (return) value.
 * %r3:	Input buffer pointer, performance might be improved if the
 *	buffer is on a doubleword boundary.
 * %r4:	Length of the buffer, must be 64 bytes or greater.
/**
 * crc32_le_vgfm_generic - Compute CRC-32 (LE variant) with vector registers
 * @crc: Initial CRC value, typically ~0.
 * @buf: Input buffer pointer, performance might be improved if the
 *	 buffer is on a doubleword boundary.
 * @size: Size of the buffer, must be 64 bytes or greater.
 * @constants: CRC-32 constant pool base pointer.
 *
 * Register usage:
 *
 * %r5:	CRC-32 constant pool base pointer.
 * V0:	Initial CRC value and intermediate constants and results.
 * V1..V4: Data for CRC computation.
 * V5..V8: Next data chunks that are fetched from the input buffer.
 * V9:	Constant for BE->LE conversion and shift operations
 *
 * V0:	Initial CRC value and intermediate constants and results.
 * V1..V4: Data for CRC computation.
 * V5..V8: Next data chunks that are fetched from the input buffer.
 * V9:	Constant for BE->LE conversion and shift operations
 * V10..V14: CRC-32 constants.
 */

SYM_FUNC_START(crc32_le_vgfm_16)
	larl	%r5,constants_CRC_32_LE
	j	crc32_le_vgfm_generic
SYM_FUNC_END(crc32_le_vgfm_16)

SYM_FUNC_START(crc32c_le_vgfm_16)
	larl	%r5,constants_CRC_32C_LE
	j	crc32_le_vgfm_generic
SYM_FUNC_END(crc32c_le_vgfm_16)

SYM_FUNC_START(crc32_le_vgfm_generic)
static u32 crc32_le_vgfm_generic(u32 crc, unsigned char const *buf, size_t size, unsigned long *constants)
{
	/* Load CRC-32 constants */
	VLM	CONST_PERM_LE2BE,CONST_CRC_POLY,0,%r5
	fpu_vlm(CONST_PERM_LE2BE, CONST_CRC_POLY, constants);

	/*
	 * Load the initial CRC value.

@@ -125,90 +101,73 @@ SYM_FUNC_START(crc32_le_vgfm_generic)
	 * vector register and is later XORed with the LSB portion
	 * of the loaded input data.
	 */
	VZERO	%v0			/* Clear V0 */
	VLVGF	%v0,%r2,3		/* Load CRC into rightmost word */
	fpu_vzero(0);			/* Clear V0 */
	fpu_vlvgf(0, crc, 3);		/* Load CRC into rightmost word */

	/* Load a 64-byte data chunk and XOR with CRC */
	VLM	%v1,%v4,0,%r3		/* 64-bytes into V1..V4 */
	VPERM	%v1,%v1,%v1,CONST_PERM_LE2BE
	VPERM	%v2,%v2,%v2,CONST_PERM_LE2BE
	VPERM	%v3,%v3,%v3,CONST_PERM_LE2BE
	VPERM	%v4,%v4,%v4,CONST_PERM_LE2BE
	fpu_vlm(1, 4, buf);
	fpu_vperm(1, 1, 1, CONST_PERM_LE2BE);
	fpu_vperm(2, 2, 2, CONST_PERM_LE2BE);
	fpu_vperm(3, 3, 3, CONST_PERM_LE2BE);
	fpu_vperm(4, 4, 4, CONST_PERM_LE2BE);

	VX	%v1,%v0,%v1		/* V1 ^= CRC */
	aghi	%r3,64			/* BUF = BUF + 64 */
	aghi	%r4,-64			/* LEN = LEN - 64 */
	fpu_vx(1, 0, 1);		/* V1 ^= CRC */
	buf += 64;
	size -= 64;

	cghi	%r4,64
	jl	.Lless_than_64bytes
	while (size >= 64) {
		fpu_vlm(5, 8, buf);
		fpu_vperm(5, 5, 5, CONST_PERM_LE2BE);
		fpu_vperm(6, 6, 6, CONST_PERM_LE2BE);
		fpu_vperm(7, 7, 7, CONST_PERM_LE2BE);
		fpu_vperm(8, 8, 8, CONST_PERM_LE2BE);
		/*
		 * Perform a GF(2) multiplication of the doublewords in V1 with
		 * the R1 and R2 reduction constants in V0. The intermediate
		 * result is then folded (accumulated) with the next data chunk
		 * in V5 and stored in V1. Repeat this step for the register
		 * contents in V2, V3, and V4 respectively.
		 */
		fpu_vgfmag(1, CONST_R2R1, 1, 5);
		fpu_vgfmag(2, CONST_R2R1, 2, 6);
		fpu_vgfmag(3, CONST_R2R1, 3, 7);
		fpu_vgfmag(4, CONST_R2R1, 4, 8);
		buf += 64;
		size -= 64;
	}

.Lfold_64bytes_loop:
	/* Load the next 64-byte data chunk into V5 to V8 */
	VLM	%v5,%v8,0,%r3
	VPERM	%v5,%v5,%v5,CONST_PERM_LE2BE
	VPERM	%v6,%v6,%v6,CONST_PERM_LE2BE
	VPERM	%v7,%v7,%v7,CONST_PERM_LE2BE
	VPERM	%v8,%v8,%v8,CONST_PERM_LE2BE

	/*
	 * Perform a GF(2) multiplication of the doublewords in V1 with
	 * the R1 and R2 reduction constants in V0. The intermediate result
	 * is then folded (accumulated) with the next data chunk in V5 and
	 * stored in V1. Repeat this step for the register contents
	 * in V2, V3, and V4 respectively.
	 */
	VGFMAG	%v1,CONST_R2R1,%v1,%v5
	VGFMAG	%v2,CONST_R2R1,%v2,%v6
	VGFMAG	%v3,CONST_R2R1,%v3,%v7
	VGFMAG	%v4,CONST_R2R1,%v4,%v8

	aghi	%r3,64			/* BUF = BUF + 64 */
	aghi	%r4,-64			/* LEN = LEN - 64 */

	cghi	%r4,64
	jnl	.Lfold_64bytes_loop

.Lless_than_64bytes:
	/*
	 * Fold V1 to V4 into a single 128-bit value in V1. Multiply V1 with R3
	 * and R4 and accumulating the next 128-bit chunk until a single 128-bit
	 * value remains.
	 */
	VGFMAG	%v1,CONST_R4R3,%v1,%v2
	VGFMAG	%v1,CONST_R4R3,%v1,%v3
	VGFMAG	%v1,CONST_R4R3,%v1,%v4
	fpu_vgfmag(1, CONST_R4R3, 1, 2);
	fpu_vgfmag(1, CONST_R4R3, 1, 3);
	fpu_vgfmag(1, CONST_R4R3, 1, 4);

	cghi	%r4,16
	jl	.Lfinal_fold
	while (size >= 16) {
		fpu_vl(2, buf);
		fpu_vperm(2, 2, 2, CONST_PERM_LE2BE);
		fpu_vgfmag(1, CONST_R4R3, 1, 2);
		buf += 16;
		size -= 16;
	}

.Lfold_16bytes_loop:

	VL	%v2,0,,%r3		/* Load next data chunk */
	VPERM	%v2,%v2,%v2,CONST_PERM_LE2BE
	VGFMAG	%v1,CONST_R4R3,%v1,%v2	/* Fold next data chunk */

	aghi	%r3,16
	aghi	%r4,-16

	cghi	%r4,16
	jnl	.Lfold_16bytes_loop

.Lfinal_fold:
	/*
	 * Set up a vector register for byte shifts. The shift value must
	 * be loaded in bits 1-4 in byte element 7 of a vector register.
	 * Shift by 8 bytes: 0x40
	 * Shift by 4 bytes: 0x20
	 */
	VLEIB	%v9,0x40,7
	fpu_vleib(9, 0x40, 7);

	/*
	 * Prepare V0 for the next GF(2) multiplication: shift V0 by 8 bytes
	 * to move R4 into the rightmost doubleword and set the leftmost
	 * doubleword to 0x1.
	 */
	VSRLB	%v0,CONST_R4R3,%v9
	VLEIG	%v0,1,0
	fpu_vsrlb(0, CONST_R4R3, 9);
	fpu_vleig(0, 1, 0);

	/*
	 * Compute GF(2) product of V1 and V0. The rightmost doubleword

@@ -216,7 +175,7 @@ SYM_FUNC_START(crc32_le_vgfm_generic)
	 * multiplied by 0x1 and is then XORed with rightmost product.
	 * Implicitly, the intermediate leftmost product becomes padded
	 */
	VGFMG	%v1,%v0,%v1
	fpu_vgfmg(1, 0, 1);

	/*
	 * Now do the final 32-bit fold by multiplying the rightmost word

@@ -231,10 +190,10 @@ SYM_FUNC_START(crc32_le_vgfm_generic)
	 * rightmost doubleword and the leftmost doubleword is zero to ignore
	 * the leftmost product of V1.
	 */
	VLEIB	%v9,0x20,7		/* Shift by words */
	VSRLB	%v2,%v1,%v9		/* Store remaining bits in V2 */
	VUPLLF	%v1,%v1			/* Split rightmost doubleword */
	VGFMAG	%v1,CONST_R5,%v1,%v2	/* V1 = (V1 * R5) XOR V2 */
	fpu_vleib(9, 0x20, 7);		/* Shift by words */
	fpu_vsrlb(2, 1, 9);		/* Store remaining bits in V2 */
	fpu_vupllf(1, 1);		/* Split rightmost doubleword */
	fpu_vgfmag(1, CONST_R5, 1, 2);	/* V1 = (V1 * R5) XOR V2 */

	/*
	 * Apply a Barret reduction to compute the final 32-bit CRC value.

@@ -256,20 +215,26 @@ SYM_FUNC_START(crc32_le_vgfm_generic)
	 */

	/* T1(x) = floor( R(x) / x^32 ) GF2MUL u */
	VUPLLF	%v2,%v1
	VGFMG	%v2,CONST_RU_POLY,%v2
	fpu_vupllf(2, 1);
	fpu_vgfmg(2, CONST_RU_POLY, 2);

	/*
	 * Compute the GF(2) product of the CRC polynomial with T1(x) in
	 * V2 and XOR the intermediate result, T2(x), with the value in V1.
	 * The final result is stored in word element 2 of V2.
	 */
	VUPLLF	%v2,%v2
	VGFMAG	%v2,CONST_CRC_POLY,%v2,%v1

.Ldone:
	VLGVF	%r2,%v2,2
	BR_EX	%r14
SYM_FUNC_END(crc32_le_vgfm_generic)
	fpu_vupllf(2, 2);
	fpu_vgfmag(2, CONST_CRC_POLY, 2, 1);
	return fpu_vlgvf(2, 2);
}

	.previous
u32 crc32_le_vgfm_16(u32 crc, unsigned char const *buf, size_t size)
{
	return crc32_le_vgfm_generic(crc, buf, size, &constants_CRC_32_LE[0]);
}

u32 crc32c_le_vgfm_16(u32 crc, unsigned char const *buf, size_t size)
{
	return crc32_le_vgfm_generic(crc, buf, size, &constants_CRC_32C_LE[0]);
}
arch/s390/crypto/paes_s390.c

@@ -125,20 +125,8 @@ struct s390_pxts_ctx {
static inline int __paes_keyblob2pkey(struct key_blob *kb,
				      struct pkey_protkey *pk)
{
	int i, ret;

	/* try three times in case of failure */
	for (i = 0; i < 3; i++) {
		if (i > 0 && ret == -EAGAIN && in_task())
			if (msleep_interruptible(1000))
				return -EINTR;
		ret = pkey_keyblob2pkey(kb->key, kb->keylen,
					pk->protkey, &pk->len, &pk->type);
		if (ret == 0)
			break;
	}

	return ret;
	return pkey_keyblob2pkey(kb->key, kb->keylen,
				 pk->protkey, &pk->len, &pk->type);
}

static inline int __paes_convert_key(struct s390_paes_ctx *ctx)
arch/s390/hypfs/hypfs_diag0c.c

@@ -20,8 +20,7 @@
 */
static void diag0c_fn(void *data)
{
	diag_stat_inc(DIAG_STAT_X00C);
	diag_amode31_ops.diag0c(((void **)data)[smp_processor_id()]);
	diag0c(((void **)data)[smp_processor_id()]);
}

/*
arch/s390/hypfs/hypfs_sprp.c

@@ -25,7 +25,7 @@

static inline unsigned long __hypfs_sprp_diag304(void *data, unsigned long cmd)
{
	union register_pair r1 = { .even = (unsigned long)data, };
	union register_pair r1 = { .even = virt_to_phys(data), };

	asm volatile("diag %[r1],%[r3],0x304\n"
		     : [r1] "+&d" (r1.pair)

@@ -74,7 +74,7 @@ static int __hypfs_sprp_ioctl(void __user *user_area)
	int rc;

	rc = -ENOMEM;
	data = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	data = (void *)get_zeroed_page(GFP_KERNEL);
	diag304 = kzalloc(sizeof(*diag304), GFP_KERNEL);
	if (!data || !diag304)
		goto out;
arch/s390/include/asm/access-regs.h (new file)

@@ -0,0 +1,38 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright IBM Corp. 1999, 2024
 */

#ifndef __ASM_S390_ACCESS_REGS_H
#define __ASM_S390_ACCESS_REGS_H

#include <linux/instrumented.h>
#include <asm/sigcontext.h>

struct access_regs {
	unsigned int regs[NUM_ACRS];
};

static inline void save_access_regs(unsigned int *acrs)
{
	struct access_regs *regs = (struct access_regs *)acrs;

	instrument_write(regs, sizeof(*regs));
	asm volatile("stamy	0,15,%[regs]"
		     : [regs] "=QS" (*regs)
		     :
		     : "memory");
}

static inline void restore_access_regs(unsigned int *acrs)
{
	struct access_regs *regs = (struct access_regs *)acrs;

	instrument_read(regs, sizeof(*regs));
	asm volatile("lamy	0,15,%[regs]"
		     :
		     : [regs] "QS" (*regs)
		     : "memory");
}

#endif /* __ASM_S390_ACCESS_REGS_H */
arch/s390/include/asm/appldata.h

@@ -54,13 +54,13 @@ static inline int appldata_asm(struct appldata_parameter_list *parm_list,
	parm_list->function = fn;
	parm_list->parlist_length = sizeof(*parm_list);
	parm_list->buffer_length = length;
	parm_list->product_id_addr = (unsigned long) id;
	parm_list->product_id_addr = virt_to_phys(id);
	parm_list->buffer_addr = virt_to_phys(buffer);
	diag_stat_inc(DIAG_STAT_X0DC);
	asm volatile(
		"	diag	%1,%0,0xdc"
		: "=d" (ry)
		: "d" (parm_list), "m" (*parm_list), "m" (*id)
		: "d" (virt_to_phys(parm_list)), "m" (*parm_list), "m" (*id)
		: "cc");
	return ry;
}
arch/s390/include/asm/asm-prototypes.h

@@ -3,7 +3,7 @@

#include <linux/kvm_host.h>
#include <linux/ftrace.h>
#include <asm/fpu/api.h>
#include <asm/fpu.h>
#include <asm-generic/asm-prototypes.h>

__int128_t __ashlti3(__int128_t a, int b);
arch/s390/include/asm/bug.h

@@ -14,7 +14,7 @@
		".section .rodata.str,\"aMS\",@progbits,1\n"	\
		"1:	.asciz	\""__FILE__"\"\n"		\
		".previous\n"					\
		".section __bug_table,\"awM\",@progbits,%2\n"	\
		".section __bug_table,\"aw\"\n"			\
		"2:	.long	0b-.\n"				\
		"	.long	1b-.\n"				\
		"	.short	%0,%1\n"			\

@@ -30,7 +30,7 @@
#define __EMIT_BUG(x) do {					\
	asm_inline volatile(					\
		"0:	mc	0,0\n"				\
		".section __bug_table,\"awM\",@progbits,%1\n"	\
		".section __bug_table,\"aw\"\n"			\
		"1:	.long	0b-.\n"				\
		"	.short	%0\n"				\
		"	.org	1b+%1\n"			\
arch/s390/include/asm/checksum.h

@@ -12,36 +12,29 @@
#ifndef _S390_CHECKSUM_H
#define _S390_CHECKSUM_H

#include <linux/kasan-checks.h>
#include <linux/instrumented.h>
#include <linux/in6.h>

/*
 * Computes the checksum of a memory block at buff, length len,
 * and adds in "sum" (32-bit).
 *
 * Returns a 32-bit number suitable for feeding into itself
 * or csum_tcpudp_magic.
 *
 * This function must be called with even lengths, except
 * for the last fragment, which may be odd.
 *
 * It's best to have buff aligned on a 32-bit boundary.
 */
static inline __wsum csum_partial(const void *buff, int len, __wsum sum)
static inline __wsum cksm(const void *buff, int len, __wsum sum)
{
	union register_pair rp = {
		.even = (unsigned long) buff,
		.odd = (unsigned long) len,
		.even = (unsigned long)buff,
		.odd = (unsigned long)len,
	};

	kasan_check_read(buff, len);
	asm volatile(
	instrument_read(buff, len);
	asm volatile("\n"
		"0:	cksm	%[sum],%[rp]\n"
		"	jo	0b\n"
		: [sum] "+&d" (sum), [rp] "+&d" (rp.pair) : : "cc", "memory");
	return sum;
}

__wsum csum_partial(const void *buff, int len, __wsum sum);

#define _HAVE_ARCH_CSUM_AND_COPY
__wsum csum_partial_copy_nocheck(const void *src, void *dst, int len);

/*
 * Fold a partial checksum without adding pseudo headers.
 */
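As context for the cksm()-based rework above: the 32-bit partial sum that cksm() accumulates is reduced to the final 16-bit Internet checksum by folding the halfwords together and complementing the result. A small generic sketch of that folding step, written here as an assumption for illustration rather than taken from this patch:

/* Fold a 32-bit partial sum down to the final 16-bit checksum. */
static inline unsigned short fold_csum32(unsigned int sum)
{
	sum = (sum & 0xffff) + (sum >> 16);	/* add high half into low half */
	sum = (sum & 0xffff) + (sum >> 16);	/* absorb the remaining carry */
	return (unsigned short)~sum;		/* one's complement */
}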
arch/s390/include/asm/diag.h

@@ -44,6 +44,13 @@ enum diag_stat_enum {
void diag_stat_inc(enum diag_stat_enum nr);
void diag_stat_inc_norecursion(enum diag_stat_enum nr);

struct hypfs_diag0c_entry;

/*
 * Diagnose 0c: Pseudo Timer
 */
void diag0c(struct hypfs_diag0c_entry *data);

/*
 * Diagnose 10: Release page range
 */

@@ -331,10 +338,10 @@ struct hypfs_diag0c_entry;
 */
struct diag_ops {
	int (*diag210)(struct diag210 *addr);
	int (*diag26c)(void *req, void *resp, enum diag26c_sc subcode);
	int (*diag26c)(unsigned long rx, unsigned long rx1, enum diag26c_sc subcode);
	int (*diag14)(unsigned long rx, unsigned long ry1, unsigned long subcode);
	int (*diag8c)(struct diag8c *addr, struct ccw_dev_id *devno, size_t len);
	void (*diag0c)(struct hypfs_diag0c_entry *entry);
	void (*diag0c)(unsigned long rx);
	void (*diag308_reset)(void);
};

@@ -342,9 +349,9 @@ extern struct diag_ops diag_amode31_ops;
extern struct diag210 *__diag210_tmp_amode31;

int _diag210_amode31(struct diag210 *addr);
int _diag26c_amode31(void *req, void *resp, enum diag26c_sc subcode);
int _diag26c_amode31(unsigned long rx, unsigned long rx1, enum diag26c_sc subcode);
int _diag14_amode31(unsigned long rx, unsigned long ry1, unsigned long subcode);
void _diag0c_amode31(struct hypfs_diag0c_entry *entry);
void _diag0c_amode31(unsigned long rx);
void _diag308_reset_amode31(void);
int _diag8c_amode31(struct diag8c *addr, struct ccw_dev_id *devno, size_t len);
arch/s390/include/asm/entry-common.h

@@ -8,7 +8,7 @@
#include <linux/processor.h>
#include <linux/uaccess.h>
#include <asm/timex.h>
#include <asm/fpu/api.h>
#include <asm/fpu.h>
#include <asm/pai.h>

#define ARCH_EXIT_TO_USER_MODE_WORK (_TIF_GUARDED_STORAGE | _TIF_PER_TRAP)

@@ -41,8 +41,7 @@ static __always_inline void arch_exit_to_user_mode_work(struct pt_regs *regs,

static __always_inline void arch_exit_to_user_mode(void)
{
	if (test_cpu_flag(CIF_FPU))
		__load_fpu_regs();
	load_user_fpu_regs();

	if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
		debug_user_asce(1);
@ -9,11 +9,11 @@
|
||||
* Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
|
||||
*/
|
||||
|
||||
#ifndef __ASM_S390_VX_INSN_INTERNAL_H
|
||||
#define __ASM_S390_VX_INSN_INTERNAL_H
|
||||
#ifndef __ASM_S390_FPU_INSN_ASM_H
|
||||
#define __ASM_S390_FPU_INSN_ASM_H
|
||||
|
||||
#ifndef __ASM_S390_VX_INSN_H
|
||||
#error only <asm/vx-insn.h> can be included directly
|
||||
#ifndef __ASM_S390_FPU_INSN_H
|
||||
#error only <asm/fpu-insn.h> can be included directly
|
||||
#endif
|
||||
|
||||
#ifdef __ASSEMBLY__
|
||||
@ -195,10 +195,26 @@
|
||||
/* RXB - Compute most significant bit used vector registers
|
||||
*
|
||||
* @rxb: Operand to store computed RXB value
|
||||
* @v1: First vector register designated operand
|
||||
* @v2: Second vector register designated operand
|
||||
* @v3: Third vector register designated operand
|
||||
* @v4: Fourth vector register designated operand
|
||||
* @v1: Vector register designated operand whose MSB is stored in
|
||||
* RXB bit 0 (instruction bit 36) and whose remaining bits
|
||||
* are stored in instruction bits 8-11.
|
||||
* @v2: Vector register designated operand whose MSB is stored in
|
||||
* RXB bit 1 (instruction bit 37) and whose remaining bits
|
||||
* are stored in instruction bits 12-15.
|
||||
* @v3: Vector register designated operand whose MSB is stored in
|
||||
* RXB bit 2 (instruction bit 38) and whose remaining bits
|
||||
* are stored in instruction bits 16-19.
|
||||
* @v4: Vector register designated operand whose MSB is stored in
|
||||
* RXB bit 3 (instruction bit 39) and whose remaining bits
|
||||
* are stored in instruction bits 32-35.
|
||||
*
|
||||
* Note: In most vector instruction formats [1] V1, V2, V3, and V4 directly
|
||||
* correspond to @v1, @v2, @v3, and @v4. But there are exceptions, such as but
|
||||
* not limited to the vector instruction formats VRR-g, VRR-h, VRS-a, VRS-d,
|
||||
* and VSI.
|
||||
*
|
||||
* [1] IBM z/Architecture Principles of Operation, chapter "Program
|
||||
* Execution, section "Instructions", subsection "Instruction Formats".
|
||||
*/
|
||||
.macro RXB rxb v1 v2=0 v3=0 v4=0
	\rxb = 0
@@ -223,6 +239,9 @@
 * @v2: Second vector register designated operand (for RXB)
 * @v3: Third vector register designated operand (for RXB)
 * @v4: Fourth vector register designated operand (for RXB)
 *
 * Note: For @v1, @v2, @v3, and @v4 also refer to the RXB macro
 *	 description for further details.
 */
.macro MRXB m v1 v2=0 v3=0 v4=0
	rxb = 0
@@ -238,6 +257,9 @@
 * @v2: Second vector register designated operand (for RXB)
 * @v3: Third vector register designated operand (for RXB)
 * @v4: Fourth vector register designated operand (for RXB)
 *
 * Note: For @v1, @v2, @v3, and @v4 also refer to the RXB macro
 *	 description for further details.
 */
.macro MRXBOPC m opc v1 v2=0 v3=0 v4=0
	MRXB \m, \v1, \v2, \v3, \v4
@@ -350,7 +372,7 @@
	VX_NUM v3, \vr
	.word 0xE700 | (r1 << 4) | (v3&15)
	.word (b2 << 12) | (\disp)
	MRXBOPC \m, 0x21, v3
	MRXBOPC \m, 0x21, 0, v3
.endm
.macro VLGVB gr, vr, disp, base="%r0"
	VLGV \gr, \vr, \disp, \base, 0
@@ -499,6 +521,25 @@
	VMRL \vr1, \vr2, \vr3, 3
.endm

/* VECTOR LOAD WITH LENGTH */
.macro VLL v, gr, disp, base
	VX_NUM v1, \v
	GR_NUM b2, \base
	GR_NUM r3, \gr
	.word 0xE700 | ((v1&15) << 4) | r3
	.word (b2 << 12) | (\disp)
	MRXBOPC 0, 0x37, v1
.endm

/* VECTOR STORE WITH LENGTH */
.macro VSTL v, gr, disp, base
	VX_NUM v1, \v
	GR_NUM b2, \base
	GR_NUM r3, \gr
	.word 0xE700 | ((v1&15) << 4) | r3
	.word (b2 << 12) | (\disp)
	MRXBOPC 0, 0x3f, v1
.endm

/* Vector integer instructions */

@@ -512,6 +553,16 @@
	MRXBOPC 0, 0x68, v1, v2, v3
.endm

/* VECTOR CHECKSUM */
.macro VCKSM vr1, vr2, vr3
	VX_NUM v1, \vr1
	VX_NUM v2, \vr2
	VX_NUM v3, \vr3
	.word 0xE700 | ((v1&15) << 4) | (v2&15)
	.word ((v3&15) << 12)
	MRXBOPC 0, 0x66, v1, v2, v3
.endm

/* VECTOR EXCLUSIVE OR */
.macro VX vr1, vr2, vr3
	VX_NUM v1, \vr1
@@ -678,4 +729,4 @@
.endm

#endif /* __ASSEMBLY__ */
#endif /* __ASM_S390_VX_INSN_INTERNAL_H */
#endif /* __ASM_S390_FPU_INSN_ASM_H */
arch/s390/include/asm/fpu-insn.h (new file, 486 lines)
@@ -0,0 +1,486 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Support for Floating Point and Vector Instructions
 *
 */

#ifndef __ASM_S390_FPU_INSN_H
#define __ASM_S390_FPU_INSN_H

#include <asm/fpu-insn-asm.h>

#ifndef __ASSEMBLY__

#include <linux/instrumented.h>
#include <asm/asm-extable.h>

asm(".include \"asm/fpu-insn-asm.h\"\n");

/*
 * Various small helper functions, which can and should be used within
 * kernel fpu code sections. Each function represents only one floating
 * point or vector instruction (except for helper functions which require
 * exception handling).
 *
 * This allows to use floating point and vector instructions like C
 * functions, which has the advantage that all supporting code, like
 * e.g. loops, can be written in easy to read C code.
 *
 * Each of the helper functions provides support for code instrumentation,
 * like e.g. KASAN. Therefore instrumentation is also covered automatically
 * when using these functions.
 *
 * In order to ensure that code generated with the helper functions stays
 * within kernel fpu sections, which are guarded with kernel_fpu_begin()
 * and kernel_fpu_end() calls, each function has a mandatory "memory"
 * barrier.
 */

static __always_inline void fpu_cefbr(u8 f1, s32 val)
{
	asm volatile("cefbr	%[f1],%[val]\n"
		     :
		     : [f1] "I" (f1), [val] "d" (val)
		     : "memory");
}

static __always_inline unsigned long fpu_cgebr(u8 f2, u8 mode)
{
	unsigned long val;

	asm volatile("cgebr	%[val],%[mode],%[f2]\n"
		     : [val] "=d" (val)
		     : [f2] "I" (f2), [mode] "I" (mode)
		     : "memory");
	return val;
}

static __always_inline void fpu_debr(u8 f1, u8 f2)
{
	asm volatile("debr	%[f1],%[f2]\n"
		     :
		     : [f1] "I" (f1), [f2] "I" (f2)
		     : "memory");
}

static __always_inline void fpu_ld(unsigned short fpr, freg_t *reg)
{
	instrument_read(reg, sizeof(*reg));
	asm volatile("ld	%[fpr],%[reg]\n"
		     :
		     : [fpr] "I" (fpr), [reg] "Q" (reg->ui)
		     : "memory");
}

static __always_inline void fpu_ldgr(u8 f1, u32 val)
{
	asm volatile("ldgr	%[f1],%[val]\n"
		     :
		     : [f1] "I" (f1), [val] "d" (val)
		     : "memory");
}

static __always_inline void fpu_lfpc(unsigned int *fpc)
{
	instrument_read(fpc, sizeof(*fpc));
	asm volatile("lfpc	%[fpc]"
		     :
		     : [fpc] "Q" (*fpc)
		     : "memory");
}

/**
 * fpu_lfpc_safe - Load floating point control register safely.
 * @fpc: new value for floating point control register
 *
 * Load floating point control register. This may lead to an exception,
 * since a saved value may have been modified by user space (ptrace,
 * signal return, kvm registers) to an invalid value. In such a case
 * set the floating point control register to zero.
 */
static inline void fpu_lfpc_safe(unsigned int *fpc)
{
	u32 tmp;

	instrument_read(fpc, sizeof(*fpc));
	asm volatile("\n"
		"0:	lfpc	%[fpc]\n"
		"1:	nopr	%%r7\n"
		".pushsection .fixup, \"ax\"\n"
		"2:	lghi	%[tmp],0\n"
		"	sfpc	%[tmp]\n"
		"	jg	1b\n"
		".popsection\n"
		EX_TABLE(1b, 2b)
		: [tmp] "=d" (tmp)
		: [fpc] "Q" (*fpc)
		: "memory");
}

static __always_inline void fpu_std(unsigned short fpr, freg_t *reg)
{
	instrument_write(reg, sizeof(*reg));
	asm volatile("std	%[fpr],%[reg]\n"
		     : [reg] "=Q" (reg->ui)
		     : [fpr] "I" (fpr)
		     : "memory");
}

static __always_inline void fpu_sfpc(unsigned int fpc)
{
	asm volatile("sfpc	%[fpc]"
		     :
		     : [fpc] "d" (fpc)
		     : "memory");
}

static __always_inline void fpu_stfpc(unsigned int *fpc)
{
	instrument_write(fpc, sizeof(*fpc));
	asm volatile("stfpc	%[fpc]"
		     : [fpc] "=Q" (*fpc)
		     :
		     : "memory");
}

static __always_inline void fpu_vab(u8 v1, u8 v2, u8 v3)
{
	asm volatile("VAB	%[v1],%[v2],%[v3]"
		     :
		     : [v1] "I" (v1), [v2] "I" (v2), [v3] "I" (v3)
		     : "memory");
}

static __always_inline void fpu_vcksm(u8 v1, u8 v2, u8 v3)
{
	asm volatile("VCKSM	%[v1],%[v2],%[v3]"
		     :
		     : [v1] "I" (v1), [v2] "I" (v2), [v3] "I" (v3)
		     : "memory");
}

static __always_inline void fpu_vesravb(u8 v1, u8 v2, u8 v3)
{
	asm volatile("VESRAVB	%[v1],%[v2],%[v3]"
		     :
		     : [v1] "I" (v1), [v2] "I" (v2), [v3] "I" (v3)
		     : "memory");
}

static __always_inline void fpu_vgfmag(u8 v1, u8 v2, u8 v3, u8 v4)
{
	asm volatile("VGFMAG	%[v1],%[v2],%[v3],%[v4]"
		     :
		     : [v1] "I" (v1), [v2] "I" (v2), [v3] "I" (v3), [v4] "I" (v4)
		     : "memory");
}

static __always_inline void fpu_vgfmg(u8 v1, u8 v2, u8 v3)
{
	asm volatile("VGFMG	%[v1],%[v2],%[v3]"
		     :
		     : [v1] "I" (v1), [v2] "I" (v2), [v3] "I" (v3)
		     : "memory");
}

#ifdef CONFIG_CC_IS_CLANG

static __always_inline void fpu_vl(u8 v1, const void *vxr)
{
	instrument_read(vxr, sizeof(__vector128));
	asm volatile("\n"
		     "	la	1,%[vxr]\n"
		     "	VL	%[v1],0,,1\n"
		     :
		     : [vxr] "R" (*(__vector128 *)vxr),
		       [v1] "I" (v1)
		     : "memory", "1");
}

#else /* CONFIG_CC_IS_CLANG */

static __always_inline void fpu_vl(u8 v1, const void *vxr)
{
	instrument_read(vxr, sizeof(__vector128));
	asm volatile("VL	%[v1],%O[vxr],,%R[vxr]\n"
		     :
		     : [vxr] "Q" (*(__vector128 *)vxr),
		       [v1] "I" (v1)
		     : "memory");
}

#endif /* CONFIG_CC_IS_CLANG */

static __always_inline void fpu_vleib(u8 v, s16 val, u8 index)
{
	asm volatile("VLEIB	%[v],%[val],%[index]"
		     :
		     : [v] "I" (v), [val] "K" (val), [index] "I" (index)
		     : "memory");
}

static __always_inline void fpu_vleig(u8 v, s16 val, u8 index)
{
	asm volatile("VLEIG	%[v],%[val],%[index]"
		     :
		     : [v] "I" (v), [val] "K" (val), [index] "I" (index)
		     : "memory");
}

static __always_inline u64 fpu_vlgvf(u8 v, u16 index)
{
	u64 val;

	asm volatile("VLGVF	%[val],%[v],%[index]"
		     : [val] "=d" (val)
		     : [v] "I" (v), [index] "L" (index)
		     : "memory");
	return val;
}

#ifdef CONFIG_CC_IS_CLANG

static __always_inline void fpu_vll(u8 v1, u32 index, const void *vxr)
{
	unsigned int size;

	size = min(index + 1, sizeof(__vector128));
	instrument_read(vxr, size);
	asm volatile("\n"
		     "	la	1,%[vxr]\n"
		     "	VLL	%[v1],%[index],0,1\n"
		     :
		     : [vxr] "R" (*(u8 *)vxr),
		       [index] "d" (index),
		       [v1] "I" (v1)
		     : "memory", "1");
}

#else /* CONFIG_CC_IS_CLANG */

static __always_inline void fpu_vll(u8 v1, u32 index, const void *vxr)
{
	unsigned int size;

	size = min(index + 1, sizeof(__vector128));
	instrument_read(vxr, size);
	asm volatile("VLL	%[v1],%[index],%O[vxr],%R[vxr]\n"
		     :
		     : [vxr] "Q" (*(u8 *)vxr),
		       [index] "d" (index),
		       [v1] "I" (v1)
		     : "memory");
}

#endif /* CONFIG_CC_IS_CLANG */

#ifdef CONFIG_CC_IS_CLANG

#define fpu_vlm(_v1, _v3, _vxrs)					\
({									\
	unsigned int size = ((_v3) - (_v1) + 1) * sizeof(__vector128);	\
	struct {							\
		__vector128 _v[(_v3) - (_v1) + 1];			\
	} *_v = (void *)(_vxrs);					\
									\
	instrument_read(_v, size);					\
	asm volatile("\n"						\
		     "	la	1,%[vxrs]\n"				\
		     "	VLM	%[v1],%[v3],0,1\n"			\
		     :							\
		     : [vxrs] "R" (*_v),				\
		       [v1] "I" (_v1), [v3] "I" (_v3)			\
		     : "memory", "1");					\
	(_v3) - (_v1) + 1;						\
})

#else /* CONFIG_CC_IS_CLANG */

#define fpu_vlm(_v1, _v3, _vxrs)					\
({									\
	unsigned int size = ((_v3) - (_v1) + 1) * sizeof(__vector128);	\
	struct {							\
		__vector128 _v[(_v3) - (_v1) + 1];			\
	} *_v = (void *)(_vxrs);					\
									\
	instrument_read(_v, size);					\
	asm volatile("VLM	%[v1],%[v3],%O[vxrs],%R[vxrs]\n"	\
		     :							\
		     : [vxrs] "Q" (*_v),				\
		       [v1] "I" (_v1), [v3] "I" (_v3)			\
		     : "memory");					\
	(_v3) - (_v1) + 1;						\
})

#endif /* CONFIG_CC_IS_CLANG */

static __always_inline void fpu_vlr(u8 v1, u8 v2)
{
	asm volatile("VLR	%[v1],%[v2]"
		     :
		     : [v1] "I" (v1), [v2] "I" (v2)
		     : "memory");
}

static __always_inline void fpu_vlvgf(u8 v, u32 val, u16 index)
{
	asm volatile("VLVGF	%[v],%[val],%[index]"
		     :
		     : [v] "I" (v), [val] "d" (val), [index] "L" (index)
		     : "memory");
}

static __always_inline void fpu_vn(u8 v1, u8 v2, u8 v3)
{
	asm volatile("VN	%[v1],%[v2],%[v3]"
		     :
		     : [v1] "I" (v1), [v2] "I" (v2), [v3] "I" (v3)
		     : "memory");
}

static __always_inline void fpu_vperm(u8 v1, u8 v2, u8 v3, u8 v4)
{
	asm volatile("VPERM	%[v1],%[v2],%[v3],%[v4]"
		     :
		     : [v1] "I" (v1), [v2] "I" (v2), [v3] "I" (v3), [v4] "I" (v4)
		     : "memory");
}

static __always_inline void fpu_vrepib(u8 v1, s16 i2)
{
	asm volatile("VREPIB	%[v1],%[i2]"
		     :
		     : [v1] "I" (v1), [i2] "K" (i2)
		     : "memory");
}

static __always_inline void fpu_vsrlb(u8 v1, u8 v2, u8 v3)
{
	asm volatile("VSRLB	%[v1],%[v2],%[v3]"
		     :
		     : [v1] "I" (v1), [v2] "I" (v2), [v3] "I" (v3)
		     : "memory");
}

#ifdef CONFIG_CC_IS_CLANG

static __always_inline void fpu_vst(u8 v1, const void *vxr)
{
	instrument_write(vxr, sizeof(__vector128));
	asm volatile("\n"
		     "	la	1,%[vxr]\n"
		     "	VST	%[v1],0,,1\n"
		     : [vxr] "=R" (*(__vector128 *)vxr)
		     : [v1] "I" (v1)
		     : "memory", "1");
}

#else /* CONFIG_CC_IS_CLANG */

static __always_inline void fpu_vst(u8 v1, const void *vxr)
{
	instrument_write(vxr, sizeof(__vector128));
	asm volatile("VST	%[v1],%O[vxr],,%R[vxr]\n"
		     : [vxr] "=Q" (*(__vector128 *)vxr)
		     : [v1] "I" (v1)
		     : "memory");
}

#endif /* CONFIG_CC_IS_CLANG */

#ifdef CONFIG_CC_IS_CLANG

static __always_inline void fpu_vstl(u8 v1, u32 index, const void *vxr)
{
	unsigned int size;

	size = min(index + 1, sizeof(__vector128));
	instrument_write(vxr, size);
	asm volatile("\n"
		     "	la	1,%[vxr]\n"
		     "	VSTL	%[v1],%[index],0,1\n"
		     : [vxr] "=R" (*(u8 *)vxr)
		     : [index] "d" (index), [v1] "I" (v1)
		     : "memory", "1");
}

#else /* CONFIG_CC_IS_CLANG */

static __always_inline void fpu_vstl(u8 v1, u32 index, const void *vxr)
{
	unsigned int size;

	size = min(index + 1, sizeof(__vector128));
	instrument_write(vxr, size);
	asm volatile("VSTL	%[v1],%[index],%O[vxr],%R[vxr]\n"
		     : [vxr] "=Q" (*(u8 *)vxr)
		     : [index] "d" (index), [v1] "I" (v1)
		     : "memory");
}

#endif /* CONFIG_CC_IS_CLANG */

#ifdef CONFIG_CC_IS_CLANG

#define fpu_vstm(_v1, _v3, _vxrs)					\
({									\
	unsigned int size = ((_v3) - (_v1) + 1) * sizeof(__vector128);	\
	struct {							\
		__vector128 _v[(_v3) - (_v1) + 1];			\
	} *_v = (void *)(_vxrs);					\
									\
	instrument_write(_v, size);					\
	asm volatile("\n"						\
		     "	la	1,%[vxrs]\n"				\
		     "	VSTM	%[v1],%[v3],0,1\n"			\
		     : [vxrs] "=R" (*_v)				\
		     : [v1] "I" (_v1), [v3] "I" (_v3)			\
		     : "memory", "1");					\
	(_v3) - (_v1) + 1;						\
})

#else /* CONFIG_CC_IS_CLANG */

#define fpu_vstm(_v1, _v3, _vxrs)					\
({									\
	unsigned int size = ((_v3) - (_v1) + 1) * sizeof(__vector128);	\
	struct {							\
		__vector128 _v[(_v3) - (_v1) + 1];			\
	} *_v = (void *)(_vxrs);					\
									\
	instrument_write(_v, size);					\
	asm volatile("VSTM	%[v1],%[v3],%O[vxrs],%R[vxrs]\n"	\
		     : [vxrs] "=Q" (*_v)				\
		     : [v1] "I" (_v1), [v3] "I" (_v3)			\
		     : "memory");					\
	(_v3) - (_v1) + 1;						\
})

#endif /* CONFIG_CC_IS_CLANG */

static __always_inline void fpu_vupllf(u8 v1, u8 v2)
{
	asm volatile("VUPLLF	%[v1],%[v2]"
		     :
		     : [v1] "I" (v1), [v2] "I" (v2)
		     : "memory");
}

static __always_inline void fpu_vx(u8 v1, u8 v2, u8 v3)
{
	asm volatile("VX	%[v1],%[v2],%[v3]"
		     :
		     : [v1] "I" (v1), [v2] "I" (v2), [v3] "I" (v3)
		     : "memory");
}

static __always_inline void fpu_vzero(u8 v)
{
	asm volatile("VZERO	%[v]"
		     :
		     : [v] "I" (v)
		     : "memory");
}

#endif /* __ASSEMBLY__ */
#endif /* __ASM_S390_FPU_INSN_H */
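As an aside, a minimal sketch of how these single-instruction wrappers compose into ordinary C loops (cksum_blocks() and its signature are hypothetical; only the fpu_*() helpers are taken from the file above, and the caller is assumed to hold a kernel fpu section covering KERNEL_VXR_V16V23 so that %v16 and %v17 may be clobbered):

static u32 cksum_blocks(const u8 *buf, unsigned int nblocks, u32 sum)
{
	unsigned int i;

	fpu_vlvgf(16, sum, 1);		/* running sum in word element 1 of %v16 */
	for (i = 0; i < nblocks; i++) {
		fpu_vl(17, buf);	/* load one 16-byte block into %v17 */
		fpu_vcksm(16, 17, 16);	/* accumulate word checksums into %v16 */
		buf += 16;
	}
	return (u32)fpu_vlgvf(16, 1);	/* extract the accumulated sum */
}

The loop, pointer arithmetic, and return statement are plain C; the compiler is free to schedule them around the generated vector instructions, which is the readability advantage this header is after.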
arch/s390/include/asm/fpu-types.h (new file, 51 lines)
@@ -0,0 +1,51 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * FPU data structures
 *
 * Copyright IBM Corp. 2015
 * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
 */

#ifndef _ASM_S390_FPU_TYPES_H
#define _ASM_S390_FPU_TYPES_H

#include <asm/sigcontext.h>

struct fpu {
	u32 fpc;
	__vector128 vxrs[__NUM_VXRS] __aligned(8);
};

struct kernel_fpu_hdr {
	int mask;
	u32 fpc;
};

struct kernel_fpu {
	struct kernel_fpu_hdr hdr;
	__vector128 vxrs[] __aligned(8);
};

#define KERNEL_FPU_STRUCT(vxr_size)				\
struct kernel_fpu_##vxr_size {					\
	struct kernel_fpu_hdr hdr;				\
	__vector128 vxrs[vxr_size] __aligned(8);		\
}

KERNEL_FPU_STRUCT(8);
KERNEL_FPU_STRUCT(16);
KERNEL_FPU_STRUCT(32);

#define DECLARE_KERNEL_FPU_ONSTACK(vxr_size, name)		\
	struct kernel_fpu_##vxr_size name __uninitialized

#define DECLARE_KERNEL_FPU_ONSTACK8(name)			\
	DECLARE_KERNEL_FPU_ONSTACK(8, name)

#define DECLARE_KERNEL_FPU_ONSTACK16(name)			\
	DECLARE_KERNEL_FPU_ONSTACK(16, name)

#define DECLARE_KERNEL_FPU_ONSTACK32(name)			\
	DECLARE_KERNEL_FPU_ONSTACK(32, name)

#endif /* _ASM_S390_FPU_TYPES_H */
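For illustration, expanding the declaration macros by hand (manual expansion, not part of the patch): DECLARE_KERNEL_FPU_ONSTACK16(vxstate); becomes

	struct kernel_fpu_16 vxstate __uninitialized;

where struct kernel_fpu_16 was defined by KERNEL_FPU_STRUCT(16) as

	struct kernel_fpu_16 {
		struct kernel_fpu_hdr hdr;
		__vector128 vxrs[16] __aligned(8);
	};

Each sized variant is layout-compatible with struct kernel_fpu (a kernel_fpu_hdr followed by the vxrs array), which is what allows the kernel_fpu_begin()/kernel_fpu_end() macros in fpu.h to cast an on-stack variable to struct kernel_fpu * after checking ARRAY_SIZE(s->vxrs) against the requested flags.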
arch/s390/include/asm/fpu.h (new file, 295 lines)
@@ -0,0 +1,295 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * In-kernel FPU support functions
 *
 *
 * Consider these guidelines before using in-kernel FPU functions:
 *
 *  1. Use kernel_fpu_begin() and kernel_fpu_end() to enclose all in-kernel
 *     use of floating-point or vector registers and instructions.
 *
 *  2. For kernel_fpu_begin(), specify the vector register range you want to
 *     use with the KERNEL_VXR_* constants. Consider these usage guidelines:
 *
 *     a) If your function typically runs in process-context, use the lower
 *	  half of the vector registers, for example, specify KERNEL_VXR_LOW.
 *     b) If your function typically runs in soft-irq or hard-irq context,
 *	  prefer using the upper half of the vector registers, for example,
 *	  specify KERNEL_VXR_HIGH.
 *
 *     If you adhere to these guidelines, an interrupted process context
 *     does not require saving and restoring vector registers because of
 *     disjoint register ranges.
 *
 *     Also note that the __kernel_fpu_begin()/__kernel_fpu_end() functions
 *     include logic to save and restore up to 16 vector registers at once.
 *
 *  3. You can nest kernel_fpu_begin()/kernel_fpu_end() by using different
 *     struct kernel_fpu states. Vector registers that are in use by outer
 *     levels are saved and restored. You can minimize the save and restore
 *     effort by choosing disjoint vector register ranges.
 *
 *  5. To use vector floating-point instructions, specify the KERNEL_FPC
 *     flag to save and restore floating-point controls in addition to any
 *     vector register range.
 *
 *  6. To use floating-point registers and instructions only, specify the
 *     KERNEL_FPR flag. This flag triggers a save and restore of vector
 *     registers V0 to V15 and floating-point controls.
 *
 * Copyright IBM Corp. 2015
 * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
 */

#ifndef _ASM_S390_FPU_H
#define _ASM_S390_FPU_H

#include <linux/processor.h>
#include <linux/preempt.h>
#include <linux/string.h>
#include <linux/sched.h>
#include <asm/sigcontext.h>
#include <asm/fpu-types.h>
#include <asm/fpu-insn.h>
#include <asm/facility.h>

static inline bool cpu_has_vx(void)
{
	return likely(test_facility(129));
}

enum {
	KERNEL_FPC_BIT = 0,
	KERNEL_VXR_V0V7_BIT,
	KERNEL_VXR_V8V15_BIT,
	KERNEL_VXR_V16V23_BIT,
	KERNEL_VXR_V24V31_BIT,
};

#define KERNEL_FPC		BIT(KERNEL_FPC_BIT)
#define KERNEL_VXR_V0V7		BIT(KERNEL_VXR_V0V7_BIT)
#define KERNEL_VXR_V8V15	BIT(KERNEL_VXR_V8V15_BIT)
#define KERNEL_VXR_V16V23	BIT(KERNEL_VXR_V16V23_BIT)
#define KERNEL_VXR_V24V31	BIT(KERNEL_VXR_V24V31_BIT)

#define KERNEL_VXR_LOW		(KERNEL_VXR_V0V7 | KERNEL_VXR_V8V15)
#define KERNEL_VXR_MID		(KERNEL_VXR_V8V15 | KERNEL_VXR_V16V23)
#define KERNEL_VXR_HIGH		(KERNEL_VXR_V16V23 | KERNEL_VXR_V24V31)

#define KERNEL_VXR		(KERNEL_VXR_LOW | KERNEL_VXR_HIGH)
#define KERNEL_FPR		(KERNEL_FPC | KERNEL_VXR_LOW)

void load_fpu_state(struct fpu *state, int flags);
void save_fpu_state(struct fpu *state, int flags);
void __kernel_fpu_begin(struct kernel_fpu *state, int flags);
void __kernel_fpu_end(struct kernel_fpu *state, int flags);

static __always_inline void save_vx_regs(__vector128 *vxrs)
{
	fpu_vstm(0, 15, &vxrs[0]);
	fpu_vstm(16, 31, &vxrs[16]);
}

static __always_inline void load_vx_regs(__vector128 *vxrs)
{
	fpu_vlm(0, 15, &vxrs[0]);
	fpu_vlm(16, 31, &vxrs[16]);
}

static __always_inline void __save_fp_regs(freg_t *fprs, unsigned int offset)
{
	fpu_std(0, &fprs[0 * offset]);
	fpu_std(1, &fprs[1 * offset]);
	fpu_std(2, &fprs[2 * offset]);
	fpu_std(3, &fprs[3 * offset]);
	fpu_std(4, &fprs[4 * offset]);
	fpu_std(5, &fprs[5 * offset]);
	fpu_std(6, &fprs[6 * offset]);
	fpu_std(7, &fprs[7 * offset]);
	fpu_std(8, &fprs[8 * offset]);
	fpu_std(9, &fprs[9 * offset]);
	fpu_std(10, &fprs[10 * offset]);
	fpu_std(11, &fprs[11 * offset]);
	fpu_std(12, &fprs[12 * offset]);
	fpu_std(13, &fprs[13 * offset]);
	fpu_std(14, &fprs[14 * offset]);
	fpu_std(15, &fprs[15 * offset]);
}

static __always_inline void __load_fp_regs(freg_t *fprs, unsigned int offset)
{
	fpu_ld(0, &fprs[0 * offset]);
	fpu_ld(1, &fprs[1 * offset]);
	fpu_ld(2, &fprs[2 * offset]);
	fpu_ld(3, &fprs[3 * offset]);
	fpu_ld(4, &fprs[4 * offset]);
	fpu_ld(5, &fprs[5 * offset]);
	fpu_ld(6, &fprs[6 * offset]);
	fpu_ld(7, &fprs[7 * offset]);
	fpu_ld(8, &fprs[8 * offset]);
	fpu_ld(9, &fprs[9 * offset]);
	fpu_ld(10, &fprs[10 * offset]);
	fpu_ld(11, &fprs[11 * offset]);
	fpu_ld(12, &fprs[12 * offset]);
	fpu_ld(13, &fprs[13 * offset]);
	fpu_ld(14, &fprs[14 * offset]);
	fpu_ld(15, &fprs[15 * offset]);
}

static __always_inline void save_fp_regs(freg_t *fprs)
{
	__save_fp_regs(fprs, sizeof(freg_t) / sizeof(freg_t));
}

static __always_inline void load_fp_regs(freg_t *fprs)
{
	__load_fp_regs(fprs, sizeof(freg_t) / sizeof(freg_t));
}

static __always_inline void save_fp_regs_vx(__vector128 *vxrs)
{
	freg_t *fprs = (freg_t *)&vxrs[0].high;

	__save_fp_regs(fprs, sizeof(__vector128) / sizeof(freg_t));
}

static __always_inline void load_fp_regs_vx(__vector128 *vxrs)
{
	freg_t *fprs = (freg_t *)&vxrs[0].high;

	__load_fp_regs(fprs, sizeof(__vector128) / sizeof(freg_t));
}

static inline void load_user_fpu_regs(void)
{
	struct thread_struct *thread = &current->thread;

	if (!thread->ufpu_flags)
		return;
	load_fpu_state(&thread->ufpu, thread->ufpu_flags);
	thread->ufpu_flags = 0;
}

static __always_inline void __save_user_fpu_regs(struct thread_struct *thread, int flags)
{
	save_fpu_state(&thread->ufpu, flags);
	__atomic_or(flags, &thread->ufpu_flags);
}

static inline void save_user_fpu_regs(void)
{
	struct thread_struct *thread = &current->thread;
	int mask, flags;

	mask = __atomic_or(KERNEL_FPC | KERNEL_VXR, &thread->kfpu_flags);
	flags = ~READ_ONCE(thread->ufpu_flags) & (KERNEL_FPC | KERNEL_VXR);
	if (flags)
		__save_user_fpu_regs(thread, flags);
	barrier();
	WRITE_ONCE(thread->kfpu_flags, mask);
}

static __always_inline void _kernel_fpu_begin(struct kernel_fpu *state, int flags)
{
	struct thread_struct *thread = &current->thread;
	int mask, uflags;

	mask = __atomic_or(flags, &thread->kfpu_flags);
	state->hdr.mask = mask;
	uflags = READ_ONCE(thread->ufpu_flags);
	if ((uflags & flags) != flags)
		__save_user_fpu_regs(thread, ~uflags & flags);
	if (mask & flags)
		__kernel_fpu_begin(state, flags);
}

static __always_inline void _kernel_fpu_end(struct kernel_fpu *state, int flags)
{
	int mask = state->hdr.mask;

	if (mask & flags)
		__kernel_fpu_end(state, flags);
	barrier();
	WRITE_ONCE(current->thread.kfpu_flags, mask);
}

void __kernel_fpu_invalid_size(void);

static __always_inline void kernel_fpu_check_size(int flags, unsigned int size)
{
	unsigned int cnt = 0;

	if (flags & KERNEL_VXR_V0V7)
		cnt += 8;
	if (flags & KERNEL_VXR_V8V15)
		cnt += 8;
	if (flags & KERNEL_VXR_V16V23)
		cnt += 8;
	if (flags & KERNEL_VXR_V24V31)
		cnt += 8;
	if (cnt != size)
		__kernel_fpu_invalid_size();
}

#define kernel_fpu_begin(state, flags)					\
{									\
	typeof(state) s = (state);					\
	int _flags = (flags);						\
									\
	kernel_fpu_check_size(_flags, ARRAY_SIZE(s->vxrs));		\
	_kernel_fpu_begin((struct kernel_fpu *)s, _flags);		\
}

#define kernel_fpu_end(state, flags)					\
{									\
	typeof(state) s = (state);					\
	int _flags = (flags);						\
									\
	kernel_fpu_check_size(_flags, ARRAY_SIZE(s->vxrs));		\
	_kernel_fpu_end((struct kernel_fpu *)s, _flags);		\
}

static inline void save_kernel_fpu_regs(struct thread_struct *thread)
{
	if (!thread->kfpu_flags)
		return;
	save_fpu_state(&thread->kfpu, thread->kfpu_flags);
}

static inline void restore_kernel_fpu_regs(struct thread_struct *thread)
{
	if (!thread->kfpu_flags)
		return;
	load_fpu_state(&thread->kfpu, thread->kfpu_flags);
}

static inline void convert_vx_to_fp(freg_t *fprs, __vector128 *vxrs)
{
	int i;

	for (i = 0; i < __NUM_FPRS; i++)
		fprs[i].ui = vxrs[i].high;
}

static inline void convert_fp_to_vx(__vector128 *vxrs, freg_t *fprs)
{
	int i;

	for (i = 0; i < __NUM_FPRS; i++)
		vxrs[i].high = fprs[i].ui;
}

static inline void fpregs_store(_s390_fp_regs *fpregs, struct fpu *fpu)
{
	fpregs->pad = 0;
	fpregs->fpc = fpu->fpc;
	convert_vx_to_fp((freg_t *)&fpregs->fprs, fpu->vxrs);
}

static inline void fpregs_load(_s390_fp_regs *fpregs, struct fpu *fpu)
{
	fpu->fpc = fpregs->fpc;
	convert_fp_to_vx(fpu->vxrs, (freg_t *)&fpregs->fprs);
}

#endif /* _ASM_S390_FPU_H */
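A minimal usage sketch based on the definitions above (vector_op() and its body are illustrative, not taken from the patch):

static void vector_op(void)
{
	DECLARE_KERNEL_FPU_ONSTACK16(vxstate);

	/*
	 * KERNEL_VXR_LOW covers %v0-%v15, i.e. 16 registers, matching the
	 * vxrs[16] save area; kernel_fpu_check_size() verifies the match.
	 */
	kernel_fpu_begin(&vxstate, KERNEL_VXR_LOW);
	fpu_vzero(0);		/* any fpu_*() helpers may be used here */
	kernel_fpu_end(&vxstate, KERNEL_VXR_LOW);
}

Because the sections are now preemptible, an outer context's registers are saved into vxstate on demand and restored again by kernel_fpu_end().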
@@ -1,126 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * In-kernel FPU support functions
 *
 *
 * Consider these guidelines before using in-kernel FPU functions:
 *
 *  1. Use kernel_fpu_begin() and kernel_fpu_end() to enclose all in-kernel
 *     use of floating-point or vector registers and instructions.
 *
 *  2. For kernel_fpu_begin(), specify the vector register range you want to
 *     use with the KERNEL_VXR_* constants. Consider these usage guidelines:
 *
 *     a) If your function typically runs in process-context, use the lower
 *	  half of the vector registers, for example, specify KERNEL_VXR_LOW.
 *     b) If your function typically runs in soft-irq or hard-irq context,
 *	  prefer using the upper half of the vector registers, for example,
 *	  specify KERNEL_VXR_HIGH.
 *
 *     If you adhere to these guidelines, an interrupted process context
 *     does not require saving and restoring vector registers because of
 *     disjoint register ranges.
 *
 *     Also note that the __kernel_fpu_begin()/__kernel_fpu_end() functions
 *     include logic to save and restore up to 16 vector registers at once.
 *
 *  3. You can nest kernel_fpu_begin()/kernel_fpu_end() by using different
 *     struct kernel_fpu states. Vector registers that are in use by outer
 *     levels are saved and restored. You can minimize the save and restore
 *     effort by choosing disjoint vector register ranges.
 *
 *  5. To use vector floating-point instructions, specify the KERNEL_FPC
 *     flag to save and restore floating-point controls in addition to any
 *     vector register range.
 *
 *  6. To use floating-point registers and instructions only, specify the
 *     KERNEL_FPR flag. This flag triggers a save and restore of vector
 *     registers V0 to V15 and floating-point controls.
 *
 * Copyright IBM Corp. 2015
 * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
 */

#ifndef _ASM_S390_FPU_API_H
#define _ASM_S390_FPU_API_H

#include <linux/preempt.h>
#include <asm/asm-extable.h>
#include <asm/fpu/internal.h>

void save_fpu_regs(void);
void load_fpu_regs(void);
void __load_fpu_regs(void);

/**
 * sfpc_safe - Set floating point control register safely.
 * @fpc: new value for floating point control register
 *
 * Set floating point control register. This may lead to an exception,
 * since a saved value may have been modified by user space (ptrace,
 * signal return, kvm registers) to an invalid value. In such a case
 * set the floating point control register to zero.
 */
static inline void sfpc_safe(u32 fpc)
{
	asm volatile("\n"
		"0:	sfpc	%[fpc]\n"
		"1:	nopr	%%r7\n"
		".pushsection .fixup, \"ax\"\n"
		"2:	lghi	%[fpc],0\n"
		"	jg	0b\n"
		".popsection\n"
		EX_TABLE(1b, 2b)
		: [fpc] "+d" (fpc)
		: : "memory");
}

#define KERNEL_FPC		1
#define KERNEL_VXR_V0V7		2
#define KERNEL_VXR_V8V15	4
#define KERNEL_VXR_V16V23	8
#define KERNEL_VXR_V24V31	16

#define KERNEL_VXR_LOW		(KERNEL_VXR_V0V7|KERNEL_VXR_V8V15)
#define KERNEL_VXR_MID		(KERNEL_VXR_V8V15|KERNEL_VXR_V16V23)
#define KERNEL_VXR_HIGH		(KERNEL_VXR_V16V23|KERNEL_VXR_V24V31)

#define KERNEL_VXR		(KERNEL_VXR_LOW|KERNEL_VXR_HIGH)
#define KERNEL_FPR		(KERNEL_FPC|KERNEL_VXR_LOW)

struct kernel_fpu;

/*
 * Note the functions below must be called with preemption disabled.
 * Do not enable preemption before calling __kernel_fpu_end() to prevent
 * a corruption of an existing kernel FPU state.
 *
 * Prefer using the kernel_fpu_begin()/kernel_fpu_end() pair of functions.
 */
void __kernel_fpu_begin(struct kernel_fpu *state, u32 flags);
void __kernel_fpu_end(struct kernel_fpu *state, u32 flags);

static inline void kernel_fpu_begin(struct kernel_fpu *state, u32 flags)
{
	preempt_disable();
	state->mask = S390_lowcore.fpu_flags;
	if (!test_cpu_flag(CIF_FPU))
		/* Save user space FPU state and register contents */
		save_fpu_regs();
	else if (state->mask & flags)
		/* Save FPU/vector register in-use by the kernel */
		__kernel_fpu_begin(state, flags);
	S390_lowcore.fpu_flags |= flags;
}

static inline void kernel_fpu_end(struct kernel_fpu *state, u32 flags)
{
	S390_lowcore.fpu_flags = state->mask;
	if (state->mask & flags)
		/* Restore FPU/vector register in-use by the kernel */
		__kernel_fpu_end(state, flags);
	preempt_enable();
}

#endif /* _ASM_S390_FPU_API_H */
@@ -1,67 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * FPU state and register content conversion primitives
 *
 * Copyright IBM Corp. 2015
 * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
 */

#ifndef _ASM_S390_FPU_INTERNAL_H
#define _ASM_S390_FPU_INTERNAL_H

#include <linux/string.h>
#include <asm/facility.h>
#include <asm/fpu/types.h>

static inline bool cpu_has_vx(void)
{
	return likely(test_facility(129));
}

static inline void save_vx_regs(__vector128 *vxrs)
{
	asm volatile(
		"	la	1,%0\n"
		"	.word	0xe70f,0x1000,0x003e\n"	/* vstm 0,15,0(1) */
		"	.word	0xe70f,0x1100,0x0c3e\n"	/* vstm 16,31,256(1) */
		: "=Q" (*(struct vx_array *) vxrs) : : "1");
}

static inline void convert_vx_to_fp(freg_t *fprs, __vector128 *vxrs)
{
	int i;

	for (i = 0; i < __NUM_FPRS; i++)
		fprs[i].ui = vxrs[i].high;
}

static inline void convert_fp_to_vx(__vector128 *vxrs, freg_t *fprs)
{
	int i;

	for (i = 0; i < __NUM_FPRS; i++)
		vxrs[i].high = fprs[i].ui;
}

static inline void fpregs_store(_s390_fp_regs *fpregs, struct fpu *fpu)
{
	fpregs->pad = 0;
	fpregs->fpc = fpu->fpc;
	if (cpu_has_vx())
		convert_vx_to_fp((freg_t *)&fpregs->fprs, fpu->vxrs);
	else
		memcpy((freg_t *)&fpregs->fprs, fpu->fprs,
		       sizeof(fpregs->fprs));
}

static inline void fpregs_load(_s390_fp_regs *fpregs, struct fpu *fpu)
{
	fpu->fpc = fpregs->fpc;
	if (cpu_has_vx())
		convert_fp_to_vx(fpu->vxrs, (freg_t *)&fpregs->fprs);
	else
		memcpy(fpu->fprs, (freg_t *)&fpregs->fprs,
		       sizeof(fpregs->fprs));
}

#endif /* _ASM_S390_FPU_INTERNAL_H */
@@ -1,38 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * FPU data structures
 *
 * Copyright IBM Corp. 2015
 * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
 */

#ifndef _ASM_S390_FPU_TYPES_H
#define _ASM_S390_FPU_TYPES_H

#include <asm/sigcontext.h>

struct fpu {
	__u32 fpc;		/* Floating-point control */
	void *regs;		/* Pointer to the current save area */
	union {
		/* Floating-point register save area */
		freg_t fprs[__NUM_FPRS];
		/* Vector register save area */
		__vector128 vxrs[__NUM_VXRS];
	};
};

/* VX array structure for address operand constraints in inline assemblies */
struct vx_array { __vector128 _[__NUM_VXRS]; };

/* In-kernel FPU state structure */
struct kernel_fpu {
	u32 mask;
	u32 fpc;
	union {
		freg_t fprs[__NUM_FPRS];
		__vector128 vxrs[__NUM_VXRS];
	};
};

#endif /* _ASM_S390_FPU_TYPES_H */
@@ -23,7 +23,7 @@
#include <linux/mmu_notifier.h>
#include <asm/debug.h>
#include <asm/cpu.h>
#include <asm/fpu/api.h>
#include <asm/fpu.h>
#include <asm/isc.h>
#include <asm/guarded_storage.h>

@@ -743,7 +743,6 @@ struct kvm_vcpu_arch {
	struct kvm_s390_sie_block *vsie_block;
	unsigned int host_acrs[NUM_ACRS];
	struct gs_cb *host_gscb;
	struct fpu host_fpregs;
	struct kvm_s390_local_interrupt local_int;
	struct hrtimer ckc_timer;
	struct kvm_s390_pgm_info pgm;
@@ -765,6 +764,8 @@ struct kvm_vcpu_arch {
	__u64 cputm_start;
	bool gs_enabled;
	bool skey_enabled;
	/* Indicator if the access registers have been loaded from guest */
	bool acrs_loaded;
	struct kvm_s390_pv_vcpu pv;
	union diag318_info diag318_info;
};
@@ -157,7 +157,7 @@ struct lowcore {
	__s32	preempt_count;			/* 0x03a8 */
	__u32	spinlock_lockval;		/* 0x03ac */
	__u32	spinlock_index;			/* 0x03b0 */
	__u32	fpu_flags;			/* 0x03b4 */
	__u8	pad_0x03b4[0x03b8-0x03b4];	/* 0x03b4 */
	__u64	percpu_offset;			/* 0x03b8 */
	__u8	pad_0x03c0[0x03c8-0x03c0];	/* 0x03c0 */
	__u64	machine_flags;			/* 0x03c8 */
@@ -16,7 +16,7 @@ struct qpaci_info_block {
	u64 header;
	struct {
		u64 : 8;
		u64 num_cc : 8;		/* # of supported crypto counters */
		u64 num_cc : 8;		/* # of supported crypto counters */
		u64 : 9;
		u64 num_nnpa : 7;	/* # of supported NNPA counters */
		u64 : 32;
@@ -81,4 +81,5 @@ enum paievt_mode {
	PAI_MODE_COUNTING,
};

#define PAI_SAVE_AREA(x)	((x)->hw.event_base)
#endif
@@ -122,6 +122,7 @@ struct zpci_dev {
	struct rcu_head rcu;
	struct hotplug_slot hotplug_slot;

	struct mutex state_lock;	/* protect state changes */
	enum zpci_state state;
	u32 fid;			/* function ID, used by sclp */
	u32 fh;				/* function handle, used by insn's */
@@ -142,7 +143,6 @@ struct zpci_dev {
	u8 reserved : 2;
	unsigned int devfn;		/* DEVFN part of the RID */

	struct mutex lock;
	u8 pfip[CLP_PFIP_NR_SEGMENTS];	/* pci function internal path */
	u32 uid;			/* user defined id */
	u8 util_str[CLP_UTIL_STR_LEN];	/* utility string */
@@ -170,6 +170,7 @@ struct zpci_dev {
	u64 dma_mask;			/* DMA address space mask */

	/* Function measurement block */
	struct mutex fmb_lock;
	struct zpci_fmb *fmb;
	u16 fmb_update;			/* update interval */
	u16 fmb_length;
@@ -22,6 +22,7 @@ enum reserved_range_type {
	RR_DECOMPRESSOR,
	RR_INITRD,
	RR_VMLINUX,
	RR_RELOC,
	RR_AMODE31,
	RR_IPLREPORT,
	RR_CERT_COMP_LIST,
@@ -15,13 +15,11 @@
#include <linux/bits.h>

#define CIF_NOHZ_DELAY		2	/* delay HZ disable for a tick */
#define CIF_FPU			3	/* restore FPU registers */
#define CIF_ENABLED_WAIT	5	/* in enabled wait state */
#define CIF_MCCK_GUEST		6	/* machine check happening in guest */
#define CIF_DEDICATED_CPU	7	/* this CPU is dedicated */

#define _CIF_NOHZ_DELAY		BIT(CIF_NOHZ_DELAY)
#define _CIF_FPU		BIT(CIF_FPU)
#define _CIF_ENABLED_WAIT	BIT(CIF_ENABLED_WAIT)
#define _CIF_MCCK_GUEST		BIT(CIF_MCCK_GUEST)
#define _CIF_DEDICATED_CPU	BIT(CIF_DEDICATED_CPU)
@@ -33,13 +31,12 @@
#include <linux/cpumask.h>
#include <linux/linkage.h>
#include <linux/irqflags.h>
#include <asm/fpu-types.h>
#include <asm/cpu.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/setup.h>
#include <asm/runtime_instr.h>
#include <asm/fpu/types.h>
#include <asm/fpu/internal.h>
#include <asm/irqflags.h>

typedef long (*sys_call_ptr_t)(struct pt_regs *regs);
@@ -169,6 +166,8 @@ struct thread_struct {
	unsigned int gmap_write_flag;	/* gmap fault write indication */
	unsigned int gmap_int_code;	/* int code of last gmap fault */
	unsigned int gmap_pfault;	/* signal of a pending guest pfault */
	int ufpu_flags;			/* user fpu flags */
	int kfpu_flags;			/* kernel fpu flags */

	/* Per-thread information related to debugging */
	struct per_regs per_user;	/* User specified PER registers */
@@ -184,7 +183,8 @@ struct thread_struct {
	struct gs_cb *gs_cb;		/* Current guarded storage cb */
	struct gs_cb *gs_bc_cb;		/* Broadcast guarded storage cb */
	struct pgm_tdb trap_tdb;	/* Transaction abort diagnose block */
	struct fpu fpu;			/* FP and VX register save area */
	struct fpu ufpu;		/* User FP and VX register save area */
	struct fpu kfpu;		/* Kernel FP and VX register save area */
};

/* Flag to disable transactions. */
@@ -203,7 +203,6 @@ typedef struct thread_struct thread_struct;

#define INIT_THREAD {							\
	.ksp = sizeof(init_stack) + (unsigned long) &init_stack,	\
	.fpu.regs = (void *) init_task.thread.fpu.fprs,			\
	.last_break = 1,						\
}

@@ -203,6 +203,10 @@ static inline int test_and_clear_pt_regs_flag(struct pt_regs *regs, int flag)
	return ret;
}

struct task_struct;

void update_cr_regs(struct task_struct *task);

/*
 * These are defined as per linux/ptrace.h, which see.
 */
@@ -4,7 +4,6 @@

#include <linux/uaccess.h>
#include <linux/ptrace.h>
#include <asm/switch_to.h>

struct stack_frame_user {
	unsigned long back_chain;
@@ -1,49 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright IBM Corp. 1999, 2009
 *
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#ifndef __ASM_SWITCH_TO_H
#define __ASM_SWITCH_TO_H

#include <linux/thread_info.h>
#include <asm/fpu/api.h>
#include <asm/ptrace.h>
#include <asm/guarded_storage.h>

extern struct task_struct *__switch_to(void *, void *);
extern void update_cr_regs(struct task_struct *task);

static inline void save_access_regs(unsigned int *acrs)
{
	typedef struct { int _[NUM_ACRS]; } acrstype;

	asm volatile("stam 0,15,%0" : "=Q" (*(acrstype *)acrs));
}

static inline void restore_access_regs(unsigned int *acrs)
{
	typedef struct { int _[NUM_ACRS]; } acrstype;

	asm volatile("lam 0,15,%0" : : "Q" (*(acrstype *)acrs));
}

#define switch_to(prev, next, last) do {				\
	/* save_fpu_regs() sets the CIF_FPU flag, which enforces	\
	 * a restore of the floating point / vector registers as	\
	 * soon as the next task returns to user space			\
	 */								\
	save_fpu_regs();						\
	save_access_regs(&prev->thread.acrs[0]);			\
	save_ri_cb(prev->thread.ri_cb);					\
	save_gs_cb(prev->thread.gs_cb);					\
	update_cr_regs(next);						\
	restore_access_regs(&next->thread.acrs[0]);			\
	restore_ri_cb(next->thread.ri_cb, prev->thread.ri_cb);		\
	restore_gs_cb(next->thread.gs_cb);				\
	prev = __switch_to(prev, next);					\
} while (0)

#endif /* __ASM_SWITCH_TO_H */
@@ -1,19 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Support for Vector Instructions
 *
 * This wrapper header file allows to use the vector instruction macros in
 * both assembler files as well as in inline assemblies in C files.
 */

#ifndef __ASM_S390_VX_INSN_H
#define __ASM_S390_VX_INSN_H

#include <asm/vx-insn-asm.h>

#ifndef __ASSEMBLY__

asm(".include \"asm/vx-insn-asm.h\"\n");

#endif /* __ASSEMBLY__ */
#endif /* __ASM_S390_VX_INSN_H */
@@ -166,5 +166,6 @@ int populate_cache_leaves(unsigned int cpu)
			ci_leaf_init(this_leaf++, pvt, ctype, level, cpu);
		}
	}
	this_cpu_ci->cpu_map_populated = true;
	return 0;
}
@@ -24,12 +24,12 @@
#include <linux/tty.h>
#include <linux/personality.h>
#include <linux/binfmts.h>
#include <asm/access-regs.h>
#include <asm/ucontext.h>
#include <linux/uaccess.h>
#include <asm/lowcore.h>
#include <asm/switch_to.h>
#include <asm/vdso.h>
#include <asm/fpu/api.h>
#include <asm/fpu.h>
#include "compat_linux.h"
#include "compat_ptrace.h"
#include "entry.h"
@@ -56,7 +56,7 @@ typedef struct
static void store_sigregs(void)
{
	save_access_regs(current->thread.acrs);
	save_fpu_regs();
	save_user_fpu_regs();
}

/* Load registers after signal return */
@@ -79,7 +79,7 @@ static int save_sigregs32(struct pt_regs *regs, _sigregs32 __user *sregs)
		user_sregs.regs.gprs[i] = (__u32) regs->gprs[i];
	memcpy(&user_sregs.regs.acrs, current->thread.acrs,
	       sizeof(user_sregs.regs.acrs));
	fpregs_store((_s390_fp_regs *) &user_sregs.fpregs, &current->thread.fpu);
	fpregs_store((_s390_fp_regs *) &user_sregs.fpregs, &current->thread.ufpu);
	if (__copy_to_user(sregs, &user_sregs, sizeof(_sigregs32)))
		return -EFAULT;
	return 0;
@@ -113,7 +113,7 @@ static int restore_sigregs32(struct pt_regs *regs,_sigregs32 __user *sregs)
		regs->gprs[i] = (__u64) user_sregs.regs.gprs[i];
	memcpy(&current->thread.acrs, &user_sregs.regs.acrs,
	       sizeof(current->thread.acrs));
	fpregs_load((_s390_fp_regs *) &user_sregs.fpregs, &current->thread.fpu);
	fpregs_load((_s390_fp_regs *)&user_sregs.fpregs, &current->thread.ufpu);

	clear_pt_regs_flag(regs, PIF_SYSCALL); /* No longer in a system call */
	return 0;
@@ -136,11 +136,11 @@ static int save_sigregs_ext32(struct pt_regs *regs,
	/* Save vector registers to signal stack */
	if (cpu_has_vx()) {
		for (i = 0; i < __NUM_VXRS_LOW; i++)
			vxrs[i] = current->thread.fpu.vxrs[i].low;
			vxrs[i] = current->thread.ufpu.vxrs[i].low;
		if (__copy_to_user(&sregs_ext->vxrs_low, vxrs,
				   sizeof(sregs_ext->vxrs_low)) ||
		    __copy_to_user(&sregs_ext->vxrs_high,
				   current->thread.fpu.vxrs + __NUM_VXRS_LOW,
				   current->thread.ufpu.vxrs + __NUM_VXRS_LOW,
				   sizeof(sregs_ext->vxrs_high)))
			return -EFAULT;
	}
@@ -165,12 +165,12 @@ static int restore_sigregs_ext32(struct pt_regs *regs,
	if (cpu_has_vx()) {
		if (__copy_from_user(vxrs, &sregs_ext->vxrs_low,
				     sizeof(sregs_ext->vxrs_low)) ||
		    __copy_from_user(current->thread.fpu.vxrs + __NUM_VXRS_LOW,
		    __copy_from_user(current->thread.ufpu.vxrs + __NUM_VXRS_LOW,
				     &sregs_ext->vxrs_high,
				     sizeof(sregs_ext->vxrs_high)))
			return -EFAULT;
		for (i = 0; i < __NUM_VXRS_LOW; i++)
			current->thread.fpu.vxrs[i].low = vxrs[i];
			current->thread.ufpu.vxrs[i].low = vxrs[i];
	}
	return 0;
}
@@ -184,7 +184,7 @@ COMPAT_SYSCALL_DEFINE0(sigreturn)
	if (get_compat_sigset(&set, (compat_sigset_t __user *)frame->sc.oldmask))
		goto badframe;
	set_current_blocked(&set);
	save_fpu_regs();
	save_user_fpu_regs();
	if (restore_sigregs32(regs, &frame->sregs))
		goto badframe;
	if (restore_sigregs_ext32(regs, &frame->sregs_ext))
@@ -207,7 +207,7 @@ COMPAT_SYSCALL_DEFINE0(rt_sigreturn)
	set_current_blocked(&set);
	if (compat_restore_altstack(&frame->uc.uc_stack))
		goto badframe;
	save_fpu_regs();
	save_user_fpu_regs();
	if (restore_sigregs32(regs, &frame->uc.uc_mcontext))
		goto badframe;
	if (restore_sigregs_ext32(regs, &frame->uc.uc_mcontext_ext))
@@ -22,7 +22,7 @@
#include <asm/ipl.h>
#include <asm/sclp.h>
#include <asm/maccess.h>
#include <asm/fpu/api.h>
#include <asm/fpu.h>

#define PTR_ADD(x, y) (((char *) (x)) + ((unsigned long) (y)))
#define PTR_SUB(x, y) (((char *) (x)) - ((unsigned long) (y)))
@@ -146,12 +146,41 @@ void notrace diag_stat_inc_norecursion(enum diag_stat_enum nr)
}
EXPORT_SYMBOL(diag_stat_inc_norecursion);

/*
 * Diagnose 0c: Pseudo Timer
 */
void diag0c(struct hypfs_diag0c_entry *data)
{
	diag_stat_inc(DIAG_STAT_X00C);
	diag_amode31_ops.diag0c(virt_to_phys(data));
}

/*
 * Diagnose 14: Input spool file manipulation
 *
 * The subcode parameter determines the type of the first parameter rx.
 * Currently used are the following 3 subcommands:
 * 0x0:   Read the Next Spool File Buffer (Data Record)
 * 0x28:  Position a Spool File to the Designated Record
 * 0xfff: Retrieve Next File Descriptor
 *
 * For subcommands 0x0 and 0xfff, the value of the first parameter is
 * a virtual address of a memory buffer and needs virtual to physical
 * address translation. For other subcommands the rx parameter is not
 * a virtual address.
 */
int diag14(unsigned long rx, unsigned long ry1, unsigned long subcode)
{
	diag_stat_inc(DIAG_STAT_X014);
	switch (subcode) {
	case 0x0:
	case 0xfff:
		rx = virt_to_phys((void *)rx);
		break;
	default:
		/* Do nothing */
		break;
	}
	return diag_amode31_ops.diag14(rx, ry1, subcode);
}
EXPORT_SYMBOL(diag14);
@@ -265,6 +294,6 @@ EXPORT_SYMBOL(diag224);
int diag26c(void *req, void *resp, enum diag26c_sc subcode)
{
	diag_stat_inc(DIAG_STAT_X26C);
	return diag_amode31_ops.diag26c(req, resp, subcode);
	return diag_amode31_ops.diag26c(virt_to_phys(req), virt_to_phys(resp), subcode);
}
EXPORT_SYMBOL(diag26c);
@@ -19,8 +19,10 @@
#include <linux/kernel.h>
#include <asm/asm-extable.h>
#include <linux/memblock.h>
#include <asm/access-regs.h>
#include <asm/diag.h>
#include <asm/ebcdic.h>
#include <asm/fpu.h>
#include <asm/ipl.h>
#include <asm/lowcore.h>
#include <asm/processor.h>
@@ -31,7 +33,6 @@
#include <asm/sclp.h>
#include <asm/facility.h>
#include <asm/boot_data.h>
#include <asm/switch_to.h>
#include "entry.h"

#define decompressor_handled_param(param)				\
@@ -24,7 +24,7 @@
#include <asm/page.h>
#include <asm/sigp.h>
#include <asm/irq.h>
#include <asm/vx-insn.h>
#include <asm/fpu-insn.h>
#include <asm/setup.h>
#include <asm/nmi.h>
#include <asm/nospec-insn.h>
@@ -171,13 +171,13 @@ _LPP_OFFSET = __LC_LPP
	nop	0

/*
 * Scheduler resume function, called by switch_to
 *  gpr2 = (task_struct *) prev
 *  gpr3 = (task_struct *) next
 * Scheduler resume function, called by __switch_to
 *  gpr2 = (task_struct *)prev
 *  gpr3 = (task_struct *)next
 * Returns:
 *  gpr2 = prev
 */
SYM_FUNC_START(__switch_to)
SYM_FUNC_START(__switch_to_asm)
	stmg	%r6,%r15,__SF_GPRS(%r15)	# store gprs of prev task
	lghi	%r4,__TASK_stack
	lghi	%r1,__TASK_thread
@@ -193,7 +193,7 @@ SYM_FUNC_START(__switch_to)
	lmg	%r6,%r15,__SF_GPRS(%r15)	# load gprs of next task
	ALTERNATIVE "nop", "lpp _LPP_OFFSET", 40
	BR_EX	%r14
SYM_FUNC_END(__switch_to)
SYM_FUNC_END(__switch_to_asm)

#if IS_ENABLED(CONFIG_KVM)
/*
@@ -220,8 +220,6 @@ SYM_FUNC_START(__sie64a)
	oi	__SIE_PROG0C+3(%r14),1		# we are going into SIE now
	tm	__SIE_PROG20+3(%r14),3		# last exit...
	jnz	.Lsie_skip
	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
	jo	.Lsie_skip			# exit if fp/vx regs changed
	lg	%r14,__SF_SIE_CONTROL_PHYS(%r15)	# get sie block phys addr
	BPEXIT	__SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST
.Lsie_entry:
@@ -489,16 +487,11 @@ SYM_FUNC_END(psw_idle)
 */
SYM_CODE_START(mcck_int_handler)
	BPOFF
	la	%r1,4095			# validate r1
	spt	__LC_CPU_TIMER_SAVE_AREA-4095(%r1)	# validate cpu timer
	LBEAR	__LC_LAST_BREAK_SAVE_AREA-4095(%r1)	# validate bear
	lmg	%r0,%r15,__LC_GPREGS_SAVE_AREA	# validate gprs
	lmg	%r8,%r9,__LC_MCK_OLD_PSW
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_SYSTEM_DAMAGE
	jo	.Lmcck_panic			# yes -> rest of mcck code invalid
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_CR_VALID
	jno	.Lmcck_panic			# control registers invalid -> panic
	lctlg	%c0,%c15,__LC_CREGS_SAVE_AREA	# validate ctl regs
	ptlb
	lghi	%r14,__LC_CPU_TIMER_SAVE_AREA
	mvc	__LC_MCCK_ENTER_TIMER(8),0(%r14)
@@ -19,6 +19,7 @@ void mcck_int_handler(void);
void restart_int_handler(void);
void early_pgm_check_handler(void);

struct task_struct *__switch_to_asm(struct task_struct *prev, struct task_struct *next);
void __ret_from_fork(struct task_struct *prev, struct pt_regs *regs);
void __do_pgm_check(struct pt_regs *regs);
void __do_syscall(struct pt_regs *regs, int per_trap);
@@ -8,256 +8,186 @@
#include <linux/kernel.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <asm/fpu/types.h>
#include <asm/fpu/api.h>
#include <asm/vx-insn.h>
#include <asm/fpu.h>

void __kernel_fpu_begin(struct kernel_fpu *state, u32 flags)
void __kernel_fpu_begin(struct kernel_fpu *state, int flags)
{
	__vector128 *vxrs = state->vxrs;
	int mask;

	/*
	 * Limit the save to the FPU/vector registers already
	 * in use by the previous context
	 * in use by the previous context.
	 */
	flags &= state->mask;

	flags &= state->hdr.mask;
	if (flags & KERNEL_FPC)
		/* Save floating point control */
		asm volatile("stfpc %0" : "=Q" (state->fpc));

		fpu_stfpc(&state->hdr.fpc);
	if (!cpu_has_vx()) {
		if (flags & KERNEL_VXR_V0V7) {
			/* Save floating-point registers */
			asm volatile("std 0,%0" : "=Q" (state->fprs[0]));
			asm volatile("std 1,%0" : "=Q" (state->fprs[1]));
			asm volatile("std 2,%0" : "=Q" (state->fprs[2]));
			asm volatile("std 3,%0" : "=Q" (state->fprs[3]));
			asm volatile("std 4,%0" : "=Q" (state->fprs[4]));
			asm volatile("std 5,%0" : "=Q" (state->fprs[5]));
			asm volatile("std 6,%0" : "=Q" (state->fprs[6]));
			asm volatile("std 7,%0" : "=Q" (state->fprs[7]));
			asm volatile("std 8,%0" : "=Q" (state->fprs[8]));
			asm volatile("std 9,%0" : "=Q" (state->fprs[9]));
			asm volatile("std 10,%0" : "=Q" (state->fprs[10]));
			asm volatile("std 11,%0" : "=Q" (state->fprs[11]));
			asm volatile("std 12,%0" : "=Q" (state->fprs[12]));
			asm volatile("std 13,%0" : "=Q" (state->fprs[13]));
			asm volatile("std 14,%0" : "=Q" (state->fprs[14]));
			asm volatile("std 15,%0" : "=Q" (state->fprs[15]));
		}
		if (flags & KERNEL_VXR_LOW)
			save_fp_regs_vx(vxrs);
		return;
	}

	/* Test and save vector registers */
	asm volatile (
		/*
		 * Test if any vector register must be saved and, if so,
		 * test if all register can be saved.
		 */
		"	la	1,%[vxrs]\n"	/* load save area */
		"	tmll	%[m],30\n"	/* KERNEL_VXR */
		"	jz	7f\n"		/* no work -> done */
		"	jo	5f\n"		/* -> save V0..V31 */
		/*
		 * Test for special case KERNEL_FPU_MID only. In this
		 * case a vstm V8..V23 is the best instruction
		 */
		"	chi	%[m],12\n"	/* KERNEL_VXR_MID */
		"	jne	0f\n"		/* -> save V8..V23 */
		"	VSTM	8,23,128,1\n"	/* vstm %v8,%v23,128(%r1) */
		"	j	7f\n"
		/* Test and save the first half of 16 vector registers */
		"0:	tmll	%[m],6\n"	/* KERNEL_VXR_LOW */
		"	jz	3f\n"		/* -> KERNEL_VXR_HIGH */
		"	jo	2f\n"		/* 11 -> save V0..V15 */
		"	brc	2,1f\n"		/* 10 -> save V8..V15 */
		"	VSTM	0,7,0,1\n"	/* vstm %v0,%v7,0(%r1) */
		"	j	3f\n"
		"1:	VSTM	8,15,128,1\n"	/* vstm %v8,%v15,128(%r1) */
		"	j	3f\n"
		"2:	VSTM	0,15,0,1\n"	/* vstm %v0,%v15,0(%r1) */
		/* Test and save the second half of 16 vector registers */
		"3:	tmll	%[m],24\n"	/* KERNEL_VXR_HIGH */
		"	jz	7f\n"
		"	jo	6f\n"		/* 11 -> save V16..V31 */
		"	brc	2,4f\n"		/* 10 -> save V24..V31 */
		"	VSTM	16,23,256,1\n"	/* vstm %v16,%v23,256(%r1) */
		"	j	7f\n"
		"4:	VSTM	24,31,384,1\n"	/* vstm %v24,%v31,384(%r1) */
		"	j	7f\n"
		"5:	VSTM	0,15,0,1\n"	/* vstm %v0,%v15,0(%r1) */
		"6:	VSTM	16,31,256,1\n"	/* vstm %v16,%v31,256(%r1) */
		"7:"
		: [vxrs] "=Q" (*(struct vx_array *) &state->vxrs)
		: [m] "d" (flags)
		: "1", "cc");
	mask = flags & KERNEL_VXR;
	if (mask == KERNEL_VXR) {
		vxrs += fpu_vstm(0, 15, vxrs);
		vxrs += fpu_vstm(16, 31, vxrs);
		return;
	}
	if (mask == KERNEL_VXR_MID) {
		vxrs += fpu_vstm(8, 23, vxrs);
		return;
	}
	mask = flags & KERNEL_VXR_LOW;
	if (mask) {
		if (mask == KERNEL_VXR_LOW)
			vxrs += fpu_vstm(0, 15, vxrs);
		else if (mask == KERNEL_VXR_V0V7)
			vxrs += fpu_vstm(0, 7, vxrs);
		else
			vxrs += fpu_vstm(8, 15, vxrs);
	}
	mask = flags & KERNEL_VXR_HIGH;
	if (mask) {
		if (mask == KERNEL_VXR_HIGH)
			vxrs += fpu_vstm(16, 31, vxrs);
		else if (mask == KERNEL_VXR_V16V23)
			vxrs += fpu_vstm(16, 23, vxrs);
		else
			vxrs += fpu_vstm(24, 31, vxrs);
	}
}
EXPORT_SYMBOL(__kernel_fpu_begin);
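One detail worth spelling out (it follows from the fpu_vstm()/fpu_vlm() macros, which evaluate to the number of registers transferred): each partial save advances the save-area pointer, so non-adjacent register ranges still pack densely into vxrs[]. For example, for flags = KERNEL_VXR_V0V7 | KERNEL_VXR_V24V31 the code above executes, in effect:

	vxrs += fpu_vstm(0, 7, vxrs);	/* %v0-%v7   -> vxrs[0..7],  returns 8 */
	vxrs += fpu_vstm(24, 31, vxrs);	/* %v24-%v31 -> vxrs[8..15], returns 8 */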
|
||||
void __kernel_fpu_end(struct kernel_fpu *state, u32 flags)
void __kernel_fpu_end(struct kernel_fpu *state, int flags)
{
	__vector128 *vxrs = state->vxrs;
	int mask;

	/*
	 * Limit the restore to the FPU/vector registers of the
	 * previous context that have been overwritte by the
	 * current context
	 * previous context that have been overwritten by the
	 * current context.
	 */
	flags &= state->mask;

	flags &= state->hdr.mask;
	if (flags & KERNEL_FPC)
		/* Restore floating-point controls */
		asm volatile("lfpc %0" : : "Q" (state->fpc));

		fpu_lfpc(&state->hdr.fpc);
	if (!cpu_has_vx()) {
		if (flags & KERNEL_VXR_V0V7) {
			/* Restore floating-point registers */
			asm volatile("ld 0,%0" : : "Q" (state->fprs[0]));
			asm volatile("ld 1,%0" : : "Q" (state->fprs[1]));
			asm volatile("ld 2,%0" : : "Q" (state->fprs[2]));
			asm volatile("ld 3,%0" : : "Q" (state->fprs[3]));
			asm volatile("ld 4,%0" : : "Q" (state->fprs[4]));
			asm volatile("ld 5,%0" : : "Q" (state->fprs[5]));
			asm volatile("ld 6,%0" : : "Q" (state->fprs[6]));
			asm volatile("ld 7,%0" : : "Q" (state->fprs[7]));
			asm volatile("ld 8,%0" : : "Q" (state->fprs[8]));
			asm volatile("ld 9,%0" : : "Q" (state->fprs[9]));
			asm volatile("ld 10,%0" : : "Q" (state->fprs[10]));
			asm volatile("ld 11,%0" : : "Q" (state->fprs[11]));
			asm volatile("ld 12,%0" : : "Q" (state->fprs[12]));
			asm volatile("ld 13,%0" : : "Q" (state->fprs[13]));
			asm volatile("ld 14,%0" : : "Q" (state->fprs[14]));
			asm volatile("ld 15,%0" : : "Q" (state->fprs[15]));
		}
		if (flags & KERNEL_VXR_LOW)
			load_fp_regs_vx(vxrs);
		return;
	}

	/* Test and restore (load) vector registers */
	asm volatile (
		/*
		 * Test if any vector register must be loaded and, if so,
		 * test if all registers can be loaded at once.
		 */
	" la 1,%[vxrs]\n"	/* load restore area */
	" tmll %[m],30\n"	/* KERNEL_VXR */
	" jz 7f\n"		/* no work -> done */
	" jo 5f\n"		/* -> restore V0..V31 */
	/*
	 * Test for special case KERNEL_FPU_MID only. In this
	 * case a vlm V8..V23 is the best instruction
	 */
	" chi %[m],12\n"	/* KERNEL_VXR_MID */
	" jne 0f\n"		/* -> restore V8..V23 */
	" VLM 8,23,128,1\n"	/* vlm %v8,%v23,128(%r1) */
	" j 7f\n"
	/* Test and restore the first half of 16 vector registers */
	"0: tmll %[m],6\n"	/* KERNEL_VXR_LOW */
	" jz 3f\n"		/* -> KERNEL_VXR_HIGH */
	" jo 2f\n"		/* 11 -> restore V0..V15 */
	" brc 2,1f\n"		/* 10 -> restore V8..V15 */
	" VLM 0,7,0,1\n"	/* vlm %v0,%v7,0(%r1) */
	" j 3f\n"
	"1: VLM 8,15,128,1\n"	/* vlm %v8,%v15,128(%r1) */
	" j 3f\n"
	"2: VLM 0,15,0,1\n"	/* vlm %v0,%v15,0(%r1) */
	/* Test and restore the second half of 16 vector registers */
	"3: tmll %[m],24\n"	/* KERNEL_VXR_HIGH */
	" jz 7f\n"
	" jo 6f\n"		/* 11 -> restore V16..V31 */
	" brc 2,4f\n"		/* 10 -> restore V24..V31 */
	" VLM 16,23,256,1\n"	/* vlm %v16,%v23,256(%r1) */
	" j 7f\n"
	"4: VLM 24,31,384,1\n"	/* vlm %v24,%v31,384(%r1) */
	" j 7f\n"
	"5: VLM 0,15,0,1\n"	/* vlm %v0,%v15,0(%r1) */
	"6: VLM 16,31,256,1\n"	/* vlm %v16,%v31,256(%r1) */
	"7:"
	: [vxrs] "=Q" (*(struct vx_array *) &state->vxrs)
	: [m] "d" (flags)
	: "1", "cc");
	mask = flags & KERNEL_VXR;
	if (mask == KERNEL_VXR) {
		vxrs += fpu_vlm(0, 15, vxrs);
		vxrs += fpu_vlm(16, 31, vxrs);
		return;
	}
	if (mask == KERNEL_VXR_MID) {
		vxrs += fpu_vlm(8, 23, vxrs);
		return;
	}
	mask = flags & KERNEL_VXR_LOW;
	if (mask) {
		if (mask == KERNEL_VXR_LOW)
			vxrs += fpu_vlm(0, 15, vxrs);
		else if (mask == KERNEL_VXR_V0V7)
			vxrs += fpu_vlm(0, 7, vxrs);
		else
			vxrs += fpu_vlm(8, 15, vxrs);
	}
	mask = flags & KERNEL_VXR_HIGH;
	if (mask) {
		if (mask == KERNEL_VXR_HIGH)
			vxrs += fpu_vlm(16, 31, vxrs);
		else if (mask == KERNEL_VXR_V16V23)
			vxrs += fpu_vlm(16, 23, vxrs);
		else
			vxrs += fpu_vlm(24, 31, vxrs);
	}
}
EXPORT_SYMBOL(__kernel_fpu_end);

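Taken together, a typical in-kernel user of the begin/end pair above now looks like this minimal sketch (the on-stack declaration macros and the masks appear elsewhere in this pull; the specific mask here is illustrative):

	DECLARE_KERNEL_FPU_ONSTACK16(vxstate);

	kernel_fpu_begin(&vxstate, KERNEL_VXR_LOW);
	/* use %v0..%v15 here; only the requested banks are saved/restored */
	kernel_fpu_end(&vxstate, KERNEL_VXR_LOW);

Since the in-kernel FPU sections are now preemptible, the saved state lives in the on-stack kernel_fpu structure rather than relying on disabled preemption.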
void __load_fpu_regs(void)
void load_fpu_state(struct fpu *state, int flags)
{
	unsigned long *regs = current->thread.fpu.regs;
	struct fpu *state = &current->thread.fpu;
	__vector128 *vxrs = &state->vxrs[0];
	int mask;

	sfpc_safe(state->fpc);
	if (likely(cpu_has_vx())) {
		asm volatile("lgr 1,%0\n"
			     "VLM 0,15,0,1\n"
			     "VLM 16,31,256,1\n"
			     :
			     : "d" (regs)
			     : "1", "cc", "memory");
	} else {
		asm volatile("ld 0,%0" : : "Q" (regs[0]));
		asm volatile("ld 1,%0" : : "Q" (regs[1]));
		asm volatile("ld 2,%0" : : "Q" (regs[2]));
		asm volatile("ld 3,%0" : : "Q" (regs[3]));
		asm volatile("ld 4,%0" : : "Q" (regs[4]));
		asm volatile("ld 5,%0" : : "Q" (regs[5]));
		asm volatile("ld 6,%0" : : "Q" (regs[6]));
		asm volatile("ld 7,%0" : : "Q" (regs[7]));
		asm volatile("ld 8,%0" : : "Q" (regs[8]));
		asm volatile("ld 9,%0" : : "Q" (regs[9]));
		asm volatile("ld 10,%0" : : "Q" (regs[10]));
		asm volatile("ld 11,%0" : : "Q" (regs[11]));
		asm volatile("ld 12,%0" : : "Q" (regs[12]));
		asm volatile("ld 13,%0" : : "Q" (regs[13]));
		asm volatile("ld 14,%0" : : "Q" (regs[14]));
		asm volatile("ld 15,%0" : : "Q" (regs[15]));
	if (flags & KERNEL_FPC)
		fpu_lfpc(&state->fpc);
	if (!cpu_has_vx()) {
		if (flags & KERNEL_VXR_V0V7)
			load_fp_regs_vx(state->vxrs);
		return;
	}
	clear_cpu_flag(CIF_FPU);
}

void load_fpu_regs(void)
{
	raw_local_irq_disable();
	__load_fpu_regs();
	raw_local_irq_enable();
}
EXPORT_SYMBOL(load_fpu_regs);

void save_fpu_regs(void)
{
	unsigned long flags, *regs;
	struct fpu *state;

	local_irq_save(flags);

	if (test_cpu_flag(CIF_FPU))
		goto out;

	state = &current->thread.fpu;
	regs = current->thread.fpu.regs;

	asm volatile("stfpc %0" : "=Q" (state->fpc));
	if (likely(cpu_has_vx())) {
		asm volatile("lgr 1,%0\n"
			     "VSTM 0,15,0,1\n"
			     "VSTM 16,31,256,1\n"
			     :
			     : "d" (regs)
			     : "1", "cc", "memory");
	} else {
		asm volatile("std 0,%0" : "=Q" (regs[0]));
		asm volatile("std 1,%0" : "=Q" (regs[1]));
		asm volatile("std 2,%0" : "=Q" (regs[2]));
		asm volatile("std 3,%0" : "=Q" (regs[3]));
		asm volatile("std 4,%0" : "=Q" (regs[4]));
		asm volatile("std 5,%0" : "=Q" (regs[5]));
		asm volatile("std 6,%0" : "=Q" (regs[6]));
		asm volatile("std 7,%0" : "=Q" (regs[7]));
		asm volatile("std 8,%0" : "=Q" (regs[8]));
		asm volatile("std 9,%0" : "=Q" (regs[9]));
		asm volatile("std 10,%0" : "=Q" (regs[10]));
		asm volatile("std 11,%0" : "=Q" (regs[11]));
		asm volatile("std 12,%0" : "=Q" (regs[12]));
		asm volatile("std 13,%0" : "=Q" (regs[13]));
		asm volatile("std 14,%0" : "=Q" (regs[14]));
		asm volatile("std 15,%0" : "=Q" (regs[15]));
	mask = flags & KERNEL_VXR;
	if (mask == KERNEL_VXR) {
		fpu_vlm(0, 15, &vxrs[0]);
		fpu_vlm(16, 31, &vxrs[16]);
		return;
	}
	if (mask == KERNEL_VXR_MID) {
		fpu_vlm(8, 23, &vxrs[8]);
		return;
	}
	mask = flags & KERNEL_VXR_LOW;
	if (mask) {
		if (mask == KERNEL_VXR_LOW)
			fpu_vlm(0, 15, &vxrs[0]);
		else if (mask == KERNEL_VXR_V0V7)
			fpu_vlm(0, 7, &vxrs[0]);
		else
			fpu_vlm(8, 15, &vxrs[8]);
	}
	mask = flags & KERNEL_VXR_HIGH;
	if (mask) {
		if (mask == KERNEL_VXR_HIGH)
			fpu_vlm(16, 31, &vxrs[16]);
		else if (mask == KERNEL_VXR_V16V23)
			fpu_vlm(16, 23, &vxrs[16]);
		else
			fpu_vlm(24, 31, &vxrs[24]);
	}
	set_cpu_flag(CIF_FPU);
out:
	local_irq_restore(flags);
}
EXPORT_SYMBOL(save_fpu_regs);

void save_fpu_state(struct fpu *state, int flags)
{
	__vector128 *vxrs = &state->vxrs[0];
	int mask;

	if (flags & KERNEL_FPC)
		fpu_stfpc(&state->fpc);
	if (!cpu_has_vx()) {
		if (flags & KERNEL_VXR_LOW)
			save_fp_regs_vx(state->vxrs);
		return;
	}
	mask = flags & KERNEL_VXR;
	if (mask == KERNEL_VXR) {
		fpu_vstm(0, 15, &vxrs[0]);
		fpu_vstm(16, 31, &vxrs[16]);
		return;
	}
	if (mask == KERNEL_VXR_MID) {
		fpu_vstm(8, 23, &vxrs[8]);
		return;
	}
	mask = flags & KERNEL_VXR_LOW;
	if (mask) {
		if (mask == KERNEL_VXR_LOW)
			fpu_vstm(0, 15, &vxrs[0]);
		else if (mask == KERNEL_VXR_V0V7)
			fpu_vstm(0, 7, &vxrs[0]);
		else
			fpu_vstm(8, 15, &vxrs[8]);
	}
	mask = flags & KERNEL_VXR_HIGH;
	if (mask) {
		if (mask == KERNEL_VXR_HIGH)
			fpu_vstm(16, 31, &vxrs[16]);
		else if (mask == KERNEL_VXR_V16V23)
			fpu_vstm(16, 23, &vxrs[16]);
		else
			fpu_vstm(24, 31, &vxrs[24]);
	}
}
EXPORT_SYMBOL(save_fpu_state);

@@ -1941,8 +1941,7 @@ static void dump_reipl_run(struct shutdown_trigger *trigger)
	    reipl_type == IPL_TYPE_UNKNOWN)
		os_info_flags |= OS_INFO_FLAG_REIPL_CLEAR;
	os_info_entry_add(OS_INFO_FLAGS_ENTRY, &os_info_flags, sizeof(os_info_flags));
	csum = (__force unsigned int)
	       csum_partial(reipl_block_actual, reipl_block_actual->hdr.len, 0);
	csum = (__force unsigned int)cksm(reipl_block_actual, reipl_block_actual->hdr.len, 0);
	abs_lc = get_abs_lowcore();
	abs_lc->ipib = __pa(reipl_block_actual);
	abs_lc->ipib_checksum = csum;

@@ -13,8 +13,10 @@
#include <linux/reboot.h>
#include <linux/ftrace.h>
#include <linux/debug_locks.h>
#include <asm/guarded_storage.h>
#include <asm/pfault.h>
#include <asm/cio.h>
#include <asm/fpu.h>
#include <asm/setup.h>
#include <asm/smp.h>
#include <asm/ipl.h>
@@ -26,7 +28,6 @@
#include <asm/os_info.h>
#include <asm/set_memory.h>
#include <asm/stacktrace.h>
#include <asm/switch_to.h>
#include <asm/nmi.h>
#include <asm/sclp.h>

@@ -23,16 +23,14 @@
#include <linux/export.h>
#include <asm/lowcore.h>
#include <asm/ctlreg.h>
#include <asm/fpu.h>
#include <asm/smp.h>
#include <asm/stp.h>
#include <asm/cputime.h>
#include <asm/nmi.h>
#include <asm/crw.h>
#include <asm/switch_to.h>
#include <asm/asm-offsets.h>
#include <asm/pai.h>
#include <asm/vx-insn.h>
#include <asm/fpu/api.h>

struct mcck_struct {
	unsigned int kill_task : 1;
@@ -204,133 +202,63 @@ void s390_handle_mcck(void)
	}
}

/*
 * returns 0 if register contents could be validated
 * returns 1 otherwise
/**
 * nmi_registers_valid - verify if registers are valid
 * @mci: machine check interruption code
 *
 * Inspect a machine check interruption code and verify if all required
 * registers are valid. For some registers the corresponding validity bit is
 * ignored and the registers are set to the expected value.
 * Returns true if all registers are valid, otherwise false.
 */
static int notrace s390_validate_registers(union mci mci)
static bool notrace nmi_registers_valid(union mci mci)
{
	struct mcesa *mcesa;
	void *fpt_save_area;
	union ctlreg2 cr2;
	int kill_task;
	u64 zero;

	kill_task = 0;
	zero = 0;

	if (!mci.gr || !mci.fp)
		kill_task = 1;
	fpt_save_area = &S390_lowcore.floating_pt_save_area;
	if (!mci.fc) {
		kill_task = 1;
		asm volatile(
			" lfpc %0\n"
			:
			: "Q" (zero));
	} else {
		asm volatile(
			" lfpc %0\n"
			:
			: "Q" (S390_lowcore.fpt_creg_save_area));
	}

	mcesa = __va(S390_lowcore.mcesad & MCESA_ORIGIN_MASK);
	if (!cpu_has_vx()) {
		/* Validate floating point registers */
		asm volatile(
			" ld 0,0(%0)\n"
			" ld 1,8(%0)\n"
			" ld 2,16(%0)\n"
			" ld 3,24(%0)\n"
			" ld 4,32(%0)\n"
			" ld 5,40(%0)\n"
			" ld 6,48(%0)\n"
			" ld 7,56(%0)\n"
			" ld 8,64(%0)\n"
			" ld 9,72(%0)\n"
			" ld 10,80(%0)\n"
			" ld 11,88(%0)\n"
			" ld 12,96(%0)\n"
			" ld 13,104(%0)\n"
			" ld 14,112(%0)\n"
			" ld 15,120(%0)\n"
			:
			: "a" (fpt_save_area)
			: "memory");
	} else {
		/* Validate vector registers */
		union ctlreg0 cr0;

		/*
		 * The vector validity must only be checked if not running a
		 * KVM guest. For KVM guests the machine check is forwarded by
		 * KVM and it is the responsibility of the guest to take
		 * appropriate actions. The host vector or FPU values have been
		 * saved by KVM and will be restored by KVM.
		 */
		if (!mci.vr && !test_cpu_flag(CIF_MCCK_GUEST))
			kill_task = 1;
		cr0.reg = S390_lowcore.cregs_save_area[0];
		cr0.afp = cr0.vx = 1;
		local_ctl_load(0, &cr0.reg);
		asm volatile(
			" la 1,%0\n"
			" VLM 0,15,0,1\n"
			" VLM 16,31,256,1\n"
			:
			: "Q" (*(struct vx_array *)mcesa->vector_save_area)
			: "1");
		local_ctl_load(0, &S390_lowcore.cregs_save_area[0]);
	}
	/* Validate access registers */
	asm volatile(
		" lam 0,15,0(%0)\n"
		:
		: "a" (&S390_lowcore.access_regs_save_area)
		: "memory");
	if (!mci.ar)
		kill_task = 1;
	/* Validate guarded storage registers */
	cr2.reg = S390_lowcore.cregs_save_area[2];
	if (cr2.gse) {
		if (!mci.gs) {
			/*
			 * 2 cases:
			 * - machine check in kernel or userspace
			 * - machine check while running SIE (KVM guest)
			 * For kernel or userspace the userspace values of
			 * guarded storage control can not be recreated, the
			 * process must be terminated.
			 * For SIE the guest values of guarded storage can not
			 * be recreated. This is either due to a bug or due to
			 * GS being disabled in the guest. The guest will be
			 * notified by KVM code and the guests machine check
			 * handling must take care of this. The host values
			 * are saved by KVM and are not affected.
			 */
			if (!test_cpu_flag(CIF_MCCK_GUEST))
				kill_task = 1;
		} else {
			load_gs_cb((struct gs_cb *)mcesa->guarded_storage_save_area);
		}
	}
	/*
	 * The getcpu vdso syscall reads CPU number from the programmable
	 * The getcpu vdso syscall reads the CPU number from the programmable
	 * field of the TOD clock. Disregard the TOD programmable register
	 * validity bit and load the CPU number into the TOD programmable
	 * field unconditionally.
	 * validity bit and load the CPU number into the TOD programmable field
	 * unconditionally.
	 */
	set_tod_programmable_field(raw_smp_processor_id());
	/* Validate clock comparator register */
	/*
	 * Set the clock comparator register to the next expected value.
	 */
	set_clock_comparator(S390_lowcore.clock_comparator);

	if (!mci.gr || !mci.fp || !mci.fc)
		return false;
	/*
	 * The vector validity must only be checked if not running a
	 * KVM guest. For KVM guests the machine check is forwarded by
	 * KVM and it is the responsibility of the guest to take
	 * appropriate actions. The host vector or FPU values have been
	 * saved by KVM and will be restored by KVM.
	 */
	if (!mci.vr && !test_cpu_flag(CIF_MCCK_GUEST))
		return false;
	if (!mci.ar)
		return false;
	/*
	 * Two cases for guarded storage registers:
	 * - machine check in kernel or userspace
	 * - machine check while running SIE (KVM guest)
	 * For kernel or userspace the userspace values of guarded storage
	 * control can not be recreated, the process must be terminated.
	 * For SIE the guest values of guarded storage can not be recreated.
	 * This is either due to a bug or due to GS being disabled in the
	 * guest. The guest will be notified by KVM code and the guests machine
	 * check handling must take care of this. The host values are saved by
	 * KVM and are not affected.
	 */
	cr2.reg = S390_lowcore.cregs_save_area[2];
	if (cr2.gse && !mci.gs && !test_cpu_flag(CIF_MCCK_GUEST))
		return false;
	if (!mci.ms || !mci.pm || !mci.ia)
		kill_task = 1;

	return kill_task;
		return false;
	return true;
}
NOKPROBE_SYMBOL(s390_validate_registers);
NOKPROBE_SYMBOL(nmi_registers_valid);

/*
 * Backup the guest's machine check info to its description block

@@ -428,7 +356,7 @@ void notrace s390_do_machine_check(struct pt_regs *regs)
			s390_handle_damage();
		}
	}
	if (s390_validate_registers(mci)) {
	if (!nmi_registers_valid(mci)) {
		if (!user_mode(regs))
			s390_handle_damage();
		/*

@@ -29,7 +29,7 @@ static struct os_info os_info __page_aligned_data;
u32 os_info_csum(struct os_info *os_info)
{
	int size = sizeof(*os_info) - offsetof(struct os_info, version_major);
	return (__force u32)csum_partial(&os_info->version_major, size, 0);
	return (__force u32)cksm(&os_info->version_major, size, 0);
}

/*
@@ -49,7 +49,7 @@ void os_info_entry_add(int nr, void *ptr, u64 size)
{
	os_info.entry[nr].addr = __pa(ptr);
	os_info.entry[nr].size = size;
	os_info.entry[nr].csum = (__force u32)csum_partial(ptr, size, 0);
	os_info.entry[nr].csum = (__force u32)cksm(ptr, size, 0);
	os_info.csum = os_info_csum(&os_info);
}

@@ -98,7 +98,7 @@ static void os_info_old_alloc(int nr, int align)
		msg = "copy failed";
		goto fail_free;
	}
	csum = (__force u32)csum_partial(buf_align, size, 0);
	csum = (__force u32)cksm(buf_align, size, 0);
	if (csum != os_info_old->entry[nr].csum) {
		msg = "checksum failed";
		goto fail_free;

@@ -98,6 +98,7 @@ static void paicrypt_event_destroy(struct perf_event *event)
		    event->attr.config, event->cpu,
		    cpump->active_events, cpump->mode,
		    refcount_read(&cpump->refcnt));
	free_page(PAI_SAVE_AREA(event));
	if (refcount_dec_and_test(&cpump->refcnt)) {
		debug_sprintf_event(cfm_dbg, 4, "%s page %#lx save %p\n",
				    __func__, (unsigned long)cpump->page,
@@ -260,6 +261,7 @@ static int paicrypt_event_init(struct perf_event *event)
{
	struct perf_event_attr *a = &event->attr;
	struct paicrypt_map *cpump;
	int rc = 0;

	/* PAI crypto PMU registered as PERF_TYPE_RAW, check event type */
	if (a->type != PERF_TYPE_RAW && event->pmu->type != a->type)
@@ -274,10 +276,21 @@ static int paicrypt_event_init(struct perf_event *event)
	/* Allow only CRYPTO_ALL for sampling. */
	if (a->sample_period && a->config != PAI_CRYPTO_BASE)
		return -EINVAL;
	/* Get a page to store last counter values for sampling */
	if (a->sample_period) {
		PAI_SAVE_AREA(event) = get_zeroed_page(GFP_KERNEL);
		if (!PAI_SAVE_AREA(event)) {
			rc = -ENOMEM;
			goto out;
		}
	}

	cpump = paicrypt_busy(event);
	if (IS_ERR(cpump))
		return PTR_ERR(cpump);
	if (IS_ERR(cpump)) {
		free_page(PAI_SAVE_AREA(event));
		rc = PTR_ERR(cpump);
		goto out;
	}

	event->destroy = paicrypt_event_destroy;

@@ -293,7 +306,8 @@ static int paicrypt_event_init(struct perf_event *event)
	}

	static_branch_inc(&pai_key);
	return 0;
out:
	return rc;
}

static void paicrypt_read(struct perf_event *event)

@@ -310,20 +324,15 @@ static void paicrypt_read(struct perf_event *event)

static void paicrypt_start(struct perf_event *event, int flags)
{
	struct paicrypt_mapptr *mp = this_cpu_ptr(paicrypt_root.mapptr);
	struct paicrypt_map *cpump = mp->mapptr;
	u64 sum;

	/* Event initialization sets last_tag to 0. When later on the events
	 * are deleted and re-added, do not reset the event count value to zero.
	 * Events are added, deleted and re-added when 2 or more events
	 * are active at the same time.
	 */
	if (!event->attr.sample_period) {	/* Counting */
		if (!event->hw.last_tag) {
			event->hw.last_tag = 1;
			sum = paicrypt_getall(event);	/* Get current value */
			local64_set(&event->hw.prev_count, sum);
		}
		sum = paicrypt_getall(event);	/* Get current value */
		local64_set(&event->hw.prev_count, sum);
	} else {				/* Sampling */
		cpump->event = event;
		perf_sched_cb_inc(event->pmu);
	}
}
@@ -339,7 +348,6 @@ static int paicrypt_add(struct perf_event *event, int flags)
		WRITE_ONCE(S390_lowcore.ccd, ccd);
		local_ctl_set_bit(0, CR0_CRYPTOGRAPHY_COUNTER_BIT);
	}
	cpump->event = event;
	if (flags & PERF_EF_START)
		paicrypt_start(event, PERF_EF_RELOAD);
	event->hw.state = 0;

@@ -367,23 +375,34 @@ static void paicrypt_del(struct perf_event *event, int flags)
	}
}

/* Create raw data and save it in buffer. Returns number of bytes copied.
 * Saves only positive counter entries of the form
/* Create raw data and save it in buffer. Calculate the delta for each
 * counter between this invocation and the last invocation.
 * Returns number of bytes copied.
 * Saves only entries with positive counter difference of the form
 * 2 bytes: Number of counter
 * 8 bytes: Value of counter
 */
static size_t paicrypt_copy(struct pai_userdata *userdata, unsigned long *page,
			    bool exclude_user, bool exclude_kernel)
			    unsigned long *page_old, bool exclude_user,
			    bool exclude_kernel)
{
	int i, outidx = 0;

	for (i = 1; i <= paicrypt_cnt; i++) {
		u64 val = 0;
		u64 val = 0, val_old = 0;

		if (!exclude_kernel)
		if (!exclude_kernel) {
			val += paicrypt_getctr(page, i, true);
		if (!exclude_user)
			val_old += paicrypt_getctr(page_old, i, true);
		}
		if (!exclude_user) {
			val += paicrypt_getctr(page, i, false);
			val_old += paicrypt_getctr(page_old, i, false);
		}
		if (val >= val_old)
			val -= val_old;
		else
			val = (~0ULL - val_old) + val + 1;
		if (val) {
			userdata[outidx].num = i;
			userdata[outidx].value = val;
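The delta computation above is the heart of the multi-event sampling change: instead of zeroing the lowcore counter page after each read, the previous values are kept and subtracted, with an explicit wraparound branch for counters that overflowed 64 bits. As a standalone sketch (the helper name is hypothetical, the arithmetic is taken verbatim from the hunk):

	/* Wraparound-safe counter delta, as used by paicrypt_copy() above. */
	static inline u64 pai_ctr_delta(u64 val, u64 val_old)
	{
		if (val >= val_old)
			return val - val_old;
		/* counter wrapped: distance up to ~0ULL, plus val, plus one */
		return (~0ULL - val_old) + val + 1;
	}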
@@ -426,8 +445,8 @@ static int paicrypt_push_sample(size_t rawsize, struct paicrypt_map *cpump,

	overflow = perf_event_overflow(event, &data, &regs);
	perf_event_update_userpage(event);
	/* Clear lowcore page after read */
	memset(cpump->page, 0, PAGE_SIZE);
	/* Save crypto counter lowcore page after reading event data. */
	memcpy((void *)PAI_SAVE_AREA(event), cpump->page, PAGE_SIZE);
	return overflow;
}

@@ -443,6 +462,7 @@ static int paicrypt_have_sample(void)
	if (!event)		/* No event active */
		return 0;
	rawsize = paicrypt_copy(cpump->save, cpump->page,
				(unsigned long *)PAI_SAVE_AREA(event),
				cpump->event->attr.exclude_user,
				cpump->event->attr.exclude_kernel);
	if (rawsize)		/* No incremented counters */
@@ -694,6 +714,12 @@ static int __init attr_event_init_one(struct attribute **attrs, int num)
{
	struct perf_pmu_events_attr *pa;

	/* Index larger than array_size, no counter name available */
	if (num >= ARRAY_SIZE(paicrypt_ctrnames)) {
		attrs[num] = NULL;
		return 0;
	}

	pa = kzalloc(sizeof(*pa), GFP_KERNEL);
	if (!pa)
		return -ENOMEM;
@@ -714,14 +740,13 @@ static int __init attr_event_init(void)
	struct attribute **attrs;
	int ret, i;

	attrs = kmalloc_array(ARRAY_SIZE(paicrypt_ctrnames) + 1, sizeof(*attrs),
			      GFP_KERNEL);
	attrs = kmalloc_array(paicrypt_cnt + 2, sizeof(*attrs), GFP_KERNEL);
	if (!attrs)
		return -ENOMEM;
	for (i = 0; i < ARRAY_SIZE(paicrypt_ctrnames); i++) {
	for (i = 0; i <= paicrypt_cnt; i++) {
		ret = attr_event_init_one(attrs, i);
		if (ret) {
			attr_event_free(attrs, i - 1);
			attr_event_free(attrs, i);
			return ret;
		}
	}

@@ -742,8 +767,10 @@ static int __init paicrypt_init(void)
	paicrypt_cnt = ib.num_cc;
	if (paicrypt_cnt == 0)
		return 0;
	if (paicrypt_cnt >= PAI_CRYPTO_MAXCTR)
		paicrypt_cnt = PAI_CRYPTO_MAXCTR - 1;
	if (paicrypt_cnt >= PAI_CRYPTO_MAXCTR) {
		pr_err("Too many PMU pai_crypto counters %d\n", paicrypt_cnt);
		return -E2BIG;
	}

	rc = attr_event_init();		/* Export known PAI crypto events */
	if (rc) {

|
@ -120,6 +120,7 @@ static void paiext_event_destroy(struct perf_event *event)
|
||||
struct paiext_mapptr *mp = per_cpu_ptr(paiext_root.mapptr, event->cpu);
|
||||
struct paiext_map *cpump = mp->mapptr;
|
||||
|
||||
free_page(PAI_SAVE_AREA(event));
|
||||
mutex_lock(&paiext_reserve_mutex);
|
||||
cpump->event = NULL;
|
||||
if (refcount_dec_and_test(&cpump->refcnt)) /* Last reference gone */
|
||||
@ -202,7 +203,6 @@ static int paiext_alloc(struct perf_event_attr *a, struct perf_event *event)
|
||||
}
|
||||
|
||||
rc = 0;
|
||||
cpump->event = event;
|
||||
|
||||
undo:
|
||||
if (rc) {
|
||||
@ -256,10 +256,18 @@ static int paiext_event_init(struct perf_event *event)
|
||||
/* Prohibit exclude_user event selection */
|
||||
if (a->exclude_user)
|
||||
return -EINVAL;
|
||||
/* Get a page to store last counter values for sampling */
|
||||
if (a->sample_period) {
|
||||
PAI_SAVE_AREA(event) = get_zeroed_page(GFP_KERNEL);
|
||||
if (!PAI_SAVE_AREA(event))
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
rc = paiext_alloc(a, event);
|
||||
if (rc)
|
||||
if (rc) {
|
||||
free_page(PAI_SAVE_AREA(event));
|
||||
return rc;
|
||||
}
|
||||
event->destroy = paiext_event_destroy;
|
||||
|
||||
if (a->sample_period) {
|
||||
@@ -319,15 +327,15 @@ static void paiext_read(struct perf_event *event)

static void paiext_start(struct perf_event *event, int flags)
{
	struct paiext_mapptr *mp = this_cpu_ptr(paiext_root.mapptr);
	struct paiext_map *cpump = mp->mapptr;
	u64 sum;

	if (!event->attr.sample_period) {	/* Counting */
		if (!event->hw.last_tag) {
			event->hw.last_tag = 1;
			sum = paiext_getall(event);	/* Get current value */
			local64_set(&event->hw.prev_count, sum);
		}
		sum = paiext_getall(event);	/* Get current value */
		local64_set(&event->hw.prev_count, sum);
	} else {				/* Sampling */
		cpump->event = event;
		perf_sched_cb_inc(event->pmu);
	}
}
@@ -346,7 +354,6 @@ static int paiext_add(struct perf_event *event, int flags)
		debug_sprintf_event(paiext_dbg, 4, "%s 1508 %llx acc %llx\n",
				    __func__, S390_lowcore.aicd, pcb->acc);
	}
	cpump->event = event;
	if (flags & PERF_EF_START)
		paiext_start(event, PERF_EF_RELOAD);
	event->hw.state = 0;

@@ -384,13 +391,19 @@ static void paiext_del(struct perf_event *event, int flags)
 * 2 bytes: Number of counter
 * 8 bytes: Value of counter
 */
static size_t paiext_copy(struct pai_userdata *userdata, unsigned long *area)
static size_t paiext_copy(struct pai_userdata *userdata, unsigned long *area,
			  unsigned long *area_old)
{
	int i, outidx = 0;

	for (i = 1; i <= paiext_cnt; i++) {
		u64 val = paiext_getctr(area, i);
		u64 val_old = paiext_getctr(area_old, i);

		if (val >= val_old)
			val -= val_old;
		else
			val = (~0ULL - val_old) + val + 1;
		if (val) {
			userdata[outidx].num = i;
			userdata[outidx].value = val;
@@ -446,8 +459,9 @@ static int paiext_push_sample(size_t rawsize, struct paiext_map *cpump,

	overflow = perf_event_overflow(event, &data, &regs);
	perf_event_update_userpage(event);
	/* Clear lowcore area after read */
	memset(cpump->area, 0, PAIE1_CTRBLOCK_SZ);
	/* Save NNPA lowcore area after read in event */
	memcpy((void *)PAI_SAVE_AREA(event), cpump->area,
	       PAIE1_CTRBLOCK_SZ);
	return overflow;
}

@@ -462,7 +476,8 @@ static int paiext_have_sample(void)

	if (!event)
		return 0;
	rawsize = paiext_copy(cpump->save, cpump->area);
	rawsize = paiext_copy(cpump->save, cpump->area,
			      (unsigned long *)PAI_SAVE_AREA(event));
	if (rawsize)			/* Incremented counters */
		rc = paiext_push_sample(rawsize, cpump, event);
	return rc;
@@ -584,6 +599,12 @@ static int __init attr_event_init_one(struct attribute **attrs, int num)
{
	struct perf_pmu_events_attr *pa;

	/* Index larger than array_size, no counter name available */
	if (num >= ARRAY_SIZE(paiext_ctrnames)) {
		attrs[num] = NULL;
		return 0;
	}

	pa = kzalloc(sizeof(*pa), GFP_KERNEL);
	if (!pa)
		return -ENOMEM;
@@ -604,14 +625,13 @@ static int __init attr_event_init(void)
	struct attribute **attrs;
	int ret, i;

	attrs = kmalloc_array(ARRAY_SIZE(paiext_ctrnames) + 1, sizeof(*attrs),
			      GFP_KERNEL);
	attrs = kmalloc_array(paiext_cnt + 2, sizeof(*attrs), GFP_KERNEL);
	if (!attrs)
		return -ENOMEM;
	for (i = 0; i < ARRAY_SIZE(paiext_ctrnames); i++) {
	for (i = 0; i <= paiext_cnt; i++) {
		ret = attr_event_init_one(attrs, i);
		if (ret) {
			attr_event_free(attrs, i - 1);
			attr_event_free(attrs, i);
			return ret;
		}
	}

@@ -5,8 +5,7 @@
#include <linux/errno.h>
#include <linux/bug.h>
#include <asm/ptrace.h>
#include <asm/fpu/api.h>
#include <asm/fpu/types.h>
#include <asm/fpu.h>

u64 perf_reg_value(struct pt_regs *regs, int idx)
{
@@ -20,10 +19,7 @@ u64 perf_reg_value(struct pt_regs *regs, int idx)
		return 0;

	idx -= PERF_REG_S390_FP0;
	if (cpu_has_vx())
		fp = *(freg_t *)(current->thread.fpu.vxrs + idx);
	else
		fp = current->thread.fpu.fprs[idx];
	fp = *(freg_t *)(current->thread.ufpu.vxrs + idx);
	return fp.ui;
}

@@ -65,6 +61,6 @@ void perf_get_regs_user(struct perf_regs *regs_user,
	 */
	regs_user->regs = task_pt_regs(current);
	if (user_mode(regs_user->regs))
		save_fpu_regs();
		save_user_fpu_regs();
	regs_user->abi = perf_reg_abi(current);
}

@@ -31,15 +31,19 @@
#include <linux/init_task.h>
#include <linux/entry-common.h>
#include <linux/io.h>
#include <asm/guarded_storage.h>
#include <asm/access-regs.h>
#include <asm/switch_to.h>
#include <asm/cpu_mf.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/vtimer.h>
#include <asm/exec.h>
#include <asm/fpu.h>
#include <asm/irq.h>
#include <asm/nmi.h>
#include <asm/smp.h>
#include <asm/stacktrace.h>
#include <asm/switch_to.h>
#include <asm/runtime_instr.h>
#include <asm/unwind.h>
#include "entry.h"
@@ -84,13 +88,13 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	/*
	 * Save the floating-point or vector register state of the current
	 * task and set the CIF_FPU flag to lazy restore the FPU register
	 * task and set the TIF_FPU flag to lazy restore the FPU register
	 * state when returning to user space.
	 */
	save_fpu_regs();
	save_user_fpu_regs();

	*dst = *src;
	dst->thread.fpu.regs = dst->thread.fpu.fprs;
	dst->thread.kfpu_flags = 0;

	/*
	 * Don't transfer over the runtime instrumentation or the guarded
@@ -186,8 +190,23 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)

void execve_tail(void)
{
	current->thread.fpu.fpc = 0;
	asm volatile("sfpc %0" : : "d" (0));
	current->thread.ufpu.fpc = 0;
	fpu_sfpc(0);
}

struct task_struct *__switch_to(struct task_struct *prev, struct task_struct *next)
{
	save_user_fpu_regs();
	save_kernel_fpu_regs(&prev->thread);
	save_access_regs(&prev->thread.acrs[0]);
	save_ri_cb(prev->thread.ri_cb);
	save_gs_cb(prev->thread.gs_cb);
	update_cr_regs(next);
	restore_kernel_fpu_regs(&next->thread);
	restore_access_regs(&next->thread.acrs[0]);
	restore_ri_cb(next->thread.ri_cb, prev->thread.ri_cb);
	restore_gs_cb(next->thread.gs_cb);
	return __switch_to_asm(prev, next);
}

unsigned long __get_wchan(struct task_struct *p)

@@ -24,13 +24,14 @@
#include <linux/seccomp.h>
#include <linux/compat.h>
#include <trace/syscall.h>
#include <asm/guarded_storage.h>
#include <asm/access-regs.h>
#include <asm/page.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/switch_to.h>
#include <asm/runtime_instr.h>
#include <asm/facility.h>
#include <asm/fpu/api.h>
#include <asm/fpu.h>

#include "entry.h"

@@ -246,22 +247,15 @@ static unsigned long __peek_user(struct task_struct *child, addr_t addr)
		/*
		 * floating point control reg. is in the thread structure
		 */
		tmp = child->thread.fpu.fpc;
		tmp = child->thread.ufpu.fpc;
		tmp <<= BITS_PER_LONG - 32;

	} else if (addr < offsetof(struct user, regs.fp_regs) + sizeof(s390_fp_regs)) {
		/*
		 * floating point regs. are either in child->thread.fpu
		 * or the child->thread.fpu.vxrs array
		 * floating point regs. are in the child->thread.ufpu.vxrs array
		 */
		offset = addr - offsetof(struct user, regs.fp_regs.fprs);
		if (cpu_has_vx())
			tmp = *(addr_t *)
			       ((addr_t) child->thread.fpu.vxrs + 2*offset);
		else
			tmp = *(addr_t *)
			       ((addr_t) child->thread.fpu.fprs + offset);

		tmp = *(addr_t *)((addr_t)child->thread.ufpu.vxrs + 2 * offset);
	} else if (addr < offsetof(struct user, regs.per_info) + sizeof(per_struct)) {
		/*
		 * Handle access to the per_info structure.

@@ -395,21 +389,14 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
		 */
		if ((unsigned int)data != 0)
			return -EINVAL;
		child->thread.fpu.fpc = data >> (BITS_PER_LONG - 32);
		child->thread.ufpu.fpc = data >> (BITS_PER_LONG - 32);

	} else if (addr < offsetof(struct user, regs.fp_regs) + sizeof(s390_fp_regs)) {
		/*
		 * floating point regs. are either in child->thread.fpu
		 * or the child->thread.fpu.vxrs array
		 * floating point regs. are in the child->thread.ufpu.vxrs array
		 */
		offset = addr - offsetof(struct user, regs.fp_regs.fprs);
		if (cpu_has_vx())
			*(addr_t *)((addr_t)
				child->thread.fpu.vxrs + 2*offset) = data;
		else
			*(addr_t *)((addr_t)
				child->thread.fpu.fprs + offset) = data;

		*(addr_t *)((addr_t)child->thread.ufpu.vxrs + 2 * offset) = data;
	} else if (addr < offsetof(struct user, regs.per_info) + sizeof(per_struct)) {
		/*
		 * Handle access to the per_info structure.
@@ -622,21 +609,14 @@ static u32 __peek_user_compat(struct task_struct *child, addr_t addr)
		/*
		 * floating point control reg. is in the thread structure
		 */
		tmp = child->thread.fpu.fpc;
		tmp = child->thread.ufpu.fpc;

	} else if (addr < offsetof(struct compat_user, regs.fp_regs) + sizeof(s390_fp_regs)) {
		/*
		 * floating point regs. are either in child->thread.fpu
		 * or the child->thread.fpu.vxrs array
		 * floating point regs. are in the child->thread.ufpu.vxrs array
		 */
		offset = addr - offsetof(struct compat_user, regs.fp_regs.fprs);
		if (cpu_has_vx())
			tmp = *(__u32 *)
			       ((addr_t) child->thread.fpu.vxrs + 2*offset);
		else
			tmp = *(__u32 *)
			       ((addr_t) child->thread.fpu.fprs + offset);

		tmp = *(__u32 *)((addr_t)child->thread.ufpu.vxrs + 2 * offset);
	} else if (addr < offsetof(struct compat_user, regs.per_info) + sizeof(struct compat_per_struct_kernel)) {
		/*
		 * Handle access to the per_info structure.
@@ -748,21 +728,14 @@ static int __poke_user_compat(struct task_struct *child,
		/*
		 * floating point control reg. is in the thread structure
		 */
		child->thread.fpu.fpc = data;
		child->thread.ufpu.fpc = data;

	} else if (addr < offsetof(struct compat_user, regs.fp_regs) + sizeof(s390_fp_regs)) {
		/*
		 * floating point regs. are either in child->thread.fpu
		 * or the child->thread.fpu.vxrs array
		 * floating point regs. are in the child->thread.ufpu.vxrs array
		 */
		offset = addr - offsetof(struct compat_user, regs.fp_regs.fprs);
		if (cpu_has_vx())
			*(__u32 *)((addr_t)
				child->thread.fpu.vxrs + 2*offset) = tmp;
		else
			*(__u32 *)((addr_t)
				child->thread.fpu.fprs + offset) = tmp;

		*(__u32 *)((addr_t)child->thread.ufpu.vxrs + 2 * offset) = tmp;
	} else if (addr < offsetof(struct compat_user, regs.per_info) + sizeof(struct compat_per_struct_kernel)) {
		/*
		 * Handle access to the per_info structure.

@@ -893,10 +866,10 @@ static int s390_fpregs_get(struct task_struct *target,
	_s390_fp_regs fp_regs;

	if (target == current)
		save_fpu_regs();
		save_user_fpu_regs();

	fp_regs.fpc = target->thread.fpu.fpc;
	fpregs_store(&fp_regs, &target->thread.fpu);
	fp_regs.fpc = target->thread.ufpu.fpc;
	fpregs_store(&fp_regs, &target->thread.ufpu);

	return membuf_write(&to, &fp_regs, sizeof(fp_regs));
}
@@ -910,22 +883,17 @@ static int s390_fpregs_set(struct task_struct *target,
	freg_t fprs[__NUM_FPRS];

	if (target == current)
		save_fpu_regs();

	if (cpu_has_vx())
		convert_vx_to_fp(fprs, target->thread.fpu.vxrs);
	else
		memcpy(&fprs, target->thread.fpu.fprs, sizeof(fprs));

		save_user_fpu_regs();
	convert_vx_to_fp(fprs, target->thread.ufpu.vxrs);
	if (count > 0 && pos < offsetof(s390_fp_regs, fprs)) {
		u32 ufpc[2] = { target->thread.fpu.fpc, 0 };
		u32 ufpc[2] = { target->thread.ufpu.fpc, 0 };
		rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ufpc,
					0, offsetof(s390_fp_regs, fprs));
		if (rc)
			return rc;
		if (ufpc[1] != 0)
			return -EINVAL;
		target->thread.fpu.fpc = ufpc[0];
		target->thread.ufpu.fpc = ufpc[0];
	}

	if (rc == 0 && count > 0)
@@ -933,12 +901,7 @@ static int s390_fpregs_set(struct task_struct *target,
					fprs, offsetof(s390_fp_regs, fprs), -1);
	if (rc)
		return rc;

	if (cpu_has_vx())
		convert_fp_to_vx(target->thread.fpu.vxrs, fprs);
	else
		memcpy(target->thread.fpu.fprs, &fprs, sizeof(fprs));

	convert_fp_to_vx(target->thread.ufpu.vxrs, fprs);
	return rc;
}

@@ -988,9 +951,9 @@ static int s390_vxrs_low_get(struct task_struct *target,
	if (!cpu_has_vx())
		return -ENODEV;
	if (target == current)
		save_fpu_regs();
		save_user_fpu_regs();
	for (i = 0; i < __NUM_VXRS_LOW; i++)
		vxrs[i] = target->thread.fpu.vxrs[i].low;
		vxrs[i] = target->thread.ufpu.vxrs[i].low;
	return membuf_write(&to, vxrs, sizeof(vxrs));
}

@@ -1005,15 +968,15 @@ static int s390_vxrs_low_set(struct task_struct *target,
	if (!cpu_has_vx())
		return -ENODEV;
	if (target == current)
		save_fpu_regs();
		save_user_fpu_regs();

	for (i = 0; i < __NUM_VXRS_LOW; i++)
		vxrs[i] = target->thread.fpu.vxrs[i].low;
		vxrs[i] = target->thread.ufpu.vxrs[i].low;

	rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1);
	if (rc == 0)
		for (i = 0; i < __NUM_VXRS_LOW; i++)
			target->thread.fpu.vxrs[i].low = vxrs[i];
			target->thread.ufpu.vxrs[i].low = vxrs[i];

	return rc;
}
@@ -1025,8 +988,8 @@ static int s390_vxrs_high_get(struct task_struct *target,
	if (!cpu_has_vx())
		return -ENODEV;
	if (target == current)
		save_fpu_regs();
	return membuf_write(&to, target->thread.fpu.vxrs + __NUM_VXRS_LOW,
		save_user_fpu_regs();
	return membuf_write(&to, target->thread.ufpu.vxrs + __NUM_VXRS_LOW,
			    __NUM_VXRS_HIGH * sizeof(__vector128));
}

@@ -1040,10 +1003,10 @@ static int s390_vxrs_high_set(struct task_struct *target,
	if (!cpu_has_vx())
		return -ENODEV;
	if (target == current)
		save_fpu_regs();
		save_user_fpu_regs();

	rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				target->thread.fpu.vxrs + __NUM_VXRS_LOW, 0, -1);
				target->thread.ufpu.vxrs + __NUM_VXRS_LOW, 0, -1);
	return rc;
}

@@ -504,12 +504,12 @@ static void __init setup_resources(void)
	int j;
	u64 i;

	code_resource.start = (unsigned long) _text;
	code_resource.end = (unsigned long) _etext - 1;
	data_resource.start = (unsigned long) _etext;
	data_resource.end = (unsigned long) _edata - 1;
	bss_resource.start = (unsigned long) __bss_start;
	bss_resource.end = (unsigned long) __bss_stop - 1;
	code_resource.start = __pa_symbol(_text);
	code_resource.end = __pa_symbol(_etext) - 1;
	data_resource.start = __pa_symbol(_etext);
	data_resource.end = __pa_symbol(_edata) - 1;
	bss_resource.start = __pa_symbol(__bss_start);
	bss_resource.end = __pa_symbol(__bss_stop) - 1;

	for_each_mem_range(i, &start, &end) {
		res = memblock_alloc(sizeof(*res), 8);

@@ -30,8 +30,8 @@
#include <linux/compat.h>
#include <asm/ucontext.h>
#include <linux/uaccess.h>
#include <asm/access-regs.h>
#include <asm/lowcore.h>
#include <asm/switch_to.h>
#include <asm/vdso.h>
#include "entry.h"

@@ -109,7 +109,7 @@ struct rt_sigframe
static void store_sigregs(void)
{
	save_access_regs(current->thread.acrs);
	save_fpu_regs();
	save_user_fpu_regs();
}

/* Load registers after signal return */
@@ -131,7 +131,7 @@ static int save_sigregs(struct pt_regs *regs, _sigregs __user *sregs)
	memcpy(&user_sregs.regs.gprs, &regs->gprs, sizeof(sregs->regs.gprs));
	memcpy(&user_sregs.regs.acrs, current->thread.acrs,
	       sizeof(user_sregs.regs.acrs));
	fpregs_store(&user_sregs.fpregs, &current->thread.fpu);
	fpregs_store(&user_sregs.fpregs, &current->thread.ufpu);
	if (__copy_to_user(sregs, &user_sregs, sizeof(_sigregs)))
		return -EFAULT;
	return 0;
@@ -165,7 +165,7 @@ static int restore_sigregs(struct pt_regs *regs, _sigregs __user *sregs)
	memcpy(&current->thread.acrs, &user_sregs.regs.acrs,
	       sizeof(current->thread.acrs));

	fpregs_load(&user_sregs.fpregs, &current->thread.fpu);
	fpregs_load(&user_sregs.fpregs, &current->thread.ufpu);

	clear_pt_regs_flag(regs, PIF_SYSCALL); /* No longer in a system call */
	return 0;
@@ -181,11 +181,11 @@ static int save_sigregs_ext(struct pt_regs *regs,
	/* Save vector registers to signal stack */
	if (cpu_has_vx()) {
		for (i = 0; i < __NUM_VXRS_LOW; i++)
			vxrs[i] = current->thread.fpu.vxrs[i].low;
			vxrs[i] = current->thread.ufpu.vxrs[i].low;
		if (__copy_to_user(&sregs_ext->vxrs_low, vxrs,
				   sizeof(sregs_ext->vxrs_low)) ||
		    __copy_to_user(&sregs_ext->vxrs_high,
				   current->thread.fpu.vxrs + __NUM_VXRS_LOW,
				   current->thread.ufpu.vxrs + __NUM_VXRS_LOW,
				   sizeof(sregs_ext->vxrs_high)))
			return -EFAULT;
	}
@@ -202,12 +202,12 @@ static int restore_sigregs_ext(struct pt_regs *regs,
	if (cpu_has_vx()) {
		if (__copy_from_user(vxrs, &sregs_ext->vxrs_low,
				     sizeof(sregs_ext->vxrs_low)) ||
		    __copy_from_user(current->thread.fpu.vxrs + __NUM_VXRS_LOW,
		    __copy_from_user(current->thread.ufpu.vxrs + __NUM_VXRS_LOW,
				     &sregs_ext->vxrs_high,
				     sizeof(sregs_ext->vxrs_high)))
			return -EFAULT;
		for (i = 0; i < __NUM_VXRS_LOW; i++)
			current->thread.fpu.vxrs[i].low = vxrs[i];
			current->thread.ufpu.vxrs[i].low = vxrs[i];
	}
	return 0;
}
@@ -222,7 +222,7 @@ SYSCALL_DEFINE0(sigreturn)
	if (__copy_from_user(&set.sig, &frame->sc.oldmask, _SIGMASK_COPY_SIZE))
		goto badframe;
	set_current_blocked(&set);
	save_fpu_regs();
	save_user_fpu_regs();
	if (restore_sigregs(regs, &frame->sregs))
		goto badframe;
	if (restore_sigregs_ext(regs, &frame->sregs_ext))
@@ -246,7 +246,7 @@ SYSCALL_DEFINE0(rt_sigreturn)
	set_current_blocked(&set);
	if (restore_altstack(&frame->uc.uc_stack))
		goto badframe;
	save_fpu_regs();
	save_user_fpu_regs();
	if (restore_sigregs(regs, &frame->uc.uc_mcontext))
		goto badframe;
	if (restore_sigregs_ext(regs, &frame->uc.uc_mcontext_ext))

@@ -36,12 +36,13 @@
#include <linux/sched/task_stack.h>
#include <linux/crash_dump.h>
#include <linux/kprobes.h>
#include <asm/access-regs.h>
#include <asm/asm-offsets.h>
#include <asm/ctlreg.h>
#include <asm/pfault.h>
#include <asm/diag.h>
#include <asm/switch_to.h>
#include <asm/facility.h>
#include <asm/fpu.h>
#include <asm/ipl.h>
#include <asm/setup.h>
#include <asm/irq.h>

@@ -20,7 +20,7 @@
#include <asm/sysinfo.h>
#include <asm/cpcmd.h>
#include <asm/topology.h>
#include <asm/fpu/api.h>
#include <asm/fpu.h>

int topology_max_mnest;

@@ -426,9 +426,9 @@ subsys_initcall(create_proc_service_level);
 */
void s390_adjust_jiffies(void)
{
	DECLARE_KERNEL_FPU_ONSTACK16(fpu);
	struct sysinfo_1_2_2 *info;
	unsigned long capability;
	struct kernel_fpu fpu;

	info = (void *) get_zeroed_page(GFP_KERNEL);
	if (!info)
@@ -447,21 +447,14 @@ void s390_adjust_jiffies(void)
		 * point division ..
		 */
		kernel_fpu_begin(&fpu, KERNEL_FPR);
		asm volatile(
			" sfpc %3\n"
			" l %0,%1\n"
			" tmlh %0,0xff80\n"
			" jnz 0f\n"
			" cefbr %%f2,%0\n"
			" j 1f\n"
			"0: le %%f2,%1\n"
			"1: cefbr %%f0,%2\n"
			" debr %%f0,%%f2\n"
			" cgebr %0,5,%%f0\n"
			: "=&d" (capability)
			: "Q" (info->capability), "d" (10000000), "d" (0)
			: "cc"
			);
		fpu_sfpc(0);
		if (info->capability & 0xff800000)
			fpu_ldgr(2, info->capability);
		else
			fpu_cefbr(2, info->capability);
		fpu_cefbr(0, 10000000);
		fpu_debr(0, 2);
		capability = fpu_cgebr(0, 5);
		kernel_fpu_end(&fpu, KERNEL_FPR);
	} else
		/*

@@ -90,7 +90,7 @@ SYM_FUNC_START(_diag26c_amode31)
SYM_FUNC_END(_diag26c_amode31)

/*
 * void _diag0c_amode31(struct hypfs_diag0c_entry *entry)
 * void _diag0c_amode31(unsigned long rx)
 */
SYM_FUNC_START(_diag0c_amode31)
	sam31

@@ -251,8 +251,8 @@ static struct clocksource clocksource_tod = {
	.rating		= 400,
	.read		= read_tod_clock,
	.mask		= CLOCKSOURCE_MASK(64),
	.mult		= 1000,
	.shift		= 12,
	.mult		= 4096000,
	.shift		= 24,
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
	.vdso_clock_mode = VDSO_CLOCKMODE_TOD,
};
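Both (mult, shift) pairs encode the same TOD period, since a clocksource converts cycle deltas with ns = (delta * mult) >> shift, and 1000/2^12 == 4096000/2^24 == 1/4096 microsecond per TOD cycle. The larger shift gives NTP steering more fractional bits to adjust mult with, hence the improved precision mentioned in the merge summary. A sketch of the equivalence (illustrative helper, not kernel code):

	/* Sketch: both encodings yield ~0.244 ns per TOD clock cycle. */
	static inline unsigned long long tod_delta_to_ns(unsigned long long delta)
	{
		return (delta * 4096000ULL) >> 24;	/* == (delta * 1000) >> 12 */
	}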
@@ -716,7 +716,7 @@ out_unlock:
/*
 * STP subsys sysfs interface functions
 */
static struct bus_type stp_subsys = {
static const struct bus_type stp_subsys = {
	.name		= "stp",
	.dev_name	= "stp",
};

@@ -28,8 +28,8 @@
#include <linux/cpu.h>
#include <linux/entry-common.h>
#include <asm/asm-extable.h>
#include <asm/fpu/api.h>
#include <asm/vtime.h>
#include <asm/fpu.h>
#include "entry.h"

static inline void __user *get_trap_ip(struct pt_regs *regs)
@@ -201,8 +201,8 @@ static void vector_exception(struct pt_regs *regs)
	}

	/* get vector interrupt code from fpc */
	save_fpu_regs();
	vic = (current->thread.fpu.fpc & 0xf00) >> 8;
	save_user_fpu_regs();
	vic = (current->thread.ufpu.fpc & 0xf00) >> 8;
	switch (vic) {
	case 1: /* invalid vector operation */
		si_code = FPE_FLTINV;
@@ -227,9 +227,9 @@ static void vector_exception(struct pt_regs *regs)

static void data_exception(struct pt_regs *regs)
{
	save_fpu_regs();
	if (current->thread.fpu.fpc & FPC_DXC_MASK)
		do_fp_trap(regs, current->thread.fpu.fpc);
	save_user_fpu_regs();
	if (current->thread.ufpu.fpc & FPC_DXC_MASK)
		do_fp_trap(regs, current->thread.ufpu.fpc);
	else
		do_trap(regs, SIGILL, ILL_ILLOPN, "data exception");
}

@@ -12,7 +12,6 @@
#include <linux/kdebug.h>
#include <linux/sched/task_stack.h>

#include <asm/switch_to.h>
#include <asm/facility.h>
#include <asm/kprobes.h>
#include <asm/dis.h>

@@ -22,7 +22,7 @@ KBUILD_CFLAGS_32 := $(filter-out -m64,$(KBUILD_CFLAGS))
KBUILD_CFLAGS_32 := $(filter-out -mno-pic-data-is-text-relative,$(KBUILD_CFLAGS_32))
KBUILD_CFLAGS_32 += -m31 -fPIC -shared -fno-common -fno-builtin

LDFLAGS_vdso32.so.dbg += -fPIC -shared -soname=linux-vdso32.so.1 \
LDFLAGS_vdso32.so.dbg += -shared -soname=linux-vdso32.so.1 \
	--hash-style=both --build-id=sha1 -melf_s390 -T

$(targets:%=$(obj)/%.dbg): KBUILD_CFLAGS = $(KBUILD_CFLAGS_32)

@@ -9,7 +9,6 @@

OUTPUT_FORMAT("elf32-s390", "elf32-s390", "elf32-s390")
OUTPUT_ARCH(s390:31-bit)
ENTRY(_start)

SECTIONS
{

@@ -25,8 +25,9 @@ KBUILD_AFLAGS_64 += -m64

KBUILD_CFLAGS_64 := $(filter-out -m64,$(KBUILD_CFLAGS))
KBUILD_CFLAGS_64 := $(filter-out -mno-pic-data-is-text-relative,$(KBUILD_CFLAGS_64))
KBUILD_CFLAGS_64 := $(filter-out -munaligned-symbols,$(KBUILD_CFLAGS_64))
KBUILD_CFLAGS_64 += -m64 -fPIC -fno-common -fno-builtin
ldflags-y := -fPIC -shared -soname=linux-vdso64.so.1 \
ldflags-y := -shared -soname=linux-vdso64.so.1 \
	     --hash-style=both --build-id=sha1 -T

$(targets:%=$(obj)/%.dbg): KBUILD_CFLAGS = $(KBUILD_CFLAGS_64)

@@ -9,7 +9,6 @@

OUTPUT_FORMAT("elf64-s390", "elf64-s390", "elf64-s390")
OUTPUT_ARCH(s390:64-bit)
ENTRY(_start)

SECTIONS
{

@@ -59,6 +59,14 @@ SECTIONS
	} :text = 0x0700

	RO_DATA(PAGE_SIZE)
	.data.rel.ro : {
		*(.data.rel.ro .data.rel.ro.*)
	}
	.got : {
		__got_start = .;
		*(.got)
		__got_end = .;
	}

	. = ALIGN(PAGE_SIZE);
	_sdata = .;		/* Start of data section */
@@ -73,6 +81,9 @@ SECTIONS
	__end_ro_after_init = .;

	RW_DATA(0x100, PAGE_SIZE, THREAD_SIZE)
	.data.rel : {
		*(.data.rel*)
	}
	BOOT_DATA_PRESERVED

	. = ALIGN(8);
@@ -181,6 +192,7 @@ SECTIONS

	PERCPU_SECTION(0x100)

#ifdef CONFIG_PIE_BUILD
	.dynsym ALIGN(8) : {
		__dynsym_start = .;
		*(.dynsym)
@@ -191,6 +203,19 @@ SECTIONS
		*(.rela*)
		__rela_dyn_end = .;
	}
	.dynamic ALIGN(8) : {
		*(.dynamic)
	}
	.dynstr ALIGN(8) : {
		*(.dynstr)
	}
#endif
	.hash ALIGN(8) : {
		*(.hash)
	}
	.gnu.hash ALIGN(8) : {
		*(.gnu.hash)
	}

	. = ALIGN(PAGE_SIZE);
	__init_end = .;		/* freed after init ends here */
@@ -214,9 +239,14 @@ SECTIONS
		QUAD(__boot_data_preserved_start)	/* bootdata_preserved_off */
		QUAD(__boot_data_preserved_end -
		     __boot_data_preserved_start)	/* bootdata_preserved_size */
#ifdef CONFIG_PIE_BUILD
		QUAD(__dynsym_start)			/* dynsym_start */
		QUAD(__rela_dyn_start)			/* rela_dyn_start */
		QUAD(__rela_dyn_end)			/* rela_dyn_end */
#else
		QUAD(__got_start)			/* got_start */
		QUAD(__got_end)				/* got_end */
#endif
		QUAD(_eamode31 - _samode31)		/* amode31_size */
		QUAD(init_mm)
		QUAD(swapper_pg_dir)
@@ -235,6 +265,30 @@ SECTIONS
	DWARF_DEBUG
	ELF_DETAILS

	/*
	 * Make sure that the .got.plt is either completely empty or it
	 * contains only the three reserved double words.
	 */
	.got.plt : {
		*(.got.plt)
	}
	ASSERT(SIZEOF(.got.plt) == 0 || SIZEOF(.got.plt) == 0x18, "Unexpected GOT/PLT entries detected!")

	/*
	 * Sections that should stay zero sized, which is safer to
	 * explicitly check instead of blindly discarding.
	 */
	.plt : {
		*(.plt) *(.plt.*) *(.iplt) *(.igot .igot.plt)
	}
	ASSERT(SIZEOF(.plt) == 0, "Unexpected run-time procedure linkages detected!")
#ifndef CONFIG_PIE_BUILD
	.rela.dyn : {
		*(.rela.*) *(.rela_*)
	}
	ASSERT(SIZEOF(.rela.dyn) == 0, "Unexpected run-time relocations (.rela) detected!")
#endif

	/* Sections to be discarded */
	DISCARDS
	/DISCARD/ : {

|
@ -11,11 +11,11 @@
|
||||
#include <linux/err.h>
|
||||
#include <linux/pgtable.h>
|
||||
#include <linux/bitfield.h>
|
||||
#include <asm/access-regs.h>
|
||||
#include <asm/fault.h>
|
||||
#include <asm/gmap.h>
|
||||
#include "kvm-s390.h"
|
||||
#include "gaccess.h"
|
||||
#include <asm/switch_to.h>
|
||||
|
||||
union asce {
|
||||
unsigned long val;
|
||||
@ -391,7 +391,8 @@ static int ar_translation(struct kvm_vcpu *vcpu, union asce *asce, u8 ar,
|
||||
if (ar >= NUM_ACRS)
|
||||
return -EINVAL;
|
||||
|
||||
save_access_regs(vcpu->run->s.regs.acrs);
|
||||
if (vcpu->arch.acrs_loaded)
|
||||
save_access_regs(vcpu->run->s.regs.acrs);
|
||||
alet.val = vcpu->run->s.regs.acrs[ar];
|
||||
|
||||
if (ar == 0 || alet.val == 0) {
|
||||
|
@ -19,13 +19,13 @@
|
||||
#include <linux/slab.h>
|
||||
#include <linux/bitmap.h>
|
||||
#include <linux/vmalloc.h>
|
||||
#include <asm/access-regs.h>
|
||||
#include <asm/asm-offsets.h>
|
||||
#include <asm/dis.h>
|
||||
#include <linux/uaccess.h>
|
||||
#include <asm/sclp.h>
|
||||
#include <asm/isc.h>
|
||||
#include <asm/gmap.h>
|
||||
#include <asm/switch_to.h>
|
||||
#include <asm/nmi.h>
|
||||
#include <asm/airq.h>
|
||||
#include <asm/tpi.h>
|
||||
@ -584,7 +584,7 @@ static int __write_machine_check(struct kvm_vcpu *vcpu,
|
||||
|
||||
mci.val = mchk->mcic;
|
||||
/* take care of lazy register loading */
|
||||
save_fpu_regs();
|
||||
kvm_s390_fpu_store(vcpu->run);
|
||||
save_access_regs(vcpu->run->s.regs.acrs);
|
||||
if (MACHINE_HAS_GS && vcpu->arch.gs_enabled)
|
||||
save_gs_cb(current->thread.gs_cb);
|
||||
@ -648,7 +648,7 @@ static int __write_machine_check(struct kvm_vcpu *vcpu,
|
||||
}
|
||||
rc |= write_guest_lc(vcpu, __LC_GPREGS_SAVE_AREA,
|
||||
vcpu->run->s.regs.gprs, 128);
|
||||
rc |= put_guest_lc(vcpu, current->thread.fpu.fpc,
|
||||
rc |= put_guest_lc(vcpu, vcpu->run->s.regs.fpc,
|
||||
(u32 __user *) __LC_FP_CREG_SAVE_AREA);
|
||||
rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->todpr,
|
||||
(u32 __user *) __LC_TOD_PROGREG_SAVE_AREA);
|
||||
|
@ -33,19 +33,19 @@
|
||||
#include <linux/pgtable.h>
|
||||
#include <linux/mmu_notifier.h>
|
||||
|
||||
#include <asm/access-regs.h>
|
||||
#include <asm/asm-offsets.h>
|
||||
#include <asm/lowcore.h>
|
||||
#include <asm/stp.h>
|
||||
#include <asm/gmap.h>
|
||||
#include <asm/nmi.h>
|
||||
#include <asm/switch_to.h>
|
||||
#include <asm/isc.h>
|
||||
#include <asm/sclp.h>
|
||||
#include <asm/cpacf.h>
|
||||
#include <asm/timex.h>
|
||||
#include <asm/fpu.h>
|
||||
#include <asm/ap.h>
|
||||
#include <asm/uv.h>
|
||||
#include <asm/fpu/api.h>
|
||||
#include "kvm-s390.h"
|
||||
#include "gaccess.h"
|
||||
#include "pci.h"
|
||||
@ -3951,6 +3951,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
|
||||
KVM_SYNC_ARCH0 |
|
||||
KVM_SYNC_PFAULT |
|
||||
KVM_SYNC_DIAG318;
|
||||
vcpu->arch.acrs_loaded = false;
|
||||
kvm_s390_set_prefix(vcpu, 0);
|
||||
if (test_kvm_facility(vcpu->kvm, 64))
|
||||
vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
|
||||
@ -4829,8 +4830,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
|
||||
vcpu->run->s.regs.gprs,
|
||||
sizeof(sie_page->pv_grregs));
|
||||
}
|
||||
if (test_cpu_flag(CIF_FPU))
|
||||
load_fpu_regs();
|
||||
exit_reason = sie64a(vcpu->arch.sie_block,
|
||||
vcpu->run->s.regs.gprs);
|
||||
if (kvm_s390_pv_cpu_is_protected(vcpu)) {
|
||||
@ -4951,16 +4950,8 @@ static void sync_regs(struct kvm_vcpu *vcpu)
|
||||
}
|
||||
save_access_regs(vcpu->arch.host_acrs);
|
||||
restore_access_regs(vcpu->run->s.regs.acrs);
|
||||
/* save host (userspace) fprs/vrs */
|
||||
save_fpu_regs();
|
||||
vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
|
||||
vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;
|
||||
if (cpu_has_vx())
|
||||
current->thread.fpu.regs = vcpu->run->s.regs.vrs;
|
||||
else
|
||||
current->thread.fpu.regs = vcpu->run->s.regs.fprs;
|
||||
current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
|
||||
|
||||
vcpu->arch.acrs_loaded = true;
|
||||
kvm_s390_fpu_load(vcpu->run);
|
||||
/* Sync fmt2 only data */
|
||||
if (likely(!kvm_s390_pv_cpu_is_protected(vcpu))) {
|
||||
sync_regs_fmt2(vcpu);
|
||||
@ -5021,12 +5012,8 @@ static void store_regs(struct kvm_vcpu *vcpu)
|
||||
kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
|
||||
save_access_regs(vcpu->run->s.regs.acrs);
|
||||
restore_access_regs(vcpu->arch.host_acrs);
|
||||
/* Save guest register state */
|
||||
save_fpu_regs();
|
||||
vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
|
||||
/* Restore will be done lazily at return */
|
||||
current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
|
||||
current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;
|
||||
vcpu->arch.acrs_loaded = false;
|
||||
kvm_s390_fpu_store(vcpu->run);
|
||||
if (likely(!kvm_s390_pv_cpu_is_protected(vcpu)))
|
||||
store_regs_fmt2(vcpu);
|
||||
}
|
||||
@ -5034,6 +5021,7 @@ static void store_regs(struct kvm_vcpu *vcpu)
|
||||
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
struct kvm_run *kvm_run = vcpu->run;
|
||||
DECLARE_KERNEL_FPU_ONSTACK32(fpu);
|
||||
int rc;
|
||||
|
||||
/*
|
||||
@ -5075,6 +5063,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
|
||||
goto out;
|
||||
}
|
||||
|
||||
kernel_fpu_begin(&fpu, KERNEL_FPC | KERNEL_VXR);
|
||||
sync_regs(vcpu);
|
||||
enable_cpu_timer_accounting(vcpu);
|
||||
|
||||
@ -5098,6 +5087,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
|
||||
|
||||
disable_cpu_timer_accounting(vcpu);
|
||||
store_regs(vcpu);
|
||||
kernel_fpu_end(&fpu, KERNEL_FPC | KERNEL_VXR);
|
||||
|
||||
kvm_sigset_deactivate(vcpu);
|
||||
|
||||
@ -5172,8 +5162,7 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
|
||||
* switch in the run ioctl. Let's update our copies before we save
|
||||
* it into the save area
|
||||
*/
|
||||
save_fpu_regs();
|
||||
vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
|
||||
kvm_s390_fpu_store(vcpu->run);
|
||||
save_access_regs(vcpu->run->s.regs.acrs);
|
||||
|
||||
return kvm_s390_store_status_unloaded(vcpu, addr);
|
||||
|
@ -20,6 +20,24 @@
|
||||
#include <asm/processor.h>
|
||||
#include <asm/sclp.h>
|
||||
|
||||
static inline void kvm_s390_fpu_store(struct kvm_run *run)
|
||||
{
|
||||
fpu_stfpc(&run->s.regs.fpc);
|
||||
if (cpu_has_vx())
|
||||
save_vx_regs((__vector128 *)&run->s.regs.vrs);
|
||||
else
|
||||
save_fp_regs((freg_t *)&run->s.regs.fprs);
|
||||
}
|
||||
|
||||
static inline void kvm_s390_fpu_load(struct kvm_run *run)
|
||||
{
|
||||
fpu_lfpc_safe(&run->s.regs.fpc);
|
||||
if (cpu_has_vx())
|
||||
load_vx_regs((__vector128 *)&run->s.regs.vrs);
|
||||
else
|
||||
load_fp_regs((freg_t *)&run->s.regs.fprs);
|
||||
}
|
||||
|
||||
/* Transactional Memory Execution related macros */
|
||||
#define IS_TE_ENABLED(vcpu) ((vcpu->arch.sie_block->ecb & ECB_TE))
|
||||
#define TDB_FORMAT1 1
|
||||
|
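A minimal sketch for orientation (illustrative only, not part of the diff): the two inline helpers above replace the old lazy save_fpu_regs() handling. A caller brackets guest execution with kernel_fpu_begin()/kernel_fpu_end() and moves guest FPU state with the helpers, which is exactly the pattern the kvm_arch_vcpu_ioctl_run() hunks above establish. The function below is hypothetical and elides the SIE entry:

	/* Sketch only - mirrors the pattern in kvm_arch_vcpu_ioctl_run() above. */
	static int example_run_guest(struct kvm_vcpu *vcpu)
	{
		DECLARE_KERNEL_FPU_ONSTACK32(fpu);	/* backup area for fpc + 32 vector regs */
		int rc = 0;

		/* Claim FPC and vector registers; host state is saved into "fpu". */
		kernel_fpu_begin(&fpu, KERNEL_FPC | KERNEL_VXR);
		kvm_s390_fpu_load(vcpu->run);	/* guest fpc and vrs (or fprs) -> CPU */

		/* ... enter SIE here ... */

		kvm_s390_fpu_store(vcpu->run);	/* CPU -> guest fpc and vrs (or fprs) */
		kernel_fpu_end(&fpu, KERNEL_FPC | KERNEL_VXR);	/* restore host state */
		return rc;
	}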
@@ -18,7 +18,6 @@
 #include <asm/sclp.h>
 #include <asm/nmi.h>
 #include <asm/dis.h>
-#include <asm/fpu/api.h>
 #include <asm/facility.h>
 #include "kvm-s390.h"
 #include "gaccess.h"

@@ -1149,8 +1148,6 @@ static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 	 */
 	vcpu->arch.sie_block->prog0c |= PROG_IN_SIE;
 	barrier();
-	if (test_cpu_flag(CIF_FPU))
-		load_fpu_regs();
 	if (!kvm_s390_vcpu_sie_inhibited(vcpu))
 		rc = sie64a(scb_s, vcpu->run->s.regs.gprs);
 	barrier();

@@ -4,6 +4,7 @@
 #

 lib-y += delay.o string.o uaccess.o find.o spinlock.o tishift.o
+lib-y += csum-partial.o
 obj-y += mem.o xor.o
 lib-$(CONFIG_KPROBES) += probes.o
 lib-$(CONFIG_UPROBES) += probes.o

arch/s390/lib/csum-partial.c (new file, 91 lines)
@@ -0,0 +1,91 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/export.h>
+#include <asm/checksum.h>
+#include <asm/fpu.h>
+
+/*
+ * Computes the checksum of a memory block at src, length len,
+ * and adds in "sum" (32-bit). If copy is true copies to dst.
+ *
+ * Returns a 32-bit number suitable for feeding into itself
+ * or csum_tcpudp_magic.
+ *
+ * This function must be called with even lengths, except
+ * for the last fragment, which may be odd.
+ *
+ * It's best to have src and dst aligned on a 64-bit boundary.
+ */
+static __always_inline __wsum csum_copy(void *dst, const void *src, int len, __wsum sum, bool copy)
+{
+	DECLARE_KERNEL_FPU_ONSTACK8(vxstate);
+
+	if (!cpu_has_vx()) {
+		if (copy)
+			memcpy(dst, src, len);
+		return cksm(dst, len, sum);
+	}
+	kernel_fpu_begin(&vxstate, KERNEL_VXR_V16V23);
+	fpu_vlvgf(16, (__force u32)sum, 1);
+	fpu_vzero(17);
+	fpu_vzero(18);
+	fpu_vzero(19);
+	while (len >= 64) {
+		fpu_vlm(20, 23, src);
+		if (copy) {
+			fpu_vstm(20, 23, dst);
+			dst += 64;
+		}
+		fpu_vcksm(16, 20, 16);
+		fpu_vcksm(17, 21, 17);
+		fpu_vcksm(18, 22, 18);
+		fpu_vcksm(19, 23, 19);
+		src += 64;
+		len -= 64;
+	}
+	while (len >= 32) {
+		fpu_vlm(20, 21, src);
+		if (copy) {
+			fpu_vstm(20, 21, dst);
+			dst += 32;
+		}
+		fpu_vcksm(16, 20, 16);
+		fpu_vcksm(17, 21, 17);
+		src += 32;
+		len -= 32;
+	}
+	while (len >= 16) {
+		fpu_vl(20, src);
+		if (copy) {
+			fpu_vst(20, dst);
+			dst += 16;
+		}
+		fpu_vcksm(16, 20, 16);
+		src += 16;
+		len -= 16;
+	}
+	if (len) {
+		fpu_vll(20, len - 1, src);
+		if (copy)
+			fpu_vstl(20, len - 1, dst);
+		fpu_vcksm(16, 20, 16);
+	}
+	fpu_vcksm(18, 19, 18);
+	fpu_vcksm(16, 17, 16);
+	fpu_vcksm(16, 18, 16);
+	sum = (__force __wsum)fpu_vlgvf(16, 1);
+	kernel_fpu_end(&vxstate, KERNEL_VXR_V16V23);
+	return sum;
+}
+
+__wsum csum_partial(const void *buff, int len, __wsum sum)
+{
+	return csum_copy(NULL, buff, len, sum, false);
+}
+EXPORT_SYMBOL(csum_partial);
+
+__wsum csum_partial_copy_nocheck(const void *src, void *dst, int len)
+{
+	return csum_copy(dst, src, len, 0, true);
+}
+EXPORT_SYMBOL(csum_partial_copy_nocheck);
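A quick illustration of how the two exported routines above are typically consumed (a sketch, not part of the diff; csum_fold() is the generic helper that compresses the 32-bit running sum into the final 16-bit Internet checksum, and the wrapper below is hypothetical):

	#include <asm/checksum.h>

	static __sum16 example_checksum(const void *buf, int len)
	{
		/* Vectorized on machines with VX, cksm-based otherwise. */
		__wsum sum = csum_partial(buf, len, 0);

		return csum_fold(sum);	/* fold 32-bit sum down to 16 bits */
	}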
@@ -136,7 +136,7 @@ dcss_diag(int *func, void *parameter,
 	unsigned long rx, ry;
 	int rc;

-	rx = (unsigned long) parameter;
+	rx = virt_to_phys(parameter);
 	ry = (unsigned long) *func;

 	diag_stat_inc(DIAG_STAT_X064);

@@ -178,7 +178,7 @@ query_segment_type (struct dcss_segment *seg)

 	/* initialize diag input parameters */
 	qin->qopcode = DCSS_FINDSEGA;
-	qin->qoutptr = (unsigned long) qout;
+	qin->qoutptr = virt_to_phys(qout);
 	qin->qoutlen = sizeof(struct qout64);
 	memcpy (qin->qname, seg->dcss_name, 8);

@@ -71,6 +71,15 @@ static inline unsigned long mmap_base(unsigned long rnd,
 	return PAGE_ALIGN(STACK_TOP - gap - rnd);
 }

+static int get_align_mask(struct file *filp, unsigned long flags)
+{
+	if (!(current->flags & PF_RANDOMIZE))
+		return 0;
+	if (filp || (flags & MAP_SHARED))
+		return MMAP_ALIGN_MASK << PAGE_SHIFT;
+	return 0;
+}
+
 unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
 				     unsigned long len, unsigned long pgoff,
 				     unsigned long flags)

@@ -97,10 +106,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
 	info.length = len;
 	info.low_limit = mm->mmap_base;
 	info.high_limit = TASK_SIZE;
-	if (filp || (flags & MAP_SHARED))
-		info.align_mask = MMAP_ALIGN_MASK << PAGE_SHIFT;
-	else
-		info.align_mask = 0;
+	info.align_mask = get_align_mask(filp, flags);
 	info.align_offset = pgoff << PAGE_SHIFT;
 	addr = vm_unmapped_area(&info);
 	if (offset_in_page(addr))

@@ -138,10 +144,7 @@ unsigned long arch_get_unmapped_area_topdown(struct file *filp, unsigned long ad
 	info.length = len;
 	info.low_limit = PAGE_SIZE;
 	info.high_limit = mm->mmap_base;
-	if (filp || (flags & MAP_SHARED))
-		info.align_mask = MMAP_ALIGN_MASK << PAGE_SHIFT;
-	else
-		info.align_mask = 0;
+	info.align_mask = get_align_mask(filp, flags);
 	info.align_offset = pgoff << PAGE_SHIFT;
 	addr = vm_unmapped_area(&info);

@@ -28,6 +28,7 @@
 #include <linux/jump_label.h>
 #include <linux/pci.h>
 #include <linux/printk.h>
+#include <linux/lockdep.h>

 #include <asm/isc.h>
 #include <asm/airq.h>

@@ -730,12 +731,12 @@ EXPORT_SYMBOL_GPL(zpci_disable_device);
 * equivalent to its state during boot when first probing a driver.
 * Consequently after reset the PCI function requires re-initialization via the
 * common PCI code including re-enabling IRQs via pci_alloc_irq_vectors()
- * and enabling the function via e.g.pci_enablde_device_flags().The caller
+ * and enabling the function via e.g. pci_enable_device_flags(). The caller
 * must guard against concurrent reset attempts.
 *
 * In most cases this function should not be called directly but through
 * pci_reset_function() or pci_reset_bus() which handle the save/restore and
- * locking.
+ * locking - asserted by lockdep.
 *
 * Return: 0 on success and an error value otherwise
 */

@@ -744,6 +745,7 @@ int zpci_hot_reset_device(struct zpci_dev *zdev)
 	u8 status;
 	int rc;

+	lockdep_assert_held(&zdev->state_lock);
 	zpci_dbg(3, "rst fid:%x, fh:%x\n", zdev->fid, zdev->fh);
 	if (zdev_enabled(zdev)) {
 		/* Disables device access, DMAs and IRQs (reset state) */

@@ -806,7 +808,8 @@ struct zpci_dev *zpci_create_device(u32 fid, u32 fh, enum zpci_state state)
 	zdev->state = state;

 	kref_init(&zdev->kref);
-	mutex_init(&zdev->lock);
+	mutex_init(&zdev->state_lock);
+	mutex_init(&zdev->fmb_lock);
 	mutex_init(&zdev->kzdev_lock);

 	rc = zpci_init_iommu(zdev);

@@ -870,6 +873,10 @@ int zpci_deconfigure_device(struct zpci_dev *zdev)
 {
 	int rc;

+	lockdep_assert_held(&zdev->state_lock);
+	if (zdev->state != ZPCI_FN_STATE_CONFIGURED)
+		return 0;
+
 	if (zdev->zbus->bus)
 		zpci_bus_remove_device(zdev, false);

@@ -889,7 +896,7 @@ int zpci_deconfigure_device(struct zpci_dev *zdev)
 }

 /**
- * zpci_device_reserved() - Mark device as resverved
+ * zpci_device_reserved() - Mark device as reserved
 * @zdev: the zpci_dev that was reserved
 *
 * Handle the case that a given zPCI function was reserved by another system.

@@ -899,8 +906,6 @@ int zpci_deconfigure_device(struct zpci_dev *zdev)
 */
 void zpci_device_reserved(struct zpci_dev *zdev)
 {
-	if (zdev->has_hp_slot)
-		zpci_exit_slot(zdev);
 	/*
 	 * Remove device from zpci_list as it is going away. This also
 	 * makes sure we ignore subsequent zPCI events for this device.

@@ -918,6 +923,9 @@ void zpci_release_device(struct kref *kref)
 	struct zpci_dev *zdev = container_of(kref, struct zpci_dev, kref);
 	int ret;

+	if (zdev->has_hp_slot)
+		zpci_exit_slot(zdev);
+
 	if (zdev->zbus->bus)
 		zpci_bus_remove_device(zdev, false);

@@ -91,9 +91,9 @@ static int pci_perf_show(struct seq_file *m, void *v)
 	if (!zdev)
 		return 0;

-	mutex_lock(&zdev->lock);
+	mutex_lock(&zdev->fmb_lock);
 	if (!zdev->fmb) {
-		mutex_unlock(&zdev->lock);
+		mutex_unlock(&zdev->fmb_lock);
 		seq_puts(m, "FMB statistics disabled\n");
 		return 0;
 	}

@@ -130,7 +130,7 @@ static int pci_perf_show(struct seq_file *m, void *v)
 	}

 	pci_sw_counter_show(m);
-	mutex_unlock(&zdev->lock);
+	mutex_unlock(&zdev->fmb_lock);
 	return 0;
 }

@@ -148,7 +148,7 @@ static ssize_t pci_perf_seq_write(struct file *file, const char __user *ubuf,
 	if (rc)
 		return rc;

-	mutex_lock(&zdev->lock);
+	mutex_lock(&zdev->fmb_lock);
 	switch (val) {
 	case 0:
 		rc = zpci_fmb_disable_device(zdev);

@@ -157,7 +157,7 @@ static ssize_t pci_perf_seq_write(struct file *file, const char __user *ubuf,
 		rc = zpci_fmb_enable_device(zdev);
 		break;
 	}
-	mutex_unlock(&zdev->lock);
+	mutex_unlock(&zdev->fmb_lock);
 	return rc ? rc : count;
 }

@@ -267,6 +267,7 @@ static void __zpci_event_error(struct zpci_ccdf_err *ccdf)
 	zpci_err_hex(ccdf, sizeof(*ccdf));

 	if (zdev) {
+		mutex_lock(&zdev->state_lock);
 		zpci_update_fh(zdev, ccdf->fh);
 		if (zdev->zbus->bus)
 			pdev = pci_get_slot(zdev->zbus->bus, zdev->devfn);

@@ -294,6 +295,8 @@ static void __zpci_event_error(struct zpci_ccdf_err *ccdf)
 	}
 	pci_dev_put(pdev);
 no_pdev:
+	if (zdev)
+		mutex_unlock(&zdev->state_lock);
 	zpci_zdev_put(zdev);
 }

@@ -326,6 +329,10 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)

 	zpci_dbg(3, "avl fid:%x, fh:%x, pec:%x\n",
 		 ccdf->fid, ccdf->fh, ccdf->pec);

+	if (existing_zdev)
+		mutex_lock(&zdev->state_lock);
+
 	switch (ccdf->pec) {
 	case 0x0301: /* Reserved|Standby -> Configured */
 		if (!zdev) {

@@ -348,7 +355,7 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
 		break;
 	case 0x0303: /* Deconfiguration requested */
 		if (zdev) {
-			/* The event may have been queued before we confirgured
+			/* The event may have been queued before we configured
 			 * the device.
 			 */
 			if (zdev->state != ZPCI_FN_STATE_CONFIGURED)

@@ -359,7 +366,7 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
 		break;
 	case 0x0304: /* Configured -> Standby|Reserved */
 		if (zdev) {
-			/* The event may have been queued before we confirgured
+			/* The event may have been queued before we configured
 			 * the device.
 			 */
 			if (zdev->state == ZPCI_FN_STATE_CONFIGURED)

@@ -383,8 +390,10 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
 	default:
 		break;
 	}
-	if (existing_zdev)
+	if (existing_zdev) {
+		mutex_unlock(&zdev->state_lock);
 		zpci_zdev_put(zdev);
+	}
 }

 void zpci_event_availability(void *data)

@@ -49,6 +49,39 @@ static ssize_t mio_enabled_show(struct device *dev,
 }
 static DEVICE_ATTR_RO(mio_enabled);

+static int _do_recover(struct pci_dev *pdev, struct zpci_dev *zdev)
+{
+	u8 status;
+	int ret;
+
+	pci_stop_and_remove_bus_device(pdev);
+	if (zdev_enabled(zdev)) {
+		ret = zpci_disable_device(zdev);
+		/*
+		 * Due to a z/VM vs LPAR inconsistency in the error
+		 * state the FH may indicate an enabled device but
+		 * disable says the device is already disabled don't
+		 * treat it as an error here.
+		 */
+		if (ret == -EINVAL)
+			ret = 0;
+		if (ret)
+			return ret;
+	}
+
+	ret = zpci_enable_device(zdev);
+	if (ret)
+		return ret;
+
+	if (zdev->dma_table) {
+		ret = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
+					 virt_to_phys(zdev->dma_table), &status);
+		if (ret)
+			zpci_disable_device(zdev);
+	}
+	return ret;
+}
+
 static ssize_t recover_store(struct device *dev, struct device_attribute *attr,
 			     const char *buf, size_t count)
 {

@@ -56,7 +89,6 @@ static ssize_t recover_store(struct device *dev, struct device_attribute *attr,
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct zpci_dev *zdev = to_zpci(pdev);
 	int ret = 0;
-	u8 status;

 	/* Can't use device_remove_self() here as that would lead us to lock
 	 * the pci_rescan_remove_lock while holding the device' kernfs lock.

@@ -70,6 +102,12 @@ static ssize_t recover_store(struct device *dev, struct device_attribute *attr,
 	 */
 	kn = sysfs_break_active_protection(&dev->kobj, &attr->attr);
 	WARN_ON_ONCE(!kn);
+
+	/* Device needs to be configured and state must not change */
+	mutex_lock(&zdev->state_lock);
+	if (zdev->state != ZPCI_FN_STATE_CONFIGURED)
+		goto out;
+
 	/* device_remove_file() serializes concurrent calls ignoring all but
 	 * the first
 	 */

@@ -82,35 +120,13 @@ static ssize_t recover_store(struct device *dev, struct device_attribute *attr,
 	 */
 	pci_lock_rescan_remove();
 	if (pci_dev_is_added(pdev)) {
-		pci_stop_and_remove_bus_device(pdev);
-		if (zdev_enabled(zdev)) {
-			ret = zpci_disable_device(zdev);
-			/*
-			 * Due to a z/VM vs LPAR inconsistency in the error
-			 * state the FH may indicate an enabled device but
-			 * disable says the device is already disabled don't
-			 * treat it as an error here.
-			 */
-			if (ret == -EINVAL)
-				ret = 0;
-			if (ret)
-				goto out;
-		}
-
-		ret = zpci_enable_device(zdev);
-		if (ret)
-			goto out;
-
-		if (zdev->dma_table) {
-			ret = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
-						 virt_to_phys(zdev->dma_table), &status);
-			if (ret)
-				zpci_disable_device(zdev);
-		}
+		ret = _do_recover(pdev, zdev);
 	}
-out:
 	pci_rescan_bus(zdev->zbus->bus);
 	pci_unlock_rescan_remove();

+out:
+	mutex_unlock(&zdev->state_lock);
 	if (kn)
 		sysfs_unbreak_active_protection(kn);
 	return ret ? ret : count;
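The recover_store() rework above follows a common shape for sysfs store handlers that mutate device state: take the state lock, bail out unless the device is in the one state the operation is defined for, and funnel every exit through a single unlock path. A distilled sketch of that shape (hypothetical types and helper, not part of the diff):

	#include <linux/mutex.h>

	enum example_state { EXAMPLE_STANDBY, EXAMPLE_CONFIGURED };

	struct example_dev {
		struct mutex state_lock;	/* guards "state", like zdev->state_lock */
		enum example_state state;
	};

	static ssize_t example_store(struct example_dev *edev, const char *buf,
				     size_t count)
	{
		int ret = 0;

		mutex_lock(&edev->state_lock);
		if (edev->state != EXAMPLE_CONFIGURED)
			goto out;	/* state must not change while we work */

		ret = 0;	/* ... do the actual recovery work here ... */
	out:
		mutex_unlock(&edev->state_lock);
		return ret ? ret : count;
	}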
arch/s390/tools/.gitignore

@@ -1,3 +1,4 @@
 # SPDX-License-Identifier: GPL-2.0-only
 gen_facilities
 gen_opcode_table
+relocs

@@ -25,3 +25,8 @@ $(kapi)/facility-defs.h: $(obj)/gen_facilities FORCE

 $(kapi)/dis-defs.h: $(obj)/gen_opcode_table FORCE
 	$(call filechk,dis-defs.h)
+
+hostprogs += relocs
+PHONY += relocs
+relocs: $(obj)/relocs
+	@:
arch/s390/tools/relocs.c (new file, 387 lines)

@@ -0,0 +1,387 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <stdio.h>
+#include <stdarg.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <string.h>
+#include <errno.h>
+#include <unistd.h>
+#include <elf.h>
+#include <byteswap.h>
+#define USE_BSD
+#include <endian.h>
+
+#define ELF_BITS 64
+
+#define ELF_MACHINE		EM_S390
+#define ELF_MACHINE_NAME	"IBM S/390"
+#define SHT_REL_TYPE		SHT_RELA
+#define Elf_Rel			Elf64_Rela
+
+#define ELF_CLASS		ELFCLASS64
+#define ELF_ENDIAN		ELFDATA2MSB
+#define ELF_R_SYM(val)		ELF64_R_SYM(val)
+#define ELF_R_TYPE(val)		ELF64_R_TYPE(val)
+#define ELF_ST_TYPE(o)		ELF64_ST_TYPE(o)
+#define ELF_ST_BIND(o)		ELF64_ST_BIND(o)
+#define ELF_ST_VISIBILITY(o)	ELF64_ST_VISIBILITY(o)
+
+#define ElfW(type)		_ElfW(ELF_BITS, type)
+#define _ElfW(bits, type)	__ElfW(bits, type)
+#define __ElfW(bits, type)	Elf##bits##_##type
+
+#define Elf_Addr		ElfW(Addr)
+#define Elf_Ehdr		ElfW(Ehdr)
+#define Elf_Phdr		ElfW(Phdr)
+#define Elf_Shdr		ElfW(Shdr)
+#define Elf_Sym			ElfW(Sym)
+
+static Elf_Ehdr ehdr;
+static unsigned long shnum;
+static unsigned int shstrndx;
+
+struct relocs {
+	uint32_t	*offset;
+	unsigned long	count;
+	unsigned long	size;
+};
+
+static struct relocs relocs64;
+#define FMT PRIu64
+
+struct section {
+	Elf_Shdr	shdr;
+	struct section	*link;
+	Elf_Rel		*reltab;
+};
+
+static struct section *secs;
+
+#if BYTE_ORDER == LITTLE_ENDIAN
+#define le16_to_cpu(val)	(val)
+#define le32_to_cpu(val)	(val)
+#define le64_to_cpu(val)	(val)
+#define be16_to_cpu(val)	bswap_16(val)
+#define be32_to_cpu(val)	bswap_32(val)
+#define be64_to_cpu(val)	bswap_64(val)
+#endif
+
+#if BYTE_ORDER == BIG_ENDIAN
+#define le16_to_cpu(val)	bswap_16(val)
+#define le32_to_cpu(val)	bswap_32(val)
+#define le64_to_cpu(val)	bswap_64(val)
+#define be16_to_cpu(val)	(val)
+#define be32_to_cpu(val)	(val)
+#define be64_to_cpu(val)	(val)
+#endif
+
+static uint16_t elf16_to_cpu(uint16_t val)
+{
+	if (ehdr.e_ident[EI_DATA] == ELFDATA2LSB)
+		return le16_to_cpu(val);
+	else
+		return be16_to_cpu(val);
+}
+
+static uint32_t elf32_to_cpu(uint32_t val)
+{
+	if (ehdr.e_ident[EI_DATA] == ELFDATA2LSB)
+		return le32_to_cpu(val);
+	else
+		return be32_to_cpu(val);
+}
+
+#define elf_half_to_cpu(x)	elf16_to_cpu(x)
+#define elf_word_to_cpu(x)	elf32_to_cpu(x)
+
+static uint64_t elf64_to_cpu(uint64_t val)
+{
+	return be64_to_cpu(val);
+}
+
+#define elf_addr_to_cpu(x)	elf64_to_cpu(x)
+#define elf_off_to_cpu(x)	elf64_to_cpu(x)
+#define elf_xword_to_cpu(x)	elf64_to_cpu(x)
+
+static void die(char *fmt, ...)
+{
+	va_list ap;
+
+	va_start(ap, fmt);
+	vfprintf(stderr, fmt, ap);
+	va_end(ap);
+	exit(1);
+}
+
+static void read_ehdr(FILE *fp)
+{
+	if (fread(&ehdr, sizeof(ehdr), 1, fp) != 1)
+		die("Cannot read ELF header: %s\n", strerror(errno));
+	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0)
+		die("No ELF magic\n");
+	if (ehdr.e_ident[EI_CLASS] != ELF_CLASS)
+		die("Not a %d bit executable\n", ELF_BITS);
+	if (ehdr.e_ident[EI_DATA] != ELF_ENDIAN)
+		die("ELF endian mismatch\n");
+	if (ehdr.e_ident[EI_VERSION] != EV_CURRENT)
+		die("Unknown ELF version\n");
+
+	/* Convert the fields to native endian */
+	ehdr.e_type = elf_half_to_cpu(ehdr.e_type);
+	ehdr.e_machine = elf_half_to_cpu(ehdr.e_machine);
+	ehdr.e_version = elf_word_to_cpu(ehdr.e_version);
+	ehdr.e_entry = elf_addr_to_cpu(ehdr.e_entry);
+	ehdr.e_phoff = elf_off_to_cpu(ehdr.e_phoff);
+	ehdr.e_shoff = elf_off_to_cpu(ehdr.e_shoff);
+	ehdr.e_flags = elf_word_to_cpu(ehdr.e_flags);
+	ehdr.e_ehsize = elf_half_to_cpu(ehdr.e_ehsize);
+	ehdr.e_phentsize = elf_half_to_cpu(ehdr.e_phentsize);
+	ehdr.e_phnum = elf_half_to_cpu(ehdr.e_phnum);
+	ehdr.e_shentsize = elf_half_to_cpu(ehdr.e_shentsize);
+	ehdr.e_shnum = elf_half_to_cpu(ehdr.e_shnum);
+	ehdr.e_shstrndx = elf_half_to_cpu(ehdr.e_shstrndx);
+
+	shnum = ehdr.e_shnum;
+	shstrndx = ehdr.e_shstrndx;
+
+	if ((ehdr.e_type != ET_EXEC) && (ehdr.e_type != ET_DYN))
+		die("Unsupported ELF header type\n");
+	if (ehdr.e_machine != ELF_MACHINE)
+		die("Not for %s\n", ELF_MACHINE_NAME);
+	if (ehdr.e_version != EV_CURRENT)
+		die("Unknown ELF version\n");
+	if (ehdr.e_ehsize != sizeof(Elf_Ehdr))
+		die("Bad Elf header size\n");
+	if (ehdr.e_phentsize != sizeof(Elf_Phdr))
+		die("Bad program header entry\n");
+	if (ehdr.e_shentsize != sizeof(Elf_Shdr))
+		die("Bad section header entry\n");
+
+	if (shnum == SHN_UNDEF || shstrndx == SHN_XINDEX) {
+		Elf_Shdr shdr;
+
+		if (fseek(fp, ehdr.e_shoff, SEEK_SET) < 0)
+			die("Seek to %" FMT " failed: %s\n", ehdr.e_shoff, strerror(errno));
+
+		if (fread(&shdr, sizeof(shdr), 1, fp) != 1)
+			die("Cannot read initial ELF section header: %s\n", strerror(errno));
+
+		if (shnum == SHN_UNDEF)
+			shnum = elf_xword_to_cpu(shdr.sh_size);
+
+		if (shstrndx == SHN_XINDEX)
+			shstrndx = elf_word_to_cpu(shdr.sh_link);
+	}
+
+	if (shstrndx >= shnum)
+		die("String table index out of bounds\n");
+}
+
+static void read_shdrs(FILE *fp)
+{
+	Elf_Shdr shdr;
+	int i;
+
+	secs = calloc(shnum, sizeof(struct section));
+	if (!secs)
+		die("Unable to allocate %ld section headers\n", shnum);
+
+	if (fseek(fp, ehdr.e_shoff, SEEK_SET) < 0)
+		die("Seek to %" FMT " failed: %s\n", ehdr.e_shoff, strerror(errno));
+
+	for (i = 0; i < shnum; i++) {
+		struct section *sec = &secs[i];
+
+		if (fread(&shdr, sizeof(shdr), 1, fp) != 1) {
+			die("Cannot read ELF section headers %d/%ld: %s\n",
+			    i, shnum, strerror(errno));
+		}
+
+		sec->shdr.sh_name = elf_word_to_cpu(shdr.sh_name);
+		sec->shdr.sh_type = elf_word_to_cpu(shdr.sh_type);
+		sec->shdr.sh_flags = elf_xword_to_cpu(shdr.sh_flags);
+		sec->shdr.sh_addr = elf_addr_to_cpu(shdr.sh_addr);
+		sec->shdr.sh_offset = elf_off_to_cpu(shdr.sh_offset);
+		sec->shdr.sh_size = elf_xword_to_cpu(shdr.sh_size);
+		sec->shdr.sh_link = elf_word_to_cpu(shdr.sh_link);
+		sec->shdr.sh_info = elf_word_to_cpu(shdr.sh_info);
+		sec->shdr.sh_addralign = elf_xword_to_cpu(shdr.sh_addralign);
+		sec->shdr.sh_entsize = elf_xword_to_cpu(shdr.sh_entsize);
+
+		if (sec->shdr.sh_link < shnum)
+			sec->link = &secs[sec->shdr.sh_link];
+	}
+
+}
+
+static void read_relocs(FILE *fp)
+{
+	int i, j;
+
+	for (i = 0; i < shnum; i++) {
+		struct section *sec = &secs[i];
+
+		if (sec->shdr.sh_type != SHT_REL_TYPE)
+			continue;
+
+		sec->reltab = malloc(sec->shdr.sh_size);
+		if (!sec->reltab)
+			die("malloc of %" FMT " bytes for relocs failed\n", sec->shdr.sh_size);
+
+		if (fseek(fp, sec->shdr.sh_offset, SEEK_SET) < 0)
+			die("Seek to %" FMT " failed: %s\n", sec->shdr.sh_offset, strerror(errno));
+
+		if (fread(sec->reltab, 1, sec->shdr.sh_size, fp) != sec->shdr.sh_size)
+			die("Cannot read symbol table: %s\n", strerror(errno));
+
+		for (j = 0; j < sec->shdr.sh_size / sizeof(Elf_Rel); j++) {
+			Elf_Rel *rel = &sec->reltab[j];
+
+			rel->r_offset = elf_addr_to_cpu(rel->r_offset);
+			rel->r_info = elf_xword_to_cpu(rel->r_info);
+#if (SHT_REL_TYPE == SHT_RELA)
+			rel->r_addend = elf_xword_to_cpu(rel->r_addend);
+#endif
+		}
+	}
+}
+
+static void add_reloc(struct relocs *r, uint32_t offset)
+{
+	if (r->count == r->size) {
+		unsigned long newsize = r->size + 50000;
+		void *mem = realloc(r->offset, newsize * sizeof(r->offset[0]));
+
+		if (!mem)
+			die("realloc of %ld entries for relocs failed\n", newsize);
+
+		r->offset = mem;
+		r->size = newsize;
+	}
+	r->offset[r->count++] = offset;
+}
+
+static int do_reloc(struct section *sec, Elf_Rel *rel)
+{
+	unsigned int r_type = ELF64_R_TYPE(rel->r_info);
+	ElfW(Addr) offset = rel->r_offset;
+
+	switch (r_type) {
+	case R_390_NONE:
+	case R_390_PC32:
+	case R_390_PC64:
+	case R_390_PC16DBL:
+	case R_390_PC32DBL:
+	case R_390_PLT32DBL:
+	case R_390_GOTENT:
+	case R_390_GOTPCDBL:
+	case R_390_GOTOFF64:
+		break;
+	case R_390_64:
+		add_reloc(&relocs64, offset);
+		break;
+	default:
+		die("Unsupported relocation type: %d\n", r_type);
+		break;
+	}
+
+	return 0;
+}
+
+static void walk_relocs(void)
+{
+	int i;
+
+	/* Walk through the relocations */
+	for (i = 0; i < shnum; i++) {
+		struct section *sec_applies;
+		int j;
+		struct section *sec = &secs[i];
+
+		if (sec->shdr.sh_type != SHT_REL_TYPE)
+			continue;
+
+		sec_applies = &secs[sec->shdr.sh_info];
+		if (!(sec_applies->shdr.sh_flags & SHF_ALLOC))
+			continue;
+
+		for (j = 0; j < sec->shdr.sh_size / sizeof(Elf_Rel); j++) {
+			Elf_Rel *rel = &sec->reltab[j];
+
+			do_reloc(sec, rel);
+		}
+	}
+}
+
+static int cmp_relocs(const void *va, const void *vb)
+{
+	const uint32_t *a, *b;
+
+	a = va; b = vb;
+	return (*a == *b) ? 0 : (*a > *b) ? 1 : -1;
+}
+
+static void sort_relocs(struct relocs *r)
+{
+	qsort(r->offset, r->count, sizeof(r->offset[0]), cmp_relocs);
+}
+
+static int print_reloc(uint32_t v)
+{
+	return fprintf(stdout, "\t.long 0x%08"PRIx32"\n", v) > 0 ? 0 : -1;
+}
+
+static void emit_relocs(void)
+{
+	int i;
+
+	walk_relocs();
+	sort_relocs(&relocs64);
+
+	printf(".section \".vmlinux.relocs_64\",\"a\"\n");
+	for (i = 0; i < relocs64.count; i++)
+		print_reloc(relocs64.offset[i]);
+}
+
+static void process(FILE *fp)
+{
+	read_ehdr(fp);
+	read_shdrs(fp);
+	read_relocs(fp);
+	emit_relocs();
+}
+
+static void usage(void)
+{
+	die("relocs vmlinux\n");
+}
+
+int main(int argc, char **argv)
+{
+	unsigned char e_ident[EI_NIDENT];
+	const char *fname;
+	FILE *fp;
+
+	fname = NULL;
+
+	if (argc != 2)
+		usage();
+
+	fname = argv[1];
+
+	fp = fopen(fname, "r");
+	if (!fp)
+		die("Cannot open %s: %s\n", fname, strerror(errno));
+
+	if (fread(&e_ident, 1, EI_NIDENT, fp) != EI_NIDENT)
+		die("Cannot read %s: %s", fname, strerror(errno));

+	rewind(fp);
+
+	process(fp);
+
+	fclose(fp);
+	return 0;
+}
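For context (an illustration, not part of the diff): per the usage() string above the host tool is invoked as "relocs vmlinux", and per emit_relocs()/print_reloc() it writes one sorted .long entry per R_390_64 relocation into a dedicated section. The offsets below are made-up example values:

	$ relocs vmlinux > relocs.S

	.section ".vmlinux.relocs_64","a"
		.long 0x00f5a318
		.long 0x00f5a320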
@ -26,58 +26,79 @@ static int enable_slot(struct hotplug_slot *hotplug_slot)
|
||||
hotplug_slot);
|
||||
int rc;
|
||||
|
||||
if (zdev->state != ZPCI_FN_STATE_STANDBY)
|
||||
return -EIO;
|
||||
mutex_lock(&zdev->state_lock);
|
||||
if (zdev->state != ZPCI_FN_STATE_STANDBY) {
|
||||
rc = -EIO;
|
||||
goto out;
|
||||
}
|
||||
|
||||
rc = sclp_pci_configure(zdev->fid);
|
||||
zpci_dbg(3, "conf fid:%x, rc:%d\n", zdev->fid, rc);
|
||||
if (rc)
|
||||
return rc;
|
||||
goto out;
|
||||
zdev->state = ZPCI_FN_STATE_CONFIGURED;
|
||||
|
||||
return zpci_scan_configured_device(zdev, zdev->fh);
|
||||
rc = zpci_scan_configured_device(zdev, zdev->fh);
|
||||
out:
|
||||
mutex_unlock(&zdev->state_lock);
|
||||
return rc;
|
||||
}
|
||||
|
||||
static int disable_slot(struct hotplug_slot *hotplug_slot)
|
||||
{
|
||||
struct zpci_dev *zdev = container_of(hotplug_slot, struct zpci_dev,
|
||||
hotplug_slot);
|
||||
struct pci_dev *pdev;
|
||||
struct pci_dev *pdev = NULL;
|
||||
int rc;
|
||||
|
||||
if (zdev->state != ZPCI_FN_STATE_CONFIGURED)
|
||||
return -EIO;
|
||||
mutex_lock(&zdev->state_lock);
|
||||
if (zdev->state != ZPCI_FN_STATE_CONFIGURED) {
|
||||
rc = -EIO;
|
||||
goto out;
|
||||
}
|
||||
|
||||
pdev = pci_get_slot(zdev->zbus->bus, zdev->devfn);
|
||||
if (pdev && pci_num_vf(pdev)) {
|
||||
pci_dev_put(pdev);
|
||||
return -EBUSY;
|
||||
rc = -EBUSY;
|
||||
goto out;
|
||||
}
|
||||
pci_dev_put(pdev);
|
||||
|
||||
return zpci_deconfigure_device(zdev);
|
||||
rc = zpci_deconfigure_device(zdev);
|
||||
out:
|
||||
mutex_unlock(&zdev->state_lock);
|
||||
if (pdev)
|
||||
pci_dev_put(pdev);
|
||||
return rc;
|
||||
}
|
||||
|
||||
static int reset_slot(struct hotplug_slot *hotplug_slot, bool probe)
|
||||
{
|
||||
struct zpci_dev *zdev = container_of(hotplug_slot, struct zpci_dev,
|
||||
hotplug_slot);
|
||||
int rc = -EIO;
|
||||
|
||||
if (zdev->state != ZPCI_FN_STATE_CONFIGURED)
|
||||
return -EIO;
|
||||
/*
|
||||
* We can't take the zdev->lock as reset_slot may be called during
|
||||
* probing and/or device removal which already happens under the
|
||||
* zdev->lock. Instead the user should use the higher level
|
||||
* pci_reset_function() or pci_bus_reset() which hold the PCI device
|
||||
* lock preventing concurrent removal. If not using these functions
|
||||
* holding the PCI device lock is required.
|
||||
* If we can't get the zdev->state_lock the device state is
|
||||
* currently undergoing a transition and we bail out - just
|
||||
* the same as if the device's state is not configured at all.
|
||||
*/
|
||||
if (!mutex_trylock(&zdev->state_lock))
|
||||
return rc;
|
||||
|
||||
/* As long as the function is configured we can reset */
|
||||
if (probe)
|
||||
return 0;
|
||||
/* We can reset only if the function is configured */
|
||||
if (zdev->state != ZPCI_FN_STATE_CONFIGURED)
|
||||
goto out;
|
||||
|
||||
return zpci_hot_reset_device(zdev);
|
||||
if (probe) {
|
||||
rc = 0;
|
||||
goto out;
|
||||
}
|
||||
|
||||
rc = zpci_hot_reset_device(zdev);
|
||||
out:
|
||||
mutex_unlock(&zdev->state_lock);
|
||||
return rc;
|
||||
}
|
||||
|
||||
static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
|
||||
|
@ -195,7 +195,7 @@ static void free_chan_prog(struct ccw1 *cpa)
|
||||
struct ccw1 *ptr = cpa;
|
||||
|
||||
while (ptr->cda) {
|
||||
kfree((void *)(addr_t) ptr->cda);
|
||||
kfree(phys_to_virt(ptr->cda));
|
||||
ptr++;
|
||||
}
|
||||
kfree(cpa);
|
||||
@ -237,7 +237,7 @@ static struct ccw1 *alloc_chan_prog(const char __user *ubuf, int rec_count,
|
||||
free_chan_prog(cpa);
|
||||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
cpa[i].cda = (u32)(addr_t) kbuf;
|
||||
cpa[i].cda = (u32)virt_to_phys(kbuf);
|
||||
if (copy_from_user(kbuf, ubuf, reclen)) {
|
||||
free_chan_prog(cpa);
|
||||
return ERR_PTR(-EFAULT);
|
||||
|
@ -29,7 +29,6 @@
|
||||
#include <asm/irqflags.h>
|
||||
#include <asm/checksum.h>
|
||||
#include <asm/os_info.h>
|
||||
#include <asm/switch_to.h>
|
||||
#include <asm/maccess.h>
|
||||
#include "sclp.h"
|
||||
|
||||
|
@ -31,7 +31,7 @@
|
||||
* to devices that use multiple subchannels.
|
||||
*/
|
||||
|
||||
static struct bus_type ccwgroup_bus_type;
|
||||
static const struct bus_type ccwgroup_bus_type;
|
||||
|
||||
static void __ccwgroup_remove_symlinks(struct ccwgroup_device *gdev)
|
||||
{
|
||||
@ -465,7 +465,7 @@ static void ccwgroup_shutdown(struct device *dev)
|
||||
gdrv->shutdown(gdev);
|
||||
}
|
||||
|
||||
static struct bus_type ccwgroup_bus_type = {
|
||||
static const struct bus_type ccwgroup_bus_type = {
|
||||
.name = "ccwgroup",
|
||||
.dev_groups = ccwgroup_dev_groups,
|
||||
.remove = ccwgroup_remove,
|
||||
|
@ -1091,8 +1091,8 @@ int __init chsc_init(void)
|
||||
{
|
||||
int ret;
|
||||
|
||||
sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
|
||||
chsc_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
|
||||
sei_page = (void *)get_zeroed_page(GFP_KERNEL);
|
||||
chsc_page = (void *)get_zeroed_page(GFP_KERNEL);
|
||||
if (!sei_page || !chsc_page) {
|
||||
ret = -ENOMEM;
|
||||
goto out_err;
|
||||
|
@ -293,7 +293,7 @@ static int chsc_ioctl_start(void __user *user_area)
|
||||
if (!css_general_characteristics.dynio)
|
||||
/* It makes no sense to try. */
|
||||
return -EOPNOTSUPP;
|
||||
chsc_area = (void *)get_zeroed_page(GFP_DMA | GFP_KERNEL);
|
||||
chsc_area = (void *)get_zeroed_page(GFP_KERNEL);
|
||||
if (!chsc_area)
|
||||
return -ENOMEM;
|
||||
request = kzalloc(sizeof(*request), GFP_KERNEL);
|
||||
@ -341,7 +341,7 @@ static int chsc_ioctl_on_close_set(void __user *user_area)
|
||||
ret = -ENOMEM;
|
||||
goto out_unlock;
|
||||
}
|
||||
on_close_chsc_area = (void *)get_zeroed_page(GFP_DMA | GFP_KERNEL);
|
||||
on_close_chsc_area = (void *)get_zeroed_page(GFP_KERNEL);
|
||||
if (!on_close_chsc_area) {
|
||||
ret = -ENOMEM;
|
||||
goto out_free_request;
|
||||
@ -393,7 +393,7 @@ static int chsc_ioctl_start_sync(void __user *user_area)
|
||||
struct chsc_sync_area *chsc_area;
|
||||
int ret, ccode;
|
||||
|
||||
chsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
|
||||
chsc_area = (void *)get_zeroed_page(GFP_KERNEL);
|
||||
if (!chsc_area)
|
||||
return -ENOMEM;
|
||||
if (copy_from_user(chsc_area, user_area, PAGE_SIZE)) {
|
||||
@ -439,7 +439,7 @@ static int chsc_ioctl_info_channel_path(void __user *user_cd)
|
||||
u8 data[PAGE_SIZE - 20];
|
||||
} __attribute__ ((packed)) *scpcd_area;
|
||||
|
||||
scpcd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
|
||||
scpcd_area = (void *)get_zeroed_page(GFP_KERNEL);
|
||||
if (!scpcd_area)
|
||||
return -ENOMEM;
|
||||
cd = kzalloc(sizeof(*cd), GFP_KERNEL);
|
||||
@ -501,7 +501,7 @@ static int chsc_ioctl_info_cu(void __user *user_cd)
|
||||
u8 data[PAGE_SIZE - 20];
|
||||
} __attribute__ ((packed)) *scucd_area;
|
||||
|
||||
scucd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
|
||||
scucd_area = (void *)get_zeroed_page(GFP_KERNEL);
|
||||
if (!scucd_area)
|
||||
return -ENOMEM;
|
||||
cd = kzalloc(sizeof(*cd), GFP_KERNEL);
|
||||
@ -564,7 +564,7 @@ static int chsc_ioctl_info_sch_cu(void __user *user_cud)
|
||||
u8 data[PAGE_SIZE - 20];
|
||||
} __attribute__ ((packed)) *sscud_area;
|
||||
|
||||
sscud_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
|
||||
sscud_area = (void *)get_zeroed_page(GFP_KERNEL);
|
||||
if (!sscud_area)
|
||||
return -ENOMEM;
|
||||
cud = kzalloc(sizeof(*cud), GFP_KERNEL);
|
||||
@ -626,7 +626,7 @@ static int chsc_ioctl_conf_info(void __user *user_ci)
|
||||
u8 data[PAGE_SIZE - 20];
|
||||
} __attribute__ ((packed)) *sci_area;
|
||||
|
||||
sci_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
|
||||
sci_area = (void *)get_zeroed_page(GFP_KERNEL);
|
||||
if (!sci_area)
|
||||
return -ENOMEM;
|
||||
ci = kzalloc(sizeof(*ci), GFP_KERNEL);
|
||||
@ -697,7 +697,7 @@ static int chsc_ioctl_conf_comp_list(void __user *user_ccl)
|
||||
u32 res;
|
||||
} __attribute__ ((packed)) *cssids_parm;
|
||||
|
||||
sccl_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
|
||||
sccl_area = (void *)get_zeroed_page(GFP_KERNEL);
|
||||
if (!sccl_area)
|
||||
return -ENOMEM;
|
||||
ccl = kzalloc(sizeof(*ccl), GFP_KERNEL);
|
||||
@ -757,7 +757,7 @@ static int chsc_ioctl_chpd(void __user *user_chpd)
|
||||
int ret;
|
||||
|
||||
chpd = kzalloc(sizeof(*chpd), GFP_KERNEL);
|
||||
scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
|
||||
scpd_area = (void *)get_zeroed_page(GFP_KERNEL);
|
||||
if (!scpd_area || !chpd) {
|
||||
ret = -ENOMEM;
|
||||
goto out_free;
|
||||
@ -797,7 +797,7 @@ static int chsc_ioctl_dcal(void __user *user_dcal)
|
||||
u8 data[PAGE_SIZE - 36];
|
||||
} __attribute__ ((packed)) *sdcal_area;
|
||||
|
||||
sdcal_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
|
||||
sdcal_area = (void *)get_zeroed_page(GFP_KERNEL);
|
||||
if (!sdcal_area)
|
||||
return -ENOMEM;
|
||||
dcal = kzalloc(sizeof(*dcal), GFP_KERNEL);
|
||||
|
@ -169,7 +169,8 @@ static inline void cmf_activate(void *area, unsigned int onoff)
|
||||
" lgr 2,%[mbo]\n"
|
||||
" schm\n"
|
||||
:
|
||||
: [r1] "d" ((unsigned long)onoff), [mbo] "d" (area)
|
||||
: [r1] "d" ((unsigned long)onoff),
|
||||
[mbo] "d" (virt_to_phys(area))
|
||||
: "1", "2");
|
||||
}
|
||||
|
||||
@ -501,8 +502,7 @@ static int alloc_cmb(struct ccw_device *cdev)
|
||||
WARN_ON(!list_empty(&cmb_area.list));
|
||||
|
||||
spin_unlock(&cmb_area.lock);
|
||||
mem = (void*)__get_free_pages(GFP_KERNEL | GFP_DMA,
|
||||
get_order(size));
|
||||
mem = (void *)__get_free_pages(GFP_KERNEL, get_order(size));
|
||||
spin_lock(&cmb_area.lock);
|
||||
|
||||
if (cmb_area.mem) {
|
||||
|
@ -39,7 +39,7 @@ int max_ssid;
|
||||
|
||||
#define MAX_CSS_IDX 0
|
||||
struct channel_subsystem *channel_subsystems[MAX_CSS_IDX + 1];
|
||||
static struct bus_type css_bus_type;
|
||||
static const struct bus_type css_bus_type;
|
||||
|
||||
int
|
||||
for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data)
|
||||
@ -1409,7 +1409,7 @@ static int css_uevent(const struct device *dev, struct kobj_uevent_env *env)
|
||||
return ret;
|
||||
}
|
||||
|
||||
static struct bus_type css_bus_type = {
|
||||
static const struct bus_type css_bus_type = {
|
||||
.name = "css",
|
||||
.match = css_bus_match,
|
||||
.probe = css_probe,
|
||||
|
@ -49,7 +49,7 @@ static const unsigned long recovery_delay[] = { 3, 30, 300 };
|
||||
|
||||
static atomic_t ccw_device_init_count = ATOMIC_INIT(0);
|
||||
static DECLARE_WAIT_QUEUE_HEAD(ccw_device_init_wq);
|
||||
static struct bus_type ccw_bus_type;
|
||||
static const struct bus_type ccw_bus_type;
|
||||
|
||||
/******************* bus type handling ***********************/
|
||||
|
||||
@ -1776,7 +1776,7 @@ static void ccw_device_shutdown(struct device *dev)
|
||||
__disable_cmf(cdev);
|
||||
}
|
||||
|
||||
static struct bus_type ccw_bus_type = {
|
||||
static const struct bus_type ccw_bus_type = {
|
||||
.name = "ccw",
|
||||
.match = ccw_bus_match,
|
||||
.uevent = ccw_uevent,
|
||||
|
@ -42,7 +42,7 @@ static int scmdev_uevent(const struct device *dev, struct kobj_uevent_env *env)
|
||||
return add_uevent_var(env, "MODALIAS=scm:scmdev");
|
||||
}
|
||||
|
||||
static struct bus_type scm_bus_type = {
|
||||
static const struct bus_type scm_bus_type = {
|
||||
.name = "scm",
|
||||
.probe = scmdev_probe,
|
||||
.remove = scmdev_remove,
|
||||
@ -228,7 +228,7 @@ int scm_update_information(void)
|
||||
size_t num;
|
||||
int ret;
|
||||
|
||||
scm_info = (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
|
||||
scm_info = (void *)__get_free_page(GFP_KERNEL);
|
||||
if (!scm_info)
|
||||
return -ENOMEM;
|
||||
|
||||
|
@ -38,6 +38,7 @@
|
||||
#include <linux/debugfs.h>
|
||||
#include <linux/ctype.h>
|
||||
#include <linux/module.h>
|
||||
#include <asm/uv.h>
|
||||
|
||||
#include "ap_bus.h"
|
||||
#include "ap_debug.h"
|
||||
@ -83,14 +84,11 @@ EXPORT_SYMBOL(ap_perms);
|
||||
DEFINE_MUTEX(ap_perms_mutex);
|
||||
EXPORT_SYMBOL(ap_perms_mutex);
|
||||
|
||||
/* # of bus scans since init */
|
||||
static atomic64_t ap_scan_bus_count;
|
||||
|
||||
/* # of bindings complete since init */
|
||||
static atomic64_t ap_bindings_complete_count = ATOMIC64_INIT(0);
|
||||
|
||||
/* completion for initial APQN bindings complete */
|
||||
static DECLARE_COMPLETION(ap_init_apqn_bindings_complete);
|
||||
/* completion for APQN bindings complete */
|
||||
static DECLARE_COMPLETION(ap_apqn_bindings_complete);
|
||||
|
||||
static struct ap_config_info *ap_qci_info;
|
||||
static struct ap_config_info *ap_qci_info_old;
|
||||
@ -101,12 +99,16 @@ static struct ap_config_info *ap_qci_info_old;
|
||||
debug_info_t *ap_dbf_info;
|
||||
|
||||
/*
|
||||
* Workqueue timer for bus rescan.
|
||||
* AP bus rescan related things.
|
||||
*/
|
||||
static struct timer_list ap_config_timer;
|
||||
static int ap_config_time = AP_CONFIG_TIME;
|
||||
static void ap_scan_bus(struct work_struct *);
|
||||
static DECLARE_WORK(ap_scan_work, ap_scan_bus);
|
||||
static bool ap_scan_bus(void);
|
||||
static bool ap_scan_bus_result; /* result of last ap_scan_bus() */
|
||||
static DEFINE_MUTEX(ap_scan_bus_mutex); /* mutex ap_scan_bus() invocations */
|
||||
static atomic64_t ap_scan_bus_count; /* counter ap_scan_bus() invocations */
|
||||
static int ap_scan_bus_time = AP_CONFIG_TIME;
|
||||
static struct timer_list ap_scan_bus_timer;
|
||||
static void ap_scan_bus_wq_callback(struct work_struct *);
|
||||
static DECLARE_WORK(ap_scan_bus_work, ap_scan_bus_wq_callback);
|
||||
|
||||
/*
|
||||
* Tasklet & timer for AP request polling and interrupts
|
||||
@ -135,7 +137,7 @@ static int ap_max_domain_id = 15;
|
||||
/* Maximum adapter id, if not given via qci */
|
||||
static int ap_max_adapter_id = 63;
|
||||
|
||||
static struct bus_type ap_bus_type;
|
||||
static const struct bus_type ap_bus_type;
|
||||
|
||||
/* Adapter interrupt definitions */
|
||||
static void ap_interrupt_handler(struct airq_struct *airq,
|
||||
@ -753,7 +755,7 @@ static void ap_calc_bound_apqns(unsigned int *apqns, unsigned int *bound)
|
||||
}
|
||||
|
||||
/*
|
||||
* After initial ap bus scan do check if all existing APQNs are
|
||||
* After ap bus scan do check if all existing APQNs are
|
||||
* bound to device drivers.
|
||||
*/
|
||||
static void ap_check_bindings_complete(void)
|
||||
@ -763,9 +765,9 @@ static void ap_check_bindings_complete(void)
|
||||
if (atomic64_read(&ap_scan_bus_count) >= 1) {
|
||||
ap_calc_bound_apqns(&apqns, &bound);
|
||||
if (bound == apqns) {
|
||||
if (!completion_done(&ap_init_apqn_bindings_complete)) {
|
||||
complete_all(&ap_init_apqn_bindings_complete);
|
||||
AP_DBF_INFO("%s complete\n", __func__);
|
||||
if (!completion_done(&ap_apqn_bindings_complete)) {
|
||||
complete_all(&ap_apqn_bindings_complete);
|
||||
pr_debug("%s all apqn bindings complete\n", __func__);
|
||||
}
|
||||
ap_send_bindings_complete_uevent();
|
||||
}
|
||||
@ -782,27 +784,29 @@ static void ap_check_bindings_complete(void)
|
||||
* -ETIME is returned. On failures negative return values are
|
||||
* returned to the caller.
|
||||
*/
|
||||
int ap_wait_init_apqn_bindings_complete(unsigned long timeout)
|
||||
int ap_wait_apqn_bindings_complete(unsigned long timeout)
|
||||
{
|
||||
int rc = 0;
|
||||
long l;
|
||||
|
||||
if (completion_done(&ap_init_apqn_bindings_complete))
|
||||
if (completion_done(&ap_apqn_bindings_complete))
|
||||
return 0;
|
||||
|
||||
if (timeout)
|
||||
l = wait_for_completion_interruptible_timeout(
|
||||
&ap_init_apqn_bindings_complete, timeout);
|
||||
&ap_apqn_bindings_complete, timeout);
|
||||
else
|
||||
l = wait_for_completion_interruptible(
|
||||
&ap_init_apqn_bindings_complete);
|
||||
&ap_apqn_bindings_complete);
|
||||
if (l < 0)
|
||||
return l == -ERESTARTSYS ? -EINTR : l;
|
||||
rc = l == -ERESTARTSYS ? -EINTR : l;
|
||||
else if (l == 0 && timeout)
|
||||
return -ETIME;
|
||||
rc = -ETIME;
|
||||
|
||||
return 0;
|
||||
pr_debug("%s rc=%d\n", __func__, rc);
|
||||
return rc;
|
||||
}
|
||||
EXPORT_SYMBOL(ap_wait_init_apqn_bindings_complete);
|
||||
EXPORT_SYMBOL(ap_wait_apqn_bindings_complete);
|
||||
|
||||
static int __ap_queue_devices_with_id_unregister(struct device *dev, void *data)
|
||||
{
|
||||
@ -826,8 +830,8 @@ static int __ap_revise_reserved(struct device *dev, void *dummy)
|
||||
drvres = to_ap_drv(dev->driver)->flags
|
||||
& AP_DRIVER_FLAG_DEFAULT;
|
||||
if (!!devres != !!drvres) {
|
||||
AP_DBF_DBG("%s reprobing queue=%02x.%04x\n",
|
||||
__func__, card, queue);
|
||||
pr_debug("%s reprobing queue=%02x.%04x\n",
|
||||
__func__, card, queue);
|
||||
rc = device_reprobe(dev);
|
||||
if (rc)
|
||||
AP_DBF_WARN("%s reprobing queue=%02x.%04x failed\n",
|
||||
@ -939,8 +943,6 @@ static int ap_device_probe(struct device *dev)
|
||||
if (is_queue_dev(dev))
|
||||
hash_del(&to_ap_queue(dev)->hnode);
|
||||
spin_unlock_bh(&ap_queues_lock);
|
||||
} else {
|
||||
ap_check_bindings_complete();
|
||||
}
|
||||
|
||||
out:
|
||||
@ -1012,16 +1014,47 @@ void ap_driver_unregister(struct ap_driver *ap_drv)
|
||||
}
|
||||
EXPORT_SYMBOL(ap_driver_unregister);
|
||||
|
||||
void ap_bus_force_rescan(void)
|
||||
/*
|
||||
* Enforce a synchronous AP bus rescan.
|
||||
* Returns true if the bus scan finds a change in the AP configuration
|
||||
* and AP devices have been added or deleted when this function returns.
|
||||
*/
|
||||
bool ap_bus_force_rescan(void)
|
||||
{
|
||||
/* Only trigger AP bus scans after the initial scan is done */
|
||||
if (atomic64_read(&ap_scan_bus_count) <= 0)
|
||||
return;
|
||||
unsigned long scan_counter = atomic64_read(&ap_scan_bus_count);
|
||||
bool rc = false;
|
||||
|
||||
/* processing a asynchronous bus rescan */
|
||||
del_timer(&ap_config_timer);
|
||||
queue_work(system_long_wq, &ap_scan_work);
|
||||
flush_work(&ap_scan_work);
|
||||
pr_debug(">%s scan counter=%lu\n", __func__, scan_counter);
|
||||
|
||||
/* Only trigger AP bus scans after the initial scan is done */
|
||||
if (scan_counter <= 0)
|
||||
goto out;
|
||||
|
||||
/* Try to acquire the AP scan bus mutex */
|
||||
if (mutex_trylock(&ap_scan_bus_mutex)) {
|
||||
/* mutex acquired, run the AP bus scan */
|
||||
ap_scan_bus_result = ap_scan_bus();
|
||||
rc = ap_scan_bus_result;
|
||||
mutex_unlock(&ap_scan_bus_mutex);
|
||||
goto out;
|
||||
}
|
||||
|
||||
/*
|
||||
* Mutex acquire failed. So there is currently another task
|
||||
* already running the AP bus scan. Then let's simple wait
|
||||
* for the lock which means the other task has finished and
|
||||
* stored the result in ap_scan_bus_result.
|
||||
*/
|
||||
if (mutex_lock_interruptible(&ap_scan_bus_mutex)) {
|
||||
/* some error occurred, ignore and go out */
|
||||
goto out;
|
||||
}
|
||||
rc = ap_scan_bus_result;
|
||||
mutex_unlock(&ap_scan_bus_mutex);
|
||||
|
||||
out:
|
||||
pr_debug("%s rc=%d\n", __func__, rc);
|
||||
return rc;
|
||||
}
|
||||
EXPORT_SYMBOL(ap_bus_force_rescan);
|
||||
|
||||
@ -1030,7 +1063,7 @@ EXPORT_SYMBOL(ap_bus_force_rescan);
|
||||
*/
|
||||
void ap_bus_cfg_chg(void)
|
||||
{
|
||||
AP_DBF_DBG("%s config change, forcing bus rescan\n", __func__);
|
||||
pr_debug("%s config change, forcing bus rescan\n", __func__);
|
||||
|
||||
ap_bus_force_rescan();
|
||||
}
|
||||
@ -1250,7 +1283,7 @@ static BUS_ATTR_RO(ap_interrupts);
|
||||
|
||||
static ssize_t config_time_show(const struct bus_type *bus, char *buf)
|
||||
{
|
||||
return sysfs_emit(buf, "%d\n", ap_config_time);
|
||||
return sysfs_emit(buf, "%d\n", ap_scan_bus_time);
|
||||
}
|
||||
|
||||
static ssize_t config_time_store(const struct bus_type *bus,
|
||||
@ -1260,8 +1293,8 @@ static ssize_t config_time_store(const struct bus_type *bus,
|
||||
|
||||
if (sscanf(buf, "%d\n", &time) != 1 || time < 5 || time > 120)
|
||||
return -EINVAL;
|
||||
ap_config_time = time;
|
||||
mod_timer(&ap_config_timer, jiffies + ap_config_time * HZ);
|
||||
ap_scan_bus_time = time;
|
||||
mod_timer(&ap_scan_bus_timer, jiffies + ap_scan_bus_time * HZ);
|
||||
return count;
|
||||
}
|
||||
|
||||
@ -1603,7 +1636,7 @@ static struct attribute *ap_bus_attrs[] = {
|
||||
};
|
||||
ATTRIBUTE_GROUPS(ap_bus);
|
||||
|
||||
static struct bus_type ap_bus_type = {
|
||||
static const struct bus_type ap_bus_type = {
|
||||
.name = "ap",
|
||||
.bus_groups = ap_bus_groups,
|
||||
.match = &ap_bus_match,
|
||||
@ -1888,8 +1921,8 @@ static inline void ap_scan_domains(struct ap_card *ac)
|
||||
aq->last_err_rc = AP_RESPONSE_CHECKSTOPPED;
|
||||
}
|
||||
spin_unlock_bh(&aq->lock);
|
||||
AP_DBF_DBG("%s(%d,%d) queue dev checkstop on\n",
|
||||
__func__, ac->id, dom);
|
||||
pr_debug("%s(%d,%d) queue dev checkstop on\n",
|
||||
__func__, ac->id, dom);
|
||||
/* 'receive' pending messages with -EAGAIN */
|
||||
ap_flush_queue(aq);
|
||||
goto put_dev_and_continue;
|
||||
@ -1899,8 +1932,8 @@ static inline void ap_scan_domains(struct ap_card *ac)
|
||||
if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
|
||||
_ap_queue_init_state(aq);
|
||||
spin_unlock_bh(&aq->lock);
|
||||
AP_DBF_DBG("%s(%d,%d) queue dev checkstop off\n",
|
||||
__func__, ac->id, dom);
|
||||
pr_debug("%s(%d,%d) queue dev checkstop off\n",
|
||||
__func__, ac->id, dom);
|
||||
goto put_dev_and_continue;
|
||||
}
|
||||
/* config state change */
|
||||
@ -1912,8 +1945,8 @@ static inline void ap_scan_domains(struct ap_card *ac)
|
||||
aq->last_err_rc = AP_RESPONSE_DECONFIGURED;
|
||||
}
|
||||
spin_unlock_bh(&aq->lock);
|
||||
AP_DBF_DBG("%s(%d,%d) queue dev config off\n",
|
||||
__func__, ac->id, dom);
|
||||
pr_debug("%s(%d,%d) queue dev config off\n",
|
||||
__func__, ac->id, dom);
|
||||
ap_send_config_uevent(&aq->ap_dev, aq->config);
|
||||
/* 'receive' pending messages with -EAGAIN */
|
||||
ap_flush_queue(aq);
|
||||
@ -1924,8 +1957,8 @@ static inline void ap_scan_domains(struct ap_card *ac)
|
||||
if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
|
||||
_ap_queue_init_state(aq);
|
||||
spin_unlock_bh(&aq->lock);
|
||||
AP_DBF_DBG("%s(%d,%d) queue dev config on\n",
|
||||
__func__, ac->id, dom);
|
||||
pr_debug("%s(%d,%d) queue dev config on\n",
|
||||
__func__, ac->id, dom);
|
||||
ap_send_config_uevent(&aq->ap_dev, aq->config);
|
||||
goto put_dev_and_continue;
|
||||
}
|
||||
@ -1997,8 +2030,8 @@ static inline void ap_scan_adapter(int ap)
|
||||
ap_scan_rm_card_dev_and_queue_devs(ac);
|
||||
put_device(dev);
|
||||
} else {
|
||||
AP_DBF_DBG("%s(%d) no type info (no APQN found), ignored\n",
|
||||
__func__, ap);
|
||||
pr_debug("%s(%d) no type info (no APQN found), ignored\n",
|
||||
__func__, ap);
|
||||
}
|
||||
return;
|
||||
}
|
||||
@ -2010,8 +2043,8 @@ static inline void ap_scan_adapter(int ap)
|
||||
ap_scan_rm_card_dev_and_queue_devs(ac);
|
||||
put_device(dev);
|
||||
} else {
|
||||
AP_DBF_DBG("%s(%d) no valid type (0) info, ignored\n",
|
||||
__func__, ap);
|
||||
pr_debug("%s(%d) no valid type (0) info, ignored\n",
|
||||
__func__, ap);
|
||||
}
|
||||
return;
|
||||
}
|
||||
@ -2135,23 +2168,80 @@ static bool ap_get_configuration(void)
|
||||
sizeof(struct ap_config_info)) != 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* ap_config_has_new_aps - Check current against old qci info if
|
||||
* new adapters have appeared. Returns true if at least one new
|
||||
* adapter in the apm mask is showing up. Existing adapters or
|
||||
* receding adapters are not counted.
|
||||
*/
|
||||
static bool ap_config_has_new_aps(void)
|
||||
{
|
||||
|
||||
unsigned long m[BITS_TO_LONGS(AP_DEVICES)];
|
||||
|
||||
if (!ap_qci_info)
|
||||
return false;
|
||||
|
||||
bitmap_andnot(m, (unsigned long *)ap_qci_info->apm,
|
||||
(unsigned long *)ap_qci_info_old->apm, AP_DEVICES);
|
||||
if (!bitmap_empty(m, AP_DEVICES))
|
||||
return true;
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
/*
|
||||
* ap_config_has_new_doms - Check current against old qci info if
|
||||
* new (usage) domains have appeared. Returns true if at least one
|
||||
* new domain in the aqm mask is showing up. Existing domains or
|
||||
* receding domains are not counted.
|
||||
*/
|
||||
static bool ap_config_has_new_doms(void)
|
||||
{
|
||||
unsigned long m[BITS_TO_LONGS(AP_DOMAINS)];
|
||||
|
||||
if (!ap_qci_info)
|
||||
return false;
|
||||
|
||||
bitmap_andnot(m, (unsigned long *)ap_qci_info->aqm,
|
||||
(unsigned long *)ap_qci_info_old->aqm, AP_DOMAINS);
|
||||
if (!bitmap_empty(m, AP_DOMAINS))
|
||||
return true;
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* ap_scan_bus(): Scan the AP bus for new devices
|
||||
* Runs periodically, workqueue timer (ap_config_time)
|
||||
* @unused: Unused pointer.
|
||||
* Always run under mutex ap_scan_bus_mutex protection
|
||||
* which needs to get locked/unlocked by the caller!
|
||||
* Returns true if any config change has been detected
|
||||
* during the scan, otherwise false.
|
||||
*/
|
||||
static void ap_scan_bus(struct work_struct *unused)
|
||||
static bool ap_scan_bus(void)
|
||||
{
|
||||
int ap, config_changed = 0;
|
||||
bool config_changed;
|
||||
int ap;
|
||||
|
||||
/* config change notify */
|
||||
pr_debug(">%s\n", __func__);
|
||||
|
||||
/* (re-)fetch configuration via QCI */
|
||||
config_changed = ap_get_configuration();
|
||||
if (config_changed)
|
||||
if (config_changed) {
|
||||
if (ap_config_has_new_aps() || ap_config_has_new_doms()) {
|
||||
/*
|
||||
* Appearance of new adapters and/or domains need to
|
||||
* build new ap devices which need to get bound to an
|
||||
* device driver. Thus reset the APQN bindings complete
|
||||
* completion.
|
||||
*/
|
||||
reinit_completion(&ap_apqn_bindings_complete);
|
||||
}
|
||||
/* post a config change notify */
|
||||
notify_config_changed();
|
||||
}
|
||||
ap_select_domain();
|
||||
|
||||
AP_DBF_DBG("%s running\n", __func__);
|
||||
|
||||
/* loop over all possible adapters */
|
||||
for (ap = 0; ap <= ap_max_adapter_id; ap++)
|
||||
ap_scan_adapter(ap);
|
||||
@@ -2174,23 +2264,56 @@ static void ap_scan_bus(struct work_struct *unused)
         }
 
         if (atomic64_inc_return(&ap_scan_bus_count) == 1) {
-                AP_DBF_DBG("%s init scan complete\n", __func__);
+                pr_debug("%s init scan complete\n", __func__);
                 ap_send_init_scan_done_uevent();
-                ap_check_bindings_complete();
         }
 
-        mod_timer(&ap_config_timer, jiffies + ap_config_time * HZ);
+        ap_check_bindings_complete();
+
+        mod_timer(&ap_scan_bus_timer, jiffies + ap_scan_bus_time * HZ);
+
+        pr_debug("<%s config_changed=%d\n", __func__, config_changed);
+
+        return config_changed;
 }
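The atomic64_inc_return() == 1 test above is what makes the init-scan uevent a one-shot: every completed scan increments the counter, but only the very first increment can observe the value 1, even if scans race. Reduced to its core, with hypothetical names:

#include <linux/atomic.h>
#include <linux/printk.h>

static atomic64_t scan_count = ATOMIC64_INIT(0);

static void note_scan_done(void)
{
        /* true exactly once, for the first completed scan */
        if (atomic64_inc_return(&scan_count) == 1)
                pr_debug("initial scan complete\n");
}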
-static void ap_config_timeout(struct timer_list *unused)
+/*
+ * Callback for the ap_scan_bus_timer
+ * Runs periodically, workqueue timer (ap_scan_bus_time)
+ */
+static void ap_scan_bus_timer_callback(struct timer_list *unused)
 {
-        queue_work(system_long_wq, &ap_scan_work);
+        /*
+         * schedule work into the system long wq which when
+         * the work is finally executed, calls the AP bus scan.
+         */
+        queue_work(system_long_wq, &ap_scan_bus_work);
 }
+
+/*
+ * Callback for the ap_scan_bus_work
+ */
+static void ap_scan_bus_wq_callback(struct work_struct *unused)
+{
+        /*
+         * Try to invoke an ap_scan_bus(). If the mutex acquisition
+         * fails there is currently another task already running the
+         * AP scan bus and there is no need to wait and re-trigger the
+         * scan again. Please note at the end of the scan bus function
+         * the AP scan bus timer is re-armed which triggers then the
+         * ap_scan_bus_timer_callback which enqueues a work into the
+         * system_long_wq which invokes this function here again.
+         */
+        if (mutex_trylock(&ap_scan_bus_mutex)) {
+                ap_scan_bus_result = ap_scan_bus();
+                mutex_unlock(&ap_scan_bus_mutex);
+        }
+}
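The comment in ap_scan_bus_wq_callback() describes a self-sustaining loop: the timer fires in atomic context where sleeping is forbidden, so it only queues work; the work function runs the scan under a mutex taken with mutex_trylock(), so concurrent triggers collapse into the one scan already in flight; and the scan re-arms the timer. A compressed sketch of that wiring, all names hypothetical:

#include <linux/jiffies.h>
#include <linux/mutex.h>
#include <linux/timer.h>
#include <linux/workqueue.h>

static DEFINE_MUTEX(scan_mutex);
static struct timer_list scan_timer;  /* set up via timer_setup() at init */

static void scan_work_fn(struct work_struct *unused);
static DECLARE_WORK(scan_work, scan_work_fn);

/* timer callback: atomic context, so only enqueue the work */
static void scan_timer_fn(struct timer_list *unused)
{
        queue_work(system_long_wq, &scan_work);
}

/* work callback: skip silently if a scan is already running */
static void scan_work_fn(struct work_struct *unused)
{
        if (!mutex_trylock(&scan_mutex))
                return;
        /* ... perform the actual bus scan here ... */
        mod_timer(&scan_timer, jiffies + 60 * HZ);  /* re-arm */
        mutex_unlock(&scan_mutex);
}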
 
 static int __init ap_debug_init(void)
 {
         ap_dbf_info = debug_register("ap", 2, 1,
-                                     DBF_MAX_SPRINTF_ARGS * sizeof(long));
+                                     AP_DBF_MAX_SPRINTF_ARGS * sizeof(long));
         debug_register_view(ap_dbf_info, &debug_sprintf_view);
         debug_set_level(ap_dbf_info, DBF_ERR);
@@ -2274,7 +2397,7 @@ static int __init ap_module_init(void)
         ap_root_device->bus = &ap_bus_type;
 
         /* Setup the AP bus rescan timer. */
-        timer_setup(&ap_config_timer, ap_config_timeout, 0);
+        timer_setup(&ap_scan_bus_timer, ap_scan_bus_timer_callback, 0);
 
         /*
          * Setup the high resolution poll timer.
@@ -2292,7 +2415,7 @@ static int __init ap_module_init(void)
                 goto out_work;
         }
 
-        queue_work(system_long_wq, &ap_scan_work);
+        queue_work(system_long_wq, &ap_scan_bus_work);
 
         return 0;
@@ -266,7 +266,7 @@ int ap_sb_available(void);
 bool ap_is_se_guest(void);
 void ap_wait(enum ap_sm_wait wait);
 void ap_request_timeout(struct timer_list *t);
-void ap_bus_force_rescan(void);
+bool ap_bus_force_rescan(void);
 
 int ap_test_config_usage_domain(unsigned int domain);
 int ap_test_config_ctrl_domain(unsigned int domain);
@@ -352,8 +352,12 @@ int ap_parse_mask_str(const char *str,
  * the return value is 0. If the timeout (in jiffies) hits instead
  * -ETIME is returned. On failures negative return values are
  * returned to the caller.
+ * It may be that the AP bus scan finds new devices. Then the
+ * condition that all APQNs are bound to their device drivers
+ * is reset to false and this call again blocks until either all
+ * APQNs are bound to a device driver or the timeout hits again.
  */
-int ap_wait_init_apqn_bindings_complete(unsigned long timeout);
+int ap_wait_apqn_bindings_complete(unsigned long timeout);
 
 void ap_send_config_uevent(struct ap_device *ap_dev, bool cfg);
 void ap_send_online_uevent(struct ap_device *ap_dev, int online);
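Given the documented semantics (0 on success, -ETIME on timeout, other negative values on failure), a caller of the renamed ap_wait_apqn_bindings_complete() might look like the following hypothetical sketch; the 60-second timeout is an arbitrary example value:

#include <linux/jiffies.h>
#include <linux/printk.h>
#include "ap_bus.h"  /* ap_wait_apqn_bindings_complete() */

/* wait up to 60 seconds for every APQN to be bound to a driver */
static int wait_for_apqn_bindings(void)
{
        int rc = ap_wait_apqn_bindings_complete(60 * HZ);

        if (rc == -ETIME)
                pr_warn("APQN bindings still incomplete after 60s\n");
        return rc;
}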
@@ -16,7 +16,7 @@
 #define RC2ERR(rc) ((rc) ? DBF_ERR : DBF_INFO)
 #define RC2WARN(rc) ((rc) ? DBF_WARN : DBF_INFO)
 
-#define DBF_MAX_SPRINTF_ARGS 6
+#define AP_DBF_MAX_SPRINTF_ARGS 6
 
 #define AP_DBF(...) \
         debug_sprintf_event(ap_dbf_info, ##__VA_ARGS__)
@@ -26,8 +26,6 @@
         debug_sprintf_event(ap_dbf_info, DBF_WARN, ##__VA_ARGS__)
 #define AP_DBF_INFO(...) \
         debug_sprintf_event(ap_dbf_info, DBF_INFO, ##__VA_ARGS__)
-#define AP_DBF_DBG(...) \
-        debug_sprintf_event(ap_dbf_info, DBF_DEBUG, ##__VA_ARGS__)
 
 extern debug_info_t *ap_dbf_info;