/*
 * Copyright IBM Corp. 1999, 2009
 *
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#ifndef __ASM_CTL_REG_H
#define __ASM_CTL_REG_H

#include <linux/bug.h>

/*
 * Load control registers "low" through "high" from the memory area "array"
 * (lctlg); the BUILD_BUG_ON checks that "array" covers exactly that range.
 */
#define __ctl_load(array, low, high) { \
        typedef struct { char _[sizeof(array)]; } addrtype; \
        \
        BUILD_BUG_ON(sizeof(addrtype) != (high - low + 1) * sizeof(long)); \
        asm volatile( \
                "       lctlg   %1,%2,%0\n" \
                : : "Q" (*(addrtype *)(&array)), "i" (low), "i" (high)); \
}

/*
 * Store control registers "low" through "high" into the memory area "array"
 * (stctg); the BUILD_BUG_ON checks that "array" covers exactly that range.
 */
#define __ctl_store(array, low, high) { \
        typedef struct { char _[sizeof(array)]; } addrtype; \
        \
        BUILD_BUG_ON(sizeof(addrtype) != (high - low + 1) * sizeof(long)); \
        asm volatile( \
                "       stctg   %1,%2,%0\n" \
                : "=Q" (*(addrtype *)(&array)) \
                : "i" (low), "i" (high)); \
}

static inline void __ctl_set_bit(unsigned int cr, unsigned int bit)
{
        unsigned long reg;

        __ctl_store(reg, cr, cr);
        reg |= 1UL << bit;
        __ctl_load(reg, cr, cr);
}

static inline void __ctl_clear_bit(unsigned int cr, unsigned int bit)
{
        unsigned long reg;

        __ctl_store(reg, cr, cr);
        reg &= ~(1UL << bit);
        __ctl_load(reg, cr, cr);
}

s390/kernel: lazy restore fpu registers
Improve the save and restore behavior of FPU register contents to allow
use of the vector extension within the kernel.
The kernel does not use floating-point or vector registers, so saving
and restoring the FPU register contents is needed only for handling
signals or switching processes. To prepare for using vector instructions
and vector registers within the kernel, enhance the save behavior and
implement a lazy restore at return to user space from a system call or
interrupt.
To implement the lazy restore, save_fpu_regs() sets a CPU information
flag, CIF_FPU, to indicate that the FPU registers must be restored.
Saving and setting CIF_FPU is performed atomically to be interrupt-safe.
When the kernel wants to use the vector extension or wants to change the
FPU register state for a task during signal handling, save_fpu_regs()
must be called first. The CIF_FPU flag is also set at process switch.
At return to user space, the FPU state is restored. In particular, the
FPU state includes the floating-point or vector register contents, as
well as the vector-enablement and floating-point controls. Restoring the
FPU state and clearing CIF_FPU is also performed atomically.
For KVM, the FPU register state is restored when restoring the
general-purpose guest registers before the SIE instruction is started.
Because the path towards the SIE instruction is interruptible, the
CIF_FPU flag must be checked again right before going into SIE. If set,
the guest registers must be reloaded by re-entering the outer SIE loop.
This is the same behavior as when the SIE critical section is
interrupted.
Signed-off-by: Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
2015-06-10 10:53:42 +00:00

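A rough sketch of the lazy save/restore flow described above, for
illustration only: save_fpu_regs() and CIF_FPU come from the description,
while save_fp_vx_regs(), load_fp_vx_regs(), restore_fpu_regs_lazy() and the
thread.fpu save area are made-up placeholders, and the flag/IRQ helpers are
the usual per-CPU primitives.

void save_fpu_regs(void)
{
        unsigned long flags;

        /* Saving the registers and setting CIF_FPU must be atomic. */
        local_irq_save(flags);
        if (!test_cpu_flag(CIF_FPU)) {
                save_fp_vx_regs(&current->thread.fpu); /* hypothetical helper */
                set_cpu_flag(CIF_FPU); /* user FPU state must be reloaded later */
        }
        local_irq_restore(flags);
}

/* Run on return to user space, and re-checked right before entering SIE. */
static void restore_fpu_regs_lazy(void) /* hypothetical name */
{
        if (!test_cpu_flag(CIF_FPU))
                return;
        load_fp_vx_regs(&current->thread.fpu); /* hypothetical helper */
        clear_cpu_flag(CIF_FPU);
}
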
void __ctl_set_vx(void);

void smp_ctl_set_bit(int cr, int bit);
void smp_ctl_clear_bit(int cr, int bit);

s390/nmi: fix vector register corruption
If a machine check happens, the machine has the vector facility installed,
and the extended save area exists, the CPU will save the vector register
contents into the extended save area. This happens regardless of the
control register 0 contents, which enable and disable the vector facility
at runtime.
On each machine check the vector registers should be validated. The
current code however tries to validate the registers only if the running
task is using vector registers in user space.
Even the current code is broken and causes vector register corruption on
machine checks if user space uses them: the prefix area contains a
pointer (an absolute address) to the machine check extended save area.
In order to save some space, the save area was put into an unused area of
the second prefix page. When validating vector register contents, the
code uses the absolute address of the extended save area, which is wrong.
Due to prefixing, the vector instructions will then access the contents
using absolute addresses instead of the real addresses where the machine
stored the contents.
Even if the above worked, there is still the problem that register
validation would only happen if user space uses vector registers. If
kernel space uses them as well, this may also lead to vector register
corruption: if the kernel makes use of vector instructions, but the
currently running user space context does not, the machine check handler
will validate floating-point registers instead of vector registers.
Given that writing to a floating-point register may change the upper half
of the corresponding vector register, vector register corruption occurs
in this case as well.
Fix all of these issues and always validate the vector registers on each
machine check, if the machine has the vector facility installed and the
extended save area is defined.
Cc: <stable@vger.kernel.org> # 4.1+
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
2015-07-07 06:40:49 +00:00

union ctlreg0 {
        unsigned long val;
        struct {
                unsigned long      : 32;
                unsigned long      : 3;
                unsigned long lap  : 1; /* Low-address-protection control */
                unsigned long      : 4;
                unsigned long edat : 1; /* Enhanced-DAT-enablement control */
                unsigned long      : 4;
                unsigned long afp  : 1; /* AFP-register control */
                unsigned long vx   : 1; /* Vector enablement control */
                unsigned long      : 17;
        };
};

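For illustration only (the helper below is not part of this header),
union ctlreg0 pairs naturally with __ctl_store() to inspect control
register 0 by field name instead of by raw bit position:

static inline int cr0_vx_enabled(void) /* illustrative helper */
{
        union ctlreg0 cr0;

        __ctl_store(cr0.val, 0, 0); /* fetch the current CR0 contents */
        return cr0.vx;              /* vector enablement control bit */
}
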
#ifdef CONFIG_SMP
# define ctl_set_bit(cr, bit) smp_ctl_set_bit(cr, bit)
# define ctl_clear_bit(cr, bit) smp_ctl_clear_bit(cr, bit)
#else
# define ctl_set_bit(cr, bit) __ctl_set_bit(cr, bit)
# define ctl_clear_bit(cr, bit) __ctl_clear_bit(cr, bit)
#endif

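As a further illustrative example (not taken from this header), a caller
could enable the vector facility through ctl_set_bit(). The bit number is
derived from union ctlreg0 above: vx sits 46 bits below the top of the
64-bit register, and __ctl_set_bit() counts from the least-significant
bit, giving 63 - 46 = 17.

#define CR0_VX_BIT 17 /* illustrative; 63 - 46, from union ctlreg0 above */

static void example_enable_vx(void) /* illustrative helper */
{
        /* smp_ctl_set_bit() on SMP kernels, __ctl_set_bit() otherwise */
        ctl_set_bit(0, CR0_VX_BIT);
}
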
#endif /* __ASM_CTL_REG_H */