Merge branch 'kprobe-fixes' of https://git.linaro.org/people/tixy/kernel into fixes
commit 3872fe83a2

@@ -653,6 +653,9 @@
cpuidle.off=1 [CPU_IDLE]
disable the cpuidle sub-system

cpufreq.off=1 [CPU_FREQ]
disable the cpufreq sub-system

cpu_init_udelay=N
[X86] Delay for N microsec between assert and de-assert
of APIC INIT to start processors. This delay occurs

@@ -1183,6 +1186,12 @@
functions that can be changed at run time by the
set_graph_notrace file in the debugfs tracing directory.

ftrace_graph_max_depth=<uint>
[FTRACE] Used with the function graph tracer. This is
the max depth it will trace into a function. This value
can be changed at run time by the max_graph_depth file
in the tracefs tracing directory. default: 0 (no limit)

gamecon.map[2|3]=
[HW,JOY] Multisystem joystick and NES/SNES/PSX pad
support via parallel port (up to 5 devices per port)

@@ -68,3 +68,4 @@ stable kernels.
| | | | |
| Qualcomm Tech. | Falkor v1 | E1003 | QCOM_FALKOR_ERRATUM_1003 |
| Qualcomm Tech. | Falkor v1 | E1009 | QCOM_FALKOR_ERRATUM_1009 |
| Qualcomm Tech. | QDF2400 ITS | E0065 | QCOM_QDF2400_ERRATUM_0065 |

@@ -10,7 +10,7 @@ Note that kcov does not aim to collect as much coverage as possible. It aims
to collect more or less stable coverage that is function of syscall inputs.
To achieve this goal it does not collect coverage in soft/hard interrupts
and instrumentation of some inherently non-deterministic parts of kernel is
disbled (e.g. scheduler, locking).
disabled (e.g. scheduler, locking).

Usage
-----

@@ -45,7 +45,7 @@ Required Properties:
Optional Properties:
- reg-names: In addition to the required properties, the following are optional
- "efuse-address" - Contains efuse base address used to pick up ABB info.
- "ldo-address" - Contains address of ABB LDO overide register address.
- "ldo-address" - Contains address of ABB LDO override register.
"efuse-address" is required for this.
- ti,ldovbb-vset-mask - Required if ldo-address is set, mask for LDO override
register to provide override vset value.

@@ -7,18 +7,18 @@ Required properties :
- compatible : Should be "microchip,usb251xb" or one of the specific types:
"microchip,usb2512b", "microchip,usb2512bi", "microchip,usb2513b",
"microchip,usb2513bi", "microchip,usb2514b", "microchip,usb2514bi"
- hub-reset-gpios : Should specify the gpio for hub reset
- reset-gpios : Should specify the gpio for hub reset
- reg : I2C address on the selected bus (default is <0x2C>)

Optional properties :
- reg : I2C address on the selected bus (default is <0x2C>)
- skip-config : Skip Hub configuration, but only send the USB-Attach command
- vendor-id : USB Vendor ID of the hub (16 bit, default is 0x0424)
- product-id : USB Product ID of the hub (16 bit, default depends on type)
- device-id : USB Device ID of the hub (16 bit, default is 0x0bb3)
- language-id : USB Language ID (16 bit, default is 0x0000)
- manufacturer : USB Manufacturer string (max 31 characters long)
- product : USB Product string (max 31 characters long)
- serial : USB Serial string (max 31 characters long)
- vendor-id : Set USB Vendor ID of the hub (16 bit, default is 0x0424)
- product-id : Set USB Product ID of the hub (16 bit, default depends on type)
- device-id : Set USB Device ID of the hub (16 bit, default is 0x0bb3)
- language-id : Set USB Language ID (16 bit, default is 0x0000)
- manufacturer : Set USB Manufacturer string (max 31 characters long)
- product : Set USB Product string (max 31 characters long)
- serial : Set USB Serial string (max 31 characters long)
- {bus,self}-powered : selects between self- and bus-powered operation (default
is self-powered)
- disable-hi-speed : disable USB Hi-Speed support

@@ -31,8 +31,10 @@ Optional properties :
(default is individual)
- dynamic-power-switching : enable auto-switching from self- to bus-powered
operation if the local power source is removed or unavailable
- oc-delay-{100us,4ms,8ms,16ms} : set over current timer delay (default is 8ms)
- compound-device : indicated the hub is part of a compound device
- oc-delay-us : Delay time (in microseconds) for filtering the over-current
sense inputs. Valid values are 100, 4000, 8000 (default) and 16000. If
an invalid value is given, the default is used instead.
- compound-device : indicate the hub is part of a compound device
- port-mapping-mode : enable port mapping mode
- string-support : enable string descriptor support (required for manufacturer,
product and serial string configuration)

@@ -40,34 +42,15 @@ Optional properties :
device connected.
- sp-disabled-ports : Specifies the ports which will be self-power disabled
- bp-disabled-ports : Specifies the ports which will be bus-power disabled
- max-sp-power : Specifies the maximum current the hub consumes from an
upstream port when operating as self-powered hub including the power
consumption of a permanently attached peripheral if the hub is
configured as a compound device. The value is given in mA in a 0 - 500
range (default is 2).
- max-bp-power : Specifies the maximum current the hub consumes from an
upstream port when operating as bus-powered hub including the power
consumption of a permanently attached peripheral if the hub is
configured as a compound device. The value is given in mA in a 0 - 500
range (default is 100).
- max-sp-current : Specifies the maximum current the hub consumes from an
upstream port when operating as self-powered hub EXCLUDING the power
consumption of a permanently attached peripheral if the hub is
configured as a compound device. The value is given in mA in a 0 - 500
range (default is 2).
- max-bp-current : Specifies the maximum current the hub consumes from an
upstream port when operating as bus-powered hub EXCLUDING the power
consumption of a permanently attached peripheral if the hub is
configured as a compound device. The value is given in mA in a 0 - 500
range (default is 100).
- power-on-time : Specifies the time it takes from the time the host initiates
the power-on sequence to a port until the port has adequate power. The
value is given in ms in a 0 - 510 range (default is 100ms).
- power-on-time-ms : Specifies the time it takes from the time the host
initiates the power-on sequence to a port until the port has adequate
power. The value is given in ms in a 0 - 510 range (default is 100ms).

Examples:
usb2512b@2c {
compatible = "microchip,usb2512b";
hub-reset-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
reg = <0x2c>;
reset-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
};

usb2514b@2c {

@@ -12,7 +12,7 @@ kprobes can probe (this means, all functions body except for __kprobes
functions). Unlike the Tracepoint based event, this can be added and removed
dynamically, on the fly.

To enable this feature, build your kernel with CONFIG_KPROBE_EVENT=y.
To enable this feature, build your kernel with CONFIG_KPROBE_EVENTS=y.
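
For context, probe points are added by writing a definition string into the kprobe_events file under the tracing directory, and can later be removed through the same file. A minimal C sketch, assuming the usual tracefs mount point and an illustrative probe name and target function:

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
        /* Assumed tracefs location; it may also be mounted at /sys/kernel/tracing. */
        const char *path = "/sys/kernel/debug/tracing/kprobe_events";
        /* Hypothetical probe: a kprobe named "myprobe" placed on do_sys_open(). */
        const char *probe = "p:myprobe do_sys_open\n";
        int fd = open(path, O_WRONLY | O_APPEND);

        if (fd < 0) {
            perror("open kprobe_events");
            return 1;
        }
        if (write(fd, probe, strlen(probe)) < 0)
            perror("write probe definition");
        close(fd);
        return 0;
    }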

Similar to the events tracer, this doesn't need to be activated via
current_tracer. Instead of that, add probe points via

@@ -7,7 +7,7 @@
Overview
--------
Uprobe based trace events are similar to kprobe based trace events.
To enable this feature, build your kernel with CONFIG_UPROBE_EVENT=y.
To enable this feature, build your kernel with CONFIG_UPROBE_EVENTS=y.

Similar to the kprobe-event tracer, this doesn't need to be activated via
current_tracer. Instead of that, add probe points via

@@ -951,6 +951,10 @@ This ioctl allows the user to create or modify a guest physical memory
slot. When changing an existing slot, it may be moved in the guest
physical memory space, or its flags may be modified. It may not be
resized. Slots may not overlap in guest physical address space.
Bits 0-15 of "slot" specifies the slot id and this value should be
less than the maximum number of user memory slots supported per VM.
The maximum allowed slots can be queried using KVM_CAP_NR_MEMSLOTS,
if this capability is supported by the architecture.
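
A rough userspace sketch of that query, using KVM_CHECK_EXTENSION on the /dev/kvm fd (a minimal example with error handling trimmed; the same check can also be issued on a VM fd created with KVM_CREATE_VM):

    #include <fcntl.h>
    #include <linux/kvm.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>

    int main(void)
    {
        int kvm = open("/dev/kvm", O_RDWR | O_CLOEXEC);
        int slots;

        if (kvm < 0) {
            perror("open /dev/kvm");
            return 1;
        }
        /* Returns the supported slot count, or 0 if the capability is not reported. */
        slots = ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_NR_MEMSLOTS);
        printf("KVM_CAP_NR_MEMSLOTS: %d\n", slots);
        close(kvm);
        return 0;
    }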

If KVM_CAP_MULTI_ADDRESS_SPACE is available, bits 16-31 of "slot"
specifies the address space which is being modified. They must be

@@ -172,10 +172,6 @@ the same read(2) protocol as for the page fault notifications. The
manager has to explicitly enable these events by setting appropriate
bits in uffdio_api.features passed to UFFDIO_API ioctl:

UFFD_FEATURE_EVENT_EXIT - enable notification about exit() of the
non-cooperative process. When the monitored process exits, the uffd
manager will get UFFD_EVENT_EXIT.
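
A minimal sketch of a manager requesting such events during the UFFDIO_API handshake, assuming kernel headers that define these feature bits:

    #include <fcntl.h>
    #include <linux/userfaultfd.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    int main(void)
    {
        struct uffdio_api api = {
            .api = UFFD_API,
            .features = UFFD_FEATURE_EVENT_FORK | UFFD_FEATURE_EVENT_EXIT,
        };
        int uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);

        if (uffd < 0 || ioctl(uffd, UFFDIO_API, &api) < 0) {
            perror("userfaultfd/UFFDIO_API");
            return 1;
        }
        /* After the handshake, api.features reflects what the kernel reports back. */
        printf("features: 0x%llx\n", (unsigned long long)api.features);
        close(uffd);
        return 0;
    }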

UFFD_FEATURE_EVENT_FORK - enable userfaultfd hooks for fork(). When
this feature is enabled, the userfaultfd context of the parent process
is duplicated into the newly created process. The manager receives

@@ -8307,7 +8307,6 @@ M: Richard Leitner <richard.leitner@skidata.com>
L: linux-usb@vger.kernel.org
S: Maintained
F: drivers/usb/misc/usb251xb.c
F: include/linux/platform_data/usb251xb.h
F: Documentation/devicetree/bindings/usb/usb251xb.txt

MICROSOFT SURFACE PRO 3 BUTTON DRIVER

Makefile
@@ -1,7 +1,7 @@
VERSION = 4
PATCHLEVEL = 11
SUBLEVEL = 0
EXTRAVERSION = -rc1
EXTRAVERSION = -rc2
NAME = Fearless Coyote

# *DOCUMENTATION*

@@ -11,6 +11,7 @@
#define _ASM_ARC_HUGEPAGE_H

#include <linux/types.h>
#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>

static inline pte_t pmd_pte(pmd_t pmd)

@@ -37,6 +37,7 @@

#include <asm/page.h>
#include <asm/mmu.h>
#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>
#include <linux/const.h>

@@ -209,6 +209,7 @@
#define HSR_EC_IABT_HYP (0x21)
#define HSR_EC_DABT (0x24)
#define HSR_EC_DABT_HYP (0x25)
#define HSR_EC_MAX (0x3f)

#define HSR_WFI_IS_WFE (_AC(1, UL) << 0)

@@ -30,7 +30,6 @@
#define __KVM_HAVE_ARCH_INTC_INITIALIZED

#define KVM_USER_MEM_SLOTS 32
#define KVM_PRIVATE_MEM_SLOTS 4
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
#define KVM_HAVE_ONE_REG
#define KVM_HALT_POLL_NS_DEFAULT 500000

@@ -20,6 +20,7 @@

#else

#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopud.h>
#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>

@@ -221,6 +221,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_MAX_VCPUS:
r = KVM_MAX_VCPUS;
break;
case KVM_CAP_NR_MEMSLOTS:
r = KVM_USER_MEM_SLOTS;
break;
case KVM_CAP_MSI_DEVID:
if (!kvm)
r = -EINVAL;

@@ -79,7 +79,19 @@ static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
return 1;
}

static int kvm_handle_unknown_ec(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
u32 hsr = kvm_vcpu_get_hsr(vcpu);

kvm_pr_unimpl("Unknown exception class: hsr: %#08x\n",
hsr);

kvm_inject_undefined(vcpu);
return 1;
}

static exit_handle_fn arm_exit_handlers[] = {
[0 ... HSR_EC_MAX] = kvm_handle_unknown_ec,
[HSR_EC_WFI] = kvm_handle_wfx,
[HSR_EC_CP15_32] = kvm_handle_cp15_32,
[HSR_EC_CP15_64] = kvm_handle_cp15_64,

@@ -98,13 +110,6 @@ static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
{
u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu);

if (hsr_ec >= ARRAY_SIZE(arm_exit_handlers) ||
!arm_exit_handlers[hsr_ec]) {
kvm_err("Unknown exception class: hsr: %#08x\n",
(unsigned int)kvm_vcpu_get_hsr(vcpu));
BUG();
}

return arm_exit_handlers[hsr_ec];
}

@@ -266,11 +266,20 @@ void __kprobes kprobe_handler(struct pt_regs *regs)
#endif

if (p) {
if (cur) {
if (!p->ainsn.insn_check_cc(regs->ARM_cpsr)) {
/*
* Probe hit but conditional execution check failed,
* so just skip the instruction and continue as if
* nothing had happened.
* In this case, we can skip recursing check too.
*/
singlestep_skip(p, regs);
} else if (cur) {
/* Kprobe is pending, so we're recursing. */
switch (kcb->kprobe_status) {
case KPROBE_HIT_ACTIVE:
case KPROBE_HIT_SSDONE:
case KPROBE_HIT_SS:
/* A pre- or post-handler probe got us here. */
kprobes_inc_nmissed_count(p);
save_previous_kprobe(kcb);

@@ -279,11 +288,16 @@ void __kprobes kprobe_handler(struct pt_regs *regs)
singlestep(p, regs, kcb);
restore_previous_kprobe(kcb);
break;
case KPROBE_REENTER:
/* A nested probe was hit in FIQ, it is a BUG */
pr_warn("Unrecoverable kprobe detected at %p.\n",
p->addr);
/* fall through */
default:
/* impossible cases */
BUG();
}
} else if (p->ainsn.insn_check_cc(regs->ARM_cpsr)) {
} else {
/* Probe hit and conditional execution check ok. */
set_current_kprobe(p);
kcb->kprobe_status = KPROBE_HIT_ACTIVE;

@@ -304,13 +318,6 @@ void __kprobes kprobe_handler(struct pt_regs *regs)
}
reset_current_kprobe();
}
} else {
/*
* Probe hit but conditional execution check failed,
* so just skip the instruction and continue as if
* nothing had happened.
*/
singlestep_skip(p, regs);
}
} else if (cur) {
/* We probably hit a jprobe. Call its break handler. */

@@ -434,6 +441,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
struct hlist_node *tmp;
unsigned long flags, orig_ret_address = 0;
unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
kprobe_opcode_t *correct_ret_addr = NULL;

INIT_HLIST_HEAD(&empty_rp);
kretprobe_hash_lock(current, &head, &flags);

@@ -456,15 +464,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
/* another task is sharing our hash bucket */
continue;

if (ri->rp && ri->rp->handler) {
__this_cpu_write(current_kprobe, &ri->rp->kp);
get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
ri->rp->handler(ri, regs);
__this_cpu_write(current_kprobe, NULL);
}

orig_ret_address = (unsigned long)ri->ret_addr;
recycle_rp_inst(ri, &empty_rp);

if (orig_ret_address != trampoline_address)
/*

@@ -476,6 +476,33 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
}

kretprobe_assert(ri, orig_ret_address, trampoline_address);

correct_ret_addr = ri->ret_addr;
hlist_for_each_entry_safe(ri, tmp, head, hlist) {
if (ri->task != current)
/* another task is sharing our hash bucket */
continue;

orig_ret_address = (unsigned long)ri->ret_addr;
if (ri->rp && ri->rp->handler) {
__this_cpu_write(current_kprobe, &ri->rp->kp);
get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
ri->ret_addr = correct_ret_addr;
ri->rp->handler(ri, regs);
__this_cpu_write(current_kprobe, NULL);
}

recycle_rp_inst(ri, &empty_rp);

if (orig_ret_address != trampoline_address)
/*
* This is the real return address. Any other
* instances associated with this task are for
* other calls deeper on the call stack
*/
break;
}

kretprobe_hash_unlock(current, &flags);

hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {

@@ -977,7 +977,10 @@ static void coverage_end(void)
void __naked __kprobes_test_case_start(void)
{
__asm__ __volatile__ (
"stmdb sp!, {r4-r11} \n\t"
"mov r2, sp \n\t"
"bic r3, r2, #7 \n\t"
"mov sp, r3 \n\t"
"stmdb sp!, {r2-r11} \n\t"
"sub sp, sp, #"__stringify(TEST_MEMORY_SIZE)"\n\t"
"bic r0, lr, #1 @ r0 = inline data \n\t"
"mov r1, sp \n\t"

@@ -997,7 +1000,8 @@ void __naked __kprobes_test_case_end_32(void)
"movne pc, r0 \n\t"
"mov r0, r4 \n\t"
"add sp, sp, #"__stringify(TEST_MEMORY_SIZE)"\n\t"
"ldmia sp!, {r4-r11} \n\t"
"ldmia sp!, {r2-r11} \n\t"
"mov sp, r2 \n\t"
"mov pc, r0 \n\t"
);
}

@@ -1013,7 +1017,8 @@ void __naked __kprobes_test_case_end_16(void)
"bxne r0 \n\t"
"mov r0, r4 \n\t"
"add sp, sp, #"__stringify(TEST_MEMORY_SIZE)"\n\t"
"ldmia sp!, {r4-r11} \n\t"
"ldmia sp!, {r2-r11} \n\t"
"mov sp, r2 \n\t"
"bx r0 \n\t"
);
}

@@ -198,6 +198,8 @@ static const struct dma_map_ops xen_swiotlb_dma_ops = {
.unmap_page = xen_swiotlb_unmap_page,
.dma_supported = xen_swiotlb_dma_supported,
.set_dma_mask = xen_swiotlb_set_dma_mask,
.mmap = xen_swiotlb_dma_mmap,
.get_sgtable = xen_swiotlb_get_sgtable,
};

int __init xen_mm_init(void)

@@ -508,6 +508,16 @@ config QCOM_FALKOR_ERRATUM_1009

If unsure, say Y.

config QCOM_QDF2400_ERRATUM_0065
bool "QDF2400 E0065: Incorrect GITS_TYPER.ITT_Entry_size"
default y
help
On Qualcomm Datacenter Technologies QDF2400 SoC, ITS hardware reports
ITE size incorrectly. The GITS_TYPER.ITT_Entry_size field should have
been indicated as 16Bytes (0xf), not 8Bytes (0x7).

If unsure, say Y.

endmenu

@@ -30,8 +30,7 @@

#define __KVM_HAVE_ARCH_INTC_INITIALIZED

#define KVM_USER_MEM_SLOTS 32
#define KVM_PRIVATE_MEM_SLOTS 4
#define KVM_USER_MEM_SLOTS 512
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
#define KVM_HALT_POLL_NS_DEFAULT 500000

@@ -55,9 +55,13 @@ typedef struct { pteval_t pgprot; } pgprot_t;
#define __pgprot(x) ((pgprot_t) { (x) } )

#if CONFIG_PGTABLE_LEVELS == 2
#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>
#elif CONFIG_PGTABLE_LEVELS == 3
#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopud.h>
#elif CONFIG_PGTABLE_LEVELS == 4
#include <asm-generic/5level-fixup.h>
#endif

#endif /* __ASM_PGTABLE_TYPES_H */

@@ -135,7 +135,19 @@ static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu, struct kvm_run *run)
return ret;
}

static int kvm_handle_unknown_ec(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
u32 hsr = kvm_vcpu_get_hsr(vcpu);

kvm_pr_unimpl("Unknown exception class: hsr: %#08x -- %s\n",
hsr, esr_get_class_string(hsr));

kvm_inject_undefined(vcpu);
return 1;
}

static exit_handle_fn arm_exit_handlers[] = {
[0 ... ESR_ELx_EC_MAX] = kvm_handle_unknown_ec,
[ESR_ELx_EC_WFx] = kvm_handle_wfx,
[ESR_ELx_EC_CP15_32] = kvm_handle_cp15_32,
[ESR_ELx_EC_CP15_64] = kvm_handle_cp15_64,

@@ -162,13 +174,6 @@ static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
u32 hsr = kvm_vcpu_get_hsr(vcpu);
u8 hsr_ec = ESR_ELx_EC(hsr);

if (hsr_ec >= ARRAY_SIZE(arm_exit_handlers) ||
!arm_exit_handlers[hsr_ec]) {
kvm_err("Unknown exception class: hsr: %#08x -- %s\n",
hsr, esr_get_class_string(hsr));
BUG();
}

return arm_exit_handlers[hsr_ec];
}
@ -18,14 +18,62 @@
|
||||
#include <asm/kvm_hyp.h>
|
||||
#include <asm/tlbflush.h>
|
||||
|
||||
static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm)
|
||||
{
|
||||
u64 val;
|
||||
|
||||
/*
|
||||
* With VHE enabled, we have HCR_EL2.{E2H,TGE} = {1,1}, and
|
||||
* most TLB operations target EL2/EL0. In order to affect the
|
||||
* guest TLBs (EL1/EL0), we need to change one of these two
|
||||
* bits. Changing E2H is impossible (goodbye TTBR1_EL2), so
|
||||
* let's flip TGE before executing the TLB operation.
|
||||
*/
|
||||
write_sysreg(kvm->arch.vttbr, vttbr_el2);
|
||||
val = read_sysreg(hcr_el2);
|
||||
val &= ~HCR_TGE;
|
||||
write_sysreg(val, hcr_el2);
|
||||
isb();
|
||||
}
|
||||
|
||||
static void __hyp_text __tlb_switch_to_guest_nvhe(struct kvm *kvm)
|
||||
{
|
||||
write_sysreg(kvm->arch.vttbr, vttbr_el2);
|
||||
isb();
|
||||
}
|
||||
|
||||
static hyp_alternate_select(__tlb_switch_to_guest,
|
||||
__tlb_switch_to_guest_nvhe,
|
||||
__tlb_switch_to_guest_vhe,
|
||||
ARM64_HAS_VIRT_HOST_EXTN);
|
||||
|
||||
static void __hyp_text __tlb_switch_to_host_vhe(struct kvm *kvm)
|
||||
{
|
||||
/*
|
||||
* We're done with the TLB operation, let's restore the host's
|
||||
* view of HCR_EL2.
|
||||
*/
|
||||
write_sysreg(0, vttbr_el2);
|
||||
write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
|
||||
}
|
||||
|
||||
static void __hyp_text __tlb_switch_to_host_nvhe(struct kvm *kvm)
|
||||
{
|
||||
write_sysreg(0, vttbr_el2);
|
||||
}
|
||||
|
||||
static hyp_alternate_select(__tlb_switch_to_host,
|
||||
__tlb_switch_to_host_nvhe,
|
||||
__tlb_switch_to_host_vhe,
|
||||
ARM64_HAS_VIRT_HOST_EXTN);
|
||||
|
||||
void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
|
||||
{
|
||||
dsb(ishst);
|
||||
|
||||
/* Switch to requested VMID */
|
||||
kvm = kern_hyp_va(kvm);
|
||||
write_sysreg(kvm->arch.vttbr, vttbr_el2);
|
||||
isb();
|
||||
__tlb_switch_to_guest()(kvm);
|
||||
|
||||
/*
|
||||
* We could do so much better if we had the VA as well.
|
||||
@ -46,7 +94,7 @@ void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
|
||||
dsb(ish);
|
||||
isb();
|
||||
|
||||
write_sysreg(0, vttbr_el2);
|
||||
__tlb_switch_to_host()(kvm);
|
||||
}
|
||||
|
||||
void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm)
|
||||
@ -55,14 +103,13 @@ void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm)
|
||||
|
||||
/* Switch to requested VMID */
|
||||
kvm = kern_hyp_va(kvm);
|
||||
write_sysreg(kvm->arch.vttbr, vttbr_el2);
|
||||
isb();
|
||||
__tlb_switch_to_guest()(kvm);
|
||||
|
||||
__tlbi(vmalls12e1is);
|
||||
dsb(ish);
|
||||
isb();
|
||||
|
||||
write_sysreg(0, vttbr_el2);
|
||||
__tlb_switch_to_host()(kvm);
|
||||
}
|
||||
|
||||
void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
|
||||
@ -70,14 +117,13 @@ void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
|
||||
struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm);
|
||||
|
||||
/* Switch to requested VMID */
|
||||
write_sysreg(kvm->arch.vttbr, vttbr_el2);
|
||||
isb();
|
||||
__tlb_switch_to_guest()(kvm);
|
||||
|
||||
__tlbi(vmalle1);
|
||||
dsb(nsh);
|
||||
isb();
|
||||
|
||||
write_sysreg(0, vttbr_el2);
|
||||
__tlb_switch_to_host()(kvm);
|
||||
}
|
||||
|
||||
void __hyp_text __kvm_flush_vm_context(void)
|
||||
|
@ -8,6 +8,7 @@
|
||||
#ifndef __ASM_AVR32_PGTABLE_2LEVEL_H
|
||||
#define __ASM_AVR32_PGTABLE_2LEVEL_H
|
||||
|
||||
#define __ARCH_USE_5LEVEL_HACK
|
||||
#include <asm-generic/pgtable-nopmd.h>
|
||||
|
||||
/*
|
||||
|
@ -14,7 +14,7 @@
|
||||
*/
|
||||
|
||||
#include <linux/oprofile.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/ptrace.h>
|
||||
#include <linux/uaccess.h>
|
||||
|
||||
/* The first two words of each frame on the stack look like this if we have
|
||||
|
@ -2086,7 +2086,7 @@ static void cryptocop_job_queue_close(void)
|
||||
dma_in_cfg.en = regk_dma_no;
|
||||
REG_WR(dma, IN_DMA_INST, rw_cfg, dma_in_cfg);
|
||||
|
||||
/* Disble the cryptocop. */
|
||||
/* Disable the cryptocop. */
|
||||
rw_cfg = REG_RD(strcop, regi_strcop, rw_cfg);
|
||||
rw_cfg.en = 0;
|
||||
REG_WR(strcop, regi_strcop, rw_cfg, rw_cfg);
|
||||
|
@ -6,6 +6,7 @@
|
||||
#define _CRIS_PGTABLE_H
|
||||
|
||||
#include <asm/page.h>
|
||||
#define __ARCH_USE_5LEVEL_HACK
|
||||
#include <asm-generic/pgtable-nopmd.h>
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
|
@ -16,6 +16,7 @@
|
||||
#ifndef _ASM_PGTABLE_H
|
||||
#define _ASM_PGTABLE_H
|
||||
|
||||
#include <asm-generic/5level-fixup.h>
|
||||
#include <asm/mem-layout.h>
|
||||
#include <asm/setup.h>
|
||||
#include <asm/processor.h>
|
||||
|
@ -1,5 +1,6 @@
|
||||
#ifndef _H8300_PGTABLE_H
|
||||
#define _H8300_PGTABLE_H
|
||||
#define __ARCH_USE_5LEVEL_HACK
|
||||
#include <asm-generic/pgtable-nopud.h>
|
||||
#include <asm-generic/pgtable.h>
|
||||
#define pgtable_cache_init() do { } while (0)
|
||||
|
@ -9,7 +9,7 @@
|
||||
*/
|
||||
|
||||
#include <linux/linkage.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/sched/signal.h>
|
||||
#include <asm/ptrace.h>
|
||||
|
||||
#define BREAKINST 0x5730 /* trapa #3 */
|
||||
|
@ -26,6 +26,7 @@
|
||||
*/
|
||||
#include <linux/swap.h>
|
||||
#include <asm/page.h>
|
||||
#define __ARCH_USE_5LEVEL_HACK
|
||||
#include <asm-generic/pgtable-nopmd.h>
|
||||
|
||||
/* A handy thing to have if one has the RAM. Declared in head.S */
|
||||
|
@ -587,8 +587,10 @@ extern struct page *zero_page_memmap_ptr;
|
||||
|
||||
|
||||
#if CONFIG_PGTABLE_LEVELS == 3
|
||||
#define __ARCH_USE_5LEVEL_HACK
|
||||
#include <asm-generic/pgtable-nopud.h>
|
||||
#endif
|
||||
#include <asm-generic/5level-fixup.h>
|
||||
#include <asm-generic/pgtable.h>
|
||||
|
||||
#endif /* _ASM_IA64_PGTABLE_H */
|
||||
|
@ -6,6 +6,7 @@
|
||||
#define _METAG_PGTABLE_H
|
||||
|
||||
#include <asm/pgtable-bits.h>
|
||||
#define __ARCH_USE_5LEVEL_HACK
|
||||
#include <asm-generic/pgtable-nopmd.h>
|
||||
|
||||
/* Invalid regions on Meta: 0x00000000-0x001FFFFF and 0xFFFF0000-0xFFFFFFFF */
|
||||
|
@ -95,7 +95,8 @@ typedef struct { unsigned long pgd; } pgd_t;
|
||||
# else /* CONFIG_MMU */
|
||||
typedef struct { unsigned long ste[64]; } pmd_t;
|
||||
typedef struct { pmd_t pue[1]; } pud_t;
|
||||
typedef struct { pud_t pge[1]; } pgd_t;
|
||||
typedef struct { pud_t p4e[1]; } p4d_t;
|
||||
typedef struct { p4d_t pge[1]; } pgd_t;
|
||||
# endif /* CONFIG_MMU */
|
||||
|
||||
# define pte_val(x) ((x).pte)
|
||||
|
@ -10,7 +10,9 @@
|
||||
#include <linux/irqflags.h>
|
||||
#include <linux/notifier.h>
|
||||
#include <linux/prefetch.h>
|
||||
#include <linux/ptrace.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/sched/task_stack.h>
|
||||
|
||||
#include <asm/cop2.h>
|
||||
#include <asm/current.h>
|
||||
|
@ -9,6 +9,7 @@
|
||||
#include <asm/cop2.h>
|
||||
#include <linux/export.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/sched/task_stack.h>
|
||||
|
||||
#include "octeon-crypto.h"
|
||||
|
||||
|
@ -12,6 +12,7 @@
|
||||
#include <linux/kernel_stat.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/sched/hotplug.h>
|
||||
#include <linux/sched/task_stack.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/export.h>
|
||||
|
||||
|
@ -12,6 +12,7 @@
|
||||
|
||||
#include <linux/sched.h>
|
||||
#include <linux/sched/task_stack.h>
|
||||
#include <linux/ptrace.h>
|
||||
#include <linux/thread_info.h>
|
||||
#include <linux/bitops.h>
|
||||
|
||||
|
@ -16,6 +16,7 @@
|
||||
#include <asm/cachectl.h>
|
||||
#include <asm/fixmap.h>
|
||||
|
||||
#define __ARCH_USE_5LEVEL_HACK
|
||||
#include <asm-generic/pgtable-nopmd.h>
|
||||
|
||||
extern int temp_tlb_entry;
|
||||
|
@ -17,6 +17,7 @@
|
||||
#include <asm/cachectl.h>
|
||||
#include <asm/fixmap.h>
|
||||
|
||||
#define __ARCH_USE_5LEVEL_HACK
|
||||
#if defined(CONFIG_PAGE_SIZE_64KB) && !defined(CONFIG_MIPS_VA_BITS_48)
|
||||
#include <asm-generic/pgtable-nopmd.h>
|
||||
#else
|
||||
|
@ -11,6 +11,7 @@
|
||||
#include <linux/init.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/sched/hotplug.h>
|
||||
#include <linux/sched/task_stack.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/smp.h>
|
||||
|
@ -23,6 +23,7 @@
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/irqchip/mips-gic.h>
|
||||
#include <linux/compiler.h>
|
||||
#include <linux/sched/task_stack.h>
|
||||
#include <linux/smp.h>
|
||||
|
||||
#include <linux/atomic.h>
|
||||
|
@ -13,6 +13,7 @@
|
||||
#include <linux/init.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/notifier.h>
|
||||
#include <linux/ptrace.h>
|
||||
|
||||
#include <asm/fpu.h>
|
||||
#include <asm/cop2.h>
|
||||
|
@ -35,6 +35,7 @@
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/sched/task_stack.h>
|
||||
#include <linux/smp.h>
|
||||
#include <linux/irq.h>
|
||||
|
||||
|
@ -9,11 +9,14 @@
|
||||
* Copyright (C) 2009 Wind River Systems,
|
||||
* written by Ralf Baechle <ralf@linux-mips.org>
|
||||
*/
|
||||
#include <linux/capability.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/irqflags.h>
|
||||
#include <linux/notifier.h>
|
||||
#include <linux/prefetch.h>
|
||||
#include <linux/ptrace.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/sched/task_stack.h>
|
||||
|
||||
#include <asm/cop2.h>
|
||||
#include <asm/current.h>
|
||||
|
@ -9,6 +9,7 @@
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/sched/debug.h>
|
||||
#include <linux/sched/signal.h>
|
||||
#include <linux/seq_file.h>
|
||||
|
||||
#include <asm/addrspace.h>
|
||||
|
@ -12,7 +12,9 @@
|
||||
#include <linux/signal.h> /* for SIGBUS */
|
||||
#include <linux/sched.h> /* schow_regs(), force_sig() */
|
||||
#include <linux/sched/debug.h>
|
||||
#include <linux/sched/signal.h>
|
||||
|
||||
#include <asm/ptrace.h>
|
||||
#include <asm/sn/addrs.h>
|
||||
#include <asm/sn/arch.h>
|
||||
#include <asm/sn/sn0/hub.h>
|
||||
|
@ -8,10 +8,13 @@
|
||||
*/
|
||||
#include <linux/init.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/sched/task_stack.h>
|
||||
#include <linux/topology.h>
|
||||
#include <linux/nodemask.h>
|
||||
|
||||
#include <asm/page.h>
|
||||
#include <asm/processor.h>
|
||||
#include <asm/ptrace.h>
|
||||
#include <asm/sn/arch.h>
|
||||
#include <asm/sn/gda.h>
|
||||
#include <asm/sn/intr.h>
|
||||
|
@ -11,6 +11,7 @@
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/sched/debug.h>
|
||||
#include <linux/sched/signal.h>
|
||||
#include <asm/traps.h>
|
||||
#include <linux/uaccess.h>
|
||||
#include <asm/addrspace.h>
|
||||
|
@ -13,6 +13,7 @@
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/sched/signal.h>
|
||||
#include <linux/notifier.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/rtc/ds1685.h>
|
||||
|
@ -57,6 +57,7 @@ typedef struct page *pgtable_t;
|
||||
#define __pgd(x) ((pgd_t) { (x) })
|
||||
#define __pgprot(x) ((pgprot_t) { (x) })
|
||||
|
||||
#define __ARCH_USE_5LEVEL_HACK
|
||||
#include <asm-generic/pgtable-nopmd.h>
|
||||
|
||||
#endif /* !__ASSEMBLY__ */
|
||||
|
@ -22,6 +22,7 @@
|
||||
#include <asm/tlbflush.h>
|
||||
|
||||
#include <asm/pgtable-bits.h>
|
||||
#define __ARCH_USE_5LEVEL_HACK
|
||||
#include <asm-generic/pgtable-nopmd.h>
|
||||
|
||||
#define FIRST_USER_ADDRESS 0UL
|
||||
|
@ -25,6 +25,7 @@
|
||||
#ifndef __ASM_OPENRISC_PGTABLE_H
|
||||
#define __ASM_OPENRISC_PGTABLE_H
|
||||
|
||||
#define __ARCH_USE_5LEVEL_HACK
|
||||
#include <asm-generic/pgtable-nopmd.h>
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
|
@ -80,93 +80,99 @@ config ARCH_HAS_DMA_SET_COHERENT_MASK
|
||||
config PPC
|
||||
bool
|
||||
default y
|
||||
select BUILDTIME_EXTABLE_SORT
|
||||
#
|
||||
# Please keep this list sorted alphabetically.
|
||||
#
|
||||
select ARCH_HAS_DEVMEM_IS_ALLOWED
|
||||
select ARCH_HAS_DMA_SET_COHERENT_MASK
|
||||
select ARCH_HAS_ELF_RANDOMIZE
|
||||
select ARCH_HAS_GCOV_PROFILE_ALL
|
||||
select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE
|
||||
select ARCH_HAS_SG_CHAIN
|
||||
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
|
||||
select ARCH_HAS_UBSAN_SANITIZE_ALL
|
||||
select ARCH_HAVE_NMI_SAFE_CMPXCHG
|
||||
select ARCH_MIGHT_HAVE_PC_PARPORT
|
||||
select ARCH_MIGHT_HAVE_PC_SERIO
|
||||
select ARCH_SUPPORTS_ATOMIC_RMW
|
||||
select ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT
|
||||
select ARCH_USE_BUILTIN_BSWAP
|
||||
select ARCH_USE_CMPXCHG_LOCKREF if PPC64
|
||||
select ARCH_WANT_IPC_PARSE_VERSION
|
||||
select BINFMT_ELF
|
||||
select ARCH_HAS_ELF_RANDOMIZE
|
||||
select BUILDTIME_EXTABLE_SORT
|
||||
select CLONE_BACKWARDS
|
||||
select DCACHE_WORD_ACCESS if PPC64 && CPU_LITTLE_ENDIAN
|
||||
select EDAC_ATOMIC_SCRUB
|
||||
select EDAC_SUPPORT
|
||||
select GENERIC_ATOMIC64 if PPC32
|
||||
select GENERIC_CLOCKEVENTS
|
||||
select GENERIC_CLOCKEVENTS_BROADCAST if SMP
|
||||
select GENERIC_CMOS_UPDATE
|
||||
select GENERIC_CPU_AUTOPROBE
|
||||
select GENERIC_IRQ_SHOW
|
||||
select GENERIC_IRQ_SHOW_LEVEL
|
||||
select GENERIC_SMP_IDLE_THREAD
|
||||
select GENERIC_STRNCPY_FROM_USER
|
||||
select GENERIC_STRNLEN_USER
|
||||
select GENERIC_TIME_VSYSCALL_OLD
|
||||
select HAVE_ARCH_AUDITSYSCALL
|
||||
select HAVE_ARCH_HARDENED_USERCOPY
|
||||
select HAVE_ARCH_JUMP_LABEL
|
||||
select HAVE_ARCH_KGDB
|
||||
select HAVE_ARCH_SECCOMP_FILTER
|
||||
select HAVE_ARCH_TRACEHOOK
|
||||
select HAVE_CBPF_JIT if !PPC64
|
||||
select HAVE_CONTEXT_TRACKING if PPC64
|
||||
select HAVE_DEBUG_KMEMLEAK
|
||||
select HAVE_DEBUG_STACKOVERFLOW
|
||||
select HAVE_DMA_API_DEBUG
|
||||
select HAVE_DYNAMIC_FTRACE
|
||||
select HAVE_DYNAMIC_FTRACE_WITH_REGS if MPROFILE_KERNEL
|
||||
select HAVE_EBPF_JIT if PPC64
|
||||
select HAVE_EFFICIENT_UNALIGNED_ACCESS if !(CPU_LITTLE_ENDIAN && POWER7_CPU)
|
||||
select HAVE_FTRACE_MCOUNT_RECORD
|
||||
select HAVE_FUNCTION_GRAPH_TRACER
|
||||
select HAVE_FUNCTION_TRACER
|
||||
select HAVE_GCC_PLUGINS
|
||||
select HAVE_GENERIC_RCU_GUP
|
||||
select HAVE_HW_BREAKPOINT if PERF_EVENTS && (PPC_BOOK3S || PPC_8xx)
|
||||
select HAVE_IDE
|
||||
select HAVE_IOREMAP_PROT
|
||||
select HAVE_IRQ_EXIT_ON_IRQ_STACK
|
||||
select HAVE_KERNEL_GZIP
|
||||
select HAVE_KPROBES
|
||||
select HAVE_KRETPROBES
|
||||
select HAVE_LIVEPATCH if HAVE_DYNAMIC_FTRACE_WITH_REGS
|
||||
select HAVE_MEMBLOCK
|
||||
select HAVE_MEMBLOCK_NODE_MAP
|
||||
select HAVE_MOD_ARCH_SPECIFIC
|
||||
select HAVE_NMI if PERF_EVENTS
|
||||
select HAVE_OPROFILE
|
||||
select HAVE_OPTPROBES if PPC64
|
||||
select HAVE_PERF_EVENTS
|
||||
select HAVE_PERF_EVENTS_NMI if PPC64
|
||||
select HAVE_PERF_REGS
|
||||
select HAVE_PERF_USER_STACK_DUMP
|
||||
select HAVE_RCU_TABLE_FREE if SMP
|
||||
select HAVE_REGS_AND_STACK_ACCESS_API
|
||||
select HAVE_SYSCALL_TRACEPOINTS
|
||||
select HAVE_VIRT_CPU_ACCOUNTING
|
||||
select IRQ_DOMAIN
|
||||
select IRQ_FORCED_THREADING
|
||||
select MODULES_USE_ELF_RELA
|
||||
select NO_BOOTMEM
|
||||
select OF
|
||||
select OF_EARLY_FLATTREE
|
||||
select OF_RESERVED_MEM
|
||||
select HAVE_FTRACE_MCOUNT_RECORD
|
||||
select HAVE_DYNAMIC_FTRACE
|
||||
select HAVE_DYNAMIC_FTRACE_WITH_REGS if MPROFILE_KERNEL
|
||||
select HAVE_FUNCTION_TRACER
|
||||
select HAVE_FUNCTION_GRAPH_TRACER
|
||||
select HAVE_GCC_PLUGINS
|
||||
select SYSCTL_EXCEPTION_TRACE
|
||||
select VIRT_TO_BUS if !PPC64
|
||||
select HAVE_IDE
|
||||
select HAVE_IOREMAP_PROT
|
||||
select HAVE_EFFICIENT_UNALIGNED_ACCESS if !(CPU_LITTLE_ENDIAN && POWER7_CPU)
|
||||
select HAVE_KPROBES
|
||||
select HAVE_OPTPROBES if PPC64
|
||||
select HAVE_ARCH_KGDB
|
||||
select HAVE_KRETPROBES
|
||||
select HAVE_ARCH_TRACEHOOK
|
||||
select HAVE_MEMBLOCK
|
||||
select HAVE_MEMBLOCK_NODE_MAP
|
||||
select HAVE_DMA_API_DEBUG
|
||||
select HAVE_OPROFILE
|
||||
select HAVE_DEBUG_KMEMLEAK
|
||||
select ARCH_HAS_SG_CHAIN
|
||||
select GENERIC_ATOMIC64 if PPC32
|
||||
select HAVE_PERF_EVENTS
|
||||
select HAVE_PERF_REGS
|
||||
select HAVE_PERF_USER_STACK_DUMP
|
||||
select HAVE_REGS_AND_STACK_ACCESS_API
|
||||
select HAVE_HW_BREAKPOINT if PERF_EVENTS && (PPC_BOOK3S || PPC_8xx)
|
||||
select ARCH_WANT_IPC_PARSE_VERSION
|
||||
select SPARSE_IRQ
|
||||
select IRQ_DOMAIN
|
||||
select GENERIC_IRQ_SHOW
|
||||
select GENERIC_IRQ_SHOW_LEVEL
|
||||
select IRQ_FORCED_THREADING
|
||||
select HAVE_RCU_TABLE_FREE if SMP
|
||||
select HAVE_SYSCALL_TRACEPOINTS
|
||||
select HAVE_CBPF_JIT if !PPC64
|
||||
select HAVE_EBPF_JIT if PPC64
|
||||
select HAVE_ARCH_JUMP_LABEL
|
||||
select ARCH_HAVE_NMI_SAFE_CMPXCHG
|
||||
select ARCH_HAS_GCOV_PROFILE_ALL
|
||||
select GENERIC_SMP_IDLE_THREAD
|
||||
select GENERIC_CMOS_UPDATE
|
||||
select GENERIC_TIME_VSYSCALL_OLD
|
||||
select GENERIC_CLOCKEVENTS
|
||||
select GENERIC_CLOCKEVENTS_BROADCAST if SMP
|
||||
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
|
||||
select GENERIC_STRNCPY_FROM_USER
|
||||
select GENERIC_STRNLEN_USER
|
||||
select HAVE_MOD_ARCH_SPECIFIC
|
||||
select MODULES_USE_ELF_RELA
|
||||
select CLONE_BACKWARDS
|
||||
select ARCH_USE_BUILTIN_BSWAP
|
||||
select OLD_SIGACTION if PPC32
|
||||
select OLD_SIGSUSPEND
|
||||
select OLD_SIGACTION if PPC32
|
||||
select HAVE_DEBUG_STACKOVERFLOW
|
||||
select HAVE_IRQ_EXIT_ON_IRQ_STACK
|
||||
select ARCH_USE_CMPXCHG_LOCKREF if PPC64
|
||||
select HAVE_ARCH_AUDITSYSCALL
|
||||
select ARCH_SUPPORTS_ATOMIC_RMW
|
||||
select DCACHE_WORD_ACCESS if PPC64 && CPU_LITTLE_ENDIAN
|
||||
select NO_BOOTMEM
|
||||
select HAVE_GENERIC_RCU_GUP
|
||||
select HAVE_PERF_EVENTS_NMI if PPC64
|
||||
select HAVE_NMI if PERF_EVENTS
|
||||
select EDAC_SUPPORT
|
||||
select EDAC_ATOMIC_SCRUB
|
||||
select ARCH_HAS_DMA_SET_COHERENT_MASK
|
||||
select ARCH_HAS_DEVMEM_IS_ALLOWED
|
||||
select HAVE_ARCH_SECCOMP_FILTER
|
||||
select ARCH_HAS_UBSAN_SANITIZE_ALL
|
||||
select ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT
|
||||
select HAVE_LIVEPATCH if HAVE_DYNAMIC_FTRACE_WITH_REGS
|
||||
select GENERIC_CPU_AUTOPROBE
|
||||
select HAVE_VIRT_CPU_ACCOUNTING
|
||||
select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE
|
||||
select HAVE_ARCH_HARDENED_USERCOPY
|
||||
select HAVE_KERNEL_GZIP
|
||||
select HAVE_CONTEXT_TRACKING if PPC64
|
||||
select SPARSE_IRQ
|
||||
select SYSCTL_EXCEPTION_TRACE
|
||||
select VIRT_TO_BUS if !PPC64
|
||||
#
|
||||
# Please keep this list sorted alphabetically.
|
||||
#
|
||||
|
||||
config GENERIC_CSUM
|
||||
def_bool n
|
||||
|
@ -72,8 +72,15 @@ GNUTARGET := powerpc
|
||||
MULTIPLEWORD := -mmultiple
|
||||
endif
|
||||
|
||||
cflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mbig-endian)
|
||||
ifdef CONFIG_PPC64
|
||||
cflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mabi=elfv1)
|
||||
cflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mcall-aixdesc)
|
||||
aflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mabi=elfv1)
|
||||
aflags-$(CONFIG_CPU_LITTLE_ENDIAN) += -mabi=elfv2
|
||||
endif
|
||||
|
||||
cflags-$(CONFIG_CPU_LITTLE_ENDIAN) += -mlittle-endian
|
||||
cflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mbig-endian)
|
||||
ifneq ($(cc-name),clang)
|
||||
cflags-$(CONFIG_CPU_LITTLE_ENDIAN) += -mno-strict-align
|
||||
endif
|
||||
@ -113,7 +120,9 @@ ifeq ($(CONFIG_CPU_LITTLE_ENDIAN),y)
|
||||
CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv2,$(call cc-option,-mcall-aixdesc))
|
||||
AFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv2)
|
||||
else
|
||||
CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv1)
|
||||
CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mcall-aixdesc)
|
||||
AFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv1)
|
||||
endif
|
||||
CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mcmodel=medium,$(call cc-option,-mminimal-toc))
|
||||
CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mno-pointers-to-nested-functions)
|
||||
|
@ -1,6 +1,7 @@
|
||||
#ifndef _ASM_POWERPC_BOOK3S_32_PGTABLE_H
|
||||
#define _ASM_POWERPC_BOOK3S_32_PGTABLE_H
|
||||
|
||||
#define __ARCH_USE_5LEVEL_HACK
|
||||
#include <asm-generic/pgtable-nopmd.h>
|
||||
|
||||
#include <asm/book3s/32/hash.h>
|
||||
|
@ -1,9 +1,12 @@
|
||||
#ifndef _ASM_POWERPC_BOOK3S_64_PGTABLE_H_
|
||||
#define _ASM_POWERPC_BOOK3S_64_PGTABLE_H_
|
||||
|
||||
#include <asm-generic/5level-fixup.h>
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
#include <linux/mmdebug.h>
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Common bits between hash and Radix page table
|
||||
*/
|
||||
@ -347,23 +350,58 @@ static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
|
||||
__r; \
|
||||
})
|
||||
|
||||
static inline int __pte_write(pte_t pte)
|
||||
{
|
||||
return !!(pte_raw(pte) & cpu_to_be64(_PAGE_WRITE));
|
||||
}
|
||||
|
||||
#ifdef CONFIG_NUMA_BALANCING
|
||||
#define pte_savedwrite pte_savedwrite
|
||||
static inline bool pte_savedwrite(pte_t pte)
|
||||
{
|
||||
/*
|
||||
* Saved write ptes are prot none ptes that doesn't have
|
||||
* privileged bit sit. We mark prot none as one which has
|
||||
* present and pviliged bit set and RWX cleared. To mark
|
||||
* protnone which used to have _PAGE_WRITE set we clear
|
||||
* the privileged bit.
|
||||
*/
|
||||
return !(pte_raw(pte) & cpu_to_be64(_PAGE_RWX | _PAGE_PRIVILEGED));
|
||||
}
|
||||
#else
|
||||
#define pte_savedwrite pte_savedwrite
|
||||
static inline bool pte_savedwrite(pte_t pte)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
#endif
|
||||
|
||||
static inline int pte_write(pte_t pte)
|
||||
{
|
||||
return __pte_write(pte) || pte_savedwrite(pte);
|
||||
}
|
||||
|
||||
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
|
||||
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
|
||||
pte_t *ptep)
|
||||
{
|
||||
if ((pte_raw(*ptep) & cpu_to_be64(_PAGE_WRITE)) == 0)
|
||||
return;
|
||||
|
||||
pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 0);
|
||||
if (__pte_write(*ptep))
|
||||
pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 0);
|
||||
else if (unlikely(pte_savedwrite(*ptep)))
|
||||
pte_update(mm, addr, ptep, 0, _PAGE_PRIVILEGED, 0);
|
||||
}
|
||||
|
||||
static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
|
||||
unsigned long addr, pte_t *ptep)
|
||||
{
|
||||
if ((pte_raw(*ptep) & cpu_to_be64(_PAGE_WRITE)) == 0)
|
||||
return;
|
||||
|
||||
pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 1);
|
||||
/*
|
||||
* We should not find protnone for hugetlb, but this complete the
|
||||
* interface.
|
||||
*/
|
||||
if (__pte_write(*ptep))
|
||||
pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 1);
|
||||
else if (unlikely(pte_savedwrite(*ptep)))
|
||||
pte_update(mm, addr, ptep, 0, _PAGE_PRIVILEGED, 1);
|
||||
}
|
||||
|
||||
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
|
||||
@ -397,11 +435,6 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
|
||||
pte_update(mm, addr, ptep, ~0UL, 0, 0);
|
||||
}
|
||||
|
||||
static inline int pte_write(pte_t pte)
|
||||
{
|
||||
return !!(pte_raw(pte) & cpu_to_be64(_PAGE_WRITE));
|
||||
}
|
||||
|
||||
static inline int pte_dirty(pte_t pte)
|
||||
{
|
||||
return !!(pte_raw(pte) & cpu_to_be64(_PAGE_DIRTY));
|
||||
@ -465,19 +498,12 @@ static inline pte_t pte_clear_savedwrite(pte_t pte)
|
||||
VM_BUG_ON(!pte_protnone(pte));
|
||||
return __pte(pte_val(pte) | _PAGE_PRIVILEGED);
|
||||
}
|
||||
|
||||
#define pte_savedwrite pte_savedwrite
|
||||
static inline bool pte_savedwrite(pte_t pte)
|
||||
#else
|
||||
#define pte_clear_savedwrite pte_clear_savedwrite
|
||||
static inline pte_t pte_clear_savedwrite(pte_t pte)
|
||||
{
|
||||
/*
|
||||
* Saved write ptes are prot none ptes that doesn't have
|
||||
* privileged bit sit. We mark prot none as one which has
|
||||
* present and pviliged bit set and RWX cleared. To mark
|
||||
* protnone which used to have _PAGE_WRITE set we clear
|
||||
* the privileged bit.
|
||||
*/
|
||||
VM_BUG_ON(!pte_protnone(pte));
|
||||
return !(pte_raw(pte) & cpu_to_be64(_PAGE_RWX | _PAGE_PRIVILEGED));
|
||||
VM_WARN_ON(1);
|
||||
return __pte(pte_val(pte) & ~_PAGE_WRITE);
|
||||
}
|
||||
#endif /* CONFIG_NUMA_BALANCING */
|
||||
|
||||
@ -506,6 +532,8 @@ static inline unsigned long pte_pfn(pte_t pte)
|
||||
/* Generic modifiers for PTE bits */
|
||||
static inline pte_t pte_wrprotect(pte_t pte)
|
||||
{
|
||||
if (unlikely(pte_savedwrite(pte)))
|
||||
return pte_clear_savedwrite(pte);
|
||||
return __pte(pte_val(pte) & ~_PAGE_WRITE);
|
||||
}
|
||||
|
||||
@ -926,6 +954,7 @@ static inline int pmd_protnone(pmd_t pmd)
|
||||
|
||||
#define __HAVE_ARCH_PMD_WRITE
|
||||
#define pmd_write(pmd) pte_write(pmd_pte(pmd))
|
||||
#define __pmd_write(pmd) __pte_write(pmd_pte(pmd))
|
||||
#define pmd_savedwrite(pmd) pte_savedwrite(pmd_pte(pmd))
|
||||
|
||||
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
|
||||
@ -982,11 +1011,10 @@ static inline int __pmdp_test_and_clear_young(struct mm_struct *mm,
|
||||
static inline void pmdp_set_wrprotect(struct mm_struct *mm, unsigned long addr,
|
||||
pmd_t *pmdp)
|
||||
{
|
||||
|
||||
if ((pmd_raw(*pmdp) & cpu_to_be64(_PAGE_WRITE)) == 0)
|
||||
return;
|
||||
|
||||
pmd_hugepage_update(mm, addr, pmdp, _PAGE_WRITE, 0);
|
||||
if (__pmd_write((*pmdp)))
|
||||
pmd_hugepage_update(mm, addr, pmdp, _PAGE_WRITE, 0);
|
||||
else if (unlikely(pmd_savedwrite(*pmdp)))
|
||||
pmd_hugepage_update(mm, addr, pmdp, 0, _PAGE_PRIVILEGED);
|
||||
}
|
||||
|
||||
static inline int pmd_trans_huge(pmd_t pmd)
|
||||
|
@ -112,7 +112,7 @@ static inline __wsum csum_add(__wsum csum, __wsum addend)
|
||||
|
||||
#ifdef __powerpc64__
|
||||
res += (__force u64)addend;
|
||||
return (__force __wsum)((u32)res + (res >> 32));
|
||||
return (__force __wsum) from64to32(res);
|
||||
#else
|
||||
asm("addc %0,%0,%1;"
|
||||
"addze %0,%0;"
|
||||
|
@ -70,8 +70,8 @@ static inline void report_invalid_psscr_val(u64 psscr_val, int err)
|
||||
std r0,0(r1); \
|
||||
ptesync; \
|
||||
ld r0,0(r1); \
|
||||
1: cmpd cr0,r0,r0; \
|
||||
bne 1b; \
|
||||
236: cmpd cr0,r0,r0; \
|
||||
bne 236b; \
|
||||
IDLE_INST; \
|
||||
|
||||
#define IDLE_STATE_ENTER_SEQ_NORET(IDLE_INST) \
|
||||
|
@ -144,8 +144,8 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
|
||||
#define ARCH_DLINFO_CACHE_GEOMETRY \
|
||||
NEW_AUX_ENT(AT_L1I_CACHESIZE, ppc64_caches.l1i.size); \
|
||||
NEW_AUX_ENT(AT_L1I_CACHEGEOMETRY, get_cache_geometry(l1i)); \
|
||||
NEW_AUX_ENT(AT_L1D_CACHESIZE, ppc64_caches.l1i.size); \
|
||||
NEW_AUX_ENT(AT_L1D_CACHEGEOMETRY, get_cache_geometry(l1i)); \
|
||||
NEW_AUX_ENT(AT_L1D_CACHESIZE, ppc64_caches.l1d.size); \
|
||||
NEW_AUX_ENT(AT_L1D_CACHEGEOMETRY, get_cache_geometry(l1d)); \
|
||||
NEW_AUX_ENT(AT_L2_CACHESIZE, ppc64_caches.l2.size); \
|
||||
NEW_AUX_ENT(AT_L2_CACHEGEOMETRY, get_cache_geometry(l2)); \
|
||||
NEW_AUX_ENT(AT_L3_CACHESIZE, ppc64_caches.l3.size); \
|
||||
|
@ -1,6 +1,7 @@
|
||||
#ifndef _ASM_POWERPC_NOHASH_32_PGTABLE_H
|
||||
#define _ASM_POWERPC_NOHASH_32_PGTABLE_H
|
||||
|
||||
#define __ARCH_USE_5LEVEL_HACK
|
||||
#include <asm-generic/pgtable-nopmd.h>
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
|
@ -1,5 +1,8 @@
|
||||
#ifndef _ASM_POWERPC_NOHASH_64_PGTABLE_4K_H
|
||||
#define _ASM_POWERPC_NOHASH_64_PGTABLE_4K_H
|
||||
|
||||
#include <asm-generic/5level-fixup.h>
|
||||
|
||||
/*
|
||||
* Entries per page directory level. The PTE level must use a 64b record
|
||||
* for each page table entry. The PMD and PGD level use a 32b record for
|
||||
|
@ -1,6 +1,7 @@
|
||||
#ifndef _ASM_POWERPC_NOHASH_64_PGTABLE_64K_H
|
||||
#define _ASM_POWERPC_NOHASH_64_PGTABLE_64K_H
|
||||
|
||||
#define __ARCH_USE_5LEVEL_HACK
|
||||
#include <asm-generic/pgtable-nopud.h>
|
||||
|
||||
|
||||
|
@ -230,7 +230,7 @@ static inline int hugepd_ok(hugepd_t hpd)
|
||||
return ((hpd_val(hpd) & 0x4) != 0);
|
||||
#else
|
||||
/* We clear the top bit to indicate hugepd */
|
||||
return ((hpd_val(hpd) & PD_HUGE) == 0);
|
||||
return (hpd_val(hpd) && (hpd_val(hpd) & PD_HUGE) == 0);
|
||||
#endif
|
||||
}
|
||||
|
||||
|
@ -284,6 +284,13 @@
|
||||
#define PPC_INST_BRANCH_COND 0x40800000
|
||||
#define PPC_INST_LBZCIX 0x7c0006aa
|
||||
#define PPC_INST_STBCIX 0x7c0007aa
|
||||
#define PPC_INST_LWZX 0x7c00002e
|
||||
#define PPC_INST_LFSX 0x7c00042e
|
||||
#define PPC_INST_STFSX 0x7c00052e
|
||||
#define PPC_INST_LFDX 0x7c0004ae
|
||||
#define PPC_INST_STFDX 0x7c0005ae
|
||||
#define PPC_INST_LVX 0x7c0000ce
|
||||
#define PPC_INST_STVX 0x7c0001ce
|
||||
|
||||
/* macros to insert fields into opcodes */
|
||||
#define ___PPC_RA(a) (((a) & 0x1f) << 16)
|
||||
|
@ -160,12 +160,18 @@ struct of_drconf_cell {
|
||||
#define OV5_PFO_HW_ENCR 0x1120 /* PFO Encryption Accelerator */
|
||||
#define OV5_SUB_PROCESSORS 0x1501 /* 1,2,or 4 Sub-Processors supported */
|
||||
#define OV5_XIVE_EXPLOIT 0x1701 /* XIVE exploitation supported */
|
||||
#define OV5_MMU_RADIX_300 0x1880 /* ISA v3.00 radix MMU supported */
|
||||
#define OV5_MMU_HASH_300 0x1840 /* ISA v3.00 hash MMU supported */
|
||||
#define OV5_MMU_SEGM_RADIX 0x1820 /* radix mode (no segmentation) */
|
||||
#define OV5_MMU_PROC_TBL 0x1810 /* hcall selects SLB or proc table */
|
||||
#define OV5_MMU_SLB 0x1800 /* always use SLB */
|
||||
#define OV5_MMU_GTSE 0x1808 /* Guest translation shootdown */
|
||||
/* MMU Base Architecture */
|
||||
#define OV5_MMU_SUPPORT 0x18C0 /* MMU Mode Support Mask */
|
||||
#define OV5_MMU_HASH 0x1800 /* Hash MMU Only */
|
||||
#define OV5_MMU_RADIX 0x1840 /* Radix MMU Only */
|
||||
#define OV5_MMU_EITHER 0x1880 /* Hash or Radix Supported */
|
||||
#define OV5_MMU_DYNAMIC 0x18C0 /* Hash or Radix Can Switch Later */
|
||||
#define OV5_NMMU 0x1820 /* Nest MMU Available */
|
||||
/* Hash Table Extensions */
|
||||
#define OV5_HASH_SEG_TBL 0x1980 /* In Memory Segment Tables Available */
|
||||
#define OV5_HASH_GTSE 0x1940 /* Guest Translation Shoot Down Avail */
|
||||
/* Radix Table Extensions */
|
||||
#define OV5_RADIX_GTSE 0x1A40 /* Guest Translation Shoot Down Avail */
|
||||
|
||||
/* Option Vector 6: IBM PAPR hints */
|
||||
#define OV6_LINUX 0x02 /* Linux is our OS */
|
||||
|
@ -276,19 +276,21 @@ power_enter_stop:
|
||||
*/
|
||||
andis. r4,r3,PSSCR_EC_ESL_MASK_SHIFTED
|
||||
clrldi r3,r3,60 /* r3 = Bits[60:63] = Requested Level (RL) */
|
||||
bne 1f
|
||||
bne .Lhandle_esl_ec_set
|
||||
IDLE_STATE_ENTER_SEQ(PPC_STOP)
|
||||
li r3,0 /* Since we didn't lose state, return 0 */
|
||||
b pnv_wakeup_noloss
|
||||
|
||||
.Lhandle_esl_ec_set:
|
||||
/*
|
||||
* Check if the requested state is a deep idle state.
|
||||
*/
|
||||
1: LOAD_REG_ADDRBASE(r5,pnv_first_deep_stop_state)
|
||||
LOAD_REG_ADDRBASE(r5,pnv_first_deep_stop_state)
|
||||
ld r4,ADDROFF(pnv_first_deep_stop_state)(r5)
|
||||
cmpd r3,r4
|
||||
bge 2f
|
||||
bge .Lhandle_deep_stop
|
||||
IDLE_STATE_ENTER_SEQ_NORET(PPC_STOP)
|
||||
2:
|
||||
.Lhandle_deep_stop:
|
||||
/*
|
||||
* Entering deep idle state.
|
||||
* Clear thread bit in PACA_CORE_IDLE_STATE, save SPRs to
|
||||
|
@ -168,6 +168,14 @@ static unsigned long __initdata prom_tce_alloc_start;
|
||||
static unsigned long __initdata prom_tce_alloc_end;
|
||||
#endif
|
||||
|
||||
static bool __initdata prom_radix_disable;
|
||||
|
||||
struct platform_support {
|
||||
bool hash_mmu;
|
||||
bool radix_mmu;
|
||||
bool radix_gtse;
|
||||
};
|
||||
|
||||
/* Platforms codes are now obsolete in the kernel. Now only used within this
|
||||
* file and ultimately gone too. Feel free to change them if you need, they
|
||||
* are not shared with anything outside of this file anymore
|
||||
@ -626,6 +634,12 @@ static void __init early_cmdline_parse(void)
|
||||
prom_memory_limit = ALIGN(prom_memory_limit, 0x1000000);
|
||||
#endif
|
||||
}
|
||||
|
||||
opt = strstr(prom_cmd_line, "disable_radix");
|
||||
if (opt) {
|
||||
prom_debug("Radix disabled from cmdline\n");
|
||||
prom_radix_disable = true;
|
||||
}
|
||||
}
|
||||
|
||||
#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
|
||||
@ -695,6 +709,8 @@ struct option_vector5 {
|
||||
u8 byte22;
|
||||
u8 intarch;
|
||||
u8 mmu;
|
||||
u8 hash_ext;
|
||||
u8 radix_ext;
|
||||
} __packed;
|
||||
|
||||
struct option_vector6 {
|
||||
@ -850,8 +866,9 @@ struct ibm_arch_vec __cacheline_aligned ibm_architecture_vec = {
|
||||
.reserved3 = 0,
|
||||
.subprocessors = 1,
|
||||
.intarch = 0,
|
||||
.mmu = OV5_FEAT(OV5_MMU_RADIX_300) | OV5_FEAT(OV5_MMU_HASH_300) |
|
||||
OV5_FEAT(OV5_MMU_PROC_TBL) | OV5_FEAT(OV5_MMU_GTSE),
|
||||
.mmu = 0,
|
||||
.hash_ext = 0,
|
||||
.radix_ext = 0,
|
||||
},
|
||||
|
||||
/* option vector 6: IBM PAPR hints */
|
||||
@ -990,6 +1007,92 @@ static int __init prom_count_smt_threads(void)
|
||||
|
||||
}
|
||||
|
||||
static void __init prom_parse_mmu_model(u8 val,
|
||||
struct platform_support *support)
|
||||
{
|
||||
switch (val) {
|
||||
case OV5_FEAT(OV5_MMU_DYNAMIC):
|
||||
case OV5_FEAT(OV5_MMU_EITHER): /* Either Available */
|
||||
prom_debug("MMU - either supported\n");
|
||||
support->radix_mmu = !prom_radix_disable;
|
||||
support->hash_mmu = true;
|
||||
break;
|
||||
case OV5_FEAT(OV5_MMU_RADIX): /* Only Radix */
|
||||
prom_debug("MMU - radix only\n");
|
||||
if (prom_radix_disable) {
|
||||
/*
|
||||
* If we __have__ to do radix, we're better off ignoring
|
||||
* the command line rather than not booting.
|
||||
*/
|
||||
prom_printf("WARNING: Ignoring cmdline option disable_radix\n");
|
||||
}
|
||||
support->radix_mmu = true;
|
||||
break;
|
||||
case OV5_FEAT(OV5_MMU_HASH):
|
||||
prom_debug("MMU - hash only\n");
|
||||
support->hash_mmu = true;
|
||||
break;
|
||||
default:
|
||||
prom_debug("Unknown mmu support option: 0x%x\n", val);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
static void __init prom_parse_platform_support(u8 index, u8 val,
|
||||
struct platform_support *support)
|
||||
{
|
||||
switch (index) {
|
||||
case OV5_INDX(OV5_MMU_SUPPORT): /* MMU Model */
|
||||
prom_parse_mmu_model(val & OV5_FEAT(OV5_MMU_SUPPORT), support);
|
||||
break;
|
||||
case OV5_INDX(OV5_RADIX_GTSE): /* Radix Extensions */
|
||||
if (val & OV5_FEAT(OV5_RADIX_GTSE)) {
|
||||
prom_debug("Radix - GTSE supported\n");
|
||||
support->radix_gtse = true;
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
static void __init prom_check_platform_support(void)
|
||||
{
|
||||
struct platform_support supported = {
|
||||
.hash_mmu = false,
|
||||
.radix_mmu = false,
|
||||
.radix_gtse = false
|
||||
};
|
||||
int prop_len = prom_getproplen(prom.chosen,
|
||||
"ibm,arch-vec-5-platform-support");
|
||||
if (prop_len > 1) {
|
||||
int i;
|
||||
u8 vec[prop_len];
|
||||
prom_debug("Found ibm,arch-vec-5-platform-support, len: %d\n",
|
||||
prop_len);
|
||||
prom_getprop(prom.chosen, "ibm,arch-vec-5-platform-support",
|
||||
&vec, sizeof(vec));
|
||||
for (i = 0; i < prop_len; i += 2) {
|
||||
prom_debug("%d: index = 0x%x val = 0x%x\n", i / 2
|
||||
, vec[i]
|
||||
, vec[i + 1]);
|
||||
prom_parse_platform_support(vec[i], vec[i + 1],
|
||||
&supported);
|
||||
}
|
||||
}
|
||||
|
||||
if (supported.radix_mmu && supported.radix_gtse) {
|
||||
/* Radix preferred - but we require GTSE for now */
|
||||
prom_debug("Asking for radix with GTSE\n");
|
||||
ibm_architecture_vec.vec5.mmu = OV5_FEAT(OV5_MMU_RADIX);
|
||||
ibm_architecture_vec.vec5.radix_ext = OV5_FEAT(OV5_RADIX_GTSE);
|
||||
} else if (supported.hash_mmu) {
|
||||
/* Default to hash mmu (if we can) */
|
||||
prom_debug("Asking for hash\n");
|
||||
ibm_architecture_vec.vec5.mmu = OV5_FEAT(OV5_MMU_HASH);
|
||||
} else {
|
||||
/* We're probably on a legacy hypervisor */
|
||||
prom_debug("Assuming legacy hash support\n");
|
||||
}
|
||||
}
static void __init prom_send_capabilities(void)
{
@ -997,6 +1100,9 @@ static void __init prom_send_capabilities(void)
	prom_arg_t ret;
	u32 cores;

	/* Check ibm,arch-vec-5-platform-support and fixup vec5 if required */
	prom_check_platform_support();

	root = call_prom("open", 1, 1, ADDR("/"));
	if (root != 0) {
		/* We need to tell the FW about the number of cores we support.

@ -2993,6 +3099,11 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
	 */
	prom_check_initrd(r3, r4);

	/*
	 * Do early parsing of command line
	 */
	early_cmdline_parse();

#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
	/*
	 * On pSeries, inform the firmware about our capabilities
@ -3008,11 +3119,6 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
	if (of_platform != PLATFORM_POWERMAC)
		copy_and_flush(0, kbase, 0x100, 0);

	/*
	 * Do early parsing of command line
	 */
	early_cmdline_parse();

	/*
	 * Initialize memory management within prom_init
	 */

@ -408,7 +408,10 @@ static void init_cache_info(struct ppc_cache_info *info, u32 size, u32 lsize,
	info->line_size = lsize;
	info->block_size = bsize;
	info->log_block_size = __ilog2(bsize);
	info->blocks_per_page = PAGE_SIZE / bsize;
	if (bsize)
		info->blocks_per_page = PAGE_SIZE / bsize;
	else
		info->blocks_per_page = 0;

	if (sets == 0)
		info->assoc = 0xffff;

@ -601,7 +601,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
			hva, NULL, NULL);
	if (ptep) {
		pte = kvmppc_read_update_linux_pte(ptep, 1);
		if (pte_write(pte))
		if (__pte_write(pte))
			write_ok = 1;
	}
	local_irq_restore(flags);

@ -256,7 +256,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
	}
	pte = kvmppc_read_update_linux_pte(ptep, writing);
	if (pte_present(pte) && !pte_protnone(pte)) {
		if (writing && !pte_write(pte))
		if (writing && !__pte_write(pte))
			/* make the actual HPTE be read-only */
			ptel = hpte_make_readonly(ptel);
		is_ci = pte_ci(pte);

@ -20,6 +20,7 @@ obj64-y += copypage_64.o copyuser_64.o usercopy_64.o mem_64.o hweight_64.o \

obj64-$(CONFIG_SMP)	+= locks.o
obj64-$(CONFIG_ALTIVEC)	+= vmx-helper.o
obj64-$(CONFIG_KPROBES_SANITY_TEST) += test_emulate_step.o

obj-y			+= checksum_$(BITS).o checksum_wrappers.o

@ -1799,8 +1799,6 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
		goto instr_done;

	case LARX:
		if (regs->msr & MSR_LE)
			return 0;
		if (op.ea & (size - 1))
			break;		/* can't handle misaligned */
		if (!address_ok(regs, op.ea, size))
@ -1823,8 +1821,6 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
		goto ldst_done;

	case STCX:
		if (regs->msr & MSR_LE)
			return 0;
		if (op.ea & (size - 1))
			break;		/* can't handle misaligned */
		if (!address_ok(regs, op.ea, size))
@ -1849,8 +1845,6 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
		goto ldst_done;

	case LOAD:
		if (regs->msr & MSR_LE)
			return 0;
		err = read_mem(&regs->gpr[op.reg], op.ea, size, regs);
		if (!err) {
			if (op.type & SIGNEXT)
@ -1862,8 +1856,6 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)

#ifdef CONFIG_PPC_FPU
	case LOAD_FP:
		if (regs->msr & MSR_LE)
			return 0;
		if (size == 4)
			err = do_fp_load(op.reg, do_lfs, op.ea, size, regs);
		else
@ -1872,15 +1864,11 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
#endif
#ifdef CONFIG_ALTIVEC
	case LOAD_VMX:
		if (regs->msr & MSR_LE)
			return 0;
		err = do_vec_load(op.reg, do_lvx, op.ea & ~0xfUL, regs);
		goto ldst_done;
#endif
#ifdef CONFIG_VSX
	case LOAD_VSX:
		if (regs->msr & MSR_LE)
			return 0;
		err = do_vsx_load(op.reg, do_lxvd2x, op.ea, regs);
		goto ldst_done;
#endif
@ -1903,8 +1891,6 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
		goto instr_done;

	case STORE:
		if (regs->msr & MSR_LE)
			return 0;
		if ((op.type & UPDATE) && size == sizeof(long) &&
		    op.reg == 1 && op.update_reg == 1 &&
		    !(regs->msr & MSR_PR) &&
@ -1917,8 +1903,6 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)

#ifdef CONFIG_PPC_FPU
	case STORE_FP:
		if (regs->msr & MSR_LE)
			return 0;
		if (size == 4)
			err = do_fp_store(op.reg, do_stfs, op.ea, size, regs);
		else
@ -1927,15 +1911,11 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
#endif
#ifdef CONFIG_ALTIVEC
	case STORE_VMX:
		if (regs->msr & MSR_LE)
			return 0;
		err = do_vec_store(op.reg, do_stvx, op.ea & ~0xfUL, regs);
		goto ldst_done;
#endif
#ifdef CONFIG_VSX
	case STORE_VSX:
		if (regs->msr & MSR_LE)
			return 0;
		err = do_vsx_store(op.reg, do_stxvd2x, op.ea, regs);
		goto ldst_done;
#endif
arch/powerpc/lib/test_emulate_step.c (new file, 434 lines)
@ -0,0 +1,434 @@
/*
 * Simple sanity test for emulate_step load/store instructions.
 *
 * Copyright IBM Corp. 2016
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#define pr_fmt(fmt) "emulate_step_test: " fmt

#include <linux/ptrace.h>
#include <asm/sstep.h>
#include <asm/ppc-opcode.h>

#define IMM_L(i)		((uintptr_t)(i) & 0xffff)

/*
 * Defined with TEST_ prefix so it does not conflict with other
 * definitions.
 */
#define TEST_LD(r, base, i)	(PPC_INST_LD | ___PPC_RT(r) |		\
					___PPC_RA(base) | IMM_L(i))
#define TEST_LWZ(r, base, i)	(PPC_INST_LWZ | ___PPC_RT(r) |		\
					___PPC_RA(base) | IMM_L(i))
#define TEST_LWZX(t, a, b)	(PPC_INST_LWZX | ___PPC_RT(t) |		\
					___PPC_RA(a) | ___PPC_RB(b))
#define TEST_STD(r, base, i)	(PPC_INST_STD | ___PPC_RS(r) |		\
					___PPC_RA(base) | ((i) & 0xfffc))
#define TEST_LDARX(t, a, b, eh)	(PPC_INST_LDARX | ___PPC_RT(t) |	\
					___PPC_RA(a) | ___PPC_RB(b) |	\
					__PPC_EH(eh))
#define TEST_STDCX(s, a, b)	(PPC_INST_STDCX | ___PPC_RS(s) |	\
					___PPC_RA(a) | ___PPC_RB(b))
#define TEST_LFSX(t, a, b)	(PPC_INST_LFSX | ___PPC_RT(t) |		\
					___PPC_RA(a) | ___PPC_RB(b))
#define TEST_STFSX(s, a, b)	(PPC_INST_STFSX | ___PPC_RS(s) |	\
					___PPC_RA(a) | ___PPC_RB(b))
#define TEST_LFDX(t, a, b)	(PPC_INST_LFDX | ___PPC_RT(t) |		\
					___PPC_RA(a) | ___PPC_RB(b))
#define TEST_STFDX(s, a, b)	(PPC_INST_STFDX | ___PPC_RS(s) |	\
					___PPC_RA(a) | ___PPC_RB(b))
#define TEST_LVX(t, a, b)	(PPC_INST_LVX | ___PPC_RT(t) |		\
					___PPC_RA(a) | ___PPC_RB(b))
#define TEST_STVX(s, a, b)	(PPC_INST_STVX | ___PPC_RS(s) |		\
					___PPC_RA(a) | ___PPC_RB(b))
#define TEST_LXVD2X(s, a, b)	(PPC_INST_LXVD2X | VSX_XX1((s), R##a, R##b))
#define TEST_STXVD2X(s, a, b)	(PPC_INST_STXVD2X | VSX_XX1((s), R##a, R##b))


static void __init init_pt_regs(struct pt_regs *regs)
{
	static unsigned long msr;
	static bool msr_cached;

	memset(regs, 0, sizeof(struct pt_regs));

	if (likely(msr_cached)) {
		regs->msr = msr;
		return;
	}

	asm volatile("mfmsr %0" : "=r"(regs->msr));

	regs->msr |= MSR_FP;
	regs->msr |= MSR_VEC;
	regs->msr |= MSR_VSX;

	msr = regs->msr;
	msr_cached = true;
}

static void __init show_result(char *ins, char *result)
{
	pr_info("%-14s : %s\n", ins, result);
}

static void __init test_ld(void)
{
	struct pt_regs regs;
	unsigned long a = 0x23;
	int stepped = -1;

	init_pt_regs(&regs);
	regs.gpr[3] = (unsigned long) &a;

	/* ld r5, 0(r3) */
	stepped = emulate_step(&regs, TEST_LD(5, 3, 0));

	if (stepped == 1 && regs.gpr[5] == a)
		show_result("ld", "PASS");
	else
		show_result("ld", "FAIL");
}

static void __init test_lwz(void)
{
	struct pt_regs regs;
	unsigned int a = 0x4545;
	int stepped = -1;

	init_pt_regs(&regs);
	regs.gpr[3] = (unsigned long) &a;

	/* lwz r5, 0(r3) */
	stepped = emulate_step(&regs, TEST_LWZ(5, 3, 0));

	if (stepped == 1 && regs.gpr[5] == a)
		show_result("lwz", "PASS");
	else
		show_result("lwz", "FAIL");
}

static void __init test_lwzx(void)
{
	struct pt_regs regs;
	unsigned int a[3] = {0x0, 0x0, 0x1234};
	int stepped = -1;

	init_pt_regs(&regs);
	regs.gpr[3] = (unsigned long) a;
	regs.gpr[4] = 8;
	regs.gpr[5] = 0x8765;

	/* lwzx r5, r3, r4 */
	stepped = emulate_step(&regs, TEST_LWZX(5, 3, 4));
	if (stepped == 1 && regs.gpr[5] == a[2])
		show_result("lwzx", "PASS");
	else
		show_result("lwzx", "FAIL");
}
static void __init test_std(void)
{
	struct pt_regs regs;
	unsigned long a = 0x1234;
	int stepped = -1;

	init_pt_regs(&regs);
	regs.gpr[3] = (unsigned long) &a;
	regs.gpr[5] = 0x5678;

	/* std r5, 0(r3) */
	stepped = emulate_step(&regs, TEST_STD(5, 3, 0));
	if (stepped == 1 && regs.gpr[5] == a)
		show_result("std", "PASS");
	else
		show_result("std", "FAIL");
}

static void __init test_ldarx_stdcx(void)
{
	struct pt_regs regs;
	unsigned long a = 0x1234;
	int stepped = -1;
	unsigned long cr0_eq = 0x1 << 29; /* eq bit of CR0 */

	init_pt_regs(&regs);
	asm volatile("mfcr %0" : "=r"(regs.ccr));


	/*** ldarx ***/

	regs.gpr[3] = (unsigned long) &a;
	regs.gpr[4] = 0;
	regs.gpr[5] = 0x5678;

	/* ldarx r5, r3, r4, 0 */
	stepped = emulate_step(&regs, TEST_LDARX(5, 3, 4, 0));

	/*
	 * Don't touch 'a' here. Touching 'a' can cause a load/store
	 * of 'a', which results in failure of the subsequent stdcx.
	 * Instead, use a hardcoded value for comparison.
	 */
	if (stepped <= 0 || regs.gpr[5] != 0x1234) {
		show_result("ldarx / stdcx.", "FAIL (ldarx)");
		return;
	}


	/*** stdcx. ***/

	regs.gpr[5] = 0x9ABC;

	/* stdcx. r5, r3, r4 */
	stepped = emulate_step(&regs, TEST_STDCX(5, 3, 4));

	/*
	 * Two possible scenarios indicate successful emulation
	 * of stdcx. :
	 * 1. The reservation is active and the store is performed. In this
	 *    case the cr0.eq bit will be set to 1.
	 * 2. The reservation is not active and the store is not performed.
	 *    In this case the cr0.eq bit will be set to 0.
	 */
	if (stepped == 1 && ((regs.gpr[5] == a && (regs.ccr & cr0_eq))
			|| (regs.gpr[5] != a && !(regs.ccr & cr0_eq))))
		show_result("ldarx / stdcx.", "PASS");
	else
		show_result("ldarx / stdcx.", "FAIL (stdcx.)");
}
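The cr0_eq mask above relies on the layout of the CR image saved in pt_regs: CR0 occupies the four most significant bits of the 32-bit value, in the order LT, GT, EQ, SO, so CR0's EQ bit sits at bit 29. A small illustrative header, my sketch rather than part of the patch, with hypothetical names, spelling that out:

/* CR0 field bits within the 32-bit CR image (bit 31 is the MSB). */
#define CR0_LT	(0x1u << 31)	/* negative */
#define CR0_GT	(0x1u << 30)	/* positive */
#define CR0_EQ	(0x1u << 29)	/* zero; stdcx. sets it when the store was performed */
#define CR0_SO	(0x1u << 28)	/* summary overflow */

static inline int stdcx_store_performed(unsigned long ccr)
{
	return (ccr & CR0_EQ) != 0;
}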
#ifdef CONFIG_PPC_FPU
static void __init test_lfsx_stfsx(void)
{
	struct pt_regs regs;
	union {
		float a;
		int b;
	} c;
	int cached_b;
	int stepped = -1;

	init_pt_regs(&regs);


	/*** lfsx ***/

	c.a = 123.45;
	cached_b = c.b;

	regs.gpr[3] = (unsigned long) &c.a;
	regs.gpr[4] = 0;

	/* lfsx frt10, r3, r4 */
	stepped = emulate_step(&regs, TEST_LFSX(10, 3, 4));

	if (stepped == 1)
		show_result("lfsx", "PASS");
	else
		show_result("lfsx", "FAIL");


	/*** stfsx ***/

	c.a = 678.91;

	/* stfsx frs10, r3, r4 */
	stepped = emulate_step(&regs, TEST_STFSX(10, 3, 4));

	if (stepped == 1 && c.b == cached_b)
		show_result("stfsx", "PASS");
	else
		show_result("stfsx", "FAIL");
}

static void __init test_lfdx_stfdx(void)
{
	struct pt_regs regs;
	union {
		double a;
		long b;
	} c;
	long cached_b;
	int stepped = -1;

	init_pt_regs(&regs);


	/*** lfdx ***/

	c.a = 123456.78;
	cached_b = c.b;

	regs.gpr[3] = (unsigned long) &c.a;
	regs.gpr[4] = 0;

	/* lfdx frt10, r3, r4 */
	stepped = emulate_step(&regs, TEST_LFDX(10, 3, 4));

	if (stepped == 1)
		show_result("lfdx", "PASS");
	else
		show_result("lfdx", "FAIL");


	/*** stfdx ***/

	c.a = 987654.32;

	/* stfdx frs10, r3, r4 */
	stepped = emulate_step(&regs, TEST_STFDX(10, 3, 4));

	if (stepped == 1 && c.b == cached_b)
		show_result("stfdx", "PASS");
	else
		show_result("stfdx", "FAIL");
}
#else
static void __init test_lfsx_stfsx(void)
{
	show_result("lfsx", "SKIP (CONFIG_PPC_FPU is not set)");
	show_result("stfsx", "SKIP (CONFIG_PPC_FPU is not set)");
}

static void __init test_lfdx_stfdx(void)
{
	show_result("lfdx", "SKIP (CONFIG_PPC_FPU is not set)");
	show_result("stfdx", "SKIP (CONFIG_PPC_FPU is not set)");
}
#endif /* CONFIG_PPC_FPU */

#ifdef CONFIG_ALTIVEC
static void __init test_lvx_stvx(void)
{
	struct pt_regs regs;
	union {
		vector128 a;
		u32 b[4];
	} c;
	u32 cached_b[4];
	int stepped = -1;

	init_pt_regs(&regs);


	/*** lvx ***/

	cached_b[0] = c.b[0] = 923745;
	cached_b[1] = c.b[1] = 2139478;
	cached_b[2] = c.b[2] = 9012;
	cached_b[3] = c.b[3] = 982134;

	regs.gpr[3] = (unsigned long) &c.a;
	regs.gpr[4] = 0;

	/* lvx vrt10, r3, r4 */
	stepped = emulate_step(&regs, TEST_LVX(10, 3, 4));

	if (stepped == 1)
		show_result("lvx", "PASS");
	else
		show_result("lvx", "FAIL");


	/*** stvx ***/

	c.b[0] = 4987513;
	c.b[1] = 84313948;
	c.b[2] = 71;
	c.b[3] = 498532;

	/* stvx vrs10, r3, r4 */
	stepped = emulate_step(&regs, TEST_STVX(10, 3, 4));

	if (stepped == 1 && cached_b[0] == c.b[0] && cached_b[1] == c.b[1] &&
	    cached_b[2] == c.b[2] && cached_b[3] == c.b[3])
		show_result("stvx", "PASS");
	else
		show_result("stvx", "FAIL");
}
#else
static void __init test_lvx_stvx(void)
{
	show_result("lvx", "SKIP (CONFIG_ALTIVEC is not set)");
	show_result("stvx", "SKIP (CONFIG_ALTIVEC is not set)");
}
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_VSX
static void __init test_lxvd2x_stxvd2x(void)
{
	struct pt_regs regs;
	union {
		vector128 a;
		u32 b[4];
	} c;
	u32 cached_b[4];
	int stepped = -1;

	init_pt_regs(&regs);


	/*** lxvd2x ***/

	cached_b[0] = c.b[0] = 18233;
	cached_b[1] = c.b[1] = 34863571;
	cached_b[2] = c.b[2] = 834;
	cached_b[3] = c.b[3] = 6138911;

	regs.gpr[3] = (unsigned long) &c.a;
	regs.gpr[4] = 0;

	/* lxvd2x vsr39, r3, r4 */
	stepped = emulate_step(&regs, TEST_LXVD2X(39, 3, 4));

	if (stepped == 1)
		show_result("lxvd2x", "PASS");
	else
		show_result("lxvd2x", "FAIL");


	/*** stxvd2x ***/

	c.b[0] = 21379463;
	c.b[1] = 87;
	c.b[2] = 374234;
	c.b[3] = 4;

	/* stxvd2x vsr39, r3, r4 */
	stepped = emulate_step(&regs, TEST_STXVD2X(39, 3, 4));

	if (stepped == 1 && cached_b[0] == c.b[0] && cached_b[1] == c.b[1] &&
	    cached_b[2] == c.b[2] && cached_b[3] == c.b[3])
		show_result("stxvd2x", "PASS");
	else
		show_result("stxvd2x", "FAIL");
}
#else
static void __init test_lxvd2x_stxvd2x(void)
{
	show_result("lxvd2x", "SKIP (CONFIG_VSX is not set)");
	show_result("stxvd2x", "SKIP (CONFIG_VSX is not set)");
}
#endif /* CONFIG_VSX */

static int __init test_emulate_step(void)
{
	test_ld();
	test_lwz();
	test_lwzx();
	test_std();
	test_ldarx_stdcx();
	test_lfsx_stfsx();
	test_lfdx_stfdx();
	test_lvx_stvx();
	test_lxvd2x_stxvd2x();

	return 0;
}
late_initcall(test_emulate_step);

@ -356,18 +356,42 @@ static void early_check_vec5(void)
	unsigned long root, chosen;
	int size;
	const u8 *vec5;
	u8 mmu_supported;

	root = of_get_flat_dt_root();
	chosen = of_get_flat_dt_subnode_by_name(root, "chosen");
	if (chosen == -FDT_ERR_NOTFOUND)
		return;
	vec5 = of_get_flat_dt_prop(chosen, "ibm,architecture-vec-5", &size);
	if (!vec5)
		return;
	if (size <= OV5_INDX(OV5_MMU_RADIX_300) ||
	    !(vec5[OV5_INDX(OV5_MMU_RADIX_300)] & OV5_FEAT(OV5_MMU_RADIX_300)))
		/* Hypervisor doesn't support radix */
	if (chosen == -FDT_ERR_NOTFOUND) {
		cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
		return;
	}
	vec5 = of_get_flat_dt_prop(chosen, "ibm,architecture-vec-5", &size);
	if (!vec5) {
		cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
		return;
	}
	if (size <= OV5_INDX(OV5_MMU_SUPPORT)) {
		cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
		return;
	}

	/* Check for supported configuration */
	mmu_supported = vec5[OV5_INDX(OV5_MMU_SUPPORT)] &
			OV5_FEAT(OV5_MMU_SUPPORT);
	if (mmu_supported == OV5_FEAT(OV5_MMU_RADIX)) {
		/* Hypervisor only supports radix - check enabled && GTSE */
		if (!early_radix_enabled()) {
			pr_warn("WARNING: Ignoring cmdline option disable_radix\n");
		}
		if (!(vec5[OV5_INDX(OV5_RADIX_GTSE)] &
						OV5_FEAT(OV5_RADIX_GTSE))) {
			pr_warn("WARNING: Hypervisor doesn't support RADIX with GTSE\n");
		}
		/* Do radix anyway - the hypervisor said we had to */
		cur_cpu_spec->mmu_features |= MMU_FTR_TYPE_RADIX;
	} else if (mmu_supported == OV5_FEAT(OV5_MMU_HASH)) {
		/* Hypervisor only supports hash - disable radix */
		cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
	}
}

void __init mmu_early_init_devtree(void)
@ -383,7 +407,7 @@ void __init mmu_early_init_devtree(void)
	 * even though the ibm,architecture-vec-5 property created by
	 * skiboot doesn't have the necessary bits set.
	 */
	if (early_radix_enabled() && !(mfmsr() & MSR_HV))
	if (!(mfmsr() & MSR_HV))
		early_check_vec5();

	if (early_radix_enabled())

@ -186,6 +186,10 @@ static void __init radix_init_pgtable(void)
	 */
	register_process_table(__pa(process_tb), 0, PRTB_SIZE_SHIFT - 12);
	pr_info("Process table %p and radix root for kernel: %p\n", process_tb, init_mm.pgd);
	asm volatile("ptesync" : : : "memory");
	asm volatile(PPC_TLBIE_5(%0,%1,2,1,1) : :
		     "r" (TLBIEL_INVAL_SET_LPID), "r" (0));
	asm volatile("eieio; tlbsync; ptesync" : : : "memory");
}

static void __init radix_init_partition_table(void)

@ -39,8 +39,8 @@ opal_tracepoint_refcount:
	BEGIN_FTR_SECTION;						\
	b	1f;							\
	END_FTR_SECTION(0, 1);						\
	ld	r12,opal_tracepoint_refcount@toc(r2);			\
	cmpdi	r12,0;							\
	ld	r11,opal_tracepoint_refcount@toc(r2);			\
	cmpdi	r11,0;							\
	bne-	LABEL;							\
1:

@ -116,13 +116,13 @@ dt_offset:

	.data
	.balign 8
	.globl sha256_digest
sha256_digest:
	.globl purgatory_sha256_digest
purgatory_sha256_digest:
	.skip	32
	.size sha256_digest, . - sha256_digest
	.size purgatory_sha256_digest, . - purgatory_sha256_digest

	.balign 8
	.globl sha_regions
sha_regions:
	.globl purgatory_sha_regions
purgatory_sha_regions:
	.skip	8 * 2 * 16
	.size sha_regions, . - sha_regions
	.size purgatory_sha_regions, . - purgatory_sha_regions

@ -274,7 +274,9 @@ failed:
			if (bank->disk->major > 0)
				unregister_blkdev(bank->disk->major,
						bank->disk->disk_name);
			del_gendisk(bank->disk);
			if (bank->disk->flags & GENHD_FL_UP)
				del_gendisk(bank->disk);
			put_disk(bank->disk);
		}
		device->dev.platform_data = NULL;
		if (bank->io_addr != 0)
@ -299,6 +301,7 @@ axon_ram_remove(struct platform_device *device)
	device_remove_file(&device->dev, &dev_attr_ecc);
	free_irq(bank->irq_id, device);
	del_gendisk(bank->disk);
	put_disk(bank->disk);
	iounmap((void __iomem *) bank->io_addr);
	kfree(bank);

@ -91,6 +91,16 @@ static unsigned int icp_opal_get_irq(void)

static void icp_opal_set_cpu_priority(unsigned char cppr)
{
	/*
	 * Here be dragons. The caller has asked to allow only IPI's and not
	 * external interrupts. But OPAL XIVE doesn't support that. So instead
	 * of allowing no interrupts allow all. That's still not right, but
	 * currently the only caller who does this is xics_migrate_irqs_away()
	 * and it works in that case.
	 */
	if (cppr >= DEFAULT_PRIORITY)
		cppr = LOWEST_PRIORITY;

	xics_set_base_cppr(cppr);
	opal_int_set_cppr(cppr);
	iosync();

@ -20,6 +20,7 @@
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/delay.h>

#include <asm/prom.h>
#include <asm/io.h>
@ -198,9 +199,6 @@ void xics_migrate_irqs_away(void)
	/* Remove ourselves from the global interrupt queue */
	xics_set_cpu_giq(xics_default_distrib_server, 0);

	/* Allow IPIs again... */
	icp_ops->set_priority(DEFAULT_PRIORITY);

	for_each_irq_desc(virq, desc) {
		struct irq_chip *chip;
		long server;
@ -255,6 +253,19 @@ void xics_migrate_irqs_away(void)
unlock:
		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}

	/* Allow "sufficient" time to drop any inflight IRQ's */
	mdelay(5);

	/*
	 * Allow IPIs again. This is done at the very end, after migrating all
	 * interrupts, the expectation is that we'll only get woken up by an IPI
	 * interrupt beyond this point, but leave externals masked just to be
	 * safe. If we're using icp-opal this may actually allow all
	 * interrupts anyway, but that should be OK.
	 */
	icp_ops->set_priority(DEFAULT_PRIORITY);
}
#endif /* CONFIG_HOTPLUG_CPU */

@ -609,7 +609,7 @@ CONFIG_SCHED_TRACER=y
CONFIG_FTRACE_SYSCALLS=y
CONFIG_STACK_TRACER=y
CONFIG_BLK_DEV_IO_TRACE=y
CONFIG_UPROBE_EVENT=y
CONFIG_UPROBE_EVENTS=y
CONFIG_FUNCTION_PROFILER=y
CONFIG_HIST_TRIGGERS=y
CONFIG_TRACE_ENUM_MAP_FILE=y

@ -560,7 +560,7 @@ CONFIG_SCHED_TRACER=y
CONFIG_FTRACE_SYSCALLS=y
CONFIG_STACK_TRACER=y
CONFIG_BLK_DEV_IO_TRACE=y
CONFIG_UPROBE_EVENT=y
CONFIG_UPROBE_EVENTS=y
CONFIG_FUNCTION_PROFILER=y
CONFIG_HIST_TRIGGERS=y
CONFIG_TRACE_ENUM_MAP_FILE=y

@ -558,7 +558,7 @@ CONFIG_SCHED_TRACER=y
CONFIG_FTRACE_SYSCALLS=y
CONFIG_STACK_TRACER=y
CONFIG_BLK_DEV_IO_TRACE=y
CONFIG_UPROBE_EVENT=y
CONFIG_UPROBE_EVENTS=y
CONFIG_FUNCTION_PROFILER=y
CONFIG_HIST_TRIGGERS=y
CONFIG_TRACE_ENUM_MAP_FILE=y

@ -474,8 +474,11 @@ static int ctr_paes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
			ret = blkcipher_walk_done(desc, walk, nbytes - n);
		}
		if (k < n) {
			if (__ctr_paes_set_key(ctx) != 0)
			if (__ctr_paes_set_key(ctx) != 0) {
				if (locked)
					spin_unlock(&ctrblk_lock);
				return blkcipher_walk_done(desc, walk, -EIO);
			}
		}
	}
	if (locked)

@ -179,7 +179,7 @@ CONFIG_FTRACE_SYSCALLS=y
CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP=y
CONFIG_STACK_TRACER=y
CONFIG_BLK_DEV_IO_TRACE=y
CONFIG_UPROBE_EVENT=y
CONFIG_UPROBE_EVENTS=y
CONFIG_FUNCTION_PROFILER=y
CONFIG_TRACE_ENUM_MAP_FILE=y
CONFIG_KPROBES_SANITY_TEST=y

@ -8,31 +8,27 @@
#define _S390_CPUTIME_H

#include <linux/types.h>
#include <asm/div64.h>
#include <asm/timex.h>

#define CPUTIME_PER_USEC 4096ULL
#define CPUTIME_PER_SEC (CPUTIME_PER_USEC * USEC_PER_SEC)

/* We want to use full resolution of the CPU timer: 2**-12 micro-seconds. */

typedef unsigned long long __nocast cputime_t;
typedef unsigned long long __nocast cputime64_t;

#define cmpxchg_cputime(ptr, old, new) cmpxchg64(ptr, old, new)

static inline unsigned long __div(unsigned long long n, unsigned long base)
/*
 * Convert cputime to microseconds.
 */
static inline u64 cputime_to_usecs(const u64 cputime)
{
	return n / base;
	return cputime >> 12;
}

/*
 * Convert cputime to microseconds and back.
 * Convert cputime to nanoseconds.
 */
static inline unsigned int cputime_to_usecs(const cputime_t cputime)
{
	return (__force unsigned long long) cputime >> 12;
}

#define cputime_to_nsecs(cputime) tod_to_ns(cputime)

u64 arch_cpu_idle_time(int cpu);
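Since CPUTIME_PER_USEC is 4096 (the CPU timer ticks in units of 2**-12 microseconds), the microsecond conversion above is a plain right shift by 12. A stand-alone check of that arithmetic, my addition rather than part of the diff:

#include <assert.h>
#include <stdint.h>

#define CPUTIME_PER_USEC 4096ULL

/* Same conversion as the new cputime_to_usecs(): divide by 4096 via a shift. */
static uint64_t cputime_to_usecs(uint64_t cputime)
{
	return cputime >> 12;
}

int main(void)
{
	assert(cputime_to_usecs(CPUTIME_PER_USEC) == 1);
	assert(cputime_to_usecs(3 * CPUTIME_PER_USEC + 4095) == 3);	/* truncates */
	assert(cputime_to_usecs(0) == 0);
	return 0;
}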
@ -24,6 +24,7 @@
 * the S390 page table tree.
 */
#ifndef __ASSEMBLY__
#include <asm-generic/5level-fixup.h>
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>

@ -206,20 +206,16 @@ static inline unsigned long long get_tod_clock_monotonic(void)
 * ns = (todval * 125) >> 9;
 *
 * In order to avoid an overflow with the multiplication we can rewrite this.
 * With a split todval == 2^32 * th + tl (th upper 32 bits, tl lower 32 bits)
 * With a split todval == 2^9 * th + tl (th upper 55 bits, tl lower 9 bits)
 * we end up with
 *
 * ns = ((2^32 * th + tl) * 125 ) >> 9;
 * -> ns = (2^23 * th * 125) + ((tl * 125) >> 9);
 * ns = ((2^9 * th + tl) * 125 ) >> 9;
 * -> ns = (th * 125) + ((tl * 125) >> 9);
 *
 */
static inline unsigned long long tod_to_ns(unsigned long long todval)
{
	unsigned long long ns;

	ns = ((todval >> 32) << 23) * 125;
	ns += ((todval & 0xffffffff) * 125) >> 9;
	return ns;
	return ((todval >> 9) * 125) + (((todval & 0x1ff) * 125) >> 9);
}

#endif
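Both the removed and the added form compute floor((todval * 125) / 512) exactly; the split merely avoids the 64-bit overflow that the naive (todval * 125) >> 9 would hit for large TOD values. A quick stand-alone check against a 128-bit reference, my addition rather than part of the diff, assuming a GCC/Clang-style unsigned __int128:

#include <assert.h>
#include <stdint.h>

/* The rewritten conversion from the hunk above. */
static uint64_t tod_to_ns(uint64_t todval)
{
	return ((todval >> 9) * 125) + (((todval & 0x1ff) * 125) >> 9);
}

int main(void)
{
	const uint64_t samples[] = {
		0, 1, 0x1ff, 0x200, 0xffffffffULL,
		0x123456789abcdef0ULL, UINT64_MAX
	};

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		/* Exact (todval * 125) >> 9, computed without overflow. */
		unsigned __int128 exact =
			((unsigned __int128)samples[i] * 125) >> 9;

		assert(tod_to_ns(samples[i]) == (uint64_t)exact);
	}
	return 0;
}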
@ -313,7 +313,9 @@
#define __NR_copy_file_range	375
#define __NR_preadv2		376
#define __NR_pwritev2		377
#define NR_syscalls 378
/* Number 378 is reserved for guarded storage */
#define __NR_statx		379
#define NR_syscalls 380

/*
 * There are some system calls that are not present on 64 bit, some

@ -178,3 +178,4 @@ COMPAT_SYSCALL_WRAP3(getpeername, int, fd, struct sockaddr __user *, usockaddr,
COMPAT_SYSCALL_WRAP6(sendto, int, fd, void __user *, buff, size_t, len, unsigned int, flags, struct sockaddr __user *, addr, int, addr_len);
COMPAT_SYSCALL_WRAP3(mlock2, unsigned long, start, size_t, len, int, flags);
COMPAT_SYSCALL_WRAP6(copy_file_range, int, fd_in, loff_t __user *, off_in, int, fd_out, loff_t __user *, off_out, size_t, len, unsigned int, flags);
COMPAT_SYSCALL_WRAP5(statx, int, dfd, const char __user *, path, unsigned, flags, unsigned, mask, struct statx __user *, buffer);

@ -490,7 +490,7 @@ ENTRY(pgm_check_handler)
	jnz	.Lpgm_svcper		# -> single stepped svc
1:	CHECK_STACK	STACK_SIZE,__LC_SAVE_AREA_SYNC
	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
	j	3f
	j	4f
2:	UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER
	lg	%r15,__LC_KERNEL_STACK
	lgr	%r14,%r12
@ -499,8 +499,8 @@ ENTRY(pgm_check_handler)
	tm	__LC_PGM_ILC+2,0x02	# check for transaction abort
	jz	3f
	mvc	__THREAD_trap_tdb(256,%r14),0(%r13)
3:	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	stg	%r10,__THREAD_last_break(%r14)
3:	stg	%r10,__THREAD_last_break(%r14)
4:	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	stmg	%r0,%r7,__PT_R0(%r11)
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
	stmg	%r8,%r9,__PT_PSW(%r11)
@ -509,14 +509,14 @@ ENTRY(pgm_check_handler)
	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
	stg	%r10,__PT_ARGS(%r11)
	tm	__LC_PGM_ILC+3,0x80	# check for per exception
	jz	4f
	jz	5f
	tmhh	%r8,0x0001		# kernel per event ?
	jz	.Lpgm_kprobe
	oi	__PT_FLAGS+7(%r11),_PIF_PER_TRAP
	mvc	__THREAD_per_address(8,%r14),__LC_PER_ADDRESS
	mvc	__THREAD_per_cause(2,%r14),__LC_PER_CODE
	mvc	__THREAD_per_paid(1,%r14),__LC_PER_ACCESS_ID
4:	REENABLE_IRQS
5:	REENABLE_IRQS
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	larl	%r1,pgm_check_table
	llgh	%r10,__PT_INT_CODE+2(%r11)

@ -564,6 +564,8 @@ static struct kset *ipl_kset;

static void __ipl_run(void *unused)
{
	if (MACHINE_IS_LPAR && ipl_info.type == IPL_TYPE_CCW)
		diag308(DIAG308_LOAD_NORMAL_DUMP, NULL);
	diag308(DIAG308_LOAD_CLEAR, NULL);
	if (MACHINE_IS_VM)
		__cpcmd("IPL", NULL, 0, NULL);

@ -124,7 +124,10 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long new_stackp,
	clear_tsk_thread_flag(p, TIF_SINGLE_STEP);
	/* Initialize per thread user and system timer values */
	p->thread.user_timer = 0;
	p->thread.guest_timer = 0;
	p->thread.system_timer = 0;
	p->thread.hardirq_timer = 0;
	p->thread.softirq_timer = 0;

	frame->sf.back_chain = 0;
	/* new return point is ret_from_fork */

@ -386,3 +386,5 @@ SYSCALL(sys_mlock2,compat_sys_mlock2)
SYSCALL(sys_copy_file_range,compat_sys_copy_file_range) /* 375 */
SYSCALL(sys_preadv2,compat_sys_preadv2)
SYSCALL(sys_pwritev2,compat_sys_pwritev2)
NI_SYSCALL
SYSCALL(sys_statx,compat_sys_statx)

@ -111,7 +111,7 @@ static inline u64 scale_vtime(u64 vtime)
}

static void account_system_index_scaled(struct task_struct *p,
					cputime_t cputime, cputime_t scaled,
					u64 cputime, u64 scaled,
					enum cpu_usage_stat index)
{
	p->stimescaled += cputime_to_nsecs(scaled);

@ -608,12 +608,29 @@ void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
bool test_and_clear_guest_dirty(struct mm_struct *mm, unsigned long addr)
{
	spinlock_t *ptl;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pgste_t pgste;
	pte_t *ptep;
	pte_t pte;
	bool dirty;

	ptep = get_locked_pte(mm, addr, &ptl);
	pgd = pgd_offset(mm, addr);
	pud = pud_alloc(mm, pgd, addr);
	if (!pud)
		return false;
	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		return false;
	/* We can't run guests backed by huge pages, but userspace can
	 * still set them up and then try to migrate them without any
	 * migration support.
	 */
	if (pmd_large(*pmd))
		return true;

	ptep = pte_alloc_map_lock(mm, pmd, addr, &ptl);
	if (unlikely(!ptep))
		return false;
Some files were not shown because too many files have changed in this diff.