Merge branch 'linus' into perf/core, to pick up fixes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit f446474889
@@ -410,7 +410,7 @@ argument is passed to the kernel in the command line.
 That only is supported in some configurations, though (for example, if
 the `HWP feature is enabled in the processor <Active Mode With HWP_>`_,
 the operation mode of the driver cannot be changed), and if it is not
-supported in the current configuration, writes to this attribute with
+supported in the current configuration, writes to this attribute will
 fail with an appropriate error.

 Interpretation of Policy Attributes
@@ -1,3 +1,4 @@
 ==============================================================
 Linux* Base Driver for the Intel(R) PRO/100 Family of Adapters
 ==============================================================

@@ -86,83 +87,84 @@ Event Log Message Level: The driver uses the message level flag to log events
 Additional Configurations
 =========================

 Configuring the Driver on Different Distributions
 -------------------------------------------------

-Configuring a network driver to load properly when the system is started is
-distribution dependent. Typically, the configuration process involves adding
-an alias line to /etc/modprobe.d/*.conf as well as editing other system
-startup scripts and/or configuration files. Many popular Linux
-distributions ship with tools to make these changes for you. To learn the
-proper way to configure a network device for your system, refer to your
-distribution documentation. If during this process you are asked for the
-driver or module name, the name for the Linux Base Driver for the Intel
-PRO/100 Family of Adapters is e100.
+Configuring a network driver to load properly when the system is started
+is distribution dependent. Typically, the configuration process involves
+adding an alias line to /etc/modprobe.d/*.conf as well as editing other
+system startup scripts and/or configuration files. Many popular Linux
+distributions ship with tools to make these changes for you. To learn
+the proper way to configure a network device for your system, refer to
+your distribution documentation. If during this process you are asked
+for the driver or module name, the name for the Linux Base Driver for
+the Intel PRO/100 Family of Adapters is e100.

-As an example, if you install the e100 driver for two PRO/100 adapters
-(eth0 and eth1), add the following to a configuration file in /etc/modprobe.d/
+As an example, if you install the e100 driver for two PRO/100 adapters
+(eth0 and eth1), add the following to a configuration file in
+/etc/modprobe.d/::

     alias eth0 e100
     alias eth1 e100

 Viewing Link Messages
 ---------------------
-In order to see link messages and other Intel driver information on your
-console, you must set the dmesg level up to six. This can be done by
-entering the following on the command line before loading the e100 driver::
+
+In order to see link messages and other Intel driver information on your
+console, you must set the dmesg level up to six. This can be done by
+entering the following on the command line before loading the e100
+driver::

     dmesg -n 6

 If you wish to see all messages issued by the driver, including debug
 messages, set the dmesg level to eight.

 NOTE: This setting is not saved across reboots.

 ethtool
 -------

 The driver utilizes the ethtool interface for driver configuration and
 diagnostics, as well as displaying statistical information. The ethtool
 version 1.6 or later is required for this functionality.

 The latest release of ethtool can be found from
 https://www.kernel.org/pub/software/network/ethtool/

 Enabling Wake on LAN* (WoL)
 ---------------------------
-WoL is provided through the ethtool* utility. For instructions on
-enabling WoL with ethtool, refer to the ethtool man page. WoL will be
-enabled on the system during the next shut down or reboot. For this
-driver version, in order to enable WoL, the e100 driver must be loaded
-when shutting down or rebooting the system.
+WoL is provided through the ethtool* utility. For instructions on enabling
+WoL with ethtool, refer to the ethtool man page.
+
+WoL will be enabled on the system during the next shut down or reboot. For
+this driver version, in order to enable WoL, the e100 driver must be
+loaded when shutting down or rebooting the system.

 NAPI
 ----

 NAPI (Rx polling mode) is supported in the e100 driver.

-See https://wiki.linuxfoundation.org/networking/napi for more
-information on NAPI.
+See https://wiki.linuxfoundation.org/networking/napi for more information
+on NAPI.

 Multiple Interfaces on Same Ethernet Broadcast Network
 ------------------------------------------------------

-Due to the default ARP behavior on Linux, it is not possible to have one
-system on two IP networks in the same Ethernet broadcast domain
-(non-partitioned switch) behave as expected. All Ethernet interfaces
-will respond to IP traffic for any IP address assigned to the system.
-This results in unbalanced receive traffic.
+Due to the default ARP behavior on Linux, it is not possible to have
+one system on two IP networks in the same Ethernet broadcast domain
+(non-partitioned switch) behave as expected. All Ethernet interfaces
+will respond to IP traffic for any IP address assigned to the system.
+This results in unbalanced receive traffic.

 If you have multiple interfaces in a server, either turn on ARP
 filtering by

 (1) entering:: echo 1 > /proc/sys/net/ipv4/conf/all/arp_filter
     (this only works if your kernel's version is higher than 2.4.5), or

 (2) installing the interfaces in separate broadcast domains (either
     in different switches or in a switch partitioned to VLANs).


 Support
@@ -1,3 +1,4 @@
 ===========================================================
 Linux* Base Driver for Intel(R) Ethernet Network Connection
 ===========================================================

@@ -354,57 +355,58 @@ previously mentioned to force the adapter to the same speed and duplex.
 Additional Configurations
 =========================

 Jumbo Frames
 ------------
-Jumbo Frames support is enabled by changing the MTU to a value larger than
-the default of 1500. Use the ifconfig command to increase the MTU size.
-For example::
+Jumbo Frames support is enabled by changing the MTU to a value larger
+than the default of 1500. Use the ifconfig command to increase the MTU
+size. For example::

     ifconfig eth<x> mtu 9000 up

 This setting is not saved across reboots. It can be made permanent if
 you add::

     MTU=9000

 to the file /etc/sysconfig/network-scripts/ifcfg-eth<x>. This example
 applies to the Red Hat distributions; other distributions may store this
 setting in a different location.

-Notes:
-Degradation in throughput performance may be observed in some Jumbo frames
-environments. If this is observed, increasing the application's socket buffer
-size and/or increasing the /proc/sys/net/ipv4/tcp_*mem entry values may help.
-See the specific application manual and /usr/src/linux*/Documentation/
-networking/ip-sysctl.txt for more details.
+Notes: Degradation in throughput performance may be observed in some
+Jumbo frames environments. If this is observed, increasing the
+application's socket buffer size and/or increasing the
+/proc/sys/net/ipv4/tcp_*mem entry values may help. See the specific
+application manual and /usr/src/linux*/Documentation/
+networking/ip-sysctl.txt for more details.

-- The maximum MTU setting for Jumbo Frames is 16110. This value coincides
-  with the maximum Jumbo Frames size of 16128.
+- The maximum MTU setting for Jumbo Frames is 16110. This value
+  coincides with the maximum Jumbo Frames size of 16128.

-- Using Jumbo frames at 10 or 100 Mbps is not supported and may result in
-  poor performance or loss of link.
+- Using Jumbo frames at 10 or 100 Mbps is not supported and may result
+  in poor performance or loss of link.

-- Adapters based on the Intel(R) 82542 and 82573V/E controller do not
-  support Jumbo Frames. These correspond to the following product names:
-  Intel(R) PRO/1000 Gigabit Server Adapter
-  Intel(R) PRO/1000 PM Network Connection
+- Adapters based on the Intel(R) 82542 and 82573V/E controller do not
+  support Jumbo Frames. These correspond to the following product names:
+  Intel(R) PRO/1000 Gigabit Server Adapter Intel(R) PRO/1000 PM Network
+  Connection

 ethtool
 -------
 The driver utilizes the ethtool interface for driver configuration and
 diagnostics, as well as displaying statistical information. The ethtool
 version 1.6 or later is required for this functionality.

 The latest release of ethtool can be found from
 https://www.kernel.org/pub/software/network/ethtool/

 Enabling Wake on LAN* (WoL)
 ---------------------------
 WoL is configured through the ethtool* utility.

 WoL will be enabled on the system during the next shut down or reboot.
 For this driver version, in order to enable WoL, the e1000 driver must be
 loaded when shutting down or rebooting the system.

 Support
 =======
@@ -48,7 +48,7 @@ void strp_pause(struct strparser *strp)
     Temporarily pause a stream parser. Message parsing is suspended
     and no new messages are delivered to the upper layer.

-void strp_pause(struct strparser *strp)
+void strp_unpause(struct strparser *strp)

     Unpause a paused stream parser.
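The fix above corrects a copy-paste error: the description of strp_unpause() was headed by a second strp_pause() prototype. As a minimal usage sketch of the pair — the my_ctx structure and my_swap_state() helper are hypothetical, only strp_pause()/strp_unpause() are the real API::

    /* Quiesce the parser, swap upper-layer state, then resume. */
    static void my_reconfigure(struct my_ctx *ctx)
    {
            strp_pause(&ctx->strp);      /* no new messages are delivered */
            my_swap_state(ctx);          /* hypothetical helper */
            strp_unpause(&ctx->strp);    /* delivery resumes */
    }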
@@ -1729,35 +1729,35 @@ If a variable isn't a key variable or prefixed with 'vals=', the
 associated event field will be saved in a variable but won't be summed
 as a value:

-  # echo 'hist:keys=next_pid:ts1=common_timestamp ... >> event/trigger
+  # echo 'hist:keys=next_pid:ts1=common_timestamp ...' >> event/trigger

 Multiple variables can be assigned at the same time. The below would
 result in both ts0 and b being created as variables, with both
 common_timestamp and field1 additionally being summed as values:

-  # echo 'hist:keys=pid:vals=$ts0,$b:ts0=common_timestamp,b=field1 ... >> \
+  # echo 'hist:keys=pid:vals=$ts0,$b:ts0=common_timestamp,b=field1 ...' >> \
       event/trigger

 Note that variable assignments can appear either preceding or
 following their use. The command below behaves identically to the
 command above:

-  # echo 'hist:keys=pid:ts0=common_timestamp,b=field1:vals=$ts0,$b ... >> \
+  # echo 'hist:keys=pid:ts0=common_timestamp,b=field1:vals=$ts0,$b ...' >> \
       event/trigger

 Any number of variables not bound to a 'vals=' prefix can also be
 assigned by simply separating them with colons. Below is the same
 thing but without the values being summed in the histogram:

-  # echo 'hist:keys=pid:ts0=common_timestamp:b=field1 ... >> event/trigger
+  # echo 'hist:keys=pid:ts0=common_timestamp:b=field1 ...' >> event/trigger

 Variables set as above can be referenced and used in expressions on
 another event.

 For example, here's how a latency can be calculated:

-  # echo 'hist:keys=pid,prio:ts0=common_timestamp ... >> event1/trigger
-  # echo 'hist:keys=next_pid:wakeup_lat=common_timestamp-$ts0 ... >> event2/trigger
+  # echo 'hist:keys=pid,prio:ts0=common_timestamp ...' >> event1/trigger
+  # echo 'hist:keys=next_pid:wakeup_lat=common_timestamp-$ts0 ...' >> event2/trigger

 In the first line above, the event's timestamp is saved into the
 variable ts0. In the next line, ts0 is subtracted from the second
@@ -1766,7 +1766,7 @@ yet another variable, 'wakeup_lat'. The hist trigger below in turn
 makes use of the wakeup_lat variable to compute a combined latency
 using the same key and variable from yet another event:

-  # echo 'hist:key=pid:wakeupswitch_lat=$wakeup_lat+$switchtime_lat ... >> event3/trigger
+  # echo 'hist:key=pid:wakeupswitch_lat=$wakeup_lat+$switchtime_lat ...' >> event3/trigger

 2.2.2 Synthetic Events
 ----------------------
@@ -1807,10 +1807,11 @@ the command that defined it with a '!':
 At this point, there isn't yet an actual 'wakeup_latency' event
 instantiated in the event subsystem - for this to happen, a 'hist
 trigger action' needs to be instantiated and bound to actual fields
-and variables defined on other events (see Section 6.3.3 below).
+and variables defined on other events (see Section 2.2.3 below on
+how that is done using hist trigger 'onmatch' action). Once that is
+done, the 'wakeup_latency' synthetic event instance is created.

-Once that is done, an event instance is created, and a histogram can
-be defined using it:
+A histogram can now be defined for the new synthetic event:

   # echo 'hist:keys=pid,prio,lat.log2:sort=pid,lat' >> \
         /sys/kernel/debug/tracing/events/synthetic/wakeup_latency/trigger
@@ -1960,7 +1961,7 @@ hist trigger specification.
     back to that pid, the timestamp difference is calculated. If the
     resulting latency, stored in wakeup_lat, exceeds the current
     maximum latency, the values specified in the save() fields are
-    recoreded:
+    recorded:

     # echo 'hist:keys=pid:ts0=common_timestamp.usecs \
             if comm=="cyclictest"' >> \
@@ -4610,7 +4610,7 @@ This capability indicates that kvm will implement the interfaces to handle
 reset, migration and nested KVM for branch prediction blocking. The stfle
 facility 82 should not be provided to the guest without this capability.

-8.14 KVM_CAP_HYPERV_TLBFLUSH
+8.18 KVM_CAP_HYPERV_TLBFLUSH

 Architectures: x86
@@ -9882,6 +9882,7 @@ M: Andrew Lunn <andrew@lunn.ch>
 M: Vivien Didelot <vivien.didelot@savoirfairelinux.com>
 M: Florian Fainelli <f.fainelli@gmail.com>
 S: Maintained
+F: Documentation/devicetree/bindings/net/dsa/
 F: net/dsa/
 F: include/net/dsa.h
 F: include/linux/dsa/
@@ -15572,6 +15573,7 @@ M: x86@kernel.org
 L: linux-kernel@vger.kernel.org
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/core
 S: Maintained
+F: Documentation/devicetree/bindings/x86/
 F: Documentation/x86/
 F: arch/x86/
Makefile
@@ -2,7 +2,7 @@
 VERSION = 4
 PATCHLEVEL = 18
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc2
 NAME = Merciless Moray

 # *DOCUMENTATION*
@@ -555,11 +555,6 @@ config SMP

 	  If you don't know what to do here, say N.

-config HAVE_DEC_LOCK
-	bool
-	depends on SMP
-	default y
-
 config NR_CPUS
 	int "Maximum number of CPUs (2-32)"
 	range 2 32
@@ -35,8 +35,6 @@ lib-y = __divqu.o __remqu.o __divlu.o __remlu.o \
 	callback_srm.o srm_puts.o srm_printk.o \
 	fls.o

-lib-$(CONFIG_SMP) += dec_and_lock.o
-
 # The division routines are built from single source, with different defines.
 AFLAGS___divqu.o = -DDIV
 AFLAGS___remqu.o = -DREM
@@ -1,44 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * arch/alpha/lib/dec_and_lock.c
- *
- * ll/sc version of atomic_dec_and_lock()
- *
- */
-
-#include <linux/spinlock.h>
-#include <linux/atomic.h>
-#include <linux/export.h>
-
-  asm (".text				\n\
-	.global _atomic_dec_and_lock	\n\
-	.ent _atomic_dec_and_lock	\n\
-	.align	4			\n\
-_atomic_dec_and_lock:			\n\
-	.prologue 0			\n\
-1:	ldl_l	$1, 0($16)		\n\
-	subl	$1, 1, $1		\n\
-	beq	$1, 2f			\n\
-	stl_c	$1, 0($16)		\n\
-	beq	$1, 4f			\n\
-	mb				\n\
-	clr	$0			\n\
-	ret				\n\
-2:	br	$29, 3f			\n\
-3:	ldgp	$29, 0($29)		\n\
-	br	$atomic_dec_and_lock_1..ng	\n\
-	.subsection 2			\n\
-4:	br	1b			\n\
-	.previous			\n\
-	.end _atomic_dec_and_lock");
-
-static int __used atomic_dec_and_lock_1(atomic_t *atomic, spinlock_t *lock)
-{
-	/* Slow path */
-	spin_lock(lock);
-	if (atomic_dec_and_test(atomic))
-		return 1;
-	spin_unlock(lock);
-	return 0;
-}
-EXPORT_SYMBOL(_atomic_dec_and_lock);
@@ -544,7 +544,7 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
 	 * Increment event counter and perform fixup for the pre-signal
 	 * frame.
 	 */
-	rseq_signal_deliver(regs);
+	rseq_signal_deliver(ksig, regs);

 	/*
 	 * Set up the stack frame
@@ -666,7 +666,7 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
 		} else {
 			clear_thread_flag(TIF_NOTIFY_RESUME);
 			tracehook_notify_resume(regs);
-			rseq_handle_notify_resume(regs);
+			rseq_handle_notify_resume(NULL, regs);
 		}
 	}
 	local_irq_disable();
@@ -59,6 +59,9 @@ struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata;

 static __read_mostly unsigned int xen_events_irq;

+uint32_t xen_start_flags;
+EXPORT_SYMBOL(xen_start_flags);
+
 int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
 			       unsigned long addr,
 			       xen_pfn_t *gfn, int nr,
@@ -293,9 +296,7 @@ void __init xen_early_init(void)
 	xen_setup_features();

 	if (xen_feature(XENFEAT_dom0))
-		xen_start_info->flags |= SIF_INITDOMAIN|SIF_PRIVILEGED;
-	else
-		xen_start_info->flags &= ~(SIF_INITDOMAIN|SIF_PRIVILEGED);
+		xen_start_flags |= SIF_INITDOMAIN|SIF_PRIVILEGED;

 	if (!console_set_on_cmdline && !xen_initial_domain())
 		add_preferred_console("hvc", 0, NULL);
@@ -223,8 +223,8 @@ static int ctr_encrypt(struct skcipher_request *req)
 		kernel_neon_begin();
 		aes_ctr_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
 				(u8 *)ctx->key_enc, rounds, blocks, walk.iv);
-		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
 		kernel_neon_end();
+		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
 	if (walk.nbytes) {
 		u8 __aligned(8) tail[AES_BLOCK_SIZE];
@@ -306,6 +306,7 @@ struct kvm_vcpu_arch {
 #define KVM_ARM64_FP_ENABLED		(1 << 1) /* guest FP regs loaded */
 #define KVM_ARM64_FP_HOST		(1 << 2) /* host FP regs loaded */
 #define KVM_ARM64_HOST_SVE_IN_USE	(1 << 3) /* backup for host TIF_SVE */
+#define KVM_ARM64_HOST_SVE_ENABLED	(1 << 4) /* SVE enabled for EL0 */

 #define vcpu_gp_regs(v)		(&(v)->arch.ctxt.gp_regs)
@@ -728,6 +728,17 @@ asm(
 	asm volatile("msr_s " __stringify(r) ", %x0" : : "rZ" (__val)); \
 } while (0)

+/*
+ * Modify bits in a sysreg. Bits in the clear mask are zeroed, then bits in the
+ * set mask are set. Other bits are left as-is.
+ */
+#define sysreg_clear_set(sysreg, clear, set) do {		\
+	u64 __scs_val = read_sysreg(sysreg);			\
+	u64 __scs_new = (__scs_val & ~(u64)(clear)) | (set);	\
+	if (__scs_new != __scs_val)				\
+		write_sysreg(__scs_new, sysreg);		\
+} while (0)
+
 static inline void config_sctlr_el1(u32 clear, u32 set)
 {
 	u32 val;
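sysreg_clear_set() is a read-modify-write helper that skips the register write when no bits actually change. A minimal usage sketch, mirroring the CPACR_EL1 call sites added in the KVM fpsimd hunk further down in this diff::

	/* Set the EL0 SVE enable bit, leaving other CPACR_EL1 bits intact */
	sysreg_clear_set(CPACR_EL1, 0, CPACR_EL1_ZEN_EL0EN);

	/* Clear the same bit; the write is elided if the value is unchanged */
	sysreg_clear_set(CPACR_EL1, CPACR_EL1_ZEN_EL0EN, 0);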
@@ -937,7 +937,7 @@ static int __init parse_kpti(char *str)
 	__kpti_forced = enabled ? 1 : -1;
 	return 0;
 }
-__setup("kpti=", parse_kpti);
+early_param("kpti", parse_kpti);
 #endif	/* CONFIG_UNMAP_KERNEL_AT_EL0 */

 #ifdef CONFIG_ARM64_HW_AFDBM
@@ -179,7 +179,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
  * This is the secondary CPU boot entry. We're using this CPUs
  * idle thread stack, but a set of temporary page tables.
  */
-asmlinkage void secondary_start_kernel(void)
+asmlinkage notrace void secondary_start_kernel(void)
 {
 	u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
 	struct mm_struct *mm = &init_mm;
@@ -5,13 +5,14 @@
  * Copyright 2018 Arm Limited
  * Author: Dave Martin <Dave.Martin@arm.com>
  */
-#include <linux/bottom_half.h>
+#include <linux/irqflags.h>
 #include <linux/sched.h>
 #include <linux/thread_info.h>
 #include <linux/kvm_host.h>
 #include <asm/kvm_asm.h>
 #include <asm/kvm_host.h>
 #include <asm/kvm_mmu.h>
+#include <asm/sysreg.h>

 /*
  * Called on entry to KVM_RUN unless this vcpu previously ran at least
@@ -61,10 +62,16 @@ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu)
 {
 	BUG_ON(!current->mm);

-	vcpu->arch.flags &= ~(KVM_ARM64_FP_ENABLED | KVM_ARM64_HOST_SVE_IN_USE);
+	vcpu->arch.flags &= ~(KVM_ARM64_FP_ENABLED |
+			      KVM_ARM64_HOST_SVE_IN_USE |
+			      KVM_ARM64_HOST_SVE_ENABLED);
 	vcpu->arch.flags |= KVM_ARM64_FP_HOST;

 	if (test_thread_flag(TIF_SVE))
 		vcpu->arch.flags |= KVM_ARM64_HOST_SVE_IN_USE;
+
+	if (read_sysreg(cpacr_el1) & CPACR_EL1_ZEN_EL0EN)
+		vcpu->arch.flags |= KVM_ARM64_HOST_SVE_ENABLED;
 }

 /*
@@ -92,19 +99,30 @@ void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu)
  */
 void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu)
 {
-	local_bh_disable();
+	unsigned long flags;

-	update_thread_flag(TIF_SVE,
-			   vcpu->arch.flags & KVM_ARM64_HOST_SVE_IN_USE);
+	local_irq_save(flags);

 	if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED) {
 		/* Clean guest FP state to memory and invalidate cpu view */
 		fpsimd_save();
 		fpsimd_flush_cpu_state();
-	} else if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) {
-		/* Ensure user trap controls are correctly restored */
-		fpsimd_bind_task_to_cpu();
+	} else if (system_supports_sve()) {
+		/*
+		 * The FPSIMD/SVE state in the CPU has not been touched, and we
+		 * have SVE (and VHE): CPACR_EL1 (alias CPTR_EL2) has been
+		 * reset to CPACR_EL1_DEFAULT by the Hyp code, disabling SVE
+		 * for EL0. To avoid spurious traps, restore the trap state
+		 * seen by kvm_arch_vcpu_load_fp():
+		 */
+		if (vcpu->arch.flags & KVM_ARM64_HOST_SVE_ENABLED)
+			sysreg_clear_set(CPACR_EL1, 0, CPACR_EL1_ZEN_EL0EN);
+		else
+			sysreg_clear_set(CPACR_EL1, CPACR_EL1_ZEN_EL0EN, 0);
 	}

-	local_bh_enable();
+	update_thread_flag(TIF_SVE,
+			   vcpu->arch.flags & KVM_ARM64_HOST_SVE_IN_USE);
+
+	local_irq_restore(flags);
 }
@@ -583,13 +583,14 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
 						    size >> PAGE_SHIFT);
 			return NULL;
 		}
-		if (!coherent)
-			__dma_flush_area(page_to_virt(page), iosize);
-
 		addr = dma_common_contiguous_remap(page, size, VM_USERMAP,
 						   prot,
 						   __builtin_return_address(0));
-		if (!addr) {
+		if (addr) {
+			memset(addr, 0, size);
+			if (!coherent)
+				__dma_flush_area(page_to_virt(page), iosize);
+		} else {
 			iommu_dma_unmap_page(dev, *handle, iosize, 0, attrs);
 			dma_release_from_contiguous(dev, page,
 						    size >> PAGE_SHIFT);
@@ -217,8 +217,9 @@ ENDPROC(idmap_cpu_replace_ttbr1)

 	.macro	__idmap_kpti_put_pgtable_ent_ng, type
 	orr	\type, \type, #PTE_NG		// Same bit for blocks and pages
-	str	\type, [cur_\()\type\()p]	// Update the entry and ensure it
-	dc	civac, cur_\()\type\()p		// is visible to all CPUs.
+	str	\type, [cur_\()\type\()p]	// Update the entry and ensure
+	dmb	sy				// that it is visible to all
+	dc	civac, cur_\()\type\()p		// CPUs.
 	.endm

 /*
@@ -65,6 +65,7 @@ config MIPS
 	select HAVE_OPROFILE
 	select HAVE_PERF_EVENTS
 	select HAVE_REGS_AND_STACK_ACCESS_API
+	select HAVE_RSEQ
 	select HAVE_STACKPROTECTOR
 	select HAVE_SYSCALL_TRACEPOINTS
 	select HAVE_VIRT_CPU_ACCOUNTING_GEN if 64BIT || !SMP
@@ -34,7 +34,7 @@
 #define PB44_KEYS_DEBOUNCE_INTERVAL	(3 * PB44_KEYS_POLL_INTERVAL)

 static struct gpiod_lookup_table pb44_i2c_gpiod_table = {
-	.dev_id = "i2c-gpio",
+	.dev_id = "i2c-gpio.0",
 	.table = {
 		GPIO_LOOKUP_IDX("ath79-gpio", PB44_GPIO_I2C_SDA,
 				NULL, 0, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
@@ -212,6 +212,12 @@ static int __init bcm47xx_cpu_fixes(void)
 		 */
 		if (bcm47xx_bus.bcma.bus.chipinfo.id == BCMA_CHIP_ID_BCM4706)
 			cpu_wait = NULL;
+
+		/*
+		 * BCM47XX Erratum "R10: PCIe Transactions Periodically Fail"
+		 * Enable ExternalSync for sync instruction to take effect
+		 */
+		set_c0_config7(MIPS_CONF7_ES);
 		break;
 #endif
 	}
@@ -414,6 +414,8 @@ static inline type pfx##in##bwlq##p(unsigned long port)	\
 	__val = *__addr;					\
 	slow;							\
 								\
+	/* prevent prefetching of coherent DMA data prematurely */ \
+	rmb();							\
 	return pfx##ioswab##bwlq(__addr, __val);		\
 }
@@ -681,6 +681,8 @@
 #define MIPS_CONF7_WII		(_ULCAST_(1) << 31)

 #define MIPS_CONF7_RPS		(_ULCAST_(1) << 2)
+/* ExternalSync */
+#define MIPS_CONF7_ES		(_ULCAST_(1) << 8)

 #define MIPS_CONF7_IAR		(_ULCAST_(1) << 10)
 #define MIPS_CONF7_AR		(_ULCAST_(1) << 16)
@@ -2765,6 +2767,7 @@ __BUILD_SET_C0(status)
 __BUILD_SET_C0(cause)
 __BUILD_SET_C0(config)
 __BUILD_SET_C0(config5)
+__BUILD_SET_C0(config7)
 __BUILD_SET_C0(intcontrol)
 __BUILD_SET_C0(intctl)
 __BUILD_SET_C0(srsmap)
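The __BUILD_SET_C0(config7) line generates the set_c0_config7()/clear_c0_config7()/change_c0_config7() accessors for the CP0 Config7 register; the bcm47xx erratum fix earlier in this diff depends on the first of these::

	/* Enable ExternalSync so the sync instruction takes effect (R10 erratum) */
	set_c0_config7(MIPS_CONF7_ES);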
@@ -388,17 +388,19 @@
 #define __NR_pkey_alloc			(__NR_Linux + 364)
 #define __NR_pkey_free			(__NR_Linux + 365)
 #define __NR_statx			(__NR_Linux + 366)
+#define __NR_rseq			(__NR_Linux + 367)
+#define __NR_io_pgetevents		(__NR_Linux + 368)


 /*
  * Offset of the last Linux o32 flavoured syscall
  */
-#define __NR_Linux_syscalls		366
+#define __NR_Linux_syscalls		368

 #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */

 #define __NR_O32_Linux			4000
-#define __NR_O32_Linux_syscalls		366
+#define __NR_O32_Linux_syscalls		368

 #if _MIPS_SIM == _MIPS_SIM_ABI64

@@ -733,16 +735,18 @@
 #define __NR_pkey_alloc			(__NR_Linux + 324)
 #define __NR_pkey_free			(__NR_Linux + 325)
 #define __NR_statx			(__NR_Linux + 326)
+#define __NR_rseq			(__NR_Linux + 327)
+#define __NR_io_pgetevents		(__NR_Linux + 328)

 /*
  * Offset of the last Linux 64-bit flavoured syscall
  */
-#define __NR_Linux_syscalls		326
+#define __NR_Linux_syscalls		328

 #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */

 #define __NR_64_Linux			5000
-#define __NR_64_Linux_syscalls		326
+#define __NR_64_Linux_syscalls		328

 #if _MIPS_SIM == _MIPS_SIM_NABI32

@@ -1081,15 +1085,17 @@
 #define __NR_pkey_alloc			(__NR_Linux + 328)
 #define __NR_pkey_free			(__NR_Linux + 329)
 #define __NR_statx			(__NR_Linux + 330)
+#define __NR_rseq			(__NR_Linux + 331)
+#define __NR_io_pgetevents		(__NR_Linux + 332)

 /*
  * Offset of the last N32 flavoured syscall
  */
-#define __NR_Linux_syscalls		330
+#define __NR_Linux_syscalls		332

 #endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */

 #define __NR_N32_Linux			6000
-#define __NR_N32_Linux_syscalls		330
+#define __NR_N32_Linux_syscalls		332

 #endif /* _UAPI_ASM_UNISTD_H */
@@ -79,6 +79,10 @@ FEXPORT(ret_from_fork)
 	jal	schedule_tail		# a0 = struct task_struct *prev

 FEXPORT(syscall_exit)
+#ifdef CONFIG_DEBUG_RSEQ
+	move	a0, sp
+	jal	rseq_syscall
+#endif
 	local_irq_disable		# make sure need_resched and
 					# signals dont change between
 					# sampling and return
@@ -141,6 +145,10 @@ work_notifysig:			# deal with pending signals and
 	j	resume_userspace_check

 FEXPORT(syscall_exit_partial)
+#ifdef CONFIG_DEBUG_RSEQ
+	move	a0, sp
+	jal	rseq_syscall
+#endif
 	local_irq_disable		# make sure need_resched doesn't
 					# change between and return
 	LONG_L	a2, TI_FLAGS($28)	# current->work
@@ -119,10 +119,20 @@ NESTED(_mcount, PT_SIZE, ra)
 EXPORT_SYMBOL(_mcount)
 	PTR_LA	t1, ftrace_stub
 	PTR_L	t2, ftrace_trace_function /* Prepare t2 for (1) */
-	bne	t1, t2, static_trace
+	beq	t1, t2, fgraph_trace
 	nop

+	MCOUNT_SAVE_REGS
+
+	move	a0, ra		/* arg1: self return address */
+	jalr	t2		/* (1) call *ftrace_trace_function */
+	 move	a1, AT		/* arg2: parent's return address */
+
+	MCOUNT_RESTORE_REGS
+
+fgraph_trace:
 #ifdef	CONFIG_FUNCTION_GRAPH_TRACER
+	PTR_LA	t1, ftrace_stub
 	PTR_L	t3, ftrace_graph_return
 	bne	t1, t3, ftrace_graph_caller
 	nop
@@ -131,24 +141,11 @@ EXPORT_SYMBOL(_mcount)
 	bne	t1, t3, ftrace_graph_caller
 	nop
 #endif
 	b	ftrace_stub
 #ifdef CONFIG_32BIT
 	addiu sp, sp, 8
 #else
 	nop
 #endif

-static_trace:
-	MCOUNT_SAVE_REGS
-
-	move	a0, ra		/* arg1: self return address */
-	jalr	t2		/* (1) call *ftrace_trace_function */
-	 move	a1, AT		/* arg2: parent's return address */
-
-	MCOUNT_RESTORE_REGS
-#ifdef CONFIG_32BIT
-	addiu sp, sp, 8
-#endif
-
 	.globl ftrace_stub
 ftrace_stub:
 	RETURN_BACK
@@ -590,3 +590,5 @@ EXPORT(sys_call_table)
 	PTR	sys_pkey_alloc
 	PTR	sys_pkey_free			/* 4365 */
 	PTR	sys_statx
+	PTR	sys_rseq
+	PTR	sys_io_pgetevents
@@ -439,4 +439,6 @@ EXPORT(sys_call_table)
 	PTR	sys_pkey_alloc
 	PTR	sys_pkey_free			/* 5325 */
 	PTR	sys_statx
+	PTR	sys_rseq
+	PTR	sys_io_pgetevents
 	.size	sys_call_table,.-sys_call_table
@@ -434,4 +434,6 @@ EXPORT(sysn32_call_table)
 	PTR	sys_pkey_alloc
 	PTR	sys_pkey_free
 	PTR	sys_statx			/* 6330 */
+	PTR	sys_rseq
+	PTR	compat_sys_io_pgetevents
 	.size	sysn32_call_table,.-sysn32_call_table
@@ -583,4 +583,6 @@ EXPORT(sys32_call_table)
 	PTR	sys_pkey_alloc
 	PTR	sys_pkey_free			/* 4365 */
 	PTR	sys_statx
+	PTR	sys_rseq
+	PTR	compat_sys_io_pgetevents
 	.size	sys32_call_table,.-sys32_call_table
@@ -801,6 +801,8 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
 		regs->regs[0] = 0;		/* Don't deal with this again. */
 	}

+	rseq_signal_deliver(regs);
+
 	if (sig_uses_siginfo(&ksig->ka, abi))
 		ret = abi->setup_rt_frame(vdso + abi->vdso->off_rt_sigreturn,
 					  ksig, regs, oldset);
@@ -868,6 +870,7 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused,
 	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
 		clear_thread_flag(TIF_NOTIFY_RESUME);
 		tracehook_notify_resume(regs);
+		rseq_handle_notify_resume(regs);
 	}

 	user_enter();
@@ -244,6 +244,7 @@ cpu-as-$(CONFIG_4xx)		+= -Wa,-m405
 cpu-as-$(CONFIG_ALTIVEC)	+= $(call as-option,-Wa$(comma)-maltivec)
 cpu-as-$(CONFIG_E200)		+= -Wa,-me200
 cpu-as-$(CONFIG_PPC_BOOK3S_64)	+= -Wa,-mpower4
+cpu-as-$(CONFIG_PPC_E500MC)	+= $(call as-option,-Wa$(comma)-me500mc)

 KBUILD_AFLAGS += $(cpu-as-y)
 KBUILD_CFLAGS += $(cpu-as-y)
@@ -108,6 +108,7 @@ static inline void pgtable_free(void *table, unsigned index_size)
 }

 #define check_pgt_cache()	do { } while (0)
+#define get_hugepd_cache_index(x)	(x)

 #ifdef CONFIG_SMP
 static inline void pgtable_free_tlb(struct mmu_gather *tlb,
@@ -49,6 +49,27 @@ static inline int hugepd_ok(hugepd_t hpd)
 }
 #define is_hugepd(hpd)		(hugepd_ok(hpd))

+/*
+ * 16M and 16G huge page directory tables are allocated from slab cache
+ *
+ */
+#define H_16M_CACHE_INDEX (PAGE_SHIFT + H_PTE_INDEX_SIZE + H_PMD_INDEX_SIZE - 24)
+#define H_16G_CACHE_INDEX \
+	(PAGE_SHIFT + H_PTE_INDEX_SIZE + H_PMD_INDEX_SIZE + H_PUD_INDEX_SIZE - 34)
+
+static inline int get_hugepd_cache_index(int index)
+{
+	switch (index) {
+	case H_16M_CACHE_INDEX:
+		return HTLB_16M_INDEX;
+	case H_16G_CACHE_INDEX:
+		return HTLB_16G_INDEX;
+	default:
+		BUG();
+	}
+	/* should not reach */
+}
+
 #else /* !CONFIG_HUGETLB_PAGE */
 static inline int pmd_huge(pmd_t pmd) { return 0; }
 static inline int pud_huge(pud_t pud) { return 0; }
@@ -45,8 +45,17 @@ static inline int hugepd_ok(hugepd_t hpd)
 {
 	return 0;
 }
+
 #define is_hugepd(pdep)			0

+/*
+ * This should never get called
+ */
+static inline int get_hugepd_cache_index(int index)
+{
+	BUG();
+}
+
 #else /* !CONFIG_HUGETLB_PAGE */
 static inline int pmd_huge(pmd_t pmd) { return 0; }
 static inline int pud_huge(pud_t pud) { return 0; }
@@ -287,6 +287,11 @@ enum pgtable_index {
 	PMD_INDEX,
 	PUD_INDEX,
 	PGD_INDEX,
+	/*
+	 * Below are used with 4k page size and hugetlb
+	 */
+	HTLB_16M_INDEX,
+	HTLB_16G_INDEX,
 };

 extern unsigned long __vmalloc_start;
@@ -8,7 +8,7 @@ extern void arch_touch_nmi_watchdog(void);
 static inline void arch_touch_nmi_watchdog(void) {}
 #endif

-#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_STACKTRACE)
+#if defined(CONFIG_NMI_IPI) && defined(CONFIG_STACKTRACE)
 extern void arch_trigger_cpumask_backtrace(const cpumask_t *mask,
 					   bool exclude_self);
 #define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace
@@ -109,6 +109,7 @@ static inline void pgtable_free(void *table, unsigned index_size)
 }

 #define check_pgt_cache()	do { } while (0)
+#define get_hugepd_cache_index(x)	(x)

 #ifdef CONFIG_SMP
 static inline void pgtable_free_tlb(struct mmu_gather *tlb,
@@ -141,6 +141,7 @@ static inline void pgtable_free(void *table, int shift)
 	}
 }

+#define get_hugepd_cache_index(x)	(x)
 #ifdef CONFIG_SMP
 static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
 {
@@ -711,7 +711,8 @@ static __init void cpufeatures_cpu_quirks(void)
 		cur_cpu_spec->cpu_features |= CPU_FTR_P9_TM_HV_ASSIST;
 		cur_cpu_spec->cpu_features |= CPU_FTR_P9_TM_XER_SO_BUG;
 		cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD2_1;
-	} else /* DD2.1 and up have DD2_1 */
+	} else if ((version & 0xffff0000) == 0x004e0000)
+		/* DD2.1 and up have DD2_1 */
 		cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD2_1;

 	if ((version & 0xffff0000) == 0x004e0000) {
@@ -700,12 +700,19 @@ EXPORT_SYMBOL(check_legacy_ioport);
 static int ppc_panic_event(struct notifier_block *this,
                              unsigned long event, void *ptr)
 {
+	/*
+	 * panic does a local_irq_disable, but we really
+	 * want interrupts to be hard disabled.
+	 */
+	hard_irq_disable();
+
 	/*
 	 * If firmware-assisted dump has been registered then trigger
 	 * firmware-assisted dump and let firmware handle everything else.
 	 */
 	crash_fadump(NULL, ptr);
-	ppc_md.panic(ptr);  /* May not return */
+	if (ppc_md.panic)
+		ppc_md.panic(ptr);  /* May not return */
 	return NOTIFY_DONE;
 }

@@ -716,7 +723,8 @@ static struct notifier_block ppc_panic_block = {

 void __init setup_panic(void)
 {
-	if (!ppc_md.panic)
+	/* PPC64 always does a hard irq disable in its panic handler */
+	if (!IS_ENABLED(CONFIG_PPC64) && !ppc_md.panic)
 		return;
 	atomic_notifier_chain_register(&panic_notifier_list, &ppc_panic_block);
 }
@@ -387,6 +387,14 @@ void early_setup_secondary(void)

 #endif /* CONFIG_SMP */

+void panic_smp_self_stop(void)
+{
+	hard_irq_disable();
+	spin_begin();
+	while (1)
+		spin_cpu_relax();
+}
+
 #if defined(CONFIG_SMP) || defined(CONFIG_KEXEC_CORE)
 static bool use_spinloop(void)
 {
@@ -134,7 +134,7 @@ static void do_signal(struct task_struct *tsk)
 	/* Re-enable the breakpoints for the signal stack */
 	thread_change_pc(tsk, tsk->thread.regs);

-	rseq_signal_deliver(tsk->thread.regs);
+	rseq_signal_deliver(&ksig, tsk->thread.regs);

 	if (is32) {
 		if (ksig.ka.sa.sa_flags & SA_SIGINFO)
@@ -170,7 +170,7 @@ void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags)
 	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
 		clear_thread_flag(TIF_NOTIFY_RESUME);
 		tracehook_notify_resume(regs);
-		rseq_handle_notify_resume(regs);
+		rseq_handle_notify_resume(NULL, regs);
 	}

 	user_enter();
@@ -600,9 +600,6 @@ static void nmi_stop_this_cpu(struct pt_regs *regs)
 	nmi_ipi_busy_count--;
 	nmi_ipi_unlock();

-	/* Remove this CPU */
-	set_cpu_online(smp_processor_id(), false);
-
 	spin_begin();
 	while (1)
 		spin_cpu_relax();
@@ -617,9 +614,6 @@ void smp_send_stop(void)

 static void stop_this_cpu(void *dummy)
 {
-	/* Remove this CPU */
-	set_cpu_online(smp_processor_id(), false);
-
 	hard_irq_disable();
 	spin_begin();
 	while (1)
@@ -196,7 +196,7 @@ save_stack_trace_tsk_reliable(struct task_struct *tsk,
 EXPORT_SYMBOL_GPL(save_stack_trace_tsk_reliable);
 #endif /* CONFIG_HAVE_RELIABLE_STACKTRACE */

-#ifdef CONFIG_PPC_BOOK3S_64
+#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_NMI_IPI)
 static void handle_backtrace_ipi(struct pt_regs *regs)
 {
 	nmi_cpu_backtrace(regs);
@@ -242,4 +242,4 @@ void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
 {
 	nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_backtrace_ipi);
 }
-#endif /* CONFIG_PPC64 */
+#endif /* defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_NMI_IPI) */
@@ -337,7 +337,8 @@ static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshif
 	if (shift >= pdshift)
 		hugepd_free(tlb, hugepte);
 	else
-		pgtable_free_tlb(tlb, hugepte, pdshift - shift);
+		pgtable_free_tlb(tlb, hugepte,
+				 get_hugepd_cache_index(pdshift - shift));
 }

 static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
@@ -409,6 +409,18 @@ static inline void pgtable_free(void *table, int index)
 	case PUD_INDEX:
 		kmem_cache_free(PGT_CACHE(PUD_CACHE_INDEX), table);
 		break;
+#if defined(CONFIG_PPC_4K_PAGES) && defined(CONFIG_HUGETLB_PAGE)
+		/* 16M hugepd directory at pud level */
+	case HTLB_16M_INDEX:
+		BUILD_BUG_ON(H_16M_CACHE_INDEX <= 0);
+		kmem_cache_free(PGT_CACHE(H_16M_CACHE_INDEX), table);
+		break;
+		/* 16G hugepd directory at the pgd level */
+	case HTLB_16G_INDEX:
+		BUILD_BUG_ON(H_16G_CACHE_INDEX <= 0);
+		kmem_cache_free(PGT_CACHE(H_16G_CACHE_INDEX), table);
+		break;
+#endif
 		/* We don't free pgd table via RCU callback */
 	default:
 		BUG();
@@ -689,22 +689,17 @@ EXPORT_SYMBOL(radix__flush_tlb_kernel_range);
 static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;
 static unsigned long tlb_local_single_page_flush_ceiling __read_mostly = POWER9_TLB_SETS_RADIX * 2;

-void radix__flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
-		     unsigned long end)
+static inline void __radix__flush_tlb_range(struct mm_struct *mm,
+					unsigned long start, unsigned long end,
+					bool flush_all_sizes)

 {
-	struct mm_struct *mm = vma->vm_mm;
 	unsigned long pid;
 	unsigned int page_shift = mmu_psize_defs[mmu_virtual_psize].shift;
 	unsigned long page_size = 1UL << page_shift;
 	unsigned long nr_pages = (end - start) >> page_shift;
 	bool local, full;

-#ifdef CONFIG_HUGETLB_PAGE
-	if (is_vm_hugetlb_page(vma))
-		return radix__flush_hugetlb_tlb_range(vma, start, end);
-#endif
-
 	pid = mm->context.id;
 	if (unlikely(pid == MMU_NO_CONTEXT))
 		return;
@@ -738,37 +733,64 @@ is_local:
 			_tlbie_pid(pid, RIC_FLUSH_TLB);
 		}
 	} else {
-		bool hflush = false;
+		bool hflush = flush_all_sizes;
+		bool gflush = flush_all_sizes;
 		unsigned long hstart, hend;
+		unsigned long gstart, gend;

-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-		hstart = (start + HPAGE_PMD_SIZE - 1) >> HPAGE_PMD_SHIFT;
-		hend = end >> HPAGE_PMD_SHIFT;
-		if (hstart < hend) {
-			hstart <<= HPAGE_PMD_SHIFT;
-			hend <<= HPAGE_PMD_SHIFT;
+		if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
 			hflush = true;
+
+		if (hflush) {
+			hstart = (start + PMD_SIZE - 1) & PMD_MASK;
+			hend = end & PMD_MASK;
+			if (hstart == hend)
+				hflush = false;
+		}
+
+		if (gflush) {
+			gstart = (start + PUD_SIZE - 1) & PUD_MASK;
+			gend = end & PUD_MASK;
+			if (gstart == gend)
+				gflush = false;
 		}
-#endif

 		asm volatile("ptesync": : :"memory");
 		if (local) {
 			__tlbiel_va_range(start, end, pid, page_size, mmu_virtual_psize);
 			if (hflush)
 				__tlbiel_va_range(hstart, hend, pid,
-						HPAGE_PMD_SIZE, MMU_PAGE_2M);
+						PMD_SIZE, MMU_PAGE_2M);
+			if (gflush)
+				__tlbiel_va_range(gstart, gend, pid,
+						PUD_SIZE, MMU_PAGE_1G);
 			asm volatile("ptesync": : :"memory");
 		} else {
 			__tlbie_va_range(start, end, pid, page_size, mmu_virtual_psize);
 			if (hflush)
 				__tlbie_va_range(hstart, hend, pid,
-						HPAGE_PMD_SIZE, MMU_PAGE_2M);
+						PMD_SIZE, MMU_PAGE_2M);
+			if (gflush)
+				__tlbie_va_range(gstart, gend, pid,
+						PUD_SIZE, MMU_PAGE_1G);
 			fixup_tlbie();
 			asm volatile("eieio; tlbsync; ptesync": : :"memory");
 		}
 	}
 	preempt_enable();
 }

+void radix__flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+		     unsigned long end)
+
+{
+#ifdef CONFIG_HUGETLB_PAGE
+	if (is_vm_hugetlb_page(vma))
+		return radix__flush_hugetlb_tlb_range(vma, start, end);
+#endif
+
+	__radix__flush_tlb_range(vma->vm_mm, start, end, false);
+}
+EXPORT_SYMBOL(radix__flush_tlb_range);

 static int radix_get_mmu_psize(int page_size)
@@ -837,6 +859,8 @@ void radix__tlb_flush(struct mmu_gather *tlb)
 	int psize = 0;
 	struct mm_struct *mm = tlb->mm;
 	int page_size = tlb->page_size;
+	unsigned long start = tlb->start;
+	unsigned long end = tlb->end;

 	/*
 	 * if page size is not something we understand, do a full mm flush
@@ -847,15 +871,45 @@ void radix__tlb_flush(struct mmu_gather *tlb)
 	 */
 	if (tlb->fullmm) {
 		__flush_all_mm(mm, true);
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
+	} else if (mm_tlb_flush_nested(mm)) {
+		/*
+		 * If there is a concurrent invalidation that is clearing ptes,
+		 * then it's possible this invalidation will miss one of those
+		 * cleared ptes and miss flushing the TLB. If this invalidate
+		 * returns before the other one flushes TLBs, that can result
+		 * in it returning while there are still valid TLBs inside the
+		 * range to be invalidated.
+		 *
+		 * See mm/memory.c:tlb_finish_mmu() for more details.
+		 *
+		 * The solution to this is ensure the entire range is always
+		 * flushed here. The problem for powerpc is that the flushes
+		 * are page size specific, so this "forced flush" would not
+		 * do the right thing if there are a mix of page sizes in
+		 * the range to be invalidated. So use __flush_tlb_range
+		 * which invalidates all possible page sizes in the range.
+		 *
+		 * PWC flush probably is not be required because the core code
+		 * shouldn't free page tables in this path, but accounting
+		 * for the possibility makes us a bit more robust.
+		 *
+		 * need_flush_all is an uncommon case because page table
+		 * teardown should be done with exclusive locks held (but
+		 * after locks are dropped another invalidate could come
+		 * in), it could be optimized further if necessary.
+		 */
+		if (!tlb->need_flush_all)
+			__radix__flush_tlb_range(mm, start, end, true);
+		else
+			radix__flush_all_mm(mm);
+#endif
 	} else if ( (psize = radix_get_mmu_psize(page_size)) == -1) {
 		if (!tlb->need_flush_all)
 			radix__flush_tlb_mm(mm);
 		else
 			radix__flush_all_mm(mm);
 	} else {
-		unsigned long start = tlb->start;
-		unsigned long end = tlb->end;
-
 		if (!tlb->need_flush_all)
 			radix__flush_tlb_range_psize(mm, start, end, psize);
 		else
@@ -1043,6 +1097,8 @@ extern void radix_kvm_prefetch_workaround(struct mm_struct *mm)
 		for (; sib <= cpu_last_thread_sibling(cpu) && !flush; sib++) {
 			if (sib == cpu)
 				continue;
+			if (!cpu_possible(sib))
+				continue;
 			if (paca_ptrs[sib]->kvm_hstate.kvm_vcpu)
 				flush = true;
 		}
@@ -258,11 +258,6 @@ archscripts: scripts_basic
 archheaders:
 	$(Q)$(MAKE) $(build)=arch/x86/entry/syscalls all

-archprepare:
-ifeq ($(CONFIG_KEXEC_FILE),y)
-	$(Q)$(MAKE) $(build)=arch/x86/purgatory arch/x86/purgatory/kexec-purgatory.c
-endif
-
 ###
 # Kernel objects

@@ -327,7 +322,6 @@ archclean:
 	$(Q)rm -rf $(objtree)/arch/x86_64
 	$(Q)$(MAKE) $(clean)=$(boot)
 	$(Q)$(MAKE) $(clean)=arch/x86/tools
-	$(Q)$(MAKE) $(clean)=arch/x86/purgatory

 define archhelp
   echo  '* bzImage      - Compressed kernel image (arch/x86/boot/bzImage)'
@@ -118,7 +118,7 @@ __setup_efi_pci(efi_pci_io_protocol_t *pci, struct pci_setup_rom **__rom)
 	void *romimage;

 	status = efi_call_proto(efi_pci_io_protocol, attributes, pci,
-				EfiPciIoAttributeOperationGet, 0, 0,
+				EfiPciIoAttributeOperationGet, 0ULL,
 				&attributes);
 	if (status != EFI_SUCCESS)
 		return status;
@@ -164,7 +164,7 @@ static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags)
 		if (cached_flags & _TIF_NOTIFY_RESUME) {
 			clear_thread_flag(TIF_NOTIFY_RESUME);
 			tracehook_notify_resume(regs);
-			rseq_handle_notify_resume(regs);
+			rseq_handle_notify_resume(NULL, regs);
 		}

 		if (cached_flags & _TIF_USER_RETURN_NOTIFY)
@@ -38,7 +38,7 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
 {
 	unsigned long mask;

-	asm ("cmp %1,%2; sbb %0,%0;"
+	asm volatile ("cmp %1,%2; sbb %0,%0;"
 			:"=r" (mask)
 			:"g"(size),"r" (index)
 			:"cc");
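The volatile qualifier keeps the compiler from reordering or de-duplicating the mask computation relative to the surrounding bounds check. A minimal sketch of the usual calling pattern — illustrative only; in-tree callers normally go through the array_index_nospec() wrapper::

	if (index < size) {
		/*
		 * mask is ~0UL when index < size and 0 otherwise, so a
		 * mispredicted bounds check clamps the load to element 0.
		 */
		index &= array_index_mask_nospec(index, size);
		val = array[index];
	}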
@@ -114,6 +114,7 @@
 #define VMX_MISC_PREEMPTION_TIMER_RATE_MASK	0x0000001f
 #define VMX_MISC_SAVE_EFER_LMA			0x00000020
 #define VMX_MISC_ACTIVITY_HLT			0x00000040
+#define VMX_MISC_ZERO_LEN_INS			0x40000000

 /* VMFUNC functions */
 #define VMX_VMFUNC_EPTP_SWITCHING		0x00000001
@@ -351,11 +352,13 @@ enum vmcs_field {
 #define VECTORING_INFO_VALID_MASK	INTR_INFO_VALID_MASK

 #define INTR_TYPE_EXT_INTR		(0 << 8) /* external interrupt */
+#define INTR_TYPE_RESERVED		(1 << 8) /* reserved */
 #define INTR_TYPE_NMI_INTR		(2 << 8) /* NMI */
 #define INTR_TYPE_HARD_EXCEPTION	(3 << 8) /* processor exception */
 #define INTR_TYPE_SOFT_INTR		(4 << 8) /* software interrupt */
+#define INTR_TYPE_PRIV_SW_EXCEPTION	(5 << 8) /* ICE breakpoint - undocumented */
 #define INTR_TYPE_SOFT_EXCEPTION	(6 << 8) /* software exception */
+#define INTR_TYPE_OTHER_EVENT		(7 << 8) /* other event */

 /* GUEST_INTERRUPTIBILITY_INFO flags. */
 #define GUEST_INTR_STATE_STI		0x00000001
@@ -26,6 +26,7 @@
 #include <linux/delay.h>
 #include <linux/crash_dump.h>
 #include <linux/reboot.h>
+#include <linux/memory.h>

 #include <asm/uv/uv_mmrs.h>
 #include <asm/uv/uv_hub.h>
@@ -392,6 +393,51 @@ extern int uv_hub_info_version(void)
 }
 EXPORT_SYMBOL(uv_hub_info_version);

+/* Default UV memory block size is 2GB */
+static unsigned long mem_block_size = (2UL << 30);
+
+/* Kernel parameter to specify UV mem block size */
+static int parse_mem_block_size(char *ptr)
+{
+	unsigned long size = memparse(ptr, NULL);
+
+	/* Size will be rounded down by set_block_size() below */
+	mem_block_size = size;
+	return 0;
+}
+early_param("uv_memblksize", parse_mem_block_size);
+
+static __init int adj_blksize(u32 lgre)
+{
+	unsigned long base = (unsigned long)lgre << UV_GAM_RANGE_SHFT;
+	unsigned long size;
+
+	for (size = mem_block_size; size > MIN_MEMORY_BLOCK_SIZE; size >>= 1)
+		if (IS_ALIGNED(base, size))
+			break;
+
+	if (size >= mem_block_size)
+		return 0;
+
+	mem_block_size = size;
+	return 1;
+}
+
+static __init void set_block_size(void)
+{
+	unsigned int order = ffs(mem_block_size);
+
+	if (order) {
+		/* adjust for ffs return of 1..64 */
+		set_memory_block_size_order(order - 1);
+		pr_info("UV: mem_block_size set to 0x%lx\n", mem_block_size);
+	} else {
+		/* bad or zero value, default to 1UL << 31 (2GB) */
+		pr_err("UV: mem_block_size error with 0x%lx\n", mem_block_size);
+		set_memory_block_size_order(31);
+	}
+}
+
 /* Build GAM range lookup table: */
 static __init void build_uv_gr_table(void)
 {
@@ -1180,23 +1226,30 @@ static void __init decode_gam_rng_tbl(unsigned long ptr)
 					  << UV_GAM_RANGE_SHFT);
 		int order = 0;
 		char suffix[] = " KMGTPE";
+		int flag = ' ';

 		while (size > 9999 && order < sizeof(suffix)) {
 			size /= 1024;
 			order++;
 		}

+		/* adjust max block size to current range start */
+		if (gre->type == 1 || gre->type == 2)
+			if (adj_blksize(lgre))
+				flag = '*';
+
 		if (!index) {
 			pr_info("UV: GAM Range Table...\n");
-			pr_info("UV: # %20s %14s %5s %4s %5s %3s %2s\n", "Range", "", "Size", "Type", "NASID", "SID", "PN");
+			pr_info("UV: # %20s %14s %6s %4s %5s %3s %2s\n", "Range", "", "Size", "Type", "NASID", "SID", "PN");
 		}
-		pr_info("UV: %2d: 0x%014lx-0x%014lx %5lu%c %3d %04x %02x %02x\n",
+		pr_info("UV: %2d: 0x%014lx-0x%014lx%c %5lu%c %3d %04x %02x %02x\n",
 			index++,
 			(unsigned long)lgre << UV_GAM_RANGE_SHFT,
 			(unsigned long)gre->limit << UV_GAM_RANGE_SHFT,
-			size, suffix[order],
+			flag, size, suffix[order],
 			gre->type, gre->nasid, gre->sockid, gre->pnode);

+		/* update to next range start */
 		lgre = gre->limit;
 		if (sock_min > gre->sockid)
 			sock_min = gre->sockid;
@@ -1427,6 +1480,7 @@ static void __init uv_system_init_hub(void)

 	build_socket_tables();
 	build_uv_gr_table();
+	set_block_size();
 	uv_init_hub_info(&hub_info);
 	uv_possible_blades = num_possible_nodes();
 	if (!_node_to_pnode)
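Since parse_mem_block_size() goes through memparse(), the new early parameter accepts the usual K/M/G suffixes. A hypothetical boot-line usage, assuming a UV system whose GAM ranges are only 512 MB aligned::

	uv_memblksize=512m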
@@ -27,6 +27,7 @@
 #include <asm/pgtable.h>
 #include <asm/set_memory.h>
 #include <asm/intel-family.h>
+#include <asm/hypervisor.h>

 static void __init spectre_v2_select_mitigation(void);
 static void __init ssb_select_mitigation(void);
@@ -664,6 +665,9 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
 		if (boot_cpu_has(X86_FEATURE_PTI))
 			return sprintf(buf, "Mitigation: PTI\n");

+		if (hypervisor_is_type(X86_HYPER_XEN_PV))
+			return sprintf(buf, "Unknown (XEN PV detected, hypervisor mitigation required)\n");
+
 		break;

 	case X86_BUG_SPECTRE_V1:
@@ -671,7 +671,7 @@ void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, int cpu, u8 node_id)
 		num_sharing_cache = ((eax >> 14) & 0xfff) + 1;

 		if (num_sharing_cache) {
-			int bits = get_count_order(num_sharing_cache) - 1;
+			int bits = get_count_order(num_sharing_cache);

 			per_cpu(cpu_llc_id, cpu) = c->apicid >> bits;
 		}
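get_count_order() rounds up to ceil(log2(n)), so it already yields exactly the number of low APIC-ID bits that enumerate the threads sharing the cache; the extra `- 1` halved the shift and could fold two distinct LLCs into one ID. A userspace sketch of the computation, with count_order() standing in for the kernel helper::

    #include <stdio.h>

    /* ceil(log2(n)) for n > 0, like the kernel's get_count_order(). */
    static int count_order(unsigned int n)
    {
            int order = 0;

            while ((1U << order) < n)
                    order++;
            return order;
    }

    int main(void)
    {
            unsigned int apicid = 0x1b;                /* example APIC ID */
            unsigned int num_sharing_cache = 8;        /* 8 threads share the LLC */
            int bits = count_order(num_sharing_cache); /* 3, not 2 */

            /* Shifting by the full order keeps different LLCs distinct. */
            printf("llc_id = %u\n", apicid >> bits);
            return 0;
    }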
@@ -1,3 +1,6 @@
+/* cpu_feature_enabled() cannot be used this early */
+#define USE_EARLY_PGTABLE_L5
+
 #include <linux/bootmem.h>
 #include <linux/linkage.h>
 #include <linux/bitops.h>
@@ -160,6 +160,11 @@ static struct severity {
 		SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_INSTR),
 		USER
 		),
+	MCESEV(
+		PANIC, "Data load in unrecoverable area of kernel",
+		SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_DATA),
+		KERNEL
+		),
 #endif
 	MCESEV(
 		PANIC, "Action required: unknown MCACOD",
@@ -772,23 +772,25 @@ EXPORT_SYMBOL_GPL(machine_check_poll);
 static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp,
 			  struct pt_regs *regs)
 {
-	int i, ret = 0;
 	char *tmp;
+	int i;

 	for (i = 0; i < mca_cfg.banks; i++) {
 		m->status = mce_rdmsrl(msr_ops.status(i));
-		if (m->status & MCI_STATUS_VAL) {
-			__set_bit(i, validp);
-			if (quirk_no_way_out)
-				quirk_no_way_out(i, m, regs);
-		}
+		if (!(m->status & MCI_STATUS_VAL))
+			continue;
+
+		__set_bit(i, validp);
+		if (quirk_no_way_out)
+			quirk_no_way_out(i, m, regs);

 		if (mce_severity(m, mca_cfg.tolerant, &tmp, true) >= MCE_PANIC_SEVERITY) {
+			mce_read_aux(m, i);
 			*msg = tmp;
-			ret = 1;
+			return 1;
 		}
 	}
-	return ret;
+	return 0;
 }

 /*
@@ -1205,13 +1207,18 @@ void do_machine_check(struct pt_regs *regs, long error_code)
 	lmce = m.mcgstatus & MCG_STATUS_LMCES;

 	/*
+	 * Local machine check may already know that we have to panic.
+	 * Broadcast machine check begins rendezvous in mce_start()
 	 * Go through all banks in exclusion of the other CPUs. This way we
 	 * don't report duplicated events on shared banks because the first one
-	 * to see it will clear it. If this is a Local MCE, then no need to
-	 * perform rendezvous.
+	 * to see it will clear it.
 	 */
-	if (!lmce)
+	if (lmce) {
+		if (no_way_out)
+			mce_panic("Fatal local machine check", &m, msg);
+	} else {
 		order = mce_start(&no_way_out);
+	}

 	for (i = 0; i < cfg->banks; i++) {
 		__clear_bit(i, toclear);

@@ -1287,12 +1294,17 @@ void do_machine_check(struct pt_regs *regs, long error_code)
 		no_way_out = worst >= MCE_PANIC_SEVERITY;
 	} else {
 		/*
-		 * Local MCE skipped calling mce_reign()
-		 * If we found a fatal error, we need to panic here.
+		 * If there was a fatal machine check we should have
+		 * already called mce_panic earlier in this function.
+		 * Since we re-read the banks, we might have found
+		 * something new. Check again to see if we found a
+		 * fatal error. We call "mce_severity()" again to
+		 * make sure we have the right "msg".
 		 */
-		if (worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3)
-			mce_panic("Machine check from unknown source",
-				NULL, NULL);
+		if (worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3) {
+			mce_severity(&m, cfg->tolerant, &msg, true);
+			mce_panic("Local fatal machine check!", &m, msg);
+		}
 	}

 	/*
@@ -190,8 +190,11 @@ static void save_microcode_patch(void *data, unsigned int size)
 			p = memdup_patch(data, size);
 			if (!p)
 				pr_err("Error allocating buffer %p\n", data);
-			else
+			else {
 				list_replace(&iter->plist, &p->plist);
+				kfree(iter->data);
+				kfree(iter);
+			}
 		}
 	}
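The leak fixed here is generic: list_replace() only rewires the links, it does not dispose of the node being replaced. A standalone sketch of the same pattern with a toy doubly-linked node; the names are illustrative, not the kernel API, and the node is assumed to be linked into a valid list::

    #include <stdlib.h>
    #include <string.h>

    /* Minimal doubly-linked node, standing in for struct list_head. */
    struct node {
            struct node *prev, *next;
            void *data;
    };

    /* Splice @repl into the position @old occupies, like list_replace(). */
    static void list_replace_node(struct node *old, struct node *repl)
    {
            repl->prev = old->prev;
            repl->next = old->next;
            repl->prev->next = repl;
            repl->next->prev = repl;
    }

    /* Replace @old with a copy of @data. Without the two frees at the
     * end, the old node and its payload would leak, which is exactly
     * what the fix above addresses. */
    static void replace_patch(struct node *old, const void *data, size_t size)
    {
            struct node *p = malloc(sizeof(*p));

            if (!p)
                    return;
            p->data = malloc(size);
            if (!p->data) {
                    free(p);
                    return;
            }
            memcpy(p->data, data, size);
            list_replace_node(old, p);
            free(old->data);        /* the kfree(iter->data) in the fix */
            free(old);              /* the kfree(iter) in the fix */
    }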
@@ -44,7 +44,7 @@ static unsigned int __initdata next_early_pgt;
 pmdval_t early_pmd_flags = __PAGE_KERNEL_LARGE & ~(_PAGE_GLOBAL | _PAGE_NX);

 #ifdef CONFIG_X86_5LEVEL
-unsigned int __pgtable_l5_enabled __initdata;
+unsigned int __pgtable_l5_enabled __ro_after_init;
 unsigned int pgdir_shift __ro_after_init = 39;
 EXPORT_SYMBOL(pgdir_shift);
 unsigned int ptrs_per_p4d __ro_after_init = 1;
@@ -645,12 +645,19 @@ static void quirk_intel_brickland_xeon_ras_cap(struct pci_dev *pdev)
 /* Skylake */
 static void quirk_intel_purley_xeon_ras_cap(struct pci_dev *pdev)
 {
-	u32 capid0;
+	u32 capid0, capid5;

 	pci_read_config_dword(pdev, 0x84, &capid0);
+	pci_read_config_dword(pdev, 0x98, &capid5);

-	if ((capid0 & 0xc0) == 0xc0)
+	/*
+	 * CAPID0{7:6} indicate whether this is an advanced RAS SKU
+	 * CAPID5{8:5} indicate that various NVDIMM usage modes are
+	 * enabled, so memory machine check recovery is also enabled.
+	 */
+	if ((capid0 & 0xc0) == 0xc0 || (capid5 & 0x1e0))
 		static_branch_inc(&mcsafe_key);

 }
 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x0ec3, quirk_intel_brickland_xeon_ras_cap);
 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2fc0, quirk_intel_brickland_xeon_ras_cap);
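The masks follow directly from the documented bit ranges: 0xc0 covers CAPID0 bits 7:6 and 0x1e0 covers CAPID5 bits 8:5. A small sketch with hypothetical register values, purely for illustration::

    #include <stdio.h>
    #include <stdint.h>

    /* CAPID0 bits 7:6 == 0b11 -> advanced RAS SKU;
     * CAPID5 bits 8:5 nonzero -> some NVDIMM mode is enabled. */
    static int recovery_enabled(uint32_t capid0, uint32_t capid5)
    {
            return (capid0 & 0xc0) == 0xc0 || (capid5 & 0x1e0);
    }

    int main(void)
    {
            printf("%d\n", recovery_enabled(0xc0, 0x000));  /* 1: RAS SKU     */
            printf("%d\n", recovery_enabled(0x40, 0x020));  /* 1: NVDIMM mode */
            printf("%d\n", recovery_enabled(0x40, 0x000));  /* 0: neither     */
            return 0;
    }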
@@ -692,7 +692,7 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
 	 * Increment event counter and perform fixup for the pre-signal
 	 * frame.
 	 */
-	rseq_signal_deliver(regs);
+	rseq_signal_deliver(ksig, regs);

 	/* Set up the stack frame */
 	if (is_ia32_frame(ksig)) {
@@ -835,16 +835,18 @@ static void math_error(struct pt_regs *regs, int error_code, int trapnr)
 	char *str = (trapnr == X86_TRAP_MF) ? "fpu exception" :
 						"simd exception";

-	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, SIGFPE) == NOTIFY_STOP)
-		return;
 	cond_local_irq_enable(regs);

 	if (!user_mode(regs)) {
-		if (!fixup_exception(regs, trapnr)) {
-			task->thread.error_code = error_code;
-			task->thread.trap_nr = trapnr;
+		if (fixup_exception(regs, trapnr))
+			return;
+
+		task->thread.error_code = error_code;
+		task->thread.trap_nr = trapnr;
+
+		if (notify_die(DIE_TRAP, str, regs, error_code,
+			       trapnr, SIGFPE) != NOTIFY_STOP)
 			die(str, regs, error_code);
-		}
 		return;
 	}
@@ -293,7 +293,7 @@ static int uprobe_init_insn(struct arch_uprobe *auprobe, struct insn *insn, bool
 	insn_init(insn, auprobe->insn, sizeof(auprobe->insn), x86_64);
 	/* has the side-effect of processing the entire instruction */
 	insn_get_length(insn);
-	if (WARN_ON_ONCE(!insn_complete(insn)))
+	if (!insn_complete(insn))
 		return -ENOEXEC;

 	if (is_prefix_bad(insn))
@@ -1705,6 +1705,17 @@ static inline bool nested_cpu_has_vmwrite_any_field(struct kvm_vcpu *vcpu)
 		MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS;
 }

+static inline bool nested_cpu_has_zero_length_injection(struct kvm_vcpu *vcpu)
+{
+	return to_vmx(vcpu)->nested.msrs.misc_low & VMX_MISC_ZERO_LEN_INS;
+}
+
+static inline bool nested_cpu_supports_monitor_trap_flag(struct kvm_vcpu *vcpu)
+{
+	return to_vmx(vcpu)->nested.msrs.procbased_ctls_high &
+		CPU_BASED_MONITOR_TRAP_FLAG;
+}
+
 static inline bool nested_cpu_has(struct vmcs12 *vmcs12, u32 bit)
 {
 	return vmcs12->cpu_based_vm_exec_control & bit;

@@ -11620,6 +11631,62 @@ static int check_vmentry_prereqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 	    !nested_cr3_valid(vcpu, vmcs12->host_cr3))
 		return VMXERR_ENTRY_INVALID_HOST_STATE_FIELD;

+	/*
+	 * From the Intel SDM, volume 3:
+	 * Fields relevant to VM-entry event injection must be set properly.
+	 * These fields are the VM-entry interruption-information field, the
+	 * VM-entry exception error code, and the VM-entry instruction length.
+	 */
+	if (vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK) {
+		u32 intr_info = vmcs12->vm_entry_intr_info_field;
+		u8 vector = intr_info & INTR_INFO_VECTOR_MASK;
+		u32 intr_type = intr_info & INTR_INFO_INTR_TYPE_MASK;
+		bool has_error_code = intr_info & INTR_INFO_DELIVER_CODE_MASK;
+		bool should_have_error_code;
+		bool urg = nested_cpu_has2(vmcs12,
+					   SECONDARY_EXEC_UNRESTRICTED_GUEST);
+		bool prot_mode = !urg || vmcs12->guest_cr0 & X86_CR0_PE;
+
+		/* VM-entry interruption-info field: interruption type */
+		if (intr_type == INTR_TYPE_RESERVED ||
+		    (intr_type == INTR_TYPE_OTHER_EVENT &&
+		     !nested_cpu_supports_monitor_trap_flag(vcpu)))
+			return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+
+		/* VM-entry interruption-info field: vector */
+		if ((intr_type == INTR_TYPE_NMI_INTR && vector != NMI_VECTOR) ||
+		    (intr_type == INTR_TYPE_HARD_EXCEPTION && vector > 31) ||
+		    (intr_type == INTR_TYPE_OTHER_EVENT && vector != 0))
+			return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+
+		/* VM-entry interruption-info field: deliver error code */
+		should_have_error_code =
+			intr_type == INTR_TYPE_HARD_EXCEPTION && prot_mode &&
+			x86_exception_has_error_code(vector);
+		if (has_error_code != should_have_error_code)
+			return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+
+		/* VM-entry exception error code */
+		if (has_error_code &&
+		    vmcs12->vm_entry_exception_error_code & GENMASK(31, 15))
+			return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+
+		/* VM-entry interruption-info field: reserved bits */
+		if (intr_info & INTR_INFO_RESVD_BITS_MASK)
+			return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+
+		/* VM-entry instruction length */
+		switch (intr_type) {
+		case INTR_TYPE_SOFT_EXCEPTION:
+		case INTR_TYPE_SOFT_INTR:
+		case INTR_TYPE_PRIV_SW_EXCEPTION:
+			if ((vmcs12->vm_entry_instruction_len > 15) ||
+			    (vmcs12->vm_entry_instruction_len == 0 &&
+			     !nested_cpu_has_zero_length_injection(vcpu)))
+				return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+		}
+	}
+
 	return 0;
 }
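For readers without the SDM at hand, the interruption-information field packs the vector, type, deliver-error-code and valid bits into one 32-bit word. A simplified userspace model of a few of the checks above; the constants mirror the SDM encodings, but this is an illustration, not the KVM code::

    #include <stdio.h>
    #include <stdint.h>

    /* bits 7:0 vector, bits 10:8 type, bit 31 valid */
    #define II_VALID                (1u << 31)
    #define II_TYPE(x)              (((x) >> 8) & 0x7)
    #define II_VECTOR(x)            ((x) & 0xff)

    #define TYPE_NMI                2
    #define TYPE_HARD_EXCEPTION     3

    static int intr_info_valid(uint32_t ii)
    {
            if (!(ii & II_VALID))
                    return 1;       /* nothing to check if not injecting */
            if (II_TYPE(ii) == 1)
                    return 0;       /* type 1 is reserved */
            if (II_TYPE(ii) == TYPE_NMI && II_VECTOR(ii) != 2)
                    return 0;       /* NMI must use vector 2 */
            if (II_TYPE(ii) == TYPE_HARD_EXCEPTION && II_VECTOR(ii) > 31)
                    return 0;       /* hardware exceptions are vectors 0..31 */
            return 1;
    }

    int main(void)
    {
            printf("%d\n", intr_info_valid((1u << 31) | (2 << 8) | 2));  /* 1 */
            printf("%d\n", intr_info_valid((1u << 31) | (2 << 8) | 9));  /* 0 */
            printf("%d\n", intr_info_valid((1u << 31) | (3 << 8) | 45)); /* 0 */
            return 0;
    }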
@@ -110,6 +110,15 @@ static inline bool is_la57_mode(struct kvm_vcpu *vcpu)
 #endif
 }

+static inline bool x86_exception_has_error_code(unsigned int vector)
+{
+	static u32 exception_has_error_code = BIT(DF_VECTOR) | BIT(TS_VECTOR) |
+			BIT(NP_VECTOR) | BIT(SS_VECTOR) | BIT(GP_VECTOR) |
+			BIT(PF_VECTOR) | BIT(AC_VECTOR);
+
+	return (1U << vector) & exception_has_error_code;
+}
+
 static inline bool mmu_is_nested(struct kvm_vcpu *vcpu)
 {
 	return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu;
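Encoding a set of small integers as a bit vector, as this helper does, makes membership a single AND. A userspace sketch using the architectural vector numbers (#DF=8, #TS=10, #NP=11, #SS=12, #GP=13, #PF=14, #AC=17)::

    #include <stdio.h>
    #include <stdint.h>

    #define BIT(n) (1u << (n))

    /* bit n set means exception vector n pushes an error code */
    static const uint32_t has_error_code =
            BIT(8) | BIT(10) | BIT(11) | BIT(12) | BIT(13) | BIT(14) | BIT(17);

    int main(void)
    {
            for (unsigned int v = 0; v < 32; v++)
                    if (has_error_code & BIT(v))
                            printf("vector %u pushes an error code\n", v);
            return 0;
    }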
@@ -1350,16 +1350,28 @@ int kern_addr_valid(unsigned long addr)
 /* Amount of ram needed to start using large blocks */
 #define MEM_SIZE_FOR_LARGE_BLOCK (64UL << 30)

+/* Adjustable memory block size */
+static unsigned long set_memory_block_size;
+int __init set_memory_block_size_order(unsigned int order)
+{
+	unsigned long size = 1UL << order;
+
+	if (size > MEM_SIZE_FOR_LARGE_BLOCK || size < MIN_MEMORY_BLOCK_SIZE)
+		return -EINVAL;
+
+	set_memory_block_size = size;
+	return 0;
+}
+
 static unsigned long probe_memory_block_size(void)
 {
 	unsigned long boot_mem_end = max_pfn << PAGE_SHIFT;
 	unsigned long bz;

-	/* If this is UV system, always set 2G block size */
-	if (is_uv_system()) {
-		bz = MAX_BLOCK_SIZE;
+	/* If memory block size has been set, then use it */
+	bz = set_memory_block_size;
+	if (bz)
 		goto done;
-	}

 	/* Use regular block if RAM is smaller than MEM_SIZE_FOR_LARGE_BLOCK */
 	if (boot_mem_end < MEM_SIZE_FOR_LARGE_BLOCK) {
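The order parameter is log2 of the block size, so the valid window above corresponds to a small range of orders. A quick userspace check of that mapping, assuming a 128 MiB minimum section size on x86-64, the 64 GiB large-block threshold used above, and a 64-bit long::

    #include <stdio.h>

    int main(void)
    {
            const unsigned long min_size = 1UL << 27;  /* assumed MIN_MEMORY_BLOCK_SIZE */
            const unsigned long max_size = 64UL << 30; /* MEM_SIZE_FOR_LARGE_BLOCK */

            for (unsigned int order = 26; order <= 37; order++) {
                    unsigned long size = 1UL << order;

                    printf("order %2u -> %10lu MiB: %s\n", order, size >> 20,
                           (size >= min_size && size <= max_size) ? "ok" : "-EINVAL");
            }
            return 0;
    }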
@@ -64,6 +64,13 @@ struct shared_info xen_dummy_shared_info;
 __read_mostly int xen_have_vector_callback;
 EXPORT_SYMBOL_GPL(xen_have_vector_callback);

+/*
+ * NB: needs to live in .data because it's used by xen_prepare_pvh which runs
+ * before clearing the bss.
+ */
+uint32_t xen_start_flags __attribute__((section(".data"))) = 0;
+EXPORT_SYMBOL(xen_start_flags);
+
 /*
  * Point at some empty memory to start with. We map the real shared_info
  * page as soon as fixmap is up and running.
@@ -1203,6 +1203,7 @@ asmlinkage __visible void __init xen_start_kernel(void)
 		return;

 	xen_domain_type = XEN_PV_DOMAIN;
+	xen_start_flags = xen_start_info->flags;

 	xen_setup_features();

@@ -97,6 +97,7 @@ void __init xen_prepare_pvh(void)
 	}

 	xen_pvh = 1;
+	xen_start_flags = pvh_start_info.flags;

 	msr = cpuid_ebx(xen_cpuid_base() + 2);
 	pfn = __pa(hypercall_page);
@@ -32,6 +32,7 @@
 #include <xen/interface/vcpu.h>
 #include <xen/interface/xenpmu.h>

+#include <asm/spec-ctrl.h>
 #include <asm/xen/interface.h>
 #include <asm/xen/hypercall.h>

@@ -70,6 +71,8 @@ static void cpu_bringup(void)
 	cpu_data(cpu).x86_max_cores = 1;
 	set_cpu_sibling_map(cpu);

+	speculative_store_bypass_ht_init();
+
 	xen_setup_cpu_clockevents();

 	notify_cpu_starting(cpu);

@@ -250,6 +253,8 @@ static void __init xen_pv_smp_prepare_cpus(unsigned int max_cpus)
 	}
 	set_cpu_sibling_map(0);

+	speculative_store_bypass_ht_init();
+
 	xen_pmu_init(0);

 	if (xen_smp_intr_init(0) || xen_smp_intr_init_pv(0))
@@ -1807,9 +1807,6 @@ again:
 	if (!bio_integrity_endio(bio))
 		return;

-	if (WARN_ONCE(bio->bi_next, "driver left bi_next not NULL"))
-		bio->bi_next = NULL;
-
 	/*
 	 * Need to have a real endio function for chained bios, otherwise
 	 * various corner cases will break (like stacking block devices that
@@ -273,10 +273,6 @@ static void req_bio_endio(struct request *rq, struct bio *bio,
 	bio_advance(bio, nbytes);

 	/* don't actually finish bio if it's part of flush sequence */
-	/*
-	 * XXX this code looks suspicious - it's not consistent with advancing
-	 * req->bio in caller
-	 */
 	if (bio->bi_iter.bi_size == 0 && !(rq->rq_flags & RQF_FLUSH_SEQ))
 		bio_endio(bio);
 }

@@ -3081,10 +3077,8 @@ bool blk_update_request(struct request *req, blk_status_t error,
 		struct bio *bio = req->bio;
 		unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes);

-		if (bio_bytes == bio->bi_iter.bi_size) {
+		if (bio_bytes == bio->bi_iter.bi_size)
 			req->bio = bio->bi_next;
-			bio->bi_next = NULL;
-		}

 		/* Completion has already been traced */
 		bio_clear_flag(bio, BIO_TRACE_COMPLETION);
@@ -356,7 +356,7 @@ static const char *const blk_mq_rq_state_name_array[] = {

 static const char *blk_mq_rq_state_name(enum mq_rq_state rq_state)
 {
-	if (WARN_ON_ONCE((unsigned int)rq_state >
+	if (WARN_ON_ONCE((unsigned int)rq_state >=
 			 ARRAY_SIZE(blk_mq_rq_state_name_array)))
 		return "(?)";
 	return blk_mq_rq_state_name_array[rq_state];
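The off-by-one matters because an index equal to ARRAY_SIZE() is already one past the last element; valid indices run from 0 to ARRAY_SIZE() - 1. A minimal sketch of the corrected bounds check::

    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    static const char *const state_name[] = { "idle", "in_flight", "complete" };

    static const char *name(unsigned int state)
    {
            if (state >= ARRAY_SIZE(state_name))
                    return "(?)";
            return state_name[state];
    }

    int main(void)
    {
            printf("%s %s\n", name(2), name(3)); /* "complete (?)" */
            return 0;
    }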
@@ -781,7 +781,6 @@ static void blk_mq_rq_timed_out(struct request *req, bool reserved)
 		WARN_ON_ONCE(ret != BLK_EH_RESET_TIMER);
 	}

-	req->rq_flags &= ~RQF_TIMED_OUT;
 	blk_add_timer(req);
 }
@@ -144,6 +144,7 @@ do_local:

 	local_irq_restore(flags);
 }
+EXPORT_SYMBOL(__blk_complete_request);

 /**
  * blk_complete_request - end I/O on a request
@@ -210,6 +210,7 @@ void blk_add_timer(struct request *req)
 	if (!req->timeout)
 		req->timeout = q->rq_timeout;

+	req->rq_flags &= ~RQF_TIMED_OUT;
 	blk_rq_set_deadline(req, jiffies + req->timeout);

 	/*
@@ -877,7 +877,7 @@ static size_t response_get_string(const struct parsed_resp *resp, int n,
 		return 0;
 	}

-	if (n > resp->num) {
+	if (n >= resp->num) {
 		pr_debug("Response has %d tokens. Can't access %d\n",
 			 resp->num, n);
 		return 0;

@@ -916,7 +916,7 @@ static u64 response_get_u64(const struct parsed_resp *resp, int n)
 		return 0;
 	}

-	if (n > resp->num) {
+	if (n >= resp->num) {
 		pr_debug("Response has %d tokens. Can't access %d\n",
 			 resp->num, n);
 		return 0;
@@ -274,8 +274,9 @@ static void crypto_morus640_decrypt_chunk(struct morus640_state *state, u8 *dst,
 		union morus640_block_in tail;

 		memcpy(tail.bytes, src, size);
+		memset(tail.bytes + size, 0, MORUS640_BLOCK_SIZE - size);

-		crypto_morus640_load_a(&m, src);
+		crypto_morus640_load_a(&m, tail.bytes);
 		crypto_morus640_core(state, &m);
 		crypto_morus640_store_a(tail.bytes, &m);
-		memset(tail.bytes + size, 0, MORUS640_BLOCK_SIZE - size);
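The pattern behind the fix: when the last block is short, copy it into a zero-padded buffer first and never read the original pointer as a full block, since that runs past the end of the input. A self-contained sketch where process_block() stands in for the cipher core::

    #include <stdio.h>
    #include <string.h>

    #define BLOCK_SIZE 16

    static void process_block(const unsigned char in[BLOCK_SIZE])
    {
            for (int i = 0; i < BLOCK_SIZE; i++)
                    printf("%02x", in[i]);
            printf("\n");
    }

    static void process_tail(const unsigned char *src, size_t size)
    {
            unsigned char tail[BLOCK_SIZE];

            memcpy(tail, src, size);
            memset(tail + size, 0, BLOCK_SIZE - size); /* pad before use */
            process_block(tail);                       /* never touch src again */
    }

    int main(void)
    {
            unsigned char msg[5] = { 1, 2, 3, 4, 5 };

            process_tail(msg, sizeof(msg));
            return 0;
    }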
@@ -152,7 +152,7 @@ static SHA3_INLINE void keccakf_round(u64 st[25])
 	st[24] ^= bc[ 4];
 }

-static void __optimize("O3") keccakf(u64 st[25])
+static void keccakf(u64 st[25])
 {
 	int round;
@@ -22,6 +22,7 @@
 #include <linux/pm_domain.h>
 #include <linux/pm_runtime.h>
 #include <linux/pwm.h>
+#include <linux/suspend.h>
 #include <linux/delay.h>

 #include "internal.h"

@@ -946,9 +947,10 @@ static void lpss_iosf_exit_d3_state(void)
 	mutex_unlock(&lpss_iosf_mutex);
 }

-static int acpi_lpss_suspend(struct device *dev, bool wakeup)
+static int acpi_lpss_suspend(struct device *dev, bool runtime)
 {
 	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
+	bool wakeup = runtime || device_may_wakeup(dev);
 	int ret;

 	if (pdata->dev_desc->flags & LPSS_SAVE_CTX)

@@ -961,13 +963,14 @@ static int acpi_lpss_suspend(struct device *dev, bool wakeup)
 	 * wrong status for devices being about to be powered off. See
 	 * lpss_iosf_enter_d3_state() for further information.
 	 */
-	if (lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available())
+	if ((runtime || !pm_suspend_via_firmware()) &&
+	    lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available())
 		lpss_iosf_enter_d3_state();

 	return ret;
 }

-static int acpi_lpss_resume(struct device *dev)
+static int acpi_lpss_resume(struct device *dev, bool runtime)
 {
 	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
 	int ret;

@@ -976,7 +979,8 @@ static int acpi_lpss_resume(struct device *dev)
 	 * This call is kept first to be in symmetry with
 	 * acpi_lpss_runtime_suspend() one.
 	 */
-	if (lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available())
+	if ((runtime || !pm_resume_via_firmware()) &&
+	    lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available())
 		lpss_iosf_exit_d3_state();

 	ret = acpi_dev_resume(dev);

@@ -1000,12 +1004,12 @@ static int acpi_lpss_suspend_late(struct device *dev)
 		return 0;

 	ret = pm_generic_suspend_late(dev);
-	return ret ? ret : acpi_lpss_suspend(dev, device_may_wakeup(dev));
+	return ret ? ret : acpi_lpss_suspend(dev, false);
 }

 static int acpi_lpss_resume_early(struct device *dev)
 {
-	int ret = acpi_lpss_resume(dev);
+	int ret = acpi_lpss_resume(dev, false);

 	return ret ? ret : pm_generic_resume_early(dev);
 }

@@ -1020,7 +1024,7 @@ static int acpi_lpss_runtime_suspend(struct device *dev)

 static int acpi_lpss_runtime_resume(struct device *dev)
 {
-	int ret = acpi_lpss_resume(dev);
+	int ret = acpi_lpss_resume(dev, true);

 	return ret ? ret : pm_generic_runtime_resume(dev);
 }
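The new `runtime` argument lets one helper serve both paths: runtime PM always applies the power quirk, while system sleep skips it when platform firmware will handle the power transition anyway. A userspace sketch of the gating, with suspend_via_firmware() standing in for pm_suspend_via_firmware()::

    #include <stdbool.h>
    #include <stdio.h>

    static bool suspend_via_firmware(void)
    {
            return true;    /* pretend we are entering S3 */
    }

    static void quirk_enter_d3(bool runtime)
    {
            if (runtime || !suspend_via_firmware())
                    printf("applying D3 quirk\n");
            else
                    printf("skipping quirk: firmware powers the device off\n");
    }

    int main(void)
    {
            quirk_enter_d3(true);   /* runtime suspend: quirk applied */
            quirk_enter_d3(false);  /* S3 entry: quirk skipped */
            return 0;
    }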
@@ -2037,6 +2037,17 @@ static inline void acpi_ec_query_exit(void)
 	}
 }

+static const struct dmi_system_id acpi_ec_no_wakeup[] = {
+	{
+		.ident = "Thinkpad X1 Carbon 6th",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "20KGS3JF01"),
+		},
+	},
+	{ },
+};
+
 int __init acpi_ec_init(void)
 {
 	int result;

@@ -2047,6 +2058,15 @@ int __init acpi_ec_init(void)
 	if (result)
 		return result;

+	/*
+	 * Disable EC wakeup on following systems to prevent periodic
+	 * wakeup from EC GPE.
+	 */
+	if (dmi_check_system(acpi_ec_no_wakeup)) {
+		ec_no_wakeup = true;
+		pr_debug("Disabling EC wakeup on suspend-to-idle\n");
+	}
+
 	/* Drivers must be started after acpi_ec_query_init() */
 	dsdt_fail = acpi_bus_register_driver(&acpi_ec_driver);
 	/*
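DMI quirk tables like acpi_ec_no_wakeup[] match on all of their listed fields and end with an empty terminator entry. A userspace analogue of the matching logic; the real interface is dmi_check_system() over struct dmi_system_id (which matches substrings), and the names here are illustrative::

    #include <stdio.h>
    #include <string.h>

    struct quirk {
            const char *ident;
            const char *vendor;     /* required prefix of the vendor string */
            const char *product;    /* required prefix of the product string */
    };

    static const struct quirk no_wakeup[] = {
            { "Thinkpad X1 Carbon 6th", "LENOVO", "20KGS3JF01" },
            { 0 },                  /* terminator */
    };

    static const struct quirk *check_system(const char *vendor, const char *product)
    {
            for (const struct quirk *q = no_wakeup; q->ident; q++)
                    if (!strncmp(vendor, q->vendor, strlen(q->vendor)) &&
                        !strncmp(product, q->product, strlen(q->product)))
                            return q;
            return NULL;
    }

    int main(void)
    {
            const struct quirk *q = check_system("LENOVO", "20KGS3JF01");

            printf("%s\n", q ? q->ident : "no quirk");
            return 0;
    }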
@@ -236,6 +236,13 @@ struct device_link *device_link_add(struct device *consumer,
 			link->rpm_active = true;
 		}
 		pm_runtime_new_link(consumer);
+		/*
+		 * If the link is being added by the consumer driver at probe
+		 * time, balance the decrementation of the supplier's runtime PM
+		 * usage counter after consumer probe in driver_probe_device().
+		 */
+		if (consumer->links.status == DL_DEV_PROBING)
+			pm_runtime_get_noresume(supplier);
 	}
 	get_device(supplier);
 	link->supplier = supplier;

@@ -255,12 +262,12 @@ struct device_link *device_link_add(struct device *consumer,
 	switch (consumer->links.status) {
 	case DL_DEV_PROBING:
 		/*
-		 * Balance the decrementation of the supplier's
-		 * runtime PM usage counter after consumer probe
-		 * in driver_probe_device().
+		 * Some callers expect the link creation during
+		 * consumer driver probe to resume the supplier
+		 * even without DL_FLAG_RPM_ACTIVE.
 		 */
 		if (flags & DL_FLAG_PM_RUNTIME)
-			pm_runtime_get_sync(supplier);
+			pm_runtime_resume(supplier);

 		link->status = DL_STATE_CONSUMER_PROBE;
 		break;
@@ -76,6 +76,7 @@ struct link_dead_args {
 #define NBD_HAS_CONFIG_REF		4
 #define NBD_BOUND			5
 #define NBD_DESTROY_ON_DISCONNECT	6
+#define NBD_DISCONNECT_ON_CLOSE		7

 struct nbd_config {
 	u32 flags;

@@ -138,6 +139,7 @@ static void nbd_config_put(struct nbd_device *nbd);
 static void nbd_connect_reply(struct genl_info *info, int index);
 static int nbd_genl_status(struct sk_buff *skb, struct genl_info *info);
 static void nbd_dead_link_work(struct work_struct *work);
+static void nbd_disconnect_and_put(struct nbd_device *nbd);

 static inline struct device *nbd_to_dev(struct nbd_device *nbd)
 {

@@ -1305,6 +1307,12 @@ out:
 static void nbd_release(struct gendisk *disk, fmode_t mode)
 {
 	struct nbd_device *nbd = disk->private_data;
+	struct block_device *bdev = bdget_disk(disk, 0);
+
+	if (test_bit(NBD_DISCONNECT_ON_CLOSE, &nbd->config->runtime_flags) &&
+			bdev->bd_openers == 0)
+		nbd_disconnect_and_put(nbd);
+
 	nbd_config_put(nbd);
 	nbd_put(nbd);
 }

@@ -1705,6 +1713,10 @@ again:
 				&config->runtime_flags);
 			put_dev = true;
 		}
+		if (flags & NBD_CFLAG_DISCONNECT_ON_CLOSE) {
+			set_bit(NBD_DISCONNECT_ON_CLOSE,
+				&config->runtime_flags);
+		}
 	}

 	if (info->attrs[NBD_ATTR_SOCKETS]) {

@@ -1749,6 +1761,17 @@ out:
 	return ret;
 }

+static void nbd_disconnect_and_put(struct nbd_device *nbd)
+{
+	mutex_lock(&nbd->config_lock);
+	nbd_disconnect(nbd);
+	nbd_clear_sock(nbd);
+	mutex_unlock(&nbd->config_lock);
+	if (test_and_clear_bit(NBD_HAS_CONFIG_REF,
+			       &nbd->config->runtime_flags))
+		nbd_config_put(nbd);
+}
+
 static int nbd_genl_disconnect(struct sk_buff *skb, struct genl_info *info)
 {
 	struct nbd_device *nbd;

@@ -1781,13 +1804,7 @@ static int nbd_genl_disconnect(struct sk_buff *skb, struct genl_info *info)
 		nbd_put(nbd);
 		return 0;
 	}
-	mutex_lock(&nbd->config_lock);
-	nbd_disconnect(nbd);
-	nbd_clear_sock(nbd);
-	mutex_unlock(&nbd->config_lock);
-	if (test_and_clear_bit(NBD_HAS_CONFIG_REF,
-			       &nbd->config->runtime_flags))
-		nbd_config_put(nbd);
+	nbd_disconnect_and_put(nbd);
 	nbd_config_put(nbd);
 	nbd_put(nbd);
 	return 0;

@@ -1798,7 +1815,7 @@ static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
 	struct nbd_device *nbd = NULL;
 	struct nbd_config *config;
 	int index;
-	int ret = -EINVAL;
+	int ret = 0;
 	bool put_dev = false;

 	if (!netlink_capable(skb, CAP_SYS_ADMIN))

@@ -1838,6 +1855,7 @@ static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
 	    !nbd->task_recv) {
 		dev_err(nbd_to_dev(nbd),
 			"not configured, cannot reconfigure\n");
+		ret = -EINVAL;
 		goto out;
 	}

@@ -1862,6 +1880,14 @@ static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
 				       &config->runtime_flags))
 			refcount_inc(&nbd->refs);
 		}
+
+		if (flags & NBD_CFLAG_DISCONNECT_ON_CLOSE) {
+			set_bit(NBD_DISCONNECT_ON_CLOSE,
+				&config->runtime_flags);
+		} else {
+			clear_bit(NBD_DISCONNECT_ON_CLOSE,
+				  &config->runtime_flags);
+		}
 	}

 	if (info->attrs[NBD_ATTR_SOCKETS]) {
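The DISCONNECT_ON_CLOSE handling is a plain runtime flag: set or cleared on each (re)configure, then tested on the last close. A userspace sketch of that life cycle, with plain bit operations standing in for set_bit()/clear_bit()/test_bit()::

    #include <stdio.h>

    #define DISCONNECT_ON_CLOSE (1UL << 7)

    static unsigned long runtime_flags;

    static void reconfigure(int disconnect_on_close)
    {
            if (disconnect_on_close)
                    runtime_flags |= DISCONNECT_ON_CLOSE;
            else
                    runtime_flags &= ~DISCONNECT_ON_CLOSE;
    }

    static void release(int openers)
    {
            if ((runtime_flags & DISCONNECT_ON_CLOSE) && openers == 0)
                    printf("last close: disconnecting\n");
            else
                    printf("staying connected\n");
    }

    int main(void)
    {
            reconfigure(1);
            release(0);     /* disconnects */
            reconfigure(0);
            release(0);     /* stays connected */
            return 0;
    }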
@@ -1365,7 +1365,7 @@ static blk_qc_t null_queue_bio(struct request_queue *q, struct bio *bio)
 static enum blk_eh_timer_return null_rq_timed_out_fn(struct request *rq)
 {
 	pr_info("null: rq %p timed out\n", rq);
-	blk_mq_complete_request(rq);
+	__blk_complete_request(rq);
 	return BLK_EH_DONE;
 }
@@ -516,11 +516,18 @@ EXPORT_SYMBOL_GPL(hwrng_register);

 void hwrng_unregister(struct hwrng *rng)
 {
+	int err;
+
 	mutex_lock(&rng_mutex);

 	list_del(&rng->list);
-	if (current_rng == rng)
-		enable_best_rng();
+	if (current_rng == rng) {
+		err = enable_best_rng();
+		if (err) {
+			drop_current_rng();
+			cur_rng_set_by_user = 0;
+		}
+	}

 	if (list_empty(&rng_list)) {
 		mutex_unlock(&rng_mutex);
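The shape of the fix is a fallback-on-error pattern: when promoting the best remaining provider fails, drop the current one instead of leaving a stale pointer behind. A compressed sketch; the function names mirror the driver's, but the bodies are stand-ins::

    #include <stdio.h>

    static int enable_best(void)
    {
            return -1;      /* pretend no usable provider is left */
    }

    static void unregister_current(void)
    {
            if (enable_best() != 0) {
                    /* drop_current_rng() + reset user selection in the driver */
                    printf("no fallback available: dropping current provider\n");
            }
    }

    int main(void)
    {
            unregister_current();
            return 0;
    }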
@@ -304,8 +304,10 @@ static int __init stm32_timer_init(struct device_node *node)

 	to->private_data = kzalloc(sizeof(struct stm32_timer_private),
 				   GFP_KERNEL);
-	if (!to->private_data)
+	if (!to->private_data) {
+		ret = -ENOMEM;
 		goto deinit;
+	}

 	rstc = of_reset_control_get(node, NULL);
 	if (!IS_ERR(rstc)) {
@@ -294,6 +294,7 @@ struct pstate_funcs {
 static struct pstate_funcs pstate_funcs __read_mostly;

 static int hwp_active __read_mostly;
+static int hwp_mode_bdw __read_mostly;
 static bool per_cpu_limits __read_mostly;
 static bool hwp_boost __read_mostly;

@@ -1413,7 +1414,15 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
 	cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
 	cpu->pstate.scaling = pstate_funcs.get_scaling();
 	cpu->pstate.max_freq = cpu->pstate.max_pstate * cpu->pstate.scaling;
-	cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling;
+
+	if (hwp_active && !hwp_mode_bdw) {
+		unsigned int phy_max, current_max;
+
+		intel_pstate_get_hwp_max(cpu->cpu, &phy_max, &current_max);
+		cpu->pstate.turbo_freq = phy_max * cpu->pstate.scaling;
+	} else {
+		cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling;
+	}

 	if (pstate_funcs.get_aperf_mperf_shift)
 		cpu->aperf_mperf_shift = pstate_funcs.get_aperf_mperf_shift();

@@ -2467,28 +2476,36 @@ static inline bool intel_pstate_has_acpi_ppc(void) { return false; }
 static inline void intel_pstate_request_control_from_smm(void) {}
 #endif /* CONFIG_ACPI */

+#define INTEL_PSTATE_HWP_BROADWELL	0x01
+
+#define ICPU_HWP(model, hwp_mode) \
+	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_HWP, hwp_mode }
+
 static const struct x86_cpu_id hwp_support_ids[] __initconst = {
-	{ X86_VENDOR_INTEL, 6, X86_MODEL_ANY, X86_FEATURE_HWP },
+	ICPU_HWP(INTEL_FAM6_BROADWELL_X, INTEL_PSTATE_HWP_BROADWELL),
+	ICPU_HWP(INTEL_FAM6_BROADWELL_XEON_D, INTEL_PSTATE_HWP_BROADWELL),
+	ICPU_HWP(X86_MODEL_ANY, 0),
 	{}
 };

 static int __init intel_pstate_init(void)
 {
+	const struct x86_cpu_id *id;
 	int rc;

 	if (no_load)
 		return -ENODEV;

-	if (x86_match_cpu(hwp_support_ids)) {
+	id = x86_match_cpu(hwp_support_ids);
+	if (id) {
 		copy_cpu_funcs(&core_funcs);
 		if (!no_hwp) {
 			hwp_active++;
+			hwp_mode_bdw = id->driver_data;
 			intel_pstate.attr = hwp_cpufreq_attrs;
 			goto hwp_cpu_matched;
 		}
 	} else {
 		const struct x86_cpu_id *id;

 		id = x86_match_cpu(intel_pstate_cpu_ids);
 		if (!id)
 			return -ENODEV;
@@ -42,6 +42,8 @@ enum _msm8996_version {
 	NUM_OF_MSM8996_VERSIONS,
 };

+struct platform_device *cpufreq_dt_pdev, *kryo_cpufreq_pdev;
+
 static enum _msm8996_version __init qcom_cpufreq_kryo_get_msm_id(void)
 {
 	size_t len;

@@ -74,7 +76,6 @@ static enum _msm8996_version __init qcom_cpufreq_kryo_get_msm_id(void)
 static int qcom_cpufreq_kryo_probe(struct platform_device *pdev)
 {
 	struct opp_table *opp_tables[NR_CPUS] = {0};
-	struct platform_device *cpufreq_dt_pdev;
 	enum _msm8996_version msm8996_version;
 	struct nvmem_cell *speedbin_nvmem;
 	struct device_node *np;

@@ -115,6 +116,8 @@ static int qcom_cpufreq_kryo_probe(struct platform_device *pdev)

 	speedbin = nvmem_cell_read(speedbin_nvmem, &len);
 	nvmem_cell_put(speedbin_nvmem);
+	if (IS_ERR(speedbin))
+		return PTR_ERR(speedbin);

 	switch (msm8996_version) {
 	case MSM8996_V3:

@@ -127,6 +130,7 @@ static int qcom_cpufreq_kryo_probe(struct platform_device *pdev)
 		BUG();
 		break;
 	}
+	kfree(speedbin);

 	for_each_possible_cpu(cpu) {
 		cpu_dev = get_cpu_device(cpu);

@@ -162,8 +166,15 @@ free_opp:
 	return ret;
 }

+static int qcom_cpufreq_kryo_remove(struct platform_device *pdev)
+{
+	platform_device_unregister(cpufreq_dt_pdev);
+	return 0;
+}
+
 static struct platform_driver qcom_cpufreq_kryo_driver = {
 	.probe = qcom_cpufreq_kryo_probe,
+	.remove = qcom_cpufreq_kryo_remove,
 	.driver = {
 		.name = "qcom-cpufreq-kryo",
 	},

@@ -198,8 +209,9 @@ static int __init qcom_cpufreq_kryo_init(void)
 	if (unlikely(ret < 0))
 		return ret;

-	ret = PTR_ERR_OR_ZERO(platform_device_register_simple(
-		"qcom-cpufreq-kryo", -1, NULL, 0));
+	kryo_cpufreq_pdev = platform_device_register_simple(
+		"qcom-cpufreq-kryo", -1, NULL, 0);
+	ret = PTR_ERR_OR_ZERO(kryo_cpufreq_pdev);
 	if (0 == ret)
 		return 0;

@@ -208,5 +220,12 @@ static int __init qcom_cpufreq_kryo_init(void)
 }
 module_init(qcom_cpufreq_kryo_init);

+static void __init qcom_cpufreq_kryo_exit(void)
+{
+	platform_device_unregister(kryo_cpufreq_pdev);
+	platform_driver_unregister(&qcom_cpufreq_kryo_driver);
+}
+module_exit(qcom_cpufreq_kryo_exit);
+
 MODULE_DESCRIPTION("Qualcomm Technologies, Inc. Kryo CPUfreq driver");
 MODULE_LICENSE("GPL v2");
@@ -1548,15 +1548,14 @@ skip_copy:
 			tp->urg_data = 0;

 		if ((avail + offset) >= skb->len) {
-			if (likely(skb))
-				chtls_free_skb(sk, skb);
-			buffers_freed++;
+			if (ULP_SKB_CB(skb)->flags & ULPCB_FLAG_TLS_HDR) {
+				tp->copied_seq += skb->len;
+				hws->rcvpld = skb->hdr_len;
+			} else {
+				tp->copied_seq += hws->rcvpld;
+			}
+			chtls_free_skb(sk, skb);
+			buffers_freed++;
 			hws->copied_seq = 0;
 			if (copied >= target &&
 			    !skb_peek(&sk->sk_receive_queue))
@@ -64,7 +64,7 @@ static void efi_retrieve_tpm2_eventlog_1_2(efi_system_table_t *sys_table_arg)
 	efi_guid_t tcg2_guid = EFI_TCG2_PROTOCOL_GUID;
 	efi_guid_t linux_eventlog_guid = LINUX_EFI_TPM_EVENT_LOG_GUID;
 	efi_status_t status;
-	efi_physical_addr_t log_location, log_last_entry;
+	efi_physical_addr_t log_location = 0, log_last_entry = 0;
 	struct linux_efi_tpm_eventlog *log_tbl = NULL;
 	unsigned long first_entry_addr, last_entry_addr;
 	size_t log_size, last_entry_size;
@@ -2158,10 +2158,18 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
 	switch (asic_type) {
 #if defined(CONFIG_DRM_AMD_DC)
 	case CHIP_BONAIRE:
-	case CHIP_HAWAII:
 	case CHIP_KAVERI:
 	case CHIP_KABINI:
 	case CHIP_MULLINS:
+		/*
+		 * We have systems in the wild with these ASICs that require
+		 * LVDS and VGA support which is not supported with DC.
+		 *
+		 * Fallback to the non-DC driver here by default so as not to
+		 * cause regressions.
+		 */
+		return amdgpu_dc > 0;
+	case CHIP_HAWAII:
 	case CHIP_CARRIZO:
 	case CHIP_STONEY:
 	case CHIP_POLARIS10:
@@ -762,8 +762,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
 	domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
 	if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
 		adev->vram_pin_size += amdgpu_bo_size(bo);
-		if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
-			adev->invisible_pin_size += amdgpu_bo_size(bo);
+		adev->invisible_pin_size += amdgpu_vram_mgr_bo_invisible_size(bo);
 	} else if (domain == AMDGPU_GEM_DOMAIN_GTT) {
 		adev->gart_pin_size += amdgpu_bo_size(bo);
 	}

@@ -790,25 +789,22 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo)
 	bo->pin_count--;
 	if (bo->pin_count)
 		return 0;

+	if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
+		adev->vram_pin_size -= amdgpu_bo_size(bo);
+		adev->invisible_pin_size -= amdgpu_vram_mgr_bo_invisible_size(bo);
+	} else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
+		adev->gart_pin_size -= amdgpu_bo_size(bo);
+	}
+
 	for (i = 0; i < bo->placement.num_placement; i++) {
 		bo->placements[i].lpfn = 0;
 		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
 	}
 	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
-	if (unlikely(r)) {
+	if (unlikely(r))
 		dev_err(adev->dev, "%p validate failed for unpin\n", bo);
-		goto error;
-	}

-	if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
-		adev->vram_pin_size -= amdgpu_bo_size(bo);
-		if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
-			adev->invisible_pin_size -= amdgpu_bo_size(bo);
-	} else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
-		adev->gart_pin_size -= amdgpu_bo_size(bo);
-	}
-
-error:
 	return r;
 }
@@ -73,6 +73,7 @@ bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_mem_reg *mem);
 uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man);
 int amdgpu_gtt_mgr_recover(struct ttm_mem_type_manager *man);

+u64 amdgpu_vram_mgr_bo_invisible_size(struct amdgpu_bo *bo);
 uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man);
 uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_mem_type_manager *man);
@@ -130,7 +130,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
 	unsigned version_major, version_minor, family_id;
 	int i, j, r;

-	INIT_DELAYED_WORK(&adev->uvd.inst->idle_work, amdgpu_uvd_idle_work_handler);
+	INIT_DELAYED_WORK(&adev->uvd.idle_work, amdgpu_uvd_idle_work_handler);

 	switch (adev->asic_type) {
 #ifdef CONFIG_DRM_AMDGPU_CIK

@@ -314,12 +314,12 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
 	void *ptr;
 	int i, j;

+	cancel_delayed_work_sync(&adev->uvd.idle_work);
+
 	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
 		if (adev->uvd.inst[j].vcpu_bo == NULL)
 			continue;

-		cancel_delayed_work_sync(&adev->uvd.inst[j].idle_work);
-
 		/* only valid for physical mode */
 		if (adev->asic_type < CHIP_POLARIS10) {
 			for (i = 0; i < adev->uvd.max_handles; ++i)

@@ -1145,7 +1145,7 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
 static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
 {
 	struct amdgpu_device *adev =
-		container_of(work, struct amdgpu_device, uvd.inst->idle_work.work);
+		container_of(work, struct amdgpu_device, uvd.idle_work.work);
 	unsigned fences = 0, i, j;

 	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {

@@ -1167,7 +1167,7 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
 					       AMD_CG_STATE_GATE);
 		}
 	} else {
-		schedule_delayed_work(&adev->uvd.inst->idle_work, UVD_IDLE_TIMEOUT);
+		schedule_delayed_work(&adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
 	}
 }

@@ -1179,7 +1179,7 @@ void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring)
 	if (amdgpu_sriov_vf(adev))
 		return;

-	set_clocks = !cancel_delayed_work_sync(&adev->uvd.inst->idle_work);
+	set_clocks = !cancel_delayed_work_sync(&adev->uvd.idle_work);
 	if (set_clocks) {
 		if (adev->pm.dpm_enabled) {
 			amdgpu_dpm_enable_uvd(adev, true);

@@ -1196,7 +1196,7 @@ void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring)
 void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring)
 {
 	if (!amdgpu_sriov_vf(ring->adev))
-		schedule_delayed_work(&ring->adev->uvd.inst->idle_work, UVD_IDLE_TIMEOUT);
+		schedule_delayed_work(&ring->adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
 }

 /**
@@ -44,7 +44,6 @@ struct amdgpu_uvd_inst {
 	void *saved_bo;
 	atomic_t handles[AMDGPU_MAX_UVD_HANDLES];
 	struct drm_file *filp[AMDGPU_MAX_UVD_HANDLES];
-	struct delayed_work idle_work;
 	struct amdgpu_ring ring;
 	struct amdgpu_ring ring_enc[AMDGPU_MAX_UVD_ENC_RINGS];
 	struct amdgpu_irq_src irq;

@@ -62,6 +61,7 @@ struct amdgpu_uvd {
 	bool address_64_bit;
 	bool use_ctx_buf;
 	struct amdgpu_uvd_inst inst[AMDGPU_MAX_UVD_INSTANCES];
+	struct delayed_work idle_work;
 };

 int amdgpu_uvd_sw_init(struct amdgpu_device *adev);