Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
The UDP reuseport conflict was a little bit tricky.
The net-next code, via bpf-next, extracted the reuseport handling
into a helper so that the BPF sk lookup code could invoke it.
At the same time, the reuseport handling of unconnected sockets
changed via commit efc6b6f6c3, which now carries the reuseport
result into the rest of the lookup loop if we do not return
immediately.
This requires moving the reuseport_has_conns() logic into the callers.
While we are here, get rid of inline directives as they do not belong
in foo.c files.
The other changes were cases of more straightforward overlapping
modifications.
Signed-off-by: David S. Miller <davem@davemloft.net>
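
The conflict resolution described above boils down to one pattern; here is a
rough C sketch of it, simplified from the description rather than copied from
the merged code (the helper name, its arguments and the surrounding scoring
loop are abbreviated for illustration):

    /*
     * Illustrative sketch only: reuseport selection lives in a helper so
     * the BPF sk lookup code can call it, while the caller now performs
     * the reuseport_has_conns() check itself and decides whether to
     * return immediately or carry the result through the rest of the
     * lookup loop.
     */
    static struct sock *lookup_reuseport(struct net *net, struct sock *sk,
                                         struct sk_buff *skb,
                                         __be32 saddr, __be16 sport,
                                         __be32 daddr, unsigned int hnum)
    {
            u32 hash;

            if (sk->sk_reuseport && sk->sk_state != TCP_ESTABLISHED) {
                    hash = udp_ehashfn(net, daddr, hnum, saddr, sport);
                    return reuseport_select_sock(sk, hash, skb,
                                                 sizeof(struct udphdr));
            }
            return NULL;
    }

    /* Caller side, inside the per-socket scoring loop: */
            if (score > badness) {
                    result = lookup_reuseport(net, sk, skb,
                                              saddr, sport, daddr, hnum);
                    /* Only short-circuit when the reuseport group has no
                     * connected sockets; otherwise carry the result into
                     * the rest of the loop.
                     */
                    if (result && !reuseport_has_conns(sk, false))
                            return result;

                    result = result ?: sk;
                    badness = score;
            }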
commit a57066b1a0

 .mailmap | 3
@@ -198,6 +198,9 @@ Maxime Ripard <mripard@kernel.org> <maxime.ripard@free-electrons.com>
 Mayuresh Janorkar <mayur@ti.com>
 Michael Buesch <m@bues.ch>
 Michel Dänzer <michel@tungstengraphics.com>
+Mike Rapoport <rppt@kernel.org> <mike@compulab.co.il>
+Mike Rapoport <rppt@kernel.org> <mike.rapoport@gmail.com>
+Mike Rapoport <rppt@kernel.org> <rppt@linux.ibm.com>
 Miodrag Dinic <miodrag.dinic@mips.com> <miodrag.dinic@imgtec.com>
 Miquel Raynal <miquel.raynal@bootlin.com> <miquel.raynal@free-electrons.com>
 Mitesh shah <mshah@teja.com>
@ -378,6 +378,8 @@ examples:
|
||||
- |
|
||||
sound {
|
||||
compatible = "simple-audio-card";
|
||||
#address-cells = <1>;
|
||||
#size-cells = <0>;
|
||||
|
||||
simple-audio-card,name = "rsnd-ak4643";
|
||||
simple-audio-card,format = "left_j";
|
||||
@ -391,10 +393,12 @@ examples:
|
||||
"ak4642 Playback", "DAI1 Playback";
|
||||
|
||||
dpcmcpu: simple-audio-card,cpu@0 {
|
||||
reg = <0>;
|
||||
sound-dai = <&rcar_sound 0>;
|
||||
};
|
||||
|
||||
simple-audio-card,cpu@1 {
|
||||
reg = <1>;
|
||||
sound-dai = <&rcar_sound 1>;
|
||||
};
|
||||
|
||||
@ -418,6 +422,8 @@ examples:
|
||||
- |
|
||||
sound {
|
||||
compatible = "simple-audio-card";
|
||||
#address-cells = <1>;
|
||||
#size-cells = <0>;
|
||||
|
||||
simple-audio-card,routing =
|
||||
"pcm3168a Playback", "DAI1 Playback",
|
||||
@ -426,6 +432,7 @@ examples:
|
||||
"pcm3168a Playback", "DAI4 Playback";
|
||||
|
||||
simple-audio-card,dai-link@0 {
|
||||
reg = <0>;
|
||||
format = "left_j";
|
||||
bitclock-master = <&sndcpu0>;
|
||||
frame-master = <&sndcpu0>;
|
||||
@ -439,22 +446,23 @@ examples:
|
||||
};
|
||||
|
||||
simple-audio-card,dai-link@1 {
|
||||
reg = <1>;
|
||||
format = "i2s";
|
||||
bitclock-master = <&sndcpu1>;
|
||||
frame-master = <&sndcpu1>;
|
||||
|
||||
convert-channels = <8>; /* TDM Split */
|
||||
|
||||
sndcpu1: cpu@0 {
|
||||
sndcpu1: cpu0 {
|
||||
sound-dai = <&rcar_sound 1>;
|
||||
};
|
||||
cpu@1 {
|
||||
cpu1 {
|
||||
sound-dai = <&rcar_sound 2>;
|
||||
};
|
||||
cpu@2 {
|
||||
cpu2 {
|
||||
sound-dai = <&rcar_sound 3>;
|
||||
};
|
||||
cpu@3 {
|
||||
cpu3 {
|
||||
sound-dai = <&rcar_sound 4>;
|
||||
};
|
||||
codec {
|
||||
@ -466,6 +474,7 @@ examples:
|
||||
};
|
||||
|
||||
simple-audio-card,dai-link@2 {
|
||||
reg = <2>;
|
||||
format = "i2s";
|
||||
bitclock-master = <&sndcpu2>;
|
||||
frame-master = <&sndcpu2>;
|
||||
|
@@ -23,6 +23,7 @@ PTP hardware clock infrastructure for Linux
   + Ancillary clock features
     - Time stamp external events
     - Period output signals configurable from user space
+    - Low Pass Filter (LPF) access from user space
     - Synchronization of the Linux system time via the PPS subsystem
 
 PTP hardware clock kernel API
@@ -94,3 +95,14 @@ Supported hardware
 
   - Auxiliary Slave/Master Mode Snapshot (optional interrupt)
   - Target Time (optional interrupt)
+
+* Renesas (IDT) ClockMatrix™
+
+  - Up to 4 independent PHC channels
+  - Integrated low pass filter (LPF), access via .adjPhase (compliant to ITU-T G.8273.2)
+  - Programmable output periodic signals
+  - Programmable inputs can time stamp external triggers
+  - Driver and/or hardware configuration through firmware (idtcm.bin)
+  - LPF settings (bandwidth, phase limiting, automatic holdover, physical layer assist (per ITU-T G.8273.2))
+  - Programmable output PTP clocks, any frequency up to 1GHz (to other PHY/MAC time stampers, refclk to ASSPs/SoCs/FPGAs)
+  - Lock to GNSS input, automatic switching between GNSS and user-space PHC control (optional)
@@ -560,8 +560,8 @@ When the NFS export feature is enabled, all directory index entries are
 verified on mount time to check that upper file handles are not stale.
 This verification may cause significant overhead in some cases.
 
-Note: the mount options index=off,nfs_export=on are conflicting and will
-result in an error.
+Note: the mount options index=off,nfs_export=on are conflicting for a
+read-write mount and will result in an error.
 
 
 Testsuite
@@ -26,7 +26,7 @@ Usage
 
 1) Device creation & deletion
 
-a) ip link add dev bareudp0 type bareudp dstport 6635 ethertype 0x8847.
+a) ip link add dev bareudp0 type bareudp dstport 6635 ethertype mpls_uc
 
 This creates a bareudp tunnel device which tunnels L3 traffic with ethertype
 0x8847 (MPLS traffic). The destination port of the UDP header will be set to
@@ -34,14 +34,21 @@ Usage
 
 b) ip link delete bareudp0
 
-2) Device creation with multiple proto mode enabled
+2) Device creation with multiproto mode enabled
 
-There are two ways to create a bareudp device for MPLS & IP with multiproto mode
-enabled.
+The multiproto mode allows bareudp tunnels to handle several protocols of the
+same family. It is currently only available for IP and MPLS. This mode has to
+be enabled explicitly with the "multiproto" flag.
 
-a) ip link add dev bareudp0 type bareudp dstport 6635 ethertype 0x8847 multiproto
+a) ip link add dev bareudp0 type bareudp dstport 6635 ethertype ipv4 multiproto
 
-b) ip link add dev bareudp0 type bareudp dstport 6635 ethertype mpls
+For an IPv4 tunnel the multiproto mode allows the tunnel to also handle
+IPv6.
+
+b) ip link add dev bareudp0 type bareudp dstport 6635 ethertype mpls_uc multiproto
+
+For MPLS, the multiproto mode allows the tunnel to handle both unicast
+and multicast MPLS packets.
 
 3) Device Usage
 
@@ -213,7 +213,7 @@ request buffers are not in memory. The operating system handles the fault by
 updating CSB with the following data:
 
 	csb.flags = CSB_V;
-	csb.cc = CSB_CC_TRANSLATION;
+	csb.cc = CSB_CC_FAULT_ADDRESS;
 	csb.ce = CSB_CE_TERMINATION;
 	csb.address = fault_address;
 
 MAINTAINERS | 28
@ -5022,7 +5022,6 @@ F: drivers/mfd/da91??-*.c
|
||||
F: drivers/pinctrl/pinctrl-da90??.c
|
||||
F: drivers/power/supply/da9052-battery.c
|
||||
F: drivers/power/supply/da91??-*.c
|
||||
F: drivers/regulator/da903x.c
|
||||
F: drivers/regulator/da9???-regulator.[ch]
|
||||
F: drivers/regulator/slg51000-regulator.[ch]
|
||||
F: drivers/rtc/rtc-da90??.c
|
||||
@@ -5112,7 +5111,7 @@ M: Vinod Koul <vkoul@kernel.org>
 L: dmaengine@vger.kernel.org
 S: Maintained
 Q: https://patchwork.kernel.org/project/linux-dmaengine/list/
-T: git git://git.infradead.org/users/vkoul/slave-dma.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine.git
 F: Documentation/devicetree/bindings/dma/
 F: Documentation/driver-api/dmaengine/
 F: drivers/dma/
@ -6957,6 +6956,7 @@ M: Timur Tabi <timur@kernel.org>
|
||||
M: Nicolin Chen <nicoleotsuka@gmail.com>
|
||||
M: Xiubo Li <Xiubo.Lee@gmail.com>
|
||||
R: Fabio Estevam <festevam@gmail.com>
|
||||
R: Shengjiu Wang <shengjiu.wang@gmail.com>
|
||||
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
|
||||
L: linuxppc-dev@lists.ozlabs.org
|
||||
S: Maintained
|
||||
@ -9295,6 +9295,17 @@ F: Documentation/kbuild/kconfig*
|
||||
F: scripts/Kconfig.include
|
||||
F: scripts/kconfig/
|
||||
|
||||
KCOV
|
||||
R: Dmitry Vyukov <dvyukov@google.com>
|
||||
R: Andrey Konovalov <andreyknvl@google.com>
|
||||
L: kasan-dev@googlegroups.com
|
||||
S: Maintained
|
||||
F: Documentation/dev-tools/kcov.rst
|
||||
F: include/linux/kcov.h
|
||||
F: include/uapi/linux/kcov.h
|
||||
F: kernel/kcov.c
|
||||
F: scripts/Makefile.kcov
|
||||
|
||||
KCSAN
|
||||
M: Marco Elver <elver@google.com>
|
||||
R: Dmitry Vyukov <dvyukov@google.com>
|
||||
@ -11230,7 +11241,7 @@ S: Maintained
|
||||
F: drivers/crypto/atmel-ecc.*
|
||||
|
||||
MICROCHIP I2C DRIVER
|
||||
M: Ludovic Desroches <ludovic.desroches@microchip.com>
|
||||
M: Codrin Ciubotariu <codrin.ciubotariu@microchip.com>
|
||||
L: linux-i2c@vger.kernel.org
|
||||
S: Supported
|
||||
F: drivers/i2c/busses/i2c-at91-*.c
|
||||
@ -11323,17 +11334,17 @@ F: drivers/iio/adc/at91-sama5d2_adc.c
|
||||
F: include/dt-bindings/iio/adc/at91-sama5d2_adc.h
|
||||
|
||||
MICROCHIP SAMA5D2-COMPATIBLE SHUTDOWN CONTROLLER
|
||||
M: Nicolas Ferre <nicolas.ferre@microchip.com>
|
||||
M: Claudiu Beznea <claudiu.beznea@microchip.com>
|
||||
S: Supported
|
||||
F: drivers/power/reset/at91-sama5d2_shdwc.c
|
||||
|
||||
MICROCHIP SPI DRIVER
|
||||
M: Nicolas Ferre <nicolas.ferre@microchip.com>
|
||||
M: Tudor Ambarus <tudor.ambarus@microchip.com>
|
||||
S: Supported
|
||||
F: drivers/spi/spi-atmel.*
|
||||
|
||||
MICROCHIP SSC DRIVER
|
||||
M: Nicolas Ferre <nicolas.ferre@microchip.com>
|
||||
M: Codrin Ciubotariu <codrin.ciubotariu@microchip.com>
|
||||
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
|
||||
S: Supported
|
||||
F: drivers/misc/atmel-ssc.c
|
||||
@ -14859,6 +14870,7 @@ F: drivers/s390/block/dasd*
|
||||
F: include/linux/dasd_mod.h
|
||||
|
||||
S390 IOMMU (PCI)
|
||||
M: Matthew Rosato <mjrosato@linux.ibm.com>
|
||||
M: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
|
||||
L: linux-s390@vger.kernel.org
|
||||
S: Supported
|
||||
@ -17503,7 +17515,7 @@ F: Documentation/admin-guide/ufs.rst
|
||||
F: fs/ufs/
|
||||
|
||||
UHID USERSPACE HID IO DRIVER
|
||||
M: David Herrmann <dh.herrmann@googlemail.com>
|
||||
M: David Rheinsberg <david.rheinsberg@gmail.com>
|
||||
L: linux-input@vger.kernel.org
|
||||
S: Maintained
|
||||
F: drivers/hid/uhid.c
|
||||
@ -18462,7 +18474,7 @@ S: Maintained
|
||||
F: drivers/rtc/rtc-sd3078.c
|
||||
|
||||
WIIMOTE HID DRIVER
|
||||
M: David Herrmann <dh.herrmann@googlemail.com>
|
||||
M: David Rheinsberg <david.rheinsberg@gmail.com>
|
||||
L: linux-input@vger.kernel.org
|
||||
S: Maintained
|
||||
F: drivers/hid/hid-wiimote*
|
||||
|
 Makefile | 2
@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 8
 SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc6
 NAME = Kleptomaniac Octopus
 
 # *DOCUMENTATION*
@ -1540,8 +1540,9 @@
|
||||
reg = <0xcc020 0x4>;
|
||||
reg-names = "rev";
|
||||
/* Domains (P, C): per_pwrdm, l4ls_clkdm */
|
||||
clocks = <&l4ls_clkctrl AM4_L4LS_D_CAN0_CLKCTRL 0>;
|
||||
clock-names = "fck";
|
||||
clocks = <&l4ls_clkctrl AM4_L4LS_D_CAN0_CLKCTRL 0>,
|
||||
<&dcan0_fck>;
|
||||
clock-names = "fck", "osc";
|
||||
#address-cells = <1>;
|
||||
#size-cells = <1>;
|
||||
ranges = <0x0 0xcc000 0x2000>;
|
||||
@ -1549,6 +1550,8 @@
|
||||
dcan0: can@0 {
|
||||
compatible = "ti,am4372-d_can", "ti,am3352-d_can";
|
||||
reg = <0x0 0x2000>;
|
||||
clocks = <&dcan0_fck>;
|
||||
clock-names = "fck";
|
||||
syscon-raminit = <&scm_conf 0x644 0>;
|
||||
interrupts = <GIC_SPI 52 IRQ_TYPE_LEVEL_HIGH>;
|
||||
status = "disabled";
|
||||
@ -1560,8 +1563,9 @@
|
||||
reg = <0xd0020 0x4>;
|
||||
reg-names = "rev";
|
||||
/* Domains (P, C): per_pwrdm, l4ls_clkdm */
|
||||
clocks = <&l4ls_clkctrl AM4_L4LS_D_CAN1_CLKCTRL 0>;
|
||||
clock-names = "fck";
|
||||
clocks = <&l4ls_clkctrl AM4_L4LS_D_CAN1_CLKCTRL 0>,
|
||||
<&dcan1_fck>;
|
||||
clock-names = "fck", "osc";
|
||||
#address-cells = <1>;
|
||||
#size-cells = <1>;
|
||||
ranges = <0x0 0xd0000 0x2000>;
|
||||
@ -1569,6 +1573,8 @@
|
||||
dcan1: can@0 {
|
||||
compatible = "ti,am4372-d_can", "ti,am3352-d_can";
|
||||
reg = <0x0 0x2000>;
|
||||
clocks = <&dcan1_fck>;
|
||||
clock-name = "fck";
|
||||
syscon-raminit = <&scm_conf 0x644 1>;
|
||||
interrupts = <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>;
|
||||
status = "disabled";
|
||||
|
@ -110,7 +110,7 @@
|
||||
simple-audio-card,frame-master = <&sound_codec>;
|
||||
|
||||
sound_cpu: simple-audio-card,cpu {
|
||||
sound-dai = <&ssi2>;
|
||||
sound-dai = <&ssi1>;
|
||||
};
|
||||
|
||||
sound_codec: simple-audio-card,codec {
|
||||
|
@ -11,7 +11,7 @@
|
||||
#size-cells = <1>;
|
||||
interrupt-parent = <&gic>;
|
||||
|
||||
L2: l2-cache-controller@c4200000 {
|
||||
L2: cache-controller@c4200000 {
|
||||
compatible = "arm,pl310-cache";
|
||||
reg = <0xc4200000 0x1000>;
|
||||
cache-unified;
|
||||
|
@ -105,6 +105,14 @@
|
||||
linux,code = <SW_FRONT_PROXIMITY>;
|
||||
linux,can-disable;
|
||||
};
|
||||
|
||||
machine_cover {
|
||||
label = "Machine Cover";
|
||||
gpios = <&gpio6 0 GPIO_ACTIVE_LOW>; /* 160 */
|
||||
linux,input-type = <EV_SW>;
|
||||
linux,code = <SW_MACHINE_COVER>;
|
||||
linux,can-disable;
|
||||
};
|
||||
};
|
||||
|
||||
isp1707: isp1707 {
|
||||
@ -819,10 +827,6 @@
|
||||
pinctrl-0 = <&mmc1_pins>;
|
||||
vmmc-supply = <&vmmc1>;
|
||||
bus-width = <4>;
|
||||
/* For debugging, it is often good idea to remove this GPIO.
|
||||
It means you can remove back cover (to reboot by removing
|
||||
battery) and still use the MMC card. */
|
||||
cd-gpios = <&gpio6 0 GPIO_ACTIVE_LOW>; /* 160 */
|
||||
};
|
||||
|
||||
/* most boards use vaux3, only some old versions use vmmc2 instead */
|
||||
|
@ -726,7 +726,7 @@
|
||||
};
|
||||
};
|
||||
|
||||
L2: l2-cache@fffef000 {
|
||||
L2: cache-controller@fffef000 {
|
||||
compatible = "arm,pl310-cache";
|
||||
reg = <0xfffef000 0x1000>;
|
||||
interrupts = <0 38 0x04>;
|
||||
|
@ -636,7 +636,7 @@
|
||||
reg = <0xffcfb100 0x80>;
|
||||
};
|
||||
|
||||
L2: l2-cache@fffff000 {
|
||||
L2: cache-controller@fffff000 {
|
||||
compatible = "arm,pl310-cache";
|
||||
reg = <0xfffff000 0x1000>;
|
||||
interrupts = <0 18 IRQ_TYPE_LEVEL_HIGH>;
|
||||
|
@ -289,6 +289,6 @@ struct platform_device *__init imx_add_spi_imx(
|
||||
const struct spi_imx_master *pdata);
|
||||
|
||||
struct platform_device *imx_add_imx_dma(char *name, resource_size_t iobase,
|
||||
int irq, int irq_err);
|
||||
int irq);
|
||||
struct platform_device *imx_add_imx_sdma(char *name,
|
||||
resource_size_t iobase, int irq, struct sdma_platform_data *pdata);
|
||||
|
@ -24,7 +24,8 @@ struct platform_device *__init mxc_register_gpio(char *name, int id,
|
||||
.flags = IORESOURCE_IRQ,
|
||||
},
|
||||
};
|
||||
unsigned int nres;
|
||||
|
||||
return platform_device_register_resndata(&mxc_aips_bus,
|
||||
name, id, res, ARRAY_SIZE(res), NULL, 0);
|
||||
nres = irq_high ? ARRAY_SIZE(res) : ARRAY_SIZE(res) - 1;
|
||||
return platform_device_register_resndata(&mxc_aips_bus, name, id, res, nres, NULL, 0);
|
||||
}
|
||||
|
@ -6,7 +6,7 @@
|
||||
#include "devices-common.h"
|
||||
|
||||
struct platform_device __init __maybe_unused *imx_add_imx_dma(char *name,
|
||||
resource_size_t iobase, int irq, int irq_err)
|
||||
resource_size_t iobase, int irq)
|
||||
{
|
||||
struct resource res[] = {
|
||||
{
|
||||
@ -17,10 +17,6 @@ struct platform_device __init __maybe_unused *imx_add_imx_dma(char *name,
|
||||
.start = irq,
|
||||
.end = irq,
|
||||
.flags = IORESOURCE_IRQ,
|
||||
}, {
|
||||
.start = irq_err,
|
||||
.end = irq_err,
|
||||
.flags = IORESOURCE_IRQ,
|
||||
},
|
||||
};
|
||||
|
||||
|
@ -78,8 +78,7 @@ void __init imx21_soc_init(void)
|
||||
mxc_register_gpio("imx21-gpio", 5, MX21_GPIO6_BASE_ADDR, SZ_256, MX21_INT_GPIO, 0);
|
||||
|
||||
pinctrl_provide_dummies();
|
||||
imx_add_imx_dma("imx21-dma", MX21_DMA_BASE_ADDR,
|
||||
MX21_INT_DMACH0, 0); /* No ERR irq */
|
||||
imx_add_imx_dma("imx21-dma", MX21_DMA_BASE_ADDR, MX21_INT_DMACH0);
|
||||
platform_device_register_simple("imx21-audmux", 0, imx21_audmux_res,
|
||||
ARRAY_SIZE(imx21_audmux_res));
|
||||
}
|
||||
|
@ -79,8 +79,7 @@ void __init imx27_soc_init(void)
|
||||
mxc_register_gpio("imx21-gpio", 5, MX27_GPIO6_BASE_ADDR, SZ_256, MX27_INT_GPIO, 0);
|
||||
|
||||
pinctrl_provide_dummies();
|
||||
imx_add_imx_dma("imx27-dma", MX27_DMA_BASE_ADDR,
|
||||
MX27_INT_DMACH0, 0); /* No ERR irq */
|
||||
imx_add_imx_dma("imx27-dma", MX27_DMA_BASE_ADDR, MX27_INT_DMACH0);
|
||||
/* imx27 has the imx21 type audmux */
|
||||
platform_device_register_simple("imx21-audmux", 0, imx27_audmux_res,
|
||||
ARRAY_SIZE(imx27_audmux_res));
|
||||
|
@ -3435,7 +3435,7 @@ static int omap_hwmod_allocate_module(struct device *dev, struct omap_hwmod *oh,
|
||||
regs = ioremap(data->module_pa,
|
||||
data->module_size);
|
||||
if (!regs)
|
||||
return -ENOMEM;
|
||||
goto out_free_sysc;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -3445,13 +3445,13 @@ static int omap_hwmod_allocate_module(struct device *dev, struct omap_hwmod *oh,
|
||||
if (oh->class->name && strcmp(oh->class->name, data->name)) {
|
||||
class = kmemdup(oh->class, sizeof(*oh->class), GFP_KERNEL);
|
||||
if (!class)
|
||||
return -ENOMEM;
|
||||
goto out_unmap;
|
||||
}
|
||||
|
||||
if (list_empty(&oh->slave_ports)) {
|
||||
oi = kcalloc(1, sizeof(*oi), GFP_KERNEL);
|
||||
if (!oi)
|
||||
return -ENOMEM;
|
||||
goto out_free_class;
|
||||
|
||||
/*
|
||||
* Note that we assume interconnect interface clocks will be
|
||||
@ -3478,6 +3478,14 @@ static int omap_hwmod_allocate_module(struct device *dev, struct omap_hwmod *oh,
|
||||
spin_unlock_irqrestore(&oh->_lock, flags);
|
||||
|
||||
return 0;
|
||||
|
||||
out_free_class:
|
||||
kfree(class);
|
||||
out_unmap:
|
||||
iounmap(regs);
|
||||
out_free_sysc:
|
||||
kfree(sysc);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
static const struct omap_hwmod_reset omap24xx_reset_quirks[] = {
|
||||
|
@ -137,7 +137,7 @@ export TEXT_OFFSET
|
||||
|
||||
core-y += arch/arm64/
|
||||
libs-y := arch/arm64/lib/ $(libs-y)
|
||||
core-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a
|
||||
libs-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a
|
||||
|
||||
# Default target when executing plain make
|
||||
boot := arch/arm64/boot
|
||||
|
@ -77,7 +77,7 @@
|
||||
method = "smc";
|
||||
};
|
||||
|
||||
intc: intc@fffc1000 {
|
||||
intc: interrupt-controller@fffc1000 {
|
||||
compatible = "arm,gic-400", "arm,cortex-a15-gic";
|
||||
#interrupt-cells = <3>;
|
||||
interrupt-controller;
|
||||
@ -302,7 +302,7 @@
|
||||
status = "disabled";
|
||||
};
|
||||
|
||||
nand: nand@ffb90000 {
|
||||
nand: nand-controller@ffb90000 {
|
||||
#address-cells = <1>;
|
||||
#size-cells = <0>;
|
||||
compatible = "altr,socfpga-denali-nand";
|
||||
@ -445,7 +445,7 @@
|
||||
clock-names = "timer";
|
||||
};
|
||||
|
||||
uart0: serial0@ffc02000 {
|
||||
uart0: serial@ffc02000 {
|
||||
compatible = "snps,dw-apb-uart";
|
||||
reg = <0xffc02000 0x100>;
|
||||
interrupts = <0 108 4>;
|
||||
@ -456,7 +456,7 @@
|
||||
status = "disabled";
|
||||
};
|
||||
|
||||
uart1: serial1@ffc02100 {
|
||||
uart1: serial@ffc02100 {
|
||||
compatible = "snps,dw-apb-uart";
|
||||
reg = <0xffc02100 0x100>;
|
||||
interrupts = <0 109 4>;
|
||||
|
@ -155,6 +155,7 @@
|
||||
};
|
||||
|
||||
&qspi {
|
||||
status = "okay";
|
||||
flash@0 {
|
||||
#address-cells = <1>;
|
||||
#size-cells = <1>;
|
||||
|
@ -188,6 +188,7 @@
|
||||
};
|
||||
|
||||
&qspi {
|
||||
status = "okay";
|
||||
flash@0 {
|
||||
#address-cells = <1>;
|
||||
#size-cells = <1>;
|
||||
@ -211,12 +212,12 @@
|
||||
|
||||
qspi_boot: partition@0 {
|
||||
label = "Boot and fpga data";
|
||||
reg = <0x0 0x034B0000>;
|
||||
reg = <0x0 0x03FE0000>;
|
||||
};
|
||||
|
||||
qspi_rootfs: partition@4000000 {
|
||||
qspi_rootfs: partition@3FE0000 {
|
||||
label = "Root Filesystem - JFFS2";
|
||||
reg = <0x034B0000 0x0EB50000>;
|
||||
reg = <0x03FE0000 0x0C020000>;
|
||||
};
|
||||
};
|
||||
};
|
||||
|
@ -10,7 +10,7 @@
|
||||
#include <dt-bindings/input/input.h>
|
||||
#include <dt-bindings/sound/meson-aiu.h>
|
||||
|
||||
#include "meson-gxl-s905x.dtsi"
|
||||
#include "meson-gxl-s805x.dtsi"
|
||||
|
||||
/ {
|
||||
compatible = "libretech,aml-s805x-ac", "amlogic,s805x",
|
||||
|
@ -9,7 +9,7 @@
|
||||
|
||||
#include <dt-bindings/input/input.h>
|
||||
|
||||
#include "meson-gxl-s905x.dtsi"
|
||||
#include "meson-gxl-s805x.dtsi"
|
||||
|
||||
/ {
|
||||
compatible = "amlogic,p241", "amlogic,s805x", "amlogic,meson-gxl";
|
||||
|
 arch/arm64/boot/dts/amlogic/meson-gxl-s805x.dtsi (new file) | 24
@ -0,0 +1,24 @@
|
||||
// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
|
||||
/*
|
||||
* Copyright (c) 2020 BayLibre SAS
|
||||
* Author: Neil Armstrong <narmstrong@baylibre.com>
|
||||
*/
|
||||
|
||||
#include "meson-gxl-s905x.dtsi"
|
||||
|
||||
/ {
|
||||
compatible = "amlogic,s805x", "amlogic,meson-gxl";
|
||||
};
|
||||
|
||||
/* The S805X Package doesn't seem to handle the 744MHz OPP correctly */
|
||||
&mali {
|
||||
assigned-clocks = <&clkc CLKID_MALI_0_SEL>,
|
||||
<&clkc CLKID_MALI_0>,
|
||||
<&clkc CLKID_MALI>; /* Glitch free mux */
|
||||
assigned-clock-parents = <&clkc CLKID_FCLK_DIV3>,
|
||||
<0>, /* Do Nothing */
|
||||
<&clkc CLKID_MALI_0>;
|
||||
assigned-clock-rates = <0>, /* Do Nothing */
|
||||
<666666666>,
|
||||
<0>; /* Do Nothing */
|
||||
};
|
@ -337,6 +337,11 @@
|
||||
};
|
||||
};
|
||||
|
||||
&hwrng {
|
||||
clocks = <&clkc CLKID_RNG0>;
|
||||
clock-names = "core";
|
||||
};
|
||||
|
||||
&i2c_A {
|
||||
clocks = <&clkc CLKID_I2C>;
|
||||
};
|
||||
|
@ -98,6 +98,7 @@
|
||||
};
|
||||
|
||||
&qspi {
|
||||
status = "okay";
|
||||
flash@0 {
|
||||
#address-cells = <1>;
|
||||
#size-cells = <1>;
|
||||
|
@ -454,10 +454,7 @@
|
||||
status = "okay";
|
||||
phy-mode = "2500base-x";
|
||||
phys = <&cp1_comphy5 2>;
|
||||
fixed-link {
|
||||
speed = <2500>;
|
||||
full-duplex;
|
||||
};
|
||||
managed = "in-band-status";
|
||||
};
|
||||
|
||||
&cp1_spi1 {
|
||||
|
@@ -194,7 +194,7 @@ CONFIG_HOTPLUG_PCI=y
 CONFIG_HOTPLUG_PCI_ACPI=y
 CONFIG_PCI_AARDVARK=y
 CONFIG_PCI_TEGRA=y
-CONFIG_PCIE_RCAR=y
+CONFIG_PCIE_RCAR_HOST=y
 CONFIG_PCI_HOST_GENERIC=y
 CONFIG_PCI_XGENE=y
 CONFIG_PCIE_ALTERA=y
@ -109,6 +109,8 @@ void disable_debug_monitors(enum dbg_active_el el);
|
||||
|
||||
void user_rewind_single_step(struct task_struct *task);
|
||||
void user_fastforward_single_step(struct task_struct *task);
|
||||
void user_regs_reset_single_step(struct user_pt_regs *regs,
|
||||
struct task_struct *task);
|
||||
|
||||
void kernel_enable_single_step(struct pt_regs *regs);
|
||||
void kernel_disable_single_step(void);
|
||||
|
@ -34,6 +34,10 @@ static inline long syscall_get_error(struct task_struct *task,
|
||||
struct pt_regs *regs)
|
||||
{
|
||||
unsigned long error = regs->regs[0];
|
||||
|
||||
if (is_compat_thread(task_thread_info(task)))
|
||||
error = sign_extend64(error, 31);
|
||||
|
||||
return IS_ERR_VALUE(error) ? error : 0;
|
||||
}
|
||||
|
||||
@ -47,7 +51,13 @@ static inline void syscall_set_return_value(struct task_struct *task,
|
||||
struct pt_regs *regs,
|
||||
int error, long val)
|
||||
{
|
||||
regs->regs[0] = (long) error ? error : val;
|
||||
if (error)
|
||||
val = error;
|
||||
|
||||
if (is_compat_thread(task_thread_info(task)))
|
||||
val = lower_32_bits(val);
|
||||
|
||||
regs->regs[0] = val;
|
||||
}
|
||||
|
||||
#define SYSCALL_MAX_ARGS 6
|
||||
|
@ -93,6 +93,7 @@ void arch_release_task_struct(struct task_struct *tsk);
|
||||
#define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU)
|
||||
#define _TIF_UPROBE (1 << TIF_UPROBE)
|
||||
#define _TIF_FSCHECK (1 << TIF_FSCHECK)
|
||||
#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
|
||||
#define _TIF_32BIT (1 << TIF_32BIT)
|
||||
#define _TIF_SVE (1 << TIF_SVE)
|
||||
|
||||
|
@ -141,17 +141,20 @@ postcore_initcall(debug_monitors_init);
|
||||
/*
|
||||
* Single step API and exception handling.
|
||||
*/
|
||||
static void set_regs_spsr_ss(struct pt_regs *regs)
|
||||
static void set_user_regs_spsr_ss(struct user_pt_regs *regs)
|
||||
{
|
||||
regs->pstate |= DBG_SPSR_SS;
|
||||
}
|
||||
NOKPROBE_SYMBOL(set_regs_spsr_ss);
|
||||
NOKPROBE_SYMBOL(set_user_regs_spsr_ss);
|
||||
|
||||
static void clear_regs_spsr_ss(struct pt_regs *regs)
|
||||
static void clear_user_regs_spsr_ss(struct user_pt_regs *regs)
|
||||
{
|
||||
regs->pstate &= ~DBG_SPSR_SS;
|
||||
}
|
||||
NOKPROBE_SYMBOL(clear_regs_spsr_ss);
|
||||
NOKPROBE_SYMBOL(clear_user_regs_spsr_ss);
|
||||
|
||||
#define set_regs_spsr_ss(r) set_user_regs_spsr_ss(&(r)->user_regs)
|
||||
#define clear_regs_spsr_ss(r) clear_user_regs_spsr_ss(&(r)->user_regs)
|
||||
|
||||
static DEFINE_SPINLOCK(debug_hook_lock);
|
||||
static LIST_HEAD(user_step_hook);
|
||||
@ -391,17 +394,26 @@ void user_rewind_single_step(struct task_struct *task)
|
||||
* If single step is active for this thread, then set SPSR.SS
|
||||
* to 1 to avoid returning to the active-pending state.
|
||||
*/
|
||||
if (test_ti_thread_flag(task_thread_info(task), TIF_SINGLESTEP))
|
||||
if (test_tsk_thread_flag(task, TIF_SINGLESTEP))
|
||||
set_regs_spsr_ss(task_pt_regs(task));
|
||||
}
|
||||
NOKPROBE_SYMBOL(user_rewind_single_step);
|
||||
|
||||
void user_fastforward_single_step(struct task_struct *task)
|
||||
{
|
||||
if (test_ti_thread_flag(task_thread_info(task), TIF_SINGLESTEP))
|
||||
if (test_tsk_thread_flag(task, TIF_SINGLESTEP))
|
||||
clear_regs_spsr_ss(task_pt_regs(task));
|
||||
}
|
||||
|
||||
void user_regs_reset_single_step(struct user_pt_regs *regs,
|
||||
struct task_struct *task)
|
||||
{
|
||||
if (test_tsk_thread_flag(task, TIF_SINGLESTEP))
|
||||
set_user_regs_spsr_ss(regs);
|
||||
else
|
||||
clear_user_regs_spsr_ss(regs);
|
||||
}
|
||||
|
||||
/* Kernel API */
|
||||
void kernel_enable_single_step(struct pt_regs *regs)
|
||||
{
|
||||
|
@ -1811,19 +1811,42 @@ static void tracehook_report_syscall(struct pt_regs *regs,
|
||||
unsigned long saved_reg;
|
||||
|
||||
/*
|
||||
* A scratch register (ip(r12) on AArch32, x7 on AArch64) is
|
||||
* used to denote syscall entry/exit:
|
||||
* We have some ABI weirdness here in the way that we handle syscall
|
||||
* exit stops because we indicate whether or not the stop has been
|
||||
* signalled from syscall entry or syscall exit by clobbering a general
|
||||
* purpose register (ip/r12 for AArch32, x7 for AArch64) in the tracee
|
||||
* and restoring its old value after the stop. This means that:
|
||||
*
|
||||
* - Any writes by the tracer to this register during the stop are
|
||||
* ignored/discarded.
|
||||
*
|
||||
* - The actual value of the register is not available during the stop,
|
||||
* so the tracer cannot save it and restore it later.
|
||||
*
|
||||
* - Syscall stops behave differently to seccomp and pseudo-step traps
|
||||
* (the latter do not nobble any registers).
|
||||
*/
|
||||
regno = (is_compat_task() ? 12 : 7);
|
||||
saved_reg = regs->regs[regno];
|
||||
regs->regs[regno] = dir;
|
||||
|
||||
if (dir == PTRACE_SYSCALL_EXIT)
|
||||
if (dir == PTRACE_SYSCALL_ENTER) {
|
||||
if (tracehook_report_syscall_entry(regs))
|
||||
forget_syscall(regs);
|
||||
regs->regs[regno] = saved_reg;
|
||||
} else if (!test_thread_flag(TIF_SINGLESTEP)) {
|
||||
tracehook_report_syscall_exit(regs, 0);
|
||||
else if (tracehook_report_syscall_entry(regs))
|
||||
forget_syscall(regs);
|
||||
regs->regs[regno] = saved_reg;
|
||||
} else {
|
||||
regs->regs[regno] = saved_reg;
|
||||
|
||||
regs->regs[regno] = saved_reg;
|
||||
/*
|
||||
* Signal a pseudo-step exception since we are stepping but
|
||||
* tracer modifications to the registers may have rewound the
|
||||
* state machine.
|
||||
*/
|
||||
tracehook_report_syscall_exit(regs, 1);
|
||||
}
|
||||
}
|
||||
|
||||
int syscall_trace_enter(struct pt_regs *regs)
|
||||
@ -1833,12 +1856,12 @@ int syscall_trace_enter(struct pt_regs *regs)
|
||||
if (flags & (_TIF_SYSCALL_EMU | _TIF_SYSCALL_TRACE)) {
|
||||
tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);
|
||||
if (!in_syscall(regs) || (flags & _TIF_SYSCALL_EMU))
|
||||
return -1;
|
||||
return NO_SYSCALL;
|
||||
}
|
||||
|
||||
/* Do the secure computing after ptrace; failures should be fast. */
|
||||
if (secure_computing() == -1)
|
||||
return -1;
|
||||
return NO_SYSCALL;
|
||||
|
||||
if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
|
||||
trace_sys_enter(regs, regs->syscallno);
|
||||
@ -1851,12 +1874,14 @@ int syscall_trace_enter(struct pt_regs *regs)
|
||||
|
||||
void syscall_trace_exit(struct pt_regs *regs)
|
||||
{
|
||||
unsigned long flags = READ_ONCE(current_thread_info()->flags);
|
||||
|
||||
audit_syscall_exit(regs);
|
||||
|
||||
if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
|
||||
if (flags & _TIF_SYSCALL_TRACEPOINT)
|
||||
trace_sys_exit(regs, regs_return_value(regs));
|
||||
|
||||
if (test_thread_flag(TIF_SYSCALL_TRACE))
|
||||
if (flags & (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP))
|
||||
tracehook_report_syscall(regs, PTRACE_SYSCALL_EXIT);
|
||||
|
||||
rseq_syscall(regs);
|
||||
@ -1934,8 +1959,8 @@ static int valid_native_regs(struct user_pt_regs *regs)
|
||||
*/
|
||||
int valid_user_regs(struct user_pt_regs *regs, struct task_struct *task)
|
||||
{
|
||||
if (!test_tsk_thread_flag(task, TIF_SINGLESTEP))
|
||||
regs->pstate &= ~DBG_SPSR_SS;
|
||||
/* https://lore.kernel.org/lkml/20191118131525.GA4180@willie-the-truck */
|
||||
user_regs_reset_single_step(regs, task);
|
||||
|
||||
if (is_compat_thread(task_thread_info(task)))
|
||||
return valid_compat_regs(regs);
|
||||
|
@ -800,7 +800,6 @@ static void setup_restart_syscall(struct pt_regs *regs)
|
||||
*/
|
||||
static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
|
||||
{
|
||||
struct task_struct *tsk = current;
|
||||
sigset_t *oldset = sigmask_to_save();
|
||||
int usig = ksig->sig;
|
||||
int ret;
|
||||
@ -824,14 +823,8 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
|
||||
*/
|
||||
ret |= !valid_user_regs(®s->user_regs, current);
|
||||
|
||||
/*
|
||||
* Fast forward the stepping logic so we step into the signal
|
||||
* handler.
|
||||
*/
|
||||
if (!ret)
|
||||
user_fastforward_single_step(tsk);
|
||||
|
||||
signal_setup_done(ret, ksig, 0);
|
||||
/* Step into the signal handler if we are stepping */
|
||||
signal_setup_done(ret, ksig, test_thread_flag(TIF_SINGLESTEP));
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -50,6 +50,9 @@ static void invoke_syscall(struct pt_regs *regs, unsigned int scno,
|
||||
ret = do_ni_syscall(regs, scno);
|
||||
}
|
||||
|
||||
if (is_compat_task())
|
||||
ret = lower_32_bits(ret);
|
||||
|
||||
regs->regs[0] = ret;
|
||||
}
|
||||
|
||||
@ -121,7 +124,21 @@ static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
|
||||
user_exit();
|
||||
|
||||
if (has_syscall_work(flags)) {
|
||||
/* set default errno for user-issued syscall(-1) */
|
||||
/*
|
||||
* The de-facto standard way to skip a system call using ptrace
|
||||
* is to set the system call to -1 (NO_SYSCALL) and set x0 to a
|
||||
* suitable error code for consumption by userspace. However,
|
||||
* this cannot be distinguished from a user-issued syscall(-1)
|
||||
* and so we must set x0 to -ENOSYS here in case the tracer doesn't
|
||||
* issue the skip and we fall into trace_exit with x0 preserved.
|
||||
*
|
||||
* This is slightly odd because it also means that if a tracer
|
||||
* sets the system call number to -1 but does not initialise x0,
|
||||
* then x0 will be preserved for all system calls apart from a
|
||||
* user-issued syscall(-1). However, requesting a skip and not
|
||||
* setting the return value is unlikely to do anything sensible
|
||||
* anyway.
|
||||
*/
|
||||
if (scno == NO_SYSCALL)
|
||||
regs->regs[0] = -ENOSYS;
|
||||
scno = syscall_trace_enter(regs);
|
||||
@ -139,7 +156,7 @@ static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
|
||||
if (!has_syscall_work(flags) && !IS_ENABLED(CONFIG_DEBUG_RSEQ)) {
|
||||
local_daif_mask();
|
||||
flags = current_thread_info()->flags;
|
||||
if (!has_syscall_work(flags)) {
|
||||
if (!has_syscall_work(flags) && !(flags & _TIF_SINGLESTEP)) {
|
||||
/*
|
||||
* We're off to userspace, where interrupts are
|
||||
* always enabled after we restore the flags from
|
||||
|
@ -14,7 +14,7 @@ COMPAT_GCC_TOOLCHAIN_DIR := $(dir $(shell which $(CROSS_COMPILE_COMPAT)elfedit))
|
||||
COMPAT_GCC_TOOLCHAIN := $(realpath $(COMPAT_GCC_TOOLCHAIN_DIR)/..)
|
||||
|
||||
CC_COMPAT_CLANG_FLAGS := --target=$(notdir $(CROSS_COMPILE_COMPAT:%-=%))
|
||||
CC_COMPAT_CLANG_FLAGS += --prefix=$(COMPAT_GCC_TOOLCHAIN_DIR)
|
||||
CC_COMPAT_CLANG_FLAGS += --prefix=$(COMPAT_GCC_TOOLCHAIN_DIR)$(notdir $(CROSS_COMPILE_COMPAT))
|
||||
CC_COMPAT_CLANG_FLAGS += -no-integrated-as -Qunused-arguments
|
||||
ifneq ($(COMPAT_GCC_TOOLCHAIN),)
|
||||
CC_COMPAT_CLANG_FLAGS += --gcc-toolchain=$(COMPAT_GCC_TOOLCHAIN)
|
||||
|
@ -627,9 +627,10 @@ static int bridge_probe(struct platform_device *pdev)
|
||||
return -ENOMEM;
|
||||
domain = irq_domain_create_hierarchy(parent, 0, 8, fn,
|
||||
&bridge_domain_ops, NULL);
|
||||
irq_domain_free_fwnode(fn);
|
||||
if (!domain)
|
||||
if (!domain) {
|
||||
irq_domain_free_fwnode(fn);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
pci_set_flags(PCI_PROBE_ONLY);
|
||||
|
||||
|
@ -77,6 +77,8 @@ struct coprocessor_completion_block {
|
||||
#define CSB_CC_CHAIN (37)
|
||||
#define CSB_CC_SEQUENCE (38)
|
||||
#define CSB_CC_HW (39)
|
||||
/* P9 DD2 NX Workbook 3.2 (Table 4-36): Address translation fault */
|
||||
#define CSB_CC_FAULT_ADDRESS (250)
|
||||
|
||||
#define CSB_SIZE (0x10)
|
||||
#define CSB_ALIGN CSB_SIZE
|
||||
|
@ -2551,7 +2551,7 @@ EXC_VIRT_NONE(0x5400, 0x100)
|
||||
INT_DEFINE_BEGIN(denorm_exception)
|
||||
IVEC=0x1500
|
||||
IHSRR=1
|
||||
IBRANCH_COMMON=0
|
||||
IBRANCH_TO_COMMON=0
|
||||
IKVM_REAL=1
|
||||
INT_DEFINE_END(denorm_exception)
|
||||
|
||||
|
@ -87,7 +87,7 @@ static void *__init alloc_shared_lppaca(unsigned long size, unsigned long align,
|
||||
* This is very early in boot, so no harm done if the kernel crashes at
|
||||
* this point.
|
||||
*/
|
||||
BUG_ON(shared_lppaca_size >= shared_lppaca_total_size);
|
||||
BUG_ON(shared_lppaca_size > shared_lppaca_total_size);
|
||||
|
||||
return ptr;
|
||||
}
|
||||
|
@ -354,12 +354,14 @@ static bool pkey_access_permitted(int pkey, bool write, bool execute)
|
||||
u64 amr;
|
||||
|
||||
pkey_shift = pkeyshift(pkey);
|
||||
if (execute && !(read_iamr() & (IAMR_EX_BIT << pkey_shift)))
|
||||
return true;
|
||||
if (execute)
|
||||
return !(read_iamr() & (IAMR_EX_BIT << pkey_shift));
|
||||
|
||||
amr = read_amr(); /* Delay reading amr until absolutely needed */
|
||||
return ((!write && !(amr & (AMR_RD_BIT << pkey_shift))) ||
|
||||
(write && !(amr & (AMR_WR_BIT << pkey_shift))));
|
||||
amr = read_amr();
|
||||
if (write)
|
||||
return !(amr & (AMR_WR_BIT << pkey_shift));
|
||||
|
||||
return !(amr & (AMR_RD_BIT << pkey_shift));
|
||||
}
|
||||
|
||||
bool arch_pte_access_permitted(u64 pte, bool write, bool execute)
|
||||
|
@ -79,7 +79,7 @@ static void update_csb(struct vas_window *window,
|
||||
csb_addr = (void __user *)be64_to_cpu(crb->csb_addr);
|
||||
|
||||
memset(&csb, 0, sizeof(csb));
|
||||
csb.cc = CSB_CC_TRANSLATION;
|
||||
csb.cc = CSB_CC_FAULT_ADDRESS;
|
||||
csb.ce = CSB_CE_TERMINATION;
|
||||
csb.cs = 0;
|
||||
csb.count = 0;
|
||||
|
@ -23,6 +23,8 @@ config RISCV
|
||||
select ARCH_HAS_SET_DIRECT_MAP
|
||||
select ARCH_HAS_SET_MEMORY
|
||||
select ARCH_HAS_STRICT_KERNEL_RWX if MMU
|
||||
select ARCH_OPTIONAL_KERNEL_RWX if ARCH_HAS_STRICT_KERNEL_RWX
|
||||
select ARCH_OPTIONAL_KERNEL_RWX_DEFAULT
|
||||
select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU
|
||||
select ARCH_WANT_FRAME_POINTERS
|
||||
select ARCH_WANT_HUGE_PMD_SHARE if 64BIT
|
||||
|
@ -58,8 +58,16 @@ do { \
|
||||
* The AQ/RL pair provides a RCpc critical section, but there's not really any
|
||||
* way we can take advantage of that here because the ordering is only enforced
|
||||
* on that one lock. Thus, we're just doing a full fence.
|
||||
*
|
||||
* Since we allow writeX to be called from preemptive regions we need at least
|
||||
* an "o" in the predecessor set to ensure device writes are visible before the
|
||||
* task is marked as available for scheduling on a new hart. While I don't see
|
||||
* any concrete reason we need a full IO fence, it seems safer to just upgrade
|
||||
* this in order to avoid any IO crossing a scheduling boundary. In both
|
||||
* instances the scheduler pairs this with an mb(), so nothing is necessary on
|
||||
* the new hart.
|
||||
*/
|
||||
#define smp_mb__after_spinlock() RISCV_FENCE(rw,rw)
|
||||
#define smp_mb__after_spinlock() RISCV_FENCE(iorw,iorw)
|
||||
|
||||
#include <asm-generic/barrier.h>
|
||||
|
||||
|
@ -3,8 +3,7 @@
|
||||
#ifndef __ASM_GDB_XML_H_
|
||||
#define __ASM_GDB_XML_H_
|
||||
|
||||
#define kgdb_arch_gdb_stub_feature riscv_gdb_stub_feature
|
||||
static const char riscv_gdb_stub_feature[64] =
|
||||
const char riscv_gdb_stub_feature[64] =
|
||||
"PacketSize=800;qXfer:features:read+;";
|
||||
|
||||
static const char gdb_xfer_read_target[31] = "qXfer:features:read:target.xml:";
|
||||
|
@ -19,7 +19,6 @@
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
|
||||
extern int kgdb_has_hit_break(unsigned long addr);
|
||||
extern unsigned long kgdb_compiled_break;
|
||||
|
||||
static inline void arch_kgdb_breakpoint(void)
|
||||
@ -106,7 +105,9 @@ static inline void arch_kgdb_breakpoint(void)
|
||||
#define DBG_REG_BADADDR_OFF 34
|
||||
#define DBG_REG_CAUSE_OFF 35
|
||||
|
||||
#include <asm/gdb_xml.h>
|
||||
extern const char riscv_gdb_stub_feature[64];
|
||||
|
||||
#define kgdb_arch_gdb_stub_feature riscv_gdb_stub_feature
|
||||
|
||||
#endif
|
||||
#endif
|
||||
|
@ -12,7 +12,11 @@
|
||||
#include <linux/const.h>
|
||||
|
||||
/* thread information allocation */
|
||||
#ifdef CONFIG_64BIT
|
||||
#define THREAD_SIZE_ORDER (2)
|
||||
#else
|
||||
#define THREAD_SIZE_ORDER (1)
|
||||
#endif
|
||||
#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
|
@ -44,18 +44,18 @@ DECLARE_INSN(c_beqz, MATCH_C_BEQZ, MASK_C_BEQZ)
|
||||
DECLARE_INSN(c_bnez, MATCH_C_BNEZ, MASK_C_BNEZ)
|
||||
DECLARE_INSN(sret, MATCH_SRET, MASK_SRET)
|
||||
|
||||
int decode_register_index(unsigned long opcode, int offset)
|
||||
static int decode_register_index(unsigned long opcode, int offset)
|
||||
{
|
||||
return (opcode >> offset) & 0x1F;
|
||||
}
|
||||
|
||||
int decode_register_index_short(unsigned long opcode, int offset)
|
||||
static int decode_register_index_short(unsigned long opcode, int offset)
|
||||
{
|
||||
return ((opcode >> offset) & 0x7) + 8;
|
||||
}
|
||||
|
||||
/* Calculate the new address for after a step */
|
||||
int get_step_address(struct pt_regs *regs, unsigned long *next_addr)
|
||||
static int get_step_address(struct pt_regs *regs, unsigned long *next_addr)
|
||||
{
|
||||
unsigned long pc = regs->epc;
|
||||
unsigned long *regs_ptr = (unsigned long *)regs;
|
||||
@ -136,7 +136,7 @@ int get_step_address(struct pt_regs *regs, unsigned long *next_addr)
|
||||
return 0;
|
||||
}
|
||||
|
||||
int do_single_step(struct pt_regs *regs)
|
||||
static int do_single_step(struct pt_regs *regs)
|
||||
{
|
||||
/* Determine where the target instruction will send us to */
|
||||
unsigned long addr = 0;
|
||||
@ -320,7 +320,7 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code,
|
||||
return err;
|
||||
}
|
||||
|
||||
int kgdb_riscv_kgdbbreak(unsigned long addr)
|
||||
static int kgdb_riscv_kgdbbreak(unsigned long addr)
|
||||
{
|
||||
if (stepped_address == addr)
|
||||
return KGDB_SW_SINGLE_STEP;
|
||||
|
@ -95,19 +95,40 @@ void __init mem_init(void)
|
||||
#ifdef CONFIG_BLK_DEV_INITRD
|
||||
static void __init setup_initrd(void)
|
||||
{
|
||||
phys_addr_t start;
|
||||
unsigned long size;
|
||||
|
||||
if (initrd_start >= initrd_end) {
|
||||
pr_info("initrd not found or empty");
|
||||
goto disable;
|
||||
}
|
||||
if (__pa_symbol(initrd_end) > PFN_PHYS(max_low_pfn)) {
|
||||
pr_err("initrd extends beyond end of memory");
|
||||
/* Ignore the virtul address computed during device tree parsing */
|
||||
initrd_start = initrd_end = 0;
|
||||
|
||||
if (!phys_initrd_size)
|
||||
return;
|
||||
/*
|
||||
* Round the memory region to page boundaries as per free_initrd_mem()
|
||||
* This allows us to detect whether the pages overlapping the initrd
|
||||
* are in use, but more importantly, reserves the entire set of pages
|
||||
* as we don't want these pages allocated for other purposes.
|
||||
*/
|
||||
start = round_down(phys_initrd_start, PAGE_SIZE);
|
||||
size = phys_initrd_size + (phys_initrd_start - start);
|
||||
size = round_up(size, PAGE_SIZE);
|
||||
|
||||
if (!memblock_is_region_memory(start, size)) {
|
||||
pr_err("INITRD: 0x%08llx+0x%08lx is not a memory region",
|
||||
(u64)start, size);
|
||||
goto disable;
|
||||
}
|
||||
|
||||
size = initrd_end - initrd_start;
|
||||
memblock_reserve(__pa_symbol(initrd_start), size);
|
||||
if (memblock_is_region_reserved(start, size)) {
|
||||
pr_err("INITRD: 0x%08llx+0x%08lx overlaps in-use memory region\n",
|
||||
(u64)start, size);
|
||||
goto disable;
|
||||
}
|
||||
|
||||
memblock_reserve(start, size);
|
||||
/* Now convert initrd to virtual addresses */
|
||||
initrd_start = (unsigned long)__va(phys_initrd_start);
|
||||
initrd_end = initrd_start + phys_initrd_size;
|
||||
initrd_below_start_ok = 1;
|
||||
|
||||
pr_info("Initial ramdisk at: 0x%p (%lu bytes)\n",
|
||||
@ -126,33 +147,36 @@ void __init setup_bootmem(void)
|
||||
{
|
||||
struct memblock_region *reg;
|
||||
phys_addr_t mem_size = 0;
|
||||
phys_addr_t total_mem = 0;
|
||||
phys_addr_t mem_start, end = 0;
|
||||
phys_addr_t vmlinux_end = __pa_symbol(&_end);
|
||||
phys_addr_t vmlinux_start = __pa_symbol(&_start);
|
||||
|
||||
/* Find the memory region containing the kernel */
|
||||
for_each_memblock(memory, reg) {
|
||||
phys_addr_t end = reg->base + reg->size;
|
||||
|
||||
if (reg->base <= vmlinux_start && vmlinux_end <= end) {
|
||||
mem_size = min(reg->size, (phys_addr_t)-PAGE_OFFSET);
|
||||
|
||||
/*
|
||||
* Remove memblock from the end of usable area to the
|
||||
* end of region
|
||||
*/
|
||||
if (reg->base + mem_size < end)
|
||||
memblock_remove(reg->base + mem_size,
|
||||
end - reg->base - mem_size);
|
||||
}
|
||||
end = reg->base + reg->size;
|
||||
if (!total_mem)
|
||||
mem_start = reg->base;
|
||||
if (reg->base <= vmlinux_start && vmlinux_end <= end)
|
||||
BUG_ON(reg->size == 0);
|
||||
total_mem = total_mem + reg->size;
|
||||
}
|
||||
BUG_ON(mem_size == 0);
|
||||
|
||||
/*
|
||||
* Remove memblock from the end of usable area to the
|
||||
* end of region
|
||||
*/
|
||||
mem_size = min(total_mem, (phys_addr_t)-PAGE_OFFSET);
|
||||
if (mem_start + mem_size < end)
|
||||
memblock_remove(mem_start + mem_size,
|
||||
end - mem_start - mem_size);
|
||||
|
||||
/* Reserve from the start of the kernel to the end of the kernel */
|
||||
memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start);
|
||||
|
||||
set_max_mapnr(PFN_DOWN(mem_size));
|
||||
max_pfn = PFN_DOWN(memblock_end_of_DRAM());
|
||||
max_low_pfn = max_pfn;
|
||||
set_max_mapnr(max_low_pfn);
|
||||
|
||||
#ifdef CONFIG_BLK_DEV_INITRD
|
||||
setup_initrd();
|
||||
|
@ -44,7 +44,7 @@ asmlinkage void __init kasan_early_init(void)
|
||||
(__pa(((uintptr_t) kasan_early_shadow_pmd))),
|
||||
__pgprot(_PAGE_TABLE)));
|
||||
|
||||
flush_tlb_all();
|
||||
local_flush_tlb_all();
|
||||
}
|
||||
|
||||
static void __init populate(void *start, void *end)
|
||||
@ -79,7 +79,7 @@ static void __init populate(void *start, void *end)
|
||||
pfn_pgd(PFN_DOWN(__pa(&pmd[offset])),
|
||||
__pgprot(_PAGE_TABLE)));
|
||||
|
||||
flush_tlb_all();
|
||||
local_flush_tlb_all();
|
||||
memset(start, 0, end - start);
|
||||
}
|
||||
|
||||
|
@ -292,7 +292,7 @@ CPUMF_EVENT_ATTR(cf_z15, TX_C_TABORT_SPECIAL, 0x00f5);
|
||||
CPUMF_EVENT_ATTR(cf_z15, DFLT_ACCESS, 0x00f7);
|
||||
CPUMF_EVENT_ATTR(cf_z15, DFLT_CYCLES, 0x00fc);
|
||||
CPUMF_EVENT_ATTR(cf_z15, DFLT_CC, 0x00108);
|
||||
CPUMF_EVENT_ATTR(cf_z15, DFLT_CCERROR, 0x00109);
|
||||
CPUMF_EVENT_ATTR(cf_z15, DFLT_CCFINISH, 0x00109);
|
||||
CPUMF_EVENT_ATTR(cf_z15, MT_DIAG_CYCLES_ONE_THR_ACTIVE, 0x01c0);
|
||||
CPUMF_EVENT_ATTR(cf_z15, MT_DIAG_CYCLES_TWO_THR_ACTIVE, 0x01c1);
|
||||
|
||||
@ -629,7 +629,7 @@ static struct attribute *cpumcf_z15_pmu_event_attr[] __initdata = {
|
||||
CPUMF_EVENT_PTR(cf_z15, DFLT_ACCESS),
|
||||
CPUMF_EVENT_PTR(cf_z15, DFLT_CYCLES),
|
||||
CPUMF_EVENT_PTR(cf_z15, DFLT_CC),
|
||||
CPUMF_EVENT_PTR(cf_z15, DFLT_CCERROR),
|
||||
CPUMF_EVENT_PTR(cf_z15, DFLT_CCFINISH),
|
||||
CPUMF_EVENT_PTR(cf_z15, MT_DIAG_CYCLES_ONE_THR_ACTIVE),
|
||||
CPUMF_EVENT_PTR(cf_z15, MT_DIAG_CYCLES_TWO_THR_ACTIVE),
|
||||
NULL,
|
||||
|
@ -90,8 +90,8 @@ endif
|
||||
|
||||
vmlinux-objs-$(CONFIG_ACPI) += $(obj)/acpi.o
|
||||
|
||||
vmlinux-objs-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a
|
||||
vmlinux-objs-$(CONFIG_EFI_MIXED) += $(obj)/efi_thunk_$(BITS).o
|
||||
efi-obj-$(CONFIG_EFI_STUB) = $(objtree)/drivers/firmware/efi/libstub/lib.a
|
||||
|
||||
# The compressed kernel is built with -fPIC/-fPIE so that a boot loader
|
||||
# can place it anywhere in memory and it will still run. However, since
|
||||
@ -115,7 +115,7 @@ endef
|
||||
quiet_cmd_check-and-link-vmlinux = LD $@
|
||||
cmd_check-and-link-vmlinux = $(cmd_check_data_rel); $(cmd_ld)
|
||||
|
||||
$(obj)/vmlinux: $(vmlinux-objs-y) FORCE
|
||||
$(obj)/vmlinux: $(vmlinux-objs-y) $(efi-obj-y) FORCE
|
||||
$(call if_changed,check-and-link-vmlinux)
|
||||
|
||||
OBJCOPYFLAGS_vmlinux.bin := -R .comment -S
|
||||
|
@ -7,12 +7,20 @@ KASAN_SANITIZE := n
|
||||
UBSAN_SANITIZE := n
|
||||
KCOV_INSTRUMENT := n
|
||||
|
||||
CFLAGS_REMOVE_common.o = $(CC_FLAGS_FTRACE) -fstack-protector -fstack-protector-strong
|
||||
CFLAGS_REMOVE_syscall_32.o = $(CC_FLAGS_FTRACE) -fstack-protector -fstack-protector-strong
|
||||
CFLAGS_REMOVE_syscall_64.o = $(CC_FLAGS_FTRACE) -fstack-protector -fstack-protector-strong
|
||||
CFLAGS_REMOVE_common.o = $(CC_FLAGS_FTRACE)
|
||||
CFLAGS_REMOVE_syscall_64.o = $(CC_FLAGS_FTRACE)
|
||||
CFLAGS_REMOVE_syscall_32.o = $(CC_FLAGS_FTRACE)
|
||||
CFLAGS_REMOVE_syscall_x32.o = $(CC_FLAGS_FTRACE)
|
||||
|
||||
CFLAGS_common.o += -fno-stack-protector
|
||||
CFLAGS_syscall_64.o += -fno-stack-protector
|
||||
CFLAGS_syscall_32.o += -fno-stack-protector
|
||||
CFLAGS_syscall_x32.o += -fno-stack-protector
|
||||
|
||||
CFLAGS_syscall_64.o += $(call cc-option,-Wno-override-init,)
|
||||
CFLAGS_syscall_32.o += $(call cc-option,-Wno-override-init,)
|
||||
CFLAGS_syscall_x32.o += $(call cc-option,-Wno-override-init,)
|
||||
|
||||
obj-y := entry_$(BITS).o thunk_$(BITS).o syscall_$(BITS).o
|
||||
obj-y += common.o
|
||||
|
||||
|
@ -46,7 +46,7 @@
|
||||
#include <trace/events/syscalls.h>
|
||||
|
||||
/* Check that the stack and regs on entry from user mode are sane. */
|
||||
static void check_user_regs(struct pt_regs *regs)
|
||||
static noinstr void check_user_regs(struct pt_regs *regs)
|
||||
{
|
||||
if (IS_ENABLED(CONFIG_DEBUG_ENTRY)) {
|
||||
/*
|
||||
@ -294,7 +294,7 @@ static void __prepare_exit_to_usermode(struct pt_regs *regs)
|
||||
#endif
|
||||
}
|
||||
|
||||
__visible noinstr void prepare_exit_to_usermode(struct pt_regs *regs)
|
||||
static noinstr void prepare_exit_to_usermode(struct pt_regs *regs)
|
||||
{
|
||||
instrumentation_begin();
|
||||
__prepare_exit_to_usermode(regs);
|
||||
|
@ -469,16 +469,15 @@ __visible noinstr void func(struct pt_regs *regs, \
|
||||
.align 8
|
||||
SYM_CODE_START(irq_entries_start)
|
||||
vector=FIRST_EXTERNAL_VECTOR
|
||||
pos = .
|
||||
.rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
|
||||
UNWIND_HINT_IRET_REGS
|
||||
0 :
|
||||
.byte 0x6a, vector
|
||||
jmp asm_common_interrupt
|
||||
nop
|
||||
/* Ensure that the above is 8 bytes max */
|
||||
. = pos + 8
|
||||
pos=pos+8
|
||||
vector=vector+1
|
||||
. = 0b + 8
|
||||
vector = vector+1
|
||||
.endr
|
||||
SYM_CODE_END(irq_entries_start)
|
||||
|
||||
@ -486,16 +485,15 @@ SYM_CODE_END(irq_entries_start)
|
||||
.align 8
|
||||
SYM_CODE_START(spurious_entries_start)
|
||||
vector=FIRST_SYSTEM_VECTOR
|
||||
pos = .
|
||||
.rept (NR_VECTORS - FIRST_SYSTEM_VECTOR)
|
||||
UNWIND_HINT_IRET_REGS
|
||||
0 :
|
||||
.byte 0x6a, vector
|
||||
jmp asm_spurious_interrupt
|
||||
nop
|
||||
/* Ensure that the above is 8 bytes max */
|
||||
. = pos + 8
|
||||
pos=pos+8
|
||||
vector=vector+1
|
||||
. = 0b + 8
|
||||
vector = vector+1
|
||||
.endr
|
||||
SYM_CODE_END(spurious_entries_start)
|
||||
#endif
|
||||
@ -553,7 +551,7 @@ DECLARE_IDTENTRY_RAW(X86_TRAP_MC, exc_machine_check);
|
||||
|
||||
/* NMI */
|
||||
DECLARE_IDTENTRY_NMI(X86_TRAP_NMI, exc_nmi);
|
||||
#ifdef CONFIG_XEN_PV
|
||||
#if defined(CONFIG_XEN_PV) && defined(CONFIG_X86_64)
|
||||
DECLARE_IDTENTRY_RAW(X86_TRAP_NMI, xenpv_exc_nmi);
|
||||
#endif
|
||||
|
||||
@ -563,7 +561,7 @@ DECLARE_IDTENTRY_DEBUG(X86_TRAP_DB, exc_debug);
|
||||
#else
|
||||
DECLARE_IDTENTRY_RAW(X86_TRAP_DB, exc_debug);
|
||||
#endif
|
||||
#ifdef CONFIG_XEN_PV
|
||||
#if defined(CONFIG_XEN_PV) && defined(CONFIG_X86_64)
|
||||
DECLARE_IDTENTRY_RAW(X86_TRAP_DB, xenpv_exc_debug);
|
||||
#endif
|
||||
|
||||
@ -626,8 +624,8 @@ DECLARE_IDTENTRY_SYSVEC(POSTED_INTR_NESTED_VECTOR, sysvec_kvm_posted_intr_nested
|
||||
|
||||
#if IS_ENABLED(CONFIG_HYPERV)
|
||||
DECLARE_IDTENTRY_SYSVEC(HYPERVISOR_CALLBACK_VECTOR, sysvec_hyperv_callback);
|
||||
DECLARE_IDTENTRY_SYSVEC(HYPERVISOR_REENLIGHTENMENT_VECTOR, sysvec_hyperv_reenlightenment);
|
||||
DECLARE_IDTENTRY_SYSVEC(HYPERVISOR_STIMER0_VECTOR, sysvec_hyperv_stimer0);
|
||||
DECLARE_IDTENTRY_SYSVEC(HYPERV_REENLIGHTENMENT_VECTOR, sysvec_hyperv_reenlightenment);
|
||||
DECLARE_IDTENTRY_SYSVEC(HYPERV_STIMER0_VECTOR, sysvec_hyperv_stimer0);
|
||||
#endif
|
||||
|
||||
#if IS_ENABLED(CONFIG_ACRN_GUEST)
|
||||
|
@ -19,12 +19,28 @@ struct task_struct;
|
||||
void io_bitmap_share(struct task_struct *tsk);
|
||||
void io_bitmap_exit(struct task_struct *tsk);
|
||||
|
||||
static inline void native_tss_invalidate_io_bitmap(void)
|
||||
{
|
||||
/*
|
||||
* Invalidate the I/O bitmap by moving io_bitmap_base outside the
|
||||
* TSS limit so any subsequent I/O access from user space will
|
||||
* trigger a #GP.
|
||||
*
|
||||
* This is correct even when VMEXIT rewrites the TSS limit
|
||||
* to 0x67 as the only requirement is that the base points
|
||||
* outside the limit.
|
||||
*/
|
||||
this_cpu_write(cpu_tss_rw.x86_tss.io_bitmap_base,
|
||||
IO_BITMAP_OFFSET_INVALID);
|
||||
}
|
||||
|
||||
void native_tss_update_io_bitmap(void);
|
||||
|
||||
#ifdef CONFIG_PARAVIRT_XXL
|
||||
#include <asm/paravirt.h>
|
||||
#else
|
||||
#define tss_update_io_bitmap native_tss_update_io_bitmap
|
||||
#define tss_invalidate_io_bitmap native_tss_invalidate_io_bitmap
|
||||
#endif
|
||||
|
||||
#else
|
||||
|
@ -39,6 +39,7 @@
|
||||
#define BT_MBI_UNIT_PMC 0x04
|
||||
#define BT_MBI_UNIT_GFX 0x06
|
||||
#define BT_MBI_UNIT_SMI 0x0C
|
||||
#define BT_MBI_UNIT_CCK 0x14
|
||||
#define BT_MBI_UNIT_USB 0x43
|
||||
#define BT_MBI_UNIT_SATA 0xA3
|
||||
#define BT_MBI_UNIT_PCIE 0xA6
|
||||
|
@ -302,6 +302,11 @@ static inline void write_idt_entry(gate_desc *dt, int entry, const gate_desc *g)
|
||||
}
|
||||
|
||||
#ifdef CONFIG_X86_IOPL_IOPERM
|
||||
static inline void tss_invalidate_io_bitmap(void)
|
||||
{
|
||||
PVOP_VCALL0(cpu.invalidate_io_bitmap);
|
||||
}
|
||||
|
||||
static inline void tss_update_io_bitmap(void)
|
||||
{
|
||||
PVOP_VCALL0(cpu.update_io_bitmap);
|
||||
|
@ -141,6 +141,7 @@ struct pv_cpu_ops {
|
||||
void (*load_sp0)(unsigned long sp0);
|
||||
|
||||
#ifdef CONFIG_X86_IOPL_IOPERM
|
||||
void (*invalidate_io_bitmap)(void);
|
||||
void (*update_io_bitmap)(void);
|
||||
#endif
|
||||
|
||||
|
@ -2316,12 +2316,12 @@ static int mp_irqdomain_create(int ioapic)
|
||||
ip->irqdomain = irq_domain_create_linear(fn, hwirqs, cfg->ops,
|
||||
(void *)(long)ioapic);
|
||||
|
||||
/* Release fw handle if it was allocated above */
|
||||
if (!cfg->dev)
|
||||
irq_domain_free_fwnode(fn);
|
||||
|
||||
if (!ip->irqdomain)
|
||||
if (!ip->irqdomain) {
|
||||
/* Release fw handle if it was allocated above */
|
||||
if (!cfg->dev)
|
||||
irq_domain_free_fwnode(fn);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
ip->irqdomain->parent = parent;
|
||||
|
||||
|
@ -263,12 +263,13 @@ void __init arch_init_msi_domain(struct irq_domain *parent)
|
||||
msi_default_domain =
|
||||
pci_msi_create_irq_domain(fn, &pci_msi_domain_info,
|
||||
parent);
|
||||
irq_domain_free_fwnode(fn);
|
||||
}
|
||||
if (!msi_default_domain)
|
||||
if (!msi_default_domain) {
|
||||
irq_domain_free_fwnode(fn);
|
||||
pr_warn("failed to initialize irqdomain for MSI/MSI-x.\n");
|
||||
else
|
||||
} else {
|
||||
msi_default_domain->flags |= IRQ_DOMAIN_MSI_NOMASK_QUIRK;
|
||||
}
|
||||
}
|
||||
|
||||
#ifdef CONFIG_IRQ_REMAP
|
||||
@ -301,7 +302,8 @@ struct irq_domain *arch_create_remap_msi_irq_domain(struct irq_domain *parent,
|
||||
if (!fn)
|
||||
return NULL;
|
||||
d = pci_msi_create_irq_domain(fn, &pci_msi_ir_domain_info, parent);
|
||||
irq_domain_free_fwnode(fn);
|
||||
if (!d)
|
||||
irq_domain_free_fwnode(fn);
|
||||
return d;
|
||||
}
|
||||
#endif
|
||||
@ -364,7 +366,8 @@ static struct irq_domain *dmar_get_irq_domain(void)
|
||||
if (fn) {
|
||||
dmar_domain = msi_create_irq_domain(fn, &dmar_msi_domain_info,
|
||||
x86_vector_domain);
|
||||
irq_domain_free_fwnode(fn);
|
||||
if (!dmar_domain)
|
||||
irq_domain_free_fwnode(fn);
|
||||
}
|
||||
out:
|
||||
mutex_unlock(&dmar_lock);
|
||||
@ -489,7 +492,10 @@ struct irq_domain *hpet_create_irq_domain(int hpet_id)
|
||||
}
|
||||
|
||||
d = msi_create_irq_domain(fn, domain_info, parent);
|
||||
irq_domain_free_fwnode(fn);
|
||||
if (!d) {
|
||||
irq_domain_free_fwnode(fn);
|
||||
kfree(domain_info);
|
||||
}
|
||||
return d;
|
||||
}
|
||||
|
||||
|
@ -446,12 +446,10 @@ static int x86_vector_activate(struct irq_domain *dom, struct irq_data *irqd,
|
||||
trace_vector_activate(irqd->irq, apicd->is_managed,
|
||||
apicd->can_reserve, reserve);
|
||||
|
||||
/* Nothing to do for fixed assigned vectors */
|
||||
if (!apicd->can_reserve && !apicd->is_managed)
|
||||
return 0;
|
||||
|
||||
raw_spin_lock_irqsave(&vector_lock, flags);
|
||||
if (reserve || irqd_is_managed_and_shutdown(irqd))
|
||||
if (!apicd->can_reserve && !apicd->is_managed)
|
||||
assign_irq_vector_any_locked(irqd);
|
||||
else if (reserve || irqd_is_managed_and_shutdown(irqd))
|
||||
vector_assign_managed_shutdown(irqd);
|
||||
else if (apicd->is_managed)
|
||||
ret = activate_managed(irqd);
|
||||
@ -709,7 +707,6 @@ int __init arch_early_irq_init(void)
|
||||
x86_vector_domain = irq_domain_create_tree(fn, &x86_vector_domain_ops,
|
||||
NULL);
|
||||
BUG_ON(x86_vector_domain == NULL);
|
||||
irq_domain_free_fwnode(fn);
|
||||
irq_set_default_host(x86_vector_domain);
|
||||
|
||||
arch_init_msi_domain(x86_vector_domain);
|
||||
@ -775,20 +772,10 @@ void lapic_offline(void)
|
||||
static int apic_set_affinity(struct irq_data *irqd,
|
||||
const struct cpumask *dest, bool force)
|
||||
{
|
||||
struct apic_chip_data *apicd = apic_chip_data(irqd);
|
||||
int err;
|
||||
|
||||
/*
|
||||
* Core code can call here for inactive interrupts. For inactive
|
||||
* interrupts which use managed or reservation mode there is no
|
||||
* point in going through the vector assignment right now as the
|
||||
* activation will assign a vector which fits the destination
|
||||
* cpumask. Let the core code store the destination mask and be
|
||||
* done with it.
|
||||
*/
|
||||
if (!irqd_is_activated(irqd) &&
|
||||
(apicd->is_managed || apicd->can_reserve))
|
||||
return IRQ_SET_MASK_OK;
|
||||
if (WARN_ON_ONCE(!irqd_is_activated(irqd)))
|
||||
return -EIO;
|
||||
|
||||
raw_spin_lock(&vector_lock);
|
||||
cpumask_and(vector_searchmask, dest, cpu_online_mask);
|
||||
|
@@ -71,6 +71,22 @@ static void printk_stack_address(unsigned long address, int reliable,
printk("%s %s%pB\n", log_lvl, reliable ? "" : "? ", (void *)address);
}

static int copy_code(struct pt_regs *regs, u8 *buf, unsigned long src,
unsigned int nbytes)
{
if (!user_mode(regs))
return copy_from_kernel_nofault(buf, (u8 *)src, nbytes);

/*
* Make sure userspace isn't trying to trick us into dumping kernel
* memory by pointing the userspace instruction pointer at it.
*/
if (__chk_range_not_ok(src, nbytes, TASK_SIZE_MAX))
return -EINVAL;

return copy_from_user_nmi(buf, (void __user *)src, nbytes);
}

/*
* There are a couple of reasons for the 2/3rd prologue, courtesy of Linus:
*
@@ -97,17 +113,8 @@ void show_opcodes(struct pt_regs *regs, const char *loglvl)
#define OPCODE_BUFSIZE (PROLOGUE_SIZE + 1 + EPILOGUE_SIZE)
u8 opcodes[OPCODE_BUFSIZE];
unsigned long prologue = regs->ip - PROLOGUE_SIZE;
bool bad_ip;

/*
* Make sure userspace isn't trying to trick us into dumping kernel
* memory by pointing the userspace instruction pointer at it.
*/
bad_ip = user_mode(regs) &&
__chk_range_not_ok(prologue, OPCODE_BUFSIZE, TASK_SIZE_MAX);

if (bad_ip || copy_from_kernel_nofault(opcodes, (u8 *)prologue,
OPCODE_BUFSIZE)) {
if (copy_code(regs, opcodes, prologue, sizeof(opcodes))) {
printk("%sCode: Bad RIP value.\n", loglvl);
} else {
printk("%sCode: %" __stringify(PROLOGUE_SIZE) "ph <%02x> %"
@@ -1074,7 +1074,7 @@ int copy_xstate_to_kernel(void *kbuf, struct xregs_state *xsave, unsigned int of
copy_part(offsetof(struct fxregs_state, st_space), 128,
&xsave->i387.st_space, &kbuf, &offset_start, &count);
if (header.xfeatures & XFEATURE_MASK_SSE)
copy_part(xstate_offsets[XFEATURE_MASK_SSE], 256,
copy_part(xstate_offsets[XFEATURE_SSE], 256,
&xsave->i387.xmm_space, &kbuf, &offset_start, &count);
/*
* Fill xsave->i387.sw_reserved value for ptrace frame:
@@ -324,7 +324,8 @@ struct paravirt_patch_template pv_ops = {
.cpu.swapgs = native_swapgs,

#ifdef CONFIG_X86_IOPL_IOPERM
.cpu.update_io_bitmap = native_tss_update_io_bitmap,
.cpu.invalidate_io_bitmap = native_tss_invalidate_io_bitmap,
.cpu.update_io_bitmap = native_tss_update_io_bitmap,
#endif

.cpu.start_context_switch = paravirt_nop,
@@ -322,20 +322,6 @@ void arch_setup_new_exec(void)
}

#ifdef CONFIG_X86_IOPL_IOPERM
static inline void tss_invalidate_io_bitmap(struct tss_struct *tss)
{
/*
* Invalidate the I/O bitmap by moving io_bitmap_base outside the
* TSS limit so any subsequent I/O access from user space will
* trigger a #GP.
*
* This is correct even when VMEXIT rewrites the TSS limit
* to 0x67 as the only requirement is that the base points
* outside the limit.
*/
tss->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET_INVALID;
}

static inline void switch_to_bitmap(unsigned long tifp)
{
/*
@@ -346,7 +332,7 @@ static inline void switch_to_bitmap(unsigned long tifp)
* user mode.
*/
if (tifp & _TIF_IO_BITMAP)
tss_invalidate_io_bitmap(this_cpu_ptr(&cpu_tss_rw));
tss_invalidate_io_bitmap();
}

static void tss_copy_io_bitmap(struct tss_struct *tss, struct io_bitmap *iobm)
@@ -380,7 +366,7 @@ void native_tss_update_io_bitmap(void)
u16 *base = &tss->x86_tss.io_bitmap_base;

if (!test_thread_flag(TIF_IO_BITMAP)) {
tss_invalidate_io_bitmap(tss);
native_tss_invalidate_io_bitmap();
return;
}
@@ -58,7 +58,6 @@ int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
* or a page fault), which can make frame pointers
* unreliable.
*/

if (IS_ENABLED(CONFIG_FRAME_POINTER))
return -EINVAL;
}
@@ -81,10 +80,6 @@ int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
if (unwind_error(&state))
return -EINVAL;

/* Success path for non-user tasks, i.e. kthreads and idle tasks */
if (!(task->flags & (PF_KTHREAD | PF_IDLE)))
return -EINVAL;

return 0;
}
@@ -303,6 +303,8 @@ DEFINE_IDTENTRY_ERRORCODE(exc_alignment_check)

do_trap(X86_TRAP_AC, SIGBUS, "alignment check", regs,
error_code, BUS_ADRALN, NULL);

local_irq_disable();
}

#ifdef CONFIG_VMAP_STACK
@@ -440,8 +440,11 @@ bool unwind_next_frame(struct unwind_state *state)
/*
* Find the orc_entry associated with the text address.
*
* Decrement call return addresses by one so they work for sibling
* calls and calls to noreturn functions.
* For a call frame (as opposed to a signal frame), state->ip points to
* the instruction after the call. That instruction's stack layout
* could be different from the call instruction's layout, for example
* if the call was to a noreturn function. So get the ORC data for the
* call instruction itself.
*/
orc = orc_find(state->signal ? state->ip : state->ip - 1);
if (!orc) {
@@ -662,6 +665,7 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task,
state->sp = task->thread.sp;
state->bp = READ_ONCE_NOCHECK(frame->bp);
state->ip = READ_ONCE_NOCHECK(frame->ret_addr);
state->signal = (void *)state->ip == ret_from_fork;
}

if (get_stack_info((unsigned long *)state->sp, state->task,
@@ -358,6 +358,7 @@ SECTIONS
.bss : AT(ADDR(.bss) - LOAD_OFFSET) {
__bss_start = .;
*(.bss..page_aligned)
. = ALIGN(PAGE_SIZE);
*(BSS_MAIN)
BSS_DECRYPTED
. = ALIGN(PAGE_SIZE);
@@ -209,7 +209,7 @@ sqrt_stage_2_finish:

#ifdef PARANOID
/* It should be possible to get here only if the arg is ffff....ffff */
cmp $0xffffffff,FPU_fsqrt_arg_1
cmpl $0xffffffff,FPU_fsqrt_arg_1
jnz sqrt_stage_2_error
#endif /* PARANOID */
@@ -167,9 +167,10 @@ static struct irq_domain *uv_get_irq_domain(void)
goto out;

uv_domain = irq_domain_create_tree(fn, &uv_domain_ops, NULL);
irq_domain_free_fwnode(fn);
if (uv_domain)
uv_domain->parent = x86_vector_domain;
else
irq_domain_free_fwnode(fn);
out:
mutex_unlock(&uv_lock);
@@ -870,6 +870,17 @@ static void xen_load_sp0(unsigned long sp0)
}

#ifdef CONFIG_X86_IOPL_IOPERM
static void xen_invalidate_io_bitmap(void)
{
struct physdev_set_iobitmap iobitmap = {
.bitmap = 0,
.nr_ports = 0,
};

native_tss_invalidate_io_bitmap();
HYPERVISOR_physdev_op(PHYSDEVOP_set_iobitmap, &iobitmap);
}

static void xen_update_io_bitmap(void)
{
struct physdev_set_iobitmap iobitmap;
@@ -1099,6 +1110,7 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = {
.load_sp0 = xen_load_sp0,

#ifdef CONFIG_X86_IOPL_IOPERM
.invalidate_io_bitmap = xen_invalidate_io_bitmap,
.update_io_bitmap = xen_update_io_bitmap,
#endif
.io_delay = xen_io_delay,
@@ -57,7 +57,7 @@ static inline
__wsum csum_and_copy_from_user(const void __user *src, void *dst,
int len, __wsum sum, int *err_ptr)
{
if (access_ok(dst, len))
if (access_ok(src, len))
return csum_partial_copy_generic((__force const void *)src, dst,
len, sum, err_ptr, NULL);
if (len)
@@ -362,9 +362,7 @@ irqreturn_t xtensa_pmu_irq_handler(int irq, void *dev_id)
struct xtensa_pmu_events *ev = this_cpu_ptr(&xtensa_pmu_events);
unsigned i;

for (i = find_first_bit(ev->used_mask, XCHAL_NUM_PERF_COUNTERS);
i < XCHAL_NUM_PERF_COUNTERS;
i = find_next_bit(ev->used_mask, XCHAL_NUM_PERF_COUNTERS, i + 1)) {
for_each_set_bit(i, ev->used_mask, XCHAL_NUM_PERF_COUNTERS) {
uint32_t v = get_er(XTENSA_PMU_PMSTAT(i));
struct perf_event *event = ev->event[i];
struct hw_perf_event *hwc = &event->hw;
@@ -724,7 +724,8 @@ c_start(struct seq_file *f, loff_t *pos)
static void *
c_next(struct seq_file *f, void *v, loff_t *pos)
{
return NULL;
++*pos;
return c_start(f, pos);
}

static void
@@ -87,13 +87,13 @@ void __xtensa_libgcc_window_spill(void)
}
EXPORT_SYMBOL(__xtensa_libgcc_window_spill);

unsigned long __sync_fetch_and_and_4(unsigned long *p, unsigned long v)
unsigned int __sync_fetch_and_and_4(volatile void *p, unsigned int v)
{
BUG();
}
EXPORT_SYMBOL(__sync_fetch_and_and_4);

unsigned long __sync_fetch_and_or_4(unsigned long *p, unsigned long v)
unsigned int __sync_fetch_and_or_4(volatile void *p, unsigned int v)
{
BUG();
}
@@ -119,6 +119,7 @@ static int software_key_query(const struct kernel_pkey_params *params,
if (IS_ERR(tfm))
return PTR_ERR(tfm);

ret = -ENOMEM;
key = kmalloc(pkey->keylen + sizeof(u32) * 2 + pkey->paramlen,
GFP_KERNEL);
if (!key)
@@ -153,7 +153,6 @@ extern char *make_class_name(const char *name, struct kobject *kobj);
extern int devres_release_all(struct device *dev);
extern void device_block_probing(void);
extern void device_unblock_probing(void);
extern void driver_deferred_probe_force_trigger(void);

/* /sys/devices directory */
extern struct kset *devices_kset;
@@ -50,6 +50,7 @@ static DEFINE_MUTEX(wfs_lock);
static LIST_HEAD(deferred_sync);
static unsigned int defer_sync_state_count = 1;
static unsigned int defer_fw_devlink_count;
static LIST_HEAD(deferred_fw_devlink);
static DEFINE_MUTEX(defer_fw_devlink_lock);
static bool fw_devlink_is_permissive(void);

@@ -754,11 +755,11 @@ static void __device_links_queue_sync_state(struct device *dev,
*/
dev->state_synced = true;

if (WARN_ON(!list_empty(&dev->links.defer_sync)))
if (WARN_ON(!list_empty(&dev->links.defer_hook)))
return;

get_device(dev);
list_add_tail(&dev->links.defer_sync, list);
list_add_tail(&dev->links.defer_hook, list);
}

/**
@@ -776,8 +777,8 @@ static void device_links_flush_sync_list(struct list_head *list,
{
struct device *dev, *tmp;

list_for_each_entry_safe(dev, tmp, list, links.defer_sync) {
list_del_init(&dev->links.defer_sync);
list_for_each_entry_safe(dev, tmp, list, links.defer_hook) {
list_del_init(&dev->links.defer_hook);

if (dev != dont_lock_dev)
device_lock(dev);
@@ -815,12 +816,12 @@ void device_links_supplier_sync_state_resume(void)
if (defer_sync_state_count)
goto out;

list_for_each_entry_safe(dev, tmp, &deferred_sync, links.defer_sync) {
list_for_each_entry_safe(dev, tmp, &deferred_sync, links.defer_hook) {
/*
* Delete from deferred_sync list before queuing it to
* sync_list because defer_sync is used for both lists.
* sync_list because defer_hook is used for both lists.
*/
list_del_init(&dev->links.defer_sync);
list_del_init(&dev->links.defer_hook);
__device_links_queue_sync_state(dev, &sync_list);
}
out:
@@ -838,8 +839,8 @@ late_initcall(sync_state_resume_initcall);

static void __device_links_supplier_defer_sync(struct device *sup)
{
if (list_empty(&sup->links.defer_sync) && dev_has_sync_state(sup))
list_add_tail(&sup->links.defer_sync, &deferred_sync);
if (list_empty(&sup->links.defer_hook) && dev_has_sync_state(sup))
list_add_tail(&sup->links.defer_hook, &deferred_sync);
}

static void device_link_drop_managed(struct device_link *link)
@@ -1052,7 +1053,7 @@ void device_links_driver_cleanup(struct device *dev)
WRITE_ONCE(link->status, DL_STATE_DORMANT);
}

list_del_init(&dev->links.defer_sync);
list_del_init(&dev->links.defer_hook);
__device_links_no_driver(dev);

device_links_write_unlock();
@@ -1244,6 +1245,12 @@ static void fw_devlink_link_device(struct device *dev)
fw_ret = -EAGAIN;
} else {
fw_ret = -ENODEV;
/*
* defer_hook is not used to add device to deferred_sync list
* until device is bound. Since deferred fw devlink also blocks
* probing, same list hook can be used for deferred_fw_devlink.
*/
list_add_tail(&dev->links.defer_hook, &deferred_fw_devlink);
}

if (fw_ret == -ENODEV)
@@ -1312,6 +1319,9 @@ void fw_devlink_pause(void)
*/
void fw_devlink_resume(void)
{
struct device *dev, *tmp;
LIST_HEAD(probe_list);

mutex_lock(&defer_fw_devlink_lock);
if (!defer_fw_devlink_count) {
WARN(true, "Unmatched fw_devlink pause/resume!");
@@ -1323,9 +1333,19 @@ void fw_devlink_resume(void)
goto out;

device_link_add_missing_supplier_links();
driver_deferred_probe_force_trigger();
list_splice_tail_init(&deferred_fw_devlink, &probe_list);
out:
mutex_unlock(&defer_fw_devlink_lock);

/*
* bus_probe_device() can cause new devices to get added and they'll
* try to grab defer_fw_devlink_lock. So, this needs to be done outside
* the defer_fw_devlink_lock.
*/
list_for_each_entry_safe(dev, tmp, &probe_list, links.defer_hook) {
list_del_init(&dev->links.defer_hook);
bus_probe_device(dev);
}
}
/* Device links support end. */

@@ -2172,7 +2192,7 @@ void device_initialize(struct device *dev)
INIT_LIST_HEAD(&dev->links.consumers);
INIT_LIST_HEAD(&dev->links.suppliers);
INIT_LIST_HEAD(&dev->links.needs_suppliers);
INIT_LIST_HEAD(&dev->links.defer_sync);
INIT_LIST_HEAD(&dev->links.defer_hook);
dev->links.status = DL_DEV_NO_DRIVER;
}
EXPORT_SYMBOL_GPL(device_initialize);
@@ -164,11 +164,6 @@ static void driver_deferred_probe_trigger(void)
if (!driver_deferred_probe_enable)
return;

driver_deferred_probe_force_trigger();
}

void driver_deferred_probe_force_trigger(void)
{
/*
* A successful probe means that all the devices in the pending list
* should be triggered to be reprobed. Move all the deferred devices
@@ -4,7 +4,7 @@
# subsystems should select the appropriate symbols.

config REGMAP
default y if (REGMAP_I2C || REGMAP_SPI || REGMAP_SPMI || REGMAP_W1 || REGMAP_AC97 || REGMAP_MMIO || REGMAP_IRQ || REGMAP_SCCB || REGMAP_I3C)
default y if (REGMAP_I2C || REGMAP_SPI || REGMAP_SPMI || REGMAP_W1 || REGMAP_AC97 || REGMAP_MMIO || REGMAP_IRQ || REGMAP_SOUNDWIRE || REGMAP_SCCB || REGMAP_I3C)
select IRQ_DOMAIN if REGMAP_IRQ
bool
@@ -463,29 +463,31 @@ static ssize_t regmap_cache_only_write_file(struct file *file,
{
struct regmap *map = container_of(file->private_data,
struct regmap, cache_only);
ssize_t result;
bool was_enabled, require_sync = false;
bool new_val, require_sync = false;
int err;

err = kstrtobool_from_user(user_buf, count, &new_val);
/* Ignore malforned data like debugfs_write_file_bool() */
if (err)
return count;

err = debugfs_file_get(file->f_path.dentry);
if (err)
return err;

map->lock(map->lock_arg);

was_enabled = map->cache_only;

result = debugfs_write_file_bool(file, user_buf, count, ppos);
if (result < 0) {
map->unlock(map->lock_arg);
return result;
}

if (map->cache_only && !was_enabled) {
if (new_val && !map->cache_only) {
dev_warn(map->dev, "debugfs cache_only=Y forced\n");
add_taint(TAINT_USER, LOCKDEP_STILL_OK);
} else if (!map->cache_only && was_enabled) {
} else if (!new_val && map->cache_only) {
dev_warn(map->dev, "debugfs cache_only=N forced: syncing cache\n");
require_sync = true;
}
map->cache_only = new_val;

map->unlock(map->lock_arg);
debugfs_file_put(file->f_path.dentry);

if (require_sync) {
err = regcache_sync(map);
@@ -493,7 +495,7 @@ static ssize_t regmap_cache_only_write_file(struct file *file,
dev_err(map->dev, "Failed to sync cache %d\n", err);
}

return result;
return count;
}

static const struct file_operations regmap_cache_only_fops = {
@@ -508,28 +510,32 @@ static ssize_t regmap_cache_bypass_write_file(struct file *file,
{
struct regmap *map = container_of(file->private_data,
struct regmap, cache_bypass);
ssize_t result;
bool was_enabled;
bool new_val;
int err;

err = kstrtobool_from_user(user_buf, count, &new_val);
/* Ignore malforned data like debugfs_write_file_bool() */
if (err)
return count;

err = debugfs_file_get(file->f_path.dentry);
if (err)
return err;

map->lock(map->lock_arg);

was_enabled = map->cache_bypass;

result = debugfs_write_file_bool(file, user_buf, count, ppos);
if (result < 0)
goto out;

if (map->cache_bypass && !was_enabled) {
if (new_val && !map->cache_bypass) {
dev_warn(map->dev, "debugfs cache_bypass=Y forced\n");
add_taint(TAINT_USER, LOCKDEP_STILL_OK);
} else if (!map->cache_bypass && was_enabled) {
} else if (!new_val && map->cache_bypass) {
dev_warn(map->dev, "debugfs cache_bypass=N forced\n");
}
map->cache_bypass = new_val;

out:
map->unlock(map->lock_arg);
debugfs_file_put(file->f_path.dentry);

return result;
return count;
}

static const struct file_operations regmap_cache_bypass_fops = {
@@ -1364,7 +1364,7 @@ static int dev_get_regmap_match(struct device *dev, void *res, void *data)

/* If the user didn't specify a name match any */
if (data)
return (*r)->name == data;
return !strcmp((*r)->name, data);
else
return 1;
}
@@ -2021,7 +2021,8 @@ static ssize_t hot_add_show(struct class *class,
return ret;
return scnprintf(buf, PAGE_SIZE, "%d\n", ret);
}
static CLASS_ATTR_RO(hot_add);
static struct class_attribute class_attr_hot_add =
__ATTR(hot_add, 0400, hot_add_show, NULL);

static ssize_t hot_remove_store(struct class *class,
struct class_attribute *attr,
@@ -236,15 +236,14 @@ static int sysc_wait_softreset(struct sysc *ddata)
syss_done = ddata->cfg.syss_mask;

if (syss_offset >= 0) {
error = readx_poll_timeout(sysc_read_sysstatus, ddata, rstval,
(rstval & ddata->cfg.syss_mask) ==
syss_done,
100, MAX_MODULE_SOFTRESET_WAIT);
error = readx_poll_timeout_atomic(sysc_read_sysstatus, ddata,
rstval, (rstval & ddata->cfg.syss_mask) ==
syss_done, 100, MAX_MODULE_SOFTRESET_WAIT);

} else if (ddata->cfg.quirks & SYSC_QUIRK_RESET_STATUS) {
error = readx_poll_timeout(sysc_read_sysconfig, ddata, rstval,
!(rstval & sysc_mask),
100, MAX_MODULE_SOFTRESET_WAIT);
error = readx_poll_timeout_atomic(sysc_read_sysconfig, ddata,
rstval, !(rstval & sysc_mask),
100, MAX_MODULE_SOFTRESET_WAIT);
}

return error;
@@ -1279,7 +1278,8 @@ static int __maybe_unused sysc_noirq_suspend(struct device *dev)

ddata = dev_get_drvdata(dev);

if (ddata->cfg.quirks & SYSC_QUIRK_LEGACY_IDLE)
if (ddata->cfg.quirks &
(SYSC_QUIRK_LEGACY_IDLE | SYSC_QUIRK_NO_IDLE))
return 0;

return pm_runtime_force_suspend(dev);
@@ -1291,7 +1291,8 @@ static int __maybe_unused sysc_noirq_resume(struct device *dev)

ddata = dev_get_drvdata(dev);

if (ddata->cfg.quirks & SYSC_QUIRK_LEGACY_IDLE)
if (ddata->cfg.quirks &
(SYSC_QUIRK_LEGACY_IDLE | SYSC_QUIRK_NO_IDLE))
return 0;

return pm_runtime_force_resume(dev);
@@ -1728,8 +1729,8 @@ static void sysc_quirk_rtc(struct sysc *ddata, bool lock)

local_irq_save(flags);
/* RTC_STATUS BUSY bit may stay active for 1/32768 seconds (~30 usec) */
error = readl_poll_timeout(ddata->module_va + 0x44, val,
!(val & BIT(0)), 100, 50);
error = readl_poll_timeout_atomic(ddata->module_va + 0x44, val,
!(val & BIT(0)), 100, 50);
if (error)
dev_warn(ddata->dev, "rtc busy timeout\n");
/* Now we have ~15 microseconds to read/write various registers */
@@ -2864,6 +2865,24 @@ static int sysc_check_disabled_devices(struct sysc *ddata)
return error;
}

/*
* Ignore timers tagged with no-reset and no-idle. These are likely in use,
* for example by drivers/clocksource/timer-ti-dm-systimer.c. If more checks
* are needed, we could also look at the timer register configuration.
*/
static int sysc_check_active_timer(struct sysc *ddata)
{
if (ddata->cap->type != TI_SYSC_OMAP2_TIMER &&
ddata->cap->type != TI_SYSC_OMAP4_TIMER)
return 0;

if ((ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT) &&
(ddata->cfg.quirks & SYSC_QUIRK_NO_IDLE))
return -EBUSY;

return 0;
}

static const struct of_device_id sysc_match_table[] = {
{ .compatible = "simple-bus", },
{ /* sentinel */ },
@@ -2920,6 +2939,10 @@ static int sysc_probe(struct platform_device *pdev)
if (error)
return error;

error = sysc_check_active_timer(ddata);
if (error)
return error;

error = sysc_get_clocks(ddata);
if (error)
return error;
@@ -2116,6 +2116,7 @@ static struct virtio_device_id id_table[] = {
{ VIRTIO_ID_CONSOLE, VIRTIO_DEV_ANY_ID },
{ 0 },
};
MODULE_DEVICE_TABLE(virtio, id_table);

static unsigned int features[] = {
VIRTIO_CONSOLE_F_SIZE,
@@ -2128,6 +2129,7 @@ static struct virtio_device_id rproc_serial_id_table[] = {
#endif
{ 0 },
};
MODULE_DEVICE_TABLE(virtio, rproc_serial_id_table);

static unsigned int rproc_serial_features[] = {
};
@@ -2280,6 +2282,5 @@ static void __exit fini(void)
module_init(init);
module_exit(fini);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio console driver");
MODULE_LICENSE("GPL");
@@ -50,6 +50,7 @@ source "drivers/clk/versatile/Kconfig"
config CLK_HSDK
bool "PLL Driver for HSDK platform"
depends on OF || COMPILE_TEST
depends on IOMEM
help
This driver supports the HSDK core, system, ddr, tunnel and hdmi PLLs
control.
@@ -131,6 +131,18 @@ static const struct clk_div_table ast2600_eclk_div_table[] = {
{ 0 }
};

static const struct clk_div_table ast2600_emmc_extclk_div_table[] = {
{ 0x0, 2 },
{ 0x1, 4 },
{ 0x2, 6 },
{ 0x3, 8 },
{ 0x4, 10 },
{ 0x5, 12 },
{ 0x6, 14 },
{ 0x7, 16 },
{ 0 }
};

static const struct clk_div_table ast2600_mac_div_table[] = {
{ 0x0, 4 },
{ 0x1, 4 },
@@ -390,6 +402,11 @@ static struct clk_hw *aspeed_g6_clk_hw_register_gate(struct device *dev,
return hw;
}

static const char *const emmc_extclk_parent_names[] = {
"emmc_extclk_hpll_in",
"mpll",
};

static const char * const vclk_parent_names[] = {
"dpll",
"d1pll",
@@ -459,16 +476,32 @@ static int aspeed_g6_clk_probe(struct platform_device *pdev)
return PTR_ERR(hw);
aspeed_g6_clk_data->hws[ASPEED_CLK_UARTX] = hw;

/* EMMC ext clock divider */
hw = clk_hw_register_gate(dev, "emmc_extclk_gate", "hpll", 0,
scu_g6_base + ASPEED_G6_CLK_SELECTION1, 15, 0,
&aspeed_g6_clk_lock);
/* EMMC ext clock */
hw = clk_hw_register_fixed_factor(dev, "emmc_extclk_hpll_in", "hpll",
0, 1, 2);
if (IS_ERR(hw))
return PTR_ERR(hw);
hw = clk_hw_register_divider_table(dev, "emmc_extclk", "emmc_extclk_gate", 0,
scu_g6_base + ASPEED_G6_CLK_SELECTION1, 12, 3, 0,
ast2600_div_table,
&aspeed_g6_clk_lock);

hw = clk_hw_register_mux(dev, "emmc_extclk_mux",
emmc_extclk_parent_names,
ARRAY_SIZE(emmc_extclk_parent_names), 0,
scu_g6_base + ASPEED_G6_CLK_SELECTION1, 11, 1,
0, &aspeed_g6_clk_lock);
if (IS_ERR(hw))
return PTR_ERR(hw);

hw = clk_hw_register_gate(dev, "emmc_extclk_gate", "emmc_extclk_mux",
0, scu_g6_base + ASPEED_G6_CLK_SELECTION1,
15, 0, &aspeed_g6_clk_lock);
if (IS_ERR(hw))
return PTR_ERR(hw);

hw = clk_hw_register_divider_table(dev, "emmc_extclk",
"emmc_extclk_gate", 0,
scu_g6_base +
ASPEED_G6_CLK_SELECTION1, 12,
3, 0, ast2600_emmc_extclk_div_table,
&aspeed_g6_clk_lock);
if (IS_ERR(hw))
return PTR_ERR(hw);
aspeed_g6_clk_data->hws[ASPEED_CLK_EMMC] = hw;
@@ -42,6 +42,7 @@ config ARMADA_AP806_SYSCON

config ARMADA_AP_CPU_CLK
bool
select ARMADA_AP_CP_HELPER

config ARMADA_CP110_SYSCON
bool
@@ -19,7 +19,7 @@
/* For type1, set SYSC_OMAP2_CLOCKACTIVITY for fck off on idle, l4 clock on */
#define DMTIMER_TYPE1_ENABLE ((1 << 9) | (SYSC_IDLE_SMART << 3) | \
SYSC_OMAP2_ENAWAKEUP | SYSC_OMAP2_AUTOIDLE)

#define DMTIMER_TYPE1_DISABLE (SYSC_OMAP2_SOFTRESET | SYSC_OMAP2_AUTOIDLE)
#define DMTIMER_TYPE2_ENABLE (SYSC_IDLE_SMART_WKUP << 2)
#define DMTIMER_RESET_WAIT 100000

@@ -44,6 +44,8 @@ struct dmtimer_systimer {
u8 ctrl;
u8 wakeup;
u8 ifctrl;
struct clk *fck;
struct clk *ick;
unsigned long rate;
};

@@ -298,16 +300,20 @@ static void __init dmtimer_systimer_select_best(void)
}

/* Interface clocks are only available on some SoCs variants */
static int __init dmtimer_systimer_init_clock(struct device_node *np,
static int __init dmtimer_systimer_init_clock(struct dmtimer_systimer *t,
struct device_node *np,
const char *name,
unsigned long *rate)
{
struct clk *clock;
unsigned long r;
bool is_ick = false;
int error;

is_ick = !strncmp(name, "ick", 3);

clock = of_clk_get_by_name(np, name);
if ((PTR_ERR(clock) == -EINVAL) && !strncmp(name, "ick", 3))
if ((PTR_ERR(clock) == -EINVAL) && is_ick)
return 0;
else if (IS_ERR(clock))
return PTR_ERR(clock);
@@ -320,6 +326,11 @@ static int __init dmtimer_systimer_init_clock(struct device_node *np,
if (!r)
return -ENODEV;

if (is_ick)
t->ick = clock;
else
t->fck = clock;

*rate = r;

return 0;
@@ -339,7 +350,10 @@ static void dmtimer_systimer_enable(struct dmtimer_systimer *t)

static void dmtimer_systimer_disable(struct dmtimer_systimer *t)
{
writel_relaxed(0, t->base + t->sysc);
if (!dmtimer_systimer_revision1(t))
return;

writel_relaxed(DMTIMER_TYPE1_DISABLE, t->base + t->sysc);
}

static int __init dmtimer_systimer_setup(struct device_node *np,
@@ -366,13 +380,13 @@ static int __init dmtimer_systimer_setup(struct device_node *np,
pr_err("%s: clock source init failed: %i\n", __func__, error);

/* For ti-sysc, we have timer clocks at the parent module level */
error = dmtimer_systimer_init_clock(np->parent, "fck", &rate);
error = dmtimer_systimer_init_clock(t, np->parent, "fck", &rate);
if (error)
goto err_unmap;

t->rate = rate;

error = dmtimer_systimer_init_clock(np->parent, "ick", &rate);
error = dmtimer_systimer_init_clock(t, np->parent, "ick", &rate);
if (error)
goto err_unmap;

@@ -496,12 +510,18 @@ static void omap_clockevent_idle(struct clock_event_device *evt)
struct dmtimer_systimer *t = &clkevt->t;

dmtimer_systimer_disable(t);
clk_disable(t->fck);
}

static void omap_clockevent_unidle(struct clock_event_device *evt)
{
struct dmtimer_clockevent *clkevt = to_dmtimer_clockevent(evt);
struct dmtimer_systimer *t = &clkevt->t;
int error;

error = clk_enable(t->fck);
if (error)
pr_err("could not enable timer fck on resume: %i\n", error);

dmtimer_systimer_enable(t);
writel_relaxed(OMAP_TIMER_INT_OVERFLOW, t->base + t->irq_ena);
@@ -570,8 +590,8 @@ static int __init dmtimer_clockevent_init(struct device_node *np)
3, /* Timer internal resynch latency */
0xffffffff);

if (of_device_is_compatible(np, "ti,am33xx") ||
of_device_is_compatible(np, "ti,am43")) {
if (of_machine_is_compatible("ti,am33xx") ||
of_machine_is_compatible("ti,am43")) {
dev->suspend = omap_clockevent_idle;
dev->resume = omap_clockevent_unidle;
}
@@ -616,12 +636,18 @@ static void dmtimer_clocksource_suspend(struct clocksource *cs)

clksrc->loadval = readl_relaxed(t->base + t->counter);
dmtimer_systimer_disable(t);
clk_disable(t->fck);
}

static void dmtimer_clocksource_resume(struct clocksource *cs)
{
struct dmtimer_clocksource *clksrc = to_dmtimer_clocksource(cs);
struct dmtimer_systimer *t = &clksrc->t;
int error;

error = clk_enable(t->fck);
if (error)
pr_err("could not enable timer fck on resume: %i\n", error);

dmtimer_systimer_enable(t);
writel_relaxed(clksrc->loadval, t->base + t->counter);
@@ -653,8 +679,8 @@ static int __init dmtimer_clocksource_init(struct device_node *np)
dev->mask = CLOCKSOURCE_MASK(32);
dev->flags = CLOCK_SOURCE_IS_CONTINUOUS;

if (of_device_is_compatible(np, "ti,am33xx") ||
of_device_is_compatible(np, "ti,am43")) {
/* Unlike for clockevent, legacy code sets suspend only for am4 */
if (of_machine_is_compatible("ti,am43")) {
dev->suspend = dmtimer_clocksource_suspend;
dev->resume = dmtimer_clocksource_resume;
}
@@ -1274,18 +1274,26 @@ static ssize_t quad8_signal_cable_fault_read(struct counter_device *counter,
struct counter_signal *signal,
void *private, char *buf)
{
const struct quad8_iio *const priv = counter->priv;
struct quad8_iio *const priv = counter->priv;
const size_t channel_id = signal->id / 2;
const bool disabled = !(priv->cable_fault_enable & BIT(channel_id));
bool disabled;
unsigned int status;
unsigned int fault;

if (disabled)
mutex_lock(&priv->lock);

disabled = !(priv->cable_fault_enable & BIT(channel_id));

if (disabled) {
mutex_unlock(&priv->lock);
return -EINVAL;
}

/* Logic 0 = cable fault */
status = inb(priv->base + QUAD8_DIFF_ENCODER_CABLE_STATUS);

mutex_unlock(&priv->lock);

/* Mask respective channel and invert logic */
fault = !(status & BIT(channel_id));

@@ -1317,6 +1325,8 @@ static ssize_t quad8_signal_cable_fault_enable_write(
if (ret)
return ret;

mutex_lock(&priv->lock);

if (enable)
priv->cable_fault_enable |= BIT(channel_id);
else
@@ -1327,6 +1337,8 @@ static ssize_t quad8_signal_cable_fault_enable_write(

outb(cable_fault_enable, priv->base + QUAD8_DIFF_ENCODER_CABLE_STATUS);

mutex_unlock(&priv->lock);

return len;
}

@@ -1353,6 +1365,8 @@ static ssize_t quad8_signal_fck_prescaler_write(struct counter_device *counter,
if (ret)
return ret;

mutex_lock(&priv->lock);

priv->fck_prescaler[channel_id] = prescaler;

/* Reset Byte Pointer */
@@ -1363,6 +1377,8 @@ static ssize_t quad8_signal_fck_prescaler_write(struct counter_device *counter,
outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP | QUAD8_RLD_PRESET_PSC,
base_offset + 1);

mutex_unlock(&priv->lock);

return len;
}
@@ -2464,7 +2464,7 @@ static struct cpufreq_driver intel_cpufreq = {
.name = "intel_cpufreq",
};

static struct cpufreq_driver *default_driver = &intel_pstate;
static struct cpufreq_driver *default_driver;

static void intel_pstate_driver_cleanup(void)
{
@@ -2758,6 +2758,7 @@ static int __init intel_pstate_init(void)
hwp_active++;
hwp_mode_bdw = id->driver_data;
intel_pstate.attr = hwp_cpufreq_attrs;
default_driver = &intel_pstate;
goto hwp_cpu_matched;
}
} else {
@@ -2775,7 +2776,8 @@ static int __init intel_pstate_init(void)
return -ENODEV;
}
/* Without HWP start in the passive mode. */
default_driver = &intel_cpufreq;
if (!default_driver)
default_driver = &intel_cpufreq;

hwp_cpu_matched:
/*
@@ -2820,6 +2822,8 @@ static int __init intel_pstate_setup(char *str)

if (!strcmp(str, "disable")) {
no_load = 1;
} else if (!strcmp(str, "active")) {
default_driver = &intel_pstate;
} else if (!strcmp(str, "passive")) {
default_driver = &intel_cpufreq;
no_hwp = 1;
@@ -102,7 +102,7 @@ static struct net_device *chtls_find_netdev(struct chtls_dev *cdev,
case PF_INET:
if (likely(!inet_sk(sk)->inet_rcv_saddr))
return ndev;
ndev = ip_dev_find(&init_net, inet_sk(sk)->inet_rcv_saddr);
ndev = __ip_dev_find(&init_net, inet_sk(sk)->inet_rcv_saddr, false);
break;
#if IS_ENABLED(CONFIG_IPV6)
case PF_INET6:
@@ -1052,14 +1052,15 @@ int chtls_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
&record_type);
if (err)
goto out_err;

/* Avoid appending tls handshake, alert to tls data */
if (skb)
tx_skb_finalize(skb);
}

recordsz = size;
csk->tlshws.txleft = recordsz;
csk->tlshws.type = record_type;

if (skb)
ULP_SKB_CB(skb)->ulp.tls.type = record_type;
}

if (!skb || (ULP_SKB_CB(skb)->flags & ULPCB_FLAG_NO_APPEND) ||
@@ -45,10 +45,10 @@ static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen)
size_t ret = 0;

dmabuf = dentry->d_fsdata;
dma_resv_lock(dmabuf->resv, NULL);
spin_lock(&dmabuf->name_lock);
if (dmabuf->name)
ret = strlcpy(name, dmabuf->name, DMA_BUF_NAME_LEN);
dma_resv_unlock(dmabuf->resv);
spin_unlock(&dmabuf->name_lock);

return dynamic_dname(dentry, buffer, buflen, "/%s:%s",
dentry->d_name.name, ret > 0 ? name : "");
@@ -338,8 +338,10 @@ static long dma_buf_set_name(struct dma_buf *dmabuf, const char __user *buf)
kfree(name);
goto out_unlock;
}
spin_lock(&dmabuf->name_lock);
kfree(dmabuf->name);
dmabuf->name = name;
spin_unlock(&dmabuf->name_lock);

out_unlock:
dma_resv_unlock(dmabuf->resv);
@@ -402,10 +404,10 @@ static void dma_buf_show_fdinfo(struct seq_file *m, struct file *file)
/* Don't count the temporary reference taken inside procfs seq_show */
seq_printf(m, "count:\t%ld\n", file_count(dmabuf->file) - 1);
seq_printf(m, "exp_name:\t%s\n", dmabuf->exp_name);
dma_resv_lock(dmabuf->resv, NULL);
spin_lock(&dmabuf->name_lock);
if (dmabuf->name)
seq_printf(m, "name:\t%s\n", dmabuf->name);
dma_resv_unlock(dmabuf->resv);
spin_unlock(&dmabuf->name_lock);
}

static const struct file_operations dma_buf_fops = {
@@ -542,6 +544,7 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
dmabuf->size = exp_info->size;
dmabuf->exp_name = exp_info->exp_name;
dmabuf->owner = exp_info->owner;
spin_lock_init(&dmabuf->name_lock);
init_waitqueue_head(&dmabuf->poll);
dmabuf->cb_excl.poll = dmabuf->cb_shared.poll = &dmabuf->poll;
dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0;
@@ -1176,6 +1176,8 @@ static int dmatest_run_set(const char *val, const struct kernel_param *kp)
} else if (dmatest_run) {
if (!is_threaded_test_pending(info)) {
pr_info("No channels configured, continue with any\n");
if (!is_threaded_test_run(info))
stop_threaded_test(info);
add_threaded_test(info);
}
start_threaded_tests(info);