Merge char-misc-next into staging-next
This resolves the merge issues and confusion people were having with the
goldfish drivers due to changes for them showing up in two different trees.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
commit bb27d4998a
@@ -27,3 +27,17 @@ Description: The mapping of which primary/sub channels are bound to which
		Virtual Processors.
		Format: <channel's child_relid:the bound cpu's number>
Users:		tools/hv/lsvmbus

What:		/sys/bus/vmbus/devices/vmbus_*/device
Date:		Dec. 2015
KernelVersion:	4.5
Contact:	K. Y. Srinivasan <kys@microsoft.com>
Description:	The 16 bit device ID of the device
Users:		tools/hv/lsvmbus and user level RDMA libraries

What:		/sys/bus/vmbus/devices/vmbus_*/vendor
Date:		Dec. 2015
KernelVersion:	4.5
Contact:	K. Y. Srinivasan <kys@microsoft.com>
Description:	The 16 bit vendor ID of the device
Users:		tools/hv/lsvmbus and user level RDMA libraries

@@ -7,7 +7,7 @@ This is the authoritative documentation on the design, interface and
conventions of cgroup v2. It describes all userland-visible aspects
of cgroup including core and specific controller behaviors. All
future changes must be reflected in this document. Documentation for
v1 is available under Documentation/cgroup-legacy/.
v1 is available under Documentation/cgroup-v1/.

CONTENTS

Documentation/devicetree/bindings/goldfish/pipe.txt (new file, 17 lines)
@@ -0,0 +1,17 @@
Android Goldfish QEMU Pipe

Android pipe virtual device generated by the Android emulator.

Required properties:

- compatible : should contain "google,android-pipe" to match emulator
- reg : <registers mapping>
- interrupts : <interrupt mapping>

Example:

	android_pipe@a010000 {
		compatible = "google,android-pipe";
		reg = <ff018000 0x2000>;
		interrupts = <0x12>;
	};

Documentation/devicetree/bindings/misc/eeprom-93xx46.txt (new file, 25 lines)
@@ -0,0 +1,25 @@
EEPROMs (SPI) compatible with Microchip Technology 93xx46 family.

Required properties:
- compatible : shall be one of:
    "atmel,at93c46d"
    "eeprom-93xx46"
- data-size : number of data bits per word (either 8 or 16)

Optional properties:
- read-only : parameter-less property which disables writes to the EEPROM
- select-gpios : if present, specifies the GPIO that will be asserted prior to
  each access to the EEPROM (e.g. for SPI bus multiplexing)

Property rules described in Documentation/devicetree/bindings/spi/spi-bus.txt
apply. In particular, "reg" and "spi-max-frequency" properties must be given.

Example:
	eeprom@0 {
		compatible = "eeprom-93xx46";
		reg = <0>;
		spi-max-frequency = <1000000>;
		spi-cs-high;
		data-size = <8>;
		select-gpios = <&gpio4 4 GPIO_ACTIVE_HIGH>;
	};

Documentation/devicetree/bindings/nvmem/lpc1857-eeprom.txt (new file, 28 lines)
@@ -0,0 +1,28 @@
* NXP LPC18xx EEPROM memory NVMEM driver

Required properties:
- compatible: Should be "nxp,lpc1857-eeprom"
- reg: Must contain an entry with the physical base address and length
  for each entry in reg-names.
- reg-names: Must include the following entries.
  - reg: EEPROM registers.
  - mem: EEPROM address space.
- clocks: Must contain an entry for each entry in clock-names.
- clock-names: Must include the following entries.
  - eeprom: EEPROM operating clock.
- resets: Should contain a reference to the reset controller asserting
  the EEPROM in reset.
- interrupts: Should contain EEPROM interrupt.

Example:

	eeprom: eeprom@4000e000 {
		compatible = "nxp,lpc1857-eeprom";
		reg = <0x4000e000 0x1000>,
		      <0x20040000 0x4000>;
		reg-names = "reg", "mem";
		clocks = <&ccu1 CLK_CPU_EEPROM>;
		clock-names = "eeprom";
		resets = <&rgu 27>;
		interrupts = <4>;
	};

Documentation/devicetree/bindings/nvmem/mtk-efuse.txt (new file, 36 lines)
@@ -0,0 +1,36 @@
= Mediatek MTK-EFUSE device tree bindings =

This binding is intended to represent MTK-EFUSE which is found in most Mediatek SOCs.

Required properties:
- compatible: should be "mediatek,mt8173-efuse" or "mediatek,efuse"
- reg: Should contain registers location and length

= Data cells =
Are child nodes of MTK-EFUSE, bindings of which as described in
bindings/nvmem/nvmem.txt

Example:

	efuse: efuse@10206000 {
		compatible = "mediatek,mt8173-efuse";
		reg = <0 0x10206000 0 0x1000>;
		#address-cells = <1>;
		#size-cells = <1>;

		/* Data cells */
		thermal_calibration: calib@528 {
			reg = <0x528 0xc>;
		};
	};

= Data consumers =
Are device nodes which consume nvmem data cells.

For example:

	thermal {
		...
		nvmem-cells = <&thermal_calibration>;
		nvmem-cell-names = "calibration";
	};

@@ -8,6 +8,7 @@ OHCI and EHCI controllers.
Required properties:
- compatible: "renesas,pci-r8a7790" for the R8A7790 SoC;
	      "renesas,pci-r8a7791" for the R8A7791 SoC;
	      "renesas,pci-r8a7793" for the R8A7793 SoC;
	      "renesas,pci-r8a7794" for the R8A7794 SoC;
	      "renesas,pci-rcar-gen2" for a generic R-Car Gen2 compatible device

@@ -4,6 +4,7 @@ Required properties:
compatible: "renesas,pcie-r8a7779" for the R8A7779 SoC;
	    "renesas,pcie-r8a7790" for the R8A7790 SoC;
	    "renesas,pcie-r8a7791" for the R8A7791 SoC;
	    "renesas,pcie-r8a7793" for the R8A7793 SoC;
	    "renesas,pcie-r8a7795" for the R8A7795 SoC;
	    "renesas,pcie-rcar-gen2" for a generic R-Car Gen2 compatible device.

@@ -30,6 +30,8 @@ The compatible list for this generic sound card currently:
  "fsl,imx-audio-sgtl5000"
  (compatible with Documentation/devicetree/bindings/sound/imx-audio-sgtl5000.txt)

  "fsl,imx-audio-wm8960"

Required properties:

  - compatible : Contains one of entries in the compatible list.

@@ -1,8 +1,9 @@
* Renesas R-Car Thermal

Required properties:
- compatible : "renesas,thermal-<soctype>", "renesas,rcar-thermal"
  as fallback.
- compatible : "renesas,thermal-<soctype>",
  "renesas,rcar-gen2-thermal" (with thermal-zone) or
  "renesas,rcar-thermal" (without thermal-zone) as fallback.
  Examples with soctypes are:
    - "renesas,thermal-r8a73a4" (R-Mobile APE6)
    - "renesas,thermal-r8a7779" (R-Car H1)
@@ -36,3 +37,35 @@ thermal@e61f0000 {
		0xe61f0300 0x38>;
	interrupts = <0 69 IRQ_TYPE_LEVEL_HIGH>;
};

Example (with thermal-zone):

thermal-zones {
	cpu_thermal: cpu-thermal {
		polling-delay-passive = <1000>;
		polling-delay = <5000>;

		thermal-sensors = <&thermal>;

		trips {
			cpu-crit {
				temperature = <115000>;
				hysteresis = <0>;
				type = "critical";
			};
		};
		cooling-maps {
		};
	};
};

thermal: thermal@e61f0000 {
	compatible = "renesas,thermal-r8a7790",
		     "renesas,rcar-gen2-thermal",
		     "renesas,rcar-thermal";
	reg = <0 0xe61f0000 0 0x14>, <0 0xe61f0100 0 0x38>;
	interrupts = <0 69 IRQ_TYPE_LEVEL_HIGH>;
	clocks = <&mstp5_clks R8A7790_CLK_THERMAL>;
	power-domains = <&cpg_clocks>;
	#thermal-sensor-cells = <0>;
};

@@ -4235,6 +4235,17 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
		The default value of this parameter is determined by
		the config option CONFIG_WQ_POWER_EFFICIENT_DEFAULT.

	workqueue.debug_force_rr_cpu
		Workqueue used to implicitly guarantee that work
		items queued without explicit CPU specified are put
		on the local CPU. This guarantee is no longer true
		and while local CPU is still preferred work items
		may be put on foreign CPUs. This debug option
		forces round-robin CPU selection to flush out
		usages which depend on the now broken guarantee.
		When enabled, memory and cache locality will be
		impacted.

	x2apic_phys [X86-64,APIC] Use x2apic physical mode instead of
		default x2apic cluster mode on platforms
		supporting x2apic.
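A quick way to exercise the new workqueue.debug_force_rr_cpu option documented above is from the kernel command line; the exact spelling below assumes the usual boolean module-parameter syntax rather than anything stated in the hunk itself:

	workqueue.debug_force_rr_cpu=1
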
@@ -12,10 +12,19 @@ for the X100 devices.

Since it is a PCIe card, it does not have the ability to host hardware
devices for networking, storage and console. We provide these devices
on X100 coprocessors thus enabling a self-bootable equivalent environment
for applications. A key benefit of our solution is that it leverages
the standard virtio framework for network, disk and console devices,
though in our case the virtio framework is used across a PCIe bus.
on X100 coprocessors thus enabling a self-bootable equivalent
environment for applications. A key benefit of our solution is that it
leverages the standard virtio framework for network, disk and console
devices, though in our case the virtio framework is used across a PCIe
bus. A Virtio Over PCIe (VOP) driver allows creating user space
backends or devices on the host which are used to probe virtio drivers
for these devices on the MIC card. The existing VRINGH infrastructure
in the kernel is used to access virtio rings from the host. The card
VOP driver allows card virtio drivers to communicate with their user
space backends on the host via a device page. Ring 3 apps on the host
can add, remove and configure virtio devices. A thin MIC specific
virtio_config_ops is implemented which is borrowed heavily from
previous similar implementations in lguest and s390.

MIC PCIe card has a dma controller with 8 channels. These channels are
shared between the host s/w and the card s/w. 0 to 3 are used by host
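To make the Ring 3 flow described above concrete, here is a minimal C sketch of how a host user-space backend might open the per-card VOP control node before configuring virtio devices. The /dev/vop_virtio<id> name is taken from the mpssd hunk later in this diff; the helper name and error handling are purely illustrative:

	#include <errno.h>
	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>

	/* Open the VOP control node for MIC card 'id'; the virtio add/configure
	 * ioctls are then issued on the returned descriptor. */
	static int open_vop_device(int id)
	{
		char path[64];
		int fd;

		snprintf(path, sizeof(path), "/dev/vop_virtio%d", id);
		fd = open(path, O_RDWR);
		if (fd < 0)
			fprintf(stderr, "open %s: %s\n", path, strerror(errno));
		return fd;
	}
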
@@ -38,7 +47,6 @@ single threaded performance for the host compared to MIC, the ability of
the host to initiate DMA's to/from the card using the MIC DMA engine and
the fact that the virtio block storage backend can only be on the host.
|
||||
|
|
||||
+----------+ | +----------+
|
||||
| Card OS | | | Host OS |
|
||||
+----------+ | +----------+
|
||||
@ -47,27 +55,25 @@ the fact that the virtio block storage backend can only be on the host.
|
||||
| Virtio| |Virtio | |Virtio| | |Virtio | |Virtio | |Virtio |
|
||||
| Net | |Console | |Block | | |Net | |Console | |Block |
|
||||
| Driver| |Driver | |Driver| | |backend | |backend | |backend |
|
||||
+-------+ +--------+ +------+ | +---------+ +--------+ +--------+
|
||||
+---+---+ +---+----+ +--+---+ | +---------+ +----+---+ +--------+
|
||||
| | | | | | |
|
||||
| | | |User | | |
|
||||
| | | |------|------------|---------|-------
|
||||
+-------------------+ |Kernel +--------------------------+
|
||||
| | | Virtio over PCIe IOCTLs |
|
||||
| | +--------------------------+
|
||||
+-----------+ | | | +-----------+
|
||||
| MIC DMA | | +------+ | +------+ +------+ | | MIC DMA |
|
||||
| Driver | | | SCIF | | | SCIF | | COSM | | | Driver |
|
||||
+-----------+ | +------+ | +------+ +--+---+ | +-----------+
|
||||
| | | | | | | |
|
||||
+---------------+ | +------+ | +--+---+ +--+---+ | +----------------+
|
||||
|MIC virtual Bus| | |SCIF | | |SCIF | | COSM | | |MIC virtual Bus |
|
||||
+---------------+ | |HW Bus| | |HW Bus| | Bus | | +----------------+
|
||||
| | +------+ | +--+---+ +------+ | |
|
||||
| | | | | | | |
|
||||
| +-----------+---+ | | | +---------------+ |
|
||||
| |Intel MIC | | | | |Intel MIC | |
|
||||
+---|Card Driver | | | | |Host Driver | |
|
||||
+------------+--------+ | +----+---------------+-----+
|
||||
| | | |------|------------|--+------|-------
|
||||
+---------+---------+ |Kernel |
|
||||
| | |
|
||||
+---------+ +---+----+ +------+ | +------+ +------+ +--+---+ +-------+
|
||||
|MIC DMA | | VOP | | SCIF | | | SCIF | | COSM | | VOP | |MIC DMA|
|
||||
+---+-----+ +---+----+ +--+---+ | +--+---+ +--+---+ +------+ +----+--+
|
||||
| | | | | | |
|
||||
+---+-----+ +---+----+ +--+---+ | +--+---+ +--+---+ +------+ +----+--+
|
||||
|MIC | | VOP | |SCIF | | |SCIF | | COSM | | VOP | | MIC |
|
||||
|HW Bus | | HW Bus| |HW Bus| | |HW Bus| | Bus | |HW Bus| |HW Bus |
|
||||
+---------+ +--------+ +--+---+ | +--+---+ +------+ +------+ +-------+
|
||||
| | | | | | |
|
||||
| +-----------+--+ | | | +---------------+ |
|
||||
| |Intel MIC | | | | |Intel MIC | |
|
||||
| |Card Driver | | | | |Host Driver | |
|
||||
+---+--------------+------+ | +----+---------------+-----+
|
||||
| | |
|
||||
+-------------------------------------------------------------+
|
||||
| |
|
||||
|
@@ -35,7 +35,7 @@

exec=/usr/sbin/mpssd
sysfs="/sys/class/mic"
mic_modules="mic_host mic_x100_dma scif"
mic_modules="mic_host mic_x100_dma scif vop"

start()
{
@@ -926,7 +926,7 @@ add_virtio_device(struct mic_info *mic, struct mic_device_desc *dd)
	char path[PATH_MAX];
	int fd, err;

	snprintf(path, PATH_MAX, "/dev/mic%d", mic->id);
	snprintf(path, PATH_MAX, "/dev/vop_virtio%d", mic->id);
	fd = open(path, O_RDWR);
	if (fd < 0) {
		mpsslog("Could not open %s %s\n", path, strerror(errno));
@@ -231,15 +231,15 @@ IT knows when a platform crashes even when there is a hard failure on the host.
The Intel AMT Watchdog is composed of two parts:
	1) Firmware feature - receives the heartbeats
	   and sends an event when the heartbeats stop.
	2) Intel MEI driver - connects to the watchdog feature, configures the
	   watchdog and sends the heartbeats.
	2) Intel MEI iAMT watchdog driver - connects to the watchdog feature,
	   configures the watchdog and sends the heartbeats.

The Intel MEI driver uses the kernel watchdog API to configure the Intel AMT
Watchdog and to send heartbeats to it. The default timeout of the
The Intel iAMT watchdog MEI driver uses the kernel watchdog API to configure
the Intel AMT Watchdog and to send heartbeats to it. The default timeout of the
watchdog is 120 seconds.

If the Intel AMT Watchdog feature does not exist (i.e. the connection failed),
the Intel MEI driver will disable the sending of heartbeats.
If the Intel AMT is not enabled in the firmware then the watchdog client won't enumerate
on the me client bus and watchdog devices won't be exposed.


Supported Chipsets
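Because the text above says the Intel MEI iAMT watchdog is driven through the standard kernel watchdog API, a minimal user-space sketch of that API follows. The /dev/watchdog node, the WDIOC_SETTIMEOUT ioctl and the "V" magic close are the generic watchdog interface rather than anything MEI-specific, and the 120-second value simply mirrors the documented default:

	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/watchdog.h>

	int main(void)
	{
		int timeout = 120;	/* documented default timeout */
		int fd = open("/dev/watchdog", O_WRONLY);

		if (fd < 0)
			return 1;

		ioctl(fd, WDIOC_SETTIMEOUT, &timeout);	/* optional: adjust timeout */
		write(fd, "", 1);			/* send one heartbeat */
		write(fd, "V", 1);			/* magic close: disarm cleanly */
		close(fd);
		return 0;
	}
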
MAINTAINERS (18 lines changed)
@@ -2374,14 +2374,6 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/rpi/linux-rpi.git
S: Maintained
N: bcm2835

BROADCOM BCM33XX MIPS ARCHITECTURE
M: Kevin Cernekee <cernekee@gmail.com>
L: linux-mips@linux-mips.org
S: Maintained
F: arch/mips/bcm3384/*
F: arch/mips/include/asm/mach-bcm3384/*
F: arch/mips/kernel/*bmips*

BROADCOM BCM47XX MIPS ARCHITECTURE
M: Hauke Mehrtens <hauke@hauke-m.de>
M: Rafał Miłecki <zajec5@gmail.com>
@@ -5754,6 +5746,7 @@ S: Supported
F: include/uapi/linux/mei.h
F: include/linux/mei_cl_bus.h
F: drivers/misc/mei/*
F: drivers/watchdog/mei_wdt.c
F: Documentation/misc-devices/mei/*

INTEL MIC DRIVERS (mic)
@@ -9799,10 +9792,11 @@ S: Supported
F: drivers/scsi/be2iscsi/

Emulex 10Gbps NIC BE2, BE3-R, Lancer, Skyhawk-R DRIVER
M: Sathya Perla <sathya.perla@avagotech.com>
M: Ajit Khaparde <ajit.khaparde@avagotech.com>
M: Padmanabh Ratnakar <padmanabh.ratnakar@avagotech.com>
M: Sriharsha Basavapatna <sriharsha.basavapatna@avagotech.com>
M: Sathya Perla <sathya.perla@broadcom.com>
M: Ajit Khaparde <ajit.khaparde@broadcom.com>
M: Padmanabh Ratnakar <padmanabh.ratnakar@broadcom.com>
M: Sriharsha Basavapatna <sriharsha.basavapatna@broadcom.com>
M: Somnath Kotur <somnath.kotur@broadcom.com>
L: netdev@vger.kernel.org
W: http://www.emulex.com
S: Supported

Makefile (2 lines changed)
@@ -1,7 +1,7 @@
VERSION = 4
PATCHLEVEL = 5
SUBLEVEL = 0
EXTRAVERSION = -rc3
EXTRAVERSION = -rc4
NAME = Blurry Fish Butt

# *DOCUMENTATION*

@ -338,6 +338,19 @@ config ARC_PAGE_SIZE_4K
|
||||
|
||||
endchoice
|
||||
|
||||
choice
|
||||
prompt "MMU Super Page Size"
|
||||
depends on ISA_ARCV2 && TRANSPARENT_HUGEPAGE
|
||||
default ARC_HUGEPAGE_2M
|
||||
|
||||
config ARC_HUGEPAGE_2M
|
||||
bool "2MB"
|
||||
|
||||
config ARC_HUGEPAGE_16M
|
||||
bool "16MB"
|
||||
|
||||
endchoice
|
||||
|
||||
if ISA_ARCOMPACT
|
||||
|
||||
config ARC_COMPACT_IRQ_LEVELS
|
||||
@ -410,7 +423,7 @@ config ARC_HAS_RTC
|
||||
default n
|
||||
depends on !SMP
|
||||
|
||||
config ARC_HAS_GRTC
|
||||
config ARC_HAS_GFRC
|
||||
bool "SMP synchronized 64-bit cycle counter"
|
||||
default y
|
||||
depends on SMP
|
||||
@ -566,6 +579,12 @@ endmenu
|
||||
endmenu # "ARC Architecture Configuration"
|
||||
|
||||
source "mm/Kconfig"
|
||||
|
||||
config FORCE_MAX_ZONEORDER
|
||||
int "Maximum zone order"
|
||||
default "12" if ARC_HUGEPAGE_16M
|
||||
default "11"
|
||||
|
||||
source "net/Kconfig"
|
||||
source "drivers/Kconfig"
|
||||
source "fs/Kconfig"
|
||||
|
@ -16,7 +16,7 @@ CONFIG_ARC_PLAT_AXS10X=y
|
||||
CONFIG_AXS103=y
|
||||
CONFIG_ISA_ARCV2=y
|
||||
CONFIG_SMP=y
|
||||
# CONFIG_ARC_HAS_GRTC is not set
|
||||
# CONFIG_ARC_HAS_GFRC is not set
|
||||
CONFIG_ARC_UBOOT_SUPPORT=y
|
||||
CONFIG_ARC_BUILTIN_DTB_NAME="vdk_hs38_smp"
|
||||
CONFIG_PREEMPT=y
|
||||
|
@ -349,14 +349,13 @@ struct cpuinfo_arc {
|
||||
struct cpuinfo_arc_bpu bpu;
|
||||
struct bcr_identity core;
|
||||
struct bcr_isa isa;
|
||||
struct bcr_timer timers;
|
||||
unsigned int vec_base;
|
||||
struct cpuinfo_arc_ccm iccm, dccm;
|
||||
struct {
|
||||
unsigned int swap:1, norm:1, minmax:1, barrel:1, crc:1, pad1:3,
|
||||
fpu_sp:1, fpu_dp:1, pad2:6,
|
||||
debug:1, ap:1, smart:1, rtt:1, pad3:4,
|
||||
pad4:8;
|
||||
timer0:1, timer1:1, rtc:1, gfrc:1, pad4:4;
|
||||
} extn;
|
||||
struct bcr_mpy extn_mpy;
|
||||
struct bcr_extn_xymem extn_xymem;
|
||||
|
@ -30,8 +30,11 @@
|
||||
/* Was Intr taken in User Mode */
|
||||
#define AUX_IRQ_ACT_BIT_U 31
|
||||
|
||||
/* 0 is highest level, but taken by FIRQs, if present in design */
|
||||
#define ARCV2_IRQ_DEF_PRIO 0
|
||||
/*
|
||||
* User space should be interruptible even by lowest prio interrupt
|
||||
* Safe even if the actual number of interrupt priorities is fewer, or even one
|
||||
*/
|
||||
#define ARCV2_IRQ_DEF_PRIO 15
|
||||
|
||||
/* seed value for status register */
|
||||
#define ISA_INIT_STATUS_BITS (STATUS_IE_MASK | STATUS_AD_MASK | \
|
||||
|
@ -39,8 +39,8 @@ struct mcip_cmd {
|
||||
#define CMD_DEBUG_SET_MASK 0x34
|
||||
#define CMD_DEBUG_SET_SELECT 0x36
|
||||
|
||||
#define CMD_GRTC_READ_LO 0x42
|
||||
#define CMD_GRTC_READ_HI 0x43
|
||||
#define CMD_GFRC_READ_LO 0x42
|
||||
#define CMD_GFRC_READ_HI 0x43
|
||||
|
||||
#define CMD_IDU_ENABLE 0x71
|
||||
#define CMD_IDU_DISABLE 0x72
|
||||
|
@ -179,37 +179,44 @@
|
||||
#define __S111 PAGE_U_X_W_R
|
||||
|
||||
/****************************************************************
|
||||
* Page Table Lookup split
|
||||
* 2 tier (PGD:PTE) software page walker
|
||||
*
|
||||
* We implement 2 tier paging and since this is all software, we are free
|
||||
* to customize the span of a PGD / PTE entry to suit us
|
||||
*
|
||||
* 32 bit virtual address
|
||||
* [31] 32 bit virtual address [0]
|
||||
* -------------------------------------------------------
|
||||
* | BITS_FOR_PGD | BITS_FOR_PTE | BITS_IN_PAGE |
|
||||
* | | <------------ PGDIR_SHIFT ----------> |
|
||||
* | | |
|
||||
* | BITS_FOR_PGD | BITS_FOR_PTE | <-- PAGE_SHIFT --> |
|
||||
* -------------------------------------------------------
|
||||
* | | |
|
||||
* | | --> off in page frame
|
||||
* | |
|
||||
* | ---> index into Page Table
|
||||
* |
|
||||
* ----> index into Page Directory
|
||||
*
|
||||
* In a single page size configuration, only PAGE_SHIFT is fixed
|
||||
* So both PGD and PTE sizing can be tweaked
|
||||
* e.g. 8K page (PAGE_SHIFT 13) can have
|
||||
* - PGDIR_SHIFT 21 -> 11:8:13 address split
|
||||
* - PGDIR_SHIFT 24 -> 8:11:13 address split
|
||||
*
|
||||
* If Super Page is configured, PGDIR_SHIFT becomes fixed too,
|
||||
* so the sizing flexibility is gone.
|
||||
*/
|
||||
|
||||
#define BITS_IN_PAGE PAGE_SHIFT
|
||||
|
||||
/* Optimal Sizing of Pg Tbl - based on MMU page size */
|
||||
#if defined(CONFIG_ARC_PAGE_SIZE_8K)
|
||||
#define BITS_FOR_PTE 8 /* 11:8:13 */
|
||||
#elif defined(CONFIG_ARC_PAGE_SIZE_16K)
|
||||
#define BITS_FOR_PTE 8 /* 10:8:14 */
|
||||
#elif defined(CONFIG_ARC_PAGE_SIZE_4K)
|
||||
#define BITS_FOR_PTE 9 /* 11:9:12 */
|
||||
#if defined(CONFIG_ARC_HUGEPAGE_16M)
|
||||
#define PGDIR_SHIFT 24
|
||||
#elif defined(CONFIG_ARC_HUGEPAGE_2M)
|
||||
#define PGDIR_SHIFT 21
|
||||
#else
|
||||
/*
|
||||
* Only Normal page support so "hackable" (see comment above)
|
||||
* Default value provides 11:8:13 (8K), 11:9:12 (4K)
|
||||
*/
|
||||
#define PGDIR_SHIFT 21
|
||||
#endif
|
||||
|
||||
#define BITS_FOR_PGD (32 - BITS_FOR_PTE - BITS_IN_PAGE)
|
||||
#define BITS_FOR_PTE (PGDIR_SHIFT - PAGE_SHIFT)
|
||||
#define BITS_FOR_PGD (32 - PGDIR_SHIFT)
|
||||
|
||||
#define PGDIR_SHIFT (32 - BITS_FOR_PGD)
|
||||
#define PGDIR_SIZE (1UL << PGDIR_SHIFT) /* vaddr span, not PDG sz */
|
||||
#define PGDIR_MASK (~(PGDIR_SIZE-1))
|
||||
|
||||
|
@ -211,7 +211,11 @@ debug_marker_syscall:
|
||||
; (since IRQ NOT allowed in DS in ARCv2, this can only happen if orig
|
||||
; entry was via Exception in DS which got preempted in kernel).
|
||||
;
|
||||
; IRQ RTIE won't reliably restore DE bit and/or BTA, needs handling
|
||||
; IRQ RTIE won't reliably restore DE bit and/or BTA, needs workaround
|
||||
;
|
||||
; Solution is return from Intr w/o any delay slot quirks into a kernel trampoline
|
||||
; and from pure kernel mode return to delay slot which handles DS bit/BTA correctly
|
||||
|
||||
.Lintr_ret_to_delay_slot:
|
||||
debug_marker_ds:
|
||||
|
||||
@ -222,18 +226,23 @@ debug_marker_ds:
|
||||
ld r2, [sp, PT_ret]
|
||||
ld r3, [sp, PT_status32]
|
||||
|
||||
; STAT32 for Int return created from scratch
|
||||
; (No delay slot, disable further intr in trampoline)
|
||||
|
||||
bic r0, r3, STATUS_U_MASK|STATUS_DE_MASK|STATUS_IE_MASK|STATUS_L_MASK
|
||||
st r0, [sp, PT_status32]
|
||||
|
||||
mov r1, .Lintr_ret_to_delay_slot_2
|
||||
st r1, [sp, PT_ret]
|
||||
|
||||
; Orig exception PC/STAT32 safekept @orig_r0 and @event stack slots
|
||||
st r2, [sp, 0]
|
||||
st r3, [sp, 4]
|
||||
|
||||
b .Lisr_ret_fast_path
|
||||
|
||||
.Lintr_ret_to_delay_slot_2:
|
||||
; Trampoline to restore orig exception PC/STAT32/BTA/AUX_USER_SP
|
||||
sub sp, sp, SZ_PT_REGS
|
||||
st r9, [sp, -4]
|
||||
|
||||
@ -243,11 +252,19 @@ debug_marker_ds:
|
||||
ld r9, [sp, 4]
|
||||
sr r9, [erstatus]
|
||||
|
||||
; restore AUX_USER_SP if returning to U mode
|
||||
bbit0 r9, STATUS_U_BIT, 1f
|
||||
ld r9, [sp, PT_sp]
|
||||
sr r9, [AUX_USER_SP]
|
||||
|
||||
1:
|
||||
ld r9, [sp, 8]
|
||||
sr r9, [erbta]
|
||||
|
||||
ld r9, [sp, -4]
|
||||
add sp, sp, SZ_PT_REGS
|
||||
|
||||
; return from pure kernel mode to delay slot
|
||||
rtie
|
||||
|
||||
END(ret_from_exception)
|
||||
|
@ -14,6 +14,8 @@
|
||||
#include <linux/irqchip.h>
|
||||
#include <asm/irq.h>
|
||||
|
||||
static int irq_prio;
|
||||
|
||||
/*
|
||||
* Early Hardware specific Interrupt setup
|
||||
* -Called very early (start_kernel -> setup_arch -> setup_processor)
|
||||
@ -24,6 +26,14 @@ void arc_init_IRQ(void)
|
||||
{
|
||||
unsigned int tmp;
|
||||
|
||||
struct irq_build {
|
||||
#ifdef CONFIG_CPU_BIG_ENDIAN
|
||||
unsigned int pad:3, firq:1, prio:4, exts:8, irqs:8, ver:8;
|
||||
#else
|
||||
unsigned int ver:8, irqs:8, exts:8, prio:4, firq:1, pad:3;
|
||||
#endif
|
||||
} irq_bcr;
|
||||
|
||||
struct aux_irq_ctrl {
|
||||
#ifdef CONFIG_CPU_BIG_ENDIAN
|
||||
unsigned int res3:18, save_idx_regs:1, res2:1,
|
||||
@ -46,28 +56,25 @@ void arc_init_IRQ(void)
|
||||
|
||||
WRITE_AUX(AUX_IRQ_CTRL, ictrl);
|
||||
|
||||
/* setup status32, don't enable intr yet as kernel doesn't want */
|
||||
tmp = read_aux_reg(0xa);
|
||||
tmp |= ISA_INIT_STATUS_BITS;
|
||||
tmp &= ~STATUS_IE_MASK;
|
||||
asm volatile("flag %0 \n"::"r"(tmp));
|
||||
|
||||
/*
|
||||
* ARCv2 core intc provides multiple interrupt priorities (up to 16).
|
||||
* Typical builds though have only two levels (0-high, 1-low)
|
||||
* Linux by default uses lower prio 1 for most irqs, reserving 0 for
|
||||
* NMI style interrupts in future (say perf)
|
||||
*
|
||||
* Read the intc BCR to confirm that Linux default priority is avail
|
||||
* in h/w
|
||||
*
|
||||
* Note:
|
||||
* IRQ_BCR[27..24] contains N-1 (for N priority levels) and prio level
|
||||
* is 0 based.
|
||||
*/
|
||||
tmp = (read_aux_reg(ARC_REG_IRQ_BCR) >> 24 ) & 0xF;
|
||||
if (ARCV2_IRQ_DEF_PRIO > tmp)
|
||||
panic("Linux default irq prio incorrect\n");
|
||||
|
||||
READ_BCR(ARC_REG_IRQ_BCR, irq_bcr);
|
||||
|
||||
irq_prio = irq_bcr.prio; /* Encoded as N-1 for N levels */
|
||||
pr_info("archs-intc\t: %d priority levels (default %d)%s\n",
|
||||
irq_prio + 1, irq_prio,
|
||||
irq_bcr.firq ? " FIRQ (not used)":"");
|
||||
|
||||
/* setup status32, don't enable intr yet as kernel doesn't want */
|
||||
tmp = read_aux_reg(0xa);
|
||||
tmp |= STATUS_AD_MASK | (irq_prio << 1);
|
||||
tmp &= ~STATUS_IE_MASK;
|
||||
asm volatile("flag %0 \n"::"r"(tmp));
|
||||
}
|
||||
|
||||
static void arcv2_irq_mask(struct irq_data *data)
|
||||
@ -86,7 +93,7 @@ void arcv2_irq_enable(struct irq_data *data)
|
||||
{
|
||||
/* set default priority */
|
||||
write_aux_reg(AUX_IRQ_SELECT, data->irq);
|
||||
write_aux_reg(AUX_IRQ_PRIORITY, ARCV2_IRQ_DEF_PRIO);
|
||||
write_aux_reg(AUX_IRQ_PRIORITY, irq_prio);
|
||||
|
||||
/*
|
||||
* hw auto enables (linux unmask) all by default
|
||||
|
@ -96,13 +96,13 @@ static void mcip_probe_n_setup(void)
|
||||
#ifdef CONFIG_CPU_BIG_ENDIAN
|
||||
unsigned int pad3:8,
|
||||
idu:1, llm:1, num_cores:6,
|
||||
iocoh:1, grtc:1, dbg:1, pad2:1,
|
||||
iocoh:1, gfrc:1, dbg:1, pad2:1,
|
||||
msg:1, sem:1, ipi:1, pad:1,
|
||||
ver:8;
|
||||
#else
|
||||
unsigned int ver:8,
|
||||
pad:1, ipi:1, sem:1, msg:1,
|
||||
pad2:1, dbg:1, grtc:1, iocoh:1,
|
||||
pad2:1, dbg:1, gfrc:1, iocoh:1,
|
||||
num_cores:6, llm:1, idu:1,
|
||||
pad3:8;
|
||||
#endif
|
||||
@ -116,7 +116,7 @@ static void mcip_probe_n_setup(void)
|
||||
IS_AVAIL1(mp.ipi, "IPI "),
|
||||
IS_AVAIL1(mp.idu, "IDU "),
|
||||
IS_AVAIL1(mp.dbg, "DEBUG "),
|
||||
IS_AVAIL1(mp.grtc, "GRTC"));
|
||||
IS_AVAIL1(mp.gfrc, "GFRC"));
|
||||
|
||||
idu_detected = mp.idu;
|
||||
|
||||
@ -125,8 +125,8 @@ static void mcip_probe_n_setup(void)
|
||||
__mcip_cmd_data(CMD_DEBUG_SET_MASK, 0xf, 0xf);
|
||||
}
|
||||
|
||||
if (IS_ENABLED(CONFIG_ARC_HAS_GRTC) && !mp.grtc)
|
||||
panic("kernel trying to use non-existent GRTC\n");
|
||||
if (IS_ENABLED(CONFIG_ARC_HAS_GFRC) && !mp.gfrc)
|
||||
panic("kernel trying to use non-existent GFRC\n");
|
||||
}
|
||||
|
||||
struct plat_smp_ops plat_smp_ops = {
|
||||
|
@ -45,6 +45,7 @@ struct cpuinfo_arc cpuinfo_arc700[NR_CPUS];
|
||||
static void read_arc_build_cfg_regs(void)
|
||||
{
|
||||
struct bcr_perip uncached_space;
|
||||
struct bcr_timer timer;
|
||||
struct bcr_generic bcr;
|
||||
struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
|
||||
unsigned long perip_space;
|
||||
@ -53,7 +54,11 @@ static void read_arc_build_cfg_regs(void)
|
||||
READ_BCR(AUX_IDENTITY, cpu->core);
|
||||
READ_BCR(ARC_REG_ISA_CFG_BCR, cpu->isa);
|
||||
|
||||
READ_BCR(ARC_REG_TIMERS_BCR, cpu->timers);
|
||||
READ_BCR(ARC_REG_TIMERS_BCR, timer);
|
||||
cpu->extn.timer0 = timer.t0;
|
||||
cpu->extn.timer1 = timer.t1;
|
||||
cpu->extn.rtc = timer.rtc;
|
||||
|
||||
cpu->vec_base = read_aux_reg(AUX_INTR_VEC_BASE);
|
||||
|
||||
READ_BCR(ARC_REG_D_UNCACH_BCR, uncached_space);
|
||||
@ -208,9 +213,9 @@ static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
|
||||
(unsigned int)(arc_get_core_freq() / 10000) % 100);
|
||||
|
||||
n += scnprintf(buf + n, len - n, "Timers\t\t: %s%s%s%s\nISA Extn\t: ",
|
||||
IS_AVAIL1(cpu->timers.t0, "Timer0 "),
|
||||
IS_AVAIL1(cpu->timers.t1, "Timer1 "),
|
||||
IS_AVAIL2(cpu->timers.rtc, "64-bit RTC ",
|
||||
IS_AVAIL1(cpu->extn.timer0, "Timer0 "),
|
||||
IS_AVAIL1(cpu->extn.timer1, "Timer1 "),
|
||||
IS_AVAIL2(cpu->extn.rtc, "Local-64-bit-Ctr ",
|
||||
CONFIG_ARC_HAS_RTC));
|
||||
|
||||
n += i = scnprintf(buf + n, len - n, "%s%s%s%s%s",
|
||||
@ -293,13 +298,13 @@ static void arc_chk_core_config(void)
|
||||
struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
|
||||
int fpu_enabled;
|
||||
|
||||
if (!cpu->timers.t0)
|
||||
if (!cpu->extn.timer0)
|
||||
panic("Timer0 is not present!\n");
|
||||
|
||||
if (!cpu->timers.t1)
|
||||
if (!cpu->extn.timer1)
|
||||
panic("Timer1 is not present!\n");
|
||||
|
||||
if (IS_ENABLED(CONFIG_ARC_HAS_RTC) && !cpu->timers.rtc)
|
||||
if (IS_ENABLED(CONFIG_ARC_HAS_RTC) && !cpu->extn.rtc)
|
||||
panic("RTC is not present\n");
|
||||
|
||||
#ifdef CONFIG_ARC_HAS_DCCM
|
||||
@ -334,6 +339,7 @@ static void arc_chk_core_config(void)
|
||||
panic("FPU non-existent, disable CONFIG_ARC_FPU_SAVE_RESTORE\n");
|
||||
|
||||
if (is_isa_arcv2() && IS_ENABLED(CONFIG_SMP) && cpu->isa.atomic &&
|
||||
IS_ENABLED(CONFIG_ARC_HAS_LLSC) &&
|
||||
!IS_ENABLED(CONFIG_ARC_STAR_9000923308))
|
||||
panic("llock/scond livelock workaround missing\n");
|
||||
}
|
||||
|
@ -62,7 +62,7 @@
|
||||
|
||||
/********** Clock Source Device *********/
|
||||
|
||||
#ifdef CONFIG_ARC_HAS_GRTC
|
||||
#ifdef CONFIG_ARC_HAS_GFRC
|
||||
|
||||
static int arc_counter_setup(void)
|
||||
{
|
||||
@ -83,10 +83,10 @@ static cycle_t arc_counter_read(struct clocksource *cs)
|
||||
|
||||
local_irq_save(flags);
|
||||
|
||||
__mcip_cmd(CMD_GRTC_READ_LO, 0);
|
||||
__mcip_cmd(CMD_GFRC_READ_LO, 0);
|
||||
stamp.l = read_aux_reg(ARC_REG_MCIP_READBACK);
|
||||
|
||||
__mcip_cmd(CMD_GRTC_READ_HI, 0);
|
||||
__mcip_cmd(CMD_GFRC_READ_HI, 0);
|
||||
stamp.h = read_aux_reg(ARC_REG_MCIP_READBACK);
|
||||
|
||||
local_irq_restore(flags);
|
||||
@ -95,7 +95,7 @@ static cycle_t arc_counter_read(struct clocksource *cs)
|
||||
}
|
||||
|
||||
static struct clocksource arc_counter = {
|
||||
.name = "ARConnect GRTC",
|
||||
.name = "ARConnect GFRC",
|
||||
.rating = 400,
|
||||
.read = arc_counter_read,
|
||||
.mask = CLOCKSOURCE_MASK(64),
|
||||
|
@ -16,7 +16,7 @@
|
||||
*/
|
||||
#include <linux/module.h>
|
||||
#include <linux/kernel.h>
|
||||
|
||||
#include <asm/div64.h>
|
||||
#include <asm/hardware/icst.h>
|
||||
|
||||
/*
|
||||
@ -29,7 +29,11 @@ EXPORT_SYMBOL(icst525_s2div);
|
||||
|
||||
unsigned long icst_hz(const struct icst_params *p, struct icst_vco vco)
|
||||
{
|
||||
return p->ref * 2 * (vco.v + 8) / ((vco.r + 2) * p->s2div[vco.s]);
|
||||
u64 dividend = p->ref * 2 * (u64)(vco.v + 8);
|
||||
u32 divisor = (vco.r + 2) * p->s2div[vco.s];
|
||||
|
||||
do_div(dividend, divisor);
|
||||
return (unsigned long)dividend;
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(icst_hz);
|
||||
@ -58,6 +62,7 @@ icst_hz_to_vco(const struct icst_params *p, unsigned long freq)
|
||||
|
||||
if (f > p->vco_min && f <= p->vco_max)
|
||||
break;
|
||||
i++;
|
||||
} while (i < 8);
|
||||
|
||||
if (i >= 8)
|
||||
|
@ -292,24 +292,23 @@ CONFIG_FB=y
|
||||
CONFIG_FIRMWARE_EDID=y
|
||||
CONFIG_FB_MODE_HELPERS=y
|
||||
CONFIG_FB_TILEBLITTING=y
|
||||
CONFIG_OMAP2_DSS=m
|
||||
CONFIG_OMAP5_DSS_HDMI=y
|
||||
CONFIG_OMAP2_DSS_SDI=y
|
||||
CONFIG_OMAP2_DSS_DSI=y
|
||||
CONFIG_FB_OMAP5_DSS_HDMI=y
|
||||
CONFIG_FB_OMAP2_DSS_SDI=y
|
||||
CONFIG_FB_OMAP2_DSS_DSI=y
|
||||
CONFIG_FB_OMAP2=m
|
||||
CONFIG_DISPLAY_ENCODER_TFP410=m
|
||||
CONFIG_DISPLAY_ENCODER_TPD12S015=m
|
||||
CONFIG_DISPLAY_CONNECTOR_DVI=m
|
||||
CONFIG_DISPLAY_CONNECTOR_HDMI=m
|
||||
CONFIG_DISPLAY_CONNECTOR_ANALOG_TV=m
|
||||
CONFIG_DISPLAY_PANEL_DPI=m
|
||||
CONFIG_DISPLAY_PANEL_DSI_CM=m
|
||||
CONFIG_DISPLAY_PANEL_SONY_ACX565AKM=m
|
||||
CONFIG_DISPLAY_PANEL_LGPHILIPS_LB035Q02=m
|
||||
CONFIG_DISPLAY_PANEL_SHARP_LS037V7DW01=m
|
||||
CONFIG_DISPLAY_PANEL_TPO_TD028TTEC1=m
|
||||
CONFIG_DISPLAY_PANEL_TPO_TD043MTEA1=m
|
||||
CONFIG_DISPLAY_PANEL_NEC_NL8048HL11=m
|
||||
CONFIG_FB_OMAP2_ENCODER_TFP410=m
|
||||
CONFIG_FB_OMAP2_ENCODER_TPD12S015=m
|
||||
CONFIG_FB_OMAP2_CONNECTOR_DVI=m
|
||||
CONFIG_FB_OMAP2_CONNECTOR_HDMI=m
|
||||
CONFIG_FB_OMAP2_CONNECTOR_ANALOG_TV=m
|
||||
CONFIG_FB_OMAP2_PANEL_DPI=m
|
||||
CONFIG_FB_OMAP2_PANEL_DSI_CM=m
|
||||
CONFIG_FB_OMAP2_PANEL_SONY_ACX565AKM=m
|
||||
CONFIG_FB_OMAP2_PANEL_LGPHILIPS_LB035Q02=m
|
||||
CONFIG_FB_OMAP2_PANEL_SHARP_LS037V7DW01=m
|
||||
CONFIG_FB_OMAP2_PANEL_TPO_TD028TTEC1=m
|
||||
CONFIG_FB_OMAP2_PANEL_TPO_TD043MTEA1=m
|
||||
CONFIG_FB_OMAP2_PANEL_NEC_NL8048HL11=m
|
||||
CONFIG_BACKLIGHT_LCD_SUPPORT=y
|
||||
CONFIG_LCD_CLASS_DEVICE=y
|
||||
CONFIG_LCD_PLATFORM=y
|
||||
|
@ -103,6 +103,7 @@ static inline u64 gic_read_iar_common(void)
|
||||
u64 irqstat;
|
||||
|
||||
asm volatile("mrs_s %0, " __stringify(ICC_IAR1_EL1) : "=r" (irqstat));
|
||||
dsb(sy);
|
||||
return irqstat;
|
||||
}
|
||||
|
||||
|
@ -182,6 +182,7 @@
|
||||
#define CPTR_EL2_TCPAC (1 << 31)
|
||||
#define CPTR_EL2_TTA (1 << 20)
|
||||
#define CPTR_EL2_TFP (1 << CPTR_EL2_TFP_SHIFT)
|
||||
#define CPTR_EL2_DEFAULT 0x000033ff
|
||||
|
||||
/* Hyp Debug Configuration Register bits */
|
||||
#define MDCR_EL2_TDRA (1 << 11)
|
||||
|
@ -127,10 +127,14 @@ static inline unsigned long *vcpu_spsr(const struct kvm_vcpu *vcpu)
|
||||
|
||||
static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
|
||||
{
|
||||
u32 mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK;
|
||||
u32 mode;
|
||||
|
||||
if (vcpu_mode_is_32bit(vcpu))
|
||||
if (vcpu_mode_is_32bit(vcpu)) {
|
||||
mode = *vcpu_cpsr(vcpu) & COMPAT_PSR_MODE_MASK;
|
||||
return mode > COMPAT_PSR_MODE_USR;
|
||||
}
|
||||
|
||||
mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK;
|
||||
|
||||
return mode != PSR_MODE_EL0t;
|
||||
}
|
||||
|
@ -36,7 +36,11 @@ static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu)
|
||||
write_sysreg(val, hcr_el2);
|
||||
/* Trap on AArch32 cp15 c15 accesses (EL1 or EL0) */
|
||||
write_sysreg(1 << 15, hstr_el2);
|
||||
write_sysreg(CPTR_EL2_TTA | CPTR_EL2_TFP, cptr_el2);
|
||||
|
||||
val = CPTR_EL2_DEFAULT;
|
||||
val |= CPTR_EL2_TTA | CPTR_EL2_TFP;
|
||||
write_sysreg(val, cptr_el2);
|
||||
|
||||
write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
|
||||
}
|
||||
|
||||
@ -45,7 +49,7 @@ static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu)
|
||||
write_sysreg(HCR_RW, hcr_el2);
|
||||
write_sysreg(0, hstr_el2);
|
||||
write_sysreg(read_sysreg(mdcr_el2) & MDCR_EL2_HPMN_MASK, mdcr_el2);
|
||||
write_sysreg(0, cptr_el2);
|
||||
write_sysreg(CPTR_EL2_DEFAULT, cptr_el2);
|
||||
}
|
||||
|
||||
static void __hyp_text __activate_vm(struct kvm_vcpu *vcpu)
|
||||
|
@ -27,7 +27,11 @@
|
||||
|
||||
#define PSTATE_FAULT_BITS_64 (PSR_MODE_EL1h | PSR_A_BIT | PSR_F_BIT | \
|
||||
PSR_I_BIT | PSR_D_BIT)
|
||||
#define EL1_EXCEPT_SYNC_OFFSET 0x200
|
||||
|
||||
#define CURRENT_EL_SP_EL0_VECTOR 0x0
|
||||
#define CURRENT_EL_SP_ELx_VECTOR 0x200
|
||||
#define LOWER_EL_AArch64_VECTOR 0x400
|
||||
#define LOWER_EL_AArch32_VECTOR 0x600
|
||||
|
||||
static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
|
||||
{
|
||||
@ -97,6 +101,34 @@ static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt,
|
||||
*fsr = 0x14;
|
||||
}
|
||||
|
||||
enum exception_type {
|
||||
except_type_sync = 0,
|
||||
except_type_irq = 0x80,
|
||||
except_type_fiq = 0x100,
|
||||
except_type_serror = 0x180,
|
||||
};
|
||||
|
||||
static u64 get_except_vector(struct kvm_vcpu *vcpu, enum exception_type type)
|
||||
{
|
||||
u64 exc_offset;
|
||||
|
||||
switch (*vcpu_cpsr(vcpu) & (PSR_MODE_MASK | PSR_MODE32_BIT)) {
|
||||
case PSR_MODE_EL1t:
|
||||
exc_offset = CURRENT_EL_SP_EL0_VECTOR;
|
||||
break;
|
||||
case PSR_MODE_EL1h:
|
||||
exc_offset = CURRENT_EL_SP_ELx_VECTOR;
|
||||
break;
|
||||
case PSR_MODE_EL0t:
|
||||
exc_offset = LOWER_EL_AArch64_VECTOR;
|
||||
break;
|
||||
default:
|
||||
exc_offset = LOWER_EL_AArch32_VECTOR;
|
||||
}
|
||||
|
||||
return vcpu_sys_reg(vcpu, VBAR_EL1) + exc_offset + type;
|
||||
}
|
||||
|
||||
static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr)
|
||||
{
|
||||
unsigned long cpsr = *vcpu_cpsr(vcpu);
|
||||
@ -108,8 +140,8 @@ static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr
|
||||
*vcpu_spsr(vcpu) = cpsr;
|
||||
*vcpu_elr_el1(vcpu) = *vcpu_pc(vcpu);
|
||||
|
||||
*vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync);
|
||||
*vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64;
|
||||
*vcpu_pc(vcpu) = vcpu_sys_reg(vcpu, VBAR_EL1) + EL1_EXCEPT_SYNC_OFFSET;
|
||||
|
||||
vcpu_sys_reg(vcpu, FAR_EL1) = addr;
|
||||
|
||||
@ -143,8 +175,8 @@ static void inject_undef64(struct kvm_vcpu *vcpu)
|
||||
*vcpu_spsr(vcpu) = cpsr;
|
||||
*vcpu_elr_el1(vcpu) = *vcpu_pc(vcpu);
|
||||
|
||||
*vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync);
|
||||
*vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64;
|
||||
*vcpu_pc(vcpu) = vcpu_sys_reg(vcpu, VBAR_EL1) + EL1_EXCEPT_SYNC_OFFSET;
|
||||
|
||||
/*
|
||||
* Build an unknown exception, depending on the instruction
|
||||
|
@ -1007,10 +1007,9 @@ static int emulate_cp(struct kvm_vcpu *vcpu,
|
||||
if (likely(r->access(vcpu, params, r))) {
|
||||
/* Skip instruction, since it was emulated */
|
||||
kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
|
||||
/* Handled */
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Handled */
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Not handled */
|
||||
@ -1043,7 +1042,7 @@ static void unhandled_cp_access(struct kvm_vcpu *vcpu,
|
||||
}
|
||||
|
||||
/**
|
||||
* kvm_handle_cp_64 -- handles a mrrc/mcrr trap on a guest CP15 access
|
||||
* kvm_handle_cp_64 -- handles a mrrc/mcrr trap on a guest CP14/CP15 access
|
||||
* @vcpu: The VCPU pointer
|
||||
* @run: The kvm_run struct
|
||||
*/
|
||||
@ -1095,7 +1094,7 @@ out:
|
||||
}
|
||||
|
||||
/**
|
||||
* kvm_handle_cp15_32 -- handles a mrc/mcr trap on a guest CP15 access
|
||||
* kvm_handle_cp_32 -- handles a mrc/mcr trap on a guest CP14/CP15 access
|
||||
* @vcpu: The VCPU pointer
|
||||
* @run: The kvm_run struct
|
||||
*/
|
||||
|
@ -2085,7 +2085,7 @@ config PAGE_SIZE_32KB
|
||||
|
||||
config PAGE_SIZE_64KB
|
||||
bool "64kB"
|
||||
depends on !CPU_R3000 && !CPU_TX39XX
|
||||
depends on !CPU_R3000 && !CPU_TX39XX && !CPU_R6000
|
||||
help
|
||||
Using 64kB page size will result in higher performance kernel at
|
||||
the price of higher memory consumption. This option is available on
|
||||
|
@ -74,6 +74,7 @@
|
||||
timer: timer@10000040 {
|
||||
compatible = "syscon";
|
||||
reg = <0x10000040 0x2c>;
|
||||
little-endian;
|
||||
};
|
||||
|
||||
reboot {
|
||||
|
@ -98,6 +98,7 @@
|
||||
sun_top_ctrl: syscon@404000 {
|
||||
compatible = "brcm,bcm7125-sun-top-ctrl", "syscon";
|
||||
reg = <0x404000 0x60c>;
|
||||
little-endian;
|
||||
};
|
||||
|
||||
reboot {
|
||||
|
@ -118,6 +118,7 @@
|
||||
sun_top_ctrl: syscon@404000 {
|
||||
compatible = "brcm,bcm7346-sun-top-ctrl", "syscon";
|
||||
reg = <0x404000 0x51c>;
|
||||
little-endian;
|
||||
};
|
||||
|
||||
reboot {
|
||||
|
@ -112,6 +112,7 @@
|
||||
sun_top_ctrl: syscon@404000 {
|
||||
compatible = "brcm,bcm7358-sun-top-ctrl", "syscon";
|
||||
reg = <0x404000 0x51c>;
|
||||
little-endian;
|
||||
};
|
||||
|
||||
reboot {
|
||||
|
@ -112,6 +112,7 @@
|
||||
sun_top_ctrl: syscon@404000 {
|
||||
compatible = "brcm,bcm7360-sun-top-ctrl", "syscon";
|
||||
reg = <0x404000 0x51c>;
|
||||
little-endian;
|
||||
};
|
||||
|
||||
reboot {
|
||||
|
@ -118,6 +118,7 @@
|
||||
sun_top_ctrl: syscon@404000 {
|
||||
compatible = "brcm,bcm7362-sun-top-ctrl", "syscon";
|
||||
reg = <0x404000 0x51c>;
|
||||
little-endian;
|
||||
};
|
||||
|
||||
reboot {
|
||||
|
@ -99,6 +99,7 @@
|
||||
sun_top_ctrl: syscon@404000 {
|
||||
compatible = "brcm,bcm7420-sun-top-ctrl", "syscon";
|
||||
reg = <0x404000 0x60c>;
|
||||
little-endian;
|
||||
};
|
||||
|
||||
reboot {
|
||||
|
@ -100,6 +100,7 @@
|
||||
sun_top_ctrl: syscon@404000 {
|
||||
compatible = "brcm,bcm7425-sun-top-ctrl", "syscon";
|
||||
reg = <0x404000 0x51c>;
|
||||
little-endian;
|
||||
};
|
||||
|
||||
reboot {
|
||||
|
@ -114,6 +114,7 @@
|
||||
sun_top_ctrl: syscon@404000 {
|
||||
compatible = "brcm,bcm7425-sun-top-ctrl", "syscon";
|
||||
reg = <0x404000 0x51c>;
|
||||
little-endian;
|
||||
};
|
||||
|
||||
reboot {
|
||||
|
@ -227,7 +227,7 @@ struct mips_elf_abiflags_v0 {
|
||||
int __res = 1; \
|
||||
struct elfhdr *__h = (hdr); \
|
||||
\
|
||||
if (__h->e_machine != EM_MIPS) \
|
||||
if (!mips_elf_check_machine(__h)) \
|
||||
__res = 0; \
|
||||
if (__h->e_ident[EI_CLASS] != ELFCLASS32) \
|
||||
__res = 0; \
|
||||
@ -258,7 +258,7 @@ struct mips_elf_abiflags_v0 {
|
||||
int __res = 1; \
|
||||
struct elfhdr *__h = (hdr); \
|
||||
\
|
||||
if (__h->e_machine != EM_MIPS) \
|
||||
if (!mips_elf_check_machine(__h)) \
|
||||
__res = 0; \
|
||||
if (__h->e_ident[EI_CLASS] != ELFCLASS64) \
|
||||
__res = 0; \
|
||||
@ -285,6 +285,11 @@ struct mips_elf_abiflags_v0 {
|
||||
|
||||
#endif /* !defined(ELF_ARCH) */
|
||||
|
||||
#define mips_elf_check_machine(x) ((x)->e_machine == EM_MIPS)
|
||||
|
||||
#define vmcore_elf32_check_arch mips_elf_check_machine
|
||||
#define vmcore_elf64_check_arch mips_elf_check_machine
|
||||
|
||||
struct mips_abi;
|
||||
|
||||
extern struct mips_abi mips_abi;
|
||||
|
@ -179,6 +179,10 @@ static inline void lose_fpu_inatomic(int save, struct task_struct *tsk)
|
||||
if (save)
|
||||
_save_fp(tsk);
|
||||
__disable_fpu();
|
||||
} else {
|
||||
/* FPU should not have been left enabled with no owner */
|
||||
WARN(read_c0_status() & ST0_CU1,
|
||||
"Orphaned FPU left enabled");
|
||||
}
|
||||
KSTK_STATUS(tsk) &= ~ST0_CU1;
|
||||
clear_tsk_thread_flag(tsk, TIF_USEDFPU);
|
||||
|
@ -128,7 +128,8 @@ static inline int octeon_has_feature(enum octeon_feature feature)
|
||||
case OCTEON_FEATURE_PCIE:
|
||||
return OCTEON_IS_MODEL(OCTEON_CN56XX)
|
||||
|| OCTEON_IS_MODEL(OCTEON_CN52XX)
|
||||
|| OCTEON_IS_MODEL(OCTEON_CN6XXX);
|
||||
|| OCTEON_IS_MODEL(OCTEON_CN6XXX)
|
||||
|| OCTEON_IS_MODEL(OCTEON_CN7XXX);
|
||||
|
||||
case OCTEON_FEATURE_SRIO:
|
||||
return OCTEON_IS_MODEL(OCTEON_CN63XX)
|
||||
|
@ -45,7 +45,7 @@ extern unsigned int vced_count, vcei_count;
|
||||
* User space process size: 2GB. This is hardcoded into a few places,
|
||||
* so don't change it unless you know what you are doing.
|
||||
*/
|
||||
#define TASK_SIZE 0x7fff8000UL
|
||||
#define TASK_SIZE 0x80000000UL
|
||||
#endif
|
||||
|
||||
#define STACK_TOP_MAX TASK_SIZE
|
||||
|
@ -289,7 +289,7 @@
|
||||
.set reorder
|
||||
.set noat
|
||||
mfc0 a0, CP0_STATUS
|
||||
li v1, 0xff00
|
||||
li v1, ST0_CU1 | ST0_IM
|
||||
ori a0, STATMASK
|
||||
xori a0, STATMASK
|
||||
mtc0 a0, CP0_STATUS
|
||||
@ -330,7 +330,7 @@
|
||||
ori a0, STATMASK
|
||||
xori a0, STATMASK
|
||||
mtc0 a0, CP0_STATUS
|
||||
li v1, 0xff00
|
||||
li v1, ST0_CU1 | ST0_FR | ST0_IM
|
||||
and a0, v1
|
||||
LONG_L v0, PT_STATUS(sp)
|
||||
nor v1, $0, v1
|
||||
|
@ -101,10 +101,8 @@ static inline void syscall_get_arguments(struct task_struct *task,
|
||||
/* O32 ABI syscall() - Either 64-bit with O32 or 32-bit */
|
||||
if ((config_enabled(CONFIG_32BIT) ||
|
||||
test_tsk_thread_flag(task, TIF_32BIT_REGS)) &&
|
||||
(regs->regs[2] == __NR_syscall)) {
|
||||
(regs->regs[2] == __NR_syscall))
|
||||
i++;
|
||||
n++;
|
||||
}
|
||||
|
||||
while (n--)
|
||||
ret |= mips_get_syscall_arg(args++, task, regs, i++);
|
||||
|
@ -380,16 +380,17 @@
|
||||
#define __NR_userfaultfd (__NR_Linux + 357)
|
||||
#define __NR_membarrier (__NR_Linux + 358)
|
||||
#define __NR_mlock2 (__NR_Linux + 359)
|
||||
#define __NR_copy_file_range (__NR_Linux + 360)
|
||||
|
||||
/*
|
||||
* Offset of the last Linux o32 flavoured syscall
|
||||
*/
|
||||
#define __NR_Linux_syscalls 359
|
||||
#define __NR_Linux_syscalls 360
|
||||
|
||||
#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
|
||||
|
||||
#define __NR_O32_Linux 4000
|
||||
#define __NR_O32_Linux_syscalls 359
|
||||
#define __NR_O32_Linux_syscalls 360
|
||||
|
||||
#if _MIPS_SIM == _MIPS_SIM_ABI64
|
||||
|
||||
@ -717,16 +718,17 @@
|
||||
#define __NR_userfaultfd (__NR_Linux + 317)
|
||||
#define __NR_membarrier (__NR_Linux + 318)
|
||||
#define __NR_mlock2 (__NR_Linux + 319)
|
||||
#define __NR_copy_file_range (__NR_Linux + 320)
|
||||
|
||||
/*
|
||||
* Offset of the last Linux 64-bit flavoured syscall
|
||||
*/
|
||||
#define __NR_Linux_syscalls 319
|
||||
#define __NR_Linux_syscalls 320
|
||||
|
||||
#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */
|
||||
|
||||
#define __NR_64_Linux 5000
|
||||
#define __NR_64_Linux_syscalls 319
|
||||
#define __NR_64_Linux_syscalls 320
|
||||
|
||||
#if _MIPS_SIM == _MIPS_SIM_NABI32
|
||||
|
||||
@ -1058,15 +1060,16 @@
|
||||
#define __NR_userfaultfd (__NR_Linux + 321)
|
||||
#define __NR_membarrier (__NR_Linux + 322)
|
||||
#define __NR_mlock2 (__NR_Linux + 323)
|
||||
#define __NR_copy_file_range (__NR_Linux + 324)
|
||||
|
||||
/*
|
||||
* Offset of the last N32 flavoured syscall
|
||||
*/
|
||||
#define __NR_Linux_syscalls 323
|
||||
#define __NR_Linux_syscalls 324
|
||||
|
||||
#endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */
|
||||
|
||||
#define __NR_N32_Linux 6000
|
||||
#define __NR_N32_Linux_syscalls 323
|
||||
#define __NR_N32_Linux_syscalls 324
|
||||
|
||||
#endif /* _UAPI_ASM_UNISTD_H */
|
||||
|
@ -35,7 +35,7 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
|
||||
int __res = 1; \
|
||||
struct elfhdr *__h = (hdr); \
|
||||
\
|
||||
if (__h->e_machine != EM_MIPS) \
|
||||
if (!mips_elf_check_machine(__h)) \
|
||||
__res = 0; \
|
||||
if (__h->e_ident[EI_CLASS] != ELFCLASS32) \
|
||||
__res = 0; \
|
||||
|
@ -47,7 +47,7 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
|
||||
int __res = 1; \
|
||||
struct elfhdr *__h = (hdr); \
|
||||
\
|
||||
if (__h->e_machine != EM_MIPS) \
|
||||
if (!mips_elf_check_machine(__h)) \
|
||||
__res = 0; \
|
||||
if (__h->e_ident[EI_CLASS] != ELFCLASS32) \
|
||||
__res = 0; \
|
||||
|
@ -65,12 +65,10 @@ void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp)
|
||||
status = regs->cp0_status & ~(ST0_CU0|ST0_CU1|ST0_FR|KU_MASK);
|
||||
status |= KU_USER;
|
||||
regs->cp0_status = status;
|
||||
clear_used_math();
|
||||
clear_fpu_owner();
|
||||
init_dsp();
|
||||
clear_thread_flag(TIF_USEDMSA);
|
||||
lose_fpu(0);
|
||||
clear_thread_flag(TIF_MSA_CTX_LIVE);
|
||||
disable_msa();
|
||||
clear_used_math();
|
||||
init_dsp();
|
||||
regs->cp0_epc = pc;
|
||||
regs->regs[29] = sp;
|
||||
}
|
||||
|
@ -595,3 +595,4 @@ EXPORT(sys_call_table)
|
||||
PTR sys_userfaultfd
|
||||
PTR sys_membarrier
|
||||
PTR sys_mlock2
|
||||
PTR sys_copy_file_range /* 4360 */
|
||||
|
@ -433,4 +433,5 @@ EXPORT(sys_call_table)
|
||||
PTR sys_userfaultfd
|
||||
PTR sys_membarrier
|
||||
PTR sys_mlock2
|
||||
PTR sys_copy_file_range /* 5320 */
|
||||
.size sys_call_table,.-sys_call_table
|
||||
|
@ -423,4 +423,5 @@ EXPORT(sysn32_call_table)
|
||||
PTR sys_userfaultfd
|
||||
PTR sys_membarrier
|
||||
PTR sys_mlock2
|
||||
PTR sys_copy_file_range
|
||||
.size sysn32_call_table,.-sysn32_call_table
|
||||
|
@ -578,4 +578,5 @@ EXPORT(sys32_call_table)
|
||||
PTR sys_userfaultfd
|
||||
PTR sys_membarrier
|
||||
PTR sys_mlock2
|
||||
PTR sys_copy_file_range /* 4360 */
|
||||
.size sys32_call_table,.-sys32_call_table
|
||||
|
@ -782,6 +782,7 @@ static inline void prefill_possible_map(void) {}
|
||||
void __init setup_arch(char **cmdline_p)
|
||||
{
|
||||
cpu_probe();
|
||||
mips_cm_probe();
|
||||
prom_init();
|
||||
|
||||
setup_early_fdc_console();
|
||||
|
@ -663,7 +663,7 @@ static int simulate_rdhwr_normal(struct pt_regs *regs, unsigned int opcode)
|
||||
return -1;
|
||||
}
|
||||
|
||||
static int simulate_rdhwr_mm(struct pt_regs *regs, unsigned short opcode)
|
||||
static int simulate_rdhwr_mm(struct pt_regs *regs, unsigned int opcode)
|
||||
{
|
||||
if ((opcode & MM_POOL32A_FUNC) == MM_RDHWR) {
|
||||
int rd = (opcode & MM_RS) >> 16;
|
||||
@ -1119,11 +1119,12 @@ no_r2_instr:
|
||||
if (get_isa16_mode(regs->cp0_epc)) {
|
||||
unsigned short mmop[2] = { 0 };
|
||||
|
||||
if (unlikely(get_user(mmop[0], epc) < 0))
|
||||
if (unlikely(get_user(mmop[0], (u16 __user *)epc + 0) < 0))
|
||||
status = SIGSEGV;
|
||||
if (unlikely(get_user(mmop[1], epc) < 0))
|
||||
if (unlikely(get_user(mmop[1], (u16 __user *)epc + 1) < 0))
|
||||
status = SIGSEGV;
|
||||
opcode = (mmop[0] << 16) | mmop[1];
|
||||
opcode = mmop[0];
|
||||
opcode = (opcode << 16) | mmop[1];
|
||||
|
||||
if (status < 0)
|
||||
status = simulate_rdhwr_mm(regs, opcode);
|
||||
@ -1369,26 +1370,12 @@ asmlinkage void do_cpu(struct pt_regs *regs)
|
||||
if (unlikely(compute_return_epc(regs) < 0))
|
||||
break;
|
||||
|
||||
if (get_isa16_mode(regs->cp0_epc)) {
|
||||
unsigned short mmop[2] = { 0 };
|
||||
|
||||
if (unlikely(get_user(mmop[0], epc) < 0))
|
||||
status = SIGSEGV;
|
||||
if (unlikely(get_user(mmop[1], epc) < 0))
|
||||
status = SIGSEGV;
|
||||
opcode = (mmop[0] << 16) | mmop[1];
|
||||
|
||||
if (status < 0)
|
||||
status = simulate_rdhwr_mm(regs, opcode);
|
||||
} else {
|
||||
if (!get_isa16_mode(regs->cp0_epc)) {
|
||||
if (unlikely(get_user(opcode, epc) < 0))
|
||||
status = SIGSEGV;
|
||||
|
||||
if (!cpu_has_llsc && status < 0)
|
||||
status = simulate_llsc(regs, opcode);
|
||||
|
||||
if (status < 0)
|
||||
status = simulate_rdhwr_normal(regs, opcode);
|
||||
}
|
||||
|
||||
if (status < 0)
|
||||
|
@ -181,10 +181,6 @@ static int __init mips_sc_probe_cm3(void)
|
||||
return 1;
|
||||
}
|
||||
|
||||
void __weak platform_early_l2_init(void)
|
||||
{
|
||||
}
|
||||
|
||||
static inline int __init mips_sc_probe(void)
|
||||
{
|
||||
struct cpuinfo_mips *c = &current_cpu_data;
|
||||
@ -194,12 +190,6 @@ static inline int __init mips_sc_probe(void)
|
||||
/* Mark as not present until probe completed */
|
||||
c->scache.flags |= MIPS_CACHE_NOT_PRESENT;
|
||||
|
||||
/*
|
||||
* Do we need some platform specific probing before
|
||||
* we configure L2?
|
||||
*/
|
||||
platform_early_l2_init();
|
||||
|
||||
if (mips_cm_revision() >= CM_REV_CM3)
|
||||
return mips_sc_probe_cm3();
|
||||
|
||||
|
@ -293,7 +293,6 @@ mips_pci_controller:
|
||||
console_config();
|
||||
#endif
|
||||
/* Early detection of CMP support */
|
||||
mips_cm_probe();
|
||||
mips_cpc_probe();
|
||||
|
||||
if (!register_cps_smp_ops())
|
||||
@ -304,10 +303,3 @@ mips_pci_controller:
|
||||
return;
|
||||
register_up_smp_ops();
|
||||
}
|
||||
|
||||
void platform_early_l2_init(void)
|
||||
{
|
||||
/* L2 configuration lives in the CM3 */
|
||||
if (mips_cm_revision() >= CM_REV_CM3)
|
||||
mips_cm_probe();
|
||||
}
|
||||
|
@ -297,12 +297,12 @@ static int mt7620_pci_probe(struct platform_device *pdev)
|
||||
return PTR_ERR(rstpcie0);
|
||||
|
||||
bridge_base = devm_ioremap_resource(&pdev->dev, bridge_res);
|
||||
if (!bridge_base)
|
||||
return -ENOMEM;
|
||||
if (IS_ERR(bridge_base))
|
||||
return PTR_ERR(bridge_base);
|
||||
|
||||
pcie_base = devm_ioremap_resource(&pdev->dev, pcie_res);
|
||||
if (!pcie_base)
|
||||
return -ENOMEM;
|
||||
if (IS_ERR(pcie_base))
|
||||
return PTR_ERR(pcie_base);
|
||||
|
||||
iomem_resource.start = 0;
|
||||
iomem_resource.end = ~0;
|
||||
|
@ -475,6 +475,7 @@ config X86_UV
|
||||
depends on X86_64
|
||||
depends on X86_EXTENDED_PLATFORM
|
||||
depends on NUMA
|
||||
depends on EFI
|
||||
depends on X86_X2APIC
|
||||
depends on PCI
|
||||
---help---
|
||||
|
@ -766,7 +766,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
|
||||
* Return saved PC of a blocked thread.
|
||||
* What is this good for? it will be always the scheduler or ret_from_fork.
|
||||
*/
|
||||
#define thread_saved_pc(t) (*(unsigned long *)((t)->thread.sp - 8))
|
||||
#define thread_saved_pc(t) READ_ONCE_NOCHECK(*(unsigned long *)((t)->thread.sp - 8))
|
||||
|
||||
#define task_pt_regs(tsk) ((struct pt_regs *)(tsk)->thread.sp0 - 1)
|
||||
extern unsigned long KSTK_ESP(struct task_struct *task);
|
||||
|
@ -469,7 +469,7 @@ static void __init numa_clear_kernel_node_hotplug(void)
|
||||
{
|
||||
int i, nid;
|
||||
nodemask_t numa_kernel_nodes = NODE_MASK_NONE;
|
||||
unsigned long start, end;
|
||||
phys_addr_t start, end;
|
||||
struct memblock_region *r;
|
||||
|
||||
/*
|
||||
|
@ -2455,14 +2455,16 @@ struct request *blk_peek_request(struct request_queue *q)
|
||||
|
||||
rq = NULL;
|
||||
break;
|
||||
} else if (ret == BLKPREP_KILL) {
|
||||
} else if (ret == BLKPREP_KILL || ret == BLKPREP_INVALID) {
|
||||
int err = (ret == BLKPREP_INVALID) ? -EREMOTEIO : -EIO;
|
||||
|
||||
rq->cmd_flags |= REQ_QUIET;
|
||||
/*
|
||||
* Mark this request as started so we don't trigger
|
||||
* any debug logic in the end I/O path.
|
||||
*/
|
||||
blk_start_request(rq);
|
||||
__blk_end_request_all(rq, -EIO);
|
||||
__blk_end_request_all(rq, err);
|
||||
} else {
|
||||
printk(KERN_ERR "%s: bad return=%d\n", __func__, ret);
|
||||
break;
|
||||
|
@ -65,18 +65,10 @@ struct skcipher_async_req {
|
||||
struct skcipher_async_rsgl first_sgl;
|
||||
struct list_head list;
|
||||
struct scatterlist *tsg;
|
||||
char iv[];
|
||||
atomic_t *inflight;
|
||||
struct skcipher_request req;
|
||||
};
|
||||
|
||||
#define GET_SREQ(areq, ctx) (struct skcipher_async_req *)((char *)areq + \
|
||||
crypto_skcipher_reqsize(crypto_skcipher_reqtfm(&ctx->req)))
|
||||
|
||||
#define GET_REQ_SIZE(ctx) \
|
||||
crypto_skcipher_reqsize(crypto_skcipher_reqtfm(&ctx->req))
|
||||
|
||||
#define GET_IV_SIZE(ctx) \
|
||||
crypto_skcipher_ivsize(crypto_skcipher_reqtfm(&ctx->req))
|
||||
|
||||
#define MAX_SGL_ENTS ((4096 - sizeof(struct skcipher_sg_list)) / \
|
||||
sizeof(struct scatterlist) - 1)
|
||||
|
||||
@ -102,15 +94,12 @@ static void skcipher_free_async_sgls(struct skcipher_async_req *sreq)
|
||||
|
||||
static void skcipher_async_cb(struct crypto_async_request *req, int err)
|
||||
{
|
||||
struct sock *sk = req->data;
|
||||
struct alg_sock *ask = alg_sk(sk);
|
||||
struct skcipher_ctx *ctx = ask->private;
|
||||
struct skcipher_async_req *sreq = GET_SREQ(req, ctx);
|
||||
struct skcipher_async_req *sreq = req->data;
|
||||
struct kiocb *iocb = sreq->iocb;
|
||||
|
||||
atomic_dec(&ctx->inflight);
|
||||
atomic_dec(sreq->inflight);
|
||||
skcipher_free_async_sgls(sreq);
|
||||
kfree(req);
|
||||
kzfree(sreq);
|
||||
iocb->ki_complete(iocb, err, err);
|
||||
}
|
||||
|
||||
@ -306,8 +295,11 @@ static int skcipher_sendmsg(struct socket *sock, struct msghdr *msg,
|
||||
{
|
||||
struct sock *sk = sock->sk;
|
||||
struct alg_sock *ask = alg_sk(sk);
|
||||
struct sock *psk = ask->parent;
|
||||
struct alg_sock *pask = alg_sk(psk);
|
||||
struct skcipher_ctx *ctx = ask->private;
|
||||
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(&ctx->req);
|
||||
struct skcipher_tfm *skc = pask->private;
|
||||
struct crypto_skcipher *tfm = skc->skcipher;
|
||||
unsigned ivsize = crypto_skcipher_ivsize(tfm);
|
||||
struct skcipher_sg_list *sgl;
|
||||
struct af_alg_control con = {};
|
||||
@@ -509,37 +501,43 @@ static int skcipher_recvmsg_async(struct socket *sock, struct msghdr *msg,
{
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
struct sock *psk = ask->parent;
struct alg_sock *pask = alg_sk(psk);
struct skcipher_ctx *ctx = ask->private;
struct skcipher_tfm *skc = pask->private;
struct crypto_skcipher *tfm = skc->skcipher;
struct skcipher_sg_list *sgl;
struct scatterlist *sg;
struct skcipher_async_req *sreq;
struct skcipher_request *req;
struct skcipher_async_rsgl *last_rsgl = NULL;
unsigned int txbufs = 0, len = 0, tx_nents = skcipher_all_sg_nents(ctx);
unsigned int reqlen = sizeof(struct skcipher_async_req) +
GET_REQ_SIZE(ctx) + GET_IV_SIZE(ctx);
unsigned int txbufs = 0, len = 0, tx_nents;
unsigned int reqsize = crypto_skcipher_reqsize(tfm);
unsigned int ivsize = crypto_skcipher_ivsize(tfm);
int err = -ENOMEM;
bool mark = false;
char *iv;

sreq = kzalloc(sizeof(*sreq) + reqsize + ivsize, GFP_KERNEL);
if (unlikely(!sreq))
goto out;

req = &sreq->req;
iv = (char *)(req + 1) + reqsize;
sreq->iocb = msg->msg_iocb;
INIT_LIST_HEAD(&sreq->list);
sreq->inflight = &ctx->inflight;

lock_sock(sk);
req = kmalloc(reqlen, GFP_KERNEL);
if (unlikely(!req))
goto unlock;

sreq = GET_SREQ(req, ctx);
sreq->iocb = msg->msg_iocb;
memset(&sreq->first_sgl, '\0', sizeof(struct skcipher_async_rsgl));
INIT_LIST_HEAD(&sreq->list);
tx_nents = skcipher_all_sg_nents(ctx);
sreq->tsg = kcalloc(tx_nents, sizeof(*sg), GFP_KERNEL);
if (unlikely(!sreq->tsg)) {
kfree(req);
if (unlikely(!sreq->tsg))
goto unlock;
}
sg_init_table(sreq->tsg, tx_nents);
memcpy(sreq->iv, ctx->iv, GET_IV_SIZE(ctx));
skcipher_request_set_tfm(req, crypto_skcipher_reqtfm(&ctx->req));
skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
skcipher_async_cb, sk);
memcpy(iv, ctx->iv, ivsize);
skcipher_request_set_tfm(req, tfm);
skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
skcipher_async_cb, sreq);

while (iov_iter_count(&msg->msg_iter)) {
struct skcipher_async_rsgl *rsgl;
@@ -615,20 +613,22 @@ static int skcipher_recvmsg_async(struct socket *sock, struct msghdr *msg,
sg_mark_end(sreq->tsg + txbufs - 1);

skcipher_request_set_crypt(req, sreq->tsg, sreq->first_sgl.sgl.sg,
len, sreq->iv);
len, iv);
err = ctx->enc ? crypto_skcipher_encrypt(req) :
crypto_skcipher_decrypt(req);
if (err == -EINPROGRESS) {
atomic_inc(&ctx->inflight);
err = -EIOCBQUEUED;
sreq = NULL;
goto unlock;
}
free:
skcipher_free_async_sgls(sreq);
kfree(req);
unlock:
skcipher_wmem_wakeup(sk);
release_sock(sk);
kzfree(sreq);
out:
return err;
}
@@ -637,9 +637,12 @@ static int skcipher_recvmsg_sync(struct socket *sock, struct msghdr *msg,
{
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
struct sock *psk = ask->parent;
struct alg_sock *pask = alg_sk(psk);
struct skcipher_ctx *ctx = ask->private;
unsigned bs = crypto_skcipher_blocksize(crypto_skcipher_reqtfm(
&ctx->req));
struct skcipher_tfm *skc = pask->private;
struct crypto_skcipher *tfm = skc->skcipher;
unsigned bs = crypto_skcipher_blocksize(tfm);
struct skcipher_sg_list *sgl;
struct scatterlist *sg;
int err = -EAGAIN;
@@ -947,7 +950,8 @@ static int skcipher_accept_parent_nokey(void *private, struct sock *sk)
ask->private = ctx;

skcipher_request_set_tfm(&ctx->req, skcipher);
skcipher_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG,
skcipher_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_SLEEP |
CRYPTO_TFM_REQ_MAY_BACKLOG,
af_alg_complete, &ctx->completion);

sk->sk_destruct = skcipher_sock_destruct;
@@ -499,6 +499,7 @@ static int crypto_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
if (link->dump == NULL)
return -EINVAL;

down_read(&crypto_alg_sem);
list_for_each_entry(alg, &crypto_alg_list, cra_list)
dump_alloc += CRYPTO_REPORT_MAXSIZE;

@@ -508,8 +509,11 @@ static int crypto_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
.done = link->done,
.min_dump_alloc = dump_alloc,
};
return netlink_dump_start(crypto_nlsk, skb, nlh, &c);
err = netlink_dump_start(crypto_nlsk, skb, nlh, &c);
}
up_read(&crypto_alg_sem);

return err;
}

err = nlmsg_parse(nlh, crypto_msg_min[type], attrs, CRYPTOCFGA_MAX,
@@ -1321,6 +1321,7 @@ static void binder_transaction(struct binder_proc *proc,
struct binder_transaction *t;
struct binder_work *tcomplete;
binder_size_t *offp, *off_end;
binder_size_t off_min;
struct binder_proc *target_proc;
struct binder_thread *target_thread = NULL;
struct binder_node *target_node = NULL;
@@ -1522,18 +1523,24 @@ static void binder_transaction(struct binder_proc *proc,
goto err_bad_offset;
}
off_end = (void *)offp + tr->offsets_size;
off_min = 0;
for (; offp < off_end; offp++) {
struct flat_binder_object *fp;

if (*offp > t->buffer->data_size - sizeof(*fp) ||
*offp < off_min ||
t->buffer->data_size < sizeof(*fp) ||
!IS_ALIGNED(*offp, sizeof(u32))) {
binder_user_error("%d:%d got transaction with invalid offset, %lld\n",
proc->pid, thread->pid, (u64)*offp);
binder_user_error("%d:%d got transaction with invalid offset, %lld (min %lld, max %lld)\n",
proc->pid, thread->pid, (u64)*offp,
(u64)off_min,
(u64)(t->buffer->data_size -
sizeof(*fp)));
return_error = BR_FAILED_REPLY;
goto err_bad_offset;
}
fp = (struct flat_binder_object *)(t->buffer->data + *offp);
off_min = *offp + sizeof(struct flat_binder_object);
switch (fp->type) {
case BINDER_TYPE_BINDER:
case BINDER_TYPE_WEAK_BINDER: {
@@ -3598,13 +3605,24 @@ static int binder_transactions_show(struct seq_file *m, void *unused)

static int binder_proc_show(struct seq_file *m, void *unused)
{
struct binder_proc *itr;
struct binder_proc *proc = m->private;
int do_lock = !binder_debug_no_lock;
bool valid_proc = false;

if (do_lock)
binder_lock(__func__);
seq_puts(m, "binder proc state:\n");
print_binder_proc(m, proc, 1);

hlist_for_each_entry(itr, &binder_procs, proc_node) {
if (itr == proc) {
valid_proc = true;
break;
}
}
if (valid_proc) {
seq_puts(m, "binder proc state:\n");
print_binder_proc(m, proc, 1);
}
if (do_lock)
binder_unlock(__func__);
return 0;
@@ -264,6 +264,26 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */
{ PCI_VDEVICE(INTEL, 0x3b2c), board_ahci }, /* PCH RAID */
{ PCI_VDEVICE(INTEL, 0x3b2f), board_ahci }, /* PCH AHCI */
{ PCI_VDEVICE(INTEL, 0x19b0), board_ahci }, /* DNV AHCI */
{ PCI_VDEVICE(INTEL, 0x19b1), board_ahci }, /* DNV AHCI */
{ PCI_VDEVICE(INTEL, 0x19b2), board_ahci }, /* DNV AHCI */
{ PCI_VDEVICE(INTEL, 0x19b3), board_ahci }, /* DNV AHCI */
{ PCI_VDEVICE(INTEL, 0x19b4), board_ahci }, /* DNV AHCI */
{ PCI_VDEVICE(INTEL, 0x19b5), board_ahci }, /* DNV AHCI */
{ PCI_VDEVICE(INTEL, 0x19b6), board_ahci }, /* DNV AHCI */
{ PCI_VDEVICE(INTEL, 0x19b7), board_ahci }, /* DNV AHCI */
{ PCI_VDEVICE(INTEL, 0x19bE), board_ahci }, /* DNV AHCI */
{ PCI_VDEVICE(INTEL, 0x19bF), board_ahci }, /* DNV AHCI */
{ PCI_VDEVICE(INTEL, 0x19c0), board_ahci }, /* DNV AHCI */
{ PCI_VDEVICE(INTEL, 0x19c1), board_ahci }, /* DNV AHCI */
{ PCI_VDEVICE(INTEL, 0x19c2), board_ahci }, /* DNV AHCI */
{ PCI_VDEVICE(INTEL, 0x19c3), board_ahci }, /* DNV AHCI */
{ PCI_VDEVICE(INTEL, 0x19c4), board_ahci }, /* DNV AHCI */
{ PCI_VDEVICE(INTEL, 0x19c5), board_ahci }, /* DNV AHCI */
{ PCI_VDEVICE(INTEL, 0x19c6), board_ahci }, /* DNV AHCI */
{ PCI_VDEVICE(INTEL, 0x19c7), board_ahci }, /* DNV AHCI */
{ PCI_VDEVICE(INTEL, 0x19cE), board_ahci }, /* DNV AHCI */
{ PCI_VDEVICE(INTEL, 0x19cF), board_ahci }, /* DNV AHCI */
{ PCI_VDEVICE(INTEL, 0x1c02), board_ahci }, /* CPT AHCI */
{ PCI_VDEVICE(INTEL, 0x1c03), board_ahci }, /* CPT AHCI */
{ PCI_VDEVICE(INTEL, 0x1c04), board_ahci }, /* CPT RAID */
@ -250,6 +250,7 @@ enum {
|
||||
AHCI_HFLAG_MULTI_MSI = 0,
|
||||
AHCI_HFLAG_MULTI_MSIX = 0,
|
||||
#endif
|
||||
AHCI_HFLAG_WAKE_BEFORE_STOP = (1 << 22), /* wake before DMA stop */
|
||||
|
||||
/* ap->flags bits */
|
||||
|
||||
|
@ -317,6 +317,7 @@ static int brcm_ahci_probe(struct platform_device *pdev)
|
||||
if (IS_ERR(hpriv))
|
||||
return PTR_ERR(hpriv);
|
||||
hpriv->plat_data = priv;
|
||||
hpriv->flags = AHCI_HFLAG_WAKE_BEFORE_STOP;
|
||||
|
||||
brcm_sata_alpm_init(hpriv);
|
||||
|
||||
|
@ -496,8 +496,8 @@ void ahci_save_initial_config(struct device *dev, struct ahci_host_priv *hpriv)
|
||||
}
|
||||
}
|
||||
|
||||
/* fabricate port_map from cap.nr_ports */
|
||||
if (!port_map) {
|
||||
/* fabricate port_map from cap.nr_ports for < AHCI 1.3 */
|
||||
if (!port_map && vers < 0x10300) {
|
||||
port_map = (1 << ahci_nr_ports(cap)) - 1;
|
||||
dev_warn(dev, "forcing PORTS_IMPL to 0x%x\n", port_map);
|
||||
|
||||
@ -593,8 +593,22 @@ EXPORT_SYMBOL_GPL(ahci_start_engine);
|
||||
int ahci_stop_engine(struct ata_port *ap)
|
||||
{
|
||||
void __iomem *port_mmio = ahci_port_base(ap);
|
||||
struct ahci_host_priv *hpriv = ap->host->private_data;
|
||||
u32 tmp;
|
||||
|
||||
/*
|
||||
* On some controllers, stopping a port's DMA engine while the port
|
||||
* is in ALPM state (partial or slumber) results in failures on
|
||||
* subsequent DMA engine starts. For those controllers, put the
|
||||
* port back in active state before stopping its DMA engine.
|
||||
*/
|
||||
if ((hpriv->flags & AHCI_HFLAG_WAKE_BEFORE_STOP) &&
|
||||
(ap->link.lpm_policy > ATA_LPM_MAX_POWER) &&
|
||||
ahci_set_lpm(&ap->link, ATA_LPM_MAX_POWER, ATA_LPM_WAKE_ONLY)) {
|
||||
dev_err(ap->host->dev, "Failed to wake up port before engine stop\n");
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
tmp = readl(port_mmio + PORT_CMD);
|
||||
|
||||
/* check if the HBA is idle */
|
||||
@ -689,6 +703,9 @@ static int ahci_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
|
||||
void __iomem *port_mmio = ahci_port_base(ap);
|
||||
|
||||
if (policy != ATA_LPM_MAX_POWER) {
|
||||
/* wakeup flag only applies to the max power policy */
|
||||
hints &= ~ATA_LPM_WAKE_ONLY;
|
||||
|
||||
/*
|
||||
* Disable interrupts on Phy Ready. This keeps us from
|
||||
* getting woken up due to spurious phy ready
|
||||
@ -704,7 +721,8 @@ static int ahci_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
|
||||
u32 cmd = readl(port_mmio + PORT_CMD);
|
||||
|
||||
if (policy == ATA_LPM_MAX_POWER || !(hints & ATA_LPM_HIPM)) {
|
||||
cmd &= ~(PORT_CMD_ASP | PORT_CMD_ALPE);
|
||||
if (!(hints & ATA_LPM_WAKE_ONLY))
|
||||
cmd &= ~(PORT_CMD_ASP | PORT_CMD_ALPE);
|
||||
cmd |= PORT_CMD_ICC_ACTIVE;
|
||||
|
||||
writel(cmd, port_mmio + PORT_CMD);
|
||||
@ -712,6 +730,9 @@ static int ahci_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
|
||||
|
||||
/* wait 10ms to be sure we've come out of LPM state */
|
||||
ata_msleep(ap, 10);
|
||||
|
||||
if (hints & ATA_LPM_WAKE_ONLY)
|
||||
return 0;
|
||||
} else {
|
||||
cmd |= PORT_CMD_ALPE;
|
||||
if (policy == ATA_LPM_MIN_POWER)
|
||||
|
@@ -4125,6 +4125,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
{ "SAMSUNG CD-ROM SN-124", "N001", ATA_HORKAGE_NODMA },
{ "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA },
{ " 2GB ATA Flash Disk", "ADMA428M", ATA_HORKAGE_NODMA },
{ "VRFDFC22048UCHC-TE*", NULL, ATA_HORKAGE_NODMA },
/* Odd clown on sil3726/4726 PMPs */
{ "Config Disk", NULL, ATA_HORKAGE_DISABLE },
@ -997,12 +997,9 @@ static inline int ata_hsm_ok_in_wq(struct ata_port *ap,
|
||||
static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
|
||||
{
|
||||
struct ata_port *ap = qc->ap;
|
||||
unsigned long flags;
|
||||
|
||||
if (ap->ops->error_handler) {
|
||||
if (in_wq) {
|
||||
spin_lock_irqsave(ap->lock, flags);
|
||||
|
||||
/* EH might have kicked in while host lock is
|
||||
* released.
|
||||
*/
|
||||
@ -1014,8 +1011,6 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
|
||||
} else
|
||||
ata_port_freeze(ap);
|
||||
}
|
||||
|
||||
spin_unlock_irqrestore(ap->lock, flags);
|
||||
} else {
|
||||
if (likely(!(qc->err_mask & AC_ERR_HSM)))
|
||||
ata_qc_complete(qc);
|
||||
@ -1024,10 +1019,8 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
|
||||
}
|
||||
} else {
|
||||
if (in_wq) {
|
||||
spin_lock_irqsave(ap->lock, flags);
|
||||
ata_sff_irq_on(ap);
|
||||
ata_qc_complete(qc);
|
||||
spin_unlock_irqrestore(ap->lock, flags);
|
||||
} else
|
||||
ata_qc_complete(qc);
|
||||
}
|
||||
@ -1048,9 +1041,10 @@ int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
|
||||
{
|
||||
struct ata_link *link = qc->dev->link;
|
||||
struct ata_eh_info *ehi = &link->eh_info;
|
||||
unsigned long flags = 0;
|
||||
int poll_next;
|
||||
|
||||
lockdep_assert_held(ap->lock);
|
||||
|
||||
WARN_ON_ONCE((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
|
||||
|
||||
/* Make sure ata_sff_qc_issue() does not throw things
|
||||
@ -1112,14 +1106,6 @@ fsm_start:
|
||||
}
|
||||
}
|
||||
|
||||
/* Send the CDB (atapi) or the first data block (ata pio out).
|
||||
* During the state transition, interrupt handler shouldn't
|
||||
* be invoked before the data transfer is complete and
|
||||
* hsm_task_state is changed. Hence, the following locking.
|
||||
*/
|
||||
if (in_wq)
|
||||
spin_lock_irqsave(ap->lock, flags);
|
||||
|
||||
if (qc->tf.protocol == ATA_PROT_PIO) {
|
||||
/* PIO data out protocol.
|
||||
* send first data block.
|
||||
@ -1135,9 +1121,6 @@ fsm_start:
|
||||
/* send CDB */
|
||||
atapi_send_cdb(ap, qc);
|
||||
|
||||
if (in_wq)
|
||||
spin_unlock_irqrestore(ap->lock, flags);
|
||||
|
||||
/* if polling, ata_sff_pio_task() handles the rest.
|
||||
* otherwise, interrupt handler takes over from here.
|
||||
*/
|
||||
@ -1296,7 +1279,8 @@ fsm_start:
|
||||
break;
|
||||
default:
|
||||
poll_next = 0;
|
||||
BUG();
|
||||
WARN(true, "ata%d: SFF host state machine in invalid state %d",
|
||||
ap->print_id, ap->hsm_task_state);
|
||||
}
|
||||
|
||||
return poll_next;
|
||||
@ -1361,12 +1345,14 @@ static void ata_sff_pio_task(struct work_struct *work)
|
||||
u8 status;
|
||||
int poll_next;
|
||||
|
||||
spin_lock_irq(ap->lock);
|
||||
|
||||
BUG_ON(ap->sff_pio_task_link == NULL);
|
||||
/* qc can be NULL if timeout occurred */
|
||||
qc = ata_qc_from_tag(ap, link->active_tag);
|
||||
if (!qc) {
|
||||
ap->sff_pio_task_link = NULL;
|
||||
return;
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
fsm_start:
|
||||
@ -1381,11 +1367,14 @@ fsm_start:
|
||||
*/
|
||||
status = ata_sff_busy_wait(ap, ATA_BUSY, 5);
|
||||
if (status & ATA_BUSY) {
|
||||
spin_unlock_irq(ap->lock);
|
||||
ata_msleep(ap, 2);
|
||||
spin_lock_irq(ap->lock);
|
||||
|
||||
status = ata_sff_busy_wait(ap, ATA_BUSY, 10);
|
||||
if (status & ATA_BUSY) {
|
||||
ata_sff_queue_pio_task(link, ATA_SHORT_PAUSE);
|
||||
return;
|
||||
goto out_unlock;
|
||||
}
|
||||
}
|
||||
|
||||
@ -1402,6 +1391,8 @@ fsm_start:
|
||||
*/
|
||||
if (poll_next)
|
||||
goto fsm_start;
|
||||
out_unlock:
|
||||
spin_unlock_irq(ap->lock);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -206,6 +206,8 @@ static void component_match_release(struct device *master,
|
||||
if (mc->release)
|
||||
mc->release(master, mc->data);
|
||||
}
|
||||
|
||||
kfree(match->compare);
|
||||
}
|
||||
|
||||
static void devm_component_match_release(struct device *dev, void *res)
|
||||
@ -221,14 +223,14 @@ static int component_match_realloc(struct device *dev,
|
||||
if (match->alloc == num)
|
||||
return 0;
|
||||
|
||||
new = devm_kmalloc_array(dev, num, sizeof(*new), GFP_KERNEL);
|
||||
new = kmalloc_array(num, sizeof(*new), GFP_KERNEL);
|
||||
if (!new)
|
||||
return -ENOMEM;
|
||||
|
||||
if (match->compare) {
|
||||
memcpy(new, match->compare, sizeof(*new) *
|
||||
min(match->num, num));
|
||||
devm_kfree(dev, match->compare);
|
||||
kfree(match->compare);
|
||||
}
|
||||
match->compare = new;
|
||||
match->alloc = num;
|
||||
@ -283,6 +285,24 @@ void component_match_add_release(struct device *master,
|
||||
}
|
||||
EXPORT_SYMBOL(component_match_add_release);
|
||||
|
||||
static void free_master(struct master *master)
|
||||
{
|
||||
struct component_match *match = master->match;
|
||||
int i;
|
||||
|
||||
list_del(&master->node);
|
||||
|
||||
if (match) {
|
||||
for (i = 0; i < match->num; i++) {
|
||||
struct component *c = match->compare[i].component;
|
||||
if (c)
|
||||
c->master = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
kfree(master);
|
||||
}
|
||||
|
||||
int component_master_add_with_match(struct device *dev,
|
||||
const struct component_master_ops *ops,
|
||||
struct component_match *match)
|
||||
@ -309,11 +329,9 @@ int component_master_add_with_match(struct device *dev,
|
||||
|
||||
ret = try_to_bring_up_master(master, NULL);
|
||||
|
||||
if (ret < 0) {
|
||||
/* Delete off the list if we weren't successful */
|
||||
list_del(&master->node);
|
||||
kfree(master);
|
||||
}
|
||||
if (ret < 0)
|
||||
free_master(master);
|
||||
|
||||
mutex_unlock(&component_mutex);
|
||||
|
||||
return ret < 0 ? ret : 0;
|
||||
@ -324,25 +342,12 @@ void component_master_del(struct device *dev,
|
||||
const struct component_master_ops *ops)
|
||||
{
|
||||
struct master *master;
|
||||
int i;
|
||||
|
||||
mutex_lock(&component_mutex);
|
||||
master = __master_find(dev, ops);
|
||||
if (master) {
|
||||
struct component_match *match = master->match;
|
||||
|
||||
take_down_master(master);
|
||||
|
||||
list_del(&master->node);
|
||||
|
||||
if (match) {
|
||||
for (i = 0; i < match->num; i++) {
|
||||
struct component *c = match->compare[i].component;
|
||||
if (c)
|
||||
c->master = NULL;
|
||||
}
|
||||
}
|
||||
kfree(master);
|
||||
free_master(master);
|
||||
}
|
||||
mutex_unlock(&component_mutex);
|
||||
}
|
||||
@ -486,6 +491,8 @@ int component_add(struct device *dev, const struct component_ops *ops)
|
||||
|
||||
ret = try_to_bring_up_masters(component);
|
||||
if (ret < 0) {
|
||||
if (component->master)
|
||||
remove_component(component->master, component);
|
||||
list_del(&component->node);
|
||||
|
||||
kfree(component);
|
||||
|
@ -257,7 +257,7 @@ static void __fw_free_buf(struct kref *ref)
|
||||
vunmap(buf->data);
|
||||
for (i = 0; i < buf->nr_pages; i++)
|
||||
__free_page(buf->pages[i]);
|
||||
kfree(buf->pages);
|
||||
vfree(buf->pages);
|
||||
} else
|
||||
#endif
|
||||
vfree(buf->data);
|
||||
@ -353,15 +353,15 @@ static int fw_get_filesystem_firmware(struct device *device,
|
||||
rc = fw_read_file_contents(file, buf);
|
||||
fput(file);
|
||||
if (rc)
|
||||
dev_warn(device, "firmware, attempted to load %s, but failed with error %d\n",
|
||||
path, rc);
|
||||
dev_warn(device, "loading %s failed with error %d\n",
|
||||
path, rc);
|
||||
else
|
||||
break;
|
||||
}
|
||||
__putname(path);
|
||||
|
||||
if (!rc) {
|
||||
dev_dbg(device, "firmware: direct-loading firmware %s\n",
|
||||
dev_dbg(device, "direct-loading %s\n",
|
||||
buf->fw_id);
|
||||
mutex_lock(&fw_lock);
|
||||
set_bit(FW_STATUS_DONE, &buf->status);
|
||||
@ -660,7 +660,7 @@ static ssize_t firmware_loading_store(struct device *dev,
|
||||
if (!test_bit(FW_STATUS_DONE, &fw_buf->status)) {
|
||||
for (i = 0; i < fw_buf->nr_pages; i++)
|
||||
__free_page(fw_buf->pages[i]);
|
||||
kfree(fw_buf->pages);
|
||||
vfree(fw_buf->pages);
|
||||
fw_buf->pages = NULL;
|
||||
fw_buf->page_array_size = 0;
|
||||
fw_buf->nr_pages = 0;
|
||||
@ -770,8 +770,7 @@ static int fw_realloc_buffer(struct firmware_priv *fw_priv, int min_size)
|
||||
buf->page_array_size * 2);
|
||||
struct page **new_pages;
|
||||
|
||||
new_pages = kmalloc(new_array_size * sizeof(void *),
|
||||
GFP_KERNEL);
|
||||
new_pages = vmalloc(new_array_size * sizeof(void *));
|
||||
if (!new_pages) {
|
||||
fw_load_abort(fw_priv);
|
||||
return -ENOMEM;
|
||||
@ -780,7 +779,7 @@ static int fw_realloc_buffer(struct firmware_priv *fw_priv, int min_size)
|
||||
buf->page_array_size * sizeof(void *));
|
||||
memset(&new_pages[buf->page_array_size], 0, sizeof(void *) *
|
||||
(new_array_size - buf->page_array_size));
|
||||
kfree(buf->pages);
|
||||
vfree(buf->pages);
|
||||
buf->pages = new_pages;
|
||||
buf->page_array_size = new_array_size;
|
||||
}
|
||||
@ -1051,7 +1050,7 @@ _request_firmware_prepare(struct firmware **firmware_p, const char *name,
|
||||
}
|
||||
|
||||
if (fw_get_builtin_firmware(firmware, name)) {
|
||||
dev_dbg(device, "firmware: using built-in firmware %s\n", name);
|
||||
dev_dbg(device, "using built-in %s\n", name);
|
||||
return 0; /* assigned */
|
||||
}
|
||||
|
||||
|
@ -133,17 +133,17 @@ static int regmap_mmio_gather_write(void *context,
|
||||
while (val_size) {
|
||||
switch (ctx->val_bytes) {
|
||||
case 1:
|
||||
__raw_writeb(*(u8 *)val, ctx->regs + offset);
|
||||
writeb(*(u8 *)val, ctx->regs + offset);
|
||||
break;
|
||||
case 2:
|
||||
__raw_writew(*(u16 *)val, ctx->regs + offset);
|
||||
writew(*(u16 *)val, ctx->regs + offset);
|
||||
break;
|
||||
case 4:
|
||||
__raw_writel(*(u32 *)val, ctx->regs + offset);
|
||||
writel(*(u32 *)val, ctx->regs + offset);
|
||||
break;
|
||||
#ifdef CONFIG_64BIT
|
||||
case 8:
|
||||
__raw_writeq(*(u64 *)val, ctx->regs + offset);
|
||||
writeq(*(u64 *)val, ctx->regs + offset);
|
||||
break;
|
||||
#endif
|
||||
default:
|
||||
@ -193,17 +193,17 @@ static int regmap_mmio_read(void *context,
|
||||
while (val_size) {
|
||||
switch (ctx->val_bytes) {
|
||||
case 1:
|
||||
*(u8 *)val = __raw_readb(ctx->regs + offset);
|
||||
*(u8 *)val = readb(ctx->regs + offset);
|
||||
break;
|
||||
case 2:
|
||||
*(u16 *)val = __raw_readw(ctx->regs + offset);
|
||||
*(u16 *)val = readw(ctx->regs + offset);
|
||||
break;
|
||||
case 4:
|
||||
*(u32 *)val = __raw_readl(ctx->regs + offset);
|
||||
*(u32 *)val = readl(ctx->regs + offset);
|
||||
break;
|
||||
#ifdef CONFIG_64BIT
|
||||
case 8:
|
||||
*(u64 *)val = __raw_readq(ctx->regs + offset);
|
||||
*(u64 *)val = readq(ctx->regs + offset);
|
||||
break;
|
||||
#endif
|
||||
default:
|
||||
|
@ -496,12 +496,12 @@ static void pc_set_checksum(void)
|
||||
|
||||
#ifdef CONFIG_PROC_FS
|
||||
|
||||
static char *floppy_types[] = {
|
||||
static const char * const floppy_types[] = {
|
||||
"none", "5.25'' 360k", "5.25'' 1.2M", "3.5'' 720k", "3.5'' 1.44M",
|
||||
"3.5'' 2.88M", "3.5'' 2.88M"
|
||||
};
|
||||
|
||||
static char *gfx_types[] = {
|
||||
static const char * const gfx_types[] = {
|
||||
"EGA, VGA, ... (with BIOS)",
|
||||
"CGA (40 cols)",
|
||||
"CGA (80 cols)",
|
||||
@ -602,7 +602,7 @@ static void atari_set_checksum(void)
|
||||
|
||||
static struct {
|
||||
unsigned char val;
|
||||
char *name;
|
||||
const char *name;
|
||||
} boot_prefs[] = {
|
||||
{ 0x80, "TOS" },
|
||||
{ 0x40, "ASV" },
|
||||
@ -611,7 +611,7 @@ static struct {
|
||||
{ 0x00, "unspecified" }
|
||||
};
|
||||
|
||||
static char *languages[] = {
|
||||
static const char * const languages[] = {
|
||||
"English (US)",
|
||||
"German",
|
||||
"French",
|
||||
@ -623,7 +623,7 @@ static char *languages[] = {
|
||||
"Swiss (German)"
|
||||
};
|
||||
|
||||
static char *dateformat[] = {
|
||||
static const char * const dateformat[] = {
|
||||
"MM%cDD%cYY",
|
||||
"DD%cMM%cYY",
|
||||
"YY%cMM%cDD",
|
||||
@ -634,7 +634,7 @@ static char *dateformat[] = {
|
||||
"7 (undefined)"
|
||||
};
|
||||
|
||||
static char *colors[] = {
|
||||
static const char * const colors[] = {
|
||||
"2", "4", "16", "256", "65536", "??", "??", "??"
|
||||
};
|
||||
|
||||
|
@ -129,10 +129,9 @@ static void button_consume_callbacks (int bpcount)
|
||||
|
||||
static void button_sequence_finished (unsigned long parameters)
|
||||
{
|
||||
#ifdef CONFIG_NWBUTTON_REBOOT /* Reboot using button is enabled */
|
||||
if (button_press_count == reboot_count)
|
||||
if (IS_ENABLED(CONFIG_NWBUTTON_REBOOT) &&
|
||||
button_press_count == reboot_count)
|
||||
kill_cad_pid(SIGINT, 1); /* Ask init to reboot us */
|
||||
#endif /* CONFIG_NWBUTTON_REBOOT */
|
||||
button_consume_callbacks (button_press_count);
|
||||
bcount = sprintf (button_output_buffer, "%d\n", button_press_count);
|
||||
button_press_count = 0; /* Reset the button press counter */
|
||||
|
@ -69,12 +69,13 @@
|
||||
#include <linux/ppdev.h>
|
||||
#include <linux/mutex.h>
|
||||
#include <linux/uaccess.h>
|
||||
#include <linux/compat.h>
|
||||
|
||||
#define PP_VERSION "ppdev: user-space parallel port driver"
|
||||
#define CHRDEV "ppdev"
|
||||
|
||||
struct pp_struct {
|
||||
struct pardevice * pdev;
|
||||
struct pardevice *pdev;
|
||||
wait_queue_head_t irq_wait;
|
||||
atomic_t irqc;
|
||||
unsigned int flags;
|
||||
@ -98,18 +99,26 @@ struct pp_struct {
|
||||
#define ROUND_UP(x,y) (((x)+(y)-1)/(y))
|
||||
|
||||
static DEFINE_MUTEX(pp_do_mutex);
|
||||
static inline void pp_enable_irq (struct pp_struct *pp)
|
||||
|
||||
/* define fixed sized ioctl cmd for y2038 migration */
|
||||
#define PPGETTIME32 _IOR(PP_IOCTL, 0x95, s32[2])
|
||||
#define PPSETTIME32 _IOW(PP_IOCTL, 0x96, s32[2])
|
||||
#define PPGETTIME64 _IOR(PP_IOCTL, 0x95, s64[2])
|
||||
#define PPSETTIME64 _IOW(PP_IOCTL, 0x96, s64[2])
|
||||
|
||||
static inline void pp_enable_irq(struct pp_struct *pp)
|
||||
{
|
||||
struct parport *port = pp->pdev->port;
|
||||
port->ops->enable_irq (port);
|
||||
|
||||
port->ops->enable_irq(port);
|
||||
}
|
||||
|
||||
static ssize_t pp_read (struct file * file, char __user * buf, size_t count,
|
||||
loff_t * ppos)
|
||||
static ssize_t pp_read(struct file *file, char __user *buf, size_t count,
|
||||
loff_t *ppos)
|
||||
{
|
||||
unsigned int minor = iminor(file_inode(file));
|
||||
struct pp_struct *pp = file->private_data;
|
||||
char * kbuffer;
|
||||
char *kbuffer;
|
||||
ssize_t bytes_read = 0;
|
||||
struct parport *pport;
|
||||
int mode;
|
||||
@ -125,16 +134,15 @@ static ssize_t pp_read (struct file * file, char __user * buf, size_t count,
|
||||
return 0;
|
||||
|
||||
kbuffer = kmalloc(min_t(size_t, count, PP_BUFFER_SIZE), GFP_KERNEL);
|
||||
if (!kbuffer) {
|
||||
if (!kbuffer)
|
||||
return -ENOMEM;
|
||||
}
|
||||
pport = pp->pdev->port;
|
||||
mode = pport->ieee1284.mode & ~(IEEE1284_DEVICEID | IEEE1284_ADDR);
|
||||
|
||||
parport_set_timeout (pp->pdev,
|
||||
(file->f_flags & O_NONBLOCK) ?
|
||||
PARPORT_INACTIVITY_O_NONBLOCK :
|
||||
pp->default_inactivity);
|
||||
parport_set_timeout(pp->pdev,
|
||||
(file->f_flags & O_NONBLOCK) ?
|
||||
PARPORT_INACTIVITY_O_NONBLOCK :
|
||||
pp->default_inactivity);
|
||||
|
||||
while (bytes_read == 0) {
|
||||
ssize_t need = min_t(unsigned long, count, PP_BUFFER_SIZE);
|
||||
@ -144,20 +152,17 @@ static ssize_t pp_read (struct file * file, char __user * buf, size_t count,
|
||||
int flags = 0;
|
||||
size_t (*fn)(struct parport *, void *, size_t, int);
|
||||
|
||||
if (pp->flags & PP_W91284PIC) {
|
||||
if (pp->flags & PP_W91284PIC)
|
||||
flags |= PARPORT_W91284PIC;
|
||||
}
|
||||
if (pp->flags & PP_FASTREAD) {
|
||||
if (pp->flags & PP_FASTREAD)
|
||||
flags |= PARPORT_EPP_FAST;
|
||||
}
|
||||
if (pport->ieee1284.mode & IEEE1284_ADDR) {
|
||||
if (pport->ieee1284.mode & IEEE1284_ADDR)
|
||||
fn = pport->ops->epp_read_addr;
|
||||
} else {
|
||||
else
|
||||
fn = pport->ops->epp_read_data;
|
||||
}
|
||||
bytes_read = (*fn)(pport, kbuffer, need, flags);
|
||||
} else {
|
||||
bytes_read = parport_read (pport, kbuffer, need);
|
||||
bytes_read = parport_read(pport, kbuffer, need);
|
||||
}
|
||||
|
||||
if (bytes_read != 0)
|
||||
@ -168,7 +173,7 @@ static ssize_t pp_read (struct file * file, char __user * buf, size_t count,
|
||||
break;
|
||||
}
|
||||
|
||||
if (signal_pending (current)) {
|
||||
if (signal_pending(current)) {
|
||||
bytes_read = -ERESTARTSYS;
|
||||
break;
|
||||
}
|
||||
@ -176,22 +181,22 @@ static ssize_t pp_read (struct file * file, char __user * buf, size_t count,
|
||||
cond_resched();
|
||||
}
|
||||
|
||||
parport_set_timeout (pp->pdev, pp->default_inactivity);
|
||||
parport_set_timeout(pp->pdev, pp->default_inactivity);
|
||||
|
||||
if (bytes_read > 0 && copy_to_user (buf, kbuffer, bytes_read))
|
||||
if (bytes_read > 0 && copy_to_user(buf, kbuffer, bytes_read))
|
||||
bytes_read = -EFAULT;
|
||||
|
||||
kfree (kbuffer);
|
||||
pp_enable_irq (pp);
|
||||
kfree(kbuffer);
|
||||
pp_enable_irq(pp);
|
||||
return bytes_read;
|
||||
}
|
||||
|
||||
static ssize_t pp_write (struct file * file, const char __user * buf,
|
||||
size_t count, loff_t * ppos)
|
||||
static ssize_t pp_write(struct file *file, const char __user *buf,
|
||||
size_t count, loff_t *ppos)
|
||||
{
|
||||
unsigned int minor = iminor(file_inode(file));
|
||||
struct pp_struct *pp = file->private_data;
|
||||
char * kbuffer;
|
||||
char *kbuffer;
|
||||
ssize_t bytes_written = 0;
|
||||
ssize_t wrote;
|
||||
int mode;
|
||||
@ -204,21 +209,21 @@ static ssize_t pp_write (struct file * file, const char __user * buf,
|
||||
}
|
||||
|
||||
kbuffer = kmalloc(min_t(size_t, count, PP_BUFFER_SIZE), GFP_KERNEL);
|
||||
if (!kbuffer) {
|
||||
if (!kbuffer)
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
pport = pp->pdev->port;
|
||||
mode = pport->ieee1284.mode & ~(IEEE1284_DEVICEID | IEEE1284_ADDR);
|
||||
|
||||
parport_set_timeout (pp->pdev,
|
||||
(file->f_flags & O_NONBLOCK) ?
|
||||
PARPORT_INACTIVITY_O_NONBLOCK :
|
||||
pp->default_inactivity);
|
||||
parport_set_timeout(pp->pdev,
|
||||
(file->f_flags & O_NONBLOCK) ?
|
||||
PARPORT_INACTIVITY_O_NONBLOCK :
|
||||
pp->default_inactivity);
|
||||
|
||||
while (bytes_written < count) {
|
||||
ssize_t n = min_t(unsigned long, count - bytes_written, PP_BUFFER_SIZE);
|
||||
|
||||
if (copy_from_user (kbuffer, buf + bytes_written, n)) {
|
||||
if (copy_from_user(kbuffer, buf + bytes_written, n)) {
|
||||
bytes_written = -EFAULT;
|
||||
break;
|
||||
}
|
||||
@ -226,20 +231,19 @@ static ssize_t pp_write (struct file * file, const char __user * buf,
|
||||
if ((pp->flags & PP_FASTWRITE) && (mode == IEEE1284_MODE_EPP)) {
|
||||
/* do a fast EPP write */
|
||||
if (pport->ieee1284.mode & IEEE1284_ADDR) {
|
||||
wrote = pport->ops->epp_write_addr (pport,
|
||||
wrote = pport->ops->epp_write_addr(pport,
|
||||
kbuffer, n, PARPORT_EPP_FAST);
|
||||
} else {
|
||||
wrote = pport->ops->epp_write_data (pport,
|
||||
wrote = pport->ops->epp_write_data(pport,
|
||||
kbuffer, n, PARPORT_EPP_FAST);
|
||||
}
|
||||
} else {
|
||||
wrote = parport_write (pp->pdev->port, kbuffer, n);
|
||||
wrote = parport_write(pp->pdev->port, kbuffer, n);
|
||||
}
|
||||
|
||||
if (wrote <= 0) {
|
||||
if (!bytes_written) {
|
||||
if (!bytes_written)
|
||||
bytes_written = wrote;
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
@ -251,67 +255,69 @@ static ssize_t pp_write (struct file * file, const char __user * buf,
|
||||
break;
|
||||
}
|
||||
|
||||
if (signal_pending (current))
|
||||
if (signal_pending(current))
|
||||
break;
|
||||
|
||||
cond_resched();
|
||||
}
|
||||
|
||||
parport_set_timeout (pp->pdev, pp->default_inactivity);
|
||||
parport_set_timeout(pp->pdev, pp->default_inactivity);
|
||||
|
||||
kfree (kbuffer);
|
||||
pp_enable_irq (pp);
|
||||
kfree(kbuffer);
|
||||
pp_enable_irq(pp);
|
||||
return bytes_written;
|
||||
}
|
||||
|
||||
static void pp_irq (void *private)
|
||||
static void pp_irq(void *private)
|
||||
{
|
||||
struct pp_struct *pp = private;
|
||||
|
||||
if (pp->irqresponse) {
|
||||
parport_write_control (pp->pdev->port, pp->irqctl);
|
||||
parport_write_control(pp->pdev->port, pp->irqctl);
|
||||
pp->irqresponse = 0;
|
||||
}
|
||||
|
||||
atomic_inc (&pp->irqc);
|
||||
wake_up_interruptible (&pp->irq_wait);
|
||||
atomic_inc(&pp->irqc);
|
||||
wake_up_interruptible(&pp->irq_wait);
|
||||
}
|
||||
|
||||
static int register_device (int minor, struct pp_struct *pp)
|
||||
static int register_device(int minor, struct pp_struct *pp)
|
||||
{
|
||||
struct parport *port;
|
||||
struct pardevice * pdev = NULL;
|
||||
struct pardevice *pdev = NULL;
|
||||
char *name;
|
||||
int fl;
|
||||
struct pardev_cb ppdev_cb;
|
||||
|
||||
name = kasprintf(GFP_KERNEL, CHRDEV "%x", minor);
|
||||
if (name == NULL)
|
||||
return -ENOMEM;
|
||||
|
||||
port = parport_find_number (minor);
|
||||
port = parport_find_number(minor);
|
||||
if (!port) {
|
||||
printk (KERN_WARNING "%s: no associated port!\n", name);
|
||||
kfree (name);
|
||||
printk(KERN_WARNING "%s: no associated port!\n", name);
|
||||
kfree(name);
|
||||
return -ENXIO;
|
||||
}
|
||||
|
||||
fl = (pp->flags & PP_EXCL) ? PARPORT_FLAG_EXCL : 0;
|
||||
pdev = parport_register_device (port, name, NULL,
|
||||
NULL, pp_irq, fl, pp);
|
||||
parport_put_port (port);
|
||||
memset(&ppdev_cb, 0, sizeof(ppdev_cb));
|
||||
ppdev_cb.irq_func = pp_irq;
|
||||
ppdev_cb.flags = (pp->flags & PP_EXCL) ? PARPORT_FLAG_EXCL : 0;
|
||||
ppdev_cb.private = pp;
|
||||
pdev = parport_register_dev_model(port, name, &ppdev_cb, minor);
|
||||
parport_put_port(port);
|
||||
|
||||
if (!pdev) {
|
||||
printk (KERN_WARNING "%s: failed to register device!\n", name);
|
||||
kfree (name);
|
||||
printk(KERN_WARNING "%s: failed to register device!\n", name);
|
||||
kfree(name);
|
||||
return -ENXIO;
|
||||
}
|
||||
|
||||
pp->pdev = pdev;
|
||||
pr_debug("%s: registered pardevice\n", name);
|
||||
dev_dbg(&pdev->dev, "registered pardevice\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
static enum ieee1284_phase init_phase (int mode)
|
||||
static enum ieee1284_phase init_phase(int mode)
|
||||
{
|
||||
switch (mode & ~(IEEE1284_DEVICEID
|
||||
| IEEE1284_ADDR)) {
|
||||
@ -322,11 +328,27 @@ static enum ieee1284_phase init_phase (int mode)
|
||||
return IEEE1284_PH_FWD_IDLE;
|
||||
}
|
||||
|
||||
static int pp_set_timeout(struct pardevice *pdev, long tv_sec, int tv_usec)
|
||||
{
|
||||
long to_jiffies;
|
||||
|
||||
if ((tv_sec < 0) || (tv_usec < 0))
|
||||
return -EINVAL;
|
||||
|
||||
to_jiffies = usecs_to_jiffies(tv_usec);
|
||||
to_jiffies += tv_sec * HZ;
|
||||
if (to_jiffies <= 0)
|
||||
return -EINVAL;
|
||||
|
||||
pdev->timeout = to_jiffies;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
|
||||
{
|
||||
unsigned int minor = iminor(file_inode(file));
|
||||
struct pp_struct *pp = file->private_data;
|
||||
struct parport * port;
|
||||
struct parport *port;
|
||||
void __user *argp = (void __user *)arg;
|
||||
|
||||
/* First handle the cases that don't take arguments. */
|
||||
@ -337,19 +359,19 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
|
||||
int ret;
|
||||
|
||||
if (pp->flags & PP_CLAIMED) {
|
||||
pr_debug(CHRDEV "%x: you've already got it!\n", minor);
|
||||
dev_dbg(&pp->pdev->dev, "you've already got it!\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* Deferred device registration. */
|
||||
if (!pp->pdev) {
|
||||
int err = register_device (minor, pp);
|
||||
if (err) {
|
||||
int err = register_device(minor, pp);
|
||||
|
||||
if (err)
|
||||
return err;
|
||||
}
|
||||
}
|
||||
|
||||
ret = parport_claim_or_block (pp->pdev);
|
||||
ret = parport_claim_or_block(pp->pdev);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
@ -357,7 +379,7 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
|
||||
|
||||
/* For interrupt-reporting to work, we need to be
|
||||
* informed of each interrupt. */
|
||||
pp_enable_irq (pp);
|
||||
pp_enable_irq(pp);
|
||||
|
||||
/* We may need to fix up the state machine. */
|
||||
info = &pp->pdev->port->ieee1284;
|
||||
@ -365,15 +387,15 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
|
||||
pp->saved_state.phase = info->phase;
|
||||
info->mode = pp->state.mode;
|
||||
info->phase = pp->state.phase;
|
||||
pp->default_inactivity = parport_set_timeout (pp->pdev, 0);
|
||||
parport_set_timeout (pp->pdev, pp->default_inactivity);
|
||||
pp->default_inactivity = parport_set_timeout(pp->pdev, 0);
|
||||
parport_set_timeout(pp->pdev, pp->default_inactivity);
|
||||
|
||||
return 0;
|
||||
}
|
||||
case PPEXCL:
|
||||
if (pp->pdev) {
|
||||
pr_debug(CHRDEV "%x: too late for PPEXCL; "
|
||||
"already registered\n", minor);
|
||||
dev_dbg(&pp->pdev->dev,
|
||||
"too late for PPEXCL; already registered\n");
|
||||
if (pp->flags & PP_EXCL)
|
||||
/* But it's not really an error. */
|
||||
return 0;
|
||||
@ -388,11 +410,12 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
|
||||
case PPSETMODE:
|
||||
{
|
||||
int mode;
|
||||
if (copy_from_user (&mode, argp, sizeof (mode)))
|
||||
|
||||
if (copy_from_user(&mode, argp, sizeof(mode)))
|
||||
return -EFAULT;
|
||||
/* FIXME: validate mode */
|
||||
pp->state.mode = mode;
|
||||
pp->state.phase = init_phase (mode);
|
||||
pp->state.phase = init_phase(mode);
|
||||
|
||||
if (pp->flags & PP_CLAIMED) {
|
||||
pp->pdev->port->ieee1284.mode = mode;
|
||||
@ -405,28 +428,27 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
|
||||
{
|
||||
int mode;
|
||||
|
||||
if (pp->flags & PP_CLAIMED) {
|
||||
if (pp->flags & PP_CLAIMED)
|
||||
mode = pp->pdev->port->ieee1284.mode;
|
||||
} else {
|
||||
else
|
||||
mode = pp->state.mode;
|
||||
}
|
||||
if (copy_to_user (argp, &mode, sizeof (mode))) {
|
||||
|
||||
if (copy_to_user(argp, &mode, sizeof(mode)))
|
||||
return -EFAULT;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
case PPSETPHASE:
|
||||
{
|
||||
int phase;
|
||||
if (copy_from_user (&phase, argp, sizeof (phase))) {
|
||||
|
||||
if (copy_from_user(&phase, argp, sizeof(phase)))
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
/* FIXME: validate phase */
|
||||
pp->state.phase = phase;
|
||||
|
||||
if (pp->flags & PP_CLAIMED) {
|
||||
if (pp->flags & PP_CLAIMED)
|
||||
pp->pdev->port->ieee1284.phase = phase;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -434,38 +456,34 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
|
||||
{
|
||||
int phase;
|
||||
|
||||
if (pp->flags & PP_CLAIMED) {
|
||||
if (pp->flags & PP_CLAIMED)
|
||||
phase = pp->pdev->port->ieee1284.phase;
|
||||
} else {
|
||||
else
|
||||
phase = pp->state.phase;
|
||||
}
|
||||
if (copy_to_user (argp, &phase, sizeof (phase))) {
|
||||
if (copy_to_user(argp, &phase, sizeof(phase)))
|
||||
return -EFAULT;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
case PPGETMODES:
|
||||
{
|
||||
unsigned int modes;
|
||||
|
||||
port = parport_find_number (minor);
|
||||
port = parport_find_number(minor);
|
||||
if (!port)
|
||||
return -ENODEV;
|
||||
|
||||
modes = port->modes;
|
||||
parport_put_port(port);
|
||||
if (copy_to_user (argp, &modes, sizeof (modes))) {
|
||||
if (copy_to_user(argp, &modes, sizeof(modes)))
|
||||
return -EFAULT;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
case PPSETFLAGS:
|
||||
{
|
||||
int uflags;
|
||||
|
||||
if (copy_from_user (&uflags, argp, sizeof (uflags))) {
|
||||
if (copy_from_user(&uflags, argp, sizeof(uflags)))
|
||||
return -EFAULT;
|
||||
}
|
||||
pp->flags &= ~PP_FLAGMASK;
|
||||
pp->flags |= (uflags & PP_FLAGMASK);
|
||||
return 0;
|
||||
@ -475,9 +493,8 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
|
||||
int uflags;
|
||||
|
||||
uflags = pp->flags & PP_FLAGMASK;
|
||||
if (copy_to_user (argp, &uflags, sizeof (uflags))) {
|
||||
if (copy_to_user(argp, &uflags, sizeof(uflags)))
|
||||
return -EFAULT;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
} /* end switch() */
|
||||
@ -495,27 +512,28 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
|
||||
unsigned char reg;
|
||||
unsigned char mask;
|
||||
int mode;
|
||||
s32 time32[2];
|
||||
s64 time64[2];
|
||||
struct timespec64 ts;
|
||||
int ret;
|
||||
struct timeval par_timeout;
|
||||
long to_jiffies;
|
||||
|
||||
case PPRSTATUS:
|
||||
reg = parport_read_status (port);
|
||||
if (copy_to_user (argp, ®, sizeof (reg)))
|
||||
reg = parport_read_status(port);
|
||||
if (copy_to_user(argp, ®, sizeof(reg)))
|
||||
return -EFAULT;
|
||||
return 0;
|
||||
case PPRDATA:
|
||||
reg = parport_read_data (port);
|
||||
if (copy_to_user (argp, ®, sizeof (reg)))
|
||||
reg = parport_read_data(port);
|
||||
if (copy_to_user(argp, ®, sizeof(reg)))
|
||||
return -EFAULT;
|
||||
return 0;
|
||||
case PPRCONTROL:
|
||||
reg = parport_read_control (port);
|
||||
if (copy_to_user (argp, ®, sizeof (reg)))
|
||||
reg = parport_read_control(port);
|
||||
if (copy_to_user(argp, ®, sizeof(reg)))
|
||||
return -EFAULT;
|
||||
return 0;
|
||||
case PPYIELD:
|
||||
parport_yield_blocking (pp->pdev);
|
||||
parport_yield_blocking(pp->pdev);
|
||||
return 0;
|
||||
|
||||
case PPRELEASE:
|
||||
@ -525,45 +543,45 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
|
||||
pp->state.phase = info->phase;
|
||||
info->mode = pp->saved_state.mode;
|
||||
info->phase = pp->saved_state.phase;
|
||||
parport_release (pp->pdev);
|
||||
parport_release(pp->pdev);
|
||||
pp->flags &= ~PP_CLAIMED;
|
||||
return 0;
|
||||
|
||||
case PPWCONTROL:
|
||||
if (copy_from_user (®, argp, sizeof (reg)))
|
||||
if (copy_from_user(®, argp, sizeof(reg)))
|
||||
return -EFAULT;
|
||||
parport_write_control (port, reg);
|
||||
parport_write_control(port, reg);
|
||||
return 0;
|
||||
|
||||
case PPWDATA:
|
||||
if (copy_from_user (®, argp, sizeof (reg)))
|
||||
if (copy_from_user(®, argp, sizeof(reg)))
|
||||
return -EFAULT;
|
||||
parport_write_data (port, reg);
|
||||
parport_write_data(port, reg);
|
||||
return 0;
|
||||
|
||||
case PPFCONTROL:
|
||||
if (copy_from_user (&mask, argp,
|
||||
sizeof (mask)))
|
||||
if (copy_from_user(&mask, argp,
|
||||
sizeof(mask)))
|
||||
return -EFAULT;
|
||||
if (copy_from_user (®, 1 + (unsigned char __user *) arg,
|
||||
sizeof (reg)))
|
||||
if (copy_from_user(®, 1 + (unsigned char __user *) arg,
|
||||
sizeof(reg)))
|
||||
return -EFAULT;
|
||||
parport_frob_control (port, mask, reg);
|
||||
parport_frob_control(port, mask, reg);
|
||||
return 0;
|
||||
|
||||
case PPDATADIR:
|
||||
if (copy_from_user (&mode, argp, sizeof (mode)))
|
||||
if (copy_from_user(&mode, argp, sizeof(mode)))
|
||||
return -EFAULT;
|
||||
if (mode)
|
||||
port->ops->data_reverse (port);
|
||||
port->ops->data_reverse(port);
|
||||
else
|
||||
port->ops->data_forward (port);
|
||||
port->ops->data_forward(port);
|
||||
return 0;
|
||||
|
||||
case PPNEGOT:
|
||||
if (copy_from_user (&mode, argp, sizeof (mode)))
|
||||
if (copy_from_user(&mode, argp, sizeof(mode)))
|
||||
return -EFAULT;
|
||||
switch ((ret = parport_negotiate (port, mode))) {
|
||||
switch ((ret = parport_negotiate(port, mode))) {
|
||||
case 0: break;
|
||||
case -1: /* handshake failed, peripheral not IEEE 1284 */
|
||||
ret = -EIO;
|
||||
@ -572,11 +590,11 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
|
||||
ret = -ENXIO;
|
||||
break;
|
||||
}
|
||||
pp_enable_irq (pp);
|
||||
pp_enable_irq(pp);
|
||||
return ret;
|
||||
|
||||
case PPWCTLONIRQ:
|
||||
if (copy_from_user (®, argp, sizeof (reg)))
|
||||
if (copy_from_user(®, argp, sizeof(reg)))
|
||||
return -EFAULT;
|
||||
|
||||
/* Remember what to set the control lines to, for next
|
||||
@ -586,39 +604,50 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
|
||||
return 0;
|
||||
|
||||
case PPCLRIRQ:
|
||||
ret = atomic_read (&pp->irqc);
|
||||
if (copy_to_user (argp, &ret, sizeof (ret)))
|
||||
ret = atomic_read(&pp->irqc);
|
||||
if (copy_to_user(argp, &ret, sizeof(ret)))
|
||||
return -EFAULT;
|
||||
atomic_sub (ret, &pp->irqc);
|
||||
atomic_sub(ret, &pp->irqc);
|
||||
return 0;
|
||||
|
||||
case PPSETTIME:
|
||||
if (copy_from_user (&par_timeout, argp, sizeof(struct timeval))) {
|
||||
case PPSETTIME32:
|
||||
if (copy_from_user(time32, argp, sizeof(time32)))
|
||||
return -EFAULT;
|
||||
}
|
||||
/* Convert to jiffies, place in pp->pdev->timeout */
|
||||
if ((par_timeout.tv_sec < 0) || (par_timeout.tv_usec < 0)) {
|
||||
|
||||
return pp_set_timeout(pp->pdev, time32[0], time32[1]);
|
||||
|
||||
case PPSETTIME64:
|
||||
if (copy_from_user(time64, argp, sizeof(time64)))
|
||||
return -EFAULT;
|
||||
|
||||
return pp_set_timeout(pp->pdev, time64[0], time64[1]);
|
||||
|
||||
case PPGETTIME32:
|
||||
jiffies_to_timespec64(pp->pdev->timeout, &ts);
|
||||
time32[0] = ts.tv_sec;
|
||||
time32[1] = ts.tv_nsec / NSEC_PER_USEC;
|
||||
if ((time32[0] < 0) || (time32[1] < 0))
|
||||
return -EINVAL;
|
||||
}
|
||||
to_jiffies = ROUND_UP(par_timeout.tv_usec, 1000000/HZ);
|
||||
to_jiffies += par_timeout.tv_sec * (long)HZ;
|
||||
if (to_jiffies <= 0) {
|
||||
return -EINVAL;
|
||||
}
|
||||
pp->pdev->timeout = to_jiffies;
|
||||
|
||||
if (copy_to_user(argp, time32, sizeof(time32)))
|
||||
return -EFAULT;
|
||||
|
||||
return 0;
|
||||
|
||||
case PPGETTIME:
|
||||
to_jiffies = pp->pdev->timeout;
|
||||
memset(&par_timeout, 0, sizeof(par_timeout));
|
||||
par_timeout.tv_sec = to_jiffies / HZ;
|
||||
par_timeout.tv_usec = (to_jiffies % (long)HZ) * (1000000/HZ);
|
||||
if (copy_to_user (argp, &par_timeout, sizeof(struct timeval)))
|
||||
case PPGETTIME64:
|
||||
jiffies_to_timespec64(pp->pdev->timeout, &ts);
|
||||
time64[0] = ts.tv_sec;
|
||||
time64[1] = ts.tv_nsec / NSEC_PER_USEC;
|
||||
if ((time64[0] < 0) || (time64[1] < 0))
|
||||
return -EINVAL;
|
||||
|
||||
if (copy_to_user(argp, time64, sizeof(time64)))
|
||||
return -EFAULT;
|
||||
|
||||
return 0;
|
||||
|
||||
default:
|
||||
pr_debug(CHRDEV "%x: What? (cmd=0x%x)\n", minor, cmd);
|
||||
dev_dbg(&pp->pdev->dev, "What? (cmd=0x%x)\n", cmd);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
@ -629,13 +658,22 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
|
||||
static long pp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
|
||||
{
|
||||
long ret;
|
||||
|
||||
mutex_lock(&pp_do_mutex);
|
||||
ret = pp_do_ioctl(file, cmd, arg);
|
||||
mutex_unlock(&pp_do_mutex);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int pp_open (struct inode * inode, struct file * file)
|
||||
#ifdef CONFIG_COMPAT
|
||||
static long pp_compat_ioctl(struct file *file, unsigned int cmd,
|
||||
unsigned long arg)
|
||||
{
|
||||
return pp_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
|
||||
}
|
||||
#endif
|
||||
|
||||
static int pp_open(struct inode *inode, struct file *file)
|
||||
{
|
||||
unsigned int minor = iminor(inode);
|
||||
struct pp_struct *pp;
|
||||
@ -643,16 +681,16 @@ static int pp_open (struct inode * inode, struct file * file)
|
||||
if (minor >= PARPORT_MAX)
|
||||
return -ENXIO;
|
||||
|
||||
pp = kmalloc (sizeof (struct pp_struct), GFP_KERNEL);
|
||||
pp = kmalloc(sizeof(struct pp_struct), GFP_KERNEL);
|
||||
if (!pp)
|
||||
return -ENOMEM;
|
||||
|
||||
pp->state.mode = IEEE1284_MODE_COMPAT;
|
||||
pp->state.phase = init_phase (pp->state.mode);
|
||||
pp->state.phase = init_phase(pp->state.mode);
|
||||
pp->flags = 0;
|
||||
pp->irqresponse = 0;
|
||||
atomic_set (&pp->irqc, 0);
|
||||
init_waitqueue_head (&pp->irq_wait);
|
||||
atomic_set(&pp->irqc, 0);
|
||||
init_waitqueue_head(&pp->irq_wait);
|
||||
|
||||
/* Defer the actual device registration until the first claim.
|
||||
* That way, we know whether or not the driver wants to have
|
||||
@ -664,7 +702,7 @@ static int pp_open (struct inode * inode, struct file * file)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int pp_release (struct inode * inode, struct file * file)
|
||||
static int pp_release(struct inode *inode, struct file *file)
|
||||
{
|
||||
unsigned int minor = iminor(inode);
|
||||
struct pp_struct *pp = file->private_data;
|
||||
@ -673,10 +711,10 @@ static int pp_release (struct inode * inode, struct file * file)
|
||||
compat_negot = 0;
|
||||
if (!(pp->flags & PP_CLAIMED) && pp->pdev &&
|
||||
(pp->state.mode != IEEE1284_MODE_COMPAT)) {
|
||||
struct ieee1284_info *info;
|
||||
struct ieee1284_info *info;
|
||||
|
||||
/* parport released, but not in compatibility mode */
|
||||
parport_claim_or_block (pp->pdev);
|
||||
parport_claim_or_block(pp->pdev);
|
||||
pp->flags |= PP_CLAIMED;
|
||||
info = &pp->pdev->port->ieee1284;
|
||||
pp->saved_state.mode = info->mode;
|
||||
@ -689,9 +727,9 @@ static int pp_release (struct inode * inode, struct file * file)
|
||||
compat_negot = 2;
|
||||
}
|
||||
if (compat_negot) {
|
||||
parport_negotiate (pp->pdev->port, IEEE1284_MODE_COMPAT);
|
||||
pr_debug(CHRDEV "%x: negotiated back to compatibility "
|
||||
"mode because user-space forgot\n", minor);
|
||||
parport_negotiate(pp->pdev->port, IEEE1284_MODE_COMPAT);
|
||||
dev_dbg(&pp->pdev->dev,
|
||||
"negotiated back to compatibility mode because user-space forgot\n");
|
||||
}
|
||||
|
||||
if (pp->flags & PP_CLAIMED) {
|
||||
@ -702,7 +740,7 @@ static int pp_release (struct inode * inode, struct file * file)
|
||||
pp->state.phase = info->phase;
|
||||
info->mode = pp->saved_state.mode;
|
||||
info->phase = pp->saved_state.phase;
|
||||
parport_release (pp->pdev);
|
||||
parport_release(pp->pdev);
|
||||
if (compat_negot != 1) {
|
||||
pr_debug(CHRDEV "%x: released pardevice "
|
||||
"because user-space forgot\n", minor);
|
||||
@ -711,25 +749,26 @@ static int pp_release (struct inode * inode, struct file * file)
|
||||
|
||||
if (pp->pdev) {
|
||||
const char *name = pp->pdev->name;
|
||||
parport_unregister_device (pp->pdev);
|
||||
kfree (name);
|
||||
|
||||
parport_unregister_device(pp->pdev);
|
||||
kfree(name);
|
||||
pp->pdev = NULL;
|
||||
pr_debug(CHRDEV "%x: unregistered pardevice\n", minor);
|
||||
}
|
||||
|
||||
kfree (pp);
|
||||
kfree(pp);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* No kernel lock held - fine */
|
||||
static unsigned int pp_poll (struct file * file, poll_table * wait)
|
||||
static unsigned int pp_poll(struct file *file, poll_table *wait)
|
||||
{
|
||||
struct pp_struct *pp = file->private_data;
|
||||
unsigned int mask = 0;
|
||||
|
||||
poll_wait (file, &pp->irq_wait, wait);
|
||||
if (atomic_read (&pp->irqc))
|
||||
poll_wait(file, &pp->irq_wait, wait);
|
||||
if (atomic_read(&pp->irqc))
|
||||
mask |= POLLIN | POLLRDNORM;
|
||||
|
||||
return mask;
|
||||
@ -744,6 +783,9 @@ static const struct file_operations pp_fops = {
|
||||
.write = pp_write,
|
||||
.poll = pp_poll,
|
||||
.unlocked_ioctl = pp_ioctl,
|
||||
#ifdef CONFIG_COMPAT
|
||||
.compat_ioctl = pp_compat_ioctl,
|
||||
#endif
|
||||
.open = pp_open,
|
||||
.release = pp_release,
|
||||
};
|
||||
@ -759,19 +801,32 @@ static void pp_detach(struct parport *port)
|
||||
device_destroy(ppdev_class, MKDEV(PP_MAJOR, port->number));
|
||||
}
|
||||
|
||||
static int pp_probe(struct pardevice *par_dev)
|
||||
{
|
||||
struct device_driver *drv = par_dev->dev.driver;
|
||||
int len = strlen(drv->name);
|
||||
|
||||
if (strncmp(par_dev->name, drv->name, len))
|
||||
return -ENODEV;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct parport_driver pp_driver = {
|
||||
.name = CHRDEV,
|
||||
.attach = pp_attach,
|
||||
.probe = pp_probe,
|
||||
.match_port = pp_attach,
|
||||
.detach = pp_detach,
|
||||
.devmodel = true,
|
||||
};
|
||||
|
||||
static int __init ppdev_init (void)
|
||||
static int __init ppdev_init(void)
|
||||
{
|
||||
int err = 0;
|
||||
|
||||
if (register_chrdev (PP_MAJOR, CHRDEV, &pp_fops)) {
|
||||
printk (KERN_WARNING CHRDEV ": unable to get major %d\n",
|
||||
PP_MAJOR);
|
||||
if (register_chrdev(PP_MAJOR, CHRDEV, &pp_fops)) {
|
||||
printk(KERN_WARNING CHRDEV ": unable to get major %d\n",
|
||||
PP_MAJOR);
|
||||
return -EIO;
|
||||
}
|
||||
ppdev_class = class_create(THIS_MODULE, CHRDEV);
|
||||
@ -781,11 +836,11 @@ static int __init ppdev_init (void)
|
||||
}
|
||||
err = parport_register_driver(&pp_driver);
|
||||
if (err < 0) {
|
||||
printk (KERN_WARNING CHRDEV ": unable to register with parport\n");
|
||||
printk(KERN_WARNING CHRDEV ": unable to register with parport\n");
|
||||
goto out_class;
|
||||
}
|
||||
|
||||
printk (KERN_INFO PP_VERSION "\n");
|
||||
printk(KERN_INFO PP_VERSION "\n");
|
||||
goto out;
|
||||
|
||||
out_class:
|
||||
@ -796,12 +851,12 @@ out:
|
||||
return err;
|
||||
}
|
||||
|
||||
static void __exit ppdev_cleanup (void)
|
||||
static void __exit ppdev_cleanup(void)
|
||||
{
|
||||
/* Clean up all parport stuff */
|
||||
parport_unregister_driver(&pp_driver);
|
||||
class_destroy(ppdev_class);
|
||||
unregister_chrdev (PP_MAJOR, CHRDEV);
|
||||
unregister_chrdev(PP_MAJOR, CHRDEV);
|
||||
}
|
||||
|
||||
module_init(ppdev_init);
|
||||
|
@@ -334,10 +334,8 @@ static int __init raw_init(void)

cdev_init(&raw_cdev, &raw_fops);
ret = cdev_add(&raw_cdev, dev, max_raw_minors);
if (ret) {
if (ret)
goto error_region;
}

raw_class = class_create(THIS_MODULE, "raw");
if (IS_ERR(raw_class)) {
printk(KERN_ERR "Error creating raw class.\n");
@ -782,7 +782,7 @@ static void atmel_sha_finish_req(struct ahash_request *req, int err)
|
||||
dd->flags &= ~(SHA_FLAGS_BUSY | SHA_FLAGS_FINAL | SHA_FLAGS_CPU |
|
||||
SHA_FLAGS_DMA_READY | SHA_FLAGS_OUTPUT_READY);
|
||||
|
||||
clk_disable_unprepare(dd->iclk);
|
||||
clk_disable(dd->iclk);
|
||||
|
||||
if (req->base.complete)
|
||||
req->base.complete(&req->base, err);
|
||||
@ -795,7 +795,7 @@ static int atmel_sha_hw_init(struct atmel_sha_dev *dd)
|
||||
{
|
||||
int err;
|
||||
|
||||
err = clk_prepare_enable(dd->iclk);
|
||||
err = clk_enable(dd->iclk);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
@ -822,7 +822,7 @@ static void atmel_sha_hw_version_init(struct atmel_sha_dev *dd)
|
||||
dev_info(dd->dev,
|
||||
"version: 0x%x\n", dd->hw_version);
|
||||
|
||||
clk_disable_unprepare(dd->iclk);
|
||||
clk_disable(dd->iclk);
|
||||
}
|
||||
|
||||
static int atmel_sha_handle_queue(struct atmel_sha_dev *dd,
|
||||
@ -1410,6 +1410,10 @@ static int atmel_sha_probe(struct platform_device *pdev)
|
||||
goto res_err;
|
||||
}
|
||||
|
||||
err = clk_prepare(sha_dd->iclk);
|
||||
if (err)
|
||||
goto res_err;
|
||||
|
||||
atmel_sha_hw_version_init(sha_dd);
|
||||
|
||||
atmel_sha_get_cap(sha_dd);
|
||||
@ -1421,12 +1425,12 @@ static int atmel_sha_probe(struct platform_device *pdev)
|
||||
if (IS_ERR(pdata)) {
|
||||
dev_err(&pdev->dev, "platform data not available\n");
|
||||
err = PTR_ERR(pdata);
|
||||
goto res_err;
|
||||
goto iclk_unprepare;
|
||||
}
|
||||
}
|
||||
if (!pdata->dma_slave) {
|
||||
err = -ENXIO;
|
||||
goto res_err;
|
||||
goto iclk_unprepare;
|
||||
}
|
||||
err = atmel_sha_dma_init(sha_dd, pdata);
|
||||
if (err)
|
||||
@ -1457,6 +1461,8 @@ err_algs:
|
||||
if (sha_dd->caps.has_dma)
|
||||
atmel_sha_dma_cleanup(sha_dd);
|
||||
err_sha_dma:
|
||||
iclk_unprepare:
|
||||
clk_unprepare(sha_dd->iclk);
|
||||
res_err:
|
||||
tasklet_kill(&sha_dd->done_task);
|
||||
sha_dd_err:
|
||||
@ -1483,12 +1489,7 @@ static int atmel_sha_remove(struct platform_device *pdev)
|
||||
if (sha_dd->caps.has_dma)
|
||||
atmel_sha_dma_cleanup(sha_dd);
|
||||
|
||||
iounmap(sha_dd->io_base);
|
||||
|
||||
clk_put(sha_dd->iclk);
|
||||
|
||||
if (sha_dd->irq >= 0)
|
||||
free_irq(sha_dd->irq, sha_dd);
|
||||
clk_unprepare(sha_dd->iclk);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@@ -306,7 +306,7 @@ static int mv_cesa_dev_dma_init(struct mv_cesa_dev *cesa)
return -ENOMEM;

dma->padding_pool = dmam_pool_create("cesa_padding", dev, 72, 1, 0);
if (!dma->cache_pool)
if (!dma->padding_pool)
return -ENOMEM;

cesa->dma = dma;
@@ -312,8 +312,8 @@ static int altera_gpio_probe(struct platform_device *pdev)
handle_simple_irq, IRQ_TYPE_NONE);

if (ret) {
dev_info(&pdev->dev, "could not add irqchip\n");
return ret;
dev_err(&pdev->dev, "could not add irqchip\n");
goto teardown;
}

gpiochip_set_chained_irqchip(&altera_gc->mmchip.gc,
@@ -326,6 +326,7 @@ static int altera_gpio_probe(struct platform_device *pdev)
skip_irq:
return 0;
teardown:
of_mm_gpiochip_remove(&altera_gc->mmchip);
pr_err("%s: registration failed with status %d\n",
node->full_name, ret);
@ -195,7 +195,7 @@ static int davinci_gpio_of_xlate(struct gpio_chip *gc,
static int davinci_gpio_probe(struct platform_device *pdev)
{
int i, base;
unsigned ngpio;
unsigned ngpio, nbank;
struct davinci_gpio_controller *chips;
struct davinci_gpio_platform_data *pdata;
struct davinci_gpio_regs __iomem *regs;
@ -224,8 +224,9 @@ static int davinci_gpio_probe(struct platform_device *pdev)
if (WARN_ON(ARCH_NR_GPIOS < ngpio))
ngpio = ARCH_NR_GPIOS;

nbank = DIV_ROUND_UP(ngpio, 32);
chips = devm_kzalloc(dev,
ngpio * sizeof(struct davinci_gpio_controller),
nbank * sizeof(struct davinci_gpio_controller),
GFP_KERNEL);
if (!chips)
return -ENOMEM;
@ -511,7 +512,7 @@ static int davinci_gpio_irq_setup(struct platform_device *pdev)
return irq;
}

irq_domain = irq_domain_add_legacy(NULL, ngpio, irq, 0,
irq_domain = irq_domain_add_legacy(dev->of_node, ngpio, irq, 0,
&davinci_gpio_irq_ops,
chips);
if (!irq_domain) {
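In the gpio-davinci hunks above the driver allocates one davinci_gpio_controller per 32-GPIO bank rather than one per GPIO, sizing the array with nbank = DIV_ROUND_UP(ngpio, 32), and it now passes the device's of_node when creating the legacy irq domain so the domain can be matched from the device tree. A small sketch of the rounding-up arithmetic, with a made-up value:

    #include <linux/kernel.h>               /* DIV_ROUND_UP() */

    static unsigned int foo_nbanks(unsigned int ngpio)
    {
            /* e.g. ngpio = 69 -> (69 + 31) / 32 = 3 banks, not 69 entries */
            return DIV_ROUND_UP(ngpio, 32);
    }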
@ -87,6 +87,8 @@ extern int amdgpu_sched_jobs;
extern int amdgpu_sched_hw_submission;
extern int amdgpu_enable_semaphores;
extern int amdgpu_powerplay;
extern unsigned amdgpu_pcie_gen_cap;
extern unsigned amdgpu_pcie_lane_cap;

#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000
#define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */
@ -132,47 +134,6 @@ extern int amdgpu_powerplay;
#define AMDGPU_RESET_VCE (1 << 13)
#define AMDGPU_RESET_VCE1 (1 << 14)

/* CG block flags */
#define AMDGPU_CG_BLOCK_GFX (1 << 0)
#define AMDGPU_CG_BLOCK_MC (1 << 1)
#define AMDGPU_CG_BLOCK_SDMA (1 << 2)
#define AMDGPU_CG_BLOCK_UVD (1 << 3)
#define AMDGPU_CG_BLOCK_VCE (1 << 4)
#define AMDGPU_CG_BLOCK_HDP (1 << 5)
#define AMDGPU_CG_BLOCK_BIF (1 << 6)

/* CG flags */
#define AMDGPU_CG_SUPPORT_GFX_MGCG (1 << 0)
#define AMDGPU_CG_SUPPORT_GFX_MGLS (1 << 1)
#define AMDGPU_CG_SUPPORT_GFX_CGCG (1 << 2)
#define AMDGPU_CG_SUPPORT_GFX_CGLS (1 << 3)
#define AMDGPU_CG_SUPPORT_GFX_CGTS (1 << 4)
#define AMDGPU_CG_SUPPORT_GFX_CGTS_LS (1 << 5)
#define AMDGPU_CG_SUPPORT_GFX_CP_LS (1 << 6)
#define AMDGPU_CG_SUPPORT_GFX_RLC_LS (1 << 7)
#define AMDGPU_CG_SUPPORT_MC_LS (1 << 8)
#define AMDGPU_CG_SUPPORT_MC_MGCG (1 << 9)
#define AMDGPU_CG_SUPPORT_SDMA_LS (1 << 10)
#define AMDGPU_CG_SUPPORT_SDMA_MGCG (1 << 11)
#define AMDGPU_CG_SUPPORT_BIF_LS (1 << 12)
#define AMDGPU_CG_SUPPORT_UVD_MGCG (1 << 13)
#define AMDGPU_CG_SUPPORT_VCE_MGCG (1 << 14)
#define AMDGPU_CG_SUPPORT_HDP_LS (1 << 15)
#define AMDGPU_CG_SUPPORT_HDP_MGCG (1 << 16)

/* PG flags */
#define AMDGPU_PG_SUPPORT_GFX_PG (1 << 0)
#define AMDGPU_PG_SUPPORT_GFX_SMG (1 << 1)
#define AMDGPU_PG_SUPPORT_GFX_DMG (1 << 2)
#define AMDGPU_PG_SUPPORT_UVD (1 << 3)
#define AMDGPU_PG_SUPPORT_VCE (1 << 4)
#define AMDGPU_PG_SUPPORT_CP (1 << 5)
#define AMDGPU_PG_SUPPORT_GDS (1 << 6)
#define AMDGPU_PG_SUPPORT_RLC_SMU_HS (1 << 7)
#define AMDGPU_PG_SUPPORT_SDMA (1 << 8)
#define AMDGPU_PG_SUPPORT_ACP (1 << 9)
#define AMDGPU_PG_SUPPORT_SAMU (1 << 10)

/* GFX current status */
#define AMDGPU_GFX_NORMAL_MODE 0x00000000L
#define AMDGPU_GFX_SAFE_MODE 0x00000001L
@ -606,8 +567,6 @@ struct amdgpu_sa_manager {
uint32_t align;
};

struct amdgpu_sa_bo;

/* sub-allocation buffer */
struct amdgpu_sa_bo {
struct list_head olist;
@ -2360,6 +2319,8 @@ bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
uint32_t flags);
bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm);
bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
unsigned long end);
bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm);
uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
struct ttm_mem_reg *mem);
@ -795,6 +795,12 @@ static int amdgpu_cgs_query_system_info(void *cgs_device,
case CGS_SYSTEM_INFO_PCIE_MLW:
sys_info->value = adev->pm.pcie_mlw_mask;
break;
case CGS_SYSTEM_INFO_CG_FLAGS:
sys_info->value = adev->cg_flags;
break;
case CGS_SYSTEM_INFO_PG_FLAGS:
sys_info->value = adev->pg_flags;
break;
default:
return -ENODEV;
}
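The CGS hunk above lets clients of the common graphics services interface read the clock-gating and power-gating flag masks the same way they already read the PCIe fields. Assuming the cgs_system_info structure keeps its usual size/info_id/value layout (an assumption; the structure is not shown in this excerpt), a caller would look roughly like this:

    /* Sketch only: struct and field names are assumed from the CGS headers. */
    static uint64_t foo_query_cg_flags(void *cgs_device)
    {
            struct cgs_system_info sys_info = {};

            sys_info.size = sizeof(struct cgs_system_info);
            sys_info.info_id = CGS_SYSTEM_INFO_CG_FLAGS;
            if (cgs_query_system_info(cgs_device, &sys_info))
                    return 0;
            return sys_info.value;
    }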
@ -1795,15 +1795,20 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
}

/* post card */
amdgpu_atom_asic_init(adev->mode_info.atom_context);
if (!amdgpu_card_posted(adev))
amdgpu_atom_asic_init(adev->mode_info.atom_context);

r = amdgpu_resume(adev);
if (r)
DRM_ERROR("amdgpu_resume failed (%d).\n", r);

amdgpu_fence_driver_resume(adev);

r = amdgpu_ib_ring_tests(adev);
if (r)
DRM_ERROR("ib ring test failed (%d).\n", r);
if (resume) {
r = amdgpu_ib_ring_tests(adev);
if (r)
DRM_ERROR("ib ring test failed (%d).\n", r);
}

r = amdgpu_late_init(adev);
if (r)
@ -1933,80 +1938,97 @@ retry:
return r;
}

#define AMDGPU_DEFAULT_PCIE_GEN_MASK 0x30007 /* gen: chipset 1/2, asic 1/2/3 */
#define AMDGPU_DEFAULT_PCIE_MLW_MASK 0x2f0000 /* 1/2/4/8/16 lanes */

void amdgpu_get_pcie_info(struct amdgpu_device *adev)
{
u32 mask;
int ret;

if (pci_is_root_bus(adev->pdev->bus))
if (amdgpu_pcie_gen_cap)
adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;

if (amdgpu_pcie_lane_cap)
adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;

/* covers APUs as well */
if (pci_is_root_bus(adev->pdev->bus)) {
if (adev->pm.pcie_gen_mask == 0)
adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
if (adev->pm.pcie_mlw_mask == 0)
adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
return;

if (amdgpu_pcie_gen2 == 0)
return;

if (adev->flags & AMD_IS_APU)
return;

ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
if (!ret) {
adev->pm.pcie_gen_mask = (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);

if (mask & DRM_PCIE_SPEED_25)
adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
if (mask & DRM_PCIE_SPEED_50)
adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2;
if (mask & DRM_PCIE_SPEED_80)
adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3;
}
ret = drm_pcie_get_max_link_width(adev->ddev, &mask);
if (!ret) {
switch (mask) {
case 32:
adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
break;
case 16:
adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
break;
case 12:
adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
break;
case 8:
adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
break;
case 4:
adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
break;
case 2:
adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
break;
case 1:
adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
break;
default:
break;

if (adev->pm.pcie_gen_mask == 0) {
ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
if (!ret) {
adev->pm.pcie_gen_mask = (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);

if (mask & DRM_PCIE_SPEED_25)
adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
if (mask & DRM_PCIE_SPEED_50)
adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2;
if (mask & DRM_PCIE_SPEED_80)
adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3;
} else {
adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
}
}
if (adev->pm.pcie_mlw_mask == 0) {
ret = drm_pcie_get_max_link_width(adev->ddev, &mask);
if (!ret) {
switch (mask) {
case 32:
adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
break;
case 16:
adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
break;
case 12:
adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
break;
case 8:
adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
break;
case 4:
adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
break;
case 2:
adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
break;
case 1:
adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
break;
default:
break;
}
} else {
adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
}
}
}
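Taken together, the reworked amdgpu_get_pcie_info() above establishes a clear precedence: an explicit pcie_gen_cap / pcie_lane_cap module parameter wins, otherwise the mask is autodetected through the DRM helpers, and the root-bus (APU) case or a failed detection falls back to the new AMDGPU_DEFAULT_* masks. A condensed illustration of that decision order, not the driver code itself, with a made-up helper standing in for the detection step:

    static unsigned int foo_detect_gen_mask(void);        /* stand-in for the DRM query */

    static unsigned int pick_gen_mask(unsigned int param_override, bool on_root_bus)
    {
            unsigned int mask = param_override;            /* 1) module parameter wins */

            if (!mask && !on_root_bus)
                    mask = foo_detect_gen_mask();          /* 2) autodetect when possible */
            if (!mask)
                    mask = 0x30007;                        /* 3) AMDGPU_DEFAULT_PCIE_GEN_MASK */
            return mask;
    }

Loading the module with, say, pcie_gen_cap=0x30007 should therefore skip detection entirely and pin the reported capability.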
@ -83,6 +83,8 @@ int amdgpu_sched_jobs = 32;
int amdgpu_sched_hw_submission = 2;
int amdgpu_enable_semaphores = 0;
int amdgpu_powerplay = -1;
unsigned amdgpu_pcie_gen_cap = 0;
unsigned amdgpu_pcie_lane_cap = 0;

MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes");
module_param_named(vramlimit, amdgpu_vram_limit, int, 0600);
@ -170,6 +172,12 @@ MODULE_PARM_DESC(powerplay, "Powerplay component (1 = enable, 0 = disable, -1 =
module_param_named(powerplay, amdgpu_powerplay, int, 0444);
#endif

MODULE_PARM_DESC(pcie_gen_cap, "PCIE Gen Caps (0: autodetect (default))");
module_param_named(pcie_gen_cap, amdgpu_pcie_gen_cap, uint, 0444);

MODULE_PARM_DESC(pcie_lane_cap, "PCIE Lane Caps (0: autodetect (default))");
module_param_named(pcie_lane_cap, amdgpu_pcie_lane_cap, uint, 0444);

static struct pci_device_id pciidlist[] = {
#ifdef CONFIG_DRM_AMDGPU_CIK
/* Kaveri */
@ -142,7 +142,8 @@ static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn,

list_for_each_entry(bo, &node->bos, mn_list) {

if (!bo->tbo.ttm || bo->tbo.ttm->state != tt_bound)
if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start,
end))
continue;

r = amdgpu_bo_reserve(bo, true);
@ -354,12 +354,15 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,

for (i = 0, count = 0; i < AMDGPU_MAX_RINGS; ++i)
if (fences[i])
fences[count++] = fences[i];
fences[count++] = fence_get(fences[i]);

if (count) {
spin_unlock(&sa_manager->wq.lock);
t = fence_wait_any_timeout(fences, count, false,
MAX_SCHEDULE_TIMEOUT);
for (i = 0; i < count; ++i)
fence_put(fences[i]);

r = (t > 0) ? 0 : t;
spin_lock(&sa_manager->wq.lock);
} else {
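The amdgpu_sa hunk above takes a reference on every fence it is about to wait on (fence_get()) before dropping the manager lock, and drops those references (fence_put()) once fence_wait_any_timeout() returns, so a fence that signals and is freed while the lock is released cannot be touched after it is gone. The get/wait/put shape in isolation, using the 4.5-era fence API names that appear in the hunk:

    /* 'fences', 'count' and the lock correspond to the hunk above. */
    for (i = 0, count = 0; i < n; ++i)
            if (fences[i])
                    fences[count++] = fence_get(fences[i]);   /* pin while unlocked */

    if (count) {
            spin_unlock(&lock);
            t = fence_wait_any_timeout(fences, count, false, MAX_SCHEDULE_TIMEOUT);
            for (i = 0; i < count; ++i)
                    fence_put(fences[i]);                     /* release the pins */
            spin_lock(&lock);
    }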
@ -783,6 +783,25 @@ bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm)
return !!gtt->userptr;
}

bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
unsigned long end)
{
struct amdgpu_ttm_tt *gtt = (void *)ttm;
unsigned long size;

if (gtt == NULL)
return false;

if (gtt->ttm.ttm.state != tt_bound || !gtt->userptr)
return false;

size = (unsigned long)gtt->ttm.ttm.num_pages * PAGE_SIZE;
if (gtt->userptr > end || gtt->userptr + size <= start)
return false;

return true;
}

bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
{
struct amdgpu_ttm_tt *gtt = (void *)ttm;
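amdgpu_ttm_tt_affect_userptr(), added above, is essentially an interval-overlap test: the userptr mapping [userptr, userptr + size) is only affected by an invalidation range when it neither starts after the range's end nor ends at or before its start. The predicate on its own:

    #include <linux/types.h>

    /* true if [ptr, ptr + size) overlaps the invalidated range [start, end] */
    static bool foo_range_affects(unsigned long ptr, unsigned long size,
                                  unsigned long start, unsigned long end)
    {
            return !(ptr > end || ptr + size <= start);
    }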
@ -31,6 +31,7 @@
#include "ci_dpm.h"
#include "gfx_v7_0.h"
#include "atom.h"
#include "amd_pcie.h"
#include <linux/seq_file.h>

#include "smu/smu_7_0_1_d.h"
@ -5835,18 +5836,16 @@ static int ci_dpm_init(struct amdgpu_device *adev)
u8 frev, crev;
struct ci_power_info *pi;
int ret;
u32 mask;

pi = kzalloc(sizeof(struct ci_power_info), GFP_KERNEL);
if (pi == NULL)
return -ENOMEM;
adev->pm.dpm.priv = pi;

ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
if (ret)
pi->sys_pcie_mask = 0;
else
pi->sys_pcie_mask = mask;
pi->sys_pcie_mask =
(adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_MASK) >>
CAIL_PCIE_LINK_SPEED_SUPPORT_SHIFT;

pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;

pi->pcie_gen_performance.max = AMDGPU_PCIE_GEN1;
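In ci_dpm_init() above, the PCIe capability is no longer re-queried from DRM; it is derived from the already-populated adev->pm.pcie_gen_mask by masking out the CAIL link-speed bits and shifting them down to bit 0. The mask-and-shift idiom in isolation; the mask and shift values below are placeholders, not the real CAIL_* definitions:

    #define FOO_LINK_SPEED_MASK   0x00070000u   /* placeholder value */
    #define FOO_LINK_SPEED_SHIFT  16            /* placeholder value */

    static unsigned int foo_link_speed_field(unsigned int gen_mask)
    {
            return (gen_mask & FOO_LINK_SPEED_MASK) >> FOO_LINK_SPEED_SHIFT;
    }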
@ -1762,6 +1762,9 @@ static void cik_program_aspm(struct amdgpu_device *adev)
if (amdgpu_aspm == 0)
return;

if (pci_is_root_bus(adev->pdev->bus))
return;

/* XXX double check APUs */
if (adev->flags & AMD_IS_APU)
return;
@ -2332,72 +2335,72 @@ static int cik_common_early_init(void *handle)
switch (adev->asic_type) {
case CHIP_BONAIRE:
adev->cg_flags =
AMDGPU_CG_SUPPORT_GFX_MGCG |
AMDGPU_CG_SUPPORT_GFX_MGLS |
/*AMDGPU_CG_SUPPORT_GFX_CGCG |*/
AMDGPU_CG_SUPPORT_GFX_CGLS |
AMDGPU_CG_SUPPORT_GFX_CGTS |
AMDGPU_CG_SUPPORT_GFX_CGTS_LS |
AMDGPU_CG_SUPPORT_GFX_CP_LS |
AMDGPU_CG_SUPPORT_MC_LS |
AMDGPU_CG_SUPPORT_MC_MGCG |
AMDGPU_CG_SUPPORT_SDMA_MGCG |
AMDGPU_CG_SUPPORT_SDMA_LS |
AMDGPU_CG_SUPPORT_BIF_LS |
AMDGPU_CG_SUPPORT_VCE_MGCG |
AMDGPU_CG_SUPPORT_UVD_MGCG |
AMDGPU_CG_SUPPORT_HDP_LS |
AMDGPU_CG_SUPPORT_HDP_MGCG;
AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_MGLS |
/*AMD_CG_SUPPORT_GFX_CGCG |*/
AMD_CG_SUPPORT_GFX_CGLS |
AMD_CG_SUPPORT_GFX_CGTS |
AMD_CG_SUPPORT_GFX_CGTS_LS |
AMD_CG_SUPPORT_GFX_CP_LS |
AMD_CG_SUPPORT_MC_LS |
AMD_CG_SUPPORT_MC_MGCG |
AMD_CG_SUPPORT_SDMA_MGCG |
AMD_CG_SUPPORT_SDMA_LS |
AMD_CG_SUPPORT_BIF_LS |
AMD_CG_SUPPORT_VCE_MGCG |
AMD_CG_SUPPORT_UVD_MGCG |
AMD_CG_SUPPORT_HDP_LS |
AMD_CG_SUPPORT_HDP_MGCG;
adev->pg_flags = 0;
adev->external_rev_id = adev->rev_id + 0x14;
break;
case CHIP_HAWAII:
adev->cg_flags =
AMDGPU_CG_SUPPORT_GFX_MGCG |
AMDGPU_CG_SUPPORT_GFX_MGLS |
/*AMDGPU_CG_SUPPORT_GFX_CGCG |*/
AMDGPU_CG_SUPPORT_GFX_CGLS |
AMDGPU_CG_SUPPORT_GFX_CGTS |
AMDGPU_CG_SUPPORT_GFX_CP_LS |
AMDGPU_CG_SUPPORT_MC_LS |
AMDGPU_CG_SUPPORT_MC_MGCG |
AMDGPU_CG_SUPPORT_SDMA_MGCG |
AMDGPU_CG_SUPPORT_SDMA_LS |
AMDGPU_CG_SUPPORT_BIF_LS |
AMDGPU_CG_SUPPORT_VCE_MGCG |
AMDGPU_CG_SUPPORT_UVD_MGCG |
AMDGPU_CG_SUPPORT_HDP_LS |
AMDGPU_CG_SUPPORT_HDP_MGCG;
AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_MGLS |
/*AMD_CG_SUPPORT_GFX_CGCG |*/
AMD_CG_SUPPORT_GFX_CGLS |
AMD_CG_SUPPORT_GFX_CGTS |
AMD_CG_SUPPORT_GFX_CP_LS |
AMD_CG_SUPPORT_MC_LS |
AMD_CG_SUPPORT_MC_MGCG |
AMD_CG_SUPPORT_SDMA_MGCG |
AMD_CG_SUPPORT_SDMA_LS |
AMD_CG_SUPPORT_BIF_LS |
AMD_CG_SUPPORT_VCE_MGCG |
AMD_CG_SUPPORT_UVD_MGCG |
AMD_CG_SUPPORT_HDP_LS |
AMD_CG_SUPPORT_HDP_MGCG;
adev->pg_flags = 0;
adev->external_rev_id = 0x28;
break;
case CHIP_KAVERI:
adev->cg_flags =
AMDGPU_CG_SUPPORT_GFX_MGCG |
AMDGPU_CG_SUPPORT_GFX_MGLS |
/*AMDGPU_CG_SUPPORT_GFX_CGCG |*/
AMDGPU_CG_SUPPORT_GFX_CGLS |
AMDGPU_CG_SUPPORT_GFX_CGTS |
AMDGPU_CG_SUPPORT_GFX_CGTS_LS |
AMDGPU_CG_SUPPORT_GFX_CP_LS |
AMDGPU_CG_SUPPORT_SDMA_MGCG |
AMDGPU_CG_SUPPORT_SDMA_LS |
AMDGPU_CG_SUPPORT_BIF_LS |
AMDGPU_CG_SUPPORT_VCE_MGCG |
AMDGPU_CG_SUPPORT_UVD_MGCG |
AMDGPU_CG_SUPPORT_HDP_LS |
AMDGPU_CG_SUPPORT_HDP_MGCG;
AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_MGLS |
/*AMD_CG_SUPPORT_GFX_CGCG |*/
AMD_CG_SUPPORT_GFX_CGLS |
AMD_CG_SUPPORT_GFX_CGTS |
AMD_CG_SUPPORT_GFX_CGTS_LS |
AMD_CG_SUPPORT_GFX_CP_LS |
AMD_CG_SUPPORT_SDMA_MGCG |
AMD_CG_SUPPORT_SDMA_LS |
AMD_CG_SUPPORT_BIF_LS |
AMD_CG_SUPPORT_VCE_MGCG |
AMD_CG_SUPPORT_UVD_MGCG |
AMD_CG_SUPPORT_HDP_LS |
AMD_CG_SUPPORT_HDP_MGCG;
adev->pg_flags =
/*AMDGPU_PG_SUPPORT_GFX_PG |
AMDGPU_PG_SUPPORT_GFX_SMG |
AMDGPU_PG_SUPPORT_GFX_DMG |*/
AMDGPU_PG_SUPPORT_UVD |
/*AMDGPU_PG_SUPPORT_VCE |
AMDGPU_PG_SUPPORT_CP |
AMDGPU_PG_SUPPORT_GDS |
AMDGPU_PG_SUPPORT_RLC_SMU_HS |
AMDGPU_PG_SUPPORT_ACP |
AMDGPU_PG_SUPPORT_SAMU |*/
/*AMD_PG_SUPPORT_GFX_PG |
AMD_PG_SUPPORT_GFX_SMG |
AMD_PG_SUPPORT_GFX_DMG |*/
AMD_PG_SUPPORT_UVD |
/*AMD_PG_SUPPORT_VCE |
AMD_PG_SUPPORT_CP |
AMD_PG_SUPPORT_GDS |
AMD_PG_SUPPORT_RLC_SMU_HS |
AMD_PG_SUPPORT_ACP |
AMD_PG_SUPPORT_SAMU |*/
0;
if (adev->pdev->device == 0x1312 ||
adev->pdev->device == 0x1316 ||
@ -2409,29 +2412,29 @@ static int cik_common_early_init(void *handle)
case CHIP_KABINI:
case CHIP_MULLINS:
adev->cg_flags =
AMDGPU_CG_SUPPORT_GFX_MGCG |
AMDGPU_CG_SUPPORT_GFX_MGLS |
/*AMDGPU_CG_SUPPORT_GFX_CGCG |*/
AMDGPU_CG_SUPPORT_GFX_CGLS |
AMDGPU_CG_SUPPORT_GFX_CGTS |
AMDGPU_CG_SUPPORT_GFX_CGTS_LS |
AMDGPU_CG_SUPPORT_GFX_CP_LS |
AMDGPU_CG_SUPPORT_SDMA_MGCG |
AMDGPU_CG_SUPPORT_SDMA_LS |
AMDGPU_CG_SUPPORT_BIF_LS |
AMDGPU_CG_SUPPORT_VCE_MGCG |
AMDGPU_CG_SUPPORT_UVD_MGCG |
AMDGPU_CG_SUPPORT_HDP_LS |
AMDGPU_CG_SUPPORT_HDP_MGCG;
AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_MGLS |
/*AMD_CG_SUPPORT_GFX_CGCG |*/
AMD_CG_SUPPORT_GFX_CGLS |
AMD_CG_SUPPORT_GFX_CGTS |
AMD_CG_SUPPORT_GFX_CGTS_LS |
AMD_CG_SUPPORT_GFX_CP_LS |
AMD_CG_SUPPORT_SDMA_MGCG |
AMD_CG_SUPPORT_SDMA_LS |
AMD_CG_SUPPORT_BIF_LS |
AMD_CG_SUPPORT_VCE_MGCG |
AMD_CG_SUPPORT_UVD_MGCG |
AMD_CG_SUPPORT_HDP_LS |
AMD_CG_SUPPORT_HDP_MGCG;
adev->pg_flags =
/*AMDGPU_PG_SUPPORT_GFX_PG |
AMDGPU_PG_SUPPORT_GFX_SMG | */
AMDGPU_PG_SUPPORT_UVD |
/*AMDGPU_PG_SUPPORT_VCE |
AMDGPU_PG_SUPPORT_CP |
AMDGPU_PG_SUPPORT_GDS |
AMDGPU_PG_SUPPORT_RLC_SMU_HS |
AMDGPU_PG_SUPPORT_SAMU |*/
/*AMD_PG_SUPPORT_GFX_PG |
AMD_PG_SUPPORT_GFX_SMG | */
AMD_PG_SUPPORT_UVD |
/*AMD_PG_SUPPORT_VCE |
AMD_PG_SUPPORT_CP |
AMD_PG_SUPPORT_GDS |
AMD_PG_SUPPORT_RLC_SMU_HS |
AMD_PG_SUPPORT_SAMU |*/
0;
if (adev->asic_type == CHIP_KABINI) {
if (adev->rev_id == 0)
@ -885,7 +885,7 @@ static void cik_enable_sdma_mgcg(struct amdgpu_device *adev,
{
u32 orig, data;

if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_SDMA_MGCG)) {
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG)) {
WREG32(mmSDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET, 0x00000100);
WREG32(mmSDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET, 0x00000100);
} else {
@ -906,7 +906,7 @@ static void cik_enable_sdma_mgls(struct amdgpu_device *adev,
{
u32 orig, data;

if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_SDMA_LS)) {
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS)) {
orig = data = RREG32(mmSDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET);
data |= 0x100;
if (orig != data)
@ -445,13 +445,13 @@ static int cz_dpm_init(struct amdgpu_device *adev)
pi->gfx_pg_threshold = 500;
pi->caps_fps = true;
/* uvd */
pi->caps_uvd_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_UVD) ? true : false;
pi->caps_uvd_pg = (adev->pg_flags & AMD_PG_SUPPORT_UVD) ? true : false;
pi->caps_uvd_dpm = true;
/* vce */
pi->caps_vce_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_VCE) ? true : false;
pi->caps_vce_pg = (adev->pg_flags & AMD_PG_SUPPORT_VCE) ? true : false;
pi->caps_vce_dpm = true;
/* acp */
pi->caps_acp_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_ACP) ? true : false;
pi->caps_acp_pg = (adev->pg_flags & AMD_PG_SUPPORT_ACP) ? true : false;
pi->caps_acp_dpm = true;

pi->caps_stable_power_state = false;
Some files were not shown because too many files have changed in this diff.