Merge remote-tracking branch 'robh/for-next' into devicetree/next
commit d88cf7d7b4
@@ -124,12 +124,11 @@ the default being 204800 sectors (or 100MB).
 Updating on-disk metadata
 -------------------------

-On-disk metadata is committed every time a REQ_SYNC or REQ_FUA bio is
-written. If no such requests are made then commits will occur every
-second. This means the cache behaves like a physical disk that has a
-write cache (the same is true of the thin-provisioning target). If
-power is lost you may lose some recent writes. The metadata should
-always be consistent in spite of any crash.
+On-disk metadata is committed every time a FLUSH or FUA bio is written.
+If no such requests are made then commits will occur every second. This
+means the cache behaves like a physical disk that has a volatile write
+cache. If power is lost you may lose some recent writes. The metadata
+should always be consistent in spite of any crash.

 The 'dirty' state for a cache block changes far too frequently for us
 to keep updating it on the fly. So we treat it as a hint. In normal
@@ -116,6 +116,35 @@ Resuming a device with a new table itself triggers an event so the
 userspace daemon can use this to detect a situation where a new table
 already exceeds the threshold.

+A low water mark for the metadata device is maintained in the kernel and
+will trigger a dm event if free space on the metadata device drops below
+it.
+
+Updating on-disk metadata
+-------------------------
+
+On-disk metadata is committed every time a FLUSH or FUA bio is written.
+If no such requests are made then commits will occur every second. This
+means the thin-provisioning target behaves like a physical disk that has
+a volatile write cache. If power is lost you may lose some recent
+writes. The metadata should always be consistent in spite of any crash.
+
+If data space is exhausted the pool will either error or queue IO
+according to the configuration (see: error_if_no_space). If metadata
+space is exhausted or a metadata operation fails: the pool will error IO
+until the pool is taken offline and repair is performed to 1) fix any
+potential inconsistencies and 2) clear the flag that imposes repair.
+Once the pool's metadata device is repaired it may be resized, which
+will allow the pool to return to normal operation. Note that if a pool
+is flagged as needing repair, the pool's data and metadata devices
+cannot be resized until repair is performed. It should also be noted
+that when the pool's metadata space is exhausted the current metadata
+transaction is aborted. Given that the pool will cache IO whose
+completion may have already been acknowledged to upper IO layers
+(e.g. filesystem) it is strongly suggested that consistency checks
+(e.g. fsck) be performed on those layers when repair of the pool is
+required.
+
 Thin provisioning
 -----------------

@@ -258,10 +287,9 @@ ii) Status
 should register for the event and then check the target's status.

 held metadata root:
-The location, in sectors, of the metadata root that has been
+The location, in blocks, of the metadata root that has been
 'held' for userspace read access. '-' indicates there is no
-held root. This feature is not yet implemented so '-' is
-always returned.
+held root.

 discard_passdown|no_discard_passdown
 Whether or not discards are actually being passed down to the
@@ -21,9 +21,9 @@ Required Properties:
 must appear in the same order as the output clocks.
 - #clock-cells: Must be 1
 - clock-output-names: The name of the clocks as free-form strings
-- renesas,indices: Indices of the gate clocks into the group (0 to 31)
+- renesas,clock-indices: Indices of the gate clocks into the group (0 to 31)

-The clocks, clock-output-names and renesas,indices properties contain one
+The clocks, clock-output-names and renesas,clock-indices properties contain one
 entry per gate clock. The MSTP groups are sparsely populated. Unimplemented
 gate clocks must not be declared.

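Editor's note (not part of the patch above): for readers unfamiliar with the renamed property, here is a minimal sketch of an MSTP gate-clock node using renesas,clock-indices. The SoC name, unit address, register value, parent clocks, output names and indices are placeholders chosen for illustration, not values taken from this commit.

	mstp_clks: mstp_clks@e6150134 {
		/* replace <soc> with the actual SoC binding name */
		compatible = "renesas,<soc>-mstp-clocks", "renesas,cpg-mstp-clocks";
		reg = <0xe6150134 4>;		/* placeholder MSTP control register */
		clocks = <&p_clk>, <&p_clk>;	/* one parent per implemented gate */
		#clock-cells = <1>;
		clock-output-names = "gate-a", "gate-b";
		/* bit positions of the two gates within this 32-bit group (0 to 31) */
		renesas,clock-indices = <7 16>;
	};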
Documentation/devicetree/bindings/net/opencores-ethoc.txt (new file)
@@ -0,0 +1,22 @@
+* OpenCores MAC 10/100 Mbps
+
+Required properties:
+- compatible: Should be "opencores,ethoc".
+- reg: two memory regions (address and length),
+  first region is for the device registers and descriptor rings,
+  second is for the device packet memory.
+- interrupts: interrupt for the device.
+
+Optional properties:
+- clocks: phandle to refer to the clk used as per
+  Documentation/devicetree/bindings/clock/clock-bindings.txt
+
+Examples:
+
+enet0: ethoc@fd030000 {
+compatible = "opencores,ethoc";
+reg = <0xfd030000 0x4000 0xfd800000 0x4000>;
+interrupts = <1>;
+local-mac-address = [00 50 c2 13 6f 00];
+clocks = <&osc>;
+};
@@ -1,4 +1,4 @@
-Broadcom Capri Pin Controller
+Broadcom BCM281xx Pin Controller

 This is a pin controller for the Broadcom BCM281xx SoC family, which includes
 BCM11130, BCM11140, BCM11351, BCM28145, and BCM28155 SoCs.
@@ -7,14 +7,14 @@ BCM11130, BCM11140, BCM11351, BCM28145, and BCM28155 SoCs.

 Required Properties:

-- compatible: Must be "brcm,capri-pinctrl".
+- compatible: Must be "brcm,bcm11351-pinctrl"
 - reg: Base address of the PAD Controller register block and the size
 of the block.

 For example, the following is the bare minimum node:

 pinctrl@35004800 {
-compatible = "brcm,capri-pinctrl";
+compatible = "brcm,bcm11351-pinctrl";
 reg = <0x35004800 0x430>;
 };

@@ -119,7 +119,7 @@ Optional Properties (for HDMI pins):
 Example:
 // pin controller node
 pinctrl@35004800 {
-compatible = "brcm,capri-pinctrl";
+compatible = "brcmbcm11351-pinctrl";
 reg = <0x35004800 0x430>;

 // pin configuration node
@@ -3,6 +3,7 @@ Device tree binding vendor prefix registry. Keep list in alphabetical order.
 This isn't an exhaustive list, but you should add new prefixes to it before
 using them to avoid name-space collisions.

+abilis	Abilis Systems
 active-semi	Active-Semi International Inc
 ad	Avionic Design GmbH
 adi	Analog Devices, Inc.
@@ -11,14 +12,17 @@ ak	Asahi Kasei Corp.
 allwinner	Allwinner Technology Co., Ltd.
 altr	Altera Corp.
 amcc	Applied Micro Circuits Corporation (APM, formally AMCC)
+amd	Advanced Micro Devices (AMD), Inc.
 amstaos	AMS-Taos Inc.
 apm	Applied Micro Circuits Corporation (APM)
 arm	ARM Ltd.
+armadeus	ARMadeus Systems SARL
 atmel	Atmel Corporation
 auo	AU Optronics Corporation
 avago	Avago Technologies
 bosch	Bosch Sensortec GmbH
 brcm	Broadcom Corporation
+calxeda	Calxeda
 capella	Capella Microsystems, Inc
 cavium	Cavium, Inc.
 cdns	Cadence Design Systems Inc.
@@ -26,8 +30,10 @@ chrp	Common Hardware Reference Platform
 chunghwa	Chunghwa Picture Tubes Ltd.
 cirrus	Cirrus Logic, Inc.
 cortina	Cortina Systems, Inc.
+crystalfontz	Crystalfontz America, Inc.
 dallas	Maxim Integrated Products (formerly Dallas Semiconductor)
 davicom	DAVICOM Semiconductor, Inc.
+dlink	D-Link Systems, Inc.
 denx	Denx Software Engineering
 edt	Emerging Display Technologies
 emmicro	EM Microelectronic
@@ -37,7 +43,9 @@ est	ESTeem Wireless Modems
 fsl	Freescale Semiconductor
 GEFanuc	GE Fanuc Intelligent Platforms Embedded Systems, Inc.
 gef	GE Fanuc Intelligent Platforms Embedded Systems, Inc.
+globalscale	Globalscale Technologies, Inc.
 gmt	Global Mixed-mode Technology, Inc.
+google	Google, Inc.
 gumstix	Gumstix, Inc.
 haoyu	Haoyu Microelectronic Co. Ltd.
 hisilicon	Hisilicon Limited.
@@ -46,9 +54,12 @@ hp	Hewlett Packard
 ibm	International Business Machines (IBM)
 idt	Integrated Device Technologies, Inc.
 img	Imagination Technologies Ltd.
+intel	Intel Corporation
 intercontrol	Inter Control Group
 isl	Intersil
 karo	Ka-Ro electronics GmbH
+lacie	LaCie
+lantiq	Lantiq Semiconductor
 lg	LG Corporation
 linux	Linux-specific binding
 lsi	LSI Corp. (LSI Logic)
@@ -56,12 +67,16 @@ marvell	Marvell Technology Group Ltd.
 maxim	Maxim Integrated Products
 microchip	Microchip Technology Inc.
 mosaixtech	Mosaix Technologies, Inc.
+moxa	Moxa
 national	National Semiconductor
 neonode	Neonode Inc.
+netgear	NETGEAR
 nintendo	Nintendo
+nokia	Nokia
 nvidia	NVIDIA
 nxp	NXP Semiconductors
 onnn	ON Semiconductor Corp.
+opencores	OpenCores.org
 panasonic	Panasonic Corporation
 phytec	PHYTEC Messtechnik GmbH
 picochip	Picochip Ltd
@@ -80,6 +95,7 @@ sil	Silicon Image
 silabs	Silicon Laboratories
 simtek
 sirf	SiRF Technology, Inc.
+smsc	Standard Microsystems Corporation
 snps	Synopsys, Inc.
 spansion	Spansion Inc.
 st	STMicroelectronics
@@ -94,4 +110,5 @@ via	VIA Technologies, Inc.
 winbond	Winbond Electronics corp.
 wlf	Wolfson Microelectronics
 wm	Wondermedia Technologies, Inc.
+xes	Extreme Engineering Solutions (X-ES)
 xlnx	Xilinx
@@ -554,12 +554,6 @@ solution for a couple of reasons:
 not specified in the struct can_frame and therefore it is only valid in
 CANFD_MTU sized CAN FD frames.

-As long as the payload length is <=8 the received CAN frames from CAN FD
-capable CAN devices can be received and read by legacy sockets too. When
-user-generated CAN FD frames have a payload length <=8 these can be send
-by legacy CAN network interfaces too. Sending CAN FD frames with payload
-length > 8 to a legacy CAN network interface returns an -EMSGSIZE error.
-
 Implementation hint for new CAN applications:

 To build a CAN FD aware application use struct canfd_frame as basic CAN
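Editor's note (not part of the patch above): since the surrounding text advises new applications to use struct canfd_frame as the basic CAN data structure, here is a minimal user-space sketch of a CAN_RAW socket with CAN FD frames enabled. The interface name "can0" and the bare-bones error handling are illustrative assumptions, not part of this commit.

/* Minimal CAN FD receive sketch (illustrative only). */
#include <stdio.h>
#include <unistd.h>
#include <net/if.h>
#include <sys/socket.h>
#include <linux/can.h>
#include <linux/can/raw.h>

int main(void)
{
	int s = socket(PF_CAN, SOCK_RAW, CAN_RAW);
	int enable_canfd = 1;
	struct sockaddr_can addr = { 0 };
	struct canfd_frame frame;
	ssize_t nbytes;

	if (s < 0)
		return 1;

	/* Allow both classic CAN and CAN FD frames on this socket. */
	setsockopt(s, SOL_CAN_RAW, CAN_RAW_FD_FRAMES,
		   &enable_canfd, sizeof(enable_canfd));

	addr.can_family = AF_CAN;
	addr.can_ifindex = if_nametoindex("can0");	/* assumed interface name */
	if (bind(s, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		return 1;

	/* read() returns CAN_MTU for classic frames and CANFD_MTU for CAN FD. */
	nbytes = read(s, &frame, sizeof(frame));
	if (nbytes == CANFD_MTU)
		printf("CAN FD frame with %d data bytes\n", frame.len);
	else if (nbytes == CAN_MTU)
		printf("classic CAN frame with %d data bytes\n", frame.len);

	close(s);
	return 0;
}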
112
MAINTAINERS
112
MAINTAINERS
@ -73,7 +73,8 @@ Descriptions of section entries:
|
|||||||
L: Mailing list that is relevant to this area
|
L: Mailing list that is relevant to this area
|
||||||
W: Web-page with status/info
|
W: Web-page with status/info
|
||||||
Q: Patchwork web based patch tracking system site
|
Q: Patchwork web based patch tracking system site
|
||||||
T: SCM tree type and location. Type is one of: git, hg, quilt, stgit, topgit.
|
T: SCM tree type and location.
|
||||||
|
Type is one of: git, hg, quilt, stgit, topgit
|
||||||
S: Status, one of the following:
|
S: Status, one of the following:
|
||||||
Supported: Someone is actually paid to look after this.
|
Supported: Someone is actually paid to look after this.
|
||||||
Maintained: Someone actually looks after it.
|
Maintained: Someone actually looks after it.
|
||||||
@ -473,7 +474,7 @@ F: net/rxrpc/af_rxrpc.c
|
|||||||
|
|
||||||
AGPGART DRIVER
|
AGPGART DRIVER
|
||||||
M: David Airlie <airlied@linux.ie>
|
M: David Airlie <airlied@linux.ie>
|
||||||
T: git git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6.git
|
T: git git://people.freedesktop.org/~airlied/linux (part of drm maint)
|
||||||
S: Maintained
|
S: Maintained
|
||||||
F: drivers/char/agp/
|
F: drivers/char/agp/
|
||||||
F: include/linux/agp*
|
F: include/linux/agp*
|
||||||
@ -1612,11 +1613,11 @@ S: Maintained
|
|||||||
F: drivers/net/wireless/atmel*
|
F: drivers/net/wireless/atmel*
|
||||||
|
|
||||||
ATTO EXPRESSSAS SAS/SATA RAID SCSI DRIVER
|
ATTO EXPRESSSAS SAS/SATA RAID SCSI DRIVER
|
||||||
M: Bradley Grove <linuxdrivers@attotech.com>
|
M: Bradley Grove <linuxdrivers@attotech.com>
|
||||||
L: linux-scsi@vger.kernel.org
|
L: linux-scsi@vger.kernel.org
|
||||||
W: http://www.attotech.com
|
W: http://www.attotech.com
|
||||||
S: Supported
|
S: Supported
|
||||||
F: drivers/scsi/esas2r
|
F: drivers/scsi/esas2r
|
||||||
|
|
||||||
AUDIT SUBSYSTEM
|
AUDIT SUBSYSTEM
|
||||||
M: Eric Paris <eparis@redhat.com>
|
M: Eric Paris <eparis@redhat.com>
|
||||||
@ -2159,7 +2160,7 @@ F: Documentation/zh_CN/
|
|||||||
|
|
||||||
CHIPIDEA USB HIGH SPEED DUAL ROLE CONTROLLER
|
CHIPIDEA USB HIGH SPEED DUAL ROLE CONTROLLER
|
||||||
M: Peter Chen <Peter.Chen@freescale.com>
|
M: Peter Chen <Peter.Chen@freescale.com>
|
||||||
T: git://github.com/hzpeterchen/linux-usb.git
|
T: git git://github.com/hzpeterchen/linux-usb.git
|
||||||
L: linux-usb@vger.kernel.org
|
L: linux-usb@vger.kernel.org
|
||||||
S: Maintained
|
S: Maintained
|
||||||
F: drivers/usb/chipidea/
|
F: drivers/usb/chipidea/
|
||||||
@ -2179,9 +2180,9 @@ S: Supported
|
|||||||
F: drivers/net/ethernet/cisco/enic/
|
F: drivers/net/ethernet/cisco/enic/
|
||||||
|
|
||||||
CISCO VIC LOW LATENCY NIC DRIVER
|
CISCO VIC LOW LATENCY NIC DRIVER
|
||||||
M: Upinder Malhi <umalhi@cisco.com>
|
M: Upinder Malhi <umalhi@cisco.com>
|
||||||
S: Supported
|
S: Supported
|
||||||
F: drivers/infiniband/hw/usnic
|
F: drivers/infiniband/hw/usnic
|
||||||
|
|
||||||
CIRRUS LOGIC EP93XX ETHERNET DRIVER
|
CIRRUS LOGIC EP93XX ETHERNET DRIVER
|
||||||
M: Hartley Sweeten <hsweeten@visionengravers.com>
|
M: Hartley Sweeten <hsweeten@visionengravers.com>
|
||||||
@ -2378,20 +2379,20 @@ F: drivers/cpufreq/arm_big_little.c
|
|||||||
F: drivers/cpufreq/arm_big_little_dt.c
|
F: drivers/cpufreq/arm_big_little_dt.c
|
||||||
|
|
||||||
CPUIDLE DRIVER - ARM BIG LITTLE
|
CPUIDLE DRIVER - ARM BIG LITTLE
|
||||||
M: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
|
M: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
|
||||||
M: Daniel Lezcano <daniel.lezcano@linaro.org>
|
M: Daniel Lezcano <daniel.lezcano@linaro.org>
|
||||||
L: linux-pm@vger.kernel.org
|
L: linux-pm@vger.kernel.org
|
||||||
L: linux-arm-kernel@lists.infradead.org
|
L: linux-arm-kernel@lists.infradead.org
|
||||||
T: git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git
|
T: git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git
|
||||||
S: Maintained
|
S: Maintained
|
||||||
F: drivers/cpuidle/cpuidle-big_little.c
|
F: drivers/cpuidle/cpuidle-big_little.c
|
||||||
|
|
||||||
CPUIDLE DRIVERS
|
CPUIDLE DRIVERS
|
||||||
M: Rafael J. Wysocki <rjw@rjwysocki.net>
|
M: Rafael J. Wysocki <rjw@rjwysocki.net>
|
||||||
M: Daniel Lezcano <daniel.lezcano@linaro.org>
|
M: Daniel Lezcano <daniel.lezcano@linaro.org>
|
||||||
L: linux-pm@vger.kernel.org
|
L: linux-pm@vger.kernel.org
|
||||||
S: Maintained
|
S: Maintained
|
||||||
T: git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git
|
T: git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git
|
||||||
F: drivers/cpuidle/*
|
F: drivers/cpuidle/*
|
||||||
F: include/linux/cpuidle.h
|
F: include/linux/cpuidle.h
|
||||||
|
|
||||||
@ -2458,9 +2459,9 @@ S: Maintained
|
|||||||
F: sound/pci/cs5535audio/
|
F: sound/pci/cs5535audio/
|
||||||
|
|
||||||
CW1200 WLAN driver
|
CW1200 WLAN driver
|
||||||
M: Solomon Peachy <pizza@shaftnet.org>
|
M: Solomon Peachy <pizza@shaftnet.org>
|
||||||
S: Maintained
|
S: Maintained
|
||||||
F: drivers/net/wireless/cw1200/
|
F: drivers/net/wireless/cw1200/
|
||||||
|
|
||||||
CX18 VIDEO4LINUX DRIVER
|
CX18 VIDEO4LINUX DRIVER
|
||||||
M: Andy Walls <awalls@md.metrocast.net>
|
M: Andy Walls <awalls@md.metrocast.net>
|
||||||
@ -3095,6 +3096,8 @@ F: fs/ecryptfs/
|
|||||||
|
|
||||||
EDAC-CORE
|
EDAC-CORE
|
||||||
M: Doug Thompson <dougthompson@xmission.com>
|
M: Doug Thompson <dougthompson@xmission.com>
|
||||||
|
M: Borislav Petkov <bp@alien8.de>
|
||||||
|
M: Mauro Carvalho Chehab <m.chehab@samsung.com>
|
||||||
L: linux-edac@vger.kernel.org
|
L: linux-edac@vger.kernel.org
|
||||||
W: bluesmoke.sourceforge.net
|
W: bluesmoke.sourceforge.net
|
||||||
S: Supported
|
S: Supported
|
||||||
@ -4558,6 +4561,7 @@ F: Documentation/networking/ixgbevf.txt
|
|||||||
F: Documentation/networking/i40e.txt
|
F: Documentation/networking/i40e.txt
|
||||||
F: Documentation/networking/i40evf.txt
|
F: Documentation/networking/i40evf.txt
|
||||||
F: drivers/net/ethernet/intel/
|
F: drivers/net/ethernet/intel/
|
||||||
|
F: drivers/net/ethernet/intel/*/
|
||||||
|
|
||||||
INTEL-MID GPIO DRIVER
|
INTEL-MID GPIO DRIVER
|
||||||
M: David Cohen <david.a.cohen@linux.intel.com>
|
M: David Cohen <david.a.cohen@linux.intel.com>
|
||||||
@ -4914,7 +4918,7 @@ F: drivers/staging/ktap/
|
|||||||
KCONFIG
|
KCONFIG
|
||||||
M: "Yann E. MORIN" <yann.morin.1998@free.fr>
|
M: "Yann E. MORIN" <yann.morin.1998@free.fr>
|
||||||
L: linux-kbuild@vger.kernel.org
|
L: linux-kbuild@vger.kernel.org
|
||||||
T: git://gitorious.org/linux-kconfig/linux-kconfig
|
T: git git://gitorious.org/linux-kconfig/linux-kconfig
|
||||||
S: Maintained
|
S: Maintained
|
||||||
F: Documentation/kbuild/kconfig-language.txt
|
F: Documentation/kbuild/kconfig-language.txt
|
||||||
F: scripts/kconfig/
|
F: scripts/kconfig/
|
||||||
@ -5471,11 +5475,11 @@ S: Maintained
|
|||||||
F: drivers/media/tuners/m88ts2022*
|
F: drivers/media/tuners/m88ts2022*
|
||||||
|
|
||||||
MA901 MASTERKIT USB FM RADIO DRIVER
|
MA901 MASTERKIT USB FM RADIO DRIVER
|
||||||
M: Alexey Klimov <klimov.linux@gmail.com>
|
M: Alexey Klimov <klimov.linux@gmail.com>
|
||||||
L: linux-media@vger.kernel.org
|
L: linux-media@vger.kernel.org
|
||||||
T: git git://linuxtv.org/media_tree.git
|
T: git git://linuxtv.org/media_tree.git
|
||||||
S: Maintained
|
S: Maintained
|
||||||
F: drivers/media/radio/radio-ma901.c
|
F: drivers/media/radio/radio-ma901.c
|
||||||
|
|
||||||
MAC80211
|
MAC80211
|
||||||
M: Johannes Berg <johannes@sipsolutions.net>
|
M: Johannes Berg <johannes@sipsolutions.net>
|
||||||
@ -5636,7 +5640,7 @@ F: drivers/scsi/megaraid/
|
|||||||
|
|
||||||
MELLANOX ETHERNET DRIVER (mlx4_en)
|
MELLANOX ETHERNET DRIVER (mlx4_en)
|
||||||
M: Amir Vadai <amirv@mellanox.com>
|
M: Amir Vadai <amirv@mellanox.com>
|
||||||
L: netdev@vger.kernel.org
|
L: netdev@vger.kernel.org
|
||||||
S: Supported
|
S: Supported
|
||||||
W: http://www.mellanox.com
|
W: http://www.mellanox.com
|
||||||
Q: http://patchwork.ozlabs.org/project/netdev/list/
|
Q: http://patchwork.ozlabs.org/project/netdev/list/
|
||||||
@ -5677,7 +5681,7 @@ F: include/linux/mtd/
|
|||||||
F: include/uapi/mtd/
|
F: include/uapi/mtd/
|
||||||
|
|
||||||
MEN A21 WATCHDOG DRIVER
|
MEN A21 WATCHDOG DRIVER
|
||||||
M: Johannes Thumshirn <johannes.thumshirn@men.de>
|
M: Johannes Thumshirn <johannes.thumshirn@men.de>
|
||||||
L: linux-watchdog@vger.kernel.org
|
L: linux-watchdog@vger.kernel.org
|
||||||
S: Supported
|
S: Supported
|
||||||
F: drivers/watchdog/mena21_wdt.c
|
F: drivers/watchdog/mena21_wdt.c
|
||||||
@ -5733,20 +5737,20 @@ L: linux-rdma@vger.kernel.org
|
|||||||
W: http://www.mellanox.com
|
W: http://www.mellanox.com
|
||||||
Q: http://patchwork.ozlabs.org/project/netdev/list/
|
Q: http://patchwork.ozlabs.org/project/netdev/list/
|
||||||
Q: http://patchwork.kernel.org/project/linux-rdma/list/
|
Q: http://patchwork.kernel.org/project/linux-rdma/list/
|
||||||
T: git://openfabrics.org/~eli/connect-ib.git
|
T: git git://openfabrics.org/~eli/connect-ib.git
|
||||||
S: Supported
|
S: Supported
|
||||||
F: drivers/net/ethernet/mellanox/mlx5/core/
|
F: drivers/net/ethernet/mellanox/mlx5/core/
|
||||||
F: include/linux/mlx5/
|
F: include/linux/mlx5/
|
||||||
|
|
||||||
Mellanox MLX5 IB driver
|
Mellanox MLX5 IB driver
|
||||||
M: Eli Cohen <eli@mellanox.com>
|
M: Eli Cohen <eli@mellanox.com>
|
||||||
L: linux-rdma@vger.kernel.org
|
L: linux-rdma@vger.kernel.org
|
||||||
W: http://www.mellanox.com
|
W: http://www.mellanox.com
|
||||||
Q: http://patchwork.kernel.org/project/linux-rdma/list/
|
Q: http://patchwork.kernel.org/project/linux-rdma/list/
|
||||||
T: git://openfabrics.org/~eli/connect-ib.git
|
T: git git://openfabrics.org/~eli/connect-ib.git
|
||||||
S: Supported
|
S: Supported
|
||||||
F: include/linux/mlx5/
|
F: include/linux/mlx5/
|
||||||
F: drivers/infiniband/hw/mlx5/
|
F: drivers/infiniband/hw/mlx5/
|
||||||
|
|
||||||
MODULE SUPPORT
|
MODULE SUPPORT
|
||||||
M: Rusty Russell <rusty@rustcorp.com.au>
|
M: Rusty Russell <rusty@rustcorp.com.au>
|
||||||
@ -6171,6 +6175,12 @@ S: Supported
|
|||||||
F: drivers/block/nvme*
|
F: drivers/block/nvme*
|
||||||
F: include/linux/nvme.h
|
F: include/linux/nvme.h
|
||||||
|
|
||||||
|
NXP TDA998X DRM DRIVER
|
||||||
|
M: Russell King <rmk+kernel@arm.linux.org.uk>
|
||||||
|
S: Supported
|
||||||
|
F: drivers/gpu/drm/i2c/tda998x_drv.c
|
||||||
|
F: include/drm/i2c/tda998x.h
|
||||||
|
|
||||||
OMAP SUPPORT
|
OMAP SUPPORT
|
||||||
M: Tony Lindgren <tony@atomide.com>
|
M: Tony Lindgren <tony@atomide.com>
|
||||||
L: linux-omap@vger.kernel.org
|
L: linux-omap@vger.kernel.org
|
||||||
@ -8700,17 +8710,17 @@ S: Maintained
|
|||||||
F: drivers/media/radio/radio-raremono.c
|
F: drivers/media/radio/radio-raremono.c
|
||||||
|
|
||||||
THERMAL
|
THERMAL
|
||||||
M: Zhang Rui <rui.zhang@intel.com>
|
M: Zhang Rui <rui.zhang@intel.com>
|
||||||
M: Eduardo Valentin <eduardo.valentin@ti.com>
|
M: Eduardo Valentin <eduardo.valentin@ti.com>
|
||||||
L: linux-pm@vger.kernel.org
|
L: linux-pm@vger.kernel.org
|
||||||
T: git git://git.kernel.org/pub/scm/linux/kernel/git/rzhang/linux.git
|
T: git git://git.kernel.org/pub/scm/linux/kernel/git/rzhang/linux.git
|
||||||
T: git git://git.kernel.org/pub/scm/linux/kernel/git/evalenti/linux-soc-thermal.git
|
T: git git://git.kernel.org/pub/scm/linux/kernel/git/evalenti/linux-soc-thermal.git
|
||||||
Q: https://patchwork.kernel.org/project/linux-pm/list/
|
Q: https://patchwork.kernel.org/project/linux-pm/list/
|
||||||
S: Supported
|
S: Supported
|
||||||
F: drivers/thermal/
|
F: drivers/thermal/
|
||||||
F: include/linux/thermal.h
|
F: include/linux/thermal.h
|
||||||
F: include/linux/cpu_cooling.h
|
F: include/linux/cpu_cooling.h
|
||||||
F: Documentation/devicetree/bindings/thermal/
|
F: Documentation/devicetree/bindings/thermal/
|
||||||
|
|
||||||
THINGM BLINK(1) USB RGB LED DRIVER
|
THINGM BLINK(1) USB RGB LED DRIVER
|
||||||
M: Vivien Didelot <vivien.didelot@savoirfairelinux.com>
|
M: Vivien Didelot <vivien.didelot@savoirfairelinux.com>
|
||||||
@ -9812,7 +9822,7 @@ ZR36067 VIDEO FOR LINUX DRIVER
|
|||||||
L: mjpeg-users@lists.sourceforge.net
|
L: mjpeg-users@lists.sourceforge.net
|
||||||
L: linux-media@vger.kernel.org
|
L: linux-media@vger.kernel.org
|
||||||
W: http://mjpeg.sourceforge.net/driver-zoran/
|
W: http://mjpeg.sourceforge.net/driver-zoran/
|
||||||
T: Mercurial http://linuxtv.org/hg/v4l-dvb
|
T: hg http://linuxtv.org/hg/v4l-dvb
|
||||||
S: Odd Fixes
|
S: Odd Fixes
|
||||||
F: drivers/media/pci/zoran/
|
F: drivers/media/pci/zoran/
|
||||||
|
|
||||||
|
2
Makefile
2
Makefile
@ -1,7 +1,7 @@
|
|||||||
VERSION = 3
|
VERSION = 3
|
||||||
PATCHLEVEL = 14
|
PATCHLEVEL = 14
|
||||||
SUBLEVEL = 0
|
SUBLEVEL = 0
|
||||||
EXTRAVERSION = -rc5
|
EXTRAVERSION = -rc6
|
||||||
NAME = Shuffling Zombie Juror
|
NAME = Shuffling Zombie Juror
|
||||||
|
|
||||||
# *DOCUMENTATION*
|
# *DOCUMENTATION*
|
||||||
|
@ -282,7 +282,7 @@ static inline void __cache_line_loop(unsigned long paddr, unsigned long vaddr,
|
|||||||
#else
|
#else
|
||||||
/* if V-P const for loop, PTAG can be written once outside loop */
|
/* if V-P const for loop, PTAG can be written once outside loop */
|
||||||
if (full_page_op)
|
if (full_page_op)
|
||||||
write_aux_reg(ARC_REG_DC_PTAG, paddr);
|
write_aux_reg(aux_tag, paddr);
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
while (num_lines-- > 0) {
|
while (num_lines-- > 0) {
|
||||||
@ -296,7 +296,7 @@ static inline void __cache_line_loop(unsigned long paddr, unsigned long vaddr,
|
|||||||
write_aux_reg(aux_cmd, vaddr);
|
write_aux_reg(aux_cmd, vaddr);
|
||||||
vaddr += L1_CACHE_BYTES;
|
vaddr += L1_CACHE_BYTES;
|
||||||
#else
|
#else
|
||||||
write_aux_reg(aux, paddr);
|
write_aux_reg(aux_cmd, paddr);
|
||||||
paddr += L1_CACHE_BYTES;
|
paddr += L1_CACHE_BYTES;
|
||||||
#endif
|
#endif
|
||||||
}
|
}
|
||||||
|
@ -1578,6 +1578,7 @@ config BL_SWITCHER_DUMMY_IF
|
|||||||
|
|
||||||
choice
|
choice
|
||||||
prompt "Memory split"
|
prompt "Memory split"
|
||||||
|
depends on MMU
|
||||||
default VMSPLIT_3G
|
default VMSPLIT_3G
|
||||||
help
|
help
|
||||||
Select the desired split between kernel and user memory.
|
Select the desired split between kernel and user memory.
|
||||||
@ -1595,6 +1596,7 @@ endchoice
|
|||||||
|
|
||||||
config PAGE_OFFSET
|
config PAGE_OFFSET
|
||||||
hex
|
hex
|
||||||
|
default PHYS_OFFSET if !MMU
|
||||||
default 0x40000000 if VMSPLIT_1G
|
default 0x40000000 if VMSPLIT_1G
|
||||||
default 0x80000000 if VMSPLIT_2G
|
default 0x80000000 if VMSPLIT_2G
|
||||||
default 0xC0000000
|
default 0xC0000000
|
||||||
@ -1903,6 +1905,7 @@ config XEN
|
|||||||
depends on ARM && AEABI && OF
|
depends on ARM && AEABI && OF
|
||||||
depends on CPU_V7 && !CPU_V6
|
depends on CPU_V7 && !CPU_V6
|
||||||
depends on !GENERIC_ATOMIC64
|
depends on !GENERIC_ATOMIC64
|
||||||
|
depends on MMU
|
||||||
select ARM_PSCI
|
select ARM_PSCI
|
||||||
select SWIOTLB_XEN
|
select SWIOTLB_XEN
|
||||||
select ARCH_DMA_ADDR_T_64BIT
|
select ARCH_DMA_ADDR_T_64BIT
|
||||||
|
arch/arm/boot/compressed/.gitignore
@@ -1,4 +1,5 @@
 ashldi3.S
+bswapsdi2.S
 font.c
 lib1funcs.S
 hyp-stub.S
@@ -147,7 +147,7 @@
 };

 pinctrl@35004800 {
-compatible = "brcm,capri-pinctrl";
+compatible = "brcm,bcm11351-pinctrl";
 reg = <0x35004800 0x430>;
 };

@@ -612,7 +612,7 @@ clocks {
 compatible = "ti,keystone,psc-clock";
 clocks = <&chipclk13>;
 clock-output-names = "vcp-3";
-reg = <0x0235000a8 0xb00>, <0x02350060 0x400>;
+reg = <0x023500a8 0xb00>, <0x02350060 0x400>;
 reg-names = "control", "domain";
 domain-id = <24>;
 };
@@ -13,7 +13,7 @@

 / {
 model = "OMAP3 GTA04";
-compatible = "ti,omap3-gta04", "ti,omap3";
+compatible = "ti,omap3-gta04", "ti,omap36xx", "ti,omap3";

 cpus {
 cpu@0 {
@@ -14,7 +14,7 @@

 / {
 model = "IGEPv2 (TI OMAP AM/DM37x)";
-compatible = "isee,omap3-igep0020", "ti,omap3";
+compatible = "isee,omap3-igep0020", "ti,omap36xx", "ti,omap3";

 leds {
 pinctrl-names = "default";
@@ -13,7 +13,7 @@

 / {
 model = "IGEP COM MODULE (TI OMAP AM/DM37x)";
-compatible = "isee,omap3-igep0030", "ti,omap3";
+compatible = "isee,omap3-igep0030", "ti,omap36xx", "ti,omap3";

 leds {
 pinctrl-names = "default";
@@ -426,7 +426,7 @@
 };

 rtp: rtp@01c25000 {
-compatible = "allwinner,sun4i-ts";
+compatible = "allwinner,sun4i-a10-ts";
 reg = <0x01c25000 0x100>;
 interrupts = <29>;
 };
@@ -383,7 +383,7 @@
 };

 rtp: rtp@01c25000 {
-compatible = "allwinner,sun4i-ts";
+compatible = "allwinner,sun4i-a10-ts";
 reg = <0x01c25000 0x100>;
 interrupts = <29>;
 };
@@ -346,7 +346,7 @@
 };

 rtp: rtp@01c25000 {
-compatible = "allwinner,sun4i-ts";
+compatible = "allwinner,sun4i-a10-ts";
 reg = <0x01c25000 0x100>;
 interrupts = <29>;
 };
@@ -454,7 +454,7 @@
 rtc: rtc@01c20d00 {
 compatible = "allwinner,sun7i-a20-rtc";
 reg = <0x01c20d00 0x20>;
-interrupts = <0 24 1>;
+interrupts = <0 24 4>;
 };

 sid: eeprom@01c23800 {
@@ -463,7 +463,7 @@
 };

 rtp: rtp@01c25000 {
-compatible = "allwinner,sun4i-ts";
+compatible = "allwinner,sun4i-a10-ts";
 reg = <0x01c25000 0x100>;
 interrupts = <0 29 4>;
 };
@@ -596,10 +596,10 @@
 hstimer@01c60000 {
 compatible = "allwinner,sun7i-a20-hstimer";
 reg = <0x01c60000 0x1000>;
-interrupts = <0 81 1>,
-<0 82 1>,
-<0 83 1>,
-<0 84 1>;
+interrupts = <0 81 4>,
+<0 82 4>,
+<0 83 4>,
+<0 84 4>;
 clocks = <&ahb_gates 28>;
 };

@@ -204,7 +204,10 @@ CONFIG_MMC_BLOCK_MINORS=16
 CONFIG_MMC_SDHCI=y
 CONFIG_MMC_SDHCI_PLTFM=y
 CONFIG_MMC_SDHCI_TEGRA=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
 CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_TRIGGERS=y
 CONFIG_LEDS_TRIGGER_TIMER=y
 CONFIG_LEDS_TRIGGER_ONESHOT=y
 CONFIG_LEDS_TRIGGER_HEARTBEAT=y
@@ -30,14 +30,15 @@
 */
 #define UL(x) _AC(x, UL)

+/* PAGE_OFFSET - the virtual address of the start of the kernel image */
+#define PAGE_OFFSET UL(CONFIG_PAGE_OFFSET)
+
 #ifdef CONFIG_MMU

 /*
- * PAGE_OFFSET - the virtual address of the start of the kernel image
  * TASK_SIZE - the maximum size of a user space task.
  * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area
  */
-#define PAGE_OFFSET UL(CONFIG_PAGE_OFFSET)
 #define TASK_SIZE (UL(CONFIG_PAGE_OFFSET) - UL(SZ_16M))
 #define TASK_UNMAPPED_BASE ALIGN(TASK_SIZE / 3, SZ_16M)

@@ -104,10 +105,6 @@
 #define END_MEM (UL(CONFIG_DRAM_BASE) + CONFIG_DRAM_SIZE)
 #endif

-#ifndef PAGE_OFFSET
-#define PAGE_OFFSET PLAT_PHYS_OFFSET
-#endif
-
 /*
  * The module can be at any place in ram in nommu mode.
  */
@@ -177,6 +177,18 @@ __lookup_processor_type_data:
 .long __proc_info_end
 .size __lookup_processor_type_data, . - __lookup_processor_type_data

+__error_lpae:
+#ifdef CONFIG_DEBUG_LL
+adr r0, str_lpae
+bl printascii
+b __error
+str_lpae: .asciz "\nError: Kernel with LPAE support, but CPU does not support LPAE.\n"
+#else
+b __error
+#endif
+.align
+ENDPROC(__error_lpae)
+
 __error_p:
 #ifdef CONFIG_DEBUG_LL
 adr r0, str_p1
@@ -102,7 +102,7 @@ ENTRY(stext)
 and r3, r3, #0xf @ extract VMSA support
 cmp r3, #5 @ long-descriptor translation table format?
 THUMB( it lo ) @ force fixup-able long branch encoding
-blo __error_p @ only classic page table format
+blo __error_lpae @ only classic page table format
 #endif

 #ifndef CONFIG_XIP_KERNEL
@@ -433,7 +433,9 @@ static const struct clk_ops dpll4_m5x2_ck_ops = {
 .enable = &omap2_dflt_clk_enable,
 .disable = &omap2_dflt_clk_disable,
 .is_enabled = &omap2_dflt_clk_is_enabled,
+.set_rate = &omap3_clkoutx2_set_rate,
 .recalc_rate = &omap3_clkoutx2_recalc,
+.round_rate = &omap3_clkoutx2_round_rate,
 };

 static const struct clk_ops dpll4_m5x2_ck_3630_ops = {
@@ -23,6 +23,8 @@
 #include "prm.h"
 #include "clockdomain.h"

+#define MAX_CPUS 2
+
 /* Machine specific information */
 struct idle_statedata {
 u32 cpu_state;
@@ -48,11 +50,11 @@ static struct idle_statedata omap4_idle_data[] = {
 },
 };

-static struct powerdomain *mpu_pd, *cpu_pd[NR_CPUS];
-static struct clockdomain *cpu_clkdm[NR_CPUS];
+static struct powerdomain *mpu_pd, *cpu_pd[MAX_CPUS];
+static struct clockdomain *cpu_clkdm[MAX_CPUS];

 static atomic_t abort_barrier;
-static bool cpu_done[NR_CPUS];
+static bool cpu_done[MAX_CPUS];
 static struct idle_statedata *state_ptr = &omap4_idle_data[0];

 /* Private functions */
@@ -623,25 +623,12 @@ void omap3_dpll_deny_idle(struct clk_hw_omap *clk)

 /* Clock control for DPLL outputs */

-/**
- * omap3_clkoutx2_recalc - recalculate DPLL X2 output virtual clock rate
- * @clk: DPLL output struct clk
- *
- * Using parent clock DPLL data, look up DPLL state. If locked, set our
- * rate to the dpll_clk * 2; otherwise, just use dpll_clk.
- */
-unsigned long omap3_clkoutx2_recalc(struct clk_hw *hw,
-unsigned long parent_rate)
+/* Find the parent DPLL for the given clkoutx2 clock */
+static struct clk_hw_omap *omap3_find_clkoutx2_dpll(struct clk_hw *hw)
 {
-const struct dpll_data *dd;
-unsigned long rate;
-u32 v;
 struct clk_hw_omap *pclk = NULL;
 struct clk *parent;

-if (!parent_rate)
-return 0;
-
 /* Walk up the parents of clk, looking for a DPLL */
 do {
 do {
@@ -656,9 +643,35 @@ unsigned long omap3_clkoutx2_recalc(struct clk_hw *hw,
 /* clk does not have a DPLL as a parent? error in the clock data */
 if (!pclk) {
 WARN_ON(1);
-return 0;
+return NULL;
 }

+return pclk;
+}
+
+/**
+ * omap3_clkoutx2_recalc - recalculate DPLL X2 output virtual clock rate
+ * @clk: DPLL output struct clk
+ *
+ * Using parent clock DPLL data, look up DPLL state. If locked, set our
+ * rate to the dpll_clk * 2; otherwise, just use dpll_clk.
+ */
+unsigned long omap3_clkoutx2_recalc(struct clk_hw *hw,
+unsigned long parent_rate)
+{
+const struct dpll_data *dd;
+unsigned long rate;
+u32 v;
+struct clk_hw_omap *pclk = NULL;
+
+if (!parent_rate)
+return 0;
+
+pclk = omap3_find_clkoutx2_dpll(hw);
+
+if (!pclk)
+return 0;
+
 dd = pclk->dpll_data;

 WARN_ON(!dd->enable_mask);
@@ -672,6 +685,55 @@ unsigned long omap3_clkoutx2_recalc(struct clk_hw *hw,
 return rate;
 }

+int omap3_clkoutx2_set_rate(struct clk_hw *hw, unsigned long rate,
+unsigned long parent_rate)
+{
+return 0;
+}
+
+long omap3_clkoutx2_round_rate(struct clk_hw *hw, unsigned long rate,
+unsigned long *prate)
+{
+const struct dpll_data *dd;
+u32 v;
+struct clk_hw_omap *pclk = NULL;
+
+if (!*prate)
+return 0;
+
+pclk = omap3_find_clkoutx2_dpll(hw);
+
+if (!pclk)
+return 0;
+
+dd = pclk->dpll_data;
+
+/* TYPE J does not have a clkoutx2 */
+if (dd->flags & DPLL_J_TYPE) {
+*prate = __clk_round_rate(__clk_get_parent(pclk->hw.clk), rate);
+return *prate;
+}
+
+WARN_ON(!dd->enable_mask);
+
+v = omap2_clk_readl(pclk, dd->control_reg) & dd->enable_mask;
+v >>= __ffs(dd->enable_mask);
+
+/* If in bypass, the rate is fixed to the bypass rate*/
+if (v != OMAP3XXX_EN_DPLL_LOCKED)
+return *prate;
+
+if (__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT) {
+unsigned long best_parent;
+
+best_parent = (rate / 2);
+*prate = __clk_round_rate(__clk_get_parent(hw->clk),
+best_parent);
+}
+
+return *prate * 2;
+}
+
 /* OMAP3/4 non-CORE DPLL clkops */
 const struct clk_hw_omap_ops clkhwops_omap3_dpll = {
 .allow_idle = omap3_dpll_allow_idle,
@@ -1947,29 +1947,31 @@ static int _ocp_softreset(struct omap_hwmod *oh)
 goto dis_opt_clks;

 _write_sysconfig(v, oh);

+if (oh->class->sysc->srst_udelay)
+udelay(oh->class->sysc->srst_udelay);
+
+c = _wait_softreset_complete(oh);
+if (c == MAX_MODULE_SOFTRESET_WAIT) {
+pr_warning("omap_hwmod: %s: softreset failed (waited %d usec)\n",
+oh->name, MAX_MODULE_SOFTRESET_WAIT);
+ret = -ETIMEDOUT;
+goto dis_opt_clks;
+} else {
+pr_debug("omap_hwmod: %s: softreset in %d usec\n", oh->name, c);
+}
+
 ret = _clear_softreset(oh, &v);
 if (ret)
 goto dis_opt_clks;

 _write_sysconfig(v, oh);

-if (oh->class->sysc->srst_udelay)
-udelay(oh->class->sysc->srst_udelay);
-
-c = _wait_softreset_complete(oh);
-if (c == MAX_MODULE_SOFTRESET_WAIT)
-pr_warning("omap_hwmod: %s: softreset failed (waited %d usec)\n",
-oh->name, MAX_MODULE_SOFTRESET_WAIT);
-else
-pr_debug("omap_hwmod: %s: softreset in %d usec\n", oh->name, c);
-
 /*
  * XXX add _HWMOD_STATE_WEDGED for modules that don't come back from
  * _wait_target_ready() or _reset()
  */

-ret = (c == MAX_MODULE_SOFTRESET_WAIT) ? -ETIMEDOUT : 0;
-
 dis_opt_clks:
 if (oh->flags & HWMOD_CONTROL_OPT_CLKS_IN_RESET)
 _disable_optional_clocks(oh);
@@ -1365,11 +1365,10 @@ static struct omap_hwmod_class_sysconfig dra7xx_spinlock_sysc = {
 .rev_offs = 0x0000,
 .sysc_offs = 0x0010,
 .syss_offs = 0x0014,
-.sysc_flags = (SYSC_HAS_AUTOIDLE | SYSC_HAS_CLOCKACTIVITY |
-SYSC_HAS_ENAWAKEUP | SYSC_HAS_SIDLEMODE |
-SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS),
-.idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
-SIDLE_SMART_WKUP),
+.sysc_flags = (SYSC_HAS_AUTOIDLE | SYSC_HAS_ENAWAKEUP |
+SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET |
+SYSS_HAS_RESET_STATUS),
+.idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
 .sysc_fields = &omap_hwmod_sysc_type1,
 };

@@ -22,6 +22,8 @@
 #include "common-board-devices.h"
 #include "dss-common.h"
 #include "control.h"
+#include "omap-secure.h"
+#include "soc.h"

 struct pdata_init {
 const char *compatible;
@@ -169,6 +171,22 @@ static void __init am3517_evm_legacy_init(void)
 omap_ctrl_writel(v, AM35XX_CONTROL_IP_SW_RESET);
 omap_ctrl_readl(AM35XX_CONTROL_IP_SW_RESET); /* OCP barrier */
 }
+
+static void __init nokia_n900_legacy_init(void)
+{
+hsmmc2_internal_input_clk();
+
+if (omap_type() == OMAP2_DEVICE_TYPE_SEC) {
+if (IS_ENABLED(CONFIG_ARM_ERRATA_430973)) {
+pr_info("RX-51: Enabling ARM errata 430973 workaround\n");
+/* set IBE to 1 */
+rx51_secure_update_aux_cr(BIT(6), 0);
+} else {
+pr_warning("RX-51: Not enabling ARM errata 430973 workaround\n");
+pr_warning("Thumb binaries may crash randomly without this workaround\n");
+}
+}
+}
 #endif /* CONFIG_ARCH_OMAP3 */

 #ifdef CONFIG_ARCH_OMAP4
@@ -239,6 +257,7 @@ struct of_dev_auxdata omap_auxdata_lookup[] __initdata = {
 #endif
 #ifdef CONFIG_ARCH_OMAP3
 OF_DEV_AUXDATA("ti,omap3-padconf", 0x48002030, "48002030.pinmux", &pcs_pdata),
+OF_DEV_AUXDATA("ti,omap3-padconf", 0x480025a0, "480025a0.pinmux", &pcs_pdata),
 OF_DEV_AUXDATA("ti,omap3-padconf", 0x48002a00, "48002a00.pinmux", &pcs_pdata),
 /* Only on am3517 */
 OF_DEV_AUXDATA("ti,davinci_mdio", 0x5c030000, "davinci_mdio.0", NULL),
@@ -259,7 +278,7 @@ struct of_dev_auxdata omap_auxdata_lookup[] __initdata = {
 static struct pdata_init pdata_quirks[] __initdata = {
 #ifdef CONFIG_ARCH_OMAP3
 { "compulab,omap3-sbc-t3730", omap3_sbc_t3730_legacy_init, },
-{ "nokia,omap3-n900", hsmmc2_internal_input_clk, },
+{ "nokia,omap3-n900", nokia_n900_legacy_init, },
 { "nokia,omap3-n9", hsmmc2_internal_input_clk, },
 { "nokia,omap3-n950", hsmmc2_internal_input_clk, },
 { "isee,omap3-igep0020", omap3_igep0020_legacy_init, },
@@ -183,11 +183,11 @@ void omap4_prminst_global_warm_sw_reset(void)
 OMAP4_PRM_RSTCTRL_OFFSET);
 v |= OMAP4430_RST_GLOBAL_WARM_SW_MASK;
 omap4_prminst_write_inst_reg(v, OMAP4430_PRM_PARTITION,
-OMAP4430_PRM_DEVICE_INST,
+dev_inst,
 OMAP4_PRM_RSTCTRL_OFFSET);

 /* OCP barrier */
 v = omap4_prminst_read_inst_reg(OMAP4430_PRM_PARTITION,
-OMAP4430_PRM_DEVICE_INST,
+dev_inst,
 OMAP4_PRM_RSTCTRL_OFFSET);
 }
@@ -13,6 +13,8 @@
 #ifndef __ASM_ARCH_COLLIE_H
 #define __ASM_ARCH_COLLIE_H

+#include "hardware.h" /* Gives GPIO_MAX */
+
 extern void locomolcd_power(int on);

 #define COLLIE_SCOOP_GPIO_BASE (GPIO_MAX + 1)
@@ -264,6 +264,9 @@ static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start)
 note_page(st, addr, 3, pmd_val(*pmd));
 else
 walk_pte(st, pmd, addr);
+
+if (SECTION_SIZE < PMD_SIZE && pmd_large(pmd[1]))
+note_page(st, addr + SECTION_SIZE, 3, pmd_val(pmd[1]));
 }
 }

@@ -12,6 +12,7 @@
 #define _ASM_C6X_CACHE_H

 #include <linux/irqflags.h>
+#include <linux/init.h>

 /*
  * Cache line size
@@ -1048,6 +1048,15 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
 flush_altivec_to_thread(src);
 flush_vsx_to_thread(src);
 flush_spe_to_thread(src);
+/*
+ * Flush TM state out so we can copy it. __switch_to_tm() does this
+ * flush but it removes the checkpointed state from the current CPU and
+ * transitions the CPU out of TM mode. Hence we need to call
+ * tm_recheckpoint_new_task() (on the same task) to restore the
+ * checkpointed state back and the TM mode.
+ */
+__switch_to_tm(src);
+tm_recheckpoint_new_task(src);

 *dst = *src;

@@ -81,6 +81,7 @@ _GLOBAL(relocate)

 6: blr

+.balign 8
 p_dyn: .llong __dynamic_start - 0b
 p_rela: .llong __rela_dyn_start - 0b
 p_st: .llong _stext - 0b
|
|||||||
#define SH_CACHE_ASSOC 8
|
#define SH_CACHE_ASSOC 8
|
||||||
|
|
||||||
#if defined(CONFIG_CPU_SUBTYPE_SH7619)
|
#if defined(CONFIG_CPU_SUBTYPE_SH7619)
|
||||||
#define CCR 0xffffffec
|
#define SH_CCR 0xffffffec
|
||||||
|
|
||||||
#define CCR_CACHE_CE 0x01 /* Cache enable */
 #define CCR_CACHE_CE	0x01	/* Cache enable */
 #define CCR_CACHE_WT	0x02	/* CCR[bit1=1,bit2=1] */
@@ -17,8 +17,8 @@
 #define SH_CACHE_COMBINED	4
 #define SH_CACHE_ASSOC		8
 
-#define CCR		0xfffc1000 /* CCR1 */
+#define SH_CCR		0xfffc1000 /* CCR1 */
-#define CCR2		0xfffc1004
+#define SH_CCR2		0xfffc1004
 
 /*
  * Most of the SH-2A CCR1 definitions resemble the SH-4 ones. All others not
@@ -17,7 +17,7 @@
 #define SH_CACHE_COMBINED	4
 #define SH_CACHE_ASSOC		8
 
-#define CCR		0xffffffec	/* Address of Cache Control Register */
+#define SH_CCR		0xffffffec	/* Address of Cache Control Register */
 
 #define CCR_CACHE_CE	0x01	/* Cache Enable */
 #define CCR_CACHE_WT	0x02	/* Write-Through (for P0,U0,P3) (else writeback) */
@@ -17,7 +17,7 @@
 #define SH_CACHE_COMBINED	4
 #define SH_CACHE_ASSOC		8
 
-#define CCR		0xff00001c	/* Address of Cache Control Register */
+#define SH_CCR		0xff00001c	/* Address of Cache Control Register */
 #define CCR_CACHE_OCE	0x0001	/* Operand Cache Enable */
 #define CCR_CACHE_WT	0x0002	/* Write-Through (for P0,U0,P3) (else writeback)*/
 #define CCR_CACHE_CB	0x0004	/* Copy-Back (for P1) (else writethrough) */
@@ -112,7 +112,7 @@ static void cache_init(void)
 	unsigned long ccr, flags;
 
 	jump_to_uncached();
-	ccr = __raw_readl(CCR);
+	ccr = __raw_readl(SH_CCR);
 
 	/*
 	 * At this point we don't know whether the cache is enabled or not - a
@@ -189,7 +189,7 @@ static void cache_init(void)
 
 	l2_cache_init();
 
-	__raw_writel(flags, CCR);
+	__raw_writel(flags, SH_CCR);
 	back_to_cached();
 }
 #else
@@ -36,7 +36,7 @@ static int cache_seq_show(struct seq_file *file, void *iter)
 	 */
 	jump_to_uncached();
 
-	ccr = __raw_readl(CCR);
+	ccr = __raw_readl(SH_CCR);
 	if ((ccr & CCR_CACHE_ENABLE) == 0) {
 		back_to_cached();
 
@@ -63,9 +63,9 @@ static void sh2__flush_invalidate_region(void *start, int size)
 	local_irq_save(flags);
 	jump_to_uncached();
 
-	ccr = __raw_readl(CCR);
+	ccr = __raw_readl(SH_CCR);
 	ccr |= CCR_CACHE_INVALIDATE;
-	__raw_writel(ccr, CCR);
+	__raw_writel(ccr, SH_CCR);
 
 	back_to_cached();
 	local_irq_restore(flags);
@@ -134,7 +134,8 @@ static void sh2a__flush_invalidate_region(void *start, int size)
 
 	/* If there are too many pages then just blow the cache */
 	if (((end - begin) >> PAGE_SHIFT) >= MAX_OCACHE_PAGES) {
-		__raw_writel(__raw_readl(CCR) | CCR_OCACHE_INVALIDATE, CCR);
+		__raw_writel(__raw_readl(SH_CCR) | CCR_OCACHE_INVALIDATE,
+			     SH_CCR);
 	} else {
 		for (v = begin; v < end; v += L1_CACHE_BYTES)
 			sh2a_invalidate_line(CACHE_OC_ADDRESS_ARRAY, v);
@@ -167,7 +168,8 @@ static void sh2a_flush_icache_range(void *args)
 	/* I-Cache invalidate */
 	/* If there are too many pages then just blow the cache */
 	if (((end - start) >> PAGE_SHIFT) >= MAX_ICACHE_PAGES) {
-		__raw_writel(__raw_readl(CCR) | CCR_ICACHE_INVALIDATE, CCR);
+		__raw_writel(__raw_readl(SH_CCR) | CCR_ICACHE_INVALIDATE,
+			     SH_CCR);
 	} else {
 		for (v = start; v < end; v += L1_CACHE_BYTES)
 			sh2a_invalidate_line(CACHE_IC_ADDRESS_ARRAY, v);
@@ -133,9 +133,9 @@ static void flush_icache_all(void)
 	jump_to_uncached();
 
 	/* Flush I-cache */
-	ccr = __raw_readl(CCR);
+	ccr = __raw_readl(SH_CCR);
 	ccr |= CCR_CACHE_ICI;
-	__raw_writel(ccr, CCR);
+	__raw_writel(ccr, SH_CCR);
 
 	/*
 	 * back_to_cached() will take care of the barrier for us, don't add
@@ -19,7 +19,7 @@ void __init shx3_cache_init(void)
 {
 	unsigned int ccr;
 
-	ccr = __raw_readl(CCR);
+	ccr = __raw_readl(SH_CCR);
 
 	/*
 	 * If we've got cache aliases, resolve them in hardware.
@@ -40,5 +40,5 @@ void __init shx3_cache_init(void)
 		ccr |= CCR_CACHE_IBE;
 #endif
 
-	writel_uncached(ccr, CCR);
+	writel_uncached(ccr, SH_CCR);
 }
@@ -285,8 +285,8 @@ void __init cpu_cache_init(void)
 {
 	unsigned int cache_disabled = 0;
 
-#ifdef CCR
+#ifdef SH_CCR
-	cache_disabled = !(__raw_readl(CCR) & CCR_CACHE_ENABLE);
+	cache_disabled = !(__raw_readl(SH_CCR) & CCR_CACHE_ENABLE);
 #endif
 
 	compute_alias(&boot_cpu_data.icache);
@@ -134,6 +134,7 @@ extern void efi_setup_page_tables(void);
 extern void __init old_map_region(efi_memory_desc_t *md);
 extern void __init runtime_code_page_mkexec(void);
 extern void __init efi_runtime_mkexec(void);
+extern void __init efi_apply_memmap_quirks(void);
 
 struct efi_setup_data {
 	u64 fw_vendor;
@@ -544,6 +544,10 @@ ENDPROC(early_idt_handlers)
 	/* This is global to keep gas from relaxing the jumps */
 ENTRY(early_idt_handler)
 	cld
+
+	cmpl $2,(%esp)		# X86_TRAP_NMI
+	je is_nmi		# Ignore NMI
+
 	cmpl $2,%ss:early_recursion_flag
 	je hlt_loop
 	incl %ss:early_recursion_flag
@@ -594,8 +598,9 @@ ex_entry:
 	pop %edx
 	pop %ecx
 	pop %eax
-	addl $8,%esp		/* drop vector number and error code */
 	decl %ss:early_recursion_flag
+is_nmi:
+	addl $8,%esp		/* drop vector number and error code */
 	iret
 ENDPROC(early_idt_handler)
 
@@ -343,6 +343,9 @@ early_idt_handlers:
 ENTRY(early_idt_handler)
 	cld
 
+	cmpl $2,(%rsp)		# X86_TRAP_NMI
+	je is_nmi		# Ignore NMI
+
 	cmpl $2,early_recursion_flag(%rip)
 	jz 1f
 	incl early_recursion_flag(%rip)
@@ -405,8 +408,9 @@ ENTRY(early_idt_handler)
 	popq %rdx
 	popq %rcx
 	popq %rax
-	addq $16,%rsp		# drop vector number and error code
 	decl early_recursion_flag(%rip)
+is_nmi:
+	addq $16,%rsp		# drop vector number and error code
 	INTERRUPT_RETURN
 ENDPROC(early_idt_handler)
 
@@ -1239,14 +1239,8 @@ void __init setup_arch(char **cmdline_p)
 	register_refined_jiffies(CLOCK_TICK_RATE);
 
 #ifdef CONFIG_EFI
-	/* Once setup is done above, unmap the EFI memory map on
-	 * mismatched firmware/kernel archtectures since there is no
-	 * support for runtime services.
-	 */
-	if (efi_enabled(EFI_BOOT) && !efi_is_native()) {
-		pr_info("efi: Setup done, disabling due to 32/64-bit mismatch\n");
-		efi_unmap_memmap();
-	}
+	if (efi_enabled(EFI_BOOT))
+		efi_apply_memmap_quirks();
 #endif
 }
 
@@ -1020,13 +1020,17 @@ static inline bool smap_violation(int error_code, struct pt_regs *regs)
  * This routine handles page faults.  It determines the address,
  * and the problem, and then passes it off to one of the appropriate
  * routines.
+ *
+ * This function must have noinline because both callers
+ * {,trace_}do_page_fault() have notrace on. Having this an actual function
+ * guarantees there's a function trace entry.
  */
-static void __kprobes
+static void __kprobes noinline
-__do_page_fault(struct pt_regs *regs, unsigned long error_code)
+__do_page_fault(struct pt_regs *regs, unsigned long error_code,
+		unsigned long address)
 {
 	struct vm_area_struct *vma;
 	struct task_struct *tsk;
-	unsigned long address;
 	struct mm_struct *mm;
 	int fault;
 	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
@@ -1034,9 +1038,6 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
 	tsk = current;
 	mm = tsk->mm;
 
-	/* Get the faulting address: */
-	address = read_cr2();
-
 	/*
 	 * Detect and handle instructions that would cause a page fault for
 	 * both a tracked kernel page and a userspace page.
@@ -1248,32 +1249,50 @@ good_area:
 	up_read(&mm->mmap_sem);
 }
 
-dotraplinkage void __kprobes
+dotraplinkage void __kprobes notrace
 do_page_fault(struct pt_regs *regs, unsigned long error_code)
 {
+	unsigned long address = read_cr2(); /* Get the faulting address */
 	enum ctx_state prev_state;
 
+	/*
+	 * We must have this function tagged with __kprobes, notrace and call
+	 * read_cr2() before calling anything else. To avoid calling any kind
+	 * of tracing machinery before we've observed the CR2 value.
+	 *
+	 * exception_{enter,exit}() contain all sorts of tracepoints.
+	 */
+
 	prev_state = exception_enter();
-	__do_page_fault(regs, error_code);
+	__do_page_fault(regs, error_code, address);
 	exception_exit(prev_state);
 }
 
-static void trace_page_fault_entries(struct pt_regs *regs,
+#ifdef CONFIG_TRACING
+static void trace_page_fault_entries(unsigned long address, struct pt_regs *regs,
 				     unsigned long error_code)
 {
 	if (user_mode(regs))
-		trace_page_fault_user(read_cr2(), regs, error_code);
+		trace_page_fault_user(address, regs, error_code);
 	else
-		trace_page_fault_kernel(read_cr2(), regs, error_code);
+		trace_page_fault_kernel(address, regs, error_code);
 }
 
-dotraplinkage void __kprobes
+dotraplinkage void __kprobes notrace
 trace_do_page_fault(struct pt_regs *regs, unsigned long error_code)
 {
+	/*
+	 * The exception_enter and tracepoint processing could
+	 * trigger another page faults (user space callchain
+	 * reading) and destroy the original cr2 value, so read
+	 * the faulting address now.
	 */
+	unsigned long address = read_cr2();
 	enum ctx_state prev_state;
 
 	prev_state = exception_enter();
-	trace_page_fault_entries(regs, error_code);
+	trace_page_fault_entries(address, regs, error_code);
-	__do_page_fault(regs, error_code);
+	__do_page_fault(regs, error_code, address);
 	exception_exit(prev_state);
 }
+#endif /* CONFIG_TRACING */
@@ -52,6 +52,7 @@
 #include <asm/tlbflush.h>
 #include <asm/x86_init.h>
 #include <asm/rtc.h>
+#include <asm/uv/uv.h>
 
 #define EFI_DEBUG
 
@@ -1210,3 +1211,22 @@ static int __init parse_efi_cmdline(char *str)
 	return 0;
 }
 early_param("efi", parse_efi_cmdline);
+
+void __init efi_apply_memmap_quirks(void)
+{
+	/*
+	 * Once setup is done earlier, unmap the EFI memory map on mismatched
+	 * firmware/kernel architectures since there is no support for runtime
+	 * services.
+	 */
+	if (!efi_is_native()) {
+		pr_info("efi: Setup done, disabling due to 32/64-bit mismatch\n");
+		efi_unmap_memmap();
+	}
+
+	/*
+	 * UV doesn't support the new EFI pagetable mapping yet.
+	 */
+	if (is_uv_system())
+		set_bit(EFI_OLD_MEMMAP, &x86_efi_facility);
+}
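
The do_page_fault() rework above comes down to one ordering rule: capture CR2 before exception_enter() or any tracepoint can fault and overwrite it. Below is a minimal userspace sketch of the same discipline, using errno (which later library calls may clobber) in place of CR2; the helper names and paths are illustrative only, not kernel APIs.

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Stand-in for exception_enter()/tracepoints: may itself fail and clobber errno. */
static void noisy_trace(const char *msg)
{
	int fd = open("/nonexistent/trace", O_WRONLY);	/* fails and rewrites errno */
	if (fd >= 0)
		close(fd);
	fprintf(stderr, "trace: %s\n", msg);
}

static void handle_open_failure(int saved_errno)
{
	fprintf(stderr, "open failed: %s\n", strerror(saved_errno));
}

int main(void)
{
	int fd = open("/definitely/missing", O_RDONLY);
	if (fd < 0) {
		int saved_errno = errno;	/* capture first, like read_cr2() */
		noisy_trace("entering failure path");
		handle_open_failure(saved_errno);
		return 1;
	}
	close(fd);
	return 0;
}

The only point of the sketch is the ordering: the fragile value is copied into a local before anything instrumented gets a chance to run.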
@@ -65,7 +65,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
 	 * be resued after dying flag is set
 	 */
 	if (q->mq_ops) {
-		blk_mq_insert_request(q, rq, at_head, true);
+		blk_mq_insert_request(rq, at_head, true, false);
 		return;
 	}
 
@@ -137,7 +137,7 @@ static void mq_flush_run(struct work_struct *work)
 	rq = container_of(work, struct request, mq_flush_work);
 
 	memset(&rq->csd, 0, sizeof(rq->csd));
-	blk_mq_run_request(rq, true, false);
+	blk_mq_insert_request(rq, false, true, false);
 }
 
 static bool blk_flush_queue_rq(struct request *rq)
@@ -411,7 +411,7 @@ void blk_insert_flush(struct request *rq)
 	if ((policy & REQ_FSEQ_DATA) &&
 	    !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
 		if (q->mq_ops) {
-			blk_mq_run_request(rq, false, true);
+			blk_mq_insert_request(rq, false, false, true);
 		} else
 			list_add_tail(&rq->queuelist, &q->queue_head);
 		return;
@@ -11,7 +11,7 @@
 #include "blk-mq.h"
 
 static LIST_HEAD(blk_mq_cpu_notify_list);
-static DEFINE_SPINLOCK(blk_mq_cpu_notify_lock);
+static DEFINE_RAW_SPINLOCK(blk_mq_cpu_notify_lock);
 
 static int blk_mq_main_cpu_notify(struct notifier_block *self,
 				  unsigned long action, void *hcpu)
@@ -19,12 +19,12 @@ static int blk_mq_main_cpu_notify(struct notifier_block *self,
 	unsigned int cpu = (unsigned long) hcpu;
 	struct blk_mq_cpu_notifier *notify;
 
-	spin_lock(&blk_mq_cpu_notify_lock);
+	raw_spin_lock(&blk_mq_cpu_notify_lock);
 
 	list_for_each_entry(notify, &blk_mq_cpu_notify_list, list)
 		notify->notify(notify->data, action, cpu);
 
-	spin_unlock(&blk_mq_cpu_notify_lock);
+	raw_spin_unlock(&blk_mq_cpu_notify_lock);
 	return NOTIFY_OK;
 }
 
@@ -32,16 +32,16 @@ void blk_mq_register_cpu_notifier(struct blk_mq_cpu_notifier *notifier)
 {
 	BUG_ON(!notifier->notify);
 
-	spin_lock(&blk_mq_cpu_notify_lock);
+	raw_spin_lock(&blk_mq_cpu_notify_lock);
 	list_add_tail(&notifier->list, &blk_mq_cpu_notify_list);
-	spin_unlock(&blk_mq_cpu_notify_lock);
+	raw_spin_unlock(&blk_mq_cpu_notify_lock);
 }
 
 void blk_mq_unregister_cpu_notifier(struct blk_mq_cpu_notifier *notifier)
 {
-	spin_lock(&blk_mq_cpu_notify_lock);
+	raw_spin_lock(&blk_mq_cpu_notify_lock);
 	list_del(&notifier->list);
-	spin_unlock(&blk_mq_cpu_notify_lock);
+	raw_spin_unlock(&blk_mq_cpu_notify_lock);
 }
 
 void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier,
block/blk-mq.c
@@ -73,8 +73,8 @@ static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
 		set_bit(ctx->index_hw, hctx->ctx_map);
 }
 
-static struct request *blk_mq_alloc_rq(struct blk_mq_hw_ctx *hctx, gfp_t gfp,
+static struct request *__blk_mq_alloc_request(struct blk_mq_hw_ctx *hctx,
-				       bool reserved)
+					      gfp_t gfp, bool reserved)
 {
 	struct request *rq;
 	unsigned int tag;
@@ -193,12 +193,6 @@ static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
 	ctx->rq_dispatched[rw_is_sync(rw_flags)]++;
 }
 
-static struct request *__blk_mq_alloc_request(struct blk_mq_hw_ctx *hctx,
-					      gfp_t gfp, bool reserved)
-{
-	return blk_mq_alloc_rq(hctx, gfp, reserved);
-}
-
 static struct request *blk_mq_alloc_request_pinned(struct request_queue *q,
 						   int rw, gfp_t gfp,
 						   bool reserved)
@@ -289,38 +283,10 @@ void blk_mq_free_request(struct request *rq)
 	__blk_mq_free_request(hctx, ctx, rq);
 }
 
-static void blk_mq_bio_endio(struct request *rq, struct bio *bio, int error)
+bool blk_mq_end_io_partial(struct request *rq, int error, unsigned int nr_bytes)
 {
-	if (error)
+	if (blk_update_request(rq, error, blk_rq_bytes(rq)))
-		clear_bit(BIO_UPTODATE, &bio->bi_flags);
+		return true;
-	else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
-		error = -EIO;
-
-	if (unlikely(rq->cmd_flags & REQ_QUIET))
-		set_bit(BIO_QUIET, &bio->bi_flags);
-
-	/* don't actually finish bio if it's part of flush sequence */
-	if (!(rq->cmd_flags & REQ_FLUSH_SEQ))
-		bio_endio(bio, error);
-}
-
-void blk_mq_end_io(struct request *rq, int error)
-{
-	struct bio *bio = rq->bio;
-	unsigned int bytes = 0;
-
-	trace_block_rq_complete(rq->q, rq);
-
-	while (bio) {
-		struct bio *next = bio->bi_next;
-
-		bio->bi_next = NULL;
-		bytes += bio->bi_iter.bi_size;
-		blk_mq_bio_endio(rq, bio, error);
-		bio = next;
-	}
-
-	blk_account_io_completion(rq, bytes);
-
 	blk_account_io_done(rq);
 
@@ -328,8 +294,9 @@ void blk_mq_end_io(struct request *rq, int error)
 		rq->end_io(rq, error);
 	else
 		blk_mq_free_request(rq);
+	return false;
 }
-EXPORT_SYMBOL(blk_mq_end_io);
+EXPORT_SYMBOL(blk_mq_end_io_partial);
 
 static void __blk_mq_complete_request_remote(void *data)
 {
@@ -730,60 +697,27 @@ static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
 		blk_mq_add_timer(rq);
 }
 
-void blk_mq_insert_request(struct request_queue *q, struct request *rq,
+void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue,
-			   bool at_head, bool run_queue)
+		bool async)
-{
-	struct blk_mq_hw_ctx *hctx;
-	struct blk_mq_ctx *ctx, *current_ctx;
-
-	ctx = rq->mq_ctx;
-	hctx = q->mq_ops->map_queue(q, ctx->cpu);
-
-	if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA)) {
-		blk_insert_flush(rq);
-	} else {
-		current_ctx = blk_mq_get_ctx(q);
-
-		if (!cpu_online(ctx->cpu)) {
-			ctx = current_ctx;
-			hctx = q->mq_ops->map_queue(q, ctx->cpu);
-			rq->mq_ctx = ctx;
-		}
-		spin_lock(&ctx->lock);
-		__blk_mq_insert_request(hctx, rq, at_head);
-		spin_unlock(&ctx->lock);
-
-		blk_mq_put_ctx(current_ctx);
-	}
-
-	if (run_queue)
-		__blk_mq_run_hw_queue(hctx);
-}
-EXPORT_SYMBOL(blk_mq_insert_request);
-
-/*
- * This is a special version of blk_mq_insert_request to bypass FLUSH request
- * check. Should only be used internally.
- */
-void blk_mq_run_request(struct request *rq, bool run_queue, bool async)
 {
 	struct request_queue *q = rq->q;
 	struct blk_mq_hw_ctx *hctx;
-	struct blk_mq_ctx *ctx, *current_ctx;
+	struct blk_mq_ctx *ctx = rq->mq_ctx, *current_ctx;
 
 	current_ctx = blk_mq_get_ctx(q);
+	if (!cpu_online(ctx->cpu))
+		rq->mq_ctx = ctx = current_ctx;
 
-	ctx = rq->mq_ctx;
-	if (!cpu_online(ctx->cpu)) {
-		ctx = current_ctx;
-		rq->mq_ctx = ctx;
-	}
 	hctx = q->mq_ops->map_queue(q, ctx->cpu);
 
-	/* ctx->cpu might be offline */
-	spin_lock(&ctx->lock);
-	__blk_mq_insert_request(hctx, rq, false);
-	spin_unlock(&ctx->lock);
+	if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA) &&
+	    !(rq->cmd_flags & (REQ_FLUSH_SEQ))) {
+		blk_insert_flush(rq);
+	} else {
+		spin_lock(&ctx->lock);
+		__blk_mq_insert_request(hctx, rq, at_head);
+		spin_unlock(&ctx->lock);
+	}
 
 	blk_mq_put_ctx(current_ctx);
 
@@ -926,6 +860,8 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
 	ctx = blk_mq_get_ctx(q);
 	hctx = q->mq_ops->map_queue(q, ctx->cpu);
 
+	if (is_sync)
+		rw |= REQ_SYNC;
 	trace_block_getrq(q, bio, rw);
 	rq = __blk_mq_alloc_request(hctx, GFP_ATOMIC, false);
 	if (likely(rq))
@@ -23,7 +23,6 @@ struct blk_mq_ctx {
 };
 
 void __blk_mq_complete_request(struct request *rq);
-void blk_mq_run_request(struct request *rq, bool run_queue, bool async);
 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
 void blk_mq_init_flush(struct request_queue *q);
 void blk_mq_drain_queue(struct request_queue *q);
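
blk_mq_end_io_partial() above is built around a helper that consumes a completed byte count and reports whether the request still has data outstanding. The toy model below mirrors only that true/false contract so the partial-versus-final split is visible; none of these names are real block-layer symbols.

#include <stdbool.h>
#include <stdio.h>

struct fake_req {
	unsigned int remaining;	/* bytes not yet completed */
};

/* Returns true while the request still has bytes outstanding. */
static bool fake_update_request(struct fake_req *rq, unsigned int nr_bytes)
{
	if (nr_bytes >= rq->remaining)
		rq->remaining = 0;
	else
		rq->remaining -= nr_bytes;
	return rq->remaining != 0;
}

/* Mirrors the end_io_partial contract: true means "not finished yet". */
static bool fake_end_io_partial(struct fake_req *rq, unsigned int nr_bytes)
{
	if (fake_update_request(rq, nr_bytes))
		return true;
	printf("request fully completed, freeing\n");
	return false;
}

int main(void)
{
	struct fake_req rq = { .remaining = 4096 };

	while (fake_end_io_partial(&rq, 1024))
		printf("partial completion, %u bytes left\n", rq.remaining);
	return 0;
}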
@@ -67,6 +67,8 @@ enum ec_command {
 #define ACPI_EC_DELAY		500	/* Wait 500ms max. during EC ops */
 #define ACPI_EC_UDELAY_GLK	1000	/* Wait 1ms max. to get global lock */
 #define ACPI_EC_MSI_UDELAY	550	/* Wait 550us for MSI EC */
+#define ACPI_EC_CLEAR_MAX	100	/* Maximum number of events to query
+					 * when trying to clear the EC */
 
 enum {
 	EC_FLAGS_QUERY_PENDING,		/* Query is pending */
@@ -116,6 +118,7 @@ EXPORT_SYMBOL(first_ec);
 static int EC_FLAGS_MSI; /* Out-of-spec MSI controller */
 static int EC_FLAGS_VALIDATE_ECDT; /* ASUStec ECDTs need to be validated */
 static int EC_FLAGS_SKIP_DSDT_SCAN; /* Not all BIOS survive early DSDT scan */
+static int EC_FLAGS_CLEAR_ON_RESUME; /* Needs acpi_ec_clear() on boot/resume */
 
 /* --------------------------------------------------------------------------
                              Transaction Management
@@ -440,6 +443,29 @@ acpi_handle ec_get_handle(void)
 
 EXPORT_SYMBOL(ec_get_handle);
 
+static int acpi_ec_query_unlocked(struct acpi_ec *ec, u8 *data);
+
+/*
+ * Clears stale _Q events that might have accumulated in the EC.
+ * Run with locked ec mutex.
+ */
+static void acpi_ec_clear(struct acpi_ec *ec)
+{
+	int i, status;
+	u8 value = 0;
+
+	for (i = 0; i < ACPI_EC_CLEAR_MAX; i++) {
+		status = acpi_ec_query_unlocked(ec, &value);
+		if (status || !value)
+			break;
+	}
+
+	if (unlikely(i == ACPI_EC_CLEAR_MAX))
+		pr_warn("Warning: Maximum of %d stale EC events cleared\n", i);
+	else
+		pr_info("%d stale EC events cleared\n", i);
+}
+
 void acpi_ec_block_transactions(void)
 {
 	struct acpi_ec *ec = first_ec;
@@ -463,6 +489,10 @@ void acpi_ec_unblock_transactions(void)
 	mutex_lock(&ec->mutex);
 	/* Allow transactions to be carried out again */
 	clear_bit(EC_FLAGS_BLOCKED, &ec->flags);
 
+	if (EC_FLAGS_CLEAR_ON_RESUME)
+		acpi_ec_clear(ec);
+
 	mutex_unlock(&ec->mutex);
 }
 
@@ -821,6 +851,13 @@ static int acpi_ec_add(struct acpi_device *device)
 
 	/* EC is fully operational, allow queries */
 	clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags);
+
+	/* Clear stale _Q events if hardware might require that */
+	if (EC_FLAGS_CLEAR_ON_RESUME) {
+		mutex_lock(&ec->mutex);
+		acpi_ec_clear(ec);
+		mutex_unlock(&ec->mutex);
+	}
 	return ret;
 }
 
@@ -922,6 +959,30 @@ static int ec_enlarge_storm_threshold(const struct dmi_system_id *id)
 	return 0;
 }
 
+/*
+ * On some hardware it is necessary to clear events accumulated by the EC during
+ * sleep. These ECs stop reporting GPEs until they are manually polled, if too
+ * many events are accumulated. (e.g. Samsung Series 5/9 notebooks)
+ *
+ * https://bugzilla.kernel.org/show_bug.cgi?id=44161
+ *
+ * Ideally, the EC should also be instructed NOT to accumulate events during
+ * sleep (which Windows seems to do somehow), but the interface to control this
+ * behaviour is not known at this time.
+ *
+ * Models known to be affected are Samsung 530Uxx/535Uxx/540Uxx/550Pxx/900Xxx,
+ * however it is very likely that other Samsung models are affected.
+ *
+ * On systems which don't accumulate _Q events during sleep, this extra check
+ * should be harmless.
+ */
+static int ec_clear_on_resume(const struct dmi_system_id *id)
+{
+	pr_debug("Detected system needing EC poll on resume.\n");
+	EC_FLAGS_CLEAR_ON_RESUME = 1;
+	return 0;
+}
+
 static struct dmi_system_id ec_dmi_table[] __initdata = {
 	{
 	ec_skip_dsdt_scan, "Compal JFL92", {
@@ -965,6 +1026,9 @@ static struct dmi_system_id ec_dmi_table[] __initdata = {
 	ec_validate_ecdt, "ASUS hardware", {
 	DMI_MATCH(DMI_SYS_VENDOR, "ASUSTek Computer Inc."),
 	DMI_MATCH(DMI_PRODUCT_NAME, "L4R"),}, NULL},
+	{
+	ec_clear_on_resume, "Samsung hardware", {
+	DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD.")}, NULL},
 	{},
 };
 
@@ -77,18 +77,24 @@ bool acpi_dev_resource_memory(struct acpi_resource *ares, struct resource *res)
 	switch (ares->type) {
 	case ACPI_RESOURCE_TYPE_MEMORY24:
 		memory24 = &ares->data.memory24;
+		if (!memory24->address_length)
+			return false;
 		acpi_dev_get_memresource(res, memory24->minimum,
 					 memory24->address_length,
 					 memory24->write_protect);
 		break;
 	case ACPI_RESOURCE_TYPE_MEMORY32:
 		memory32 = &ares->data.memory32;
+		if (!memory32->address_length)
+			return false;
 		acpi_dev_get_memresource(res, memory32->minimum,
 					 memory32->address_length,
 					 memory32->write_protect);
 		break;
 	case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
 		fixed_memory32 = &ares->data.fixed_memory32;
+		if (!fixed_memory32->address_length)
+			return false;
 		acpi_dev_get_memresource(res, fixed_memory32->address,
 					 fixed_memory32->address_length,
 					 fixed_memory32->write_protect);
@@ -144,12 +150,16 @@ bool acpi_dev_resource_io(struct acpi_resource *ares, struct resource *res)
 	switch (ares->type) {
 	case ACPI_RESOURCE_TYPE_IO:
 		io = &ares->data.io;
+		if (!io->address_length)
+			return false;
 		acpi_dev_get_ioresource(res, io->minimum,
 					io->address_length,
 					io->io_decode);
 		break;
 	case ACPI_RESOURCE_TYPE_FIXED_IO:
 		fixed_io = &ares->data.fixed_io;
+		if (!fixed_io->address_length)
+			return false;
 		acpi_dev_get_ioresource(res, fixed_io->address,
 					fixed_io->address_length,
 					ACPI_DECODE_10);
@@ -4175,6 +4175,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
 
 	/* Seagate Momentus SpinPoint M8 seem to have FPMDA_AA issues */
 	{ "ST1000LM024 HN-M101MBB", "2AR10001",	ATA_HORKAGE_BROKEN_FPDMA_AA },
+	{ "ST1000LM024 HN-M101MBB", "2BA30001",	ATA_HORKAGE_BROKEN_FPDMA_AA },
 
 	/* Blacklist entries taken from Silicon Image 3124/3132
 	   Windows driver .inf file - also several Linux problem reports */
@@ -4225,6 +4226,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
 	/* devices that don't properly handle queued TRIM commands */
 	{ "Micron_M500*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM, },
 	{ "Crucial_CT???M500SSD1",	NULL,	ATA_HORKAGE_NO_NCQ_TRIM, },
+	{ "Crucial_CT???M500SSD3",	NULL,	ATA_HORKAGE_NO_NCQ_TRIM, },
 
 	/*
 	 * Some WD SATA-I drives spin up and down erratically when the link
@@ -874,7 +874,7 @@ bio_pageinc(struct bio *bio)
 		/* Non-zero page count for non-head members of
 		 * compound pages is no longer allowed by the kernel.
 		 */
-		page = compound_trans_head(bv.bv_page);
+		page = compound_head(bv.bv_page);
 		atomic_inc(&page->_count);
 	}
 }
@@ -887,7 +887,7 @@ bio_pagedec(struct bio *bio)
 	struct bvec_iter iter;
 
 	bio_for_each_segment(bv, bio, iter) {
-		page = compound_trans_head(bv.bv_page);
+		page = compound_head(bv.bv_page);
 		atomic_dec(&page->_count);
 	}
 }
@@ -53,7 +53,7 @@
 #define MTIP_FTL_REBUILD_TIMEOUT_MS	2400000
 
 /* unaligned IO handling */
-#define MTIP_MAX_UNALIGNED_SLOTS	8
+#define MTIP_MAX_UNALIGNED_SLOTS	2
 
 /* Macro to extract the tag bit number from a tag value. */
 #define MTIP_TAG_BIT(tag)	(tag & 0x1F)
@@ -612,6 +612,8 @@ static ssize_t disksize_store(struct device *dev,
 
 	disksize = PAGE_ALIGN(disksize);
 	meta = zram_meta_alloc(disksize);
+	if (!meta)
+		return -ENOMEM;
 	down_write(&zram->init_lock);
 	if (zram->init_done) {
 		up_write(&zram->init_lock);
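
acpi_ec_clear() above is a bounded drain loop: keep querying for stale events, stop early once the controller reports none, and cap the iteration count so broken firmware cannot stall boot or resume forever. Here is a self-contained sketch of that shape with a plain counter standing in for the embedded controller; all names are made up for illustration.

#include <stdio.h>

#define CLEAR_MAX 100

/* Simulated event source: reports a non-zero value while events remain. */
static int fake_query(int *pending, unsigned char *value)
{
	if (*pending == 0) {
		*value = 0;
		return 0;	/* no event pending */
	}
	(*pending)--;
	*value = 0x20;		/* arbitrary non-zero event number */
	return 0;
}

static void drain_events(int *pending)
{
	unsigned char value = 0;
	int i;

	for (i = 0; i < CLEAR_MAX; i++) {
		if (fake_query(pending, &value) || !value)
			break;
	}

	if (i == CLEAR_MAX)
		printf("warning: hit the %d-event cap, source may still be busy\n", i);
	else
		printf("%d stale events cleared\n", i);
}

int main(void)
{
	int pending = 7;

	drain_events(&pending);
	return 0;
}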
||||||
|
@ -242,7 +242,7 @@ of_at91_clk_master_setup(struct device_node *np, struct at91_pmc *pmc,
|
|||||||
|
|
||||||
irq = irq_of_parse_and_map(np, 0);
|
irq = irq_of_parse_and_map(np, 0);
|
||||||
if (!irq)
|
if (!irq)
|
||||||
return;
|
goto out_free_characteristics;
|
||||||
|
|
||||||
clk = at91_clk_register_master(pmc, irq, name, num_parents,
|
clk = at91_clk_register_master(pmc, irq, name, num_parents,
|
||||||
parent_names, layout,
|
parent_names, layout,
|
||||||
|
@ -494,6 +494,9 @@ static const struct file_operations nomadik_src_clk_debugfs_ops = {
|
|||||||
|
|
||||||
static int __init nomadik_src_clk_init_debugfs(void)
|
static int __init nomadik_src_clk_init_debugfs(void)
|
||||||
{
|
{
|
||||||
|
/* Vital for multiplatform */
|
||||||
|
if (!src_base)
|
||||||
|
return -ENODEV;
|
||||||
src_pcksr0_boot = readl(src_base + SRC_PCKSR0);
|
src_pcksr0_boot = readl(src_base + SRC_PCKSR0);
|
||||||
src_pcksr1_boot = readl(src_base + SRC_PCKSR1);
|
src_pcksr1_boot = readl(src_base + SRC_PCKSR1);
|
||||||
debugfs_create_file("nomadik-src-clk", S_IFREG | S_IRUGO,
|
debugfs_create_file("nomadik-src-clk", S_IFREG | S_IRUGO,
|
||||||
|
@ -2226,24 +2226,25 @@ EXPORT_SYMBOL_GPL(devm_clk_unregister);
|
|||||||
*/
|
*/
|
||||||
int __clk_get(struct clk *clk)
|
int __clk_get(struct clk *clk)
|
||||||
{
|
{
|
||||||
if (clk && !try_module_get(clk->owner))
|
if (clk) {
|
||||||
return 0;
|
if (!try_module_get(clk->owner))
|
||||||
|
return 0;
|
||||||
|
|
||||||
kref_get(&clk->ref);
|
kref_get(&clk->ref);
|
||||||
|
}
|
||||||
return 1;
|
return 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
void __clk_put(struct clk *clk)
|
void __clk_put(struct clk *clk)
|
||||||
{
|
{
|
||||||
if (WARN_ON_ONCE(IS_ERR(clk)))
|
if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
|
||||||
return;
|
return;
|
||||||
|
|
||||||
clk_prepare_lock();
|
clk_prepare_lock();
|
||||||
kref_put(&clk->ref, __clk_release);
|
kref_put(&clk->ref, __clk_release);
|
||||||
clk_prepare_unlock();
|
clk_prepare_unlock();
|
||||||
|
|
||||||
if (clk)
|
module_put(clk->owner);
|
||||||
module_put(clk->owner);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/*** clk rate change notifiers ***/
|
/*** clk rate change notifiers ***/
|
||||||
|
@ -179,6 +179,7 @@ static struct clk *clk_register_psc(struct device *dev,
|
|||||||
|
|
||||||
init.name = name;
|
init.name = name;
|
||||||
init.ops = &clk_psc_ops;
|
init.ops = &clk_psc_ops;
|
||||||
|
init.flags = 0;
|
||||||
init.parent_names = (parent_name ? &parent_name : NULL);
|
init.parent_names = (parent_name ? &parent_name : NULL);
|
||||||
init.num_parents = (parent_name ? 1 : 0);
|
init.num_parents = (parent_name ? 1 : 0);
|
||||||
|
|
||||||
|
@ -141,13 +141,6 @@ static const struct coreclk_soc_desc a370_coreclks = {
|
|||||||
.num_ratios = ARRAY_SIZE(a370_coreclk_ratios),
|
.num_ratios = ARRAY_SIZE(a370_coreclk_ratios),
|
||||||
};
|
};
|
||||||
|
|
||||||
static void __init a370_coreclk_init(struct device_node *np)
|
|
||||||
{
|
|
||||||
mvebu_coreclk_setup(np, &a370_coreclks);
|
|
||||||
}
|
|
||||||
CLK_OF_DECLARE(a370_core_clk, "marvell,armada-370-core-clock",
|
|
||||||
a370_coreclk_init);
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Clock Gating Control
|
* Clock Gating Control
|
||||||
*/
|
*/
|
||||||
@ -168,9 +161,15 @@ static const struct clk_gating_soc_desc a370_gating_desc[] __initconst = {
|
|||||||
{ }
|
{ }
|
||||||
};
|
};
|
||||||
|
|
||||||
static void __init a370_clk_gating_init(struct device_node *np)
|
static void __init a370_clk_init(struct device_node *np)
|
||||||
{
|
{
|
||||||
mvebu_clk_gating_setup(np, a370_gating_desc);
|
struct device_node *cgnp =
|
||||||
|
of_find_compatible_node(NULL, NULL, "marvell,armada-370-gating-clock");
|
||||||
|
|
||||||
|
mvebu_coreclk_setup(np, &a370_coreclks);
|
||||||
|
|
||||||
|
if (cgnp)
|
||||||
|
mvebu_clk_gating_setup(cgnp, a370_gating_desc);
|
||||||
}
|
}
|
||||||
CLK_OF_DECLARE(a370_clk_gating, "marvell,armada-370-gating-clock",
|
CLK_OF_DECLARE(a370_clk, "marvell,armada-370-core-clock", a370_clk_init);
|
||||||
a370_clk_gating_init);
|
|
||||||
|
@ -158,13 +158,6 @@ static const struct coreclk_soc_desc axp_coreclks = {
|
|||||||
.num_ratios = ARRAY_SIZE(axp_coreclk_ratios),
|
.num_ratios = ARRAY_SIZE(axp_coreclk_ratios),
|
||||||
};
|
};
|
||||||
|
|
||||||
static void __init axp_coreclk_init(struct device_node *np)
|
|
||||||
{
|
|
||||||
mvebu_coreclk_setup(np, &axp_coreclks);
|
|
||||||
}
|
|
||||||
CLK_OF_DECLARE(axp_core_clk, "marvell,armada-xp-core-clock",
|
|
||||||
axp_coreclk_init);
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Clock Gating Control
|
* Clock Gating Control
|
||||||
*/
|
*/
|
||||||
@ -202,9 +195,14 @@ static const struct clk_gating_soc_desc axp_gating_desc[] __initconst = {
|
|||||||
{ }
|
{ }
|
||||||
};
|
};
|
||||||
|
|
||||||
static void __init axp_clk_gating_init(struct device_node *np)
|
static void __init axp_clk_init(struct device_node *np)
|
||||||
{
|
{
|
||||||
mvebu_clk_gating_setup(np, axp_gating_desc);
|
struct device_node *cgnp =
|
||||||
|
of_find_compatible_node(NULL, NULL, "marvell,armada-xp-gating-clock");
|
||||||
|
|
||||||
|
mvebu_coreclk_setup(np, &axp_coreclks);
|
||||||
|
|
||||||
|
if (cgnp)
|
||||||
|
mvebu_clk_gating_setup(cgnp, axp_gating_desc);
|
||||||
}
|
}
|
||||||
CLK_OF_DECLARE(axp_clk_gating, "marvell,armada-xp-gating-clock",
|
CLK_OF_DECLARE(axp_clk, "marvell,armada-xp-core-clock", axp_clk_init);
|
||||||
axp_clk_gating_init);
|
|
||||||
|
@ -154,12 +154,6 @@ static const struct coreclk_soc_desc dove_coreclks = {
|
|||||||
.num_ratios = ARRAY_SIZE(dove_coreclk_ratios),
|
.num_ratios = ARRAY_SIZE(dove_coreclk_ratios),
|
||||||
};
|
};
|
||||||
|
|
||||||
static void __init dove_coreclk_init(struct device_node *np)
|
|
||||||
{
|
|
||||||
mvebu_coreclk_setup(np, &dove_coreclks);
|
|
||||||
}
|
|
||||||
CLK_OF_DECLARE(dove_core_clk, "marvell,dove-core-clock", dove_coreclk_init);
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Clock Gating Control
|
* Clock Gating Control
|
||||||
*/
|
*/
|
||||||
@ -186,9 +180,14 @@ static const struct clk_gating_soc_desc dove_gating_desc[] __initconst = {
|
|||||||
{ }
|
{ }
|
||||||
};
|
};
|
||||||
|
|
||||||
static void __init dove_clk_gating_init(struct device_node *np)
|
static void __init dove_clk_init(struct device_node *np)
|
||||||
{
|
{
|
||||||
mvebu_clk_gating_setup(np, dove_gating_desc);
|
struct device_node *cgnp =
|
||||||
|
of_find_compatible_node(NULL, NULL, "marvell,dove-gating-clock");
|
||||||
|
|
||||||
|
mvebu_coreclk_setup(np, &dove_coreclks);
|
||||||
|
|
||||||
|
if (cgnp)
|
||||||
|
mvebu_clk_gating_setup(cgnp, dove_gating_desc);
|
||||||
}
|
}
|
||||||
CLK_OF_DECLARE(dove_clk_gating, "marvell,dove-gating-clock",
|
CLK_OF_DECLARE(dove_clk, "marvell,dove-core-clock", dove_clk_init);
|
||||||
dove_clk_gating_init);
|
|
||||||
|
@ -193,13 +193,6 @@ static const struct coreclk_soc_desc kirkwood_coreclks = {
|
|||||||
.num_ratios = ARRAY_SIZE(kirkwood_coreclk_ratios),
|
.num_ratios = ARRAY_SIZE(kirkwood_coreclk_ratios),
|
||||||
};
|
};
|
||||||
|
|
||||||
static void __init kirkwood_coreclk_init(struct device_node *np)
|
|
||||||
{
|
|
||||||
mvebu_coreclk_setup(np, &kirkwood_coreclks);
|
|
||||||
}
|
|
||||||
CLK_OF_DECLARE(kirkwood_core_clk, "marvell,kirkwood-core-clock",
|
|
||||||
kirkwood_coreclk_init);
|
|
||||||
|
|
||||||
static const struct coreclk_soc_desc mv88f6180_coreclks = {
|
static const struct coreclk_soc_desc mv88f6180_coreclks = {
|
||||||
.get_tclk_freq = kirkwood_get_tclk_freq,
|
.get_tclk_freq = kirkwood_get_tclk_freq,
|
||||||
.get_cpu_freq = mv88f6180_get_cpu_freq,
|
.get_cpu_freq = mv88f6180_get_cpu_freq,
|
||||||
@ -208,13 +201,6 @@ static const struct coreclk_soc_desc mv88f6180_coreclks = {
|
|||||||
.num_ratios = ARRAY_SIZE(kirkwood_coreclk_ratios),
|
.num_ratios = ARRAY_SIZE(kirkwood_coreclk_ratios),
|
||||||
};
|
};
|
||||||
|
|
||||||
static void __init mv88f6180_coreclk_init(struct device_node *np)
|
|
||||||
{
|
|
||||||
mvebu_coreclk_setup(np, &mv88f6180_coreclks);
|
|
||||||
}
|
|
||||||
CLK_OF_DECLARE(mv88f6180_core_clk, "marvell,mv88f6180-core-clock",
|
|
||||||
mv88f6180_coreclk_init);
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Clock Gating Control
|
* Clock Gating Control
|
||||||
*/
|
*/
|
||||||
@ -239,9 +225,21 @@ static const struct clk_gating_soc_desc kirkwood_gating_desc[] __initconst = {
|
|||||||
{ }
|
{ }
|
||||||
};
|
};
|
||||||
|
|
||||||
static void __init kirkwood_clk_gating_init(struct device_node *np)
|
static void __init kirkwood_clk_init(struct device_node *np)
|
||||||
{
|
{
|
||||||
mvebu_clk_gating_setup(np, kirkwood_gating_desc);
|
struct device_node *cgnp =
|
||||||
|
of_find_compatible_node(NULL, NULL, "marvell,kirkwood-gating-clock");
|
||||||
|
|
||||||
|
|
||||||
|
if (of_device_is_compatible(np, "marvell,mv88f6180-core-clock"))
|
||||||
|
mvebu_coreclk_setup(np, &mv88f6180_coreclks);
|
||||||
|
else
|
||||||
|
mvebu_coreclk_setup(np, &kirkwood_coreclks);
|
||||||
|
|
||||||
|
if (cgnp)
|
||||||
|
mvebu_clk_gating_setup(cgnp, kirkwood_gating_desc);
|
||||||
}
|
}
|
||||||
CLK_OF_DECLARE(kirkwood_clk_gating, "marvell,kirkwood-gating-clock",
|
CLK_OF_DECLARE(kirkwood_clk, "marvell,kirkwood-core-clock",
|
||||||
kirkwood_clk_gating_init);
|
kirkwood_clk_init);
|
||||||
|
CLK_OF_DECLARE(mv88f6180_clk, "marvell,mv88f6180-core-clock",
|
||||||
|
kirkwood_clk_init);
|
||||||
|
@ -26,6 +26,8 @@ struct rcar_gen2_cpg {
|
|||||||
void __iomem *reg;
|
void __iomem *reg;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
#define CPG_FRQCRB 0x00000004
|
||||||
|
#define CPG_FRQCRB_KICK BIT(31)
|
||||||
#define CPG_SDCKCR 0x00000074
|
#define CPG_SDCKCR 0x00000074
|
||||||
#define CPG_PLL0CR 0x000000d8
|
#define CPG_PLL0CR 0x000000d8
|
||||||
#define CPG_FRQCRC 0x000000e0
|
#define CPG_FRQCRC 0x000000e0
|
||||||
@ -45,6 +47,7 @@ struct rcar_gen2_cpg {
|
|||||||
struct cpg_z_clk {
|
struct cpg_z_clk {
|
||||||
struct clk_hw hw;
|
struct clk_hw hw;
|
||||||
void __iomem *reg;
|
void __iomem *reg;
|
||||||
|
void __iomem *kick_reg;
|
||||||
};
|
};
|
||||||
|
|
||||||
#define to_z_clk(_hw) container_of(_hw, struct cpg_z_clk, hw)
|
#define to_z_clk(_hw) container_of(_hw, struct cpg_z_clk, hw)
|
||||||
@ -83,17 +86,45 @@ static int cpg_z_clk_set_rate(struct clk_hw *hw, unsigned long rate,
|
|||||||
{
|
{
|
||||||
struct cpg_z_clk *zclk = to_z_clk(hw);
|
struct cpg_z_clk *zclk = to_z_clk(hw);
|
||||||
unsigned int mult;
|
unsigned int mult;
|
||||||
u32 val;
|
u32 val, kick;
|
||||||
|
unsigned int i;
|
||||||
|
|
||||||
mult = div_u64((u64)rate * 32, parent_rate);
|
mult = div_u64((u64)rate * 32, parent_rate);
|
||||||
mult = clamp(mult, 1U, 32U);
|
mult = clamp(mult, 1U, 32U);
|
||||||
|
|
||||||
|
if (clk_readl(zclk->kick_reg) & CPG_FRQCRB_KICK)
|
||||||
|
return -EBUSY;
|
||||||
|
|
||||||
val = clk_readl(zclk->reg);
|
val = clk_readl(zclk->reg);
|
||||||
val &= ~CPG_FRQCRC_ZFC_MASK;
|
val &= ~CPG_FRQCRC_ZFC_MASK;
|
||||||
val |= (32 - mult) << CPG_FRQCRC_ZFC_SHIFT;
|
val |= (32 - mult) << CPG_FRQCRC_ZFC_SHIFT;
|
||||||
clk_writel(val, zclk->reg);
|
clk_writel(val, zclk->reg);
|
||||||
|
|
||||||
return 0;
|
/*
|
||||||
|
* Set KICK bit in FRQCRB to update hardware setting and wait for
|
||||||
|
* clock change completion.
|
||||||
|
*/
|
||||||
|
kick = clk_readl(zclk->kick_reg);
|
||||||
|
kick |= CPG_FRQCRB_KICK;
|
||||||
|
clk_writel(kick, zclk->kick_reg);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Note: There is no HW information about the worst case latency.
|
||||||
|
*
|
||||||
|
* Using experimental measurements, it seems that no more than
|
||||||
|
* ~10 iterations are needed, independently of the CPU rate.
|
||||||
|
* Since this value might be dependant of external xtal rate, pll1
|
||||||
|
* rate or even the other emulation clocks rate, use 1000 as a
|
||||||
|
* "super" safe value.
|
||||||
|
*/
|
||||||
|
for (i = 1000; i; i--) {
|
||||||
|
if (!(clk_readl(zclk->kick_reg) & CPG_FRQCRB_KICK))
|
||||||
|
return 0;
|
||||||
|
|
||||||
|
cpu_relax();
|
||||||
|
}
|
||||||
|
|
||||||
|
return -ETIMEDOUT;
|
||||||
}
|
}
|
||||||
|
|
||||||
static const struct clk_ops cpg_z_clk_ops = {
|
static const struct clk_ops cpg_z_clk_ops = {
|
||||||
@ -120,6 +151,7 @@ static struct clk * __init cpg_z_clk_register(struct rcar_gen2_cpg *cpg)
|
|||||||
init.num_parents = 1;
|
init.num_parents = 1;
|
||||||
|
|
||||||
zclk->reg = cpg->reg + CPG_FRQCRC;
|
zclk->reg = cpg->reg + CPG_FRQCRC;
|
||||||
|
zclk->kick_reg = cpg->reg + CPG_FRQCRB;
|
||||||
zclk->hw.init = &init;
|
zclk->hw.init = &init;
|
||||||
|
|
||||||
clk = clk_register(NULL, &zclk->hw);
|
clk = clk_register(NULL, &zclk->hw);
|
||||||
@ -186,7 +218,7 @@ rcar_gen2_cpg_register_clock(struct device_node *np, struct rcar_gen2_cpg *cpg,
|
|||||||
const char *name)
|
const char *name)
|
||||||
{
|
{
|
||||||
const struct clk_div_table *table = NULL;
|
const struct clk_div_table *table = NULL;
|
||||||
const char *parent_name = "main";
|
const char *parent_name;
|
||||||
unsigned int shift;
|
unsigned int shift;
|
||||||
unsigned int mult = 1;
|
unsigned int mult = 1;
|
||||||
unsigned int div = 1;
|
unsigned int div = 1;
|
||||||
@ -201,23 +233,31 @@ rcar_gen2_cpg_register_clock(struct device_node *np, struct rcar_gen2_cpg *cpg,
|
|||||||
* the multiplier value.
|
* the multiplier value.
|
||||||
*/
|
*/
|
||||||
u32 value = clk_readl(cpg->reg + CPG_PLL0CR);
|
u32 value = clk_readl(cpg->reg + CPG_PLL0CR);
|
||||||
|
parent_name = "main";
|
||||||
mult = ((value >> 24) & ((1 << 7) - 1)) + 1;
|
mult = ((value >> 24) & ((1 << 7) - 1)) + 1;
|
||||||
} else if (!strcmp(name, "pll1")) {
|
} else if (!strcmp(name, "pll1")) {
|
||||||
|
parent_name = "main";
|
||||||
mult = config->pll1_mult / 2;
|
mult = config->pll1_mult / 2;
|
||||||
} else if (!strcmp(name, "pll3")) {
|
} else if (!strcmp(name, "pll3")) {
|
||||||
|
parent_name = "main";
|
||||||
mult = config->pll3_mult;
|
mult = config->pll3_mult;
|
||||||
} else if (!strcmp(name, "lb")) {
|
} else if (!strcmp(name, "lb")) {
|
||||||
|
parent_name = "pll1_div2";
|
||||||
div = cpg_mode & BIT(18) ? 36 : 24;
|
div = cpg_mode & BIT(18) ? 36 : 24;
|
||||||
} else if (!strcmp(name, "qspi")) {
|
} else if (!strcmp(name, "qspi")) {
|
||||||
|
parent_name = "pll1_div2";
|
||||||
div = (cpg_mode & (BIT(3) | BIT(2) | BIT(1))) == BIT(2)
|
div = (cpg_mode & (BIT(3) | BIT(2) | BIT(1))) == BIT(2)
|
||||||
? 16 : 20;
|
? 8 : 10;
|
||||||
} else if (!strcmp(name, "sdh")) {
|
} else if (!strcmp(name, "sdh")) {
|
||||||
|
parent_name = "pll1_div2";
|
||||||
table = cpg_sdh_div_table;
|
table = cpg_sdh_div_table;
|
||||||
shift = 8;
|
shift = 8;
|
||||||
} else if (!strcmp(name, "sd0")) {
|
} else if (!strcmp(name, "sd0")) {
|
||||||
|
parent_name = "pll1_div2";
|
||||||
table = cpg_sd01_div_table;
|
table = cpg_sd01_div_table;
|
||||||
shift = 4;
|
shift = 4;
|
||||||
} else if (!strcmp(name, "sd1")) {
|
} else if (!strcmp(name, "sd1")) {
|
||||||
|
parent_name = "pll1_div2";
|
||||||
table = cpg_sd01_div_table;
|
table = cpg_sd01_div_table;
|
||||||
shift = 0;
|
shift = 0;
|
||||||
} else if (!strcmp(name, "z")) {
|
} else if (!strcmp(name, "z")) {
|
||||||
|
@ -59,7 +59,7 @@ static int get_div(struct tegra_clk_frac_div *divider, unsigned long rate,
|
|||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
if (divider_ux1 > get_max_div(divider))
|
if (divider_ux1 > get_max_div(divider))
|
||||||
return -EINVAL;
|
return get_max_div(divider);
|
||||||
|
|
||||||
return divider_ux1;
|
return divider_ux1;
|
||||||
}
|
}
|
||||||
|
@ -180,9 +180,13 @@ enum clk_id {
|
|||||||
tegra_clk_sbc6_8,
|
tegra_clk_sbc6_8,
|
||||||
tegra_clk_sclk,
|
tegra_clk_sclk,
|
||||||
tegra_clk_sdmmc1,
|
tegra_clk_sdmmc1,
|
||||||
|
tegra_clk_sdmmc1_8,
|
||||||
tegra_clk_sdmmc2,
|
tegra_clk_sdmmc2,
|
||||||
|
tegra_clk_sdmmc2_8,
|
||||||
tegra_clk_sdmmc3,
|
tegra_clk_sdmmc3,
|
||||||
|
tegra_clk_sdmmc3_8,
|
||||||
tegra_clk_sdmmc4,
|
tegra_clk_sdmmc4,
|
||||||
|
tegra_clk_sdmmc4_8,
|
||||||
tegra_clk_se,
|
tegra_clk_se,
|
||||||
tegra_clk_soc_therm,
|
tegra_clk_soc_therm,
|
||||||
tegra_clk_sor0,
|
tegra_clk_sor0,
|
||||||
|
@ -371,9 +371,7 @@ static const char *mux_pllp3_pllc_clkm[] = {
|
|||||||
static const char *mux_pllm_pllc_pllp_plla_pllc2_c3_clkm[] = {
|
static const char *mux_pllm_pllc_pllp_plla_pllc2_c3_clkm[] = {
|
||||||
"pll_m", "pll_c", "pll_p", "pll_a", "pll_c2", "pll_c3", "clk_m"
|
"pll_m", "pll_c", "pll_p", "pll_a", "pll_c2", "pll_c3", "clk_m"
|
||||||
};
|
};
|
||||||
static u32 mux_pllm_pllc_pllp_plla_pllc2_c3_clkm_idx[] = {
|
#define mux_pllm_pllc_pllp_plla_pllc2_c3_clkm_idx NULL
|
||||||
[0] = 0, [1] = 1, [2] = 2, [3] = 3, [4] = 4, [5] = 6,
|
|
||||||
};
|
|
||||||
|
|
||||||
static const char *mux_pllm_pllc2_c_c3_pllp_plla_pllc4[] = {
|
static const char *mux_pllm_pllc2_c_c3_pllp_plla_pllc4[] = {
|
||||||
"pll_m", "pll_c2", "pll_c", "pll_c3", "pll_p", "pll_a_out0", "pll_c4",
|
"pll_m", "pll_c2", "pll_c", "pll_c3", "pll_p", "pll_a_out0", "pll_c4",
|
||||||
@ -465,6 +463,10 @@ static struct tegra_periph_init_data periph_clks[] = {
|
|||||||
MUX("adx1", mux_plla_pllc_pllp_clkm, CLK_SOURCE_ADX1, 180, TEGRA_PERIPH_ON_APB, tegra_clk_adx1),
|
MUX("adx1", mux_plla_pllc_pllp_clkm, CLK_SOURCE_ADX1, 180, TEGRA_PERIPH_ON_APB, tegra_clk_adx1),
|
||||||
MUX("amx1", mux_plla_pllc_pllp_clkm, CLK_SOURCE_AMX1, 185, TEGRA_PERIPH_ON_APB, tegra_clk_amx1),
|
MUX("amx1", mux_plla_pllc_pllp_clkm, CLK_SOURCE_AMX1, 185, TEGRA_PERIPH_ON_APB, tegra_clk_amx1),
|
||||||
MUX("vi_sensor2", mux_pllm_pllc2_c_c3_pllp_plla, CLK_SOURCE_VI_SENSOR2, 20, TEGRA_PERIPH_NO_RESET, tegra_clk_vi_sensor2),
|
MUX("vi_sensor2", mux_pllm_pllc2_c_c3_pllp_plla, CLK_SOURCE_VI_SENSOR2, 20, TEGRA_PERIPH_NO_RESET, tegra_clk_vi_sensor2),
|
||||||
|
MUX8("sdmmc1", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SDMMC1, 14, 0, tegra_clk_sdmmc1_8),
|
||||||
|
MUX8("sdmmc2", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SDMMC2, 9, 0, tegra_clk_sdmmc2_8),
|
||||||
|
MUX8("sdmmc3", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SDMMC3, 69, 0, tegra_clk_sdmmc3_8),
|
||||||
|
MUX8("sdmmc4", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SDMMC4, 15, 0, tegra_clk_sdmmc4_8),
|
||||||
MUX8("sbc1", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SBC1, 41, TEGRA_PERIPH_ON_APB, tegra_clk_sbc1_8),
|
MUX8("sbc1", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SBC1, 41, TEGRA_PERIPH_ON_APB, tegra_clk_sbc1_8),
|
||||||
MUX8("sbc2", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SBC2, 44, TEGRA_PERIPH_ON_APB, tegra_clk_sbc2_8),
|
MUX8("sbc2", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SBC2, 44, TEGRA_PERIPH_ON_APB, tegra_clk_sbc2_8),
|
||||||
MUX8("sbc3", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SBC3, 46, TEGRA_PERIPH_ON_APB, tegra_clk_sbc3_8),
|
MUX8("sbc3", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SBC3, 46, TEGRA_PERIPH_ON_APB, tegra_clk_sbc3_8),
|
||||||
@ -492,7 +494,7 @@ static struct tegra_periph_init_data periph_clks[] = {
|
|||||||
UART("uartb", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_UARTB, 7, tegra_clk_uartb),
|
UART("uartb", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_UARTB, 7, tegra_clk_uartb),
|
||||||
UART("uartc", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_UARTC, 55, tegra_clk_uartc),
|
UART("uartc", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_UARTC, 55, tegra_clk_uartc),
|
||||||
UART("uartd", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_UARTD, 65, tegra_clk_uartd),
|
UART("uartd", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_UARTD, 65, tegra_clk_uartd),
|
||||||
UART("uarte", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_UARTE, 65, tegra_clk_uarte),
|
UART("uarte", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_UARTE, 66, tegra_clk_uarte),
|
||||||
XUSB("xusb_host_src", mux_clkm_pllp_pllc_pllre, CLK_SOURCE_XUSB_HOST_SRC, 143, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, tegra_clk_xusb_host_src),
|
XUSB("xusb_host_src", mux_clkm_pllp_pllc_pllre, CLK_SOURCE_XUSB_HOST_SRC, 143, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, tegra_clk_xusb_host_src),
|
||||||
XUSB("xusb_falcon_src", mux_clkm_pllp_pllc_pllre, CLK_SOURCE_XUSB_FALCON_SRC, 143, TEGRA_PERIPH_NO_RESET, tegra_clk_xusb_falcon_src),
|
XUSB("xusb_falcon_src", mux_clkm_pllp_pllc_pllre, CLK_SOURCE_XUSB_FALCON_SRC, 143, TEGRA_PERIPH_NO_RESET, tegra_clk_xusb_falcon_src),
|
||||||
XUSB("xusb_fs_src", mux_clkm_48M_pllp_480M, CLK_SOURCE_XUSB_FS_SRC, 143, TEGRA_PERIPH_NO_RESET, tegra_clk_xusb_fs_src),
|
XUSB("xusb_fs_src", mux_clkm_48M_pllp_480M, CLK_SOURCE_XUSB_FS_SRC, 143, TEGRA_PERIPH_NO_RESET, tegra_clk_xusb_fs_src),
|
||||||
|
@ -120,7 +120,7 @@ void __init tegra_super_clk_gen4_init(void __iomem *clk_base,
|
|||||||
ARRAY_SIZE(cclk_lp_parents),
|
ARRAY_SIZE(cclk_lp_parents),
|
||||||
CLK_SET_RATE_PARENT,
|
CLK_SET_RATE_PARENT,
|
||||||
clk_base + CCLKLP_BURST_POLICY,
|
clk_base + CCLKLP_BURST_POLICY,
|
||||||
0, 4, 8, 9, NULL);
|
TEGRA_DIVIDER_2, 4, 8, 9, NULL);
|
||||||
*dt_clk = clk;
|
*dt_clk = clk;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -682,12 +682,12 @@ static struct tegra_clk tegra114_clks[tegra_clk_max] __initdata = {
         [tegra_clk_timer] = { .dt_id = TEGRA114_CLK_TIMER, .present = true },
         [tegra_clk_uarta] = { .dt_id = TEGRA114_CLK_UARTA, .present = true },
         [tegra_clk_uartd] = { .dt_id = TEGRA114_CLK_UARTD, .present = true },
-        [tegra_clk_sdmmc2] = { .dt_id = TEGRA114_CLK_SDMMC2, .present = true },
+        [tegra_clk_sdmmc2_8] = { .dt_id = TEGRA114_CLK_SDMMC2, .present = true },
         [tegra_clk_i2s1] = { .dt_id = TEGRA114_CLK_I2S1, .present = true },
         [tegra_clk_i2c1] = { .dt_id = TEGRA114_CLK_I2C1, .present = true },
         [tegra_clk_ndflash] = { .dt_id = TEGRA114_CLK_NDFLASH, .present = true },
-        [tegra_clk_sdmmc1] = { .dt_id = TEGRA114_CLK_SDMMC1, .present = true },
-        [tegra_clk_sdmmc4] = { .dt_id = TEGRA114_CLK_SDMMC4, .present = true },
+        [tegra_clk_sdmmc1_8] = { .dt_id = TEGRA114_CLK_SDMMC1, .present = true },
+        [tegra_clk_sdmmc4_8] = { .dt_id = TEGRA114_CLK_SDMMC4, .present = true },
         [tegra_clk_pwm] = { .dt_id = TEGRA114_CLK_PWM, .present = true },
         [tegra_clk_i2s0] = { .dt_id = TEGRA114_CLK_I2S0, .present = true },
         [tegra_clk_i2s2] = { .dt_id = TEGRA114_CLK_I2S2, .present = true },
@@ -723,7 +723,7 @@ static struct tegra_clk tegra114_clks[tegra_clk_max] __initdata = {
         [tegra_clk_bsev] = { .dt_id = TEGRA114_CLK_BSEV, .present = true },
         [tegra_clk_i2c3] = { .dt_id = TEGRA114_CLK_I2C3, .present = true },
         [tegra_clk_sbc4_8] = { .dt_id = TEGRA114_CLK_SBC4, .present = true },
-        [tegra_clk_sdmmc3] = { .dt_id = TEGRA114_CLK_SDMMC3, .present = true },
+        [tegra_clk_sdmmc3_8] = { .dt_id = TEGRA114_CLK_SDMMC3, .present = true },
         [tegra_clk_owr] = { .dt_id = TEGRA114_CLK_OWR, .present = true },
         [tegra_clk_csite] = { .dt_id = TEGRA114_CLK_CSITE, .present = true },
         [tegra_clk_la] = { .dt_id = TEGRA114_CLK_LA, .present = true },
@@ -516,11 +516,11 @@ static struct div_nmp pllp_nmp = {
 };
 
 static struct tegra_clk_pll_freq_table pll_p_freq_table[] = {
-        {12000000, 216000000, 432, 12, 1, 8},
-        {13000000, 216000000, 432, 13, 1, 8},
-        {16800000, 216000000, 360, 14, 1, 8},
-        {19200000, 216000000, 360, 16, 1, 8},
-        {26000000, 216000000, 432, 26, 1, 8},
+        {12000000, 408000000, 408, 12, 0, 8},
+        {13000000, 408000000, 408, 13, 0, 8},
+        {16800000, 408000000, 340, 14, 0, 8},
+        {19200000, 408000000, 340, 16, 0, 8},
+        {26000000, 408000000, 408, 26, 0, 8},
         {0, 0, 0, 0, 0, 0},
 };
 
@@ -570,6 +570,15 @@ static struct tegra_clk_pll_params pll_a_params = {
         .flags = TEGRA_PLL_HAS_CPCON | TEGRA_PLL_USE_LOCK,
 };
 
+static struct div_nmp plld_nmp = {
+        .divm_shift = 0,
+        .divm_width = 5,
+        .divn_shift = 8,
+        .divn_width = 11,
+        .divp_shift = 20,
+        .divp_width = 3,
+};
+
 static struct tegra_clk_pll_freq_table pll_d_freq_table[] = {
         {12000000, 216000000, 864, 12, 4, 12},
         {13000000, 216000000, 864, 13, 4, 12},
@@ -603,19 +612,18 @@ static struct tegra_clk_pll_params pll_d_params = {
         .lock_mask = PLL_BASE_LOCK,
         .lock_enable_bit_idx = PLLDU_MISC_LOCK_ENABLE,
         .lock_delay = 1000,
-        .div_nmp = &pllp_nmp,
+        .div_nmp = &plld_nmp,
         .freq_table = pll_d_freq_table,
         .flags = TEGRA_PLL_HAS_CPCON | TEGRA_PLL_SET_LFCON |
                  TEGRA_PLL_USE_LOCK,
 };
 
 static struct tegra_clk_pll_freq_table tegra124_pll_d2_freq_table[] = {
-        { 12000000, 148500000, 99, 1, 8},
-        { 12000000, 594000000, 99, 1, 1},
-        { 13000000, 594000000, 91, 1, 1}, /* actual: 591.5 MHz */
-        { 16800000, 594000000, 71, 1, 1}, /* actual: 596.4 MHz */
-        { 19200000, 594000000, 62, 1, 1}, /* actual: 595.2 MHz */
-        { 26000000, 594000000, 91, 2, 1}, /* actual: 591.5 MHz */
+        { 12000000, 594000000, 99, 1, 2},
+        { 13000000, 594000000, 91, 1, 2}, /* actual: 591.5 MHz */
+        { 16800000, 594000000, 71, 1, 2}, /* actual: 596.4 MHz */
+        { 19200000, 594000000, 62, 1, 2}, /* actual: 595.2 MHz */
+        { 26000000, 594000000, 91, 2, 2}, /* actual: 591.5 MHz */
         { 0, 0, 0, 0, 0, 0 },
 };
 
@@ -753,21 +761,19 @@ static struct tegra_clk tegra124_clks[tegra_clk_max] __initdata = {
         [tegra_clk_rtc] = { .dt_id = TEGRA124_CLK_RTC, .present = true },
         [tegra_clk_timer] = { .dt_id = TEGRA124_CLK_TIMER, .present = true },
         [tegra_clk_uarta] = { .dt_id = TEGRA124_CLK_UARTA, .present = true },
-        [tegra_clk_sdmmc2] = { .dt_id = TEGRA124_CLK_SDMMC2, .present = true },
+        [tegra_clk_sdmmc2_8] = { .dt_id = TEGRA124_CLK_SDMMC2, .present = true },
         [tegra_clk_i2s1] = { .dt_id = TEGRA124_CLK_I2S1, .present = true },
         [tegra_clk_i2c1] = { .dt_id = TEGRA124_CLK_I2C1, .present = true },
         [tegra_clk_ndflash] = { .dt_id = TEGRA124_CLK_NDFLASH, .present = true },
-        [tegra_clk_sdmmc1] = { .dt_id = TEGRA124_CLK_SDMMC1, .present = true },
-        [tegra_clk_sdmmc4] = { .dt_id = TEGRA124_CLK_SDMMC4, .present = true },
+        [tegra_clk_sdmmc1_8] = { .dt_id = TEGRA124_CLK_SDMMC1, .present = true },
+        [tegra_clk_sdmmc4_8] = { .dt_id = TEGRA124_CLK_SDMMC4, .present = true },
         [tegra_clk_pwm] = { .dt_id = TEGRA124_CLK_PWM, .present = true },
         [tegra_clk_i2s2] = { .dt_id = TEGRA124_CLK_I2S2, .present = true },
-        [tegra_clk_gr2d] = { .dt_id = TEGRA124_CLK_GR_2D, .present = true },
         [tegra_clk_usbd] = { .dt_id = TEGRA124_CLK_USBD, .present = true },
         [tegra_clk_isp_8] = { .dt_id = TEGRA124_CLK_ISP, .present = true },
-        [tegra_clk_gr3d] = { .dt_id = TEGRA124_CLK_GR_3D, .present = true },
         [tegra_clk_disp2] = { .dt_id = TEGRA124_CLK_DISP2, .present = true },
         [tegra_clk_disp1] = { .dt_id = TEGRA124_CLK_DISP1, .present = true },
-        [tegra_clk_host1x] = { .dt_id = TEGRA124_CLK_HOST1X, .present = true },
+        [tegra_clk_host1x_8] = { .dt_id = TEGRA124_CLK_HOST1X, .present = true },
         [tegra_clk_vcp] = { .dt_id = TEGRA124_CLK_VCP, .present = true },
         [tegra_clk_i2s0] = { .dt_id = TEGRA124_CLK_I2S0, .present = true },
         [tegra_clk_apbdma] = { .dt_id = TEGRA124_CLK_APBDMA, .present = true },
@@ -794,7 +800,7 @@ static struct tegra_clk tegra124_clks[tegra_clk_max] __initdata = {
         [tegra_clk_uartd] = { .dt_id = TEGRA124_CLK_UARTD, .present = true },
         [tegra_clk_i2c3] = { .dt_id = TEGRA124_CLK_I2C3, .present = true },
         [tegra_clk_sbc4] = { .dt_id = TEGRA124_CLK_SBC4, .present = true },
-        [tegra_clk_sdmmc3] = { .dt_id = TEGRA124_CLK_SDMMC3, .present = true },
+        [tegra_clk_sdmmc3_8] = { .dt_id = TEGRA124_CLK_SDMMC3, .present = true },
         [tegra_clk_pcie] = { .dt_id = TEGRA124_CLK_PCIE, .present = true },
         [tegra_clk_owr] = { .dt_id = TEGRA124_CLK_OWR, .present = true },
         [tegra_clk_afi] = { .dt_id = TEGRA124_CLK_AFI, .present = true },
@@ -1286,9 +1292,9 @@ static void __init tegra124_pll_init(void __iomem *clk_base,
         clk_register_clkdev(clk, "pll_d2", NULL);
         clks[TEGRA124_CLK_PLL_D2] = clk;
 
-        /* PLLD2_OUT0 ?? */
+        /* PLLD2_OUT0 */
         clk = clk_register_fixed_factor(NULL, "pll_d2_out0", "pll_d2",
-                                        CLK_SET_RATE_PARENT, 1, 2);
+                                        CLK_SET_RATE_PARENT, 1, 1);
         clk_register_clkdev(clk, "pll_d2_out0", NULL);
         clks[TEGRA124_CLK_PLL_D2_OUT0] = clk;
 
@@ -574,6 +574,8 @@ static struct tegra_clk tegra20_clks[tegra_clk_max] __initdata = {
         [tegra_clk_tvdac] = { .dt_id = TEGRA20_CLK_TVDAC, .present = true },
         [tegra_clk_vi_sensor] = { .dt_id = TEGRA20_CLK_VI_SENSOR, .present = true },
         [tegra_clk_afi] = { .dt_id = TEGRA20_CLK_AFI, .present = true },
+        [tegra_clk_fuse] = { .dt_id = TEGRA20_CLK_FUSE, .present = true },
+        [tegra_clk_kfuse] = { .dt_id = TEGRA20_CLK_KFUSE, .present = true },
 };
 
 static unsigned long tegra20_clk_measure_input_freq(void)
@@ -1109,6 +1109,21 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
                         goto err_set_policy_cpu;
         }
 
+        /* related cpus should atleast have policy->cpus */
+        cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
+
+        /*
+         * affected cpus must always be the one, which are online. We aren't
+         * managing offline cpus here.
+         */
+        cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
+
+        if (!frozen) {
+                policy->user_policy.min = policy->min;
+                policy->user_policy.max = policy->max;
+        }
+
+        down_write(&policy->rwsem);
         write_lock_irqsave(&cpufreq_driver_lock, flags);
         for_each_cpu(j, policy->cpus)
                 per_cpu(cpufreq_cpu_data, j) = policy;
@@ -1162,20 +1177,6 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
                 }
         }
 
-        /* related cpus should atleast have policy->cpus */
-        cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
-
-        /*
-         * affected cpus must always be the one, which are online. We aren't
-         * managing offline cpus here.
-         */
-        cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
-
-        if (!frozen) {
-                policy->user_policy.min = policy->min;
-                policy->user_policy.max = policy->max;
-        }
-
         blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
                                      CPUFREQ_START, policy);
 
@@ -1206,6 +1207,7 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
                 policy->user_policy.policy = policy->policy;
                 policy->user_policy.governor = policy->governor;
         }
+        up_write(&policy->rwsem);
 
         kobject_uevent(&policy->kobj, KOBJ_ADD);
         up_read(&cpufreq_rwsem);
@@ -1546,23 +1548,16 @@ static unsigned int __cpufreq_get(unsigned int cpu)
  */
 unsigned int cpufreq_get(unsigned int cpu)
 {
-        struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
+        struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
         unsigned int ret_freq = 0;
 
-        if (cpufreq_disabled() || !cpufreq_driver)
-                return -ENOENT;
-
-        BUG_ON(!policy);
-
-        if (!down_read_trylock(&cpufreq_rwsem))
-                return 0;
-
-        down_read(&policy->rwsem);
-
-        ret_freq = __cpufreq_get(cpu);
-
-        up_read(&policy->rwsem);
-        up_read(&cpufreq_rwsem);
+        if (policy) {
+                down_read(&policy->rwsem);
+                ret_freq = __cpufreq_get(cpu);
+                up_read(&policy->rwsem);
+
+                cpufreq_cpu_put(policy);
+        }
 
         return ret_freq;
 }
@@ -916,7 +916,7 @@ static int lookup_existing_device(struct device *dev, void *data)
                 old->config_rom_retries = 0;
                 fw_notice(card, "rediscovered device %s\n", dev_name(dev));
 
-                PREPARE_DELAYED_WORK(&old->work, fw_device_update);
+                old->workfn = fw_device_update;
                 fw_schedule_device_work(old, 0);
 
                 if (current_node == card->root_node)
@@ -1075,7 +1075,7 @@ static void fw_device_init(struct work_struct *work)
         if (atomic_cmpxchg(&device->state,
                            FW_DEVICE_INITIALIZING,
                            FW_DEVICE_RUNNING) == FW_DEVICE_GONE) {
-                PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown);
+                device->workfn = fw_device_shutdown;
                 fw_schedule_device_work(device, SHUTDOWN_DELAY);
         } else {
                 fw_notice(card, "created device %s: GUID %08x%08x, S%d00\n",
@@ -1196,13 +1196,20 @@ static void fw_device_refresh(struct work_struct *work)
                   dev_name(&device->device), fw_rcode_string(ret));
 gone:
         atomic_set(&device->state, FW_DEVICE_GONE);
-        PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown);
+        device->workfn = fw_device_shutdown;
         fw_schedule_device_work(device, SHUTDOWN_DELAY);
 out:
         if (node_id == card->root_node->node_id)
                 fw_schedule_bm_work(card, 0);
 }
 
+static void fw_device_workfn(struct work_struct *work)
+{
+        struct fw_device *device = container_of(to_delayed_work(work),
+                                                struct fw_device, work);
+        device->workfn(work);
+}
+
 void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
 {
         struct fw_device *device;
@@ -1252,7 +1259,8 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
                  * power-up after getting plugged in. We schedule the
                  * first config rom scan half a second after bus reset.
                  */
-                INIT_DELAYED_WORK(&device->work, fw_device_init);
+                device->workfn = fw_device_init;
+                INIT_DELAYED_WORK(&device->work, fw_device_workfn);
                 fw_schedule_device_work(device, INITIAL_DELAY);
                 break;
 
@@ -1268,7 +1276,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
                 if (atomic_cmpxchg(&device->state,
                                    FW_DEVICE_RUNNING,
                                    FW_DEVICE_INITIALIZING) == FW_DEVICE_RUNNING) {
-                        PREPARE_DELAYED_WORK(&device->work, fw_device_refresh);
+                        device->workfn = fw_device_refresh;
                         fw_schedule_device_work(device,
                                 device->is_local ? 0 : INITIAL_DELAY);
                 }
@@ -1283,7 +1291,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
                 smp_wmb(); /* update node_id before generation */
                 device->generation = card->generation;
                 if (atomic_read(&device->state) == FW_DEVICE_RUNNING) {
-                        PREPARE_DELAYED_WORK(&device->work, fw_device_update);
+                        device->workfn = fw_device_update;
                         fw_schedule_device_work(device, 0);
                 }
                 break;
@@ -1308,7 +1316,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
                 device = node->data;
                 if (atomic_xchg(&device->state,
                                 FW_DEVICE_GONE) == FW_DEVICE_RUNNING) {
-                        PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown);
+                        device->workfn = fw_device_shutdown;
                         fw_schedule_device_work(device,
                                 list_empty(&card->link) ? 0 : SHUTDOWN_DELAY);
                 }
@@ -929,8 +929,6 @@ static void fwnet_write_complete(struct fw_card *card, int rcode,
         if (rcode == RCODE_COMPLETE) {
                 fwnet_transmit_packet_done(ptask);
         } else {
-                fwnet_transmit_packet_failed(ptask);
-
                 if (printk_timed_ratelimit(&j, 1000) || rcode != last_rcode) {
                         dev_err(&ptask->dev->netdev->dev,
                                 "fwnet_write_complete failed: %x (skipped %d)\n",
@@ -938,8 +936,10 @@ static void fwnet_write_complete(struct fw_card *card, int rcode,
 
                         errors_skipped = 0;
                         last_rcode = rcode;
-                } else
+                } else {
                         errors_skipped++;
+                }
+                fwnet_transmit_packet_failed(ptask);
         }
 }
 
@@ -290,7 +290,6 @@ static char ohci_driver_name[] = KBUILD_MODNAME;
 #define QUIRK_NO_MSI 0x10
 #define QUIRK_TI_SLLZ059 0x20
 #define QUIRK_IR_WAKE 0x40
-#define QUIRK_PHY_LCTRL_TIMEOUT 0x80
 
 /* In case of multiple matches in ohci_quirks[], only the first one is used. */
 static const struct {
@@ -303,10 +302,7 @@ static const struct {
                 QUIRK_BE_HEADERS},
 
         {PCI_VENDOR_ID_ATT, PCI_DEVICE_ID_AGERE_FW643, 6,
-                QUIRK_PHY_LCTRL_TIMEOUT | QUIRK_NO_MSI},
-
-        {PCI_VENDOR_ID_ATT, PCI_ANY_ID, PCI_ANY_ID,
-                QUIRK_PHY_LCTRL_TIMEOUT},
+                QUIRK_NO_MSI},
 
         {PCI_VENDOR_ID_CREATIVE, PCI_DEVICE_ID_CREATIVE_SB1394, PCI_ANY_ID,
                 QUIRK_RESET_PACKET},
@@ -353,7 +349,6 @@ MODULE_PARM_DESC(quirks, "Chip quirks (default = 0"
         ", disable MSI = " __stringify(QUIRK_NO_MSI)
         ", TI SLLZ059 erratum = " __stringify(QUIRK_TI_SLLZ059)
         ", IR wake unreliable = " __stringify(QUIRK_IR_WAKE)
-        ", phy LCtrl timeout = " __stringify(QUIRK_PHY_LCTRL_TIMEOUT)
         ")");
 
 #define OHCI_PARAM_DEBUG_AT_AR 1
@@ -2299,9 +2294,6 @@ static int ohci_enable(struct fw_card *card,
          * TI TSB82AA2 + TSB81BA3(A) cards signal LPS enabled early but
          * cannot actually use the phy at that time. These need tens of
          * millisecods pause between LPS write and first phy access too.
-         *
-         * But do not wait for 50msec on Agere/LSI cards. Their phy
-         * arbitration state machine may time out during such a long wait.
          */
 
         reg_write(ohci, OHCI1394_HCControlSet,
@@ -2309,11 +2301,8 @@ static int ohci_enable(struct fw_card *card,
                   OHCI1394_HCControl_postedWriteEnable);
         flush_writes(ohci);
 
-        if (!(ohci->quirks & QUIRK_PHY_LCTRL_TIMEOUT))
+        for (lps = 0, i = 0; !lps && i < 3; i++) {
                 msleep(50);
-
-        for (lps = 0, i = 0; !lps && i < 150; i++) {
-                msleep(1);
                 lps = reg_read(ohci, OHCI1394_HCControlSet) &
                       OHCI1394_HCControl_LPS;
         }
@@ -146,6 +146,7 @@ struct sbp2_logical_unit {
          */
         int generation;
         int retries;
+        work_func_t workfn;
         struct delayed_work work;
         bool has_sdev;
         bool blocked;
@@ -864,7 +865,7 @@ static void sbp2_login(struct work_struct *work)
         /* set appropriate retry limit(s) in BUSY_TIMEOUT register */
         sbp2_set_busy_timeout(lu);
 
-        PREPARE_DELAYED_WORK(&lu->work, sbp2_reconnect);
+        lu->workfn = sbp2_reconnect;
         sbp2_agent_reset(lu);
 
         /* This was a re-login. */
@@ -918,7 +919,7 @@ static void sbp2_login(struct work_struct *work)
          * If a bus reset happened, sbp2_update will have requeued
          * lu->work already. Reset the work from reconnect to login.
          */
-        PREPARE_DELAYED_WORK(&lu->work, sbp2_login);
+        lu->workfn = sbp2_login;
 }
 
 static void sbp2_reconnect(struct work_struct *work)
@@ -952,7 +953,7 @@ static void sbp2_reconnect(struct work_struct *work)
                     lu->retries++ >= 5) {
                         dev_err(tgt_dev(tgt), "failed to reconnect\n");
                         lu->retries = 0;
-                        PREPARE_DELAYED_WORK(&lu->work, sbp2_login);
+                        lu->workfn = sbp2_login;
                 }
                 sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5));
 
@@ -972,6 +973,13 @@ static void sbp2_reconnect(struct work_struct *work)
         sbp2_conditionally_unblock(lu);
 }
 
+static void sbp2_lu_workfn(struct work_struct *work)
+{
+        struct sbp2_logical_unit *lu = container_of(to_delayed_work(work),
+                                                    struct sbp2_logical_unit, work);
+        lu->workfn(work);
+}
+
 static int sbp2_add_logical_unit(struct sbp2_target *tgt, int lun_entry)
 {
         struct sbp2_logical_unit *lu;
@@ -998,7 +1006,8 @@ static int sbp2_add_logical_unit(struct sbp2_target *tgt, int lun_entry)
         lu->blocked = false;
         ++tgt->dont_block;
         INIT_LIST_HEAD(&lu->orb_list);
-        INIT_DELAYED_WORK(&lu->work, sbp2_login);
+        lu->workfn = sbp2_login;
+        INIT_DELAYED_WORK(&lu->work, sbp2_lu_workfn);
 
         list_add_tail(&lu->link, &tgt->lu_list);
         return 0;
@@ -68,15 +68,7 @@ void __armada_drm_queue_unref_work(struct drm_device *dev,
 {
         struct armada_private *priv = dev->dev_private;
 
-        /*
-         * Yes, we really must jump through these hoops just to store a
-         * _pointer_ to something into the kfifo. This is utterly insane
-         * and idiotic, because it kfifo requires the _data_ pointed to by
-         * the pointer const, not the pointer itself. Not only that, but
-         * you have to pass a pointer _to_ the pointer you want stored.
-         */
-        const struct drm_framebuffer *silly_api_alert = fb;
-        WARN_ON(!kfifo_put(&priv->fb_unref, &silly_api_alert));
+        WARN_ON(!kfifo_put(&priv->fb_unref, fb));
         schedule_work(&priv->fb_unref_work);
 }
 
@@ -2,6 +2,7 @@ config DRM_BOCHS
         tristate "DRM Support for bochs dispi vga interface (qemu stdvga)"
         depends on DRM && PCI
         select DRM_KMS_HELPER
+        select DRM_KMS_FB_HELPER
         select FB_SYS_FILLRECT
         select FB_SYS_COPYAREA
         select FB_SYS_IMAGEBLIT
@@ -403,7 +403,7 @@ MODULE_DEVICE_TABLE(pci, pciidlist);
 void intel_detect_pch(struct drm_device *dev)
 {
         struct drm_i915_private *dev_priv = dev->dev_private;
-        struct pci_dev *pch;
+        struct pci_dev *pch = NULL;
 
         /* In all current cases, num_pipes is equivalent to the PCH_NOP setting
          * (which really amounts to a PCH but no South Display).
@@ -424,12 +424,9 @@ void intel_detect_pch(struct drm_device *dev)
          * all the ISA bridge devices and check for the first match, instead
          * of only checking the first one.
          */
-        pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
-        while (pch) {
-                struct pci_dev *curr = pch;
+        while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
                 if (pch->vendor == PCI_VENDOR_ID_INTEL) {
-                        unsigned short id;
-                        id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
+                        unsigned short id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
                         dev_priv->pch_id = id;
 
                         if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
@@ -461,18 +458,16 @@ void intel_detect_pch(struct drm_device *dev)
                                 DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
                                 WARN_ON(!IS_HASWELL(dev));
                                 WARN_ON(!IS_ULT(dev));
-                        } else {
-                                goto check_next;
-                        }
-                        pci_dev_put(pch);
+                        } else
+                                continue;
                         break;
                 }
-check_next:
-                pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, curr);
-                pci_dev_put(curr);
         }
         if (!pch)
-                DRM_DEBUG_KMS("No PCH found?\n");
+                DRM_DEBUG_KMS("No PCH found.\n");
 
+        pci_dev_put(pch);
 }
 
 bool i915_semaphore_is_enabled(struct drm_device *dev)
@@ -82,9 +82,22 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
         r = devm_request_mem_region(dev->dev, base, dev_priv->gtt.stolen_size,
                                     "Graphics Stolen Memory");
         if (r == NULL) {
-                DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n",
-                          base, base + (uint32_t)dev_priv->gtt.stolen_size);
-                base = 0;
+                /*
+                 * One more attempt but this time requesting region from
+                 * base + 1, as we have seen that this resolves the region
+                 * conflict with the PCI Bus.
+                 * This is a BIOS w/a: Some BIOS wrap stolen in the root
+                 * PCI bus, but have an off-by-one error. Hence retry the
+                 * reservation starting from 1 instead of 0.
+                 */
+                r = devm_request_mem_region(dev->dev, base + 1,
+                                            dev_priv->gtt.stolen_size - 1,
+                                            "Graphics Stolen Memory");
+                if (r == NULL) {
+                        DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n",
+                                  base, base + (uint32_t)dev_priv->gtt.stolen_size);
+                        base = 0;
+                }
         }
 
         return base;
@@ -1092,12 +1092,12 @@ static void assert_cursor(struct drm_i915_private *dev_priv,
         struct drm_device *dev = dev_priv->dev;
         bool cur_state;
 
-        if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
-                cur_state = I915_READ(CURCNTR_IVB(pipe)) & CURSOR_MODE;
-        else if (IS_845G(dev) || IS_I865G(dev))
+        if (IS_845G(dev) || IS_I865G(dev))
                 cur_state = I915_READ(_CURACNTR) & CURSOR_ENABLE;
-        else
+        else if (INTEL_INFO(dev)->gen <= 6 || IS_VALLEYVIEW(dev))
                 cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
+        else
+                cur_state = I915_READ(CURCNTR_IVB(pipe)) & CURSOR_MODE;
 
         WARN(cur_state != state,
              "cursor on pipe %c assertion failure (expected %s, current %s)\n",
@@ -845,7 +845,7 @@ static int hdmi_portclock_limit(struct intel_hdmi *hdmi)
 {
         struct drm_device *dev = intel_hdmi_to_dev(hdmi);
 
-        if (IS_G4X(dev))
+        if (!hdmi->has_hdmi_sink || IS_G4X(dev))
                 return 165000;
         else if (IS_HASWELL(dev) || INTEL_INFO(dev)->gen >= 8)
                 return 300000;
@@ -899,8 +899,8 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
          * outputs. We also need to check that the higher clock still fits
          * within limits.
          */
-        if (pipe_config->pipe_bpp > 8*3 && clock_12bpc <= portclock_limit
-            && HAS_PCH_SPLIT(dev)) {
+        if (pipe_config->pipe_bpp > 8*3 && intel_hdmi->has_hdmi_sink &&
+            clock_12bpc <= portclock_limit && HAS_PCH_SPLIT(dev)) {
                 DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n");
                 desired_bpp = 12*3;
 
@@ -698,7 +698,7 @@ static void i9xx_enable_backlight(struct intel_connector *connector)
                 freq /= 0xff;
 
         ctl = freq << 17;
-        if (IS_GEN2(dev) && panel->backlight.combination_mode)
+        if (panel->backlight.combination_mode)
                 ctl |= BLM_LEGACY_MODE;
         if (IS_PINEVIEW(dev) && panel->backlight.active_low_pwm)
                 ctl |= BLM_POLARITY_PNV;
@@ -979,7 +979,7 @@ static int i9xx_setup_backlight(struct intel_connector *connector)
 
         ctl = I915_READ(BLC_PWM_CTL);
 
-        if (IS_GEN2(dev))
+        if (IS_GEN2(dev) || IS_I915GM(dev) || IS_I945GM(dev))
                 panel->backlight.combination_mode = ctl & BLM_LEGACY_MODE;
 
         if (IS_PINEVIEW(dev))
@@ -3493,6 +3493,8 @@ static void valleyview_setup_pctx(struct drm_device *dev)
         u32 pcbr;
         int pctx_size = 24*1024;
 
+        WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
         pcbr = I915_READ(VLV_PCBR);
         if (pcbr) {
                 /* BIOS set it up already, grab the pre-alloc'd space */
@@ -3542,8 +3544,6 @@ static void valleyview_enable_rps(struct drm_device *dev)
                 I915_WRITE(GTFIFODBG, gtfifodbg);
         }
 
-        valleyview_setup_pctx(dev);
-
         /* If VLV, Forcewake all wells, else re-direct to regular path */
         gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
 
@@ -4395,6 +4395,8 @@ void intel_enable_gt_powersave(struct drm_device *dev)
                 ironlake_enable_rc6(dev);
                 intel_init_emon(dev);
         } else if (IS_GEN6(dev) || IS_GEN7(dev)) {
+                if (IS_VALLEYVIEW(dev))
+                        valleyview_setup_pctx(dev);
                 /*
                  * PCU communication is slow and this doesn't need to be
                  * done at any specific time, so do this out of our fast path
@@ -1314,7 +1314,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
                 }
                 if (is_dp)
                         args.v5.ucLaneNum = dp_lane_count;
-                else if (radeon_encoder->pixel_clock > 165000)
+                else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
                         args.v5.ucLaneNum = 8;
                 else
                         args.v5.ucLaneNum = 4;
@@ -3046,7 +3046,7 @@ static u32 cik_create_bitmask(u32 bit_width)
 }
 
 /**
- * cik_select_se_sh - select which SE, SH to address
+ * cik_get_rb_disabled - computes the mask of disabled RBs
  *
  * @rdev: radeon_device pointer
  * @max_rb_num: max RBs (render backends) for the asic
@@ -7902,7 +7902,8 @@ int cik_resume(struct radeon_device *rdev)
         /* init golden registers */
         cik_init_golden_registers(rdev);
 
-        radeon_pm_resume(rdev);
+        if (rdev->pm.pm_method == PM_METHOD_DPM)
+                radeon_pm_resume(rdev);
 
         rdev->accel_working = true;
         r = cik_startup(rdev);
@@ -5299,7 +5299,8 @@ int evergreen_resume(struct radeon_device *rdev)
         /* init golden registers */
         evergreen_init_golden_registers(rdev);
 
-        radeon_pm_resume(rdev);
+        if (rdev->pm.pm_method == PM_METHOD_DPM)
+                radeon_pm_resume(rdev);
 
         rdev->accel_working = true;
         r = evergreen_startup(rdev);
@@ -57,7 +57,7 @@ typedef struct SMC_Evergreen_MCRegisters SMC_Evergreen_MCRegisters;
 
 #define EVERGREEN_SMC_FIRMWARE_HEADER_LOCATION 0x100
 
-#define EVERGREEN_SMC_FIRMWARE_HEADER_softRegisters 0x0
+#define EVERGREEN_SMC_FIRMWARE_HEADER_softRegisters 0x8
 #define EVERGREEN_SMC_FIRMWARE_HEADER_stateTable 0xC
 #define EVERGREEN_SMC_FIRMWARE_HEADER_mcRegisterTable 0x20
 
@@ -2105,7 +2105,8 @@ int cayman_resume(struct radeon_device *rdev)
         /* init golden registers */
         ni_init_golden_registers(rdev);
 
-        radeon_pm_resume(rdev);
+        if (rdev->pm.pm_method == PM_METHOD_DPM)
+                radeon_pm_resume(rdev);
 
         rdev->accel_working = true;
         r = cayman_startup(rdev);
@@ -3942,8 +3942,6 @@ int r100_resume(struct radeon_device *rdev)
         /* Initialize surface registers */
         radeon_surface_init(rdev);
 
-        radeon_pm_resume(rdev);
-
         rdev->accel_working = true;
         r = r100_startup(rdev);
         if (r) {
Some files were not shown because too many files have changed in this diff.