Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Cross-merge networking fixes after downstream PR.

No conflicts (sort of) and no adjacent changes.

This merge reverts commit b3c9e65eb2 ("net: hsr: remove seqnr_lock")
from net, as it was superseded by
commit 430d67bdcb ("net: hsr: Use the seqnr lock for frames received via interlink port.")
in net-next.

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit 46ae4d0a48
Author: Jakub Kicinski <kuba@kernel.org>
Date:   2024-09-05 20:27:09 -07:00
267 changed files with 2472 additions and 1140 deletions

View File

@@ -134,19 +134,3 @@ RISC-V Linux Kernel SV57
   ffffffff00000000 | -4 GB | ffffffff7fffffff | 2 GB | modules, BPF
   ffffffff80000000 | -2 GB | ffffffffffffffff | 2 GB | kernel
   __________________|____________|__________________|_________|____________________________________________________________
-
-Userspace VAs
---------------------
-To maintain compatibility with software that relies on the VA space with a
-maximum of 48 bits the kernel will, by default, return virtual addresses to
-userspace from a 48-bit range (sv48). This default behavior is achieved by
-passing 0 into the hint address parameter of mmap. On CPUs with an address space
-smaller than sv48, the CPU maximum supported address space will be the default.
-
-Software can "opt-in" to receiving VAs from another VA space by providing
-a hint address to mmap. When a hint address is passed to mmap, the returned
-address will never use more bits than the hint address. For example, if a hint
-address of `1 << 40` is passed to mmap, a valid returned address will never use
-bits 41 through 63. If no mappable addresses are available in that range, mmap
-will return `MAP_FAILED`.
-

View File

@@ -1,10 +1,10 @@
 # SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
 %YAML 1.2
 ---
-$id: http://devicetree.org/schemas/display/panel/wl-355608-a8.yaml#
+$id: http://devicetree.org/schemas/display/panel/anbernic,rg35xx-plus-panel.yaml#
 $schema: http://devicetree.org/meta-schemas/core.yaml#

-title: WL-355608-A8 3.5" (640x480 pixels) 24-bit IPS LCD panel
+title: Anbernic RG35XX series (WL-355608-A8) 3.5" 640x480 24-bit IPS LCD panel

 maintainers:
   - Ryan Walklin <ryan@testtoast.com>
@@ -15,7 +15,14 @@ allOf:
 properties:
   compatible:
-    const: wl-355608-a8
+    oneOf:
+      - const: anbernic,rg35xx-plus-panel
+      - items:
+          - enum:
+              - anbernic,rg35xx-2024-panel
+              - anbernic,rg35xx-h-panel
+              - anbernic,rg35xx-sp-panel
+          - const: anbernic,rg35xx-plus-panel

   reg:
     maxItems: 1
@@ -40,7 +47,7 @@ examples:
         #size-cells = <0>;

         panel@0 {
-            compatible = "wl-355608-a8";
+            compatible = "anbernic,rg35xx-plus-panel";
             reg = <0>;
             spi-3wire;

View File

@@ -14,8 +14,53 @@ maintainers:
 description:
   Bindings for NXP TJA11xx automotive PHYs

+properties:
+  compatible:
+    enum:
+      - ethernet-phy-id0180.dc40
+      - ethernet-phy-id0180.dc41
+      - ethernet-phy-id0180.dc48
+      - ethernet-phy-id0180.dd00
+      - ethernet-phy-id0180.dd01
+      - ethernet-phy-id0180.dd02
+      - ethernet-phy-id0180.dc80
+      - ethernet-phy-id0180.dc82
+      - ethernet-phy-id001b.b010
+      - ethernet-phy-id001b.b013
+      - ethernet-phy-id001b.b030
+      - ethernet-phy-id001b.b031
+
 allOf:
   - $ref: ethernet-phy.yaml#
+  - if:
+      properties:
+        compatible:
+          contains:
+            enum:
+              - ethernet-phy-id0180.dc40
+              - ethernet-phy-id0180.dc41
+              - ethernet-phy-id0180.dc48
+              - ethernet-phy-id0180.dd00
+              - ethernet-phy-id0180.dd01
+              - ethernet-phy-id0180.dd02
+    then:
+      properties:
+        nxp,rmii-refclk-in:
+          type: boolean
+          description: |
+            The REF_CLK is provided for both transmitted and received data
+            in RMII mode. This clock signal is provided by the PHY and is
+            typically derived from an external 25MHz crystal. Alternatively,
+            a 50MHz clock signal generated by an external oscillator can be
+            connected to pin REF_CLK. A third option is to connect a 25MHz
+            clock to pin CLK_IN_OUT. So, the REF_CLK should be configured
+            as input or output according to the actual circuit connection.
+            If present, indicates that the REF_CLK will be configured as
+            interface reference clock input when RMII mode enabled.
+            If not present, the REF_CLK will be configured as interface
+            reference clock output when RMII mode enabled.
+            Only supported on TJA1100 and TJA1101.

 patternProperties:
   "^ethernet-phy@[0-9a-f]+$":
@@ -32,22 +77,6 @@ patternProperties:
       description:
         The ID number for the child PHY. Should be +1 of parent PHY.

-      nxp,rmii-refclk-in:
-        type: boolean
-        description: |
-          The REF_CLK is provided for both transmitted and received data
-          in RMII mode. This clock signal is provided by the PHY and is
-          typically derived from an external 25MHz crystal. Alternatively,
-          a 50MHz clock signal generated by an external oscillator can be
-          connected to pin REF_CLK. A third option is to connect a 25MHz
-          clock to pin CLK_IN_OUT. So, the REF_CLK should be configured
-          as input or output according to the actual circuit connection.
-          If present, indicates that the REF_CLK will be configured as
-          interface reference clock input when RMII mode enabled.
-          If not present, the REF_CLK will be configured as interface
-          reference clock output when RMII mode enabled.
-          Only supported on TJA1100 and TJA1101.
-
     required:
       - reg

@@ -60,6 +89,7 @@ examples:
         #size-cells = <0>;

         tja1101_phy0: ethernet-phy@4 {
+            compatible = "ethernet-phy-id0180.dc40";
             reg = <0x4>;
             nxp,rmii-refclk-in;
         };

View File

@@ -28,7 +28,7 @@ unevaluatedProperties: false
 examples:
   - |
-    nvmem {
+    soc-nvmem {
         compatible = "xlnx,zynqmp-nvmem-fw";
         nvmem-layout {
             compatible = "fixed-layout";

View File

@@ -31,10 +31,16 @@ properties:
           - rockchip,rk3588-pcie3-pipe-grf
           - rockchip,rk3588-usb-grf
           - rockchip,rk3588-usbdpphy-grf
-          - rockchip,rk3588-vo-grf
+          - rockchip,rk3588-vo0-grf
+          - rockchip,rk3588-vo1-grf
           - rockchip,rk3588-vop-grf
           - rockchip,rv1108-usbgrf
       - const: syscon
+  - items:
+      - const: rockchip,rk3588-vo-grf
+      - const: syscon
+    deprecated: true
+    description: Use rockchip,rk3588-vo{0,1}-grf instead.
   - items:
       - enum:
           - rockchip,px30-grf
@@ -262,6 +268,8 @@ allOf:
         contains:
           enum:
             - rockchip,rk3588-vo-grf
+            - rockchip,rk3588-vo0-grf
+            - rockchip,rk3588-vo1-grf
     then:
       required:
View File

@@ -109,7 +109,6 @@ attribute-sets:
       -
         name: port
        type: u16
-        byte-order: big-endian
       -
         name: flags
         type: u32

View File

@@ -0,0 +1,260 @@
.. SPDX-License-Identifier: GPL-2.0
Confidential Computing VMs
==========================
Hyper-V can create and run Linux guests that are Confidential Computing
(CoCo) VMs. Such VMs cooperate with the physical processor to better protect
the confidentiality and integrity of data in the VM's memory, even in the
face of a hypervisor/VMM that has been compromised and may behave maliciously.
CoCo VMs on Hyper-V share the generic CoCo VM threat model and security
objectives described in Documentation/security/snp-tdx-threat-model.rst. Note
that Hyper-V specific code in Linux refers to CoCo VMs as "isolated VMs" or
"isolation VMs".
A Linux CoCo VM on Hyper-V requires the cooperation and interaction of the
following:
* Physical hardware with a processor that supports CoCo VMs
* The hardware runs a version of Windows/Hyper-V with support for CoCo VMs
* The VM runs a version of Linux that supports being a CoCo VM
The physical hardware requirements are as follows:
* AMD processor with SEV-SNP. Hyper-V does not run guest VMs with AMD SME,
SEV, or SEV-ES encryption, and such encryption is not sufficient for a CoCo
VM on Hyper-V.
* Intel processor with TDX
To create a CoCo VM, the "Isolated VM" attribute must be specified to Hyper-V
when the VM is created. A VM cannot be changed from a CoCo VM to a normal VM,
or vice versa, after it is created.
Operational Modes
-----------------
Hyper-V CoCo VMs can run in two modes. The mode is selected when the VM is
created and cannot be changed during the life of the VM.
* Fully-enlightened mode. In this mode, the guest operating system is
enlightened to understand and manage all aspects of running as a CoCo VM.
* Paravisor mode. In this mode, a paravisor layer between the guest and the
host provides some operations needed to run as a CoCo VM. The guest operating
system can have fewer CoCo enlightenments than are required in the
fully-enlightened case.
Conceptually, fully-enlightened mode and paravisor mode may be treated as
points on a spectrum spanning the degree of guest enlightenment needed to run
as a CoCo VM. Fully-enlightened mode is one end of the spectrum. A full
implementation of paravisor mode is the other end of the spectrum, where all
aspects of running as a CoCo VM are handled by the paravisor, and a normal
guest OS with no knowledge of memory encryption or other aspects of CoCo VMs
can run successfully. However, the Hyper-V implementation of paravisor mode
does not go this far, and is somewhere in the middle of the spectrum. Some
aspects of CoCo VMs are handled by the Hyper-V paravisor while the guest OS
must be enlightened for other aspects. Unfortunately, there is no
standardized enumeration of feature/functions that might be provided in the
paravisor, and there is no standardized mechanism for a guest OS to query the
paravisor for the feature/functions it provides. The understanding of what
the paravisor provides is hard-coded in the guest OS.
Paravisor mode has similarities to the `Coconut project`_, which aims to
provide a limited paravisor that offers services, such as a virtual TPM, to
the guest.
However, the Hyper-V paravisor generally handles more aspects of CoCo VMs
than is currently envisioned for Coconut, and so is further toward the "no
guest enlightenments required" end of the spectrum.
.. _Coconut project: https://github.com/coconut-svsm/svsm
In the CoCo VM threat model, the paravisor is in the guest security domain
and must be trusted by the guest OS. By implication, the hypervisor/VMM must
protect itself against a potentially malicious paravisor just like it
protects against a potentially malicious guest.
The hardware architectural approach to fully-enlightened vs. paravisor mode
varies depending on the underlying processor.
* With AMD SEV-SNP processors, in fully-enlightened mode the guest OS runs in
VMPL 0 and has full control of the guest context. In paravisor mode, the
guest OS runs in VMPL 2 and the paravisor runs in VMPL 0. The paravisor
running in VMPL 0 has privileges that the guest OS in VMPL 2 does not have.
Certain operations require the guest to invoke the paravisor. Furthermore, in
paravisor mode the guest OS operates in "virtual Top Of Memory" (vTOM) mode
as defined by the SEV-SNP architecture. This mode simplifies guest management
of memory encryption when a paravisor is used.
* With Intel TDX processors, in fully-enlightened mode the guest OS runs in an
L1 VM. In paravisor mode, TD partitioning is used. The paravisor runs in the
L1 VM, and the guest OS runs in a nested L2 VM.
Hyper-V exposes a synthetic MSR to guests that describes the CoCo mode. This
MSR indicates if the underlying processor uses AMD SEV-SNP or Intel TDX, and
whether a paravisor is being used. It is straightforward to build a single
kernel image that can boot and run properly on either architecture, and in
either mode.
Paravisor Effects
-----------------
Running in paravisor mode affects the following areas of generic Linux kernel
CoCo VM functionality:
* Initial guest memory setup. When a new VM is created in paravisor mode, the
paravisor runs first and sets up the guest physical memory as encrypted. The
guest Linux does normal memory initialization, except for explicitly marking
appropriate ranges as decrypted (shared). In paravisor mode, Linux does not
perform the early boot memory setup steps that are particularly tricky with
AMD SEV-SNP in fully-enlightened mode.
* #VC/#VE exception handling. In paravisor mode, Hyper-V configures the guest
CoCo VM to route #VC and #VE exceptions to VMPL 0 and the L1 VM,
respectively, and not to the guest Linux. Consequently, these exception handlers
do not run in the guest Linux and are not a required enlightenment for a
Linux guest in paravisor mode.
* CPUID flags. Both AMD SEV-SNP and Intel TDX provide a CPUID flag in the
guest indicating that the VM is operating with the respective hardware
support. While these CPUID flags are visible in fully-enlightened CoCo VMs,
the paravisor filters out these flags and the guest Linux does not see them.
Throughout the Linux kernel, explicitly testing these flags has mostly been
eliminated in favor of the cc_platform_has() function, with the goal of
abstracting the differences between SEV-SNP and TDX. But the
cc_platform_has() abstraction also allows the Hyper-V paravisor configuration
to selectively enable aspects of CoCo VM functionality even when the CPUID
flags are not set. The exception is early boot memory setup on SEV-SNP, which
tests the CPUID SEV-SNP flag. But not having the flag in a Hyper-V paravisor
mode VM achieves the desired effect of not running SEV-SNP-specific early
boot memory setup.
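
A rough sketch of the preferred pattern (the helper name is hypothetical;
cc_platform_has() and CC_ATTR_GUEST_MEM_ENCRYPT are the generic kernel
interfaces referred to above)::

  #include <linux/cc_platform.h>

  static bool my_driver_runs_in_coco_vm(void)
  {
          /*
           * True when guest memory is encrypted, whether the platform is
           * SEV-SNP, TDX, or a Hyper-V paravisor configuration that enables
           * the attribute without exposing the CPUID flags.
           */
          return cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT);
  }
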
* Device emulation. In paravisor mode, the Hyper-V paravisor provides
emulation of devices such as the IO-APIC and TPM. Because the emulation
happens in the paravisor in the guest context (instead of the hypervisor/VMM
context), MMIO accesses to these devices must be encrypted references instead
of the decrypted references that would be used in a fully-enlightened CoCo
VM. The __ioremap_caller() function has been enhanced to make a callback to
check whether a particular address range should be treated as encrypted
(private). See the "is_private_mmio" callback.
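
As an illustration, the registration amounts to installing a callback that
__ioremap_caller() consults (simplified sketch based on the callback named
above; surrounding init code omitted)::

  /*
   * During Hyper-V CoCo init: MMIO ranges emulated by the paravisor must
   * be mapped encrypted, so register the address-range check.
   */
  x86_platform.hyper.is_private_mmio = hv_is_private_mmio;
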
* Encrypt/decrypt memory transitions. In a CoCo VM, transitioning guest
memory between encrypted and decrypted requires coordinating with the
hypervisor/VMM. This is done via callbacks invoked from
__set_memory_enc_pgtable(). In fully-enlightened mode, the normal SEV-SNP and
TDX implementations of these callbacks are used. In paravisor mode, a Hyper-V
specific set of callbacks is used. These callbacks invoke the paravisor so
that the paravisor can coordinate the transitions and inform the hypervisor
as necessary. See hv_vtom_init() where these callbacks are set up.
* Interrupt injection. In fully enlightened mode, a malicious hypervisor
could inject interrupts into the guest OS at times that violate x86/x64
architectural rules. For full protection, the guest OS should include
enlightenments that use the interrupt injection management features provided
by CoCo-capable processors. In paravisor mode, the paravisor mediates
interrupt injection into the guest OS, and ensures that the guest OS only
sees interrupts that are "legal". The paravisor uses the interrupt injection
management features provided by the CoCo-capable physical processor, thereby
masking these complexities from the guest OS.
Hyper-V Hypercalls
------------------
When in fully-enlightened mode, hypercalls made by the Linux guest are routed
directly to the hypervisor, just as in a non-CoCo VM. But in paravisor mode,
normal hypercalls trap to the paravisor first, which may in turn invoke the
hypervisor. But the paravisor is idiosyncratic in this regard, and a few
hypercalls made by the Linux guest must always be routed directly to the
hypervisor. These hypercall sites test for a paravisor being present, and use
a special invocation sequence. See hv_post_message(), for example.
Guest communication with Hyper-V
--------------------------------
Separate from the generic Linux kernel handling of memory encryption in Linux
CoCo VMs, Hyper-V has VMBus and VMBus devices that communicate using memory
shared between the Linux guest and the host. This shared memory must be
marked decrypted to enable communication. Furthermore, since the threat model
includes a compromised and potentially malicious host, the guest must guard
against leaking any unintended data to the host through this shared memory.
These Hyper-V and VMBus memory pages are marked as decrypted:
* VMBus monitor pages
* Synthetic interrupt controller (synic) related pages (unless supplied by
the paravisor)
* Per-cpu hypercall input and output pages (unless running with a paravisor)
* VMBus ring buffers. The direct mapping is marked decrypted in
__vmbus_establish_gpadl(). The secondary mapping created in
hv_ringbuffer_init() must also include the "decrypted" attribute.
When the guest writes data to memory that is shared with the host, it must
ensure that only the intended data is written. Padding or unused fields must
be initialized to zeros before copying into the shared memory so that random
kernel data is not inadvertently given to the host.
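
A minimal sketch of the zero-before-copy rule (the message structure and the
destination buffer are hypothetical, standing in for the VMBus equivalents)::

  struct my_host_request {              /* hypothetical wire format */
          u32 op;
          u16 flags;                    /* 2 bytes of implicit padding follow */
          u64 gpa;
  };

  static void my_send_request(void *shared_buf, u32 op, u64 gpa)
  {
          struct my_host_request req;

          memset(&req, 0, sizeof(req)); /* padding must not leak kernel data */
          req.op = op;
          req.gpa = gpa;
          memcpy(shared_buf, &req, sizeof(req)); /* shared_buf is decrypted */
  }
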
Similarly, when the guest reads memory that is shared with the host, it must
validate the data before acting on it so that a malicious host cannot induce
the guest to expose unintended data. Doing such validation can be tricky
because the host can modify the shared memory areas even while or after
validation is performed. For messages passed from the host to the guest in a
VMBus ring buffer, the length of the message is validated, and the message is
copied into a temporary (encrypted) buffer for further validation and
processing. The copying adds a small amount of overhead, but is the only way
to protect against a malicious host. See hv_pkt_iter_first().
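
The copy-then-validate pattern, reduced to its essentials (hypothetical
message layout and helper; the real logic lives in the VMBus ring-buffer
code)::

  struct my_msg {                       /* hypothetical */
          u32 len;
          u8 data[];
  };

  static int my_recv(const struct my_msg *shared, void *priv, size_t max)
  {
          u32 len = READ_ONCE(shared->len);  /* snapshot the length once */

          if (len < sizeof(*shared) || len > max)
                  return -EINVAL;

          /*
           * Copy into encrypted memory first; any further validation looks
           * only at 'priv', which the host cannot modify under us.
           */
          memcpy(priv, shared, len);
          return 0;
  }
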
Many drivers for VMBus devices have been "hardened" by adding code to fully
validate messages received over VMBus, instead of assuming that Hyper-V is
acting cooperatively. Such drivers are marked as "allowed_in_isolated" in the
vmbus_devs[] table. Other drivers for VMBus devices that are not needed in a
CoCo VM have not been hardened, and they are not allowed to load in a CoCo
VM. See vmbus_is_valid_offer() where such devices are excluded.
Two VMBus devices depend on the Hyper-V host to do DMA data transfers:
storvsc for disk I/O and netvsc for network I/O. storvsc uses the normal
Linux kernel DMA APIs, and so bounce buffering through decrypted swiotlb
memory is done implicitly. netvsc has two modes for data transfers. The first
mode goes through send and receive buffer space that is explicitly allocated
by the netvsc driver, and is used for most smaller packets. These send and
receive buffers are marked decrypted by __vmbus_establish_gpadl(). Because
the netvsc driver explicitly copies packets to/from these buffers, the
equivalent of bounce buffering between encrypted and decrypted memory is
already part of the data path. The second mode uses the normal Linux kernel
DMA APIs, and is bounce buffered through swiotlb memory implicitly like in
storvsc.
Finally, the VMBus virtual PCI driver needs special handling in a CoCo VM.
Linux PCI device drivers access PCI config space using standard APIs provided
by the Linux PCI subsystem. On Hyper-V, these functions directly access MMIO
space, and the access traps to Hyper-V for emulation. But in CoCo VMs, memory
encryption prevents Hyper-V from reading the guest instruction stream to
emulate the access. So in a CoCo VM, these functions must make a hypercall
with arguments explicitly describing the access. See
_hv_pcifront_read_config() and _hv_pcifront_write_config() and the
"use_calls" flag indicating to use hypercalls.
load_unaligned_zeropad()
------------------------
When transitioning memory between encrypted and decrypted, the caller of
set_memory_encrypted() or set_memory_decrypted() is responsible for ensuring
the memory isn't in use and isn't referenced while the transition is in
progress. The transition has multiple steps, and includes interaction with
the Hyper-V host. The memory is in an inconsistent state until all steps are
complete. A reference while the state is inconsistent could result in an
exception that can't be cleanly fixed up.
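
For reference, a simplified allocation helper showing the caller's
obligations (hypothetical function; set_memory_decrypted() and
set_memory_encrypted() are the real kernel APIs)::

  #include <linux/gfp.h>
  #include <asm/set_memory.h>

  static void *my_alloc_host_visible_page(void)
  {
          unsigned long page = __get_free_page(GFP_KERNEL | __GFP_ZERO);

          if (!page)
                  return NULL;

          if (set_memory_decrypted(page, 1)) {
                  /*
                   * The transition failed partway; the page's state is
                   * unknown, so leak it rather than hand it back to the
                   * allocator where it could be reused while inconsistent.
                   */
                  return NULL;
          }
          return (void *)page;
  }
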
However, the kernel load_unaligned_zeropad() mechanism may make stray
references that can't be prevented by the caller of set_memory_encrypted() or
set_memory_decrypted(), so there's specific code in the #VC or #VE exception
handler to fix up this case. But a CoCo VM running on Hyper-V may be
configured to run with a paravisor, with the #VC or #VE exception routed to
the paravisor. There's no architectural way to forward the exceptions back to
the guest kernel, and in such a case, the load_unaligned_zeropad() fixup code
in the #VC/#VE handlers doesn't run.
To avoid this problem, the Hyper-V specific functions for notifying the
hypervisor of the transition mark pages as "not present" while a transition
is in progress. If load_unaligned_zeropad() causes a stray reference, a
normal page fault is generated instead of #VC or #VE, and the page-fault-
based handlers for load_unaligned_zeropad() fix up the reference. When the
encrypted/decrypted transition is complete, the pages are marked as "present"
again. See hv_vtom_clear_present() and hv_vtom_set_host_visibility().

View File

@@ -11,3 +11,4 @@ Hyper-V Enlightenments
    vmbus
    clocks
    vpci
+   coco

View File

@@ -7466,8 +7466,8 @@ S:	Maintained
 T:	git https://gitlab.freedesktop.org/drm/misc/kernel.git
 F:	Documentation/devicetree/bindings/display/bridge/
 F:	drivers/gpu/drm/bridge/
+F:	drivers/gpu/drm/display/drm_bridge_connector.c
 F:	drivers/gpu/drm/drm_bridge.c
-F:	drivers/gpu/drm/drm_bridge_connector.c
 F:	include/drm/drm_bridge.h
 F:	include/drm/drm_bridge_connector.h
@@ -18434,6 +18434,7 @@ L:	netdev@vger.kernel.org
 S:	Maintained
 F:	Documentation/devicetree/bindings/net/pse-pd/
 F:	drivers/net/pse-pd/
+F:	net/ethtool/pse-pd.c

 PSTORE FILESYSTEM
 M:	Kees Cook <kees@kernel.org>

View File

@@ -2,7 +2,7 @@
 VERSION = 6
 PATCHLEVEL = 11
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
 NAME = Baby Opossum Posse

 # *DOCUMENTATION*

View File

@@ -387,7 +387,7 @@
 	pmic {
 		pmic_int_l: pmic-int-l {
-			rockchip,pins = <2 RK_PA6 RK_FUNC_GPIO &pcfg_pull_up>;
+			rockchip,pins = <0 RK_PA2 RK_FUNC_GPIO &pcfg_pull_up>;
 		};
 	};

View File

@@ -154,6 +154,22 @@
 	};
 };

+&gpio3 {
+	/*
+	 * The Qseven BIOS_DISABLE signal on the RK3399-Q7 keeps the on-module
+	 * eMMC and SPI flash powered-down initially (in fact it keeps the
+	 * reset signal asserted). BIOS_DISABLE_OVERRIDE pin allows to override
+	 * that signal so that eMMC and SPI can be used regardless of the state
+	 * of the signal.
+	 */
+	bios-disable-override-hog {
+		gpios = <RK_PD5 GPIO_ACTIVE_LOW>;
+		gpio-hog;
+		line-name = "bios_disable_override";
+		output-high;
+	};
+};
+
 &gmac {
 	assigned-clocks = <&cru SCLK_RMII_SRC>;
 	assigned-clock-parents = <&clkin_gmac>;
@@ -409,6 +425,7 @@
 &i2s0 {
 	pinctrl-0 = <&i2s0_2ch_bus>;
+	pinctrl-1 = <&i2s0_2ch_bus_bclk_off>;
 	rockchip,playback-channels = <2>;
 	rockchip,capture-channels = <2>;
 	status = "okay";
@@ -417,8 +434,8 @@
 /*
  * As Q7 does not specify neither a global nor a RX clock for I2S these
  * signals are not used. Furthermore I2S0_LRCK_RX is used as GPIO.
- * Therefore we have to redefine the i2s0_2ch_bus definition to prevent
- * conflicts.
+ * Therefore we have to redefine the i2s0_2ch_bus and i2s0_2ch_bus_bclk_off
+ * definitions to prevent conflicts.
  */
 &i2s0_2ch_bus {
 	rockchip,pins =
@@ -428,6 +445,14 @@
 		<3 RK_PD7 1 &pcfg_pull_none>;
 };

+&i2s0_2ch_bus_bclk_off {
+	rockchip,pins =
+		<3 RK_PD0 RK_FUNC_GPIO &pcfg_pull_none>,
+		<3 RK_PD2 1 &pcfg_pull_none>,
+		<3 RK_PD3 1 &pcfg_pull_none>,
+		<3 RK_PD7 1 &pcfg_pull_none>;
+};
+
 &io_domains {
 	status = "okay";
 	bt656-supply = <&vcc_1v8>;
@@ -449,9 +474,14 @@
 &pinctrl {
 	pinctrl-names = "default";
-	pinctrl-0 = <&q7_thermal_pin>;
+	pinctrl-0 = <&q7_thermal_pin &bios_disable_override_hog_pin>;

 	gpios {
+		bios_disable_override_hog_pin: bios-disable-override-hog-pin {
+			rockchip,pins =
+				<3 RK_PD5 RK_FUNC_GPIO &pcfg_pull_down>;
+		};
+
 		q7_thermal_pin: q7-thermal-pin {
 			rockchip,pins =
 				<0 RK_PA3 RK_FUNC_GPIO &pcfg_pull_up>;

View File

@@ -1592,10 +1592,9 @@
 			 <&cru SRST_TSADCPHY>;
 		rockchip,grf = <&grf>;
 		rockchip,hw-tshut-temp = <95000>;
-		pinctrl-names = "init", "default", "sleep";
-		pinctrl-0 = <&tsadc_pin>;
-		pinctrl-1 = <&tsadc_shutorg>;
-		pinctrl-2 = <&tsadc_pin>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&tsadc_shutorg>;
+		pinctrl-1 = <&tsadc_pin>;
 		#thermal-sensor-cells = <1>;
 		status = "disabled";
 	};

View File

@@ -582,14 +582,14 @@
 	};

 	vo0_grf: syscon@fd5a6000 {
-		compatible = "rockchip,rk3588-vo-grf", "syscon";
+		compatible = "rockchip,rk3588-vo0-grf", "syscon";
 		reg = <0x0 0xfd5a6000 0x0 0x2000>;
 		clocks = <&cru PCLK_VO0GRF>;
 	};

 	vo1_grf: syscon@fd5a8000 {
-		compatible = "rockchip,rk3588-vo-grf", "syscon";
-		reg = <0x0 0xfd5a8000 0x0 0x100>;
+		compatible = "rockchip,rk3588-vo1-grf", "syscon";
+		reg = <0x0 0xfd5a8000 0x0 0x4000>;
 		clocks = <&cru PCLK_VO1GRF>;
 	};

View File

@@ -25,6 +25,7 @@
  *
  * @common:      Common unwind state.
  * @task:        The task being unwound.
+ * @graph_idx:   Used by ftrace_graph_ret_addr() for optimized stack unwinding.
  * @kr_cur:      When KRETPROBES is selected, holds the kretprobe instance
  *               associated with the most recently encountered replacement lr
  *               value.
@@ -32,6 +33,7 @@
 struct kunwind_state {
 	struct unwind_state common;
 	struct task_struct *task;
+	int graph_idx;
 #ifdef CONFIG_KRETPROBES
 	struct llist_node *kr_cur;
 #endif
@@ -106,7 +108,7 @@ kunwind_recover_return_address(struct kunwind_state *state)
 	if (state->task->ret_stack &&
 	    (state->common.pc == (unsigned long)return_to_handler)) {
 		unsigned long orig_pc;
-		orig_pc = ftrace_graph_ret_addr(state->task, NULL,
+		orig_pc = ftrace_graph_ret_addr(state->task, &state->graph_idx,
 						state->common.pc,
 						(void *)state->common.fp);
 		if (WARN_ON_ONCE(state->common.pc == orig_pc))

View File

@@ -52,7 +52,7 @@
 #define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)

 #define pgd_ERROR(e) \
-	pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
+	pr_err("%s:%d: bad pgd %08llx.\n", __FILE__, __LINE__, (unsigned long long)pgd_val(e))

 /*
  * This is the bottom of the PKMAP area with HIGHMEM or an arbitrary
@@ -170,7 +170,7 @@ static inline void pmd_clear(pmd_t *pmdp)
 #define pmd_pfn(pmd)		(pmd_val(pmd) >> PAGE_SHIFT)
 #else
 #define pmd_page_vaddr(pmd)	\
-	((const void *)(pmd_val(pmd) & ~(PTE_TABLE_SIZE - 1)))
+	((const void *)((unsigned long)pmd_val(pmd) & ~(PTE_TABLE_SIZE - 1)))
 #define pmd_pfn(pmd)		(__pa(pmd_val(pmd)) >> PAGE_SHIFT)
 #endif

View File

@@ -49,16 +49,22 @@ static inline unsigned long pud_val(pud_t x)
 #endif /* CONFIG_PPC64 */

 /* PGD level */
-#if defined(CONFIG_PPC_E500) && defined(CONFIG_PTE_64BIT)
+#if defined(CONFIG_PPC_85xx) && defined(CONFIG_PTE_64BIT)
 typedef struct { unsigned long long pgd; } pgd_t;
+
+static inline unsigned long long pgd_val(pgd_t x)
+{
+	return x.pgd;
+}
 #else
 typedef struct { unsigned long pgd; } pgd_t;
-#endif
-#define __pgd(x)	((pgd_t) { (x) })
+
 static inline unsigned long pgd_val(pgd_t x)
 {
 	return x.pgd;
 }
+#endif
+
+#define __pgd(x)	((pgd_t) { (x) })

 /* Page protection bits */
 typedef struct { unsigned long pgprot; } pgprot_t;

View File

@@ -74,6 +74,8 @@ SECTIONS
 	.got		: { *(.got) }			:text
 	.plt		: { *(.plt) }

+	.rela.dyn	: { *(.rela .rela*) }
+
 	_end = .;
 	__end = .;
 	PROVIDE(end = .);
@@ -87,7 +89,7 @@ SECTIONS
 		*(.branch_lt)
 		*(.data .data.* .gnu.linkonce.d.* .sdata*)
 		*(.bss .sbss .dynbss .dynsbss)
-		*(.got1 .glink .iplt .rela*)
+		*(.got1 .glink .iplt)
 	}
 }

View File

@@ -69,7 +69,7 @@ SECTIONS
 	.eh_frame_hdr	: { *(.eh_frame_hdr) }		:text	:eh_frame_hdr
 	.eh_frame	: { KEEP (*(.eh_frame)) }	:text
 	.gcc_except_table : { *(.gcc_except_table) }
-	.rela.dyn ALIGN(8) : { *(.rela.dyn) }
+	.rela.dyn ALIGN(8) : { *(.rela .rela*) }

 	.got ALIGN(8)	: { *(.got .toc) }
@@ -86,7 +86,7 @@ SECTIONS
 		*(.data .data.* .gnu.linkonce.d.* .sdata*)
 		*(.bss .sbss .dynbss .dynsbss)
 		*(.opd)
-		*(.glink .iplt .plt .rela*)
+		*(.glink .iplt .plt)
 	}
 }

View File

@@ -697,7 +697,15 @@ again:
 	}

 release:
-	qnodesp->count--; /* release the node */
+	/*
+	 * Clear the lock before releasing the node, as another CPU might see stale
+	 * values if an interrupt occurs after we increment qnodesp->count
+	 * but before node->lock is initialized. The barrier ensures that
+	 * there are no further stores to the node after it has been released.
+	 */
+	node->lock = NULL;
+	barrier();
+	qnodesp->count--;
 }

 void queued_spin_lock_slowpath(struct qspinlock *lock)

View File

@@ -33,7 +33,7 @@
  * though this will probably be made common with other nohash
  * implementations at some point
  */
-int mmu_pte_psize;		/* Page size used for PTE pages */
+static int mmu_pte_psize;	/* Page size used for PTE pages */
 int mmu_vmemmap_psize;		/* Page size used for the virtual mem map */
 int book3e_htw_mode;		/* HW tablewalk?  Value is PPC_HTW_* */
 unsigned long linear_map_top;	/* Top of linear mapping */

View File

@@ -552,8 +552,8 @@ config RISCV_ISA_SVPBMT
 config TOOLCHAIN_HAS_V
 	bool
 	default y
-	depends on !64BIT || $(cc-option,-mabi=lp64 -march=rv64iv)
-	depends on !32BIT || $(cc-option,-mabi=ilp32 -march=rv32iv)
+	depends on !64BIT || $(cc-option,-mabi=lp64 -march=rv64imv)
+	depends on !32BIT || $(cc-option,-mabi=ilp32 -march=rv32imv)
 	depends on LLD_VERSION >= 140000 || LD_VERSION >= 23800
 	depends on AS_HAS_OPTION_ARCH

View File

@@ -365,6 +365,12 @@
 	};
 };

+&syscrg {
+	assigned-clocks = <&syscrg JH7110_SYSCLK_CPU_CORE>,
+			  <&pllclk JH7110_PLLCLK_PLL0_OUT>;
+	assigned-clock-rates = <500000000>, <1500000000>;
+};
+
 &sysgpio {
 	i2c0_pins: i2c0-0 {
 		i2c-pins {

View File

@@ -14,36 +14,14 @@
 #include <asm/ptrace.h>

+/*
+ * addr is a hint to the maximum userspace address that mmap should provide, so
+ * this macro needs to return the largest address space available so that
+ * mmap_end < addr, being mmap_end the top of that address space.
+ * See Documentation/arch/riscv/vm-layout.rst for more details.
+ */
 #define arch_get_mmap_end(addr, len, flags)			\
 ({								\
-	unsigned long mmap_end;					\
-	typeof(addr) _addr = (addr);				\
-	if ((_addr) == 0 || is_compat_task() ||			\
-	    ((_addr + len) > BIT(VA_BITS - 1)))			\
-		mmap_end = STACK_TOP_MAX;			\
-	else							\
-		mmap_end = (_addr + len);			\
-	mmap_end;						\
+	STACK_TOP_MAX;						\
 })

 #define arch_get_mmap_base(addr, base)				\
 ({								\
-	unsigned long mmap_base;				\
-	typeof(addr) _addr = (addr);				\
-	typeof(base) _base = (base);				\
-	unsigned long rnd_gap = DEFAULT_MAP_WINDOW - (_base);	\
-	if ((_addr) == 0 || is_compat_task() ||			\
-	    ((_addr + len) > BIT(VA_BITS - 1)))			\
-		mmap_base = (_base);				\
-	else							\
-		mmap_base = (_addr + len) - rnd_gap;		\
-	mmap_base;						\
+	base;							\
 })

 #ifdef CONFIG_64BIT

View File

@@ -9,6 +9,7 @@

 #include <linux/types.h>
 #include <linux/cpumask.h>
+#include <linux/jump_label.h>

 #ifdef CONFIG_RISCV_SBI
 enum sbi_ext_id {
@@ -304,6 +305,7 @@ struct sbiret {
 };

 void sbi_init(void);
+long __sbi_base_ecall(int fid);
 struct sbiret __sbi_ecall(unsigned long arg0, unsigned long arg1,
 			  unsigned long arg2, unsigned long arg3,
 			  unsigned long arg4, unsigned long arg5,
@@ -373,7 +375,23 @@ static inline unsigned long sbi_mk_version(unsigned long major,
 		| (minor & SBI_SPEC_VERSION_MINOR_MASK);
 }

-int sbi_err_map_linux_errno(int err);
+static inline int sbi_err_map_linux_errno(int err)
+{
+	switch (err) {
+	case SBI_SUCCESS:
+		return 0;
+	case SBI_ERR_DENIED:
+		return -EPERM;
+	case SBI_ERR_INVALID_PARAM:
+		return -EINVAL;
+	case SBI_ERR_INVALID_ADDRESS:
+		return -EFAULT;
+	case SBI_ERR_NOT_SUPPORTED:
+	case SBI_ERR_FAILURE:
+	default:
+		return -ENOTSUPP;
+	};
+}

 extern bool sbi_debug_console_available;
 int sbi_debug_console_write(const char *bytes, unsigned int num_bytes);

View File

@@ -20,17 +20,21 @@ endif
 ifdef CONFIG_RISCV_ALTERNATIVE_EARLY
 CFLAGS_alternative.o := -mcmodel=medany
 CFLAGS_cpufeature.o := -mcmodel=medany
+CFLAGS_sbi_ecall.o := -mcmodel=medany
 ifdef CONFIG_FTRACE
 CFLAGS_REMOVE_alternative.o = $(CC_FLAGS_FTRACE)
 CFLAGS_REMOVE_cpufeature.o = $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_sbi_ecall.o = $(CC_FLAGS_FTRACE)
 endif
 ifdef CONFIG_RELOCATABLE
 CFLAGS_alternative.o += -fno-pie
 CFLAGS_cpufeature.o += -fno-pie
+CFLAGS_sbi_ecall.o += -fno-pie
 endif
 ifdef CONFIG_KASAN
 KASAN_SANITIZE_alternative.o := n
 KASAN_SANITIZE_cpufeature.o := n
+KASAN_SANITIZE_sbi_ecall.o := n
 endif
 endif
@@ -88,7 +92,7 @@ obj-$(CONFIG_DYNAMIC_FTRACE)	+= mcount-dyn.o
 obj-$(CONFIG_PERF_EVENTS)	+= perf_callchain.o
 obj-$(CONFIG_HAVE_PERF_REGS)	+= perf_regs.o
-obj-$(CONFIG_RISCV_SBI)		+= sbi.o
+obj-$(CONFIG_RISCV_SBI)		+= sbi.o sbi_ecall.o
 ifeq ($(CONFIG_RISCV_SBI), y)
 obj-$(CONFIG_SMP)		+= sbi-ipi.o
 obj-$(CONFIG_SMP)		+= cpu_ops_sbi.o

View File

@@ -14,9 +14,6 @@
 #include <asm/smp.h>
 #include <asm/tlbflush.h>

-#define CREATE_TRACE_POINTS
-#include <asm/trace.h>
-
 /* default SBI version is 0.1 */
 unsigned long sbi_spec_version __ro_after_init = SBI_SPEC_VERSION_DEFAULT;
 EXPORT_SYMBOL(sbi_spec_version);
@@ -27,55 +24,6 @@ static int (*__sbi_rfence)(int fid, const struct cpumask *cpu_mask,
 			   unsigned long start, unsigned long size,
 			   unsigned long arg4, unsigned long arg5) __ro_after_init;

-struct sbiret __sbi_ecall(unsigned long arg0, unsigned long arg1,
-			  unsigned long arg2, unsigned long arg3,
-			  unsigned long arg4, unsigned long arg5,
-			  int fid, int ext)
-{
-	struct sbiret ret;
-
-	trace_sbi_call(ext, fid);
-
-	register uintptr_t a0 asm ("a0") = (uintptr_t)(arg0);
-	register uintptr_t a1 asm ("a1") = (uintptr_t)(arg1);
-	register uintptr_t a2 asm ("a2") = (uintptr_t)(arg2);
-	register uintptr_t a3 asm ("a3") = (uintptr_t)(arg3);
-	register uintptr_t a4 asm ("a4") = (uintptr_t)(arg4);
-	register uintptr_t a5 asm ("a5") = (uintptr_t)(arg5);
-	register uintptr_t a6 asm ("a6") = (uintptr_t)(fid);
-	register uintptr_t a7 asm ("a7") = (uintptr_t)(ext);
-
-	asm volatile ("ecall"
-		      : "+r" (a0), "+r" (a1)
-		      : "r" (a2), "r" (a3), "r" (a4), "r" (a5), "r" (a6), "r" (a7)
-		      : "memory");
-	ret.error = a0;
-	ret.value = a1;
-
-	trace_sbi_return(ext, ret.error, ret.value);
-
-	return ret;
-}
-EXPORT_SYMBOL(__sbi_ecall);
-
-int sbi_err_map_linux_errno(int err)
-{
-	switch (err) {
-	case SBI_SUCCESS:
-		return 0;
-	case SBI_ERR_DENIED:
-		return -EPERM;
-	case SBI_ERR_INVALID_PARAM:
-		return -EINVAL;
-	case SBI_ERR_INVALID_ADDRESS:
-		return -EFAULT;
-	case SBI_ERR_NOT_SUPPORTED:
-	case SBI_ERR_FAILURE:
-	default:
-		return -ENOTSUPP;
-	};
-}
-EXPORT_SYMBOL(sbi_err_map_linux_errno);
-
 #ifdef CONFIG_RISCV_SBI_V01
 static unsigned long __sbi_v01_cpumask_to_hartmask(const struct cpumask *cpu_mask)
 {
@@ -535,17 +483,6 @@ long sbi_probe_extension(int extid)
 }
 EXPORT_SYMBOL(sbi_probe_extension);

-static long __sbi_base_ecall(int fid)
-{
-	struct sbiret ret;
-
-	ret = sbi_ecall(SBI_EXT_BASE, fid, 0, 0, 0, 0, 0, 0);
-	if (!ret.error)
-		return ret.value;
-	else
-		return sbi_err_map_linux_errno(ret.error);
-}
-
 static inline long sbi_get_spec_version(void)
 {
 	return __sbi_base_ecall(SBI_EXT_BASE_GET_SPEC_VERSION);

View File

@@ -0,0 +1,48 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2024 Rivos Inc. */

#include <asm/sbi.h>
#define CREATE_TRACE_POINTS
#include <asm/trace.h>

long __sbi_base_ecall(int fid)
{
	struct sbiret ret;

	ret = sbi_ecall(SBI_EXT_BASE, fid, 0, 0, 0, 0, 0, 0);
	if (!ret.error)
		return ret.value;
	else
		return sbi_err_map_linux_errno(ret.error);
}
EXPORT_SYMBOL(__sbi_base_ecall);

struct sbiret __sbi_ecall(unsigned long arg0, unsigned long arg1,
			  unsigned long arg2, unsigned long arg3,
			  unsigned long arg4, unsigned long arg5,
			  int fid, int ext)
{
	struct sbiret ret;

	trace_sbi_call(ext, fid);

	register uintptr_t a0 asm ("a0") = (uintptr_t)(arg0);
	register uintptr_t a1 asm ("a1") = (uintptr_t)(arg1);
	register uintptr_t a2 asm ("a2") = (uintptr_t)(arg2);
	register uintptr_t a3 asm ("a3") = (uintptr_t)(arg3);
	register uintptr_t a4 asm ("a4") = (uintptr_t)(arg4);
	register uintptr_t a5 asm ("a5") = (uintptr_t)(arg5);
	register uintptr_t a6 asm ("a6") = (uintptr_t)(fid);
	register uintptr_t a7 asm ("a7") = (uintptr_t)(ext);

	asm volatile ("ecall"
		      : "+r" (a0), "+r" (a1)
		      : "r" (a2), "r" (a3), "r" (a4), "r" (a5), "r" (a6), "r" (a7)
		      : "memory");
	ret.error = a0;
	ret.value = a1;

	trace_sbi_return(ext, ret.error, ret.value);

	return ret;
}
EXPORT_SYMBOL(__sbi_ecall);

View File

@@ -417,7 +417,7 @@ int handle_misaligned_load(struct pt_regs *regs)
 	val.data_u64 = 0;

 	if (user_mode(regs)) {
-		if (raw_copy_from_user(&val, (u8 __user *)addr, len))
+		if (copy_from_user(&val, (u8 __user *)addr, len))
 			return -1;
 	} else {
 		memcpy(&val, (u8 *)addr, len);
@@ -515,7 +515,7 @@ int handle_misaligned_store(struct pt_regs *regs)
 		return -EOPNOTSUPP;

 	if (user_mode(regs)) {
-		if (raw_copy_to_user((u8 __user *)addr, &val, len))
+		if (copy_to_user((u8 __user *)addr, &val, len))
 			return -1;
 	} else {
 		memcpy((u8 *)addr, &val, len);

View File

@@ -252,7 +252,7 @@ static void __init setup_bootmem(void)
 	 * The size of the linear page mapping may restrict the amount of
 	 * usable RAM.
 	 */
-	if (IS_ENABLED(CONFIG_64BIT)) {
+	if (IS_ENABLED(CONFIG_64BIT) && IS_ENABLED(CONFIG_MMU)) {
 		max_mapped_addr = __pa(PAGE_OFFSET) + KERN_VIRT_SIZE;
 		memblock_cap_memory_range(phys_ram_base,
 					  max_mapped_addr - phys_ram_base);

View File

@@ -35,7 +35,6 @@
 #include <clocksource/hyperv_timer.h>
 #include <linux/highmem.h>

-int hyperv_init_cpuhp;
 u64 hv_current_partition_id = ~0ull;
 EXPORT_SYMBOL_GPL(hv_current_partition_id);
@@ -607,8 +606,6 @@ skip_hypercall_pg_init:
 	register_syscore_ops(&hv_syscore_ops);

-	hyperv_init_cpuhp = cpuhp;
-
 	if (cpuid_ebx(HYPERV_CPUID_FEATURES) & HV_ACCESS_PARTITION_ID)
 		hv_get_partition_id();
@@ -637,7 +634,7 @@ skip_hypercall_pg_init:
 clean_guest_os_id:
 	wrmsrl(HV_X64_MSR_GUEST_OS_ID, 0);
 	hv_ivm_msr_write(HV_X64_MSR_GUEST_OS_ID, 0);
-	cpuhp_remove_state(cpuhp);
+	cpuhp_remove_state(CPUHP_AP_HYPERV_ONLINE);
 free_ghcb_page:
 	free_percpu(hv_ghcb_pg);
 free_vp_assist_page:

View File

@@ -40,7 +40,6 @@ static inline unsigned char hv_get_nmi_reason(void)
 }

 #if IS_ENABLED(CONFIG_HYPERV)
-extern int hyperv_init_cpuhp;
 extern bool hyperv_paravisor_present;

 extern void *hv_hypercall_pg;

View File

@@ -199,8 +199,8 @@ static void hv_machine_shutdown(void)
 	 * Call hv_cpu_die() on all the CPUs, otherwise later the hypervisor
 	 * corrupts the old VP Assist Pages and can crash the kexec kernel.
 	 */
-	if (kexec_in_progress && hyperv_init_cpuhp > 0)
-		cpuhp_remove_state(hyperv_init_cpuhp);
+	if (kexec_in_progress)
+		cpuhp_remove_state(CPUHP_AP_HYPERV_ONLINE);

 	/* The function calls stop_other_cpus(). */
 	native_machine_shutdown();
@@ -424,6 +424,7 @@ static void __init ms_hyperv_init_platform(void)
 	    ms_hyperv.misc_features & HV_FEATURE_FREQUENCY_MSRS_AVAILABLE) {
 		x86_platform.calibrate_tsc = hv_get_tsc_khz;
 		x86_platform.calibrate_cpu = hv_get_tsc_khz;
+		setup_force_cpu_cap(X86_FEATURE_TSC_KNOWN_FREQ);
 	}

 	if (ms_hyperv.priv_high & HV_ISOLATION) {
@@ -449,9 +450,23 @@ static void __init ms_hyperv_init_platform(void)
 			ms_hyperv.hints &= ~HV_X64_APIC_ACCESS_RECOMMENDED;

 			if (!ms_hyperv.paravisor_present) {
-				/* To be supported: more work is required. */
+				/*
+				 * Mark the Hyper-V TSC page feature as disabled
+				 * in a TDX VM without paravisor so that the
+				 * Invariant TSC, which is a better clocksource
+				 * anyway, is used instead.
+				 */
 				ms_hyperv.features &= ~HV_MSR_REFERENCE_TSC_AVAILABLE;

+				/*
+				 * The Invariant TSC is expected to be available
+				 * in a TDX VM without paravisor, but if not,
+				 * print a warning message. The slower Hyper-V MSR-based
+				 * Ref Counter should end up being the clocksource.
+				 */
+				if (!(ms_hyperv.features & HV_ACCESS_TSC_INVARIANT))
+					pr_warn("Hyper-V: Invariant TSC is unavailable\n");
+
 				/* HV_MSR_CRASH_CTL is unsupported. */
 				ms_hyperv.misc_features &= ~HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE;

View File

@@ -19,7 +19,6 @@ if VIRTUALIZATION
 config KVM
 	tristate "Kernel-based Virtual Machine (KVM) support"
-	depends on HIGH_RES_TIMERS
 	depends on X86_LOCAL_APIC
 	select KVM_COMMON
 	select KVM_GENERIC_MMU_NOTIFIER
@@ -144,8 +143,10 @@ config KVM_AMD_SEV
 	select HAVE_KVM_ARCH_GMEM_PREPARE
 	select HAVE_KVM_ARCH_GMEM_INVALIDATE
 	help
-	  Provides support for launching Encrypted VMs (SEV) and Encrypted VMs
-	  with Encrypted State (SEV-ES) on AMD processors.
+	  Provides support for launching encrypted VMs which use Secure
+	  Encrypted Virtualization (SEV), Secure Encrypted Virtualization with
+	  Encrypted State (SEV-ES), and Secure Encrypted Virtualization with
+	  Secure Nested Paging (SEV-SNP) technologies on AMD processors.

 config KVM_SMM
 	bool "System Management Mode emulation"

View File

@@ -4750,7 +4750,9 @@ long kvm_arch_vcpu_pre_fault_memory(struct kvm_vcpu *vcpu,
 	 * reload is efficient when called repeatedly, so we can do it on
 	 * every iteration.
 	 */
-	kvm_mmu_reload(vcpu);
+	r = kvm_mmu_reload(vcpu);
+	if (r)
+		return r;

 	if (kvm_arch_has_private_mem(vcpu->kvm) &&
 	    kvm_mem_is_private(vcpu->kvm, gpa_to_gfn(range->gpa)))

View File

@@ -391,9 +391,9 @@ void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 mmio_mask, u64 access_mask)
 		mmio_value = 0;

 	/*
-	 * The masked MMIO value must obviously match itself and a removed SPTE
-	 * must not get a false positive.  Removed SPTEs and MMIO SPTEs should
-	 * never collide as MMIO must set some RWX bits, and removed SPTEs must
+	 * The masked MMIO value must obviously match itself and a frozen SPTE
+	 * must not get a false positive.  Frozen SPTEs and MMIO SPTEs should
+	 * never collide as MMIO must set some RWX bits, and frozen SPTEs must
 	 * not set any RWX bits.
 	 */
 	if (WARN_ON((mmio_value & mmio_mask) != mmio_value) ||

View File

@@ -214,7 +214,7 @@ extern u64 __read_mostly shadow_nonpresent_or_rsvd_mask;
  */
 #define FROZEN_SPTE	(SHADOW_NONPRESENT_VALUE | 0x5a0ULL)

-/* Removed SPTEs must not be misconstrued as shadow present PTEs. */
+/* Frozen SPTEs must not be misconstrued as shadow present PTEs. */
 static_assert(!(FROZEN_SPTE & SPTE_MMU_PRESENT_MASK));

 static inline bool is_frozen_spte(u64 spte)

View File

@@ -359,10 +359,10 @@ static void handle_removed_pt(struct kvm *kvm, tdp_ptep_t pt, bool shared)
 			/*
 			 * Set the SPTE to a nonpresent value that other
 			 * threads will not overwrite. If the SPTE was
-			 * already marked as removed then another thread
+			 * already marked as frozen then another thread
 			 * handling a page fault could overwrite it, so
 			 * set the SPTE until it is set from some other
-			 * value to the removed SPTE value.
+			 * value to the frozen SPTE value.
 			 */
 			for (;;) {
 				old_spte = kvm_tdp_mmu_write_spte_atomic(sptep, FROZEN_SPTE);
@@ -536,8 +536,8 @@ static inline int __must_check __tdp_mmu_set_spte_atomic(struct tdp_iter *iter,
 	u64 *sptep = rcu_dereference(iter->sptep);

 	/*
-	 * The caller is responsible for ensuring the old SPTE is not a REMOVED
-	 * SPTE.  KVM should never attempt to zap or manipulate a REMOVED SPTE,
+	 * The caller is responsible for ensuring the old SPTE is not a FROZEN
+	 * SPTE.  KVM should never attempt to zap or manipulate a FROZEN SPTE,
 	 * and pre-checking before inserting a new SPTE is advantageous as it
 	 * avoids unnecessary work.
 	 */

View File

@@ -2876,6 +2876,12 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	case MSR_CSTAR:
 		msr_info->data = svm->vmcb01.ptr->save.cstar;
 		break;
+	case MSR_GS_BASE:
+		msr_info->data = svm->vmcb01.ptr->save.gs.base;
+		break;
+	case MSR_FS_BASE:
+		msr_info->data = svm->vmcb01.ptr->save.fs.base;
+		break;
 	case MSR_KERNEL_GS_BASE:
 		msr_info->data = svm->vmcb01.ptr->save.kernel_gs_base;
 		break;
@@ -3101,6 +3107,12 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
 	case MSR_CSTAR:
 		svm->vmcb01.ptr->save.cstar = data;
 		break;
+	case MSR_GS_BASE:
+		svm->vmcb01.ptr->save.gs.base = data;
+		break;
+	case MSR_FS_BASE:
+		svm->vmcb01.ptr->save.fs.base = data;
+		break;
 	case MSR_KERNEL_GS_BASE:
 		svm->vmcb01.ptr->save.kernel_gs_base = data;
 		break;
@@ -5224,6 +5236,9 @@ static __init void svm_set_cpu_caps(void)

 	/* CPUID 0x8000001F (SME/SEV features) */
 	sev_set_cpu_caps();
+
+	/* Don't advertise Bus Lock Detect to guest if SVM support is absent */
+	kvm_cpu_cap_clear(X86_FEATURE_BUS_LOCK_DETECT);
 }

 static __init int svm_hardware_setup(void)

View File

@@ -4656,7 +4656,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 	case KVM_CAP_ASYNC_PF_INT:
 	case KVM_CAP_GET_TSC_KHZ:
 	case KVM_CAP_KVMCLOCK_CTRL:
-	case KVM_CAP_READONLY_MEM:
 	case KVM_CAP_IOAPIC_POLARITY_IGNORED:
 	case KVM_CAP_TSC_DEADLINE_TIMER:
 	case KVM_CAP_DISABLE_QUIRKS:
@@ -4815,6 +4814,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 	case KVM_CAP_VM_TYPES:
 		r = kvm_caps.supported_vm_types;
 		break;
+	case KVM_CAP_READONLY_MEM:
+		r = kvm ? kvm_arch_has_readonly_mem(kvm) : 1;
+		break;
 	default:
 		break;
 	}
@@ -6040,7 +6042,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
 		if (copy_from_user(&events, argp, sizeof(struct kvm_vcpu_events)))
 			break;

+		kvm_vcpu_srcu_read_lock(vcpu);
 		r = kvm_vcpu_ioctl_x86_set_vcpu_events(vcpu, &events);
+		kvm_vcpu_srcu_read_unlock(vcpu);
 		break;
 	}
 	case KVM_GET_DEBUGREGS: {

View File

@@ -167,10 +167,6 @@ int bio_integrity_add_page(struct bio *bio, struct page *page,
 	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
 	struct bio_integrity_payload *bip = bio_integrity(bio);

-	if (((bip->bip_iter.bi_size + len) >> SECTOR_SHIFT) >
-	    queue_max_hw_sectors(q))
-		return 0;
-
 	if (bip->bip_vcnt > 0) {
 		struct bio_vec *bv = &bip->bip_vec[bip->bip_vcnt - 1];
 		bool same_page = false;

View File

@@ -3422,6 +3422,7 @@ static void binder_transaction(struct binder_proc *proc,
 		 */
 		copy_size = object_offset - user_offset;
 		if (copy_size && (user_offset > object_offset ||
+				  object_offset > tr->data_size ||
 				  binder_alloc_copy_user_to_buffer(
 					  &target_proc->alloc,
 					  t->buffer, user_offset,

View File

@@ -2663,6 +2663,8 @@ static int ublk_ctrl_start_recovery(struct ublk_device *ub,
 	mutex_lock(&ub->mutex);
 	if (!ublk_can_use_recovery(ub))
 		goto out_unlock;
+	if (!ub->nr_queues_ready)
+		goto out_unlock;
 	/*
 	 * START_RECOVERY is only allowd after:
 	 *

View File

@ -40,7 +40,8 @@
#define PLL_USER_CTL(p) ((p)->offset + (p)->regs[PLL_OFF_USER_CTL]) #define PLL_USER_CTL(p) ((p)->offset + (p)->regs[PLL_OFF_USER_CTL])
# define PLL_POST_DIV_SHIFT 8 # define PLL_POST_DIV_SHIFT 8
# define PLL_POST_DIV_MASK(p) GENMASK((p)->width, 0) # define PLL_POST_DIV_MASK(p) GENMASK((p)->width - 1, 0)
# define PLL_ALPHA_MSB BIT(15)
# define PLL_ALPHA_EN BIT(24) # define PLL_ALPHA_EN BIT(24)
# define PLL_ALPHA_MODE BIT(25) # define PLL_ALPHA_MODE BIT(25)
# define PLL_VCO_SHIFT 20 # define PLL_VCO_SHIFT 20
@ -1552,8 +1553,8 @@ clk_trion_pll_postdiv_set_rate(struct clk_hw *hw, unsigned long rate,
} }
return regmap_update_bits(regmap, PLL_USER_CTL(pll), return regmap_update_bits(regmap, PLL_USER_CTL(pll),
PLL_POST_DIV_MASK(pll) << PLL_POST_DIV_SHIFT, PLL_POST_DIV_MASK(pll) << pll->post_div_shift,
val << PLL_POST_DIV_SHIFT); val << pll->post_div_shift);
} }
const struct clk_ops clk_alpha_pll_postdiv_trion_ops = { const struct clk_ops clk_alpha_pll_postdiv_trion_ops = {
@ -2117,6 +2118,18 @@ static void clk_zonda_pll_disable(struct clk_hw *hw)
regmap_write(regmap, PLL_OPMODE(pll), 0x0); regmap_write(regmap, PLL_OPMODE(pll), 0x0);
} }
static void zonda_pll_adjust_l_val(unsigned long rate, unsigned long prate, u32 *l)
{
u64 remainder, quotient;
quotient = rate;
remainder = do_div(quotient, prate);
*l = quotient;
if ((remainder * 2) / prate)
*l = *l + 1;
}
static int clk_zonda_pll_set_rate(struct clk_hw *hw, unsigned long rate, static int clk_zonda_pll_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long prate) unsigned long prate)
{ {
@ -2133,9 +2146,15 @@ static int clk_zonda_pll_set_rate(struct clk_hw *hw, unsigned long rate,
if (ret < 0) if (ret < 0)
return ret; return ret;
if (a & PLL_ALPHA_MSB)
zonda_pll_adjust_l_val(rate, prate, &l);
regmap_write(pll->clkr.regmap, PLL_ALPHA_VAL(pll), a); regmap_write(pll->clkr.regmap, PLL_ALPHA_VAL(pll), a);
regmap_write(pll->clkr.regmap, PLL_L_VAL(pll), l); regmap_write(pll->clkr.regmap, PLL_L_VAL(pll), l);
if (!clk_hw_is_enabled(hw))
return 0;
/* Wait before polling for the frequency latch */ /* Wait before polling for the frequency latch */
udelay(5); udelay(5);
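
zonda_pll_adjust_l_val() above is round-to-nearest division: when the fractional (alpha) value has its MSB set, the target rate sits in the upper half between two integer multiples of the parent rate, so the integer multiplier L is bumped by one. The same rounding rule as a user-space sketch (hypothetical rates):

#include <stdio.h>
#include <stdint.h>

/* Round-to-nearest integer division, mirroring zonda_pll_adjust_l_val(). */
static uint32_t adjust_l_val(uint64_t rate, uint64_t prate)
{
	uint64_t quotient = rate / prate;
	uint64_t remainder = rate % prate;

	if (remainder * 2 >= prate)	/* fractional part >= 0.5 */
		quotient++;
	return (uint32_t)quotient;
}

int main(void)
{
	/* 1.55 GHz from a 19.2 MHz reference: 80.73, rounds up to 81. */
	printf("L = %u\n", adjust_l_val(1550000000ULL, 19200000ULL));
	return 0;
}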

View File

@ -198,6 +198,7 @@ extern const struct clk_ops clk_byte2_ops;
extern const struct clk_ops clk_pixel_ops; extern const struct clk_ops clk_pixel_ops;
extern const struct clk_ops clk_gfx3d_ops; extern const struct clk_ops clk_gfx3d_ops;
extern const struct clk_ops clk_rcg2_shared_ops; extern const struct clk_ops clk_rcg2_shared_ops;
extern const struct clk_ops clk_rcg2_shared_no_init_park_ops;
extern const struct clk_ops clk_dp_ops; extern const struct clk_ops clk_dp_ops;
struct clk_rcg_dfs_data { struct clk_rcg_dfs_data {

View File

@ -1348,6 +1348,36 @@ const struct clk_ops clk_rcg2_shared_ops = {
}; };
EXPORT_SYMBOL_GPL(clk_rcg2_shared_ops); EXPORT_SYMBOL_GPL(clk_rcg2_shared_ops);
static int clk_rcg2_shared_no_init_park(struct clk_hw *hw)
{
struct clk_rcg2 *rcg = to_clk_rcg2(hw);
/*
* Read the config register so that the parent is properly mapped at
* registration time.
*/
regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &rcg->parked_cfg);
return 0;
}
/*
* Like clk_rcg2_shared_ops but skip the init so that the clk frequency is left
* unchanged at registration time.
*/
const struct clk_ops clk_rcg2_shared_no_init_park_ops = {
.init = clk_rcg2_shared_no_init_park,
.enable = clk_rcg2_shared_enable,
.disable = clk_rcg2_shared_disable,
.get_parent = clk_rcg2_shared_get_parent,
.set_parent = clk_rcg2_shared_set_parent,
.recalc_rate = clk_rcg2_shared_recalc_rate,
.determine_rate = clk_rcg2_determine_rate,
.set_rate = clk_rcg2_shared_set_rate,
.set_rate_and_parent = clk_rcg2_shared_set_rate_and_parent,
};
EXPORT_SYMBOL_GPL(clk_rcg2_shared_no_init_park_ops);
/* Common APIs to be used for DFS based RCGR */ /* Common APIs to be used for DFS based RCGR */
static void clk_rcg2_dfs_populate_freq(struct clk_hw *hw, unsigned int l, static void clk_rcg2_dfs_populate_freq(struct clk_hw *hw, unsigned int l,
struct freq_tbl *f) struct freq_tbl *f)
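
These ops matter for RCGs that boot firmware has already configured: .init only caches the currently parked CFG instead of reprogramming the clock, and the gcc-sm8550 hunk further down moves the USB30 primary master RCG onto them for exactly that reason. A hypothetical user of the new ops (sketch; register offset, parent map, and frequency table names are invented):

/* Sketch: a shared RCG whose boot-time frequency must be preserved. */
static struct clk_rcg2 example_usb30_master_clk_src = {
	.cmd_rcgr = 0x1a01c,			/* hypothetical offset */
	.mnd_width = 8,
	.hid_width = 5,
	.parent_map = gcc_parent_map_0,		/* assumed to exist */
	.freq_tbl = ftbl_example_usb30_master_clk_src,
	.clkr.hw.init = &(struct clk_init_data){
		.name = "example_usb30_master_clk_src",
		.parent_data = gcc_parent_data_0,
		.num_parents = ARRAY_SIZE(gcc_parent_data_0),
		.flags = CLK_SET_RATE_PARENT,
		.ops = &clk_rcg2_shared_no_init_park_ops, /* no init-time park */
	},
};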

View File

@ -68,7 +68,7 @@ static const struct clk_parent_data gcc_sleep_clk_data[] = {
static struct clk_alpha_pll gpll0_main = { static struct clk_alpha_pll gpll0_main = {
.offset = 0x20000, .offset = 0x20000,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT], .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO],
.clkr = { .clkr = {
.enable_reg = 0x0b000, .enable_reg = 0x0b000,
.enable_mask = BIT(0), .enable_mask = BIT(0),
@ -96,7 +96,7 @@ static struct clk_fixed_factor gpll0_out_main_div2 = {
static struct clk_alpha_pll_postdiv gpll0 = { static struct clk_alpha_pll_postdiv gpll0 = {
.offset = 0x20000, .offset = 0x20000,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT], .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO],
.width = 4, .width = 4,
.clkr.hw.init = &(const struct clk_init_data) { .clkr.hw.init = &(const struct clk_init_data) {
.name = "gpll0", .name = "gpll0",
@ -110,7 +110,7 @@ static struct clk_alpha_pll_postdiv gpll0 = {
static struct clk_alpha_pll gpll4_main = { static struct clk_alpha_pll gpll4_main = {
.offset = 0x22000, .offset = 0x22000,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT], .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO],
.clkr = { .clkr = {
.enable_reg = 0x0b000, .enable_reg = 0x0b000,
.enable_mask = BIT(2), .enable_mask = BIT(2),
@ -125,7 +125,7 @@ static struct clk_alpha_pll gpll4_main = {
static struct clk_alpha_pll_postdiv gpll4 = { static struct clk_alpha_pll_postdiv gpll4 = {
.offset = 0x22000, .offset = 0x22000,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT], .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO],
.width = 4, .width = 4,
.clkr.hw.init = &(const struct clk_init_data) { .clkr.hw.init = &(const struct clk_init_data) {
.name = "gpll4", .name = "gpll4",
@ -139,7 +139,7 @@ static struct clk_alpha_pll_postdiv gpll4 = {
static struct clk_alpha_pll gpll2_main = { static struct clk_alpha_pll gpll2_main = {
.offset = 0x21000, .offset = 0x21000,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT], .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO],
.clkr = { .clkr = {
.enable_reg = 0x0b000, .enable_reg = 0x0b000,
.enable_mask = BIT(1), .enable_mask = BIT(1),
@ -154,7 +154,7 @@ static struct clk_alpha_pll gpll2_main = {
static struct clk_alpha_pll_postdiv gpll2 = { static struct clk_alpha_pll_postdiv gpll2 = {
.offset = 0x21000, .offset = 0x21000,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT], .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT_EVO],
.width = 4, .width = 4,
.clkr.hw.init = &(const struct clk_init_data) { .clkr.hw.init = &(const struct clk_init_data) {
.name = "gpll2", .name = "gpll2",

View File

@ -1500,7 +1500,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s0_clk_src_init = {
.parent_data = gcc_parent_data_0, .parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0), .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT, .flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_shared_ops, .ops = &clk_rcg2_ops,
}; };
static struct clk_rcg2 gcc_qupv3_wrap0_s0_clk_src = { static struct clk_rcg2 gcc_qupv3_wrap0_s0_clk_src = {
@ -1517,7 +1517,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s1_clk_src_init = {
.parent_data = gcc_parent_data_0, .parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0), .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT, .flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_shared_ops, .ops = &clk_rcg2_ops,
}; };
static struct clk_rcg2 gcc_qupv3_wrap0_s1_clk_src = { static struct clk_rcg2 gcc_qupv3_wrap0_s1_clk_src = {
@ -1534,7 +1534,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s2_clk_src_init = {
.parent_data = gcc_parent_data_0, .parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0), .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT, .flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_shared_ops, .ops = &clk_rcg2_ops,
}; };
static struct clk_rcg2 gcc_qupv3_wrap0_s2_clk_src = { static struct clk_rcg2 gcc_qupv3_wrap0_s2_clk_src = {
@ -1551,7 +1551,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s3_clk_src_init = {
.parent_data = gcc_parent_data_0, .parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0), .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT, .flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_shared_ops, .ops = &clk_rcg2_ops,
}; };
static struct clk_rcg2 gcc_qupv3_wrap0_s3_clk_src = { static struct clk_rcg2 gcc_qupv3_wrap0_s3_clk_src = {
@ -1568,7 +1568,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s4_clk_src_init = {
.parent_data = gcc_parent_data_0, .parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0), .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT, .flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_shared_ops, .ops = &clk_rcg2_ops,
}; };
static struct clk_rcg2 gcc_qupv3_wrap0_s4_clk_src = { static struct clk_rcg2 gcc_qupv3_wrap0_s4_clk_src = {
@ -1585,7 +1585,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s5_clk_src_init = {
.parent_data = gcc_parent_data_0, .parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0), .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT, .flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_shared_ops, .ops = &clk_rcg2_ops,
}; };
static struct clk_rcg2 gcc_qupv3_wrap0_s5_clk_src = { static struct clk_rcg2 gcc_qupv3_wrap0_s5_clk_src = {
@ -1617,7 +1617,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s6_clk_src_init = {
.parent_data = gcc_parent_data_0, .parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0), .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT, .flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_shared_ops, .ops = &clk_rcg2_ops,
}; };
static struct clk_rcg2 gcc_qupv3_wrap0_s6_clk_src = { static struct clk_rcg2 gcc_qupv3_wrap0_s6_clk_src = {
@ -1634,7 +1634,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s7_clk_src_init = {
.parent_data = gcc_parent_data_0, .parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0), .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT, .flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_shared_ops, .ops = &clk_rcg2_ops,
}; };
static struct clk_rcg2 gcc_qupv3_wrap0_s7_clk_src = { static struct clk_rcg2 gcc_qupv3_wrap0_s7_clk_src = {
@ -1651,7 +1651,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s0_clk_src_init = {
.parent_data = gcc_parent_data_0, .parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0), .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT, .flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_shared_ops, .ops = &clk_rcg2_ops,
}; };
static struct clk_rcg2 gcc_qupv3_wrap1_s0_clk_src = { static struct clk_rcg2 gcc_qupv3_wrap1_s0_clk_src = {
@ -1668,7 +1668,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s1_clk_src_init = {
.parent_data = gcc_parent_data_0, .parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0), .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT, .flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_shared_ops, .ops = &clk_rcg2_ops,
}; };
static struct clk_rcg2 gcc_qupv3_wrap1_s1_clk_src = { static struct clk_rcg2 gcc_qupv3_wrap1_s1_clk_src = {
@ -1685,7 +1685,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s2_clk_src_init = {
.parent_data = gcc_parent_data_0, .parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0), .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT, .flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_shared_ops, .ops = &clk_rcg2_ops,
}; };
static struct clk_rcg2 gcc_qupv3_wrap1_s2_clk_src = { static struct clk_rcg2 gcc_qupv3_wrap1_s2_clk_src = {
@ -1702,7 +1702,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s3_clk_src_init = {
.parent_data = gcc_parent_data_0, .parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0), .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT, .flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_shared_ops, .ops = &clk_rcg2_ops,
}; };
static struct clk_rcg2 gcc_qupv3_wrap1_s3_clk_src = { static struct clk_rcg2 gcc_qupv3_wrap1_s3_clk_src = {
@ -1719,7 +1719,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s4_clk_src_init = {
.parent_data = gcc_parent_data_0, .parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0), .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT, .flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_shared_ops, .ops = &clk_rcg2_ops,
}; };
static struct clk_rcg2 gcc_qupv3_wrap1_s4_clk_src = { static struct clk_rcg2 gcc_qupv3_wrap1_s4_clk_src = {
@ -1736,7 +1736,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s5_clk_src_init = {
.parent_data = gcc_parent_data_0, .parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0), .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT, .flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_shared_ops, .ops = &clk_rcg2_ops,
}; };
static struct clk_rcg2 gcc_qupv3_wrap1_s5_clk_src = { static struct clk_rcg2 gcc_qupv3_wrap1_s5_clk_src = {
@ -1753,7 +1753,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s6_clk_src_init = {
.parent_data = gcc_parent_data_0, .parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0), .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT, .flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_shared_ops, .ops = &clk_rcg2_ops,
}; };
static struct clk_rcg2 gcc_qupv3_wrap1_s6_clk_src = { static struct clk_rcg2 gcc_qupv3_wrap1_s6_clk_src = {
@ -1770,7 +1770,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s7_clk_src_init = {
.parent_data = gcc_parent_data_0, .parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0), .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT, .flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_shared_ops, .ops = &clk_rcg2_ops,
}; };
static struct clk_rcg2 gcc_qupv3_wrap1_s7_clk_src = { static struct clk_rcg2 gcc_qupv3_wrap1_s7_clk_src = {
@ -1787,7 +1787,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s0_clk_src_init = {
.parent_data = gcc_parent_data_0, .parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0), .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT, .flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_shared_ops, .ops = &clk_rcg2_ops,
}; };
static struct clk_rcg2 gcc_qupv3_wrap2_s0_clk_src = { static struct clk_rcg2 gcc_qupv3_wrap2_s0_clk_src = {
@ -1804,7 +1804,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s1_clk_src_init = {
.parent_data = gcc_parent_data_0, .parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0), .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT, .flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_shared_ops, .ops = &clk_rcg2_ops,
}; };
static struct clk_rcg2 gcc_qupv3_wrap2_s1_clk_src = { static struct clk_rcg2 gcc_qupv3_wrap2_s1_clk_src = {
@ -1821,7 +1821,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s2_clk_src_init = {
.parent_data = gcc_parent_data_0, .parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0), .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT, .flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_shared_ops, .ops = &clk_rcg2_ops,
}; };
static struct clk_rcg2 gcc_qupv3_wrap2_s2_clk_src = { static struct clk_rcg2 gcc_qupv3_wrap2_s2_clk_src = {
@ -1838,7 +1838,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s3_clk_src_init = {
.parent_data = gcc_parent_data_0, .parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0), .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT, .flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_shared_ops, .ops = &clk_rcg2_ops,
}; };
static struct clk_rcg2 gcc_qupv3_wrap2_s3_clk_src = { static struct clk_rcg2 gcc_qupv3_wrap2_s3_clk_src = {
@ -1855,7 +1855,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s4_clk_src_init = {
.parent_data = gcc_parent_data_0, .parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0), .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT, .flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_shared_ops, .ops = &clk_rcg2_ops,
}; };
static struct clk_rcg2 gcc_qupv3_wrap2_s4_clk_src = { static struct clk_rcg2 gcc_qupv3_wrap2_s4_clk_src = {
@ -1872,7 +1872,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s5_clk_src_init = {
.parent_data = gcc_parent_data_0, .parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0), .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT, .flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_shared_ops, .ops = &clk_rcg2_ops,
}; };
static struct clk_rcg2 gcc_qupv3_wrap2_s5_clk_src = { static struct clk_rcg2 gcc_qupv3_wrap2_s5_clk_src = {
@ -1889,7 +1889,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s6_clk_src_init = {
.parent_data = gcc_parent_data_0, .parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0), .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT, .flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_shared_ops, .ops = &clk_rcg2_ops,
}; };
static struct clk_rcg2 gcc_qupv3_wrap2_s6_clk_src = { static struct clk_rcg2 gcc_qupv3_wrap2_s6_clk_src = {
@ -1906,7 +1906,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s7_clk_src_init = {
.parent_data = gcc_parent_data_0, .parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0), .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT, .flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_shared_ops, .ops = &clk_rcg2_ops,
}; };
static struct clk_rcg2 gcc_qupv3_wrap2_s7_clk_src = { static struct clk_rcg2 gcc_qupv3_wrap2_s7_clk_src = {

View File

@ -536,7 +536,7 @@ static struct clk_rcg2 gcc_qupv3_i2c_s0_clk_src = {
.parent_data = gcc_parent_data_0, .parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0), .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT, .flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_shared_ops, .ops = &clk_rcg2_ops,
}, },
}; };
@ -551,7 +551,7 @@ static struct clk_rcg2 gcc_qupv3_i2c_s1_clk_src = {
.parent_data = gcc_parent_data_0, .parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0), .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT, .flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_shared_ops, .ops = &clk_rcg2_ops,
}, },
}; };
@ -566,7 +566,7 @@ static struct clk_rcg2 gcc_qupv3_i2c_s2_clk_src = {
.parent_data = gcc_parent_data_0, .parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0), .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT, .flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_shared_ops, .ops = &clk_rcg2_ops,
}, },
}; };
@ -581,7 +581,7 @@ static struct clk_rcg2 gcc_qupv3_i2c_s3_clk_src = {
.parent_data = gcc_parent_data_0, .parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0), .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT, .flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_shared_ops, .ops = &clk_rcg2_ops,
}, },
}; };
@ -596,7 +596,7 @@ static struct clk_rcg2 gcc_qupv3_i2c_s4_clk_src = {
.parent_data = gcc_parent_data_0, .parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0), .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT, .flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_shared_ops, .ops = &clk_rcg2_ops,
}, },
}; };
@ -611,7 +611,7 @@ static struct clk_rcg2 gcc_qupv3_i2c_s5_clk_src = {
.parent_data = gcc_parent_data_0, .parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0), .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT, .flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_shared_ops, .ops = &clk_rcg2_ops,
}, },
}; };
@ -626,7 +626,7 @@ static struct clk_rcg2 gcc_qupv3_i2c_s6_clk_src = {
.parent_data = gcc_parent_data_0, .parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0), .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT, .flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_shared_ops, .ops = &clk_rcg2_ops,
}, },
}; };
@ -641,7 +641,7 @@ static struct clk_rcg2 gcc_qupv3_i2c_s7_clk_src = {
.parent_data = gcc_parent_data_0, .parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0), .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT, .flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_shared_ops, .ops = &clk_rcg2_ops,
}, },
}; };
@ -656,7 +656,7 @@ static struct clk_rcg2 gcc_qupv3_i2c_s8_clk_src = {
.parent_data = gcc_parent_data_0, .parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0), .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT, .flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_shared_ops, .ops = &clk_rcg2_ops,
}, },
}; };
@ -671,7 +671,7 @@ static struct clk_rcg2 gcc_qupv3_i2c_s9_clk_src = {
.parent_data = gcc_parent_data_0, .parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0), .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT, .flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_shared_ops, .ops = &clk_rcg2_ops,
}, },
}; };
@ -700,7 +700,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s0_clk_src_init = {
.parent_data = gcc_parent_data_0, .parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0), .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT, .flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_shared_ops, .ops = &clk_rcg2_ops,
}; };
static struct clk_rcg2 gcc_qupv3_wrap1_s0_clk_src = { static struct clk_rcg2 gcc_qupv3_wrap1_s0_clk_src = {
@ -717,7 +717,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s1_clk_src_init = {
.parent_data = gcc_parent_data_0, .parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0), .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT, .flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_shared_ops, .ops = &clk_rcg2_ops,
}; };
static struct clk_rcg2 gcc_qupv3_wrap1_s1_clk_src = { static struct clk_rcg2 gcc_qupv3_wrap1_s1_clk_src = {
@ -750,7 +750,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s2_clk_src_init = {
.parent_data = gcc_parent_data_0, .parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0), .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT, .flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_shared_ops, .ops = &clk_rcg2_ops,
}; };
static struct clk_rcg2 gcc_qupv3_wrap1_s2_clk_src = { static struct clk_rcg2 gcc_qupv3_wrap1_s2_clk_src = {
@ -767,7 +767,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s3_clk_src_init = {
.parent_data = gcc_parent_data_0, .parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0), .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT, .flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_shared_ops, .ops = &clk_rcg2_ops,
}; };
static struct clk_rcg2 gcc_qupv3_wrap1_s3_clk_src = { static struct clk_rcg2 gcc_qupv3_wrap1_s3_clk_src = {
@ -784,7 +784,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s4_clk_src_init = {
.parent_data = gcc_parent_data_0, .parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0), .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT, .flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_shared_ops, .ops = &clk_rcg2_ops,
}; };
static struct clk_rcg2 gcc_qupv3_wrap1_s4_clk_src = { static struct clk_rcg2 gcc_qupv3_wrap1_s4_clk_src = {
@ -801,7 +801,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s5_clk_src_init = {
.parent_data = gcc_parent_data_0, .parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0), .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT, .flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_shared_ops, .ops = &clk_rcg2_ops,
}; };
static struct clk_rcg2 gcc_qupv3_wrap1_s5_clk_src = { static struct clk_rcg2 gcc_qupv3_wrap1_s5_clk_src = {
@ -818,7 +818,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s6_clk_src_init = {
.parent_data = gcc_parent_data_0, .parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0), .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT, .flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_shared_ops, .ops = &clk_rcg2_ops,
}; };
static struct clk_rcg2 gcc_qupv3_wrap1_s6_clk_src = { static struct clk_rcg2 gcc_qupv3_wrap1_s6_clk_src = {
@ -835,7 +835,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s7_clk_src_init = {
.parent_data = gcc_parent_data_0, .parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0), .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT, .flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_shared_ops, .ops = &clk_rcg2_ops,
}; };
static struct clk_rcg2 gcc_qupv3_wrap1_s7_clk_src = { static struct clk_rcg2 gcc_qupv3_wrap1_s7_clk_src = {
@ -852,7 +852,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s0_clk_src_init = {
.parent_data = gcc_parent_data_0, .parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0), .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT, .flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_shared_ops, .ops = &clk_rcg2_ops,
}; };
static struct clk_rcg2 gcc_qupv3_wrap2_s0_clk_src = { static struct clk_rcg2 gcc_qupv3_wrap2_s0_clk_src = {
@ -869,7 +869,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s1_clk_src_init = {
.parent_data = gcc_parent_data_0, .parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0), .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT, .flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_shared_ops, .ops = &clk_rcg2_ops,
}; };
static struct clk_rcg2 gcc_qupv3_wrap2_s1_clk_src = { static struct clk_rcg2 gcc_qupv3_wrap2_s1_clk_src = {
@ -886,7 +886,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s2_clk_src_init = {
.parent_data = gcc_parent_data_0, .parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0), .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT, .flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_shared_ops, .ops = &clk_rcg2_ops,
}; };
static struct clk_rcg2 gcc_qupv3_wrap2_s2_clk_src = { static struct clk_rcg2 gcc_qupv3_wrap2_s2_clk_src = {
@ -903,7 +903,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s3_clk_src_init = {
.parent_data = gcc_parent_data_0, .parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0), .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT, .flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_shared_ops, .ops = &clk_rcg2_ops,
}; };
static struct clk_rcg2 gcc_qupv3_wrap2_s3_clk_src = { static struct clk_rcg2 gcc_qupv3_wrap2_s3_clk_src = {
@ -920,7 +920,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s4_clk_src_init = {
.parent_data = gcc_parent_data_0, .parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0), .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT, .flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_shared_ops, .ops = &clk_rcg2_ops,
}; };
static struct clk_rcg2 gcc_qupv3_wrap2_s4_clk_src = { static struct clk_rcg2 gcc_qupv3_wrap2_s4_clk_src = {
@ -937,7 +937,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s5_clk_src_init = {
.parent_data = gcc_parent_data_0, .parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0), .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT, .flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_shared_ops, .ops = &clk_rcg2_ops,
}; };
static struct clk_rcg2 gcc_qupv3_wrap2_s5_clk_src = { static struct clk_rcg2 gcc_qupv3_wrap2_s5_clk_src = {
@ -975,7 +975,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s6_clk_src_init = {
.parent_data = gcc_parent_data_8, .parent_data = gcc_parent_data_8,
.num_parents = ARRAY_SIZE(gcc_parent_data_8), .num_parents = ARRAY_SIZE(gcc_parent_data_8),
.flags = CLK_SET_RATE_PARENT, .flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_shared_ops, .ops = &clk_rcg2_ops,
}; };
static struct clk_rcg2 gcc_qupv3_wrap2_s6_clk_src = { static struct clk_rcg2 gcc_qupv3_wrap2_s6_clk_src = {
@ -992,7 +992,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s7_clk_src_init = {
.parent_data = gcc_parent_data_0, .parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0), .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT, .flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_shared_ops, .ops = &clk_rcg2_ops,
}; };
static struct clk_rcg2 gcc_qupv3_wrap2_s7_clk_src = { static struct clk_rcg2 gcc_qupv3_wrap2_s7_clk_src = {
@ -1159,7 +1159,7 @@ static struct clk_rcg2 gcc_usb30_prim_master_clk_src = {
.parent_data = gcc_parent_data_0, .parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0), .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT, .flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_shared_ops, .ops = &clk_rcg2_shared_no_init_park_ops,
}, },
}; };

View File

@ -713,7 +713,7 @@ static struct clk_rcg2 gcc_qupv3_i2c_s0_clk_src = {
.parent_data = gcc_parent_data_0, .parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0), .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT, .flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_shared_ops, .ops = &clk_rcg2_ops,
}, },
}; };
@ -728,7 +728,7 @@ static struct clk_rcg2 gcc_qupv3_i2c_s1_clk_src = {
.parent_data = gcc_parent_data_0, .parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0), .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT, .flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_shared_ops, .ops = &clk_rcg2_ops,
}, },
}; };
@ -743,7 +743,7 @@ static struct clk_rcg2 gcc_qupv3_i2c_s2_clk_src = {
.parent_data = gcc_parent_data_0, .parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0), .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT, .flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_shared_ops, .ops = &clk_rcg2_ops,
}, },
}; };
@ -758,7 +758,7 @@ static struct clk_rcg2 gcc_qupv3_i2c_s3_clk_src = {
.parent_data = gcc_parent_data_0, .parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0), .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT, .flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_shared_ops, .ops = &clk_rcg2_ops,
}, },
}; };
@ -773,7 +773,7 @@ static struct clk_rcg2 gcc_qupv3_i2c_s4_clk_src = {
.parent_data = gcc_parent_data_0, .parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0), .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT, .flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_shared_ops, .ops = &clk_rcg2_ops,
}, },
}; };
@ -788,7 +788,7 @@ static struct clk_rcg2 gcc_qupv3_i2c_s5_clk_src = {
.parent_data = gcc_parent_data_0, .parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0), .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT, .flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_shared_ops, .ops = &clk_rcg2_ops,
}, },
}; };
@ -803,7 +803,7 @@ static struct clk_rcg2 gcc_qupv3_i2c_s6_clk_src = {
.parent_data = gcc_parent_data_0, .parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0), .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT, .flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_shared_ops, .ops = &clk_rcg2_ops,
}, },
}; };
@ -818,7 +818,7 @@ static struct clk_rcg2 gcc_qupv3_i2c_s7_clk_src = {
.parent_data = gcc_parent_data_0, .parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0), .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT, .flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_shared_ops, .ops = &clk_rcg2_ops,
}, },
}; };
@ -833,7 +833,7 @@ static struct clk_rcg2 gcc_qupv3_i2c_s8_clk_src = {
.parent_data = gcc_parent_data_0, .parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0), .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT, .flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_shared_ops, .ops = &clk_rcg2_ops,
}, },
}; };
@ -848,7 +848,7 @@ static struct clk_rcg2 gcc_qupv3_i2c_s9_clk_src = {
.parent_data = gcc_parent_data_0, .parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0), .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT, .flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_shared_ops, .ops = &clk_rcg2_ops,
}, },
}; };
@ -863,7 +863,7 @@ static struct clk_init_data gcc_qupv3_wrap1_qspi_ref_clk_src_init = {
.parent_data = gcc_parent_data_0, .parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0), .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT, .flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_shared_ops, .ops = &clk_rcg2_ops,
}; };
static struct clk_rcg2 gcc_qupv3_wrap1_qspi_ref_clk_src = { static struct clk_rcg2 gcc_qupv3_wrap1_qspi_ref_clk_src = {
@ -899,7 +899,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s0_clk_src_init = {
.parent_data = gcc_parent_data_0, .parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0), .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT, .flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_shared_ops, .ops = &clk_rcg2_ops,
}; };
static struct clk_rcg2 gcc_qupv3_wrap1_s0_clk_src = { static struct clk_rcg2 gcc_qupv3_wrap1_s0_clk_src = {
@ -916,7 +916,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s1_clk_src_init = {
.parent_data = gcc_parent_data_0, .parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0), .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT, .flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_shared_ops, .ops = &clk_rcg2_ops,
}; };
static struct clk_rcg2 gcc_qupv3_wrap1_s1_clk_src = { static struct clk_rcg2 gcc_qupv3_wrap1_s1_clk_src = {
@ -948,7 +948,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s3_clk_src_init = {
.parent_data = gcc_parent_data_0, .parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0), .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT, .flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_shared_ops, .ops = &clk_rcg2_ops,
}; };
static struct clk_rcg2 gcc_qupv3_wrap1_s3_clk_src = { static struct clk_rcg2 gcc_qupv3_wrap1_s3_clk_src = {
@ -980,7 +980,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s4_clk_src_init = {
.parent_data = gcc_parent_data_0, .parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0), .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT, .flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_shared_ops, .ops = &clk_rcg2_ops,
}; };
static struct clk_rcg2 gcc_qupv3_wrap1_s4_clk_src = { static struct clk_rcg2 gcc_qupv3_wrap1_s4_clk_src = {
@ -997,7 +997,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s5_clk_src_init = {
.parent_data = gcc_parent_data_0, .parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0), .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT, .flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_shared_ops, .ops = &clk_rcg2_ops,
}; };
static struct clk_rcg2 gcc_qupv3_wrap1_s5_clk_src = { static struct clk_rcg2 gcc_qupv3_wrap1_s5_clk_src = {
@ -1014,7 +1014,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s6_clk_src_init = {
.parent_data = gcc_parent_data_0, .parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0), .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT, .flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_shared_ops, .ops = &clk_rcg2_ops,
}; };
static struct clk_rcg2 gcc_qupv3_wrap1_s6_clk_src = { static struct clk_rcg2 gcc_qupv3_wrap1_s6_clk_src = {
@ -1031,7 +1031,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s7_clk_src_init = {
.parent_data = gcc_parent_data_0, .parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0), .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT, .flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_shared_ops, .ops = &clk_rcg2_ops,
}; };
static struct clk_rcg2 gcc_qupv3_wrap1_s7_clk_src = { static struct clk_rcg2 gcc_qupv3_wrap1_s7_clk_src = {
@ -1059,7 +1059,7 @@ static struct clk_rcg2 gcc_qupv3_wrap2_ibi_ctrl_0_clk_src = {
.parent_data = gcc_parent_data_2, .parent_data = gcc_parent_data_2,
.num_parents = ARRAY_SIZE(gcc_parent_data_2), .num_parents = ARRAY_SIZE(gcc_parent_data_2),
.flags = CLK_SET_RATE_PARENT, .flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_shared_ops, .ops = &clk_rcg2_ops,
}, },
}; };
@ -1068,7 +1068,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s0_clk_src_init = {
.parent_data = gcc_parent_data_0, .parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0), .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT, .flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_shared_ops, .ops = &clk_rcg2_ops,
}; };
static struct clk_rcg2 gcc_qupv3_wrap2_s0_clk_src = { static struct clk_rcg2 gcc_qupv3_wrap2_s0_clk_src = {
@ -1085,7 +1085,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s1_clk_src_init = {
.parent_data = gcc_parent_data_0, .parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0), .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT, .flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_shared_ops, .ops = &clk_rcg2_ops,
}; };
static struct clk_rcg2 gcc_qupv3_wrap2_s1_clk_src = { static struct clk_rcg2 gcc_qupv3_wrap2_s1_clk_src = {
@ -1102,7 +1102,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s2_clk_src_init = {
.parent_data = gcc_parent_data_0, .parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0), .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT, .flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_shared_ops, .ops = &clk_rcg2_ops,
}; };
static struct clk_rcg2 gcc_qupv3_wrap2_s2_clk_src = { static struct clk_rcg2 gcc_qupv3_wrap2_s2_clk_src = {
@ -1119,7 +1119,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s3_clk_src_init = {
.parent_data = gcc_parent_data_0, .parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0), .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT, .flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_shared_ops, .ops = &clk_rcg2_ops,
}; };
static struct clk_rcg2 gcc_qupv3_wrap2_s3_clk_src = { static struct clk_rcg2 gcc_qupv3_wrap2_s3_clk_src = {
@ -1136,7 +1136,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s4_clk_src_init = {
.parent_data = gcc_parent_data_0, .parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0), .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT, .flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_shared_ops, .ops = &clk_rcg2_ops,
}; };
static struct clk_rcg2 gcc_qupv3_wrap2_s4_clk_src = { static struct clk_rcg2 gcc_qupv3_wrap2_s4_clk_src = {
@ -1153,7 +1153,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s5_clk_src_init = {
.parent_data = gcc_parent_data_0, .parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0), .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT, .flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_shared_ops, .ops = &clk_rcg2_ops,
}; };
static struct clk_rcg2 gcc_qupv3_wrap2_s5_clk_src = { static struct clk_rcg2 gcc_qupv3_wrap2_s5_clk_src = {
@ -1186,7 +1186,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s6_clk_src_init = {
.parent_data = gcc_parent_data_10, .parent_data = gcc_parent_data_10,
.num_parents = ARRAY_SIZE(gcc_parent_data_10), .num_parents = ARRAY_SIZE(gcc_parent_data_10),
.flags = CLK_SET_RATE_PARENT, .flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_shared_ops, .ops = &clk_rcg2_ops,
}; };
static struct clk_rcg2 gcc_qupv3_wrap2_s6_clk_src = { static struct clk_rcg2 gcc_qupv3_wrap2_s6_clk_src = {
@ -1203,7 +1203,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s7_clk_src_init = {
.parent_data = gcc_parent_data_0, .parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0), .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT, .flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_shared_ops, .ops = &clk_rcg2_ops,
}; };
static struct clk_rcg2 gcc_qupv3_wrap2_s7_clk_src = { static struct clk_rcg2 gcc_qupv3_wrap2_s7_clk_src = {
@ -1226,7 +1226,7 @@ static struct clk_init_data gcc_qupv3_wrap3_qspi_ref_clk_src_init = {
.parent_data = gcc_parent_data_0, .parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0), .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT, .flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_shared_ops, .ops = &clk_rcg2_ops,
}; };
static struct clk_rcg2 gcc_qupv3_wrap3_qspi_ref_clk_src = { static struct clk_rcg2 gcc_qupv3_wrap3_qspi_ref_clk_src = {

View File

@ -670,7 +670,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s0_clk_src_init = {
.parent_data = gcc_parent_data_0, .parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0), .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT, .flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_shared_ops, .ops = &clk_rcg2_ops,
}; };
static struct clk_rcg2 gcc_qupv3_wrap0_s0_clk_src = { static struct clk_rcg2 gcc_qupv3_wrap0_s0_clk_src = {
@ -687,7 +687,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s1_clk_src_init = {
.parent_data = gcc_parent_data_0, .parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0), .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT, .flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_shared_ops, .ops = &clk_rcg2_ops,
}; };
static struct clk_rcg2 gcc_qupv3_wrap0_s1_clk_src = { static struct clk_rcg2 gcc_qupv3_wrap0_s1_clk_src = {
@ -719,7 +719,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s2_clk_src_init = {
.parent_data = gcc_parent_data_0, .parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0), .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT, .flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_shared_ops, .ops = &clk_rcg2_ops,
}; };
static struct clk_rcg2 gcc_qupv3_wrap0_s2_clk_src = { static struct clk_rcg2 gcc_qupv3_wrap0_s2_clk_src = {
@ -736,7 +736,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s3_clk_src_init = {
.parent_data = gcc_parent_data_0, .parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0), .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT, .flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_shared_ops, .ops = &clk_rcg2_ops,
}; };
static struct clk_rcg2 gcc_qupv3_wrap0_s3_clk_src = { static struct clk_rcg2 gcc_qupv3_wrap0_s3_clk_src = {
@ -768,7 +768,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s4_clk_src_init = {
.parent_data = gcc_parent_data_0, .parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0), .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT, .flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_shared_ops, .ops = &clk_rcg2_ops,
}; };
static struct clk_rcg2 gcc_qupv3_wrap0_s4_clk_src = { static struct clk_rcg2 gcc_qupv3_wrap0_s4_clk_src = {
@ -785,7 +785,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s5_clk_src_init = {
.parent_data = gcc_parent_data_0, .parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0), .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT, .flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_shared_ops, .ops = &clk_rcg2_ops,
}; };
static struct clk_rcg2 gcc_qupv3_wrap0_s5_clk_src = { static struct clk_rcg2 gcc_qupv3_wrap0_s5_clk_src = {
@ -802,7 +802,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s6_clk_src_init = {
.parent_data = gcc_parent_data_0, .parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0), .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT, .flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_shared_ops, .ops = &clk_rcg2_ops,
}; };
static struct clk_rcg2 gcc_qupv3_wrap0_s6_clk_src = { static struct clk_rcg2 gcc_qupv3_wrap0_s6_clk_src = {
@ -819,7 +819,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s7_clk_src_init = {
.parent_data = gcc_parent_data_0, .parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0), .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT, .flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_shared_ops, .ops = &clk_rcg2_ops,
}; };
static struct clk_rcg2 gcc_qupv3_wrap0_s7_clk_src = { static struct clk_rcg2 gcc_qupv3_wrap0_s7_clk_src = {
@ -836,7 +836,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s0_clk_src_init = {
.parent_data = gcc_parent_data_0, .parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0), .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT, .flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_shared_ops, .ops = &clk_rcg2_ops,
}; };
static struct clk_rcg2 gcc_qupv3_wrap1_s0_clk_src = { static struct clk_rcg2 gcc_qupv3_wrap1_s0_clk_src = {
@ -853,7 +853,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s1_clk_src_init = {
.parent_data = gcc_parent_data_0, .parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0), .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT, .flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_shared_ops, .ops = &clk_rcg2_ops,
}; };
static struct clk_rcg2 gcc_qupv3_wrap1_s1_clk_src = { static struct clk_rcg2 gcc_qupv3_wrap1_s1_clk_src = {
@ -870,7 +870,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s2_clk_src_init = {
.parent_data = gcc_parent_data_0, .parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0), .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT, .flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_shared_ops, .ops = &clk_rcg2_ops,
}; };
static struct clk_rcg2 gcc_qupv3_wrap1_s2_clk_src = { static struct clk_rcg2 gcc_qupv3_wrap1_s2_clk_src = {
@ -887,7 +887,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s3_clk_src_init = {
.parent_data = gcc_parent_data_0, .parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0), .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT, .flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_shared_ops, .ops = &clk_rcg2_ops,
}; };
static struct clk_rcg2 gcc_qupv3_wrap1_s3_clk_src = { static struct clk_rcg2 gcc_qupv3_wrap1_s3_clk_src = {
@ -904,7 +904,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s4_clk_src_init = {
.parent_data = gcc_parent_data_0, .parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0), .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT, .flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_shared_ops, .ops = &clk_rcg2_ops,
}; };
static struct clk_rcg2 gcc_qupv3_wrap1_s4_clk_src = { static struct clk_rcg2 gcc_qupv3_wrap1_s4_clk_src = {
@ -921,7 +921,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s5_clk_src_init = {
.parent_data = gcc_parent_data_0, .parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0), .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT, .flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_shared_ops, .ops = &clk_rcg2_ops,
}; };
static struct clk_rcg2 gcc_qupv3_wrap1_s5_clk_src = { static struct clk_rcg2 gcc_qupv3_wrap1_s5_clk_src = {
@ -938,7 +938,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s6_clk_src_init = {
.parent_data = gcc_parent_data_0, .parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0), .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT, .flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_shared_ops, .ops = &clk_rcg2_ops,
}; };
static struct clk_rcg2 gcc_qupv3_wrap1_s6_clk_src = { static struct clk_rcg2 gcc_qupv3_wrap1_s6_clk_src = {
@ -955,7 +955,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s7_clk_src_init = {
.parent_data = gcc_parent_data_0, .parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0), .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT, .flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_shared_ops, .ops = &clk_rcg2_ops,
}; };
static struct clk_rcg2 gcc_qupv3_wrap1_s7_clk_src = { static struct clk_rcg2 gcc_qupv3_wrap1_s7_clk_src = {
@ -972,7 +972,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s0_clk_src_init = {
.parent_data = gcc_parent_data_0, .parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0), .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT, .flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_shared_ops, .ops = &clk_rcg2_ops,
}; };
static struct clk_rcg2 gcc_qupv3_wrap2_s0_clk_src = { static struct clk_rcg2 gcc_qupv3_wrap2_s0_clk_src = {
@ -989,7 +989,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s1_clk_src_init = {
.parent_data = gcc_parent_data_0, .parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0), .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT, .flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_shared_ops, .ops = &clk_rcg2_ops,
}; };
static struct clk_rcg2 gcc_qupv3_wrap2_s1_clk_src = { static struct clk_rcg2 gcc_qupv3_wrap2_s1_clk_src = {
@ -1006,7 +1006,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s2_clk_src_init = {
.parent_data = gcc_parent_data_0, .parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0), .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT, .flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_shared_ops, .ops = &clk_rcg2_ops,
}; };
static struct clk_rcg2 gcc_qupv3_wrap2_s2_clk_src = { static struct clk_rcg2 gcc_qupv3_wrap2_s2_clk_src = {
@ -1023,7 +1023,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s3_clk_src_init = {
.parent_data = gcc_parent_data_0, .parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0), .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT, .flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_shared_ops, .ops = &clk_rcg2_ops,
}; };
static struct clk_rcg2 gcc_qupv3_wrap2_s3_clk_src = { static struct clk_rcg2 gcc_qupv3_wrap2_s3_clk_src = {
@ -1040,7 +1040,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s4_clk_src_init = {
.parent_data = gcc_parent_data_0, .parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0), .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT, .flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_shared_ops, .ops = &clk_rcg2_ops,
}; };
static struct clk_rcg2 gcc_qupv3_wrap2_s4_clk_src = { static struct clk_rcg2 gcc_qupv3_wrap2_s4_clk_src = {
@ -1057,7 +1057,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s5_clk_src_init = {
.parent_data = gcc_parent_data_0, .parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0), .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT, .flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_shared_ops, .ops = &clk_rcg2_ops,
}; };
static struct clk_rcg2 gcc_qupv3_wrap2_s5_clk_src = { static struct clk_rcg2 gcc_qupv3_wrap2_s5_clk_src = {
@ -1074,7 +1074,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s6_clk_src_init = {
.parent_data = gcc_parent_data_8, .parent_data = gcc_parent_data_8,
.num_parents = ARRAY_SIZE(gcc_parent_data_8), .num_parents = ARRAY_SIZE(gcc_parent_data_8),
.flags = CLK_SET_RATE_PARENT, .flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_shared_ops, .ops = &clk_rcg2_ops,
}; };
static struct clk_rcg2 gcc_qupv3_wrap2_s6_clk_src = { static struct clk_rcg2 gcc_qupv3_wrap2_s6_clk_src = {
@ -1091,7 +1091,7 @@ static struct clk_init_data gcc_qupv3_wrap2_s7_clk_src_init = {
.parent_data = gcc_parent_data_0, .parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0), .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.flags = CLK_SET_RATE_PARENT, .flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_shared_ops, .ops = &clk_rcg2_ops,
}; };
static struct clk_rcg2 gcc_qupv3_wrap2_s7_clk_src = { static struct clk_rcg2 gcc_qupv3_wrap2_s7_clk_src = {
@ -6203,7 +6203,7 @@ static struct gdsc gcc_usb_0_phy_gdsc = {
.pd = { .pd = {
.name = "gcc_usb_0_phy_gdsc", .name = "gcc_usb_0_phy_gdsc",
}, },
.pwrsts = PWRSTS_OFF_ON, .pwrsts = PWRSTS_RET_ON,
.flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE, .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
}; };
@ -6215,7 +6215,7 @@ static struct gdsc gcc_usb_1_phy_gdsc = {
.pd = { .pd = {
.name = "gcc_usb_1_phy_gdsc", .name = "gcc_usb_1_phy_gdsc",
}, },
.pwrsts = PWRSTS_OFF_ON, .pwrsts = PWRSTS_RET_ON,
.flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE, .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
}; };

View File

@ -385,6 +385,32 @@ int jh7110_reset_controller_register(struct jh71x0_clk_priv *priv,
} }
EXPORT_SYMBOL_GPL(jh7110_reset_controller_register); EXPORT_SYMBOL_GPL(jh7110_reset_controller_register);
/*
* This clock notifier is called when the rate of the PLL0 clock is about to
* change. The cpu_root clock should save its current parent clock and switch
* its parent clock to osc before the PLL0 rate changes, then switch its
* parent clock back once the rate change has completed.
*/
static int jh7110_pll0_clk_notifier_cb(struct notifier_block *nb,
unsigned long action, void *data)
{
struct jh71x0_clk_priv *priv = container_of(nb, struct jh71x0_clk_priv, pll_clk_nb);
struct clk *cpu_root = priv->reg[JH7110_SYSCLK_CPU_ROOT].hw.clk;
int ret = 0;
if (action == PRE_RATE_CHANGE) {
struct clk *osc = clk_get(priv->dev, "osc");
priv->original_clk = clk_get_parent(cpu_root);
ret = clk_set_parent(cpu_root, osc);
clk_put(osc);
} else if (action == POST_RATE_CHANGE) {
ret = clk_set_parent(cpu_root, priv->original_clk);
}
return notifier_from_errno(ret);
}
static int __init jh7110_syscrg_probe(struct platform_device *pdev) static int __init jh7110_syscrg_probe(struct platform_device *pdev)
{ {
struct jh71x0_clk_priv *priv; struct jh71x0_clk_priv *priv;
@ -413,7 +439,10 @@ static int __init jh7110_syscrg_probe(struct platform_device *pdev)
if (IS_ERR(priv->pll[0])) if (IS_ERR(priv->pll[0]))
return PTR_ERR(priv->pll[0]); return PTR_ERR(priv->pll[0]);
} else { } else {
clk_put(pllclk); priv->pll_clk_nb.notifier_call = jh7110_pll0_clk_notifier_cb;
ret = clk_notifier_register(pllclk, &priv->pll_clk_nb);
if (ret)
return ret;
priv->pll[0] = NULL; priv->pll[0] = NULL;
} }
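
The notifier added above is the generic "park the child on a safe parent across a parent rate change" pattern; PRE_RATE_CHANGE and POST_RATE_CHANGE arrive from clk_set_rate() on PLL0 via the notifier registered in the probe hunk. Stripped to its skeleton (sketch, invented names; ABORT_RATE_CHANGE handling omitted as in the original):

static int park_child_notifier_cb(struct notifier_block *nb,
				  unsigned long action, void *data)
{
	struct my_clk_priv *p = container_of(nb, struct my_clk_priv, nb);
	int ret = 0;

	if (action == PRE_RATE_CHANGE) {
		p->saved_parent = clk_get_parent(p->child);	 /* remember */
		ret = clk_set_parent(p->child, p->safe_parent);	 /* park */
	} else if (action == POST_RATE_CHANGE) {
		ret = clk_set_parent(p->child, p->saved_parent); /* restore */
	}
	return notifier_from_errno(ret);
}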

View File

@ -114,6 +114,8 @@ struct jh71x0_clk_priv {
spinlock_t rmw_lock; spinlock_t rmw_lock;
struct device *dev; struct device *dev;
void __iomem *base; void __iomem *base;
struct clk *original_clk;
struct notifier_block pll_clk_nb;
struct clk_hw *pll[3]; struct clk_hw *pll[3];
struct jh71x0_clk reg[]; struct jh71x0_clk reg[];
}; };

View File

@ -137,7 +137,21 @@ static int hv_stimer_init(unsigned int cpu)
ce->name = "Hyper-V clockevent"; ce->name = "Hyper-V clockevent";
ce->features = CLOCK_EVT_FEAT_ONESHOT; ce->features = CLOCK_EVT_FEAT_ONESHOT;
ce->cpumask = cpumask_of(cpu); ce->cpumask = cpumask_of(cpu);
ce->rating = 1000;
/*
* Lower the rating of the Hyper-V timer in a TDX VM without paravisor,
* so the local APIC timer (lapic_clockevent) is the default timer in
* such a VM. The Hyper-V timer is not preferred in such a VM because
* it depends on the slow VM Reference Counter MSR (the Hyper-V TSC
* page is not enabled in such a VM because the VM uses Invariant TSC
* as a better clocksource and it's challenging to mark the Hyper-V
* TSC page shared in very early boot).
*/
if (!ms_hyperv.paravisor_present && hv_isolation_type_tdx())
ce->rating = 90;
else
ce->rating = 1000;
ce->set_state_shutdown = hv_ce_shutdown; ce->set_state_shutdown = hv_ce_shutdown;
ce->set_state_oneshot = hv_ce_set_oneshot; ce->set_state_oneshot = hv_ce_set_oneshot;
ce->set_next_event = hv_ce_set_next_event; ce->set_next_event = hv_ce_set_next_event;
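
The rating field is how the clockevents core arbitrates between candidate timers on a CPU: higher wins. Dropping to 90 puts this device below the local APIC timer (rated 100), so the lapic becomes the tick device in a paravisor-less TDX VM. A toy model of the selection rule (sketch; the real core also weighs device features):

#include <stdio.h>

struct clock_event_device { const char *name; int rating; };

/* Toy version of the core's preference test: higher rating wins. */
static const struct clock_event_device *
pick_tick_device(const struct clock_event_device *a,
		 const struct clock_event_device *b)
{
	return (b->rating > a->rating) ? b : a;
}

int main(void)
{
	struct clock_event_device lapic = { "lapic-deadline", 100 };
	struct clock_event_device hv    = { "Hyper-V clockevent", 90 };

	printf("tick device: %s\n", pick_tick_device(&lapic, &hv)->name);
	return 0;
}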

View File

@ -83,20 +83,28 @@ static u64 notrace tpm_read_sched_clock(void)
static int tpm_set_next_event(unsigned long delta, static int tpm_set_next_event(unsigned long delta,
struct clock_event_device *evt) struct clock_event_device *evt)
{ {
unsigned long next, now; unsigned long next, prev, now;
next = tpm_read_counter(); prev = tpm_read_counter();
next += delta; next = prev + delta;
writel(next, timer_base + TPM_C0V); writel(next, timer_base + TPM_C0V);
now = tpm_read_counter(); now = tpm_read_counter();
/*
* Need to wait CNT increase at least 1 cycle to make sure
* the C0V has been updated into HW.
*/
if ((next & 0xffffffff) != readl(timer_base + TPM_C0V))
while (now == tpm_read_counter())
;
/* /*
* NOTE: We observed that, with very small probability, bus fabric * NOTE: We observed that, with very small probability, bus fabric
* contention between the GPU and the A7 may delay the write to the CNT * contention between the GPU and the A7 may delay the write to the CNT
* registers by a few cycles, which may cause the min_delta event to be * registers by a few cycles, which may cause the min_delta event to be
* missed, so we need an ETIME check here in case that happens. * missed, so we need an ETIME check here in case that happens.
*/ */
return (int)(next - now) <= 0 ? -ETIME : 0; return (now - prev) >= delta ? -ETIME : 0;
} }
static int tpm_set_state_oneshot(struct clock_event_device *evt) static int tpm_set_state_oneshot(struct clock_event_device *evt)
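
Two fixes in the hunk above: the driver now waits until the counter visibly advances when the written compare value has not latched yet, and the missed-deadline test is rewritten in wrap-safe unsigned arithmetic: (now - prev) >= delta holds exactly when the counter has already passed the programmed deadline, even across a 32-bit rollover. A quick check of that comparison (plain C sketch):

#include <stdio.h>
#include <stdint.h>

/* Wrap-safe "deadline already passed" test on a free-running counter. */
static int deadline_missed(uint32_t prev, uint32_t now, uint32_t delta)
{
	return (uint32_t)(now - prev) >= delta;
}

int main(void)
{
	/* Counter wraps from 0xfffffff0 past 0: elapsed is still 0x20. */
	printf("missed=%d\n", deadline_missed(0xfffffff0u, 0x10u, 0x20u));
	printf("missed=%d\n", deadline_missed(0xfffffff0u, 0x10u, 0x21u));
	return 0;
}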

View File

@@ -25,10 +25,7 @@ static __init void timer_of_irq_exit(struct of_timer_irq *of_irq)
 	struct clock_event_device *clkevt = &to->clkevt;
 
-	if (of_irq->percpu)
-		free_percpu_irq(of_irq->irq, clkevt);
-	else
-		free_irq(of_irq->irq, clkevt);
+	free_irq(of_irq->irq, clkevt);
 }
 
 /**
@@ -42,9 +39,6 @@ static __init void timer_of_irq_exit(struct of_timer_irq *of_irq)
  * - Get interrupt number by name
  * - Get interrupt number by index
  *
- * When the interrupt is per CPU, 'request_percpu_irq()' is called,
- * otherwise 'request_irq()' is used.
- *
  * Returns 0 on success, < 0 otherwise
  */
 static __init int timer_of_irq_init(struct device_node *np,
@@ -69,12 +63,9 @@ static __init int timer_of_irq_init(struct device_node *np,
 		return -EINVAL;
 	}
 
-	ret = of_irq->percpu ?
-	      request_percpu_irq(of_irq->irq, of_irq->handler,
-				 np->full_name, clkevt) :
-	      request_irq(of_irq->irq, of_irq->handler,
-			  of_irq->flags ? of_irq->flags : IRQF_TIMER,
-			  np->full_name, clkevt);
+	ret = request_irq(of_irq->irq, of_irq->handler,
+			  of_irq->flags ? of_irq->flags : IRQF_TIMER,
+			  np->full_name, clkevt);
 	if (ret) {
 		pr_err("Failed to request irq %d for %pOF\n", of_irq->irq, np);
 		return ret;

View File

@@ -11,7 +11,6 @@
 struct of_timer_irq {
 	int irq;
 	int index;
-	int percpu;
 	const char *name;
 	unsigned long flags;
 	irq_handler_t handler;

View File

@@ -1834,20 +1834,34 @@ static bool amd_cppc_supported(void)
 }
 
 /*
- * If the CPPC feature is disabled in the BIOS for processors that support MSR-based CPPC,
- * the AMD Pstate driver may not function correctly.
- * Check the CPPC flag and display a warning message if the platform supports CPPC.
- * Note: below checking code will not abort the driver registeration process because of
- * the code is added for debugging purposes.
+ * If the CPPC feature is disabled in the BIOS for processors
+ * that support MSR-based CPPC, the AMD Pstate driver may not
+ * function correctly.
+ *
+ * For such processors, check the CPPC flag and display a
+ * warning message if the platform supports CPPC.
+ *
+ * Note: The code check below will not abort the driver
+ * registration process because the code is added for
+ * debugging purposes. Besides, it may still be possible for
+ * the driver to work using the shared-memory mechanism.
  */
 if (!cpu_feature_enabled(X86_FEATURE_CPPC)) {
-	if (cpu_feature_enabled(X86_FEATURE_ZEN1) || cpu_feature_enabled(X86_FEATURE_ZEN2)) {
-		if (c->x86_model > 0x60 && c->x86_model < 0xaf)
+	if (cpu_feature_enabled(X86_FEATURE_ZEN2)) {
+		switch (c->x86_model) {
+		case 0x60 ... 0x6F:
+		case 0x80 ... 0xAF:
 			warn = true;
-	} else if (cpu_feature_enabled(X86_FEATURE_ZEN3) || cpu_feature_enabled(X86_FEATURE_ZEN4)) {
-		if ((c->x86_model > 0x10 && c->x86_model < 0x1F) ||
-		    (c->x86_model > 0x40 && c->x86_model < 0xaf))
+			break;
+		}
+	} else if (cpu_feature_enabled(X86_FEATURE_ZEN3) ||
+		   cpu_feature_enabled(X86_FEATURE_ZEN4)) {
+		switch (c->x86_model) {
+		case 0x10 ... 0x1F:
+		case 0x40 ... 0xAF:
 			warn = true;
+			break;
+		}
 	} else if (cpu_feature_enabled(X86_FEATURE_ZEN5)) {
 		warn = true;
 	}
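The rewritten checks use the GCC/Clang case-range extension, and unlike the old open-interval comparisons (model > 0x60 && model < 0xaf) the new ranges include both endpoints. A minimal sketch of the construct:

    #include <stdbool.h>
    #include <stdio.h>

    /* case ranges are a GCC/Clang extension; lo and hi are inclusive */
    static bool model_needs_warning(unsigned int model)
    {
            switch (model) {
            case 0x60 ... 0x6F:
            case 0x80 ... 0xAF:
                    return true;
            default:
                    return false;
            }
    }

    int main(void)
    {
            printf("0x68 -> %d, 0x70 -> %d\n",
                   model_needs_warning(0x68), model_needs_warning(0x70));
            return 0;
    }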

View File

@@ -715,6 +715,10 @@ static int qcuefi_set_reference(struct qcuefi_client *qcuefi)
 static struct qcuefi_client *qcuefi_acquire(void)
 {
 	mutex_lock(&__qcuefi_lock);
+	if (!__qcuefi) {
+		mutex_unlock(&__qcuefi_lock);
+		return NULL;
+	}
 	return __qcuefi;
 }
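After this fix a NULL return means the lock has already been dropped, so callers must not unlock again on that path. A standalone model of the pattern; all names below are stand-ins, not the driver's real API:

    #include <stdio.h>

    struct client { int dummy; };

    static struct client *registered_client;  /* NULL until a client registers */
    static int lock_held;

    static struct client *acquire(void)
    {
            lock_held = 1;                    /* mutex_lock(&__qcuefi_lock) */
            if (!registered_client) {
                    lock_held = 0;            /* unlock on the failure path */
                    return NULL;
            }
            return registered_client;         /* caller unlocks when done */
    }

    int main(void)
    {
            if (!acquire())
                    printf("no client; lock held: %d\n", lock_held);  /* 0 */
            return 0;
    }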

View File

@@ -713,6 +713,7 @@ static int rockchip_gpio_probe(struct platform_device *pdev)
 		return -ENODEV;
 
 	pctldev = of_pinctrl_get(pctlnp);
+	of_node_put(pctlnp);
 	if (!pctldev)
 		return -EPROBE_DEFER;

View File

@@ -146,6 +146,7 @@ static const struct of_device_id modepin_platform_id[] = {
 	{ .compatible = "xlnx,zynqmp-gpio-modepin", },
 	{ }
 };
+MODULE_DEVICE_TABLE(of, modepin_platform_id);
 
 static struct platform_driver modepin_platform_driver = {
 	.driver = {

View File

@@ -128,7 +128,6 @@ obj-$(CONFIG_DRM_TTM_HELPER) += drm_ttm_helper.o
 drm_kms_helper-y := \
 	drm_atomic_helper.o \
 	drm_atomic_state_helper.o \
-	drm_bridge_connector.o \
 	drm_crtc_helper.o \
 	drm_damage_helper.o \
 	drm_encoder_slave.o \

View File

@@ -348,6 +348,9 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
 		return -EINVAL;
 	}
 
+	/* always clear VRAM */
+	flags |= AMDGPU_GEM_CREATE_VRAM_CLEARED;
+
 	/* create a gem object to contain this object in */
 	if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
 	    AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {

View File

@@ -657,7 +657,7 @@ int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev, int xcc_id)
 	uint64_t queue_mask = 0;
 	int r, i, j;
 
-	if (adev->enable_mes)
+	if (adev->mes.enable_legacy_queue_map)
 		return amdgpu_gfx_mes_enable_kcq(adev, xcc_id);
 
 	if (!kiq->pmf || !kiq->pmf->kiq_map_queues || !kiq->pmf->kiq_set_resources)
@@ -719,7 +719,7 @@ int amdgpu_gfx_enable_kgq(struct amdgpu_device *adev, int xcc_id)
 	amdgpu_device_flush_hdp(adev, NULL);
 
-	if (adev->enable_mes) {
+	if (adev->mes.enable_legacy_queue_map) {
 		for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
 			j = i + xcc_id * adev->gfx.num_gfx_rings;
 			r = amdgpu_mes_map_legacy_queue(adev,

View File

@@ -75,6 +75,7 @@ struct amdgpu_mes {
 	uint32_t sched_version;
 	uint32_t kiq_version;
+	bool enable_legacy_queue_map;
 
 	uint32_t total_max_queue;
 	uint32_t max_doorbell_slices;

View File

@@ -693,6 +693,28 @@ static void mes_v11_0_free_ucode_buffers(struct amdgpu_device *adev,
 			      (void **)&adev->mes.ucode_fw_ptr[pipe]);
 }
 
+static void mes_v11_0_get_fw_version(struct amdgpu_device *adev)
+{
+	int pipe;
+
+	/* get MES scheduler/KIQ versions */
+	mutex_lock(&adev->srbm_mutex);
+
+	for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) {
+		soc21_grbm_select(adev, 3, pipe, 0, 0);
+
+		if (pipe == AMDGPU_MES_SCHED_PIPE)
+			adev->mes.sched_version =
+				RREG32_SOC15(GC, 0, regCP_MES_GP3_LO);
+		else if (pipe == AMDGPU_MES_KIQ_PIPE && adev->enable_mes_kiq)
+			adev->mes.kiq_version =
+				RREG32_SOC15(GC, 0, regCP_MES_GP3_LO);
+	}
+
+	soc21_grbm_select(adev, 0, 0, 0, 0);
+	mutex_unlock(&adev->srbm_mutex);
+}
+
 static void mes_v11_0_enable(struct amdgpu_device *adev, bool enable)
 {
 	uint64_t ucode_addr;
@@ -1062,18 +1084,6 @@ static int mes_v11_0_queue_init(struct amdgpu_device *adev,
 		mes_v11_0_queue_init_register(ring);
 	}
 
-	/* get MES scheduler/KIQ versions */
-	mutex_lock(&adev->srbm_mutex);
-	soc21_grbm_select(adev, 3, pipe, 0, 0);
-
-	if (pipe == AMDGPU_MES_SCHED_PIPE)
-		adev->mes.sched_version = RREG32_SOC15(GC, 0, regCP_MES_GP3_LO);
-	else if (pipe == AMDGPU_MES_KIQ_PIPE && adev->enable_mes_kiq)
-		adev->mes.kiq_version = RREG32_SOC15(GC, 0, regCP_MES_GP3_LO);
-
-	soc21_grbm_select(adev, 0, 0, 0, 0);
-	mutex_unlock(&adev->srbm_mutex);
-
 	return 0;
 }
 
@@ -1320,15 +1330,24 @@ static int mes_v11_0_kiq_hw_init(struct amdgpu_device *adev)
 
 	mes_v11_0_enable(adev, true);
 
+	mes_v11_0_get_fw_version(adev);
+
 	mes_v11_0_kiq_setting(&adev->gfx.kiq[0].ring);
 
 	r = mes_v11_0_queue_init(adev, AMDGPU_MES_KIQ_PIPE);
 	if (r)
 		goto failure;
 
-	r = mes_v11_0_hw_init(adev);
-	if (r)
-		goto failure;
+	if ((adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) >= 0x47)
+		adev->mes.enable_legacy_queue_map = true;
+	else
+		adev->mes.enable_legacy_queue_map = false;
+
+	if (adev->mes.enable_legacy_queue_map) {
+		r = mes_v11_0_hw_init(adev);
+		if (r)
+			goto failure;
+	}
 
 	return r;

View File

@@ -1266,6 +1266,7 @@ static int mes_v12_0_sw_init(void *handle)
 	adev->mes.funcs = &mes_v12_0_funcs;
 	adev->mes.kiq_hw_init = &mes_v12_0_kiq_hw_init;
 	adev->mes.kiq_hw_fini = &mes_v12_0_kiq_hw_fini;
+	adev->mes.enable_legacy_queue_map = true;
 
 	adev->mes.event_log_size = AMDGPU_MES_LOG_BUFFER_SIZE;
@@ -1422,9 +1423,11 @@ static int mes_v12_0_kiq_hw_init(struct amdgpu_device *adev)
 		mes_v12_0_set_hw_resources_1(&adev->mes, AMDGPU_MES_KIQ_PIPE);
 	}
 
-	r = mes_v12_0_hw_init(adev);
-	if (r)
-		goto failure;
+	if (adev->mes.enable_legacy_queue_map) {
+		r = mes_v12_0_hw_init(adev);
+		if (r)
+			goto failure;
+	}
 
 	return r;

View File

@@ -1752,6 +1752,30 @@ static struct dml2_soc_bb *dm_dmub_get_vbios_bounding_box(struct amdgpu_device *
 	return bb;
 }
 
+static enum dmub_ips_disable_type dm_get_default_ips_mode(
+	struct amdgpu_device *adev)
+{
+	/*
+	 * On DCN35 systems with Z8 enabled, it's possible for IPS2 + Z8 to
+	 * cause a hard hang. A fix exists for newer PMFW.
+	 *
+	 * As a workaround, for non-fixed PMFW, force IPS1+RCG as the deepest
+	 * IPS state in all cases, except for s0ix and all displays off (DPMS),
+	 * where IPS2 is allowed.
+	 *
+	 * When checking pmfw version, use the major and minor only.
+	 */
+	if (amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(3, 5, 0) &&
+	    (adev->pm.fw_version & 0x00FFFF00) < 0x005D6300)
+		return DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF;
+
+	if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 5, 0))
+		return DMUB_IPS_ENABLE;
+
+	/* ASICs older than DCN35 do not have IPSs */
+	return DMUB_IPS_DISABLE_ALL;
+}
+
 static int amdgpu_dm_init(struct amdgpu_device *adev)
 {
 	struct dc_init_data init_data;
@@ -1863,7 +1887,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 	if (amdgpu_dc_debug_mask & DC_DISABLE_IPS)
 		init_data.flags.disable_ips = DMUB_IPS_DISABLE_ALL;
 	else
-		init_data.flags.disable_ips = DMUB_IPS_ENABLE;
+		init_data.flags.disable_ips = dm_get_default_ips_mode(adev);
 
 	init_data.flags.disable_ips_in_vpb = 0;
 
@@ -4492,7 +4516,7 @@ static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
 	struct amdgpu_dm_backlight_caps caps;
 	struct dc_link *link;
 	u32 brightness;
-	bool rc;
+	bool rc, reallow_idle = false;
 
 	amdgpu_dm_update_backlight_caps(dm, bl_idx);
 	caps = dm->backlight_caps[bl_idx];
@@ -4505,6 +4529,12 @@ static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
 	link = (struct dc_link *)dm->backlight_link[bl_idx];
 
 	/* Change brightness based on AUX property */
+	mutex_lock(&dm->dc_lock);
+	if (dm->dc->caps.ips_support && dm->dc->ctx->dmub_srv->idle_allowed) {
+		dc_allow_idle_optimizations(dm->dc, false);
+		reallow_idle = true;
+	}
+
 	if (caps.aux_support) {
 		rc = dc_link_set_backlight_level_nits(link, true, brightness,
 						      AUX_BL_DEFAULT_TRANSITION_TIME_MS);
@@ -4516,6 +4546,11 @@ static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
 			DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
 	}
 
+	if (dm->dc->caps.ips_support && reallow_idle)
+		dc_allow_idle_optimizations(dm->dc, true);
+
+	mutex_unlock(&dm->dc_lock);
+
 	if (rc)
 		dm->actual_brightness[bl_idx] = user_brightness;
 }

View File

@@ -811,7 +811,8 @@ static void build_synchronized_timing_groups(
 		for (j = i + 1; j < display_config->display_config.num_streams; j++) {
 			if (memcmp(master_timing,
 				   &display_config->display_config.stream_descriptors[j].timing,
-				   sizeof(struct dml2_timing_cfg)) == 0) {
+				   sizeof(struct dml2_timing_cfg)) == 0 &&
+			    display_config->display_config.stream_descriptors[i].output.output_encoder == display_config->display_config.stream_descriptors[j].output.output_encoder) {
 				set_bit_in_bitfield(&pmo->scratch.pmo_dcn4.synchronized_timing_group_masks[timing_group_idx], j);
 				set_bit_in_bitfield(&stream_mapped_mask, j);
 			}
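With the extra condition, two streams land in the same synchronized timing group only when their timing structs are bytewise identical and they use the same output encoder type. A simplified, self-contained model of the rule; the types and values are invented for illustration:

    #include <stdio.h>
    #include <string.h>

    struct timing { int pixel_clock_khz; int h_total; int v_total; };
    struct stream { struct timing timing; int output_encoder; };

    /* mirror of the check above: identical timing AND identical encoder */
    static int can_synchronize(const struct stream *a, const struct stream *b)
    {
            return memcmp(&a->timing, &b->timing, sizeof(struct timing)) == 0 &&
                   a->output_encoder == b->output_encoder;
    }

    int main(void)
    {
            struct stream hdmi = { { 594000, 4400, 2250 }, 1 };
            struct stream dp   = { { 594000, 4400, 2250 }, 2 };

            /* same timing but different encoders: no longer grouped */
            printf("grouped: %d\n", can_synchronize(&hdmi, &dp));
            return 0;
    }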

View File

@@ -2266,7 +2266,8 @@ static int smu_adjust_power_state_dynamic(struct smu_context *smu,
 		smu_dpm_ctx->dpm_level = level;
 	}
 
-	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
+	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
+	    smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
 		index = fls(smu->workload_mask);
 		index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
 		workload[0] = smu->workload_setting[index];
@@ -2345,7 +2346,8 @@ static int smu_switch_power_profile(void *handle,
 		workload[0] = smu->workload_setting[index];
 	}
 
-	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)
+	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
+	    smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)
 		smu_bump_power_profile_mode(smu, workload, 0);
 
 	return 0;
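For reference, fls() returns the 1-based position of the most significant set bit (0 for an empty mask), so the lines above select the highest-numbered workload profile still present in workload_mask. A userspace sketch of the same selection (the WORKLOAD_POLICY_MAX upper bound is omitted here):

    #include <stdio.h>

    /* userspace stand-in for the kernel's fls() */
    static int fls_demo(unsigned int x)
    {
            return x ? 32 - __builtin_clz(x) : 0;
    }

    int main(void)
    {
            unsigned int workload_mask = 0x0a;   /* profiles 1 and 3 selected */
            int index = fls_demo(workload_mask); /* 4 */

            index = index > 0 ? index - 1 : 0;   /* 3: highest profile wins */
            printf("selected workload index: %d\n", index);
            return 0;
    }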

View File

@@ -160,6 +160,7 @@ static int komeda_crtc_normalize_zpos(struct drm_crtc *crtc,
 	struct drm_plane *plane;
 	struct list_head zorder_list;
 	int order = 0, err;
+	u32 slave_zpos = 0;
 
 	DRM_DEBUG_ATOMIC("[CRTC:%d:%s] calculating normalized zpos values\n",
 			 crtc->base.id, crtc->name);
@@ -199,10 +200,13 @@ static int komeda_crtc_normalize_zpos(struct drm_crtc *crtc,
 			 plane_st->zpos, plane_st->normalized_zpos);
 
 		/* calculate max slave zorder */
-		if (has_bit(drm_plane_index(plane), kcrtc->slave_planes))
+		if (has_bit(drm_plane_index(plane), kcrtc->slave_planes)) {
+			slave_zpos = plane_st->normalized_zpos;
+			if (to_kplane_st(plane_st)->layer_split)
+				slave_zpos++;
 			kcrtc_st->max_slave_zorder =
-				max(plane_st->normalized_zpos,
-				    kcrtc_st->max_slave_zorder);
+				max(slave_zpos, kcrtc_st->max_slave_zorder);
+		}
 	}
 
 	crtc_st->zpos_changed = true;

View File

@@ -390,6 +390,7 @@ config DRM_TI_SN65DSI86
 	depends on OF
 	select DRM_DISPLAY_DP_HELPER
 	select DRM_DISPLAY_HELPER
+	select DRM_BRIDGE_CONNECTOR
 	select DRM_KMS_HELPER
 	select REGMAP_I2C
 	select DRM_PANEL

View File

@@ -1,19 +1,26 @@
 # SPDX-License-Identifier: MIT
 
+config DRM_DISPLAY_DP_AUX_BUS
+	tristate
+	depends on DRM
+	depends on OF || COMPILE_TEST
+
 config DRM_DISPLAY_HELPER
 	tristate
 	depends on DRM
 	help
 	  DRM helpers for display adapters.
 
-config DRM_DISPLAY_DP_AUX_BUS
-	tristate
-	depends on DRM
-	depends on OF || COMPILE_TEST
+if DRM_DISPLAY_HELPER
+
+config DRM_BRIDGE_CONNECTOR
+	bool
+	select DRM_DISPLAY_HDMI_STATE_HELPER
+	help
+	  DRM connector implementation terminating DRM bridge chains.
 
 config DRM_DISPLAY_DP_AUX_CEC
 	bool "Enable DisplayPort CEC-Tunneling-over-AUX HDMI support"
-	depends on DRM && DRM_DISPLAY_HELPER
 	select DRM_DISPLAY_DP_HELPER
 	select CEC_CORE
 	help
@@ -25,7 +32,6 @@ config DRM_DISPLAY_DP_AUX_CEC
 
 config DRM_DISPLAY_DP_AUX_CHARDEV
 	bool "DRM DP AUX Interface"
-	depends on DRM && DRM_DISPLAY_HELPER
 	select DRM_DISPLAY_DP_HELPER
 	help
 	  Choose this option to enable a /dev/drm_dp_auxN node that allows to
@@ -34,7 +40,6 @@ config DRM_DISPLAY_DP_AUX_CHARDEV
 
 config DRM_DISPLAY_DP_HELPER
 	bool
-	depends on DRM_DISPLAY_HELPER
 	help
 	  DRM display helpers for DisplayPort.
 
@@ -61,19 +66,18 @@ config DRM_DISPLAY_DP_TUNNEL_STATE_DEBUG
 
 config DRM_DISPLAY_HDCP_HELPER
 	bool
-	depends on DRM_DISPLAY_HELPER
 	help
 	  DRM display helpers for HDCP.
 
 config DRM_DISPLAY_HDMI_HELPER
 	bool
-	depends on DRM_DISPLAY_HELPER
 	help
 	  DRM display helpers for HDMI.
 
 config DRM_DISPLAY_HDMI_STATE_HELPER
 	bool
-	depends on DRM_DISPLAY_HELPER
 	select DRM_DISPLAY_HDMI_HELPER
 	help
 	  DRM KMS state helpers for HDMI.
+
+endif # DRM_DISPLAY_HELPER

View File

@@ -3,6 +3,8 @@
 obj-$(CONFIG_DRM_DISPLAY_DP_AUX_BUS) += drm_dp_aux_bus.o
 
 drm_display_helper-y := drm_display_helper_mod.o
+drm_display_helper-$(CONFIG_DRM_BRIDGE_CONNECTOR) += \
+	drm_bridge_connector.o
 drm_display_helper-$(CONFIG_DRM_DISPLAY_DP_HELPER) += \
 	drm_dp_dual_mode_helper.o \
 	drm_dp_helper.o \

View File

@@ -216,8 +216,19 @@ static void drm_bridge_connector_debugfs_init(struct drm_connector *connector,
 	}
 }
 
+static void drm_bridge_connector_reset(struct drm_connector *connector)
+{
+	struct drm_bridge_connector *bridge_connector =
+		to_drm_bridge_connector(connector);
+
+	drm_atomic_helper_connector_reset(connector);
+	if (bridge_connector->bridge_hdmi)
+		__drm_atomic_helper_connector_hdmi_reset(connector,
+							 connector->state);
+}
+
 static const struct drm_connector_funcs drm_bridge_connector_funcs = {
-	.reset = drm_atomic_helper_connector_reset,
+	.reset = drm_bridge_connector_reset,
 	.detect = drm_bridge_connector_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,

View File

@@ -36,20 +36,11 @@ static int drm_fbdev_dma_fb_release(struct fb_info *info, int user)
 	return 0;
 }
 
-FB_GEN_DEFAULT_DEFERRED_DMAMEM_OPS(drm_fbdev_dma,
-				   drm_fb_helper_damage_range,
-				   drm_fb_helper_damage_area);
-
 static int drm_fbdev_dma_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
 {
 	struct drm_fb_helper *fb_helper = info->par;
-	struct drm_framebuffer *fb = fb_helper->fb;
-	struct drm_gem_dma_object *dma = drm_fb_dma_get_gem_obj(fb, 0);
 
-	if (!dma->map_noncoherent)
-		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
-
-	return fb_deferred_io_mmap(info, vma);
+	return drm_gem_prime_mmap(fb_helper->buffer->gem, vma);
 }
 
 static void drm_fbdev_dma_fb_destroy(struct fb_info *info)
@@ -70,13 +61,40 @@ static void drm_fbdev_dma_fb_destroy(struct fb_info *info)
 }
 
 static const struct fb_ops drm_fbdev_dma_fb_ops = {
+	.owner = THIS_MODULE,
+	.fb_open = drm_fbdev_dma_fb_open,
+	.fb_release = drm_fbdev_dma_fb_release,
+	__FB_DEFAULT_DMAMEM_OPS_RDWR,
+	DRM_FB_HELPER_DEFAULT_OPS,
+	__FB_DEFAULT_DMAMEM_OPS_DRAW,
+	.fb_mmap = drm_fbdev_dma_fb_mmap,
+	.fb_destroy = drm_fbdev_dma_fb_destroy,
+};
+
+FB_GEN_DEFAULT_DEFERRED_DMAMEM_OPS(drm_fbdev_dma,
+				   drm_fb_helper_damage_range,
+				   drm_fb_helper_damage_area);
+
+static int drm_fbdev_dma_deferred_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
+{
+	struct drm_fb_helper *fb_helper = info->par;
+	struct drm_framebuffer *fb = fb_helper->fb;
+	struct drm_gem_dma_object *dma = drm_fb_dma_get_gem_obj(fb, 0);
+
+	if (!dma->map_noncoherent)
+		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+
+	return fb_deferred_io_mmap(info, vma);
+}
+
+static const struct fb_ops drm_fbdev_dma_deferred_fb_ops = {
 	.owner = THIS_MODULE,
 	.fb_open = drm_fbdev_dma_fb_open,
 	.fb_release = drm_fbdev_dma_fb_release,
 	__FB_DEFAULT_DEFERRED_OPS_RDWR(drm_fbdev_dma),
 	DRM_FB_HELPER_DEFAULT_OPS,
 	__FB_DEFAULT_DEFERRED_OPS_DRAW(drm_fbdev_dma),
-	.fb_mmap = drm_fbdev_dma_fb_mmap,
+	.fb_mmap = drm_fbdev_dma_deferred_fb_mmap,
 	.fb_destroy = drm_fbdev_dma_fb_destroy,
 };
 
@@ -89,6 +107,7 @@ static int drm_fbdev_dma_helper_fb_probe(struct drm_fb_helper *fb_helper,
 {
 	struct drm_client_dev *client = &fb_helper->client;
 	struct drm_device *dev = fb_helper->dev;
+	bool use_deferred_io = false;
 	struct drm_client_buffer *buffer;
 	struct drm_gem_dma_object *dma_obj;
 	struct drm_framebuffer *fb;
@@ -111,6 +130,15 @@ static int drm_fbdev_dma_helper_fb_probe(struct drm_fb_helper *fb_helper,
 
 	fb = buffer->fb;
 
+	/*
+	 * Deferred I/O requires struct page for framebuffer memory,
+	 * which is not guaranteed for all DMA ranges. We thus only
+	 * install deferred I/O if we have a framebuffer that requires
+	 * it.
+	 */
+	if (fb->funcs->dirty)
+		use_deferred_io = true;
+
 	ret = drm_client_buffer_vmap(buffer, &map);
 	if (ret) {
 		goto err_drm_client_buffer_delete;
@@ -130,7 +158,10 @@ static int drm_fbdev_dma_helper_fb_probe(struct drm_fb_helper *fb_helper,
 
 	drm_fb_helper_fill_info(info, fb_helper, sizes);
 
-	info->fbops = &drm_fbdev_dma_fb_ops;
+	if (use_deferred_io)
+		info->fbops = &drm_fbdev_dma_deferred_fb_ops;
+	else
+		info->fbops = &drm_fbdev_dma_fb_ops;
 
 	/* screen */
 	info->flags |= FBINFO_VIRTFB; /* system memory */
@@ -144,14 +175,28 @@ static int drm_fbdev_dma_helper_fb_probe(struct drm_fb_helper *fb_helper,
 	}
 	info->fix.smem_len = info->screen_size;
 
-	/* deferred I/O */
-	fb_helper->fbdefio.delay = HZ / 20;
-	fb_helper->fbdefio.deferred_io = drm_fb_helper_deferred_io;
+	/*
+	 * Only set up deferred I/O if the screen buffer supports
+	 * it. If this disagrees with the previous test for ->dirty,
+	 * mmap on the /dev/fb file might not work correctly.
+	 */
+	if (!is_vmalloc_addr(info->screen_buffer) && info->fix.smem_start) {
+		unsigned long pfn = info->fix.smem_start >> PAGE_SHIFT;
 
-	info->fbdefio = &fb_helper->fbdefio;
-	ret = fb_deferred_io_init(info);
-	if (ret)
-		goto err_drm_fb_helper_release_info;
+		if (drm_WARN_ON(dev, !pfn_to_page(pfn)))
+			use_deferred_io = false;
+	}
+
+	/* deferred I/O */
+	if (use_deferred_io) {
+		fb_helper->fbdefio.delay = HZ / 20;
+		fb_helper->fbdefio.deferred_io = drm_fb_helper_deferred_io;
+
+		info->fbdefio = &fb_helper->fbdefio;
+		ret = fb_deferred_io_init(info);
+		if (ret)
+			goto err_drm_fb_helper_release_info;
+	}
 
 	return 0;

View File

@@ -228,7 +228,7 @@ bool intel_alpm_compute_params(struct intel_dp *intel_dp,
 	int tfw_exit_latency = 20; /* eDP spec */
 	int phy_wake = 4; /* eDP spec */
 	int preamble = 8; /* eDP spec */
-	int precharge = intel_dp_aux_fw_sync_len() - preamble;
+	int precharge = intel_dp_aux_fw_sync_len(intel_dp) - preamble;
 	u8 max_wake_lines;
 
 	io_wake_time = max(precharge, io_buffer_wake_time(crtc_state)) +

View File

@@ -1885,6 +1885,10 @@ struct intel_dp {
 	} alpm_parameters;
 
 	u8 alpm_dpcd;
+
+	struct {
+		unsigned long mask;
+	} quirks;
 };
 
 enum lspcon_vendor {

View File

@@ -82,6 +82,7 @@
 #include "intel_pch_display.h"
 #include "intel_pps.h"
 #include "intel_psr.h"
+#include "intel_quirks.h"
 #include "intel_tc.h"
 #include "intel_vdsc.h"
 #include "intel_vrr.h"
@@ -3952,6 +3953,7 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp, struct intel_connector *connector
 	drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
 			 drm_dp_is_branch(intel_dp->dpcd));
 
+	intel_init_dpcd_quirks(intel_dp, &intel_dp->desc.ident);
+
 	/*
 	 * Read the eDP display control registers.
@@ -4064,6 +4066,8 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
 		drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
 				 drm_dp_is_branch(intel_dp->dpcd));
 
+		intel_init_dpcd_quirks(intel_dp, &intel_dp->desc.ident);
+
 		intel_dp_update_sink_caps(intel_dp);
 	}

View File

@@ -13,6 +13,7 @@
 #include "intel_dp_aux.h"
 #include "intel_dp_aux_regs.h"
 #include "intel_pps.h"
+#include "intel_quirks.h"
 #include "intel_tc.h"
 
 #define AUX_CH_NAME_BUFSIZE	6
@@ -142,16 +143,21 @@ static int intel_dp_aux_sync_len(void)
 	return precharge + preamble;
 }
 
-int intel_dp_aux_fw_sync_len(void)
+int intel_dp_aux_fw_sync_len(struct intel_dp *intel_dp)
 {
+	int precharge = 10; /* 10-16 */
+	int preamble = 8;
+
 	/*
 	 * We faced some glitches on Dell Precision 5490 MTL laptop with panel:
 	 * "Manufacturer: AUO, Model: 63898" when using HW default 18. Using 20
 	 * is fixing these problems with the panel. It is still within range
-	 * mentioned in eDP specification.
+	 * mentioned in eDP specification. Increasing Fast Wake sync length is
+	 * causing problems with other panels: increase length as a quirk for
+	 * this specific laptop.
 	 */
-	int precharge = 12; /* 10-16 */
-	int preamble = 8;
+	if (intel_has_dpcd_quirk(intel_dp, QUIRK_FW_SYNC_LEN))
+		precharge += 2;
 
 	return precharge + preamble;
 }
@@ -211,7 +217,7 @@ static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
 	       DP_AUX_CH_CTL_TIME_OUT_MAX |
 	       DP_AUX_CH_CTL_RECEIVE_ERROR |
 	       DP_AUX_CH_CTL_MESSAGE_SIZE(send_bytes) |
-	       DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(intel_dp_aux_fw_sync_len()) |
+	       DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(intel_dp_aux_fw_sync_len(intel_dp)) |
 	       DP_AUX_CH_CTL_SYNC_PULSE_SKL(intel_dp_aux_sync_len());
 
 	if (intel_tc_port_in_tbt_alt_mode(dig_port))
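The numbers line up with the comment above: the default precharge of 10 plus the 8-symbol preamble reproduces the hardware default sync length of 18, while the quirk's precharge += 2 gives 12 + 8 = 20, the value reported to fix the AUO panel. Unquirked systems now keep the default 18 instead of the previous unconditional 20.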

View File

@@ -20,6 +20,6 @@ enum aux_ch intel_dp_aux_ch(struct intel_encoder *encoder);
 void intel_dp_aux_irq_handler(struct drm_i915_private *i915);
 u32 intel_dp_aux_pack(const u8 *src, int src_bytes);
-int intel_dp_aux_fw_sync_len(void);
+int intel_dp_aux_fw_sync_len(struct intel_dp *intel_dp);
 
 #endif /* __INTEL_DP_AUX_H__ */

View File

@@ -326,6 +326,8 @@ static void intel_modeset_update_connector_atomic_state(struct drm_i915_private
 
 static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state)
 {
+	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+
 	if (intel_crtc_is_joiner_secondary(crtc_state))
 		return;
 
@@ -337,11 +339,30 @@ static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state
 	crtc_state->uapi.adjusted_mode = crtc_state->hw.adjusted_mode;
 	crtc_state->uapi.scaling_filter = crtc_state->hw.scaling_filter;
 
-	/* assume 1:1 mapping */
-	drm_property_replace_blob(&crtc_state->hw.degamma_lut,
-				  crtc_state->pre_csc_lut);
-	drm_property_replace_blob(&crtc_state->hw.gamma_lut,
-				  crtc_state->post_csc_lut);
+	if (DISPLAY_INFO(i915)->color.degamma_lut_size) {
+		/* assume 1:1 mapping */
+		drm_property_replace_blob(&crtc_state->hw.degamma_lut,
+					  crtc_state->pre_csc_lut);
+		drm_property_replace_blob(&crtc_state->hw.gamma_lut,
+					  crtc_state->post_csc_lut);
+	} else {
+		/*
+		 * ilk/snb hw may be configured for either pre_csc_lut
+		 * or post_csc_lut, but we don't advertise degamma_lut as
+		 * being available in the uapi since there is only one
+		 * hardware LUT. Always assign the result of the readout
+		 * to gamma_lut as that is the only valid source of LUTs
+		 * in the uapi.
+		 */
+		drm_WARN_ON(&i915->drm, crtc_state->post_csc_lut &&
+			    crtc_state->pre_csc_lut);
+
+		drm_property_replace_blob(&crtc_state->hw.degamma_lut,
+					  NULL);
+		drm_property_replace_blob(&crtc_state->hw.gamma_lut,
+					  crtc_state->post_csc_lut ?:
+					  crtc_state->pre_csc_lut);
+	}
 
 	drm_property_replace_blob(&crtc_state->uapi.degamma_lut,
 				  crtc_state->hw.degamma_lut);

View File

@@ -14,6 +14,11 @@ static void intel_set_quirk(struct intel_display *display, enum intel_quirk_id q
 	display->quirks.mask |= BIT(quirk);
 }
 
+static void intel_set_dpcd_quirk(struct intel_dp *intel_dp, enum intel_quirk_id quirk)
+{
+	intel_dp->quirks.mask |= BIT(quirk);
+}
+
 /*
  * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
  */
@@ -65,6 +70,14 @@ static void quirk_no_pps_backlight_power_hook(struct intel_display *display)
 	drm_info(display->drm, "Applying no pps backlight power quirk\n");
 }
 
+static void quirk_fw_sync_len(struct intel_dp *intel_dp)
+{
+	struct intel_display *display = to_intel_display(intel_dp);
+
+	intel_set_dpcd_quirk(intel_dp, QUIRK_FW_SYNC_LEN);
+	drm_info(display->drm, "Applying Fast Wake sync pulse count quirk\n");
+}
+
 struct intel_quirk {
 	int device;
 	int subsystem_vendor;
@@ -72,6 +85,21 @@ struct intel_quirk {
 	void (*hook)(struct intel_display *display);
 };
 
+struct intel_dpcd_quirk {
+	int device;
+	int subsystem_vendor;
+	int subsystem_device;
+	u8 sink_oui[3];
+	u8 sink_device_id[6];
+	void (*hook)(struct intel_dp *intel_dp);
+};
+
+#define SINK_OUI(first, second, third) { (first), (second), (third) }
+#define SINK_DEVICE_ID(first, second, third, fourth, fifth, sixth) \
+	{ (first), (second), (third), (fourth), (fifth), (sixth) }
+
+#define SINK_DEVICE_ID_ANY	SINK_DEVICE_ID(0, 0, 0, 0, 0, 0)
+
 /* For systems that don't have a meaningful PCI subdevice/subvendor ID */
 struct intel_dmi_quirk {
 	void (*hook)(struct intel_display *display);
@@ -203,6 +231,18 @@ static struct intel_quirk intel_quirks[] = {
 	{ 0x0f31, 0x103c, 0x220f, quirk_invert_brightness },
 };
 
+static struct intel_dpcd_quirk intel_dpcd_quirks[] = {
+	/* Dell Precision 5490 */
+	{
+		.device = 0x7d55,
+		.subsystem_vendor = 0x1028,
+		.subsystem_device = 0x0cc7,
+		.sink_oui = SINK_OUI(0x38, 0xec, 0x11),
+		.hook = quirk_fw_sync_len,
+	},
+};
+
 void intel_init_quirks(struct intel_display *display)
 {
 	struct pci_dev *d = to_pci_dev(display->drm->dev);
@@ -224,7 +264,35 @@ void intel_init_quirks(struct intel_display *display)
 	}
 }
 
+void intel_init_dpcd_quirks(struct intel_dp *intel_dp,
+			    const struct drm_dp_dpcd_ident *ident)
+{
+	struct intel_display *display = to_intel_display(intel_dp);
+	struct pci_dev *d = to_pci_dev(display->drm->dev);
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(intel_dpcd_quirks); i++) {
+		struct intel_dpcd_quirk *q = &intel_dpcd_quirks[i];
+
+		if (d->device == q->device &&
+		    (d->subsystem_vendor == q->subsystem_vendor ||
+		     q->subsystem_vendor == PCI_ANY_ID) &&
+		    (d->subsystem_device == q->subsystem_device ||
+		     q->subsystem_device == PCI_ANY_ID) &&
+		    !memcmp(q->sink_oui, ident->oui, sizeof(ident->oui)) &&
+		    (!memcmp(q->sink_device_id, ident->device_id,
+			     sizeof(ident->device_id)) ||
+		     !memchr_inv(q->sink_device_id, 0, sizeof(q->sink_device_id))))
+			q->hook(intel_dp);
+	}
+}
+
 bool intel_has_quirk(struct intel_display *display, enum intel_quirk_id quirk)
 {
 	return display->quirks.mask & BIT(quirk);
 }
+
+bool intel_has_dpcd_quirk(struct intel_dp *intel_dp, enum intel_quirk_id quirk)
+{
+	return intel_dp->quirks.mask & BIT(quirk);
+}
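The !memchr_inv(q->sink_device_id, 0, ...) clause makes an all-zero sink_device_id (SINK_DEVICE_ID_ANY) behave as a wildcard, because memchr_inv() returns NULL when a buffer contains nothing but the given byte. A self-contained sketch of that matching rule, with memchr_inv() open-coded for userspace:

    #include <stdio.h>
    #include <string.h>

    /* returns nonzero when quirk_id matches sink_id, treating an all-zero
     * quirk_id as "match any sink" */
    static int device_id_matches(const unsigned char *quirk_id,
                                 const unsigned char *sink_id, size_t len)
    {
            size_t i;
            int all_zero = 1;

            for (i = 0; i < len; i++)  /* memchr_inv(quirk_id, 0, len) == NULL */
                    if (quirk_id[i])
                            all_zero = 0;

            return all_zero || !memcmp(quirk_id, sink_id, len);
    }

    int main(void)
    {
            unsigned char any[6] = { 0 };
            unsigned char sink[6] = { 0x63, 0x38, 0x39, 0x38, 0x00, 0x00 };

            printf("wildcard match: %d\n", device_id_matches(any, sink, 6));
            return 0;
    }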

View File

@@ -9,6 +9,8 @@
 #include <linux/types.h>
 
 struct intel_display;
+struct intel_dp;
+struct drm_dp_dpcd_ident;
 
 enum intel_quirk_id {
 	QUIRK_BACKLIGHT_PRESENT,
@@ -17,9 +19,13 @@ enum intel_quirk_id {
 	QUIRK_INVERT_BRIGHTNESS,
 	QUIRK_LVDS_SSC_DISABLE,
 	QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK,
+	QUIRK_FW_SYNC_LEN,
 };
 
 void intel_init_quirks(struct intel_display *display);
+void intel_init_dpcd_quirks(struct intel_dp *intel_dp,
+			    const struct drm_dp_dpcd_ident *ident);
 bool intel_has_quirk(struct intel_display *display, enum intel_quirk_id quirk);
+bool intel_has_dpcd_quirk(struct intel_dp *intel_dp, enum intel_quirk_id quirk);
 
 #endif /* __INTEL_QUIRKS_H__ */

View File

@@ -302,7 +302,7 @@ void intel_gsc_uc_load_start(struct intel_gsc_uc *gsc)
 {
 	struct intel_gt *gt = gsc_uc_to_gt(gsc);
 
-	if (!intel_uc_fw_is_loadable(&gsc->fw))
+	if (!intel_uc_fw_is_loadable(&gsc->fw) || intel_uc_fw_is_in_error(&gsc->fw))
 		return;
 
 	if (intel_gsc_uc_fw_init_done(gsc))

View File

@@ -258,6 +258,11 @@ static inline bool intel_uc_fw_is_running(struct intel_uc_fw *uc_fw)
 	return __intel_uc_fw_status(uc_fw) == INTEL_UC_FIRMWARE_RUNNING;
 }
 
+static inline bool intel_uc_fw_is_in_error(struct intel_uc_fw *uc_fw)
+{
+	return intel_uc_fw_status_to_error(__intel_uc_fw_status(uc_fw)) != 0;
+}
+
 static inline bool intel_uc_fw_is_overridden(const struct intel_uc_fw *uc_fw)
 {
 	return uc_fw->user_overridden;

View File

@@ -51,7 +51,7 @@ static inline void debug_fence_init(struct i915_sw_fence *fence)
 	debug_object_init(fence, &i915_sw_fence_debug_descr);
 }
 
-static inline void debug_fence_init_onstack(struct i915_sw_fence *fence)
+static inline __maybe_unused void debug_fence_init_onstack(struct i915_sw_fence *fence)
 {
 	debug_object_init_on_stack(fence, &i915_sw_fence_debug_descr);
 }
@@ -77,7 +77,7 @@ static inline void debug_fence_destroy(struct i915_sw_fence *fence)
 	debug_object_destroy(fence, &i915_sw_fence_debug_descr);
 }
 
-static inline void debug_fence_free(struct i915_sw_fence *fence)
+static inline __maybe_unused void debug_fence_free(struct i915_sw_fence *fence)
 {
 	debug_object_free(fence, &i915_sw_fence_debug_descr);
 	smp_wmb(); /* flush the change in state before reallocation */
@@ -94,7 +94,7 @@ static inline void debug_fence_init(struct i915_sw_fence *fence)
 {
 }
 
-static inline void debug_fence_init_onstack(struct i915_sw_fence *fence)
+static inline __maybe_unused void debug_fence_init_onstack(struct i915_sw_fence *fence)
 {
 }
 
@@ -115,7 +115,7 @@ static inline void debug_fence_destroy(struct i915_sw_fence *fence)
 {
 }
 
-static inline void debug_fence_free(struct i915_sw_fence *fence)
+static inline __maybe_unused void debug_fence_free(struct i915_sw_fence *fence)
 {
 }

View File

@@ -114,6 +114,8 @@ struct pvr_vm_gpuva {
 	struct drm_gpuva base;
 };
 
+#define to_pvr_vm_gpuva(va) container_of_const(va, struct pvr_vm_gpuva, base)
+
 enum pvr_vm_bind_type {
 	PVR_VM_BIND_TYPE_MAP,
 	PVR_VM_BIND_TYPE_UNMAP,
@@ -386,6 +388,7 @@ pvr_vm_gpuva_unmap(struct drm_gpuva_op *op, void *op_ctx)
 
 	drm_gpuva_unmap(&op->unmap);
 	drm_gpuva_unlink(op->unmap.va);
+	kfree(to_pvr_vm_gpuva(op->unmap.va));
 
 	return 0;
 }
@@ -433,6 +436,7 @@ pvr_vm_gpuva_remap(struct drm_gpuva_op *op, void *op_ctx)
 	}
 
 	drm_gpuva_unlink(op->remap.unmap->va);
+	kfree(to_pvr_vm_gpuva(op->remap.unmap->va));
 
 	return 0;
 }
} }

View File

@@ -2,6 +2,8 @@ config DRM_IMX_DCSS
 	tristate "i.MX8MQ DCSS"
 	select IMX_IRQSTEER
 	select DRM_KMS_HELPER
+	select DRM_DISPLAY_HELPER
+	select DRM_BRIDGE_CONNECTOR
 	select DRM_GEM_DMA_HELPER
 	select VIDEOMODE_HELPERS
 	depends on DRM && ARCH_MXC && ARM64

View File

@@ -3,5 +3,7 @@ config DRM_IMX_LCDC
 	depends on DRM && (ARCH_MXC || COMPILE_TEST)
 	select DRM_GEM_DMA_HELPER
 	select DRM_KMS_HELPER
+	select DRM_DISPLAY_HELPER
+	select DRM_BRIDGE_CONNECTOR
 	help
 	  Found on i.MX1, i.MX21, i.MX25 and i.MX27.

View File

@@ -8,6 +8,8 @@ config DRM_INGENIC
 	select DRM_BRIDGE
 	select DRM_PANEL_BRIDGE
 	select DRM_KMS_HELPER
+	select DRM_DISPLAY_HELPER
+	select DRM_BRIDGE_CONNECTOR
 	select DRM_GEM_DMA_HELPER
 	select REGMAP
 	select REGMAP_MMIO

View File

@@ -3,6 +3,8 @@ config DRM_KMB_DISPLAY
 	depends on DRM
 	depends on ARCH_KEEMBAY || COMPILE_TEST
 	select DRM_KMS_HELPER
+	select DRM_DISPLAY_HELPER
+	select DRM_BRIDGE_CONNECTOR
 	select DRM_GEM_DMA_HELPER
 	select DRM_MIPI_DSI
 	help

View File

@@ -9,6 +9,8 @@ config DRM_MEDIATEK
 	depends on MTK_MMSYS
 	select DRM_GEM_DMA_HELPER if DRM_FBDEV_EMULATION
 	select DRM_KMS_HELPER
+	select DRM_DISPLAY_HELPER
+	select DRM_BRIDGE_CONNECTOR
 	select DRM_MIPI_DSI
 	select DRM_PANEL
 	select MEMORY

View File

@@ -4,6 +4,8 @@ config DRM_MESON
 	depends on DRM && OF && (ARM || ARM64)
 	depends on ARCH_MESON || COMPILE_TEST
 	select DRM_KMS_HELPER
+	select DRM_DISPLAY_HELPER
+	select DRM_BRIDGE_CONNECTOR
 	select DRM_GEM_DMA_HELPER
 	select DRM_DISPLAY_CONNECTOR
 	select VIDEOMODE_HELPERS

View File

@@ -17,6 +17,7 @@ config DRM_MSM
 	select DRM_DISPLAY_DP_AUX_BUS
 	select DRM_DISPLAY_DP_HELPER
 	select DRM_DISPLAY_HELPER
+	select DRM_BRIDGE_CONNECTOR
 	select DRM_EXEC
 	select DRM_KMS_HELPER
 	select DRM_PANEL

View File

@@ -324,7 +324,7 @@ nvkm_gsp_fwsec_sb(struct nvkm_gsp *gsp)
 		return ret;
 
 	/* Verify. */
-	err = nvkm_rd32(device, 0x001400 + (0xf * 4)) & 0x0000ffff;
+	err = nvkm_rd32(device, 0x001400 + (0x15 * 4)) & 0x0000ffff;
 	if (err) {
 		nvkm_error(subdev, "fwsec-sb: 0x%04x\n", err);
 		return -EIO;

View File

@@ -5,6 +5,8 @@ config DRM_OMAP
 	depends on DRM && OF
 	depends on ARCH_OMAP2PLUS || (COMPILE_TEST && PAGE_SIZE_LESS_THAN_64KB)
 	select DRM_KMS_HELPER
+	select DRM_DISPLAY_HELPER
+	select DRM_BRIDGE_CONNECTOR
 	select FB_DMAMEM_HELPERS_DEFERRED if DRM_FBDEV_EMULATION
 	select VIDEOMODE_HELPERS
 	select HDMI

View File

@@ -925,7 +925,7 @@ MODULE_DEVICE_TABLE(spi, nv3052c_ids);
 static const struct of_device_id nv3052c_of_match[] = {
 	{ .compatible = "leadtek,ltk035c5444t", .data = &ltk035c5444t_panel_info },
 	{ .compatible = "fascontek,fs035vg158", .data = &fs035vg158_panel_info },
-	{ .compatible = "wl-355608-a8", .data = &wl_355608_a8_panel_info },
+	{ .compatible = "anbernic,rg35xx-plus-panel", .data = &wl_355608_a8_panel_info },
 	{ /* sentinel */ }
 };
 MODULE_DEVICE_TABLE(of, nv3052c_of_match);

View File

@@ -10,6 +10,7 @@
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 
+#include <drm/drm_auth.h>
 #include <drm/drm_debugfs.h>
 #include <drm/drm_drv.h>
 #include <drm/drm_exec.h>
@@ -996,6 +997,24 @@ static int panthor_ioctl_group_destroy(struct drm_device *ddev, void *data,
 	return panthor_group_destroy(pfile, args->group_handle);
 }
 
+static int group_priority_permit(struct drm_file *file,
+				 u8 priority)
+{
+	/* Ensure that priority is valid */
+	if (priority > PANTHOR_GROUP_PRIORITY_HIGH)
+		return -EINVAL;
+
+	/* Medium priority and below are always allowed */
+	if (priority <= PANTHOR_GROUP_PRIORITY_MEDIUM)
+		return 0;
+
+	/* Higher priorities require CAP_SYS_NICE or DRM_MASTER */
+	if (capable(CAP_SYS_NICE) || drm_is_current_master(file))
+		return 0;
+
+	return -EACCES;
+}
+
 static int panthor_ioctl_group_create(struct drm_device *ddev, void *data,
 				      struct drm_file *file)
 {
@@ -1011,6 +1030,10 @@ static int panthor_ioctl_group_create(struct drm_device *ddev, void *data,
 	if (ret)
 		return ret;
 
+	ret = group_priority_permit(file, args->priority);
+	if (ret)
+		return ret;
+
 	ret = panthor_group_create(pfile, args, queue_args);
 	if (ret >= 0) {
 		args->group_handle = ret;

View File

@@ -1089,6 +1089,12 @@ int panthor_fw_post_reset(struct panthor_device *ptdev)
 		panthor_fw_stop(ptdev);
 		ptdev->fw->fast_reset = false;
 		drm_err(&ptdev->base, "FW fast reset failed, trying a slow reset");
+
+		ret = panthor_vm_flush_all(ptdev->fw->vm);
+		if (ret) {
+			drm_err(&ptdev->base, "FW slow reset failed (couldn't flush FW's AS l2cache)");
+			return ret;
+		}
 	}
 
 	/* Reload all sections, including RO ones. We're not supposed
@@ -1099,7 +1105,7 @@ int panthor_fw_post_reset(struct panthor_device *ptdev)
 
 	ret = panthor_fw_start(ptdev);
 	if (ret) {
-		drm_err(&ptdev->base, "FW slow reset failed");
+		drm_err(&ptdev->base, "FW slow reset failed (couldn't start the FW)");
 		return ret;
 	}

Some files were not shown because too many files have changed in this diff.