Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Cross-merge networking fixes after downstream PR.

Conflicts:

Documentation/devicetree/bindings/net/fsl,qoriq-mc-dpmac.yaml
  c25504a0ba ("dt-bindings: net: fsl,qoriq-mc-dpmac: add missed property phys")
  be034ee6c3 ("dt-bindings: net: fsl,qoriq-mc-dpmac: using unevaluatedProperties")
https://lore.kernel.org/20240815110934.56ae623a@canb.auug.org.au

drivers/net/dsa/vitesse-vsc73xx-core.c
  5b9eebc2c7 ("net: dsa: vsc73xx: pass value in phy_write operation")
  fa63c6434b ("net: dsa: vsc73xx: check busy flag in MDIO operations")
  2524d6c28b ("net: dsa: vsc73xx: use defined values in phy operations")
https://lore.kernel.org/20240813104039.429b9fe6@canb.auug.org.au
Resolve by using FIELD_PREP(); Stephen's resolution is simpler.
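
For reference, FIELD_PREP() from <linux/bitfield.h> masks and shifts a value
into the field selected by a bitmask, so a register word can be composed
without open-coded shifts. A minimal sketch with hypothetical field masks,
not the actual vsc73xx register layout:

    #include <linux/bitfield.h>
    #include <linux/types.h>

    /* Hypothetical MDIO command fields, for illustration only. */
    #define MII_CMD_PHY_ADDR	GENMASK(25, 21)
    #define MII_CMD_REG_ADDR	GENMASK(20, 16)
    #define MII_CMD_DATA	GENMASK(15, 0)

    static u32 mii_cmd(u32 phy, u32 reg, u32 val)
    {
    	/* Each FIELD_PREP() shifts its value into the mask's bit range. */
    	return FIELD_PREP(MII_CMD_PHY_ADDR, phy) |
    	       FIELD_PREP(MII_CMD_REG_ADDR, reg) |
    	       FIELD_PREP(MII_CMD_DATA, val);
    }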

Adjacent changes:

net/vmw_vsock/af_vsock.c
  69139d2919 ("vsock: fix recursive ->recvmsg calls")
  744500d81f ("vsock: add support for SIOCOUTQ ioctl")

Link: https://patch.msgid.link/20240815141149.33862-1-pabeni@redhat.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

@@ -32,9 +32,9 @@ Description: (RW) The front button on the Turris Omnia router can be
 		interrupt.
 		This file switches between these two modes:
-		- "mcu" makes the button press event be handled by the MCU to
+		- ``mcu`` makes the button press event be handled by the MCU to
 		  change the LEDs panel intensity.
-		- "cpu" makes the button press event be handled by the CPU.
+		- ``cpu`` makes the button press event be handled by the CPU.
 		Format: %s.

@@ -742,7 +742,7 @@ SecurityFlags Flags which control security negotiation and
 	may use NTLMSSP				0x00080
 	must use NTLMSSP			0x80080
 	seal (packet encryption)		0x00040
-	must seal (not implemented yet)		0x40040
+	must seal				0x40040
 cifsFYI		If set to non-zero value, additional debug information
 		will be logged to the system error log. This field

@@ -17,10 +17,13 @@ properties:
   oneOf:
       # Samsung 13.3" FHD (1920x1080 pixels) eDP AMOLED panel
     - const: samsung,atna33xc20
-      # Samsung 14.5" WQXGA+ (2880x1800 pixels) eDP AMOLED panel
     - items:
-        - const: samsung,atna45af01
+        - enum:
+              # Samsung 14.5" WQXGA+ (2880x1800 pixels) eDP AMOLED panel
+            - samsung,atna45af01
+              # Samsung 14.5" 3K (2944x1840 pixels) eDP AMOLED panel
+            - samsung,atna45dc02
         - const: samsung,atna33xc20

   enable-gpios: true
   port: true

@@ -30,6 +30,10 @@ properties:
       A reference to a node representing a PCS PHY device found on
       the internal MDIO bus.

+  phys:
+    description: A reference to the SerDes lane(s)
+    maxItems: 1
+
 required:
   - reg

@@ -199,10 +199,11 @@ additionalProperties: false
 examples:
   - |
+    #include <dt-bindings/gpio/gpio.h>
     codec@1,0{
       compatible = "slim217,250";
       reg = <1 0>;
-      reset-gpios = <&tlmm 64 0>;
+      reset-gpios = <&tlmm 64 GPIO_ACTIVE_LOW>;
       slim-ifc-dev = <&wcd9340_ifd>;
       #sound-dai-cells = <1>;
       interrupt-parent = <&tlmm>;

@@ -42,7 +42,7 @@ examples:
     pinctrl-names = "default", "sleep";
     pinctrl-0 = <&wcd_reset_n>;
     pinctrl-1 = <&wcd_reset_n_sleep>;
-    reset-gpios = <&tlmm 83 GPIO_ACTIVE_HIGH>;
+    reset-gpios = <&tlmm 83 GPIO_ACTIVE_LOW>;
     vdd-buck-supply = <&vreg_l17b_1p8>;
     vdd-rxtx-supply = <&vreg_l18b_1p8>;
     vdd-px-supply = <&vreg_l18b_1p8>;

@@ -34,9 +34,10 @@ unevaluatedProperties: false
 examples:
   - |
+    #include <dt-bindings/gpio/gpio.h>
     codec {
       compatible = "qcom,wcd9380-codec";
-      reset-gpios = <&tlmm 32 0>;
+      reset-gpios = <&tlmm 32 GPIO_ACTIVE_LOW>;
       #sound-dai-cells = <1>;
       qcom,tx-device = <&wcd938x_tx>;
       qcom,rx-device = <&wcd938x_rx>;

@@ -52,10 +52,10 @@ unevaluatedProperties: false
 examples:
   - |
-    #include <dt-bindings/interrupt-controller/irq.h>
+    #include <dt-bindings/gpio/gpio.h>
     codec {
       compatible = "qcom,wcd9390-codec";
-      reset-gpios = <&tlmm 32 IRQ_TYPE_NONE>;
+      reset-gpios = <&tlmm 32 GPIO_ACTIVE_LOW>;
       #sound-dai-cells = <1>;
       qcom,tx-device = <&wcd939x_tx>;
       qcom,rx-device = <&wcd939x_rx>;

@@ -18,6 +18,7 @@ properties:
       - usb424,2412
       - usb424,2417
       - usb424,2514
+      - usb424,2517

   reg: true

@@ -318,10 +318,10 @@ where the columns are:
 Debugging
 =========

-If CONFIG_FSCACHE_DEBUG is enabled, the FS-Cache facility can have runtime
-debugging enabled by adjusting the value in::
+If CONFIG_NETFS_DEBUG is enabled, the FS-Cache facility and NETFS support can
+have runtime debugging enabled by adjusting the value in::

-	/sys/module/fscache/parameters/debug
+	/sys/module/netfs/parameters/debug

 This is a bitmask of debugging streams to enable:

@@ -343,6 +343,6 @@ This is a bitmask of debugging streams to enable:
 The appropriate set of values should be OR'd together and the result written to
 the control file. For example::

-	echo $((1|8|512)) >/sys/module/fscache/parameters/debug
+	echo $((1|8|512)) >/sys/module/netfs/parameters/debug

 will turn on all function entry debugging.

@@ -13,9 +13,9 @@ kernel.
 Hardware issues like Meltdown, Spectre, L1TF etc. must be treated
 differently because they usually affect all Operating Systems ("OS") and
 therefore need coordination across different OS vendors, distributions,
-hardware vendors and other parties. For some of the issues, software
-mitigations can depend on microcode or firmware updates, which need further
-coordination.
+silicon vendors, hardware integrators, and other parties. For some of the
+issues, software mitigations can depend on microcode or firmware updates,
+which need further coordination.

 .. _Contact:

@@ -32,8 +32,8 @@ Linux kernel security team (:ref:`Documentation/admin-guide/
 <securitybugs>`) instead.

 The team can be contacted by email at <hardware-security@kernel.org>. This
-is a private list of security officers who will help you to coordinate a
-fix according to our documented process.
+is a private list of security officers who will help you coordinate a fix
+according to our documented process.

 The list is encrypted and email to the list can be sent by either PGP or
 S/MIME encrypted and must be signed with the reporter's PGP key or S/MIME

@@ -43,7 +43,7 @@ the following URLs:
 - PGP: https://www.kernel.org/static/files/hardware-security.asc
 - S/MIME: https://www.kernel.org/static/files/hardware-security.crt

-While hardware security issues are often handled by the affected hardware
+While hardware security issues are often handled by the affected silicon
 vendor, we welcome contact from researchers or individuals who have
 identified a potential hardware flaw.

@@ -65,7 +65,7 @@ of Linux Foundation's IT operations personnel technically have the
 ability to access the embargoed information, but are obliged to
 confidentiality by their employment contract. Linux Foundation IT
 personnel are also responsible for operating and managing the rest of
-kernel.org infrastructure.
+kernel.org's infrastructure.

 The Linux Foundation's current director of IT Project infrastructure is
 Konstantin Ryabitsev.

@@ -85,7 +85,7 @@ Memorandum of Understanding
 The Linux kernel community has a deep understanding of the requirement to
 keep hardware security issues under embargo for coordination between
-different OS vendors, distributors, hardware vendors and other parties.
+different OS vendors, distributors, silicon vendors, and other parties.

 The Linux kernel community has successfully handled hardware security
 issues in the past and has the necessary mechanisms in place to allow

@@ -103,11 +103,11 @@ the issue in the best technical way.
 All involved developers pledge to adhere to the embargo rules and to keep
 the received information confidential. Violation of the pledge will lead to
 immediate exclusion from the current issue and removal from all related
-mailing-lists. In addition, the hardware security team will also exclude
+mailing lists. In addition, the hardware security team will also exclude
 the offender from future issues. The impact of this consequence is a highly
 effective deterrent in our community. In case a violation happens the
 hardware security team will inform the involved parties immediately. If you
-or anyone becomes aware of a potential violation, please report it
+or anyone else becomes aware of a potential violation, please report it
 immediately to the Hardware security officers.

@@ -124,14 +124,16 @@ method for these types of issues.
 Start of Disclosure
 """""""""""""""""""

-Disclosure starts by contacting the Linux kernel hardware security team by
-email. This initial contact should contain a description of the problem and
-a list of any known affected hardware. If your organization builds or
-distributes the affected hardware, we encourage you to also consider what
-other hardware could be affected.
+Disclosure starts by emailing the Linux kernel hardware security team per
+the Contact section above. This initial contact should contain a
+description of the problem and a list of any known affected silicon. If
+your organization builds or distributes the affected hardware, we encourage
+you to also consider what other hardware could be affected. The disclosing
+party is responsible for contacting the affected silicon vendors in a
+timely manner.

 The hardware security team will provide an incident-specific encrypted
-mailing-list which will be used for initial discussion with the reporter,
+mailing list which will be used for initial discussion with the reporter,
 further disclosure, and coordination of fixes.

 The hardware security team will provide the disclosing party a list of

@@ -158,8 +160,8 @@ This serves several purposes:
 - The disclosed entities can be contacted to name experts who should
   participate in the mitigation development.

-- If an expert which is required to handle an issue is employed by an
-  listed entity or member of an listed entity, then the response teams can
+- If an expert who is required to handle an issue is employed by a listed
+  entity or member of an listed entity, then the response teams can
   request the disclosure of that expert from that entity. This ensures
   that the expert is also part of the entity's response team.

@@ -169,8 +171,8 @@ Disclosure
 The disclosing party provides detailed information to the initial response
 team via the specific encrypted mailing-list.

-From our experience the technical documentation of these issues is usually
-a sufficient starting point and further technical clarification is best
+From our experience, the technical documentation of these issues is usually
+a sufficient starting point, and further technical clarification is best
 done via email.

 Mitigation development

@@ -179,57 +181,93 @@ Mitigation development
 The initial response team sets up an encrypted mailing-list or repurposes
 an existing one if appropriate.

-Using a mailing-list is close to the normal Linux development process and
-has been successfully used in developing mitigations for various hardware
+Using a mailing list is close to the normal Linux development process and
+has been successfully used to develop mitigations for various hardware
 security issues in the past.

-The mailing-list operates in the same way as normal Linux development.
-Patches are posted, discussed and reviewed and if agreed on applied to a
-non-public git repository which is only accessible to the participating
+The mailing list operates in the same way as normal Linux development.
+Patches are posted, discussed, and reviewed and if agreed upon, applied to
+a non-public git repository which is only accessible to the participating
 developers via a secure connection. The repository contains the main
 development branch against the mainline kernel and backport branches for
 stable kernel versions as necessary.

 The initial response team will identify further experts from the Linux
-kernel developer community as needed. Bringing in experts can happen at any
-time of the development process and needs to be handled in a timely manner.
+kernel developer community as needed. Any involved party can suggest
+further experts to be included, each of which will be subject to the same
+requirements outlined above.

-If an expert is employed by or member of an entity on the disclosure list
+Bringing in experts can happen at any time in the development process and
+needs to be handled in a timely manner.
+
+If an expert is employed by or a member of an entity on the disclosure list
 provided by the disclosing party, then participation will be requested from
 the relevant entity.

-If not, then the disclosing party will be informed about the experts
+If not, then the disclosing party will be informed about the experts'
 participation. The experts are covered by the Memorandum of Understanding
-and the disclosing party is requested to acknowledge the participation. In
-case that the disclosing party has a compelling reason to object, then this
-objection has to be raised within five work days and resolved with the
-incident team immediately. If the disclosing party does not react within
-five work days this is taken as silent acknowledgement.
+and the disclosing party is requested to acknowledge their participation.
+In the case where the disclosing party has a compelling reason to object,
+any objection must be raised within five working days and resolved with
+the incident team immediately. If the disclosing party does not react
+within five working days this is taken as silent acknowledgment.

-After acknowledgement or resolution of an objection the expert is disclosed
-by the incident team and brought into the development process.
+After the incident team acknowledges or resolves an objection, the expert
+is disclosed and brought into the development process.

 List participants may not communicate about the issue outside of the
 private mailing list. List participants may not use any shared resources
 (e.g. employer build farms, CI systems, etc) when working on patches.

+Early access
+""""""""""""
+
+The patches discussed and developed on the list can neither be distributed
+to any individual who is not a member of the response team nor to any other
+organization.
+
+To allow the affected silicon vendors to work with their internal teams and
+industry partners on testing, validation, and logistics, the following
+exception is provided:
+
+	Designated representatives of the affected silicon vendors are
+	allowed to hand over the patches at any time to the silicon
+	vendor's response team. The representative must notify the kernel
+	response team about the handover. The affected silicon vendor must
+	have and maintain their own documented security process for any
+	patches shared with their response team that is consistent with
+	this policy.
+
+	The silicon vendor's response team can distribute these patches to
+	their industry partners and to their internal teams under the
+	silicon vendor's documented security process. Feedback from the
+	industry partners goes back to the silicon vendor and is
+	communicated by the silicon vendor to the kernel response team.
+
+	The handover to the silicon vendor's response team removes any
+	responsibility or liability from the kernel response team regarding
+	premature disclosure, which happens due to the involvement of the
+	silicon vendor's internal teams or industry partners. The silicon
+	vendor guarantees this release of liability by agreeing to this
+	process.
+
 Coordinated release
 """""""""""""""""""

-The involved parties will negotiate the date and time where the embargo
-ends. At that point the prepared mitigations are integrated into the
-relevant kernel trees and published. There is no pre-notification process:
-fixes are published in public and available to everyone at the same time.
+The involved parties will negotiate the date and time when the embargo
+ends. At that point, the prepared mitigations are published into the
+relevant kernel trees. There is no pre-notification process: the
+mitigations are published in public and available to everyone at the same
+time.

 While we understand that hardware security issues need coordinated embargo
-time, the embargo time should be constrained to the minimum time which is
-required for all involved parties to develop, test and prepare the
+time, the embargo time should be constrained to the minimum time that is
+required for all involved parties to develop, test, and prepare their
 mitigations. Extending embargo time artificially to meet conference talk
-dates or other non-technical reasons is creating more work and burden for
-the involved developers and response teams as the patches need to be kept
-up to date in order to follow the ongoing upstream kernel development,
-which might create conflicting changes.
+dates or other non-technical reasons creates more work and burden for the
+involved developers and response teams as the patches need to be kept up to
+date in order to follow the ongoing upstream kernel development, which
+might create conflicting changes.

 CVE assignment
 """"""""""""""

@@ -275,34 +313,35 @@ an involved disclosed party. The current ambassadors list:
 If you want your organization to be added to the ambassadors list, please
 contact the hardware security team. The nominated ambassador has to
-understand and support our process fully and is ideally well connected in
+understand and support our process fully and is ideally well-connected in
 the Linux kernel community.

 Encrypted mailing-lists
 -----------------------

-We use encrypted mailing-lists for communication. The operating principle
+We use encrypted mailing lists for communication. The operating principle
 of these lists is that email sent to the list is encrypted either with the
-list's PGP key or with the list's S/MIME certificate. The mailing-list
+list's PGP key or with the list's S/MIME certificate. The mailing list
 software decrypts the email and re-encrypts it individually for each
 subscriber with the subscriber's PGP key or S/MIME certificate. Details
-about the mailing-list software and the setup which is used to ensure the
+about the mailing list software and the setup that is used to ensure the
 security of the lists and protection of the data can be found here:
 https://korg.wiki.kernel.org/userdoc/remail.

 List keys
 ^^^^^^^^^

-For initial contact see :ref:`Contact`. For incident specific mailing-lists
-the key and S/MIME certificate are conveyed to the subscribers by email
-sent from the specific list.
+For initial contact see the :ref:`Contact` section above. For incident
+specific mailing lists, the key and S/MIME certificate are conveyed to the
+subscribers by email sent from the specific list.

-Subscription to incident specific lists
+Subscription to incident-specific lists
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

-Subscription is handled by the response teams. Disclosed parties who want
-to participate in the communication send a list of potential subscribers to
-the response team so the response team can validate subscription requests.
+Subscription to incident-specific lists is handled by the response teams.
+Disclosed parties who want to participate in the communication send a list
+of potential experts to the response team so the response team can validate
+subscription requests.

 Each subscriber needs to send a subscription request to the response team
 by email. The email must be signed with the subscriber's PGP key or S/MIME

@@ -2592,7 +2592,7 @@ Specifically:
   0x6030 0000 0010 004a SPSR_ABT    64  spsr[KVM_SPSR_ABT]
   0x6030 0000 0010 004c SPSR_UND    64  spsr[KVM_SPSR_UND]
   0x6030 0000 0010 004e SPSR_IRQ    64  spsr[KVM_SPSR_IRQ]
-  0x6060 0000 0010 0050 SPSR_FIQ    64  spsr[KVM_SPSR_FIQ]
+  0x6030 0000 0010 0050 SPSR_FIQ    64  spsr[KVM_SPSR_FIQ]
   0x6040 0000 0010 0054 V0          128 fp_regs.vregs[0]    [1]_
   0x6040 0000 0010 0058 V1          128 fp_regs.vregs[1]    [1]_
   ...

@@ -5306,7 +5306,7 @@ F: drivers/media/cec/i2c/ch7322.c
 CIRRUS LOGIC AUDIO CODEC DRIVERS
 M:	David Rhodes <david.rhodes@cirrus.com>
 M:	Richard Fitzgerald <rf@opensource.cirrus.com>
-L:	alsa-devel@alsa-project.org (moderated for non-subscribers)
+L:	linux-sound@vger.kernel.org
 L:	patches@opensource.cirrus.com
 S:	Maintained
 F:	Documentation/devicetree/bindings/sound/cirrus,cs*

@@ -5375,7 +5375,7 @@ F: sound/soc/codecs/lochnagar-sc.c
 CIRRUS LOGIC MADERA CODEC DRIVERS
 M:	Charles Keepax <ckeepax@opensource.cirrus.com>
 M:	Richard Fitzgerald <rf@opensource.cirrus.com>
-L:	alsa-devel@alsa-project.org (moderated for non-subscribers)
+L:	linux-sound@vger.kernel.org
 L:	patches@opensource.cirrus.com
 S:	Supported
 W:	https://github.com/CirrusLogic/linux-drivers/wiki

@@ -2,7 +2,7 @@
 VERSION = 6
 PATCHLEVEL = 11
 SUBLEVEL = 0
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc3
 NAME = Baby Opossum Posse

 # *DOCUMENTATION*

@@ -21,6 +21,7 @@
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/partitions.h>
 #include <linux/gpio/machine.h>
+#include <linux/gpio/property.h>
 #include <linux/gpio.h>
 #include <linux/err.h>
 #include <linux/clk.h>
@@ -40,6 +41,7 @@
 #include <linux/platform_data/mmc-pxamci.h>

 #include "udc.h"
 #include "gumstix.h"
+#include "devices.h"
 #include "generic.h"
@@ -99,8 +101,8 @@ static void __init gumstix_mmc_init(void)
 }
 #endif

-#ifdef CONFIG_USB_PXA25X
-static const struct property_entry spitz_mci_props[] __initconst = {
+#if IS_ENABLED(CONFIG_USB_PXA25X)
+static const struct property_entry gumstix_vbus_props[] __initconst = {
 	PROPERTY_ENTRY_GPIO("vbus-gpios", &pxa2xx_gpiochip_node,
 			    GPIO_GUMSTIX_USB_GPIOn, GPIO_ACTIVE_HIGH),
 	PROPERTY_ENTRY_GPIO("pullup-gpios", &pxa2xx_gpiochip_node,
@@ -109,8 +111,9 @@ static const struct property_entry spitz_mci_props[] __initconst = {
 };

 static const struct platform_device_info gumstix_gpio_vbus_info __initconst = {
 	.name		= "gpio-vbus",
 	.id		= PLATFORM_DEVID_NONE,
+	.properties	= gumstix_vbus_props,
 };

 static void __init gumstix_udc_init(void)

@@ -43,15 +43,6 @@
 			sound-dai = <&mcasp0>;
 		};
 	};
-
-	reg_usb_hub: regulator-usb-hub {
-		compatible = "regulator-fixed";
-		enable-active-high;
-		/* Verdin CTRL_SLEEP_MOCI# (SODIMM 256) */
-		gpio = <&main_gpio0 31 GPIO_ACTIVE_HIGH>;
-		regulator-boot-on;
-		regulator-name = "HUB_PWR_EN";
-	};
 };

 /* Verdin ETHs */
@@ -193,11 +184,6 @@
 	status = "okay";
 };

-/* Do not force CTRL_SLEEP_MOCI# always enabled */
-&reg_force_sleep_moci {
-	status = "disabled";
-};
-
 /* Verdin SD_1 */
 &sdhci1 {
 	status = "okay";
@@ -218,15 +204,7 @@
 };

 &usb1 {
-	#address-cells = <1>;
-	#size-cells = <0>;
 	status = "okay";
-
-	usb-hub@1 {
-		compatible = "usb424,2744";
-		reg = <1>;
-		vdd-supply = <&reg_usb_hub>;
-	};
 };

 /* Verdin CTRL_WAKE1_MICO# */

@@ -138,12 +138,6 @@
 		vin-supply = <&reg_1v8>;
 	};

-	/*
-	 * By default we enable CTRL_SLEEP_MOCI#, this is required to have
-	 * peripherals on the carrier board powered.
-	 * If more granularity or power saving is required this can be disabled
-	 * in the carrier board device tree files.
-	 */
 	reg_force_sleep_moci: regulator-force-sleep-moci {
 		compatible = "regulator-fixed";
 		enable-active-high;

@@ -146,6 +146,8 @@
 		power-domains = <&k3_pds 79 TI_SCI_PD_EXCLUSIVE>;
 		clocks = <&k3_clks 79 0>;
 		clock-names = "gpio";
+		gpio-ranges = <&mcu_pmx0 0 0 21>, <&mcu_pmx0 21 23 1>,
+			      <&mcu_pmx0 22 32 2>;
 	};

 	mcu_rti0: watchdog@4880000 {

@@ -45,7 +45,8 @@
 &main_pmx0 {
 	pinctrl-single,gpio-range =
 		<&main_pmx0_range 0 32 PIN_GPIO_RANGE_IOPAD>,
-		<&main_pmx0_range 33 92 PIN_GPIO_RANGE_IOPAD>,
+		<&main_pmx0_range 33 38 PIN_GPIO_RANGE_IOPAD>,
+		<&main_pmx0_range 72 22 PIN_GPIO_RANGE_IOPAD>,
 		<&main_pmx0_range 137 5 PIN_GPIO_RANGE_IOPAD>,
 		<&main_pmx0_range 143 3 PIN_GPIO_RANGE_IOPAD>,
 		<&main_pmx0_range 149 2 PIN_GPIO_RANGE_IOPAD>;

@@ -193,7 +193,8 @@
 &main_pmx0 {
 	pinctrl-single,gpio-range =
 		<&main_pmx0_range 0 32 PIN_GPIO_RANGE_IOPAD>,
-		<&main_pmx0_range 33 55 PIN_GPIO_RANGE_IOPAD>,
+		<&main_pmx0_range 33 38 PIN_GPIO_RANGE_IOPAD>,
+		<&main_pmx0_range 72 17 PIN_GPIO_RANGE_IOPAD>,
 		<&main_pmx0_range 101 25 PIN_GPIO_RANGE_IOPAD>,
 		<&main_pmx0_range 137 5 PIN_GPIO_RANGE_IOPAD>,
 		<&main_pmx0_range 143 3 PIN_GPIO_RANGE_IOPAD>,

@@ -1262,6 +1262,14 @@
 &serdes0 {
 	status = "okay";

+	serdes0_pcie1_link: phy@0 {
+		reg = <0>;
+		cdns,num-lanes = <2>;
+		#phy-cells = <0>;
+		cdns,phy-type = <PHY_TYPE_PCIE>;
+		resets = <&serdes_wiz0 1>, <&serdes_wiz0 2>;
+	};
+
 	serdes0_usb_link: phy@3 {
 		reg = <3>;
 		cdns,num-lanes = <1>;
@@ -1386,23 +1394,6 @@
 	phys = <&transceiver3>;
 };

-&serdes0 {
-	status = "okay";
-
-	serdes0_pcie1_link: phy@0 {
-		reg = <0>;
-		cdns,num-lanes = <4>;
-		#phy-cells = <0>;
-		cdns,phy-type = <PHY_TYPE_PCIE>;
-		resets = <&serdes_wiz0 1>, <&serdes_wiz0 2>,
-			 <&serdes_wiz0 3>, <&serdes_wiz0 4>;
-	};
-};
-
-&serdes_wiz0 {
-	status = "okay";
-};
-
 &pcie1_rc {
 	status = "okay";
 	num-lanes = <2>;

@@ -2755,7 +2755,7 @@
 		interrupts = <GIC_SPI 550 IRQ_TYPE_LEVEL_HIGH>,
 			     <GIC_SPI 551 IRQ_TYPE_LEVEL_HIGH>;
 		interrupt-names = "tx", "rx";
-		dmas = <&main_udmap 0xc500>, <&main_udmap 0x4500>;
+		dmas = <&main_udmap 0xc403>, <&main_udmap 0x4403>;
 		dma-names = "tx", "rx";
 		clocks = <&k3_clks 268 0>;
 		clock-names = "fck";
@@ -2773,7 +2773,7 @@
 		interrupts = <GIC_SPI 552 IRQ_TYPE_LEVEL_HIGH>,
 			     <GIC_SPI 553 IRQ_TYPE_LEVEL_HIGH>;
 		interrupt-names = "tx", "rx";
-		dmas = <&main_udmap 0xc501>, <&main_udmap 0x4501>;
+		dmas = <&main_udmap 0xc404>, <&main_udmap 0x4404>;
 		dma-names = "tx", "rx";
 		clocks = <&k3_clks 269 0>;
 		clock-names = "fck";

@@ -104,7 +104,7 @@ alternative_else_nop_endif
 #define __ptrauth_save_key(ctxt, key)					\
 	do {								\
 		u64 __val;						\
-		__val = read_sysreg_s(SYS_ ## key ## KEYLO_EL1);	\
+		__val = read_sysreg_s(SYS_ ## key ## KEYLO_EL1);	\
 		ctxt_sys_reg(ctxt, key ## KEYLO_EL1) = __val;		\
 		__val = read_sysreg_s(SYS_ ## key ## KEYHI_EL1);	\

@@ -19,6 +19,7 @@ if VIRTUALIZATION
 menuconfig KVM
 	bool "Kernel-based Virtual Machine (KVM) support"
+	depends on AS_HAS_ARMV8_4
 	select KVM_COMMON
 	select KVM_GENERIC_HARDWARE_ENABLING
 	select KVM_GENERIC_MMU_NOTIFIER

@@ -10,6 +10,9 @@ include $(srctree)/virt/kvm/Makefile.kvm
 obj-$(CONFIG_KVM) += kvm.o
 obj-$(CONFIG_KVM) += hyp/

+CFLAGS_sys_regs.o += -Wno-override-init
+CFLAGS_handle_exit.o += -Wno-override-init
+
 kvm-y += arm.o mmu.o mmio.o psci.o hypercalls.o pvtime.o \
	 inject_fault.o va_layout.o handle_exit.o \
	 guest.o debug.o reset.o sys_regs.o stacktrace.o \

@@ -164,6 +164,7 @@ static int kvm_arm_default_max_vcpus(void)
 /**
  * kvm_arch_init_vm - initializes a VM data structure
  * @kvm: pointer to the KVM struct
+ * @type: kvm device type
  */
 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 {
@@ -521,10 +522,10 @@ void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
 static void vcpu_set_pauth_traps(struct kvm_vcpu *vcpu)
 {
-	if (vcpu_has_ptrauth(vcpu)) {
+	if (vcpu_has_ptrauth(vcpu) && !is_protected_kvm_enabled()) {
 		/*
-		 * Either we're running running an L2 guest, and the API/APK
-		 * bits come from L1's HCR_EL2, or API/APK are both set.
+		 * Either we're running an L2 guest, and the API/APK bits come
+		 * from L1's HCR_EL2, or API/APK are both set.
 		 */
 		if (unlikely(vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu))) {
 			u64 val;
@@ -541,16 +542,10 @@ static void vcpu_set_pauth_traps(struct kvm_vcpu *vcpu)
 		 * Save the host keys if there is any chance for the guest
 		 * to use pauth, as the entry code will reload the guest
 		 * keys in that case.
-		 * Protected mode is the exception to that rule, as the
-		 * entry into the EL2 code eagerly switch back and forth
-		 * between host and hyp keys (and kvm_hyp_ctxt is out of
-		 * reach anyway).
 		 */
-		if (is_protected_kvm_enabled())
-			return;
-
 		if (vcpu->arch.hcr_el2 & (HCR_API | HCR_APK)) {
 			struct kvm_cpu_context *ctxt;

 			ctxt = this_cpu_ptr_hyp_sym(kvm_hyp_ctxt);
 			ptrauth_save_keys(ctxt);
 		}

@@ -27,7 +27,6 @@
 #include <asm/kvm_hyp.h>
 #include <asm/kvm_mmu.h>
 #include <asm/kvm_nested.h>
-#include <asm/kvm_ptrauth.h>
 #include <asm/fpsimd.h>
 #include <asm/debug-monitors.h>
 #include <asm/processor.h>

@@ -20,6 +20,8 @@ HOST_EXTRACFLAGS += -I$(objtree)/include
 lib-objs := clear_page.o copy_page.o memcpy.o memset.o
 lib-objs := $(addprefix ../../../lib/, $(lib-objs))

+CFLAGS_switch.nvhe.o += -Wno-override-init
+
 hyp-obj-y := timer-sr.o sysreg-sr.o debug-sr.o switch.o tlb.o hyp-init.o host.o \
	 hyp-main.o hyp-smp.o psci-relay.o early_alloc.o page_alloc.o \
	 cache.o setup.o mm.o mem_protect.o sys_regs.o pkvm.o stacktrace.o ffa.o

@@ -173,9 +173,8 @@ static void __pmu_switch_to_host(struct kvm_vcpu *vcpu)
 static bool kvm_handle_pvm_sys64(struct kvm_vcpu *vcpu, u64 *exit_code)
 {
 	/*
-	 * Make sure we handle the exit for workarounds and ptrauth
-	 * before the pKVM handling, as the latter could decide to
-	 * UNDEF.
+	 * Make sure we handle the exit for workarounds before the pKVM
+	 * handling, as the latter could decide to UNDEF.
 	 */
 	return (kvm_hyp_handle_sysreg(vcpu, exit_code) ||
 		kvm_handle_pvm_sysreg(vcpu, exit_code));

@@ -6,6 +6,8 @@
 asflags-y := -D__KVM_VHE_HYPERVISOR__
 ccflags-y := -D__KVM_VHE_HYPERVISOR__

+CFLAGS_switch.o += -Wno-override-init
+
 obj-y := timer-sr.o sysreg-sr.o debug-sr.o switch.o tlb.o
 obj-y += ../vgic-v3-sr.o ../aarch32.o ../vgic-v2-cpuif-proxy.o ../entry.o \
	 ../fpsimd.o ../hyp-entry.o ../exception.o

@@ -786,7 +786,7 @@ void kvm_arch_flush_shadow_all(struct kvm *kvm)
 		if (!WARN_ON(atomic_read(&mmu->refcnt)))
 			kvm_free_stage2_pgd(mmu);
 	}
-	kfree(kvm->arch.nested_mmus);
+	kvfree(kvm->arch.nested_mmus);
 	kvm->arch.nested_mmus = NULL;
 	kvm->arch.nested_mmus_size = 0;
 	kvm_uninit_stage2_mmu(kvm);

@@ -45,7 +45,8 @@ static void iter_next(struct kvm *kvm, struct vgic_state_iter *iter)
 	 * Let the xarray drive the iterator after the last SPI, as the iterator
 	 * has exhausted the sequentially-allocated INTID space.
 	 */
-	if (iter->intid >= (iter->nr_spis + VGIC_NR_PRIVATE_IRQS - 1)) {
+	if (iter->intid >= (iter->nr_spis + VGIC_NR_PRIVATE_IRQS - 1) &&
+	    iter->nr_lpis) {
 		if (iter->lpi_idx < iter->nr_lpis)
 			xa_find_after(&dist->lpi_xa, &iter->intid,
 				      VGIC_LPI_MAX_INTID,
@@ -112,7 +113,7 @@ static bool end_of_vgic(struct vgic_state_iter *iter)
 	return iter->dist_id > 0 &&
 	       iter->vcpu_id == iter->nr_cpus &&
 	       iter->intid >= (iter->nr_spis + VGIC_NR_PRIVATE_IRQS) &&
-	       iter->lpi_idx > iter->nr_lpis;
+	       (!iter->nr_lpis || iter->lpi_idx > iter->nr_lpis);
 }

 static void *vgic_debug_start(struct seq_file *s, loff_t *pos)

@@ -438,14 +438,13 @@ void kvm_vgic_destroy(struct kvm *kvm)
 	unsigned long i;

 	mutex_lock(&kvm->slots_lock);
+	mutex_lock(&kvm->arch.config_lock);

 	vgic_debug_destroy(kvm);

 	kvm_for_each_vcpu(i, vcpu, kvm)
 		__kvm_vgic_vcpu_destroy(vcpu);

-	mutex_lock(&kvm->arch.config_lock);
-
 	kvm_vgic_dist_destroy(kvm);

 	mutex_unlock(&kvm->arch.config_lock);

@@ -9,7 +9,7 @@
 #include <kvm/arm_vgic.h>
 #include "vgic.h"

-/**
+/*
  * vgic_irqfd_set_irq: inject the IRQ corresponding to the
  * irqchip routing entry
  *
@@ -75,7 +75,8 @@ static void kvm_populate_msi(struct kvm_kernel_irq_routing_entry *e,
 	msi->flags = e->msi.flags;
 	msi->devid = e->msi.devid;
 }
-/**
+
+/*
  * kvm_set_msi: inject the MSI corresponding to the
  * MSI routing entry
  *
@@ -98,7 +99,7 @@ int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
 	return vgic_its_inject_msi(kvm, &msi);
 }

-/**
+/*
  * kvm_arch_set_irq_inatomic: fast-path for irqfd injection
  */
 int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e,

@@ -2040,6 +2040,7 @@ typedef int (*entry_fn_t)(struct vgic_its *its, u32 id, void *entry,
  * @start_id: the ID of the first entry in the table
  *            (non zero for 2d level tables)
  * @fn: function to apply on each entry
+ * @opaque: pointer to opaque data
  *
  * Return: < 0 on error, 0 if last element was identified, 1 otherwise
  * (the last element may not be found on second level tables)
@@ -2079,7 +2080,7 @@ static int scan_its_table(struct vgic_its *its, gpa_t base, int size, u32 esz,
 	return 1;
 }

-/**
+/*
  * vgic_its_save_ite - Save an interrupt translation entry at @gpa
  */
 static int vgic_its_save_ite(struct vgic_its *its, struct its_device *dev,
@@ -2099,6 +2100,8 @@ static int vgic_its_save_ite(struct vgic_its *its, struct its_device *dev,
 /**
  * vgic_its_restore_ite - restore an interrupt translation entry
+ *
+ * @its: its handle
  * @event_id: id used for indexing
  * @ptr: pointer to the ITE entry
  * @opaque: pointer to the its_device
@@ -2231,6 +2234,7 @@ static int vgic_its_restore_itt(struct vgic_its *its, struct its_device *dev)
  * @its: ITS handle
  * @dev: ITS device
  * @ptr: GPA
+ * @dte_esz: device table entry size
  */
 static int vgic_its_save_dte(struct vgic_its *its, struct its_device *dev,
			     gpa_t ptr, int dte_esz)
@@ -2313,7 +2317,7 @@ static int vgic_its_device_cmp(void *priv, const struct list_head *a,
 	return 1;
 }

-/**
+/*
  * vgic_its_save_device_tables - Save the device table and all ITT
  * into guest RAM
  *
@@ -2386,7 +2390,7 @@ static int handle_l1_dte(struct vgic_its *its, u32 id, void *addr,
 	return ret;
 }

-/**
+/*
  * vgic_its_restore_device_tables - Restore the device table and all ITT
  * from guest RAM to internal data structs
  */
@@ -2478,7 +2482,7 @@ static int vgic_its_restore_cte(struct vgic_its *its, gpa_t gpa, int esz)
 	return 1;
 }

-/**
+/*
  * vgic_its_save_collection_table - Save the collection table into
  * guest RAM
  */
@@ -2518,7 +2522,7 @@ static int vgic_its_save_collection_table(struct vgic_its *its)
 	return ret;
 }

-/**
+/*
  * vgic_its_restore_collection_table - reads the collection table
  * in guest memory and restores the ITS internal state. Requires the
  * BASER registers to be restored before.
@@ -2556,7 +2560,7 @@ static int vgic_its_restore_collection_table(struct vgic_its *its)
 	return ret;
 }

-/**
+/*
  * vgic_its_save_tables_v0 - Save the ITS tables into guest ARM
  * according to v0 ABI
  */
@@ -2571,7 +2575,7 @@ static int vgic_its_save_tables_v0(struct vgic_its *its)
 	return vgic_its_save_collection_table(its);
 }

-/**
+/*
  * vgic_its_restore_tables_v0 - Restore the ITS tables from guest RAM
  * to internal data structs according to V0 ABI
  *

@@ -370,7 +370,7 @@ static void map_all_vpes(struct kvm *kvm)
				      dist->its_vm.vpes[i]->irq));
 }

-/**
+/*
  * vgic_v3_save_pending_tables - Save the pending tables into guest RAM
  * kvm lock and all vcpu lock must be held
  */

@@ -313,7 +313,7 @@ static bool vgic_validate_injection(struct vgic_irq *irq, bool level, void *owne
  * with all locks dropped.
  */
 bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq,
-			   unsigned long flags)
+			   unsigned long flags) __releases(&irq->irq_lock)
 {
 	struct kvm_vcpu *vcpu;

@@ -186,7 +186,7 @@ bool vgic_get_phys_line_level(struct vgic_irq *irq);
 void vgic_irq_set_phys_pending(struct vgic_irq *irq, bool pending);
 void vgic_irq_set_phys_active(struct vgic_irq *irq, bool active);
 bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq,
-			   unsigned long flags);
+			   unsigned long flags) __releases(&irq->irq_lock);
 void vgic_kick_vcpus(struct kvm *kvm);
 void vgic_irq_handle_resampling(struct vgic_irq *irq,
				bool lr_deactivated, bool lr_pending);

@@ -441,7 +441,10 @@ static inline int share(unsigned long addr, u16 cmd)
 	if (!uv_call(0, (u64)&uvcb))
 		return 0;
-	return -EINVAL;
+	pr_err("%s UVC failed (rc: 0x%x, rrc: 0x%x), possible hypervisor bug.\n",
+	       uvcb.header.cmd == UVC_CMD_SET_SHARED_ACCESS ? "Share" : "Unshare",
+	       uvcb.header.rc, uvcb.header.rrc);
+	panic("System security cannot be guaranteed unless the system panics now.\n");
 }

 /*

@@ -267,7 +267,12 @@ static inline unsigned long kvm_s390_get_gfn_end(struct kvm_memslots *slots)
 static inline u32 kvm_s390_get_gisa_desc(struct kvm *kvm)
 {
-	u32 gd = virt_to_phys(kvm->arch.gisa_int.origin);
+	u32 gd;
+
+	if (!kvm->arch.gisa_int.origin)
+		return 0;
+
+	gd = virt_to_phys(kvm->arch.gisa_int.origin);

 	if (gd && sclp.has_gisaf)
 		gd |= GISA_FORMAT1;

@@ -2192,6 +2192,8 @@ void kvm_configure_mmu(bool enable_tdp, int tdp_forced_root_level,
 #define kvm_arch_has_private_mem(kvm) false
 #endif

+#define kvm_arch_has_readonly_mem(kvm) (!(kvm)->arch.has_protected_state)
+
 static inline u16 kvm_read_ldt(void)
 {
 	u16 ldt;

@@ -66,13 +66,15 @@ static inline bool vcpu_is_preempted(long cpu)
 #ifdef CONFIG_PARAVIRT
 /*
- * virt_spin_lock_key - enables (by default) the virt_spin_lock() hijack.
+ * virt_spin_lock_key - disables by default the virt_spin_lock() hijack.
  *
- * Native (and PV wanting native due to vCPU pinning) should disable this key.
- * It is done in this backwards fashion to only have a single direction change,
- * which removes ordering between native_pv_spin_init() and HV setup.
+ * Native (and PV wanting native due to vCPU pinning) should keep this key
+ * disabled. Native does not touch the key.
+ *
+ * When in a guest then native_pv_lock_init() enables the key first and
+ * KVM/XEN might conditionally disable it later in the boot process again.
  */
-DECLARE_STATIC_KEY_TRUE(virt_spin_lock_key);
+DECLARE_STATIC_KEY_FALSE(virt_spin_lock_key);

 /*
  * Shortcut for the queued_spin_lock_slowpath() function that allows

@@ -19,7 +19,7 @@
 static u64 acpi_mp_wake_mailbox_paddr __ro_after_init;

 /* Virtual address of the Multiprocessor Wakeup Structure mailbox */
-static struct acpi_madt_multiproc_wakeup_mailbox *acpi_mp_wake_mailbox __ro_after_init;
+static struct acpi_madt_multiproc_wakeup_mailbox *acpi_mp_wake_mailbox;

 static u64 acpi_mp_pgd __ro_after_init;
 static u64 acpi_mp_reset_vector_paddr __ro_after_init;

@@ -609,7 +609,7 @@ void mtrr_save_state(void)
 {
 	int first_cpu;

-	if (!mtrr_enabled())
+	if (!mtrr_enabled() || !mtrr_state.have_fixed)
 		return;

 	first_cpu = cpumask_first(cpu_online_mask);

@@ -51,13 +51,12 @@ DEFINE_ASM_FUNC(pv_native_irq_enable, "sti", .noinstr.text);
 DEFINE_ASM_FUNC(pv_native_read_cr2, "mov %cr2, %rax", .noinstr.text);
 #endif

-DEFINE_STATIC_KEY_TRUE(virt_spin_lock_key);
+DEFINE_STATIC_KEY_FALSE(virt_spin_lock_key);

 void __init native_pv_lock_init(void)
 {
-	if (IS_ENABLED(CONFIG_PARAVIRT_SPINLOCKS) &&
-	    !boot_cpu_has(X86_FEATURE_HYPERVISOR))
-		static_branch_disable(&virt_spin_lock_key);
+	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
+		static_branch_enable(&virt_spin_lock_key);
 }

 static void native_tlb_remove_table(struct mmu_gather *tlb, void *table)

@@ -286,7 +286,6 @@ static inline int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
 	return HV_STATUS_ACCESS_DENIED;
 }
 static inline void kvm_hv_vcpu_purge_flush_tlb(struct kvm_vcpu *vcpu) {}
-static inline void kvm_hv_free_pa_page(struct kvm *kvm) {}
 static inline bool kvm_hv_synic_has_vector(struct kvm_vcpu *vcpu, int vector)
 {
 	return false;

@@ -351,10 +351,8 @@ static void kvm_recalculate_logical_map(struct kvm_apic_map *new,
 	 * reversing the LDR calculation to get cluster of APICs, i.e. no
 	 * additional work is required.
 	 */
-	if (apic_x2apic_mode(apic)) {
-		WARN_ON_ONCE(ldr != kvm_apic_calc_x2apic_ldr(kvm_x2apic_id(apic)));
+	if (apic_x2apic_mode(apic))
 		return;
-	}

 	if (WARN_ON_ONCE(!kvm_apic_map_get_logical_dest(new, ldr,
							&cluster, &mask))) {
@@ -2966,18 +2964,28 @@ static int kvm_apic_state_fixup(struct kvm_vcpu *vcpu,
				struct kvm_lapic_state *s, bool set)
 {
 	if (apic_x2apic_mode(vcpu->arch.apic)) {
+		u32 x2apic_id = kvm_x2apic_id(vcpu->arch.apic);
 		u32 *id = (u32 *)(s->regs + APIC_ID);
 		u32 *ldr = (u32 *)(s->regs + APIC_LDR);
 		u64 icr;

 		if (vcpu->kvm->arch.x2apic_format) {
-			if (*id != vcpu->vcpu_id)
+			if (*id != x2apic_id)
 				return -EINVAL;
 		} else {
+			/*
+			 * Ignore the userspace value when setting APIC state.
+			 * KVM's model is that the x2APIC ID is readonly, e.g.
+			 * KVM only supports delivering interrupts to KVM's
+			 * version of the x2APIC ID. However, for backwards
+			 * compatibility, don't reject attempts to set a
+			 * mismatched ID for userspace that hasn't opted into
+			 * x2apic_format.
+			 */
 			if (set)
-				*id >>= 24;
+				*id = x2apic_id;
 			else
-				*id <<= 24;
+				*id = x2apic_id << 24;
 		}

 		/*
@@ -2986,7 +2994,7 @@ static int kvm_apic_state_fixup(struct kvm_vcpu *vcpu,
		 * split to ICR+ICR2 in userspace for backwards compatibility.
		 */
 		if (set) {
-			*ldr = kvm_apic_calc_x2apic_ldr(*id);
+			*ldr = kvm_apic_calc_x2apic_ldr(x2apic_id);

 			icr = __kvm_lapic_get_reg(s->regs, APIC_ICR) |
			      (u64)__kvm_lapic_get_reg(s->regs, APIC_ICR2) << 32;

@@ -2276,7 +2276,7 @@ static int sev_gmem_post_populate(struct kvm *kvm, gfn_t gfn_start, kvm_pfn_t pf
 	for (gfn = gfn_start, i = 0; gfn < gfn_start + npages; gfn++, i++) {
 		struct sev_data_snp_launch_update fw_args = {0};
-		bool assigned;
+		bool assigned = false;
 		int level;

 		ret = snp_lookup_rmpentry((u64)pfn + i, &assigned, &level);
@@ -2290,9 +2290,10 @@ static int sev_gmem_post_populate(struct kvm *kvm, gfn_t gfn_start, kvm_pfn_t pf
 		if (src) {
 			void *vaddr = kmap_local_pfn(pfn + i);

-			ret = copy_from_user(vaddr, src + i * PAGE_SIZE, PAGE_SIZE);
-			if (ret)
+			if (copy_from_user(vaddr, src + i * PAGE_SIZE, PAGE_SIZE)) {
+				ret = -EFAULT;
 				goto err;
+			}
 			kunmap_local(vaddr);
 		}

@@ -427,8 +427,7 @@ static void kvm_user_return_msr_cpu_online(void)
 int kvm_set_user_return_msr(unsigned slot, u64 value, u64 mask)
 {
-	unsigned int cpu = smp_processor_id();
-	struct kvm_user_return_msrs *msrs = per_cpu_ptr(user_return_msrs, cpu);
+	struct kvm_user_return_msrs *msrs = this_cpu_ptr(user_return_msrs);
 	int err;

 	value = (value & mask) | (msrs->values[slot].host & ~mask);
@@ -450,8 +449,7 @@ EXPORT_SYMBOL_GPL(kvm_set_user_return_msr);
 static void drop_user_return_notifiers(void)
 {
-	unsigned int cpu = smp_processor_id();
-	struct kvm_user_return_msrs *msrs = per_cpu_ptr(user_return_msrs, cpu);
+	struct kvm_user_return_msrs *msrs = this_cpu_ptr(user_return_msrs);

 	if (msrs->registered)
 		kvm_on_user_return(&msrs->urn);

@@ -241,7 +241,7 @@ static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address)
  *
  * Returns a pointer to a PTE on success, or NULL on failure.
  */
-static pte_t *pti_user_pagetable_walk_pte(unsigned long address)
+static pte_t *pti_user_pagetable_walk_pte(unsigned long address, bool late_text)
 {
 	gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
 	pmd_t *pmd;
@@ -251,10 +251,15 @@ static pte_t *pti_user_pagetable_walk_pte(unsigned long address)
 	if (!pmd)
 		return NULL;

-	/* We can't do anything sensible if we hit a large mapping. */
+	/* Large PMD mapping found */
 	if (pmd_leaf(*pmd)) {
-		WARN_ON(1);
-		return NULL;
+		/* Clear the PMD if we hit a large mapping from the first round */
+		if (late_text) {
+			set_pmd(pmd, __pmd(0));
+		} else {
+			WARN_ON_ONCE(1);
+			return NULL;
+		}
 	}

 	if (pmd_none(*pmd)) {
@@ -283,7 +288,7 @@ static void __init pti_setup_vsyscall(void)
 	if (!pte || WARN_ON(level != PG_LEVEL_4K) || pte_none(*pte))
 		return;

-	target_pte = pti_user_pagetable_walk_pte(VSYSCALL_ADDR);
+	target_pte = pti_user_pagetable_walk_pte(VSYSCALL_ADDR, false);
 	if (WARN_ON(!target_pte))
 		return;
@@ -301,7 +306,7 @@ enum pti_clone_level {
 static void
 pti_clone_pgtable(unsigned long start, unsigned long end,
-		  enum pti_clone_level level)
+		  enum pti_clone_level level, bool late_text)
 {
 	unsigned long addr;
@@ -390,7 +395,7 @@ pti_clone_pgtable(unsigned long start, unsigned long end,
 				return;

 			/* Allocate PTE in the user page-table */
-			target_pte = pti_user_pagetable_walk_pte(addr);
+			target_pte = pti_user_pagetable_walk_pte(addr, late_text);
 			if (WARN_ON(!target_pte))
 				return;
@@ -452,7 +457,7 @@ static void __init pti_clone_user_shared(void)
 		phys_addr_t pa = per_cpu_ptr_to_phys((void *)va);
 		pte_t *target_pte;

-		target_pte = pti_user_pagetable_walk_pte(va);
+		target_pte = pti_user_pagetable_walk_pte(va, false);
 		if (WARN_ON(!target_pte))
 			return;
@@ -475,7 +480,7 @@ static void __init pti_clone_user_shared(void)
 	start = CPU_ENTRY_AREA_BASE;
 	end = start + (PAGE_SIZE * CPU_ENTRY_AREA_PAGES);

-	pti_clone_pgtable(start, end, PTI_CLONE_PMD);
+	pti_clone_pgtable(start, end, PTI_CLONE_PMD, false);
 }
 #endif /* CONFIG_X86_64 */
@@ -492,11 +497,11 @@ static void __init pti_setup_espfix64(void)
 /*
  * Clone the populated PMDs of the entry text and force it RO.
  */
-static void pti_clone_entry_text(void)
+static void pti_clone_entry_text(bool late)
 {
 	pti_clone_pgtable((unsigned long) __entry_text_start,
 			  (unsigned long) __entry_text_end,
-			  PTI_LEVEL_KERNEL_IMAGE);
+			  PTI_LEVEL_KERNEL_IMAGE, late);
 }

 /*
@@ -571,7 +576,7 @@ static void pti_clone_kernel_text(void)
 	 * pti_set_kernel_image_nonglobal() did to clear the
 	 * global bit.
 	 */
-	pti_clone_pgtable(start, end_clone, PTI_LEVEL_KERNEL_IMAGE);
+	pti_clone_pgtable(start, end_clone, PTI_LEVEL_KERNEL_IMAGE, false);

 	/*
 	 * pti_clone_pgtable() will set the global bit in any PMDs
@@ -638,8 +643,15 @@ void __init pti_init(void)
 	/* Undo all global bits from the init pagetables in head_64.S: */
 	pti_set_kernel_image_nonglobal();

 	/* Replace some of the global bits just for shared entry text: */
-	pti_clone_entry_text();
+	/*
+	 * This is very early in boot. Device and Late initcalls can do
+	 * modprobe before free_initmem() and mark_readonly(). This
+	 * pti_clone_entry_text() allows those user-mode-helpers to function,
+	 * but notably the text is still RW.
+	 */
+	pti_clone_entry_text(false);
+
 	pti_setup_espfix64();
 	pti_setup_vsyscall();
 }
@@ -656,10 +668,11 @@ void pti_finalize(void)
 	if (!boot_cpu_has(X86_FEATURE_PTI))
 		return;
 	/*
-	 * We need to clone everything (again) that maps parts of the
-	 * kernel image.
+	 * This is after free_initmem() (all initcalls are done) and we've done
+	 * mark_readonly(). Text is now NX which might've split some PMDs
+	 * relative to the early clone.
 	 */
-	pti_clone_entry_text();
+	pti_clone_entry_text(true);
 	pti_clone_kernel_text();

 	debug_checkwx_user();
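
To make the late_text logic easier to follow: the early clone treats a leaf (large) PMD as a bug and bails, while the late pass may legitimately meet one, since mark_readonly() can have split or merged mappings, and simply clears it so the walk repopulates at PTE granularity. A toy model of just that decision (illustrative only, not the real page-table code):

```c
#include <stdbool.h>
#include <stdio.h>

/* Toy model of the two-phase clone above: early pass -> warn and give
 * up on a leaf PMD; late pass -> clear it so it can be re-split. */
static bool handle_leaf_pmd(unsigned long *pmd, bool late_text)
{
	if (late_text) {
		*pmd = 0;	/* clear, caller re-populates with PTEs */
		return true;
	}
	fprintf(stderr, "WARN: unexpected large mapping on early pass\n");
	return false;		/* caller abandons this address */
}

int main(void)
{
	unsigned long pmd = 0x200000;	/* pretend leaf entry */

	printf("early: %d\n", handle_leaf_pmd(&pmd, false));
	pmd = 0x200000;
	printf("late:  %d (pmd now %#lx)\n", handle_leaf_pmd(&pmd, true), pmd);
	return 0;
}
```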

@@ -31,14 +31,6 @@ static struct workqueue_struct *kthrotld_workqueue;

 #define rb_entry_tg(node)	rb_entry((node), struct throtl_grp, rb_node)

-/* We measure latency for request size from <= 4k to >= 1M */
-#define LATENCY_BUCKET_SIZE 9
-
-struct latency_bucket {
-	unsigned long total_latency; /* ns / 1024 */
-	int samples;
-};
-
 struct throtl_data
 {
 	/* service tree for active throtl groups */
@@ -116,9 +108,6 @@ static unsigned int tg_iops_limit(struct throtl_grp *tg, int rw)
 	return tg->iops[rw];
 }

-#define request_bucket_index(sectors) \
-	clamp_t(int, order_base_2(sectors) - 3, 0, LATENCY_BUCKET_SIZE - 1)
-
 /**
  * throtl_log - log debug message via blktrace
  * @sq: the service_queue being reported

@@ -1044,13 +1044,13 @@ static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
 }

 /* Find the smallest unused descriptor the "slow way" */
-static u32 slow_desc_lookup_olocked(struct binder_proc *proc)
+static u32 slow_desc_lookup_olocked(struct binder_proc *proc, u32 offset)
 {
 	struct binder_ref *ref;
 	struct rb_node *n;
 	u32 desc;

-	desc = 1;
+	desc = offset;
 	for (n = rb_first(&proc->refs_by_desc); n; n = rb_next(n)) {
 		ref = rb_entry(n, struct binder_ref, rb_node_desc);
 		if (ref->data.desc > desc)
@@ -1071,21 +1071,18 @@ static int get_ref_desc_olocked(struct binder_proc *proc,
 				u32 *desc)
 {
 	struct dbitmap *dmap = &proc->dmap;
+	unsigned int nbits, offset;
 	unsigned long *new, bit;
-	unsigned int nbits;

 	/* 0 is reserved for the context manager */
-	if (node == proc->context->binder_context_mgr_node) {
-		*desc = 0;
-		return 0;
-	}
+	offset = (node == proc->context->binder_context_mgr_node) ? 0 : 1;

 	if (!dbitmap_enabled(dmap)) {
-		*desc = slow_desc_lookup_olocked(proc);
+		*desc = slow_desc_lookup_olocked(proc, offset);
 		return 0;
 	}

-	if (dbitmap_acquire_first_zero_bit(dmap, &bit) == 0) {
+	if (dbitmap_acquire_next_zero_bit(dmap, offset, &bit) == 0) {
 		*desc = bit;
 		return 0;
 	}

@@ -939,9 +939,9 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
 			__free_page(alloc->pages[i].page_ptr);
 			page_count++;
 		}
-		kvfree(alloc->pages);
 	}
 	spin_unlock(&alloc->lock);
+	kvfree(alloc->pages);
 	if (alloc->mm)
 		mmdrop(alloc->mm);

@@ -6,8 +6,7 @@
  *
  * Used by the binder driver to optimize the allocation of the smallest
  * available descriptor ID. Each bit in the bitmap represents the state
- * of an ID, with the exception of BIT(0) which is used exclusively to
- * reference binder's context manager.
+ * of an ID.
  *
  * A dbitmap can grow or shrink as needed. This part has been designed
 * considering that users might need to briefly release their locks in
@@ -58,11 +57,7 @@ static inline unsigned int dbitmap_shrink_nbits(struct dbitmap *dmap)
 	if (bit < (dmap->nbits >> 2))
 		return dmap->nbits >> 1;

-	/*
-	 * Note that find_last_bit() returns dmap->nbits when no bits
-	 * are set. While this is technically not possible here since
-	 * BIT(0) is always set, this check is left for extra safety.
-	 */
+	/* find_last_bit() returns dmap->nbits when no bits are set. */
 	if (bit == dmap->nbits)
 		return NBITS_MIN;
@@ -132,16 +127,17 @@ dbitmap_grow(struct dbitmap *dmap, unsigned long *new, unsigned int nbits)
 }

 /*
- * Finds and sets the first zero bit in the bitmap. Upon success @bit
+ * Finds and sets the next zero bit in the bitmap. Upon success @bit
  * is populated with the index and 0 is returned. Otherwise, -ENOSPC
  * is returned to indicate that a dbitmap_grow() is needed.
  */
 static inline int
-dbitmap_acquire_first_zero_bit(struct dbitmap *dmap, unsigned long *bit)
+dbitmap_acquire_next_zero_bit(struct dbitmap *dmap, unsigned long offset,
+			      unsigned long *bit)
 {
 	unsigned long n;

-	n = find_first_zero_bit(dmap->map, dmap->nbits);
+	n = find_next_zero_bit(dmap->map, dmap->nbits, offset);
 	if (n == dmap->nbits)
 		return -ENOSPC;
@@ -154,9 +150,7 @@ dbitmap_acquire_first_zero_bit(struct dbitmap *dmap, unsigned long *bit)
 static inline void
 dbitmap_clear_bit(struct dbitmap *dmap, unsigned long bit)
 {
-	/* BIT(0) should always set for the context manager */
-	if (bit)
-		clear_bit(bit, dmap->map);
+	clear_bit(bit, dmap->map);
 }

 static inline int dbitmap_init(struct dbitmap *dmap)
@@ -168,8 +162,6 @@ static inline int dbitmap_init(struct dbitmap *dmap)
 	}

 	dmap->nbits = NBITS_MIN;
-	/* BIT(0) is reserved for the context manager */
-	set_bit(0, dmap->map);

 	return 0;
 }
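
The offset-based search is the heart of this series of changes: descriptor 0 stays reserved for the context manager by starting the scan at 1, instead of permanently pre-setting BIT(0). A self-contained sketch of the same idea on a single 64-bit word (a simplification of the kernel's find_next_zero_bit(), names mine):

```c
#include <stdint.h>
#include <stdio.h>

#define NBITS 64

/* Minimal model of dbitmap_acquire_next_zero_bit(): find the first clear
 * bit at or after @offset, set it, and report its index. */
static int acquire_next_zero_bit(uint64_t *map, unsigned int offset,
				 unsigned int *bit)
{
	for (unsigned int n = offset; n < NBITS; n++) {
		if (!(*map & (1ull << n))) {
			*map |= 1ull << n;
			*bit = n;
			return 0;
		}
	}
	return -1;	/* caller would grow the bitmap (-ENOSPC) */
}

int main(void)
{
	uint64_t map = 0;
	unsigned int bit;

	/* offset 1 keeps descriptor 0 free for the context manager,
	 * exactly what the new calling convention achieves. */
	while (acquire_next_zero_bit(&map, 1, &bit) == 0 && bit < 5)
		printf("acquired desc %u\n", bit);
	return 0;
}
```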

@@ -951,8 +951,19 @@ static void ata_gen_passthru_sense(struct ata_queued_cmd *qc)
 			   &sense_key, &asc, &ascq);
 		ata_scsi_set_sense(qc->dev, cmd, sense_key, asc, ascq);
 	} else {
-		/* ATA PASS-THROUGH INFORMATION AVAILABLE */
-		ata_scsi_set_sense(qc->dev, cmd, RECOVERED_ERROR, 0, 0x1D);
+		/*
+		 * ATA PASS-THROUGH INFORMATION AVAILABLE
+		 *
+		 * Note: we are supposed to call ata_scsi_set_sense(), which
+		 * respects the D_SENSE bit, instead of unconditionally
+		 * generating the sense data in descriptor format. However,
+		 * because hdparm, hddtemp, and udisks incorrectly assume sense
+		 * data in descriptor format, without even looking at the
+		 * RESPONSE CODE field in the returned sense data (to see which
+		 * format the returned sense data is in), we are stuck with
+		 * being bug compatible with older kernels.
+		 */
+		scsi_build_sense(cmd, 1, RECOVERED_ERROR, 0, 0x1D);
 	}
 }

@@ -1118,8 +1118,8 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
 	rpp->len += skb->len;

 	if (stat & SAR_RSQE_EPDU) {
+		unsigned int len, truesize;
 		unsigned char *l1l2;
-		unsigned int len;

 		l1l2 = (unsigned char *) ((unsigned long) skb->data + skb->len - 6);
@@ -1189,14 +1189,15 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
 		ATM_SKB(skb)->vcc = vcc;
 		__net_timestamp(skb);

+		truesize = skb->truesize;
 		vcc->push(vcc, skb);
 		atomic_inc(&vcc->stats->rx);

-		if (skb->truesize > SAR_FB_SIZE_3)
+		if (truesize > SAR_FB_SIZE_3)
 			add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
-		else if (skb->truesize > SAR_FB_SIZE_2)
+		else if (truesize > SAR_FB_SIZE_2)
 			add_rx_skb(card, 2, SAR_FB_SIZE_2, 1);
-		else if (skb->truesize > SAR_FB_SIZE_1)
+		else if (truesize > SAR_FB_SIZE_1)
 			add_rx_skb(card, 1, SAR_FB_SIZE_1, 1);
 		else
 			add_rx_skb(card, 0, SAR_FB_SIZE_0, 1);
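
The pattern here generalizes: vcc->push() hands the skb to the upper layer, which may free it, so any field needed afterwards must be read before the call. A minimal illustration of caching a field across an ownership transfer (userspace model, all names invented):

```c
#include <stdio.h>
#include <stdlib.h>

struct buf { unsigned int truesize; };

/* Models vcc->push(): ownership transfers and the buffer may be freed,
 * so any later dereference of it would be a use-after-free. */
static void push(struct buf *b) { free(b); }

int main(void)
{
	struct buf *skb = malloc(sizeof(*skb));
	if (!skb)
		return 1;
	skb->truesize = 4096;

	/* Snapshot the field while we still own the buffer... */
	unsigned int truesize = skb->truesize;
	push(skb);
	/* ...and use only the snapshot afterwards; skb is gone now. */
	printf("replenish pool for truesize %u\n", truesize);
	return 0;
}
```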

@@ -25,6 +25,7 @@
 #include <linux/mutex.h>
 #include <linux/pm_runtime.h>
 #include <linux/netdevice.h>
+#include <linux/rcupdate.h>
 #include <linux/sched/signal.h>
 #include <linux/sched/mm.h>
 #include <linux/string_helpers.h>
@@ -2640,6 +2641,7 @@ static const char *dev_uevent_name(const struct kobject *kobj)
 static int dev_uevent(const struct kobject *kobj, struct kobj_uevent_env *env)
 {
 	const struct device *dev = kobj_to_dev(kobj);
+	struct device_driver *driver;
 	int retval = 0;

 	/* add device node properties if present */
@@ -2668,8 +2670,12 @@ static int dev_uevent(const struct kobject *kobj, struct kobj_uevent_env *env)
 	if (dev->type && dev->type->name)
 		add_uevent_var(env, "DEVTYPE=%s", dev->type->name);

-	if (dev->driver)
-		add_uevent_var(env, "DRIVER=%s", dev->driver->name);
+	/* Synchronize with module_remove_driver() */
+	rcu_read_lock();
+	driver = READ_ONCE(dev->driver);
+	if (driver)
+		add_uevent_var(env, "DRIVER=%s", driver->name);
+	rcu_read_unlock();

 	/* Add common DT information about the device */
 	of_device_uevent(dev, env);
@@ -2739,11 +2745,8 @@ static ssize_t uevent_show(struct device *dev, struct device_attribute *attr,
 	if (!env)
 		return -ENOMEM;

-	/* Synchronize with really_probe() */
-	device_lock(dev);
 	/* let the kset specific function add its keys */
 	retval = kset->uevent_ops->uevent(&dev->kobj, env);
-	device_unlock(dev);
 	if (retval)
 		goto out;

@@ -7,6 +7,7 @@
 #include <linux/errno.h>
 #include <linux/slab.h>
 #include <linux/string.h>
+#include <linux/rcupdate.h>
 #include "base.h"

 static char *make_driver_name(const struct device_driver *drv)
@@ -97,6 +98,9 @@ void module_remove_driver(const struct device_driver *drv)
 	if (!drv)
 		return;

+	/* Synchronize with dev_uevent() */
+	synchronize_rcu();
+
 	sysfs_remove_link(&drv->p->kobj, "module");

 	if (drv->owner)
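
These two files form the classic RCU pairing: dev_uevent() snapshots dev->driver once under rcu_read_lock(), and module_remove_driver() waits out a grace period before tearing the driver down. A compressed, single-threaded model of the shape (C11 atomics stand in for READ_ONCE(); the grace period is only a comment here, this is not a working RCU implementation):

```c
#include <stdatomic.h>
#include <stdio.h>

struct driver { const char *name; };

static _Atomic(struct driver *) dev_driver;

/* Reader side: take one snapshot and test/use only the snapshot, so a
 * concurrent writer clearing the pointer cannot yank it mid-use. */
static void emit_uevent(void)
{
	struct driver *drv = atomic_load(&dev_driver);	/* READ_ONCE() */
	if (drv)
		printf("DRIVER=%s\n", drv->name);
}

/* Writer side: unpublish first, then wait for readers to drain (the
 * synchronize_rcu() step, elided in this model) before freeing. */
static void remove_driver(void)
{
	atomic_store(&dev_driver, NULL);
	/* synchronize_rcu(); ...no reader can still see the pointer */
}

int main(void)
{
	static struct driver d = { "example" };

	atomic_store(&dev_driver, &d);
	emit_uevent();
	remove_driver();
	emit_uevent();	/* prints nothing: pointer already unpublished */
	return 0;
}
```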

@@ -421,4 +421,5 @@ static void __exit ds1620_exit(void)
 module_init(ds1620_init);
 module_exit(ds1620_exit);

+MODULE_DESCRIPTION("Dallas Semiconductor DS1620 thermometer driver");
 MODULE_LICENSE("GPL");

@@ -241,6 +241,7 @@ static void __exit nwbutton_exit (void)

 MODULE_AUTHOR("Alex Holden");
+MODULE_DESCRIPTION("NetWinder button driver");
 MODULE_LICENSE("GPL");

 module_init(nwbutton_init);

@@ -618,6 +618,7 @@ static void __exit nwflash_exit(void)
 	iounmap((void *)FLASH_BASE);
 }

+MODULE_DESCRIPTION("NetWinder flash memory driver");
 MODULE_LICENSE("GPL");

 module_param(flashdebug, bool, 0644);

@@ -3405,6 +3405,7 @@ static const struct x86_cpu_id intel_epp_default[] = {
 	 */
 	X86_MATCH_VFM(INTEL_ALDERLAKE_L, HWP_SET_DEF_BALANCE_PERF_EPP(102)),
 	X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X, HWP_SET_DEF_BALANCE_PERF_EPP(32)),
+	X86_MATCH_VFM(INTEL_EMERALDRAPIDS_X, HWP_SET_DEF_BALANCE_PERF_EPP(32)),
 	X86_MATCH_VFM(INTEL_METEORLAKE_L, HWP_SET_EPP_VALUES(HWP_EPP_POWERSAVE,
 							     179, 64, 16)),
 	X86_MATCH_VFM(INTEL_ARROWLAKE, HWP_SET_EPP_VALUES(HWP_EPP_POWERSAVE,

@@ -1444,5 +1444,6 @@ static void fsi_exit(void)
 }
 module_exit(fsi_exit);
 module_param(discard_errors, int, 0664);
+MODULE_DESCRIPTION("FSI core driver");
 MODULE_LICENSE("GPL");
 MODULE_PARM_DESC(discard_errors, "Don't invoke error handling on bus accesses");

@@ -670,4 +670,5 @@ static struct platform_driver fsi_master_aspeed_driver = {
 };

 module_platform_driver(fsi_master_aspeed_driver);
+MODULE_DESCRIPTION("FSI master driver for AST2600");
 MODULE_LICENSE("GPL");

@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0+
 // Copyright 2018 IBM Corp
 /*
- * A FSI master controller, using a simple GPIO bit-banging interface
+ * A FSI master based on Aspeed ColdFire coprocessor
 */

 #include <linux/crc4.h>
@@ -1438,5 +1438,6 @@ static struct platform_driver fsi_master_acf = {
 };

 module_platform_driver(fsi_master_acf);
+MODULE_DESCRIPTION("A FSI master based on Aspeed ColdFire coprocessor");
 MODULE_LICENSE("GPL");
 MODULE_FIRMWARE(FW_FILE_NAME);

@@ -892,4 +892,5 @@ static struct platform_driver fsi_master_gpio_driver = {
 };

 module_platform_driver(fsi_master_gpio_driver);
+MODULE_DESCRIPTION("A FSI master controller, using a simple GPIO bit-banging interface");
 MODULE_LICENSE("GPL");

@@ -295,4 +295,5 @@ static struct fsi_driver hub_master_driver = {
 };

 module_fsi_driver(hub_master_driver);
+MODULE_DESCRIPTION("FSI hub master driver");
 MODULE_LICENSE("GPL");

@@ -625,4 +625,5 @@ static void scom_exit(void)
 module_init(scom_init);
 module_exit(scom_exit);
+MODULE_DESCRIPTION("SCOM FSI Client device driver");
 MODULE_LICENSE("GPL");

@@ -156,6 +156,8 @@ struct amdgpu_gmc_funcs {
 			      uint64_t addr, uint64_t *flags);
 	/* get the amount of memory used by the vbios for pre-OS console */
 	unsigned int (*get_vbios_fb_size)(struct amdgpu_device *adev);
+	/* get the DCC buffer alignment */
+	unsigned int (*get_dcc_alignment)(struct amdgpu_device *adev);

 	enum amdgpu_memory_partition (*query_mem_partition_mode)(
 		struct amdgpu_device *adev);
@@ -363,6 +365,10 @@ struct amdgpu_gmc {
 	(adev)->gmc.gmc_funcs->override_vm_pte_flags			\
 		((adev), (vm), (addr), (pte_flags))
 #define amdgpu_gmc_get_vbios_fb_size(adev) (adev)->gmc.gmc_funcs->get_vbios_fb_size((adev))
+#define amdgpu_gmc_get_dcc_alignment(adev) ({ \
+	typeof(adev) _adev = (adev); \
+	_adev->gmc.gmc_funcs->get_dcc_alignment(_adev); \
+})

 /**
  * amdgpu_gmc_vram_full_visible - Check if full VRAM is visible through the BAR

@@ -264,9 +264,8 @@ amdgpu_job_prepare_job(struct drm_sched_job *sched_job,
 	struct dma_fence *fence = NULL;
 	int r;

-	/* Ignore soft recovered fences here */
 	r = drm_sched_entity_error(s_entity);
-	if (r && r != -ENODATA)
+	if (r)
 		goto error;

 	if (!fence && job->gang_submit)

@@ -456,6 +456,7 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
 	u64 vis_usage = 0, max_bytes, min_block_size;
 	struct amdgpu_vram_mgr_resource *vres;
 	u64 size, remaining_size, lpfn, fpfn;
+	unsigned int adjust_dcc_size = 0;
 	struct drm_buddy *mm = &mgr->mm;
 	struct drm_buddy_block *block;
 	unsigned long pages_per_block;
@@ -511,7 +512,19 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
 	/* Allocate blocks in desired range */
 	vres->flags |= DRM_BUDDY_RANGE_ALLOCATION;

+	if (bo->flags & AMDGPU_GEM_CREATE_GFX12_DCC &&
+	    adev->gmc.gmc_funcs->get_dcc_alignment)
+		adjust_dcc_size = amdgpu_gmc_get_dcc_alignment(adev);
+
 	remaining_size = (u64)vres->base.size;
+	if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS && adjust_dcc_size) {
+		unsigned int dcc_size;
+
+		dcc_size = roundup_pow_of_two(vres->base.size + adjust_dcc_size);
+		remaining_size = (u64)dcc_size;
+
+		vres->flags |= DRM_BUDDY_TRIM_DISABLE;
+	}

 	mutex_lock(&mgr->lock);
 	while (remaining_size) {
@@ -521,8 +534,11 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
 			min_block_size = mgr->default_page_size;

 		size = remaining_size;
-		if ((size >= (u64)pages_per_block << PAGE_SHIFT) &&
-		    !(size & (((u64)pages_per_block << PAGE_SHIFT) - 1)))
+
+		if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS && adjust_dcc_size)
+			min_block_size = size;
+		else if ((size >= (u64)pages_per_block << PAGE_SHIFT) &&
+			 !(size & (((u64)pages_per_block << PAGE_SHIFT) - 1)))
 			min_block_size = (u64)pages_per_block << PAGE_SHIFT;

 		BUG_ON(min_block_size < mm->chunk_size);
@@ -553,6 +569,22 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
 	}
 	mutex_unlock(&mgr->lock);

+	if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS && adjust_dcc_size) {
+		struct drm_buddy_block *dcc_block;
+		unsigned long dcc_start;
+		u64 trim_start;
+
+		dcc_block = amdgpu_vram_mgr_first_block(&vres->blocks);
+		/* Adjust the start address for DCC buffers only */
+		dcc_start =
+			roundup((unsigned long)amdgpu_vram_mgr_block_start(dcc_block),
+				adjust_dcc_size);
+		trim_start = (u64)dcc_start;
+		drm_buddy_block_trim(mm, &trim_start,
+				     (u64)vres->base.size,
+				     &vres->blocks);
+	}
+
 	vres->base.start = 0;
 	size = max_t(u64, amdgpu_vram_mgr_blocks_size(&vres->blocks),
 		     vres->base.size);

@@ -202,6 +202,12 @@ static const struct amdgpu_hwip_reg_entry gc_gfx_queue_reg_list_12[] = {
 	SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BUFSZ)
 };

+static const struct soc15_reg_golden golden_settings_gc_12_0[] = {
+	SOC15_REG_GOLDEN_VALUE(GC, 0, regDB_MEM_CONFIG, 0x0000000f, 0x0000000f),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, regCB_HW_CONTROL_1, 0x03000000, 0x03000000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, regGL2C_CTRL5, 0x00000070, 0x00000020)
+};
+
 #define DEFAULT_SH_MEM_CONFIG \
 	((SH_MEM_ADDRESS_MODE_64 << SH_MEM_CONFIG__ADDRESS_MODE__SHIFT) | \
 	 (SH_MEM_ALIGNMENT_MODE_UNALIGNED << SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) | \
@@ -3432,6 +3438,24 @@ static void gfx_v12_0_disable_gpa_mode(struct amdgpu_device *adev)
 	WREG32_SOC15(GC, 0, regCPG_PSP_DEBUG, data);
 }

+static void gfx_v12_0_init_golden_registers(struct amdgpu_device *adev)
+{
+	if (amdgpu_sriov_vf(adev))
+		return;
+
+	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+	case IP_VERSION(12, 0, 0):
+	case IP_VERSION(12, 0, 1):
+		if (adev->rev_id == 0)
+			soc15_program_register_sequence(adev,
+					golden_settings_gc_12_0,
+					(const u32)ARRAY_SIZE(golden_settings_gc_12_0));
+		break;
+	default:
+		break;
+	}
+}
+
 static int gfx_v12_0_hw_init(void *handle)
 {
 	int r;
@@ -3472,6 +3496,9 @@ static int gfx_v12_0_hw_init(void *handle)
 		}
 	}

+	if (!amdgpu_emu_mode)
+		gfx_v12_0_init_golden_registers(adev);
+
 	adev->gfx.is_poweron = true;

 	if (get_gb_addr_config(adev))

@@ -542,6 +542,23 @@ static unsigned gmc_v12_0_get_vbios_fb_size(struct amdgpu_device *adev)
 	return 0;
 }

+static unsigned int gmc_v12_0_get_dcc_alignment(struct amdgpu_device *adev)
+{
+	unsigned int max_tex_channel_caches, alignment;
+
+	if (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(12, 0, 0) &&
+	    amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(12, 0, 1))
+		return 0;
+
+	max_tex_channel_caches = adev->gfx.config.max_texture_channel_caches;
+	if (is_power_of_2(max_tex_channel_caches))
+		alignment = (unsigned int)(max_tex_channel_caches / SZ_4);
+	else
+		alignment = roundup_pow_of_two(max_tex_channel_caches);
+
+	return (unsigned int)(alignment * max_tex_channel_caches * SZ_1K);
+}
+
 static const struct amdgpu_gmc_funcs gmc_v12_0_gmc_funcs = {
 	.flush_gpu_tlb = gmc_v12_0_flush_gpu_tlb,
 	.flush_gpu_tlb_pasid = gmc_v12_0_flush_gpu_tlb_pasid,
@@ -551,6 +568,7 @@ static const struct amdgpu_gmc_funcs gmc_v12_0_gmc_funcs = {
 	.get_vm_pde = gmc_v12_0_get_vm_pde,
 	.get_vm_pte = gmc_v12_0_get_vm_pte,
 	.get_vbios_fb_size = gmc_v12_0_get_vbios_fb_size,
+	.get_dcc_alignment = gmc_v12_0_get_dcc_alignment,
 };

 static void gmc_v12_0_set_gmc_funcs(struct amdgpu_device *adev)
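
For intuition on the numbers gmc_v12_0_get_dcc_alignment() produces, here is the same arithmetic as a standalone program (is_power_of_2() and roundup_pow_of_two() reimplemented; the channel-cache counts are made-up inputs):

```c
#include <stdio.h>

static int is_power_of_2(unsigned int n) { return n && !(n & (n - 1)); }

static unsigned int roundup_pow_of_two(unsigned int n)
{
	unsigned int r = 1;

	while (r < n)
		r <<= 1;
	return r;
}

/* Mirrors the kernel arithmetic: derive an alignment factor from the
 * texture channel cache count, then scale by the count and 1 KiB. */
static unsigned int dcc_alignment(unsigned int max_tcc)
{
	unsigned int alignment;

	if (is_power_of_2(max_tcc))
		alignment = max_tcc / 4;		/* SZ_4 */
	else
		alignment = roundup_pow_of_two(max_tcc);
	return alignment * max_tcc * 1024;		/* SZ_1K */
}

int main(void)
{
	for (unsigned int tcc = 12; tcc <= 16; tcc += 4)
		printf("tcc=%u -> alignment=%u bytes\n", tcc, dcc_alignment(tcc));
	return 0;
}
```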

@@ -80,7 +80,8 @@ static uint32_t mmhub_v4_1_0_get_invalidate_req(unsigned int vmid,
 	/* invalidate using legacy mode on vmid*/
 	req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ,
 			    PER_VMID_INVALIDATE_REQ, 1 << vmid);
-	req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type);
+	/* Only use legacy inv on mmhub side */
+	req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, 0);
 	req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
 	req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
 	req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);

@@ -1575,8 +1575,7 @@ static void sdma_v7_0_emit_copy_buffer(struct amdgpu_ib *ib,
 	ib->ptr[ib->length_dw++] = SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_COPY) |
 		SDMA_PKT_COPY_LINEAR_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR) |
 		SDMA_PKT_COPY_LINEAR_HEADER_TMZ((copy_flags & AMDGPU_COPY_FLAGS_TMZ) ? 1 : 0) |
-		SDMA_PKT_COPY_LINEAR_HEADER_CPV((copy_flags &
-			(AMDGPU_COPY_FLAGS_READ_DECOMPRESSED | AMDGPU_COPY_FLAGS_WRITE_COMPRESSED)) ? 1 : 0);
+		SDMA_PKT_COPY_LINEAR_HEADER_CPV(1);

 	ib->ptr[ib->length_dw++] = byte_count - 1;
 	ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
@@ -1590,6 +1589,8 @@ static void sdma_v7_0_emit_copy_buffer(struct amdgpu_ib *ib,
 			((copy_flags & AMDGPU_COPY_FLAGS_READ_DECOMPRESSED) ? SDMA_DCC_READ_CM(2) : 0) |
 			((copy_flags & AMDGPU_COPY_FLAGS_WRITE_COMPRESSED) ? SDMA_DCC_WRITE_CM(1) : 0) |
 			SDMA_DCC_MAX_COM(max_com) | SDMA_DCC_MAX_UCOM(1);
+	else
+		ib->ptr[ib->length_dw++] = 0;
 }

 /**
@@ -1616,7 +1617,7 @@ static void sdma_v7_0_emit_fill_buffer(struct amdgpu_ib *ib,
 static const struct amdgpu_buffer_funcs sdma_v7_0_buffer_funcs = {
 	.copy_max_bytes = 0x400000,
-	.copy_num_dw = 7,
+	.copy_num_dw = 8,
 	.emit_copy_buffer = sdma_v7_0_emit_copy_buffer,
 	.fill_max_bytes = 0x400000,
 	.fill_num_dw = 5,

@@ -1270,6 +1270,9 @@ static bool is_dsc_need_re_compute(
 		}
 	}

+	if (new_stream_on_link_num == 0)
+		return false;
+
 	/* check current_state if there stream on link but it is not in
 	 * new request state
 	 */

@@ -185,8 +185,7 @@ static bool dmub_replay_copy_settings(struct dmub_replay *dmub,
 	else
 		copy_settings_data->flags.bitfields.force_wakeup_by_tps3 = 0;

-	dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+	dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);

 	return true;
 }

@@ -83,6 +83,8 @@ CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn31/display_rq_dlg_calc_31.o := $(dml_rcfla
 CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn32/display_mode_vba_32.o := $(dml_rcflags)
 CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn32/display_rq_dlg_calc_32.o := $(dml_rcflags)
 CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn32/display_mode_vba_util_32.o := $(dml_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn314/display_mode_vba_314.o := $(dml_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn314/display_rq_dlg_calc_314.o := $(dml_rcflags)
 CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn301/dcn301_fpu.o := $(dml_rcflags)
 CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/display_mode_lib.o := $(dml_rcflags)
 CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dsc/rc_calc_fpu.o := $(dml_rcflags)

@@ -1402,6 +1402,8 @@ void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
 		if (hubbub && hubp) {
 			if (hubbub->funcs->program_det_size)
 				hubbub->funcs->program_det_size(hubbub, hubp->inst, 0);
+			if (hubbub->funcs->program_det_segments)
+				hubbub->funcs->program_det_segments(hubbub, hubp->inst, 0);
 		}
 	}

@@ -771,6 +771,8 @@ void dcn35_init_pipes(struct dc *dc, struct dc_state *context)
 		if (hubbub && hubp) {
 			if (hubbub->funcs->program_det_size)
 				hubbub->funcs->program_det_size(hubbub, hubp->inst, 0);
+			if (hubbub->funcs->program_det_segments)
+				hubbub->funcs->program_det_segments(hubbub, hubp->inst, 0);
 		}
 	}

@@ -723,6 +723,7 @@ static const struct dc_debug_options debug_defaults_drv = {
 	.min_prefetch_in_strobe_ns = 60000, // 60us
 	.disable_unbounded_requesting = false,
 	.enable_legacy_fast_update = false,
+	.dcc_meta_propagation_delay_us = 10,
 	.fams2_config = {
 		.bits = {
 			.enable = true,

@@ -138,7 +138,9 @@ void dcn401_prepare_mcache_programming(struct dc *dc, struct dc_state *context);
 	SRI_ARR(DCHUBP_MALL_CONFIG, HUBP, id), \
 	SRI_ARR(DCHUBP_VMPG_CONFIG, HUBP, id), \
 	SRI_ARR(UCLK_PSTATE_FORCE, HUBPREQ, id), \
-	HUBP_3DLUT_FL_REG_LIST_DCN401(id)
+	HUBP_3DLUT_FL_REG_LIST_DCN401(id), \
+	SRI_ARR(DCSURF_VIEWPORT_MCACHE_SPLIT_COORDINATE, HUBP, id), \
+	SRI_ARR(DCHUBP_MCACHEID_CONFIG, HUBP, id)

 /* ABM */
 #define ABM_DCN401_REG_LIST_RI(id) \

@@ -27,7 +27,8 @@

 #pragma pack(push, 1)

-#define SMU_14_0_2_TABLE_FORMAT_REVISION 3
+#define SMU_14_0_2_TABLE_FORMAT_REVISION 23
+#define SMU_14_0_2_CUSTOM_TABLE_FORMAT_REVISION 1

 // POWERPLAYTABLE::ulPlatformCaps
 #define SMU_14_0_2_PP_PLATFORM_CAP_POWERPLAY 0x1 // This cap indicates whether CCC need to show Powerplay page.
@@ -43,6 +44,7 @@
 #define SMU_14_0_2_PP_THERMALCONTROLLER_NONE 0

 #define SMU_14_0_2_PP_OVERDRIVE_VERSION 0x1 // TODO: FIX OverDrive Version TBD
+#define SMU_14_0_2_PP_CUSTOM_OVERDRIVE_VERSION 0x1
 #define SMU_14_0_2_PP_POWERSAVINGCLOCK_VERSION 0x01 // Power Saving Clock Table Version 1.00

 enum SMU_14_0_2_OD_SW_FEATURE_CAP
@@ -107,6 +109,7 @@ enum SMU_14_0_2_PWRMODE_SETTING
 	SMU_14_0_2_PMSETTING_ACOUSTIC_LIMIT_RPM_BALANCE,
 	SMU_14_0_2_PMSETTING_ACOUSTIC_LIMIT_RPM_TURBO,
 	SMU_14_0_2_PMSETTING_ACOUSTIC_LIMIT_RPM_RAGE,
+	SMU_14_0_2_PMSETTING_COUNT
 };
 #define SMU_14_0_2_MAX_PMSETTING 32 // Maximum Number of PowerMode Settings
@@ -127,17 +130,24 @@ struct smu_14_0_2_overdrive_table
 	int16_t pm_setting[SMU_14_0_2_MAX_PMSETTING]; // Optimized power mode feature settings
 };

+enum smu_14_0_3_pptable_source {
+	PPTABLE_SOURCE_IFWI             = 0,
+	PPTABLE_SOURCE_DRIVER_HARDCODED = 1,
+	PPTABLE_SOURCE_PPGEN_REGISTRY   = 2,
+	PPTABLE_SOURCE_MAX              = PPTABLE_SOURCE_PPGEN_REGISTRY,
+};
+
 struct smu_14_0_2_powerplay_table
 {
 	struct atom_common_table_header header; // header.format_revision = 3 (HAS TO MATCH SMU_14_0_2_TABLE_FORMAT_REVISION), header.content_revision = ? structuresize is calculated by PPGen.
 	uint8_t table_revision; // PPGen use only: table_revision = 3
-	uint8_t padding; // Padding 1 byte to align table_size offset to 6 bytes (pmfw_start_offset, for PMFW to know the starting offset of PPTable_t).
+	uint8_t pptable_source; // PPGen UI dropdown box
 	uint16_t pmfw_pptable_start_offset; // The start offset of the pmfw portion. i.e. start of PPTable_t (start of SkuTable_t)
 	uint16_t pmfw_pptable_size; // The total size of pmfw_pptable, i.e PPTable_t.
-	uint16_t pmfw_pfe_table_start_offset; // The start offset of the PFE_Settings_t within pmfw_pptable.
-	uint16_t pmfw_pfe_table_size; // The size of PFE_Settings_t.
-	uint16_t pmfw_board_table_start_offset; // The start offset of the BoardTable_t within pmfw_pptable.
+	uint16_t pmfw_sku_table_start_offset; // DO NOT CHANGE ORDER; The absolute start offset of the SkuTable_t (within smu_14_0_3_powerplay_table).
+	uint16_t pmfw_sku_table_size; // DO NOT CHANGE ORDER; The size of SkuTable_t.
+	uint16_t pmfw_board_table_start_offset; // The start offset of the BoardTable_t
 	uint16_t pmfw_board_table_size; // The size of BoardTable_t.
 	uint16_t pmfw_custom_sku_table_start_offset; // The start offset of the CustomSkuTable_t within pmfw_pptable.
 	uint16_t pmfw_custom_sku_table_size; // The size of the CustomSkuTable_t.
 	uint32_t golden_pp_id; // PPGen use only: PP Table ID on the Golden Data Base
@@ -159,6 +169,36 @@ struct smu_14_0_2_powerplay_table
 	PPTable_t smc_pptable; // PPTable_t in driver_if.h -- as requested by PMFW, this offset should start at a 32-byte boundary, and the table_size above should remain at offset=6 bytes
 };

+enum SMU_14_0_2_CUSTOM_OD_SW_FEATURE_CAP {
+	SMU_14_0_2_CUSTOM_ODCAP_POWER_MODE = 0,
+	SMU_14_0_2_CUSTOM_ODCAP_COUNT
+};
+
+enum SMU_14_0_2_CUSTOM_OD_FEATURE_SETTING_ID {
+	SMU_14_0_2_CUSTOM_ODSETTING_POWER_MODE = 0,
+	SMU_14_0_2_CUSTOM_ODSETTING_COUNT,
+};
+
+struct smu_14_0_2_custom_overdrive_table {
+	uint8_t revision;
+	uint8_t reserve[3];
+	uint8_t cap[SMU_14_0_2_CUSTOM_ODCAP_COUNT];
+	int32_t max[SMU_14_0_2_CUSTOM_ODSETTING_COUNT];
+	int32_t min[SMU_14_0_2_CUSTOM_ODSETTING_COUNT];
+	int16_t pm_setting[SMU_14_0_2_PMSETTING_COUNT];
+};
+
+struct smu_14_0_3_custom_powerplay_table {
+	uint8_t custom_table_revision;
+	uint16_t custom_table_size;
+	uint16_t custom_sku_table_offset;
+	uint32_t custom_platform_caps;
+	uint16_t software_shutdown_temp;
+	struct smu_14_0_2_custom_overdrive_table custom_overdrive_table;
+	uint32_t reserve[8];
+	CustomSkuTable_t custom_sku_table_pmfw;
+};
+
 #pragma pack(pop)

 #endif

@@ -1071,23 +1071,16 @@ int drm_atomic_set_property(struct drm_atomic_state *state,
 		}

 		if (async_flip &&
-		    prop != config->prop_fb_id &&
-		    prop != config->prop_in_fence_fd &&
-		    prop != config->prop_fb_damage_clips) {
+		    (plane_state->plane->type != DRM_PLANE_TYPE_PRIMARY ||
+		     (prop != config->prop_fb_id &&
+		      prop != config->prop_in_fence_fd &&
+		      prop != config->prop_fb_damage_clips))) {
 			ret = drm_atomic_plane_get_property(plane, plane_state,
 							    prop, &old_val);
 			ret = drm_atomic_check_prop_changes(ret, old_val, prop_value, prop);
 			break;
 		}

-		if (async_flip && plane_state->plane->type != DRM_PLANE_TYPE_PRIMARY) {
-			drm_dbg_atomic(prop->dev,
-				       "[OBJECT:%d] Only primary planes can be changed during async flip\n",
-				       obj->id);
-			ret = -EINVAL;
-			break;
-		}
-
 		ret = drm_atomic_plane_set_property(plane,
 						    plane_state, file_priv,
 						    prop, prop_value);

@@ -443,10 +443,8 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
 		panel_bridge = bridge;
 	}

-	if (connector_type == DRM_MODE_CONNECTOR_Unknown) {
-		kfree(bridge_connector);
+	if (connector_type == DRM_MODE_CONNECTOR_Unknown)
 		return ERR_PTR(-EINVAL);
-	}

 	if (bridge_connector->bridge_hdmi)
 		ret = drmm_connector_hdmi_init(drm, connector,
@@ -461,10 +459,8 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
 		ret = drmm_connector_init(drm, connector,
 					  &drm_bridge_connector_funcs,
 					  connector_type, ddc);
-	if (ret) {
-		kfree(bridge_connector);
+	if (ret)
 		return ERR_PTR(ret);
-	}

 	drm_connector_helper_add(connector, &drm_bridge_connector_helper_funcs);

@@ -851,6 +851,7 @@ static int __alloc_contig_try_harder(struct drm_buddy *mm,
  * drm_buddy_block_trim - free unused pages
  *
  * @mm: DRM buddy manager
+ * @start: start address to begin the trimming.
  * @new_size: original size requested
  * @blocks: Input and output list of allocated blocks.
  * MUST contain single block as input to be trimmed.
@@ -866,11 +867,13 @@ static int __alloc_contig_try_harder(struct drm_buddy *mm,
  * 0 on success, error code on failure.
  */
 int drm_buddy_block_trim(struct drm_buddy *mm,
+			 u64 *start,
 			 u64 new_size,
 			 struct list_head *blocks)
 {
 	struct drm_buddy_block *parent;
 	struct drm_buddy_block *block;
+	u64 block_start, block_end;
 	LIST_HEAD(dfs);
 	u64 new_start;
 	int err;
@@ -882,6 +885,9 @@ int drm_buddy_block_trim(struct drm_buddy *mm,
 			   struct drm_buddy_block,
 			   link);

+	block_start = drm_buddy_block_offset(block);
+	block_end = block_start + drm_buddy_block_size(mm, block);
+
 	if (WARN_ON(!drm_buddy_block_is_allocated(block)))
 		return -EINVAL;
@@ -894,6 +900,20 @@ int drm_buddy_block_trim(struct drm_buddy *mm,
 	if (new_size == drm_buddy_block_size(mm, block))
 		return 0;

+	new_start = block_start;
+	if (start) {
+		new_start = *start;
+
+		if (new_start < block_start)
+			return -EINVAL;
+
+		if (!IS_ALIGNED(new_start, mm->chunk_size))
+			return -EINVAL;
+
+		if (range_overflows(new_start, new_size, block_end))
+			return -EINVAL;
+	}
+
 	list_del(&block->link);
 	mark_free(mm, block);
 	mm->avail += drm_buddy_block_size(mm, block);
@@ -904,7 +924,6 @@ int drm_buddy_block_trim(struct drm_buddy *mm,
 	parent = block->parent;
 	block->parent = NULL;

-	new_start = drm_buddy_block_offset(block);
 	list_add(&block->tmp_link, &dfs);
 	err = __alloc_range(mm, &dfs, new_start, new_size, blocks, NULL);
 	if (err) {
@@ -1066,7 +1085,8 @@ int drm_buddy_alloc_blocks(struct drm_buddy *mm,
 	} while (1);

 	/* Trim the allocated block to the required size */
-	if (original_size != size) {
+	if (!(flags & DRM_BUDDY_TRIM_DISABLE) &&
+	    original_size != size) {
 		struct list_head *trim_list;
 		LIST_HEAD(temp);
 		u64 trim_size;
@@ -1083,6 +1103,7 @@ int drm_buddy_alloc_blocks(struct drm_buddy *mm,
 		}

 		drm_buddy_block_trim(mm,
+				     NULL,
 				     trim_size,
 				     trim_list);
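
The new @start parameter makes drm_buddy_block_trim() validate a caller-chosen trim window before re-allocating it. The validation reduces to three range tests; a standalone sketch of just those checks (names and constants mine, and the overflow test is simplified relative to the kernel's range_overflows()):

```c
#include <stdint.h>
#include <stdio.h>

#define IS_ALIGNED(x, a)	(((x) & ((a) - 1)) == 0)

/* Mirrors the new @start checks: the trim window must begin inside the
 * block, be chunk-aligned, and not run past the block end. */
static int trim_start_valid(uint64_t new_start, uint64_t new_size,
			    uint64_t block_start, uint64_t block_end,
			    uint64_t chunk_size)
{
	if (new_start < block_start)
		return 0;
	if (!IS_ALIGNED(new_start, chunk_size))
		return 0;
	if (new_start + new_size > block_end)	/* range_overflows() */
		return 0;
	return 1;
}

int main(void)
{
	/* 1 MiB block at 0x100000, 4 KiB chunks, 256 KiB trim at +128 KiB */
	printf("valid: %d\n", trim_start_valid(0x120000, 0x40000,
					       0x100000, 0x200000, 0x1000));
	return 0;
}
```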

@@ -880,6 +880,11 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width,

 		kfree(modeset->mode);
 		modeset->mode = drm_mode_duplicate(dev, mode);
+		if (!modeset->mode) {
+			ret = -ENOMEM;
+			break;
+		}
+
 		drm_connector_get(connector);
 		modeset->connectors[modeset->num_connectors++] = connector;
 		modeset->x = offset->x;

@@ -1449,6 +1449,9 @@ bxt_setup_backlight(struct intel_connector *connector, enum pipe unused)

 static int cnp_num_backlight_controllers(struct drm_i915_private *i915)
 {
+	if (INTEL_PCH_TYPE(i915) >= PCH_MTL)
+		return 2;
+
 	if (INTEL_PCH_TYPE(i915) >= PCH_DG1)
 		return 1;

@@ -351,6 +351,9 @@ static int intel_num_pps(struct drm_i915_private *i915)
 	if (IS_GEMINILAKE(i915) || IS_BROXTON(i915))
 		return 2;

+	if (INTEL_PCH_TYPE(i915) >= PCH_MTL)
+		return 2;
+
 	if (INTEL_PCH_TYPE(i915) >= PCH_DG1)
 		return 1;

@@ -290,6 +290,41 @@ out:
 	return i915_error_to_vmf_fault(err);
 }

+static void set_address_limits(struct vm_area_struct *area,
+			       struct i915_vma *vma,
+			       unsigned long obj_offset,
+			       unsigned long *start_vaddr,
+			       unsigned long *end_vaddr)
+{
+	unsigned long vm_start, vm_end, vma_size; /* user's memory parameters */
+	long start, end; /* memory boundaries */
+
+	/*
+	 * Let's move into the ">> PAGE_SHIFT"
+	 * domain to be sure not to lose bits
+	 */
+	vm_start = area->vm_start >> PAGE_SHIFT;
+	vm_end = area->vm_end >> PAGE_SHIFT;
+	vma_size = vma->size >> PAGE_SHIFT;
+
+	/*
+	 * Calculate the memory boundaries by considering the offset
+	 * provided by the user during memory mapping and the offset
+	 * provided for the partial mapping.
+	 */
+	start = vm_start;
+	start -= obj_offset;
+	start += vma->gtt_view.partial.offset;
+	end = start + vma_size;
+
+	start = max_t(long, start, vm_start);
+	end = min_t(long, end, vm_end);
+
+	/* Let's move back into the "<< PAGE_SHIFT" domain */
+	*start_vaddr = (unsigned long)start << PAGE_SHIFT;
+	*end_vaddr = (unsigned long)end << PAGE_SHIFT;
+}
+
 static vm_fault_t vm_fault_gtt(struct vm_fault *vmf)
 {
 #define MIN_CHUNK_PAGES (SZ_1M >> PAGE_SHIFT)
@@ -302,14 +337,18 @@ static vm_fault_t vm_fault_gtt(struct vm_fault *vmf)
 	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
 	bool write = area->vm_flags & VM_WRITE;
 	struct i915_gem_ww_ctx ww;
+	unsigned long obj_offset;
+	unsigned long start, end; /* memory boundaries */
 	intel_wakeref_t wakeref;
 	struct i915_vma *vma;
 	pgoff_t page_offset;
+	unsigned long pfn;
 	int srcu;
 	int ret;

-	/* We don't use vmf->pgoff since that has the fake offset */
+	obj_offset = area->vm_pgoff - drm_vma_node_start(&mmo->vma_node);
 	page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT;
+	page_offset += obj_offset;

 	trace_i915_gem_object_fault(obj, page_offset, true, write);
@@ -402,12 +441,14 @@ retry:
 	if (ret)
 		goto err_unpin;

+	set_address_limits(area, vma, obj_offset, &start, &end);
+
+	pfn = (ggtt->gmadr.start + i915_ggtt_offset(vma)) >> PAGE_SHIFT;
+	pfn += (start - area->vm_start) >> PAGE_SHIFT;
+	pfn += obj_offset - vma->gtt_view.partial.offset;
+
 	/* Finally, remap it using the new GTT offset */
-	ret = remap_io_mapping(area,
-			       area->vm_start + (vma->gtt_view.partial.offset << PAGE_SHIFT),
-			       (ggtt->gmadr.start + i915_ggtt_offset(vma)) >> PAGE_SHIFT,
-			       min_t(u64, vma->size, area->vm_end - area->vm_start),
-			       &ggtt->iomap);
+	ret = remap_io_mapping(area, start, pfn, end - start, &ggtt->iomap);
 	if (ret)
 		goto err_fence;
@@ -1084,6 +1125,8 @@ int i915_gem_fb_mmap(struct drm_i915_gem_object *obj, struct vm_area_struct *vma
 		mmo = mmap_offset_attach(obj, mmap_type, NULL);
 		if (IS_ERR(mmo))
 			return PTR_ERR(mmo);
+
+		vma->vm_pgoff += drm_vma_node_start(&mmo->vma_node);
 	}

 	/*
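
set_address_limits() is all page-unit arithmetic: shift into page space so offset math cannot lose low bits, apply the mapping and partial-view offsets, clamp to the user's VMA, and shift back. A runnable sketch of the same clamping (every constant here is invented for illustration):

```c
#include <stdio.h>

#define PAGE_SHIFT 12

/* Mirrors the shape of set_address_limits(): compute the fault window
 * in pages, clamp it to [vm_start, vm_end), convert back to bytes. */
static void address_limits(unsigned long vm_start, unsigned long vm_end,
			   unsigned long vma_size, long obj_offset,
			   long partial_offset,
			   unsigned long *start, unsigned long *end)
{
	long s = (long)(vm_start >> PAGE_SHIFT) - obj_offset + partial_offset;
	long e = s + (long)(vma_size >> PAGE_SHIFT);

	if (s < (long)(vm_start >> PAGE_SHIFT))	/* max_t() */
		s = vm_start >> PAGE_SHIFT;
	if (e > (long)(vm_end >> PAGE_SHIFT))	/* min_t() */
		e = vm_end >> PAGE_SHIFT;

	*start = (unsigned long)s << PAGE_SHIFT;
	*end = (unsigned long)e << PAGE_SHIFT;
}

int main(void)
{
	unsigned long start, end;

	address_limits(0x700000000000UL, 0x700000100000UL, 0x80000UL,
		       16, 0, &start, &end);
	printf("fault window: [%#lx, %#lx)\n", start, end);
	return 0;
}
```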

@@ -165,7 +165,6 @@ i915_ttm_placement_from_obj(const struct drm_i915_gem_object *obj,
 	i915_ttm_place_from_region(num_allowed ? obj->mm.placements[0] :
 				   obj->mm.region, &places[0], obj->bo_offset,
 				   obj->base.size, flags);
-	places[0].flags |= TTM_PL_FLAG_DESIRED;

 	/* Cache this on object? */
 	for (i = 0; i < num_allowed; ++i) {
@@ -779,13 +778,16 @@ static int __i915_ttm_get_pages(struct drm_i915_gem_object *obj,
 		.interruptible = true,
 		.no_wait_gpu = false,
 	};
-	int real_num_busy;
+	struct ttm_placement initial_placement;
+	struct ttm_place initial_place;
 	int ret;

 	/* First try only the requested placement. No eviction. */
-	real_num_busy = placement->num_placement;
-	placement->num_placement = 1;
-	ret = ttm_bo_validate(bo, placement, &ctx);
+	initial_placement.num_placement = 1;
+	memcpy(&initial_place, placement->placement, sizeof(struct ttm_place));
+	initial_place.flags |= TTM_PL_FLAG_DESIRED;
+	initial_placement.placement = &initial_place;
+	ret = ttm_bo_validate(bo, &initial_placement, &ctx);
 	if (ret) {
 		ret = i915_ttm_err_to_gem(ret);
 		/*
@@ -800,7 +802,6 @@ static int __i915_ttm_get_pages(struct drm_i915_gem_object *obj,
 		 * If the initial attempt fails, allow all accepted placements,
 		 * evicting if necessary.
 		 */
-		placement->num_placement = real_num_busy;
 		ret = ttm_bo_validate(bo, placement, &ctx);
 		if (ret)
 			return i915_ttm_err_to_gem(ret);

@@ -1,6 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0-only
 config DRM_OMAP
 	tristate "OMAP DRM"
+	depends on MMU
 	depends on DRM && OF
 	depends on ARCH_OMAP2PLUS || (COMPILE_TEST && PAGE_SIZE_LESS_THAN_64KB)
 	select DRM_KMS_HELPER

@@ -102,6 +102,17 @@ static void drm_gem_shmem_test_obj_create_private(struct kunit *test)

 	sg_init_one(sgt->sgl, buf, TEST_SIZE);

+	/*
+	 * Set the DMA mask to 64-bits and map the sgtables
+	 * otherwise drm_gem_shmem_free will cause a warning
+	 * on debug kernels.
+	 */
+	ret = dma_set_mask(drm_dev->dev, DMA_BIT_MASK(64));
+	KUNIT_ASSERT_EQ(test, ret, 0);
+
+	ret = dma_map_sgtable(drm_dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
+	KUNIT_ASSERT_EQ(test, ret, 0);
+
 	/* Init a mock DMA-BUF */
 	buf_mock.size = TEST_SIZE;
 	attach_mock.dmabuf = &buf_mock;

@@ -203,9 +203,10 @@ static int xe_hwmon_power_max_write(struct xe_hwmon *hwmon, int channel, long va
 		reg_val = xe_mmio_rmw32(hwmon->gt, rapl_limit, PKG_PWR_LIM_1_EN, 0);
 		reg_val = xe_mmio_read32(hwmon->gt, rapl_limit);
 		if (reg_val & PKG_PWR_LIM_1_EN) {
+			drm_warn(&gt_to_xe(hwmon->gt)->drm, "PL1 disable is not supported!\n");
 			ret = -EOPNOTSUPP;
-			goto unlock;
 		}
+		goto unlock;
 	}

 	/* Computation in 64-bits to avoid overflow. Round to nearest. */

@@ -1634,6 +1634,9 @@ struct xe_lrc_snapshot *xe_lrc_snapshot_capture(struct xe_lrc *lrc)
 	if (!snapshot)
 		return NULL;

+	if (lrc->bo && lrc->bo->vm)
+		xe_vm_get(lrc->bo->vm);
+
 	snapshot->context_desc = xe_lrc_ggtt_addr(lrc);
 	snapshot->indirect_context_desc = xe_lrc_indirect_ring_ggtt_addr(lrc);
 	snapshot->head = xe_lrc_ring_head(lrc);
@@ -1653,12 +1656,14 @@ struct xe_lrc_snapshot *xe_lrc_snapshot_capture(struct xe_lrc *lrc)
 void xe_lrc_snapshot_capture_delayed(struct xe_lrc_snapshot *snapshot)
 {
 	struct xe_bo *bo;
+	struct xe_vm *vm;
 	struct iosys_map src;

 	if (!snapshot)
 		return;

 	bo = snapshot->lrc_bo;
+	vm = bo->vm;
 	snapshot->lrc_bo = NULL;

 	snapshot->lrc_snapshot = kvmalloc(snapshot->lrc_size, GFP_KERNEL);
@@ -1678,6 +1683,8 @@ void xe_lrc_snapshot_capture_delayed(struct xe_lrc_snapshot *snapshot)
 	xe_bo_unlock(bo);
 put_bo:
 	xe_bo_put(bo);
+	if (vm)
+		xe_vm_put(vm);
 }

 void xe_lrc_snapshot_print(struct xe_lrc_snapshot *snapshot, struct drm_printer *p)
@@ -1727,8 +1734,14 @@ void xe_lrc_snapshot_free(struct xe_lrc_snapshot *snapshot)
 		return;

 	kvfree(snapshot->lrc_snapshot);
-	if (snapshot->lrc_bo)
+	if (snapshot->lrc_bo) {
+		struct xe_vm *vm;
+
+		vm = snapshot->lrc_bo->vm;
 		xe_bo_put(snapshot->lrc_bo);
+		if (vm)
+			xe_vm_put(vm);
+	}
 	kfree(snapshot);
 }

@@ -231,7 +231,7 @@ static void rtp_mark_active(struct xe_device *xe,
 	if (first == last)
 		bitmap_set(ctx->active_entries, first, 1);
 	else
-		bitmap_set(ctx->active_entries, first, last - first + 2);
+		bitmap_set(ctx->active_entries, first, last - first + 1);
 }

 /**
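
The one-character fix above is the classic inclusive-range off-by-one: [first, last] spans last - first + 1 entries, so the old "+ 2" marked one bit too many. Trivially checkable:

```c
#include <stdio.h>

/* Number of entries in the inclusive range [first, last]. */
static unsigned int inclusive_len(unsigned int first, unsigned int last)
{
	return last - first + 1;
}

int main(void)
{
	printf("[3, 5] -> %u entries\n", inclusive_len(3, 5)); /* 3 */
	return 0;
}
```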

@@ -263,7 +263,7 @@ void xe_sync_entry_cleanup(struct xe_sync_entry *sync)
 	if (sync->fence)
 		dma_fence_put(sync->fence);
 	if (sync->chain_fence)
-		dma_fence_put(&sync->chain_fence->base);
+		dma_fence_chain_free(sync->chain_fence);
 	if (sync->ufence)
 		user_fence_put(sync->ufence);
 }

@@ -150,7 +150,7 @@ static int xe_ttm_vram_mgr_new(struct ttm_resource_manager *man,
 	} while (remaining_size);

 	if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
-		if (!drm_buddy_block_trim(mm, vres->base.size, &vres->blocks))
+		if (!drm_buddy_block_trim(mm, NULL, vres->base.size, &vres->blocks))
 			size = vres->base.size;
 	}

@@ -990,8 +990,11 @@ static int __maybe_unused geni_i2c_runtime_resume(struct device *dev)
 		return ret;

 	ret = geni_se_resources_on(&gi2c->se);
-	if (ret)
+	if (ret) {
+		clk_disable_unprepare(gi2c->core_clk);
+		geni_icc_disable(&gi2c->se);
 		return ret;
+	}

 	enable_irq(gi2c->irq);
 	gi2c->suspended = 0;

@@ -18,7 +18,7 @@

 enum testunit_cmds {
 	TU_CMD_READ_BYTES = 1,	/* save 0 for ABORT, RESET or similar */
-	TU_CMD_HOST_NOTIFY,
+	TU_CMD_SMBUS_HOST_NOTIFY,
 	TU_CMD_SMBUS_BLOCK_PROC_CALL,
 	TU_NUM_CMDS
 };
@@ -60,7 +60,7 @@ static void i2c_slave_testunit_work(struct work_struct *work)
 		msg.len = tu->regs[TU_REG_DATAH];
 		break;

-	case TU_CMD_HOST_NOTIFY:
+	case TU_CMD_SMBUS_HOST_NOTIFY:
 		msg.addr = 0x08;
 		msg.flags = 0;
 		msg.len = 3;
