Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

drivers/net/can/dev.c
  b552766c87 ("can: dev: prevent potential information leak in can_fill_info()")
  3e77f70e73 ("can: dev: move driver related infrastructure into separate subdir")
  0a042c6ec9 ("can: dev: move netlink related code into seperate file")

  Code move.

drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
  57ac4a31c4 ("net/mlx5e: Correctly handle changing the number of queues when the interface is down")
  214baf2287 ("net/mlx5e: Support HTB offload")

  Adjacent code changes

net/switchdev/switchdev.c
  20776b465c ("net: switchdev: don't set port_obj_info->handled true when -EOPNOTSUPP")
  ffb68fc58e ("net: switchdev: remove the transaction structure from port object notifiers")
  bae33f2b5a ("net: switchdev: remove the transaction structure from port attributes")

  The transaction parameter gets dropped; otherwise keep the fix.

Signed-off-by: Jakub Kicinski <kuba@kernel.org>

463 changed files with 3966 additions and 2259 deletions

@@ -9,9 +9,6 @@
 #
 # Please keep this list dictionary sorted.
 #
-# This comment is parsed by git-shortlog:
-# repo-abbrev: /pub/scm/linux/kernel/git/
-#
 Aaron Durbin <adurbin@google.com>
 Adam Oldham <oldhamca@gmail.com>
 Adam Radford <aradford@gmail.com>

@@ -5,8 +5,8 @@ Description:
 	Provide a place in sysfs for the device link objects in the
 	kernel at any given time. The name of a device link directory,
 	denoted as ... above, is of the form <supplier>--<consumer>
-	where <supplier> is the supplier device name and <consumer> is
-	the consumer device name.
+	where <supplier> is the supplier bus:device name and <consumer>
+	is the consumer bus:device name.

 What:		/sys/class/devlink/.../auto_remove_on
 Date:		May 2020

@@ -4,5 +4,6 @@ Contact: Saravana Kannan <saravanak@google.com>
 Description:
 	The /sys/devices/.../consumer:<consumer> are symlinks to device
 	links where this device is the supplier. <consumer> denotes the
-	name of the consumer in that device link. There can be zero or
-	more of these symlinks for a given device.
+	name of the consumer in that device link and is of the form
+	bus:device name. There can be zero or more of these symlinks
+	for a given device.

@@ -4,5 +4,6 @@ Contact: Saravana Kannan <saravanak@google.com>
 Description:
 	The /sys/devices/.../supplier:<supplier> are symlinks to device
 	links where this device is the consumer. <supplier> denotes the
-	name of the supplier in that device link. There can be zero or
-	more of these symlinks for a given device.
+	name of the supplier in that device link and is of the form
+	bus:device name. There can be zero or more of these symlinks
+	for a given device.

@@ -916,21 +916,25 @@ Date: September 2014
 Contact:	Subhash Jadavani <subhashj@codeaurora.org>
 Description:	This entry could be used to set or show the UFS device
 		runtime power management level. The current driver
-		implementation supports 6 levels with next target states:
+		implementation supports 7 levels with next target states:

 		==  ====================================================
-		0   an UFS device will stay active, an UIC link will
+		0   UFS device will stay active, UIC link will
 		    stay active
-		1   an UFS device will stay active, an UIC link will
+		1   UFS device will stay active, UIC link will
 		    hibernate
-		2   an UFS device will moved to sleep, an UIC link will
+		2   UFS device will be moved to sleep, UIC link will
 		    stay active
-		3   an UFS device will moved to sleep, an UIC link will
+		3   UFS device will be moved to sleep, UIC link will
 		    hibernate
-		4   an UFS device will be powered off, an UIC link will
+		4   UFS device will be powered off, UIC link will
 		    hibernate
-		5   an UFS device will be powered off, an UIC link will
+		5   UFS device will be powered off, UIC link will
 		    be powered off
+		6   UFS device will be moved to deep sleep, UIC link
+		    will be powered off. Note, deep sleep might not be
+		    supported in which case this value will not be
+		    accepted
 		==  ====================================================

 What:		/sys/bus/platform/drivers/ufshcd/*/rpm_target_dev_state
@@ -954,21 +958,25 @@ Date: September 2014
 Contact:	Subhash Jadavani <subhashj@codeaurora.org>
 Description:	This entry could be used to set or show the UFS device
 		system power management level. The current driver
-		implementation supports 6 levels with next target states:
+		implementation supports 7 levels with next target states:

 		==  ====================================================
-		0   an UFS device will stay active, an UIC link will
+		0   UFS device will stay active, UIC link will
 		    stay active
-		1   an UFS device will stay active, an UIC link will
+		1   UFS device will stay active, UIC link will
 		    hibernate
-		2   an UFS device will moved to sleep, an UIC link will
+		2   UFS device will be moved to sleep, UIC link will
 		    stay active
-		3   an UFS device will moved to sleep, an UIC link will
+		3   UFS device will be moved to sleep, UIC link will
 		    hibernate
-		4   an UFS device will be powered off, an UIC link will
+		4   UFS device will be powered off, UIC link will
 		    hibernate
-		5   an UFS device will be powered off, an UIC link will
+		5   UFS device will be powered off, UIC link will
 		    be powered off
+		6   UFS device will be moved to deep sleep, UIC link
+		    will be powered off. Note, deep sleep might not be
+		    supported in which case this value will not be
+		    accepted
 		==  ====================================================

 What:		/sys/bus/platform/drivers/ufshcd/*/spm_target_dev_state

@@ -177,14 +177,20 @@ bitmap_flush_interval:number
 	The bitmap flush interval in milliseconds. The metadata buffers
 	are synchronized when this interval expires.

+allow_discards
+	Allow block discard requests (a.k.a. TRIM) for the integrity device.
+	Discards are only allowed to devices using internal hash.
+
 fix_padding
 	Use a smaller padding of the tag area that is more
 	space-efficient. If this option is not present, large padding is
 	used - that is for compatibility with older kernels.

-allow_discards
-	Allow block discard requests (a.k.a. TRIM) for the integrity device.
-	Discards are only allowed to devices using internal hash.
+legacy_recalculate
+	Allow recalculating of volumes with HMAC keys. This is disabled by
+	default for security reasons - an attacker could modify the volume,
+	set recalc_sector to zero, and the kernel would not detect the
+	modification.

 The journal mode (D/J), buffer_sectors, journal_watermark, commit_time and
 allow_discards can be changed when reloading the target (load an inactive

@@ -160,29 +160,14 @@ intended for use in production as a security mitigation. Therefore it supports
 boot parameters that allow to disable KASAN competely or otherwise control
 particular KASAN features.

-The things that can be controlled are:
-
-1. Whether KASAN is enabled at all.
-2. Whether KASAN collects and saves alloc/free stacks.
-3. Whether KASAN panics on a detected bug or not.
-
-The ``kasan.mode`` boot parameter allows to choose one of three main modes:
-
-- ``kasan.mode=off`` - KASAN is disabled, no tag checks are performed
-- ``kasan.mode=prod`` - only essential production features are enabled
-- ``kasan.mode=full`` - all KASAN features are enabled
-
-The chosen mode provides default control values for the features mentioned
-above. However it's also possible to override the default values by providing:
-
-- ``kasan.stacktrace=off`` or ``=on`` - enable alloc/free stack collection
-  (default: ``on`` for ``mode=full``,
-  otherwise ``off``)
-- ``kasan.fault=report`` or ``=panic`` - only print KASAN report or also panic
-  (default: ``report``)
-
-If ``kasan.mode`` parameter is not provided, it defaults to ``full`` when
-``CONFIG_DEBUG_KERNEL`` is enabled, and to ``prod`` otherwise.
+- ``kasan=off`` or ``=on`` controls whether KASAN is enabled (default: ``on``).
+- ``kasan.stacktrace=off`` or ``=on`` disables or enables alloc and free stack
+  traces collection (default: ``on`` for ``CONFIG_DEBUG_KERNEL=y``, otherwise
+  ``off``).
+- ``kasan.fault=report`` or ``=panic`` controls whether to only print a KASAN
+  report or also panic the kernel (default: ``report``).

 For developers
 ~~~~~~~~~~~~~~

@@ -522,6 +522,63 @@ There's more boilerplate involved, but it can:
 * E.g. if we wanted to also test ``sha256sum``, we could add a ``sha256``
   field and reuse ``cases``.
+* be converted to a "parameterized test", see below.
+
+Parameterized Testing
+~~~~~~~~~~~~~~~~~~~~~
+
+The table-driven testing pattern is common enough that KUnit has special
+support for it.
+
+Reusing the same ``cases`` array from above, we can write the test as a
+"parameterized test" with the following.
+
+.. code-block:: c
+
+	// This is copy-pasted from above.
+	struct sha1_test_case {
+		const char *str;
+		const char *sha1;
+	};
+	struct sha1_test_case cases[] = {
+		{
+			.str = "hello world",
+			.sha1 = "2aae6c35c94fcfb415dbe95f408b9ce91ee846ed",
+		},
+		{
+			.str = "hello world!",
+			.sha1 = "430ce34d020724ed75a196dfc2ad67c77772d169",
+		},
+	};
+
+	// Need a helper function to generate a name for each test case.
+	static void case_to_desc(const struct sha1_test_case *t, char *desc)
+	{
+		strcpy(desc, t->str);
+	}
+	// Creates `sha1_gen_params()` to iterate over `cases`.
+	KUNIT_ARRAY_PARAM(sha1, cases, case_to_desc);
+
+	// Looks no different from a normal test.
+	static void sha1_test(struct kunit *test)
+	{
+		// This function can just contain the body of the for-loop.
+		// The former `cases[i]` is accessible under test->param_value.
+		char out[40];
+		struct sha1_test_case *test_param = (struct sha1_test_case *)(test->param_value);
+
+		sha1sum(test_param->str, out);
+		KUNIT_EXPECT_STREQ_MSG(test, (char *)out, test_param->sha1,
+				       "sha1sum(%s)", test_param->str);
+	}
+
+	// Instead of KUNIT_CASE, we use KUNIT_CASE_PARAM and pass in the
+	// function declared by KUNIT_ARRAY_PARAM.
+	static struct kunit_case sha1_test_cases[] = {
+		KUNIT_CASE_PARAM(sha1_test, sha1_gen_params),
+		{}
+	};
+
 .. _kunit-on-non-uml:

 KUnit on non-UML architectures

@@ -16,8 +16,8 @@ description:
 properties:
   compatible:
     enum:
-      - bosch,bmc150
-      - bosch,bmi055
+      - bosch,bmc150_accel
+      - bosch,bmi055_accel
       - bosch,bma255
       - bosch,bma250e
      - bosch,bma222

@@ -7,8 +7,8 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Mediatek MT8192 with MT6359, RT1015 and RT5682 ASoC sound card driver

 maintainers:
-  - Jiaxin Yu <jiaxin.yu@mediatek.com>
-  - Shane Chien <shane.chien@mediatek.com>
+  - Jiaxin Yu <jiaxin.yu@mediatek.com>
+  - Shane Chien <shane.chien@mediatek.com>

 description:
   This binding describes the MT8192 sound card.

@@ -1807,12 +1807,24 @@ seg6_flowlabel - INTEGER
 ``conf/default/*``:
 	Change the interface-specific default settings.

+	These settings would be used during creating new interfaces.
+
 ``conf/all/*``:
 	Change all the interface-specific settings.

 	[XXX: Other special features than forwarding?]

+conf/all/disable_ipv6 - BOOLEAN
+	Changing this value is same as changing ``conf/default/disable_ipv6``
+	setting and also all per-interface ``disable_ipv6`` settings to the same
+	value.
+
+	Reading this value does not have any particular meaning. It does not say
+	whether IPv6 support is enabled or disabled. Returned value can be 1
+	also in the case when some interface has ``disable_ipv6`` set to 0 and
+	has configured IPv6 addresses.
+
 conf/all/forwarding - BOOLEAN
 	Enable global IPv6 forwarding between all interfaces.

@@ -360,10 +360,9 @@ since the last call to this ioctl. Bit 0 is the first page in the
 memory slot. Ensure the entire structure is cleared to avoid padding
 issues.

-If KVM_CAP_MULTI_ADDRESS_SPACE is available, bits 16-31 specifies
-the address space for which you want to return the dirty bitmap.
-They must be less than the value that KVM_CHECK_EXTENSION returns for
-the KVM_CAP_MULTI_ADDRESS_SPACE capability.
+If KVM_CAP_MULTI_ADDRESS_SPACE is available, bits 16-31 of slot field specifies
+the address space for which you want to return the dirty bitmap. See
+KVM_SET_USER_MEMORY_REGION for details on the usage of slot field.

 The bits in the dirty bitmap are cleared before the ioctl returns, unless
 KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 is enabled. For more information,
@@ -1281,6 +1280,9 @@ field userspace_addr, which must point at user addressable memory for
 the entire memory slot size. Any object may back this memory, including
 anonymous memory, ordinary files, and hugetlbfs.

+On architectures that support a form of address tagging, userspace_addr must
+be an untagged address.
+
 It is recommended that the lower 21 bits of guest_phys_addr and userspace_addr
 be identical. This allows large pages in the guest to be backed by large
 pages in the host.
@@ -1333,7 +1335,7 @@ documentation when it pops into existence).
 :Capability: KVM_CAP_ENABLE_CAP_VM
 :Architectures: all
-:Type: vcpu ioctl
+:Type: vm ioctl
 :Parameters: struct kvm_enable_cap (in)
 :Returns: 0 on success; -1 on error
@@ -4432,7 +4434,7 @@ to I/O ports.
 :Capability: KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2
 :Architectures: x86, arm, arm64, mips
 :Type: vm ioctl
-:Parameters: struct kvm_dirty_log (in)
+:Parameters: struct kvm_clear_dirty_log (in)
 :Returns: 0 on success, -1 on error

 ::
@@ -4459,10 +4461,9 @@ in KVM's dirty bitmap, and dirty tracking is re-enabled for that page
 (for example via write-protection, or by clearing the dirty bit in
 a page table entry).

-If KVM_CAP_MULTI_ADDRESS_SPACE is available, bits 16-31 specifies
-the address space for which you want to return the dirty bitmap.
-They must be less than the value that KVM_CHECK_EXTENSION returns for
-the KVM_CAP_MULTI_ADDRESS_SPACE capability.
+If KVM_CAP_MULTI_ADDRESS_SPACE is available, bits 16-31 of slot field specifies
+the address space for which you want to clear the dirty status. See
+KVM_SET_USER_MEMORY_REGION for details on the usage of slot field.

 This ioctl is mostly useful when KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2
 is enabled; for more information, see the description of the capability.

@@ -3247,6 +3247,7 @@ L: netdev@vger.kernel.org
 S: Supported
 W: http://sourceforge.net/projects/bonding/
 F: drivers/net/bonding/
+F: include/net/bonding.h
 F: include/uapi/linux/if_bonding.h

 BOSCH SENSORTEC BMA400 ACCELEROMETER IIO DRIVER
@@ -3420,7 +3421,7 @@ F: Documentation/devicetree/bindings/pci/brcm,stb-pcie.yaml
 F: drivers/pci/controller/pcie-brcmstb.c
 F: drivers/staging/vc04_services
 N: bcm2711
-N: bcm2835
+N: bcm283*

 BROADCOM BCM281XX/BCM11XXX/BCM216XX ARM ARCHITECTURE
 M: Florian Fainelli <f.fainelli@gmail.com>
@@ -3899,7 +3900,7 @@ F: Documentation/devicetree/bindings/mtd/cadence-nand-controller.txt
 F: drivers/mtd/nand/raw/cadence-nand-controller.c

 CADENCE USB3 DRD IP DRIVER
-M: Peter Chen <peter.chen@nxp.com>
+M: Peter Chen <peter.chen@kernel.org>
 M: Pawel Laszczak <pawell@cadence.com>
 R: Roger Quadros <rogerq@kernel.org>
 R: Aswath Govindraju <a-govindraju@ti.com>
@@ -4184,7 +4185,7 @@ S: Maintained
 F: Documentation/translations/zh_CN/

 CHIPIDEA USB HIGH SPEED DUAL ROLE CONTROLLER
-M: Peter Chen <Peter.Chen@nxp.com>
+M: Peter Chen <peter.chen@kernel.org>
 L: linux-usb@vger.kernel.org
 S: Maintained
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/peter.chen/usb.git
@@ -4334,7 +4335,9 @@ W: https://clangbuiltlinux.github.io/
 B: https://github.com/ClangBuiltLinux/linux/issues
 C: irc://chat.freenode.net/clangbuiltlinux
 F: Documentation/kbuild/llvm.rst
+F: include/linux/compiler-clang.h
 F: scripts/clang-tools/
+F: scripts/clang-version.sh
 F: scripts/lld-version.sh
 K: \b(?i:clang|llvm)\b
@@ -8454,11 +8457,8 @@ F: drivers/i3c/
 F: include/linux/i3c/

 IA64 (Itanium) PLATFORM
-M: Tony Luck <tony.luck@intel.com>
-M: Fenghua Yu <fenghua.yu@intel.com>
 L: linux-ia64@vger.kernel.org
-S: Odd Fixes
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux.git
+S: Orphan
 F: Documentation/ia64/
 F: arch/ia64/
@@ -12436,6 +12436,7 @@ F: tools/testing/selftests/net/ipsec.c
 NETWORKING [IPv4/IPv6]
 M: "David S. Miller" <davem@davemloft.net>
 M: Hideaki YOSHIFUJI <yoshfuji@linux-ipv6.org>
+M: David Ahern <dsahern@kernel.org>
 L: netdev@vger.kernel.org
 S: Maintained
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git
@@ -14527,10 +14528,18 @@ S: Supported
 F: drivers/crypto/qat/

 QCOM AUDIO (ASoC) DRIVERS
-M: Patrick Lai <plai@codeaurora.org>
+M: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
 M: Banajit Goswami <bgoswami@codeaurora.org>
 L: alsa-devel@alsa-project.org (moderated for non-subscribers)
 S: Supported
+F: sound/soc/codecs/lpass-va-macro.c
+F: sound/soc/codecs/lpass-wsa-macro.*
+F: sound/soc/codecs/msm8916-wcd-analog.c
+F: sound/soc/codecs/msm8916-wcd-digital.c
+F: sound/soc/codecs/wcd9335.*
+F: sound/soc/codecs/wcd934x.c
+F: sound/soc/codecs/wcd-clsh-v2.*
+F: sound/soc/codecs/wsa881x.c
 F: sound/soc/qcom/

 QCOM IPA DRIVER
@@ -16982,7 +16991,7 @@ M: Olivier Moysan <olivier.moysan@st.com>
 M: Arnaud Pouliquen <arnaud.pouliquen@st.com>
 L: alsa-devel@alsa-project.org (moderated for non-subscribers)
 S: Maintained
-F: Documentation/devicetree/bindings/sound/st,stm32-*.txt
+F: Documentation/devicetree/bindings/iio/adc/st,stm32-*.yaml
 F: sound/soc/stm/

 STM32 TIMER/LPTIMER DRIVERS
@@ -18435,7 +18444,7 @@ F: Documentation/usb/ohci.rst
 F: drivers/usb/host/ohci*

 USB OTG FSM (Finite State Machine)
-M: Peter Chen <Peter.Chen@nxp.com>
+M: Peter Chen <peter.chen@kernel.org>
 L: linux-usb@vger.kernel.org
 S: Maintained
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/peter.chen/usb.git

@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 11
 SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc5
 NAME = Kleptomaniac Octopus

 # *DOCUMENTATION*

@@ -16,6 +16,13 @@
 		stdout-path = &uart1;
 	};

+	aliases {
+		mmc0 = &usdhc2;
+		mmc1 = &usdhc3;
+		mmc2 = &usdhc4;
+		/delete-property/ mmc3;
+	};
+
 	memory@10000000 {
 		device_type = "memory";
 		reg = <0x10000000 0x80000000>;

@@ -418,7 +418,7 @@
 			/* VDD_AUD_1P8: Audio codec */
 			reg_aud_1p8v: ldo3 {
-				regulator-name = "vdd1p8";
+				regulator-name = "vdd1p8a";
 				regulator-min-microvolt = <1800000>;
 				regulator-max-microvolt = <1800000>;
 				regulator-boot-on;

@@ -137,7 +137,7 @@
 	lcd_backlight: lcd-backlight {
 		compatible = "pwm-backlight";
-		pwms = <&pwm4 0 5000000>;
+		pwms = <&pwm4 0 5000000 0>;
 		pwm-names = "LCD_BKLT_PWM";
 		brightness-levels = <0 10 20 30 40 50 60 70 80 90 100>;
@@ -167,7 +167,7 @@
 		i2c-gpio,delay-us = <2>; /* ~100 kHz */
 		#address-cells = <1>;
 		#size-cells = <0>;
-		status = "disabld";
+		status = "disabled";
 	};

 	i2c_cam: i2c-gpio-cam {
@@ -179,7 +179,7 @@
 		i2c-gpio,delay-us = <2>; /* ~100 kHz */
 		#address-cells = <1>;
 		#size-cells = <0>;
-		status = "disabld";
+		status = "disabled";
 	};
 };

@@ -53,7 +53,6 @@
 &fec {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_microsom_enet_ar8035>;
-	phy-handle = <&phy>;
 	phy-mode = "rgmii-id";
 	phy-reset-duration = <2>;
 	phy-reset-gpios = <&gpio4 15 GPIO_ACTIVE_LOW>;
@@ -63,10 +62,19 @@
 		#address-cells = <1>;
 		#size-cells = <0>;

-		phy: ethernet-phy@0 {
+		/*
+		 * The PHY can appear at either address 0 or 4 due to the
+		 * configuration (LED) pin not being pulled sufficiently.
+		 */
+		ethernet-phy@0 {
 			reg = <0>;
 			qca,clk-out-frequency = <125000000>;
 		};
+
+		ethernet-phy@4 {
+			reg = <4>;
+			qca,clk-out-frequency = <125000000>;
+		};
 	};
 };

@@ -115,6 +115,7 @@
 		compatible = "nxp,pcf2127";
 		reg = <0>;
 		spi-max-frequency = <2000000>;
+		reset-source;
 	};
 };

@@ -12,4 +12,42 @@
 					   200000 0>;
 		};
 	};
+
+	reserved-memory {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		ranges;
+
+		/* Modem trace memory */
+		ram@06000000 {
+			reg = <0x06000000 0x00f00000>;
+			no-map;
+		};
+
+		/* Modem shared memory */
+		ram@06f00000 {
+			reg = <0x06f00000 0x00100000>;
+			no-map;
+		};
+
+		/* Modem private memory */
+		ram@07000000 {
+			reg = <0x07000000 0x01000000>;
+			no-map;
+		};
+
+		/*
+		 * Initial Secure Software ISSW memory
+		 *
+		 * This is probably only used if the kernel tries
+		 * to actually call into trustzone to run secure
+		 * applications, which the mainline kernel probably
+		 * will not do on this old chipset. But you can never
+		 * be too careful, so reserve this memory anyway.
+		 */
+		ram@17f00000 {
+			reg = <0x17f00000 0x00100000>;
+			no-map;
+		};
+	};
 };

@@ -12,4 +12,42 @@
 					   200000 0>;
 		};
 	};
+
+	reserved-memory {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		ranges;
+
+		/* Modem trace memory */
+		ram@06000000 {
+			reg = <0x06000000 0x00f00000>;
+			no-map;
+		};
+
+		/* Modem shared memory */
+		ram@06f00000 {
+			reg = <0x06f00000 0x00100000>;
+			no-map;
+		};
+
+		/* Modem private memory */
+		ram@07000000 {
+			reg = <0x07000000 0x01000000>;
+			no-map;
+		};
+
+		/*
+		 * Initial Secure Software ISSW memory
+		 *
+		 * This is probably only used if the kernel tries
+		 * to actually call into trustzone to run secure
+		 * applications, which the mainline kernel probably
+		 * will not do on this old chipset. But you can never
+		 * be too careful, so reserve this memory anyway.
+		 */
+		ram@17f00000 {
+			reg = <0x17f00000 0x00100000>;
+			no-map;
+		};
+	};
 };

@@ -0,0 +1,35 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "ste-dbx5x0.dtsi"
+
+/ {
+	cpus {
+		cpu@300 {
+			/* cpufreq controls */
+			operating-points = <1152000 0
+					    800000 0
+					    400000 0
+					    200000 0>;
+		};
+	};
+
+	reserved-memory {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		ranges;
+
+		/*
+		 * Initial Secure Software ISSW memory
+		 *
+		 * This is probably only used if the kernel tries
+		 * to actually call into trustzone to run secure
+		 * applications, which the mainline kernel probably
+		 * will not do on this old chipset. But you can never
+		 * be too careful, so reserve this memory anyway.
+		 */
+		ram@17f00000 {
+			reg = <0x17f00000 0x00100000>;
+			no-map;
+		};
+	};
+};

@@ -4,7 +4,7 @@
  */
 /dts-v1/;

-#include "ste-db8500.dtsi"
+#include "ste-db9500.dtsi"
 #include "ste-href-ab8500.dtsi"
 #include "ste-href-family-pinctrl.dtsi"

@@ -67,6 +67,7 @@
 #define MX6Q_CCM_CCR	0x0

 	.align 3
+	.arm

 .macro	sync_l2_cache

@@ -4,11 +4,16 @@
  */
 	usb {
 		compatible = "simple-bus";
-		dma-ranges;
 		#address-cells = <2>;
 		#size-cells = <2>;
 		ranges = <0x0 0x0 0x0 0x68500000 0x0 0x00400000>;

+		/*
+		 * Internally, USB bus to the interconnect can only address up
+		 * to 40-bit
+		 */
+		dma-ranges = <0 0 0 0 0x100 0x0>;
+
 		usbphy0: usb-phy@0 {
 			compatible = "brcm,sr-usb-combo-phy";
 			reg = <0x0 0x00000000 0x0 0x100>;

@@ -101,7 +101,7 @@
 	reboot {
 		compatible ="syscon-reboot";
 		regmap = <&rst>;
-		offset = <0xb0>;
+		offset = <0>;
 		mask = <0x02>;
 	};

@@ -253,7 +253,7 @@
 			#size-cells = <1>;
 			ranges;

-			spba: bus@30000000 {
+			spba: spba-bus@30000000 {
 				compatible = "fsl,spba-bus", "simple-bus";
 				#address-cells = <1>;
 				#size-cells = <1>;

@@ -266,7 +266,7 @@
 				#gpio-cells = <2>;
 				interrupt-controller;
 				#interrupt-cells = <2>;
-				gpio-ranges = <&iomuxc 0 56 26>, <&iomuxc 0 144 4>;
+				gpio-ranges = <&iomuxc 0 56 26>, <&iomuxc 26 144 4>;
 			};

 			gpio4: gpio@30230000 {

@@ -991,8 +991,6 @@ CONFIG_ARCH_TEGRA_210_SOC=y
 CONFIG_ARCH_TEGRA_186_SOC=y
 CONFIG_ARCH_TEGRA_194_SOC=y
 CONFIG_ARCH_TEGRA_234_SOC=y
-CONFIG_ARCH_K3_AM6_SOC=y
-CONFIG_ARCH_K3_J721E_SOC=y
 CONFIG_TI_SCI_PM_DOMAINS=y
 CONFIG_EXTCON_PTN5150=m
 CONFIG_EXTCON_USB_GPIO=y

@@ -352,8 +352,8 @@ kprobe_breakpoint_ss_handler(struct pt_regs *regs, unsigned int esr)
 	unsigned long addr = instruction_pointer(regs);
 	struct kprobe *cur = kprobe_running();

-	if (cur && (kcb->kprobe_status == KPROBE_HIT_SS)
-	    && ((unsigned long)&cur->ainsn.api.insn[1] == addr)) {
+	if (cur && (kcb->kprobe_status & (KPROBE_HIT_SS | KPROBE_REENTER)) &&
+	    ((unsigned long)&cur->ainsn.api.insn[1] == addr)) {
 		kprobes_restore_local_irqflag(kcb, regs);
 		post_kprobe_handler(cur, kcb, regs);

@@ -1396,8 +1396,9 @@ static void cpu_init_hyp_mode(void)
 	 * Calculate the raw per-cpu offset without a translation from the
 	 * kernel's mapping to the linear mapping, and store it in tpidr_el2
 	 * so that we can use adr_l to access per-cpu variables in EL2.
+	 * Also drop the KASAN tag which gets in the way...
 	 */
-	params->tpidr_el2 = (unsigned long)this_cpu_ptr_nvhe_sym(__per_cpu_start) -
+	params->tpidr_el2 = (unsigned long)kasan_reset_tag(this_cpu_ptr_nvhe_sym(__per_cpu_start)) -
 			    (unsigned long)kvm_ksym_ref(CHOOSE_NVHE_SYM(__per_cpu_start));

 	params->mair_el2 = read_sysreg(mair_el1);

@@ -77,12 +77,6 @@ static unsigned long psci_forward(struct kvm_cpu_context *host_ctxt)
 			 cpu_reg(host_ctxt, 2), cpu_reg(host_ctxt, 3));
 }

-static __noreturn unsigned long psci_forward_noreturn(struct kvm_cpu_context *host_ctxt)
-{
-	psci_forward(host_ctxt);
-	hyp_panic(); /* unreachable */
-}
-
 static unsigned int find_cpu_id(u64 mpidr)
 {
 	unsigned int i;
@@ -251,10 +245,13 @@ static unsigned long psci_0_2_handler(u64 func_id, struct kvm_cpu_context *host_
 	case PSCI_0_2_FN_MIGRATE_INFO_TYPE:
 	case PSCI_0_2_FN64_MIGRATE_INFO_UP_CPU:
 		return psci_forward(host_ctxt);
+	/*
+	 * SYSTEM_OFF/RESET should not return according to the spec.
+	 * Allow it so as to stay robust to broken firmware.
+	 */
 	case PSCI_0_2_FN_SYSTEM_OFF:
 	case PSCI_0_2_FN_SYSTEM_RESET:
-		psci_forward_noreturn(host_ctxt);
-		unreachable();
+		return psci_forward(host_ctxt);
 	case PSCI_0_2_FN64_CPU_SUSPEND:
 		return psci_cpu_suspend(func_id, host_ctxt);
 	case PSCI_0_2_FN64_CPU_ON:

@@ -788,7 +788,7 @@ u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
 {
 	unsigned long *bmap = vcpu->kvm->arch.pmu_filter;
 	u64 val, mask = 0;
-	int base, i;
+	int base, i, nr_events;

 	if (!pmceid1) {
 		val = read_sysreg(pmceid0_el0);
@@ -801,13 +801,17 @@ u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
 	if (!bmap)
 		return val;

+	nr_events = kvm_pmu_event_mask(vcpu->kvm) + 1;
+
 	for (i = 0; i < 32; i += 8) {
 		u64 byte;

 		byte = bitmap_get_value8(bmap, base + i);
 		mask |= byte << i;
-		byte = bitmap_get_value8(bmap, 0x4000 + base + i);
-		mask |= byte << (32 + i);
+
+		if (nr_events >= (0x4000 + base + 32)) {
+			byte = bitmap_get_value8(bmap, 0x4000 + base + i);
+			mask |= byte << (32 + i);
+		}
 	}

 	return val & mask;

@@ -43,6 +43,10 @@
  * 64bit interface.
  */

+#define reg_to_encoding(x) \
+	sys_reg((u32)(x)->Op0, (u32)(x)->Op1, \
+		(u32)(x)->CRn, (u32)(x)->CRm, (u32)(x)->Op2)
+
 static bool read_from_write_only(struct kvm_vcpu *vcpu,
 				 struct sys_reg_params *params,
 				 const struct sys_reg_desc *r)
@@ -273,8 +277,7 @@ static bool trap_loregion(struct kvm_vcpu *vcpu,
 			  const struct sys_reg_desc *r)
 {
 	u64 val = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
-	u32 sr = sys_reg((u32)r->Op0, (u32)r->Op1,
-			 (u32)r->CRn, (u32)r->CRm, (u32)r->Op2);
+	u32 sr = reg_to_encoding(r);

 	if (!(val & (0xfUL << ID_AA64MMFR1_LOR_SHIFT))) {
 		kvm_inject_undefined(vcpu);
@@ -590,6 +593,15 @@ static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 	vcpu_write_sys_reg(vcpu, (1ULL << 31) | mpidr, MPIDR_EL1);
 }

+static unsigned int pmu_visibility(const struct kvm_vcpu *vcpu,
+				   const struct sys_reg_desc *r)
+{
+	if (kvm_vcpu_has_pmu(vcpu))
+		return 0;
+
+	return REG_HIDDEN;
+}
+
 static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 {
 	u64 pmcr, val;
@@ -613,9 +625,8 @@ static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 static bool check_pmu_access_disabled(struct kvm_vcpu *vcpu, u64 flags)
 {
 	u64 reg = __vcpu_sys_reg(vcpu, PMUSERENR_EL0);
-	bool enabled = kvm_vcpu_has_pmu(vcpu);
+	bool enabled = (reg & flags) || vcpu_mode_priv(vcpu);

-	enabled &= (reg & flags) || vcpu_mode_priv(vcpu);
 	if (!enabled)
 		kvm_inject_undefined(vcpu);
@@ -900,11 +911,6 @@ static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 			     const struct sys_reg_desc *r)
 {
-	if (!kvm_vcpu_has_pmu(vcpu)) {
-		kvm_inject_undefined(vcpu);
-		return false;
-	}
-
 	if (p->is_write) {
 		if (!vcpu_mode_priv(vcpu)) {
 			kvm_inject_undefined(vcpu);
@@ -921,10 +927,6 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 	return true;
 }

-#define reg_to_encoding(x) \
-	sys_reg((u32)(x)->Op0, (u32)(x)->Op1, \
-		(u32)(x)->CRn, (u32)(x)->CRm, (u32)(x)->Op2)
-
 /* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
 #define DBG_BCR_BVR_WCR_WVR_EL1(n) \
 	{ SYS_DESC(SYS_DBGBVRn_EL1(n)), \
@@ -936,15 +938,18 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 	{ SYS_DESC(SYS_DBGWCRn_EL1(n)), \
 	  trap_wcr, reset_wcr, 0, 0, get_wcr, set_wcr }

+#define PMU_SYS_REG(r) \
+	SYS_DESC(r), .reset = reset_unknown, .visibility = pmu_visibility
+
 /* Macro to expand the PMEVCNTRn_EL0 register */
 #define PMU_PMEVCNTR_EL0(n) \
-	{ SYS_DESC(SYS_PMEVCNTRn_EL0(n)), \
-	  access_pmu_evcntr, reset_unknown, (PMEVCNTR0_EL0 + n), }
+	{ PMU_SYS_REG(SYS_PMEVCNTRn_EL0(n)), \
+	  .access = access_pmu_evcntr, .reg = (PMEVCNTR0_EL0 + n), }

 /* Macro to expand the PMEVTYPERn_EL0 register */
 #define PMU_PMEVTYPER_EL0(n) \
-	{ SYS_DESC(SYS_PMEVTYPERn_EL0(n)), \
-	  access_pmu_evtyper, reset_unknown, (PMEVTYPER0_EL0 + n), }
+	{ PMU_SYS_REG(SYS_PMEVTYPERn_EL0(n)), \
+	  .access = access_pmu_evtyper, .reg = (PMEVTYPER0_EL0 + n), }

 static bool undef_access(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 			 const struct sys_reg_desc *r)
@@ -1020,8 +1025,7 @@ static bool access_arch_timer(struct kvm_vcpu *vcpu,
 static u64 read_id_reg(const struct kvm_vcpu *vcpu,
 		       struct sys_reg_desc const *r, bool raz)
 {
-	u32 id = sys_reg((u32)r->Op0, (u32)r->Op1,
-			 (u32)r->CRn, (u32)r->CRm, (u32)r->Op2);
+	u32 id = reg_to_encoding(r);
 	u64 val = raz ? 0 : read_sanitised_ftr_reg(id);

 	if (id == SYS_ID_AA64PFR0_EL1) {
@@ -1062,8 +1066,7 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu,
 static unsigned int id_visibility(const struct kvm_vcpu *vcpu,
 				  const struct sys_reg_desc *r)
 {
-	u32 id = sys_reg((u32)r->Op0, (u32)r->Op1,
-			 (u32)r->CRn, (u32)r->CRm, (u32)r->Op2);
+	u32 id = reg_to_encoding(r);

 	switch (id) {
 	case SYS_ID_AA64ZFR0_EL1:
@@ -1486,8 +1489,10 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 	{ SYS_DESC(SYS_FAR_EL1), access_vm_reg, reset_unknown, FAR_EL1 },
 	{ SYS_DESC(SYS_PAR_EL1), NULL, reset_unknown, PAR_EL1 },

-	{ SYS_DESC(SYS_PMINTENSET_EL1), access_pminten, reset_unknown, PMINTENSET_EL1 },
-	{ SYS_DESC(SYS_PMINTENCLR_EL1), access_pminten, reset_unknown, PMINTENSET_EL1 },
+	{ PMU_SYS_REG(SYS_PMINTENSET_EL1),
+	  .access = access_pminten, .reg = PMINTENSET_EL1 },
+	{ PMU_SYS_REG(SYS_PMINTENCLR_EL1),
+	  .access = access_pminten, .reg = PMINTENSET_EL1 },

 	{ SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 },
 	{ SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 },
@@ -1526,23 +1531,36 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 	{ SYS_DESC(SYS_CSSELR_EL1), access_csselr, reset_unknown, CSSELR_EL1 },
 	{ SYS_DESC(SYS_CTR_EL0), access_ctr },

-	{ SYS_DESC(SYS_PMCR_EL0), access_pmcr, reset_pmcr, PMCR_EL0 },
-	{ SYS_DESC(SYS_PMCNTENSET_EL0), access_pmcnten, reset_unknown, PMCNTENSET_EL0 },
-	{ SYS_DESC(SYS_PMCNTENCLR_EL0), access_pmcnten, reset_unknown, PMCNTENSET_EL0 },
-	{ SYS_DESC(SYS_PMOVSCLR_EL0), access_pmovs, reset_unknown, PMOVSSET_EL0 },
-	{ SYS_DESC(SYS_PMSWINC_EL0), access_pmswinc, reset_unknown, PMSWINC_EL0 },
-	{ SYS_DESC(SYS_PMSELR_EL0), access_pmselr, reset_unknown, PMSELR_EL0 },
-	{ SYS_DESC(SYS_PMCEID0_EL0), access_pmceid },
-	{ SYS_DESC(SYS_PMCEID1_EL0), access_pmceid },
-	{ SYS_DESC(SYS_PMCCNTR_EL0), access_pmu_evcntr, reset_unknown, PMCCNTR_EL0 },
-	{ SYS_DESC(SYS_PMXEVTYPER_EL0), access_pmu_evtyper },
-	{ SYS_DESC(SYS_PMXEVCNTR_EL0), access_pmu_evcntr },
+	{ PMU_SYS_REG(SYS_PMCR_EL0), .access = access_pmcr,
+	  .reset = reset_pmcr, .reg = PMCR_EL0 },
+	{ PMU_SYS_REG(SYS_PMCNTENSET_EL0),
+	  .access = access_pmcnten, .reg = PMCNTENSET_EL0 },
+	{ PMU_SYS_REG(SYS_PMCNTENCLR_EL0),
+	  .access = access_pmcnten, .reg = PMCNTENSET_EL0 },
+	{ PMU_SYS_REG(SYS_PMOVSCLR_EL0),
+	  .access = access_pmovs, .reg = PMOVSSET_EL0 },
+	{ PMU_SYS_REG(SYS_PMSWINC_EL0),
+	  .access = access_pmswinc, .reg = PMSWINC_EL0 },
+	{ PMU_SYS_REG(SYS_PMSELR_EL0),
+	  .access = access_pmselr, .reg = PMSELR_EL0 },
+	{ PMU_SYS_REG(SYS_PMCEID0_EL0),
+	  .access = access_pmceid, .reset = NULL },
+	{ PMU_SYS_REG(SYS_PMCEID1_EL0),
+	  .access = access_pmceid, .reset = NULL },
+	{ PMU_SYS_REG(SYS_PMCCNTR_EL0),
+	  .access = access_pmu_evcntr, .reg = PMCCNTR_EL0 },
+	{ PMU_SYS_REG(SYS_PMXEVTYPER_EL0),
+	  .access = access_pmu_evtyper, .reset = NULL },
+	{ PMU_SYS_REG(SYS_PMXEVCNTR_EL0),
+	  .access = access_pmu_evcntr, .reset = NULL },
 	/*
 	 * PMUSERENR_EL0 resets as unknown in 64bit mode while it resets as zero
 	 * in 32bit mode. Here we choose to reset it as zero for consistency.
 	 */
-	{ SYS_DESC(SYS_PMUSERENR_EL0), access_pmuserenr, reset_val, PMUSERENR_EL0, 0 },
-	{ SYS_DESC(SYS_PMOVSSET_EL0), access_pmovs, reset_unknown, PMOVSSET_EL0 },
+	{ PMU_SYS_REG(SYS_PMUSERENR_EL0), .access = access_pmuserenr,
+	  .reset = reset_val, .reg = PMUSERENR_EL0, .val = 0 },
+	{ PMU_SYS_REG(SYS_PMOVSSET_EL0),
+	  .access = access_pmovs, .reg = PMOVSSET_EL0 },

 	{ SYS_DESC(SYS_TPIDR_EL0), NULL, reset_unknown, TPIDR_EL0 },
 	{ SYS_DESC(SYS_TPIDRRO_EL0), NULL, reset_unknown, TPIDRRO_EL0 },
@@ -1694,7 +1712,8 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 	 * PMCCFILTR_EL0 resets as unknown in 64bit mode while it resets as zero
 	 * in 32bit mode. Here we choose to reset it as zero for consistency.
 	 */
-	{ SYS_DESC(SYS_PMCCFILTR_EL0), access_pmu_evtyper, reset_val, PMCCFILTR_EL0, 0 },
+	{ PMU_SYS_REG(SYS_PMCCFILTR_EL0), .access = access_pmu_evtyper,
+	  .reset = reset_val, .reg = PMCCFILTR_EL0, .val = 0 },

 	{ SYS_DESC(SYS_DACR32_EL2), NULL, reset_unknown, DACR32_EL2 },
 	{ SYS_DESC(SYS_IFSR32_EL2), NULL, reset_unknown, IFSR32_EL2 },

@@ -709,10 +709,11 @@ static int do_tag_check_fault(unsigned long far, unsigned int esr,
 			      struct pt_regs *regs)
 {
 	/*
-	 * The architecture specifies that bits 63:60 of FAR_EL1 are UNKNOWN for tag
-	 * check faults. Mask them out now so that userspace doesn't see them.
+	 * The architecture specifies that bits 63:60 of FAR_EL1 are UNKNOWN
+	 * for tag check faults. Set them to corresponding bits in the untagged
+	 * address.
 	 */
-	far &= (1UL << 60) - 1;
+	far = (__untagged_addr(far) & ~MTE_TAG_MASK) | (far & MTE_TAG_MASK);
 	do_bad_area(far, esr, regs);
 	return 0;
 }

@@ -54,7 +54,7 @@ extern void ia64_xchg_called_with_bad_pointer(void);
 })

 #define xchg(ptr, x) \
-((__typeof__(*(ptr))) __xchg((unsigned long) (x), (ptr), sizeof(*(ptr))))
+({(__typeof__(*(ptr))) __xchg((unsigned long) (x), (ptr), sizeof(*(ptr)));})

 /*
  * Atomic compare and exchange. Compare OLD with MEM, if identical,

@@ -171,30 +171,35 @@ void vtime_account_hardirq(struct task_struct *tsk)
 static irqreturn_t
 timer_interrupt (int irq, void *dev_id)
 {
-	unsigned long cur_itm, new_itm, ticks;
+	unsigned long new_itm;

 	if (cpu_is_offline(smp_processor_id())) {
 		return IRQ_HANDLED;
 	}

 	new_itm = local_cpu_data->itm_next;
-	cur_itm = ia64_get_itc();

-	if (!time_after(cur_itm, new_itm)) {
-		printk(KERN_ERR "Oops: timer tick before it's due (itc=%lx,itm=%lx)\n",
-		       cur_itm, new_itm);
-		ticks = 1;
-	} else {
-		ticks = DIV_ROUND_UP(cur_itm - new_itm,
-				     local_cpu_data->itm_delta);
-		new_itm += ticks * local_cpu_data->itm_delta;
-	}
-
-	if (smp_processor_id() != time_keeper_id)
-		ticks = 0;
-
-	legacy_timer_tick(ticks);
+	if (!time_after(ia64_get_itc(), new_itm))
+		printk(KERN_ERR "Oops: timer tick before it's due (itc=%lx,itm=%lx)\n",
+		       ia64_get_itc(), new_itm);
+
+	while (1) {
+		new_itm += local_cpu_data->itm_delta;
+
+		legacy_timer_tick(smp_processor_id() == time_keeper_id);
+
+		local_cpu_data->itm_next = new_itm;
+
+		if (time_after(new_itm, ia64_get_itc()))
+			break;
+
+		/*
+		 * Allow IPIs to interrupt the timer loop.
+		 */
+		local_irq_enable();
+		local_irq_disable();
+	}

 	do {
 		/*
 		 * If we're too close to the next clock tick for

@@ -51,6 +51,7 @@ extern void kmap_flush_tlb(unsigned long addr);

 #define flush_cache_kmaps()	BUG_ON(cpu_has_dc_aliases)

+#define arch_kmap_local_set_pte(mm, vaddr, ptep, ptev)	set_pte(ptep, ptev)
 #define arch_kmap_local_post_map(vaddr, pteval)	local_flush_tlb_one(vaddr)
 #define arch_kmap_local_post_unmap(vaddr)	local_flush_tlb_one(vaddr)

@@ -31,7 +31,7 @@
 void __iomem *ioremap(phys_addr_t offset, unsigned long size);

 #define iounmap iounmap
-extern void iounmap(void *addr);
+extern void iounmap(void __iomem *addr);

 #include <asm-generic/io.h>

@@ -77,7 +77,7 @@ void __iomem *__ref ioremap(phys_addr_t addr, unsigned long size)
 }
 EXPORT_SYMBOL(ioremap);

-void iounmap(void *addr)
+void iounmap(void __iomem *addr)
 {
 	/* If the page is from the fixmap pool then we just clear out
 	 * the fixmap mapping.

@@ -202,9 +202,8 @@ config PREFETCH
 	depends on PA8X00 || PA7200

 config MLONGCALLS
-	bool "Enable the -mlong-calls compiler option for big kernels"
-	default y if !MODULES || UBSAN || FTRACE
-	default n
+	def_bool y if !MODULES || UBSAN || FTRACE
+	bool "Enable the -mlong-calls compiler option for big kernels" if MODULES && !UBSAN && !FTRACE
 	depends on PA8X00
 	help
 	  If you configure the kernel to include many drivers built-in instead

@@ -47,7 +47,4 @@ extern unsigned long txn_affinity_addr(unsigned int irq, int cpu);
 extern int cpu_claim_irq(unsigned int irq, struct irq_chip *, void *);
 extern int cpu_check_affinity(struct irq_data *d, const struct cpumask *dest);

-/* soft power switch support (power.c) */
-extern struct tasklet_struct power_tasklet;
-
 #endif	/* _ASM_PARISC_IRQ_H */

@@ -997,10 +997,17 @@ intr_do_preempt:
 	bb,<,n	%r20, 31 - PSW_SM_I, intr_restore
 	nop

+	/* ssm PSW_SM_I done later in intr_restore */
+#ifdef CONFIG_MLONGCALLS
+	ldil	L%intr_restore, %r2
+	load32	preempt_schedule_irq, %r1
+	bv	%r0(%r1)
+	ldo	R%intr_restore(%r2), %r2
+#else
+	ldil	L%intr_restore, %r1
 	BL	preempt_schedule_irq, %r2
-	nop
-
-	b,n	intr_restore		/* ssm PSW_SM_I done by intr_restore */
+	ldo	R%intr_restore(%r1), %r2
+#endif
 #endif /* CONFIG_PREEMPTION */

 	/*

@@ -63,6 +63,12 @@
 	nop; \
 	nop;

+#define SCV_ENTRY_FLUSH_SLOT \
+	SCV_ENTRY_FLUSH_FIXUP_SECTION; \
+	nop; \
+	nop; \
+	nop;
+
 /*
  * r10 must be free to use, r13 must be paca
  */
@@ -70,6 +76,13 @@
 	STF_ENTRY_BARRIER_SLOT; \
 	ENTRY_FLUSH_SLOT

+/*
+ * r10, ctr must be free to use, r13 must be paca
+ */
+#define SCV_INTERRUPT_TO_KERNEL \
+	STF_ENTRY_BARRIER_SLOT; \
+	SCV_ENTRY_FLUSH_SLOT
+
 /*
  * Macros for annotating the expected destination of (h)rfid
  *

@@ -240,6 +240,14 @@ label##3:					\
 	FTR_ENTRY_OFFSET 957b-958b; \
 	.popsection;

+#define SCV_ENTRY_FLUSH_FIXUP_SECTION \
+957: \
+	.pushsection __scv_entry_flush_fixup,"a"; \
+	.align 2; \
+958: \
+	FTR_ENTRY_OFFSET 957b-958b; \
+	.popsection;
+
 #define RFI_FLUSH_FIXUP_SECTION \
 951: \
 	.pushsection __rfi_flush_fixup,"a"; \
@@ -273,10 +281,12 @@ label##3:					\

 extern long stf_barrier_fallback;
 extern long entry_flush_fallback;
+extern long scv_entry_flush_fallback;
 extern long __start___stf_entry_barrier_fixup, __stop___stf_entry_barrier_fixup;
 extern long __start___stf_exit_barrier_fixup, __stop___stf_exit_barrier_fixup;
 extern long __start___uaccess_flush_fixup, __stop___uaccess_flush_fixup;
 extern long __start___entry_flush_fixup, __stop___entry_flush_fixup;
+extern long __start___scv_entry_flush_fixup, __stop___scv_entry_flush_fixup;
 extern long __start___rfi_flush_fixup, __stop___rfi_flush_fixup;
 extern long __start___barrier_nospec_fixup, __stop___barrier_nospec_fixup;
 extern long __start__btb_flush_fixup, __stop__btb_flush_fixup;

@@ -58,6 +58,8 @@ extern pte_t *pkmap_page_table;

 #define flush_cache_kmaps()	flush_cache_all()

+#define arch_kmap_local_set_pte(mm, vaddr, ptep, ptev)	\
+	__set_pte_at(mm, vaddr, ptep, ptev, 1)
 #define arch_kmap_local_post_map(vaddr, pteval)	\
 	local_flush_tlb_page(NULL, vaddr)
 #define arch_kmap_local_post_unmap(vaddr)	\

@@ -75,7 +75,7 @@ BEGIN_FTR_SECTION
 	bne	.Ltabort_syscall
 END_FTR_SECTION_IFSET(CPU_FTR_TM)
 #endif
-	INTERRUPT_TO_KERNEL
+	SCV_INTERRUPT_TO_KERNEL
 	mr	r10,r1
 	ld	r1,PACAKSAVE(r13)
 	std	r10,0(r1)

@@ -2993,6 +2993,25 @@ TRAMP_REAL_BEGIN(entry_flush_fallback)
 	ld	r11,PACA_EXRFI+EX_R11(r13)
 	blr

+/*
+ * The SCV entry flush happens with interrupts enabled, so it must disable
+ * to prevent EXRFI being clobbered by NMIs (e.g., soft_nmi_common). r10
+ * (containing LR) does not need to be preserved here because scv entry
+ * puts 0 in the pt_regs, CTR can be clobbered for the same reason.
+ */
+TRAMP_REAL_BEGIN(scv_entry_flush_fallback)
+	li	r10,0
+	mtmsrd	r10,1
+	lbz	r10,PACAIRQHAPPENED(r13)
+	ori	r10,r10,PACA_IRQ_HARD_DIS
+	stb	r10,PACAIRQHAPPENED(r13)
+	std	r11,PACA_EXRFI+EX_R11(r13)
+	L1D_DISPLACEMENT_FLUSH
+	ld	r11,PACA_EXRFI+EX_R11(r13)
+	li	r10,MSR_RI
+	mtmsrd	r10,1
+	blr
+
 TRAMP_REAL_BEGIN(rfi_flush_fallback)
 	SET_SCRATCH0(r13);
 	GET_PACA(r13);

@@ -145,6 +145,13 @@ SECTIONS
 		__stop___entry_flush_fixup = .;
 	}

+	. = ALIGN(8);
+	__scv_entry_flush_fixup : AT(ADDR(__scv_entry_flush_fixup) - LOAD_OFFSET) {
+		__start___scv_entry_flush_fixup = .;
+		*(__scv_entry_flush_fixup)
+		__stop___scv_entry_flush_fixup = .;
+	}
+
 	. = ALIGN(8);
 	__stf_exit_barrier_fixup : AT(ADDR(__stf_exit_barrier_fixup) - LOAD_OFFSET) {
 		__start___stf_exit_barrier_fixup = .;

@@ -290,9 +290,6 @@ void do_entry_flush_fixups(enum l1d_flush_type types)
 	long *start, *end;
 	int i;

-	start = PTRRELOC(&__start___entry_flush_fixup);
-	end = PTRRELOC(&__stop___entry_flush_fixup);
-
 	instrs[0] = 0x60000000; /* nop */
 	instrs[1] = 0x60000000; /* nop */
 	instrs[2] = 0x60000000; /* nop */
@@ -312,6 +309,8 @@ void do_entry_flush_fixups(enum l1d_flush_type types)
 	if (types & L1D_FLUSH_MTTRIG)
 		instrs[i++] = 0x7c12dba6; /* mtspr TRIG2,r0 (SPR #882) */

+	start = PTRRELOC(&__start___entry_flush_fixup);
+	end = PTRRELOC(&__stop___entry_flush_fixup);
 	for (i = 0; start < end; start++, i++) {
 		dest = (void *)start + *start;

@@ -328,6 +327,25 @@ void do_entry_flush_fixups(enum l1d_flush_type types)
 		patch_instruction((struct ppc_inst *)(dest + 2), ppc_inst(instrs[2]));
 	}

+	start = PTRRELOC(&__start___scv_entry_flush_fixup);
+	end = PTRRELOC(&__stop___scv_entry_flush_fixup);
+	for (; start < end; start++, i++) {
+		dest = (void *)start + *start;
+
+		pr_devel("patching dest %lx\n", (unsigned long)dest);
+
+		patch_instruction((struct ppc_inst *)dest, ppc_inst(instrs[0]));
+
+		if (types == L1D_FLUSH_FALLBACK)
+			patch_branch((struct ppc_inst *)(dest + 1), (unsigned long)&scv_entry_flush_fallback,
+				     BRANCH_SET_LINK);
+		else
+			patch_instruction((struct ppc_inst *)(dest + 1), ppc_inst(instrs[1]));
+
+		patch_instruction((struct ppc_inst *)(dest + 2), ppc_inst(instrs[2]));
+	}
+
 	printk(KERN_DEBUG "entry-flush: patched %d locations (%s flush)\n", i,
 		(types == L1D_FLUSH_NONE) ? "no" :
 		(types == L1D_FLUSH_FALLBACK) ? "fallback displacement" :

@@ -29,7 +29,6 @@ config SUPERH
 	select HAVE_ARCH_KGDB
 	select HAVE_ARCH_SECCOMP_FILTER
 	select HAVE_ARCH_TRACEHOOK
-	select HAVE_COPY_THREAD_TLS
 	select HAVE_DEBUG_BUGVERBOSE
 	select HAVE_DEBUG_KMEMLEAK
 	select HAVE_DYNAMIC_FTRACE

@@ -11,7 +11,6 @@
 #include <linux/sched.h>
 #include <linux/time.h>
 #include <linux/bcd.h>
-#include <linux/rtc.h>
 #include <linux/spinlock.h>
 #include <linux/io.h>
 #include <linux/rtc.h>

@@ -27,13 +27,12 @@ CONFIG_NETFILTER=y
 CONFIG_ATALK=m
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_RAM=y
-CONFIG_IDE=y
-CONFIG_BLK_DEV_IDECD=y
-CONFIG_BLK_DEV_OFFBOARD=y
-CONFIG_BLK_DEV_GENERIC=y
-CONFIG_BLK_DEV_AEC62XX=y
+CONFIG_ATA=y
+CONFIG_ATA_GENERIC=y
+CONFIG_PATA_ATP867X=y
 CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
+CONFIG_BLK_DEV_SR=y
 CONFIG_SCSI_MULTI_LUN=y
 CONFIG_MD=y
 CONFIG_BLK_DEV_MD=m


@@ -20,8 +20,6 @@ CONFIG_IP_PNP=y
 # CONFIG_IPV6 is not set
 # CONFIG_FW_LOADER is not set
 CONFIG_BLK_DEV_RAM=y
-CONFIG_IDE=y
-CONFIG_BLK_DEV_IDECD=y
 CONFIG_NETDEVICES=y
 CONFIG_NET_ETHERNET=y
 CONFIG_SMC91X=y


@@ -44,16 +44,14 @@ CONFIG_NET_SCHED=y
 CONFIG_PARPORT=y
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_RAM=y
-CONFIG_IDE=y
-CONFIG_BLK_DEV_IDECD=y
-CONFIG_BLK_DEV_PLATFORM=y
-CONFIG_BLK_DEV_GENERIC=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_BLK_DEV_SR=y
 CONFIG_CHR_DEV_SG=y
 CONFIG_SCSI_SPI_ATTRS=y
 CONFIG_SCSI_FC_ATTRS=y
 CONFIG_ATA=y
+CONFIG_ATA_GENERIC=y
+CONFIG_PATA_PLATFORM=y
 CONFIG_MD=y
 CONFIG_BLK_DEV_DM=y
 CONFIG_NETDEVICES=y


@@ -116,9 +116,6 @@ CONFIG_MTD_UBI_GLUEBI=m
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_CRYPTOLOOP=y
 CONFIG_BLK_DEV_RAM=y
-CONFIG_IDE=y
-CONFIG_BLK_DEV_IDECD=y
-CONFIG_BLK_DEV_PLATFORM=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_BLK_DEV_SR=y
 CONFIG_SCSI_MULTI_LUN=y


@@ -29,7 +29,6 @@ CONFIG_MTD_BLOCK=y
 CONFIG_MTD_CFI=y
 CONFIG_MTD_CFI_AMDSTD=y
 CONFIG_MTD_ROM=y
-CONFIG_IDE=y
 CONFIG_SCSI=y
 CONFIG_NETDEVICES=y
 CONFIG_NET_ETHERNET=y


@@ -39,9 +39,6 @@ CONFIG_IP_PNP_RARP=y
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_NBD=y
 CONFIG_BLK_DEV_RAM=y
-CONFIG_IDE=y
-CONFIG_BLK_DEV_IDECD=m
-CONFIG_BLK_DEV_IDETAPE=m
 CONFIG_SCSI=m
 CONFIG_BLK_DEV_SD=m
 CONFIG_BLK_DEV_SR=m


@@ -63,8 +63,7 @@ config PVR2_DMA
 config G2_DMA
 tristate "G2 Bus DMA support"
-depends on SH_DREAMCAST
-select SH_DMA_API
+depends on SH_DREAMCAST && SH_DMA_API
 help
 This enables support for the DMA controller for the Dreamcast's
 G2 bus. Drivers that want this will generally enable this on


@@ -16,7 +16,6 @@
 #include <cpu/gpio.h>
 #endif
-#define ARCH_NR_GPIOS 512
 #include <asm-generic/gpio.h>
 #ifdef CONFIG_GPIOLIB


@@ -14,7 +14,6 @@
 #include <cpu/mmu_context.h>
 #include <asm/page.h>
 #include <asm/cache.h>
-#include <asm/thread_info.h>
 ! NOTE:
 ! GNU as (as of 2.9.1) changes bf/s into bt/s and bra, when the address


@@ -105,7 +105,7 @@ config VSYSCALL
 (the default value) say Y.
 config NUMA
-bool "Non Uniform Memory Access (NUMA) Support"
+bool "Non-Uniform Memory Access (NUMA) Support"
 depends on MMU && SYS_SUPPORTS_NUMA
 select ARCH_WANT_NUMA_VARIABLE_LOCALITY
 default n


@@ -26,7 +26,7 @@
 #include <asm/processor.h>
 #include <asm/mmu_context.h>
-static int asids_seq_show(struct seq_file *file, void *iter)
+static int asids_debugfs_show(struct seq_file *file, void *iter)
 {
 struct task_struct *p;
@@ -48,18 +48,7 @@ static int asids_seq_show(struct seq_file *file, void *iter)
 return 0;
 }
-static int asids_debugfs_open(struct inode *inode, struct file *file)
-{
-return single_open(file, asids_seq_show, inode->i_private);
-}
-static const struct file_operations asids_debugfs_fops = {
-.owner = THIS_MODULE,
-.open = asids_debugfs_open,
-.read = seq_read,
-.llseek = seq_lseek,
-.release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(asids_debugfs);
 static int __init asids_debugfs_init(void)
 {


@@ -22,7 +22,7 @@ enum cache_type {
 CACHE_TYPE_UNIFIED,
 };
-static int cache_seq_show(struct seq_file *file, void *iter)
+static int cache_debugfs_show(struct seq_file *file, void *iter)
 {
 unsigned int cache_type = (unsigned int)file->private;
 struct cache_info *cache;
@@ -94,18 +94,7 @@ static int cache_seq_show(struct seq_file *file, void *iter)
 return 0;
 }
-static int cache_debugfs_open(struct inode *inode, struct file *file)
-{
-return single_open(file, cache_seq_show, inode->i_private);
-}
-static const struct file_operations cache_debugfs_fops = {
-.owner = THIS_MODULE,
-.open = cache_debugfs_open,
-.read = seq_read,
-.llseek = seq_lseek,
-.release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(cache_debugfs);
 static int __init cache_debugfs_init(void)
 {


@@ -812,7 +812,7 @@ bool __in_29bit_mode(void)
 return (__raw_readl(PMB_PASCR) & PASCR_SE) == 0;
 }
-static int pmb_seq_show(struct seq_file *file, void *iter)
+static int pmb_debugfs_show(struct seq_file *file, void *iter)
 {
 int i;
@@ -846,18 +846,7 @@ static int pmb_seq_show(struct seq_file *file, void *iter)
 return 0;
 }
-static int pmb_debugfs_open(struct inode *inode, struct file *file)
-{
-return single_open(file, pmb_seq_show, NULL);
-}
-static const struct file_operations pmb_debugfs_fops = {
-.owner = THIS_MODULE,
-.open = pmb_debugfs_open,
-.read = seq_read,
-.llseek = seq_lseek,
-.release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(pmb_debugfs);
 static int __init pmb_debugfs_init(void)
 {


@@ -50,10 +50,11 @@ extern pte_t *pkmap_page_table;
 #define flush_cache_kmaps() flush_cache_all()
-/* FIXME: Use __flush_tlb_one(vaddr) instead of flush_cache_all() -- Anton */
-#define arch_kmap_local_post_map(vaddr, pteval) flush_cache_all()
-#define arch_kmap_local_post_unmap(vaddr) flush_cache_all()
+/* FIXME: Use __flush_*_one(vaddr) instead of flush_*_all() -- Anton */
+#define arch_kmap_local_pre_map(vaddr, pteval) flush_cache_all()
+#define arch_kmap_local_pre_unmap(vaddr) flush_cache_all()
+#define arch_kmap_local_post_map(vaddr, pteval) flush_tlb_all()
+#define arch_kmap_local_post_unmap(vaddr) flush_tlb_all()
 #endif /* __KERNEL__ */


@@ -73,10 +73,8 @@ static __always_inline void do_syscall_32_irqs_on(struct pt_regs *regs,
 unsigned int nr)
 {
 if (likely(nr < IA32_NR_syscalls)) {
-instrumentation_begin();
 nr = array_index_nospec(nr, IA32_NR_syscalls);
 regs->ax = ia32_sys_call_table[nr](regs);
-instrumentation_end();
 }
 }
@@ -91,8 +89,11 @@ __visible noinstr void do_int80_syscall_32(struct pt_regs *regs)
 * or may not be necessary, but it matches the old asm behavior.
 */
 nr = (unsigned int)syscall_enter_from_user_mode(regs, nr);
+instrumentation_begin();
 do_syscall_32_irqs_on(regs, nr);
+instrumentation_end();
 syscall_exit_to_user_mode(regs);
 }
@@ -121,11 +122,12 @@ static noinstr bool __do_fast_syscall_32(struct pt_regs *regs)
 res = get_user(*(u32 *)&regs->bp,
 (u32 __user __force *)(unsigned long)(u32)regs->sp);
 }
-instrumentation_end();
 if (res) {
 /* User code screwed up. */
 regs->ax = -EFAULT;
+instrumentation_end();
 syscall_exit_to_user_mode(regs);
 return false;
 }
@@ -135,6 +137,8 @@ static noinstr bool __do_fast_syscall_32(struct pt_regs *regs)
 /* Now this is just like a normal syscall. */
 do_syscall_32_irqs_on(regs, nr);
+instrumentation_end();
 syscall_exit_to_user_mode(regs);
 return true;
 }


@@ -16,14 +16,25 @@
 * Use kernel_fpu_begin/end() if you intend to use FPU in kernel context. It
 * disables preemption so be careful if you intend to use it for long periods
 * of time.
- * If you intend to use the FPU in softirq you need to check first with
+ * If you intend to use the FPU in irq/softirq you need to check first with
 * irq_fpu_usable() if it is possible.
 */
-extern void kernel_fpu_begin(void);
+/* Kernel FPU states to initialize in kernel_fpu_begin_mask() */
+#define KFPU_387 _BITUL(0) /* 387 state will be initialized */
+#define KFPU_MXCSR _BITUL(1) /* MXCSR will be initialized */
+extern void kernel_fpu_begin_mask(unsigned int kfpu_mask);
 extern void kernel_fpu_end(void);
 extern bool irq_fpu_usable(void);
 extern void fpregs_mark_activate(void);
+/* Code that is unaware of kernel_fpu_begin_mask() can use this */
+static inline void kernel_fpu_begin(void)
+{
+kernel_fpu_begin_mask(KFPU_387 | KFPU_MXCSR);
+}
 /*
 * Use fpregs_lock() while editing CPU's FPU registers or fpu->state.
 * A context switch will (and softirq might) save CPU's FPU registers to


@@ -613,6 +613,7 @@ DECLARE_IDTENTRY_VC(X86_TRAP_VC, exc_vmm_communication);
 #ifdef CONFIG_XEN_PV
 DECLARE_IDTENTRY_XENCB(X86_TRAP_OTHER, exc_xen_hypervisor_callback);
+DECLARE_IDTENTRY_RAW(X86_TRAP_OTHER, exc_xen_unknown_trap);
 #endif
 /* Device interrupts common/spurious */


@@ -97,6 +97,7 @@
 #define INTEL_FAM6_LAKEFIELD 0x8A
 #define INTEL_FAM6_ALDERLAKE 0x97
+#define INTEL_FAM6_ALDERLAKE_L 0x9A
 /* "Small Core" Processors (Atom) */


@@ -86,7 +86,7 @@ static inline void do_trace_rdpmc(unsigned int msr, u64 val, int failed) {}
 * think of extending them - you will be slapped with a stinking trout or a frozen
 * shark will reach you, wherever you are! You've been warned.
 */
-static inline unsigned long long notrace __rdmsr(unsigned int msr)
+static __always_inline unsigned long long __rdmsr(unsigned int msr)
 {
 DECLARE_ARGS(val, low, high);
@@ -98,7 +98,7 @@ static inline unsigned long long notrace __rdmsr(unsigned int msr)
 return EAX_EDX_VAL(val, low, high);
 }
-static inline void notrace __wrmsr(unsigned int msr, u32 low, u32 high)
+static __always_inline void __wrmsr(unsigned int msr, u32 low, u32 high)
 {
 asm volatile("1: wrmsr\n"
 "2:\n"


@@ -110,6 +110,8 @@ extern const struct cpumask *cpu_coregroup_mask(int cpu);
 #define topology_die_id(cpu) (cpu_data(cpu).cpu_die_id)
 #define topology_core_id(cpu) (cpu_data(cpu).cpu_core_id)
+extern unsigned int __max_die_per_package;
 #ifdef CONFIG_SMP
 #define topology_die_cpumask(cpu) (per_cpu(cpu_die_map, cpu))
 #define topology_core_cpumask(cpu) (per_cpu(cpu_core_map, cpu))
@@ -118,8 +120,6 @@ extern const struct cpumask *cpu_coregroup_mask(int cpu);
 extern unsigned int __max_logical_packages;
 #define topology_max_packages() (__max_logical_packages)
-extern unsigned int __max_die_per_package;
 static inline int topology_max_die_per_package(void)
 {
 return __max_die_per_package;


@@ -542,12 +542,12 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
 u32 ecx;
 ecx = cpuid_ecx(0x8000001e);
-nodes_per_socket = ((ecx >> 8) & 7) + 1;
+__max_die_per_package = nodes_per_socket = ((ecx >> 8) & 7) + 1;
 } else if (boot_cpu_has(X86_FEATURE_NODEID_MSR)) {
 u64 value;
 rdmsrl(MSR_FAM10H_NODE_ID, value);
-nodes_per_socket = ((value >> 3) & 7) + 1;
+__max_die_per_package = nodes_per_socket = ((value >> 3) & 7) + 1;
 }
 if (!boot_cpu_has(X86_FEATURE_AMD_SSBD) &&


@@ -1992,10 +1992,9 @@ static __always_inline void exc_machine_check_kernel(struct pt_regs *regs)
 * that out because it's an indirect call. Annotate it.
 */
 instrumentation_begin();
-trace_hardirqs_off_finish();
 machine_check_vector(regs);
-if (regs->flags & X86_EFLAGS_IF)
-trace_hardirqs_on_prepare();
 instrumentation_end();
 irqentry_nmi_exit(regs, irq_state);
 }
@@ -2004,7 +2003,9 @@ static __always_inline void exc_machine_check_user(struct pt_regs *regs)
 {
 irqentry_enter_from_user_mode(regs);
 instrumentation_begin();
 machine_check_vector(regs);
 instrumentation_end();
 irqentry_exit_to_user_mode(regs);
 }


@@ -25,10 +25,10 @@
 #define BITS_SHIFT_NEXT_LEVEL(eax) ((eax) & 0x1f)
 #define LEVEL_MAX_SIBLINGS(ebx) ((ebx) & 0xffff)
-#ifdef CONFIG_SMP
 unsigned int __max_die_per_package __read_mostly = 1;
 EXPORT_SYMBOL(__max_die_per_package);
+#ifdef CONFIG_SMP
 /*
 * Check if given CPUID extended toplogy "leaf" is implemented
 */


@@ -121,7 +121,7 @@ int copy_fpregs_to_fpstate(struct fpu *fpu)
 }
 EXPORT_SYMBOL(copy_fpregs_to_fpstate);
-void kernel_fpu_begin(void)
+void kernel_fpu_begin_mask(unsigned int kfpu_mask)
 {
 preempt_disable();
@@ -141,13 +141,14 @@ void kernel_fpu_begin(void)
 }
 __cpu_invalidate_fpregs_state();
-if (boot_cpu_has(X86_FEATURE_XMM))
+/* Put sane initial values into the control registers. */
+if (likely(kfpu_mask & KFPU_MXCSR) && boot_cpu_has(X86_FEATURE_XMM))
 ldmxcsr(MXCSR_DEFAULT);
-if (boot_cpu_has(X86_FEATURE_FPU))
+if (unlikely(kfpu_mask & KFPU_387) && boot_cpu_has(X86_FEATURE_FPU))
 asm volatile ("fninit");
 }
-EXPORT_SYMBOL_GPL(kernel_fpu_begin);
+EXPORT_SYMBOL_GPL(kernel_fpu_begin_mask);
 void kernel_fpu_end(void)
 {


@@ -660,17 +660,6 @@ static void __init trim_platform_memory_ranges(void)
 static void __init trim_bios_range(void)
 {
-/*
- * A special case is the first 4Kb of memory;
- * This is a BIOS owned area, not kernel ram, but generally
- * not listed as such in the E820 table.
- *
- * This typically reserves additional memory (64KiB by default)
- * since some BIOSes are known to corrupt low memory. See the
- * Kconfig help text for X86_RESERVE_LOW.
- */
-e820__range_update(0, PAGE_SIZE, E820_TYPE_RAM, E820_TYPE_RESERVED);
 /*
 * special case: Some BIOSes report the PC BIOS
 * area (640Kb -> 1Mb) as RAM even though it is not.
@@ -728,6 +717,15 @@ early_param("reservelow", parse_reservelow);
 static void __init trim_low_memory_range(void)
 {
+/*
+ * A special case is the first 4Kb of memory;
+ * This is a BIOS owned area, not kernel ram, but generally
+ * not listed as such in the E820 table.
+ *
+ * This typically reserves additional memory (64KiB by default)
+ * since some BIOSes are known to corrupt low memory. See the
+ * Kconfig help text for X86_RESERVE_LOW.
+ */
 memblock_reserve(0, ALIGN(reserve_low, PAGE_SIZE));
 }


@@ -225,7 +225,7 @@ static inline u64 sev_es_rd_ghcb_msr(void)
 return __rdmsr(MSR_AMD64_SEV_ES_GHCB);
 }
-static inline void sev_es_wr_ghcb_msr(u64 val)
+static __always_inline void sev_es_wr_ghcb_msr(u64 val)
 {
 u32 low, high;
@@ -286,6 +286,12 @@ static enum es_result vc_write_mem(struct es_em_ctxt *ctxt,
 u16 d2;
 u8 d1;
+/* If instruction ran in kernel mode and the I/O buffer is in kernel space */
+if (!user_mode(ctxt->regs) && !access_ok(target, size)) {
+memcpy(dst, buf, size);
+return ES_OK;
+}
 switch (size) {
 case 1:
 memcpy(&d1, buf, 1);
@@ -335,6 +341,12 @@ static enum es_result vc_read_mem(struct es_em_ctxt *ctxt,
 u16 d2;
 u8 d1;
+/* If instruction ran in kernel mode and the I/O buffer is in kernel space */
+if (!user_mode(ctxt->regs) && !access_ok(s, size)) {
+memcpy(buf, src, size);
+return ES_OK;
+}
 switch (size) {
 case 1:
 if (get_user(d1, s))


@@ -56,6 +56,7 @@
 #include <linux/numa.h>
 #include <linux/pgtable.h>
 #include <linux/overflow.h>
+#include <linux/syscore_ops.h>
 #include <asm/acpi.h>
 #include <asm/desc.h>
@@ -2083,6 +2084,23 @@ static void init_counter_refs(void)
 this_cpu_write(arch_prev_mperf, mperf);
 }
+#ifdef CONFIG_PM_SLEEP
+static struct syscore_ops freq_invariance_syscore_ops = {
+.resume = init_counter_refs,
+};
+static void register_freq_invariance_syscore_ops(void)
+{
+/* Bail out if registered already. */
+if (freq_invariance_syscore_ops.node.prev)
+return;
+register_syscore_ops(&freq_invariance_syscore_ops);
+}
+#else
+static inline void register_freq_invariance_syscore_ops(void) {}
+#endif
 static void init_freq_invariance(bool secondary, bool cppc_ready)
 {
 bool ret = false;
@@ -2109,6 +2127,7 @@ static void init_freq_invariance(bool secondary, bool cppc_ready)
 if (ret) {
 init_counter_refs();
 static_branch_enable(&arch_scale_freq_key);
+register_freq_invariance_syscore_ops();
 pr_info("Estimated ratio of average max frequency by base frequency (times 1024): %llu\n", arch_max_freq_ratio);
 } else {
 pr_debug("Couldn't determine max cpu frequency, necessary for scale-invariant accounting.\n");


@@ -9,6 +9,34 @@
 (X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR \
 | X86_CR4_OSXMMEXCPT | X86_CR4_PGE | X86_CR4_TSD | X86_CR4_FSGSBASE)
+#define BUILD_KVM_GPR_ACCESSORS(lname, uname) \
+static __always_inline unsigned long kvm_##lname##_read(struct kvm_vcpu *vcpu)\
+{ \
+return vcpu->arch.regs[VCPU_REGS_##uname]; \
+} \
+static __always_inline void kvm_##lname##_write(struct kvm_vcpu *vcpu, \
+unsigned long val) \
+{ \
+vcpu->arch.regs[VCPU_REGS_##uname] = val; \
+}
+BUILD_KVM_GPR_ACCESSORS(rax, RAX)
+BUILD_KVM_GPR_ACCESSORS(rbx, RBX)
+BUILD_KVM_GPR_ACCESSORS(rcx, RCX)
+BUILD_KVM_GPR_ACCESSORS(rdx, RDX)
+BUILD_KVM_GPR_ACCESSORS(rbp, RBP)
+BUILD_KVM_GPR_ACCESSORS(rsi, RSI)
+BUILD_KVM_GPR_ACCESSORS(rdi, RDI)
+#ifdef CONFIG_X86_64
+BUILD_KVM_GPR_ACCESSORS(r8, R8)
+BUILD_KVM_GPR_ACCESSORS(r9, R9)
+BUILD_KVM_GPR_ACCESSORS(r10, R10)
+BUILD_KVM_GPR_ACCESSORS(r11, R11)
+BUILD_KVM_GPR_ACCESSORS(r12, R12)
+BUILD_KVM_GPR_ACCESSORS(r13, R13)
+BUILD_KVM_GPR_ACCESSORS(r14, R14)
+BUILD_KVM_GPR_ACCESSORS(r15, R15)
+#endif
 static inline bool kvm_register_is_available(struct kvm_vcpu *vcpu,
 enum kvm_reg reg)
 {
@@ -34,35 +62,6 @@ static inline void kvm_register_mark_dirty(struct kvm_vcpu *vcpu,
 __set_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
 }
-#define BUILD_KVM_GPR_ACCESSORS(lname, uname) \
-static __always_inline unsigned long kvm_##lname##_read(struct kvm_vcpu *vcpu)\
-{ \
-return vcpu->arch.regs[VCPU_REGS_##uname]; \
-} \
-static __always_inline void kvm_##lname##_write(struct kvm_vcpu *vcpu, \
-unsigned long val) \
-{ \
-vcpu->arch.regs[VCPU_REGS_##uname] = val; \
-kvm_register_mark_dirty(vcpu, VCPU_REGS_##uname); \
-}
-BUILD_KVM_GPR_ACCESSORS(rax, RAX)
-BUILD_KVM_GPR_ACCESSORS(rbx, RBX)
-BUILD_KVM_GPR_ACCESSORS(rcx, RCX)
-BUILD_KVM_GPR_ACCESSORS(rdx, RDX)
-BUILD_KVM_GPR_ACCESSORS(rbp, RBP)
-BUILD_KVM_GPR_ACCESSORS(rsi, RSI)
-BUILD_KVM_GPR_ACCESSORS(rdi, RDI)
-#ifdef CONFIG_X86_64
-BUILD_KVM_GPR_ACCESSORS(r8, R8)
-BUILD_KVM_GPR_ACCESSORS(r9, R9)
-BUILD_KVM_GPR_ACCESSORS(r10, R10)
-BUILD_KVM_GPR_ACCESSORS(r11, R11)
-BUILD_KVM_GPR_ACCESSORS(r12, R12)
-BUILD_KVM_GPR_ACCESSORS(r13, R13)
-BUILD_KVM_GPR_ACCESSORS(r14, R14)
-BUILD_KVM_GPR_ACCESSORS(r15, R15)
-#endif
 static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu, int reg)
 {
 if (WARN_ON_ONCE((unsigned int)reg >= NR_VCPU_REGS))


@@ -44,8 +44,15 @@
 #define PT32_ROOT_LEVEL 2
 #define PT32E_ROOT_LEVEL 3
-static inline u64 rsvd_bits(int s, int e)
+static __always_inline u64 rsvd_bits(int s, int e)
 {
+BUILD_BUG_ON(__builtin_constant_p(e) && __builtin_constant_p(s) && e < s);
+if (__builtin_constant_p(e))
+BUILD_BUG_ON(e > 63);
+else
+e &= 63;
 if (e < s)
 return 0;


@@ -200,6 +200,9 @@ static bool svm_get_nested_state_pages(struct kvm_vcpu *vcpu)
 {
 struct vcpu_svm *svm = to_svm(vcpu);
+if (WARN_ON(!is_guest_mode(vcpu)))
+return true;
 if (!nested_svm_vmrun_msrpm(svm)) {
 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 vcpu->run->internal.suberror =


@@ -1415,16 +1415,13 @@ static void sev_es_sync_to_ghcb(struct vcpu_svm *svm)
 * to be returned:
 * GPRs RAX, RBX, RCX, RDX
 *
- * Copy their values to the GHCB if they are dirty.
+ * Copy their values, even if they may not have been written during the
+ * VM-Exit. It's the guest's responsibility to not consume random data.
 */
-if (kvm_register_is_dirty(vcpu, VCPU_REGS_RAX))
-ghcb_set_rax(ghcb, vcpu->arch.regs[VCPU_REGS_RAX]);
-if (kvm_register_is_dirty(vcpu, VCPU_REGS_RBX))
-ghcb_set_rbx(ghcb, vcpu->arch.regs[VCPU_REGS_RBX]);
-if (kvm_register_is_dirty(vcpu, VCPU_REGS_RCX))
-ghcb_set_rcx(ghcb, vcpu->arch.regs[VCPU_REGS_RCX]);
-if (kvm_register_is_dirty(vcpu, VCPU_REGS_RDX))
-ghcb_set_rdx(ghcb, vcpu->arch.regs[VCPU_REGS_RDX]);
+ghcb_set_rax(ghcb, vcpu->arch.regs[VCPU_REGS_RAX]);
+ghcb_set_rbx(ghcb, vcpu->arch.regs[VCPU_REGS_RBX]);
+ghcb_set_rcx(ghcb, vcpu->arch.regs[VCPU_REGS_RCX]);
+ghcb_set_rdx(ghcb, vcpu->arch.regs[VCPU_REGS_RDX]);
 }
 static void sev_es_sync_from_ghcb(struct vcpu_svm *svm)


@@ -3739,6 +3739,8 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
 {
 struct vcpu_svm *svm = to_svm(vcpu);
+trace_kvm_entry(vcpu);
 svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
 svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
 svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];


@@ -3124,13 +3124,9 @@ static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu)
 return 0;
 }
-static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
+static bool nested_get_evmcs_page(struct kvm_vcpu *vcpu)
 {
-struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
 struct vcpu_vmx *vmx = to_vmx(vcpu);
-struct kvm_host_map *map;
-struct page *page;
-u64 hpa;
 /*
 * hv_evmcs may end up being not mapped after migration (when
@@ -3153,6 +3149,17 @@ static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
 }
 }
+return true;
+}
+static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
+{
+struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+struct vcpu_vmx *vmx = to_vmx(vcpu);
+struct kvm_host_map *map;
+struct page *page;
+u64 hpa;
 if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
 /*
 * Translate L1 physical address to host physical
@@ -3221,6 +3228,18 @@ static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
 exec_controls_setbit(vmx, CPU_BASED_USE_MSR_BITMAPS);
 else
 exec_controls_clearbit(vmx, CPU_BASED_USE_MSR_BITMAPS);
+return true;
+}
+static bool vmx_get_nested_state_pages(struct kvm_vcpu *vcpu)
+{
+if (!nested_get_evmcs_page(vcpu))
+return false;
+if (is_guest_mode(vcpu) && !nested_get_vmcs12_pages(vcpu))
+return false;
 return true;
 }
@@ -6077,11 +6096,14 @@ static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
 if (is_guest_mode(vcpu)) {
 sync_vmcs02_to_vmcs12(vcpu, vmcs12);
 sync_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
-} else if (!vmx->nested.need_vmcs12_to_shadow_sync) {
-if (vmx->nested.hv_evmcs)
-copy_enlightened_to_vmcs12(vmx);
-else if (enable_shadow_vmcs)
-copy_shadow_to_vmcs12(vmx);
+} else {
+copy_vmcs02_to_vmcs12_rare(vcpu, get_vmcs12(vcpu));
+if (!vmx->nested.need_vmcs12_to_shadow_sync) {
+if (vmx->nested.hv_evmcs)
+copy_enlightened_to_vmcs12(vmx);
+else if (enable_shadow_vmcs)
+copy_shadow_to_vmcs12(vmx);
+}
 }
 BUILD_BUG_ON(sizeof(user_vmx_nested_state->vmcs12) < VMCS12_SIZE);
@@ -6602,7 +6624,7 @@ struct kvm_x86_nested_ops vmx_nested_ops = {
 .hv_timer_pending = nested_vmx_preemption_timer_pending,
 .get_state = vmx_get_nested_state,
 .set_state = vmx_set_nested_state,
-.get_nested_state_pages = nested_get_vmcs12_pages,
+.get_nested_state_pages = vmx_get_nested_state_pages,
 .write_log_dirty = nested_vmx_write_pml_buffer,
 .enable_evmcs = nested_enable_evmcs,
 .get_evmcs_version = nested_get_evmcs_version,


@@ -29,7 +29,7 @@ static struct kvm_event_hw_type_mapping intel_arch_events[] = {
 [4] = { 0x2e, 0x41, PERF_COUNT_HW_CACHE_MISSES },
 [5] = { 0xc4, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
 [6] = { 0xc5, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
-[7] = { 0x00, 0x30, PERF_COUNT_HW_REF_CPU_CYCLES },
+[7] = { 0x00, 0x03, PERF_COUNT_HW_REF_CPU_CYCLES },
 };
 /* mapping between fixed pmc index and intel_arch_events array */
@@ -345,7 +345,9 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
 pmu->nr_arch_gp_counters = min_t(int, eax.split.num_counters,
 x86_pmu.num_counters_gp);
+eax.split.bit_width = min_t(int, eax.split.bit_width, x86_pmu.bit_width_gp);
 pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << eax.split.bit_width) - 1;
+eax.split.mask_length = min_t(int, eax.split.mask_length, x86_pmu.events_mask_len);
 pmu->available_event_types = ~entry->ebx &
 ((1ull << eax.split.mask_length) - 1);
@@ -355,6 +357,8 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
 pmu->nr_arch_fixed_counters =
 min_t(int, edx.split.num_counters_fixed,
 x86_pmu.num_counters_fixed);
+edx.split.bit_width_fixed = min_t(int,
+edx.split.bit_width_fixed, x86_pmu.bit_width_fixed);
 pmu->counter_bitmask[KVM_PMC_FIXED] =
 ((u64)1 << edx.split.bit_width_fixed) - 1;
 }


@@ -6653,6 +6653,8 @@ reenter_guest:
 if (vmx->emulation_required)
 return EXIT_FASTPATH_NONE;
+trace_kvm_entry(vcpu);
 if (vmx->ple_window_dirty) {
 vmx->ple_window_dirty = false;
 vmcs_write32(PLE_WINDOW, vmx->ple_window);


@@ -105,6 +105,7 @@ static u64 __read_mostly cr4_reserved_bits = CR4_RESERVED_BITS;
 static void update_cr8_intercept(struct kvm_vcpu *vcpu);
 static void process_nmi(struct kvm_vcpu *vcpu);
+static void process_smi(struct kvm_vcpu *vcpu);
 static void enter_smm(struct kvm_vcpu *vcpu);
 static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
 static void store_regs(struct kvm_vcpu *vcpu);
@@ -4230,6 +4231,9 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
 {
 process_nmi(vcpu);
+if (kvm_check_request(KVM_REQ_SMI, vcpu))
+process_smi(vcpu);
 /*
 * In guest mode, payload delivery should be deferred,
 * so that the L1 hypervisor can intercept #PF before
@@ -8802,9 +8806,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 if (kvm_request_pending(vcpu)) {
 if (kvm_check_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu)) {
-if (WARN_ON_ONCE(!is_guest_mode(vcpu)))
-;
-else if (unlikely(!kvm_x86_ops.nested_ops->get_nested_state_pages(vcpu))) {
+if (unlikely(!kvm_x86_ops.nested_ops->get_nested_state_pages(vcpu))) {
 r = 0;
 goto out;
 }
@@ -8988,8 +8990,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 kvm_x86_ops.request_immediate_exit(vcpu);
 }
-trace_kvm_entry(vcpu);
 fpregs_assert_state_consistent();
 if (test_thread_flag(TIF_NEED_FPU_LOAD))
 switch_fpu_return();
@@ -11556,6 +11556,7 @@ int kvm_sev_es_string_io(struct kvm_vcpu *vcpu, unsigned int size,
 }
 EXPORT_SYMBOL_GPL(kvm_sev_es_string_io);
+EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_entry);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_fast_mmio);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq);


@@ -26,6 +26,16 @@
 #include <asm/fpu/api.h>
 #include <asm/asm.h>
+/*
+ * Use KFPU_387. MMX instructions are not affected by MXCSR,
+ * but both AMD and Intel documentation states that even integer MMX
+ * operations will result in #MF if an exception is pending in FCW.
+ *
+ * EMMS is not needed afterwards because, after calling kernel_fpu_end(),
+ * any subsequent user of the 387 stack will reinitialize it using
+ * KFPU_387.
+ */
 void *_mmx_memcpy(void *to, const void *from, size_t len)
 {
 void *p;
@@ -37,7 +47,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
 p = to;
 i = len >> 6; /* len/64 */
-kernel_fpu_begin();
+kernel_fpu_begin_mask(KFPU_387);
 __asm__ __volatile__ (
 "1: prefetch (%0)\n" /* This set is 28 bytes */
@@ -127,7 +137,7 @@ static void fast_clear_page(void *page)
 {
 int i;
-kernel_fpu_begin();
+kernel_fpu_begin_mask(KFPU_387);
 __asm__ __volatile__ (
 " pxor %%mm0, %%mm0\n" : :
@@ -160,7 +170,7 @@ static void fast_copy_page(void *to, void *from)
 {
 int i;
-kernel_fpu_begin();
+kernel_fpu_begin_mask(KFPU_387);
 /*
 * maybe the prefetch stuff can go before the expensive fnsave...
@@ -247,7 +257,7 @@ static void fast_clear_page(void *page)
 {
 int i;
-kernel_fpu_begin();
+kernel_fpu_begin_mask(KFPU_387);
 __asm__ __volatile__ (
 " pxor %%mm0, %%mm0\n" : :
@@ -282,7 +292,7 @@ static void fast_copy_page(void *to, void *from)
 {
 int i;
-kernel_fpu_begin();
+kernel_fpu_begin_mask(KFPU_387);
 __asm__ __volatile__ (
 "1: prefetch (%0)\n"


@@ -583,6 +583,13 @@ DEFINE_IDTENTRY_RAW(xenpv_exc_debug)
 exc_debug(regs);
 }
+DEFINE_IDTENTRY_RAW(exc_xen_unknown_trap)
+{
+/* This should never happen and there is no way to handle it. */
+pr_err("Unknown trap in Xen PV mode.");
+BUG();
+}
 struct trap_array_entry {
 void (*orig)(void);
 void (*xen)(void);
@@ -631,6 +638,7 @@ static bool __ref get_trap_addr(void **addr, unsigned int ist)
 {
 unsigned int nr;
 bool ist_okay = false;
+bool found = false;
 /*
 * Replace trap handler addresses by Xen specific ones.
@@ -645,6 +653,7 @@ static bool __ref get_trap_addr(void **addr, unsigned int ist)
 if (*addr == entry->orig) {
 *addr = entry->xen;
 ist_okay = entry->ist_okay;
+found = true;
 break;
 }
 }
@@ -655,9 +664,13 @@ static bool __ref get_trap_addr(void **addr, unsigned int ist)
 nr = (*addr - (void *)early_idt_handler_array[0]) /
 EARLY_IDT_HANDLER_SIZE;
 *addr = (void *)xen_early_idt_handler_array[nr];
+found = true;
 }
-if (WARN_ON(ist != 0 && !ist_okay))
+if (!found)
+*addr = (void *)xen_asm_exc_xen_unknown_trap;
+if (WARN_ON(found && ist != 0 && !ist_okay))
 return false;
 return true;


@@ -178,6 +178,7 @@ xen_pv_trap asm_exc_simd_coprocessor_error
 #ifdef CONFIG_IA32_EMULATION
 xen_pv_trap entry_INT80_compat
 #endif
+xen_pv_trap asm_exc_xen_unknown_trap
 xen_pv_trap asm_exc_xen_hypervisor_callback
 __INIT


@@ -586,6 +586,8 @@ static int acpi_get_device_data(acpi_handle handle, struct acpi_device **device,
 if (!device)
 return -EINVAL;
+*device = NULL;
 status = acpi_get_data_full(handle, acpi_scan_drop_device,
 (void **)device, callback);
 if (ACPI_FAILURE(status) || !*device) {


@@ -208,6 +208,16 @@ int device_links_read_lock_held(void)
 #endif
 #endif /* !CONFIG_SRCU */
+static bool device_is_ancestor(struct device *dev, struct device *target)
+{
+while (target->parent) {
+target = target->parent;
+if (dev == target)
+return true;
+}
+return false;
+}
 /**
 * device_is_dependent - Check if one device depends on another one
 * @dev: Device to check dependencies for.
@@ -221,7 +231,12 @@ int device_is_dependent(struct device *dev, void *target)
 struct device_link *link;
 int ret;
-if (dev == target)
+/*
+ * The "ancestors" check is needed to catch the case when the target
+ * device has not been completely initialized yet and it is still
+ * missing from the list of children of its parent device.
+ */
+if (dev == target || device_is_ancestor(dev, target))
 return 1;
 ret = device_for_each_child(dev, target, device_is_dependent);
@@ -456,7 +471,9 @@ static int devlink_add_symlinks(struct device *dev,
 struct device *con = link->consumer;
 char *buf;
-len = max(strlen(dev_name(sup)), strlen(dev_name(con)));
+len = max(strlen(dev_bus_name(sup)) + strlen(dev_name(sup)),
+strlen(dev_bus_name(con)) + strlen(dev_name(con)));
+len += strlen(":");
 len += strlen("supplier:") + 1;
 buf = kzalloc(len, GFP_KERNEL);
 if (!buf)
@@ -470,12 +487,12 @@ static int devlink_add_symlinks(struct device *dev,
 if (ret)
 goto err_con;
-snprintf(buf, len, "consumer:%s", dev_name(con));
+snprintf(buf, len, "consumer:%s:%s", dev_bus_name(con), dev_name(con));
 ret = sysfs_create_link(&sup->kobj, &link->link_dev.kobj, buf);
 if (ret)
 goto err_con_dev;
-snprintf(buf, len, "supplier:%s", dev_name(sup));
+snprintf(buf, len, "supplier:%s:%s", dev_bus_name(sup), dev_name(sup));
 ret = sysfs_create_link(&con->kobj, &link->link_dev.kobj, buf);
 if (ret)
 goto err_sup_dev;
@@ -483,7 +500,7 @@ static int devlink_add_symlinks(struct device *dev,
 goto out;
 err_sup_dev:
-snprintf(buf, len, "consumer:%s", dev_name(con));
+snprintf(buf, len, "consumer:%s:%s", dev_bus_name(con), dev_name(con));
 sysfs_remove_link(&sup->kobj, buf);
 err_con_dev:
 sysfs_remove_link(&link->link_dev.kobj, "consumer");
@@ -506,7 +523,9 @@ static void devlink_remove_symlinks(struct device *dev,
 sysfs_remove_link(&link->link_dev.kobj, "consumer");
 sysfs_remove_link(&link->link_dev.kobj, "supplier");
-len = max(strlen(dev_name(sup)), strlen(dev_name(con)));
+len = max(strlen(dev_bus_name(sup)) + strlen(dev_name(sup)),
+strlen(dev_bus_name(con)) + strlen(dev_name(con)));
+len += strlen(":");
 len += strlen("supplier:") + 1;
 buf = kzalloc(len, GFP_KERNEL);
 if (!buf) {
@@ -514,9 +533,9 @@ static void devlink_remove_symlinks(struct device *dev,
 return;
 }
-snprintf(buf, len, "supplier:%s", dev_name(sup));
+snprintf(buf, len, "supplier:%s:%s", dev_bus_name(sup), dev_name(sup));
 sysfs_remove_link(&con->kobj, buf);
-snprintf(buf, len, "consumer:%s", dev_name(con));
+snprintf(buf, len, "consumer:%s:%s", dev_bus_name(con), dev_name(con));
 sysfs_remove_link(&sup->kobj, buf);
 kfree(buf);
 }
@@ -737,8 +756,9 @@ struct device_link *device_link_add(struct device *consumer,
 link->link_dev.class = &devlink_class;
 device_set_pm_not_required(&link->link_dev);
-dev_set_name(&link->link_dev, "%s--%s",
-dev_name(supplier), dev_name(consumer));
+dev_set_name(&link->link_dev, "%s:%s--%s:%s",
+dev_bus_name(supplier), dev_name(supplier),
+dev_bus_name(consumer), dev_name(consumer));
 if (device_register(&link->link_dev)) {
 put_device(consumer);
 put_device(supplier);
@@ -1808,9 +1828,7 @@ const char *dev_driver_string(const struct device *dev)
 * never change once they are set, so they don't need special care.
 */
 drv = READ_ONCE(dev->driver);
-return drv ? drv->name :
-(dev->bus ? dev->bus->name :
-(dev->class ? dev->class->name : ""));
+return drv ? drv->name : dev_bus_name(dev);
 }
 EXPORT_SYMBOL(dev_driver_string);


@@ -370,13 +370,6 @@ static void driver_bound(struct device *dev)
 device_pm_check_callbacks(dev);
-/*
- * Reorder successfully probed devices to the end of the device list.
- * This ensures that suspend/resume order matches probe order, which
- * is usually what drivers rely on.
- */
-device_pm_move_to_tail(dev);
 /*
 * Make sure the device is no longer in one of the deferred lists and
 * kick off retrying all pending devices
@@ -619,6 +612,8 @@ dev_groups_failed:
 else if (drv->remove)
 drv->remove(dev);
 probe_failed:
+kfree(dev->dma_range_map);
+dev->dma_range_map = NULL;
 if (dev->bus)
 blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
 BUS_NOTIFY_DRIVER_NOT_BOUND, dev);


@@ -366,6 +366,8 @@ int devm_platform_get_irqs_affinity(struct platform_device *dev,
 return -ERANGE;
 nvec = platform_irq_count(dev);
+if (nvec < 0)
+return nvec;
 if (nvec < minvec)
 return -ENOSPC;


@@ -945,7 +945,8 @@ static void blkif_set_queue_limits(struct blkfront_info *info)
 if (info->feature_discard) {
 blk_queue_flag_set(QUEUE_FLAG_DISCARD, rq);
 blk_queue_max_discard_sectors(rq, get_capacity(gd));
-rq->limits.discard_granularity = info->discard_granularity;
+rq->limits.discard_granularity = info->discard_granularity ?:
+info->physical_sector_size;
 rq->limits.discard_alignment = info->discard_alignment;
 if (info->feature_secdiscard)
 blk_queue_flag_set(QUEUE_FLAG_SECERASE, rq);
@@ -2179,19 +2180,12 @@ static void blkfront_closing(struct blkfront_info *info)
 static void blkfront_setup_discard(struct blkfront_info *info)
 {
-int err;
-unsigned int discard_granularity;
-unsigned int discard_alignment;
 info->feature_discard = 1;
-err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
-"discard-granularity", "%u", &discard_granularity,
-"discard-alignment", "%u", &discard_alignment,
-NULL);
-if (!err) {
-info->discard_granularity = discard_granularity;
-info->discard_alignment = discard_alignment;
-}
+info->discard_granularity = xenbus_read_unsigned(info->xbdev->otherend,
+"discard-granularity",
+0);
+info->discard_alignment = xenbus_read_unsigned(info->xbdev->otherend,
+"discard-alignment", 0);
 info->feature_secdiscard =
 !!xenbus_read_unsigned(info->xbdev->otherend, "discard-secure",
 0);


@@ -54,6 +54,7 @@ static int integrator_lm_populate(int num, struct device *dev)
 ret = of_platform_default_populate(child, NULL, dev);
 if (ret) {
 dev_err(dev, "failed to populate module\n");
+of_node_put(child);
 return ret;
 }
 }


@@ -6,8 +6,6 @@ config MXC_CLK
 config MXC_CLK_SCU
 tristate
-depends on ARCH_MXC
-depends on IMX_SCU && HAVE_ARM_SMCCC
 config CLK_IMX1
 def_bool SOC_IMX1


@@ -392,7 +392,8 @@ static int mmp2_audio_clk_remove(struct platform_device *pdev)
 return 0;
 }
-static int __maybe_unused mmp2_audio_clk_suspend(struct device *dev)
+#ifdef CONFIG_PM
+static int mmp2_audio_clk_suspend(struct device *dev)
 {
 struct mmp2_audio_clk *priv = dev_get_drvdata(dev);
@@ -404,7 +405,7 @@ static int __maybe_unused mmp2_audio_clk_suspend(struct device *dev)
 return 0;
 }
-static int __maybe_unused mmp2_audio_clk_resume(struct device *dev)
+static int mmp2_audio_clk_resume(struct device *dev)
 {
 struct mmp2_audio_clk *priv = dev_get_drvdata(dev);
@@ -415,6 +416,7 @@ static int __maybe_unused mmp2_audio_clk_resume(struct device *dev)
 return 0;
 }
+#endif
 static const struct dev_pm_ops mmp2_audio_clk_pm_ops = {
 SET_RUNTIME_PM_OPS(mmp2_audio_clk_suspend, mmp2_audio_clk_resume, NULL)


@@ -891,21 +891,6 @@ static struct clk_branch gcc_boot_rom_ahb_clk = {
 },
 };
-static struct clk_branch gcc_camera_ahb_clk = {
-.halt_reg = 0xb008,
-.halt_check = BRANCH_HALT,
-.hwcg_reg = 0xb008,
-.hwcg_bit = 1,
-.clkr = {
-.enable_reg = 0xb008,
-.enable_mask = BIT(0),
-.hw.init = &(struct clk_init_data){
-.name = "gcc_camera_ahb_clk",
-.ops = &clk_branch2_ops,
-},
-},
-};
 static struct clk_branch gcc_camera_hf_axi_clk = {
 .halt_reg = 0xb020,
 .halt_check = BRANCH_HALT,
@@ -2317,7 +2302,6 @@ static struct clk_regmap *gcc_sc7180_clocks[] = {
 [GCC_AGGRE_UFS_PHY_AXI_CLK] = &gcc_aggre_ufs_phy_axi_clk.clkr,
 [GCC_AGGRE_USB3_PRIM_AXI_CLK] = &gcc_aggre_usb3_prim_axi_clk.clkr,
 [GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
-[GCC_CAMERA_AHB_CLK] = &gcc_camera_ahb_clk.clkr,
 [GCC_CAMERA_HF_AXI_CLK] = &gcc_camera_hf_axi_clk.clkr,
 [GCC_CAMERA_THROTTLE_HF_AXI_CLK] = &gcc_camera_throttle_hf_axi_clk.clkr,
 [GCC_CAMERA_XO_CLK] = &gcc_camera_xo_clk.clkr,
@@ -2519,11 +2503,12 @@ static int gcc_sc7180_probe(struct platform_device *pdev)
 /*
 * Keep the clocks always-ON
- * GCC_CPUSS_GNOC_CLK, GCC_VIDEO_AHB_CLK, GCC_DISP_AHB_CLK
- * GCC_GPU_CFG_AHB_CLK
+ * GCC_CPUSS_GNOC_CLK, GCC_VIDEO_AHB_CLK, GCC_CAMERA_AHB_CLK,
+ * GCC_DISP_AHB_CLK, GCC_GPU_CFG_AHB_CLK
 */
 regmap_update_bits(regmap, 0x48004, BIT(0), BIT(0));
 regmap_update_bits(regmap, 0x0b004, BIT(0), BIT(0));
+regmap_update_bits(regmap, 0x0b008, BIT(0), BIT(0));
 regmap_update_bits(regmap, 0x0b00c, BIT(0), BIT(0));
 regmap_update_bits(regmap, 0x71004, BIT(0), BIT(0));

Some files were not shown because too many files have changed in this diff.