Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Cross-merge networking fixes after downstream PR.

No conflicts or adjacent changes.

Link: https://patch.msgid.link/20240808170148.3629934-1-kuba@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Jakub Kicinski 2024-08-08 14:03:51 -07:00
commit e47fd9beb1
281 changed files with 3040 additions and 1783 deletions


@ -166,6 +166,7 @@ Daniel Borkmann <daniel@iogearbox.net> <dborkman@redhat.com>
Daniel Borkmann <daniel@iogearbox.net> <dxchgb@gmail.com>
David Brownell <david-b@pacbell.net>
David Collins <quic_collinsd@quicinc.com> <collinsd@codeaurora.org>
David Heidelberg <david@ixit.cz> <d.okias@gmail.com>
David Rheinsberg <david@readahead.eu> <dh.herrmann@gmail.com>
David Rheinsberg <david@readahead.eu> <dh.herrmann@googlemail.com>
David Rheinsberg <david@readahead.eu> <david.rheinsberg@gmail.com>


@ -4798,11 +4798,9 @@
profile= [KNL] Enable kernel profiling via /proc/profile
Format: [<profiletype>,]<number>
Param: <profiletype>: "schedule", "sleep", or "kvm"
Param: <profiletype>: "schedule" or "kvm"
[defaults to kernel profiling]
Param: "schedule" - profile schedule points.
Param: "sleep" - profile D-state sleeping (millisecs).
Requires CONFIG_SCHEDSTATS
Param: "kvm" - profile VM exits.
Param: <number> - step/bucket size as a power of 2 for
statistical time based profiling.
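For illustration (not part of this patch): booting with "profile=schedule,2" enables schedule-point profiling with a step of 2^2 = 4 bytes of kernel text per histogram bucket, while a bare "profile=2" selects the default kernel profiling at the same granularity.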


@ -122,10 +122,18 @@ stable kernels.
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A76 | #1490853 | N/A |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A76 | #3324349 | ARM64_ERRATUM_3194386 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A77 | #1491015 | N/A |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A77 | #1508412 | ARM64_ERRATUM_1508412 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A77 | #3324348 | ARM64_ERRATUM_3194386 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A78 | #3324344 | ARM64_ERRATUM_3194386 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A78C | #3324346,3324347| ARM64_ERRATUM_3194386 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A710 | #2119858 | ARM64_ERRATUM_2119858 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A710 | #2054223 | ARM64_ERRATUM_2054223 |
@ -138,8 +146,14 @@ stable kernels.
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A720 | #3456091 | ARM64_ERRATUM_3194386 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A725 | #3456106 | ARM64_ERRATUM_3194386 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-X1 | #1502854 | N/A |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-X1 | #3324344 | ARM64_ERRATUM_3194386 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-X1C | #3324346 | ARM64_ERRATUM_3194386 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-X2 | #2119858 | ARM64_ERRATUM_2119858 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-X2 | #2224489 | ARM64_ERRATUM_2224489 |
@ -160,6 +174,8 @@ stable kernels.
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Neoverse-N1 | #1542419 | ARM64_ERRATUM_1542419 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Neoverse-N1 | #3324349 | ARM64_ERRATUM_3194386 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Neoverse-N2 | #2139208 | ARM64_ERRATUM_2139208 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Neoverse-N2 | #2067961 | ARM64_ERRATUM_2067961 |
@ -170,6 +186,8 @@ stable kernels.
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Neoverse-V1 | #1619801 | N/A |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Neoverse-V1 | #3324341 | ARM64_ERRATUM_3194386 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Neoverse-V2 | #3324336 | ARM64_ERRATUM_3194386 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Neoverse-V3 | #3312417 | ARM64_ERRATUM_3194386 |


@ -35,6 +35,9 @@ properties:
ports-implemented:
const: 1
power-domains:
maxItems: 1
sata-port@0:
$ref: /schemas/ata/snps,dwc-ahci-common.yaml#/$defs/dwc-ahci-port


@ -4,8 +4,6 @@ Generic Thermal Sysfs driver How To
Written by Sujith Thomas <sujith.thomas@intel.com>, Zhang Rui <rui.zhang@intel.com>
Updated: 2 January 2008
Copyright (c) 2008 Intel Corporation
@ -38,23 +36,23 @@ temperature) and throttle appropriate devices.
::
struct thermal_zone_device
*thermal_zone_device_register(char *type,
int trips, int mask, void *devdata,
struct thermal_zone_device_ops *ops,
struct thermal_zone_device *
thermal_zone_device_register_with_trips(const char *type,
const struct thermal_trip *trips,
int num_trips, void *devdata,
const struct thermal_zone_device_ops *ops,
const struct thermal_zone_params *tzp,
int passive_delay, int polling_delay))
unsigned int passive_delay,
unsigned int polling_delay)
This interface function adds a new thermal zone device (sensor) to
This interface function adds a new thermal zone device (sensor) to the
/sys/class/thermal folder as `thermal_zone[0-*]`. It tries to bind all the
thermal cooling devices registered at the same time.
thermal cooling devices registered to it at the same time.
type:
the thermal zone type.
trips:
the total number of trip points this thermal zone supports.
mask:
Bit string: If 'n'th bit is set, then trip point 'n' is writable.
the table of trip points for this thermal zone.
devdata:
device private data
ops:
@ -70,29 +68,26 @@ temperature) and throttle appropriate devices.
set the trip points window. Whenever the current temperature
is updated, the trip points immediately below and above the
current temperature are found.
.get_mode:
get the current mode (enabled/disabled) of the thermal zone.
- "enabled" means the kernel thermal management is
enabled.
- "disabled" will prevent kernel thermal driver action
upon trip points so that user applications can take
charge of thermal management.
.set_mode:
set the mode (enabled/disabled) of the thermal zone.
.get_trip_type:
get the type of certain trip point.
.get_trip_temp:
get the temperature above which the certain trip point
will be fired.
.change_mode:
change the mode (enabled/disabled) of the thermal zone.
.set_trip_temp:
set the temperature of a given trip point.
.get_crit_temp:
get the critical temperature for this thermal zone.
.set_emul_temp:
set the emulation temperature which helps in debugging
different threshold temperature points.
.get_trend:
get the trend of most recent zone temperature changes.
.hot:
hot trip point crossing handler.
.critical:
critical trip point crossing handler.
tzp:
thermal zone platform parameters.
passive_delay:
number of milliseconds to wait between polls when
performing passive cooling.
number of milliseconds to wait between polls when performing passive
cooling.
polling_delay:
number of milliseconds to wait between polls when checking
whether trip points have been crossed (0 for interrupt driven systems).
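For orientation, a minimal driver-side sketch of the updated call (assumes <linux/thermal.h> and <linux/err.h>; the sensor name, trip values and the get_temp callback are illustrative, not taken from this patch):

    static int demo_get_temp(struct thermal_zone_device *tz, int *temp)
    {
            *temp = 45000;  /* 45.000 degrees Celsius, in millicelsius */
            return 0;
    }

    static const struct thermal_zone_device_ops demo_ops = {
            .get_temp = demo_get_temp,
    };

    /* a table of trips replaces the old (trip count, writable mask) pair */
    static const struct thermal_trip demo_trips[] = {
            { .temperature = 75000, .hysteresis = 2000, .type = THERMAL_TRIP_PASSIVE },
            { .temperature = 95000, .type = THERMAL_TRIP_CRITICAL },
    };

    static int demo_register(void)
    {
            struct thermal_zone_device *tzd;

            tzd = thermal_zone_device_register_with_trips("demo", demo_trips,
                                                          ARRAY_SIZE(demo_trips),
                                                          NULL, &demo_ops, NULL,
                                                          1000, 2000);
            return PTR_ERR_OR_ZERO(tzd);
    }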


@ -21,9 +21,9 @@ are often referred to as greyscale formats.
.. raw:: latex
\scriptsize
\tiny
.. tabularcolumns:: |p{3.6cm}|p{3.0cm}|p{1.3cm}|p{2.6cm}|p{1.3cm}|p{1.3cm}|p{1.3cm}|
.. tabularcolumns:: |p{3.6cm}|p{2.4cm}|p{1.3cm}|p{1.3cm}|p{1.3cm}|p{1.3cm}|p{1.3cm}|p{1.3cm}|p{1.3cm}|
.. flat-table:: Luma-Only Image Formats
:header-rows: 1


@ -6368,7 +6368,7 @@ a single guest_memfd file, but the bound ranges must not overlap).
See KVM_SET_USER_MEMORY_REGION2 for additional details.
4.143 KVM_PRE_FAULT_MEMORY
------------------------
---------------------------
:Capability: KVM_CAP_PRE_FAULT_MEMORY
:Architectures: none
@ -6405,6 +6405,12 @@ for the current vCPU state. KVM maps memory as if the vCPU generated a
stage-2 read page fault, e.g. faults in memory as needed, but doesn't break
CoW. However, KVM does not mark any newly created stage-2 PTE as Accessed.
In the case of confidential VM types where there is an initial set up of
private guest memory before the guest is 'finalized'/measured, this ioctl
should only be issued after completing all the necessary setup to put the
guest into a 'finalized' state so that the above semantics can be reliably
ensured.
In some cases, multiple vCPUs might share the page tables. In this
case, the ioctl can be called in parallel.
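A userspace sketch of issuing the ioctl on a vCPU fd after guest setup is complete (the fd and range are illustrative, and the retry loop assumes the kernel updates gpa/size as it makes progress):

    #include <errno.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    static int pre_fault_range(int vcpu_fd, __u64 gpa, __u64 size)
    {
            struct kvm_pre_fault_memory range = {
                    .gpa  = gpa,    /* guest physical address to start from */
                    .size = size,   /* bytes to fault into the stage-2 tables */
            };

            while (range.size) {
                    if (ioctl(vcpu_fd, KVM_PRE_FAULT_MEMORY, &range) < 0 &&
                        errno != EINTR && errno != EAGAIN)
                            return -errno;
            }
            return 0;
    }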


@ -130,12 +130,12 @@ data using the `bmfdec <https://github.com/pali/bmfdec>`_ utility:
Due to a peculiarity in how Windows handles the ``CreateByteField()`` ACPI operator (errors only
happen when an invalid byte field is ultimately accessed), all methods require a 32 byte input
buffer, even if the Binay MOF says otherwise.
buffer, even if the Binary MOF says otherwise.
The input buffer contains a single byte to select the subfeature to be accessed and 31 bytes of
input data, the meaning of which depends on the subfeature being accessed.
The output buffer contains a singe byte which signals success or failure (``0x00`` on failure)
The output buffer contains a single byte which signals success or failure (``0x00`` on failure)
and 31 bytes of output data, the meaning of which depends on the subfeature being accessed.
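The same layout sketched as a C structure (the struct and field names are illustrative; the document only fixes the byte layout):

    struct msi_wmi_buffer {
            u8 subfeature;  /* in: subfeature selector; out: 0x00 signals failure */
            u8 data[31];    /* meaning depends on the selected subfeature */
    } __packed;             /* always 32 bytes, whatever the Binary MOF claims */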
WMI method Get_EC()
@ -147,7 +147,7 @@ data contains a flag byte and a 28 byte controller firmware version string.
The first 4 bits of the flag byte contain the minor version of the embedded controller interface,
with the next 2 bits containing the major version of the embedded controller interface.
The 7th bit signals if the embedded controller page chaged (exact meaning is unknown), and the
The 7th bit signals if the embedded controller page changed (exact meaning is unknown), and the
last bit signals if the platform is a Tigerlake platform.
The MSI software seems to only use this interface when the last bit is set.


@ -13324,14 +13324,16 @@ F: Documentation/devicetree/bindings/i2c/i2c-mux-ltc4306.txt
F: drivers/i2c/muxes/i2c-mux-ltc4306.c
LTP (Linux Test Project)
M: Andrea Cervesato <andrea.cervesato@suse.com>
M: Cyril Hrubis <chrubis@suse.cz>
M: Jan Stancek <jstancek@redhat.com>
M: Petr Vorel <pvorel@suse.cz>
M: Li Wang <liwang@redhat.com>
M: Yang Xu <xuyang2018.jy@fujitsu.com>
M: Xiao Yang <yangx.jy@fujitsu.com>
L: ltp@lists.linux.it (subscribers-only)
S: Maintained
W: http://linux-test-project.github.io/
W: https://linux-test-project.readthedocs.io/
T: git https://github.com/linux-test-project/ltp.git
LTR390 AMBIENT/UV LIGHT SENSOR DRIVER
@ -13539,7 +13541,7 @@ MARVELL GIGABIT ETHERNET DRIVERS (skge/sky2)
M: Mirko Lindner <mlindner@marvell.com>
M: Stephen Hemminger <stephen@networkplumber.org>
L: netdev@vger.kernel.org
S: Maintained
S: Odd fixes
F: drivers/net/ethernet/marvell/sk*
MARVELL LIBERTAS WIRELESS DRIVER


@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 11
SUBLEVEL = 0
EXTRAVERSION = -rc1
EXTRAVERSION = -rc2
NAME = Baby Opossum Posse
# *DOCUMENTATION*


@ -534,8 +534,10 @@ extern inline void writeq(u64 b, volatile void __iomem *addr)
#define ioread16be(p) swab16(ioread16(p))
#define ioread32be(p) swab32(ioread32(p))
#define ioread64be(p) swab64(ioread64(p))
#define iowrite16be(v,p) iowrite16(swab16(v), (p))
#define iowrite32be(v,p) iowrite32(swab32(v), (p))
#define iowrite64be(v,p) iowrite64(swab64(v), (p))
#define inb_p inb
#define inw_p inw
@ -634,8 +636,6 @@ extern void outsl (unsigned long port, const void *src, unsigned long count);
*/
#define ioread64 ioread64
#define iowrite64 iowrite64
#define ioread64be ioread64be
#define iowrite64be iowrite64be
#define ioread8_rep ioread8_rep
#define ioread16_rep ioread16_rep
#define ioread32_rep ioread32_rep


@ -157,7 +157,7 @@
clocks = <&xtal24mhz>;
};
pclk: clock-24000000 {
pclk: clock-pclk {
#clock-cells = <0>;
compatible = "fixed-factor-clock";
clock-div = <1>;


@ -1069,18 +1069,28 @@ config ARM64_ERRATUM_3117295
If unsure, say Y.
config ARM64_ERRATUM_3194386
bool "Cortex-{A720,X4,X925}/Neoverse-V3: workaround for MSR SSBS not self-synchronizing"
bool "Cortex-*/Neoverse-*: workaround for MSR SSBS not self-synchronizing"
default y
help
This option adds the workaround for the following errata:
* ARM Cortex-A76 erratum 3324349
* ARM Cortex-A77 erratum 3324348
* ARM Cortex-A78 erratum 3324344
* ARM Cortex-A78C erratum 3324346
* ARM Cortex-A78C erratum 3324347
* ARM Cortex-A710 erratum 3324338
* ARM Cortex-A720 erratum 3456091
* ARM Cortex-A725 erratum 3456106
* ARM Cortex-X1 erratum 3324344
* ARM Cortex-X1C erratum 3324346
* ARM Cortex-X2 erratum 3324338
* ARM Cortex-X3 erratum 3324335
* ARM Cortex-X4 erratum 3194386
* ARM Cortex-X925 erratum 3324334
* ARM Neoverse-N1 erratum 3324349
* ARM Neoverse N2 erratum 3324339
* ARM Neoverse-V1 erratum 3324341
* ARM Neoverse V2 erratum 3324336
* ARM Neoverse-V3 erratum 3312417
@ -1088,11 +1098,11 @@ config ARM64_ERRATUM_3194386
subsequent speculative instructions, which may permit unexpected
speculative store bypassing.
Work around this problem by placing a speculation barrier after
kernel changes to SSBS. The presence of the SSBS special-purpose
register is hidden from hwcaps and EL0 reads of ID_AA64PFR1_EL1, such
that userspace will use the PR_SPEC_STORE_BYPASS prctl to change
SSBS.
Work around this problem by placing a Speculation Barrier (SB) or
Instruction Synchronization Barrier (ISB) after kernel changes to
SSBS. The presence of the SSBS special-purpose register is hidden
from hwcaps and EL0 reads of ID_AA64PFR1_EL1, such that userspace
will use the PR_SPEC_STORE_BYPASS prctl to change SSBS.
If unsure, say Y.
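In code, the workaround described above boils down to the following shape (a conceptual sketch, not this patch's implementation; SB requires FEAT_SB, with ISB as the fallback on cores without it):

    /* make the SSBS write architecturally visible before younger accesses */
    asm volatile("msr ssbs, #1");      /* allow speculative store bypass again */
    asm volatile("sb" ::: "memory");   /* speculation barrier; "isb" also works */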


@ -86,12 +86,14 @@
#define ARM_CPU_PART_CORTEX_X2 0xD48
#define ARM_CPU_PART_NEOVERSE_N2 0xD49
#define ARM_CPU_PART_CORTEX_A78C 0xD4B
#define ARM_CPU_PART_CORTEX_X1C 0xD4C
#define ARM_CPU_PART_CORTEX_X3 0xD4E
#define ARM_CPU_PART_NEOVERSE_V2 0xD4F
#define ARM_CPU_PART_CORTEX_A720 0xD81
#define ARM_CPU_PART_CORTEX_X4 0xD82
#define ARM_CPU_PART_NEOVERSE_V3 0xD84
#define ARM_CPU_PART_CORTEX_X925 0xD85
#define ARM_CPU_PART_CORTEX_A725 0xD87
#define APM_CPU_PART_XGENE 0x000
#define APM_CPU_VAR_POTENZA 0x00
@ -165,12 +167,14 @@
#define MIDR_CORTEX_X2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X2)
#define MIDR_NEOVERSE_N2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N2)
#define MIDR_CORTEX_A78C MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78C)
#define MIDR_CORTEX_X1C MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X1C)
#define MIDR_CORTEX_X3 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X3)
#define MIDR_NEOVERSE_V2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V2)
#define MIDR_CORTEX_A720 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A720)
#define MIDR_CORTEX_X4 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X4)
#define MIDR_NEOVERSE_V3 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V3)
#define MIDR_CORTEX_X925 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X925)
#define MIDR_CORTEX_A725 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A725)
#define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
#define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
#define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX)


@ -13,6 +13,7 @@
#include <linux/types.h>
#include <asm/insn.h>
#define HAVE_JUMP_LABEL_BATCH
#define JUMP_LABEL_NOP_SIZE AARCH64_INSN_SIZE
#define JUMP_TABLE_ENTRY(key, label) \


@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
syscall_abis_32 +=
syscall_abis_64 += renameat newstat rlimit memfd_secret
syscall_abis_64 += renameat rlimit memfd_secret
syscalltbl = arch/arm64/tools/syscall_%.tbl


@ -434,15 +434,24 @@ static const struct midr_range erratum_spec_unpriv_load_list[] = {
#ifdef CONFIG_ARM64_ERRATUM_3194386
static const struct midr_range erratum_spec_ssbs_list[] = {
MIDR_ALL_VERSIONS(MIDR_CORTEX_A76),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A77),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A78),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A720),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A725),
MIDR_ALL_VERSIONS(MIDR_CORTEX_X1),
MIDR_ALL_VERSIONS(MIDR_CORTEX_X1C),
MIDR_ALL_VERSIONS(MIDR_CORTEX_X2),
MIDR_ALL_VERSIONS(MIDR_CORTEX_X3),
MIDR_ALL_VERSIONS(MIDR_CORTEX_X4),
MIDR_ALL_VERSIONS(MIDR_CORTEX_X925),
MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V3),
MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1),
MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V2),
MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V3),
{}
};
#endif


@ -7,10 +7,11 @@
*/
#include <linux/kernel.h>
#include <linux/jump_label.h>
#include <linux/smp.h>
#include <asm/insn.h>
#include <asm/patching.h>
void arch_jump_label_transform(struct jump_entry *entry,
bool arch_jump_label_transform_queue(struct jump_entry *entry,
enum jump_label_type type)
{
void *addr = (void *)jump_entry_code(entry);
@ -25,4 +26,10 @@ void arch_jump_label_transform(struct jump_entry *entry,
}
aarch64_insn_patch_text_nosync(addr, insn);
return true;
}
void arch_jump_label_transform_apply(void)
{
kick_all_cpus_sync();
}


@ -34,7 +34,7 @@ static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
pte_t clear;
pte_t pte = *ptep;
pte_t pte = ptep_get(ptep);
pte_val(clear) = (unsigned long)invalid_pte_table;
set_pte_at(mm, addr, ptep, clear);
@ -65,7 +65,7 @@ static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
pte_t *ptep, pte_t pte,
int dirty)
{
int changed = !pte_same(*ptep, pte);
int changed = !pte_same(ptep_get(ptep), pte);
if (changed) {
set_pte_at(vma->vm_mm, addr, ptep, pte);


@ -53,13 +53,13 @@ static inline bool kfence_protect_page(unsigned long addr, bool protect)
{
pte_t *pte = virt_to_kpte(addr);
if (WARN_ON(!pte) || pte_none(*pte))
if (WARN_ON(!pte) || pte_none(ptep_get(pte)))
return false;
if (protect)
set_pte(pte, __pte(pte_val(*pte) & ~(_PAGE_VALID | _PAGE_PRESENT)));
set_pte(pte, __pte(pte_val(ptep_get(pte)) & ~(_PAGE_VALID | _PAGE_PRESENT)));
else
set_pte(pte, __pte(pte_val(*pte) | (_PAGE_VALID | _PAGE_PRESENT)));
set_pte(pte, __pte(pte_val(ptep_get(pte)) | (_PAGE_VALID | _PAGE_PRESENT)));
preempt_disable();
local_flush_tlb_one(addr);


@ -26,8 +26,6 @@
#define KVM_MAX_VCPUS 256
#define KVM_MAX_CPUCFG_REGS 21
/* memory slots that does not exposed to userspace */
#define KVM_PRIVATE_MEM_SLOTS 0
#define KVM_HALT_POLL_NS_DEFAULT 500000
#define KVM_REQ_TLB_FLUSH_GPA KVM_ARCH_REQ(0)


@ -39,9 +39,9 @@ struct kvm_steal_time {
* Hypercall interface for KVM hypervisor
*
* a0: function identifier
* a1-a6: args
* a1-a5: args
* Return value will be placed in a0.
* Up to 6 arguments are passed in a1, a2, a3, a4, a5, a6.
* Up to 5 arguments are passed in a1, a2, a3, a4, a5.
*/
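/*
 * Illustrative sketch (not part of this patch): a one-argument wrapper
 * following the ABI described above. The hvcl mnemonic and the 0x100
 * service code are assumptions made for the example.
 */
static __always_inline long kvm_hypercall1(u64 fid, unsigned long arg0)
{
	register long ret asm("a0");
	register unsigned long fun asm("a0") = fid;
	register unsigned long a1 asm("a1") = arg0;

	__asm__ __volatile__("hvcl 0x100"
			     : "=r" (ret)
			     : "r" (fun), "r" (a1)
			     : "memory");
	return ret;
}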
static __always_inline long kvm_hypercall0(u64 fid)
{


@ -106,6 +106,9 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define KFENCE_AREA_START (VMEMMAP_END + 1)
#define KFENCE_AREA_END (KFENCE_AREA_START + KFENCE_AREA_SIZE - 1)
#define ptep_get(ptep) READ_ONCE(*(ptep))
#define pmdp_get(pmdp) READ_ONCE(*(pmdp))
#define pte_ERROR(e) \
pr_err("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
#ifndef __PAGETABLE_PMD_FOLDED
@ -147,11 +150,6 @@ static inline int p4d_present(p4d_t p4d)
return p4d_val(p4d) != (unsigned long)invalid_pud_table;
}
static inline void p4d_clear(p4d_t *p4dp)
{
p4d_val(*p4dp) = (unsigned long)invalid_pud_table;
}
static inline pud_t *p4d_pgtable(p4d_t p4d)
{
return (pud_t *)p4d_val(p4d);
@ -159,7 +157,12 @@ static inline pud_t *p4d_pgtable(p4d_t p4d)
static inline void set_p4d(p4d_t *p4d, p4d_t p4dval)
{
*p4d = p4dval;
WRITE_ONCE(*p4d, p4dval);
}
static inline void p4d_clear(p4d_t *p4dp)
{
set_p4d(p4dp, __p4d((unsigned long)invalid_pud_table));
}
#define p4d_phys(p4d) PHYSADDR(p4d_val(p4d))
@ -193,17 +196,20 @@ static inline int pud_present(pud_t pud)
return pud_val(pud) != (unsigned long)invalid_pmd_table;
}
static inline void pud_clear(pud_t *pudp)
{
pud_val(*pudp) = ((unsigned long)invalid_pmd_table);
}
static inline pmd_t *pud_pgtable(pud_t pud)
{
return (pmd_t *)pud_val(pud);
}
#define set_pud(pudptr, pudval) do { *(pudptr) = (pudval); } while (0)
static inline void set_pud(pud_t *pud, pud_t pudval)
{
WRITE_ONCE(*pud, pudval);
}
static inline void pud_clear(pud_t *pudp)
{
set_pud(pudp, __pud((unsigned long)invalid_pmd_table));
}
#define pud_phys(pud) PHYSADDR(pud_val(pud))
#define pud_page(pud) (pfn_to_page(pud_phys(pud) >> PAGE_SHIFT))
@ -231,12 +237,15 @@ static inline int pmd_present(pmd_t pmd)
return pmd_val(pmd) != (unsigned long)invalid_pte_table;
}
static inline void pmd_clear(pmd_t *pmdp)
static inline void set_pmd(pmd_t *pmd, pmd_t pmdval)
{
pmd_val(*pmdp) = ((unsigned long)invalid_pte_table);
WRITE_ONCE(*pmd, pmdval);
}
#define set_pmd(pmdptr, pmdval) do { *(pmdptr) = (pmdval); } while (0)
static inline void pmd_clear(pmd_t *pmdp)
{
set_pmd(pmdp, __pmd((unsigned long)invalid_pte_table));
}
#define pmd_phys(pmd) PHYSADDR(pmd_val(pmd))
@ -314,7 +323,8 @@ extern void paging_init(void);
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
*ptep = pteval;
WRITE_ONCE(*ptep, pteval);
if (pte_val(pteval) & _PAGE_GLOBAL) {
pte_t *buddy = ptep_buddy(ptep);
/*
@ -341,8 +351,8 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
: [buddy] "+m" (buddy->pte), [tmp] "=&r" (tmp)
: [global] "r" (page_global));
#else /* !CONFIG_SMP */
if (pte_none(*buddy))
pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL;
if (pte_none(ptep_get(buddy)))
WRITE_ONCE(*buddy, __pte(pte_val(ptep_get(buddy)) | _PAGE_GLOBAL));
#endif /* CONFIG_SMP */
}
}
@ -350,7 +360,7 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
/* Preserve global status for the pair */
if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL)
if (pte_val(ptep_get(ptep_buddy(ptep))) & _PAGE_GLOBAL)
set_pte(ptep, __pte(_PAGE_GLOBAL));
else
set_pte(ptep, __pte(0));
@ -603,7 +613,7 @@ static inline pmd_t pmd_mkinvalid(pmd_t pmd)
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
unsigned long address, pmd_t *pmdp)
{
pmd_t old = *pmdp;
pmd_t old = pmdp_get(pmdp);
pmd_clear(pmdp);


@ -1,3 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
syscall_abis_64 += newstat
# No special ABIs on loongarch so far
syscall_abis_64 +=


@ -66,6 +66,12 @@ void __init efi_runtime_init(void)
set_bit(EFI_RUNTIME_SERVICES, &efi.flags);
}
bool efi_poweroff_required(void)
{
return efi_enabled(EFI_RUNTIME_SERVICES) &&
(acpi_gbl_reduced_hardware || acpi_no_s5);
}
unsigned long __initdata screen_info_table = EFI_INVALID_TABLE_ADDR;
#if defined(CONFIG_SYSFB) || defined(CONFIG_EFI_EARLYCON)


@ -714,19 +714,19 @@ static int host_pfn_mapping_level(struct kvm *kvm, gfn_t gfn,
* value) and then p*d_offset() walks into the target huge page instead
* of the old page table (sees the new value).
*/
pgd = READ_ONCE(*pgd_offset(kvm->mm, hva));
pgd = pgdp_get(pgd_offset(kvm->mm, hva));
if (pgd_none(pgd))
goto out;
p4d = READ_ONCE(*p4d_offset(&pgd, hva));
p4d = p4dp_get(p4d_offset(&pgd, hva));
if (p4d_none(p4d) || !p4d_present(p4d))
goto out;
pud = READ_ONCE(*pud_offset(&p4d, hva));
pud = pudp_get(pud_offset(&p4d, hva));
if (pud_none(pud) || !pud_present(pud))
goto out;
pmd = READ_ONCE(*pmd_offset(&pud, hva));
pmd = pmdp_get(pmd_offset(&pud, hva));
if (pmd_none(pmd) || !pmd_present(pmd))
goto out;


@ -39,11 +39,11 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
pmd_t *pmd = NULL;
pgd = pgd_offset(mm, addr);
if (pgd_present(*pgd)) {
if (pgd_present(pgdp_get(pgd))) {
p4d = p4d_offset(pgd, addr);
if (p4d_present(*p4d)) {
if (p4d_present(p4dp_get(p4d))) {
pud = pud_offset(p4d, addr);
if (pud_present(*pud))
if (pud_present(pudp_get(pud)))
pmd = pmd_offset(pud, addr);
}
}


@ -141,7 +141,7 @@ void __meminit vmemmap_set_pmd(pmd_t *pmd, void *p, int node,
int __meminit vmemmap_check_pmd(pmd_t *pmd, int node,
unsigned long addr, unsigned long next)
{
int huge = pmd_val(*pmd) & _PAGE_HUGE;
int huge = pmd_val(pmdp_get(pmd)) & _PAGE_HUGE;
if (huge)
vmemmap_verify((pte_t *)pmd, node, addr, next);
@ -173,7 +173,7 @@ pte_t * __init populate_kernel_pte(unsigned long addr)
pud_t *pud;
pmd_t *pmd;
if (p4d_none(*p4d)) {
if (p4d_none(p4dp_get(p4d))) {
pud = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
if (!pud)
panic("%s: Failed to allocate memory\n", __func__);
@ -184,7 +184,7 @@ pte_t * __init populate_kernel_pte(unsigned long addr)
}
pud = pud_offset(p4d, addr);
if (pud_none(*pud)) {
if (pud_none(pudp_get(pud))) {
pmd = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
if (!pmd)
panic("%s: Failed to allocate memory\n", __func__);
@ -195,7 +195,7 @@ pte_t * __init populate_kernel_pte(unsigned long addr)
}
pmd = pmd_offset(pud, addr);
if (!pmd_present(*pmd)) {
if (!pmd_present(pmdp_get(pmd))) {
pte_t *pte;
pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
@ -216,7 +216,7 @@ void __init __set_fixmap(enum fixed_addresses idx,
BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);
ptep = populate_kernel_pte(addr);
if (!pte_none(*ptep)) {
if (!pte_none(ptep_get(ptep))) {
pte_ERROR(*ptep);
return;
}


@ -105,7 +105,7 @@ static phys_addr_t __init kasan_alloc_zeroed_page(int node)
static pte_t *__init kasan_pte_offset(pmd_t *pmdp, unsigned long addr, int node, bool early)
{
if (__pmd_none(early, READ_ONCE(*pmdp))) {
if (__pmd_none(early, pmdp_get(pmdp))) {
phys_addr_t pte_phys = early ?
__pa_symbol(kasan_early_shadow_pte) : kasan_alloc_zeroed_page(node);
if (!early)
@ -118,7 +118,7 @@ static pte_t *__init kasan_pte_offset(pmd_t *pmdp, unsigned long addr, int node,
static pmd_t *__init kasan_pmd_offset(pud_t *pudp, unsigned long addr, int node, bool early)
{
if (__pud_none(early, READ_ONCE(*pudp))) {
if (__pud_none(early, pudp_get(pudp))) {
phys_addr_t pmd_phys = early ?
__pa_symbol(kasan_early_shadow_pmd) : kasan_alloc_zeroed_page(node);
if (!early)
@ -131,7 +131,7 @@ static pmd_t *__init kasan_pmd_offset(pud_t *pudp, unsigned long addr, int node,
static pud_t *__init kasan_pud_offset(p4d_t *p4dp, unsigned long addr, int node, bool early)
{
if (__p4d_none(early, READ_ONCE(*p4dp))) {
if (__p4d_none(early, p4dp_get(p4dp))) {
phys_addr_t pud_phys = early ?
__pa_symbol(kasan_early_shadow_pud) : kasan_alloc_zeroed_page(node);
if (!early)
@ -154,7 +154,7 @@ static void __init kasan_pte_populate(pmd_t *pmdp, unsigned long addr,
: kasan_alloc_zeroed_page(node);
next = addr + PAGE_SIZE;
set_pte(ptep, pfn_pte(__phys_to_pfn(page_phys), PAGE_KERNEL));
} while (ptep++, addr = next, addr != end && __pte_none(early, READ_ONCE(*ptep)));
} while (ptep++, addr = next, addr != end && __pte_none(early, ptep_get(ptep)));
}
static void __init kasan_pmd_populate(pud_t *pudp, unsigned long addr,
@ -166,7 +166,7 @@ static void __init kasan_pmd_populate(pud_t *pudp, unsigned long addr,
do {
next = pmd_addr_end(addr, end);
kasan_pte_populate(pmdp, addr, next, node, early);
} while (pmdp++, addr = next, addr != end && __pmd_none(early, READ_ONCE(*pmdp)));
} while (pmdp++, addr = next, addr != end && __pmd_none(early, pmdp_get(pmdp)));
}
static void __init kasan_pud_populate(p4d_t *p4dp, unsigned long addr,


@ -128,7 +128,7 @@ pmd_t mk_pmd(struct page *page, pgprot_t prot)
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp, pmd_t pmd)
{
*pmdp = pmd;
WRITE_ONCE(*pmdp, pmd);
flush_tlb_all();
}


@ -20,6 +20,7 @@ config PARISC
select ARCH_SUPPORTS_HUGETLBFS if PA20
select ARCH_SUPPORTS_MEMORY_FAILURE
select ARCH_STACKWALK
select ARCH_HAS_CACHE_LINE_SIZE
select ARCH_HAS_DEBUG_VM_PGTABLE
select HAVE_RELIABLE_STACKTRACE
select DMA_OPS


@ -20,7 +20,16 @@
#define SMP_CACHE_BYTES L1_CACHE_BYTES
#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
#ifdef CONFIG_PA20
#define ARCH_DMA_MINALIGN 128
#else
#define ARCH_DMA_MINALIGN 32
#endif
#define ARCH_KMALLOC_MINALIGN 16 /* ldcw requires 16-byte alignment */
#define arch_slab_minalign() ((unsigned)dcache_stride)
#define cache_line_size() dcache_stride
#define dma_get_cache_alignment cache_line_size
#define __read_mostly __section(".data..read_mostly")


@ -114,7 +114,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
jit_data->header =
bpf_jit_binary_alloc(prog_size + extable_size,
&jit_data->image,
sizeof(u32),
sizeof(long),
bpf_fill_ill_insns);
if (!jit_data->header) {
prog = orig_prog;


@ -1,4 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
syscall_abis_32 += riscv memfd_secret
syscall_abis_64 += riscv newstat rlimit memfd_secret
syscall_abis_64 += riscv rlimit memfd_secret


@ -432,28 +432,26 @@ static void __init riscv_resolve_isa(unsigned long *source_isa,
bitmap_copy(prev_resolved_isa, resolved_isa, RISCV_ISA_EXT_MAX);
for_each_set_bit(bit, source_isa, RISCV_ISA_EXT_MAX) {
ext = riscv_get_isa_ext_data(bit);
if (!ext)
continue;
if (ext->validate) {
if (ext && ext->validate) {
ret = ext->validate(ext, resolved_isa);
if (ret == -EPROBE_DEFER) {
loop = true;
continue;
} else if (ret) {
/* Disable the extension entirely */
clear_bit(ext->id, source_isa);
clear_bit(bit, source_isa);
continue;
}
}
set_bit(ext->id, resolved_isa);
set_bit(bit, resolved_isa);
/* No need to keep it in source isa now that it is enabled */
clear_bit(ext->id, source_isa);
clear_bit(bit, source_isa);
/* Single letter extensions get set in hwcap */
if (ext->id < RISCV_ISA_EXT_BASE)
*this_hwcap |= isa2hwcap[ext->id];
if (bit < RISCV_ISA_EXT_BASE)
*this_hwcap |= isa2hwcap[bit];
}
} while (loop && memcmp(prev_resolved_isa, resolved_isa, sizeof(prev_resolved_isa)));
}


@ -71,7 +71,7 @@ void __init sbi_ipi_init(void)
* the masking/unmasking of virtual IPIs is done
* via generic IPI-Mux
*/
cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
cpuhp_setup_state(CPUHP_AP_IRQ_RISCV_SBI_IPI_STARTING,
"irqchip/sbi-ipi:starting",
sbi_ipi_starting_cpu, NULL);


@ -61,26 +61,27 @@ static inline void no_context(struct pt_regs *regs, unsigned long addr)
static inline void mm_fault_error(struct pt_regs *regs, unsigned long addr, vm_fault_t fault)
{
if (!user_mode(regs)) {
no_context(regs, addr);
return;
}
if (fault & VM_FAULT_OOM) {
/*
* We ran out of memory, call the OOM killer, and return the userspace
* (which will retry the fault, or kill us if we got oom-killed).
*/
if (!user_mode(regs)) {
no_context(regs, addr);
return;
}
pagefault_out_of_memory();
return;
} else if (fault & (VM_FAULT_SIGBUS | VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE)) {
/* Kernel mode? Handle exceptions or die */
if (!user_mode(regs)) {
no_context(regs, addr);
return;
}
do_trap(regs, SIGBUS, BUS_ADRERR, addr);
return;
} else if (fault & VM_FAULT_SIGSEGV) {
do_trap(regs, SIGSEGV, SEGV_MAPERR, addr);
return;
}
BUG();
}


@ -234,8 +234,6 @@ static void __init setup_bootmem(void)
*/
memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start);
phys_ram_end = memblock_end_of_DRAM();
/*
* Make sure we align the start of the memory on a PMD boundary so that
* at worst, we map the linear mapping with PMD mappings.
@ -250,6 +248,16 @@ static void __init setup_bootmem(void)
if (IS_ENABLED(CONFIG_64BIT) && IS_ENABLED(CONFIG_MMU))
kernel_map.va_pa_offset = PAGE_OFFSET - phys_ram_base;
/*
* The size of the linear page mapping may restrict the amount of
* usable RAM.
*/
if (IS_ENABLED(CONFIG_64BIT)) {
max_mapped_addr = __pa(PAGE_OFFSET) + KERN_VIRT_SIZE;
memblock_cap_memory_range(phys_ram_base,
max_mapped_addr - phys_ram_base);
}
/*
* Reserve physical address space that would be mapped to virtual
* addresses greater than (void *)(-PAGE_SIZE) because:
@ -266,6 +274,7 @@ static void __init setup_bootmem(void)
memblock_reserve(max_mapped_addr, (phys_addr_t)-max_mapped_addr);
}
phys_ram_end = memblock_end_of_DRAM();
min_low_pfn = PFN_UP(phys_ram_base);
max_low_pfn = max_pfn = PFN_DOWN(phys_ram_end);
high_memory = (void *)(__va(PFN_PHYS(max_low_pfn)));
@ -1284,8 +1293,6 @@ static void __init create_linear_mapping_page_table(void)
if (start <= __pa(PAGE_OFFSET) &&
__pa(PAGE_OFFSET) < end)
start = __pa(PAGE_OFFSET);
if (end >= __pa(PAGE_OFFSET) + memory_limit)
end = __pa(PAGE_OFFSET) + memory_limit;
create_linear_mapping_range(start, end, 0, NULL);
}


@ -7,6 +7,7 @@
* Author: Li Zhengyu (lizhengyu3@huawei.com)
*
*/
#include <asm/asm.h>
#include <linux/linkage.h>
.text
@ -34,6 +35,7 @@ SYM_CODE_END(purgatory_start)
.data
.align LGREG
SYM_DATA(riscv_kernel_entry, .quad 0)
.end


@ -113,7 +113,7 @@ void load_fpu_state(struct fpu *state, int flags)
int mask;
if (flags & KERNEL_FPC)
fpu_lfpc(&state->fpc);
fpu_lfpc_safe(&state->fpc);
if (!cpu_has_vx()) {
if (flags & KERNEL_VXR_V0V7)
load_fp_regs_vx(state->vxrs);


@ -59,14 +59,6 @@ SECTIONS
} :text = 0x0700
RO_DATA(PAGE_SIZE)
.data.rel.ro : {
*(.data.rel.ro .data.rel.ro.*)
}
.got : {
__got_start = .;
*(.got)
__got_end = .;
}
. = ALIGN(PAGE_SIZE);
_sdata = .; /* Start of data section */
@ -80,6 +72,15 @@ SECTIONS
. = ALIGN(PAGE_SIZE);
__end_ro_after_init = .;
.data.rel.ro : {
*(.data.rel.ro .data.rel.ro.*)
}
.got : {
__got_start = .;
*(.got)
__got_end = .;
}
RW_DATA(0x100, PAGE_SIZE, THREAD_SIZE)
.data.rel : {
*(.data.rel*)


@ -3,6 +3,7 @@
#include <linux/ptdump.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/sort.h>
#include <linux/mm.h>
#include <linux/kfence.h>
#include <linux/kasan.h>
@ -15,13 +16,15 @@
static unsigned long max_addr;
struct addr_marker {
int is_start;
unsigned long start_address;
const char *name;
};
enum address_markers_idx {
IDENTITY_BEFORE_NR = 0,
IDENTITY_BEFORE_END_NR,
KVA_NR = 0,
LOWCORE_START_NR,
LOWCORE_END_NR,
AMODE31_START_NR,
AMODE31_END_NR,
KERNEL_START_NR,
@ -30,8 +33,8 @@ enum address_markers_idx {
KFENCE_START_NR,
KFENCE_END_NR,
#endif
IDENTITY_AFTER_NR,
IDENTITY_AFTER_END_NR,
IDENTITY_START_NR,
IDENTITY_END_NR,
VMEMMAP_NR,
VMEMMAP_END_NR,
VMALLOC_NR,
@ -59,43 +62,44 @@ enum address_markers_idx {
};
static struct addr_marker address_markers[] = {
[IDENTITY_BEFORE_NR] = {0, "Identity Mapping Start"},
[IDENTITY_BEFORE_END_NR] = {(unsigned long)_stext, "Identity Mapping End"},
[AMODE31_START_NR] = {0, "Amode31 Area Start"},
[AMODE31_END_NR] = {0, "Amode31 Area End"},
[KERNEL_START_NR] = {(unsigned long)_stext, "Kernel Image Start"},
[KERNEL_END_NR] = {(unsigned long)_end, "Kernel Image End"},
[KVA_NR] = {0, 0, "Kernel Virtual Address Space"},
[LOWCORE_START_NR] = {1, 0, "Lowcore Start"},
[LOWCORE_END_NR] = {0, 0, "Lowcore End"},
[IDENTITY_START_NR] = {1, 0, "Identity Mapping Start"},
[IDENTITY_END_NR] = {0, 0, "Identity Mapping End"},
[AMODE31_START_NR] = {1, 0, "Amode31 Area Start"},
[AMODE31_END_NR] = {0, 0, "Amode31 Area End"},
[KERNEL_START_NR] = {1, (unsigned long)_stext, "Kernel Image Start"},
[KERNEL_END_NR] = {0, (unsigned long)_end, "Kernel Image End"},
#ifdef CONFIG_KFENCE
[KFENCE_START_NR] = {0, "KFence Pool Start"},
[KFENCE_END_NR] = {0, "KFence Pool End"},
[KFENCE_START_NR] = {1, 0, "KFence Pool Start"},
[KFENCE_END_NR] = {0, 0, "KFence Pool End"},
#endif
[IDENTITY_AFTER_NR] = {(unsigned long)_end, "Identity Mapping Start"},
[IDENTITY_AFTER_END_NR] = {0, "Identity Mapping End"},
[VMEMMAP_NR] = {0, "vmemmap Area Start"},
[VMEMMAP_END_NR] = {0, "vmemmap Area End"},
[VMALLOC_NR] = {0, "vmalloc Area Start"},
[VMALLOC_END_NR] = {0, "vmalloc Area End"},
[VMEMMAP_NR] = {1, 0, "vmemmap Area Start"},
[VMEMMAP_END_NR] = {0, 0, "vmemmap Area End"},
[VMALLOC_NR] = {1, 0, "vmalloc Area Start"},
[VMALLOC_END_NR] = {0, 0, "vmalloc Area End"},
#ifdef CONFIG_KMSAN
[KMSAN_VMALLOC_SHADOW_START_NR] = {0, "Kmsan vmalloc Shadow Start"},
[KMSAN_VMALLOC_SHADOW_END_NR] = {0, "Kmsan vmalloc Shadow End"},
[KMSAN_VMALLOC_ORIGIN_START_NR] = {0, "Kmsan vmalloc Origins Start"},
[KMSAN_VMALLOC_ORIGIN_END_NR] = {0, "Kmsan vmalloc Origins End"},
[KMSAN_MODULES_SHADOW_START_NR] = {0, "Kmsan Modules Shadow Start"},
[KMSAN_MODULES_SHADOW_END_NR] = {0, "Kmsan Modules Shadow End"},
[KMSAN_MODULES_ORIGIN_START_NR] = {0, "Kmsan Modules Origins Start"},
[KMSAN_MODULES_ORIGIN_END_NR] = {0, "Kmsan Modules Origins End"},
[KMSAN_VMALLOC_SHADOW_START_NR] = {1, 0, "Kmsan vmalloc Shadow Start"},
[KMSAN_VMALLOC_SHADOW_END_NR] = {0, 0, "Kmsan vmalloc Shadow End"},
[KMSAN_VMALLOC_ORIGIN_START_NR] = {1, 0, "Kmsan vmalloc Origins Start"},
[KMSAN_VMALLOC_ORIGIN_END_NR] = {0, 0, "Kmsan vmalloc Origins End"},
[KMSAN_MODULES_SHADOW_START_NR] = {1, 0, "Kmsan Modules Shadow Start"},
[KMSAN_MODULES_SHADOW_END_NR] = {0, 0, "Kmsan Modules Shadow End"},
[KMSAN_MODULES_ORIGIN_START_NR] = {1, 0, "Kmsan Modules Origins Start"},
[KMSAN_MODULES_ORIGIN_END_NR] = {0, 0, "Kmsan Modules Origins End"},
#endif
[MODULES_NR] = {0, "Modules Area Start"},
[MODULES_END_NR] = {0, "Modules Area End"},
[ABS_LOWCORE_NR] = {0, "Lowcore Area Start"},
[ABS_LOWCORE_END_NR] = {0, "Lowcore Area End"},
[MEMCPY_REAL_NR] = {0, "Real Memory Copy Area Start"},
[MEMCPY_REAL_END_NR] = {0, "Real Memory Copy Area End"},
[MODULES_NR] = {1, 0, "Modules Area Start"},
[MODULES_END_NR] = {0, 0, "Modules Area End"},
[ABS_LOWCORE_NR] = {1, 0, "Lowcore Area Start"},
[ABS_LOWCORE_END_NR] = {0, 0, "Lowcore Area End"},
[MEMCPY_REAL_NR] = {1, 0, "Real Memory Copy Area Start"},
[MEMCPY_REAL_END_NR] = {0, 0, "Real Memory Copy Area End"},
#ifdef CONFIG_KASAN
[KASAN_SHADOW_START_NR] = {KASAN_SHADOW_START, "Kasan Shadow Start"},
[KASAN_SHADOW_END_NR] = {KASAN_SHADOW_END, "Kasan Shadow End"},
[KASAN_SHADOW_START_NR] = {1, KASAN_SHADOW_START, "Kasan Shadow Start"},
[KASAN_SHADOW_END_NR] = {0, KASAN_SHADOW_END, "Kasan Shadow End"},
#endif
{ -1, NULL }
{1, -1UL, NULL}
};
struct pg_state {
@ -163,6 +167,19 @@ static void note_prot_wx(struct pg_state *st, unsigned long addr)
st->wx_pages += (addr - st->start_address) / PAGE_SIZE;
}
static void note_page_update_state(struct pg_state *st, unsigned long addr, unsigned int prot, int level)
{
struct seq_file *m = st->seq;
while (addr >= st->marker[1].start_address) {
st->marker++;
pt_dump_seq_printf(m, "---[ %s ]---\n", st->marker->name);
}
st->start_address = addr;
st->current_prot = prot;
st->level = level;
}
static void note_page(struct ptdump_state *pt_st, unsigned long addr, int level, u64 val)
{
int width = sizeof(unsigned long) * 2;
@ -186,9 +203,7 @@ static void note_page(struct ptdump_state *pt_st, unsigned long addr, int level,
addr = max_addr;
if (st->level == -1) {
pt_dump_seq_printf(m, "---[ %s ]---\n", st->marker->name);
st->start_address = addr;
st->current_prot = prot;
st->level = level;
note_page_update_state(st, addr, prot, level);
} else if (prot != st->current_prot || level != st->level ||
addr >= st->marker[1].start_address) {
note_prot_wx(st, addr);
@ -202,13 +217,7 @@ static void note_page(struct ptdump_state *pt_st, unsigned long addr, int level,
}
pt_dump_seq_printf(m, "%9lu%c ", delta, *unit);
print_prot(m, st->current_prot, st->level);
while (addr >= st->marker[1].start_address) {
st->marker++;
pt_dump_seq_printf(m, "---[ %s ]---\n", st->marker->name);
}
st->start_address = addr;
st->current_prot = prot;
st->level = level;
note_page_update_state(st, addr, prot, level);
}
}
@ -280,22 +289,25 @@ static int ptdump_show(struct seq_file *m, void *v)
DEFINE_SHOW_ATTRIBUTE(ptdump);
#endif /* CONFIG_PTDUMP_DEBUGFS */
/*
* Heapsort from lib/sort.c is not a stable sorting algorithm, do a simple
* insertion sort to preserve the original order of markers with the same
* start address.
*/
static void sort_address_markers(void)
static int ptdump_cmp(const void *a, const void *b)
{
struct addr_marker tmp;
int i, j;
const struct addr_marker *ama = a;
const struct addr_marker *amb = b;
for (i = 1; i < ARRAY_SIZE(address_markers) - 1; i++) {
tmp = address_markers[i];
for (j = i - 1; j >= 0 && address_markers[j].start_address > tmp.start_address; j--)
address_markers[j + 1] = address_markers[j];
address_markers[j + 1] = tmp;
}
if (ama->start_address > amb->start_address)
return 1;
if (ama->start_address < amb->start_address)
return -1;
/*
* If the start addresses of two markers are identical consider the
* marker which defines the start of an area higher than the one which
* defines the end of an area. This keeps pairs of markers sorted.
*/
if (ama->is_start)
return 1;
if (amb->is_start)
return -1;
return 0;
}
static int pt_dump_init(void)
@ -303,6 +315,8 @@ static int pt_dump_init(void)
#ifdef CONFIG_KFENCE
unsigned long kfence_start = (unsigned long)__kfence_pool;
#endif
unsigned long lowcore = (unsigned long)get_lowcore();
/*
* Figure out the maximum virtual address being accessible with the
* kernel ASCE. We need this to keep the page table walker functions
@ -310,7 +324,10 @@ static int pt_dump_init(void)
*/
max_addr = (get_lowcore()->kernel_asce.val & _REGION_ENTRY_TYPE_MASK) >> 2;
max_addr = 1UL << (max_addr * 11 + 31);
address_markers[IDENTITY_AFTER_END_NR].start_address = ident_map_size;
address_markers[LOWCORE_START_NR].start_address = lowcore;
address_markers[LOWCORE_END_NR].start_address = lowcore + sizeof(struct lowcore);
address_markers[IDENTITY_START_NR].start_address = __identity_base;
address_markers[IDENTITY_END_NR].start_address = __identity_base + ident_map_size;
address_markers[AMODE31_START_NR].start_address = (unsigned long)__samode31;
address_markers[AMODE31_END_NR].start_address = (unsigned long)__eamode31;
address_markers[MODULES_NR].start_address = MODULES_VADDR;
@ -337,7 +354,8 @@ static int pt_dump_init(void)
address_markers[KMSAN_MODULES_ORIGIN_START_NR].start_address = KMSAN_MODULES_ORIGIN_START;
address_markers[KMSAN_MODULES_ORIGIN_END_NR].start_address = KMSAN_MODULES_ORIGIN_END;
#endif
sort_address_markers();
sort(address_markers, ARRAY_SIZE(address_markers) - 1,
sizeof(address_markers[0]), ptdump_cmp, NULL);
#ifdef CONFIG_PTDUMP_DEBUGFS
debugfs_create_file("kernel_page_tables", 0400, NULL, NULL, &ptdump_fops);
#endif /* CONFIG_PTDUMP_DEBUGFS */


@ -108,6 +108,8 @@ void mark_rodata_ro(void)
{
unsigned long size = __end_ro_after_init - __start_ro_after_init;
if (MACHINE_HAS_NX)
system_ctl_set_bit(0, CR0_INSTRUCTION_EXEC_PROTECTION_BIT);
__set_memory_ro(__start_ro_after_init, __end_ro_after_init);
pr_info("Write protected read-only-after-init data: %luk\n", size >> 10);
}
@ -170,13 +172,6 @@ void __init mem_init(void)
setup_zero_pages(); /* Setup zeroed pages. */
}
void free_initmem(void)
{
set_memory_rwnx((unsigned long)_sinittext,
(unsigned long)(_einittext - _sinittext) >> PAGE_SHIFT);
free_initmem_default(POISON_FREE_INITMEM);
}
unsigned long memory_block_size_bytes(void)
{
/*


@ -661,7 +661,6 @@ void __init vmem_map_init(void)
{
__set_memory_rox(_stext, _etext);
__set_memory_ro(_etext, __end_rodata);
__set_memory_rox(_sinittext, _einittext);
__set_memory_rox(__stext_amode31, __etext_amode31);
/*
* If the BEAR-enhancement facility is not installed the first
@ -670,16 +669,8 @@ void __init vmem_map_init(void)
*/
if (!static_key_enabled(&cpu_has_bear))
set_memory_x(0, 1);
if (debug_pagealloc_enabled()) {
/*
* Use RELOC_HIDE() as long as __va(0) translates to NULL,
* since performing pointer arithmetic on a NULL pointer
* has undefined behavior and generates compiler warnings.
*/
__set_memory_4k(__va(0), RELOC_HIDE(__va(0), ident_map_size));
}
if (MACHINE_HAS_NX)
system_ctl_set_bit(0, CR0_INSTRUCTION_EXEC_PROTECTION_BIT);
if (debug_pagealloc_enabled())
__set_memory_4k(__va(0), __va(0) + ident_map_size);
pr_info("Write protected kernel read-only data: %luk\n",
(unsigned long)(__end_rodata - _stext) >> 10);
}


@ -163,7 +163,7 @@ struct sev_config {
*/
use_cas : 1,
__reserved : 62;
__reserved : 61;
};
static struct sev_config sev_cfg __read_mostly;


@ -344,6 +344,7 @@
332 common statx sys_statx
333 common io_pgetevents sys_io_pgetevents
334 common rseq sys_rseq
335 common uretprobe sys_uretprobe
# don't use numbers 387 through 423, add new calls after the last
# 'common' entry
424 common pidfd_send_signal sys_pidfd_send_signal
@ -385,7 +386,6 @@
460 common lsm_set_self_attr sys_lsm_set_self_attr
461 common lsm_list_modules sys_lsm_list_modules
462 common mseal sys_mseal
467 common uretprobe sys_uretprobe
#
# Due to a historical design error, certain syscalls are numbered differently


@ -1520,20 +1520,23 @@ static void x86_pmu_start(struct perf_event *event, int flags)
void perf_event_print_debug(void)
{
u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
unsigned long *cntr_mask, *fixed_cntr_mask;
struct event_constraint *pebs_constraints;
struct cpu_hw_events *cpuc;
u64 pebs, debugctl;
int cpu = smp_processor_id();
struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
unsigned long *cntr_mask = hybrid(cpuc->pmu, cntr_mask);
unsigned long *fixed_cntr_mask = hybrid(cpuc->pmu, fixed_cntr_mask);
struct event_constraint *pebs_constraints = hybrid(cpuc->pmu, pebs_constraints);
unsigned long flags;
int idx;
int cpu, idx;
guard(irqsave)();
cpu = smp_processor_id();
cpuc = &per_cpu(cpu_hw_events, cpu);
cntr_mask = hybrid(cpuc->pmu, cntr_mask);
fixed_cntr_mask = hybrid(cpuc->pmu, fixed_cntr_mask);
pebs_constraints = hybrid(cpuc->pmu, pebs_constraints);
if (!*(u64 *)cntr_mask)
return;
local_irq_save(flags);
if (x86_pmu.version >= 2) {
rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
@ -1577,7 +1580,6 @@ void perf_event_print_debug(void)
pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
cpu, idx, pmc_count);
}
local_irq_restore(flags);
}
void x86_pmu_stop(struct perf_event *event, int flags)


@ -64,7 +64,7 @@
* perf code: 0x00
* Available model: SNB,IVB,HSW,BDW,SKL,KNL,GLM,CNL,
* KBL,CML,ICL,ICX,TGL,TNT,RKL,ADL,
* RPL,SPR,MTL,ARL,LNL
* RPL,SPR,MTL,ARL,LNL,SRF
* Scope: Package (physical package)
* MSR_PKG_C3_RESIDENCY: Package C3 Residency Counter.
* perf code: 0x01
@ -693,7 +693,8 @@ static const struct cstate_model srf_cstates __initconst = {
.core_events = BIT(PERF_CSTATE_CORE_C1_RES) |
BIT(PERF_CSTATE_CORE_C6_RES),
.pkg_events = BIT(PERF_CSTATE_PKG_C6_RES),
.pkg_events = BIT(PERF_CSTATE_PKG_C2_RES) |
BIT(PERF_CSTATE_PKG_C6_RES),
.module_events = BIT(PERF_CSTATE_MODULE_C6_RES),
};


@ -2,6 +2,10 @@
#ifndef _ASM_X86_CMDLINE_H
#define _ASM_X86_CMDLINE_H
#include <asm/setup.h>
extern char builtin_cmdline[COMMAND_LINE_SIZE];
int cmdline_find_option_bool(const char *cmdline_ptr, const char *option);
int cmdline_find_option(const char *cmdline_ptr, const char *option,
char *buffer, int bufsize);


@ -1305,6 +1305,7 @@ struct kvm_arch {
u8 vm_type;
bool has_private_mem;
bool has_protected_state;
bool pre_fault_allowed;
struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
struct list_head active_mmu_pages;
struct list_head zapped_obsolete_pages;


@ -462,7 +462,7 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
switch (c->x86_model) {
case 0x00 ... 0x2f:
case 0x40 ... 0x4f:
case 0x70 ... 0x7f:
case 0x60 ... 0x7f:
setup_force_cpu_cap(X86_FEATURE_ZEN5);
break;
default:


@ -306,7 +306,7 @@ static void freq_invariance_enable(void)
WARN_ON_ONCE(1);
return;
}
static_branch_enable(&arch_scale_freq_key);
static_branch_enable_cpuslocked(&arch_scale_freq_key);
register_freq_invariance_syscore_ops();
pr_info("Estimated ratio of average max frequency by base frequency (times 1024): %llu\n", arch_max_freq_ratio);
}
@ -323,9 +323,11 @@ static void __init bp_init_freq_invariance(void)
if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
return;
if (intel_set_max_freq_ratio())
if (intel_set_max_freq_ratio()) {
guard(cpus_read_lock)();
freq_invariance_enable();
}
}
static void disable_freq_invariance_workfn(struct work_struct *work)
{


@ -164,7 +164,7 @@ unsigned long saved_video_mode;
static char __initdata command_line[COMMAND_LINE_SIZE];
#ifdef CONFIG_CMDLINE_BOOL
static char __initdata builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE;
char builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE;
bool builtin_cmdline_added __ro_after_init;
#endif


@ -141,8 +141,8 @@ config KVM_AMD_SEV
depends on CRYPTO_DEV_SP_PSP && !(KVM_AMD=y && CRYPTO_DEV_CCP_DD=m)
select ARCH_HAS_CC_PLATFORM
select KVM_GENERIC_PRIVATE_MEM
select HAVE_KVM_GMEM_PREPARE
select HAVE_KVM_GMEM_INVALIDATE
select HAVE_KVM_ARCH_GMEM_PREPARE
select HAVE_KVM_ARCH_GMEM_INVALIDATE
help
Provides support for launching Encrypted VMs (SEV) and Encrypted VMs
with Encrypted State (SEV-ES) on AMD processors.


@ -1743,7 +1743,7 @@ static void limit_periodic_timer_frequency(struct kvm_lapic *apic)
s64 min_period = min_timer_period_us * 1000LL;
if (apic->lapic_timer.period < min_period) {
pr_info_ratelimited(
pr_info_once(
"vcpu %i: requested %lld ns "
"lapic timer period limited to %lld ns\n",
apic->vcpu->vcpu_id,


@ -4335,7 +4335,7 @@ static u8 kvm_max_private_mapping_level(struct kvm *kvm, kvm_pfn_t pfn,
if (req_max_level)
max_level = min(max_level, req_max_level);
return req_max_level;
return max_level;
}
static int kvm_faultin_pfn_private(struct kvm_vcpu *vcpu,
@ -4743,6 +4743,9 @@ long kvm_arch_vcpu_pre_fault_memory(struct kvm_vcpu *vcpu,
u64 end;
int r;
if (!vcpu->kvm->arch.pre_fault_allowed)
return -EOPNOTSUPP;
/*
* reload is efficient when called repeatedly, so we can do it on
* every iteration.
@ -7510,7 +7513,7 @@ static bool hugepage_has_attrs(struct kvm *kvm, struct kvm_memory_slot *slot,
const unsigned long end = start + KVM_PAGES_PER_HPAGE(level);
if (level == PG_LEVEL_2M)
return kvm_range_has_memory_attributes(kvm, start, end, attrs);
return kvm_range_has_memory_attributes(kvm, start, end, ~0, attrs);
for (gfn = start; gfn < end; gfn += KVM_PAGES_PER_HPAGE(level - 1)) {
if (hugepage_test_mixed(slot, gfn, level - 1) ||


@ -2279,18 +2279,11 @@ static int sev_gmem_post_populate(struct kvm *kvm, gfn_t gfn_start, kvm_pfn_t pf
bool assigned;
int level;
if (!kvm_mem_is_private(kvm, gfn)) {
pr_debug("%s: Failed to ensure GFN 0x%llx has private memory attribute set\n",
__func__, gfn);
ret = -EINVAL;
goto err;
}
ret = snp_lookup_rmpentry((u64)pfn + i, &assigned, &level);
if (ret || assigned) {
pr_debug("%s: Failed to ensure GFN 0x%llx RMP entry is initial shared state, ret: %d assigned: %d\n",
__func__, gfn, ret, assigned);
ret = -EINVAL;
ret = ret ? -EINVAL : -EEXIST;
goto err;
}
@ -2549,6 +2542,14 @@ static int snp_launch_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
data->gctx_paddr = __psp_pa(sev->snp_context);
ret = sev_issue_cmd(kvm, SEV_CMD_SNP_LAUNCH_FINISH, data, &argp->error);
/*
* Now that there will be no more SNP_LAUNCH_UPDATE ioctls, private pages
* can be given to the guest simply by marking the RMP entry as private.
* This can happen on first access and also with KVM_PRE_FAULT_MEMORY.
*/
if (!ret)
kvm->arch.pre_fault_allowed = true;
kfree(id_auth);
e_free_id_block:


@ -4949,6 +4949,7 @@ static int svm_vm_init(struct kvm *kvm)
to_kvm_sev_info(kvm)->need_init = true;
kvm->arch.has_private_mem = (type == KVM_X86_SNP_VM);
kvm->arch.pre_fault_allowed = !kvm->arch.has_private_mem;
}
if (!pause_filter_count || !pause_filter_thresh)


@ -12646,6 +12646,9 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
kvm->arch.vm_type = type;
kvm->arch.has_private_mem =
(type == KVM_X86_SW_PROTECTED_VM);
/* Decided by the vendor code for other VM types. */
kvm->arch.pre_fault_allowed =
type == KVM_X86_DEFAULT_VM || type == KVM_X86_SW_PROTECTED_VM;
ret = kvm_page_track_init(kvm);
if (ret)
@ -13641,19 +13644,14 @@ bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
}
EXPORT_SYMBOL_GPL(kvm_arch_no_poll);
#ifdef CONFIG_HAVE_KVM_GMEM_PREPARE
bool kvm_arch_gmem_prepare_needed(struct kvm *kvm)
{
return kvm->arch.vm_type == KVM_X86_SNP_VM;
}
#ifdef CONFIG_HAVE_KVM_ARCH_GMEM_PREPARE
int kvm_arch_gmem_prepare(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn, int max_order)
{
return kvm_x86_call(gmem_prepare)(kvm, pfn, gfn, max_order);
}
#endif
#ifdef CONFIG_HAVE_KVM_GMEM_INVALIDATE
#ifdef CONFIG_HAVE_KVM_ARCH_GMEM_INVALIDATE
void kvm_arch_gmem_invalidate(kvm_pfn_t start, kvm_pfn_t end)
{
kvm_x86_call(gmem_invalidate)(start, end);


@ -207,18 +207,29 @@ __cmdline_find_option(const char *cmdline, int max_cmdline_size,
int cmdline_find_option_bool(const char *cmdline, const char *option)
{
if (IS_ENABLED(CONFIG_CMDLINE_BOOL))
WARN_ON_ONCE(!builtin_cmdline_added);
int ret;
return __cmdline_find_option_bool(cmdline, COMMAND_LINE_SIZE, option);
ret = __cmdline_find_option_bool(cmdline, COMMAND_LINE_SIZE, option);
if (ret > 0)
return ret;
if (IS_ENABLED(CONFIG_CMDLINE_BOOL) && !builtin_cmdline_added)
return __cmdline_find_option_bool(builtin_cmdline, COMMAND_LINE_SIZE, option);
return ret;
}
int cmdline_find_option(const char *cmdline, const char *option, char *buffer,
int bufsize)
{
if (IS_ENABLED(CONFIG_CMDLINE_BOOL))
WARN_ON_ONCE(!builtin_cmdline_added);
int ret;
return __cmdline_find_option(cmdline, COMMAND_LINE_SIZE, option,
buffer, bufsize);
ret = __cmdline_find_option(cmdline, COMMAND_LINE_SIZE, option, buffer, bufsize);
if (ret > 0)
return ret;
if (IS_ENABLED(CONFIG_CMDLINE_BOOL) && !builtin_cmdline_added)
return __cmdline_find_option(builtin_cmdline, COMMAND_LINE_SIZE, option, buffer, bufsize);
return ret;
}
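For context, a typical call-site shape for these helpers (illustrative, not from this patch; after the change the fallback to the builtin command line happens inside the helper itself):

    /* checks the bootloader-provided line first, then the builtin one */
    if (cmdline_find_option_bool(boot_command_line, "nosmt"))
            pr_info("nosmt given on the command line\n");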


@ -88,12 +88,14 @@ SYM_FUNC_END(__get_user_4)
EXPORT_SYMBOL(__get_user_4)
SYM_FUNC_START(__get_user_8)
#ifndef CONFIG_X86_64
xor %ecx,%ecx
#endif
check_range size=8
ASM_STAC
#ifdef CONFIG_X86_64
UACCESS movq (%_ASM_AX),%rdx
#else
xor %ecx,%ecx
UACCESS movl (%_ASM_AX),%edx
UACCESS movl 4(%_ASM_AX),%ecx
#endif


@ -374,14 +374,14 @@ pti_clone_pgtable(unsigned long start, unsigned long end,
*/
*target_pmd = *pmd;
addr += PMD_SIZE;
addr = round_up(addr + 1, PMD_SIZE);
} else if (level == PTI_CLONE_PTE) {
/* Walk the page-table down to the pte level */
pte = pte_offset_kernel(pmd, addr);
if (pte_none(*pte)) {
addr += PAGE_SIZE;
addr = round_up(addr + 1, PAGE_SIZE);
continue;
}
@ -401,7 +401,7 @@ pti_clone_pgtable(unsigned long start, unsigned long end,
/* Clone the PTE */
*target_pte = *pte;
addr += PAGE_SIZE;
addr = round_up(addr + 1, PAGE_SIZE);
} else {
BUG();
@ -496,7 +496,7 @@ static void pti_clone_entry_text(void)
{
pti_clone_pgtable((unsigned long) __entry_text_start,
(unsigned long) __entry_text_end,
PTI_CLONE_PMD);
PTI_LEVEL_KERNEL_IMAGE);
}
/*


@ -2160,7 +2160,7 @@ static void qca_power_shutdown(struct hci_uart *hu)
qcadev = serdev_device_get_drvdata(hu->serdev);
power = qcadev->bt_power;
if (power->pwrseq) {
if (power && power->pwrseq) {
pwrseq_power_off(power->pwrseq);
set_bit(QCA_BT_OFF, &qca->flags);
return;
@ -2187,10 +2187,6 @@ static void qca_power_shutdown(struct hci_uart *hu)
}
break;
case QCA_QCA6390:
pwrseq_power_off(qcadev->bt_power->pwrseq);
break;
default:
gpiod_set_value_cansleep(qcadev->bt_en, 0);
}
@ -2416,11 +2412,14 @@ static int qca_serdev_probe(struct serdev_device *serdev)
break;
case QCA_QCA6390:
if (dev_of_node(&serdev->dev)) {
qcadev->bt_power->pwrseq = devm_pwrseq_get(&serdev->dev,
"bluetooth");
if (IS_ERR(qcadev->bt_power->pwrseq))
return PTR_ERR(qcadev->bt_power->pwrseq);
break;
}
fallthrough;
default:
qcadev->bt_en = devm_gpiod_get_optional(&serdev->dev, "enable",


@ -18,6 +18,7 @@ config STARFIVE_STARLINK_CACHE
bool "StarFive StarLink Cache controller"
depends on RISCV
depends on ARCH_STARFIVE
depends on 64BIT
select RISCV_DMA_NONCOHERENT
select RISCV_NONSTANDARD_CACHE_OPS
help


@ -268,6 +268,7 @@ config DRM_EXEC
config DRM_GPUVM
tristate
depends on DRM
select DRM_EXEC
help
GPU-VM representation providing helpers to manage a GPUs virtual
address space


@ -1778,7 +1778,7 @@ int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
struct ttm_operation_ctx ctx = { false, false };
struct amdgpu_vm *vm = &fpriv->vm;
struct amdgpu_bo_va_mapping *mapping;
int r;
int i, r;
addr /= AMDGPU_GPU_PAGE_SIZE;
@ -1793,13 +1793,13 @@ int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
if (dma_resv_locking_ctx((*bo)->tbo.base.resv) != &parser->exec.ticket)
return -EINVAL;
if (!((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) {
(*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
amdgpu_bo_placement_from_domain(*bo, (*bo)->allowed_domains);
for (i = 0; i < (*bo)->placement.num_placement; i++)
(*bo)->placements[i].flags |= TTM_PL_FLAG_CONTIGUOUS;
r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, &ctx);
if (r)
return r;
}
return amdgpu_ttm_alloc_gart(&(*bo)->tbo);
}

View File

@ -103,7 +103,7 @@ static int amdgpu_mes_event_log_init(struct amdgpu_device *adev)
if (!amdgpu_mes_log_enable)
return 0;
r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_LOG_BUFFER_SIZE, PAGE_SIZE,
r = amdgpu_bo_create_kernel(adev, adev->mes.event_log_size, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_GTT,
&adev->mes.event_log_gpu_obj,
&adev->mes.event_log_gpu_addr,
@ -113,7 +113,7 @@ static int amdgpu_mes_event_log_init(struct amdgpu_device *adev)
return r;
}
memset(adev->mes.event_log_cpu_addr, 0, PAGE_SIZE);
memset(adev->mes.event_log_cpu_addr, 0, adev->mes.event_log_size);
return 0;
@ -1573,7 +1573,7 @@ static int amdgpu_debugfs_mes_event_log_show(struct seq_file *m, void *unused)
uint32_t *mem = (uint32_t *)(adev->mes.event_log_cpu_addr);
seq_hex_dump(m, "", DUMP_PREFIX_OFFSET, 32, 4,
mem, AMDGPU_MES_LOG_BUFFER_SIZE, false);
mem, adev->mes.event_log_size, false);
return 0;
}

View File

@ -52,7 +52,6 @@ enum amdgpu_mes_priority_level {
#define AMDGPU_MES_PROC_CTX_SIZE 0x1000 /* one page area */
#define AMDGPU_MES_GANG_CTX_SIZE 0x1000 /* one page area */
#define AMDGPU_MES_LOG_BUFFER_SIZE 0x4000 /* Maximum log buffer size for MES */
struct amdgpu_mes_funcs;
@ -135,6 +134,7 @@ struct amdgpu_mes {
unsigned long *doorbell_bitmap;
/* MES event log buffer */
uint32_t event_log_size;
struct amdgpu_bo *event_log_gpu_obj;
uint64_t event_log_gpu_addr;
void *event_log_cpu_addr;

View File

@ -1163,6 +1163,8 @@ static int mes_v11_0_sw_init(void *handle)
adev->mes.kiq_hw_init = &mes_v11_0_kiq_hw_init;
adev->mes.kiq_hw_fini = &mes_v11_0_kiq_hw_fini;
adev->mes.event_log_size = AMDGPU_MES_LOG_BUFFER_SIZE;
r = amdgpu_mes_init(adev);
if (r)
return r;

View File

@ -551,8 +551,10 @@ static int mes_v12_0_set_hw_resources(struct amdgpu_mes *mes)
mes_set_hw_res_pkt.oversubscription_timer = 50;
mes_set_hw_res_pkt.unmapped_doorbell_handling = 1;
mes_set_hw_res_pkt.enable_mes_event_int_logging = 0;
if (amdgpu_mes_log_enable) {
mes_set_hw_res_pkt.enable_mes_event_int_logging = 1;
mes_set_hw_res_pkt.event_intr_history_gpu_mc_ptr = mes->event_log_gpu_addr;
}
return mes_v12_0_submit_pkt_and_poll_completion(mes,
&mes_set_hw_res_pkt, sizeof(mes_set_hw_res_pkt),
@ -1237,6 +1239,8 @@ static int mes_v12_0_sw_init(void *handle)
adev->mes.kiq_hw_init = &mes_v12_0_kiq_hw_init;
adev->mes.kiq_hw_fini = &mes_v12_0_kiq_hw_fini;
adev->mes.event_log_size = AMDGPU_MES_LOG_BUFFER_SIZE;
r = amdgpu_mes_init(adev);
if (r)
return r;

View File

@ -28,6 +28,9 @@
#define MES_API_VERSION 1
/* Maximum log buffer size for MES. Needs to be updated if MES expands MES_EVT_INTR_HIST_LOG */
#define AMDGPU_MES_LOG_BUFFER_SIZE 0x4000
/* The driver submits each API command as a single frame, and the command size
 * is the same for all APIs, to ease debugging and parsing of the ring buffer.
 */

View File

@ -28,6 +28,9 @@
#define MES_API_VERSION 0x14
/* Maximum log buffer size for MES. Needs to be updated if MES expands MES_EVT_INTR_HIST_LOG_12 */
#define AMDGPU_MES_LOG_BUFFER_SIZE 0xC000
/* The driver submits each API command as a single frame, and the command size is the same
 * for all APIs, to ease debugging and parsing of the ring buffer.
 */

View File

@ -618,7 +618,8 @@ int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_versio
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
int r = 0;
if (!pp_funcs || !pp_funcs->load_firmware || adev->flags & AMD_IS_APU)
if (!pp_funcs || !pp_funcs->load_firmware ||
(is_support_sw_smu(adev) && (adev->flags & AMD_IS_APU)))
return 0;
mutex_lock(&adev->pm.mutex);

View File

@ -66,6 +66,7 @@
#define MP0_MP1_DATA_REGION_SIZE_COMBOPPTABLE 0x4000
#define DEBUGSMC_MSG_Mode1Reset 2
#define LINK_SPEED_MAX 3
static struct cmn2asic_msg_mapping smu_v14_0_2_message_map[SMU_MSG_MAX_COUNT] = {
MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 1),
@ -221,7 +222,6 @@ static struct cmn2asic_mapping smu_v14_0_2_workload_map[PP_SMC_POWER_PROFILE_COU
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_WINDOW3D, WORKLOAD_PPLIB_WINDOW_3D_BIT),
};
#if 0
static const uint8_t smu_v14_0_2_throttler_map[] = {
[THROTTLER_PPT0_BIT] = (SMU_THROTTLER_PPT0_BIT),
[THROTTLER_PPT1_BIT] = (SMU_THROTTLER_PPT1_BIT),
@ -241,7 +241,6 @@ static const uint8_t smu_v14_0_2_throttler_map[] = {
[THROTTLER_GFX_APCC_PLUS_BIT] = (SMU_THROTTLER_APCC_BIT),
[THROTTLER_FIT_BIT] = (SMU_THROTTLER_FIT_BIT),
};
#endif
static int
smu_v14_0_2_get_allowed_feature_mask(struct smu_context *smu,
@ -1869,6 +1868,88 @@ static ssize_t smu_v14_0_2_get_ecc_info(struct smu_context *smu,
return ret;
}
static ssize_t smu_v14_0_2_get_gpu_metrics(struct smu_context *smu,
void **table)
{
struct smu_table_context *smu_table = &smu->smu_table;
struct gpu_metrics_v1_3 *gpu_metrics =
(struct gpu_metrics_v1_3 *)smu_table->gpu_metrics_table;
SmuMetricsExternal_t metrics_ext;
SmuMetrics_t *metrics = &metrics_ext.SmuMetrics;
int ret = 0;
ret = smu_cmn_get_metrics_table(smu,
&metrics_ext,
true);
if (ret)
return ret;
smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);
gpu_metrics->temperature_edge = metrics->AvgTemperature[TEMP_EDGE];
gpu_metrics->temperature_hotspot = metrics->AvgTemperature[TEMP_HOTSPOT];
gpu_metrics->temperature_mem = metrics->AvgTemperature[TEMP_MEM];
gpu_metrics->temperature_vrgfx = metrics->AvgTemperature[TEMP_VR_GFX];
gpu_metrics->temperature_vrsoc = metrics->AvgTemperature[TEMP_VR_SOC];
gpu_metrics->temperature_vrmem = max(metrics->AvgTemperature[TEMP_VR_MEM0],
metrics->AvgTemperature[TEMP_VR_MEM1]);
gpu_metrics->average_gfx_activity = metrics->AverageGfxActivity;
gpu_metrics->average_umc_activity = metrics->AverageUclkActivity;
gpu_metrics->average_mm_activity = max(metrics->Vcn0ActivityPercentage,
metrics->Vcn1ActivityPercentage);
gpu_metrics->average_socket_power = metrics->AverageSocketPower;
gpu_metrics->energy_accumulator = metrics->EnergyAccumulator;
if (metrics->AverageGfxActivity <= SMU_14_0_2_BUSY_THRESHOLD)
gpu_metrics->average_gfxclk_frequency = metrics->AverageGfxclkFrequencyPostDs;
else
gpu_metrics->average_gfxclk_frequency = metrics->AverageGfxclkFrequencyPreDs;
if (metrics->AverageUclkActivity <= SMU_14_0_2_BUSY_THRESHOLD)
gpu_metrics->average_uclk_frequency = metrics->AverageMemclkFrequencyPostDs;
else
gpu_metrics->average_uclk_frequency = metrics->AverageMemclkFrequencyPreDs;
gpu_metrics->average_vclk0_frequency = metrics->AverageVclk0Frequency;
gpu_metrics->average_dclk0_frequency = metrics->AverageDclk0Frequency;
gpu_metrics->average_vclk1_frequency = metrics->AverageVclk1Frequency;
gpu_metrics->average_dclk1_frequency = metrics->AverageDclk1Frequency;
gpu_metrics->current_gfxclk = gpu_metrics->average_gfxclk_frequency;
gpu_metrics->current_socclk = metrics->CurrClock[PPCLK_SOCCLK];
gpu_metrics->current_uclk = metrics->CurrClock[PPCLK_UCLK];
gpu_metrics->current_vclk0 = metrics->CurrClock[PPCLK_VCLK_0];
gpu_metrics->current_dclk0 = metrics->CurrClock[PPCLK_DCLK_0];
gpu_metrics->current_vclk1 = metrics->CurrClock[PPCLK_VCLK_0];
gpu_metrics->current_dclk1 = metrics->CurrClock[PPCLK_DCLK_0];
gpu_metrics->throttle_status =
smu_v14_0_2_get_throttler_status(metrics);
gpu_metrics->indep_throttle_status =
smu_cmn_get_indep_throttler_status(gpu_metrics->throttle_status,
smu_v14_0_2_throttler_map);
gpu_metrics->current_fan_speed = metrics->AvgFanRpm;
gpu_metrics->pcie_link_width = metrics->PcieWidth;
if ((metrics->PcieRate - 1) > LINK_SPEED_MAX)
gpu_metrics->pcie_link_speed = pcie_gen_to_speed(1);
else
gpu_metrics->pcie_link_speed = pcie_gen_to_speed(metrics->PcieRate);
gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
gpu_metrics->voltage_gfx = metrics->AvgVoltage[SVI_PLANE_VDD_GFX];
gpu_metrics->voltage_soc = metrics->AvgVoltage[SVI_PLANE_VDD_SOC];
gpu_metrics->voltage_mem = metrics->AvgVoltage[SVI_PLANE_VDDIO_MEM];
*table = (void *)gpu_metrics;
return sizeof(struct gpu_metrics_v1_3);
}
static const struct pptable_funcs smu_v14_0_2_ppt_funcs = {
.get_allowed_feature_mask = smu_v14_0_2_get_allowed_feature_mask,
.set_default_dpm_table = smu_v14_0_2_set_default_dpm_table,
@ -1905,6 +1986,7 @@ static const struct pptable_funcs smu_v14_0_2_ppt_funcs = {
.enable_thermal_alert = smu_v14_0_enable_thermal_alert,
.disable_thermal_alert = smu_v14_0_disable_thermal_alert,
.notify_memory_pool_location = smu_v14_0_notify_memory_pool_location,
.get_gpu_metrics = smu_v14_0_2_get_gpu_metrics,
.set_soft_freq_limited_range = smu_v14_0_set_soft_freq_limited_range,
.init_pptable_microcode = smu_v14_0_init_pptable_microcode,
.populate_umd_state_clk = smu_v14_0_2_populate_umd_state_clk,

View File

@ -158,7 +158,14 @@ void ast_dp_launch(struct drm_device *dev)
ASTDP_HOST_EDID_READ_DONE);
}
bool ast_dp_power_is_on(struct ast_device *ast)
{
u8 vgacre3;
vgacre3 = ast_get_index_reg(ast, AST_IO_VGACRI, 0xe3);
return !(vgacre3 & AST_DP_PHY_SLEEP);
}
void ast_dp_power_on_off(struct drm_device *dev, bool on)
{

View File

@ -391,6 +391,11 @@ static int ast_drm_freeze(struct drm_device *dev)
static int ast_drm_thaw(struct drm_device *dev)
{
struct ast_device *ast = to_ast_device(dev);
ast_enable_vga(ast->ioregs);
ast_open_key(ast->ioregs);
ast_enable_mmio(dev->dev, ast->ioregs);
ast_post_gpu(dev);
return drm_mode_config_helper_resume(dev);

View File

@ -472,6 +472,7 @@ void ast_init_3rdtx(struct drm_device *dev);
bool ast_astdp_is_connected(struct ast_device *ast);
int ast_astdp_read_edid(struct drm_device *dev, u8 *ediddata);
void ast_dp_launch(struct drm_device *dev);
bool ast_dp_power_is_on(struct ast_device *ast);
void ast_dp_power_on_off(struct drm_device *dev, bool no);
void ast_dp_set_on_off(struct drm_device *dev, bool no);
void ast_dp_set_mode(struct drm_crtc *crtc, struct ast_vbios_mode_info *vbios_mode);

View File

@ -28,6 +28,7 @@
* Authors: Dave Airlie <airlied@redhat.com>
*/
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/pci.h>
@ -1687,11 +1688,35 @@ static int ast_astdp_connector_helper_detect_ctx(struct drm_connector *connector
struct drm_modeset_acquire_ctx *ctx,
bool force)
{
struct drm_device *dev = connector->dev;
struct ast_device *ast = to_ast_device(connector->dev);
enum drm_connector_status status = connector_status_disconnected;
struct drm_connector_state *connector_state = connector->state;
bool is_active = false;
mutex_lock(&ast->modeset_lock);
if (connector_state && connector_state->crtc) {
struct drm_crtc_state *crtc_state = connector_state->crtc->state;
if (crtc_state && crtc_state->active)
is_active = true;
}
if (!is_active && !ast_dp_power_is_on(ast)) {
ast_dp_power_on_off(dev, true);
msleep(50);
}
if (ast_astdp_is_connected(ast))
return connector_status_connected;
return connector_status_disconnected;
status = connector_status_connected;
if (!is_active && status == connector_status_disconnected)
ast_dp_power_on_off(dev, false);
mutex_unlock(&ast->modeset_lock);
return status;
}
static const struct drm_connector_helper_funcs ast_astdp_connector_helper_funcs = {

View File

@ -1070,7 +1070,10 @@ int drm_atomic_set_property(struct drm_atomic_state *state,
break;
}
if (async_flip && prop != config->prop_fb_id) {
if (async_flip &&
prop != config->prop_fb_id &&
prop != config->prop_in_fence_fd &&
prop != config->prop_fb_damage_clips) {
ret = drm_atomic_plane_get_property(plane, plane_state,
prop, &old_val);
ret = drm_atomic_check_prop_changes(ret, old_val, prop_value, prop);
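
For context, a hedged userspace sketch of what the relaxed check permits: an atomic async page flip that updates the plane's FB_ID together with IN_FENCE_FD. The plane and property IDs below are placeholders a real client would resolve via drmModeObjectGetProperties().

#include <stdint.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

/* Sketch: submit an async flip carrying both a new framebuffer and an
 * input fence. plane_id, prop_fb_id and prop_in_fence_fd are assumed to
 * have been looked up beforehand. */
int async_flip(int fd, uint32_t plane_id, uint32_t prop_fb_id,
	       uint32_t prop_in_fence_fd, uint32_t new_fb, int fence_fd)
{
	drmModeAtomicReq *req = drmModeAtomicAlloc();
	int ret;

	if (!req)
		return -1;
	drmModeAtomicAddProperty(req, plane_id, prop_fb_id, new_fb);
	drmModeAtomicAddProperty(req, plane_id, prop_in_fence_fd, fence_fd);
	ret = drmModeAtomicCommit(fd, req, DRM_MODE_PAGE_FLIP_ASYNC, NULL);
	drmModeAtomicFree(req);
	return ret;
}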

View File

@ -355,7 +355,7 @@ int drm_client_buffer_vmap_local(struct drm_client_buffer *buffer,
err_drm_gem_vmap_unlocked:
drm_gem_unlock(gem);
return 0;
return ret;
}
EXPORT_SYMBOL(drm_client_buffer_vmap_local);

View File

@ -624,6 +624,17 @@ static void drm_fb_helper_add_damage_clip(struct drm_fb_helper *helper, u32 x, u
static void drm_fb_helper_damage(struct drm_fb_helper *helper, u32 x, u32 y,
u32 width, u32 height)
{
/*
* This function may be invoked by panic() to flush the frame
* buffer, where all CPUs except the panic CPU are stopped.
* During the following schedule_work(), the panic CPU needs
* the worker_pool lock, which might be held by a stopped CPU,
* causing schedule_work() and panic() to block. Return early on
* oops_in_progress to prevent this blocking.
*/
if (oops_in_progress)
return;
drm_fb_helper_add_damage_clip(helper, x, y, width, height);
schedule_work(&helper->damage_work);

View File

@ -414,6 +414,12 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "ONE XPLAYER"),
},
.driver_data = (void *)&lcd1600x2560_leftside_up,
}, { /* OrangePi Neo */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "OrangePi"),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "NEO-01"),
},
.driver_data = (void *)&lcd1200x1920_rightside_up,
}, { /* Samsung GalaxyBook 10.6 */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),

View File

@ -1658,7 +1658,7 @@ static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
}
static int
skl_ddi_calculate_wrpll(int clock /* in Hz */,
skl_ddi_calculate_wrpll(int clock,
int ref_clock,
struct skl_wrpll_params *wrpll_params)
{
@ -1683,7 +1683,7 @@ skl_ddi_calculate_wrpll(int clock /* in Hz */,
};
unsigned int dco, d, i;
unsigned int p0, p1, p2;
u64 afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */
u64 afe_clock = (u64)clock * 1000 * 5; /* AFE Clock is 5x Pixel clock, in Hz */
for (d = 0; d < ARRAY_SIZE(dividers); d++) {
for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) {
@ -1808,7 +1808,7 @@ static int skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
struct skl_wrpll_params wrpll_params = {};
int ret;
ret = skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000,
ret = skl_ddi_calculate_wrpll(crtc_state->port_clock,
i915->display.dpll.ref_clks.nssc, &wrpll_params);
if (ret)
return ret;
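
The unit change above matters because the old code computed clock * 5 in 32-bit arithmetic before widening: with clock in Hz, any port clock above roughly 429 MHz (INT_MAX / 5) overflows. A worked example with a hypothetical 600 MHz clock:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int clock_hz = 600000 * 1000;	/* 600 MHz port clock, in Hz */

	/* Old code: the multiply wraps in 32 bits (600e6 * 5 = 3e9 > INT_MAX)
	 * before the result is assigned to a u64. */
	int64_t bad = (int32_t)((int64_t)clock_hz * 5);

	/* Fixed code: pass kHz and widen before multiplying. */
	int64_t good = (int64_t)(clock_hz / 1000) * 1000 * 5;

	printf("wrapped: %lld, correct: %lld\n",
	       (long long)bad, (long long)good);
	return 0;
}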

View File

@ -251,7 +251,7 @@
#define HDCP2_STREAM_STATUS(dev_priv, trans, port) \
(TRANS_HDCP(dev_priv) ? \
TRANS_HDCP2_STREAM_STATUS(trans) : \
PIPE_HDCP2_STREAM_STATUS(pipe))
PIPE_HDCP2_STREAM_STATUS(port))
#define _PORTA_HDCP2_AUTH_STREAM 0x66F00
#define _PORTB_HDCP2_AUTH_STREAM 0x66F04
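
The one-character fix above is a classic macro pitfall: the body referenced pipe although the parameter is named port, so the macro silently bound whatever pipe variable happened to be in scope at the call site. A minimal standalone illustration (names hypothetical):

#include <stdio.h>

#define DOUBLE_BAD(x)	((y) * 2)	/* body uses the wrong identifier */
#define DOUBLE_OK(x)	((x) * 2)

int main(void)
{
	int y = 7;	/* happens to exist at the call site */
	int z = 3;

	/* Compiles cleanly, but silently doubles y instead of z. */
	printf("DOUBLE_BAD(z) = %d\n", DOUBLE_BAD(z));
	printf("DOUBLE_OK(z)  = %d\n", DOUBLE_OK(z));
	return 0;
}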

View File

@ -2748,26 +2748,6 @@ oa_configure_all_contexts(struct i915_perf_stream *stream,
return 0;
}
static int
gen12_configure_all_contexts(struct i915_perf_stream *stream,
const struct i915_oa_config *oa_config,
struct i915_active *active)
{
struct flex regs[] = {
{
GEN8_R_PWR_CLK_STATE(RENDER_RING_BASE),
CTX_R_PWR_CLK_STATE,
},
};
if (stream->engine->class != RENDER_CLASS)
return 0;
return oa_configure_all_contexts(stream,
regs, ARRAY_SIZE(regs),
active);
}
static int
lrc_configure_all_contexts(struct i915_perf_stream *stream,
const struct i915_oa_config *oa_config,
@ -2874,7 +2854,6 @@ gen12_enable_metric_set(struct i915_perf_stream *stream,
{
struct drm_i915_private *i915 = stream->perf->i915;
struct intel_uncore *uncore = stream->uncore;
struct i915_oa_config *oa_config = stream->oa_config;
bool periodic = stream->periodic;
u32 period_exponent = stream->period_exponent;
u32 sqcnt1;
@ -2918,15 +2897,6 @@ gen12_enable_metric_set(struct i915_perf_stream *stream,
intel_uncore_rmw(uncore, GEN12_SQCNT1, 0, sqcnt1);
/*
* Update all contexts prior writing the mux configurations as we need
* to make sure all slices/subslices are ON before writing to NOA
* registers.
*/
ret = gen12_configure_all_contexts(stream, oa_config, active);
if (ret)
return ret;
/*
* For Gen12, performance counters are context
* saved/restored. Only enable it for the context that
@ -2980,9 +2950,6 @@ static void gen12_disable_metric_set(struct i915_perf_stream *stream)
_MASKED_BIT_DISABLE(GEN12_DISABLE_DOP_GATING));
}
/* Reset all contexts' slices/subslices configurations. */
gen12_configure_all_contexts(stream, NULL, NULL);
/* disable the context save/restore or OAR counters */
if (stream->ctx)
gen12_configure_oar_context(stream, NULL);

View File

@ -898,7 +898,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict,
* Without this the operation can timeout and we'll fallback to a
* software copy, which might take several minutes to finish.
*/
nouveau_fence_wait(fence, false);
nouveau_fence_wait(fence, false, false);
ret = ttm_bo_move_accel_cleanup(bo, &fence->base, evict, false,
new_reg);
nouveau_fence_unref(&fence);

View File

@ -72,7 +72,7 @@ nouveau_channel_idle(struct nouveau_channel *chan)
ret = nouveau_fence_new(&fence, chan);
if (!ret) {
ret = nouveau_fence_wait(fence, false);
ret = nouveau_fence_wait(fence, false, false);
nouveau_fence_unref(&fence);
}

View File

@ -128,7 +128,7 @@ static void nouveau_dmem_page_free(struct page *page)
static void nouveau_dmem_fence_done(struct nouveau_fence **fence)
{
if (fence) {
nouveau_fence_wait(*fence, false);
nouveau_fence_wait(*fence, true, false);
nouveau_fence_unref(fence);
} else {
/*

View File

@ -311,11 +311,39 @@ nouveau_fence_wait_legacy(struct dma_fence *f, bool intr, long wait)
return timeout - t;
}
static int
nouveau_fence_wait_busy(struct nouveau_fence *fence, bool intr)
{
int ret = 0;
while (!nouveau_fence_done(fence)) {
if (time_after_eq(jiffies, fence->timeout)) {
ret = -EBUSY;
break;
}
__set_current_state(intr ?
TASK_INTERRUPTIBLE :
TASK_UNINTERRUPTIBLE);
if (intr && signal_pending(current)) {
ret = -ERESTARTSYS;
break;
}
}
__set_current_state(TASK_RUNNING);
return ret;
}
int
nouveau_fence_wait(struct nouveau_fence *fence, bool intr)
nouveau_fence_wait(struct nouveau_fence *fence, bool lazy, bool intr)
{
long ret;
if (!lazy)
return nouveau_fence_wait_busy(fence, intr);
ret = dma_fence_wait_timeout(&fence->base, intr, 15 * HZ);
if (ret < 0)
return ret;

View File

@ -23,7 +23,7 @@ void nouveau_fence_unref(struct nouveau_fence **);
int nouveau_fence_emit(struct nouveau_fence *);
bool nouveau_fence_done(struct nouveau_fence *);
int nouveau_fence_wait(struct nouveau_fence *, bool intr);
int nouveau_fence_wait(struct nouveau_fence *, bool lazy, bool intr);
int nouveau_fence_sync(struct nouveau_bo *, struct nouveau_channel *, bool exclusive, bool intr);
struct nouveau_fence_chan {

View File

@ -928,7 +928,7 @@ revalidate:
}
if (sync) {
if (!(ret = nouveau_fence_wait(fence, false))) {
if (!(ret = nouveau_fence_wait(fence, false, false))) {
if ((ret = dma_fence_get_status(&fence->base)) == 1)
ret = 0;
}

View File

@ -64,7 +64,8 @@ struct drm_gem_object *nouveau_gem_prime_import_sg_table(struct drm_device *dev,
* to the caller, instead of a normal nouveau_bo ttm reference. */
ret = drm_gem_object_init(dev, &nvbo->bo.base, size);
if (ret) {
nouveau_bo_ref(NULL, &nvbo);
drm_gem_object_release(&nvbo->bo.base);
kfree(nvbo);
obj = ERR_PTR(-ENOMEM);
goto unlock;
}

View File

@ -1803,6 +1803,7 @@ nouveau_uvmm_bo_validate(struct drm_gpuvm_bo *vm_bo, struct drm_exec *exec)
{
struct nouveau_bo *nvbo = nouveau_gem_object(vm_bo->obj);
nouveau_bo_placement_set(nvbo, nvbo->valid_domains, 0);
return nouveau_bo_validate(nvbo, true, false);
}

View File

@ -565,6 +565,10 @@ void v3d_mmu_insert_ptes(struct v3d_bo *bo);
void v3d_mmu_remove_ptes(struct v3d_bo *bo);
/* v3d_sched.c */
void v3d_timestamp_query_info_free(struct v3d_timestamp_query_info *query_info,
unsigned int count);
void v3d_performance_query_info_free(struct v3d_performance_query_info *query_info,
unsigned int count);
void v3d_job_update_stats(struct v3d_job *job, enum v3d_queue queue);
int v3d_sched_init(struct v3d_dev *v3d);
void v3d_sched_fini(struct v3d_dev *v3d);

View File

@ -73,24 +73,44 @@ v3d_sched_job_free(struct drm_sched_job *sched_job)
v3d_job_cleanup(job);
}
void
v3d_timestamp_query_info_free(struct v3d_timestamp_query_info *query_info,
unsigned int count)
{
if (query_info->queries) {
unsigned int i;
for (i = 0; i < count; i++)
drm_syncobj_put(query_info->queries[i].syncobj);
kvfree(query_info->queries);
}
}
void
v3d_performance_query_info_free(struct v3d_performance_query_info *query_info,
unsigned int count)
{
if (query_info->queries) {
unsigned int i;
for (i = 0; i < count; i++)
drm_syncobj_put(query_info->queries[i].syncobj);
kvfree(query_info->queries);
}
}
static void
v3d_cpu_job_free(struct drm_sched_job *sched_job)
{
struct v3d_cpu_job *job = to_cpu_job(sched_job);
struct v3d_timestamp_query_info *timestamp_query = &job->timestamp_query;
struct v3d_performance_query_info *performance_query = &job->performance_query;
if (timestamp_query->queries) {
for (int i = 0; i < timestamp_query->count; i++)
drm_syncobj_put(timestamp_query->queries[i].syncobj);
kvfree(timestamp_query->queries);
}
v3d_timestamp_query_info_free(&job->timestamp_query,
job->timestamp_query.count);
if (performance_query->queries) {
for (int i = 0; i < performance_query->count; i++)
drm_syncobj_put(performance_query->queries[i].syncobj);
kvfree(performance_query->queries);
}
v3d_performance_query_info_free(&job->performance_query,
job->performance_query.count);
v3d_job_cleanup(&job->base);
}
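
These helpers centralize the cleanup that the ioctl parsers below rely on: when acquisition fails partway through a loop, only the i references taken so far are released. A generic sketch of that error-path shape, with hypothetical obj_get()/obj_put() helpers:

#include <errno.h>

struct obj;
struct obj *obj_get(unsigned int i);	/* hypothetical: may return NULL */
void obj_put(struct obj *o);		/* hypothetical: drops a reference */

/* On failure, release only the i references actually acquired, the same
 * shape as v3d_timestamp_query_info_free(&job->timestamp_query, i). */
static int acquire_all(struct obj **out, unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++) {
		out[i] = obj_get(i);
		if (!out[i])
			goto error;
	}
	return 0;

error:
	while (i--)
		obj_put(out[i]);
	return -ENOENT;
}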

View File

@ -452,6 +452,8 @@ v3d_get_cpu_timestamp_query_params(struct drm_file *file_priv,
{
u32 __user *offsets, *syncs;
struct drm_v3d_timestamp_query timestamp;
unsigned int i;
int err;
if (!job) {
DRM_DEBUG("CPU job extension was attached to a GPU job.\n");
@ -480,26 +482,34 @@ v3d_get_cpu_timestamp_query_params(struct drm_file *file_priv,
offsets = u64_to_user_ptr(timestamp.offsets);
syncs = u64_to_user_ptr(timestamp.syncs);
for (int i = 0; i < timestamp.count; i++) {
for (i = 0; i < timestamp.count; i++) {
u32 offset, sync;
if (copy_from_user(&offset, offsets++, sizeof(offset))) {
kvfree(job->timestamp_query.queries);
return -EFAULT;
err = -EFAULT;
goto error;
}
job->timestamp_query.queries[i].offset = offset;
if (copy_from_user(&sync, syncs++, sizeof(sync))) {
kvfree(job->timestamp_query.queries);
return -EFAULT;
err = -EFAULT;
goto error;
}
job->timestamp_query.queries[i].syncobj = drm_syncobj_find(file_priv, sync);
if (!job->timestamp_query.queries[i].syncobj) {
err = -ENOENT;
goto error;
}
}
job->timestamp_query.count = timestamp.count;
return 0;
error:
v3d_timestamp_query_info_free(&job->timestamp_query, i);
return err;
}
static int
@ -509,6 +519,8 @@ v3d_get_cpu_reset_timestamp_params(struct drm_file *file_priv,
{
u32 __user *syncs;
struct drm_v3d_reset_timestamp_query reset;
unsigned int i;
int err;
if (!job) {
DRM_DEBUG("CPU job extension was attached to a GPU job.\n");
@ -533,21 +545,29 @@ v3d_get_cpu_reset_timestamp_params(struct drm_file *file_priv,
syncs = u64_to_user_ptr(reset.syncs);
for (int i = 0; i < reset.count; i++) {
for (i = 0; i < reset.count; i++) {
u32 sync;
job->timestamp_query.queries[i].offset = reset.offset + 8 * i;
if (copy_from_user(&sync, syncs++, sizeof(sync))) {
kvfree(job->timestamp_query.queries);
return -EFAULT;
err = -EFAULT;
goto error;
}
job->timestamp_query.queries[i].syncobj = drm_syncobj_find(file_priv, sync);
if (!job->timestamp_query.queries[i].syncobj) {
err = -ENOENT;
goto error;
}
}
job->timestamp_query.count = reset.count;
return 0;
error:
v3d_timestamp_query_info_free(&job->timestamp_query, i);
return err;
}
/* Get data for the copy timestamp query results job submission. */
@ -558,7 +578,8 @@ v3d_get_cpu_copy_query_results_params(struct drm_file *file_priv,
{
u32 __user *offsets, *syncs;
struct drm_v3d_copy_timestamp_query copy;
int i;
unsigned int i;
int err;
if (!job) {
DRM_DEBUG("CPU job extension was attached to a GPU job.\n");
@ -591,18 +612,22 @@ v3d_get_cpu_copy_query_results_params(struct drm_file *file_priv,
u32 offset, sync;
if (copy_from_user(&offset, offsets++, sizeof(offset))) {
kvfree(job->timestamp_query.queries);
return -EFAULT;
err = -EFAULT;
goto error;
}
job->timestamp_query.queries[i].offset = offset;
if (copy_from_user(&sync, syncs++, sizeof(sync))) {
kvfree(job->timestamp_query.queries);
return -EFAULT;
err = -EFAULT;
goto error;
}
job->timestamp_query.queries[i].syncobj = drm_syncobj_find(file_priv, sync);
if (!job->timestamp_query.queries[i].syncobj) {
err = -ENOENT;
goto error;
}
}
job->timestamp_query.count = copy.count;
@ -613,6 +638,10 @@ v3d_get_cpu_copy_query_results_params(struct drm_file *file_priv,
job->copy.stride = copy.stride;
return 0;
error:
v3d_timestamp_query_info_free(&job->timestamp_query, i);
return err;
}
static int
@ -623,6 +652,8 @@ v3d_get_cpu_reset_performance_params(struct drm_file *file_priv,
u32 __user *syncs;
u64 __user *kperfmon_ids;
struct drm_v3d_reset_performance_query reset;
unsigned int i, j;
int err;
if (!job) {
DRM_DEBUG("CPU job extension was attached to a GPU job.\n");
@ -637,6 +668,9 @@ v3d_get_cpu_reset_performance_params(struct drm_file *file_priv,
if (copy_from_user(&reset, ext, sizeof(reset)))
return -EFAULT;
if (reset.nperfmons > V3D_MAX_PERFMONS)
return -EINVAL;
job->job_type = V3D_CPU_JOB_TYPE_RESET_PERFORMANCE_QUERY;
job->performance_query.queries = kvmalloc_array(reset.count,
@ -648,39 +682,47 @@ v3d_get_cpu_reset_performance_params(struct drm_file *file_priv,
syncs = u64_to_user_ptr(reset.syncs);
kperfmon_ids = u64_to_user_ptr(reset.kperfmon_ids);
for (int i = 0; i < reset.count; i++) {
for (i = 0; i < reset.count; i++) {
u32 sync;
u64 ids;
u32 __user *ids_pointer;
u32 id;
if (copy_from_user(&sync, syncs++, sizeof(sync))) {
kvfree(job->performance_query.queries);
return -EFAULT;
err = -EFAULT;
goto error;
}
job->performance_query.queries[i].syncobj = drm_syncobj_find(file_priv, sync);
if (copy_from_user(&ids, kperfmon_ids++, sizeof(ids))) {
kvfree(job->performance_query.queries);
return -EFAULT;
err = -EFAULT;
goto error;
}
ids_pointer = u64_to_user_ptr(ids);
for (int j = 0; j < reset.nperfmons; j++) {
for (j = 0; j < reset.nperfmons; j++) {
if (copy_from_user(&id, ids_pointer++, sizeof(id))) {
kvfree(job->performance_query.queries);
return -EFAULT;
err = -EFAULT;
goto error;
}
job->performance_query.queries[i].kperfmon_ids[j] = id;
}
job->performance_query.queries[i].syncobj = drm_syncobj_find(file_priv, sync);
if (!job->performance_query.queries[i].syncobj) {
err = -ENOENT;
goto error;
}
}
job->performance_query.count = reset.count;
job->performance_query.nperfmons = reset.nperfmons;
return 0;
error:
v3d_performance_query_info_free(&job->performance_query, i);
return err;
}
static int
@ -691,6 +733,8 @@ v3d_get_cpu_copy_performance_query_params(struct drm_file *file_priv,
u32 __user *syncs;
u64 __user *kperfmon_ids;
struct drm_v3d_copy_performance_query copy;
unsigned int i, j;
int err;
if (!job) {
DRM_DEBUG("CPU job extension was attached to a GPU job.\n");
@ -708,6 +752,9 @@ v3d_get_cpu_copy_performance_query_params(struct drm_file *file_priv,
if (copy.pad)
return -EINVAL;
if (copy.nperfmons > V3D_MAX_PERFMONS)
return -EINVAL;
job->job_type = V3D_CPU_JOB_TYPE_COPY_PERFORMANCE_QUERY;
job->performance_query.queries = kvmalloc_array(copy.count,
@ -719,34 +766,38 @@ v3d_get_cpu_copy_performance_query_params(struct drm_file *file_priv,
syncs = u64_to_user_ptr(copy.syncs);
kperfmon_ids = u64_to_user_ptr(copy.kperfmon_ids);
for (int i = 0; i < copy.count; i++) {
for (i = 0; i < copy.count; i++) {
u32 sync;
u64 ids;
u32 __user *ids_pointer;
u32 id;
if (copy_from_user(&sync, syncs++, sizeof(sync))) {
kvfree(job->performance_query.queries);
return -EFAULT;
err = -EFAULT;
goto error;
}
job->performance_query.queries[i].syncobj = drm_syncobj_find(file_priv, sync);
if (copy_from_user(&ids, kperfmon_ids++, sizeof(ids))) {
kvfree(job->performance_query.queries);
return -EFAULT;
err = -EFAULT;
goto error;
}
ids_pointer = u64_to_user_ptr(ids);
for (int j = 0; j < copy.nperfmons; j++) {
for (j = 0; j < copy.nperfmons; j++) {
if (copy_from_user(&id, ids_pointer++, sizeof(id))) {
kvfree(job->performance_query.queries);
return -EFAULT;
err = -EFAULT;
goto error;
}
job->performance_query.queries[i].kperfmon_ids[j] = id;
}
job->performance_query.queries[i].syncobj = drm_syncobj_find(file_priv, sync);
if (!job->performance_query.queries[i].syncobj) {
err = -ENOENT;
goto error;
}
}
job->performance_query.count = copy.count;
job->performance_query.nperfmons = copy.nperfmons;
@ -759,6 +810,10 @@ v3d_get_cpu_copy_performance_query_params(struct drm_file *file_priv,
job->copy.stride = copy.stride;
return 0;
error:
v3d_performance_query_info_free(&job->performance_query, i);
return err;
}
/* Whenever userspace sets ioctl extensions, v3d_get_extensions parses data

View File

@ -48,7 +48,7 @@ struct virtio_gpu_submit {
static int virtio_gpu_do_fence_wait(struct virtio_gpu_submit *submit,
struct dma_fence *in_fence)
{
u32 context = submit->fence_ctx + submit->ring_idx;
u64 context = submit->fence_ctx + submit->ring_idx;
if (dma_fence_match_context(in_fence, context))
return 0;
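
The u32 to u64 change matters because dma_fence contexts are 64-bit: storing fence_ctx + ring_idx in a u32 truncates the high bits, after which dma_fence_match_context() can never match. A tiny illustration with a hypothetical context base:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t fence_ctx = 0x100000000ULL;	/* hypothetical 64-bit context base */
	uint32_t truncated = fence_ctx + 1;	/* old u32 field: high bits lost */
	uint64_t correct = fence_ctx + 1;	/* fixed u64 field */

	printf("truncated = %#x\n", truncated);
	printf("correct   = %#llx\n", (unsigned long long)correct);
	return 0;
}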

View File

@ -1,6 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**********************************************************
* Copyright 2021 VMware, Inc.
* SPDX-License-Identifier: GPL-2.0 OR MIT
*
* Copyright (c) 2021-2024 Broadcom. All Rights Reserved. The term
* Broadcom refers to Broadcom Inc. and/or its subsidiaries.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
@ -31,6 +33,10 @@
#include <drm/vmwgfx_drm.h>
#define SVGA3D_FLAGS_UPPER_32(svga3d_flags) ((svga3d_flags) >> 32)
#define SVGA3D_FLAGS_LOWER_32(svga3d_flags) \
((svga3d_flags) & ((uint64_t)U32_MAX))
static inline u32 clamped_umul32(u32 a, u32 b)
{
uint64_t tmp = (uint64_t) a*b;

View File

@ -1,8 +1,8 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
* Copyright © 2011-2023 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
* Copyright (c) 2011-2024 Broadcom. All Rights Reserved. The term
* Broadcom refers to Broadcom Inc. and/or its subsidiaries.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@ -28,15 +28,39 @@
#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include <drm/ttm/ttm_placement.h>
static void vmw_bo_release(struct vmw_bo *vbo)
{
struct vmw_resource *res;
WARN_ON(vbo->tbo.base.funcs &&
kref_read(&vbo->tbo.base.refcount) != 0);
vmw_bo_unmap(vbo);
xa_destroy(&vbo->detached_resources);
WARN_ON(vbo->is_dumb && !vbo->dumb_surface);
if (vbo->is_dumb && vbo->dumb_surface) {
res = &vbo->dumb_surface->res;
WARN_ON(vbo != res->guest_memory_bo);
WARN_ON(!res->guest_memory_bo);
if (res->guest_memory_bo) {
/* Reserve and switch the backing mob. */
mutex_lock(&res->dev_priv->cmdbuf_mutex);
(void)vmw_resource_reserve(res, false, true);
vmw_resource_mob_detach(res);
if (res->coherent)
vmw_bo_dirty_release(res->guest_memory_bo);
res->guest_memory_bo = NULL;
res->guest_memory_offset = 0;
vmw_resource_unreserve(res, false, false, false, NULL,
0);
mutex_unlock(&res->dev_priv->cmdbuf_mutex);
}
vmw_surface_unreference(&vbo->dumb_surface);
}
drm_gem_object_release(&vbo->tbo.base);
}
@ -325,6 +349,11 @@ void vmw_bo_pin_reserved(struct vmw_bo *vbo, bool pin)
*
*/
void *vmw_bo_map_and_cache(struct vmw_bo *vbo)
{
return vmw_bo_map_and_cache_size(vbo, vbo->tbo.base.size);
}
void *vmw_bo_map_and_cache_size(struct vmw_bo *vbo, size_t size)
{
struct ttm_buffer_object *bo = &vbo->tbo;
bool not_used;
@ -335,9 +364,10 @@ void *vmw_bo_map_and_cache(struct vmw_bo *vbo)
if (virtual)
return virtual;
ret = ttm_bo_kmap(bo, 0, PFN_UP(bo->base.size), &vbo->map);
ret = ttm_bo_kmap(bo, 0, PFN_UP(size), &vbo->map);
if (ret)
DRM_ERROR("Buffer object map failed: %d.\n", ret);
DRM_ERROR("Buffer object map failed: %d (size: bo = %zu, map = %zu).\n",
ret, bo->base.size, size);
return ttm_kmap_obj_virtual(&vbo->map, &not_used);
}
@ -390,6 +420,7 @@ static int vmw_bo_init(struct vmw_private *dev_priv,
BUILD_BUG_ON(TTM_MAX_BO_PRIORITY <= 3);
vmw_bo->tbo.priority = 3;
vmw_bo->res_tree = RB_ROOT;
xa_init(&vmw_bo->detached_resources);
params->size = ALIGN(params->size, PAGE_SIZE);
drm_gem_private_object_init(vdev, &vmw_bo->tbo.base, params->size);
@ -654,52 +685,6 @@ void vmw_bo_fence_single(struct ttm_buffer_object *bo,
dma_fence_put(&fence->base);
}
/**
* vmw_dumb_create - Create a dumb kms buffer
*
* @file_priv: Pointer to a struct drm_file identifying the caller.
* @dev: Pointer to the drm device.
* @args: Pointer to a struct drm_mode_create_dumb structure
* Return: Zero on success, negative error code on failure.
*
* This is a driver callback for the core drm create_dumb functionality.
* Note that this is very similar to the vmw_bo_alloc ioctl, except
* that the arguments have a different format.
*/
int vmw_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
struct vmw_private *dev_priv = vmw_priv(dev);
struct vmw_bo *vbo;
int cpp = DIV_ROUND_UP(args->bpp, 8);
int ret;
switch (cpp) {
case 1: /* DRM_FORMAT_C8 */
case 2: /* DRM_FORMAT_RGB565 */
case 4: /* DRM_FORMAT_XRGB8888 */
break;
default:
/*
* Dumb buffers don't allow anything else.
* This is tested via IGT's dumb_buffers
*/
return -EINVAL;
}
args->pitch = args->width * cpp;
args->size = ALIGN(args->pitch * args->height, PAGE_SIZE);
ret = vmw_gem_object_create_with_handle(dev_priv, file_priv,
args->size, &args->handle,
&vbo);
/* drop reference from allocate - handle holds it now */
drm_gem_object_put(&vbo->tbo.base);
return ret;
}
/**
* vmw_bo_swap_notify - swapout notify callback.
*
@ -853,3 +838,43 @@ void vmw_bo_placement_set_default_accelerated(struct vmw_bo *bo)
vmw_bo_placement_set(bo, domain, domain);
}
void vmw_bo_add_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res)
{
xa_store(&vbo->detached_resources, (unsigned long)res, res, GFP_KERNEL);
}
void vmw_bo_del_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res)
{
xa_erase(&vbo->detached_resources, (unsigned long)res);
}
struct vmw_surface *vmw_bo_surface(struct vmw_bo *vbo)
{
unsigned long index;
struct vmw_resource *res = NULL;
struct vmw_surface *surf = NULL;
struct rb_node *rb_itr = vbo->res_tree.rb_node;
if (vbo->is_dumb && vbo->dumb_surface) {
res = &vbo->dumb_surface->res;
goto out;
}
xa_for_each(&vbo->detached_resources, index, res) {
if (res->func->res_type == vmw_res_surface)
goto out;
}
for (rb_itr = rb_first(&vbo->res_tree); rb_itr;
rb_itr = rb_next(rb_itr)) {
res = rb_entry(rb_itr, struct vmw_resource, mob_node);
if (res->func->res_type == vmw_res_surface)
goto out;
}
out:
if (res)
surf = vmw_res_to_srf(res);
return surf;
}

Some files were not shown because too many files have changed in this diff