Merge drm/drm-next into drm-misc-next

drm-misc-next hasn't been updated in a while and I need a post-rc2
state to merge some vc4 patches.

Signed-off-by: Maxime Ripard <maxime@cerno.tech>
875 changed files with 72647 additions and 16854 deletions


@@ -259,7 +259,7 @@ Configuring the kernel
 Compiling the kernel
 --------------------
 
- - Make sure you have at least gcc 4.9 available.
+ - Make sure you have at least gcc 5.1 available.
   For more information, refer to :ref:`Documentation/process/changes.rst <changes>`.
 
 Please note that you can still run a.out user programs with this kernel.


@@ -54,7 +54,7 @@ properties:
           - const: toradex,apalis_t30
           - const: nvidia,tegra30
       - items:
-          - const: toradex,apalis_t30-eval-v1.1
+          - const: toradex,apalis_t30-v1.1-eval
           - const: toradex,apalis_t30-eval
           - const: toradex,apalis_t30-v1.1
           - const: toradex,apalis_t30


@@ -9,7 +9,7 @@ function block.
 All DISP device tree nodes must be siblings to the central MMSYS_CONFIG node.
 For a description of the MMSYS_CONFIG binding, see
-Documentation/devicetree/bindings/arm/mediatek/mediatek,mmsys.txt.
+Documentation/devicetree/bindings/arm/mediatek/mediatek,mmsys.yaml.
 
 DISP function blocks
 ====================


@@ -39,6 +39,7 @@ properties:
         - renesas,du-r8a77980 # for R-Car V3H compatible DU
         - renesas,du-r8a77990 # for R-Car E3 compatible DU
         - renesas,du-r8a77995 # for R-Car D3 compatible DU
+        - renesas,du-r8a779a0 # for R-Car V3U compatible DU
 
   reg:
     maxItems: 1
 
@@ -773,6 +774,56 @@ allOf:
         - reset-names
         - renesas,vsps
 
+  - if:
+      properties:
+        compatible:
+          contains:
+            enum:
+              - renesas,du-r8a779a0
+    then:
+      properties:
+        clocks:
+          items:
+            - description: Functional clock
+        clock-names:
+          maxItems: 1
+          items:
+            - const: du.0
+        interrupts:
+          maxItems: 2
+        resets:
+          maxItems: 1
+        reset-names:
+          items:
+            - const: du.0
+        ports:
+          properties:
+            port@0:
+              description: DSI 0
+            port@1:
+              description: DSI 1
+            port@2: false
+            port@3: false
+          required:
+            - port@0
+            - port@1
+        renesas,vsps:
+          minItems: 2
+      required:
+        - clock-names
+        - interrupts
+        - resets
+        - reset-names
+        - renesas,vsps
+
 additionalProperties: false
 
 examples:


@@ -19,7 +19,9 @@ properties:
           - const: allwinner,sun8i-v3s-emac
           - const: allwinner,sun50i-a64-emac
       - items:
-          - const: allwinner,sun50i-h6-emac
+          - enum:
+              - allwinner,sun20i-d1-emac
+              - allwinner,sun50i-h6-emac
           - const: allwinner,sun50i-a64-emac
 
   reg:


@@ -0,0 +1,89 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/ufs/samsung,exynos-ufs.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Samsung SoC series UFS host controller Device Tree Bindings
+
+maintainers:
+  - Alim Akhtar <alim.akhtar@samsung.com>
+
+description: |
+  Each Samsung UFS host controller instance should have its own node.
+  This binding define Samsung specific binding other then what is used
+  in the common ufshcd bindings
+  [1] Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt
+
+properties:
+  compatible:
+    enum:
+      - samsung,exynos7-ufs
+
+  reg:
+    items:
+      - description: HCI register
+      - description: vendor specific register
+      - description: unipro register
+      - description: UFS protector register
+
+  reg-names:
+    items:
+      - const: hci
+      - const: vs_hci
+      - const: unipro
+      - const: ufsp
+
+  clocks:
+    items:
+      - description: ufs link core clock
+      - description: unipro main clock
+
+  clock-names:
+    items:
+      - const: core_clk
+      - const: sclk_unipro_main
+
+  interrupts:
+    maxItems: 1
+
+  phys:
+    maxItems: 1
+
+  phy-names:
+    const: ufs-phy
+
+required:
+  - compatible
+  - reg
+  - interrupts
+  - phys
+  - phy-names
+  - clocks
+  - clock-names
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/interrupt-controller/arm-gic.h>
+    #include <dt-bindings/clock/exynos7-clk.h>
+
+    ufs: ufs@15570000 {
+        compatible = "samsung,exynos7-ufs";
+        reg = <0x15570000 0x100>,
+              <0x15570100 0x100>,
+              <0x15571000 0x200>,
+              <0x15572000 0x300>;
+        reg-names = "hci", "vs_hci", "unipro", "ufsp";
+        interrupts = <GIC_SPI 200 IRQ_TYPE_LEVEL_HIGH>;
+        clocks = <&clock_fsys1 ACLK_UFS20_LINK>,
+                 <&clock_fsys1 SCLK_UFSUNIPRO20_USER>;
+        clock-names = "core_clk", "sclk_unipro_main";
+        pinctrl-names = "default";
+        pinctrl-0 = <&ufs_rst_n &ufs_refclk_out>;
+        phys = <&ufs_phy>;
+        phy-names = "ufs-phy";
+    };
+...


@@ -300,8 +300,8 @@ pcie_replay_count
 .. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
    :doc: pcie_replay_count
 
-+GPU SmartShift Information
-============================
+GPU SmartShift Information
+==========================
 
 GPU SmartShift information via sysfs


@@ -183,26 +183,23 @@ Frame Buffer Compression (FBC)
 Display Refresh Rate Switching (DRRS)
 -------------------------------------
 
-.. kernel-doc:: drivers/gpu/drm/i915/display/intel_dp.c
+.. kernel-doc:: drivers/gpu/drm/i915/display/intel_drrs.c
    :doc: Display Refresh Rate Switching (DRRS)
 
-.. kernel-doc:: drivers/gpu/drm/i915/display/intel_dp.c
-   :functions: intel_dp_set_drrs_state
+.. kernel-doc:: drivers/gpu/drm/i915/display/intel_drrs.c
+   :functions: intel_drrs_enable
 
-.. kernel-doc:: drivers/gpu/drm/i915/display/intel_dp.c
-   :functions: intel_edp_drrs_enable
+.. kernel-doc:: drivers/gpu/drm/i915/display/intel_drrs.c
+   :functions: intel_drrs_disable
 
-.. kernel-doc:: drivers/gpu/drm/i915/display/intel_dp.c
-   :functions: intel_edp_drrs_disable
+.. kernel-doc:: drivers/gpu/drm/i915/display/intel_drrs.c
+   :functions: intel_drrs_invalidate
 
-.. kernel-doc:: drivers/gpu/drm/i915/display/intel_dp.c
-   :functions: intel_edp_drrs_invalidate
+.. kernel-doc:: drivers/gpu/drm/i915/display/intel_drrs.c
+   :functions: intel_drrs_flush
 
-.. kernel-doc:: drivers/gpu/drm/i915/display/intel_dp.c
-   :functions: intel_edp_drrs_flush
+.. kernel-doc:: drivers/gpu/drm/i915/display/intel_drrs.c
+   :functions: intel_drrs_init
 
-.. kernel-doc:: drivers/gpu/drm/i915/display/intel_dp.c
-   :functions: intel_dp_drrs_init
-
 DPIO
 ----
@@ -474,6 +471,14 @@ Object Tiling IOCTLs
 .. kernel-doc:: drivers/gpu/drm/i915/gem/i915_gem_tiling.c
    :doc: buffer object tiling
 
+Protected Objects
+-----------------
+
+.. kernel-doc:: drivers/gpu/drm/i915/pxp/intel_pxp.c
+   :doc: PXP
+
+.. kernel-doc:: drivers/gpu/drm/i915/pxp/intel_pxp_types.h
+
 Microcontrollers
 ================
 
@@ -498,6 +503,8 @@ GuC
 .. kernel-doc:: drivers/gpu/drm/i915/gt/uc/intel_guc.c
    :doc: GuC
 
+.. kernel-doc:: drivers/gpu/drm/i915/gt/uc/intel_guc.h
+
 GuC Firmware Layout
 ~~~~~~~~~~~~~~~~~~~


@@ -1,122 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2021 Intel Corporation
- */
-
-#define I915_CONTEXT_ENGINES_EXT_PARALLEL_SUBMIT 2 /* see i915_context_engines_parallel_submit */
-
-/**
- * struct drm_i915_context_engines_parallel_submit - Configure engine for
- * parallel submission.
- *
- * Setup a slot in the context engine map to allow multiple BBs to be submitted
- * in a single execbuf IOCTL. Those BBs will then be scheduled to run on the GPU
- * in parallel. Multiple hardware contexts are created internally in the i915
- * run these BBs. Once a slot is configured for N BBs only N BBs can be
- * submitted in each execbuf IOCTL and this is implicit behavior e.g. The user
- * doesn't tell the execbuf IOCTL there are N BBs, the execbuf IOCTL knows how
- * many BBs there are based on the slot's configuration. The N BBs are the last
- * N buffer objects or first N if I915_EXEC_BATCH_FIRST is set.
- *
- * The default placement behavior is to create implicit bonds between each
- * context if each context maps to more than 1 physical engine (e.g. context is
- * a virtual engine). Also we only allow contexts of same engine class and these
- * contexts must be in logically contiguous order. Examples of the placement
- * behavior described below. Lastly, the default is to not allow BBs to
- * preempted mid BB rather insert coordinated preemption on all hardware
- * contexts between each set of BBs. Flags may be added in the future to change
- * both of these default behaviors.
- *
- * Returns -EINVAL if hardware context placement configuration is invalid or if
- * the placement configuration isn't supported on the platform / submission
- * interface.
- * Returns -ENODEV if extension isn't supported on the platform / submission
- * interface.
- *
- * .. code-block:: none
- *
- *	Example 1 pseudo code:
- *	CS[X] = generic engine of same class, logical instance X
- *	INVALID = I915_ENGINE_CLASS_INVALID, I915_ENGINE_CLASS_INVALID_NONE
- *	set_engines(INVALID)
- *	set_parallel(engine_index=0, width=2, num_siblings=1,
- *		     engines=CS[0],CS[1])
- *
- *	Results in the following valid placement:
- *	CS[0], CS[1]
- *
- *	Example 2 pseudo code:
- *	CS[X] = generic engine of same class, logical instance X
- *	INVALID = I915_ENGINE_CLASS_INVALID, I915_ENGINE_CLASS_INVALID_NONE
- *	set_engines(INVALID)
- *	set_parallel(engine_index=0, width=2, num_siblings=2,
- *		     engines=CS[0],CS[2],CS[1],CS[3])
- *
- *	Results in the following valid placements:
- *	CS[0], CS[1]
- *	CS[2], CS[3]
- *
- *	This can also be thought of as 2 virtual engines described by 2-D array
- *	in the engines the field with bonds placed between each index of the
- *	virtual engines. e.g. CS[0] is bonded to CS[1], CS[2] is bonded to
- *	CS[3].
- *	VE[0] = CS[0], CS[2]
- *	VE[1] = CS[1], CS[3]
- *
- *	Example 3 pseudo code:
- *	CS[X] = generic engine of same class, logical instance X
- *	INVALID = I915_ENGINE_CLASS_INVALID, I915_ENGINE_CLASS_INVALID_NONE
- *	set_engines(INVALID)
- *	set_parallel(engine_index=0, width=2, num_siblings=2,
- *		     engines=CS[0],CS[1],CS[1],CS[3])
- *
- *	Results in the following valid and invalid placements:
- *	CS[0], CS[1]
- *	CS[1], CS[3] - Not logical contiguous, return -EINVAL
- */
-struct drm_i915_context_engines_parallel_submit {
-	/**
-	 * @base: base user extension.
-	 */
-	struct i915_user_extension base;
-
-	/**
-	 * @engine_index: slot for parallel engine
-	 */
-	__u16 engine_index;
-
-	/**
-	 * @width: number of contexts per parallel engine
-	 */
-	__u16 width;
-
-	/**
-	 * @num_siblings: number of siblings per context
-	 */
-	__u16 num_siblings;
-
-	/**
-	 * @mbz16: reserved for future use; must be zero
-	 */
-	__u16 mbz16;
-
-	/**
-	 * @flags: all undefined flags must be zero, currently not defined flags
-	 */
-	__u64 flags;
-
-	/**
-	 * @mbz64: reserved for future use; must be zero
-	 */
-	__u64 mbz64[3];
-
-	/**
-	 * @engines: 2-d array of engine instances to configure parallel engine
-	 *
-	 * length = width (i) * num_siblings (j)
-	 * index = j + i * num_siblings
-	 */
-	struct i915_engine_class_instance engines[0];
-} __packed;

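Since the 2-D engines[] flattening in the removed header (index = j + i * num_siblings) is the subtlest part of that uAPI, here is a minimal, self-contained C sketch of just that index math. The struct and field names below are simplified stand-ins, not the real uAPI types now living in include/uapi/drm/i915_drm.h, and no IOCTL is issued:

    #include <stdio.h>

    /* Simplified stand-in for struct i915_engine_class_instance. */
    struct engine_id { unsigned short cls, instance; };

    int main(void)
    {
        unsigned short width = 2;         /* contexts per parallel engine  */
        unsigned short num_siblings = 2;  /* placement choices per context */
        struct engine_id engines[4];      /* width * num_siblings entries  */

        /* Example 2 from the kernel-doc above: VE[0] = CS[0],CS[2] and
         * VE[1] = CS[1],CS[3], flattened as engines=CS[0],CS[2],CS[1],CS[3]. */
        for (unsigned short i = 0; i < width; i++)
            for (unsigned short j = 0; j < num_siblings; j++)
                engines[j + i * num_siblings] = (struct engine_id){
                    .cls = 0, .instance = (unsigned short)(i + 2 * j) };

        for (int k = 0; k < width * num_siblings; k++)
            printf("engines[%d] = CS[%u]\n", k, engines[k].instance);
        return 0;
    }

Walking the flattened array row by row reproduces exactly the placements listed in the removed documentation.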

@@ -135,8 +135,8 @@ Add I915_CONTEXT_ENGINES_EXT_PARALLEL_SUBMIT and
 drm_i915_context_engines_parallel_submit to the uAPI to implement this
 extension.
 
-.. kernel-doc:: Documentation/gpu/rfc/i915_parallel_execbuf.h
-   :functions: drm_i915_context_engines_parallel_submit
+.. kernel-doc:: include/uapi/drm/i915_drm.h
+   :functions: i915_context_engines_parallel_submit
 
 Extend execbuf2 IOCTL to support submitting N BBs in a single IOCTL
 -------------------------------------------------------------------


@@ -296,7 +296,7 @@ not available.
 Device Tree bindings and board design
 =====================================
 
-This section references ``Documentation/devicetree/bindings/net/dsa/sja1105.txt``
+This section references ``Documentation/devicetree/bindings/net/dsa/nxp,sja1105.yaml``
 and aims to showcase some potential switch caveats.
 
 RMII PHY role and out-of-band signaling


@@ -29,7 +29,7 @@ you probably needn't concern yourself with pcmciautils.
 ====================== ===============  ========================================
         Program        Minimal version       Command to check the version
 ====================== ===============  ========================================
-GNU C                  4.9              gcc --version
+GNU C                  5.1              gcc --version
 Clang/LLVM (optional)  10.0.1           clang --version
 GNU make               3.81             make --version
 binutils               2.23             ld -v


@@ -223,7 +223,7 @@ Linux内核5.x版本 <http://kernel.org/>
 编译内核
 ---------
 
- - 确保您至少有gcc 4.9可用。
+ - 确保您至少有gcc 5.1可用。
   有关更多信息,请参阅 :ref:`Documentation/process/changes.rst <changes>`
 
 请注意您仍然可以使用此内核运行a.out用户程序。


@@ -226,7 +226,7 @@ Linux內核5.x版本 <http://kernel.org/>
 編譯內核
 ---------
 
- - 確保您至少有gcc 4.9可用。
+ - 確保您至少有gcc 5.1可用。
   有關更多信息,請參閱 :ref:`Documentation/process/changes.rst <changes>`
 
 請注意您仍然可以使用此內核運行a.out用戶程序。


@@ -977,12 +977,12 @@ L: platform-driver-x86@vger.kernel.org
 S:      Maintained
 F:      drivers/platform/x86/amd-pmc.*
 
-AMD POWERPLAY
+AMD POWERPLAY AND SWSMU
 M:      Evan Quan <evan.quan@amd.com>
 L:      amd-gfx@lists.freedesktop.org
 S:      Supported
 T:      git https://gitlab.freedesktop.org/agd5f/linux.git
-F:      drivers/gpu/drm/amd/pm/powerplay/
+F:      drivers/gpu/drm/amd/pm/
 
 AMD PTDMA DRIVER
 M:      Sanjay R Mehta <sanju.mehta@amd.com>
@@ -14364,7 +14364,8 @@ F: Documentation/devicetree/bindings/pci/intel,ixp4xx-pci.yaml
 F:      drivers/pci/controller/pci-ixp4xx.c
 
 PCI DRIVER FOR INTEL VOLUME MANAGEMENT DEVICE (VMD)
-M:      Jonathan Derrick <jonathan.derrick@intel.com>
+M:      Nirmal Patel <nirmal.patel@linux.intel.com>
+R:      Jonathan Derrick <jonathan.derrick@linux.dev>
 L:      linux-pci@vger.kernel.org
 S:      Supported
 F:      drivers/pci/controller/vmd.c


@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 15
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc2
 NAME = Opossums on Parade
 
 # *DOCUMENTATION*
@@ -849,12 +849,6 @@ endif
 DEBUG_CFLAGS	:=
 
-# Workaround for GCC versions < 5.0
-# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=61801
-ifdef CONFIG_CC_IS_GCC
-DEBUG_CFLAGS	+= $(call cc-ifversion, -lt, 0500, $(call cc-option, -fno-var-tracking-assignments))
-endif
-
 ifdef CONFIG_DEBUG_INFO
 ifdef CONFIG_DEBUG_INFO_SPLIT


@@ -20,7 +20,7 @@ config ALPHA
 	select NEED_SG_DMA_LENGTH
 	select VIRT_TO_BUS
 	select GENERIC_IRQ_PROBE
-	select GENERIC_PCI_IOMAP if PCI
+	select GENERIC_PCI_IOMAP
 	select AUTO_IRQ_AFFINITY if SMP
 	select GENERIC_IRQ_SHOW
 	select ARCH_WANT_IPC_PARSE_VERSION
@@ -199,7 +199,6 @@ config ALPHA_EIGER
 
 config ALPHA_JENSEN
 	bool "Jensen"
-	depends on BROKEN
 	select HAVE_EISA
 	help
 	  DEC PC 150 AXP (aka Jensen): This is a very old Digital system - one


@@ -16,3 +16,4 @@ extern void __divlu(void);
 extern void __remlu(void);
 extern void __divqu(void);
 extern void __remqu(void);
+extern unsigned long __udiv_qrnnd(unsigned long *, unsigned long, unsigned long , unsigned long);


@@ -60,7 +60,7 @@ extern inline void set_hae(unsigned long new_hae)
  * Change virtual addresses to physical addresses and vv.
  */
 #ifdef USE_48_BIT_KSEG
-static inline unsigned long virt_to_phys(void *address)
+static inline unsigned long virt_to_phys(volatile void *address)
 {
 	return (unsigned long)address - IDENT_ADDR;
 }
@@ -70,7 +70,7 @@ static inline void * phys_to_virt(unsigned long address)
 	return (void *) (address + IDENT_ADDR);
 }
 #else
-static inline unsigned long virt_to_phys(void *address)
+static inline unsigned long virt_to_phys(volatile void *address)
 {
 	unsigned long phys = (unsigned long)address;
@@ -106,7 +106,7 @@ static inline void * phys_to_virt(unsigned long address)
 extern unsigned long __direct_map_base;
 extern unsigned long __direct_map_size;
 
-static inline unsigned long __deprecated virt_to_bus(void *address)
+static inline unsigned long __deprecated virt_to_bus(volatile void *address)
 {
 	unsigned long phys = virt_to_phys(address);
 	unsigned long bus = phys + __direct_map_base;


@@ -111,18 +111,18 @@ __EXTERN_INLINE void jensen_set_hae(unsigned long addr)
  * convinced that I need one of the newer machines.
  */
 
-static inline unsigned int jensen_local_inb(unsigned long addr)
+__EXTERN_INLINE unsigned int jensen_local_inb(unsigned long addr)
 {
 	return 0xff & *(vuip)((addr << 9) + EISA_VL82C106);
 }
 
-static inline void jensen_local_outb(u8 b, unsigned long addr)
+__EXTERN_INLINE void jensen_local_outb(u8 b, unsigned long addr)
 {
 	*(vuip)((addr << 9) + EISA_VL82C106) = b;
 	mb();
 }
 
-static inline unsigned int jensen_bus_inb(unsigned long addr)
+__EXTERN_INLINE unsigned int jensen_bus_inb(unsigned long addr)
 {
 	long result;
@@ -131,7 +131,7 @@ static inline unsigned int jensen_bus_inb(unsigned long addr)
 	return __kernel_extbl(result, addr & 3);
 }
 
-static inline void jensen_bus_outb(u8 b, unsigned long addr)
+__EXTERN_INLINE void jensen_bus_outb(u8 b, unsigned long addr)
 {
 	jensen_set_hae(0);
 	*(vuip)((addr << 7) + EISA_IO + 0x00) = b * 0x01010101;


@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __ALPHA_SETUP_H
+#define __ALPHA_SETUP_H
+
+#include <uapi/asm/setup.h>
+
+/*
+ * We leave one page for the initial stack page, and one page for
+ * the initial process structure. Also, the console eats 3 MB for
+ * the initial bootloader (one of which we can reclaim later).
+ */
+#define BOOT_PCB	0x20000000
+#define BOOT_ADDR	0x20000000
+/* Remove when official MILO sources have ELF support: */
+#define BOOT_SIZE	(16*1024)
+
+#ifdef CONFIG_ALPHA_LEGACY_START_ADDRESS
+#define KERNEL_START_PHYS	0x300000 /* Old bootloaders hardcoded this. */
+#else
+#define KERNEL_START_PHYS	0x1000000 /* required: Wildfire/Titan/Marvel */
+#endif
+
+#define KERNEL_START	(PAGE_OFFSET+KERNEL_START_PHYS)
+#define SWAPPER_PGD	KERNEL_START
+#define INIT_STACK	(PAGE_OFFSET+KERNEL_START_PHYS+0x02000)
+#define EMPTY_PGT	(PAGE_OFFSET+KERNEL_START_PHYS+0x04000)
+#define EMPTY_PGE	(PAGE_OFFSET+KERNEL_START_PHYS+0x08000)
+#define ZERO_PGE	(PAGE_OFFSET+KERNEL_START_PHYS+0x0A000)
+
+#define START_ADDR	(PAGE_OFFSET+KERNEL_START_PHYS+0x10000)
+
+/*
+ * This is setup by the secondary bootstrap loader. Because
+ * the zero page is zeroed out as soon as the vm system is
+ * initialized, we need to copy things out into a more permanent
+ * place.
+ */
+#define PARAM			ZERO_PGE
+#define COMMAND_LINE		((char *)(absolute_pointer(PARAM + 0x0000)))
+#define INITRD_START	(*(unsigned long *) (PARAM+0x100))
+#define INITRD_SIZE	(*(unsigned long *) (PARAM+0x108))
+
+#endif


@@ -1,43 +1,7 @@
 /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#ifndef __ALPHA_SETUP_H
-#define __ALPHA_SETUP_H
+#ifndef _UAPI__ALPHA_SETUP_H
+#define _UAPI__ALPHA_SETUP_H
 
 #define COMMAND_LINE_SIZE	256
 
-/*
- * We leave one page for the initial stack page, and one page for
- * the initial process structure. Also, the console eats 3 MB for
- * the initial bootloader (one of which we can reclaim later).
- */
-#define BOOT_PCB	0x20000000
-#define BOOT_ADDR	0x20000000
-/* Remove when official MILO sources have ELF support: */
-#define BOOT_SIZE	(16*1024)
-
-#ifdef CONFIG_ALPHA_LEGACY_START_ADDRESS
-#define KERNEL_START_PHYS	0x300000 /* Old bootloaders hardcoded this. */
-#else
-#define KERNEL_START_PHYS	0x1000000 /* required: Wildfire/Titan/Marvel */
-#endif
-
-#define KERNEL_START	(PAGE_OFFSET+KERNEL_START_PHYS)
-#define SWAPPER_PGD	KERNEL_START
-#define INIT_STACK	(PAGE_OFFSET+KERNEL_START_PHYS+0x02000)
-#define EMPTY_PGT	(PAGE_OFFSET+KERNEL_START_PHYS+0x04000)
-#define EMPTY_PGE	(PAGE_OFFSET+KERNEL_START_PHYS+0x08000)
-#define ZERO_PGE	(PAGE_OFFSET+KERNEL_START_PHYS+0x0A000)
-
-#define START_ADDR	(PAGE_OFFSET+KERNEL_START_PHYS+0x10000)
-
-/*
- * This is setup by the secondary bootstrap loader. Because
- * the zero page is zeroed out as soon as the vm system is
- * initialized, we need to copy things out into a more permanent
- * place.
- */
-#define PARAM			ZERO_PGE
-#define COMMAND_LINE		((char*)(PARAM + 0x0000))
-#define INITRD_START	(*(unsigned long *) (PARAM+0x100))
-#define INITRD_SIZE	(*(unsigned long *) (PARAM+0x108))
-
-#endif
+#endif /* _UAPI__ALPHA_SETUP_H */


@@ -7,6 +7,11 @@
  *
  * Code supporting the Jensen.
  */
+#define __EXTERN_INLINE
+#include <asm/io.h>
+#include <asm/jensen.h>
+#undef  __EXTERN_INLINE
+
 #include <linux/interrupt.h>
 #include <linux/kernel.h>
 #include <linux/types.h>
@@ -17,11 +22,6 @@
 #include <asm/ptrace.h>
 
-#define __EXTERN_INLINE inline
-#include <asm/io.h>
-#include <asm/jensen.h>
-#undef  __EXTERN_INLINE
-
 #include <asm/dma.h>
 #include <asm/irq.h>
 #include <asm/mmu_context.h>


@@ -14,6 +14,7 @@ ev6-$(CONFIG_ALPHA_EV6) := ev6-
 ev67-$(CONFIG_ALPHA_EV67) := ev67-
 
 lib-y =	__divqu.o __remqu.o __divlu.o __remlu.o \
+	udiv-qrnnd.o \
 	udelay.o \
 	$(ev6-y)memset.o \
 	$(ev6-y)memcpy.o \


@@ -25,6 +25,7 @@
 # along with GCC; see the file COPYING.  If not, write to the
 # Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
 # MA 02111-1307, USA.
+#include <asm/export.h>
 
 	.set noreorder
 	.set noat
@@ -161,3 +162,4 @@ $Odd:
 	ret	$31,($26),1
 
 	.end	__udiv_qrnnd
+EXPORT_SYMBOL(__udiv_qrnnd)


@@ -7,4 +7,4 @@ ccflags-y := -w
 obj-$(CONFIG_MATHEMU) += math-emu.o
 
-math-emu-objs := math.o qrnnd.o
+math-emu-objs := math.o


@@ -403,5 +403,3 @@ alpha_fp_emul_imprecise (struct pt_regs *regs, unsigned long write_mask)
 egress:
 	return si_code;
 }
-EXPORT_SYMBOL(__udiv_qrnnd);


@@ -86,7 +86,7 @@ config ARM64
 	select ARCH_SUPPORTS_LTO_CLANG_THIN
 	select ARCH_SUPPORTS_CFI_CLANG
 	select ARCH_SUPPORTS_ATOMIC_RMW
-	select ARCH_SUPPORTS_INT128 if CC_HAS_INT128 && (GCC_VERSION >= 50000 || CC_IS_CLANG)
+	select ARCH_SUPPORTS_INT128 if CC_HAS_INT128
 	select ARCH_SUPPORTS_NUMA_BALANCING
 	select ARCH_WANT_COMPAT_IPC_PARSE_VERSION if COMPAT
 	select ARCH_WANT_DEFAULT_BPF_JIT


@@ -513,7 +513,7 @@ size_t sve_state_size(struct task_struct const *task)
 void sve_alloc(struct task_struct *task)
 {
 	if (task->thread.sve_state) {
-		memset(task->thread.sve_state, 0, sve_state_size(current));
+		memset(task->thread.sve_state, 0, sve_state_size(task));
 		return;
 	}


@@ -18,7 +18,6 @@
 #include <linux/mman.h>
 #include <linux/mm.h>
 #include <linux/nospec.h>
-#include <linux/sched.h>
 #include <linux/stddef.h>
 #include <linux/sysctl.h>
 #include <linux/unistd.h>
@@ -58,7 +57,7 @@
 #if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK)
 #include <linux/stackprotector.h>
-unsigned long __stack_chk_guard __read_mostly;
+unsigned long __stack_chk_guard __ro_after_init;
 EXPORT_SYMBOL(__stack_chk_guard);
 #endif


@@ -17,21 +17,21 @@
  * two accesses to memory, which may be undesirable for some devices.
  */
 #define in_8(addr) \
-    ({ u8 __v = (*(__force volatile u8 *) (addr)); __v; })
+    ({ u8 __v = (*(__force volatile u8 *) (unsigned long)(addr)); __v; })
 #define in_be16(addr) \
-    ({ u16 __v = (*(__force volatile u16 *) (addr)); __v; })
+    ({ u16 __v = (*(__force volatile u16 *) (unsigned long)(addr)); __v; })
 #define in_be32(addr) \
-    ({ u32 __v = (*(__force volatile u32 *) (addr)); __v; })
+    ({ u32 __v = (*(__force volatile u32 *) (unsigned long)(addr)); __v; })
 #define in_le16(addr) \
-    ({ u16 __v = le16_to_cpu(*(__force volatile __le16 *) (addr)); __v; })
+    ({ u16 __v = le16_to_cpu(*(__force volatile __le16 *) (unsigned long)(addr)); __v; })
 #define in_le32(addr) \
-    ({ u32 __v = le32_to_cpu(*(__force volatile __le32 *) (addr)); __v; })
+    ({ u32 __v = le32_to_cpu(*(__force volatile __le32 *) (unsigned long)(addr)); __v; })
 
-#define out_8(addr,b) (void)((*(__force volatile u8 *) (addr)) = (b))
-#define out_be16(addr,w) (void)((*(__force volatile u16 *) (addr)) = (w))
-#define out_be32(addr,l) (void)((*(__force volatile u32 *) (addr)) = (l))
-#define out_le16(addr,w) (void)((*(__force volatile __le16 *) (addr)) = cpu_to_le16(w))
-#define out_le32(addr,l) (void)((*(__force volatile __le32 *) (addr)) = cpu_to_le32(l))
+#define out_8(addr,b) (void)((*(__force volatile u8 *) (unsigned long)(addr)) = (b))
+#define out_be16(addr,w) (void)((*(__force volatile u16 *) (unsigned long)(addr)) = (w))
+#define out_be32(addr,l) (void)((*(__force volatile u32 *) (unsigned long)(addr)) = (l))
+#define out_le16(addr,w) (void)((*(__force volatile __le16 *) (unsigned long)(addr)) = cpu_to_le16(w))
+#define out_le32(addr,l) (void)((*(__force volatile __le32 *) (unsigned long)(addr)) = cpu_to_le32(l))
 
 #define raw_inb in_8
 #define raw_inw in_be16


@@ -171,7 +171,6 @@ static int bcd2int (unsigned char b)
 int mvme147_hwclk(int op, struct rtc_time *t)
 {
-#warning check me!
 	if (!op) {
 		m147_rtc->ctrl = RTC_READ;
 		t->tm_year = bcd2int (m147_rtc->bcd_year);
@@ -183,6 +182,9 @@ int mvme147_hwclk(int op, struct rtc_time *t)
 		m147_rtc->ctrl = 0;
 		if (t->tm_year < 70)
 			t->tm_year += 100;
+	} else {
+		/* FIXME Setting the time is not yet supported */
+		return -EOPNOTSUPP;
 	}
 	return 0;
 }


@@ -436,7 +436,6 @@ int bcd2int (unsigned char b)
 int mvme16x_hwclk(int op, struct rtc_time *t)
 {
-#warning check me!
 	if (!op) {
 		rtc->ctrl = RTC_READ;
 		t->tm_year = bcd2int (rtc->bcd_year);
@@ -448,6 +447,9 @@ int mvme16x_hwclk(int op, struct rtc_time *t)
 		rtc->ctrl = 0;
 		if (t->tm_year < 70)
 			t->tm_year += 100;
+	} else {
+		/* FIXME Setting the time is not yet supported */
+		return -EOPNOTSUPP;
 	}
 	return 0;
 }


@@ -184,7 +184,7 @@ extern int npmem_ranges;
 #include <asm-generic/getorder.h>
 #include <asm/pdc.h>
 
-#define PAGE0   ((struct zeropage *)__PAGE_OFFSET)
+#define PAGE0   ((struct zeropage *)absolute_pointer(__PAGE_OFFSET))
 
 /* DEFINITION OF THE ZERO-PAGE (PAG0) */
 /* based on work by Jason Eckhardt (jason@equator.com) */


@@ -513,12 +513,15 @@ void ioport_unmap(void __iomem *addr)
 	}
 }
 
+#ifdef CONFIG_PCI
 void pci_iounmap(struct pci_dev *dev, void __iomem * addr)
 {
 	if (!INDIRECT_ADDR(addr)) {
 		iounmap(addr);
 	}
 }
+EXPORT_SYMBOL(pci_iounmap);
+#endif
 
 EXPORT_SYMBOL(ioread8);
 EXPORT_SYMBOL(ioread16);
@@ -544,4 +547,3 @@ EXPORT_SYMBOL(iowrite16_rep);
 EXPORT_SYMBOL(iowrite32_rep);
 EXPORT_SYMBOL(ioport_map);
 EXPORT_SYMBOL(ioport_unmap);
-EXPORT_SYMBOL(pci_iounmap);


@@ -35,7 +35,6 @@ endif
 BOOTCFLAGS    := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
 		 -fno-strict-aliasing -O2 -msoft-float -mno-altivec -mno-vsx \
 		 -pipe -fomit-frame-pointer -fno-builtin -fPIC -nostdinc \
-		 -include $(srctree)/include/linux/compiler_attributes.h \
 		 $(LINUXINCLUDE)
 
 ifdef CONFIG_PPC64_BOOT_WRAPPER
@@ -70,6 +69,7 @@ ifeq ($(call cc-option-yn, -fstack-protector),y)
 BOOTCFLAGS	+= -fno-stack-protector
 endif
 
+BOOTCFLAGS	+= -include $(srctree)/include/linux/compiler_attributes.h
 BOOTCFLAGS	+= -I$(objtree)/$(obj) -I$(srctree)/$(obj)
 
 DTC_FLAGS	?= -p 1024


@@ -12,16 +12,6 @@
 # define ASM_CONST(x)		__ASM_CONST(x)
 #endif
 
-/*
- * Inline assembly memory constraint
- *
- * GCC 4.9 doesn't properly handle pre update memory constraint "m<>"
- *
- */
-#if defined(GCC_VERSION) && GCC_VERSION < 50000
-#define UPD_CONSTR ""
-#else
 #define UPD_CONSTR "<>"
-#endif
 
 #endif /* _ASM_POWERPC_ASM_CONST_H */


@@ -18,6 +18,7 @@
 #include <asm/switch_to.h>
 #include <asm/syscall.h>
 #include <asm/time.h>
+#include <asm/tm.h>
 #include <asm/unistd.h>
 
 #if defined(CONFIG_PPC_ADV_DEBUG_REGS) && defined(CONFIG_PPC32)
@@ -136,6 +137,48 @@ notrace long system_call_exception(long r3, long r4, long r5,
 	 */
 	irq_soft_mask_regs_set_state(regs, IRQS_ENABLED);
 
+	/*
+	 * If system call is called with TM active, set _TIF_RESTOREALL to
+	 * prevent RFSCV being used to return to userspace, because POWER9
+	 * TM implementation has problems with this instruction returning to
+	 * transactional state. Final register values are not relevant because
+	 * the transaction will be aborted upon return anyway. Or in the case
+	 * of unsupported_scv SIGILL fault, the return state does not much
+	 * matter because it's an edge case.
+	 */
+	if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) &&
+	    unlikely(MSR_TM_TRANSACTIONAL(regs->msr)))
+		current_thread_info()->flags |= _TIF_RESTOREALL;
+
+	/*
+	 * If the system call was made with a transaction active, doom it and
+	 * return without performing the system call. Unless it was an
+	 * unsupported scv vector, in which case it's treated like an illegal
+	 * instruction.
+	 */
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+	if (unlikely(MSR_TM_TRANSACTIONAL(regs->msr)) &&
+	    !trap_is_unsupported_scv(regs)) {
+		/* Enable TM in the kernel, and disable EE (for scv) */
+		hard_irq_disable();
+		mtmsr(mfmsr() | MSR_TM);
+
+		/* tabort, this dooms the transaction, nothing else */
+		asm volatile(".long 0x7c00071d | ((%0) << 16)"
+			     :: "r"(TM_CAUSE_SYSCALL|TM_CAUSE_PERSISTENT));
+
+		/*
+		 * Userspace will never see the return value. Execution will
+		 * resume after the tbegin. of the aborted transaction with the
+		 * checkpointed register state. A context switch could occur
+		 * or signal delivered to the process before resuming the
+		 * doomed transaction context, but that should all be handled
+		 * as expected.
+		 */
+		return -ENOSYS;
+	}
+#endif // CONFIG_PPC_TRANSACTIONAL_MEM
+
 	local_irq_enable();
 
 	if (unlikely(current_thread_info()->flags & _TIF_SYSCALL_DOTRACE)) {


@@ -12,7 +12,6 @@
 #include <asm/mmu.h>
 #include <asm/ppc_asm.h>
 #include <asm/ptrace.h>
-#include <asm/tm.h>
 
 	.section	".toc","aw"
 SYS_CALL_TABLE:
@@ -55,12 +54,6 @@ COMPAT_SYS_CALL_TABLE:
 	.globl system_call_vectored_\name
 system_call_vectored_\name:
 _ASM_NOKPROBE_SYMBOL(system_call_vectored_\name)
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-BEGIN_FTR_SECTION
-	extrdi.	r10, r12, 1, (63-MSR_TS_T_LG) /* transaction active? */
-	bne	tabort_syscall
-END_FTR_SECTION_IFSET(CPU_FTR_TM)
-#endif
 	SCV_INTERRUPT_TO_KERNEL
 	mr	r10,r1
 	ld	r1,PACAKSAVE(r13)
@@ -247,12 +240,6 @@ _ASM_NOKPROBE_SYMBOL(system_call_common_real)
 	.globl system_call_common
 system_call_common:
 _ASM_NOKPROBE_SYMBOL(system_call_common)
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-BEGIN_FTR_SECTION
-	extrdi.	r10, r12, 1, (63-MSR_TS_T_LG) /* transaction active? */
-	bne	tabort_syscall
-END_FTR_SECTION_IFSET(CPU_FTR_TM)
-#endif
 	mr	r10,r1
 	ld	r1,PACAKSAVE(r13)
 	std	r10,0(r1)
@@ -425,34 +412,6 @@ SOFT_MASK_TABLE(.Lsyscall_rst_start, 1b)
 RESTART_TABLE(.Lsyscall_rst_start, .Lsyscall_rst_end, syscall_restart)
 #endif
 
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-tabort_syscall:
-_ASM_NOKPROBE_SYMBOL(tabort_syscall)
-	/* Firstly we need to enable TM in the kernel */
-	mfmsr	r10
-	li	r9, 1
-	rldimi	r10, r9, MSR_TM_LG, 63-MSR_TM_LG
-	mtmsrd	r10, 0
-
-	/* tabort, this dooms the transaction, nothing else */
-	li	r9, (TM_CAUSE_SYSCALL|TM_CAUSE_PERSISTENT)
-	TABORT(R9)
-
-	/*
-	 * Return directly to userspace. We have corrupted user register state,
-	 * but userspace will never see that register state. Execution will
-	 * resume after the tbegin of the aborted transaction with the
-	 * checkpointed register state.
-	 */
-	li	r9, MSR_RI
-	andc	r10, r10, r9
-	mtmsrd	r10, 1
-	mtspr	SPRN_SRR0, r11
-	mtspr	SPRN_SRR1, r12
-	RFI_TO_USER
-	b	.	/* prevent speculative execution */
-#endif
-
 /*
  * If MSR EE/RI was never enabled, IRQs not reconciled, NVGPRs not
  * touched, no exit work created, then this can be used.


@@ -249,6 +249,7 @@ void machine_check_queue_event(void)
 {
 	int index;
 	struct machine_check_event evt;
+	unsigned long msr;
 
 	if (!get_mce_event(&evt, MCE_EVENT_RELEASE))
 		return;
@@ -262,8 +263,20 @@ void machine_check_queue_event(void)
 	memcpy(&local_paca->mce_info->mce_event_queue[index],
 	       &evt, sizeof(evt));
 
-	/* Queue irq work to process this event later. */
-	irq_work_queue(&mce_event_process_work);
+	/*
+	 * Queue irq work to process this event later. Before
+	 * queuing the work enable translation for non radix LPAR,
+	 * as irq_work_queue may try to access memory outside RMO
+	 * region.
+	 */
+	if (!radix_enabled() && firmware_has_feature(FW_FEATURE_LPAR)) {
+		msr = mfmsr();
+		mtmsr(msr | MSR_IR | MSR_DR);
+		irq_work_queue(&mce_event_process_work);
+		mtmsr(msr);
+	} else {
+		irq_work_queue(&mce_event_process_work);
+	}
 }
 
 void mce_common_process_ue(struct pt_regs *regs,


@@ -2536,7 +2536,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_P9_TM_HV_ASSIST)
 	/* The following code handles the fake_suspend = 1 case */
 	mflr	r0
 	std	r0, PPC_LR_STKOFF(r1)
-	stdu	r1, -PPC_MIN_STKFRM(r1)
+	stdu	r1, -TM_FRAME_SIZE(r1)
 
 	/* Turn on TM. */
 	mfmsr	r8
@@ -2551,10 +2551,42 @@ BEGIN_FTR_SECTION
 END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG)
 	nop
 
+	/*
+	 * It's possible that treclaim. may modify registers, if we have lost
+	 * track of fake-suspend state in the guest due to it using rfscv.
+	 * Save and restore registers in case this occurs.
+	 */
+	mfspr	r3, SPRN_DSCR
+	mfspr	r4, SPRN_XER
+	mfspr	r5, SPRN_AMR
+	/* SPRN_TAR would need to be saved here if the kernel ever used it */
+	mfcr	r12
+	SAVE_NVGPRS(r1)
+	SAVE_GPR(2, r1)
+	SAVE_GPR(3, r1)
+	SAVE_GPR(4, r1)
+	SAVE_GPR(5, r1)
+	stw	r12, 8(r1)
+	std	r1, HSTATE_HOST_R1(r13)
+
 	/* We have to treclaim here because that's the only way to do S->N */
 	li	r3, TM_CAUSE_KVM_RESCHED
 	TRECLAIM(R3)
 
+	GET_PACA(r13)
+	ld	r1, HSTATE_HOST_R1(r13)
+	REST_GPR(2, r1)
+	REST_GPR(3, r1)
+	REST_GPR(4, r1)
+	REST_GPR(5, r1)
+	lwz	r12, 8(r1)
+	REST_NVGPRS(r1)
+	mtspr	SPRN_DSCR, r3
+	mtspr	SPRN_XER, r4
+	mtspr	SPRN_AMR, r5
+	mtcr	r12
+	HMT_MEDIUM
+
 	/*
 	 * We were in fake suspend, so we are not going to save the
 	 * register state as the guest checkpointed state (since
@@ -2582,7 +2614,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG)
 	std	r5, VCPU_TFHAR(r9)
 	std	r6, VCPU_TFIAR(r9)
 
-	addi	r1, r1, PPC_MIN_STKFRM
+	addi	r1, r1, TM_FRAME_SIZE
 	ld	r0, PPC_LR_STKOFF(r1)
 	mtlr	r0
 	blr


@@ -348,9 +348,9 @@ static int xics_host_map(struct irq_domain *domain, unsigned int virq,
 	if (xics_ics->check(xics_ics, hwirq))
 		return -EINVAL;
 
-	/* No chip data for the XICS domain */
+	/* Let the ICS be the chip data for the XICS domain. For ICS native */
 	irq_domain_set_info(domain, virq, hwirq, xics_ics->chip,
-			    NULL, handle_fasteoi_irq, NULL, NULL);
+			    xics_ics, handle_fasteoi_irq, NULL, NULL);
 
 	return 0;
 }


@@ -236,7 +236,7 @@ config ARCH_RV32I
 config ARCH_RV64I
 	bool "RV64I"
 	select 64BIT
-	select ARCH_SUPPORTS_INT128 if CC_HAS_INT128 && GCC_VERSION >= 50000
+	select ARCH_SUPPORTS_INT128 if CC_HAS_INT128
 	select HAVE_DYNAMIC_FTRACE if !XIP_KERNEL && MMU && $(cc-option,-fpatchable-function-entry=8)
 	select HAVE_DYNAMIC_FTRACE_WITH_REGS if HAVE_DYNAMIC_FTRACE
 	select HAVE_FTRACE_MCOUNT_RECORD if !XIP_KERNEL


@@ -685,16 +685,6 @@ config STACK_GUARD
 	  The minimum size for the stack guard should be 256 for 31 bit and
 	  512 for 64 bit.
 
-config WARN_DYNAMIC_STACK
-	def_bool n
-	prompt "Emit compiler warnings for function with dynamic stack usage"
-	help
-	  This option enables the compiler option -mwarn-dynamicstack. If the
-	  compiler supports this options generates warnings for functions
-	  that dynamically allocate stack space using alloca.
-
-	  Say N if you are unsure.
-
 endmenu
 
 menu "I/O subsystem"


@@ -85,13 +85,6 @@ cflags-$(CONFIG_CHECK_STACK) += -mstack-guard=$(CONFIG_STACK_GUARD)
 endif
 endif
 
-ifdef CONFIG_WARN_DYNAMIC_STACK
-  ifneq ($(call cc-option,-mwarn-dynamicstack),)
-    KBUILD_CFLAGS += -mwarn-dynamicstack
-    KBUILD_CFLAGS_DECOMPRESSOR += -mwarn-dynamicstack
-  endif
-endif
-
 ifdef CONFIG_EXPOLINE
   ifneq ($(call cc-option,$(CC_FLAGS_MARCH) -mindirect-branch=thunk),)
     CC_FLAGS_EXPOLINE := -mindirect-branch=thunk


@@ -10,6 +10,7 @@ CONFIG_BPF_JIT=y
 CONFIG_BPF_JIT_ALWAYS_ON=y
 CONFIG_BPF_LSM=y
 CONFIG_PREEMPT=y
+CONFIG_SCHED_CORE=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_BSD_PROCESS_ACCT_V3=y
 CONFIG_TASKSTATS=y
@@ -503,6 +504,7 @@ CONFIG_NLMON=m
 # CONFIG_NET_VENDOR_HUAWEI is not set
 # CONFIG_NET_VENDOR_INTEL is not set
 # CONFIG_NET_VENDOR_MICROSOFT is not set
+# CONFIG_NET_VENDOR_LITEX is not set
 # CONFIG_NET_VENDOR_MARVELL is not set
 CONFIG_MLX4_EN=m
 CONFIG_MLX5_CORE=m
@@ -661,7 +663,6 @@ CONFIG_NFSD_V3_ACL=y
 CONFIG_NFSD_V4=y
 CONFIG_NFSD_V4_SECURITY_LABEL=y
 CONFIG_CIFS=m
-CONFIG_CIFS_WEAK_PW_HASH=y
 CONFIG_CIFS_UPCALL=y
 CONFIG_CIFS_XATTR=y
 CONFIG_CIFS_POSIX=y
@@ -720,6 +721,8 @@ CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_CRC32=m
 CONFIG_CRYPTO_BLAKE2S=m
+CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_MD5=y
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD160=m
 CONFIG_CRYPTO_SHA3=m
@@ -774,7 +777,6 @@ CONFIG_RANDOM32_SELFTEST=y
 CONFIG_DMA_CMA=y
 CONFIG_CMA_SIZE_MBYTES=0
 CONFIG_DMA_API_DEBUG=y
-CONFIG_STRING_SELFTEST=y
 CONFIG_PRINTK_TIME=y
 CONFIG_DYNAMIC_DEBUG=y
 CONFIG_DEBUG_INFO=y
@@ -853,12 +855,12 @@ CONFIG_FAIL_FUNCTION=y
 CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y
 CONFIG_LKDTM=m
 CONFIG_TEST_MIN_HEAP=y
-CONFIG_TEST_SORT=y
 CONFIG_KPROBES_SANITY_TEST=y
 CONFIG_RBTREE_TEST=y
 CONFIG_INTERVAL_TREE_TEST=m
 CONFIG_PERCPU_TEST=m
 CONFIG_ATOMIC64_SELFTEST=y
+CONFIG_STRING_SELFTEST=y
 CONFIG_TEST_BITOPS=m
 CONFIG_TEST_BPF=m
 CONFIG_TEST_LIVEPATCH=m


@@ -8,6 +8,7 @@ CONFIG_BPF_SYSCALL=y
 CONFIG_BPF_JIT=y
 CONFIG_BPF_JIT_ALWAYS_ON=y
 CONFIG_BPF_LSM=y
+CONFIG_SCHED_CORE=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_BSD_PROCESS_ACCT_V3=y
 CONFIG_TASKSTATS=y
@@ -494,6 +495,7 @@ CONFIG_NLMON=m
 # CONFIG_NET_VENDOR_HUAWEI is not set
 # CONFIG_NET_VENDOR_INTEL is not set
 # CONFIG_NET_VENDOR_MICROSOFT is not set
+# CONFIG_NET_VENDOR_LITEX is not set
 # CONFIG_NET_VENDOR_MARVELL is not set
 CONFIG_MLX4_EN=m
 CONFIG_MLX5_CORE=m
@@ -648,7 +650,6 @@ CONFIG_NFSD_V3_ACL=y
 CONFIG_NFSD_V4=y
 CONFIG_NFSD_V4_SECURITY_LABEL=y
 CONFIG_CIFS=m
-CONFIG_CIFS_WEAK_PW_HASH=y
 CONFIG_CIFS_UPCALL=y
 CONFIG_CIFS_XATTR=y
 CONFIG_CIFS_POSIX=y
@@ -708,6 +709,8 @@ CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_CRC32=m
 CONFIG_CRYPTO_BLAKE2S=m
+CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_MD5=y
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD160=m
 CONFIG_CRYPTO_SHA3=m


@@ -159,7 +159,7 @@ SYSCALL_DEFINE3(s390_pci_mmio_write, unsigned long, mmio_addr,
 	mmap_read_lock(current->mm);
 	ret = -EINVAL;
-	vma = find_vma(current->mm, mmio_addr);
+	vma = vma_lookup(current->mm, mmio_addr);
 	if (!vma)
 		goto out_unlock_mmap;
 	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
@@ -298,7 +298,7 @@ SYSCALL_DEFINE3(s390_pci_mmio_read, unsigned long, mmio_addr,
 	mmap_read_lock(current->mm);
 	ret = -EINVAL;
-	vma = find_vma(current->mm, mmio_addr);
+	vma = vma_lookup(current->mm, mmio_addr);
 	if (!vma)
 		goto out_unlock_mmap;
 	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))


@@ -80,30 +80,30 @@ $(obj)/vmlinux.bin.xz: $(obj)/vmlinux.bin FORCE
 $(obj)/vmlinux.bin.lzo: $(obj)/vmlinux.bin FORCE
 	$(call if_changed,lzo)
 
-$(obj)/uImage.bz2: $(obj)/vmlinux.bin.bz2
+$(obj)/uImage.bz2: $(obj)/vmlinux.bin.bz2 FORCE
 	$(call if_changed,uimage,bzip2)
 
-$(obj)/uImage.gz: $(obj)/vmlinux.bin.gz
+$(obj)/uImage.gz: $(obj)/vmlinux.bin.gz FORCE
 	$(call if_changed,uimage,gzip)
 
-$(obj)/uImage.lzma: $(obj)/vmlinux.bin.lzma
+$(obj)/uImage.lzma: $(obj)/vmlinux.bin.lzma FORCE
 	$(call if_changed,uimage,lzma)
 
-$(obj)/uImage.xz: $(obj)/vmlinux.bin.xz
+$(obj)/uImage.xz: $(obj)/vmlinux.bin.xz FORCE
 	$(call if_changed,uimage,xz)
 
-$(obj)/uImage.lzo: $(obj)/vmlinux.bin.lzo
+$(obj)/uImage.lzo: $(obj)/vmlinux.bin.lzo FORCE
 	$(call if_changed,uimage,lzo)
 
-$(obj)/uImage.bin: $(obj)/vmlinux.bin
+$(obj)/uImage.bin: $(obj)/vmlinux.bin FORCE
 	$(call if_changed,uimage,none)
 
 OBJCOPYFLAGS_vmlinux.srec := -I binary -O srec
-$(obj)/vmlinux.srec: $(obj)/compressed/vmlinux
+$(obj)/vmlinux.srec: $(obj)/compressed/vmlinux FORCE
 	$(call if_changed,objcopy)
 
 OBJCOPYFLAGS_uImage.srec := -I binary -O srec
-$(obj)/uImage.srec: $(obj)/uImage
+$(obj)/uImage.srec: $(obj)/uImage FORCE
 	$(call if_changed,objcopy)
 
 $(obj)/uImage: $(obj)/uImage.$(suffix-y)


@@ -356,7 +356,9 @@ err_nomem:
 void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
 		dma_addr_t dma_addr, unsigned long attrs)
 {
-	if (!sparc_dma_free_resource(cpu_addr, PAGE_ALIGN(size)))
+	size = PAGE_ALIGN(size);
+	if (!sparc_dma_free_resource(cpu_addr, size))
 		return;
 
 	dma_make_coherent(dma_addr, size);

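The sparc fix just above is the align-once pattern: previously the size was page-aligned only inside the sparc_dma_free_resource() check, while the later dma_make_coherent() call still saw the caller's unaligned size. A small standalone C sketch of the pattern, where PAGE_ALIGN mirrors the kernel macro but free_resource() is a hypothetical stub:

    #include <stdio.h>

    #define PAGE_SIZE     4096UL
    #define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

    /* Hypothetical stand-in: succeeds only for page-aligned sizes. */
    static int free_resource(unsigned long size)
    {
        return size == PAGE_ALIGN(size);
    }

    int main(void)
    {
        unsigned long size = 3000;      /* caller-supplied, not aligned */

        size = PAGE_ALIGN(size);        /* normalize once up front ...  */
        if (!free_resource(size))       /* ... so every later use of    */
            return 1;                   /* "size" agrees with the check */
        printf("freed %lu bytes\n", size);
        return 0;
    }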

@@ -39,6 +39,7 @@ struct mdesc_hdr {
 	u32	node_sz; /* node block size */
 	u32	name_sz; /* name block size */
 	u32	data_sz; /* data block size */
+	char	data[];
 } __attribute__((aligned(16)));
 
 struct mdesc_elem {
@@ -612,7 +613,7 @@ EXPORT_SYMBOL(mdesc_get_node_info);
 
 static struct mdesc_elem *node_block(struct mdesc_hdr *mdesc)
 {
-	return (struct mdesc_elem *) (mdesc + 1);
+	return (struct mdesc_elem *) mdesc->data;
 }
 
 static void *name_block(struct mdesc_hdr *mdesc)

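The mdesc_hdr change is the flexible-array-member idiom: "(mdesc + 1)" pointer arithmetic is replaced by a named trailing array, which keeps the payload visibly inside the same object for compilers and bounds checkers. A minimal standalone C sketch of the same idiom, with hypothetical names rather than the kernel's:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Trailing flexible array member instead of "(hdr + 1)" arithmetic. */
    struct blob_hdr {
        unsigned int payload_sz;
        char payload[];             /* flexible array member */
    };

    int main(void)
    {
        const char msg[] = "hello";
        struct blob_hdr *hdr = malloc(sizeof(*hdr) + sizeof(msg));

        if (!hdr)
            return 1;
        hdr->payload_sz = sizeof(msg);
        memcpy(hdr->payload, msg, sizeof(msg)); /* was: memcpy(hdr + 1, ...) */
        printf("%u bytes: %s\n", hdr->payload_sz, hdr->payload);
        free(hdr);
        return 0;
    }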

@@ -339,6 +339,11 @@ config NEED_PER_CPU_PAGE_FIRST_CHUNK
 config ARCH_HIBERNATION_POSSIBLE
 	def_bool y
 
+config ARCH_NR_GPIO
+	int
+	default 1024 if X86_64
+	default 512
+
 config ARCH_SUSPEND_POSSIBLE
 	def_bool y


@@ -4,6 +4,12 @@
 tune		= $(call cc-option,-mtune=$(1),$(2))
 
+ifdef CONFIG_CC_IS_CLANG
+align		:= -falign-functions=0 $(call cc-option,-falign-jumps=0) $(call cc-option,-falign-loops=0)
+else
+align		:= -falign-functions=0 -falign-jumps=0 -falign-loops=0
+endif
+
 cflags-$(CONFIG_M486SX)		+= -march=i486
 cflags-$(CONFIG_M486)		+= -march=i486
 cflags-$(CONFIG_M586)		+= -march=i586
@@ -19,11 +25,11 @@ cflags-$(CONFIG_MK6)		+= -march=k6
 # They make zero difference whatsosever to performance at this time.
 cflags-$(CONFIG_MK7)		+= -march=athlon
 cflags-$(CONFIG_MK8)		+= $(call cc-option,-march=k8,-march=athlon)
-cflags-$(CONFIG_MCRUSOE)	+= -march=i686 -falign-functions=0 -falign-jumps=0 -falign-loops=0
-cflags-$(CONFIG_MEFFICEON)	+= -march=i686 $(call tune,pentium3) -falign-functions=0 -falign-jumps=0 -falign-loops=0
+cflags-$(CONFIG_MCRUSOE)	+= -march=i686 $(align)
+cflags-$(CONFIG_MEFFICEON)	+= -march=i686 $(call tune,pentium3) $(align)
 cflags-$(CONFIG_MWINCHIPC6)	+= $(call cc-option,-march=winchip-c6,-march=i586)
 cflags-$(CONFIG_MWINCHIP3D)	+= $(call cc-option,-march=winchip2,-march=i586)
-cflags-$(CONFIG_MCYRIXIII)	+= $(call cc-option,-march=c3,-march=i486) -falign-functions=0 -falign-jumps=0 -falign-loops=0
+cflags-$(CONFIG_MCYRIXIII)	+= $(call cc-option,-march=c3,-march=i486) $(align)
 cflags-$(CONFIG_MVIAC3_2)	+= $(call cc-option,-march=c3-2,-march=i686)
 cflags-$(CONFIG_MVIAC7)		+= -march=i686
 cflags-$(CONFIG_MCORE2)		+= -march=i686 $(call tune,core2)


@@ -99,7 +99,8 @@ static void hv_apic_eoi_write(u32 reg, u32 val)
 /*
  * IPI implementation on Hyper-V.
  */
-static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector)
+static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector,
+		bool exclude_self)
 {
 	struct hv_send_ipi_ex **arg;
 	struct hv_send_ipi_ex *ipi_arg;
@@ -123,6 +124,9 @@ static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector)
 	if (!cpumask_equal(mask, cpu_present_mask)) {
 		ipi_arg->vp_set.format = HV_GENERIC_SET_SPARSE_4K;
-		nr_bank = cpumask_to_vpset(&(ipi_arg->vp_set), mask);
+		if (exclude_self)
+			nr_bank = cpumask_to_vpset_noself(&(ipi_arg->vp_set), mask);
+		else
+			nr_bank = cpumask_to_vpset(&(ipi_arg->vp_set), mask);
 	}
 
 	if (nr_bank < 0)
@@ -138,15 +142,25 @@ ipi_mask_ex_done:
 	return hv_result_success(status);
 }
 
-static bool __send_ipi_mask(const struct cpumask *mask, int vector)
+static bool __send_ipi_mask(const struct cpumask *mask, int vector,
+		bool exclude_self)
 {
-	int cur_cpu, vcpu;
+	int cur_cpu, vcpu, this_cpu = smp_processor_id();
 	struct hv_send_ipi ipi_arg;
 	u64 status;
+	unsigned int weight;
 
 	trace_hyperv_send_ipi_mask(mask, vector);
 
-	if (cpumask_empty(mask))
+	weight = cpumask_weight(mask);
+
+	/*
+	 * Do nothing if
+	 *   1. the mask is empty
+	 *   2. the mask only contains self when exclude_self is true
+	 */
+	if (weight == 0 ||
+	    (exclude_self && weight == 1 && cpumask_test_cpu(this_cpu, mask)))
 		return true;
 
 	if (!hv_hypercall_pg)
@@ -172,6 +186,8 @@ static bool __send_ipi_mask(const struct cpumask *mask, int vector)
 	ipi_arg.cpu_mask = 0;
 
 	for_each_cpu(cur_cpu, mask) {
+		if (exclude_self && cur_cpu == this_cpu)
+			continue;
 		vcpu = hv_cpu_number_to_vp_number(cur_cpu);
 		if (vcpu == VP_INVAL)
 			return false;
@@ -191,7 +207,7 @@ static bool __send_ipi_mask(const struct cpumask *mask, int vector)
 	return hv_result_success(status);
 
 do_ex_hypercall:
-	return __send_ipi_mask_ex(mask, vector);
+	return __send_ipi_mask_ex(mask, vector, exclude_self);
 }
 
 static bool __send_ipi_one(int cpu, int vector)
@@ -208,7 +224,7 @@ static bool __send_ipi_one(int cpu, int vector)
 		return false;
 
 	if (vp >= 64)
-		return __send_ipi_mask_ex(cpumask_of(cpu), vector);
+		return __send_ipi_mask_ex(cpumask_of(cpu), vector, false);
 
 	status = hv_do_fast_hypercall16(HVCALL_SEND_IPI, vector, BIT_ULL(vp));
 	return hv_result_success(status);
@@ -222,20 +238,13 @@ static void hv_send_ipi(int cpu, int vector)
 
 static void hv_send_ipi_mask(const struct cpumask *mask, int vector)
 {
-	if (!__send_ipi_mask(mask, vector))
+	if (!__send_ipi_mask(mask, vector, false))
 		orig_apic.send_IPI_mask(mask, vector);
 }
 
 static void hv_send_ipi_mask_allbutself(const struct cpumask *mask, int vector)
 {
-	unsigned int this_cpu = smp_processor_id();
-	struct cpumask new_mask;
-	const struct cpumask *local_mask;
-
-	cpumask_copy(&new_mask, mask);
-	cpumask_clear_cpu(this_cpu, &new_mask);
-	local_mask = &new_mask;
-	if (!__send_ipi_mask(local_mask, vector))
+	if (!__send_ipi_mask(mask, vector, true))
 		orig_apic.send_IPI_mask_allbutself(mask, vector);
 }
 
@@ -246,7 +255,7 @@ static void hv_send_ipi_allbutself(int vector)
 static void hv_send_ipi_all(int vector)
 {
-	if (!__send_ipi_mask(cpu_online_mask, vector))
+	if (!__send_ipi_mask(cpu_online_mask, vector, false))
 		orig_apic.send_IPI_all(vector);
 }

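The new early-exit in __send_ipi_mask above can be modelled outside the kernel. In this standalone C sketch the cpumask is reduced to a 64-bit bitmap, and the helpers are hypothetical stand-ins for cpumask_weight()/cpumask_test_cpu(), not the kernel API:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Toy stand-ins for cpumask_weight()/cpumask_test_cpu(). */
    static unsigned int mask_weight(uint64_t mask)
    {
        return (unsigned int)__builtin_popcountll(mask);
    }

    static bool mask_test_cpu(int cpu, uint64_t mask)
    {
        return (mask >> cpu) & 1;
    }

    /* Mirrors the new check: nothing to send if the mask is empty, or if
     * it only contains the sending CPU while exclude_self is set. */
    static bool ipi_is_noop(uint64_t mask, int this_cpu, bool exclude_self)
    {
        unsigned int weight = mask_weight(mask);

        return weight == 0 ||
               (exclude_self && weight == 1 && mask_test_cpu(this_cpu, mask));
    }

    int main(void)
    {
        printf("%d\n", ipi_is_noop(0x0, 0, false)); /* 1: empty mask        */
        printf("%d\n", ipi_is_noop(0x1, 0, true));  /* 1: only self, excl.  */
        printf("%d\n", ipi_is_noop(0x3, 0, true));  /* 0: another CPU left  */
        return 0;
    }

This is why hv_send_ipi_mask_allbutself no longer needs to copy the mask onto the stack: the self-exclusion moved into the common path.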

@@ -358,7 +358,7 @@ extern int mce_threshold_remove_device(unsigned int cpu);
 void mce_amd_feature_init(struct cpuinfo_x86 *c);
 int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr);
+enum smca_bank_types smca_get_bank_type(unsigned int bank);
 #else
 static inline int mce_threshold_create_device(unsigned int cpu) { return 0; };

View File

@@ -301,8 +301,8 @@ do { \
 	unsigned int __gu_low, __gu_high; \
 	const unsigned int __user *__gu_ptr; \
 	__gu_ptr = (const void __user *)(ptr); \
-	__get_user_asm(__gu_low, ptr, "l", "=r", label); \
-	__get_user_asm(__gu_high, ptr+1, "l", "=r", label); \
+	__get_user_asm(__gu_low, __gu_ptr, "l", "=r", label); \
+	__get_user_asm(__gu_high, __gu_ptr+1, "l", "=r", label); \
 	(x) = ((unsigned long long)__gu_high << 32) | __gu_low; \
 } while (0)
 #else
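The fix above matters because ptr is a macro argument: expanding it in each __get_user_asm() invocation evaluates it (and any side effects) twice, while the cached __gu_ptr is evaluated once. A hedged standalone illustration of the bug class (BAD_READ_PAIR is invented for the demo, not the kernel macro):

    #include <stdio.h>

    /* Expands its pointer argument twice - any side effect runs twice. */
    #define BAD_READ_PAIR(lo, hi, p) \
        do { (lo) = *(p); (hi) = *((p) + 1); } while (0)

    int main(void)
    {
        unsigned int buf[4] = {1, 2, 3, 4}, lo, hi;
        unsigned int *p = buf;

        BAD_READ_PAIR(lo, hi, p++);               /* p++ evaluated twice */
        printf("%u %u (wanted 1 2)\n", lo, hi);   /* prints 1 3 */
        return 0;
    }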

View File

@@ -119,7 +119,7 @@ const char *smca_get_long_name(enum smca_bank_types t)
 }
 EXPORT_SYMBOL_GPL(smca_get_long_name);

-static enum smca_bank_types smca_get_bank_type(unsigned int bank)
+enum smca_bank_types smca_get_bank_type(unsigned int bank)
 {
 	struct smca_bank *b;
@@ -132,6 +132,7 @@ static enum smca_bank_types smca_get_bank_type(unsigned int bank)
 	return b->hwid->bank_type;
 }
+EXPORT_SYMBOL_GPL(smca_get_bank_type);

 static struct smca_hwid smca_hwid_mcatypes[] = {
 	/* { bank_type, hwid_mcatype } */

View File

@@ -1253,6 +1253,9 @@ static void __mc_scan_banks(struct mce *m, struct pt_regs *regs, struct mce *fin
 static void kill_me_now(struct callback_head *ch)
 {
+	struct task_struct *p = container_of(ch, struct task_struct, mce_kill_me);
+
+	p->mce_count = 0;
 	force_sig(SIGBUS);
 }
@@ -1262,6 +1265,7 @@ static void kill_me_maybe(struct callback_head *cb)
 	int flags = MF_ACTION_REQUIRED;
 	int ret;

+	p->mce_count = 0;
 	pr_err("Uncorrected hardware memory error in user-access at %llx", p->mce_addr);

 	if (!p->mce_ripv)
@@ -1290,8 +1294,12 @@ static void kill_me_maybe(struct callback_head *cb)
 	}
 }

-static void queue_task_work(struct mce *m, int kill_current_task)
+static void queue_task_work(struct mce *m, char *msg, int kill_current_task)
 {
+	int count = ++current->mce_count;
+
+	/* First call, save all the details */
+	if (count == 1) {
 		current->mce_addr = m->addr;
 		current->mce_kflags = m->kflags;
 		current->mce_ripv = !!(m->mcgstatus & MCG_STATUS_RIPV);
@@ -1301,6 +1309,19 @@ static void queue_task_work(struct mce *m, int kill_current_task)
 			current->mce_kill_me.func = kill_me_now;
 		else
 			current->mce_kill_me.func = kill_me_maybe;
+	}
+
+	/* Ten is likely overkill. Don't expect more than two faults before task_work() */
+	if (count > 10)
+		mce_panic("Too many consecutive machine checks while accessing user data", m, msg);
+
+	/* Second or later call, make sure page address matches the one from first call */
+	if (count > 1 && (current->mce_addr >> PAGE_SHIFT) != (m->addr >> PAGE_SHIFT))
+		mce_panic("Consecutive machine checks to different user pages", m, msg);
+
+	/* Do not call task_work_add() more than once */
+	if (count > 1)
+		return;
+
 	task_work_add(current, &current->mce_kill_me, TWA_RESUME);
 }
@@ -1438,7 +1459,7 @@ noinstr void do_machine_check(struct pt_regs *regs)
 		/* If this triggers there is no way to recover. Die hard. */
 		BUG_ON(!on_thread_stack() || !user_mode(regs));

-		queue_task_work(&m, kill_current_task);
+		queue_task_work(&m, msg, kill_current_task);
 	} else {
 		/*
@@ -1456,7 +1477,7 @@ noinstr void do_machine_check(struct pt_regs *regs)
 		}

 		if (m.kflags & MCE_IN_KERNEL_COPYIN)
-			queue_task_work(&m, kill_current_task);
+			queue_task_work(&m, msg, kill_current_task);
 	}
 out:
 	mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);
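The bookkeeping added above is easier to see in isolation: only the first machine check on a task queues the task_work, repeats must hit the same user page, and more than ten in a row panics. A standalone model under those assumptions (the types and names are stand-ins, not the kernel's):

    #include <stdio.h>
    #include <stdlib.h>

    struct task { int mce_count; unsigned long mce_pfn; };

    static void queue_work(struct task *t) { (void)t; puts("work queued"); }

    static void handle_mce(struct task *t, unsigned long pfn)
    {
        int count = ++t->mce_count;

        if (count == 1)
            t->mce_pfn = pfn;     /* first call: save the details */
        if (count > 10)
            abort();              /* too many consecutive machine checks */
        if (count > 1 && t->mce_pfn != pfn)
            abort();              /* consecutive faults on different pages */
        if (count > 1)
            return;               /* task_work already queued once */
        queue_work(t);
    }

    int main(void)
    {
        struct task t = {0, 0};
        handle_mce(&t, 42);       /* queues */
        handle_mce(&t, 42);       /* tolerated, no second queue */
        return 0;
    }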

View File

@@ -135,7 +135,7 @@ static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
 static void __init pcpu_fc_free(void *ptr, size_t size)
 {
-	memblock_free(__pa(ptr), size);
+	memblock_free_ptr(ptr, size);
 }

 static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)

View File

@@ -1432,18 +1432,18 @@ int kern_addr_valid(unsigned long addr)
 		return 0;

 	p4d = p4d_offset(pgd, addr);
-	if (p4d_none(*p4d))
+	if (!p4d_present(*p4d))
 		return 0;

 	pud = pud_offset(p4d, addr);
-	if (pud_none(*pud))
+	if (!pud_present(*pud))
 		return 0;

 	if (pud_large(*pud))
 		return pfn_valid(pud_pfn(*pud));

 	pmd = pmd_offset(pud, addr);
-	if (pmd_none(*pmd))
+	if (!pmd_present(*pmd))
 		return 0;

 	if (pmd_large(*pmd))

View File

@@ -49,8 +49,7 @@ static void __init kasan_populate_pmd(pmd_t *pmd, unsigned long addr,
 			p = early_alloc(PMD_SIZE, nid, false);
 			if (p && pmd_set_huge(pmd, __pa(p), PAGE_KERNEL))
 				return;
-			else if (p)
-				memblock_free(__pa(p), PMD_SIZE);
+			memblock_free_ptr(p, PMD_SIZE);
 		}

 		p = early_alloc(PAGE_SIZE, nid, true);
@@ -86,8 +85,7 @@ static void __init kasan_populate_pud(pud_t *pud, unsigned long addr,
 			p = early_alloc(PUD_SIZE, nid, false);
 			if (p && pud_set_huge(pud, __pa(p), PAGE_KERNEL))
 				return;
-			else if (p)
-				memblock_free(__pa(p), PUD_SIZE);
+			memblock_free_ptr(p, PUD_SIZE);
 		}

 		p = early_alloc(PAGE_SIZE, nid, true);

View File

@@ -355,7 +355,7 @@ void __init numa_reset_distance(void)
 	/* numa_distance could be 1LU marking allocation failure, test cnt */
 	if (numa_distance_cnt)
-		memblock_free(__pa(numa_distance), size);
+		memblock_free_ptr(numa_distance, size);
 	numa_distance_cnt = 0;
 	numa_distance = NULL;	/* enable table creation */
 }

View File

@@ -517,8 +517,7 @@ void __init numa_emulation(struct numa_meminfo *numa_meminfo, int numa_dist_cnt)
 	}

 	/* free the copied physical distance table */
-	if (phys_dist)
-		memblock_free(__pa(phys_dist), phys_size);
+	memblock_free_ptr(phys_dist, phys_size);
 	return;

 no_emu:

View File

@@ -583,7 +583,12 @@ int memtype_reserve(u64 start, u64 end, enum page_cache_mode req_type,
 	int err = 0;

 	start = sanitize_phys(start);
-	end = sanitize_phys(end);
+
+	/*
+	 * The end address passed into this function is exclusive, but
+	 * sanitize_phys() expects an inclusive address.
+	 */
+	end = sanitize_phys(end - 1) + 1;
 	if (start >= end) {
 		WARN(1, "%s failed: [mem %#010Lx-%#010Lx], req %s\n", __func__,
 		     start, end - 1, cattr_name(req_type));
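The arithmetic being fixed above: end is exclusive, but sanitize_phys() clamps an inclusive address, so clamping end directly misbehaves at the top of the address space. A small model (clamp_phys() and MAX_PHYS are invented stand-ins for sanitize_phys() and its limit):

    #include <stdio.h>

    #define MAX_PHYS 0xffffffffULL    /* assumed highest valid address */

    static unsigned long long clamp_phys(unsigned long long addr)
    {
        return addr > MAX_PHYS ? MAX_PHYS : addr;   /* inclusive clamp */
    }

    int main(void)
    {
        unsigned long long end = 0x100000000ULL;    /* exclusive end */

        printf("buggy: %#llx\n", clamp_phys(end));          /* 0xffffffff: range shrinks */
        printf("fixed: %#llx\n", clamp_phys(end - 1) + 1);  /* 0x100000000: intact */
        return 0;
    }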

View File

@@ -1214,6 +1214,11 @@ static void __init xen_dom0_set_legacy_features(void)
 	x86_platform.legacy.rtc = 1;
 }

+static void __init xen_domu_set_legacy_features(void)
+{
+	x86_platform.legacy.rtc = 0;
+}
+
 /* First C function to be called on Xen boot */
 asmlinkage __visible void __init xen_start_kernel(void)
 {
@@ -1359,6 +1364,8 @@ asmlinkage __visible void __init xen_start_kernel(void)
 			add_preferred_console("xenboot", 0, NULL);
 		if (pci_xen)
 			x86_init.pci.arch_init = pci_xen_init;
+		x86_platform.set_legacy_features =
+				xen_domu_set_legacy_features;
 	} else {
 		const struct dom0_vga_console_info *info =
 			(void *)((char *)xen_start_info +

View File

@@ -1518,14 +1518,17 @@ static inline void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn,
 	if (pinned) {
 		struct page *page = pfn_to_page(pfn);

-		if (static_branch_likely(&xen_struct_pages_ready))
+		pinned = false;
+		if (static_branch_likely(&xen_struct_pages_ready)) {
+			pinned = PagePinned(page);
 			SetPagePinned(page);
+		}

 		xen_mc_batch();
 		__set_pfn_prot(pfn, PAGE_KERNEL_RO);

-		if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS)
+		if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS && !pinned)
 			__pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);

 		xen_mc_issue(PARAVIRT_LAZY_MMU);

View File

@@ -1182,10 +1182,6 @@ int blkcg_init_queue(struct request_queue *q)
 	if (preloaded)
 		radix_tree_preload_end();

-	ret = blk_iolatency_init(q);
-	if (ret)
-		goto err_destroy_all;
-
 	ret = blk_ioprio_init(q);
 	if (ret)
 		goto err_destroy_all;
@@ -1194,6 +1190,12 @@ int blkcg_init_queue(struct request_queue *q)
 	if (ret)
 		goto err_destroy_all;

+	ret = blk_iolatency_init(q);
+	if (ret) {
+		blk_throtl_exit(q);
+		goto err_destroy_all;
+	}
+
 	return 0;

 err_destroy_all:
@@ -1364,10 +1366,14 @@ enomem:
 	/* alloc failed, nothing's initialized yet, free everything */
 	spin_lock_irq(&q->queue_lock);
 	list_for_each_entry(blkg, &q->blkg_list, q_node) {
+		struct blkcg *blkcg = blkg->blkcg;
+
+		spin_lock(&blkcg->lock);
 		if (blkg->pd[pol->plid]) {
 			pol->pd_free_fn(blkg->pd[pol->plid]);
 			blkg->pd[pol->plid] = NULL;
 		}
+		spin_unlock(&blkcg->lock);
 	}
 	spin_unlock_irq(&q->queue_lock);
 	ret = -ENOMEM;
@@ -1399,12 +1405,16 @@ void blkcg_deactivate_policy(struct request_queue *q,
 	__clear_bit(pol->plid, q->blkcg_pols);

 	list_for_each_entry(blkg, &q->blkg_list, q_node) {
+		struct blkcg *blkcg = blkg->blkcg;
+
+		spin_lock(&blkcg->lock);
 		if (blkg->pd[pol->plid]) {
 			if (pol->pd_offline_fn)
 				pol->pd_offline_fn(blkg->pd[pol->plid]);
 			pol->pd_free_fn(blkg->pd[pol->plid]);
 			blkg->pd[pol->plid] = NULL;
 		}
+		spin_unlock(&blkcg->lock);
 	}

 	spin_unlock_irq(&q->queue_lock);
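The first two hunks above move blk_iolatency_init() to the end of blkcg_init_queue() so that its failure path can unwind the step before it explicitly. The pattern in miniature (stand-in init functions, with the last one made to fail):

    #include <stdio.h>

    static int init_ioprio(void)    { return 0; }
    static int init_throtl(void)    { return 0; }
    static int init_iolatency(void) { return -1; }   /* simulated failure */
    static void throtl_exit(void)   { puts("throttling torn down"); }

    static int init_queue(void)
    {
        int ret;

        if ((ret = init_ioprio()))
            return ret;
        if ((ret = init_throtl()))
            return ret;
        if ((ret = init_iolatency())) {   /* runs last, as in the hunk */
            throtl_exit();                /* undo the step before it */
            return ret;
        }
        return 0;
    }

    int main(void) { return init_queue() ? 1 : 0; }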

View File

@@ -426,8 +426,15 @@ EXPORT_SYMBOL(blk_integrity_register);
  */
 void blk_integrity_unregister(struct gendisk *disk)
 {
+	struct blk_integrity *bi = &disk->queue->integrity;
+
+	if (!bi->profile)
+		return;
+
+	/* ensure all bios are off the integrity workqueue */
+	blk_flush_integrity();
 	blk_queue_flag_clear(QUEUE_FLAG_STABLE_WRITES, disk->queue);
-	memset(&disk->queue->integrity, 0, sizeof(struct blk_integrity));
+	memset(bi, 0, sizeof(*bi));
 }
 EXPORT_SYMBOL(blk_integrity_unregister);

View File

@@ -208,7 +208,7 @@ static struct request *blk_mq_find_and_get_req(struct blk_mq_tags *tags,
 	spin_lock_irqsave(&tags->lock, flags);
 	rq = tags->rqs[bitnr];
-	if (!rq || !refcount_inc_not_zero(&rq->ref))
+	if (!rq || rq->tag != bitnr || !refcount_inc_not_zero(&rq->ref))
 		rq = NULL;
 	spin_unlock_irqrestore(&tags->lock, flags);
 	return rq;

View File

@@ -264,7 +264,7 @@ void __init numa_free_distance(void)
 	size = numa_distance_cnt * numa_distance_cnt *
 		sizeof(numa_distance[0]);

-	memblock_free(__pa(numa_distance), size);
+	memblock_free_ptr(numa_distance, size);
 	numa_distance_cnt = 0;
 	numa_distance = NULL;
 }

View File

@@ -13,6 +13,7 @@
 #include <linux/export.h>
 #include <linux/rtc.h>
 #include <linux/suspend.h>
+#include <linux/init.h>

 #include <linux/mc146818rtc.h>
@@ -165,6 +166,9 @@ void generate_pm_trace(const void *tracedata, unsigned int user)
 	const char *file = *(const char **)(tracedata + 2);
 	unsigned int user_hash_value, file_hash_value;

+	if (!x86_platform.legacy.rtc)
+		return;
+
 	user_hash_value = user % USERHASH;
 	file_hash_value = hash_string(lineno, file, FILEHASH);
 	set_magic_time(user_hash_value, file_hash_value, dev_hash_value);
@@ -267,6 +271,9 @@ static struct notifier_block pm_trace_nb = {
 static int __init early_resume_init(void)
 {
+	if (!x86_platform.legacy.rtc)
+		return 0;
+
 	hash_value_early_read = read_magic_time();
 	register_pm_notifier(&pm_trace_nb);
 	return 0;
@@ -277,6 +284,9 @@ static int __init late_resume_init(void)
 	unsigned int val = hash_value_early_read;
 	unsigned int user, file, dev;

+	if (!x86_platform.legacy.rtc)
+		return 0;
+
 	user = val % USERHASH;
 	val = val / USERHASH;
 	file = val % FILEHASH;

View File

@@ -74,8 +74,8 @@ unsigned int gov_attr_set_put(struct gov_attr_set *attr_set, struct list_head *l
 	if (count)
 		return count;

-	kobject_put(&attr_set->kobj);
 	mutex_destroy(&attr_set->update_lock);
+	kobject_put(&attr_set->kobj);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(gov_attr_set_put);
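The swap above is a use-after-free fix: the final kobject_put() can free the structure that embeds update_lock, so the mutex must be destroyed first. A userspace analogue with free() standing in for the last kobject_put():

    #include <pthread.h>
    #include <stdlib.h>

    struct attr_set { pthread_mutex_t update_lock; };

    int main(void)
    {
        struct attr_set *set = malloc(sizeof(*set));

        if (!set)
            return 1;
        pthread_mutex_init(&set->update_lock, NULL);

        /* Buggy order would be: free(set); then
         * pthread_mutex_destroy(&set->update_lock); - touching freed
         * memory. Correct order, matching the fixed code: */
        pthread_mutex_destroy(&set->update_lock);
        free(set);
        return 0;
    }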

View File

@@ -3205,11 +3205,15 @@ static int __init intel_pstate_init(void)
 	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
 		return -ENODEV;

-	if (no_load)
-		return -ENODEV;
-
 	id = x86_match_cpu(hwp_support_ids);
 	if (id) {
+		bool hwp_forced = intel_pstate_hwp_is_enabled();
+
+		if (hwp_forced)
+			pr_info("HWP enabled by BIOS\n");
+		else if (no_load)
+			return -ENODEV;
+
 		copy_cpu_funcs(&core_funcs);
 		/*
 		 * Avoid enabling HWP for processors without EPP support,
@@ -3219,8 +3223,7 @@ static int __init intel_pstate_init(void)
 		 * If HWP is enabled already, though, there is no choice but to
 		 * deal with it.
 		 */
-		if ((!no_hwp && boot_cpu_has(X86_FEATURE_HWP_EPP)) ||
-		    intel_pstate_hwp_is_enabled()) {
+		if ((!no_hwp && boot_cpu_has(X86_FEATURE_HWP_EPP)) || hwp_forced) {
 			hwp_active++;
 			hwp_mode_bdw = id->driver_data;
 			intel_pstate.attr = hwp_cpufreq_attrs;
@@ -3235,7 +3238,11 @@ static int __init intel_pstate_init(void)
 			goto hwp_cpu_matched;
 		}
+		pr_info("HWP not enabled\n");
 	} else {
+		if (no_load)
+			return -ENODEV;
+
 		id = x86_match_cpu(intel_pstate_cpu_ids);
 		if (!id) {
 			pr_info("CPU model not supported\n");
@@ -3314,10 +3321,9 @@ static int __init intel_pstate_setup(char *str)
 	else if (!strcmp(str, "passive"))
 		default_driver = &intel_cpufreq;

-	if (!strcmp(str, "no_hwp")) {
-		pr_info("HWP disabled\n");
+	if (!strcmp(str, "no_hwp"))
 		no_hwp = 1;
-	}

 	if (!strcmp(str, "force"))
 		force_load = 1;
 	if (!strcmp(str, "hwp_only"))
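The decision the intel_pstate hunks encode: when the BIOS has already forced HWP on, no_load can no longer make the driver bail out, because something has to manage the HWP that is already enabled. A standalone truth-table model of that logic (my reading of the hunks, not driver code):

    #include <stdbool.h>
    #include <stdio.h>

    static bool should_load(bool hwp_supported, bool hwp_forced, bool no_load)
    {
        if (hwp_supported && hwp_forced)
            return true;          /* must manage HWP the BIOS enabled */
        return !no_load;          /* otherwise no_load still wins */
    }

    int main(void)
    {
        printf("%d\n", should_load(true, true, true));    /* 1: loads anyway */
        printf("%d\n", should_load(true, false, true));   /* 0: honours no_load */
        return 0;
    }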

View File

@@ -451,7 +451,6 @@ static int ve_spc_cpufreq_init(struct cpufreq_policy *policy)
 static int ve_spc_cpufreq_exit(struct cpufreq_policy *policy)
 {
 	struct device *cpu_dev;
-	int cur_cluster = cpu_to_cluster(policy->cpu);

 	cpu_dev = get_cpu_device(policy->cpu);
 	if (!cpu_dev) {

View File

@@ -744,6 +744,7 @@ enum amd_hw_ip_block_type {
 	UVD_HWIP,
 	VCN_HWIP = UVD_HWIP,
 	JPEG_HWIP = VCN_HWIP,
+	VCN1_HWIP,
 	VCE_HWIP,
 	DF_HWIP,
 	DCE_HWIP,
@@ -755,10 +756,15 @@ enum amd_hw_ip_block_type {
 	CLK_HWIP,
 	UMC_HWIP,
 	RSMU_HWIP,
+	XGMI_HWIP,
+	DCI_HWIP,
 	MAX_HWIP
 };

-#define HWIP_MAX_INSTANCE	8
+#define HWIP_MAX_INSTANCE	10
+
+#define HW_ID_MAX		300
+#define IP_VERSION(mj, mn, rv)	(((mj) << 16) | ((mn) << 8) | (rv))

 struct amd_powerplay {
 	void *pp_handle;
@@ -830,6 +836,7 @@ struct amdgpu_device {
 	struct notifier_block		acpi_nb;
 	struct amdgpu_i2c_chan		*i2c_bus[AMDGPU_MAX_I2C_BUS];
 	struct debugfs_blob_wrapper	debugfs_vbios_blob;
+	struct debugfs_blob_wrapper	debugfs_discovery_blob;
 	struct mutex			srbm_mutex;
 	/* GRBM index mutex. Protects concurrent access to GRBM index */
 	struct mutex			grbm_idx_mutex;
@@ -1078,8 +1085,6 @@ struct amdgpu_device {
 	char				product_name[32];
 	char				serial[20];

-	struct amdgpu_autodump		autodump;
-
 	atomic_t			throttling_logging_enabled;
 	struct ratelimit_state		throttling_logging_rs;
 	uint32_t			ras_hw_enabled;
@@ -1087,8 +1092,10 @@ struct amdgpu_device {
 	bool				no_hw_access;
 	struct pci_saved_state		*pci_state;
+	pci_channel_state_t		pci_channel_state;

 	struct amdgpu_reset_control	*reset_cntl;
+	uint32_t			ip_versions[HW_ID_MAX][HWIP_MAX_INSTANCE];
 };

 static inline struct amdgpu_device *drm_to_adev(struct drm_device *ddev)

View File

@@ -31,6 +31,8 @@
 #include <linux/dma-buf.h>
 #include "amdgpu_xgmi.h"
 #include <uapi/linux/kfd_ioctl.h>
+#include "amdgpu_ras.h"
+#include "amdgpu_umc.h"

 /* Total memory size in system memory and all GPU VRAM. Used to
  * estimate worst case amount of memory to reserve for page tables
@@ -70,8 +72,7 @@ void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev)
 	if (!kfd_initialized)
 		return;

-	adev->kfd.dev = kgd2kfd_probe((struct kgd_dev *)adev,
-				      adev->pdev, adev->asic_type, vf);
+	adev->kfd.dev = kgd2kfd_probe((struct kgd_dev *)adev, vf);

 	if (adev->kfd.dev)
 		amdgpu_amdkfd_total_mem_size += adev->gmc.real_vram_size;
@@ -192,6 +193,16 @@ void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool run_pm)
 		kgd2kfd_suspend(adev->kfd.dev, run_pm);
 }

+int amdgpu_amdkfd_resume_iommu(struct amdgpu_device *adev)
+{
+	int r = 0;
+
+	if (adev->kfd.dev)
+		r = kgd2kfd_resume_iommu(adev->kfd.dev);
+
+	return r;
+}
+
 int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool run_pm)
 {
 	int r = 0;
@@ -770,3 +781,15 @@ bool amdgpu_amdkfd_have_atomics_support(struct kgd_dev *kgd)
 	return adev->have_atomics_support;
 }
+
+void amdgpu_amdkfd_ras_poison_consumption_handler(struct kgd_dev *kgd)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+	struct ras_err_data err_data = {0, 0, 0, NULL};
+
+	/* CPU MCA will handle page retirement if connected_to_cpu is 1 */
+	if (!adev->gmc.xgmi.connected_to_cpu)
+		amdgpu_umc_process_ras_data_cb(adev, &err_data, NULL);
+	else
+		amdgpu_amdkfd_gpu_reset(kgd);
+}

View File

@@ -137,6 +137,7 @@ int amdgpu_amdkfd_init(void);
 void amdgpu_amdkfd_fini(void);

 void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool run_pm);
+int amdgpu_amdkfd_resume_iommu(struct amdgpu_device *adev);
 int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool run_pm);
 void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
 			const void *ih_ring_entry);
@@ -289,6 +290,7 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
 			      uint64_t *mmap_offset);
 int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd,
 			struct tile_config *config);
+void amdgpu_amdkfd_ras_poison_consumption_handler(struct kgd_dev *kgd);
 #if IS_ENABLED(CONFIG_HSA_AMD)
 void amdgpu_amdkfd_gpuvm_init_mem_limits(void);
 void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
@@ -320,13 +322,13 @@ int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm,
 #if IS_ENABLED(CONFIG_HSA_AMD)
 int kgd2kfd_init(void);
 void kgd2kfd_exit(void);
-struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, struct pci_dev *pdev,
-			      unsigned int asic_type, bool vf);
+struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, bool vf);
 bool kgd2kfd_device_init(struct kfd_dev *kfd,
 			 struct drm_device *ddev,
 			 const struct kgd2kfd_shared_resources *gpu_resources);
 void kgd2kfd_device_exit(struct kfd_dev *kfd);
 void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm);
+int kgd2kfd_resume_iommu(struct kfd_dev *kfd);
 int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm);
 int kgd2kfd_pre_reset(struct kfd_dev *kfd);
 int kgd2kfd_post_reset(struct kfd_dev *kfd);
@@ -344,8 +346,7 @@ static inline void kgd2kfd_exit(void)
 }

 static inline
-struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, struct pci_dev *pdev,
-			      unsigned int asic_type, bool vf)
+struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, bool vf)
 {
 	return NULL;
 }
@@ -365,6 +366,11 @@ static inline void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
 {
 }

+static int __maybe_unused kgd2kfd_resume_iommu(struct kfd_dev *kfd)
+{
+	return 0;
+}
+
 static inline int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
 {
 	return 0;

View File

@@ -563,6 +563,7 @@ kfd_mem_dmaunmap_userptr(struct kgd_mem *mem,
 	dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0);
 	sg_free_table(ttm->sg);
+	kfree(ttm->sg);
 	ttm->sg = NULL;
 }

View File

@@ -61,7 +61,7 @@ static void amdgpu_bo_list_free(struct kref *ref)
 int amdgpu_bo_list_create(struct amdgpu_device *adev, struct drm_file *filp,
 			  struct drm_amdgpu_bo_list_entry *info,
-			  unsigned num_entries, struct amdgpu_bo_list **result)
+			  size_t num_entries, struct amdgpu_bo_list **result)
 {
 	unsigned last_entry = 0, first_userptr = num_entries;
 	struct amdgpu_bo_list_entry *array;

View File

@@ -61,7 +61,7 @@ int amdgpu_bo_create_list_entry_array(struct drm_amdgpu_bo_list_in *in,
 int amdgpu_bo_list_create(struct amdgpu_device *adev,
 				 struct drm_file *filp,
 				 struct drm_amdgpu_bo_list_entry *info,
-				 unsigned num_entries,
+				 size_t num_entries,
 				 struct amdgpu_bo_list **list);

 static inline struct amdgpu_bo_list_entry *

View File

@@ -43,14 +43,61 @@ const unsigned int amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM] = {
 	[AMDGPU_HW_IP_VCN_JPEG]	=	1,
 };

-static int amdgpu_ctx_priority_permit(struct drm_file *filp,
-				      enum drm_sched_priority priority)
+bool amdgpu_ctx_priority_is_valid(int32_t ctx_prio)
 {
-	if (priority < 0 || priority >= DRM_SCHED_PRIORITY_COUNT)
+	switch (ctx_prio) {
+	case AMDGPU_CTX_PRIORITY_UNSET:
+	case AMDGPU_CTX_PRIORITY_VERY_LOW:
+	case AMDGPU_CTX_PRIORITY_LOW:
+	case AMDGPU_CTX_PRIORITY_NORMAL:
+	case AMDGPU_CTX_PRIORITY_HIGH:
+	case AMDGPU_CTX_PRIORITY_VERY_HIGH:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static enum drm_sched_priority
+amdgpu_ctx_to_drm_sched_prio(int32_t ctx_prio)
+{
+	switch (ctx_prio) {
+	case AMDGPU_CTX_PRIORITY_UNSET:
+		return DRM_SCHED_PRIORITY_UNSET;
+	case AMDGPU_CTX_PRIORITY_VERY_LOW:
+		return DRM_SCHED_PRIORITY_MIN;
+	case AMDGPU_CTX_PRIORITY_LOW:
+		return DRM_SCHED_PRIORITY_MIN;
+	case AMDGPU_CTX_PRIORITY_NORMAL:
+		return DRM_SCHED_PRIORITY_NORMAL;
+	case AMDGPU_CTX_PRIORITY_HIGH:
+		return DRM_SCHED_PRIORITY_HIGH;
+	case AMDGPU_CTX_PRIORITY_VERY_HIGH:
+		return DRM_SCHED_PRIORITY_HIGH;
+	/* This should not happen as we sanitized userspace provided priority
+	 * already, WARN if this happens.
+	 */
+	default:
+		WARN(1, "Invalid context priority %d\n", ctx_prio);
+		return DRM_SCHED_PRIORITY_NORMAL;
+	}
+}
+
+static int amdgpu_ctx_priority_permit(struct drm_file *filp,
+				      int32_t priority)
+{
+	if (!amdgpu_ctx_priority_is_valid(priority))
 		return -EINVAL;

 	/* NORMAL and below are accessible by everyone */
-	if (priority <= DRM_SCHED_PRIORITY_NORMAL)
+	if (priority <= AMDGPU_CTX_PRIORITY_NORMAL)
 		return 0;

 	if (capable(CAP_SYS_NICE))
@@ -62,26 +109,51 @@ static int amdgpu_ctx_priority_permit(struct drm_file *filp,
 	return -EACCES;
 }

-static enum gfx_pipe_priority amdgpu_ctx_sched_prio_to_compute_prio(enum drm_sched_priority prio)
+static enum amdgpu_gfx_pipe_priority amdgpu_ctx_prio_to_compute_prio(int32_t prio)
 {
 	switch (prio) {
-	case DRM_SCHED_PRIORITY_HIGH:
-	case DRM_SCHED_PRIORITY_KERNEL:
+	case AMDGPU_CTX_PRIORITY_HIGH:
+	case AMDGPU_CTX_PRIORITY_VERY_HIGH:
 		return AMDGPU_GFX_PIPE_PRIO_HIGH;
 	default:
 		return AMDGPU_GFX_PIPE_PRIO_NORMAL;
 	}
 }

-static unsigned int amdgpu_ctx_prio_sched_to_hw(struct amdgpu_device *adev,
-						enum drm_sched_priority prio,
-						u32 hw_ip)
+static enum amdgpu_ring_priority_level amdgpu_ctx_sched_prio_to_ring_prio(int32_t prio)
 {
+	switch (prio) {
+	case AMDGPU_CTX_PRIORITY_HIGH:
+		return AMDGPU_RING_PRIO_1;
+	case AMDGPU_CTX_PRIORITY_VERY_HIGH:
+		return AMDGPU_RING_PRIO_2;
+	default:
+		return AMDGPU_RING_PRIO_0;
+	}
+}
+
+static unsigned int amdgpu_ctx_get_hw_prio(struct amdgpu_ctx *ctx, u32 hw_ip)
+{
+	struct amdgpu_device *adev = ctx->adev;
+	int32_t ctx_prio;
 	unsigned int hw_prio;

-	hw_prio = (hw_ip == AMDGPU_HW_IP_COMPUTE) ?
-			amdgpu_ctx_sched_prio_to_compute_prio(prio) :
-			AMDGPU_RING_PRIO_DEFAULT;
+	ctx_prio = (ctx->override_priority == AMDGPU_CTX_PRIORITY_UNSET) ?
+			ctx->init_priority : ctx->override_priority;
+
+	switch (hw_ip) {
+	case AMDGPU_HW_IP_COMPUTE:
+		hw_prio = amdgpu_ctx_prio_to_compute_prio(ctx_prio);
+		break;
+	case AMDGPU_HW_IP_VCE:
+	case AMDGPU_HW_IP_VCN_ENC:
+		hw_prio = amdgpu_ctx_sched_prio_to_ring_prio(ctx_prio);
+		break;
+	default:
+		hw_prio = AMDGPU_RING_PRIO_DEFAULT;
+		break;
+	}
+
 	hw_ip = array_index_nospec(hw_ip, AMDGPU_HW_IP_NUM);
 	if (adev->gpu_sched[hw_ip][hw_prio].num_scheds == 0)
 		hw_prio = AMDGPU_RING_PRIO_DEFAULT;
@@ -89,6 +161,7 @@ static unsigned int amdgpu_ctx_prio_sched_to_hw(struct amdgpu_device *adev,
 	return hw_prio;
 }

 static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, u32 hw_ip,
 				  const u32 ring)
 {
@@ -96,8 +169,9 @@ static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, u32 hw_ip,
 	struct amdgpu_ctx_entity *entity;
 	struct drm_gpu_scheduler **scheds = NULL, *sched = NULL;
 	unsigned num_scheds = 0;
+	int32_t ctx_prio;
 	unsigned int hw_prio;
-	enum drm_sched_priority priority;
+	enum drm_sched_priority drm_prio;
 	int r;

 	entity = kzalloc(struct_size(entity, fences, amdgpu_sched_jobs),
@@ -105,10 +179,11 @@ static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, u32 hw_ip,
 	if (!entity)
 		return -ENOMEM;

-	entity->sequence = 1;
-	priority = (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ?
+	ctx_prio = (ctx->override_priority == AMDGPU_CTX_PRIORITY_UNSET) ?
 			ctx->init_priority : ctx->override_priority;
-	hw_prio = amdgpu_ctx_prio_sched_to_hw(adev, priority, hw_ip);
+	entity->sequence = 1;
+	hw_prio = amdgpu_ctx_get_hw_prio(ctx, hw_ip);
+	drm_prio = amdgpu_ctx_to_drm_sched_prio(ctx_prio);

 	hw_ip = array_index_nospec(hw_ip, AMDGPU_HW_IP_NUM);
 	scheds = adev->gpu_sched[hw_ip][hw_prio].sched;
@@ -124,7 +199,7 @@ static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, u32 hw_ip,
 		num_scheds = 1;
 	}

-	r = drm_sched_entity_init(&entity->entity, priority, scheds, num_scheds,
+	r = drm_sched_entity_init(&entity->entity, drm_prio, scheds, num_scheds,
 				  &ctx->guilty);
 	if (r)
 		goto error_free_entity;
@@ -139,7 +214,7 @@ error_free_entity:
 }

 static int amdgpu_ctx_init(struct amdgpu_device *adev,
-			   enum drm_sched_priority priority,
+			   int32_t priority,
 			   struct drm_file *filp,
 			   struct amdgpu_ctx *ctx)
 {
@@ -161,7 +236,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
 	ctx->reset_counter_query = ctx->reset_counter;
 	ctx->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
 	ctx->init_priority = priority;
-	ctx->override_priority = DRM_SCHED_PRIORITY_UNSET;
+	ctx->override_priority = AMDGPU_CTX_PRIORITY_UNSET;

 	return 0;
 }
@@ -234,7 +309,7 @@ int amdgpu_ctx_get_entity(struct amdgpu_ctx *ctx, u32 hw_ip, u32 instance,
 static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
 			    struct amdgpu_fpriv *fpriv,
 			    struct drm_file *filp,
-			    enum drm_sched_priority priority,
+			    int32_t priority,
 			    uint32_t *id)
 {
 	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
@@ -397,19 +472,19 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
 {
 	int r;
 	uint32_t id;
-	enum drm_sched_priority priority;
+	int32_t priority;

 	union drm_amdgpu_ctx *args = data;
 	struct amdgpu_device *adev = drm_to_adev(dev);
 	struct amdgpu_fpriv *fpriv = filp->driver_priv;

 	id = args->in.ctx_id;
-	r = amdgpu_to_sched_priority(args->in.priority, &priority);
+	priority = args->in.priority;

 	/* For backwards compatibility reasons, we need to accept
 	 * ioctls with garbage in the priority field */
-	if (r == -EINVAL)
-		priority = DRM_SCHED_PRIORITY_NORMAL;
+	if (!amdgpu_ctx_priority_is_valid(priority))
+		priority = AMDGPU_CTX_PRIORITY_NORMAL;

 	switch (args->in.op) {
 	case AMDGPU_CTX_OP_ALLOC_CTX:
@@ -517,7 +592,7 @@ struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
 static void amdgpu_ctx_set_entity_priority(struct amdgpu_ctx *ctx,
 					   struct amdgpu_ctx_entity *aentity,
 					   int hw_ip,
-					   enum drm_sched_priority priority)
+					   int32_t priority)
 {
 	struct amdgpu_device *adev = ctx->adev;
 	unsigned int hw_prio;
@@ -525,12 +600,12 @@ static void amdgpu_ctx_set_entity_priority(struct amdgpu_ctx *ctx,
 	unsigned num_scheds;

 	/* set sw priority */
-	drm_sched_entity_set_priority(&aentity->entity, priority);
+	drm_sched_entity_set_priority(&aentity->entity,
+				      amdgpu_ctx_to_drm_sched_prio(priority));

 	/* set hw priority */
 	if (hw_ip == AMDGPU_HW_IP_COMPUTE) {
-		hw_prio = amdgpu_ctx_prio_sched_to_hw(adev, priority,
-						      AMDGPU_HW_IP_COMPUTE);
+		hw_prio = amdgpu_ctx_get_hw_prio(ctx, hw_ip);
 		hw_prio = array_index_nospec(hw_prio, AMDGPU_RING_PRIO_MAX);
 		scheds = adev->gpu_sched[hw_ip][hw_prio].sched;
 		num_scheds = adev->gpu_sched[hw_ip][hw_prio].num_scheds;
@@ -540,14 +615,14 @@ static void amdgpu_ctx_set_entity_priority(struct amdgpu_ctx *ctx,
 }

 void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
-				  enum drm_sched_priority priority)
+				  int32_t priority)
 {
-	enum drm_sched_priority ctx_prio;
+	int32_t ctx_prio;
 	unsigned i, j;

 	ctx->override_priority = priority;

-	ctx_prio = (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ?
+	ctx_prio = (ctx->override_priority == AMDGPU_CTX_PRIORITY_UNSET) ?
 			ctx->init_priority : ctx->override_priority;

 	for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
 		for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
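The shape of the refactor above: context priorities now travel through amdgpu as raw int32_t AMDGPU_CTX_PRIORITY_* values straight from the ioctl, and are translated to DRM scheduler priorities only at the boundary. A standalone model of that mapping (the enum values are stand-ins for the real amdgpu_drm.h / gpu_scheduler.h definitions):

    #include <stdio.h>

    enum ctx_prio   { CTX_UNSET, CTX_VERY_LOW, CTX_LOW, CTX_NORMAL,
                      CTX_HIGH, CTX_VERY_HIGH };
    enum sched_prio { SCHED_MIN, SCHED_NORMAL, SCHED_HIGH, SCHED_UNSET };

    static enum sched_prio to_sched_prio(enum ctx_prio p)
    {
        switch (p) {
        case CTX_UNSET:                     return SCHED_UNSET;
        case CTX_VERY_LOW: case CTX_LOW:    return SCHED_MIN;
        case CTX_HIGH: case CTX_VERY_HIGH:  return SCHED_HIGH;
        default:                            return SCHED_NORMAL;
        }
    }

    int main(void)
    {
        printf("%d\n", to_sched_prio(CTX_VERY_HIGH));   /* both HIGH levels */
        printf("%d\n", to_sched_prio(CTX_HIGH));        /* map to SCHED_HIGH */
        return 0;
    }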

View File

@@ -47,8 +47,8 @@ struct amdgpu_ctx {
 	spinlock_t			ring_lock;
 	struct amdgpu_ctx_entity	*entities[AMDGPU_HW_IP_NUM][AMDGPU_MAX_ENTITY_NUM];
 	bool				preamble_presented;
-	enum drm_sched_priority		init_priority;
-	enum drm_sched_priority		override_priority;
+	int32_t				init_priority;
+	int32_t				override_priority;
 	struct mutex			lock;
 	atomic_t			guilty;
 	unsigned long			ras_counter_ce;
@@ -75,8 +75,8 @@ void amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx,
 struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
 				       struct drm_sched_entity *entity,
 				       uint64_t seq);
-void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
-				  enum drm_sched_priority priority);
+bool amdgpu_ctx_priority_is_valid(int32_t ctx_prio);
+void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx, int32_t ctx_prio);

 int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
 		     struct drm_file *filp);

View File

@@ -27,7 +27,6 @@
 #include <linux/pci.h>
 #include <linux/uaccess.h>
 #include <linux/pm_runtime.h>
-#include <linux/poll.h>

 #include "amdgpu.h"
 #include "amdgpu_pm.h"
@@ -36,87 +35,10 @@
 #include "amdgpu_rap.h"
 #include "amdgpu_securedisplay.h"
 #include "amdgpu_fw_attestation.h"
+#include "amdgpu_umr.h"

-int amdgpu_debugfs_wait_dump(struct amdgpu_device *adev)
-{
-#if defined(CONFIG_DEBUG_FS)
-	unsigned long timeout = 600 * HZ;
-	int ret;
-
-	wake_up_interruptible(&adev->autodump.gpu_hang);
-
-	ret = wait_for_completion_interruptible_timeout(&adev->autodump.dumping, timeout);
-	if (ret == 0) {
-		pr_err("autodump: timeout, move on to gpu recovery\n");
-		return -ETIMEDOUT;
-	}
-#endif
-	return 0;
-}
-
 #if defined(CONFIG_DEBUG_FS)

-static int amdgpu_debugfs_autodump_open(struct inode *inode, struct file *file)
-{
-	struct amdgpu_device *adev = inode->i_private;
-	int ret;
-
-	file->private_data = adev;
-
-	ret = down_read_killable(&adev->reset_sem);
-	if (ret)
-		return ret;
-
-	if (adev->autodump.dumping.done) {
-		reinit_completion(&adev->autodump.dumping);
-		ret = 0;
-	} else {
-		ret = -EBUSY;
-	}
-
-	up_read(&adev->reset_sem);
-
-	return ret;
-}
-
-static int amdgpu_debugfs_autodump_release(struct inode *inode, struct file *file)
-{
-	struct amdgpu_device *adev = file->private_data;
-
-	complete_all(&adev->autodump.dumping);
-	return 0;
-}
-
-static unsigned int amdgpu_debugfs_autodump_poll(struct file *file, struct poll_table_struct *poll_table)
-{
-	struct amdgpu_device *adev = file->private_data;
-
-	poll_wait(file, &adev->autodump.gpu_hang, poll_table);
-
-	if (amdgpu_in_reset(adev))
-		return POLLIN | POLLRDNORM | POLLWRNORM;
-
-	return 0;
-}
-
-static const struct file_operations autodump_debug_fops = {
-	.owner = THIS_MODULE,
-	.open = amdgpu_debugfs_autodump_open,
-	.poll = amdgpu_debugfs_autodump_poll,
-	.release = amdgpu_debugfs_autodump_release,
-};
-
-static void amdgpu_debugfs_autodump_init(struct amdgpu_device *adev)
-{
-	init_completion(&adev->autodump.dumping);
-	complete_all(&adev->autodump.dumping);
-	init_waitqueue_head(&adev->autodump.gpu_hang);
-
-	debugfs_create_file("amdgpu_autodump", 0600,
-		adev_to_drm(adev)->primary->debugfs_root,
-		adev, &autodump_debug_fops);
-}
-
 /**
  * amdgpu_debugfs_process_reg_op - Handle MMIO register reads/writes
  *
@@ -279,6 +201,145 @@ static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
 	return amdgpu_debugfs_process_reg_op(false, f, (char __user *)buf, size, pos);
 }

+static int amdgpu_debugfs_regs2_open(struct inode *inode, struct file *file)
+{
+	struct amdgpu_debugfs_regs2_data *rd;
+
+	rd = kzalloc(sizeof *rd, GFP_KERNEL);
+	if (!rd)
+		return -ENOMEM;
+	rd->adev = file_inode(file)->i_private;
+	file->private_data = rd;
+	mutex_init(&rd->lock);
+
+	return 0;
+}
+
+static int amdgpu_debugfs_regs2_release(struct inode *inode, struct file *file)
+{
+	struct amdgpu_debugfs_regs2_data *rd = file->private_data;
+
+	mutex_destroy(&rd->lock);
+	kfree(file->private_data);
+	return 0;
+}
+
+static ssize_t amdgpu_debugfs_regs2_op(struct file *f, char __user *buf, u32 offset, size_t size, int write_en)
+{
+	struct amdgpu_debugfs_regs2_data *rd = f->private_data;
+	struct amdgpu_device *adev = rd->adev;
+	ssize_t result = 0;
+	int r;
+	uint32_t value;
+
+	if (size & 0x3 || offset & 0x3)
+		return -EINVAL;
+
+	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
+	if (r < 0) {
+		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+		return r;
+	}
+
+	r = amdgpu_virt_enable_access_debugfs(adev);
+	if (r < 0) {
+		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+		return r;
+	}
+
+	mutex_lock(&rd->lock);
+
+	if (rd->id.use_grbm) {
+		if ((rd->id.grbm.sh != 0xFFFFFFFF && rd->id.grbm.sh >= adev->gfx.config.max_sh_per_se) ||
+		    (rd->id.grbm.se != 0xFFFFFFFF && rd->id.grbm.se >= adev->gfx.config.max_shader_engines)) {
+			pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+			pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+			amdgpu_virt_disable_access_debugfs(adev);
+			mutex_unlock(&rd->lock);
+			return -EINVAL;
+		}
+		mutex_lock(&adev->grbm_idx_mutex);
+		amdgpu_gfx_select_se_sh(adev, rd->id.grbm.se,
+					rd->id.grbm.sh,
+					rd->id.grbm.instance);
+	}
+
+	if (rd->id.use_srbm) {
+		mutex_lock(&adev->srbm_mutex);
+		amdgpu_gfx_select_me_pipe_q(adev, rd->id.srbm.me, rd->id.srbm.pipe,
+					    rd->id.srbm.queue, rd->id.srbm.vmid);
+	}
+
+	if (rd->id.pg_lock)
+		mutex_lock(&adev->pm.mutex);
+
+	while (size) {
+		if (!write_en) {
+			value = RREG32(offset >> 2);
+			r = put_user(value, (uint32_t *)buf);
+		} else {
+			r = get_user(value, (uint32_t *)buf);
+			if (!r)
+				amdgpu_mm_wreg_mmio_rlc(adev, offset >> 2, value);
+		}
+		if (r) {
+			result = r;
+			goto end;
+		}
+		offset += 4;
+		size -= 4;
+		result += 4;
+		buf += 4;
+	}
+
+end:
+	if (rd->id.use_grbm) {
+		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+		mutex_unlock(&adev->grbm_idx_mutex);
+	}
+
+	if (rd->id.use_srbm) {
+		amdgpu_gfx_select_me_pipe_q(adev, 0, 0, 0, 0);
+		mutex_unlock(&adev->srbm_mutex);
+	}
+
+	if (rd->id.pg_lock)
+		mutex_unlock(&adev->pm.mutex);
+
+	mutex_unlock(&rd->lock);
+
+	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+
+	amdgpu_virt_disable_access_debugfs(adev);
+	return result;
+}
+
+static long amdgpu_debugfs_regs2_ioctl(struct file *f, unsigned int cmd, unsigned long data)
+{
+	struct amdgpu_debugfs_regs2_data *rd = f->private_data;
+	int r;
+
+	switch (cmd) {
+	case AMDGPU_DEBUGFS_REGS2_IOC_SET_STATE:
+		mutex_lock(&rd->lock);
+		r = copy_from_user(&rd->id, (struct amdgpu_debugfs_regs2_iocdata *)data, sizeof rd->id);
+		mutex_unlock(&rd->lock);
+		return r ? -EINVAL : 0;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static ssize_t amdgpu_debugfs_regs2_read(struct file *f, char __user *buf, size_t size, loff_t *pos)
+{
+	return amdgpu_debugfs_regs2_op(f, buf, *pos, size, 0);
+}
+
+static ssize_t amdgpu_debugfs_regs2_write(struct file *f, const char __user *buf, size_t size, loff_t *pos)
+{
+	return amdgpu_debugfs_regs2_op(f, (char __user *)buf, *pos, size, 1);
+}

 /**
  * amdgpu_debugfs_regs_pcie_read - Read from a PCIE register
@@ -1091,6 +1152,16 @@ static ssize_t amdgpu_debugfs_gfxoff_read(struct file *f, char __user *buf,
 	return result;
 }

+static const struct file_operations amdgpu_debugfs_regs2_fops = {
+	.owner = THIS_MODULE,
+	.unlocked_ioctl = amdgpu_debugfs_regs2_ioctl,
+	.read = amdgpu_debugfs_regs2_read,
+	.write = amdgpu_debugfs_regs2_write,
+	.open = amdgpu_debugfs_regs2_open,
+	.release = amdgpu_debugfs_regs2_release,
+	.llseek = default_llseek
+};
+
 static const struct file_operations amdgpu_debugfs_regs_fops = {
 	.owner = THIS_MODULE,
 	.read = amdgpu_debugfs_regs_read,
@@ -1148,6 +1219,7 @@ static const struct file_operations amdgpu_debugfs_gfxoff_fops = {
 static const struct file_operations *debugfs_regs[] = {
 	&amdgpu_debugfs_regs_fops,
+	&amdgpu_debugfs_regs2_fops,
 	&amdgpu_debugfs_regs_didt_fops,
 	&amdgpu_debugfs_regs_pcie_fops,
 	&amdgpu_debugfs_regs_smc_fops,
@@ -1160,6 +1232,7 @@ static const struct file_operations *debugfs_regs[] = {
 static const char *debugfs_regs_names[] = {
 	"amdgpu_regs",
+	"amdgpu_regs2",
 	"amdgpu_regs_didt",
 	"amdgpu_regs_pcie",
 	"amdgpu_regs_smc",
@@ -1206,7 +1279,7 @@ static int amdgpu_debugfs_test_ib_show(struct seq_file *m, void *unused)
 	}

 	/* Avoid accidently unparking the sched thread during GPU reset */
-	r = down_read_killable(&adev->reset_sem);
+	r = down_write_killable(&adev->reset_sem);
 	if (r)
 		return r;
@@ -1235,7 +1308,7 @@ static int amdgpu_debugfs_test_ib_show(struct seq_file *m, void *unused)
 		kthread_unpark(ring->sched.thread);
 	}

-	up_read(&adev->reset_sem);
+	up_write(&adev->reset_sem);

 	pm_runtime_mark_last_busy(dev->dev);
 	pm_runtime_put_autosuspend(dev->dev);
@@ -1255,7 +1328,7 @@ static int amdgpu_debugfs_evict_vram(void *data, u64 *val)
 		return r;
 	}

-	*val = amdgpu_bo_evict_vram(adev);
+	*val = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM);

 	pm_runtime_mark_last_busy(dev->dev);
 	pm_runtime_put_autosuspend(dev->dev);
@@ -1268,17 +1341,15 @@ static int amdgpu_debugfs_evict_gtt(void *data, u64 *val)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)data;
 	struct drm_device *dev = adev_to_drm(adev);
-	struct ttm_resource_manager *man;
 	int r;

 	r = pm_runtime_get_sync(dev->dev);
 	if (r < 0) {
-		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+		pm_runtime_put_autosuspend(dev->dev);
 		return r;
 	}

-	man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT);
-	*val = ttm_resource_manager_evict_all(&adev->mman.bdev, man);
+	*val = amdgpu_ttm_evict_resources(adev, TTM_PL_TT);

 	pm_runtime_mark_last_busy(dev->dev);
 	pm_runtime_put_autosuspend(dev->dev);
@@ -1544,20 +1615,21 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
 	struct dentry *ent;
 	int r, i;

+	if (!debugfs_initialized())
+		return 0;
+
 	ent = debugfs_create_file("amdgpu_preempt_ib", 0600, root, adev,
 				  &fops_ib_preempt);
-	if (!ent) {
+	if (IS_ERR(ent)) {
 		DRM_ERROR("unable to create amdgpu_preempt_ib debugsfs file\n");
-		return -EIO;
+		return PTR_ERR(ent);
 	}

 	ent = debugfs_create_file("amdgpu_force_sclk", 0200, root, adev,
 				  &fops_sclk_set);
-	if (!ent) {
+	if (IS_ERR(ent)) {
 		DRM_ERROR("unable to create amdgpu_set_sclk debugsfs file\n");
-		return -EIO;
+		return PTR_ERR(ent);
 	}

 	/* Register debugfs entries for amdgpu_ttm */
@@ -1584,13 +1656,10 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
 		if (!ring)
 			continue;

-		if (amdgpu_debugfs_ring_init(adev, ring)) {
-			DRM_ERROR("Failed to register debugfs file for rings !\n");
-		}
+		amdgpu_debugfs_ring_init(adev, ring);
 	}

 	amdgpu_ras_debugfs_create_all(adev);
-	amdgpu_debugfs_autodump_init(adev);
 	amdgpu_rap_debugfs_init(adev);
 	amdgpu_securedisplay_debugfs_init(adev);
 	amdgpu_fw_attestation_debugfs_init(adev);
@@ -1609,6 +1678,11 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
 	debugfs_create_blob("amdgpu_vbios", 0444, root,
 			    &adev->debugfs_vbios_blob);

+	adev->debugfs_discovery_blob.data = adev->mman.discovery_bin;
+	adev->debugfs_discovery_blob.size = adev->mman.discovery_tmr_size;
+	debugfs_create_blob("amdgpu_discovery", 0444, root,
+			    &adev->debugfs_discovery_blob);
+
 	return 0;
 }
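How the new amdgpu_regs2 file is meant to be driven from userspace, going by the hunks above: open it, optionally push a bank selection with the SET_STATE ioctl, then pread()/pwrite() at the register offset. The debugfs path and the assumption that the kzalloc'ed (all-zero) iocdata means a plain read are inferences from the code, not a documented ABI:

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    /* Read one 32-bit MMIO register through amdgpu_regs2. */
    static int read_reg(const char *path, uint32_t offset, uint32_t *value)
    {
        int fd = open(path, O_RDONLY);

        if (fd < 0)
            return -1;
        /* No SET_STATE ioctl issued: use_grbm, use_srbm and pg_lock all
         * stay zero, so no bank select or PG lock is taken. */
        ssize_t n = pread(fd, value, sizeof(*value), offset);
        close(fd);
        return n == (ssize_t)sizeof(*value) ? 0 : -1;
    }

    int main(void)
    {
        uint32_t v;

        if (!read_reg("/sys/kernel/debug/dri/0/amdgpu_regs2", 0x0, &v))
            printf("reg 0x0 = %#x\n", v);
        return 0;
    }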

View File

@@ -22,14 +22,9 @@
  * OTHER DEALINGS IN THE SOFTWARE.
  *
  */

 /*
  * Debugfs
  */
-struct amdgpu_autodump {
-	struct completion		dumping;
-	struct wait_queue_head		gpu_hang;
-};
-
 int amdgpu_debugfs_regs_init(struct amdgpu_device *adev);
 int amdgpu_debugfs_init(struct amdgpu_device *adev);
@@ -37,4 +32,3 @@ void amdgpu_debugfs_fini(struct amdgpu_device *adev);
 void amdgpu_debugfs_fence_init(struct amdgpu_device *adev);
 void amdgpu_debugfs_firmware_init(struct amdgpu_device *adev);
 void amdgpu_debugfs_gem_init(struct amdgpu_device *adev);
-int amdgpu_debugfs_wait_dump(struct amdgpu_device *adev);

View File

@@ -125,6 +125,7 @@ const char *amdgpu_asic_name[] = {
"DIMGREY_CAVEFISH", "DIMGREY_CAVEFISH",
"BEIGE_GOBY", "BEIGE_GOBY",
"YELLOW_CARP", "YELLOW_CARP",
"IP DISCOVERY",
"LAST", "LAST",
}; };
@@ -305,7 +306,7 @@ void amdgpu_device_mm_access(struct amdgpu_device *adev, loff_t pos,
uint64_t last; uint64_t last;
int idx; int idx;
if (!drm_dev_enter(&adev->ddev, &idx)) if (!drm_dev_enter(adev_to_drm(adev), &idx))
return; return;
BUG_ON(!IS_ALIGNED(pos, 4) || !IS_ALIGNED(size, 4)); BUG_ON(!IS_ALIGNED(pos, 4) || !IS_ALIGNED(size, 4));
@@ -2126,46 +2127,11 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
if (r) if (r)
return r; return r;
break; break;
case CHIP_VEGA10:
case CHIP_VEGA12:
case CHIP_VEGA20:
case CHIP_RAVEN:
case CHIP_ARCTURUS:
case CHIP_RENOIR:
case CHIP_ALDEBARAN:
if (adev->flags & AMD_IS_APU)
adev->family = AMDGPU_FAMILY_RV;
else
adev->family = AMDGPU_FAMILY_AI;
r = soc15_set_ip_blocks(adev);
if (r)
return r;
break;
case CHIP_NAVI10:
case CHIP_NAVI14:
case CHIP_NAVI12:
case CHIP_SIENNA_CICHLID:
case CHIP_NAVY_FLOUNDER:
case CHIP_DIMGREY_CAVEFISH:
case CHIP_BEIGE_GOBY:
case CHIP_VANGOGH:
case CHIP_YELLOW_CARP:
case CHIP_CYAN_SKILLFISH:
if (adev->asic_type == CHIP_VANGOGH)
adev->family = AMDGPU_FAMILY_VGH;
else if (adev->asic_type == CHIP_YELLOW_CARP)
adev->family = AMDGPU_FAMILY_YC;
else
adev->family = AMDGPU_FAMILY_NV;
r = nv_set_ip_blocks(adev);
if (r)
return r;
break;
default: default:
/* FIXME: not supported yet */ r = amdgpu_discovery_set_ip_blocks(adev);
return -EINVAL; if (r)
return r;
break;
} }
amdgpu_amdkfd_device_probe(adev); amdgpu_amdkfd_device_probe(adev);
@@ -2432,6 +2398,10 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
if (!adev->gmc.xgmi.pending_reset) if (!adev->gmc.xgmi.pending_reset)
amdgpu_amdkfd_device_init(adev); amdgpu_amdkfd_device_init(adev);
r = amdgpu_amdkfd_resume_iommu(adev);
if (r)
goto init_failed;
amdgpu_fru_get_product_info(adev); amdgpu_fru_get_product_info(adev);
init_failed: init_failed:
@@ -2741,6 +2711,11 @@ static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
adev->ip_blocks[i].status.hw = false; adev->ip_blocks[i].status.hw = false;
} }
if (amdgpu_sriov_vf(adev)) {
if (amdgpu_virt_release_full_gpu(adev, false))
DRM_ERROR("failed to release exclusive mode on fini\n");
}
return 0; return 0;
} }
@@ -2801,10 +2776,6 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
amdgpu_ras_fini(adev); amdgpu_ras_fini(adev);
if (amdgpu_sriov_vf(adev))
if (amdgpu_virt_release_full_gpu(adev, false))
DRM_ERROR("failed to release exclusive mode on fini\n");
return 0; return 0;
} }
@@ -3148,6 +3119,10 @@ static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
{ {
int r; int r;
r = amdgpu_amdkfd_resume_iommu(adev);
if (r)
return r;
r = amdgpu_device_ip_resume_phase1(adev); r = amdgpu_device_ip_resume_phase1(adev);
if (r) if (r)
return r; return r;
@@ -3232,6 +3207,7 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
case CHIP_NAVI14: case CHIP_NAVI14:
case CHIP_NAVI12: case CHIP_NAVI12:
case CHIP_RENOIR: case CHIP_RENOIR:
case CHIP_CYAN_SKILLFISH:
case CHIP_SIENNA_CICHLID: case CHIP_SIENNA_CICHLID:
case CHIP_NAVY_FLOUNDER: case CHIP_NAVY_FLOUNDER:
case CHIP_DIMGREY_CAVEFISH: case CHIP_DIMGREY_CAVEFISH:
@@ -3239,13 +3215,15 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
case CHIP_VANGOGH: case CHIP_VANGOGH:
case CHIP_YELLOW_CARP: case CHIP_YELLOW_CARP:
#endif #endif
default:
return amdgpu_dc != 0; return amdgpu_dc != 0;
#endif #else
default: default:
if (amdgpu_dc > 0) if (amdgpu_dc > 0)
DRM_INFO_ONCE("Display Core has been requested via kernel parameter " DRM_INFO_ONCE("Display Core has been requested via kernel parameter "
"but isn't supported by ASIC, ignoring\n"); "but isn't supported by ASIC, ignoring\n");
return false; return false;
#endif
} }
} }
@@ -3346,6 +3324,8 @@ static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
continue; continue;
} else if (timeout < 0) { } else if (timeout < 0) {
timeout = MAX_SCHEDULE_TIMEOUT; timeout = MAX_SCHEDULE_TIMEOUT;
dev_warn(adev->dev, "lockup timeout disabled");
add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
} else { } else {
timeout = msecs_to_jiffies(timeout); timeout = msecs_to_jiffies(timeout);
} }
@@ -3530,17 +3510,6 @@ int amdgpu_device_init(struct amdgpu_device *adev,
DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base); DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size); DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
/* enable PCIE atomic ops */
r = pci_enable_atomic_ops_to_root(adev->pdev,
PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
PCI_EXP_DEVCAP2_ATOMIC_COMP64);
if (r) {
adev->have_atomics_support = false;
DRM_INFO("PCIE atomic ops is not supported\n");
} else {
adev->have_atomics_support = true;
}
amdgpu_device_get_pcie_info(adev); amdgpu_device_get_pcie_info(adev);
if (amdgpu_mcbp) if (amdgpu_mcbp)
@@ -3563,6 +3532,19 @@ int amdgpu_device_init(struct amdgpu_device *adev,
if (r) if (r)
return r; return r;
/* enable PCIE atomic ops */
if (amdgpu_sriov_vf(adev))
adev->have_atomics_support = ((struct amd_sriov_msg_pf2vf_info *)
adev->virt.fw_reserve.p_pf2vf)->pcie_atomic_ops_enabled_flags ==
(PCI_EXP_DEVCAP2_ATOMIC_COMP32 | PCI_EXP_DEVCAP2_ATOMIC_COMP64);
else
adev->have_atomics_support =
!pci_enable_atomic_ops_to_root(adev->pdev,
PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
PCI_EXP_DEVCAP2_ATOMIC_COMP64);
if (!adev->have_atomics_support)
dev_info(adev->dev, "PCIE atomic ops is not supported\n");
/* doorbell bar mapping and doorbell index init*/ /* doorbell bar mapping and doorbell index init*/
amdgpu_device_doorbell_init(adev); amdgpu_device_doorbell_init(adev);
@@ -3861,6 +3843,8 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
amdgpu_device_ip_fini_early(adev); amdgpu_device_ip_fini_early(adev);
ttm_device_clear_dma_mappings(&adev->mman.bdev);
amdgpu_gart_dummy_page_fini(adev); amdgpu_gart_dummy_page_fini(adev);
amdgpu_device_unmap_mmio(adev); amdgpu_device_unmap_mmio(adev);
@@ -3901,6 +3885,25 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev)
} }
/**
* amdgpu_device_evict_resources - evict device resources
* @adev: amdgpu device object
*
* Evicts all ttm device resources(vram BOs, gart table) from the lru list
* of the vram memory type. Mainly used for evicting device resources
* at suspend time.
*
*/
static void amdgpu_device_evict_resources(struct amdgpu_device *adev)
{
/* No need to evict vram on APUs for suspend to ram */
if (adev->in_s3 && (adev->flags & AMD_IS_APU))
return;
if (amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM))
DRM_WARN("evicting device resources failed\n");
}
/* /*
* Suspend & resume. * Suspend & resume.
@@ -3941,17 +3944,16 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
if (!adev->in_s0ix) if (!adev->in_s0ix)
amdgpu_amdkfd_suspend(adev, adev->in_runpm); amdgpu_amdkfd_suspend(adev, adev->in_runpm);
/* evict vram memory */ /* First evict vram memory */
amdgpu_bo_evict_vram(adev); amdgpu_device_evict_resources(adev);
amdgpu_fence_driver_hw_fini(adev); amdgpu_fence_driver_hw_fini(adev);
amdgpu_device_ip_suspend_phase2(adev); amdgpu_device_ip_suspend_phase2(adev);
/* evict remaining vram memory /* This second call to evict device resources is to evict
* This second call to evict vram is to evict the gart page table * the gart page table using the CPU.
* using the CPU.
*/ */
amdgpu_bo_evict_vram(adev); amdgpu_device_evict_resources(adev);
return 0; return 0;
} }
@@ -4458,10 +4460,6 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
if (reset_context->reset_req_dev == adev) if (reset_context->reset_req_dev == adev)
job = reset_context->job; job = reset_context->job;
/* no need to dump if device is not in good state during probe period */
if (!adev->gmc.xgmi.pending_reset)
amdgpu_debugfs_wait_dump(adev);
if (amdgpu_sriov_vf(adev)) { if (amdgpu_sriov_vf(adev)) {
/* stop the data exchange thread */ /* stop the data exchange thread */
amdgpu_virt_fini_data_exchange(adev); amdgpu_virt_fini_data_exchange(adev);
@@ -4601,6 +4599,10 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle,
dev_warn(tmp_adev->dev, "asic atom init failed!"); dev_warn(tmp_adev->dev, "asic atom init failed!");
} else { } else {
dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n"); dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
r = amdgpu_amdkfd_resume_iommu(tmp_adev);
if (r)
goto out;
r = amdgpu_device_ip_resume_phase1(tmp_adev); r = amdgpu_device_ip_resume_phase1(tmp_adev);
if (r) if (r)
goto out; goto out;
@@ -5387,6 +5389,8 @@ pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_sta
return PCI_ERS_RESULT_DISCONNECT; return PCI_ERS_RESULT_DISCONNECT;
} }
adev->pci_channel_state = state;
switch (state) { switch (state) {
case pci_channel_io_normal: case pci_channel_io_normal:
return PCI_ERS_RESULT_CAN_RECOVER; return PCI_ERS_RESULT_CAN_RECOVER;
@@ -5529,6 +5533,10 @@ void amdgpu_pci_resume(struct pci_dev *pdev)
DRM_INFO("PCI error: resume callback!!\n"); DRM_INFO("PCI error: resume callback!!\n");
/* Only continue execution for the case of pci_channel_io_frozen */
if (adev->pci_channel_state != pci_channel_io_frozen)
return;
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = adev->rings[i]; struct amdgpu_ring *ring = adev->rings[i];
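The early return added above means the resume callback is a no-op unless the earlier error_detected callback saw pci_channel_io_frozen, presumably because the frozen path is the one that tears down ring scheduling and therefore the only one with work to undo here.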

View File

@@ -52,6 +52,7 @@ struct amdgpu_df_funcs {
uint64_t (*get_fica)(struct amdgpu_device *adev, uint32_t ficaa_val); uint64_t (*get_fica)(struct amdgpu_device *adev, uint32_t ficaa_val);
void (*set_fica)(struct amdgpu_device *adev, uint32_t ficaa_val, void (*set_fica)(struct amdgpu_device *adev, uint32_t ficaa_val,
uint32_t ficadl_val, uint32_t ficadh_val); uint32_t ficadl_val, uint32_t ficadh_val);
bool (*query_ras_poison_mode)(struct amdgpu_device *adev);
}; };
struct amdgpu_df { struct amdgpu_df {

View File

@@ -21,16 +21,58 @@
* *
*/ */
#include <linux/firmware.h>
#include "amdgpu.h" #include "amdgpu.h"
#include "amdgpu_discovery.h" #include "amdgpu_discovery.h"
#include "soc15_hw_ip.h" #include "soc15_hw_ip.h"
#include "discovery.h" #include "discovery.h"
#include "soc15.h"
#include "gfx_v9_0.h"
#include "gmc_v9_0.h"
#include "df_v1_7.h"
#include "df_v3_6.h"
#include "nbio_v6_1.h"
#include "nbio_v7_0.h"
#include "nbio_v7_4.h"
#include "hdp_v4_0.h"
#include "vega10_ih.h"
#include "vega20_ih.h"
#include "sdma_v4_0.h"
#include "uvd_v7_0.h"
#include "vce_v4_0.h"
#include "vcn_v1_0.h"
#include "vcn_v2_5.h"
#include "jpeg_v2_5.h"
#include "smuio_v9_0.h"
#include "gmc_v10_0.h"
#include "gfxhub_v2_0.h"
#include "mmhub_v2_0.h"
#include "nbio_v2_3.h"
#include "nbio_v7_2.h"
#include "hdp_v5_0.h"
#include "nv.h"
#include "navi10_ih.h"
#include "gfx_v10_0.h"
#include "sdma_v5_0.h"
#include "sdma_v5_2.h"
#include "vcn_v2_0.h"
#include "jpeg_v2_0.h"
#include "vcn_v3_0.h"
#include "jpeg_v3_0.h"
#include "amdgpu_vkms.h"
#include "mes_v10_1.h"
#include "smuio_v11_0.h"
#include "smuio_v11_0_6.h"
#include "smuio_v13_0.h"
MODULE_FIRMWARE("amdgpu/ip_discovery.bin");
#define mmRCC_CONFIG_MEMSIZE 0xde3 #define mmRCC_CONFIG_MEMSIZE 0xde3
#define mmMM_INDEX 0x0 #define mmMM_INDEX 0x0
#define mmMM_INDEX_HI 0x6 #define mmMM_INDEX_HI 0x6
#define mmMM_DATA 0x1 #define mmMM_DATA 0x1
#define HW_ID_MAX 300
static const char *hw_id_names[HW_ID_MAX] = { static const char *hw_id_names[HW_ID_MAX] = {
[MP1_HWID] = "MP1", [MP1_HWID] = "MP1",
@@ -129,6 +171,8 @@ static int hw_id_map[MAX_HWIP] = {
[THM_HWIP] = THM_HWID, [THM_HWIP] = THM_HWID,
[CLK_HWIP] = CLKA_HWID, [CLK_HWIP] = CLKA_HWID,
[UMC_HWIP] = UMC_HWID, [UMC_HWIP] = UMC_HWID,
[XGMI_HWIP] = XGMI_HWID,
[DCI_HWIP] = DCI_HWID,
}; };
static int amdgpu_discovery_read_binary(struct amdgpu_device *adev, uint8_t *binary) static int amdgpu_discovery_read_binary(struct amdgpu_device *adev, uint8_t *binary)
@@ -164,6 +208,7 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
struct binary_header *bhdr; struct binary_header *bhdr;
struct ip_discovery_header *ihdr; struct ip_discovery_header *ihdr;
struct gpu_info_header *ghdr; struct gpu_info_header *ghdr;
const struct firmware *fw;
uint16_t offset; uint16_t offset;
uint16_t size; uint16_t size;
uint16_t checksum; uint16_t checksum;
@@ -174,11 +219,22 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
if (!adev->mman.discovery_bin) if (!adev->mman.discovery_bin)
return -ENOMEM; return -ENOMEM;
if (amdgpu_discovery == 2) {
r = request_firmware(&fw, "amdgpu/ip_discovery.bin", adev->dev);
if (r)
goto get_from_vram;
dev_info(adev->dev, "Using IP discovery from file\n");
memcpy((u8 *)adev->mman.discovery_bin, (u8 *)fw->data,
adev->mman.discovery_tmr_size);
release_firmware(fw);
} else {
get_from_vram:
r = amdgpu_discovery_read_binary(adev, adev->mman.discovery_bin); r = amdgpu_discovery_read_binary(adev, adev->mman.discovery_bin);
if (r) { if (r) {
DRM_ERROR("failed to read ip discovery binary\n"); DRM_ERROR("failed to read ip discovery binary\n");
goto out; goto out;
} }
}
bhdr = (struct binary_header *)adev->mman.discovery_bin; bhdr = (struct binary_header *)adev->mman.discovery_bin;
@@ -245,6 +301,22 @@ void amdgpu_discovery_fini(struct amdgpu_device *adev)
adev->mman.discovery_bin = NULL; adev->mman.discovery_bin = NULL;
} }
static int amdgpu_discovery_validate_ip(const struct ip *ip)
{
if (ip->number_instance >= HWIP_MAX_INSTANCE) {
DRM_ERROR("Unexpected number_instance (%d) from ip discovery blob\n",
ip->number_instance);
return -EINVAL;
}
if (le16_to_cpu(ip->hw_id) >= HW_ID_MAX) {
DRM_ERROR("Unexpected hw_id (%d) from ip discovery blob\n",
le16_to_cpu(ip->hw_id));
return -EINVAL;
}
return 0;
}
int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev) int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
{ {
struct binary_header *bhdr; struct binary_header *bhdr;
@@ -290,6 +362,10 @@ int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
for (j = 0; j < num_ips; j++) { for (j = 0; j < num_ips; j++) {
ip = (struct ip *)(adev->mman.discovery_bin + ip_offset); ip = (struct ip *)(adev->mman.discovery_bin + ip_offset);
if (amdgpu_discovery_validate_ip(ip))
goto next_ip;
num_base_address = ip->num_base_address; num_base_address = ip->num_base_address;
DRM_DEBUG("%s(%d) #%d v%d.%d.%d:\n", DRM_DEBUG("%s(%d) #%d v%d.%d.%d:\n",
@@ -301,6 +377,11 @@ int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
if (le16_to_cpu(ip->hw_id) == VCN_HWID) if (le16_to_cpu(ip->hw_id) == VCN_HWID)
adev->vcn.num_vcn_inst++; adev->vcn.num_vcn_inst++;
if (le16_to_cpu(ip->hw_id) == SDMA0_HWID ||
le16_to_cpu(ip->hw_id) == SDMA1_HWID ||
le16_to_cpu(ip->hw_id) == SDMA2_HWID ||
le16_to_cpu(ip->hw_id) == SDMA3_HWID)
adev->sdma.num_instances++;
for (k = 0; k < num_base_address; k++) { for (k = 0; k < num_base_address; k++) {
/* /*
@@ -317,10 +398,21 @@ int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
hw_id_names[le16_to_cpu(ip->hw_id)]); hw_id_names[le16_to_cpu(ip->hw_id)]);
adev->reg_offset[hw_ip][ip->number_instance] = adev->reg_offset[hw_ip][ip->number_instance] =
ip->base_address; ip->base_address;
/* Instance support is somewhat inconsistent.
* SDMA is a good example. Sienna cichlid has 4 total
* SDMA instances, each enumerated separately (HWIDs
* 42, 43, 68, 69). Arcturus has 8 total SDMA instances,
* but they are enumerated as multiple instances of the
* same HWIDs (4x HWID 42, 4x HWID 43). UMC is another
* example. On most chips there are multiple instances
* with the same HWID.
*/
adev->ip_versions[hw_ip][ip->number_instance] =
IP_VERSION(ip->major, ip->minor, ip->revision);
}
} }
} next_ip:
ip_offset += sizeof(*ip) + 4 * (ip->num_base_address - 1); ip_offset += sizeof(*ip) + 4 * (ip->num_base_address - 1);
} }
} }
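The stride computation above, sizeof(*ip) + 4 * (ip->num_base_address - 1), follows from the variable-length record layout of the discovery table. A sketch of the record, assuming the field names from discovery.h in the same tree (the one-element array is the flexible tail holding the per-instance register bases):

struct ip {
	uint16_t hw_id;
	uint8_t  number_instance;
	uint8_t  num_base_address;
	uint8_t  major;
	uint8_t  minor;
	uint8_t  revision;
	uint8_t  harvest  : 4;
	uint8_t  reserved : 4;
	uint32_t base_address[1];	/* num_base_address entries in total */
};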
@@ -401,6 +493,10 @@ void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
switch (le32_to_cpu(harvest_info->list[i].hw_id)) { switch (le32_to_cpu(harvest_info->list[i].hw_id)) {
case VCN_HWID: case VCN_HWID:
vcn_harvest_count++; vcn_harvest_count++;
if (harvest_info->list[i].number_instance == 0)
adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN0;
else
adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
break; break;
case DMU_HWID: case DMU_HWID:
adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK; adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
@@ -413,6 +509,13 @@ void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
adev->harvest_ip_mask |= AMD_HARVEST_IP_VCN_MASK; adev->harvest_ip_mask |= AMD_HARVEST_IP_VCN_MASK;
adev->harvest_ip_mask |= AMD_HARVEST_IP_JPEG_MASK; adev->harvest_ip_mask |= AMD_HARVEST_IP_JPEG_MASK;
} }
if ((adev->pdev->device == 0x731E &&
(adev->pdev->revision == 0xC6 || adev->pdev->revision == 0xC7)) ||
(adev->pdev->device == 0x7340 && adev->pdev->revision == 0xC9) ||
(adev->pdev->device == 0x7360 && adev->pdev->revision == 0xC7)) {
adev->harvest_ip_mask |= AMD_HARVEST_IP_VCN_MASK;
adev->harvest_ip_mask |= AMD_HARVEST_IP_JPEG_MASK;
}
} }
int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev) int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
@@ -450,3 +553,731 @@ int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
return 0; return 0;
} }
static int amdgpu_discovery_set_common_ip_blocks(struct amdgpu_device *adev)
{
/* what IP to use for this? */
switch (adev->ip_versions[GC_HWIP][0]) {
case IP_VERSION(9, 0, 1):
case IP_VERSION(9, 1, 0):
case IP_VERSION(9, 2, 1):
case IP_VERSION(9, 2, 2):
case IP_VERSION(9, 3, 0):
case IP_VERSION(9, 4, 0):
case IP_VERSION(9, 4, 1):
case IP_VERSION(9, 4, 2):
amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
break;
case IP_VERSION(10, 1, 10):
case IP_VERSION(10, 1, 1):
case IP_VERSION(10, 1, 2):
case IP_VERSION(10, 1, 3):
case IP_VERSION(10, 3, 0):
case IP_VERSION(10, 3, 1):
case IP_VERSION(10, 3, 2):
case IP_VERSION(10, 3, 3):
case IP_VERSION(10, 3, 4):
case IP_VERSION(10, 3, 5):
amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
break;
default:
return -EINVAL;
}
return 0;
}
static int amdgpu_discovery_set_gmc_ip_blocks(struct amdgpu_device *adev)
{
/* use GC or MMHUB IP version */
switch (adev->ip_versions[GC_HWIP][0]) {
case IP_VERSION(9, 0, 1):
case IP_VERSION(9, 1, 0):
case IP_VERSION(9, 2, 1):
case IP_VERSION(9, 2, 2):
case IP_VERSION(9, 3, 0):
case IP_VERSION(9, 4, 0):
case IP_VERSION(9, 4, 1):
case IP_VERSION(9, 4, 2):
amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
break;
case IP_VERSION(10, 1, 10):
case IP_VERSION(10, 1, 1):
case IP_VERSION(10, 1, 2):
case IP_VERSION(10, 1, 3):
case IP_VERSION(10, 3, 0):
case IP_VERSION(10, 3, 1):
case IP_VERSION(10, 3, 2):
case IP_VERSION(10, 3, 3):
case IP_VERSION(10, 3, 4):
case IP_VERSION(10, 3, 5):
amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
break;
default:
return -EINVAL;
}
return 0;
}
static int amdgpu_discovery_set_ih_ip_blocks(struct amdgpu_device *adev)
{
switch (adev->ip_versions[OSSSYS_HWIP][0]) {
case IP_VERSION(4, 0, 0):
case IP_VERSION(4, 0, 1):
case IP_VERSION(4, 1, 0):
case IP_VERSION(4, 1, 1):
case IP_VERSION(4, 3, 0):
amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
break;
case IP_VERSION(4, 2, 0):
case IP_VERSION(4, 2, 1):
case IP_VERSION(4, 4, 0):
amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block);
break;
case IP_VERSION(5, 0, 0):
case IP_VERSION(5, 0, 1):
case IP_VERSION(5, 0, 2):
case IP_VERSION(5, 0, 3):
case IP_VERSION(5, 2, 0):
case IP_VERSION(5, 2, 1):
amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
break;
default:
return -EINVAL;
}
return 0;
}
static int amdgpu_discovery_set_psp_ip_blocks(struct amdgpu_device *adev)
{
switch (adev->ip_versions[MP0_HWIP][0]) {
case IP_VERSION(9, 0, 0):
amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
break;
case IP_VERSION(10, 0, 0):
case IP_VERSION(10, 0, 1):
amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block);
break;
case IP_VERSION(11, 0, 0):
case IP_VERSION(11, 0, 2):
case IP_VERSION(11, 0, 4):
case IP_VERSION(11, 0, 5):
case IP_VERSION(11, 0, 9):
case IP_VERSION(11, 0, 7):
case IP_VERSION(11, 0, 11):
case IP_VERSION(11, 0, 12):
case IP_VERSION(11, 0, 13):
case IP_VERSION(11, 5, 0):
amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
break;
case IP_VERSION(11, 0, 8):
amdgpu_device_ip_block_add(adev, &psp_v11_0_8_ip_block);
break;
case IP_VERSION(11, 0, 3):
case IP_VERSION(12, 0, 1):
amdgpu_device_ip_block_add(adev, &psp_v12_0_ip_block);
break;
case IP_VERSION(13, 0, 1):
case IP_VERSION(13, 0, 2):
case IP_VERSION(13, 0, 3):
amdgpu_device_ip_block_add(adev, &psp_v13_0_ip_block);
break;
default:
return -EINVAL;
}
return 0;
}
static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev)
{
switch (adev->ip_versions[MP1_HWIP][0]) {
case IP_VERSION(9, 0, 0):
case IP_VERSION(10, 0, 0):
case IP_VERSION(10, 0, 1):
case IP_VERSION(11, 0, 2):
if (adev->asic_type == CHIP_ARCTURUS)
amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
else
amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
break;
case IP_VERSION(11, 0, 0):
case IP_VERSION(11, 0, 5):
case IP_VERSION(11, 0, 9):
case IP_VERSION(11, 0, 7):
case IP_VERSION(11, 0, 8):
case IP_VERSION(11, 0, 11):
case IP_VERSION(11, 0, 12):
case IP_VERSION(11, 0, 13):
case IP_VERSION(11, 5, 0):
amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
break;
case IP_VERSION(12, 0, 0):
case IP_VERSION(12, 0, 1):
amdgpu_device_ip_block_add(adev, &smu_v12_0_ip_block);
break;
case IP_VERSION(13, 0, 1):
case IP_VERSION(13, 0, 2):
case IP_VERSION(13, 0, 3):
amdgpu_device_ip_block_add(adev, &smu_v13_0_ip_block);
break;
default:
return -EINVAL;
}
return 0;
}
static int amdgpu_discovery_set_display_ip_blocks(struct amdgpu_device *adev)
{
if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) {
amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
} else if (adev->ip_versions[DCE_HWIP][0]) {
switch (adev->ip_versions[DCE_HWIP][0]) {
case IP_VERSION(1, 0, 0):
case IP_VERSION(1, 0, 1):
case IP_VERSION(2, 0, 2):
case IP_VERSION(2, 0, 0):
case IP_VERSION(2, 1, 0):
case IP_VERSION(3, 0, 0):
case IP_VERSION(3, 0, 2):
case IP_VERSION(3, 0, 3):
case IP_VERSION(3, 0, 1):
case IP_VERSION(3, 1, 2):
case IP_VERSION(3, 1, 3):
amdgpu_device_ip_block_add(adev, &dm_ip_block);
break;
case IP_VERSION(2, 0, 3):
break;
default:
return -EINVAL;
}
} else if (adev->ip_versions[DCI_HWIP][0]) {
switch (adev->ip_versions[DCI_HWIP][0]) {
case IP_VERSION(12, 0, 0):
case IP_VERSION(12, 0, 1):
case IP_VERSION(12, 1, 0):
amdgpu_device_ip_block_add(adev, &dm_ip_block);
break;
default:
return -EINVAL;
}
#endif
}
return 0;
}
static int amdgpu_discovery_set_gc_ip_blocks(struct amdgpu_device *adev)
{
switch (adev->ip_versions[GC_HWIP][0]) {
case IP_VERSION(9, 0, 1):
case IP_VERSION(9, 1, 0):
case IP_VERSION(9, 2, 1):
case IP_VERSION(9, 2, 2):
case IP_VERSION(9, 3, 0):
case IP_VERSION(9, 4, 0):
case IP_VERSION(9, 4, 1):
case IP_VERSION(9, 4, 2):
amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
break;
case IP_VERSION(10, 1, 10):
case IP_VERSION(10, 1, 2):
case IP_VERSION(10, 1, 1):
case IP_VERSION(10, 1, 3):
case IP_VERSION(10, 3, 0):
case IP_VERSION(10, 3, 2):
case IP_VERSION(10, 3, 1):
case IP_VERSION(10, 3, 4):
case IP_VERSION(10, 3, 5):
case IP_VERSION(10, 3, 3):
amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
break;
default:
return -EINVAL;
}
return 0;
}
static int amdgpu_discovery_set_sdma_ip_blocks(struct amdgpu_device *adev)
{
switch (adev->ip_versions[SDMA0_HWIP][0]) {
case IP_VERSION(4, 0, 0):
case IP_VERSION(4, 0, 1):
case IP_VERSION(4, 1, 0):
case IP_VERSION(4, 1, 1):
case IP_VERSION(4, 1, 2):
case IP_VERSION(4, 2, 0):
case IP_VERSION(4, 2, 2):
case IP_VERSION(4, 4, 0):
amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
break;
case IP_VERSION(5, 0, 0):
case IP_VERSION(5, 0, 1):
case IP_VERSION(5, 0, 2):
case IP_VERSION(5, 0, 5):
amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
break;
case IP_VERSION(5, 2, 0):
case IP_VERSION(5, 2, 2):
case IP_VERSION(5, 2, 4):
case IP_VERSION(5, 2, 5):
case IP_VERSION(5, 2, 3):
case IP_VERSION(5, 2, 1):
amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
break;
default:
return -EINVAL;
}
return 0;
}
static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev)
{
if (adev->ip_versions[VCE_HWIP][0]) {
switch (adev->ip_versions[UVD_HWIP][0]) {
case IP_VERSION(7, 0, 0):
case IP_VERSION(7, 2, 0):
/* UVD is not supported on vega20 SR-IOV */
if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev)))
amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block);
break;
default:
return -EINVAL;
}
switch (adev->ip_versions[VCE_HWIP][0]) {
case IP_VERSION(4, 0, 0):
case IP_VERSION(4, 1, 0):
/* VCE is not supported on vega20 SR-IOV */
if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev)))
amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block);
break;
default:
return -EINVAL;
}
} else {
switch (adev->ip_versions[UVD_HWIP][0]) {
case IP_VERSION(1, 0, 0):
case IP_VERSION(1, 0, 1):
amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block);
break;
case IP_VERSION(2, 0, 0):
case IP_VERSION(2, 0, 2):
case IP_VERSION(2, 2, 0):
amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
break;
case IP_VERSION(2, 0, 3):
break;
case IP_VERSION(2, 5, 0):
amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block);
amdgpu_device_ip_block_add(adev, &jpeg_v2_5_ip_block);
break;
case IP_VERSION(2, 6, 0):
amdgpu_device_ip_block_add(adev, &vcn_v2_6_ip_block);
amdgpu_device_ip_block_add(adev, &jpeg_v2_6_ip_block);
break;
case IP_VERSION(3, 0, 0):
case IP_VERSION(3, 0, 16):
case IP_VERSION(3, 1, 1):
case IP_VERSION(3, 0, 2):
amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
if (!amdgpu_sriov_vf(adev))
amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
break;
case IP_VERSION(3, 0, 33):
amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
break;
default:
return -EINVAL;
}
}
return 0;
}
static int amdgpu_discovery_set_mes_ip_blocks(struct amdgpu_device *adev)
{
switch (adev->ip_versions[GC_HWIP][0]) {
case IP_VERSION(10, 1, 10):
case IP_VERSION(10, 1, 1):
case IP_VERSION(10, 1, 2):
case IP_VERSION(10, 1, 3):
case IP_VERSION(10, 3, 0):
case IP_VERSION(10, 3, 1):
case IP_VERSION(10, 3, 2):
case IP_VERSION(10, 3, 3):
case IP_VERSION(10, 3, 4):
case IP_VERSION(10, 3, 5):
amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block);
break;
default:
		break;
}
return 0;
}
int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
{
int r;
switch (adev->asic_type) {
case CHIP_VEGA10:
vega10_reg_base_init(adev);
adev->sdma.num_instances = 2;
adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 0, 0);
adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 0, 0);
adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 0);
adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 0, 0);
adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 0, 0);
adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 0);
adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(6, 1, 0);
adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 0, 0);
adev->ip_versions[MP0_HWIP][0] = IP_VERSION(9, 0, 0);
adev->ip_versions[MP1_HWIP][0] = IP_VERSION(9, 0, 0);
adev->ip_versions[THM_HWIP][0] = IP_VERSION(9, 0, 0);
adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(9, 0, 0);
adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 0, 1);
adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 0, 0);
adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 0, 0);
adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 0);
break;
case CHIP_VEGA12:
vega10_reg_base_init(adev);
adev->sdma.num_instances = 2;
adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 3, 0);
adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 3, 0);
adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 1);
adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 0, 1);
adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 0, 1);
adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 5, 0);
adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(6, 2, 0);
adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 0);
adev->ip_versions[MP0_HWIP][0] = IP_VERSION(9, 0, 0);
adev->ip_versions[MP1_HWIP][0] = IP_VERSION(9, 0, 0);
adev->ip_versions[THM_HWIP][0] = IP_VERSION(9, 0, 0);
adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(9, 0, 1);
adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 2, 1);
adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 0, 0);
adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 0, 0);
adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 1);
break;
case CHIP_RAVEN:
vega10_reg_base_init(adev);
adev->sdma.num_instances = 1;
adev->vcn.num_vcn_inst = 1;
if (adev->apu_flags & AMD_APU_IS_RAVEN2) {
adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 2, 0);
adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 2, 0);
adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 1, 1);
adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 1, 1);
adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 1, 1);
adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 1);
adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 0, 1);
adev->ip_versions[UMC_HWIP][0] = IP_VERSION(7, 5, 0);
adev->ip_versions[MP0_HWIP][0] = IP_VERSION(10, 0, 1);
adev->ip_versions[MP1_HWIP][0] = IP_VERSION(10, 0, 1);
adev->ip_versions[THM_HWIP][0] = IP_VERSION(10, 1, 0);
adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(10, 0, 1);
adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 2, 2);
adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 1);
adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 1);
} else {
adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 1, 0);
adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 1, 0);
adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 1, 0);
adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 1, 0);
adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 1, 0);
adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 0);
adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 0, 0);
adev->ip_versions[UMC_HWIP][0] = IP_VERSION(7, 0, 0);
adev->ip_versions[MP0_HWIP][0] = IP_VERSION(10, 0, 0);
adev->ip_versions[MP1_HWIP][0] = IP_VERSION(10, 0, 0);
adev->ip_versions[THM_HWIP][0] = IP_VERSION(10, 0, 0);
adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(10, 0, 0);
adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 1, 0);
adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 0);
adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 0);
}
break;
case CHIP_VEGA20:
vega20_reg_base_init(adev);
adev->sdma.num_instances = 2;
adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 0);
adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 0);
adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 0);
adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 2, 0);
adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 2, 0);
adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 0);
adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 0);
adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 1);
adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 2);
adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 2);
adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 2);
adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 2);
adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 0);
adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 2, 0);
adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 1, 0);
adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 1, 0);
break;
case CHIP_ARCTURUS:
arct_reg_base_init(adev);
adev->sdma.num_instances = 8;
adev->vcn.num_vcn_inst = 2;
adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 1);
adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 1);
adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 1);
adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 2, 1);
adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 2, 2);
adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 1);
adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 1);
adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 2);
adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 4);
adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 2);
adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 3);
adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 3);
adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 1);
adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 5, 0);
break;
case CHIP_ALDEBARAN:
aldebaran_reg_base_init(adev);
adev->sdma.num_instances = 5;
adev->vcn.num_vcn_inst = 2;
adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 2);
adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 2);
adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 4, 0);
adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 4, 0);
adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 4, 0);
adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 2);
adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 4);
adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 7, 0);
adev->ip_versions[MP0_HWIP][0] = IP_VERSION(13, 0, 2);
adev->ip_versions[MP1_HWIP][0] = IP_VERSION(13, 0, 2);
adev->ip_versions[THM_HWIP][0] = IP_VERSION(13, 0, 2);
adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(13, 0, 2);
adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 2);
adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 6, 0);
adev->ip_versions[XGMI_HWIP][0] = IP_VERSION(6, 1, 0);
break;
default:
r = amdgpu_discovery_reg_base_init(adev);
if (r)
return -EINVAL;
amdgpu_discovery_harvest_ip(adev);
if (!adev->mman.discovery_bin) {
DRM_ERROR("ip discovery uninitialized\n");
return -EINVAL;
}
break;
}
switch (adev->ip_versions[GC_HWIP][0]) {
case IP_VERSION(9, 0, 1):
case IP_VERSION(9, 2, 1):
case IP_VERSION(9, 4, 0):
case IP_VERSION(9, 4, 1):
case IP_VERSION(9, 4, 2):
adev->family = AMDGPU_FAMILY_AI;
break;
case IP_VERSION(9, 1, 0):
case IP_VERSION(9, 2, 2):
case IP_VERSION(9, 3, 0):
adev->family = AMDGPU_FAMILY_RV;
break;
case IP_VERSION(10, 1, 10):
case IP_VERSION(10, 1, 1):
case IP_VERSION(10, 1, 2):
case IP_VERSION(10, 1, 3):
case IP_VERSION(10, 3, 0):
case IP_VERSION(10, 3, 2):
case IP_VERSION(10, 3, 4):
case IP_VERSION(10, 3, 5):
adev->family = AMDGPU_FAMILY_NV;
break;
case IP_VERSION(10, 3, 1):
adev->family = AMDGPU_FAMILY_VGH;
break;
case IP_VERSION(10, 3, 3):
adev->family = AMDGPU_FAMILY_YC;
break;
default:
return -EINVAL;
}
if (adev->ip_versions[XGMI_HWIP][0] == IP_VERSION(4, 8, 0))
adev->gmc.xgmi.supported = true;
/* set NBIO version */
switch (adev->ip_versions[NBIO_HWIP][0]) {
case IP_VERSION(6, 1, 0):
case IP_VERSION(6, 2, 0):
adev->nbio.funcs = &nbio_v6_1_funcs;
adev->nbio.hdp_flush_reg = &nbio_v6_1_hdp_flush_reg;
break;
case IP_VERSION(7, 0, 0):
case IP_VERSION(7, 0, 1):
case IP_VERSION(2, 5, 0):
adev->nbio.funcs = &nbio_v7_0_funcs;
adev->nbio.hdp_flush_reg = &nbio_v7_0_hdp_flush_reg;
break;
case IP_VERSION(7, 4, 0):
case IP_VERSION(7, 4, 1):
case IP_VERSION(7, 4, 4):
adev->nbio.funcs = &nbio_v7_4_funcs;
adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg;
break;
case IP_VERSION(7, 2, 0):
case IP_VERSION(7, 2, 1):
case IP_VERSION(7, 5, 0):
adev->nbio.funcs = &nbio_v7_2_funcs;
adev->nbio.hdp_flush_reg = &nbio_v7_2_hdp_flush_reg;
break;
case IP_VERSION(2, 1, 1):
case IP_VERSION(2, 3, 0):
case IP_VERSION(2, 3, 1):
case IP_VERSION(2, 3, 2):
case IP_VERSION(3, 3, 0):
case IP_VERSION(3, 3, 1):
case IP_VERSION(3, 3, 2):
case IP_VERSION(3, 3, 3):
adev->nbio.funcs = &nbio_v2_3_funcs;
adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg;
break;
default:
break;
}
switch (adev->ip_versions[HDP_HWIP][0]) {
case IP_VERSION(4, 0, 0):
case IP_VERSION(4, 0, 1):
case IP_VERSION(4, 1, 0):
case IP_VERSION(4, 1, 1):
case IP_VERSION(4, 1, 2):
case IP_VERSION(4, 2, 0):
case IP_VERSION(4, 2, 1):
case IP_VERSION(4, 4, 0):
adev->hdp.funcs = &hdp_v4_0_funcs;
break;
case IP_VERSION(5, 0, 0):
case IP_VERSION(5, 0, 1):
case IP_VERSION(5, 0, 2):
case IP_VERSION(5, 0, 3):
case IP_VERSION(5, 0, 4):
case IP_VERSION(5, 2, 0):
adev->hdp.funcs = &hdp_v5_0_funcs;
break;
default:
break;
}
switch (adev->ip_versions[DF_HWIP][0]) {
case IP_VERSION(3, 6, 0):
case IP_VERSION(3, 6, 1):
case IP_VERSION(3, 6, 2):
adev->df.funcs = &df_v3_6_funcs;
break;
case IP_VERSION(2, 1, 0):
case IP_VERSION(2, 1, 1):
case IP_VERSION(2, 5, 0):
case IP_VERSION(3, 5, 1):
case IP_VERSION(3, 5, 2):
adev->df.funcs = &df_v1_7_funcs;
break;
default:
break;
}
switch (adev->ip_versions[SMUIO_HWIP][0]) {
case IP_VERSION(9, 0, 0):
case IP_VERSION(9, 0, 1):
case IP_VERSION(10, 0, 0):
case IP_VERSION(10, 0, 1):
case IP_VERSION(10, 0, 2):
adev->smuio.funcs = &smuio_v9_0_funcs;
break;
case IP_VERSION(11, 0, 0):
case IP_VERSION(11, 0, 2):
case IP_VERSION(11, 0, 3):
case IP_VERSION(11, 0, 4):
case IP_VERSION(11, 0, 7):
case IP_VERSION(11, 0, 8):
adev->smuio.funcs = &smuio_v11_0_funcs;
break;
case IP_VERSION(11, 0, 6):
case IP_VERSION(11, 0, 10):
case IP_VERSION(11, 0, 11):
case IP_VERSION(11, 5, 0):
case IP_VERSION(13, 0, 1):
adev->smuio.funcs = &smuio_v11_0_6_funcs;
break;
case IP_VERSION(13, 0, 2):
adev->smuio.funcs = &smuio_v13_0_funcs;
break;
default:
break;
}
r = amdgpu_discovery_set_common_ip_blocks(adev);
if (r)
return r;
r = amdgpu_discovery_set_gmc_ip_blocks(adev);
if (r)
return r;
/* For SR-IOV, PSP needs to be initialized before IH */
if (amdgpu_sriov_vf(adev)) {
r = amdgpu_discovery_set_psp_ip_blocks(adev);
if (r)
return r;
r = amdgpu_discovery_set_ih_ip_blocks(adev);
if (r)
return r;
} else {
r = amdgpu_discovery_set_ih_ip_blocks(adev);
if (r)
return r;
if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
r = amdgpu_discovery_set_psp_ip_blocks(adev);
if (r)
return r;
}
}
if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
r = amdgpu_discovery_set_smu_ip_blocks(adev);
if (r)
return r;
}
r = amdgpu_discovery_set_display_ip_blocks(adev);
if (r)
return r;
r = amdgpu_discovery_set_gc_ip_blocks(adev);
if (r)
return r;
r = amdgpu_discovery_set_sdma_ip_blocks(adev);
if (r)
return r;
if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
!amdgpu_sriov_vf(adev)) {
r = amdgpu_discovery_set_smu_ip_blocks(adev);
if (r)
return r;
}
r = amdgpu_discovery_set_mm_ip_blocks(adev);
if (r)
return r;
if (adev->enable_mes) {
r = amdgpu_discovery_set_mes_ip_blocks(adev);
if (r)
return r;
}
return 0;
}
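All of the switch statements above compare packed version triples. Assuming the usual definition in amdgpu.h (worth double-checking in your tree), IP_VERSION() packs major/minor/revision into a single integer so equality and ordering tests are plain integer compares:

#define IP_VERSION(mj, mn, rv)	(((mj) << 16) | ((mn) << 8) | (rv))

/* e.g. IP_VERSION(10, 3, 1) == 0x000a0301, so a check like
 * adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0)
 * selects every GC 10.3.x part in one comparison. */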

View File

@@ -36,5 +36,6 @@ int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id, int n
int amdgpu_discovery_get_vcn_version(struct amdgpu_device *adev, int vcn_instance, int amdgpu_discovery_get_vcn_version(struct amdgpu_device *adev, int vcn_instance,
int *major, int *minor, int *revision); int *major, int *minor, int *revision);
int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev); int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev);
int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev);
#endif /* __AMDGPU_DISCOVERY__ */ #endif /* __AMDGPU_DISCOVERY__ */

View File

@@ -837,6 +837,28 @@ static int convert_tiling_flags_to_modifier(struct amdgpu_framebuffer *afb)
return 0; return 0;
} }
/* Mirrors the is_displayable check in radeonsi's gfx6_compute_surface */
static int check_tiling_flags_gfx6(struct amdgpu_framebuffer *afb)
{
u64 micro_tile_mode;
/* Zero swizzle mode means linear */
if (AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0)
return 0;
micro_tile_mode = AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE);
switch (micro_tile_mode) {
case 0: /* DISPLAY */
case 3: /* RENDER */
return 0;
default:
drm_dbg_kms(afb->base.dev,
"Micro tile mode %llu not supported for scanout\n",
micro_tile_mode);
return -EINVAL;
}
}
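check_tiling_flags_gfx6() relies on AMDGPU_TILING_GET() to pull named bitfields out of the 64-bit tiling flags. Assuming the uapi definition in include/uapi/drm/amdgpu_drm.h, it is roughly:

#define AMDGPU_TILING_GET(value, field) \
	(((__u64)(value) >> AMDGPU_TILING_##field##_SHIFT) & \
	 AMDGPU_TILING_##field##_MASK)

/* SWIZZLE_MODE == 0 therefore means a linear surface, and
 * MICRO_TILE_MODE distinguishes the DISPLAY/RENDER micro tile
 * layouts, the only ones scanout accepts in the check above. */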
static void get_block_dimensions(unsigned int block_log2, unsigned int cpp, static void get_block_dimensions(unsigned int block_log2, unsigned int cpp,
unsigned int *width, unsigned int *height) unsigned int *width, unsigned int *height)
{ {
@@ -1103,6 +1125,7 @@ int amdgpu_display_framebuffer_init(struct drm_device *dev,
const struct drm_mode_fb_cmd2 *mode_cmd, const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object *obj) struct drm_gem_object *obj)
{ {
struct amdgpu_device *adev = drm_to_adev(dev);
int ret, i; int ret, i;
/* /*
@@ -1122,6 +1145,14 @@ int amdgpu_display_framebuffer_init(struct drm_device *dev,
if (ret) if (ret)
return ret; return ret;
if (!dev->mode_config.allow_fb_modifiers) {
drm_WARN_ONCE(dev, adev->family >= AMDGPU_FAMILY_AI,
"GFX9+ requires FB check based on format modifier\n");
ret = check_tiling_flags_gfx6(rfb);
if (ret)
return ret;
}
if (dev->mode_config.allow_fb_modifiers && if (dev->mode_config.allow_fb_modifiers &&
!(rfb->base.flags & DRM_MODE_FB_MODIFIERS)) { !(rfb->base.flags & DRM_MODE_FB_MODIFIERS)) {
ret = convert_tiling_flags_to_modifier(rfb); ret = convert_tiling_flags_to_modifier(rfb);

View File

@@ -96,9 +96,11 @@
* - 3.40.0 - Add AMDGPU_IDS_FLAGS_TMZ * - 3.40.0 - Add AMDGPU_IDS_FLAGS_TMZ
* - 3.41.0 - Add video codec query * - 3.41.0 - Add video codec query
* - 3.42.0 - Add 16bpc fixed point display support * - 3.42.0 - Add 16bpc fixed point display support
* - 3.43.0 - Add device hot plug/unplug support
* - 3.44.0 - DCN3 supports DCC independent block settings: !64B && 128B, 64B && 128B
*/ */
#define KMS_DRIVER_MAJOR 3 #define KMS_DRIVER_MAJOR 3
#define KMS_DRIVER_MINOR 42 #define KMS_DRIVER_MINOR 44
#define KMS_DRIVER_PATCHLEVEL 0 #define KMS_DRIVER_PATCHLEVEL 0
int amdgpu_vram_limit; int amdgpu_vram_limit;
@@ -627,7 +629,7 @@ module_param_named(mcbp, amdgpu_mcbp, int, 0444);
/** /**
* DOC: discovery (int) * DOC: discovery (int)
* Allow driver to discover hardware IP information from IP Discovery table at the top of VRAM. * Allow driver to discover hardware IP information from IP Discovery table at the top of VRAM.
* (-1 = auto (default), 0 = disabled, 1 = enabled) * (-1 = auto (default), 0 = disabled, 1 = enabled, 2 = use ip_discovery table from file)
*/ */
MODULE_PARM_DESC(discovery, MODULE_PARM_DESC(discovery,
"Allow driver to discover hardware IPs from IP Discovery table at the top of VRAM"); "Allow driver to discover hardware IPs from IP Discovery table at the top of VRAM");
@@ -890,6 +892,636 @@ MODULE_PARM_DESC(smu_pptable_id,
"specify pptable id to be used (-1 = auto(default) value, 0 = use pptable from vbios, > 0 = soft pptable id)"); "specify pptable id to be used (-1 = auto(default) value, 0 = use pptable from vbios, > 0 = soft pptable id)");
module_param_named(smu_pptable_id, amdgpu_smu_pptable_id, int, 0444); module_param_named(smu_pptable_id, amdgpu_smu_pptable_id, int, 0444);
/* These devices are not supported by amdgpu.
* They are supported by the mach64, r128, radeon drivers
*/
static const u16 amdgpu_unsupported_pciidlist[] = {
/* mach64 */
0x4354,
0x4358,
0x4554,
0x4742,
0x4744,
0x4749,
0x474C,
0x474D,
0x474E,
0x474F,
0x4750,
0x4751,
0x4752,
0x4753,
0x4754,
0x4755,
0x4756,
0x4757,
0x4758,
0x4759,
0x475A,
0x4C42,
0x4C44,
0x4C47,
0x4C49,
0x4C4D,
0x4C4E,
0x4C50,
0x4C51,
0x4C52,
0x4C53,
0x5654,
0x5655,
0x5656,
/* r128 */
0x4c45,
0x4c46,
0x4d46,
0x4d4c,
0x5041,
0x5042,
0x5043,
0x5044,
0x5045,
0x5046,
0x5047,
0x5048,
0x5049,
0x504A,
0x504B,
0x504C,
0x504D,
0x504E,
0x504F,
0x5050,
0x5051,
0x5052,
0x5053,
0x5054,
0x5055,
0x5056,
0x5057,
0x5058,
0x5245,
0x5246,
0x5247,
0x524b,
0x524c,
0x534d,
0x5446,
0x544C,
0x5452,
/* radeon */
0x3150,
0x3151,
0x3152,
0x3154,
0x3155,
0x3E50,
0x3E54,
0x4136,
0x4137,
0x4144,
0x4145,
0x4146,
0x4147,
0x4148,
0x4149,
0x414A,
0x414B,
0x4150,
0x4151,
0x4152,
0x4153,
0x4154,
0x4155,
0x4156,
0x4237,
0x4242,
0x4336,
0x4337,
0x4437,
0x4966,
0x4967,
0x4A48,
0x4A49,
0x4A4A,
0x4A4B,
0x4A4C,
0x4A4D,
0x4A4E,
0x4A4F,
0x4A50,
0x4A54,
0x4B48,
0x4B49,
0x4B4A,
0x4B4B,
0x4B4C,
0x4C57,
0x4C58,
0x4C59,
0x4C5A,
0x4C64,
0x4C66,
0x4C67,
0x4E44,
0x4E45,
0x4E46,
0x4E47,
0x4E48,
0x4E49,
0x4E4A,
0x4E4B,
0x4E50,
0x4E51,
0x4E52,
0x4E53,
0x4E54,
0x4E56,
0x5144,
0x5145,
0x5146,
0x5147,
0x5148,
0x514C,
0x514D,
0x5157,
0x5158,
0x5159,
0x515A,
0x515E,
0x5460,
0x5462,
0x5464,
0x5548,
0x5549,
0x554A,
0x554B,
0x554C,
0x554D,
0x554E,
0x554F,
0x5550,
0x5551,
0x5552,
0x5554,
0x564A,
0x564B,
0x564F,
0x5652,
0x5653,
0x5657,
0x5834,
0x5835,
0x5954,
0x5955,
0x5974,
0x5975,
0x5960,
0x5961,
0x5962,
0x5964,
0x5965,
0x5969,
0x5a41,
0x5a42,
0x5a61,
0x5a62,
0x5b60,
0x5b62,
0x5b63,
0x5b64,
0x5b65,
0x5c61,
0x5c63,
0x5d48,
0x5d49,
0x5d4a,
0x5d4c,
0x5d4d,
0x5d4e,
0x5d4f,
0x5d50,
0x5d52,
0x5d57,
0x5e48,
0x5e4a,
0x5e4b,
0x5e4c,
0x5e4d,
0x5e4f,
0x6700,
0x6701,
0x6702,
0x6703,
0x6704,
0x6705,
0x6706,
0x6707,
0x6708,
0x6709,
0x6718,
0x6719,
0x671c,
0x671d,
0x671f,
0x6720,
0x6721,
0x6722,
0x6723,
0x6724,
0x6725,
0x6726,
0x6727,
0x6728,
0x6729,
0x6738,
0x6739,
0x673e,
0x6740,
0x6741,
0x6742,
0x6743,
0x6744,
0x6745,
0x6746,
0x6747,
0x6748,
0x6749,
0x674A,
0x6750,
0x6751,
0x6758,
0x6759,
0x675B,
0x675D,
0x675F,
0x6760,
0x6761,
0x6762,
0x6763,
0x6764,
0x6765,
0x6766,
0x6767,
0x6768,
0x6770,
0x6771,
0x6772,
0x6778,
0x6779,
0x677B,
0x6840,
0x6841,
0x6842,
0x6843,
0x6849,
0x684C,
0x6850,
0x6858,
0x6859,
0x6880,
0x6888,
0x6889,
0x688A,
0x688C,
0x688D,
0x6898,
0x6899,
0x689b,
0x689c,
0x689d,
0x689e,
0x68a0,
0x68a1,
0x68a8,
0x68a9,
0x68b0,
0x68b8,
0x68b9,
0x68ba,
0x68be,
0x68bf,
0x68c0,
0x68c1,
0x68c7,
0x68c8,
0x68c9,
0x68d8,
0x68d9,
0x68da,
0x68de,
0x68e0,
0x68e1,
0x68e4,
0x68e5,
0x68e8,
0x68e9,
0x68f1,
0x68f2,
0x68f8,
0x68f9,
0x68fa,
0x68fe,
0x7100,
0x7101,
0x7102,
0x7103,
0x7104,
0x7105,
0x7106,
0x7108,
0x7109,
0x710A,
0x710B,
0x710C,
0x710E,
0x710F,
0x7140,
0x7141,
0x7142,
0x7143,
0x7144,
0x7145,
0x7146,
0x7147,
0x7149,
0x714A,
0x714B,
0x714C,
0x714D,
0x714E,
0x714F,
0x7151,
0x7152,
0x7153,
0x715E,
0x715F,
0x7180,
0x7181,
0x7183,
0x7186,
0x7187,
0x7188,
0x718A,
0x718B,
0x718C,
0x718D,
0x718F,
0x7193,
0x7196,
0x719B,
0x719F,
0x71C0,
0x71C1,
0x71C2,
0x71C3,
0x71C4,
0x71C5,
0x71C6,
0x71C7,
0x71CD,
0x71CE,
0x71D2,
0x71D4,
0x71D5,
0x71D6,
0x71DA,
0x71DE,
0x7200,
0x7210,
0x7211,
0x7240,
0x7243,
0x7244,
0x7245,
0x7246,
0x7247,
0x7248,
0x7249,
0x724A,
0x724B,
0x724C,
0x724D,
0x724E,
0x724F,
0x7280,
0x7281,
0x7283,
0x7284,
0x7287,
0x7288,
0x7289,
0x728B,
0x728C,
0x7290,
0x7291,
0x7293,
0x7297,
0x7834,
0x7835,
0x791e,
0x791f,
0x793f,
0x7941,
0x7942,
0x796c,
0x796d,
0x796e,
0x796f,
0x9400,
0x9401,
0x9402,
0x9403,
0x9405,
0x940A,
0x940B,
0x940F,
0x94A0,
0x94A1,
0x94A3,
0x94B1,
0x94B3,
0x94B4,
0x94B5,
0x94B9,
0x9440,
0x9441,
0x9442,
0x9443,
0x9444,
0x9446,
0x944A,
0x944B,
0x944C,
0x944E,
0x9450,
0x9452,
0x9456,
0x945A,
0x945B,
0x945E,
0x9460,
0x9462,
0x946A,
0x946B,
0x947A,
0x947B,
0x9480,
0x9487,
0x9488,
0x9489,
0x948A,
0x948F,
0x9490,
0x9491,
0x9495,
0x9498,
0x949C,
0x949E,
0x949F,
0x94C0,
0x94C1,
0x94C3,
0x94C4,
0x94C5,
0x94C6,
0x94C7,
0x94C8,
0x94C9,
0x94CB,
0x94CC,
0x94CD,
0x9500,
0x9501,
0x9504,
0x9505,
0x9506,
0x9507,
0x9508,
0x9509,
0x950F,
0x9511,
0x9515,
0x9517,
0x9519,
0x9540,
0x9541,
0x9542,
0x954E,
0x954F,
0x9552,
0x9553,
0x9555,
0x9557,
0x955f,
0x9580,
0x9581,
0x9583,
0x9586,
0x9587,
0x9588,
0x9589,
0x958A,
0x958B,
0x958C,
0x958D,
0x958E,
0x958F,
0x9590,
0x9591,
0x9593,
0x9595,
0x9596,
0x9597,
0x9598,
0x9599,
0x959B,
0x95C0,
0x95C2,
0x95C4,
0x95C5,
0x95C6,
0x95C7,
0x95C9,
0x95CC,
0x95CD,
0x95CE,
0x95CF,
0x9610,
0x9611,
0x9612,
0x9613,
0x9614,
0x9615,
0x9616,
0x9640,
0x9641,
0x9642,
0x9643,
0x9644,
0x9645,
0x9647,
0x9648,
0x9649,
0x964a,
0x964b,
0x964c,
0x964e,
0x964f,
0x9710,
0x9711,
0x9712,
0x9713,
0x9714,
0x9715,
0x9802,
0x9803,
0x9804,
0x9805,
0x9806,
0x9807,
0x9808,
0x9809,
0x980A,
0x9900,
0x9901,
0x9903,
0x9904,
0x9905,
0x9906,
0x9907,
0x9908,
0x9909,
0x990A,
0x990B,
0x990C,
0x990D,
0x990E,
0x990F,
0x9910,
0x9913,
0x9917,
0x9918,
0x9919,
0x9990,
0x9991,
0x9992,
0x9993,
0x9994,
0x9995,
0x9996,
0x9997,
0x9998,
0x9999,
0x999A,
0x999B,
0x999C,
0x999D,
0x99A0,
0x99A2,
0x99A4,
};
static const struct pci_device_id pciidlist[] = { static const struct pci_device_id pciidlist[] = {
#ifdef CONFIG_DRM_AMDGPU_SI #ifdef CONFIG_DRM_AMDGPU_SI
{0x1002, 0x6780, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI}, {0x1002, 0x6780, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
@@ -1239,6 +1871,16 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x7423, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BEIGE_GOBY}, {0x1002, 0x7423, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BEIGE_GOBY},
{0x1002, 0x743F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BEIGE_GOBY}, {0x1002, 0x743F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BEIGE_GOBY},
{ PCI_DEVICE(0x1002, PCI_ANY_ID),
.class = PCI_CLASS_DISPLAY_VGA << 8,
.class_mask = 0xffffff,
.driver_data = CHIP_IP_DISCOVERY },
{ PCI_DEVICE(0x1002, PCI_ANY_ID),
.class = PCI_CLASS_DISPLAY_OTHER << 8,
.class_mask = 0xffffff,
.driver_data = CHIP_IP_DISCOVERY },
{0, 0, 0} {0, 0, 0}
}; };
@@ -1252,9 +1894,20 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
struct drm_device *ddev; struct drm_device *ddev;
struct amdgpu_device *adev; struct amdgpu_device *adev;
unsigned long flags = ent->driver_data; unsigned long flags = ent->driver_data;
int ret, retry = 0; int ret, retry = 0, i;
bool supports_atomic = false; bool supports_atomic = false;
/* skip devices which are owned by radeon */
for (i = 0; i < ARRAY_SIZE(amdgpu_unsupported_pciidlist); i++) {
if (amdgpu_unsupported_pciidlist[i] == pdev->device)
return -ENODEV;
}
if (flags == 0) {
DRM_INFO("Unsupported asic. Remove me when IP discovery init is in place.\n");
return -ENODEV;
}
if (amdgpu_virtual_display || if (amdgpu_virtual_display ||
amdgpu_device_asic_has_dc_support(flags & AMD_ASIC_MASK)) amdgpu_device_asic_has_dc_support(flags & AMD_ASIC_MASK))
supports_atomic = true; supports_atomic = true;
@@ -1508,6 +2161,10 @@ static int amdgpu_pmops_resume(struct device *dev)
struct amdgpu_device *adev = drm_to_adev(drm_dev); struct amdgpu_device *adev = drm_to_adev(drm_dev);
int r; int r;
/* Avoids registers access if device is physically gone */
if (!pci_device_is_present(adev->pdev))
adev->no_hw_access = true;
r = amdgpu_device_resume(drm_dev, true); r = amdgpu_device_resume(drm_dev, true);
if (amdgpu_acpi_is_s0ix_active(adev)) if (amdgpu_acpi_is_s0ix_active(adev))
adev->in_s0ix = false; adev->in_s0ix = false;

View File

@@ -550,7 +550,7 @@ void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev)
drm_sched_stop(&ring->sched, NULL); drm_sched_stop(&ring->sched, NULL);
/* You can't wait for HW to signal if it's gone */ /* You can't wait for HW to signal if it's gone */
if (!drm_dev_is_unplugged(&adev->ddev)) if (!drm_dev_is_unplugged(adev_to_drm(adev)))
r = amdgpu_fence_wait_empty(ring); r = amdgpu_fence_wait_empty(ring);
else else
r = -ENODEV; r = -ENODEV;

View File

@@ -34,6 +34,7 @@
#include <asm/set_memory.h> #include <asm/set_memory.h>
#endif #endif
#include "amdgpu.h" #include "amdgpu.h"
#include <drm/drm_drv.h>
/* /*
* GART * GART
@@ -230,12 +231,16 @@ int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
u64 page_base; u64 page_base;
/* Starting from VEGA10, system bit must be 0 to mean invalid. */ /* Starting from VEGA10, system bit must be 0 to mean invalid. */
uint64_t flags = 0; uint64_t flags = 0;
int idx;
if (!adev->gart.ready) { if (!adev->gart.ready) {
WARN(1, "trying to unbind memory from uninitialized GART !\n"); WARN(1, "trying to unbind memory from uninitialized GART !\n");
return -EINVAL; return -EINVAL;
} }
if (!drm_dev_enter(adev_to_drm(adev), &idx))
return 0;
t = offset / AMDGPU_GPU_PAGE_SIZE; t = offset / AMDGPU_GPU_PAGE_SIZE;
p = t / AMDGPU_GPU_PAGES_IN_CPU_PAGE; p = t / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
for (i = 0; i < pages; i++, p++) { for (i = 0; i < pages; i++, p++) {
@@ -254,6 +259,7 @@ int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
for (i = 0; i < adev->num_vmhubs; i++) for (i = 0; i < adev->num_vmhubs; i++)
amdgpu_gmc_flush_gpu_tlb(adev, 0, i, 0); amdgpu_gmc_flush_gpu_tlb(adev, 0, i, 0);
drm_dev_exit(idx);
return 0; return 0;
} }
@@ -276,12 +282,16 @@ int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
{ {
uint64_t page_base; uint64_t page_base;
unsigned i, j, t; unsigned i, j, t;
int idx;
if (!adev->gart.ready) { if (!adev->gart.ready) {
WARN(1, "trying to bind memory to uninitialized GART !\n"); WARN(1, "trying to bind memory to uninitialized GART !\n");
return -EINVAL; return -EINVAL;
} }
if (!drm_dev_enter(adev_to_drm(adev), &idx))
return 0;
t = offset / AMDGPU_GPU_PAGE_SIZE; t = offset / AMDGPU_GPU_PAGE_SIZE;
for (i = 0; i < pages; i++) { for (i = 0; i < pages; i++) {
@@ -291,6 +301,7 @@ int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
page_base += AMDGPU_GPU_PAGE_SIZE; page_base += AMDGPU_GPU_PAGE_SIZE;
} }
} }
drm_dev_exit(idx);
return 0; return 0;
} }
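The drm_dev_enter()/drm_dev_exit() pairs added above are the standard hotplug guard: once drm_dev_unplug() has run, drm_dev_enter() returns false and the MMIO section is skipped entirely. A minimal sketch of the pattern (example_hw_touch is a hypothetical helper, not an amdgpu function):

#include <drm/drm_drv.h>

static int example_hw_touch(struct drm_device *ddev)
{
	int idx;

	if (!drm_dev_enter(ddev, &idx))
		return 0;	/* device already unplugged */

	/* ... register access that must not race with hotplug ... */

	drm_dev_exit(idx);
	return 0;
}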

View File

@@ -62,7 +62,6 @@ static vm_fault_t amdgpu_gem_fault(struct vm_fault *vmf)
ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot, ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
TTM_BO_VM_NUM_PREFAULT, 1); TTM_BO_VM_NUM_PREFAULT, 1);
drm_dev_exit(idx); drm_dev_exit(idx);
} else { } else {
ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot); ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot);

View File

@@ -31,6 +31,8 @@
/* delay 0.1 second to enable gfx off feature */ /* delay 0.1 second to enable gfx off feature */
#define GFX_OFF_DELAY_ENABLE msecs_to_jiffies(100) #define GFX_OFF_DELAY_ENABLE msecs_to_jiffies(100)
#define GFX_OFF_NO_DELAY 0
/* /*
* GPU GFX IP block helpers function. * GPU GFX IP block helpers function.
*/ */
@@ -558,6 +560,8 @@ int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev)
void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable) void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
{ {
unsigned long delay = GFX_OFF_DELAY_ENABLE;
if (!(adev->pm.pp_feature & PP_GFXOFF_MASK)) if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
return; return;
@@ -573,8 +577,14 @@ void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
adev->gfx.gfx_off_req_count--; adev->gfx.gfx_off_req_count--;
if (adev->gfx.gfx_off_req_count == 0 && !adev->gfx.gfx_off_state) if (adev->gfx.gfx_off_req_count == 0 &&
schedule_delayed_work(&adev->gfx.gfx_off_delay_work, GFX_OFF_DELAY_ENABLE); !adev->gfx.gfx_off_state) {
/* If going to s2idle, no need to wait */
if (adev->in_s0ix)
delay = GFX_OFF_NO_DELAY;
schedule_delayed_work(&adev->gfx.gfx_off_delay_work,
delay);
}
} else { } else {
if (adev->gfx.gfx_off_req_count == 0) { if (adev->gfx.gfx_off_req_count == 0) {
cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work); cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);

View File

@@ -42,10 +42,9 @@
#define AMDGPU_MAX_GFX_QUEUES KGD_MAX_QUEUES #define AMDGPU_MAX_GFX_QUEUES KGD_MAX_QUEUES
#define AMDGPU_MAX_COMPUTE_QUEUES KGD_MAX_QUEUES #define AMDGPU_MAX_COMPUTE_QUEUES KGD_MAX_QUEUES
enum gfx_pipe_priority { enum amdgpu_gfx_pipe_priority {
AMDGPU_GFX_PIPE_PRIO_NORMAL = 1, AMDGPU_GFX_PIPE_PRIO_NORMAL = AMDGPU_RING_PRIO_1,
AMDGPU_GFX_PIPE_PRIO_HIGH, AMDGPU_GFX_PIPE_PRIO_HIGH = AMDGPU_RING_PRIO_2
AMDGPU_GFX_PIPE_PRIO_MAX
}; };
/* Argument for PPSMC_MSG_GpuChangeState */ /* Argument for PPSMC_MSG_GpuChangeState */

View File

@@ -153,10 +153,6 @@ int amdgpu_gmc_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
{ {
void __iomem *ptr = (void *)cpu_pt_addr; void __iomem *ptr = (void *)cpu_pt_addr;
uint64_t value; uint64_t value;
int idx;
if (!drm_dev_enter(&adev->ddev, &idx))
return 0;
/* /*
* The following is for PTE only. GART does not have PDEs. * The following is for PTE only. GART does not have PDEs.
@@ -165,8 +161,6 @@ int amdgpu_gmc_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
value |= flags; value |= flags;
writeq(value, ptr + (gpu_page_idx * 8)); writeq(value, ptr + (gpu_page_idx * 8));
drm_dev_exit(idx);
return 0; return 0;
} }
@@ -598,7 +592,7 @@ void amdgpu_gmc_tmz_set(struct amdgpu_device *adev)
break; break;
default: default:
adev->gmc.tmz_enabled = false; adev->gmc.tmz_enabled = false;
dev_warn(adev->dev, dev_info(adev->dev,
"Trusted Memory Zone (TMZ) feature not supported\n"); "Trusted Memory Zone (TMZ) feature not supported\n");
break; break;
} }
@@ -749,6 +743,10 @@ void amdgpu_gmc_init_pdb0(struct amdgpu_device *adev)
adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size; adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
u64 vram_end = vram_addr + vram_size; u64 vram_end = vram_addr + vram_size;
u64 gart_ptb_gpu_pa = amdgpu_gmc_vram_pa(adev, adev->gart.bo); u64 gart_ptb_gpu_pa = amdgpu_gmc_vram_pa(adev, adev->gart.bo);
int idx;
if (!drm_dev_enter(adev_to_drm(adev), &idx))
return;
flags |= AMDGPU_PTE_VALID | AMDGPU_PTE_READABLE; flags |= AMDGPU_PTE_VALID | AMDGPU_PTE_READABLE;
flags |= AMDGPU_PTE_WRITEABLE; flags |= AMDGPU_PTE_WRITEABLE;
@@ -770,6 +768,7 @@ void amdgpu_gmc_init_pdb0(struct amdgpu_device *adev)
flags |= AMDGPU_PDE_BFS(0) | AMDGPU_PTE_SNOOPED; flags |= AMDGPU_PDE_BFS(0) | AMDGPU_PTE_SNOOPED;
/* Requires gart_ptb_gpu_pa to be 4K aligned */ /* Requires gart_ptb_gpu_pa to be 4K aligned */
amdgpu_gmc_set_pte_pde(adev, adev->gmc.ptr_pdb0, i, gart_ptb_gpu_pa, flags); amdgpu_gmc_set_pte_pde(adev, adev->gmc.ptr_pdb0, i, gart_ptb_gpu_pa, flags);
drm_dev_exit(idx);
} }
/** /**

View File

@@ -300,20 +300,15 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
*/ */
int amdgpu_ib_pool_init(struct amdgpu_device *adev) int amdgpu_ib_pool_init(struct amdgpu_device *adev)
{ {
unsigned size;
int r, i; int r, i;
if (adev->ib_pool_ready) if (adev->ib_pool_ready)
return 0; return 0;
for (i = 0; i < AMDGPU_IB_POOL_MAX; i++) { for (i = 0; i < AMDGPU_IB_POOL_MAX; i++) {
if (i == AMDGPU_IB_POOL_DIRECT)
size = PAGE_SIZE * 6;
else
size = AMDGPU_IB_POOL_SIZE;
r = amdgpu_sa_bo_manager_init(adev, &adev->ib_pools[i], r = amdgpu_sa_bo_manager_init(adev, &adev->ib_pools[i],
size, AMDGPU_GPU_PAGE_SIZE, AMDGPU_IB_POOL_SIZE,
AMDGPU_GPU_PAGE_SIZE,
AMDGPU_GEM_DOMAIN_GTT); AMDGPU_GEM_DOMAIN_GTT);
if (r) if (r)
goto error; goto error;

View File

@@ -38,7 +38,7 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
struct amdgpu_device *adev = ring->adev; struct amdgpu_device *adev = ring->adev;
int idx; int idx;
if (!drm_dev_enter(&adev->ddev, &idx)) { if (!drm_dev_enter(adev_to_drm(adev), &idx)) {
DRM_INFO("%s - device unplugged skipping recovery on scheduler:%s", DRM_INFO("%s - device unplugged skipping recovery on scheduler:%s",
__func__, s_job->sched->name); __func__, s_job->sched->name);

View File

@@ -341,27 +341,34 @@ static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
switch (query_fw->index) { switch (query_fw->index) {
case TA_FW_TYPE_PSP_XGMI: case TA_FW_TYPE_PSP_XGMI:
fw_info->ver = adev->psp.ta_fw_version; fw_info->ver = adev->psp.ta_fw_version;
fw_info->feature = adev->psp.xgmi.feature_version; fw_info->feature = adev->psp.xgmi_context.context
.bin_desc.feature_version;
break; break;
case TA_FW_TYPE_PSP_RAS: case TA_FW_TYPE_PSP_RAS:
fw_info->ver = adev->psp.ta_fw_version; fw_info->ver = adev->psp.ta_fw_version;
fw_info->feature = adev->psp.ras.feature_version; fw_info->feature = adev->psp.ras_context.context
.bin_desc.feature_version;
break; break;
case TA_FW_TYPE_PSP_HDCP: case TA_FW_TYPE_PSP_HDCP:
fw_info->ver = adev->psp.ta_fw_version; fw_info->ver = adev->psp.ta_fw_version;
fw_info->feature = adev->psp.hdcp.feature_version; fw_info->feature = adev->psp.hdcp_context.context
.bin_desc.feature_version;
break; break;
case TA_FW_TYPE_PSP_DTM: case TA_FW_TYPE_PSP_DTM:
fw_info->ver = adev->psp.ta_fw_version; fw_info->ver = adev->psp.ta_fw_version;
fw_info->feature = adev->psp.dtm.feature_version; fw_info->feature = adev->psp.dtm_context.context
.bin_desc.feature_version;
break; break;
case TA_FW_TYPE_PSP_RAP: case TA_FW_TYPE_PSP_RAP:
fw_info->ver = adev->psp.ta_fw_version; fw_info->ver = adev->psp.ta_fw_version;
fw_info->feature = adev->psp.rap.feature_version; fw_info->feature = adev->psp.rap_context.context
.bin_desc.feature_version;
break; break;
case TA_FW_TYPE_PSP_SECUREDISPLAY: case TA_FW_TYPE_PSP_SECUREDISPLAY:
fw_info->ver = adev->psp.ta_fw_version; fw_info->ver = adev->psp.ta_fw_version;
fw_info->feature = adev->psp.securedisplay.feature_version; fw_info->feature =
adev->psp.securedisplay_context.context.bin_desc
.feature_version;
break; break;
default: default:
return -EINVAL; return -EINVAL;
@@ -378,8 +385,8 @@ static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
fw_info->feature = adev->psp.sos.feature_version; fw_info->feature = adev->psp.sos.feature_version;
break; break;
case AMDGPU_INFO_FW_ASD: case AMDGPU_INFO_FW_ASD:
fw_info->ver = adev->psp.asd.fw_version; fw_info->ver = adev->psp.asd_context.bin_desc.fw_version;
fw_info->feature = adev->psp.asd.feature_version; fw_info->feature = adev->psp.asd_context.bin_desc.feature_version;
break; break;
case AMDGPU_INFO_FW_DMCU: case AMDGPU_INFO_FW_DMCU:
fw_info->ver = adev->dm.dmcu_fw_version; fw_info->ver = adev->dm.dmcu_fw_version;

View File

@@ -31,7 +31,7 @@ void amdgpu_mca_query_correctable_error_count(struct amdgpu_device *adev,
uint64_t mc_status_addr, uint64_t mc_status_addr,
unsigned long *error_count) unsigned long *error_count)
{ {
uint64_t mc_status = RREG64_PCIE(mc_status_addr * 4); uint64_t mc_status = RREG64_PCIE(mc_status_addr);
if (REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 && if (REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1) REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)
@@ -42,7 +42,7 @@ void amdgpu_mca_query_uncorrectable_error_count(struct amdgpu_device *adev,
uint64_t mc_status_addr, uint64_t mc_status_addr,
unsigned long *error_count) unsigned long *error_count)
{ {
uint64_t mc_status = RREG64_PCIE(mc_status_addr * 4); uint64_t mc_status = RREG64_PCIE(mc_status_addr);
if ((REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) && if ((REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) &&
(REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1 || (REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1 ||
@@ -56,7 +56,7 @@ void amdgpu_mca_query_uncorrectable_error_count(struct amdgpu_device *adev,
void amdgpu_mca_reset_error_count(struct amdgpu_device *adev, void amdgpu_mca_reset_error_count(struct amdgpu_device *adev,
uint64_t mc_status_addr) uint64_t mc_status_addr)
{ {
WREG64_PCIE(mc_status_addr * 4, 0x0ULL); WREG64_PCIE(mc_status_addr, 0x0ULL);
} }
void amdgpu_mca_query_ras_error_count(struct amdgpu_device *adev, void amdgpu_mca_query_ras_error_count(struct amdgpu_device *adev,
@@ -87,8 +87,8 @@ int amdgpu_mca_ras_late_init(struct amdgpu_device *adev,
if (!mca_dev->ras_if) if (!mca_dev->ras_if)
return -ENOMEM; return -ENOMEM;
mca_dev->ras_if->block = mca_dev->ras_funcs->ras_block; mca_dev->ras_if->block = mca_dev->ras_funcs->ras_block;
mca_dev->ras_if->sub_block_index = mca_dev->ras_funcs->ras_sub_block;
mca_dev->ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE; mca_dev->ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
mca_dev->ras_if->sub_block_index = 0;
} }
ih_info.head = fs_info.head = *mca_dev->ras_if; ih_info.head = fs_info.head = *mca_dev->ras_if;
r = amdgpu_ras_late_init(adev, mca_dev->ras_if, r = amdgpu_ras_late_init(adev, mca_dev->ras_if,
