drm/nouveau/gsp/r535: add support for booting GSP-RM

This commit adds the initial code needed to boot the GSP-RM firmware
provided by NVIDIA, bringing with it the beginnings of Ada support.

Until it's had more testing and time to bake, support is disabled by
default (except on Ada).  GSP-RM usage can be enabled by passing the
"config=NvGspRm=1" module option.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20230918202149.4343-33-skeggsb@gmail.com
Ben Skeggs 2023-09-19 06:21:37 +10:00 committed by Dave Airlie
parent 17a74021a3
commit 176fdcbddf
40 changed files with 3896 additions and 2 deletions


@ -35,6 +35,7 @@ struct nv_device_info_v0 {
#define NV_DEVICE_INFO_V0_VOLTA 0x0b
#define NV_DEVICE_INFO_V0_TURING 0x0c
#define NV_DEVICE_INFO_V0_AMPERE 0x0d
#define NV_DEVICE_INFO_V0_ADA 0x0e
__u8 family;
__u8 pad06[2];
__u64 ram_size;


@ -46,6 +46,7 @@ struct nvkm_device {
GV100 = 0x140,
TU100 = 0x160,
GA100 = 0x170,
AD100 = 0x190,
} card_type;
u32 chipset;
u8 chiprev;


@ -48,6 +48,7 @@ int nvkm_falcon_pio_rd(struct nvkm_falcon *, u8 port, enum nvkm_falcon_mem type,
const u8 *img, u32 img_base, int len);
int nvkm_falcon_dma_wr(struct nvkm_falcon *, const u8 *img, u64 dma_addr, u32 dma_base,
enum nvkm_falcon_mem mem_type, u32 mem_base, int len, bool sec);
bool nvkm_falcon_riscv_active(struct nvkm_falcon *);
int gm200_flcn_reset_wait_mem_scrubbing(struct nvkm_falcon *);
int gm200_flcn_disable(struct nvkm_falcon *);
@ -61,10 +62,13 @@ void gm200_flcn_tracepc(struct nvkm_falcon *);
int gp102_flcn_reset_eng(struct nvkm_falcon *);
extern const struct nvkm_falcon_func_pio gp102_flcn_emem_pio;
bool tu102_flcn_riscv_active(struct nvkm_falcon *);
int ga102_flcn_select(struct nvkm_falcon *);
int ga102_flcn_reset_prep(struct nvkm_falcon *);
int ga102_flcn_reset_wait_mem_scrubbing(struct nvkm_falcon *);
extern const struct nvkm_falcon_func_dma ga102_flcn_dma;
bool ga102_flcn_riscv_active(struct nvkm_falcon *);
void nvkm_falcon_v1_load_imem(struct nvkm_falcon *,
void *, u32, u32, u16, u8, bool);


@ -87,6 +87,8 @@ struct nvkm_falcon_func {
u32 stride;
} cmdq, msgq;
bool (*riscv_active)(struct nvkm_falcon *);
struct {
u32 *data;
u32 size;


@ -29,6 +29,7 @@ int nvbios_memcmp(struct nvkm_bios *, u32 addr, const char *, u32 len);
u8 nvbios_rd08(struct nvkm_bios *, u32 addr);
u16 nvbios_rd16(struct nvkm_bios *, u32 addr);
u32 nvbios_rd32(struct nvkm_bios *, u32 addr);
void *nvbios_pointer(struct nvkm_bios *, u32 addr);
int nvkm_bios_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_bios **);
#endif


@ -3,18 +3,186 @@
#define nvkm_gsp(p) container_of((p), struct nvkm_gsp, subdev)
#include <core/subdev.h>
#include <core/falcon.h>
#include <core/firmware.h>
#define GSP_PAGE_SHIFT 12
#define GSP_PAGE_SIZE BIT(GSP_PAGE_SHIFT)
struct nvkm_gsp_mem {
u32 size;
void *data;
dma_addr_t addr;
};
struct nvkm_gsp_radix3 {
struct nvkm_gsp_mem mem[3];
};
int nvkm_gsp_sg(struct nvkm_device *, u64 size, struct sg_table *);
void nvkm_gsp_sg_free(struct nvkm_device *, struct sg_table *);
typedef int (*nvkm_gsp_msg_ntfy_func)(void *priv, u32 fn, void *repv, u32 repc);
struct nvkm_gsp {
const struct nvkm_gsp_func *func;
struct nvkm_subdev subdev;
struct nvkm_falcon falcon;
struct {
struct {
const struct firmware *load;
const struct firmware *unload;
} booter;
const struct firmware *bl;
const struct firmware *rm;
} fws;
struct nvkm_firmware fw;
struct nvkm_gsp_mem sig;
struct nvkm_gsp_radix3 radix3;
struct {
struct {
struct {
u64 addr;
u64 size;
} vga_workspace;
u64 addr;
u64 size;
} bios;
struct {
struct {
u64 addr;
u64 size;
} frts, boot, elf, heap;
u64 addr;
u64 size;
} wpr2;
struct {
u64 addr;
u64 size;
} heap;
u64 addr;
u64 size;
} fb;
struct {
struct nvkm_falcon_fw load;
struct nvkm_falcon_fw unload;
} booter;
struct {
struct nvkm_gsp_mem fw;
u32 code_offset;
u32 data_offset;
u32 manifest_offset;
u32 app_version;
} boot;
struct nvkm_gsp_mem libos;
struct nvkm_gsp_mem loginit;
struct nvkm_gsp_mem logintr;
struct nvkm_gsp_mem logrm;
struct nvkm_gsp_mem rmargs;
struct nvkm_gsp_mem wpr_meta;
struct {
struct sg_table sgt;
struct nvkm_gsp_radix3 radix3;
struct nvkm_gsp_mem meta;
} sr;
struct {
struct nvkm_gsp_mem mem;
struct {
int nr;
u32 size;
u64 *ptr;
} ptes;
struct {
u32 size;
void *ptr;
} cmdq, msgq;
} shm;
struct nvkm_gsp_cmdq {
struct mutex mutex;
u32 cnt;
u32 seq;
u32 *wptr;
u32 *rptr;
} cmdq;
struct nvkm_gsp_msgq {
struct mutex mutex;
u32 cnt;
u32 *wptr;
u32 *rptr;
struct nvkm_gsp_msgq_ntfy {
u32 fn;
nvkm_gsp_msg_ntfy_func func;
void *priv;
} ntfy[16];
int ntfy_nr;
} msgq;
bool running;
const struct nvkm_gsp_rm {
void *(*rpc_get)(struct nvkm_gsp *, u32 fn, u32 argc);
void *(*rpc_push)(struct nvkm_gsp *, void *argv, bool wait, u32 repc);
void (*rpc_done)(struct nvkm_gsp *gsp, void *repv);
} *rm;
};
static inline bool
nvkm_gsp_rm(struct nvkm_gsp *gsp)
{
return gsp && (gsp->fws.rm || gsp->fw.img);
}
static inline void *
nvkm_gsp_rpc_get(struct nvkm_gsp *gsp, u32 fn, u32 argc)
{
return gsp->rm->rpc_get(gsp, fn, argc);
}
static inline void *
nvkm_gsp_rpc_push(struct nvkm_gsp *gsp, void *argv, bool wait, u32 repc)
{
return gsp->rm->rpc_push(gsp, argv, wait, repc);
}
static inline void *
nvkm_gsp_rpc_rd(struct nvkm_gsp *gsp, u32 fn, u32 argc)
{
void *argv = nvkm_gsp_rpc_get(gsp, fn, argc);
if (IS_ERR_OR_NULL(argv))
return argv;
return nvkm_gsp_rpc_push(gsp, argv, true, argc);
}
static inline int
nvkm_gsp_rpc_wr(struct nvkm_gsp *gsp, void *argv, bool wait)
{
void *repv = nvkm_gsp_rpc_push(gsp, argv, wait, 0);
if (IS_ERR(repv))
return PTR_ERR(repv);
return 0;
}
static inline void
nvkm_gsp_rpc_done(struct nvkm_gsp *gsp, void *repv)
{
gsp->rm->rpc_done(gsp, repv);
}
int gv100_gsp_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_gsp **);
@ -22,4 +190,5 @@ int tu102_gsp_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_
int tu116_gsp_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_gsp **);
int ga100_gsp_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_gsp **);
int ga102_gsp_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_gsp **);
int ad102_gsp_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_gsp **);
#endif
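The rpc_get/rpc_push/rpc_done indirection above gives callers a simple convention: allocate an argument buffer, fill it in, push it (optionally waiting for GSP-RM's reply), then release the reply. A minimal sketch of a caller follows; the function number and argument layout are placeholders, not an RPC defined by this commit.

static int
example_gsp_rpc_set_value(struct nvkm_gsp *gsp, u32 value)
{
	struct {
		u32 value;
	} *args;

	/* Allocate an RPC buffer for a hypothetical function number. */
	args = nvkm_gsp_rpc_get(gsp, 0x42 /* placeholder fn */, sizeof(*args));
	if (IS_ERR_OR_NULL(args))
		return args ? PTR_ERR(args) : -ENOMEM;

	args->value = value;

	/* Send it and wait for GSP-RM to consume it; no reply payload needed. */
	return nvkm_gsp_rpc_wr(gsp, args, true);
}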


@ -0,0 +1,31 @@
#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl0073_ctrl0073system_h__
#define __src_common_sdk_nvidia_inc_ctrl_ctrl0073_ctrl0073system_h__
/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.54.03 */
/*
* SPDX-FileCopyrightText: Copyright (c) 2005-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#define NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS (16U)
#endif


@ -0,0 +1,33 @@
#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080gpu_h__
#define __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080gpu_h__
/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.54.03 */
/*
* SPDX-FileCopyrightText: Copyright (c) 2006-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#define NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_0 (0x00000000U)
#define NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_3 (0x00000003U)
#endif


@ -0,0 +1,46 @@
#ifndef __src_common_shared_msgq_inc_msgq_msgq_priv_h__
#define __src_common_shared_msgq_inc_msgq_msgq_priv_h__
/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.54.03 */
/*
* SPDX-FileCopyrightText: Copyright (c) 2018-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
typedef struct
{
NvU32 version; // queue version
NvU32 size; // bytes, page aligned
NvU32 msgSize; // entry size, bytes, must be power-of-2, 16 is minimum
NvU32 msgCount; // number of entries in queue
NvU32 writePtr; // message id of next slot
NvU32 flags; // if set it means "i want to swap RX"
NvU32 rxHdrOff; // Offset of msgqRxHeader from start of backing store.
NvU32 entryOff; // Offset of entries from start of backing store.
} msgqTxHeader;
typedef struct
{
NvU32 readPtr; // message id of last message read
} msgqRxHeader;
#endif
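Together these two headers describe the two ends of a shared-memory ring. The sketch below shows one way the fields could relate when a queue is carved out of a single page-aligned backing buffer; the particular offsets chosen (rx header directly after the tx header, entries starting on the next page) are assumptions for illustration, not the layout used by this commit.

static void
example_msgq_setup(msgqTxHeader *tx, msgqRxHeader *rx, NvU32 backing_size, NvU32 msg_size)
{
	tx->version  = 0;
	tx->size     = backing_size;          /* whole backing store, page aligned */
	tx->msgSize  = msg_size;              /* power of two, >= 16 */
	tx->rxHdrOff = sizeof(*tx);           /* assumed: rx header follows tx header */
	tx->entryOff = 0x1000;                /* assumed: entries start on the next page */
	tx->msgCount = (backing_size - tx->entryOff) / msg_size;
	tx->writePtr = 0;
	tx->flags    = 0;
	rx->readPtr  = 0;
}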


@ -0,0 +1,52 @@
#ifndef __src_common_uproc_os_common_include_libos_init_args_h__
#define __src_common_uproc_os_common_include_libos_init_args_h__
/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.54.03 */
/*
* SPDX-FileCopyrightText: Copyright (c) 2018-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
typedef NvU64 LibosAddress;
typedef enum {
LIBOS_MEMORY_REGION_NONE,
LIBOS_MEMORY_REGION_CONTIGUOUS,
LIBOS_MEMORY_REGION_RADIX3
} LibosMemoryRegionKind;
typedef enum {
LIBOS_MEMORY_REGION_LOC_NONE,
LIBOS_MEMORY_REGION_LOC_SYSMEM,
LIBOS_MEMORY_REGION_LOC_FB
} LibosMemoryRegionLoc;
typedef struct
{
LibosAddress id8; // Id tag.
LibosAddress pa; // Physical address.
LibosAddress size; // Size of memory area.
NvU8 kind; // See LibosMemoryRegionKind above.
NvU8 loc; // See LibosMemoryRegionLoc above.
} LibosMemoryRegionInitArgument;
#endif
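As an example, a physically contiguous log buffer in system memory would be described to LIBOS roughly as below; the id8 tag value is purely illustrative.

static void
example_libos_log_region(LibosMemoryRegionInitArgument *arg, NvU64 pa, NvU64 size)
{
	arg->id8  = 0x0000474f4c505347ULL;   /* arbitrary tag, "GSPLOG" packed into 8 bytes */
	arg->pa   = pa;
	arg->size = size;
	arg->kind = LIBOS_MEMORY_REGION_CONTIGUOUS;
	arg->loc  = LIBOS_MEMORY_REGION_LOC_SYSMEM;
}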


@ -0,0 +1,79 @@
#ifndef __src_nvidia_arch_nvalloc_common_inc_gsp_gsp_fw_sr_meta_h__
#define __src_nvidia_arch_nvalloc_common_inc_gsp_gsp_fw_sr_meta_h__
/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.54.03 */
/*
* SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#define GSP_FW_SR_META_MAGIC 0x8a3bb9e6c6c39d93ULL
#define GSP_FW_SR_META_REVISION 2
typedef struct
{
//
// Magic
// Use for verification by Booter
//
NvU64 magic; // = GSP_FW_SR_META_MAGIC;
//
// Revision number
// Bumped up when we change this interface so it is not backward compatible.
// Bumped up when we revoke GSP-RM ucode
//
NvU64 revision; // = GSP_FW_SR_META_MAGIC_REVISION;
//
// ---- Members regarding data in SYSMEM ----------------------------
// Consumed by Booter for DMA
//
NvU64 sysmemAddrOfSuspendResumeData;
NvU64 sizeOfSuspendResumeData;
// ---- Members for crypto ops across S/R ---------------------------
//
// HMAC over the entire GspFwSRMeta structure (including padding)
// with the hmac field itself zeroed.
//
NvU8 hmac[32];
// Hash over GspFwWprMeta structure
NvU8 wprMetaHash[32];
// Hash over GspFwHeapFreeList structure. All zeros signifies no free list.
NvU8 heapFreeListHash[32];
// Hash over data in WPR2 (skipping over free heap chunks; see Booter for details)
NvU8 dataHash[32];
//
// Pad structure to exactly 256 bytes (1 DMA chunk).
// Padding initialized to zero.
//
NvU32 padding[24];
} GspFwSRMeta;
#endif


@ -0,0 +1,149 @@
#ifndef __src_nvidia_arch_nvalloc_common_inc_gsp_gsp_fw_wpr_meta_h__
#define __src_nvidia_arch_nvalloc_common_inc_gsp_gsp_fw_wpr_meta_h__
/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.54.03 */
/*
* SPDX-FileCopyrightText: Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
typedef struct
{
// Magic
// BL to use for verification (i.e. Booter locked it in WPR2)
NvU64 magic; // = 0xdc3aae21371a60b3;
// Revision number of Booter-BL-Sequencer handoff interface
// Bumped up when we change this interface so it is not backward compatible.
// Bumped up when we revoke GSP-RM ucode
NvU64 revision; // = 1;
// ---- Members regarding data in SYSMEM ----------------------------
// Consumed by Booter for DMA
NvU64 sysmemAddrOfRadix3Elf;
NvU64 sizeOfRadix3Elf;
NvU64 sysmemAddrOfBootloader;
NvU64 sizeOfBootloader;
// Offsets inside bootloader image needed by Booter
NvU64 bootloaderCodeOffset;
NvU64 bootloaderDataOffset;
NvU64 bootloaderManifestOffset;
union
{
// Used only at initial boot
struct
{
NvU64 sysmemAddrOfSignature;
NvU64 sizeOfSignature;
};
//
// Used at suspend/resume to read GspFwHeapFreeList
// Offset relative to GspFwWprMeta FBMEM PA (gspFwWprStart)
//
struct
{
NvU32 gspFwHeapFreeListWprOffset;
NvU32 unused0;
NvU64 unused1;
};
};
// ---- Members describing FB layout --------------------------------
NvU64 gspFwRsvdStart;
NvU64 nonWprHeapOffset;
NvU64 nonWprHeapSize;
NvU64 gspFwWprStart;
// GSP-RM to use to setup heap.
NvU64 gspFwHeapOffset;
NvU64 gspFwHeapSize;
// BL to use to find ELF for jump
NvU64 gspFwOffset;
// Size is sizeOfRadix3Elf above.
NvU64 bootBinOffset;
// Size is sizeOfBootloader above.
NvU64 frtsOffset;
NvU64 frtsSize;
NvU64 gspFwWprEnd;
// GSP-RM to use for fbRegionInfo?
NvU64 fbSize;
// ---- Other members -----------------------------------------------
// GSP-RM to use for fbRegionInfo?
NvU64 vgaWorkspaceOffset;
NvU64 vgaWorkspaceSize;
// Boot count. Used to determine whether to load the firmware image.
NvU64 bootCount;
// TODO: the partitionRpc* fields below do not really belong in this
// structure. The values are patched in by the partition bootstrapper
// when GSP-RM is booted in a partition, and this structure was a
// convenient place for the bootstrapper to access them. These should
// be moved to a different comm. mechanism between the bootstrapper
// and the GSP-RM tasks.
// Shared partition RPC memory (physical address)
NvU64 partitionRpcAddr;
// Offsets relative to partitionRpcAddr
NvU16 partitionRpcRequestOffset;
NvU16 partitionRpcReplyOffset;
// Code section and dataSection offset and size.
NvU32 elfCodeOffset;
NvU32 elfDataOffset;
NvU32 elfCodeSize;
NvU32 elfDataSize;
// Used during GSP-RM resume to check for revocation
NvU32 lsUcodeVersion;
// Number of VF partitions allocating sub-heaps from the WPR heap
// Used during boot to ensure the heap is adequately sized
NvU8 gspFwHeapVfPartitionCount;
// Pad structure to exactly 256 bytes. Can replace padding with additional
// fields without incrementing revision. Padding initialized to 0.
NvU8 padding[7];
// BL to use for verification (i.e. Booter says OK to boot)
NvU64 verified; // 0x0 -> unverified, 0xa0a0a0a0a0a0a0a0 -> verified
} GspFwWprMeta;
#define GSP_FW_WPR_META_REVISION 1
#define GSP_FW_WPR_META_MAGIC 0xdc3aae21371a60b3ULL
#endif


@ -0,0 +1,82 @@
#ifndef __src_nvidia_arch_nvalloc_common_inc_rmRiscvUcode_h__
#define __src_nvidia_arch_nvalloc_common_inc_rmRiscvUcode_h__
/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.54.03 */
/*
* SPDX-FileCopyrightText: Copyright (c) 2018-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
typedef struct {
//
// Version 1
// Version 2
// Version 3 = for Partition boot
// Version 4 = for eb riscv boot
// Version 5 = Support signing entire RISC-V image as "code" in code section for hopper and later.
//
NvU32 version; // structure version
NvU32 bootloaderOffset;
NvU32 bootloaderSize;
NvU32 bootloaderParamOffset;
NvU32 bootloaderParamSize;
NvU32 riscvElfOffset;
NvU32 riscvElfSize;
NvU32 appVersion; // Changelist number associated with the image
//
// Manifest contains information about Monitor and it is
// input to BR
//
NvU32 manifestOffset;
NvU32 manifestSize;
//
// Monitor Data offset within RISCV image and size
//
NvU32 monitorDataOffset;
NvU32 monitorDataSize;
//
// Monitor Code offset within RISCV image and size
//
NvU32 monitorCodeOffset;
NvU32 monitorCodeSize;
NvU32 bIsMonitorEnabled;
//
// Swbrom Code offset within RISCV image and size
//
NvU32 swbromCodeOffset;
NvU32 swbromCodeSize;
//
// Swbrom Data offset within RISCV image and size
//
NvU32 swbromDataOffset;
NvU32 swbromDataSize;
//
// Total size of FB carveout (image and reserved space).
//
NvU32 fbReservedSize;
//
// Indicates whether the entire RISC-V image is signed as "code" in code section.
//
NvU32 bSignedAsCode;
} RM_RISCV_UCODE_DESC;
#endif
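The boot bookkeeping added to struct nvkm_gsp earlier in this patch (code_offset, data_offset, manifest_offset, app_version) corresponds naturally to the monitor and manifest fields of this descriptor. The sketch below mirrors the field names and is illustrative; it is not a copy of the parsing code added by this commit.

static void
example_parse_riscv_desc(struct nvkm_gsp *gsp, const RM_RISCV_UCODE_DESC *desc)
{
	gsp->boot.code_offset     = desc->monitorCodeOffset;
	gsp->boot.data_offset     = desc->monitorDataOffset;
	gsp->boot.manifest_offset = desc->manifestOffset;
	gsp->boot.app_version     = desc->appVersion;
}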


@ -0,0 +1,100 @@
#ifndef __src_nvidia_arch_nvalloc_common_inc_rmgspseq_h__
#define __src_nvidia_arch_nvalloc_common_inc_rmgspseq_h__
/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.54.03 */
/*
* SPDX-FileCopyrightText: Copyright (c) 2019-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
typedef enum GSP_SEQ_BUF_OPCODE
{
GSP_SEQ_BUF_OPCODE_REG_WRITE = 0,
GSP_SEQ_BUF_OPCODE_REG_MODIFY,
GSP_SEQ_BUF_OPCODE_REG_POLL,
GSP_SEQ_BUF_OPCODE_DELAY_US,
GSP_SEQ_BUF_OPCODE_REG_STORE,
GSP_SEQ_BUF_OPCODE_CORE_RESET,
GSP_SEQ_BUF_OPCODE_CORE_START,
GSP_SEQ_BUF_OPCODE_CORE_WAIT_FOR_HALT,
GSP_SEQ_BUF_OPCODE_CORE_RESUME,
} GSP_SEQ_BUF_OPCODE;
#define GSP_SEQUENCER_PAYLOAD_SIZE_DWORDS(opcode) \
((opcode == GSP_SEQ_BUF_OPCODE_REG_WRITE) ? (sizeof(GSP_SEQ_BUF_PAYLOAD_REG_WRITE) / sizeof(NvU32)) : \
(opcode == GSP_SEQ_BUF_OPCODE_REG_MODIFY) ? (sizeof(GSP_SEQ_BUF_PAYLOAD_REG_MODIFY) / sizeof(NvU32)) : \
(opcode == GSP_SEQ_BUF_OPCODE_REG_POLL) ? (sizeof(GSP_SEQ_BUF_PAYLOAD_REG_POLL) / sizeof(NvU32)) : \
(opcode == GSP_SEQ_BUF_OPCODE_DELAY_US) ? (sizeof(GSP_SEQ_BUF_PAYLOAD_DELAY_US) / sizeof(NvU32)) : \
(opcode == GSP_SEQ_BUF_OPCODE_REG_STORE) ? (sizeof(GSP_SEQ_BUF_PAYLOAD_REG_STORE) / sizeof(NvU32)) : \
/* GSP_SEQ_BUF_OPCODE_CORE_RESET */ \
/* GSP_SEQ_BUF_OPCODE_CORE_START */ \
/* GSP_SEQ_BUF_OPCODE_CORE_WAIT_FOR_HALT */ \
/* GSP_SEQ_BUF_OPCODE_CORE_RESUME */ \
0)
typedef struct
{
NvU32 addr;
NvU32 val;
} GSP_SEQ_BUF_PAYLOAD_REG_WRITE;
typedef struct
{
NvU32 addr;
NvU32 mask;
NvU32 val;
} GSP_SEQ_BUF_PAYLOAD_REG_MODIFY;
typedef struct
{
NvU32 addr;
NvU32 mask;
NvU32 val;
NvU32 timeout;
NvU32 error;
} GSP_SEQ_BUF_PAYLOAD_REG_POLL;
typedef struct
{
NvU32 val;
} GSP_SEQ_BUF_PAYLOAD_DELAY_US;
typedef struct
{
NvU32 addr;
NvU32 index;
} GSP_SEQ_BUF_PAYLOAD_REG_STORE;
typedef struct GSP_SEQUENCER_BUFFER_CMD
{
GSP_SEQ_BUF_OPCODE opCode;
union
{
GSP_SEQ_BUF_PAYLOAD_REG_WRITE regWrite;
GSP_SEQ_BUF_PAYLOAD_REG_MODIFY regModify;
GSP_SEQ_BUF_PAYLOAD_REG_POLL regPoll;
GSP_SEQ_BUF_PAYLOAD_DELAY_US delayUs;
GSP_SEQ_BUF_PAYLOAD_REG_STORE regStore;
} payload;
} GSP_SEQUENCER_BUFFER_CMD;
#endif
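Commands are packed back to back in a dword stream: one opcode dword followed by that opcode's payload dwords (zero for the CORE_* opcodes). The sketch below walks such a buffer, as delivered by the RUN_CPU_SEQUENCER RPC (see rpc_run_cpu_sequencer_v17_00 in a later header); error handling and most opcodes are omitted.

static void
example_walk_sequencer(const NvU32 *buf, NvU32 size_dwords)
{
	NvU32 ptr = 0;

	while (ptr < size_dwords) {
		const GSP_SEQUENCER_BUFFER_CMD *cmd = (const void *)&buf[ptr];

		/* One dword of opcode, then the opcode-specific payload. */
		ptr += 1 + GSP_SEQUENCER_PAYLOAD_SIZE_DWORDS(cmd->opCode);

		switch (cmd->opCode) {
		case GSP_SEQ_BUF_OPCODE_REG_WRITE:
			/* would write cmd->payload.regWrite.val to .addr */
			break;
		default:
			break;
		}
	}
}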


@ -0,0 +1,38 @@
#ifndef __src_nvidia_generated_g_chipset_nvoc_h__
#define __src_nvidia_generated_g_chipset_nvoc_h__
/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.54.03 */
/*
* SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
typedef struct
{
NvU16 deviceID; // deviceID
NvU16 vendorID; // vendorID
NvU16 subdeviceID; // subsystem deviceID
NvU16 subvendorID; // subsystem vendorID
NvU8 revisionID; // revision ID
} BUSINFO;
#endif


@ -0,0 +1,44 @@
#ifndef __src_nvidia_generated_g_os_nvoc_h__
#define __src_nvidia_generated_g_os_nvoc_h__
/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.54.03 */
/*
* SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
typedef struct PACKED_REGISTRY_ENTRY
{
NvU32 nameOffset;
NvU8 type;
NvU32 data;
NvU32 length;
} PACKED_REGISTRY_ENTRY;
typedef struct PACKED_REGISTRY_TABLE
{
NvU32 size;
NvU32 numEntries;
PACKED_REGISTRY_ENTRY entries[0];
} PACKED_REGISTRY_TABLE;
#endif
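The entries[] array is followed in the same blob by the key strings, with nameOffset holding the byte offset of each entry's NUL-terminated key from the start of the table. The sketch below packs a single DWORD-valued key into a caller-provided blob; the numeric type code used for DWORD entries is an assumption.

static void
example_pack_one_registry_entry(PACKED_REGISTRY_TABLE *tbl, const char *key, NvU32 value)
{
	char *strings = (char *)&tbl->entries[1];   /* strings live after the entry array */

	tbl->numEntries = 1;
	tbl->entries[0].nameOffset = strings - (char *)tbl;
	tbl->entries[0].type       = 1;             /* assumed: DWORD-typed entry */
	tbl->entries[0].data       = value;
	tbl->entries[0].length     = sizeof(value);
	strcpy(strings, key);
	tbl->size = tbl->entries[0].nameOffset + strlen(key) + 1;
}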


@ -0,0 +1,52 @@
#ifndef __src_nvidia_generated_g_rpc_structures_h__
#define __src_nvidia_generated_g_rpc_structures_h__
/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.54.03 */
/*
* SPDX-FileCopyrightText: Copyright (c) 2008-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
typedef struct rpc_unloading_guest_driver_v1F_07
{
NvBool bInPMTransition;
NvBool bGc6Entering;
NvU32 newLevel;
} rpc_unloading_guest_driver_v1F_07;
typedef struct rpc_run_cpu_sequencer_v17_00
{
NvU32 bufferSizeDWord;
NvU32 cmdIndex;
NvU32 regSaveArea[8];
NvU32 commandBuffer[];
} rpc_run_cpu_sequencer_v17_00;
typedef struct rpc_os_error_log_v17_00
{
NvU32 exceptType;
NvU32 runlistId;
NvU32 chid;
char errString[0x100];
} rpc_os_error_log_v17_00;
#endif


@ -0,0 +1,74 @@
#ifndef __src_nvidia_inc_kernel_gpu_gpu_acpi_data_h__
#define __src_nvidia_inc_kernel_gpu_gpu_acpi_data_h__
#include <nvrm/535.54.03/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073system.h>
/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.54.03 */
/*
* SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
typedef struct DOD_METHOD_DATA
{
NV_STATUS status;
NvU32 acpiIdListLen;
NvU32 acpiIdList[NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS];
} DOD_METHOD_DATA;
typedef struct JT_METHOD_DATA
{
NV_STATUS status;
NvU32 jtCaps;
NvU16 jtRevId;
NvBool bSBIOSCaps;
} JT_METHOD_DATA;
typedef struct MUX_METHOD_DATA_ELEMENT
{
NvU32 acpiId;
NvU32 mode;
NV_STATUS status;
} MUX_METHOD_DATA_ELEMENT;
typedef struct MUX_METHOD_DATA
{
NvU32 tableLen;
MUX_METHOD_DATA_ELEMENT acpiIdMuxModeTable[NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS];
MUX_METHOD_DATA_ELEMENT acpiIdMuxPartTable[NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS];
} MUX_METHOD_DATA;
typedef struct CAPS_METHOD_DATA
{
NV_STATUS status;
NvU32 optimusCaps;
} CAPS_METHOD_DATA;
typedef struct ACPI_METHOD_DATA
{
NvBool bValid;
DOD_METHOD_DATA dodMethodData;
JT_METHOD_DATA jtMethodData;
MUX_METHOD_DATA muxMethodData;
CAPS_METHOD_DATA capsMethodData;
} ACPI_METHOD_DATA;
#endif


@ -0,0 +1,33 @@
#ifndef __src_nvidia_inc_kernel_gpu_gsp_gsp_fw_heap_h__
#define __src_nvidia_inc_kernel_gpu_gsp_gsp_fw_heap_h__
/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.54.03 */
/*
* SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#define GSP_FW_HEAP_PARAM_SIZE_PER_GB_FB (96 << 10) // All architectures
#define GSP_FW_HEAP_PARAM_CLIENT_ALLOC_SIZE ((48 << 10) * 2048) // Support 2048 channels
#endif
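For a sense of scale, the first constant contributes 96 KiB of GSP heap per GiB of framebuffer and the second reserves 48 KiB for each of 2048 channels (96 MiB total). The sketch below shows only that arithmetic; the full WPR heap sizing formula lives elsewhere in the driver and has more terms than these two.

static u64
example_heap_param_contribution(u64 fb_size_gb)
{
	/* e.g. a 16 GiB board: 16 * 96 KiB = 1.5 MiB ... */
	u64 fb_component = fb_size_gb * GSP_FW_HEAP_PARAM_SIZE_PER_GB_FB;

	/* ... plus 48 KiB * 2048 channels = 96 MiB for client allocations. */
	return fb_component + GSP_FW_HEAP_PARAM_CLIENT_ALLOC_SIZE;
}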


@ -0,0 +1,57 @@
#ifndef __src_nvidia_inc_kernel_gpu_gsp_gsp_init_args_h__
#define __src_nvidia_inc_kernel_gpu_gsp_gsp_init_args_h__
/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.54.03 */
/*
* SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
typedef struct {
RmPhysAddr sharedMemPhysAddr;
NvU32 pageTableEntryCount;
NvLength cmdQueueOffset;
NvLength statQueueOffset;
NvLength locklessCmdQueueOffset;
NvLength locklessStatQueueOffset;
} MESSAGE_QUEUE_INIT_ARGUMENTS;
typedef struct {
NvU32 oldLevel;
NvU32 flags;
NvBool bInPMTransition;
} GSP_SR_INIT_ARGUMENTS;
typedef struct
{
MESSAGE_QUEUE_INIT_ARGUMENTS messageQueueInitArguments;
GSP_SR_INIT_ARGUMENTS srInitArguments;
NvU32 gpuInstance;
struct
{
NvU64 pa;
NvU64 size;
} profilerArgs;
} GSP_ARGUMENTS_CACHED;
#endif
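These arguments tie the shared-memory region tracked in nvkm_gsp's shm struct (added earlier in this patch) to the command and status queues. The sketch below shows how they might be filled in; the offset calculations are illustrative, not the commit's actual layout code.

static void
example_fill_msgq_init_args(struct nvkm_gsp *gsp, MESSAGE_QUEUE_INIT_ARGUMENTS *args)
{
	args->sharedMemPhysAddr   = gsp->shm.mem.addr;
	args->pageTableEntryCount = gsp->shm.ptes.nr;

	/* Offsets of the CPU-to-GSP command queue and GSP-to-CPU status queue
	 * within the shared allocation. */
	args->cmdQueueOffset  = (u8 *)gsp->shm.cmdq.ptr - (u8 *)gsp->shm.mem.data;
	args->statQueueOffset = (u8 *)gsp->shm.msgq.ptr - (u8 *)gsp->shm.mem.data;

	/* The lockless queues are not used in this sketch. */
	args->locklessCmdQueueOffset  = 0;
	args->locklessStatQueueOffset = 0;
}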


@ -0,0 +1,74 @@
#ifndef __src_nvidia_inc_kernel_gpu_gsp_gsp_static_config_h__
#define __src_nvidia_inc_kernel_gpu_gsp_gsp_static_config_h__
#include <nvrm/535.54.03/nvidia/generated/g_chipset_nvoc.h>
#include <nvrm/535.54.03/nvidia/inc/kernel/gpu/gpu_acpi_data.h>
/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.54.03 */
/*
* SPDX-FileCopyrightText: Copyright (c) 2019-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
typedef struct GSP_VF_INFO
{
NvU32 totalVFs;
NvU32 firstVFOffset;
NvU64 FirstVFBar0Address;
NvU64 FirstVFBar1Address;
NvU64 FirstVFBar2Address;
NvBool b64bitBar0;
NvBool b64bitBar1;
NvBool b64bitBar2;
} GSP_VF_INFO;
typedef struct GspSystemInfo
{
NvU64 gpuPhysAddr;
NvU64 gpuPhysFbAddr;
NvU64 gpuPhysInstAddr;
NvU64 nvDomainBusDeviceFunc;
NvU64 simAccessBufPhysAddr;
NvU64 pcieAtomicsOpMask;
NvU64 consoleMemSize;
NvU64 maxUserVa;
NvU32 pciConfigMirrorBase;
NvU32 pciConfigMirrorSize;
NvU8 oorArch;
NvU64 clPdbProperties;
NvU32 Chipset;
NvBool bGpuBehindBridge;
NvBool bMnocAvailable;
NvBool bUpstreamL0sUnsupported;
NvBool bUpstreamL1Unsupported;
NvBool bUpstreamL1PorSupported;
NvBool bUpstreamL1PorMobileOnly;
NvU8 upstreamAddressValid;
BUSINFO FHBBusInfo;
BUSINFO chipsetIDInfo;
ACPI_METHOD_DATA acpiMethodData;
NvU32 hypervisorType;
NvBool bIsPassthru;
NvU64 sysTimerOffsetNs;
GSP_VF_INFO gspVFInfo;
} GspSystemInfo;
#endif


@ -0,0 +1,262 @@
#ifndef __src_nvidia_kernel_inc_vgpu_rpc_global_enums_h__
#define __src_nvidia_kernel_inc_vgpu_rpc_global_enums_h__
/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.54.03 */
#ifndef X
# define X(UNIT, RPC) NV_VGPU_MSG_FUNCTION_##RPC,
# define DEFINING_X_IN_RPC_GLOBAL_ENUMS_H
enum {
#endif
X(RM, NOP) // 0
X(RM, SET_GUEST_SYSTEM_INFO) // 1
X(RM, ALLOC_ROOT) // 2
X(RM, ALLOC_DEVICE) // 3 deprecated
X(RM, ALLOC_MEMORY) // 4
X(RM, ALLOC_CTX_DMA) // 5
X(RM, ALLOC_CHANNEL_DMA) // 6
X(RM, MAP_MEMORY) // 7
X(RM, BIND_CTX_DMA) // 8 deprecated
X(RM, ALLOC_OBJECT) // 9
X(RM, FREE) //10
X(RM, LOG) //11
X(RM, ALLOC_VIDMEM) //12
X(RM, UNMAP_MEMORY) //13
X(RM, MAP_MEMORY_DMA) //14
X(RM, UNMAP_MEMORY_DMA) //15
X(RM, GET_EDID) //16
X(RM, ALLOC_DISP_CHANNEL) //17
X(RM, ALLOC_DISP_OBJECT) //18
X(RM, ALLOC_SUBDEVICE) //19
X(RM, ALLOC_DYNAMIC_MEMORY) //20
X(RM, DUP_OBJECT) //21
X(RM, IDLE_CHANNELS) //22
X(RM, ALLOC_EVENT) //23
X(RM, SEND_EVENT) //24
X(RM, REMAPPER_CONTROL) //25 deprecated
X(RM, DMA_CONTROL) //26
X(RM, DMA_FILL_PTE_MEM) //27
X(RM, MANAGE_HW_RESOURCE) //28
X(RM, BIND_ARBITRARY_CTX_DMA) //29 deprecated
X(RM, CREATE_FB_SEGMENT) //30
X(RM, DESTROY_FB_SEGMENT) //31
X(RM, ALLOC_SHARE_DEVICE) //32
X(RM, DEFERRED_API_CONTROL) //33
X(RM, REMOVE_DEFERRED_API) //34
X(RM, SIM_ESCAPE_READ) //35
X(RM, SIM_ESCAPE_WRITE) //36
X(RM, SIM_MANAGE_DISPLAY_CONTEXT_DMA) //37
X(RM, FREE_VIDMEM_VIRT) //38
X(RM, PERF_GET_PSTATE_INFO) //39 deprecated for vGPU, used by GSP
X(RM, PERF_GET_PERFMON_SAMPLE) //40
X(RM, PERF_GET_VIRTUAL_PSTATE_INFO) //41 deprecated
X(RM, PERF_GET_LEVEL_INFO) //42
X(RM, MAP_SEMA_MEMORY) //43
X(RM, UNMAP_SEMA_MEMORY) //44
X(RM, SET_SURFACE_PROPERTIES) //45
X(RM, CLEANUP_SURFACE) //46
X(RM, UNLOADING_GUEST_DRIVER) //47
X(RM, TDR_SET_TIMEOUT_STATE) //48
X(RM, SWITCH_TO_VGA) //49
X(RM, GPU_EXEC_REG_OPS) //50
X(RM, GET_STATIC_INFO) //51
X(RM, ALLOC_VIRTMEM) //52
X(RM, UPDATE_PDE_2) //53
X(RM, SET_PAGE_DIRECTORY) //54
X(RM, GET_STATIC_PSTATE_INFO) //55
X(RM, TRANSLATE_GUEST_GPU_PTES) //56
X(RM, RESERVED_57) //57
X(RM, RESET_CURRENT_GR_CONTEXT) //58
X(RM, SET_SEMA_MEM_VALIDATION_STATE) //59
X(RM, GET_ENGINE_UTILIZATION) //60
X(RM, UPDATE_GPU_PDES) //61
X(RM, GET_ENCODER_CAPACITY) //62
X(RM, VGPU_PF_REG_READ32) //63
X(RM, SET_GUEST_SYSTEM_INFO_EXT) //64
X(GSP, GET_GSP_STATIC_INFO) //65
X(RM, RMFS_INIT) //66
X(RM, RMFS_CLOSE_QUEUE) //67
X(RM, RMFS_CLEANUP) //68
X(RM, RMFS_TEST) //69
X(RM, UPDATE_BAR_PDE) //70
X(RM, CONTINUATION_RECORD) //71
X(RM, GSP_SET_SYSTEM_INFO) //72
X(RM, SET_REGISTRY) //73
X(GSP, GSP_INIT_POST_OBJGPU) //74 deprecated
X(RM, SUBDEV_EVENT_SET_NOTIFICATION) //75 deprecated
X(GSP, GSP_RM_CONTROL) //76
X(RM, GET_STATIC_INFO2) //77
X(RM, DUMP_PROTOBUF_COMPONENT) //78
X(RM, UNSET_PAGE_DIRECTORY) //79
X(RM, GET_CONSOLIDATED_STATIC_INFO) //80
X(RM, GMMU_REGISTER_FAULT_BUFFER) //81 deprecated
X(RM, GMMU_UNREGISTER_FAULT_BUFFER) //82 deprecated
X(RM, GMMU_REGISTER_CLIENT_SHADOW_FAULT_BUFFER) //83 deprecated
X(RM, GMMU_UNREGISTER_CLIENT_SHADOW_FAULT_BUFFER) //84 deprecated
X(RM, CTRL_SET_VGPU_FB_USAGE) //85
X(RM, CTRL_NVFBC_SW_SESSION_UPDATE_INFO) //86
X(RM, CTRL_NVENC_SW_SESSION_UPDATE_INFO) //87
X(RM, CTRL_RESET_CHANNEL) //88
X(RM, CTRL_RESET_ISOLATED_CHANNEL) //89
X(RM, CTRL_GPU_HANDLE_VF_PRI_FAULT) //90
X(RM, CTRL_CLK_GET_EXTENDED_INFO) //91
X(RM, CTRL_PERF_BOOST) //92
X(RM, CTRL_PERF_VPSTATES_GET_CONTROL) //93
X(RM, CTRL_GET_ZBC_CLEAR_TABLE) //94
X(RM, CTRL_SET_ZBC_COLOR_CLEAR) //95
X(RM, CTRL_SET_ZBC_DEPTH_CLEAR) //96
X(RM, CTRL_GPFIFO_SCHEDULE) //97
X(RM, CTRL_SET_TIMESLICE) //98
X(RM, CTRL_PREEMPT) //99
X(RM, CTRL_FIFO_DISABLE_CHANNELS) //100
X(RM, CTRL_SET_TSG_INTERLEAVE_LEVEL) //101
X(RM, CTRL_SET_CHANNEL_INTERLEAVE_LEVEL) //102
X(GSP, GSP_RM_ALLOC) //103
X(RM, CTRL_GET_P2P_CAPS_V2) //104
X(RM, CTRL_CIPHER_AES_ENCRYPT) //105
X(RM, CTRL_CIPHER_SESSION_KEY) //106
X(RM, CTRL_CIPHER_SESSION_KEY_STATUS) //107
X(RM, CTRL_DBG_CLEAR_ALL_SM_ERROR_STATES) //108
X(RM, CTRL_DBG_READ_ALL_SM_ERROR_STATES) //109
X(RM, CTRL_DBG_SET_EXCEPTION_MASK) //110
X(RM, CTRL_GPU_PROMOTE_CTX) //111
X(RM, CTRL_GR_CTXSW_PREEMPTION_BIND) //112
X(RM, CTRL_GR_SET_CTXSW_PREEMPTION_MODE) //113
X(RM, CTRL_GR_CTXSW_ZCULL_BIND) //114
X(RM, CTRL_GPU_INITIALIZE_CTX) //115
X(RM, CTRL_VASPACE_COPY_SERVER_RESERVED_PDES) //116
X(RM, CTRL_FIFO_CLEAR_FAULTED_BIT) //117
X(RM, CTRL_GET_LATEST_ECC_ADDRESSES) //118
X(RM, CTRL_MC_SERVICE_INTERRUPTS) //119
X(RM, CTRL_DMA_SET_DEFAULT_VASPACE) //120
X(RM, CTRL_GET_CE_PCE_MASK) //121
X(RM, CTRL_GET_ZBC_CLEAR_TABLE_ENTRY) //122
X(RM, CTRL_GET_NVLINK_PEER_ID_MASK) //123
X(RM, CTRL_GET_NVLINK_STATUS) //124
X(RM, CTRL_GET_P2P_CAPS) //125
X(RM, CTRL_GET_P2P_CAPS_MATRIX) //126
X(RM, RESERVED_0) //127
X(RM, CTRL_RESERVE_PM_AREA_SMPC) //128
X(RM, CTRL_RESERVE_HWPM_LEGACY) //129
X(RM, CTRL_B0CC_EXEC_REG_OPS) //130
X(RM, CTRL_BIND_PM_RESOURCES) //131
X(RM, CTRL_DBG_SUSPEND_CONTEXT) //132
X(RM, CTRL_DBG_RESUME_CONTEXT) //133
X(RM, CTRL_DBG_EXEC_REG_OPS) //134
X(RM, CTRL_DBG_SET_MODE_MMU_DEBUG) //135
X(RM, CTRL_DBG_READ_SINGLE_SM_ERROR_STATE) //136
X(RM, CTRL_DBG_CLEAR_SINGLE_SM_ERROR_STATE) //137
X(RM, CTRL_DBG_SET_MODE_ERRBAR_DEBUG) //138
X(RM, CTRL_DBG_SET_NEXT_STOP_TRIGGER_TYPE) //139
X(RM, CTRL_ALLOC_PMA_STREAM) //140
X(RM, CTRL_PMA_STREAM_UPDATE_GET_PUT) //141
X(RM, CTRL_FB_GET_INFO_V2) //142
X(RM, CTRL_FIFO_SET_CHANNEL_PROPERTIES) //143
X(RM, CTRL_GR_GET_CTX_BUFFER_INFO) //144
X(RM, CTRL_KGR_GET_CTX_BUFFER_PTES) //145
X(RM, CTRL_GPU_EVICT_CTX) //146
X(RM, CTRL_FB_GET_FS_INFO) //147
X(RM, CTRL_GRMGR_GET_GR_FS_INFO) //148
X(RM, CTRL_STOP_CHANNEL) //149
X(RM, CTRL_GR_PC_SAMPLING_MODE) //150
X(RM, CTRL_PERF_RATED_TDP_GET_STATUS) //151
X(RM, CTRL_PERF_RATED_TDP_SET_CONTROL) //152
X(RM, CTRL_FREE_PMA_STREAM) //153
X(RM, CTRL_TIMER_SET_GR_TICK_FREQ) //154
X(RM, CTRL_FIFO_SETUP_VF_ZOMBIE_SUBCTX_PDB) //155
X(RM, GET_CONSOLIDATED_GR_STATIC_INFO) //156
X(RM, CTRL_DBG_SET_SINGLE_SM_SINGLE_STEP) //157
X(RM, CTRL_GR_GET_TPC_PARTITION_MODE) //158
X(RM, CTRL_GR_SET_TPC_PARTITION_MODE) //159
X(UVM, UVM_PAGING_CHANNEL_ALLOCATE) //160
X(UVM, UVM_PAGING_CHANNEL_DESTROY) //161
X(UVM, UVM_PAGING_CHANNEL_MAP) //162
X(UVM, UVM_PAGING_CHANNEL_UNMAP) //163
X(UVM, UVM_PAGING_CHANNEL_PUSH_STREAM) //164
X(UVM, UVM_PAGING_CHANNEL_SET_HANDLES) //165
X(UVM, UVM_METHOD_STREAM_GUEST_PAGES_OPERATION) //166
X(RM, CTRL_INTERNAL_QUIESCE_PMA_CHANNEL) //167
X(RM, DCE_RM_INIT) //168
X(RM, REGISTER_VIRTUAL_EVENT_BUFFER) //169
X(RM, CTRL_EVENT_BUFFER_UPDATE_GET) //170
X(RM, GET_PLCABLE_ADDRESS_KIND) //171
X(RM, CTRL_PERF_LIMITS_SET_STATUS_V2) //172
X(RM, CTRL_INTERNAL_SRIOV_PROMOTE_PMA_STREAM) //173
X(RM, CTRL_GET_MMU_DEBUG_MODE) //174
X(RM, CTRL_INTERNAL_PROMOTE_FAULT_METHOD_BUFFERS) //175
X(RM, CTRL_FLCN_GET_CTX_BUFFER_SIZE) //176
X(RM, CTRL_FLCN_GET_CTX_BUFFER_INFO) //177
X(RM, DISABLE_CHANNELS) //178
X(RM, CTRL_FABRIC_MEMORY_DESCRIBE) //179
X(RM, CTRL_FABRIC_MEM_STATS) //180
X(RM, SAVE_HIBERNATION_DATA) //181
X(RM, RESTORE_HIBERNATION_DATA) //182
X(RM, CTRL_INTERNAL_MEMSYS_SET_ZBC_REFERENCED) //183
X(RM, CTRL_EXEC_PARTITIONS_CREATE) //184
X(RM, CTRL_EXEC_PARTITIONS_DELETE) //185
X(RM, CTRL_GPFIFO_GET_WORK_SUBMIT_TOKEN) //186
X(RM, CTRL_GPFIFO_SET_WORK_SUBMIT_TOKEN_NOTIF_INDEX) //187
X(RM, PMA_SCRUBBER_SHARED_BUFFER_GUEST_PAGES_OPERATION) //188
X(RM, CTRL_MASTER_GET_VIRTUAL_FUNCTION_ERROR_CONT_INTR_MASK) //189
X(RM, SET_SYSMEM_DIRTY_PAGE_TRACKING_BUFFER) //190
X(RM, CTRL_SUBDEVICE_GET_P2P_CAPS) // 191
X(RM, CTRL_BUS_SET_P2P_MAPPING) // 192
X(RM, CTRL_BUS_UNSET_P2P_MAPPING) // 193
X(RM, CTRL_FLA_SETUP_INSTANCE_MEM_BLOCK) // 194
X(RM, CTRL_GPU_MIGRATABLE_OPS) // 195
X(RM, CTRL_GET_TOTAL_HS_CREDITS) // 196
X(RM, CTRL_GET_HS_CREDITS) // 197
X(RM, CTRL_SET_HS_CREDITS) // 198
X(RM, CTRL_PM_AREA_PC_SAMPLER) // 199
X(RM, INVALIDATE_TLB) // 200
X(RM, NUM_FUNCTIONS) //END
#ifdef DEFINING_X_IN_RPC_GLOBAL_ENUMS_H
};
# undef X
# undef DEFINING_X_IN_RPC_GLOBAL_ENUMS_H
#endif
#ifndef E
# define E(RPC) NV_VGPU_MSG_EVENT_##RPC,
# define DEFINING_E_IN_RPC_GLOBAL_ENUMS_H
enum {
#endif
E(FIRST_EVENT = 0x1000) // 0x1000
E(GSP_INIT_DONE) // 0x1001
E(GSP_RUN_CPU_SEQUENCER) // 0x1002
E(POST_EVENT) // 0x1003
E(RC_TRIGGERED) // 0x1004
E(MMU_FAULT_QUEUED) // 0x1005
E(OS_ERROR_LOG) // 0x1006
E(RG_LINE_INTR) // 0x1007
E(GPUACCT_PERFMON_UTIL_SAMPLES) // 0x1008
E(SIM_READ) // 0x1009
E(SIM_WRITE) // 0x100a
E(SEMAPHORE_SCHEDULE_CALLBACK) // 0x100b
E(UCODE_LIBOS_PRINT) // 0x100c
E(VGPU_GSP_PLUGIN_TRIGGERED) // 0x100d
E(PERF_GPU_BOOST_SYNC_LIMITS_CALLBACK) // 0x100e
E(PERF_BRIDGELESS_INFO_UPDATE) // 0x100f
E(VGPU_CONFIG) // 0x1010
E(DISPLAY_MODESET) // 0x1011
E(EXTDEV_INTR_SERVICE) // 0x1012
E(NVLINK_INBAND_RECEIVED_DATA_256) // 0x1013
E(NVLINK_INBAND_RECEIVED_DATA_512) // 0x1014
E(NVLINK_INBAND_RECEIVED_DATA_1024) // 0x1015
E(NVLINK_INBAND_RECEIVED_DATA_2048) // 0x1016
E(NVLINK_INBAND_RECEIVED_DATA_4096) // 0x1017
E(TIMED_SEMAPHORE_RELEASE) // 0x1018
E(NVLINK_IS_GPU_DEGRADED) // 0x1019
E(PFM_REQ_HNDLR_STATE_SYNC_CALLBACK) // 0x101a
E(GSP_SEND_USER_SHARED_DATA) // 0x101b
E(NVLINK_FAULT_UP) // 0x101c
E(GSP_LOCKDOWN_NOTICE) // 0x101d
E(MIG_CI_CONFIG_UPDATE) // 0x101e
E(NUM_EVENTS) // END
#ifdef DEFINING_E_IN_RPC_GLOBAL_ENUMS_H
};
# undef E
# undef DEFINING_E_IN_RPC_GLOBAL_ENUMS_H
#endif
#endif
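With neither X nor E defined by the includer, the lists above expand into two plain enums: RPC function numbers and event numbers. Pre-defining X (or E) before the first include turns the same lists into other artifacts, for example a number-to-name table for debug logging. Abridged, the default expansion of the first list is:

enum {
	NV_VGPU_MSG_FUNCTION_NOP,                    /*   0 */
	NV_VGPU_MSG_FUNCTION_SET_GUEST_SYSTEM_INFO,  /*   1 */
	NV_VGPU_MSG_FUNCTION_ALLOC_ROOT,             /*   2 */
	/* ... */
	NV_VGPU_MSG_FUNCTION_INVALIDATE_TLB,         /* 200 */
	NV_VGPU_MSG_FUNCTION_NUM_FUNCTIONS,
};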


@ -0,0 +1,24 @@
/* SPDX-License-Identifier: MIT */
#ifndef __NVRM_NVTYPES_H__
#define __NVRM_NVTYPES_H__
#define NV_ALIGN_BYTES(a) __attribute__ ((__aligned__(a)))
#define NV_DECLARE_ALIGNED(f,a) f __attribute__ ((__aligned__(a)))
typedef u32 NvV32;
typedef u8 NvU8;
typedef u16 NvU16;
typedef u32 NvU32;
typedef u64 NvU64;
typedef void* NvP64;
typedef NvU8 NvBool;
typedef NvU32 NvHandle;
typedef NvU64 NvLength;
typedef NvU64 RmPhysAddr;
typedef NvU32 NV_STATUS;
#endif


@ -2744,6 +2744,66 @@ nv177_chipset = {
.sec2 = { 0x00000001, ga102_sec2_new },
};
static const struct nvkm_device_chip
nv192_chipset = {
.name = "AD102",
.bios = { 0x00000001, nvkm_bios_new },
.devinit = { 0x00000001, ga100_devinit_new },
.fb = { 0x00000001, ga102_fb_new },
.gsp = { 0x00000001, ad102_gsp_new },
.pci = { 0x00000001, gp100_pci_new },
.timer = { 0x00000001, gk20a_timer_new },
.sec2 = { 0x00000001, ga102_sec2_new },
};
static const struct nvkm_device_chip
nv193_chipset = {
.name = "AD103",
.bios = { 0x00000001, nvkm_bios_new },
.devinit = { 0x00000001, ga100_devinit_new },
.fb = { 0x00000001, ga102_fb_new },
.gsp = { 0x00000001, ad102_gsp_new },
.pci = { 0x00000001, gp100_pci_new },
.timer = { 0x00000001, gk20a_timer_new },
.sec2 = { 0x00000001, ga102_sec2_new },
};
static const struct nvkm_device_chip
nv194_chipset = {
.name = "AD104",
.bios = { 0x00000001, nvkm_bios_new },
.devinit = { 0x00000001, ga100_devinit_new },
.fb = { 0x00000001, ga102_fb_new },
.gsp = { 0x00000001, ad102_gsp_new },
.pci = { 0x00000001, gp100_pci_new },
.timer = { 0x00000001, gk20a_timer_new },
.sec2 = { 0x00000001, ga102_sec2_new },
};
static const struct nvkm_device_chip
nv196_chipset = {
.name = "AD106",
.bios = { 0x00000001, nvkm_bios_new },
.devinit = { 0x00000001, ga100_devinit_new },
.fb = { 0x00000001, ga102_fb_new },
.gsp = { 0x00000001, ad102_gsp_new },
.pci = { 0x00000001, gp100_pci_new },
.timer = { 0x00000001, gk20a_timer_new },
.sec2 = { 0x00000001, ga102_sec2_new },
};
static const struct nvkm_device_chip
nv197_chipset = {
.name = "AD107",
.bios = { 0x00000001, nvkm_bios_new },
.devinit = { 0x00000001, ga100_devinit_new },
.fb = { 0x00000001, ga102_fb_new },
.gsp = { 0x00000001, ad102_gsp_new },
.pci = { 0x00000001, gp100_pci_new },
.timer = { 0x00000001, gk20a_timer_new },
.sec2 = { 0x00000001, ga102_sec2_new },
};
struct nvkm_subdev *
nvkm_device_subdev(struct nvkm_device *device, int type, int inst)
{
@ -3062,6 +3122,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
case 0x140: device->card_type = GV100; break;
case 0x160: device->card_type = TU100; break;
case 0x170: device->card_type = GA100; break;
case 0x190: device->card_type = AD100; break;
default:
break;
}
@ -3164,6 +3225,11 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
case 0x174: device->chip = &nv174_chipset; break;
case 0x176: device->chip = &nv176_chipset; break;
case 0x177: device->chip = &nv177_chipset; break;
case 0x192: device->chip = &nv192_chipset; break;
case 0x193: device->chip = &nv193_chipset; break;
case 0x194: device->chip = &nv194_chipset; break;
case 0x196: device->chip = &nv196_chipset; break;
case 0x197: device->chip = &nv197_chipset; break;
default:
if (nvkm_boolopt(device->cfgopt, "NvEnableUnsupportedChipsets", false)) {
switch (device->chipset) {


@ -147,6 +147,7 @@ nvkm_udevice_info(struct nvkm_udevice *udev, void *data, u32 size)
case GV100: args->v0.family = NV_DEVICE_INFO_V0_VOLTA; break;
case TU100: args->v0.family = NV_DEVICE_INFO_V0_TURING; break;
case GA100: args->v0.family = NV_DEVICE_INFO_V0_AMPERE; break;
case AD100: args->v0.family = NV_DEVICE_INFO_V0_ADA; break;
default:
args->v0.family = 0;
break;


@ -8,5 +8,6 @@ nvkm-y += nvkm/falcon/v1.o
nvkm-y += nvkm/falcon/gm200.o
nvkm-y += nvkm/falcon/gp102.o
nvkm-y += nvkm/falcon/tu102.o
nvkm-y += nvkm/falcon/ga100.o
nvkm-y += nvkm/falcon/ga102.o


@ -25,6 +25,15 @@
#include <subdev/timer.h>
#include <subdev/top.h>
bool
nvkm_falcon_riscv_active(struct nvkm_falcon *falcon)
{
if (!falcon->func->riscv_active)
return false;
return falcon->func->riscv_active(falcon);
}
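/*
 * Hypothetical sketch (not part of this commit): a caller waiting for the
 * RISC-V core to come up could poll the helper above with nouveau's usual
 * timeout idiom.
 */
static int
example_wait_riscv_active(struct nvkm_falcon *falcon)
{
	struct nvkm_device *device = falcon->owner->device;

	if (nvkm_msec(device, 2000,
		if (nvkm_falcon_riscv_active(falcon))
			break;
	) < 0)
		return -ETIMEDOUT;

	return 0;
}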
static const struct nvkm_falcon_func_dma *
nvkm_falcon_dma(struct nvkm_falcon *falcon, enum nvkm_falcon_mem *mem_type, u32 *mem_base)
{


@ -24,6 +24,12 @@
#include <subdev/mc.h>
#include <subdev/timer.h>
bool
ga102_flcn_riscv_active(struct nvkm_falcon *falcon)
{
return (nvkm_falcon_rd32(falcon, falcon->addr2 + 0x388) & 0x00000080) != 0;
}
static bool
ga102_flcn_dma_done(struct nvkm_falcon *falcon)
{


@ -0,0 +1,28 @@
/*
* Copyright 2023 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "priv.h"
bool
tu102_flcn_riscv_active(struct nvkm_falcon *falcon)
{
return (nvkm_falcon_rd32(falcon, falcon->addr2 + 0x240) & 0x00000001) != 0;
}


@ -46,6 +46,14 @@ nvbios_addr(struct nvkm_bios *bios, u32 *addr, u8 size)
return true;
}
void *
nvbios_pointer(struct nvkm_bios *bios, u32 addr)
{
if (likely(nvbios_addr(bios, &addr, 0)))
return &bios->data[addr];
return NULL;
}
u8
nvbios_rd08(struct nvkm_bios *bios, u32 addr)
{


@ -1,7 +1,12 @@
# SPDX-License-Identifier: MIT
nvkm-y += nvkm/subdev/gsp/base.o
nvkm-y += nvkm/subdev/gsp/fwsec.o
nvkm-y += nvkm/subdev/gsp/gv100.o
nvkm-y += nvkm/subdev/gsp/tu102.o
nvkm-y += nvkm/subdev/gsp/tu116.o
nvkm-y += nvkm/subdev/gsp/ga100.o
nvkm-y += nvkm/subdev/gsp/ga102.o
nvkm-y += nvkm/subdev/gsp/ad102.o
nvkm-y += nvkm/subdev/gsp/r535.o


@ -0,0 +1,57 @@
/*
* Copyright 2023 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "priv.h"
static const struct nvkm_gsp_func
ad102_gsp_r535_54_03 = {
.flcn = &ga102_gsp_flcn,
.fwsec = &ga102_gsp_fwsec,
.sig_section = ".fwsignature_ad10x",
.wpr_heap.os_carveout_size = 20 << 20,
.wpr_heap.base_size = 8 << 20,
.wpr_heap.min_size = 84 << 20,
.booter.ctor = ga102_gsp_booter_ctor,
.dtor = r535_gsp_dtor,
.oneinit = tu102_gsp_oneinit,
.init = r535_gsp_init,
.fini = r535_gsp_fini,
.reset = ga102_gsp_reset,
.rm = &r535_gsp_rm,
};
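/* The trailing 'true' enables GSP-RM by default on Ada; there is no non-GSP fallback entry. */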
static struct nvkm_gsp_fwif
ad102_gsps[] = {
{ 0, r535_gsp_load, &ad102_gsp_r535_54_03, "535.54.03", true },
{}
};
int
ad102_gsp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
struct nvkm_gsp **pgsp)
{
return nvkm_gsp_new_(ad102_gsps, device, type, inst, pgsp);
}

View File

@ -90,6 +90,7 @@ nvkm_gsp_new_(const struct nvkm_gsp_fwif *fwif, struct nvkm_device *device,
return PTR_ERR(fwif);
gsp->func = fwif->func;
gsp->rm = gsp->func->rm;
return nvkm_falcon_ctor(gsp->func->flcn, &gsp->subdev, gsp->subdev.name, 0x110000,
&gsp->falcon);

View File

@ -0,0 +1,359 @@
/*
* Copyright 2023 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "priv.h"
#include <subdev/bios.h>
#include <subdev/bios/pmu.h>
#include <nvfw/fw.h>
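/*
 * FWSEC is a signed ucode image embedded in the VBIOS (PMU table entry type
 * 0x85) that is run on the GSP falcon before GSP-RM itself.  The structures
 * below describe its DMEM application interface, which gets patched at load
 * time to select either the FRTS (WPR2 carve-out) or SB command.
 */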
union nvfw_falcon_appif_hdr {
struct nvfw_falcon_appif_hdr_v1 {
u8 ver;
u8 hdr;
u8 len;
u8 cnt;
} v1;
};
union nvfw_falcon_appif {
struct nvfw_falcon_appif_v1 {
#define NVFW_FALCON_APPIF_ID_DMEMMAPPER 0x00000004
u32 id;
u32 dmem_base;
} v1;
};
union nvfw_falcon_appif_dmemmapper {
struct {
u32 signature;
u16 version;
u16 size;
u32 cmd_in_buffer_offset;
u32 cmd_in_buffer_size;
u32 cmd_out_buffer_offset;
u32 cmd_out_buffer_size;
u32 nvf_img_data_buffer_offset;
u32 nvf_img_data_buffer_size;
u32 printf_buffer_hdr;
u32 ucode_build_time_stamp;
u32 ucode_signature;
#define NVFW_FALCON_APPIF_DMEMMAPPER_CMD_FRTS 0x00000015
#define NVFW_FALCON_APPIF_DMEMMAPPER_CMD_SB 0x00000019
u32 init_cmd;
u32 ucode_feature;
u32 ucode_cmd_mask0;
u32 ucode_cmd_mask1;
u32 multi_tgt_tbl;
} v3;
};
struct nvfw_fwsec_frts_cmd {
struct {
u32 ver;
u32 hdr;
u64 addr;
u32 size;
u32 flags;
} read_vbios;
struct {
u32 ver;
u32 hdr;
u32 addr;
u32 size;
#define NVFW_FRTS_CMD_REGION_TYPE_FB 0x00000002
u32 type;
} frts_region;
};
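/*
 * Walk FWSEC's application interface table in DMEM, find the DMEMMAPPER
 * entry, and patch in the requested init command (plus the FRTS region
 * parameters when carving out WPR2).
 */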
static int
nvkm_gsp_fwsec_patch(struct nvkm_gsp *gsp, struct nvkm_falcon_fw *fw, u32 if_offset, u32 init_cmd)
{
union nvfw_falcon_appif_hdr *hdr = (void *)(fw->fw.img + fw->dmem_base_img + if_offset);
const u8 *dmem = fw->fw.img + fw->dmem_base_img;
int i;
if (WARN_ON(hdr->v1.ver != 1))
return -EINVAL;
for (i = 0; i < hdr->v1.cnt; i++) {
union nvfw_falcon_appif *app = (void *)((u8 *)hdr + hdr->v1.hdr + i * hdr->v1.len);
union nvfw_falcon_appif_dmemmapper *dmemmap;
struct nvfw_fwsec_frts_cmd *frtscmd;
if (app->v1.id != NVFW_FALCON_APPIF_ID_DMEMMAPPER)
continue;
dmemmap = (void *)(dmem + app->v1.dmem_base);
dmemmap->v3.init_cmd = init_cmd;
frtscmd = (void *)(dmem + dmemmap->v3.cmd_in_buffer_offset);
frtscmd->read_vbios.ver = 1;
frtscmd->read_vbios.hdr = sizeof(frtscmd->read_vbios);
frtscmd->read_vbios.addr = 0;
frtscmd->read_vbios.size = 0;
frtscmd->read_vbios.flags = 2;
if (init_cmd == NVFW_FALCON_APPIF_DMEMMAPPER_CMD_FRTS) {
frtscmd->frts_region.ver = 1;
frtscmd->frts_region.hdr = sizeof(frtscmd->frts_region);
frtscmd->frts_region.addr = gsp->fb.wpr2.frts.addr >> 12;
frtscmd->frts_region.size = gsp->fb.wpr2.frts.size >> 12;
frtscmd->frts_region.type = NVFW_FRTS_CMD_REGION_TYPE_FB;
}
break;
}
if (WARN_ON(i == hdr->v1.cnt))
return -EINVAL;
return 0;
}
union nvfw_falcon_ucode_desc {
struct nvkm_falcon_ucode_desc_v2 {
u32 Hdr;
u32 StoredSize;
u32 UncompressedSize;
u32 VirtualEntry;
u32 InterfaceOffset;
u32 IMEMPhysBase;
u32 IMEMLoadSize;
u32 IMEMVirtBase;
u32 IMEMSecBase;
u32 IMEMSecSize;
u32 DMEMOffset;
u32 DMEMPhysBase;
u32 DMEMLoadSize;
u32 altIMEMLoadSize;
u32 altDMEMLoadSize;
} v2;
struct nvkm_falcon_ucode_desc_v3 {
u32 Hdr;
u32 StoredSize;
u32 PKCDataOffset;
u32 InterfaceOffset;
u32 IMEMPhysBase;
u32 IMEMLoadSize;
u32 IMEMVirtBase;
u32 DMEMPhysBase;
u32 DMEMLoadSize;
u16 EngineIdMask;
u8 UcodeId;
u8 SignatureCount;
u16 SignatureVersions;
u16 Reserved;
} v3;
};
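/*
 * Build FWSEC from a v2 ucode descriptor; this variant needs the separate
 * "acr/bl" bootloader image loaded alongside it.
 */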
static int
nvkm_gsp_fwsec_v2(struct nvkm_gsp *gsp, const char *name,
const struct nvkm_falcon_ucode_desc_v2 *desc, u32 size, u32 init_cmd,
struct nvkm_falcon_fw *fw)
{
struct nvkm_subdev *subdev = &gsp->subdev;
const struct firmware *bl;
const struct nvfw_bin_hdr *hdr;
const struct nvfw_bl_desc *bld;
int ret;
/* Build ucode. */
ret = nvkm_falcon_fw_ctor(gsp->func->fwsec, name, subdev->device, true,
(u8 *)desc + size, desc->IMEMLoadSize + desc->DMEMLoadSize,
&gsp->falcon, fw);
if (WARN_ON(ret))
return ret;
fw->nmem_base_img = 0;
fw->nmem_base = desc->IMEMPhysBase;
fw->nmem_size = desc->IMEMLoadSize - desc->IMEMSecSize;
fw->imem_base_img = 0;
fw->imem_base = desc->IMEMSecBase;
fw->imem_size = desc->IMEMSecSize;
fw->dmem_base_img = desc->DMEMOffset;
fw->dmem_base = desc->DMEMPhysBase;
fw->dmem_size = desc->DMEMLoadSize;
/* Bootloader. */
ret = nvkm_firmware_get(subdev, "acr/bl", 0, &bl);
if (ret)
return ret;
hdr = nvfw_bin_hdr(subdev, bl->data);
bld = nvfw_bl_desc(subdev, bl->data + hdr->header_offset);
fw->boot_addr = bld->start_tag << 8;
fw->boot_size = bld->code_size;
fw->boot = kmemdup(bl->data + hdr->data_offset + bld->code_off, fw->boot_size, GFP_KERNEL);
if (!fw->boot)
ret = -ENOMEM;
nvkm_firmware_put(bl);
/* Patch in interface data. */
return nvkm_gsp_fwsec_patch(gsp, fw, desc->InterfaceOffset, init_cmd);
}
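/*
 * Build FWSEC from a v3 (PKC-signed) ucode descriptor; the signature is
 * patched into DMEM directly from the VBIOS image.
 */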
static int
nvkm_gsp_fwsec_v3(struct nvkm_gsp *gsp, const char *name,
const struct nvkm_falcon_ucode_desc_v3 *desc, u32 size, u32 init_cmd,
struct nvkm_falcon_fw *fw)
{
struct nvkm_device *device = gsp->subdev.device;
struct nvkm_bios *bios = device->bios;
int ret;
/* Build ucode. */
ret = nvkm_falcon_fw_ctor(gsp->func->fwsec, name, device, true,
(u8 *)desc + size, desc->IMEMLoadSize + desc->DMEMLoadSize,
&gsp->falcon, fw);
if (WARN_ON(ret))
return ret;
fw->imem_base_img = 0;
fw->imem_base = desc->IMEMPhysBase;
fw->imem_size = desc->IMEMLoadSize;
fw->dmem_base_img = desc->IMEMLoadSize;
fw->dmem_base = desc->DMEMPhysBase;
fw->dmem_size = ALIGN(desc->DMEMLoadSize, 256);
fw->dmem_sign = desc->PKCDataOffset;
fw->boot_addr = 0;
fw->fuse_ver = desc->SignatureVersions;
fw->ucode_id = desc->UcodeId;
fw->engine_id = desc->EngineIdMask;
/* Patch in signature. */
ret = nvkm_falcon_fw_sign(fw, fw->dmem_base_img + desc->PKCDataOffset, 96 * 4,
nvbios_pointer(bios, 0), desc->SignatureCount,
(u8 *)desc + 0x2c - (u8 *)nvbios_pointer(bios, 0), 0, 0);
if (WARN_ON(ret))
return ret;
/* Patch in interface data. */
return nvkm_gsp_fwsec_patch(gsp, fw, desc->InterfaceOffset, init_cmd);
}
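/*
 * Locate FWSEC in the VBIOS, determine its ucode descriptor version, then
 * construct and boot it on the GSP falcon with the requested init command.
 */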
static int
nvkm_gsp_fwsec(struct nvkm_gsp *gsp, const char *name, u32 init_cmd)
{
struct nvkm_subdev *subdev = &gsp->subdev;
struct nvkm_device *device = subdev->device;
struct nvkm_bios *bios = device->bios;
const union nvfw_falcon_ucode_desc *desc;
struct nvbios_pmuE flcn_ucode;
u8 idx, ver, hdr;
u32 data;
u16 size, vers;
struct nvkm_falcon_fw fw = {};
u32 mbox0 = 0;
int ret;
/* Lookup in VBIOS. */
for (idx = 0; (data = nvbios_pmuEp(bios, idx, &ver, &hdr, &flcn_ucode)); idx++) {
if (flcn_ucode.type == 0x85)
break;
}
if (WARN_ON(!data))
return -EINVAL;
	/* Determine version. */
desc = nvbios_pointer(bios, flcn_ucode.data);
if (WARN_ON(!(desc->v2.Hdr & 0x00000001)))
return -EINVAL;
size = (desc->v2.Hdr & 0xffff0000) >> 16;
vers = (desc->v2.Hdr & 0x0000ff00) >> 8;
switch (vers) {
case 2: ret = nvkm_gsp_fwsec_v2(gsp, name, &desc->v2, size, init_cmd, &fw); break;
case 3: ret = nvkm_gsp_fwsec_v3(gsp, name, &desc->v3, size, init_cmd, &fw); break;
default:
nvkm_error(subdev, "%s(v%d): version unknown\n", name, vers);
return -EINVAL;
}
if (ret) {
nvkm_error(subdev, "%s(v%d): %d\n", name, vers, ret);
return ret;
}
/* Boot. */
ret = nvkm_falcon_fw_boot(&fw, subdev, true, &mbox0, NULL, 0, 0);
nvkm_falcon_fw_dtor(&fw);
if (ret)
return ret;
return 0;
}
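/*
 * Run the FWSEC-SB command and check the error code it leaves in the low 16
 * bits of the register at 0x00143c.
 */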
int
nvkm_gsp_fwsec_sb(struct nvkm_gsp *gsp)
{
struct nvkm_subdev *subdev = &gsp->subdev;
struct nvkm_device *device = subdev->device;
int ret;
u32 err;
ret = nvkm_gsp_fwsec(gsp, "fwsec-sb", NVFW_FALCON_APPIF_DMEMMAPPER_CMD_SB);
if (ret)
return ret;
/* Verify. */
err = nvkm_rd32(device, 0x001400 + (0xf * 4)) & 0x0000ffff;
if (err) {
nvkm_error(subdev, "fwsec-sb: 0x%04x\n", err);
return -EIO;
}
return 0;
}
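/*
 * Run the FWSEC-FRTS command, check the error code it leaves in the high 16
 * bits of the register at 0x001438, then read back the WPR2 bounds it
 * programmed.
 */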
int
nvkm_gsp_fwsec_frts(struct nvkm_gsp *gsp)
{
struct nvkm_subdev *subdev = &gsp->subdev;
struct nvkm_device *device = subdev->device;
int ret;
u32 err, wpr2_lo, wpr2_hi;
ret = nvkm_gsp_fwsec(gsp, "fwsec-frts", NVFW_FALCON_APPIF_DMEMMAPPER_CMD_FRTS);
if (ret)
return ret;
/* Verify. */
err = nvkm_rd32(device, 0x001400 + (0xe * 4)) >> 16;
if (err) {
nvkm_error(subdev, "fwsec-frts: 0x%04x\n", err);
return -EIO;
}
wpr2_lo = nvkm_rd32(device, 0x1fa824);
wpr2_hi = nvkm_rd32(device, 0x1fa828);
nvkm_debug(subdev, "fwsec-frts: WPR2 @ %08x - %08x\n", wpr2_lo, wpr2_hi);
return 0;
}

View File

@ -21,8 +21,30 @@
*/
#include "priv.h"
static const struct nvkm_gsp_func
ga100_gsp_r535_54_03 = {
.flcn = &tu102_gsp_flcn,
.fwsec = &tu102_gsp_fwsec,
.sig_section = ".fwsignature_ga100",
.wpr_heap.base_size = 8 << 20,
.wpr_heap.min_size = 64 << 20,
.booter.ctor = tu102_gsp_booter_ctor,
.dtor = r535_gsp_dtor,
.oneinit = tu102_gsp_oneinit,
.init = r535_gsp_init,
.fini = r535_gsp_fini,
.reset = tu102_gsp_reset,
.rm = &r535_gsp_rm,
};
static struct nvkm_gsp_fwif
ga100_gsps[] = {
{ 0, r535_gsp_load, &ga100_gsp_r535_54_03, "535.54.03" },
{ -1, gv100_gsp_nofw, &gv100_gsp },
{}
};

View File

@ -21,7 +21,119 @@
*/
#include "priv.h"
static const struct nvkm_falcon_func
#include <nvfw/flcn.h>
#include <nvfw/fw.h>
#include <nvfw/hs.h>
int
ga102_gsp_reset(struct nvkm_gsp *gsp)
{
int ret;
ret = gsp->falcon.func->reset_eng(&gsp->falcon);
if (ret)
return ret;
nvkm_falcon_mask(&gsp->falcon, 0x1668, 0x00000111, 0x00000111);
return 0;
}
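/*
 * Construct the GSP-RM "booter" HS firmware from its v2 image layout; the
 * meta-data words carry the fuse version, engine ID and ucode ID consumed by
 * BROM signature selection.
 */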
int
ga102_gsp_booter_ctor(struct nvkm_gsp *gsp, const char *name, const struct firmware *blob,
struct nvkm_falcon *falcon, struct nvkm_falcon_fw *fw)
{
struct nvkm_subdev *subdev = &gsp->subdev;
const struct nvkm_falcon_fw_func *func = &ga102_flcn_fw;
const struct nvfw_bin_hdr *hdr;
const struct nvfw_hs_header_v2 *hshdr;
const struct nvfw_hs_load_header_v2 *lhdr;
u32 loc, sig, cnt, *meta;
int ret;
hdr = nvfw_bin_hdr(subdev, blob->data);
hshdr = nvfw_hs_header_v2(subdev, blob->data + hdr->header_offset);
meta = (u32 *)(blob->data + hshdr->meta_data_offset);
loc = *(u32 *)(blob->data + hshdr->patch_loc);
sig = *(u32 *)(blob->data + hshdr->patch_sig);
cnt = *(u32 *)(blob->data + hshdr->num_sig);
ret = nvkm_falcon_fw_ctor(func, name, subdev->device, true,
blob->data + hdr->data_offset, hdr->data_size, falcon, fw);
if (ret)
goto done;
ret = nvkm_falcon_fw_sign(fw, loc, hshdr->sig_prod_size / cnt, blob->data,
cnt, hshdr->sig_prod_offset + sig, 0, 0);
if (ret)
goto done;
lhdr = nvfw_hs_load_header_v2(subdev, blob->data + hshdr->header_offset);
fw->imem_base_img = lhdr->app[0].offset;
fw->imem_base = 0;
fw->imem_size = lhdr->app[0].size;
fw->dmem_base_img = lhdr->os_data_offset;
fw->dmem_base = 0;
fw->dmem_size = lhdr->os_data_size;
fw->dmem_sign = loc - lhdr->os_data_offset;
fw->boot_addr = lhdr->app[0].offset;
fw->fuse_ver = meta[0];
fw->engine_id = meta[1];
fw->ucode_id = meta[2];
done:
if (ret)
nvkm_falcon_fw_dtor(fw);
return ret;
}
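/*
 * Pick which of FWSEC's signatures to use by matching the ucode's supported
 * version mask against the version fuse currently programmed in HW, and
 * return the index of the matching signature.
 */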
static int
ga102_gsp_fwsec_signature(struct nvkm_falcon_fw *fw, u32 *src_base_src)
{
struct nvkm_falcon *falcon = fw->falcon;
struct nvkm_device *device = falcon->owner->device;
u32 sig_fuse_version = fw->fuse_ver;
u32 reg_fuse_version;
int idx = 0;
FLCN_DBG(falcon, "brom: %08x %08x", fw->engine_id, fw->ucode_id);
FLCN_DBG(falcon, "sig_fuse_version: %08x", sig_fuse_version);
if (fw->engine_id & 0x00000400) {
reg_fuse_version = nvkm_rd32(device, 0x8241c0 + (fw->ucode_id - 1) * 4);
} else {
WARN_ON(1);
return -ENOSYS;
}
FLCN_DBG(falcon, "reg_fuse_version: %08x", reg_fuse_version);
reg_fuse_version = BIT(fls(reg_fuse_version));
FLCN_DBG(falcon, "reg_fuse_version: %08x", reg_fuse_version);
if (!(reg_fuse_version & fw->fuse_ver))
return -EINVAL;
while (!(reg_fuse_version & sig_fuse_version & 1)) {
idx += (sig_fuse_version & 1);
reg_fuse_version >>= 1;
sig_fuse_version >>= 1;
}
return idx;
}
const struct nvkm_falcon_fw_func
ga102_gsp_fwsec = {
.signature = ga102_gsp_fwsec_signature,
.reset = gm200_flcn_fw_reset,
.load = ga102_flcn_fw_load,
.boot = ga102_flcn_fw_boot,
};
const struct nvkm_falcon_func
ga102_gsp_flcn = {
.disable = gm200_flcn_disable,
.enable = gm200_flcn_enable,
@ -32,6 +144,29 @@ ga102_gsp_flcn = {
.reset_wait_mem_scrubbing = ga102_flcn_reset_wait_mem_scrubbing,
.imem_dma = &ga102_flcn_dma,
.dmem_dma = &ga102_flcn_dma,
.riscv_active = ga102_flcn_riscv_active,
};
static const struct nvkm_gsp_func
ga102_gsp_r535_54_03 = {
.flcn = &ga102_gsp_flcn,
.fwsec = &ga102_gsp_fwsec,
.sig_section = ".fwsignature_ga10x",
.wpr_heap.os_carveout_size = 20 << 20,
.wpr_heap.base_size = 8 << 20,
.wpr_heap.min_size = 84 << 20,
.booter.ctor = ga102_gsp_booter_ctor,
.dtor = r535_gsp_dtor,
.oneinit = tu102_gsp_oneinit,
.init = r535_gsp_init,
.fini = r535_gsp_fini,
.reset = ga102_gsp_reset,
.rm = &r535_gsp_rm,
};
static const struct nvkm_gsp_func
@ -41,6 +176,7 @@ ga102_gsp = {
static struct nvkm_gsp_fwif
ga102_gsps[] = {
{ 0, r535_gsp_load, &ga102_gsp_r535_54_03, "535.54.03" },
{ -1, gv100_gsp_nofw, &ga102_gsp },
{}
};

View File

@ -4,23 +4,65 @@
#include <subdev/gsp.h>
enum nvkm_acr_lsf_id;
int nvkm_gsp_fwsec_frts(struct nvkm_gsp *);
int nvkm_gsp_fwsec_sb(struct nvkm_gsp *);
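/* Firmware selection table entry; 'enable' marks chipsets where GSP-RM is used by default. */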
struct nvkm_gsp_fwif {
int version;
int (*load)(struct nvkm_gsp *, int ver, const struct nvkm_gsp_fwif *);
const struct nvkm_gsp_func *func;
const char *ver;
bool enable;
};
int gv100_gsp_nofw(struct nvkm_gsp *, int, const struct nvkm_gsp_fwif *);
int r535_gsp_load(struct nvkm_gsp *, int, const struct nvkm_gsp_fwif *);
struct nvkm_gsp_func {
const struct nvkm_falcon_func *flcn;
const struct nvkm_falcon_fw_func *fwsec;
char *sig_section;
struct {
u32 os_carveout_size;
u32 base_size;
u64 min_size;
} wpr_heap;
struct {
int (*ctor)(struct nvkm_gsp *, const char *name, const struct firmware *,
struct nvkm_falcon *, struct nvkm_falcon_fw *);
} booter;
void (*dtor)(struct nvkm_gsp *);
int (*oneinit)(struct nvkm_gsp *);
int (*init)(struct nvkm_gsp *);
int (*fini)(struct nvkm_gsp *, bool suspend);
int (*reset)(struct nvkm_gsp *);
const struct nvkm_gsp_rm *rm;
};
extern const struct nvkm_falcon_func tu102_gsp_flcn;
extern const struct nvkm_falcon_fw_func tu102_gsp_fwsec;
int tu102_gsp_booter_ctor(struct nvkm_gsp *, const char *, const struct firmware *,
struct nvkm_falcon *, struct nvkm_falcon_fw *);
int tu102_gsp_oneinit(struct nvkm_gsp *);
int tu102_gsp_reset(struct nvkm_gsp *);
extern const struct nvkm_falcon_func ga102_gsp_flcn;
extern const struct nvkm_falcon_fw_func ga102_gsp_fwsec;
int ga102_gsp_booter_ctor(struct nvkm_gsp *, const char *, const struct firmware *,
struct nvkm_falcon *, struct nvkm_falcon_fw *);
int ga102_gsp_reset(struct nvkm_gsp *);
void r535_gsp_dtor(struct nvkm_gsp *);
int r535_gsp_oneinit(struct nvkm_gsp *);
int r535_gsp_init(struct nvkm_gsp *);
int r535_gsp_fini(struct nvkm_gsp *, bool suspend);
extern const struct nvkm_gsp_rm r535_gsp_rm;
int nvkm_gsp_new_(const struct nvkm_gsp_fwif *, struct nvkm_device *, enum nvkm_subdev_type, int,
struct nvkm_gsp **);

File diff suppressed because it is too large

View File

@ -21,8 +21,170 @@
*/
#include "priv.h"
#include <subdev/fb.h>
#include <nvfw/flcn.h>
#include <nvfw/fw.h>
#include <nvfw/hs.h>
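/*
 * Construct the GSP-RM "booter" HS firmware from its v2 image layout,
 * recording the code/data regions and where the signature gets patched in.
 */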
int
tu102_gsp_booter_ctor(struct nvkm_gsp *gsp, const char *name, const struct firmware *blob,
struct nvkm_falcon *falcon, struct nvkm_falcon_fw *fw)
{
struct nvkm_subdev *subdev = &gsp->subdev;
const struct nvkm_falcon_fw_func *func = &gm200_flcn_fw;
const struct nvfw_bin_hdr *hdr;
const struct nvfw_hs_header_v2 *hshdr;
const struct nvfw_hs_load_header_v2 *lhdr;
u32 loc, sig, cnt;
int ret;
hdr = nvfw_bin_hdr(subdev, blob->data);
hshdr = nvfw_hs_header_v2(subdev, blob->data + hdr->header_offset);
loc = *(u32 *)(blob->data + hshdr->patch_loc);
sig = *(u32 *)(blob->data + hshdr->patch_sig);
cnt = *(u32 *)(blob->data + hshdr->num_sig);
ret = nvkm_falcon_fw_ctor(func, name, subdev->device, true,
blob->data + hdr->data_offset, hdr->data_size, falcon, fw);
if (ret)
goto done;
ret = nvkm_falcon_fw_sign(fw, loc, hshdr->sig_prod_size / cnt, blob->data,
cnt, hshdr->sig_prod_offset + sig, 0, 0);
if (ret)
goto done;
lhdr = nvfw_hs_load_header_v2(subdev, blob->data + hshdr->header_offset);
fw->nmem_base_img = 0;
fw->nmem_base = lhdr->os_code_offset;
fw->nmem_size = lhdr->os_code_size;
fw->imem_base_img = fw->nmem_size;
fw->imem_base = lhdr->app[0].offset;
fw->imem_size = lhdr->app[0].size;
fw->dmem_base_img = lhdr->os_data_offset;
fw->dmem_base = 0;
fw->dmem_size = lhdr->os_data_size;
fw->dmem_sign = loc - fw->dmem_base_img;
fw->boot_addr = lhdr->os_code_offset;
done:
if (ret)
nvkm_falcon_fw_dtor(fw);
return ret;
}
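/*
 * Program the bootloader DMEM descriptor so FWSEC's code/data are DMA'd in
 * from (non-coherent) system memory.
 */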
static int
tu102_gsp_fwsec_load_bld(struct nvkm_falcon_fw *fw)
{
struct flcn_bl_dmem_desc_v2 desc = {
.ctx_dma = FALCON_DMAIDX_PHYS_SYS_NCOH,
.code_dma_base = fw->fw.phys,
.non_sec_code_off = fw->nmem_base,
.non_sec_code_size = fw->nmem_size,
.sec_code_off = fw->imem_base,
.sec_code_size = fw->imem_size,
.code_entry_point = 0,
.data_dma_base = fw->fw.phys + fw->dmem_base_img,
.data_size = fw->dmem_size,
.argc = 0,
.argv = 0,
};
flcn_bl_dmem_desc_v2_dump(fw->falcon->user, &desc);
nvkm_falcon_mask(fw->falcon, 0x600 + desc.ctx_dma * 4, 0x00000007, 0x00000005);
return nvkm_falcon_pio_wr(fw->falcon, (u8 *)&desc, 0, 0, DMEM, 0, sizeof(desc), 0, 0);
}
const struct nvkm_falcon_fw_func
tu102_gsp_fwsec = {
.reset = gm200_flcn_fw_reset,
.load = gm200_flcn_fw_load,
.load_bld = tu102_gsp_fwsec_load_bld,
.boot = gm200_flcn_fw_boot,
};
int
tu102_gsp_reset(struct nvkm_gsp *gsp)
{
return gsp->falcon.func->reset_eng(&gsp->falcon);
}
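/*
 * Locate the VBIOS VGA workspace at the top of VRAM, using the display
 * scratch register when it holds a valid address and otherwise falling back
 * to the last 1MiB (or 128KiB) of framebuffer.
 */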
static u64
tu102_gsp_vga_workspace_addr(struct nvkm_gsp *gsp, u64 fb_size)
{
struct nvkm_device *device = gsp->subdev.device;
const u64 base = fb_size - 0x100000;
u64 addr = 0;
if (device->disp)
addr = nvkm_rd32(gsp->subdev.device, 0x625f04);
if (!(addr & 0x00000008))
return base;
addr = (addr & 0xffffff00) << 8;
if (addr < base)
return fb_size - 0x20000;
return addr;
}
int
tu102_gsp_oneinit(struct nvkm_gsp *gsp)
{
gsp->fb.size = nvkm_fb_vidmem_size(gsp->subdev.device);
gsp->fb.bios.vga_workspace.addr = tu102_gsp_vga_workspace_addr(gsp, gsp->fb.size);
gsp->fb.bios.vga_workspace.size = gsp->fb.size - gsp->fb.bios.vga_workspace.addr;
gsp->fb.bios.addr = gsp->fb.bios.vga_workspace.addr;
gsp->fb.bios.size = gsp->fb.bios.vga_workspace.size;
return r535_gsp_oneinit(gsp);
}
const struct nvkm_falcon_func
tu102_gsp_flcn = {
.disable = gm200_flcn_disable,
.enable = gm200_flcn_enable,
.addr2 = 0x1000,
.reset_eng = gp102_flcn_reset_eng,
.reset_wait_mem_scrubbing = gm200_flcn_reset_wait_mem_scrubbing,
.bind_inst = gm200_flcn_bind_inst,
.bind_stat = gm200_flcn_bind_stat,
.bind_intr = true,
.imem_pio = &gm200_flcn_imem_pio,
.dmem_pio = &gm200_flcn_dmem_pio,
.riscv_active = tu102_flcn_riscv_active,
};
static const struct nvkm_gsp_func
tu102_gsp_r535_54_03 = {
.flcn = &tu102_gsp_flcn,
.fwsec = &tu102_gsp_fwsec,
.sig_section = ".fwsignature_tu10x",
.wpr_heap.base_size = 8 << 20,
.wpr_heap.min_size = 64 << 20,
.booter.ctor = tu102_gsp_booter_ctor,
.dtor = r535_gsp_dtor,
.oneinit = tu102_gsp_oneinit,
.init = r535_gsp_init,
.fini = r535_gsp_fini,
.reset = tu102_gsp_reset,
.rm = &r535_gsp_rm,
};
static struct nvkm_gsp_fwif
tu102_gsps[] = {
{ 0, r535_gsp_load, &tu102_gsp_r535_54_03, "535.54.03" },
{ -1, gv100_gsp_nofw, &gv100_gsp },
{}
};

View File

@ -21,8 +21,30 @@
*/
#include "priv.h"
static const struct nvkm_gsp_func
tu116_gsp_r535_54_03 = {
.flcn = &tu102_gsp_flcn,
.fwsec = &tu102_gsp_fwsec,
.sig_section = ".fwsignature_tu11x",
.wpr_heap.base_size = 8 << 20,
.wpr_heap.min_size = 64 << 20,
.booter.ctor = tu102_gsp_booter_ctor,
.dtor = r535_gsp_dtor,
.oneinit = tu102_gsp_oneinit,
.init = r535_gsp_init,
.fini = r535_gsp_fini,
.reset = tu102_gsp_reset,
.rm = &r535_gsp_rm,
};
static struct nvkm_gsp_fwif
tu116_gsps[] = {
{ 0, r535_gsp_load, &tu116_gsp_r535_54_03, "535.54.03" },
{ -1, gv100_gsp_nofw, &gv100_gsp },
{}
};