Merge tag 'drm-xe-next-2024-09-05' of https://gitlab.freedesktop.org/drm/xe/kernel into drm-next

Cross-subsystem Changes:
- Split dma fence array creation into alloc and arm (Matthew Brost)

Driver Changes:
- Move kernel_lrc to execlist backend (Ilia)
- Fix type width for pcode command (Karthik)
- Make xe_drm.h include unambiguous (Jani)
- Fixes and debug improvements for GSC load (Daniele)
- Track resources and VF state by PF (Michal Wajdeczko)
- Fix memory leak on error path (Nirmoy)
- Cleanup header includes (Matt Roper)
- Move pcode logic to tile scope (Matt Roper)
- Move hwmon logic to device scope (Matt Roper)
- Fix media TLB invalidation (Matthew Brost)
- Threshold config fixes for PF (Michal Wajdeczko)
- Remove extra "[drm]" from logs (Michal Wajdeczko)
- Add missing runtime ref (Rodrigo Vivi)
- Fix circular locking on runtime suspend (Rodrigo Vivi)
- Fix rpm in TTM swapout path (Thomas)

Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Lucas De Marchi <lucas.demarchi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/eirx5vdvoflbbqlrzi5cip6bpu3zjojm2pxseufu3rlq4pp6xv@eytjvhizfyu6
commit 2ef8d63da8
Dave Airlie, 2024-09-10 13:17:56 +10:00
60 changed files with 1977 additions and 271 deletions


@ -144,37 +144,38 @@ const struct dma_fence_ops dma_fence_array_ops = {
EXPORT_SYMBOL(dma_fence_array_ops);
/**
* dma_fence_array_create - Create a custom fence array
* dma_fence_array_alloc - Allocate a custom fence array
* @num_fences: [in] number of fences to add in the array
*
* Return dma fence array on success, NULL on failure
*/
struct dma_fence_array *dma_fence_array_alloc(int num_fences)
{
struct dma_fence_array *array;
return kzalloc(struct_size(array, callbacks, num_fences), GFP_KERNEL);
}
EXPORT_SYMBOL(dma_fence_array_alloc);
/**
* dma_fence_array_init - Init a custom fence array
* @array: [in] dma fence array to arm
* @num_fences: [in] number of fences to add in the array
* @fences: [in] array containing the fences
* @context: [in] fence context to use
* @seqno: [in] sequence number to use
* @signal_on_any: [in] signal on any fence in the array
*
* Allocate a dma_fence_array object and initialize the base fence with
* dma_fence_init().
* In case of error it returns NULL.
*
* The caller should allocate the fences array with num_fences size
* and fill it with the fences it wants to add to the object. Ownership of this
* array is taken and dma_fence_put() is used on each fence on release.
*
* If @signal_on_any is true the fence array signals if any fence in the array
* signals, otherwise it signals when all fences in the array signal.
* Implementation of @dma_fence_array_create without allocation. Useful to init
* a preallocated dma fence array in the path of reclaim or dma fence signaling.
*/
struct dma_fence_array *dma_fence_array_create(int num_fences,
struct dma_fence **fences,
u64 context, unsigned seqno,
bool signal_on_any)
void dma_fence_array_init(struct dma_fence_array *array,
int num_fences, struct dma_fence **fences,
u64 context, unsigned seqno,
bool signal_on_any)
{
struct dma_fence_array *array;
WARN_ON(!num_fences || !fences);
array = kzalloc(struct_size(array, callbacks, num_fences), GFP_KERNEL);
if (!array)
return NULL;
array->num_fences = num_fences;
spin_lock_init(&array->lock);
@ -200,6 +201,41 @@ struct dma_fence_array *dma_fence_array_create(int num_fences,
*/
while (num_fences--)
WARN_ON(dma_fence_is_container(fences[num_fences]));
}
EXPORT_SYMBOL(dma_fence_array_init);
/**
* dma_fence_array_create - Create a custom fence array
* @num_fences: [in] number of fences to add in the array
* @fences: [in] array containing the fences
* @context: [in] fence context to use
* @seqno: [in] sequence number to use
* @signal_on_any: [in] signal on any fence in the array
*
* Allocate a dma_fence_array object and initialize the base fence with
* dma_fence_init().
* In case of error it returns NULL.
*
* The caller should allocate the fences array with num_fences size
* and fill it with the fences it wants to add to the object. Ownership of this
* array is taken and dma_fence_put() is used on each fence on release.
*
* If @signal_on_any is true the fence array signals if any fence in the array
* signals, otherwise it signals when all fences in the array signal.
*/
struct dma_fence_array *dma_fence_array_create(int num_fences,
struct dma_fence **fences,
u64 context, unsigned seqno,
bool signal_on_any)
{
struct dma_fence_array *array;
array = dma_fence_array_alloc(num_fences);
if (!array)
return NULL;
dma_fence_array_init(array, num_fences, fences,
context, seqno, signal_on_any);
return array;
}
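For illustration only (not part of this commit): a minimal sketch, with placeholder function names, of how the split API is meant to be used: allocate where GFP_KERNEL is allowed, arm later where allocation is not.

#include <linux/dma-fence-array.h>

/*
 * Illustrative sketch: preallocate outside the dma-fence signalling path,
 * then arm the array later without allocating (e.g. under reclaim).
 */
static struct dma_fence_array *example_prealloc(int num_fences)
{
        return dma_fence_array_alloc(num_fences);       /* may return NULL */
}

static void example_arm(struct dma_fence_array *array, struct dma_fence **fences,
                        int num_fences, u64 context, unsigned int seqno)
{
        /* Ownership of @fences is taken, as documented above. */
        dma_fence_array_init(array, num_fences, fences, context, seqno, false);
}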


@ -40,6 +40,7 @@ xe-y += xe_bb.o \
xe_ggtt.o \
xe_gpu_scheduler.o \
xe_gsc.o \
xe_gsc_debugfs.o \
xe_gsc_proxy.o \
xe_gsc_submit.o \
xe_gt.o \


@ -13,7 +13,7 @@ static inline int
snb_pcode_write_timeout(struct intel_uncore *uncore, u32 mbox, u32 val,
int fast_timeout_us, int slow_timeout_ms)
{
return xe_pcode_write_timeout(__compat_uncore_to_gt(uncore), mbox, val,
return xe_pcode_write_timeout(__compat_uncore_to_tile(uncore), mbox, val,
slow_timeout_ms ?: 1);
}
@ -21,13 +21,13 @@ static inline int
snb_pcode_write(struct intel_uncore *uncore, u32 mbox, u32 val)
{
return xe_pcode_write(__compat_uncore_to_gt(uncore), mbox, val);
return xe_pcode_write(__compat_uncore_to_tile(uncore), mbox, val);
}
static inline int
snb_pcode_read(struct intel_uncore *uncore, u32 mbox, u32 *val, u32 *val1)
{
return xe_pcode_read(__compat_uncore_to_gt(uncore), mbox, val, val1);
return xe_pcode_read(__compat_uncore_to_tile(uncore), mbox, val, val1);
}
static inline int
@ -35,7 +35,7 @@ skl_pcode_request(struct intel_uncore *uncore, u32 mbox,
u32 request, u32 reply_mask, u32 reply,
int timeout_base_ms)
{
return xe_pcode_request(__compat_uncore_to_gt(uncore), mbox, request, reply_mask, reply,
return xe_pcode_request(__compat_uncore_to_tile(uncore), mbox, request, reply_mask, reply,
timeout_base_ms);
}


@ -17,6 +17,13 @@ static inline struct xe_gt *__compat_uncore_to_gt(struct intel_uncore *uncore)
return xe_root_mmio_gt(xe);
}
static inline struct xe_tile *__compat_uncore_to_tile(struct intel_uncore *uncore)
{
struct xe_device *xe = container_of(uncore, struct xe_device, uncore);
return xe_device_get_root_tile(xe);
}
static inline u32 intel_uncore_read(struct intel_uncore *uncore,
i915_reg_t i915_reg)
{


@ -8,7 +8,6 @@
#include "intel_display_types.h"
#include "intel_fbdev_fb.h"
#include "xe_bo.h"
#include "xe_gt.h"
#include "xe_ttm_stolen_mgr.h"
#include "xe_wa.h"


@ -10,7 +10,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_managed.h>
#include <drm/xe_drm.h>
#include <uapi/drm/xe_drm.h>
#include "soc/intel_dram.h"
#include "i915_drv.h" /* FIXME: HAS_DISPLAY() depends on this */
@ -345,10 +345,10 @@ void xe_display_pm_suspend(struct xe_device *xe, bool runtime)
intel_hpd_cancel_work(xe);
if (!runtime && has_display(xe))
if (!runtime && has_display(xe)) {
intel_display_driver_suspend_access(xe);
intel_encoder_suspend_all(&xe->display);
intel_encoder_suspend_all(&xe->display);
}
intel_opregion_suspend(display, s2idle ? PCI_D1 : PCI_D3cold);


@ -9,7 +9,6 @@
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_device_types.h"
#include "xe_gt.h"
u32 intel_dsb_buffer_ggtt_offset(struct intel_dsb_buffer *dsb_buf)
{


@ -12,7 +12,6 @@
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_ggtt.h"
#include "xe_gt.h"
#include "xe_pm.h"
static void


@ -16,7 +16,6 @@
#include "xe_force_wake.h"
#include "xe_gsc_proxy.h"
#include "xe_gsc_submit.h"
#include "xe_gt.h"
#include "xe_map.h"
#include "xe_pm.h"
#include "xe_uc_fw.h"


@ -32,8 +32,12 @@
#define HECI1_FWSTS1_CURRENT_STATE_RESET 0
#define HECI1_FWSTS1_PROXY_STATE_NORMAL 5
#define HECI1_FWSTS1_INIT_COMPLETE REG_BIT(9)
#define HECI_FWSTS2(base) XE_REG((base) + 0xc48)
#define HECI_FWSTS3(base) XE_REG((base) + 0xc60)
#define HECI_FWSTS4(base) XE_REG((base) + 0xc64)
#define HECI_FWSTS5(base) XE_REG((base) + 0xc68)
#define HECI1_FWSTS5_HUC_AUTH_DONE REG_BIT(19)
#define HECI_FWSTS6(base) XE_REG((base) + 0xc6c)
#define HECI_H_GS1(base) XE_REG((base) + 0xc4c)
#define HECI_H_GS1_ER_PREP REG_BIT(0)


@ -3,7 +3,7 @@
* Copyright © 2022 Intel Corporation
*/
#include <drm/xe_drm.h>
#include <uapi/drm/xe_drm.h>
#include <kunit/test.h>
#include <kunit/visibility.h>


@ -81,7 +81,7 @@
#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
#define __xe_assert_msg(xe, condition, msg, arg...) ({ \
(void)drm_WARN(&(xe)->drm, !(condition), "[" DRM_NAME "] Assertion `%s` failed!\n" msg, \
(void)drm_WARN(&(xe)->drm, !(condition), "Assertion `%s` failed!\n" msg, \
__stringify(condition), ## arg); \
})
#else


@ -13,7 +13,7 @@
#include <drm/ttm/ttm_device.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_tt.h>
#include <drm/xe_drm.h>
#include <uapi/drm/xe_drm.h>
#include "xe_device.h"
#include "xe_dma_buf.h"
@ -758,7 +758,16 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
xe_assert(xe, migrate);
trace_xe_bo_move(bo, new_mem->mem_type, old_mem_type, move_lacks_source);
xe_pm_runtime_get_noresume(xe);
if (xe_rpm_reclaim_safe(xe)) {
/*
* We might be called through swapout in the validation path of
* another TTM device, so unconditionally acquire rpm here.
*/
xe_pm_runtime_get(xe);
} else {
drm_WARN_ON(&xe->drm, handle_system_ccs);
xe_pm_runtime_get_noresume(xe);
}
if (xe_bo_is_pinned(bo) && !xe_bo_is_user(bo)) {
/*


@ -15,7 +15,7 @@
#include <drm/drm_ioctl.h>
#include <drm/drm_managed.h>
#include <drm/drm_print.h>
#include <drm/xe_drm.h>
#include <uapi/drm/xe_drm.h>
#include "display/xe_display.h"
#include "instructions/xe_gpu_commands.h"


@ -208,6 +208,12 @@ struct xe_tile {
} vf;
} sriov;
/** @pcode: tile's PCODE */
struct {
/** @pcode.lock: protecting tile's PCODE mailbox data */
struct mutex lock;
} pcode;
/** @migrate: Migration helper for vram blits and clearing */
struct xe_migrate *migrate;


@ -5,7 +5,7 @@
#include "xe_drm_client.h"
#include <drm/drm_print.h>
#include <drm/xe_drm.h>
#include <uapi/drm/xe_drm.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/types.h>


@ -8,7 +8,7 @@
#include <drm/drm_device.h>
#include <drm/drm_exec.h>
#include <drm/drm_file.h>
#include <drm/xe_drm.h>
#include <uapi/drm/xe_drm.h>
#include <linux/delay.h>
#include "xe_bo.h"


@ -9,7 +9,7 @@
#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/xe_drm.h>
#include <uapi/drm/xe_drm.h>
#include "xe_device.h"
#include "xe_gt.h"


@ -123,8 +123,8 @@ static void __xe_execlist_port_idle(struct xe_execlist_port *port)
if (!port->running_exl)
return;
xe_lrc_write_ring(port->hwe->kernel_lrc, noop, sizeof(noop));
__start_lrc(port->hwe, port->hwe->kernel_lrc, 0);
xe_lrc_write_ring(port->lrc, noop, sizeof(noop));
__start_lrc(port->hwe, port->lrc, 0);
port->running_exl = NULL;
}
@ -254,14 +254,22 @@ struct xe_execlist_port *xe_execlist_port_create(struct xe_device *xe,
{
struct drm_device *drm = &xe->drm;
struct xe_execlist_port *port;
int i;
int i, err;
port = drmm_kzalloc(drm, sizeof(*port), GFP_KERNEL);
if (!port)
return ERR_PTR(-ENOMEM);
if (!port) {
err = -ENOMEM;
goto err;
}
port->hwe = hwe;
port->lrc = xe_lrc_create(hwe, NULL, SZ_16K);
if (IS_ERR(port->lrc)) {
err = PTR_ERR(port->lrc);
goto err;
}
spin_lock_init(&port->lock);
for (i = 0; i < ARRAY_SIZE(port->active); i++)
INIT_LIST_HEAD(&port->active[i]);
@ -277,6 +285,9 @@ struct xe_execlist_port *xe_execlist_port_create(struct xe_device *xe,
add_timer(&port->irq_fail);
return port;
err:
return ERR_PTR(err);
}
void xe_execlist_port_destroy(struct xe_execlist_port *port)
@ -287,6 +298,8 @@ void xe_execlist_port_destroy(struct xe_execlist_port *port)
spin_lock_irq(&gt_to_xe(port->hwe->gt)->irq.lock);
port->hwe->irq_handler = NULL;
spin_unlock_irq(&gt_to_xe(port->hwe->gt)->irq.lock);
xe_lrc_put(port->lrc);
}
static struct dma_fence *


@ -27,6 +27,8 @@ struct xe_execlist_port {
struct xe_execlist_exec_queue *running_exl;
struct timer_list irq_fail;
struct xe_lrc *lrc;
};
struct xe_execlist_exec_queue {


@ -8,6 +8,7 @@
#include <linux/delay.h>
#include <drm/drm_managed.h>
#include <drm/drm_print.h>
#include <generated/xe_wa_oob.h>
@ -165,10 +166,11 @@ static int query_compatibility_version(struct xe_gsc *gsc)
return err;
}
compat->major = version_query_rd(xe, &bo->vmap, rd_offset, compat_major);
compat->minor = version_query_rd(xe, &bo->vmap, rd_offset, compat_minor);
compat->major = version_query_rd(xe, &bo->vmap, rd_offset, proj_major);
compat->minor = version_query_rd(xe, &bo->vmap, rd_offset, compat_major);
compat->patch = version_query_rd(xe, &bo->vmap, rd_offset, compat_minor);
xe_gt_info(gt, "found GSC cv%u.%u\n", compat->major, compat->minor);
xe_gt_info(gt, "found GSC cv%u.%u.%u\n", compat->major, compat->minor, compat->patch);
out_bo:
xe_bo_unpin_map_no_vm(bo);
@ -333,9 +335,11 @@ static int gsc_er_complete(struct xe_gt *gt)
if (er_status == GSCI_TIMER_STATUS_TIMER_EXPIRED) {
/*
* XXX: we should trigger an FLR here, but we don't have support
* for that yet.
* for that yet. Since we can't recover from the error, we
* declare the device as wedged.
*/
xe_gt_err(gt, "GSC ER timed out!\n");
xe_device_declare_wedged(gt_to_xe(gt));
return -EIO;
}
@ -513,13 +517,28 @@ out_bo:
void xe_gsc_load_start(struct xe_gsc *gsc)
{
struct xe_gt *gt = gsc_to_gt(gsc);
struct xe_device *xe = gt_to_xe(gt);
if (!xe_uc_fw_is_loadable(&gsc->fw) || !gsc->q)
return;
/*
* The GSC HW is only reset by driver FLR or D3cold entry. We don't
* support the former at runtime, while the latter is only supported on
* DGFX, for which we don't support GSC. Therefore, if GSC failed to
* load previously there is no need to try again because the HW is
* stuck in the error state.
*/
xe_assert(xe, !IS_DGFX(xe));
if (xe_uc_fw_is_in_error_state(&gsc->fw))
return;
/* GSC FW survives GT reset and D3Hot */
if (gsc_fw_is_loaded(gt)) {
xe_uc_fw_change_status(&gsc->fw, XE_UC_FIRMWARE_TRANSFERRED);
if (xe_gsc_proxy_init_done(gsc))
xe_uc_fw_change_status(&gsc->fw, XE_UC_FIRMWARE_RUNNING);
else
xe_uc_fw_change_status(&gsc->fw, XE_UC_FIRMWARE_TRANSFERRED);
return;
}
@ -571,3 +590,35 @@ void xe_gsc_wa_14015076503(struct xe_gt *gt, bool prep)
msleep(200);
}
}
/**
* xe_gsc_print_info - print info about GSC FW status
* @gsc: the GSC structure
* @p: the printer to be used to print the info
*/
void xe_gsc_print_info(struct xe_gsc *gsc, struct drm_printer *p)
{
struct xe_gt *gt = gsc_to_gt(gsc);
int err;
xe_uc_fw_print(&gsc->fw, p);
drm_printf(p, "\tfound security version %u\n", gsc->security_version);
if (!xe_uc_fw_is_enabled(&gsc->fw))
return;
err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GSC);
if (err)
return;
drm_printf(p, "\nHECI1 FWSTS: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
xe_mmio_read32(gt, HECI_FWSTS1(MTL_GSC_HECI1_BASE)),
xe_mmio_read32(gt, HECI_FWSTS2(MTL_GSC_HECI1_BASE)),
xe_mmio_read32(gt, HECI_FWSTS3(MTL_GSC_HECI1_BASE)),
xe_mmio_read32(gt, HECI_FWSTS4(MTL_GSC_HECI1_BASE)),
xe_mmio_read32(gt, HECI_FWSTS5(MTL_GSC_HECI1_BASE)),
xe_mmio_read32(gt, HECI_FWSTS6(MTL_GSC_HECI1_BASE)));
xe_force_wake_put(gt_to_fw(gt), XE_FW_GSC);
}


@ -8,6 +8,7 @@
#include <linux/types.h>
struct drm_printer;
struct xe_gsc;
struct xe_gt;
struct xe_hw_engine;
@ -21,4 +22,6 @@ void xe_gsc_hwe_irq_handler(struct xe_hw_engine *hwe, u16 intr_vec);
void xe_gsc_wa_14015076503(struct xe_gt *gt, bool prep);
void xe_gsc_print_info(struct xe_gsc *gsc, struct drm_printer *p);
#endif


@ -0,0 +1,71 @@
// SPDX-License-Identifier: MIT
/*
* Copyright © 2022 Intel Corporation
*/
#include "xe_gsc_debugfs.h"
#include <drm/drm_debugfs.h>
#include <drm/drm_managed.h>
#include "xe_device.h"
#include "xe_gt.h"
#include "xe_gsc.h"
#include "xe_macros.h"
#include "xe_pm.h"
static struct xe_gt *
gsc_to_gt(struct xe_gsc *gsc)
{
return container_of(gsc, struct xe_gt, uc.gsc);
}
static struct xe_device *
gsc_to_xe(struct xe_gsc *gsc)
{
return gt_to_xe(gsc_to_gt(gsc));
}
static struct xe_gsc *node_to_gsc(struct drm_info_node *node)
{
return node->info_ent->data;
}
static int gsc_info(struct seq_file *m, void *data)
{
struct xe_gsc *gsc = node_to_gsc(m->private);
struct xe_device *xe = gsc_to_xe(gsc);
struct drm_printer p = drm_seq_file_printer(m);
xe_pm_runtime_get(xe);
xe_gsc_print_info(gsc, &p);
xe_pm_runtime_put(xe);
return 0;
}
static const struct drm_info_list debugfs_list[] = {
{"gsc_info", gsc_info, 0},
};
void xe_gsc_debugfs_register(struct xe_gsc *gsc, struct dentry *parent)
{
struct drm_minor *minor = gsc_to_xe(gsc)->drm.primary;
struct drm_info_list *local;
int i;
#define DEBUGFS_SIZE (ARRAY_SIZE(debugfs_list) * sizeof(struct drm_info_list))
local = drmm_kmalloc(&gsc_to_xe(gsc)->drm, DEBUGFS_SIZE, GFP_KERNEL);
if (!local)
return;
memcpy(local, debugfs_list, DEBUGFS_SIZE);
#undef DEBUGFS_SIZE
for (i = 0; i < ARRAY_SIZE(debugfs_list); ++i)
local[i].data = gsc;
drm_debugfs_create_files(local,
ARRAY_SIZE(debugfs_list),
parent, minor);
}


@ -0,0 +1,14 @@
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2024 Intel Corporation
*/
#ifndef _XE_GSC_DEBUGFS_H_
#define _XE_GSC_DEBUGFS_H_
struct dentry;
struct xe_gsc;
void xe_gsc_debugfs_register(struct xe_gsc *gsc, struct dentry *parent);
#endif


@ -8,7 +8,7 @@
#include <linux/minmax.h>
#include <drm/drm_managed.h>
#include <drm/xe_drm.h>
#include <uapi/drm/xe_drm.h>
#include <generated/xe_wa_oob.h>
@ -48,7 +48,6 @@
#include "xe_migrate.h"
#include "xe_mmio.h"
#include "xe_pat.h"
#include "xe_pcode.h"
#include "xe_pm.h"
#include "xe_mocs.h"
#include "xe_reg_sr.h"
@ -388,7 +387,6 @@ int xe_gt_init_early(struct xe_gt *gt)
xe_tuning_process_gt(gt);
xe_force_wake_init_gt(gt, gt_to_fw(gt));
xe_pcode_init(gt);
spin_lock_init(&gt->global_invl_lock);
return 0;
@ -756,12 +754,13 @@ static int gt_reset(struct xe_gt *gt)
xe_gt_info(gt, "reset started\n");
xe_pm_runtime_get(gt_to_xe(gt));
if (xe_fault_inject_gt_reset()) {
err = -ECANCELED;
goto err_fail;
}
xe_pm_runtime_get(gt_to_xe(gt));
xe_gt_sanitize(gt);
err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
@ -796,11 +795,11 @@ err_out:
XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
err_msg:
XE_WARN_ON(xe_uc_start(&gt->uc));
xe_pm_runtime_put(gt_to_xe(gt));
err_fail:
xe_gt_err(gt, "reset failed (%pe)\n", ERR_PTR(err));
xe_device_declare_wedged(gt_to_xe(gt));
xe_pm_runtime_put(gt_to_xe(gt));
return err;
}


@ -388,20 +388,17 @@ static void pagefault_fini(void *arg)
{
struct xe_gt *gt = arg;
struct xe_device *xe = gt_to_xe(gt);
int i;
if (!xe->info.has_usm)
return;
destroy_workqueue(gt->usm.acc_wq);
destroy_workqueue(gt->usm.pf_wq);
for (i = 0; i < NUM_PF_QUEUE; ++i)
kfree(gt->usm.pf_queue[i].data);
}
static int xe_alloc_pf_queue(struct xe_gt *gt, struct pf_queue *pf_queue)
{
struct xe_device *xe = gt_to_xe(gt);
xe_dss_mask_t all_dss;
int num_dss, num_eus;
@ -417,7 +414,8 @@ static int xe_alloc_pf_queue(struct xe_gt *gt, struct pf_queue *pf_queue)
(num_eus + XE_NUM_HW_ENGINES) * PF_MSG_LEN_DW;
pf_queue->gt = gt;
pf_queue->data = kcalloc(pf_queue->num_dw, sizeof(u32), GFP_KERNEL);
pf_queue->data = devm_kcalloc(xe->drm.dev, pf_queue->num_dw,
sizeof(u32), GFP_KERNEL);
if (!pf_queue->data)
return -ENOMEM;


@ -9,6 +9,7 @@
#include "xe_gt_sriov_pf.h"
#include "xe_gt_sriov_pf_config.h"
#include "xe_gt_sriov_pf_control.h"
#include "xe_gt_sriov_pf_helpers.h"
#include "xe_gt_sriov_pf_service.h"
#include "xe_mmio.h"
@ -57,6 +58,10 @@ int xe_gt_sriov_pf_init_early(struct xe_gt *gt)
if (err)
return err;
err = xe_gt_sriov_pf_control_init(gt);
if (err)
return err;
return 0;
}
@ -93,4 +98,5 @@ void xe_gt_sriov_pf_init_hw(struct xe_gt *gt)
void xe_gt_sriov_pf_restart(struct xe_gt *gt)
{
xe_gt_sriov_pf_config_restart(gt);
xe_gt_sriov_pf_control_restart(gt);
}


@ -29,6 +29,7 @@
#include "xe_guc_submit.h"
#include "xe_lmtt.h"
#include "xe_map.h"
#include "xe_migrate.h"
#include "xe_sriov.h"
#include "xe_ttm_vram_mgr.h"
#include "xe_wopcm.h"
@ -276,6 +277,14 @@ static u32 encode_config(u32 *cfg, const struct xe_gt_sriov_config *config)
cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_PREEMPT_TIMEOUT);
cfg[n++] = config->preempt_timeout;
#define encode_threshold_config(TAG, ...) ({ \
cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_THRESHOLD_##TAG); \
cfg[n++] = config->thresholds[MAKE_XE_GUC_KLV_THRESHOLD_INDEX(TAG)]; \
});
MAKE_XE_GUC_KLV_THRESHOLDS_SET(encode_threshold_config);
#undef encode_threshold_config
return n;
}
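For illustration only (names invented for the example, not the driver's actual macros): the threshold handling above leans on an X-macro that expands one statement per threshold tag, which keeps the encode and reset paths in sync with the threshold list. A self-contained sketch of the pattern:

#include <linux/types.h>

/* Hypothetical threshold list; the real one lives in the GuC KLV helpers. */
#define FOR_EACH_EXAMPLE_THRESHOLD(OP) \
        OP(CAT_ERR) \
        OP(PAGE_FAULT) \
        OP(ENGINE_RESET)

enum example_threshold_index {
#define MAKE_INDEX(TAG) EXAMPLE_THRESHOLD_##TAG,
        FOR_EACH_EXAMPLE_THRESHOLD(MAKE_INDEX)
#undef MAKE_INDEX
        EXAMPLE_THRESHOLD_COUNT
};

/* One expansion per tag: adding a threshold automatically updates every user. */
static void example_reset_thresholds(u32 *thresholds)
{
#define RESET_THRESHOLD(TAG) thresholds[EXAMPLE_THRESHOLD_##TAG] = 0;
        FOR_EACH_EXAMPLE_THRESHOLD(RESET_THRESHOLD)
#undef RESET_THRESHOLD
}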
@ -1833,6 +1842,18 @@ u32 xe_gt_sriov_pf_config_get_threshold(struct xe_gt *gt, unsigned int vfid,
return value;
}
static void pf_reset_config_thresholds(struct xe_gt *gt, struct xe_gt_sriov_config *config)
{
lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
#define reset_threshold_config(TAG, ...) ({ \
config->thresholds[MAKE_XE_GUC_KLV_THRESHOLD_INDEX(TAG)] = 0; \
});
MAKE_XE_GUC_KLV_THRESHOLDS_SET(reset_threshold_config);
#undef reset_threshold_config
}
static void pf_release_vf_config(struct xe_gt *gt, unsigned int vfid)
{
struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
@ -1848,6 +1869,7 @@ static void pf_release_vf_config(struct xe_gt *gt, unsigned int vfid)
pf_release_config_ctxs(gt, config);
pf_release_config_dbs(gt, config);
pf_reset_config_sched(gt, config);
pf_reset_config_thresholds(gt, config);
}
/**
@ -1881,6 +1903,87 @@ int xe_gt_sriov_pf_config_release(struct xe_gt *gt, unsigned int vfid, bool forc
return force ? 0 : err;
}
static void pf_sanitize_ggtt(struct xe_ggtt_node *ggtt_region, unsigned int vfid)
{
if (xe_ggtt_node_allocated(ggtt_region))
xe_ggtt_assign(ggtt_region, vfid);
}
static int pf_sanitize_lmem(struct xe_tile *tile, struct xe_bo *bo, long timeout)
{
struct xe_migrate *m = tile->migrate;
struct dma_fence *fence;
int err;
if (!bo)
return 0;
xe_bo_lock(bo, false);
fence = xe_migrate_clear(m, bo, bo->ttm.resource, XE_MIGRATE_CLEAR_FLAG_FULL);
if (IS_ERR(fence)) {
err = PTR_ERR(fence);
} else if (!fence) {
err = -ENOMEM;
} else {
long ret = dma_fence_wait_timeout(fence, false, timeout);
err = ret > 0 ? 0 : ret < 0 ? ret : -ETIMEDOUT;
dma_fence_put(fence);
if (!err)
xe_gt_sriov_dbg_verbose(tile->primary_gt, "LMEM cleared in %dms\n",
jiffies_to_msecs(timeout - ret));
}
xe_bo_unlock(bo);
return err;
}
static int pf_sanitize_vf_resources(struct xe_gt *gt, u32 vfid, long timeout)
{
struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
struct xe_tile *tile = gt_to_tile(gt);
struct xe_device *xe = gt_to_xe(gt);
int err = 0;
/*
* Only GGTT and LMEM need to be cleared by the PF.
* GuC doorbell IDs and context IDs do not need any clearing.
*/
if (!xe_gt_is_media_type(gt)) {
pf_sanitize_ggtt(config->ggtt_region, vfid);
if (IS_DGFX(xe))
err = pf_sanitize_lmem(tile, config->lmem_obj, timeout);
}
return err;
}
/**
* xe_gt_sriov_pf_config_sanitize() - Sanitize VF's resources.
* @gt: the &xe_gt
* @vfid: the VF identifier (can't be PF)
* @timeout: maximum timeout to wait for completion in jiffies
*
* This function can only be called on PF.
*
* Return: 0 on success or a negative error code on failure.
*/
int xe_gt_sriov_pf_config_sanitize(struct xe_gt *gt, unsigned int vfid, long timeout)
{
int err;
xe_gt_assert(gt, vfid != PFID);
mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
err = pf_sanitize_vf_resources(gt, vfid, timeout);
mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
if (unlikely(err))
xe_gt_sriov_notice(gt, "VF%u resource sanitizing failed (%pe)\n",
vfid, ERR_PTR(err));
return err;
}
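A minimal usage sketch (illustrative only; the timeout value and call site are assumptions, the real caller sits in the VF FLR flow of the control code):

/* Illustrative only: clear a VF's GGTT/LMEM during FLR with a bounded wait. */
static void example_sanitize_on_flr(struct xe_gt *gt, unsigned int vfid)
{
        long timeout = msecs_to_jiffies(5000); /* hypothetical 5 s budget */

        if (xe_gt_sriov_pf_config_sanitize(gt, vfid, timeout))
                xe_gt_sriov_notice(gt, "VF%u resources left unsanitized\n", vfid);
}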
/**
* xe_gt_sriov_pf_config_push - Reprovision VF's configuration.
* @gt: the &xe_gt


@ -50,6 +50,7 @@ int xe_gt_sriov_pf_config_set_threshold(struct xe_gt *gt, unsigned int vfid,
enum xe_guc_klv_threshold_index index, u32 value);
int xe_gt_sriov_pf_config_set_fair(struct xe_gt *gt, unsigned int vfid, unsigned int num_vfs);
int xe_gt_sriov_pf_config_sanitize(struct xe_gt *gt, unsigned int vfid, long timeout);
int xe_gt_sriov_pf_config_release(struct xe_gt *gt, unsigned int vfid, bool force);
int xe_gt_sriov_pf_config_push(struct xe_gt *gt, unsigned int vfid, bool refresh);

File diff suppressed because it is too large.


@ -11,6 +11,9 @@
struct xe_gt;
int xe_gt_sriov_pf_control_init(struct xe_gt *gt);
void xe_gt_sriov_pf_control_restart(struct xe_gt *gt);
int xe_gt_sriov_pf_control_pause_vf(struct xe_gt *gt, unsigned int vfid);
int xe_gt_sriov_pf_control_resume_vf(struct xe_gt *gt, unsigned int vfid);
int xe_gt_sriov_pf_control_stop_vf(struct xe_gt *gt, unsigned int vfid);


@ -0,0 +1,107 @@
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2024 Intel Corporation
*/
#ifndef _XE_GT_SRIOV_PF_CONTROL_TYPES_H_
#define _XE_GT_SRIOV_PF_CONTROL_TYPES_H_
#include <linux/completion.h>
#include <linux/spinlock.h>
#include <linux/workqueue_types.h>
/**
* enum xe_gt_sriov_control_bits - Various bits used by the PF to represent a VF state
*
* @XE_GT_SRIOV_STATE_WIP: indicates that some operations are in progress.
* @XE_GT_SRIOV_STATE_FLR_WIP: indicates that a VF FLR is in progress.
* @XE_GT_SRIOV_STATE_FLR_SEND_START: indicates that the PF wants to send a FLR START command.
* @XE_GT_SRIOV_STATE_FLR_WAIT_GUC: indicates that the PF awaits for a response from the GuC.
* @XE_GT_SRIOV_STATE_FLR_GUC_DONE: indicates that the PF has received a response from the GuC.
* @XE_GT_SRIOV_STATE_FLR_RESET_CONFIG: indicates that the PF needs to clear VF's resources.
* @XE_GT_SRIOV_STATE_FLR_RESET_DATA: indicates that the PF needs to clear VF's data.
* @XE_GT_SRIOV_STATE_FLR_RESET_MMIO: indicates that the PF needs to reset VF's registers.
* @XE_GT_SRIOV_STATE_FLR_SEND_FINISH: indicates that the PF wants to send a FLR FINISH message.
* @XE_GT_SRIOV_STATE_FLR_FAILED: indicates that VF FLR sequence failed.
* @XE_GT_SRIOV_STATE_PAUSE_WIP: indicates that a VF pause operation is in progress.
* @XE_GT_SRIOV_STATE_PAUSE_SEND_PAUSE: indicates that the PF is about to send a PAUSE command.
* @XE_GT_SRIOV_STATE_PAUSE_WAIT_GUC: indicates that the PF awaits for a response from the GuC.
* @XE_GT_SRIOV_STATE_PAUSE_GUC_DONE: indicates that the PF has received a response from the GuC.
* @XE_GT_SRIOV_STATE_PAUSE_FAILED: indicates that a VF pause operation has failed.
* @XE_GT_SRIOV_STATE_PAUSED: indicates that the VF is paused.
* @XE_GT_SRIOV_STATE_RESUME_WIP: indicates that a VF resume operation is in progress.
* @XE_GT_SRIOV_STATE_RESUME_SEND_RESUME: indicates that the PF is about to send RESUME command.
* @XE_GT_SRIOV_STATE_RESUME_FAILED: indicates that a VF resume operation has failed.
* @XE_GT_SRIOV_STATE_RESUMED: indicates that the VF was resumed.
* @XE_GT_SRIOV_STATE_STOP_WIP: indicates that a VF stop operation is in progress.
* @XE_GT_SRIOV_STATE_STOP_SEND_STOP: indicates that the PF wants to send a STOP command.
* @XE_GT_SRIOV_STATE_STOP_FAILED: indicates that the VF stop operation has failed
* @XE_GT_SRIOV_STATE_STOPPED: indicates that the VF was stopped.
* @XE_GT_SRIOV_STATE_MISMATCH: indicates that the PF has detected a VF state mismatch.
*/
enum xe_gt_sriov_control_bits {
XE_GT_SRIOV_STATE_WIP = 1,
XE_GT_SRIOV_STATE_FLR_WIP,
XE_GT_SRIOV_STATE_FLR_SEND_START,
XE_GT_SRIOV_STATE_FLR_WAIT_GUC,
XE_GT_SRIOV_STATE_FLR_GUC_DONE,
XE_GT_SRIOV_STATE_FLR_RESET_CONFIG,
XE_GT_SRIOV_STATE_FLR_RESET_DATA,
XE_GT_SRIOV_STATE_FLR_RESET_MMIO,
XE_GT_SRIOV_STATE_FLR_SEND_FINISH,
XE_GT_SRIOV_STATE_FLR_FAILED,
XE_GT_SRIOV_STATE_PAUSE_WIP,
XE_GT_SRIOV_STATE_PAUSE_SEND_PAUSE,
XE_GT_SRIOV_STATE_PAUSE_WAIT_GUC,
XE_GT_SRIOV_STATE_PAUSE_GUC_DONE,
XE_GT_SRIOV_STATE_PAUSE_FAILED,
XE_GT_SRIOV_STATE_PAUSED,
XE_GT_SRIOV_STATE_RESUME_WIP,
XE_GT_SRIOV_STATE_RESUME_SEND_RESUME,
XE_GT_SRIOV_STATE_RESUME_FAILED,
XE_GT_SRIOV_STATE_RESUMED,
XE_GT_SRIOV_STATE_STOP_WIP,
XE_GT_SRIOV_STATE_STOP_SEND_STOP,
XE_GT_SRIOV_STATE_STOP_FAILED,
XE_GT_SRIOV_STATE_STOPPED,
XE_GT_SRIOV_STATE_MISMATCH = BITS_PER_LONG - 1,
};
/**
* struct xe_gt_sriov_control_state - GT-level per-VF control state.
*
* Used by the PF driver to maintain per-VF control data.
*/
struct xe_gt_sriov_control_state {
/** @state: VF state bits */
unsigned long state;
/** @done: completion of async operations */
struct completion done;
/** @link: link into worker list */
struct list_head link;
};
/**
* struct xe_gt_sriov_pf_control - GT-level control data.
*
* Used by the PF driver to maintain its data.
*/
struct xe_gt_sriov_pf_control {
/** @worker: worker that executes VF operations */
struct work_struct worker;
/** @list: list of VF entries that have a pending work */
struct list_head list;
/** @lock: protects VF pending list */
spinlock_t lock;
};
#endif
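For illustration only (not taken from this commit): the `state` field declared above is a plain bitmap, so bits like the ones in the enum are normally driven with the atomic bitop helpers, roughly:

#include <linux/bitops.h>

/* Illustrative sketch: mark a pause as in flight, then check its outcome. */
static void example_mark_pause_wip(unsigned long *state)
{
        set_bit(XE_GT_SRIOV_STATE_PAUSE_WIP, state);
        set_bit(XE_GT_SRIOV_STATE_PAUSE_SEND_PAUSE, state);
}

static bool example_pause_finished(const unsigned long *state)
{
        return test_bit(XE_GT_SRIOV_STATE_PAUSED, state) ||
               test_bit(XE_GT_SRIOV_STATE_PAUSE_FAILED, state);
}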


@ -9,6 +9,7 @@
#include <linux/types.h>
#include "xe_gt_sriov_pf_config_types.h"
#include "xe_gt_sriov_pf_control_types.h"
#include "xe_gt_sriov_pf_monitor_types.h"
#include "xe_gt_sriov_pf_policy_types.h"
#include "xe_gt_sriov_pf_service_types.h"
@ -23,6 +24,9 @@ struct xe_gt_sriov_metadata {
/** @monitor: per-VF monitoring data. */
struct xe_gt_sriov_monitor monitor;
/** @control: per-VF control data. */
struct xe_gt_sriov_control_state control;
/** @version: negotiated VF/PF ABI version */
struct xe_gt_sriov_pf_service_version version;
};
@ -30,12 +34,14 @@ struct xe_gt_sriov_metadata {
/**
* struct xe_gt_sriov_pf - GT level PF virtualization data.
* @service: service data.
* @control: control data.
* @policy: policy data.
* @spare: PF-only provisioning configuration.
* @vfs: metadata for all VFs.
*/
struct xe_gt_sriov_pf {
struct xe_gt_sriov_pf_service service;
struct xe_gt_sriov_pf_control control;
struct xe_gt_sriov_pf_policy policy;
struct xe_gt_sriov_spare_config spare;
struct xe_gt_sriov_metadata *vfs;


@ -329,12 +329,6 @@ struct xe_gt {
/** @eclass: per hardware engine class interface on the GT */
struct xe_hw_engine_class_intf eclass[XE_ENGINE_CLASS_MAX];
/** @pcode: GT's PCODE */
struct {
/** @pcode.lock: protecting GT's PCODE mailbox data */
struct mutex lock;
} pcode;
/** @sysfs: sysfs' kobj used by xe_gt_sysfs */
struct kobject *sysfs;


@ -915,7 +915,7 @@ static void pc_init_pcode_freq(struct xe_guc_pc *pc)
u32 min = DIV_ROUND_CLOSEST(pc->rpn_freq, GT_FREQUENCY_MULTIPLIER);
u32 max = DIV_ROUND_CLOSEST(pc->rp0_freq, GT_FREQUENCY_MULTIPLIER);
XE_WARN_ON(xe_pcode_init_min_freq_table(pc_to_gt(pc), min, max));
XE_WARN_ON(xe_pcode_init_min_freq_table(gt_to_tile(pc_to_gt(pc)), min, max));
}
static int pc_init_freqs(struct xe_guc_pc *pc)


@ -8,7 +8,7 @@
#include <linux/nospec.h>
#include <drm/drm_managed.h>
#include <drm/xe_drm.h>
#include <uapi/drm/xe_drm.h>
#include "regs/xe_engine_regs.h"
#include "regs/xe_gt_regs.h"
@ -273,7 +273,6 @@ static void hw_engine_fini(void *arg)
if (hwe->exl_port)
xe_execlist_port_destroy(hwe->exl_port);
xe_lrc_put(hwe->kernel_lrc);
hwe->gt = NULL;
}
@ -558,21 +557,13 @@ static int hw_engine_init(struct xe_gt *gt, struct xe_hw_engine *hwe,
goto err_name;
}
hwe->kernel_lrc = xe_lrc_create(hwe, NULL, SZ_16K);
if (IS_ERR(hwe->kernel_lrc)) {
err = PTR_ERR(hwe->kernel_lrc);
goto err_hwsp;
}
if (!xe_device_uc_enabled(xe)) {
hwe->exl_port = xe_execlist_port_create(xe, hwe);
if (IS_ERR(hwe->exl_port)) {
err = PTR_ERR(hwe->exl_port);
goto err_kernel_lrc;
goto err_hwsp;
}
}
if (xe_device_uc_enabled(xe)) {
} else {
/* GSCCS has a special interrupt for reset */
if (hwe->class == XE_ENGINE_CLASS_OTHER)
hwe->irq_handler = xe_gsc_hwe_irq_handler;
@ -587,8 +578,6 @@ static int hw_engine_init(struct xe_gt *gt, struct xe_hw_engine *hwe,
return devm_add_action_or_reset(xe->drm.dev, hw_engine_fini, hwe);
err_kernel_lrc:
xe_lrc_put(hwe->kernel_lrc);
err_hwsp:
xe_bo_unpin_map_no_vm(hwe->hwsp);
err_name:


@ -136,8 +136,6 @@ struct xe_hw_engine {
enum xe_force_wake_domains domain;
/** @hwsp: hardware status page buffer object */
struct xe_bo *hwsp;
/** @kernel_lrc: Kernel LRC (should be replaced /w an xe_engine) */
struct xe_lrc *kernel_lrc;
/** @exl_port: execlists port */
struct xe_execlist_port *exl_port;
/** @fence_irq: fence IRQ to run when a hw engine IRQ is received */


@ -12,7 +12,6 @@
#include "regs/xe_mchbar_regs.h"
#include "regs/xe_pcode_regs.h"
#include "xe_device.h"
#include "xe_gt.h"
#include "xe_hwmon.h"
#include "xe_mmio.h"
#include "xe_pcode.h"
@ -65,8 +64,8 @@ struct xe_hwmon_energy_info {
struct xe_hwmon {
/** @hwmon_dev: hwmon device for xe */
struct device *hwmon_dev;
/** @gt: primary gt */
struct xe_gt *gt;
/** @xe: Xe device */
struct xe_device *xe;
/** @hwmon_lock: lock for rw attributes*/
struct mutex hwmon_lock;
/** @scl_shift_power: pkg power unit */
@ -82,7 +81,7 @@ struct xe_hwmon {
static struct xe_reg xe_hwmon_get_reg(struct xe_hwmon *hwmon, enum xe_hwmon_reg hwmon_reg,
int channel)
{
struct xe_device *xe = gt_to_xe(hwmon->gt);
struct xe_device *xe = hwmon->xe;
switch (hwmon_reg) {
case REG_PKG_RAPL_LIMIT:
@ -148,8 +147,9 @@ static struct xe_reg xe_hwmon_get_reg(struct xe_hwmon *hwmon, enum xe_hwmon_reg
static void xe_hwmon_power_max_read(struct xe_hwmon *hwmon, int channel, long *value)
{
u64 reg_val, min, max;
struct xe_device *xe = gt_to_xe(hwmon->gt);
struct xe_device *xe = hwmon->xe;
struct xe_reg rapl_limit, pkg_power_sku;
struct xe_gt *mmio = xe_root_mmio_gt(xe);
rapl_limit = xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, channel);
pkg_power_sku = xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU, channel);
@ -166,7 +166,7 @@ static void xe_hwmon_power_max_read(struct xe_hwmon *hwmon, int channel, long *v
mutex_lock(&hwmon->hwmon_lock);
reg_val = xe_mmio_read32(hwmon->gt, rapl_limit);
reg_val = xe_mmio_read32(mmio, rapl_limit);
/* Check if PL1 limit is disabled */
if (!(reg_val & PKG_PWR_LIM_1_EN)) {
*value = PL1_DISABLE;
@ -176,7 +176,7 @@ static void xe_hwmon_power_max_read(struct xe_hwmon *hwmon, int channel, long *v
reg_val = REG_FIELD_GET(PKG_PWR_LIM_1, reg_val);
*value = mul_u64_u32_shr(reg_val, SF_POWER, hwmon->scl_shift_power);
reg_val = xe_mmio_read64_2x32(hwmon->gt, pkg_power_sku);
reg_val = xe_mmio_read64_2x32(mmio, pkg_power_sku);
min = REG_FIELD_GET(PKG_MIN_PWR, reg_val);
min = mul_u64_u32_shr(min, SF_POWER, hwmon->scl_shift_power);
max = REG_FIELD_GET(PKG_MAX_PWR, reg_val);
@ -190,6 +190,7 @@ unlock:
static int xe_hwmon_power_max_write(struct xe_hwmon *hwmon, int channel, long value)
{
struct xe_gt *mmio = xe_root_mmio_gt(hwmon->xe);
int ret = 0;
u64 reg_val;
struct xe_reg rapl_limit;
@ -200,10 +201,10 @@ static int xe_hwmon_power_max_write(struct xe_hwmon *hwmon, int channel, long va
/* Disable PL1 limit and verify, as limit cannot be disabled on all platforms */
if (value == PL1_DISABLE) {
reg_val = xe_mmio_rmw32(hwmon->gt, rapl_limit, PKG_PWR_LIM_1_EN, 0);
reg_val = xe_mmio_read32(hwmon->gt, rapl_limit);
reg_val = xe_mmio_rmw32(mmio, rapl_limit, PKG_PWR_LIM_1_EN, 0);
reg_val = xe_mmio_read32(mmio, rapl_limit);
if (reg_val & PKG_PWR_LIM_1_EN) {
drm_warn(&gt_to_xe(hwmon->gt)->drm, "PL1 disable is not supported!\n");
drm_warn(&hwmon->xe->drm, "PL1 disable is not supported!\n");
ret = -EOPNOTSUPP;
}
goto unlock;
@ -212,7 +213,7 @@ static int xe_hwmon_power_max_write(struct xe_hwmon *hwmon, int channel, long va
/* Computation in 64-bits to avoid overflow. Round to nearest. */
reg_val = DIV_ROUND_CLOSEST_ULL((u64)value << hwmon->scl_shift_power, SF_POWER);
reg_val = PKG_PWR_LIM_1_EN | REG_FIELD_PREP(PKG_PWR_LIM_1, reg_val);
reg_val = xe_mmio_rmw32(hwmon->gt, rapl_limit, PKG_PWR_LIM_1_EN | PKG_PWR_LIM_1, reg_val);
reg_val = xe_mmio_rmw32(mmio, rapl_limit, PKG_PWR_LIM_1_EN | PKG_PWR_LIM_1, reg_val);
unlock:
mutex_unlock(&hwmon->hwmon_lock);
@ -221,6 +222,7 @@ unlock:
static void xe_hwmon_power_rated_max_read(struct xe_hwmon *hwmon, int channel, long *value)
{
struct xe_gt *mmio = xe_root_mmio_gt(hwmon->xe);
struct xe_reg reg = xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU, channel);
u64 reg_val;
@ -229,7 +231,7 @@ static void xe_hwmon_power_rated_max_read(struct xe_hwmon *hwmon, int channel, l
* for this register can be skipped.
* See xe_hwmon_power_is_visible.
*/
reg_val = xe_mmio_read32(hwmon->gt, reg);
reg_val = xe_mmio_read32(mmio, reg);
reg_val = REG_FIELD_GET(PKG_TDP, reg_val);
*value = mul_u64_u32_shr(reg_val, SF_POWER, hwmon->scl_shift_power);
}
@ -257,11 +259,12 @@ static void xe_hwmon_power_rated_max_read(struct xe_hwmon *hwmon, int channel, l
static void
xe_hwmon_energy_get(struct xe_hwmon *hwmon, int channel, long *energy)
{
struct xe_gt *mmio = xe_root_mmio_gt(hwmon->xe);
struct xe_hwmon_energy_info *ei = &hwmon->ei[channel];
u64 reg_val;
reg_val = xe_mmio_read32(hwmon->gt, xe_hwmon_get_reg(hwmon, REG_PKG_ENERGY_STATUS,
channel));
reg_val = xe_mmio_read32(mmio, xe_hwmon_get_reg(hwmon, REG_PKG_ENERGY_STATUS,
channel));
if (reg_val >= ei->reg_val_prev)
ei->accum_energy += reg_val - ei->reg_val_prev;
@ -279,19 +282,20 @@ xe_hwmon_power_max_interval_show(struct device *dev, struct device_attribute *at
char *buf)
{
struct xe_hwmon *hwmon = dev_get_drvdata(dev);
struct xe_gt *mmio = xe_root_mmio_gt(hwmon->xe);
u32 x, y, x_w = 2; /* 2 bits */
u64 r, tau4, out;
int sensor_index = to_sensor_dev_attr(attr)->index;
xe_pm_runtime_get(gt_to_xe(hwmon->gt));
xe_pm_runtime_get(hwmon->xe);
mutex_lock(&hwmon->hwmon_lock);
r = xe_mmio_read32(hwmon->gt, xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, sensor_index));
r = xe_mmio_read32(mmio, xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, sensor_index));
mutex_unlock(&hwmon->hwmon_lock);
xe_pm_runtime_put(gt_to_xe(hwmon->gt));
xe_pm_runtime_put(hwmon->xe);
x = REG_FIELD_GET(PKG_PWR_LIM_1_TIME_X, r);
y = REG_FIELD_GET(PKG_PWR_LIM_1_TIME_Y, r);
@ -319,6 +323,7 @@ xe_hwmon_power_max_interval_store(struct device *dev, struct device_attribute *a
const char *buf, size_t count)
{
struct xe_hwmon *hwmon = dev_get_drvdata(dev);
struct xe_gt *mmio = xe_root_mmio_gt(hwmon->xe);
u32 x, y, rxy, x_w = 2; /* 2 bits */
u64 tau4, r, max_win;
unsigned long val;
@ -371,16 +376,16 @@ xe_hwmon_power_max_interval_store(struct device *dev, struct device_attribute *a
rxy = REG_FIELD_PREP(PKG_PWR_LIM_1_TIME_X, x) | REG_FIELD_PREP(PKG_PWR_LIM_1_TIME_Y, y);
xe_pm_runtime_get(gt_to_xe(hwmon->gt));
xe_pm_runtime_get(hwmon->xe);
mutex_lock(&hwmon->hwmon_lock);
r = xe_mmio_rmw32(hwmon->gt, xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, sensor_index),
r = xe_mmio_rmw32(mmio, xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, sensor_index),
PKG_PWR_LIM_1_TIME, rxy);
mutex_unlock(&hwmon->hwmon_lock);
xe_pm_runtime_put(gt_to_xe(hwmon->gt));
xe_pm_runtime_put(hwmon->xe);
return count;
}
@ -406,11 +411,11 @@ static umode_t xe_hwmon_attributes_visible(struct kobject *kobj,
struct xe_hwmon *hwmon = dev_get_drvdata(dev);
int ret = 0;
xe_pm_runtime_get(gt_to_xe(hwmon->gt));
xe_pm_runtime_get(hwmon->xe);
ret = xe_reg_is_valid(xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, index)) ? attr->mode : 0;
xe_pm_runtime_put(gt_to_xe(hwmon->gt));
xe_pm_runtime_put(hwmon->xe);
return ret;
}
@ -435,22 +440,26 @@ static const struct hwmon_channel_info * const hwmon_info[] = {
};
/* I1 is exposed as power_crit or as curr_crit depending on bit 31 */
static int xe_hwmon_pcode_read_i1(struct xe_gt *gt, u32 *uval)
static int xe_hwmon_pcode_read_i1(const struct xe_hwmon *hwmon, u32 *uval)
{
struct xe_tile *root_tile = xe_device_get_root_tile(hwmon->xe);
/* Avoid Illegal Subcommand error */
if (gt_to_xe(gt)->info.platform == XE_DG2)
if (hwmon->xe->info.platform == XE_DG2)
return -ENXIO;
return xe_pcode_read(gt, PCODE_MBOX(PCODE_POWER_SETUP,
return xe_pcode_read(root_tile, PCODE_MBOX(PCODE_POWER_SETUP,
POWER_SETUP_SUBCOMMAND_READ_I1, 0),
uval, NULL);
}
static int xe_hwmon_pcode_write_i1(struct xe_gt *gt, u32 uval)
static int xe_hwmon_pcode_write_i1(const struct xe_hwmon *hwmon, u32 uval)
{
return xe_pcode_write(gt, PCODE_MBOX(PCODE_POWER_SETUP,
struct xe_tile *root_tile = xe_device_get_root_tile(hwmon->xe);
return xe_pcode_write(root_tile, PCODE_MBOX(PCODE_POWER_SETUP,
POWER_SETUP_SUBCOMMAND_WRITE_I1, 0),
uval);
(uval & POWER_SETUP_I1_DATA_MASK));
}
static int xe_hwmon_power_curr_crit_read(struct xe_hwmon *hwmon, int channel,
@ -461,7 +470,7 @@ static int xe_hwmon_power_curr_crit_read(struct xe_hwmon *hwmon, int channel,
mutex_lock(&hwmon->hwmon_lock);
ret = xe_hwmon_pcode_read_i1(hwmon->gt, &uval);
ret = xe_hwmon_pcode_read_i1(hwmon, &uval);
if (ret)
goto unlock;
@ -481,7 +490,7 @@ static int xe_hwmon_power_curr_crit_write(struct xe_hwmon *hwmon, int channel,
mutex_lock(&hwmon->hwmon_lock);
uval = DIV_ROUND_CLOSEST_ULL(value << POWER_SETUP_I1_SHIFT, scale_factor);
ret = xe_hwmon_pcode_write_i1(hwmon->gt, uval);
ret = xe_hwmon_pcode_write_i1(hwmon, uval);
mutex_unlock(&hwmon->hwmon_lock);
return ret;
@ -489,9 +498,10 @@ static int xe_hwmon_power_curr_crit_write(struct xe_hwmon *hwmon, int channel,
static void xe_hwmon_get_voltage(struct xe_hwmon *hwmon, int channel, long *value)
{
struct xe_gt *mmio = xe_root_mmio_gt(hwmon->xe);
u64 reg_val;
reg_val = xe_mmio_read32(hwmon->gt, xe_hwmon_get_reg(hwmon, REG_GT_PERF_STATUS, channel));
reg_val = xe_mmio_read32(mmio, xe_hwmon_get_reg(hwmon, REG_GT_PERF_STATUS, channel));
/* HW register value in units of 2.5 millivolt */
*value = DIV_ROUND_CLOSEST(REG_FIELD_GET(VOLTAGE_MASK, reg_val) * 2500, SF_VOLTAGE);
}
@ -510,7 +520,7 @@ xe_hwmon_power_is_visible(struct xe_hwmon *hwmon, u32 attr, int channel)
channel)) ? 0444 : 0;
case hwmon_power_crit:
if (channel == CHANNEL_PKG)
return (xe_hwmon_pcode_read_i1(hwmon->gt, &uval) ||
return (xe_hwmon_pcode_read_i1(hwmon, &uval) ||
!(uval & POWER_SETUP_I1_WATTS)) ? 0 : 0644;
break;
case hwmon_power_label:
@ -563,10 +573,10 @@ xe_hwmon_curr_is_visible(const struct xe_hwmon *hwmon, u32 attr, int channel)
switch (attr) {
case hwmon_curr_crit:
return (xe_hwmon_pcode_read_i1(hwmon->gt, &uval) ||
return (xe_hwmon_pcode_read_i1(hwmon, &uval) ||
(uval & POWER_SETUP_I1_WATTS)) ? 0 : 0644;
case hwmon_curr_label:
return (xe_hwmon_pcode_read_i1(hwmon->gt, &uval) ||
return (xe_hwmon_pcode_read_i1(hwmon, &uval) ||
(uval & POWER_SETUP_I1_WATTS)) ? 0 : 0444;
break;
default:
@ -654,7 +664,7 @@ xe_hwmon_is_visible(const void *drvdata, enum hwmon_sensor_types type,
struct xe_hwmon *hwmon = (struct xe_hwmon *)drvdata;
int ret;
xe_pm_runtime_get(gt_to_xe(hwmon->gt));
xe_pm_runtime_get(hwmon->xe);
switch (type) {
case hwmon_power:
@ -674,7 +684,7 @@ xe_hwmon_is_visible(const void *drvdata, enum hwmon_sensor_types type,
break;
}
xe_pm_runtime_put(gt_to_xe(hwmon->gt));
xe_pm_runtime_put(hwmon->xe);
return ret;
}
@ -686,7 +696,7 @@ xe_hwmon_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
struct xe_hwmon *hwmon = dev_get_drvdata(dev);
int ret;
xe_pm_runtime_get(gt_to_xe(hwmon->gt));
xe_pm_runtime_get(hwmon->xe);
switch (type) {
case hwmon_power:
@ -706,7 +716,7 @@ xe_hwmon_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
break;
}
xe_pm_runtime_put(gt_to_xe(hwmon->gt));
xe_pm_runtime_put(hwmon->xe);
return ret;
}
@ -718,7 +728,7 @@ xe_hwmon_write(struct device *dev, enum hwmon_sensor_types type, u32 attr,
struct xe_hwmon *hwmon = dev_get_drvdata(dev);
int ret;
xe_pm_runtime_get(gt_to_xe(hwmon->gt));
xe_pm_runtime_get(hwmon->xe);
switch (type) {
case hwmon_power:
@ -732,7 +742,7 @@ xe_hwmon_write(struct device *dev, enum hwmon_sensor_types type, u32 attr,
break;
}
xe_pm_runtime_put(gt_to_xe(hwmon->gt));
xe_pm_runtime_put(hwmon->xe);
return ret;
}
@ -771,6 +781,7 @@ static const struct hwmon_chip_info hwmon_chip_info = {
static void
xe_hwmon_get_preregistration_info(struct xe_device *xe)
{
struct xe_gt *mmio = xe_root_mmio_gt(xe);
struct xe_hwmon *hwmon = xe->hwmon;
long energy;
u64 val_sku_unit = 0;
@ -783,7 +794,7 @@ xe_hwmon_get_preregistration_info(struct xe_device *xe)
*/
pkg_power_sku_unit = xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU_UNIT, 0);
if (xe_reg_is_valid(pkg_power_sku_unit)) {
val_sku_unit = xe_mmio_read32(hwmon->gt, pkg_power_sku_unit);
val_sku_unit = xe_mmio_read32(mmio, pkg_power_sku_unit);
hwmon->scl_shift_power = REG_FIELD_GET(PKG_PWR_UNIT, val_sku_unit);
hwmon->scl_shift_energy = REG_FIELD_GET(PKG_ENERGY_UNIT, val_sku_unit);
hwmon->scl_shift_time = REG_FIELD_GET(PKG_TIME_UNIT, val_sku_unit);
@ -828,8 +839,8 @@ void xe_hwmon_register(struct xe_device *xe)
if (devm_add_action_or_reset(dev, xe_hwmon_mutex_destroy, hwmon))
return;
/* primary GT to access device level properties */
hwmon->gt = xe->tiles[0].primary_gt;
/* There's only one instance of hwmon per device */
hwmon->xe = xe;
xe_hwmon_get_preregistration_info(xe);
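The recurring pattern in the hwmon changes above: the hwmon state keeps only the &xe_device, MMIO accesses resolve the root GT on demand, and PCODE accesses go through the root tile. A minimal sketch of the MMIO side (illustrative only):

/* Illustrative only: device-scoped hwmon state, root MMIO GT resolved per access. */
static u32 example_hwmon_read(struct xe_hwmon *hwmon, struct xe_reg reg)
{
        struct xe_gt *mmio = xe_root_mmio_gt(hwmon->xe);

        return xe_mmio_read32(mmio, reg);
}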


@ -10,7 +10,7 @@
#include <drm/drm_managed.h>
#include <drm/ttm/ttm_tt.h>
#include <drm/xe_drm.h>
#include <uapi/drm/xe_drm.h>
#include <generated/xe_wa_oob.h>


@ -10,7 +10,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_managed.h>
#include <drm/xe_drm.h>
#include <uapi/drm/xe_drm.h>
#include "abi/guc_actions_slpc_abi.h"
#include "instructions/xe_mi_commands.h"


@ -11,7 +11,7 @@
#include <linux/mutex.h>
#include <linux/types.h>
#include <drm/xe_drm.h>
#include <uapi/drm/xe_drm.h>
#include "regs/xe_reg_defs.h"
#include "xe_hw_engine_types.h"


@ -6,7 +6,7 @@
#include <linux/errno.h>
#include <linux/sysctl.h>
#include <drm/xe_drm.h>
#include <uapi/drm/xe_drm.h>
#include "xe_oa.h"
#include "xe_observation.h"


@ -5,7 +5,7 @@
#include "xe_pat.h"
#include <drm/xe_drm.h>
#include <uapi/drm/xe_drm.h>
#include <generated/xe_wa_oob.h>


@ -12,7 +12,6 @@
#include "xe_assert.h"
#include "xe_device.h"
#include "xe_gt.h"
#include "xe_mmio.h"
#include "xe_pcode_api.h"
@ -30,7 +29,7 @@
* - PCODE for display operations
*/
static int pcode_mailbox_status(struct xe_gt *gt)
static int pcode_mailbox_status(struct xe_tile *tile)
{
u32 err;
static const struct pcode_err_decode err_decode[] = {
@ -45,9 +44,9 @@ static int pcode_mailbox_status(struct xe_gt *gt)
[PCODE_ERROR_MASK] = {-EPROTO, "Unknown"},
};
err = xe_mmio_read32(gt, PCODE_MAILBOX) & PCODE_ERROR_MASK;
err = xe_mmio_read32(tile->primary_gt, PCODE_MAILBOX) & PCODE_ERROR_MASK;
if (err) {
drm_err(&gt_to_xe(gt)->drm, "PCODE Mailbox failed: %d %s", err,
drm_err(&tile_to_xe(tile)->drm, "PCODE Mailbox failed: %d %s", err,
err_decode[err].str ?: "Unknown");
return err_decode[err].errno ?: -EPROTO;
}
@ -55,84 +54,85 @@ static int pcode_mailbox_status(struct xe_gt *gt)
return 0;
}
static int __pcode_mailbox_rw(struct xe_gt *gt, u32 mbox, u32 *data0, u32 *data1,
static int __pcode_mailbox_rw(struct xe_tile *tile, u32 mbox, u32 *data0, u32 *data1,
unsigned int timeout_ms, bool return_data,
bool atomic)
{
struct xe_gt *mmio = tile->primary_gt;
int err;
if (gt_to_xe(gt)->info.skip_pcode)
if (tile_to_xe(tile)->info.skip_pcode)
return 0;
if ((xe_mmio_read32(gt, PCODE_MAILBOX) & PCODE_READY) != 0)
if ((xe_mmio_read32(mmio, PCODE_MAILBOX) & PCODE_READY) != 0)
return -EAGAIN;
xe_mmio_write32(gt, PCODE_DATA0, *data0);
xe_mmio_write32(gt, PCODE_DATA1, data1 ? *data1 : 0);
xe_mmio_write32(gt, PCODE_MAILBOX, PCODE_READY | mbox);
xe_mmio_write32(mmio, PCODE_DATA0, *data0);
xe_mmio_write32(mmio, PCODE_DATA1, data1 ? *data1 : 0);
xe_mmio_write32(mmio, PCODE_MAILBOX, PCODE_READY | mbox);
err = xe_mmio_wait32(gt, PCODE_MAILBOX, PCODE_READY, 0,
err = xe_mmio_wait32(mmio, PCODE_MAILBOX, PCODE_READY, 0,
timeout_ms * USEC_PER_MSEC, NULL, atomic);
if (err)
return err;
if (return_data) {
*data0 = xe_mmio_read32(gt, PCODE_DATA0);
*data0 = xe_mmio_read32(mmio, PCODE_DATA0);
if (data1)
*data1 = xe_mmio_read32(gt, PCODE_DATA1);
*data1 = xe_mmio_read32(mmio, PCODE_DATA1);
}
return pcode_mailbox_status(gt);
return pcode_mailbox_status(tile);
}
static int pcode_mailbox_rw(struct xe_gt *gt, u32 mbox, u32 *data0, u32 *data1,
static int pcode_mailbox_rw(struct xe_tile *tile, u32 mbox, u32 *data0, u32 *data1,
unsigned int timeout_ms, bool return_data,
bool atomic)
{
if (gt_to_xe(gt)->info.skip_pcode)
if (tile_to_xe(tile)->info.skip_pcode)
return 0;
lockdep_assert_held(&gt->pcode.lock);
lockdep_assert_held(&tile->pcode.lock);
return __pcode_mailbox_rw(gt, mbox, data0, data1, timeout_ms, return_data, atomic);
return __pcode_mailbox_rw(tile, mbox, data0, data1, timeout_ms, return_data, atomic);
}
int xe_pcode_write_timeout(struct xe_gt *gt, u32 mbox, u32 data, int timeout)
int xe_pcode_write_timeout(struct xe_tile *tile, u32 mbox, u32 data, int timeout)
{
int err;
mutex_lock(&gt->pcode.lock);
err = pcode_mailbox_rw(gt, mbox, &data, NULL, timeout, false, false);
mutex_unlock(&gt->pcode.lock);
mutex_lock(&tile->pcode.lock);
err = pcode_mailbox_rw(tile, mbox, &data, NULL, timeout, false, false);
mutex_unlock(&tile->pcode.lock);
return err;
}
int xe_pcode_read(struct xe_gt *gt, u32 mbox, u32 *val, u32 *val1)
int xe_pcode_read(struct xe_tile *tile, u32 mbox, u32 *val, u32 *val1)
{
int err;
mutex_lock(&gt->pcode.lock);
err = pcode_mailbox_rw(gt, mbox, val, val1, 1, true, false);
mutex_unlock(&gt->pcode.lock);
mutex_lock(&tile->pcode.lock);
err = pcode_mailbox_rw(tile, mbox, val, val1, 1, true, false);
mutex_unlock(&tile->pcode.lock);
return err;
}
static int pcode_try_request(struct xe_gt *gt, u32 mbox,
static int pcode_try_request(struct xe_tile *tile, u32 mbox,
u32 request, u32 reply_mask, u32 reply,
u32 *status, bool atomic, int timeout_us, bool locked)
{
int slept, wait = 10;
xe_gt_assert(gt, timeout_us > 0);
xe_tile_assert(tile, timeout_us > 0);
for (slept = 0; slept < timeout_us; slept += wait) {
if (locked)
*status = pcode_mailbox_rw(gt, mbox, &request, NULL, 1, true,
*status = pcode_mailbox_rw(tile, mbox, &request, NULL, 1, true,
atomic);
else
*status = __pcode_mailbox_rw(gt, mbox, &request, NULL, 1, true,
*status = __pcode_mailbox_rw(tile, mbox, &request, NULL, 1, true,
atomic);
if ((*status == 0) && ((request & reply_mask) == reply))
return 0;
@ -149,7 +149,7 @@ static int pcode_try_request(struct xe_gt *gt, u32 mbox,
/**
* xe_pcode_request - send PCODE request until acknowledgment
* @gt: gt
* @tile: tile
* @mbox: PCODE mailbox ID the request is targeted for
* @request: request ID
* @reply_mask: mask used to check for request acknowledgment
@ -166,17 +166,17 @@ static int pcode_try_request(struct xe_gt *gt, u32 mbox,
* Returns 0 on success, %-ETIMEDOUT in case of a timeout, <0 in case of some
* other error as reported by PCODE.
*/
int xe_pcode_request(struct xe_gt *gt, u32 mbox, u32 request,
u32 reply_mask, u32 reply, int timeout_base_ms)
int xe_pcode_request(struct xe_tile *tile, u32 mbox, u32 request,
u32 reply_mask, u32 reply, int timeout_base_ms)
{
u32 status;
int ret;
xe_gt_assert(gt, timeout_base_ms <= 3);
xe_tile_assert(tile, timeout_base_ms <= 3);
mutex_lock(&gt->pcode.lock);
mutex_lock(&tile->pcode.lock);
ret = pcode_try_request(gt, mbox, request, reply_mask, reply, &status,
ret = pcode_try_request(tile, mbox, request, reply_mask, reply, &status,
false, timeout_base_ms * 1000, true);
if (!ret)
goto out;
@ -191,20 +191,20 @@ int xe_pcode_request(struct xe_gt *gt, u32 mbox, u32 request,
* requests, and for any quirks of the PCODE firmware that delays
* the request completion.
*/
drm_err(&gt_to_xe(gt)->drm,
drm_err(&tile_to_xe(tile)->drm,
"PCODE timeout, retrying with preemption disabled\n");
preempt_disable();
ret = pcode_try_request(gt, mbox, request, reply_mask, reply, &status,
ret = pcode_try_request(tile, mbox, request, reply_mask, reply, &status,
true, 50 * 1000, true);
preempt_enable();
out:
mutex_unlock(&gt->pcode.lock);
mutex_unlock(&tile->pcode.lock);
return status ? status : ret;
}
/**
* xe_pcode_init_min_freq_table - Initialize PCODE's QOS frequency table
* @gt: gt instance
* @tile: tile instance
* @min_gt_freq: Minimal (RPn) GT frequency in units of 50MHz.
* @max_gt_freq: Maximal (RP0) GT frequency in units of 50MHz.
*
@ -227,30 +227,30 @@ out:
* - -EACCES, "PCODE Rejected"
* - -EPROTO, "Unknown"
*/
int xe_pcode_init_min_freq_table(struct xe_gt *gt, u32 min_gt_freq,
int xe_pcode_init_min_freq_table(struct xe_tile *tile, u32 min_gt_freq,
u32 max_gt_freq)
{
int ret;
u32 freq;
if (!gt_to_xe(gt)->info.has_llc)
if (!tile_to_xe(tile)->info.has_llc)
return 0;
if (max_gt_freq <= min_gt_freq)
return -EINVAL;
mutex_lock(&gt->pcode.lock);
mutex_lock(&tile->pcode.lock);
for (freq = min_gt_freq; freq <= max_gt_freq; freq++) {
u32 data = freq << PCODE_FREQ_RING_RATIO_SHIFT | freq;
ret = pcode_mailbox_rw(gt, PCODE_WRITE_MIN_FREQ_TABLE,
ret = pcode_mailbox_rw(tile, PCODE_WRITE_MIN_FREQ_TABLE,
&data, NULL, 1, false, false);
if (ret)
goto unlock;
}
unlock:
mutex_unlock(&gt->pcode.lock);
mutex_unlock(&tile->pcode.lock);
return ret;
}
@ -270,7 +270,7 @@ unlock:
int xe_pcode_ready(struct xe_device *xe, bool locked)
{
u32 status, request = DGFX_GET_INIT_STATUS;
struct xe_gt *gt = xe_root_mmio_gt(xe);
struct xe_tile *tile = xe_device_get_root_tile(xe);
int timeout_us = 180000000; /* 3 min */
int ret;
@ -281,15 +281,15 @@ int xe_pcode_ready(struct xe_device *xe, bool locked)
return 0;
if (locked)
mutex_lock(&gt->pcode.lock);
mutex_lock(&tile->pcode.lock);
ret = pcode_try_request(gt, DGFX_PCODE_STATUS, request,
ret = pcode_try_request(tile, DGFX_PCODE_STATUS, request,
DGFX_INIT_STATUS_COMPLETE,
DGFX_INIT_STATUS_COMPLETE,
&status, false, timeout_us, locked);
if (locked)
mutex_unlock(&gt->pcode.lock);
mutex_unlock(&tile->pcode.lock);
if (ret)
drm_err(&xe->drm,
@ -300,14 +300,14 @@ int xe_pcode_ready(struct xe_device *xe, bool locked)
/**
* xe_pcode_init: initialize components of PCODE
* @gt: gt instance
* @tile: tile instance
*
* This function initializes the xe_pcode component.
* To be called once only during probe.
*/
void xe_pcode_init(struct xe_gt *gt)
void xe_pcode_init(struct xe_tile *tile)
{
drmm_mutex_init(&gt_to_xe(gt)->drm, &gt->pcode.lock);
drmm_mutex_init(&tile_to_xe(tile)->drm, &tile->pcode.lock);
}
/**


@ -7,21 +7,21 @@
#define _XE_PCODE_H_
#include <linux/types.h>
struct xe_gt;
struct xe_tile;
struct xe_device;
void xe_pcode_init(struct xe_gt *gt);
void xe_pcode_init(struct xe_tile *tile);
int xe_pcode_probe_early(struct xe_device *xe);
int xe_pcode_ready(struct xe_device *xe, bool locked);
int xe_pcode_init_min_freq_table(struct xe_gt *gt, u32 min_gt_freq,
int xe_pcode_init_min_freq_table(struct xe_tile *tile, u32 min_gt_freq,
u32 max_gt_freq);
int xe_pcode_read(struct xe_gt *gt, u32 mbox, u32 *val, u32 *val1);
int xe_pcode_write_timeout(struct xe_gt *gt, u32 mbox, u32 val,
int xe_pcode_read(struct xe_tile *tile, u32 mbox, u32 *val, u32 *val1);
int xe_pcode_write_timeout(struct xe_tile *tile, u32 mbox, u32 val,
int timeout_ms);
#define xe_pcode_write(gt, mbox, val) \
xe_pcode_write_timeout(gt, mbox, val, 1)
#define xe_pcode_write(tile, mbox, val) \
xe_pcode_write_timeout(tile, mbox, val, 1)
int xe_pcode_request(struct xe_gt *gt, u32 mbox, u32 request,
int xe_pcode_request(struct xe_tile *tile, u32 mbox, u32 request,
u32 reply_mask, u32 reply, int timeout_ms);
#define PCODE_MBOX(mbcmd, param1, param2)\


@ -79,7 +79,14 @@ static struct lockdep_map xe_pm_runtime_nod3cold_map = {
};
#endif
static bool __maybe_unused xe_rpm_reclaim_safe(const struct xe_device *xe)
/**
* xe_rpm_reclaim_safe() - Whether runtime resume can be done from reclaim context
* @xe: The xe device.
*
* Return: true if it is safe to runtime resume from reclaim context.
* false otherwise.
*/
bool xe_rpm_reclaim_safe(const struct xe_device *xe)
{
return !xe->d3cold.capable && !xe->info.has_sriov;
}


@ -31,6 +31,7 @@ bool xe_pm_runtime_resume_and_get(struct xe_device *xe);
void xe_pm_assert_unbounded_bridge(struct xe_device *xe);
int xe_pm_set_vram_threshold(struct xe_device *xe, u32 threshold);
void xe_pm_d3cold_allowed_toggle(struct xe_device *xe);
bool xe_rpm_reclaim_safe(const struct xe_device *xe);
struct task_struct *xe_pm_read_callback_task(struct xe_device *xe);
int xe_pm_module_init(void);


@ -3,6 +3,8 @@
* Copyright © 2022 Intel Corporation
*/
#include <linux/dma-fence-array.h>
#include "xe_pt.h"
#include "regs/xe_gtt_defs.h"
@ -1627,9 +1629,11 @@ xe_pt_update_ops_rfence_interval(struct xe_vm_pgtable_update_ops *pt_update_ops,
static int vma_reserve_fences(struct xe_device *xe, struct xe_vma *vma)
{
int shift = xe_device_get_root_tile(xe)->media_gt ? 1 : 0;
if (!xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm)
return dma_resv_reserve_fences(xe_vma_bo(vma)->ttm.base.resv,
xe->info.tile_count);
xe->info.tile_count << shift);
return 0;
}
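A worked example of the reservation math above, based on the later hunks that attach one primary-GT and one media-GT invalidation fence per tile:

	/*
	 * Example: on a 2-tile device with a media GT, shift == 1, so
	 * dma_resv_reserve_fences() is asked for 2 << 1 == 4 slots:
	 * one primary-GT fence plus one media-GT fence per tile.
	 */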
@ -1816,6 +1820,7 @@ int xe_pt_update_ops_prepare(struct xe_tile *tile, struct xe_vma_ops *vops)
struct xe_vm_pgtable_update_ops *pt_update_ops =
&vops->pt_update_ops[tile->id];
struct xe_vma_op *op;
int shift = tile->media_gt ? 1 : 0;
int err;
lockdep_assert_held(&vops->vm->lock);
@ -1824,7 +1829,7 @@ int xe_pt_update_ops_prepare(struct xe_tile *tile, struct xe_vma_ops *vops)
xe_pt_update_ops_init(pt_update_ops);
err = dma_resv_reserve_fences(xe_vm_resv(vops->vm),
tile_to_xe(tile)->info.tile_count);
tile_to_xe(tile)->info.tile_count << shift);
if (err)
return err;
@ -1849,13 +1854,20 @@ int xe_pt_update_ops_prepare(struct xe_tile *tile, struct xe_vma_ops *vops)
static void bind_op_commit(struct xe_vm *vm, struct xe_tile *tile,
struct xe_vm_pgtable_update_ops *pt_update_ops,
struct xe_vma *vma, struct dma_fence *fence)
struct xe_vma *vma, struct dma_fence *fence,
struct dma_fence *fence2)
{
if (!xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm)
if (!xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm) {
dma_resv_add_fence(xe_vma_bo(vma)->ttm.base.resv, fence,
pt_update_ops->wait_vm_bookkeep ?
DMA_RESV_USAGE_KERNEL :
DMA_RESV_USAGE_BOOKKEEP);
if (fence2)
dma_resv_add_fence(xe_vma_bo(vma)->ttm.base.resv, fence2,
pt_update_ops->wait_vm_bookkeep ?
DMA_RESV_USAGE_KERNEL :
DMA_RESV_USAGE_BOOKKEEP);
}
vma->tile_present |= BIT(tile->id);
vma->tile_staged &= ~BIT(tile->id);
if (xe_vma_is_userptr(vma)) {
@ -1875,13 +1887,20 @@ static void bind_op_commit(struct xe_vm *vm, struct xe_tile *tile,
static void unbind_op_commit(struct xe_vm *vm, struct xe_tile *tile,
struct xe_vm_pgtable_update_ops *pt_update_ops,
struct xe_vma *vma, struct dma_fence *fence)
struct xe_vma *vma, struct dma_fence *fence,
struct dma_fence *fence2)
{
if (!xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm)
if (!xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm) {
dma_resv_add_fence(xe_vma_bo(vma)->ttm.base.resv, fence,
pt_update_ops->wait_vm_bookkeep ?
DMA_RESV_USAGE_KERNEL :
DMA_RESV_USAGE_BOOKKEEP);
if (fence2)
dma_resv_add_fence(xe_vma_bo(vma)->ttm.base.resv, fence2,
pt_update_ops->wait_vm_bookkeep ?
DMA_RESV_USAGE_KERNEL :
DMA_RESV_USAGE_BOOKKEEP);
}
vma->tile_present &= ~BIT(tile->id);
if (!vma->tile_present) {
list_del_init(&vma->combined_links.rebind);
@ -1898,7 +1917,8 @@ static void unbind_op_commit(struct xe_vm *vm, struct xe_tile *tile,
static void op_commit(struct xe_vm *vm,
struct xe_tile *tile,
struct xe_vm_pgtable_update_ops *pt_update_ops,
struct xe_vma_op *op, struct dma_fence *fence)
struct xe_vma_op *op, struct dma_fence *fence,
struct dma_fence *fence2)
{
xe_vm_assert_held(vm);
@ -1907,26 +1927,28 @@ static void op_commit(struct xe_vm *vm,
if (!op->map.immediate && xe_vm_in_fault_mode(vm))
break;
bind_op_commit(vm, tile, pt_update_ops, op->map.vma, fence);
bind_op_commit(vm, tile, pt_update_ops, op->map.vma, fence,
fence2);
break;
case DRM_GPUVA_OP_REMAP:
unbind_op_commit(vm, tile, pt_update_ops,
gpuva_to_vma(op->base.remap.unmap->va), fence);
gpuva_to_vma(op->base.remap.unmap->va), fence,
fence2);
if (op->remap.prev)
bind_op_commit(vm, tile, pt_update_ops, op->remap.prev,
fence);
fence, fence2);
if (op->remap.next)
bind_op_commit(vm, tile, pt_update_ops, op->remap.next,
fence);
fence, fence2);
break;
case DRM_GPUVA_OP_UNMAP:
unbind_op_commit(vm, tile, pt_update_ops,
gpuva_to_vma(op->base.unmap.va), fence);
gpuva_to_vma(op->base.unmap.va), fence, fence2);
break;
case DRM_GPUVA_OP_PREFETCH:
bind_op_commit(vm, tile, pt_update_ops,
gpuva_to_vma(op->base.prefetch.va), fence);
gpuva_to_vma(op->base.prefetch.va), fence, fence2);
break;
default:
drm_warn(&vm->xe->drm, "NOT POSSIBLE");
@ -1963,7 +1985,9 @@ xe_pt_update_ops_run(struct xe_tile *tile, struct xe_vma_ops *vops)
struct xe_vm_pgtable_update_ops *pt_update_ops =
&vops->pt_update_ops[tile->id];
struct dma_fence *fence;
struct invalidation_fence *ifence = NULL;
struct invalidation_fence *ifence = NULL, *mfence = NULL;
struct dma_fence **fences = NULL;
struct dma_fence_array *cf = NULL;
struct xe_range_fence *rfence;
struct xe_vma_op *op;
int err = 0, i;
@ -1996,6 +2020,23 @@ xe_pt_update_ops_run(struct xe_tile *tile, struct xe_vma_ops *vops)
err = -ENOMEM;
goto kill_vm_tile1;
}
if (tile->media_gt) {
mfence = kzalloc(sizeof(*ifence), GFP_KERNEL);
if (!mfence) {
err = -ENOMEM;
goto free_ifence;
}
fences = kmalloc_array(2, sizeof(*fences), GFP_KERNEL);
if (!fences) {
err = -ENOMEM;
goto free_ifence;
}
cf = dma_fence_array_alloc(2);
if (!cf) {
err = -ENOMEM;
goto free_ifence;
}
}
}
rfence = kzalloc(sizeof(*rfence), GFP_KERNEL);
@ -2027,19 +2068,50 @@ xe_pt_update_ops_run(struct xe_tile *tile, struct xe_vma_ops *vops)
/* tlb invalidation must be done before signaling rebind */
if (ifence) {
if (mfence)
dma_fence_get(fence);
invalidation_fence_init(tile->primary_gt, ifence, fence,
pt_update_ops->start,
pt_update_ops->last, vm->usm.asid);
fence = &ifence->base.base;
if (mfence) {
invalidation_fence_init(tile->media_gt, mfence, fence,
pt_update_ops->start,
pt_update_ops->last, vm->usm.asid);
fences[0] = &ifence->base.base;
fences[1] = &mfence->base.base;
dma_fence_array_init(cf, 2, fences,
vm->composite_fence_ctx,
vm->composite_fence_seqno++,
false);
fence = &cf->base;
} else {
fence = &ifence->base.base;
}
}
dma_resv_add_fence(xe_vm_resv(vm), fence,
pt_update_ops->wait_vm_bookkeep ?
DMA_RESV_USAGE_KERNEL :
DMA_RESV_USAGE_BOOKKEEP);
if (!mfence) {
dma_resv_add_fence(xe_vm_resv(vm), fence,
pt_update_ops->wait_vm_bookkeep ?
DMA_RESV_USAGE_KERNEL :
DMA_RESV_USAGE_BOOKKEEP);
list_for_each_entry(op, &vops->list, link)
op_commit(vops->vm, tile, pt_update_ops, op, fence);
list_for_each_entry(op, &vops->list, link)
op_commit(vops->vm, tile, pt_update_ops, op, fence, NULL);
} else {
dma_resv_add_fence(xe_vm_resv(vm), &ifence->base.base,
pt_update_ops->wait_vm_bookkeep ?
DMA_RESV_USAGE_KERNEL :
DMA_RESV_USAGE_BOOKKEEP);
dma_resv_add_fence(xe_vm_resv(vm), &mfence->base.base,
pt_update_ops->wait_vm_bookkeep ?
DMA_RESV_USAGE_KERNEL :
DMA_RESV_USAGE_BOOKKEEP);
list_for_each_entry(op, &vops->list, link)
op_commit(vops->vm, tile, pt_update_ops, op,
&ifence->base.base, &mfence->base.base);
}
if (pt_update_ops->needs_userptr_lock)
up_read(&vm->userptr.notifier_lock);
@ -2049,6 +2121,9 @@ xe_pt_update_ops_run(struct xe_tile *tile, struct xe_vma_ops *vops)
free_rfence:
kfree(rfence);
free_ifence:
kfree(cf);
kfree(fences);
kfree(mfence);
kfree(ifence);
kill_vm_tile1:
if (err != -EAGAIN && tile->id)

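The allocation and arming above also illustrate the split dma-fence-array API from the cross-subsystem change: allocate where -ENOMEM can still be unwound, then arm on a path that must not fail. A minimal sketch of that pattern; the function name, fence_a/fence_b, context and seqno are placeholders, and only the dma_fence_array_*() calls and the kfree() cleanup mirror the series:

/* Hypothetical two-phase user of dma_fence_array_alloc()/dma_fence_array_init(). */
static struct dma_fence *example_fence_array(struct dma_fence *fence_a,
					     struct dma_fence *fence_b,
					     u64 context, unsigned int seqno)
{
	struct dma_fence_array *cf;
	struct dma_fence **fences;

	/* Allocation phase: both calls may fail and can still be unwound. */
	cf = dma_fence_array_alloc(2);
	fences = kmalloc_array(2, sizeof(*fences), GFP_KERNEL);
	if (!cf || !fences) {
		kfree(cf);
		kfree(fences);
		return NULL;
	}

	/* Arm phase: cannot fail; the array takes over the fence references. */
	fences[0] = fence_a;
	fences[1] = fence_b;
	dma_fence_array_init(cf, 2, fences, context, seqno, false);

	return &cf->base;
}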

@ -9,7 +9,7 @@
#include <linux/sched/clock.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/xe_drm.h>
#include <uapi/drm/xe_drm.h>
#include "regs/xe_engine_regs.h"
#include "regs/xe_gt_regs.h"


@ -7,7 +7,7 @@
#include <kunit/visibility.h>
#include <drm/xe_drm.h>
#include <uapi/drm/xe_drm.h>
#include "xe_gt.h"
#include "xe_gt_topology.h"


@ -5,7 +5,7 @@
#include "xe_sched_job.h"
#include <drm/xe_drm.h>
#include <uapi/drm/xe_drm.h>
#include <linux/dma-fence-chain.h>
#include <linux/slab.h>


@ -12,7 +12,7 @@
#include <drm/drm_print.h>
#include <drm/drm_syncobj.h>
#include <drm/xe_drm.h>
#include <uapi/drm/xe_drm.h>
#include "xe_device_types.h"
#include "xe_exec_queue.h"


@ -9,6 +9,7 @@
#include "xe_ggtt.h"
#include "xe_gt.h"
#include "xe_migrate.h"
#include "xe_pcode.h"
#include "xe_sa.h"
#include "xe_tile.h"
#include "xe_tile_sysfs.h"
@ -124,6 +125,8 @@ int xe_tile_init_early(struct xe_tile *tile, struct xe_device *xe, u8 id)
if (IS_ERR(tile->primary_gt))
return PTR_ERR(tile->primary_gt);
xe_pcode_init(tile);
return 0;
}


@ -8,6 +8,7 @@
#include <drm/drm_debugfs.h>
#include "xe_gt.h"
#include "xe_gsc_debugfs.h"
#include "xe_guc_debugfs.h"
#include "xe_huc_debugfs.h"
#include "xe_macros.h"
@ -23,6 +24,7 @@ void xe_uc_debugfs_register(struct xe_uc *uc, struct dentry *parent)
return;
}
xe_gsc_debugfs_register(&uc->gsc, root);
xe_guc_debugfs_register(&uc->guc, root);
xe_huc_debugfs_register(&uc->huc, root);
}


@ -129,8 +129,8 @@ struct fw_blobs_by_type {
/* for the GSC FW we match the compatibility version and not the release one */
#define XE_GSC_FIRMWARE_DEFS(fw_def, major_ver) \
fw_def(LUNARLAKE, major_ver(xe, gsc, lnl, 1, 0, 0)) \
fw_def(METEORLAKE, major_ver(i915, gsc, mtl, 1, 0, 0))
fw_def(LUNARLAKE, major_ver(xe, gsc, lnl, 104, 1, 0)) \
fw_def(METEORLAKE, major_ver(i915, gsc, mtl, 102, 1, 0))
#define MAKE_FW_PATH(dir__, uc__, shortname__, version__) \
__stringify(dir__) "/" __stringify(shortname__) "_" __stringify(uc__) version__ ".bin"
@ -141,6 +141,8 @@ struct fw_blobs_by_type {
MAKE_FW_PATH(dir_, uc_, shortname_, "_" __stringify(a))
#define fw_filename_no_ver(dir_, uc_, shortname_) \
MAKE_FW_PATH(dir_, uc_, shortname_, "")
#define fw_filename_gsc(dir_, uc_, shortname_, a, b, c) \
MAKE_FW_PATH(dir_, uc_, shortname_, "_" __stringify(b))
#define uc_fw_entry_mmp_ver(dir_, uc_, shortname_, a, b, c) \
{ fw_filename_mmp_ver(dir_, uc_, shortname_, a, b, c), \
@ -151,6 +153,9 @@ struct fw_blobs_by_type {
#define uc_fw_entry_no_ver(dir_, uc_, shortname_) \
{ fw_filename_no_ver(dir_, uc_, shortname_), \
0, 0 }
#define uc_fw_entry_gsc(dir_, uc_, shortname_, a, b, c) \
{ fw_filename_gsc(dir_, uc_, shortname_, a, b, c), \
a, b, c }
/* All blobs need to be declared via MODULE_FIRMWARE() */
#define XE_UC_MODULE_FIRMWARE(platform__, fw_filename) \
@ -166,7 +171,7 @@ XE_GUC_FIRMWARE_DEFS(XE_UC_MODULE_FIRMWARE,
fw_filename_mmp_ver, fw_filename_major_ver)
XE_HUC_FIRMWARE_DEFS(XE_UC_MODULE_FIRMWARE,
fw_filename_mmp_ver, fw_filename_no_ver)
XE_GSC_FIRMWARE_DEFS(XE_UC_MODULE_FIRMWARE, fw_filename_major_ver)
XE_GSC_FIRMWARE_DEFS(XE_UC_MODULE_FIRMWARE, fw_filename_gsc)
static struct xe_gt *
__uc_fw_to_gt(struct xe_uc_fw *uc_fw, enum xe_uc_fw_type type)
@ -209,7 +214,7 @@ uc_fw_auto_select(struct xe_device *xe, struct xe_uc_fw *uc_fw)
uc_fw_entry_no_ver)
};
static const struct uc_fw_entry entries_gsc[] = {
XE_GSC_FIRMWARE_DEFS(XE_UC_FW_ENTRY, uc_fw_entry_major_ver)
XE_GSC_FIRMWARE_DEFS(XE_UC_FW_ENTRY, uc_fw_entry_gsc)
};
static const struct fw_blobs_by_type blobs_all[XE_UC_FW_NUM_TYPES] = {
[XE_UC_FW_TYPE_GUC] = { entries_guc, ARRAY_SIZE(entries_guc) },
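Expanding the new macro for the LUNARLAKE entry above shows that only the compatibility major version (the b argument) ends up in the firmware path; a worked expansion using the MAKE_FW_PATH definition from the hunk above:

	/*
	 * fw_filename_gsc(xe, gsc, lnl, 104, 1, 0)
	 *   -> MAKE_FW_PATH(xe, gsc, lnl, "_" __stringify(1))
	 *   -> "xe" "/" "lnl" "_" "gsc" "_1" ".bin"
	 *   -> "xe/lnl_gsc_1.bin"
	 */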


@ -65,7 +65,7 @@ const char *xe_uc_fw_status_repr(enum xe_uc_fw_status status)
return "<invalid>";
}
static inline int xe_uc_fw_status_to_error(enum xe_uc_fw_status status)
static inline int xe_uc_fw_status_to_error(const enum xe_uc_fw_status status)
{
switch (status) {
case XE_UC_FIRMWARE_NOT_SUPPORTED:
@ -108,7 +108,7 @@ static inline const char *xe_uc_fw_type_repr(enum xe_uc_fw_type type)
}
static inline enum xe_uc_fw_status
__xe_uc_fw_status(struct xe_uc_fw *uc_fw)
__xe_uc_fw_status(const struct xe_uc_fw *uc_fw)
{
/* shouldn't call this before checking hw/blob availability */
XE_WARN_ON(uc_fw->status == XE_UC_FIRMWARE_UNINITIALIZED);
@ -156,6 +156,11 @@ static inline bool xe_uc_fw_is_overridden(const struct xe_uc_fw *uc_fw)
return uc_fw->user_overridden;
}
static inline bool xe_uc_fw_is_in_error_state(const struct xe_uc_fw *uc_fw)
{
return xe_uc_fw_status_to_error(__xe_uc_fw_status(uc_fw)) < 0;
}
static inline void xe_uc_fw_sanitize(struct xe_uc_fw *uc_fw)
{
if (xe_uc_fw_is_loadable(uc_fw))


@ -12,7 +12,7 @@
#include <drm/drm_print.h>
#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_tt.h>
#include <drm/xe_drm.h>
#include <uapi/drm/xe_drm.h>
#include <linux/ascii85.h>
#include <linux/delay.h>
#include <linux/kthread.h>


@ -34,7 +34,6 @@ static ssize_t max_freq_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct xe_tile *tile = dev_to_tile(dev);
struct xe_gt *gt = tile->primary_gt;
u32 val, mbox;
int err;
@ -42,7 +41,7 @@ static ssize_t max_freq_show(struct device *dev, struct device_attribute *attr,
| REG_FIELD_PREP(PCODE_MB_PARAM1, PCODE_MBOX_FC_SC_READ_FUSED_P0)
| REG_FIELD_PREP(PCODE_MB_PARAM2, PCODE_MBOX_DOMAIN_HBM);
err = xe_pcode_read(gt, mbox, &val, NULL);
err = xe_pcode_read(tile, mbox, &val, NULL);
if (err)
return err;
@ -57,7 +56,6 @@ static ssize_t min_freq_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct xe_tile *tile = dev_to_tile(dev);
struct xe_gt *gt = tile->primary_gt;
u32 val, mbox;
int err;
@ -65,7 +63,7 @@ static ssize_t min_freq_show(struct device *dev, struct device_attribute *attr,
| REG_FIELD_PREP(PCODE_MB_PARAM1, PCODE_MBOX_FC_SC_READ_FUSED_PN)
| REG_FIELD_PREP(PCODE_MB_PARAM2, PCODE_MBOX_DOMAIN_HBM);
err = xe_pcode_read(gt, mbox, &val, NULL);
err = xe_pcode_read(tile, mbox, &val, NULL);
if (err)
return err;


@ -8,7 +8,7 @@
#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/drm_utils.h>
#include <drm/xe_drm.h>
#include <uapi/drm/xe_drm.h>
#include "xe_device.h"
#include "xe_gt.h"


@ -79,6 +79,12 @@ to_dma_fence_array(struct dma_fence *fence)
for (index = 0, fence = dma_fence_array_first(head); fence; \
++(index), fence = dma_fence_array_next(head, index))
struct dma_fence_array *dma_fence_array_alloc(int num_fences);
void dma_fence_array_init(struct dma_fence_array *array,
int num_fences, struct dma_fence **fences,
u64 context, unsigned seqno,
bool signal_on_any);
struct dma_fence_array *dma_fence_array_create(int num_fences,
struct dma_fence **fences,
u64 context, unsigned seqno,