drm/xe: Normalize bo flags macros

The flags stored in the BO grew over time without following much of a
naming pattern. First of all, get rid of the _BIT suffix, which was
banned everywhere else due to the guideline in
drivers/gpu/drm/i915/i915_reg.h that xe loosely follows:

	Define bits using ``REG_BIT(N)``. Do **not** add ``_BIT`` suffix to the name.

Here the flags aren't for a register, but it's good practice to keep
the naming consistent.
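
For example, one of the renames below (shown in its final form, i.e.
with the FLAG_ prefix from the second rule already applied):

	XE_BO_SCANOUT_BIT  ->  XE_BO_FLAG_SCANOUT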

The second divergence in names is whether or not "CREATE" is used. This
is because most of the flags are passed to the xe_bo_create*() family of
functions, changing their behavior. However, since the flags are also
stored in the bo itself and checked elsewhere in the code, it's better
to simply omit the CREATE part.
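
For example, condensed from the hunks below, the same flag is passed at
creation time and later checked on the flags stored in the bo, where a
CREATE prefix would be misleading:

	obj = xe_bo_create_pin_map(xe, tile, NULL, size, ttm_bo_type_kernel,
				   XE_BO_FLAG_SCANOUT | XE_BO_FLAG_STOLEN |
				   XE_BO_FLAG_PINNED);
	...
	if (bo->flags & XE_BO_FLAG_SCANOUT)
		/* scanout-specific handling, long after creation */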

With those two guidelines, all the flags are given the form
XE_BO_FLAG_<FLAG_NAME> with the following commands:

	git grep -le "XE_BO_" -- drivers/gpu/drm/xe | xargs sed -i \
		-e "s/XE_BO_\([_A-Z0-9]*\)_BIT/XE_BO_\1/g" \
		-e 's/XE_BO_CREATE_/XE_BO_FLAG_/g'
	git grep -le "XE_BO_" -- drivers/gpu/drm/xe | xargs sed -i -r \
		-e 's/XE_BO_(DEFER_BACKING|SCANOUT|FIXED_PLACEMENT|PAGETABLE|NEEDS_CPU_ACCESS|NEEDS_UC|INTERNAL_TEST|INTERNAL_64K|GGTT_INVALIDATE)/XE_BO_FLAG_\1/g'
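
The expressions compose, so e.g.:

	XE_BO_CREATE_GGTT_BIT  ->  XE_BO_CREATE_GGTT  ->  XE_BO_FLAG_GGTT
	XE_BO_SCANOUT_BIT      ->  XE_BO_SCANOUT      ->  XE_BO_FLAG_SCANOUT

The first command handles the _BIT suffix and the CREATE prefix; the
second one adds the FLAG_ prefix to the flags that never had CREATE in
their name.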

And then the defines in drivers/gpu/drm/xe/xe_bo.h are adjusted to
follow the coding style.

Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240322142702.186529-3-lucas.demarchi@intel.com
Signed-off-by: Lucas De Marchi <lucas.demarchi@intel.com>
commit 62742d1266
parent e27f8a45c8
Author: Lucas De Marchi
Date: 2024-03-22 07:27:02 -07:00

33 changed files with 202 additions and 203 deletions


@@ -17,7 +17,7 @@ static inline int i915_gem_stolen_insert_node_in_range(struct xe_device *xe,
{
struct xe_bo *bo;
int err;
-u32 flags = XE_BO_CREATE_PINNED_BIT | XE_BO_CREATE_STOLEN_BIT;
+u32 flags = XE_BO_FLAG_PINNED | XE_BO_FLAG_STOLEN;
if (align)
size = ALIGN(size, align);


@@ -11,7 +11,7 @@
void intel_fb_bo_framebuffer_fini(struct xe_bo *bo)
{
-if (bo->flags & XE_BO_CREATE_PINNED_BIT) {
+if (bo->flags & XE_BO_FLAG_PINNED) {
/* Unpin our kernel fb first */
xe_bo_lock(bo, false);
xe_bo_unpin(bo);
@@ -33,9 +33,9 @@ int intel_fb_bo_framebuffer_init(struct intel_framebuffer *intel_fb,
if (ret)
return ret;
-if (!(bo->flags & XE_BO_SCANOUT_BIT)) {
+if (!(bo->flags & XE_BO_FLAG_SCANOUT)) {
/*
-* XE_BO_SCANOUT_BIT should ideally be set at creation, or is
+* XE_BO_FLAG_SCANOUT should ideally be set at creation, or is
* automatically set when creating FB. We cannot change caching
* mode when the object is VM_BINDed, so we can only set
* coherency with display when unbound.
@@ -44,7 +44,7 @@ int intel_fb_bo_framebuffer_init(struct intel_framebuffer *intel_fb,
ttm_bo_unreserve(&bo->ttm);
return -EINVAL;
}
-bo->flags |= XE_BO_SCANOUT_BIT;
+bo->flags |= XE_BO_FLAG_SCANOUT;
}
ttm_bo_unreserve(&bo->ttm);


@@ -42,9 +42,9 @@ struct drm_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
if (!IS_DGFX(dev_priv)) {
obj = xe_bo_create_pin_map(dev_priv, xe_device_get_root_tile(dev_priv),
NULL, size,
-ttm_bo_type_kernel, XE_BO_SCANOUT_BIT |
-XE_BO_CREATE_STOLEN_BIT |
-XE_BO_CREATE_PINNED_BIT);
+ttm_bo_type_kernel, XE_BO_FLAG_SCANOUT |
+XE_BO_FLAG_STOLEN |
+XE_BO_FLAG_PINNED);
if (!IS_ERR(obj))
drm_info(&dev_priv->drm, "Allocated fbdev into stolen\n");
else
@@ -52,9 +52,9 @@ struct drm_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
}
if (IS_ERR(obj)) {
obj = xe_bo_create_pin_map(dev_priv, xe_device_get_root_tile(dev_priv), NULL, size,
-ttm_bo_type_kernel, XE_BO_SCANOUT_BIT |
-XE_BO_CREATE_VRAM_IF_DGFX(xe_device_get_root_tile(dev_priv)) |
-XE_BO_CREATE_PINNED_BIT);
+ttm_bo_type_kernel, XE_BO_FLAG_SCANOUT |
+XE_BO_FLAG_VRAM_IF_DGFX(xe_device_get_root_tile(dev_priv)) |
+XE_BO_FLAG_PINNED);
}
if (IS_ERR(obj)) {
@@ -81,8 +81,8 @@ int intel_fbdev_fb_fill_info(struct drm_i915_private *i915, struct fb_info *info
{
struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
-if (!(obj->flags & XE_BO_CREATE_SYSTEM_BIT)) {
-if (obj->flags & XE_BO_CREATE_STOLEN_BIT)
+if (!(obj->flags & XE_BO_FLAG_SYSTEM)) {
+if (obj->flags & XE_BO_FLAG_STOLEN)
info->fix.smem_start = xe_ttm_stolen_io_offset(obj, 0);
else
info->fix.smem_start =


@@ -45,8 +45,8 @@ bool intel_dsb_buffer_create(struct intel_crtc *crtc, struct intel_dsb_buffer *d
obj = xe_bo_create_pin_map(i915, xe_device_get_root_tile(i915),
NULL, PAGE_ALIGN(size),
ttm_bo_type_kernel,
-XE_BO_CREATE_VRAM_IF_DGFX(xe_device_get_root_tile(i915)) |
-XE_BO_CREATE_GGTT_BIT);
+XE_BO_FLAG_VRAM_IF_DGFX(xe_device_get_root_tile(i915)) |
+XE_BO_FLAG_GGTT);
if (IS_ERR(obj)) {
kfree(vma);
return false;


@@ -99,21 +99,21 @@ static int __xe_pin_fb_vma_dpt(struct intel_framebuffer *fb,
if (IS_DGFX(xe))
dpt = xe_bo_create_pin_map(xe, tile0, NULL, dpt_size,
ttm_bo_type_kernel,
-XE_BO_CREATE_VRAM0_BIT |
-XE_BO_CREATE_GGTT_BIT |
-XE_BO_PAGETABLE);
+XE_BO_FLAG_VRAM0 |
+XE_BO_FLAG_GGTT |
+XE_BO_FLAG_PAGETABLE);
else
dpt = xe_bo_create_pin_map(xe, tile0, NULL, dpt_size,
ttm_bo_type_kernel,
-XE_BO_CREATE_STOLEN_BIT |
-XE_BO_CREATE_GGTT_BIT |
-XE_BO_PAGETABLE);
+XE_BO_FLAG_STOLEN |
+XE_BO_FLAG_GGTT |
+XE_BO_FLAG_PAGETABLE);
if (IS_ERR(dpt))
dpt = xe_bo_create_pin_map(xe, tile0, NULL, dpt_size,
ttm_bo_type_kernel,
-XE_BO_CREATE_SYSTEM_BIT |
-XE_BO_CREATE_GGTT_BIT |
-XE_BO_PAGETABLE);
+XE_BO_FLAG_SYSTEM |
+XE_BO_FLAG_GGTT |
+XE_BO_FLAG_PAGETABLE);
if (IS_ERR(dpt))
return PTR_ERR(dpt);
@@ -262,7 +262,7 @@ static struct i915_vma *__xe_pin_fb_vma(struct intel_framebuffer *fb,
if (IS_DGFX(to_xe_device(bo->ttm.base.dev)) &&
intel_fb_rc_ccs_cc_plane(&fb->base) >= 0 &&
-!(bo->flags & XE_BO_NEEDS_CPU_ACCESS)) {
+!(bo->flags & XE_BO_FLAG_NEEDS_CPU_ACCESS)) {
struct xe_tile *tile = xe_device_get_root_tile(xe);
/*
@@ -355,7 +355,7 @@ int intel_plane_pin_fb(struct intel_plane_state *plane_state)
struct i915_vma *vma;
/* We reject creating !SCANOUT fb's, so this is weird.. */
-drm_WARN_ON(bo->ttm.base.dev, !(bo->flags & XE_BO_SCANOUT_BIT));
+drm_WARN_ON(bo->ttm.base.dev, !(bo->flags & XE_BO_FLAG_SCANOUT));
vma = __xe_pin_fb_vma(to_intel_framebuffer(fb), &plane_state->view.gtt);
if (IS_ERR(vma))


@@ -73,8 +73,8 @@ static int intel_hdcp_gsc_initialize_message(struct xe_device *xe,
xe_device_mem_access_get(xe);
bo = xe_bo_create_pin_map(xe, xe_device_get_root_tile(xe), NULL, PAGE_SIZE * 2,
ttm_bo_type_kernel,
-XE_BO_CREATE_SYSTEM_BIT |
-XE_BO_CREATE_GGTT_BIT);
+XE_BO_FLAG_SYSTEM |
+XE_BO_FLAG_GGTT);
if (IS_ERR(bo)) {
drm_err(&xe->drm, "Failed to allocate bo for HDCP streaming command!\n");


@@ -62,7 +62,7 @@ initial_plane_bo(struct xe_device *xe,
if (plane_config->size == 0)
return NULL;
-flags = XE_BO_CREATE_PINNED_BIT | XE_BO_SCANOUT_BIT | XE_BO_CREATE_GGTT_BIT;
+flags = XE_BO_FLAG_PINNED | XE_BO_FLAG_SCANOUT | XE_BO_FLAG_GGTT;
base = round_down(plane_config->base, page_size);
if (IS_DGFX(xe)) {
@@ -79,7 +79,7 @@ initial_plane_bo(struct xe_device *xe,
}
phys_base = pte & ~(page_size - 1);
-flags |= XE_BO_CREATE_VRAM0_BIT;
+flags |= XE_BO_FLAG_VRAM0;
/*
* We don't currently expect this to ever be placed in the
@@ -101,7 +101,7 @@ initial_plane_bo(struct xe_device *xe,
if (!stolen)
return NULL;
phys_base = base;
-flags |= XE_BO_CREATE_STOLEN_BIT;
+flags |= XE_BO_FLAG_STOLEN;
/*
* If the FB is too big, just don't use it since fbdev is not very


@@ -116,7 +116,7 @@ static void ccs_test_run_tile(struct xe_device *xe, struct xe_tile *tile,
int ret;
/* TODO: Sanity check */
-unsigned int bo_flags = XE_BO_CREATE_VRAM_IF_DGFX(tile);
+unsigned int bo_flags = XE_BO_FLAG_VRAM_IF_DGFX(tile);
if (IS_DGFX(xe))
kunit_info(test, "Testing vram id %u\n", tile->id);
@@ -186,7 +186,7 @@ EXPORT_SYMBOL_IF_KUNIT(xe_ccs_migrate_kunit);
static int evict_test_run_tile(struct xe_device *xe, struct xe_tile *tile, struct kunit *test)
{
struct xe_bo *bo, *external;
-unsigned int bo_flags = XE_BO_CREATE_VRAM_IF_DGFX(tile);
+unsigned int bo_flags = XE_BO_FLAG_VRAM_IF_DGFX(tile);
struct xe_vm *vm = xe_migrate_get_vm(xe_device_get_root_tile(xe)->migrate);
struct xe_gt *__gt;
int err, i, id;


@@ -36,14 +36,14 @@ static void check_residency(struct kunit *test, struct xe_bo *exported,
xe_bo_assert_held(imported);
mem_type = XE_PL_VRAM0;
-if (!(params->mem_mask & XE_BO_CREATE_VRAM0_BIT))
+if (!(params->mem_mask & XE_BO_FLAG_VRAM0))
/* No VRAM allowed */
mem_type = XE_PL_TT;
else if (params->force_different_devices && !p2p_enabled(params))
/* No P2P */
mem_type = XE_PL_TT;
else if (params->force_different_devices && !is_dynamic(params) &&
-(params->mem_mask & XE_BO_CREATE_SYSTEM_BIT))
+(params->mem_mask & XE_BO_FLAG_SYSTEM))
/* Pin migrated to TT */
mem_type = XE_PL_TT;
@@ -93,7 +93,7 @@ static void check_residency(struct kunit *test, struct xe_bo *exported,
* possible, saving a migration step as the transfer is just
* likely as fast from system memory.
*/
-if (params->mem_mask & XE_BO_CREATE_SYSTEM_BIT)
+if (params->mem_mask & XE_BO_FLAG_SYSTEM)
KUNIT_EXPECT_TRUE(test, xe_bo_is_mem_type(exported, XE_PL_TT));
else
KUNIT_EXPECT_TRUE(test, xe_bo_is_mem_type(exported, mem_type));
@@ -115,11 +115,11 @@ static void xe_test_dmabuf_import_same_driver(struct xe_device *xe)
/* No VRAM on this device? */
if (!ttm_manager_type(&xe->ttm, XE_PL_VRAM0) &&
-(params->mem_mask & XE_BO_CREATE_VRAM0_BIT))
+(params->mem_mask & XE_BO_FLAG_VRAM0))
return;
size = PAGE_SIZE;
-if ((params->mem_mask & XE_BO_CREATE_VRAM0_BIT) &&
+if ((params->mem_mask & XE_BO_FLAG_VRAM0) &&
xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)
size = SZ_64K;
@@ -148,7 +148,7 @@ static void xe_test_dmabuf_import_same_driver(struct xe_device *xe)
*/
if (params->force_different_devices &&
!p2p_enabled(params) &&
-!(params->mem_mask & XE_BO_CREATE_SYSTEM_BIT)) {
+!(params->mem_mask & XE_BO_FLAG_SYSTEM)) {
KUNIT_FAIL(test,
"xe_gem_prime_import() succeeded when it shouldn't have\n");
} else {
@@ -161,7 +161,7 @@ static void xe_test_dmabuf_import_same_driver(struct xe_device *xe)
/* Pinning in VRAM is not allowed. */
if (!is_dynamic(params) &&
params->force_different_devices &&
-!(params->mem_mask & XE_BO_CREATE_SYSTEM_BIT))
+!(params->mem_mask & XE_BO_FLAG_SYSTEM))
KUNIT_EXPECT_EQ(test, err, -EINVAL);
/* Otherwise only expect interrupts or success. */
else if (err && err != -EINTR && err != -ERESTARTSYS)
@@ -180,7 +180,7 @@ static void xe_test_dmabuf_import_same_driver(struct xe_device *xe)
PTR_ERR(import));
} else if (!params->force_different_devices ||
p2p_enabled(params) ||
-(params->mem_mask & XE_BO_CREATE_SYSTEM_BIT)) {
+(params->mem_mask & XE_BO_FLAG_SYSTEM)) {
/* Shouldn't fail if we can reuse same bo, use p2p or use system */
KUNIT_FAIL(test, "dynamic p2p attachment failed with err=%ld\n",
PTR_ERR(import));
@@ -203,52 +203,52 @@ static const struct dma_buf_attach_ops nop2p_attach_ops = {
* gem object.
*/
static const struct dma_buf_test_params test_params[] = {
-{.mem_mask = XE_BO_CREATE_VRAM0_BIT,
+{.mem_mask = XE_BO_FLAG_VRAM0,
.attach_ops = &xe_dma_buf_attach_ops},
-{.mem_mask = XE_BO_CREATE_VRAM0_BIT,
+{.mem_mask = XE_BO_FLAG_VRAM0,
.attach_ops = &xe_dma_buf_attach_ops,
.force_different_devices = true},
-{.mem_mask = XE_BO_CREATE_VRAM0_BIT,
+{.mem_mask = XE_BO_FLAG_VRAM0,
.attach_ops = &nop2p_attach_ops},
-{.mem_mask = XE_BO_CREATE_VRAM0_BIT,
+{.mem_mask = XE_BO_FLAG_VRAM0,
.attach_ops = &nop2p_attach_ops,
.force_different_devices = true},
-{.mem_mask = XE_BO_CREATE_VRAM0_BIT},
-{.mem_mask = XE_BO_CREATE_VRAM0_BIT,
+{.mem_mask = XE_BO_FLAG_VRAM0},
+{.mem_mask = XE_BO_FLAG_VRAM0,
.force_different_devices = true},
-{.mem_mask = XE_BO_CREATE_SYSTEM_BIT,
+{.mem_mask = XE_BO_FLAG_SYSTEM,
.attach_ops = &xe_dma_buf_attach_ops},
-{.mem_mask = XE_BO_CREATE_SYSTEM_BIT,
+{.mem_mask = XE_BO_FLAG_SYSTEM,
.attach_ops = &xe_dma_buf_attach_ops,
.force_different_devices = true},
-{.mem_mask = XE_BO_CREATE_SYSTEM_BIT,
+{.mem_mask = XE_BO_FLAG_SYSTEM,
.attach_ops = &nop2p_attach_ops},
-{.mem_mask = XE_BO_CREATE_SYSTEM_BIT,
+{.mem_mask = XE_BO_FLAG_SYSTEM,
.attach_ops = &nop2p_attach_ops,
.force_different_devices = true},
-{.mem_mask = XE_BO_CREATE_SYSTEM_BIT},
-{.mem_mask = XE_BO_CREATE_SYSTEM_BIT,
+{.mem_mask = XE_BO_FLAG_SYSTEM},
+{.mem_mask = XE_BO_FLAG_SYSTEM,
.force_different_devices = true},
-{.mem_mask = XE_BO_CREATE_SYSTEM_BIT | XE_BO_CREATE_VRAM0_BIT,
+{.mem_mask = XE_BO_FLAG_SYSTEM | XE_BO_FLAG_VRAM0,
.attach_ops = &xe_dma_buf_attach_ops},
-{.mem_mask = XE_BO_CREATE_SYSTEM_BIT | XE_BO_CREATE_VRAM0_BIT,
+{.mem_mask = XE_BO_FLAG_SYSTEM | XE_BO_FLAG_VRAM0,
.attach_ops = &xe_dma_buf_attach_ops,
.force_different_devices = true},
-{.mem_mask = XE_BO_CREATE_SYSTEM_BIT | XE_BO_CREATE_VRAM0_BIT,
+{.mem_mask = XE_BO_FLAG_SYSTEM | XE_BO_FLAG_VRAM0,
.attach_ops = &nop2p_attach_ops},
-{.mem_mask = XE_BO_CREATE_SYSTEM_BIT | XE_BO_CREATE_VRAM0_BIT,
+{.mem_mask = XE_BO_FLAG_SYSTEM | XE_BO_FLAG_VRAM0,
.attach_ops = &nop2p_attach_ops,
.force_different_devices = true},
-{.mem_mask = XE_BO_CREATE_SYSTEM_BIT | XE_BO_CREATE_VRAM0_BIT},
-{.mem_mask = XE_BO_CREATE_SYSTEM_BIT | XE_BO_CREATE_VRAM0_BIT,
+{.mem_mask = XE_BO_FLAG_SYSTEM | XE_BO_FLAG_VRAM0},
+{.mem_mask = XE_BO_FLAG_SYSTEM | XE_BO_FLAG_VRAM0,
.force_different_devices = true},
{}


@@ -113,7 +113,7 @@ static void test_copy(struct xe_migrate *m, struct xe_bo *bo,
bo->size,
ttm_bo_type_kernel,
region |
-XE_BO_NEEDS_CPU_ACCESS);
+XE_BO_FLAG_NEEDS_CPU_ACCESS);
if (IS_ERR(remote)) {
KUNIT_FAIL(test, "Failed to allocate remote bo for %s: %li\n",
str, PTR_ERR(remote));
@@ -191,7 +191,7 @@ out_unlock:
static void test_copy_sysmem(struct xe_migrate *m, struct xe_bo *bo,
struct kunit *test)
{
-test_copy(m, bo, test, XE_BO_CREATE_SYSTEM_BIT);
+test_copy(m, bo, test, XE_BO_FLAG_SYSTEM);
}
static void test_copy_vram(struct xe_migrate *m, struct xe_bo *bo,
@@ -203,9 +203,9 @@ static void test_copy_vram(struct xe_migrate *m, struct xe_bo *bo,
return;
if (bo->ttm.resource->mem_type == XE_PL_VRAM0)
-region = XE_BO_CREATE_VRAM1_BIT;
+region = XE_BO_FLAG_VRAM1;
else
-region = XE_BO_CREATE_VRAM0_BIT;
+region = XE_BO_FLAG_VRAM0;
test_copy(m, bo, test, region);
}
@@ -281,8 +281,8 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
big = xe_bo_create_pin_map(xe, tile, m->q->vm, SZ_4M,
ttm_bo_type_kernel,
-XE_BO_CREATE_VRAM_IF_DGFX(tile) |
-XE_BO_CREATE_PINNED_BIT);
+XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+XE_BO_FLAG_PINNED);
if (IS_ERR(big)) {
KUNIT_FAIL(test, "Failed to allocate bo: %li\n", PTR_ERR(big));
goto vunmap;
@@ -290,8 +290,8 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
pt = xe_bo_create_pin_map(xe, tile, m->q->vm, XE_PAGE_SIZE,
ttm_bo_type_kernel,
-XE_BO_CREATE_VRAM_IF_DGFX(tile) |
-XE_BO_CREATE_PINNED_BIT);
+XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+XE_BO_FLAG_PINNED);
if (IS_ERR(pt)) {
KUNIT_FAIL(test, "Failed to allocate fake pt: %li\n",
PTR_ERR(pt));
@@ -301,8 +301,8 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
tiny = xe_bo_create_pin_map(xe, tile, m->q->vm,
2 * SZ_4K,
ttm_bo_type_kernel,
-XE_BO_CREATE_VRAM_IF_DGFX(tile) |
-XE_BO_CREATE_PINNED_BIT);
+XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+XE_BO_FLAG_PINNED);
if (IS_ERR(tiny)) {
KUNIT_FAIL(test, "Failed to allocate fake pt: %li\n",
PTR_ERR(pt));


@@ -111,7 +111,7 @@ bool xe_bo_is_stolen_devmem(struct xe_bo *bo)
static bool xe_bo_is_user(struct xe_bo *bo)
{
-return bo->flags & XE_BO_CREATE_USER_BIT;
+return bo->flags & XE_BO_FLAG_USER;
}
static struct xe_migrate *
@@ -137,7 +137,7 @@ static struct xe_mem_region *res_to_mem_region(struct ttm_resource *res)
static void try_add_system(struct xe_device *xe, struct xe_bo *bo,
u32 bo_flags, u32 *c)
{
-if (bo_flags & XE_BO_CREATE_SYSTEM_BIT) {
+if (bo_flags & XE_BO_FLAG_SYSTEM) {
xe_assert(xe, *c < ARRAY_SIZE(bo->placements));
bo->placements[*c] = (struct ttm_place) {
@@ -164,12 +164,12 @@ static void add_vram(struct xe_device *xe, struct xe_bo *bo,
* For eviction / restore on suspend / resume objects
* pinned in VRAM must be contiguous
*/
-if (bo_flags & (XE_BO_CREATE_PINNED_BIT |
-XE_BO_CREATE_GGTT_BIT))
+if (bo_flags & (XE_BO_FLAG_PINNED |
+XE_BO_FLAG_GGTT))
place.flags |= TTM_PL_FLAG_CONTIGUOUS;
if (io_size < vram->usable_size) {
-if (bo_flags & XE_BO_NEEDS_CPU_ACCESS) {
+if (bo_flags & XE_BO_FLAG_NEEDS_CPU_ACCESS) {
place.fpfn = 0;
place.lpfn = io_size >> PAGE_SHIFT;
} else {
@@ -183,22 +183,22 @@ static void try_add_vram(struct xe_device *xe, struct xe_bo *bo,
static void try_add_vram(struct xe_device *xe, struct xe_bo *bo,
u32 bo_flags, u32 *c)
{
-if (bo_flags & XE_BO_CREATE_VRAM0_BIT)
+if (bo_flags & XE_BO_FLAG_VRAM0)
add_vram(xe, bo, bo->placements, bo_flags, XE_PL_VRAM0, c);
-if (bo_flags & XE_BO_CREATE_VRAM1_BIT)
+if (bo_flags & XE_BO_FLAG_VRAM1)
add_vram(xe, bo, bo->placements, bo_flags, XE_PL_VRAM1, c);
}
static void try_add_stolen(struct xe_device *xe, struct xe_bo *bo,
u32 bo_flags, u32 *c)
{
-if (bo_flags & XE_BO_CREATE_STOLEN_BIT) {
+if (bo_flags & XE_BO_FLAG_STOLEN) {
xe_assert(xe, *c < ARRAY_SIZE(bo->placements));
bo->placements[*c] = (struct ttm_place) {
.mem_type = XE_PL_STOLEN,
-.flags = bo_flags & (XE_BO_CREATE_PINNED_BIT |
-XE_BO_CREATE_GGTT_BIT) ?
+.flags = bo_flags & (XE_BO_FLAG_PINNED |
+XE_BO_FLAG_GGTT) ?
TTM_PL_FLAG_CONTIGUOUS : 0,
};
*c += 1;
@@ -339,7 +339,7 @@ static struct ttm_tt *xe_ttm_tt_create(struct ttm_buffer_object *ttm_bo,
break;
}
-WARN_ON((bo->flags & XE_BO_CREATE_USER_BIT) && !bo->cpu_caching);
+WARN_ON((bo->flags & XE_BO_FLAG_USER) && !bo->cpu_caching);
/*
* Display scanout is always non-coherent with the CPU cache.
@@ -347,8 +347,8 @@ static struct ttm_tt *xe_ttm_tt_create(struct ttm_buffer_object *ttm_bo,
* For Xe_LPG and beyond, PPGTT PTE lookups are also non-coherent and
* require a CPU:WC mapping.
*/
-if ((!bo->cpu_caching && bo->flags & XE_BO_SCANOUT_BIT) ||
-(xe->info.graphics_verx100 >= 1270 && bo->flags & XE_BO_PAGETABLE))
+if ((!bo->cpu_caching && bo->flags & XE_BO_FLAG_SCANOUT) ||
+(xe->info.graphics_verx100 >= 1270 && bo->flags & XE_BO_FLAG_PAGETABLE))
caching = ttm_write_combined;
err = ttm_tt_init(&tt->ttm, &bo->ttm, page_flags, caching, extra_pages);
@@ -1102,7 +1102,7 @@ static vm_fault_t xe_gem_fault(struct vm_fault *vmf)
struct drm_device *ddev = tbo->base.dev;
struct xe_device *xe = to_xe_device(ddev);
struct xe_bo *bo = ttm_to_xe_bo(tbo);
-bool needs_rpm = bo->flags & XE_BO_CREATE_VRAM_MASK;
+bool needs_rpm = bo->flags & XE_BO_FLAG_VRAM_MASK;
vm_fault_t ret;
int idx;
@@ -1215,19 +1215,19 @@ struct xe_bo *___xe_bo_create_locked(struct xe_device *xe, struct xe_bo *bo,
return ERR_PTR(-EINVAL);
}
-if (flags & (XE_BO_CREATE_VRAM_MASK | XE_BO_CREATE_STOLEN_BIT) &&
-!(flags & XE_BO_CREATE_IGNORE_MIN_PAGE_SIZE_BIT) &&
+if (flags & (XE_BO_FLAG_VRAM_MASK | XE_BO_FLAG_STOLEN) &&
+!(flags & XE_BO_FLAG_IGNORE_MIN_PAGE_SIZE) &&
((xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K) ||
(flags & XE_BO_NEEDS_64K))) {
aligned_size = ALIGN(size, SZ_64K);
if (type != ttm_bo_type_device)
size = ALIGN(size, SZ_64K);
-flags |= XE_BO_INTERNAL_64K;
+flags |= XE_BO_FLAG_INTERNAL_64K;
alignment = SZ_64K >> PAGE_SHIFT;
} else {
aligned_size = ALIGN(size, SZ_4K);
-flags &= ~XE_BO_INTERNAL_64K;
+flags &= ~XE_BO_FLAG_INTERNAL_64K;
alignment = SZ_4K >> PAGE_SHIFT;
}
@@ -1256,11 +1256,11 @@ struct xe_bo *___xe_bo_create_locked(struct xe_device *xe, struct xe_bo *bo,
drm_gem_private_object_init(&xe->drm, &bo->ttm.base, size);
if (resv) {
-ctx.allow_res_evict = !(flags & XE_BO_CREATE_NO_RESV_EVICT);
+ctx.allow_res_evict = !(flags & XE_BO_FLAG_NO_RESV_EVICT);
ctx.resv = resv;
}
-if (!(flags & XE_BO_FIXED_PLACEMENT_BIT)) {
+if (!(flags & XE_BO_FLAG_FIXED_PLACEMENT)) {
err = __xe_bo_placement_for_flags(xe, bo, bo->flags);
if (WARN_ON(err)) {
xe_ttm_bo_destroy(&bo->ttm);
@@ -1270,7 +1270,7 @@ struct xe_bo *___xe_bo_create_locked(struct xe_device *xe, struct xe_bo *bo,
/* Defer populating type_sg bos */
placement = (type == ttm_bo_type_sg ||
-bo->flags & XE_BO_DEFER_BACKING) ? &sys_placement :
+bo->flags & XE_BO_FLAG_DEFER_BACKING) ? &sys_placement :
&bo->placement;
err = ttm_bo_init_reserved(&xe->ttm, &bo->ttm, type,
placement, alignment,
@@ -1325,21 +1325,21 @@ static int __xe_bo_fixed_placement(struct xe_device *xe,
{
struct ttm_place *place = bo->placements;
-if (flags & (XE_BO_CREATE_USER_BIT|XE_BO_CREATE_SYSTEM_BIT))
+if (flags & (XE_BO_FLAG_USER | XE_BO_FLAG_SYSTEM))
return -EINVAL;
place->flags = TTM_PL_FLAG_CONTIGUOUS;
place->fpfn = start >> PAGE_SHIFT;
place->lpfn = end >> PAGE_SHIFT;
-switch (flags & (XE_BO_CREATE_STOLEN_BIT | XE_BO_CREATE_VRAM_MASK)) {
-case XE_BO_CREATE_VRAM0_BIT:
+switch (flags & (XE_BO_FLAG_STOLEN | XE_BO_FLAG_VRAM_MASK)) {
+case XE_BO_FLAG_VRAM0:
place->mem_type = XE_PL_VRAM0;
break;
-case XE_BO_CREATE_VRAM1_BIT:
+case XE_BO_FLAG_VRAM1:
place->mem_type = XE_PL_VRAM1;
break;
-case XE_BO_CREATE_STOLEN_BIT:
+case XE_BO_FLAG_STOLEN:
place->mem_type = XE_PL_STOLEN;
break;
@@ -1373,7 +1373,7 @@ __xe_bo_create_locked(struct xe_device *xe,
if (IS_ERR(bo))
return bo;
-flags |= XE_BO_FIXED_PLACEMENT_BIT;
+flags |= XE_BO_FLAG_FIXED_PLACEMENT;
err = __xe_bo_fixed_placement(xe, bo, flags, start, end, size);
if (err) {
xe_bo_free(bo);
@@ -1383,7 +1383,7 @@ __xe_bo_create_locked(struct xe_device *xe,
bo = ___xe_bo_create_locked(xe, bo, tile, vm ? xe_vm_resv(vm) : NULL,
vm && !xe_vm_in_fault_mode(vm) &&
-flags & XE_BO_CREATE_USER_BIT ?
+flags & XE_BO_FLAG_USER ?
&vm->lru_bulk_move : NULL, size,
cpu_caching, type, flags);
if (IS_ERR(bo))
@@ -1400,13 +1400,13 @@ __xe_bo_create_locked(struct xe_device *xe,
xe_vm_get(vm);
bo->vm = vm;
-if (bo->flags & XE_BO_CREATE_GGTT_BIT) {
-if (!tile && flags & XE_BO_CREATE_STOLEN_BIT)
+if (bo->flags & XE_BO_FLAG_GGTT) {
+if (!tile && flags & XE_BO_FLAG_STOLEN)
tile = xe_device_get_root_tile(xe);
xe_assert(xe, tile);
-if (flags & XE_BO_FIXED_PLACEMENT_BIT) {
+if (flags & XE_BO_FLAG_FIXED_PLACEMENT) {
err = xe_ggtt_insert_bo_at(tile->mem.ggtt, bo,
start + bo->size, U64_MAX);
} else {
@@ -1449,7 +1449,7 @@ struct xe_bo *xe_bo_create_user(struct xe_device *xe, struct xe_tile *tile,
{
struct xe_bo *bo = __xe_bo_create_locked(xe, tile, vm, size, 0, ~0ULL,
cpu_caching, type,
-flags | XE_BO_CREATE_USER_BIT);
+flags | XE_BO_FLAG_USER);
if (!IS_ERR(bo))
xe_bo_unlock_vm_held(bo);
@@ -1478,12 +1478,12 @@ struct xe_bo *xe_bo_create_pin_map_at(struct xe_device *xe, struct xe_tile *tile
u64 start = offset == ~0ull ? 0 : offset;
u64 end = offset == ~0ull ? offset : start + size;
-if (flags & XE_BO_CREATE_STOLEN_BIT &&
+if (flags & XE_BO_FLAG_STOLEN &&
xe_ttm_stolen_cpu_access_needs_ggtt(xe))
-flags |= XE_BO_CREATE_GGTT_BIT;
+flags |= XE_BO_FLAG_GGTT;
bo = xe_bo_create_locked_range(xe, tile, vm, size, start, end, type,
-flags | XE_BO_NEEDS_CPU_ACCESS);
+flags | XE_BO_FLAG_NEEDS_CPU_ACCESS);
if (IS_ERR(bo))
return bo;
@@ -1580,9 +1580,9 @@ struct xe_bo *xe_managed_bo_create_from_data(struct xe_device *xe, struct xe_til
int xe_managed_bo_reinit_in_vram(struct xe_device *xe, struct xe_tile *tile, struct xe_bo **src)
{
struct xe_bo *bo;
-u32 dst_flags = XE_BO_CREATE_VRAM_IF_DGFX(tile) | XE_BO_CREATE_GGTT_BIT;
+u32 dst_flags = XE_BO_FLAG_VRAM_IF_DGFX(tile) | XE_BO_FLAG_GGTT;
-dst_flags |= (*src)->flags & XE_BO_GGTT_INVALIDATE;
+dst_flags |= (*src)->flags & XE_BO_FLAG_GGTT_INVALIDATE;
xe_assert(xe, IS_DGFX(xe));
xe_assert(xe, !(*src)->vmap.is_iomem);
@@ -1663,8 +1663,8 @@ int xe_bo_pin(struct xe_bo *bo)
xe_assert(xe, !xe_bo_is_user(bo));
/* Pinned object must be in GGTT or have pinned flag */
-xe_assert(xe, bo->flags & (XE_BO_CREATE_PINNED_BIT |
-XE_BO_CREATE_GGTT_BIT));
+xe_assert(xe, bo->flags & (XE_BO_FLAG_PINNED |
+XE_BO_FLAG_GGTT));
/*
* No reason we can't support pinning imported dma-bufs we just don't
@@ -1685,7 +1685,7 @@ int xe_bo_pin(struct xe_bo *bo)
* during suspend / resume (force restore to same physical address).
*/
if (IS_DGFX(xe) && !(IS_ENABLED(CONFIG_DRM_XE_DEBUG) &&
-bo->flags & XE_BO_INTERNAL_TEST)) {
+bo->flags & XE_BO_FLAG_INTERNAL_TEST)) {
struct ttm_place *place = &(bo->placements[0]);
if (mem_type_is_vram(place->mem_type)) {
@@ -1753,7 +1753,7 @@ void xe_bo_unpin(struct xe_bo *bo)
xe_assert(xe, xe_bo_is_pinned(bo));
if (IS_DGFX(xe) && !(IS_ENABLED(CONFIG_DRM_XE_DEBUG) &&
-bo->flags & XE_BO_INTERNAL_TEST)) {
+bo->flags & XE_BO_FLAG_INTERNAL_TEST)) {
struct ttm_place *place = &(bo->placements[0]);
if (mem_type_is_vram(place->mem_type)) {
@@ -1856,7 +1856,7 @@ int xe_bo_vmap(struct xe_bo *bo)
xe_bo_assert_held(bo);
-if (!(bo->flags & XE_BO_NEEDS_CPU_ACCESS))
+if (!(bo->flags & XE_BO_FLAG_NEEDS_CPU_ACCESS))
return -EINVAL;
if (!iosys_map_is_null(&bo->vmap))
@@ -1938,29 +1938,29 @@ int xe_gem_create_ioctl(struct drm_device *dev, void *data,
bo_flags = 0;
if (args->flags & DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING)
-bo_flags |= XE_BO_DEFER_BACKING;
+bo_flags |= XE_BO_FLAG_DEFER_BACKING;
if (args->flags & DRM_XE_GEM_CREATE_FLAG_SCANOUT)
-bo_flags |= XE_BO_SCANOUT_BIT;
+bo_flags |= XE_BO_FLAG_SCANOUT;
-bo_flags |= args->placement << (ffs(XE_BO_CREATE_SYSTEM_BIT) - 1);
+bo_flags |= args->placement << (ffs(XE_BO_FLAG_SYSTEM) - 1);
if (args->flags & DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM) {
-if (XE_IOCTL_DBG(xe, !(bo_flags & XE_BO_CREATE_VRAM_MASK)))
+if (XE_IOCTL_DBG(xe, !(bo_flags & XE_BO_FLAG_VRAM_MASK)))
return -EINVAL;
-bo_flags |= XE_BO_NEEDS_CPU_ACCESS;
+bo_flags |= XE_BO_FLAG_NEEDS_CPU_ACCESS;
}
if (XE_IOCTL_DBG(xe, !args->cpu_caching ||
args->cpu_caching > DRM_XE_GEM_CPU_CACHING_WC))
return -EINVAL;
-if (XE_IOCTL_DBG(xe, bo_flags & XE_BO_CREATE_VRAM_MASK &&
+if (XE_IOCTL_DBG(xe, bo_flags & XE_BO_FLAG_VRAM_MASK &&
args->cpu_caching != DRM_XE_GEM_CPU_CACHING_WC))
return -EINVAL;
-if (XE_IOCTL_DBG(xe, bo_flags & XE_BO_SCANOUT_BIT &&
+if (XE_IOCTL_DBG(xe, bo_flags & XE_BO_FLAG_SCANOUT &&
args->cpu_caching == DRM_XE_GEM_CPU_CACHING_WB))
return -EINVAL;
@@ -2209,7 +2209,7 @@ bool xe_bo_needs_ccs_pages(struct xe_bo *bo)
* can't be used since there's no CCS storage associated with
* non-VRAM addresses.
*/
-if (IS_DGFX(xe) && (bo->flags & XE_BO_CREATE_SYSTEM_BIT))
+if (IS_DGFX(xe) && (bo->flags & XE_BO_FLAG_SYSTEM))
return false;
return true;
@@ -2278,9 +2278,9 @@ int xe_bo_dumb_create(struct drm_file *file_priv,
bo = xe_bo_create_user(xe, NULL, NULL, args->size,
DRM_XE_GEM_CPU_CACHING_WC,
ttm_bo_type_device,
-XE_BO_CREATE_VRAM_IF_DGFX(xe_device_get_root_tile(xe)) |
-XE_BO_SCANOUT_BIT |
-XE_BO_NEEDS_CPU_ACCESS);
+XE_BO_FLAG_VRAM_IF_DGFX(xe_device_get_root_tile(xe)) |
+XE_BO_FLAG_SCANOUT |
+XE_BO_FLAG_NEEDS_CPU_ACCESS);
if (IS_ERR(bo))
return PTR_ERR(bo);


@@ -23,33 +23,32 @@
#define XE_DEFAULT_GTT_SIZE_MB 3072ULL /* 3GB by default */
-#define XE_BO_CREATE_USER_BIT BIT(0)
+#define XE_BO_FLAG_USER BIT(0)
/* The bits below need to be contiguous, or things break */
-#define XE_BO_CREATE_SYSTEM_BIT BIT(1)
-#define XE_BO_CREATE_VRAM0_BIT BIT(2)
-#define XE_BO_CREATE_VRAM1_BIT BIT(3)
-#define XE_BO_CREATE_VRAM_MASK (XE_BO_CREATE_VRAM0_BIT | \
-XE_BO_CREATE_VRAM1_BIT)
+#define XE_BO_FLAG_SYSTEM BIT(1)
+#define XE_BO_FLAG_VRAM0 BIT(2)
+#define XE_BO_FLAG_VRAM1 BIT(3)
+#define XE_BO_FLAG_VRAM_MASK (XE_BO_FLAG_VRAM0 | XE_BO_FLAG_VRAM1)
/* -- */
-#define XE_BO_CREATE_STOLEN_BIT BIT(4)
-#define XE_BO_CREATE_VRAM_IF_DGFX(tile) \
-(IS_DGFX(tile_to_xe(tile)) ? XE_BO_CREATE_VRAM0_BIT << (tile)->id : \
-XE_BO_CREATE_SYSTEM_BIT)
-#define XE_BO_CREATE_GGTT_BIT BIT(5)
-#define XE_BO_CREATE_IGNORE_MIN_PAGE_SIZE_BIT BIT(6)
-#define XE_BO_CREATE_PINNED_BIT BIT(7)
-#define XE_BO_CREATE_NO_RESV_EVICT BIT(8)
-#define XE_BO_DEFER_BACKING BIT(9)
-#define XE_BO_SCANOUT_BIT BIT(10)
-#define XE_BO_FIXED_PLACEMENT_BIT BIT(11)
-#define XE_BO_PAGETABLE BIT(12)
-#define XE_BO_NEEDS_CPU_ACCESS BIT(13)
-#define XE_BO_NEEDS_UC BIT(14)
+#define XE_BO_FLAG_STOLEN BIT(4)
+#define XE_BO_FLAG_VRAM_IF_DGFX(tile) (IS_DGFX(tile_to_xe(tile)) ? \
+XE_BO_FLAG_VRAM0 << (tile)->id : \
+XE_BO_FLAG_SYSTEM)
+#define XE_BO_FLAG_GGTT BIT(5)
+#define XE_BO_FLAG_IGNORE_MIN_PAGE_SIZE BIT(6)
+#define XE_BO_FLAG_PINNED BIT(7)
+#define XE_BO_FLAG_NO_RESV_EVICT BIT(8)
+#define XE_BO_FLAG_DEFER_BACKING BIT(9)
+#define XE_BO_FLAG_SCANOUT BIT(10)
+#define XE_BO_FLAG_FIXED_PLACEMENT BIT(11)
+#define XE_BO_FLAG_PAGETABLE BIT(12)
+#define XE_BO_FLAG_NEEDS_CPU_ACCESS BIT(13)
+#define XE_BO_FLAG_NEEDS_UC BIT(14)
#define XE_BO_NEEDS_64K BIT(15)
-#define XE_BO_GGTT_INVALIDATE BIT(16)
+#define XE_BO_FLAG_GGTT_INVALIDATE BIT(16)
/* this one is triggered internally only */
-#define XE_BO_INTERNAL_TEST BIT(30)
-#define XE_BO_INTERNAL_64K BIT(31)
+#define XE_BO_FLAG_INTERNAL_TEST BIT(30)
+#define XE_BO_FLAG_INTERNAL_64K BIT(31)
#define XELPG_PPGTT_PTE_PAT3 BIT_ULL(62)
#define XE2_PPGTT_PTE_PAT4 BIT_ULL(61)


@@ -146,7 +146,7 @@ int xe_bo_restore_kernel(struct xe_device *xe)
return ret;
}
-if (bo->flags & XE_BO_CREATE_GGTT_BIT) {
+if (bo->flags & XE_BO_FLAG_GGTT) {
struct xe_tile *tile = bo->tile;
mutex_lock(&tile->mem.ggtt->lock);


@@ -217,7 +217,7 @@ xe_dma_buf_init_obj(struct drm_device *dev, struct xe_bo *storage,
dma_resv_lock(resv, NULL);
bo = ___xe_bo_create_locked(xe, storage, NULL, resv, NULL, dma_buf->size,
0, /* Will require 1way or 2way for vm_bind */
-ttm_bo_type_sg, XE_BO_CREATE_SYSTEM_BIT);
+ttm_bo_type_sg, XE_BO_FLAG_SYSTEM);
if (IS_ERR(bo)) {
ret = PTR_ERR(bo);
goto error;


@@ -224,11 +224,11 @@ int xe_ggtt_init(struct xe_ggtt *ggtt)
* scratch entries, rather keep the scratch page in system memory on
* platforms where 64K pages are needed for VRAM.
*/
-flags = XE_BO_CREATE_PINNED_BIT;
+flags = XE_BO_FLAG_PINNED;
if (ggtt->flags & XE_GGTT_FLAGS_64K)
-flags |= XE_BO_CREATE_SYSTEM_BIT;
+flags |= XE_BO_FLAG_SYSTEM;
else
-flags |= XE_BO_CREATE_VRAM_IF_DGFX(ggtt->tile);
+flags |= XE_BO_FLAG_VRAM_IF_DGFX(ggtt->tile);
ggtt->scratch = xe_managed_bo_create_pin_map(xe, ggtt->tile, XE_PAGE_SIZE, flags);
if (IS_ERR(ggtt->scratch)) {
@@ -375,7 +375,7 @@ int xe_ggtt_insert_special_node(struct xe_ggtt *ggtt, struct drm_mm_node *node,
void xe_ggtt_map_bo(struct xe_ggtt *ggtt, struct xe_bo *bo)
{
-u16 cache_mode = bo->flags & XE_BO_NEEDS_UC ? XE_CACHE_NONE : XE_CACHE_WB;
+u16 cache_mode = bo->flags & XE_BO_FLAG_NEEDS_UC ? XE_CACHE_NONE : XE_CACHE_WB;
u16 pat_index = tile_to_xe(ggtt->tile)->pat.idx[cache_mode];
u64 start = bo->ggtt_node.start;
u64 offset, pte;
@@ -413,7 +413,7 @@ static int __xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo,
xe_ggtt_map_bo(ggtt, bo);
mutex_unlock(&ggtt->lock);
-if (!err && bo->flags & XE_BO_GGTT_INVALIDATE)
+if (!err && bo->flags & XE_BO_FLAG_GGTT_INVALIDATE)
xe_ggtt_invalidate(ggtt);
xe_device_mem_access_put(tile_to_xe(ggtt->tile));
@@ -457,7 +457,7 @@ void xe_ggtt_remove_bo(struct xe_ggtt *ggtt, struct xe_bo *bo)
xe_tile_assert(ggtt->tile, bo->ggtt_node.size == bo->size);
xe_ggtt_remove_node(ggtt, &bo->ggtt_node,
-bo->flags & XE_BO_GGTT_INVALIDATE);
+bo->flags & XE_BO_FLAG_GGTT_INVALIDATE);
}
int xe_ggtt_dump(struct xe_ggtt *ggtt, struct drm_printer *p)


@@ -130,8 +130,8 @@ static int query_compatibility_version(struct xe_gsc *gsc)
bo = xe_bo_create_pin_map(xe, tile, NULL, GSC_VER_PKT_SZ * 2,
ttm_bo_type_kernel,
-XE_BO_CREATE_SYSTEM_BIT |
-XE_BO_CREATE_GGTT_BIT);
+XE_BO_FLAG_SYSTEM |
+XE_BO_FLAG_GGTT);
if (IS_ERR(bo)) {
xe_gt_err(gt, "failed to allocate bo for GSC version query\n");
return PTR_ERR(bo);
@@ -468,8 +468,8 @@ int xe_gsc_init_post_hwconfig(struct xe_gsc *gsc)
bo = xe_bo_create_pin_map(xe, tile, NULL, SZ_4M,
ttm_bo_type_kernel,
-XE_BO_CREATE_STOLEN_BIT |
-XE_BO_CREATE_GGTT_BIT);
+XE_BO_FLAG_STOLEN |
+XE_BO_FLAG_GGTT);
if (IS_ERR(bo))
return PTR_ERR(bo);


@@ -411,8 +411,8 @@ static int proxy_channel_alloc(struct xe_gsc *gsc)
bo = xe_bo_create_pin_map(xe, tile, NULL, GSC_PROXY_CHANNEL_SIZE,
ttm_bo_type_kernel,
-XE_BO_CREATE_SYSTEM_BIT |
-XE_BO_CREATE_GGTT_BIT);
+XE_BO_FLAG_SYSTEM |
+XE_BO_FLAG_GGTT);
if (IS_ERR(bo)) {
kfree(csme);
return PTR_ERR(bo);


@@ -273,9 +273,9 @@ int xe_guc_ads_init(struct xe_guc_ads *ads)
ads->regset_size = calculate_regset_size(gt);
bo = xe_managed_bo_create_pin_map(xe, tile, guc_ads_size(ads) + MAX_GOLDEN_LRC_SIZE,
-XE_BO_CREATE_SYSTEM_BIT |
-XE_BO_CREATE_GGTT_BIT |
-XE_BO_GGTT_INVALIDATE);
+XE_BO_FLAG_SYSTEM |
+XE_BO_FLAG_GGTT |
+XE_BO_FLAG_GGTT_INVALIDATE);
if (IS_ERR(bo))
return PTR_ERR(bo);


@@ -159,9 +159,9 @@ int xe_guc_ct_init(struct xe_guc_ct *ct)
primelockdep(ct);
bo = xe_managed_bo_create_pin_map(xe, tile, guc_ct_size(),
-XE_BO_CREATE_SYSTEM_BIT |
-XE_BO_CREATE_GGTT_BIT |
-XE_BO_GGTT_INVALIDATE);
+XE_BO_FLAG_SYSTEM |
+XE_BO_FLAG_GGTT |
+XE_BO_FLAG_GGTT_INVALIDATE);
if (IS_ERR(bo))
return PTR_ERR(bo);


@@ -78,9 +78,9 @@ int xe_guc_hwconfig_init(struct xe_guc *guc)
return -EINVAL;
bo = xe_managed_bo_create_pin_map(xe, tile, PAGE_ALIGN(size),
-XE_BO_CREATE_SYSTEM_BIT |
-XE_BO_CREATE_GGTT_BIT |
-XE_BO_GGTT_INVALIDATE);
+XE_BO_FLAG_SYSTEM |
+XE_BO_FLAG_GGTT |
+XE_BO_FLAG_GGTT_INVALIDATE);
if (IS_ERR(bo))
return PTR_ERR(bo);
guc->hwconfig.bo = bo;


@@ -84,9 +84,9 @@ int xe_guc_log_init(struct xe_guc_log *log)
struct xe_bo *bo;
bo = xe_managed_bo_create_pin_map(xe, tile, guc_log_size(),
-XE_BO_CREATE_SYSTEM_BIT |
-XE_BO_CREATE_GGTT_BIT |
-XE_BO_GGTT_INVALIDATE);
+XE_BO_FLAG_SYSTEM |
+XE_BO_FLAG_GGTT |
+XE_BO_FLAG_GGTT_INVALIDATE);
if (IS_ERR(bo))
return PTR_ERR(bo);


@@ -929,9 +929,9 @@ int xe_guc_pc_init(struct xe_guc_pc *pc)
return err;
bo = xe_managed_bo_create_pin_map(xe, tile, size,
-XE_BO_CREATE_VRAM_IF_DGFX(tile) |
-XE_BO_CREATE_GGTT_BIT |
-XE_BO_GGTT_INVALIDATE);
+XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+XE_BO_FLAG_GGTT |
+XE_BO_FLAG_GGTT_INVALIDATE);
if (IS_ERR(bo))
return PTR_ERR(bo);


@@ -59,8 +59,8 @@ static int huc_alloc_gsc_pkt(struct xe_huc *huc)
bo = xe_bo_create_pin_map(xe, gt_to_tile(gt), NULL,
PXP43_HUC_AUTH_INOUT_SIZE * 2,
ttm_bo_type_kernel,
-XE_BO_CREATE_SYSTEM_BIT |
-XE_BO_CREATE_GGTT_BIT);
+XE_BO_FLAG_SYSTEM |
+XE_BO_FLAG_GGTT);
if (IS_ERR(bo))
return PTR_ERR(bo);


@@ -518,9 +518,9 @@ static int hw_engine_init(struct xe_gt *gt, struct xe_hw_engine *hwe,
xe_reg_sr_apply_whitelist(hwe);
hwe->hwsp = xe_managed_bo_create_pin_map(xe, tile, SZ_4K,
-XE_BO_CREATE_VRAM_IF_DGFX(tile) |
-XE_BO_CREATE_GGTT_BIT |
-XE_BO_GGTT_INVALIDATE);
+XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+XE_BO_FLAG_GGTT |
+XE_BO_FLAG_GGTT_INVALIDATE);
if (IS_ERR(hwe->hwsp)) {
err = PTR_ERR(hwe->hwsp);
goto err_name;


@@ -70,8 +70,8 @@ static struct xe_lmtt_pt *lmtt_pt_alloc(struct xe_lmtt *lmtt, unsigned int level
PAGE_ALIGN(lmtt->ops->lmtt_pte_size(level) *
lmtt->ops->lmtt_pte_num(level)),
ttm_bo_type_kernel,
-XE_BO_CREATE_VRAM_IF_DGFX(lmtt_to_tile(lmtt)) |
-XE_BO_NEEDS_64K | XE_BO_CREATE_PINNED_BIT);
+XE_BO_FLAG_VRAM_IF_DGFX(lmtt_to_tile(lmtt)) |
+XE_BO_NEEDS_64K | XE_BO_FLAG_PINNED);
if (IS_ERR(bo)) {
err = PTR_ERR(bo);
goto out_free_pt;


@@ -743,9 +743,9 @@ int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
lrc->bo = xe_bo_create_pin_map(xe, tile, vm,
ring_size + xe_lrc_size(xe, hwe->class),
ttm_bo_type_kernel,
-XE_BO_CREATE_VRAM_IF_DGFX(tile) |
-XE_BO_CREATE_GGTT_BIT |
-XE_BO_GGTT_INVALIDATE);
+XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+XE_BO_FLAG_GGTT |
+XE_BO_FLAG_GGTT_INVALIDATE);
if (IS_ERR(lrc->bo))
return PTR_ERR(lrc->bo);


@@ -127,11 +127,11 @@ static int memirq_alloc_pages(struct xe_memirq *memirq)
/* XXX: convert to managed bo */
bo = xe_bo_create_pin_map(xe, tile, NULL, SZ_4K,
ttm_bo_type_kernel,
-XE_BO_CREATE_SYSTEM_BIT |
-XE_BO_CREATE_GGTT_BIT |
-XE_BO_GGTT_INVALIDATE |
-XE_BO_NEEDS_UC |
-XE_BO_NEEDS_CPU_ACCESS);
+XE_BO_FLAG_SYSTEM |
+XE_BO_FLAG_GGTT |
+XE_BO_FLAG_GGTT_INVALIDATE |
+XE_BO_FLAG_NEEDS_UC |
+XE_BO_FLAG_NEEDS_CPU_ACCESS);
if (IS_ERR(bo)) {
err = PTR_ERR(bo);
goto out;


@@ -155,8 +155,8 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
bo = xe_bo_create_pin_map(vm->xe, tile, vm,
num_entries * XE_PAGE_SIZE,
ttm_bo_type_kernel,
-XE_BO_CREATE_VRAM_IF_DGFX(tile) |
-XE_BO_CREATE_PINNED_BIT);
+XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+XE_BO_FLAG_PINNED);
if (IS_ERR(bo))
return PTR_ERR(bo);


@@ -108,11 +108,11 @@ struct xe_pt *xe_pt_create(struct xe_vm *vm, struct xe_tile *tile,
pt->level = level;
bo = xe_bo_create_pin_map(vm->xe, tile, vm, SZ_4K,
ttm_bo_type_kernel,
-XE_BO_CREATE_VRAM_IF_DGFX(tile) |
-XE_BO_CREATE_IGNORE_MIN_PAGE_SIZE_BIT |
-XE_BO_CREATE_PINNED_BIT |
-XE_BO_CREATE_NO_RESV_EVICT |
-XE_BO_PAGETABLE);
+XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+XE_BO_FLAG_IGNORE_MIN_PAGE_SIZE |
+XE_BO_FLAG_PINNED |
+XE_BO_FLAG_NO_RESV_EVICT |
+XE_BO_FLAG_PAGETABLE);
if (IS_ERR(bo)) {
err = PTR_ERR(bo);
goto err_kfree;


@@ -48,9 +48,9 @@ struct xe_sa_manager *xe_sa_bo_manager_init(struct xe_tile *tile, u32 size, u32
sa_manager->bo = NULL;
bo = xe_bo_create_pin_map(xe, tile, NULL, size, ttm_bo_type_kernel,
-XE_BO_CREATE_VRAM_IF_DGFX(tile) |
-XE_BO_CREATE_GGTT_BIT |
-XE_BO_GGTT_INVALIDATE);
+XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+XE_BO_FLAG_GGTT |
+XE_BO_FLAG_GGTT_INVALIDATE);
if (IS_ERR(bo)) {
drm_err(&xe->drm, "failed to allocate bo for sa manager: %ld\n",
PTR_ERR(bo));


@@ -303,7 +303,7 @@ static int __xe_ttm_stolen_io_mem_reserve_stolen(struct xe_device *xe,
XE_WARN_ON(IS_DGFX(xe));
/* XXX: Require BO to be mapped to GGTT? */
-if (drm_WARN_ON(&xe->drm, !(bo->flags & XE_BO_CREATE_GGTT_BIT)))
+if (drm_WARN_ON(&xe->drm, !(bo->flags & XE_BO_FLAG_GGTT)))
return -EIO;
/* GGTT is always contiguously mapped */


@@ -763,8 +763,8 @@ int xe_uc_fw_init(struct xe_uc_fw *uc_fw)
return 0;
err = uc_fw_copy(uc_fw, fw->data, fw->size,
-XE_BO_CREATE_SYSTEM_BIT | XE_BO_CREATE_GGTT_BIT |
-XE_BO_GGTT_INVALIDATE);
+XE_BO_FLAG_SYSTEM | XE_BO_FLAG_GGTT |
+XE_BO_FLAG_GGTT_INVALIDATE);
uc_fw_release(fw);


@@ -3069,7 +3069,7 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
goto put_obj;
}
-if (bos[i]->flags & XE_BO_INTERNAL_64K) {
+if (bos[i]->flags & XE_BO_FLAG_INTERNAL_64K) {
if (XE_IOCTL_DBG(xe, obj_offset &
XE_64K_PAGE_MASK) ||
XE_IOCTL_DBG(xe, addr & XE_64K_PAGE_MASK) ||