Revert "drm/xe/lnl: Offload system clear page activity to GPU"

This optimization relied on having to clear CCS on allocations anyway. If
there is no need to clear CCS on allocations, it would mostly only help
reduce CPU utilization.

Revert this patch for now because:
1) Currently Xe can't do clear-on-free, and it is using an invalid TTM flag,
   TTM_TT_FLAG_CLEARED_ON_FREE, which could poison the global TTM pool on a
   multi-device setup (see the toy sketch after this list).

2) Also, for LNL, CPU:WB doesn't require clearing CCS, as such a BO will not
   be allowed to bind with a compression PTE. A subsequent patch will disable
   clearing CCS for CPU:WB BOs on LNL.
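To illustrate point 1, here is a minimal toy sketch in plain C (this is not
the TTM API; all names are made up) of how a shared pool can be poisoned when
one device frees pages claiming they are already cleared while another device
trusts that claim:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for a page in a pool shared by several devices. */
struct pool_page {
	char data[16];
	bool cleared_on_free;	/* trusted by every device sharing the pool */
};

static struct pool_page shared_pool;	/* stand-in for the global pool */

static void device_a_free(struct pool_page *p)
{
	/* The GPU clear was only queued, never executed, yet the flag is set. */
	p->cleared_on_free = true;
}

static void device_b_alloc(struct pool_page *p)
{
	if (!p->cleared_on_free)
		memset(p->data, 0, sizeof(p->data));	/* normal CPU clear */
	/* else: trust a clear that device A never actually performed */
}

int main(void)
{
	strcpy(shared_pool.data, "stale data");	/* old user contents */
	device_a_free(&shared_pool);
	device_b_alloc(&shared_pool);
	printf("device B sees: \"%s\"\n", shared_pool.data);	/* leaks stale data */
	return 0;
}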

This reverts commit 2368306180.

Cc: Christian König <christian.koenig@amd.com>
Cc: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
Cc: Lucas De Marchi <lucas.demarchi@intel.com>
Cc: Matthew Auld <matthew.auld@intel.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Signed-off-by: Nirmoy Das <nirmoy.das@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240828083635.23601-1-nirmoy.das@intel.com
Signed-off-by: Lucas De Marchi <lucas.demarchi@intel.com>

--- a/drivers/gpu/drm/xe/xe_bo.c
+++ b/drivers/gpu/drm/xe/xe_bo.c

@@ -396,14 +396,6 @@ static struct ttm_tt *xe_ttm_tt_create(struct ttm_buffer_object *ttm_bo,
 		caching = ttm_uncached;
 	}
 
-	/*
-	 * If the device can support gpu clear system pages then set proper ttm
-	 * flag. Zeroed pages are only required for ttm_bo_type_device so
-	 * unwanted data is not leaked to userspace.
-	 */
-	if (ttm_bo->type == ttm_bo_type_device && xe->mem.gpu_page_clear_sys)
-		page_flags |= TTM_TT_FLAG_CLEARED_ON_FREE;
-
 	err = ttm_tt_init(&tt->ttm, &bo->ttm, page_flags, caching, extra_pages);
 	if (err) {
 		kfree(tt);
@@ -425,10 +417,6 @@ static int xe_ttm_tt_populate(struct ttm_device *ttm_dev, struct ttm_tt *tt,
 	if (tt->page_flags & TTM_TT_FLAG_EXTERNAL)
 		return 0;
 
-	/* Clear TTM_TT_FLAG_ZERO_ALLOC when GPU is set to clear system pages */
-	if (tt->page_flags & TTM_TT_FLAG_CLEARED_ON_FREE)
-		tt->page_flags &= ~TTM_TT_FLAG_ZERO_ALLOC;
-
 	err = ttm_pool_alloc(&ttm_dev->pool, tt, ctx);
 	if (err)
 		return err;
@@ -671,16 +659,8 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
 	bool needs_clear;
 	bool handle_system_ccs = (!IS_DGFX(xe) && xe_bo_needs_ccs_pages(bo) &&
 				  ttm && ttm_tt_is_populated(ttm)) ? true : false;
-	bool clear_system_pages;
 	int ret = 0;
 
-	/*
-	 * Clear TTM_TT_FLAG_CLEARED_ON_FREE on bo creation path when
-	 * moving to system as the bo doesn't have dma_mapping.
-	 */
-	if (!old_mem && ttm && !ttm_tt_is_populated(ttm))
-		ttm->page_flags &= ~TTM_TT_FLAG_CLEARED_ON_FREE;
-
 	/* Bo creation path, moving to system or TT. */
 	if ((!old_mem && ttm) && !handle_system_ccs) {
 		if (new_mem->mem_type == XE_PL_TT)
@@ -703,10 +683,8 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
 	move_lacks_source = handle_system_ccs ? (!bo->ccs_cleared) :
 				(!mem_type_is_vram(old_mem_type) && !tt_has_data);
 
-	clear_system_pages = ttm && (ttm->page_flags & TTM_TT_FLAG_CLEARED_ON_FREE);
 	needs_clear = (ttm && ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC) ||
-		(!ttm && ttm_bo->type == ttm_bo_type_device) ||
-		clear_system_pages;
+		(!ttm && ttm_bo->type == ttm_bo_type_device);
 
 	if (new_mem->mem_type == XE_PL_TT) {
 		ret = xe_tt_map_sg(ttm);
@@ -818,7 +796,7 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
 	if (move_lacks_source) {
 		u32 flags = 0;
 
-		if (mem_type_is_vram(new_mem->mem_type) || clear_system_pages)
+		if (mem_type_is_vram(new_mem->mem_type))
 			flags |= XE_MIGRATE_CLEAR_FLAG_FULL;
 		else if (handle_system_ccs)
 			flags |= XE_MIGRATE_CLEAR_FLAG_CCS_DATA;

--- a/drivers/gpu/drm/xe/xe_device_types.h
+++ b/drivers/gpu/drm/xe/xe_device_types.h

@@ -339,8 +339,6 @@ struct xe_device {
 		struct xe_mem_region vram;
 		/** @mem.sys_mgr: system TTM manager */
 		struct ttm_resource_manager sys_mgr;
-		/** @mem.gpu_page_clear_sys: clear system pages offloaded to GPU */
-		bool gpu_page_clear_sys;
 	} mem;
 
 	/** @sriov: device level virtualization data */

--- a/drivers/gpu/drm/xe/xe_ttm_sys_mgr.c
+++ b/drivers/gpu/drm/xe/xe_ttm_sys_mgr.c

@@ -117,17 +117,5 @@ int xe_ttm_sys_mgr_init(struct xe_device *xe)
 	ttm_resource_manager_init(man, &xe->ttm, gtt_size >> PAGE_SHIFT);
 	ttm_set_driver_manager(&xe->ttm, XE_PL_TT, man);
 	ttm_resource_manager_set_used(man, true);
-
-	/*
-	 * On iGFX device with flat CCS, we clear CCS metadata, let's extend that
-	 * and use GPU to clear pages as well.
-	 *
-	 * Disable this when init_on_free and/or init_on_alloc is on to avoid double
-	 * zeroing pages with CPU and GPU.
-	 */
-	if (xe_device_has_flat_ccs(xe) && !IS_DGFX(xe) &&
-	    !want_init_on_alloc(GFP_USER) && !want_init_on_free())
-		xe->mem.gpu_page_clear_sys = true;
-
 	return drmm_add_action_or_reset(&xe->drm, ttm_sys_mgr_fini, xe);
 }