drm/amdgpu: Enable clear page functionality
Add clear page support in vram memory region.

v1(Christian):
  - Don't handle clear page as a TTM flag since when moving the BO back
    in from GTT again we don't need that.
  - Make a specialized version of amdgpu_fill_buffer() which only
    clears the VRAM areas which are not already cleared.
  - Drop the TTM_PL_FLAG_WIPE_ON_RELEASE check in amdgpu_object.c.

v2:
  - Modify the function name amdgpu_ttm_* (Alex)
  - Drop the delayed parameter (Christian)
  - Handle amdgpu_res_cleared(&cursor) just above the size
    calculation (Christian)
  - Use AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE for clearing the buffers
    in the free path to properly wait for fences etc. (Christian)

v3(Christian):
  - Remove the buffer clear code in the VRAM manager; instead change the
    AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE handling to set the
    DRM_BUDDY_CLEARED flag.
  - Remove ! from the amdgpu_res_cleared(&cursor) check.

v4(Christian):
  - Move the vres flag setting to the VRAM manager file.
  - Use dma_fence_get_stub in the amdgpu_ttm_clear_buffer function.
  - Make fence a mandatory parameter and drop the if and the
    get/put dance.

Signed-off-by: Arunpravin Paneer Selvam <Arunpravin.PaneerSelvam@amd.com>
Suggested-by: Christian König <christian.koenig@amd.com>
Acked-by: Felix Kuehling <felix.kuehling@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240419063538.11957-2-Arunpravin.PaneerSelvam@amd.com
Signed-off-by: Christian König <christian.koenig@amd.com>
commit a68c7eaa7a
parent 96950929eb
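The core idea of the patch: the VRAM buddy allocator remembers which freed blocks are already zeroed, so a later "clear this BO" request only scrubs the blocks that are actually dirty. As a rough illustration of that bookkeeping, here is a toy userspace model in plain C — every name in it is invented, and it is not kernel code:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct toy_block {
	char data[16];
	bool cleared;		/* plays the role of DRM_BUDDY_CLEARED */
};

/* Clear only blocks that are not already known to be zero,
 * mirroring the amdgpu_res_cleared() skip in the patch below. */
static unsigned int toy_clear_buffer(struct toy_block *blocks,
				     unsigned int count)
{
	unsigned int scrubbed = 0;

	for (unsigned int i = 0; i < count; i++) {
		if (blocks[i].cleared)
			continue;	/* already zeroed, skip the work */
		memset(blocks[i].data, 0, sizeof(blocks[i].data));
		blocks[i].cleared = true;
		scrubbed++;
	}
	return scrubbed;
}

int main(void)
{
	struct toy_block vram[4] = { 0 };

	vram[1].cleared = true;	/* freed as cleared earlier */
	vram[3].cleared = true;

	printf("scrubbed %u of 4 blocks\n", toy_clear_buffer(vram, 4));
	return 0;
}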
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -39,6 +39,7 @@
 #include "amdgpu.h"
 #include "amdgpu_trace.h"
 #include "amdgpu_amdkfd.h"
+#include "amdgpu_vram_mgr.h"
 
 /**
  * DOC: amdgpu_object
@@ -601,8 +602,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
 	if (!amdgpu_bo_support_uswc(bo->flags))
 		bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
 
-	if (adev->ras_enabled)
-		bo->flags |= AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;
+	bo->flags |= AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;
 
 	bo->tbo.bdev = &adev->mman.bdev;
 	if (bp->domain & (AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA |
@@ -634,7 +634,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
 	    bo->tbo.resource->mem_type == TTM_PL_VRAM) {
 		struct dma_fence *fence;
 
-		r = amdgpu_fill_buffer(bo, 0, bo->tbo.base.resv, &fence, true);
+		r = amdgpu_ttm_clear_buffer(bo, bo->tbo.base.resv, &fence);
 		if (unlikely(r))
 			goto fail_unreserve;
 
@@ -1365,8 +1365,9 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
 	if (WARN_ON_ONCE(!dma_resv_trylock(bo->base.resv)))
 		return;
 
-	r = amdgpu_fill_buffer(abo, AMDGPU_POISON, bo->base.resv, &fence, true);
+	r = amdgpu_fill_buffer(abo, 0, bo->base.resv, &fence, true);
 	if (!WARN_ON(r)) {
+		amdgpu_vram_mgr_set_cleared(bo->resource);
 		amdgpu_bo_fence(abo, fence, false);
 		dma_fence_put(fence);
 	}
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
@@ -164,4 +164,29 @@ static inline void amdgpu_res_next(struct amdgpu_res_cursor *cur, uint64_t size)
 	}
 }
 
+/**
+ * amdgpu_res_cleared - check if blocks are cleared
+ *
+ * @cur: the cursor to extract the block
+ *
+ * Check if the @cur block is cleared
+ */
+static inline bool amdgpu_res_cleared(struct amdgpu_res_cursor *cur)
+{
+	struct drm_buddy_block *block;
+
+	switch (cur->mem_type) {
+	case TTM_PL_VRAM:
+		block = cur->node;
+
+		if (!amdgpu_vram_mgr_is_cleared(block))
+			return false;
+		break;
+	default:
+		return false;
+	}
+
+	return true;
+}
+
 #endif
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -378,11 +378,12 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
 	    (abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE)) {
 		struct dma_fence *wipe_fence = NULL;
 
-		r = amdgpu_fill_buffer(abo, AMDGPU_POISON, NULL, &wipe_fence,
+		r = amdgpu_fill_buffer(abo, 0, NULL, &wipe_fence,
 				       false);
 		if (r) {
 			goto error;
 		} else if (wipe_fence) {
+			amdgpu_vram_mgr_set_cleared(bo->resource);
 			dma_fence_put(fence);
 			fence = wipe_fence;
 		}
@@ -2215,6 +2216,71 @@ static int amdgpu_ttm_fill_mem(struct amdgpu_ring *ring, uint32_t src_data,
 	return 0;
 }
 
+/**
+ * amdgpu_ttm_clear_buffer - clear memory buffers
+ * @bo: amdgpu buffer object
+ * @resv: reservation object
+ * @fence: dma_fence associated with the operation
+ *
+ * Clear the memory buffer resource.
+ *
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
+int amdgpu_ttm_clear_buffer(struct amdgpu_bo *bo,
+			    struct dma_resv *resv,
+			    struct dma_fence **fence)
+{
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
+	struct amdgpu_res_cursor cursor;
+	u64 addr;
+	int r;
+
+	if (!adev->mman.buffer_funcs_enabled)
+		return -EINVAL;
+
+	if (!fence)
+		return -EINVAL;
+
+	*fence = dma_fence_get_stub();
+
+	amdgpu_res_first(bo->tbo.resource, 0, amdgpu_bo_size(bo), &cursor);
+
+	mutex_lock(&adev->mman.gtt_window_lock);
+	while (cursor.remaining) {
+		struct dma_fence *next = NULL;
+		u64 size;
+
+		if (amdgpu_res_cleared(&cursor)) {
+			amdgpu_res_next(&cursor, cursor.size);
+			continue;
+		}
+
+		/* Never clear more than 256MiB at once to avoid timeouts */
+		size = min(cursor.size, 256ULL << 20);
+
+		r = amdgpu_ttm_map_buffer(&bo->tbo, bo->tbo.resource, &cursor,
+					  1, ring, false, &size, &addr);
+		if (r)
+			goto err;
+
+		r = amdgpu_ttm_fill_mem(ring, 0, addr, size, resv,
+					&next, true, true);
+		if (r)
+			goto err;
+
+		dma_fence_put(*fence);
+		*fence = next;
+
+		amdgpu_res_next(&cursor, size);
+	}
+err:
+	mutex_unlock(&adev->mman.gtt_window_lock);
+
+	return r;
+}
+
 int amdgpu_fill_buffer(struct amdgpu_bo *bo,
 		       uint32_t src_data,
 		       struct dma_resv *resv,
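One subtlety in amdgpu_ttm_clear_buffer() above is the fence bookkeeping: the out-parameter starts as a stub fence and is swapped for the fence of each submitted chunk, so the caller always receives exactly one reference to the fence of the last clear job. The following is a minimal userspace sketch of that hand-off pattern — invented names and a toy refcount, not the kernel's dma_fence API:

#include <stdio.h>
#include <stdlib.h>

struct toy_fence {
	int refcount;
	int seqno;
};

/* Stand-in for dma_fence_get_stub(): an already-signaled placeholder. */
static struct toy_fence *toy_fence_get_stub(void)
{
	struct toy_fence *f = calloc(1, sizeof(*f));

	f->refcount = 1;
	f->seqno = -1;
	return f;
}

/* Stand-in for dma_fence_put(): drop one reference, free on zero. */
static void toy_fence_put(struct toy_fence *f)
{
	if (f && --f->refcount == 0)
		free(f);
}

/* Stand-in for submitting one clear chunk and getting its fence back. */
static struct toy_fence *toy_submit_chunk(int seqno)
{
	struct toy_fence *f = calloc(1, sizeof(*f));

	f->refcount = 1;
	f->seqno = seqno;
	return f;
}

int main(void)
{
	struct toy_fence *fence = toy_fence_get_stub();

	for (int chunk = 0; chunk < 3; chunk++) {
		struct toy_fence *next = toy_submit_chunk(chunk);

		toy_fence_put(fence);	/* drop the previous reference */
		fence = next;		/* keep only the newest fence */
	}

	printf("caller waits on fence seqno %d\n", fence->seqno);
	toy_fence_put(fence);
	return 0;
}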
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -38,8 +38,6 @@
 #define AMDGPU_GTT_MAX_TRANSFER_SIZE	512
 #define AMDGPU_GTT_NUM_TRANSFER_WINDOWS	2
 
-#define AMDGPU_POISON	0xd0bed0be
-
 extern const struct attribute_group amdgpu_vram_mgr_attr_group;
 extern const struct attribute_group amdgpu_gtt_mgr_attr_group;
 
@@ -155,6 +153,9 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
 			       uint64_t size, bool tmz,
 			       struct dma_resv *resv,
 			       struct dma_fence **f);
+int amdgpu_ttm_clear_buffer(struct amdgpu_bo *bo,
+			    struct dma_resv *resv,
+			    struct dma_fence **fence);
 int amdgpu_fill_buffer(struct amdgpu_bo *bo,
 		       uint32_t src_data,
 		       struct dma_resv *resv,
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -450,6 +450,7 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
 {
 	struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
 	struct amdgpu_device *adev = to_amdgpu_device(mgr);
+	struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);
 	u64 vis_usage = 0, max_bytes, min_block_size;
 	struct amdgpu_vram_mgr_resource *vres;
 	u64 size, remaining_size, lpfn, fpfn;
@@ -501,6 +502,9 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
 	if (place->flags & TTM_PL_FLAG_CONTIGUOUS)
 		vres->flags |= DRM_BUDDY_CONTIGUOUS_ALLOCATION;
 
+	if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CLEARED)
+		vres->flags |= DRM_BUDDY_CLEAR_ALLOCATION;
+
 	if (fpfn || lpfn != mgr->mm.size)
 		/* Allocate blocks in desired range */
 		vres->flags |= DRM_BUDDY_RANGE_ALLOCATION;
@@ -604,7 +608,7 @@ static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man,
 
 	amdgpu_vram_mgr_do_reserve(man);
 
-	drm_buddy_free_list(mm, &vres->blocks, 0);
+	drm_buddy_free_list(mm, &vres->blocks, vres->flags);
 	mutex_unlock(&mgr->lock);
 
 	atomic64_sub(vis_usage, &mgr->vis_usage);
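The free-path change above is what makes the optimization stick: by passing vres->flags to drm_buddy_free_list(), a resource wiped on release hands its DRM_BUDDY_CLEARED state back to the allocator, and a later allocation that wants zeroed VRAM can reuse the block without scrubbing it. A toy model of that round trip in plain userspace C — names are made up for illustration:

#include <stdbool.h>
#include <stdio.h>

#define TOY_CLEARED 0x1	/* stands in for DRM_BUDDY_CLEARED */

struct toy_block {
	bool cleared;
	bool free;
};

/* Free path: the resource's flags decide whether the allocator
 * remembers the block contents as already zeroed. */
static void toy_free_block(struct toy_block *b, unsigned int flags)
{
	b->free = true;
	b->cleared = flags & TOY_CLEARED;
}

/* Allocation path: a request for zeroed memory only needs a scrub
 * when the block was not freed as cleared. */
static bool toy_alloc_needs_scrub(struct toy_block *b)
{
	b->free = false;
	return !b->cleared;
}

int main(void)
{
	struct toy_block block = { 0 };

	toy_free_block(&block, TOY_CLEARED);	/* wiped on release */
	printf("needs scrub: %s\n",
	       toy_alloc_needs_scrub(&block) ? "yes" : "no");
	return 0;
}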
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h
@@ -53,10 +53,20 @@ static inline u64 amdgpu_vram_mgr_block_size(struct drm_buddy_block *block)
 	return (u64)PAGE_SIZE << drm_buddy_block_order(block);
 }
 
+static inline bool amdgpu_vram_mgr_is_cleared(struct drm_buddy_block *block)
+{
+	return drm_buddy_block_is_clear(block);
+}
+
 static inline struct amdgpu_vram_mgr_resource *
 to_amdgpu_vram_mgr_resource(struct ttm_resource *res)
 {
 	return container_of(res, struct amdgpu_vram_mgr_resource, base);
 }
 
+static inline void amdgpu_vram_mgr_set_cleared(struct ttm_resource *res)
+{
+	to_amdgpu_vram_mgr_resource(res)->flags |= DRM_BUDDY_CLEARED;
+}
+
 #endif