drm/amdgpu: nuke amdgpu_ttm_is_bound() v2
Rename amdgpu_gtt_mgr_is_allocated() to amdgpu_gtt_mgr_has_gart_addr() and use
that instead.

v2: rename the function as well.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
commit 3da917b6c6 (parent 34a4d2bf06), committed by Alex Deucher
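For readers following the diff below: the point of the rename is that callers no longer ask a ttm_tt whether it is "bound", they ask the memory placement whether it already carries a GART address. A minimal sketch of that caller-side pattern, assuming the TTM_PL_TT placement type and the helper renamed here; the needs_gart_mapping() wrapper itself is hypothetical and not part of the patch:

/* Hypothetical helper, sketching the check the patched callers now do inline:
 * a buffer needs an explicit GART mapping only if it sits in the TT domain
 * and the GTT manager has not yet assigned it a GART address.
 */
static bool needs_gart_mapping(struct ttm_mem_reg *mem)
{
	if (mem->mem_type != TTM_PL_TT)
		return false;	/* VRAM/GDS placements already have a GPU offset */

	return !amdgpu_gtt_mgr_has_gart_addr(mem);
}

This is exactly the condition amdgpu_ttm_bind() and amdgpu_ttm_copy_mem_to_mem() test in the hunks below before calling into the GART mapping path.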
@@ -79,13 +79,13 @@ static int amdgpu_gtt_mgr_fini(struct ttm_mem_type_manager *man)
 }
 
 /**
- * amdgpu_gtt_mgr_is_allocated - Check if mem has address space
+ * amdgpu_gtt_mgr_has_gart_addr - Check if mem has address space
  *
  * @mem: the mem object to check
  *
  * Check if a mem object has already address space allocated.
  */
-bool amdgpu_gtt_mgr_is_allocated(struct ttm_mem_reg *mem)
+bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_mem_reg *mem)
 {
 	struct drm_mm_node *node = mem->mm_node;
 
@@ -114,7 +114,7 @@ static int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
 	unsigned long fpfn, lpfn;
 	int r;
 
-	if (amdgpu_gtt_mgr_is_allocated(mem))
+	if (amdgpu_gtt_mgr_has_gart_addr(mem))
 		return 0;
 
 	if (place)
@@ -982,7 +982,7 @@ u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
 {
 	WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM);
 	WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_TT &&
-		     !amdgpu_ttm_is_bound(bo->tbo.ttm));
+		     !amdgpu_gtt_mgr_has_gart_addr(&bo->tbo.mem));
 	WARN_ON_ONCE(!ww_mutex_is_locked(&bo->tbo.resv->lock) &&
 		     !bo->pin_count);
 	WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET);
@@ -187,7 +187,7 @@ static inline u64 amdgpu_bo_mmap_offset(struct amdgpu_bo *bo)
 static inline bool amdgpu_bo_gpu_accessible(struct amdgpu_bo *bo)
 {
 	switch (bo->tbo.mem.mem_type) {
-	case TTM_PL_TT: return amdgpu_ttm_is_bound(bo->tbo.ttm);
+	case TTM_PL_TT: return amdgpu_gtt_mgr_has_gart_addr(&bo->tbo.mem);
 	case TTM_PL_VRAM: return true;
 	default: return false;
 	}
@@ -282,8 +282,7 @@ static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
 {
 	uint64_t addr = 0;
 
-	if (mem->mem_type != TTM_PL_TT ||
-	    amdgpu_gtt_mgr_is_allocated(mem)) {
+	if (mem->mem_type != TTM_PL_TT || amdgpu_gtt_mgr_has_gart_addr(mem)) {
 		addr = mm_node->start << PAGE_SHIFT;
 		addr += bo->bdev->man[mem->mem_type].gpu_offset;
 	}
@@ -369,7 +368,7 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
 		 * dst to window 1
 		 */
 		if (src->mem->mem_type == TTM_PL_TT &&
-		    !amdgpu_gtt_mgr_is_allocated(src->mem)) {
+		    !amdgpu_gtt_mgr_has_gart_addr(src->mem)) {
 			r = amdgpu_map_buffer(src->bo, src->mem,
 					      PFN_UP(cur_size + src_page_offset),
 					      src_node_start, 0, ring,
@@ -383,7 +382,7 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
 		}
 
 		if (dst->mem->mem_type == TTM_PL_TT &&
-		    !amdgpu_gtt_mgr_is_allocated(dst->mem)) {
+		    !amdgpu_gtt_mgr_has_gart_addr(dst->mem)) {
 			r = amdgpu_map_buffer(dst->bo, dst->mem,
 					      PFN_UP(cur_size + dst_page_offset),
 					      dst_node_start, 1, ring,
@@ -861,8 +860,10 @@ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
 	    bo_mem->mem_type == AMDGPU_PL_OA)
 		return -EINVAL;
 
-	if (!amdgpu_gtt_mgr_is_allocated(bo_mem))
+	if (!amdgpu_gtt_mgr_has_gart_addr(bo_mem)) {
+		gtt->offset = AMDGPU_BO_INVALID_OFFSET;
 		return 0;
+	}
 
 	spin_lock(&gtt->adev->gtt_list_lock);
 	flags = amdgpu_ttm_tt_pte_flags(gtt->adev, ttm, bo_mem);
@@ -882,23 +883,16 @@ error_gart_bind:
 	return r;
 }
 
-bool amdgpu_ttm_is_bound(struct ttm_tt *ttm)
-{
-	struct amdgpu_ttm_tt *gtt = (void *)ttm;
-
-	return gtt && !list_empty(&gtt->list);
-}
-
 int amdgpu_ttm_bind(struct ttm_buffer_object *bo)
 {
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
-	struct ttm_tt *ttm = bo->ttm;
 	struct ttm_mem_reg tmp;
 	struct ttm_placement placement;
 	struct ttm_place placements;
 	int r;
 
-	if (!ttm || amdgpu_ttm_is_bound(ttm))
+	if (bo->mem.mem_type != TTM_PL_TT ||
+	    amdgpu_gtt_mgr_has_gart_addr(&bo->mem))
 		return 0;
 
 	tmp = bo->mem;
@@ -959,7 +953,7 @@ static int amdgpu_ttm_backend_unbind(struct ttm_tt *ttm)
 	if (gtt->userptr)
 		amdgpu_ttm_tt_unpin_userptr(ttm);
 
-	if (!amdgpu_ttm_is_bound(ttm))
+	if (gtt->offset == AMDGPU_BO_INVALID_OFFSET)
 		return 0;
 
 	/* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */
@@ -67,7 +67,7 @@ struct amdgpu_copy_mem {
 extern const struct ttm_mem_type_manager_func amdgpu_gtt_mgr_func;
 extern const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func;
 
-bool amdgpu_gtt_mgr_is_allocated(struct ttm_mem_reg *mem);
+bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_mem_reg *mem);
 uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man);
 
 uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man);
@@ -90,7 +90,6 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
 		       struct dma_fence **fence);
 
 int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma);
-bool amdgpu_ttm_is_bound(struct ttm_tt *ttm);
 int amdgpu_ttm_bind(struct ttm_buffer_object *bo);
 int amdgpu_ttm_recover_gart(struct amdgpu_device *adev);
 