drm/ttm: remove lru_lock around ttm_bo_reserve
There should no longer be assumptions that reserve will always succeed
with the lru lock held, so we can safely break the whole atomic
reserve/lru thing. As a bonus this fixes most lockdep annotations for
reservations.

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@canonical.com>
Reviewed-by: Jerome Glisse <jglisse@redhat.com>
commit 63d0a41955
parent 979ee290ff
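In outline, the patch inverts the locking in ttm_bo_reserve(): the buffer is
reserved first, and the LRU spinlock is taken only afterwards for the brief
list manipulation, instead of reserving atomically under the lock. A condensed
before/after sketch of that flow, paraphrased from the ttm_bo_reserve() hunk
below (illustrative only, not the full function):

	/* Before: reservation and LRU removal formed one atomic section. */
	spin_lock(&glob->lru_lock);
	ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, use_sequence,
				    sequence);
	if (likely(ret == 0))
		put_count = ttm_bo_del_from_lru(bo);
	spin_unlock(&glob->lru_lock);
	ttm_bo_list_ref_sub(bo, put_count, true);

	/* After: reserve first (which may sleep), then take the lru_lock
	 * only to delist the now-reserved buffer. */
	ret = ttm_bo_reserve_nolru(bo, interruptible, no_wait, use_sequence,
				   sequence);
	if (likely(ret == 0)) {
		spin_lock(&glob->lru_lock);
		put_count = ttm_bo_del_from_lru(bo);
		spin_unlock(&glob->lru_lock);
		ttm_bo_list_ref_sub(bo, put_count, true);
	}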
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -213,14 +213,13 @@ int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
 	return put_count;
 }
 
-int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
+int ttm_bo_reserve_nolru(struct ttm_buffer_object *bo,
 			  bool interruptible,
 			  bool no_wait, bool use_sequence, uint32_t sequence)
 {
-	struct ttm_bo_global *glob = bo->glob;
 	int ret;
 
-	while (unlikely(atomic_read(&bo->reserved) != 0)) {
+	while (unlikely(atomic_xchg(&bo->reserved, 1) != 0)) {
 		/**
 		 * Deadlock avoidance for multi-bo reserving.
 		 */
@@ -241,26 +240,36 @@ int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
 		if (no_wait)
 			return -EBUSY;
 
-		spin_unlock(&glob->lru_lock);
 		ret = ttm_bo_wait_unreserved(bo, interruptible);
-		spin_lock(&glob->lru_lock);
 
 		if (unlikely(ret))
 			return ret;
 	}
 
-	atomic_set(&bo->reserved, 1);
 	if (use_sequence) {
+		bool wake_up = false;
 		/**
 		 * Wake up waiters that may need to recheck for deadlock,
 		 * if we decreased the sequence number.
 		 */
 		if (unlikely((bo->val_seq - sequence < (1 << 31))
 			     || !bo->seq_valid))
-			wake_up_all(&bo->event_queue);
+			wake_up = true;
 
+		/*
+		 * In the worst case with memory ordering these values can be
+		 * seen in the wrong order. However since we call wake_up_all
+		 * in that case, this will hopefully not pose a problem,
+		 * and the worst case would only cause someone to accidentally
+		 * hit -EAGAIN in ttm_bo_reserve when they see old value of
+		 * val_seq. However this would only happen if seq_valid was
+		 * written before val_seq was, and just means some slightly
+		 * increased cpu usage
+		 */
 		bo->val_seq = sequence;
 		bo->seq_valid = true;
+		if (wake_up)
+			wake_up_all(&bo->event_queue);
 	} else {
 		bo->seq_valid = false;
 	}
@@ -289,14 +298,14 @@ int ttm_bo_reserve(struct ttm_buffer_object *bo,
 	int put_count = 0;
 	int ret;
 
-	spin_lock(&glob->lru_lock);
-	ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, use_sequence,
-				    sequence);
-	if (likely(ret == 0))
+	ret = ttm_bo_reserve_nolru(bo, interruptible, no_wait, use_sequence,
+				   sequence);
+	if (likely(ret == 0)) {
+		spin_lock(&glob->lru_lock);
 		put_count = ttm_bo_del_from_lru(bo);
-	spin_unlock(&glob->lru_lock);
-
-	ttm_bo_list_ref_sub(bo, put_count, true);
+		spin_unlock(&glob->lru_lock);
+		ttm_bo_list_ref_sub(bo, put_count, true);
+	}
 
 	return ret;
 }
@@ -510,7 +519,7 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
 	int ret;
 
 	spin_lock(&glob->lru_lock);
-	ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+	ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
 
 	spin_lock(&bdev->fence_lock);
 	(void) ttm_bo_wait(bo, false, false, true);
@@ -603,7 +612,7 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
 		return ret;
 
 	spin_lock(&glob->lru_lock);
-	ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+	ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
 
 	/*
 	 * We raced, and lost, someone else holds the reservation now,
@@ -667,7 +676,14 @@ static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
 			kref_get(&nentry->list_kref);
 		}
 
-		ret = ttm_bo_reserve_locked(entry, false, !remove_all, false, 0);
+		ret = ttm_bo_reserve_nolru(entry, false, true, false, 0);
+		if (remove_all && ret) {
+			spin_unlock(&glob->lru_lock);
+			ret = ttm_bo_reserve_nolru(entry, false, false,
+						   false, 0);
+			spin_lock(&glob->lru_lock);
+		}
+
 		if (!ret)
 			ret = ttm_bo_cleanup_refs_and_unlock(entry, false,
 							     !remove_all);
@@ -815,7 +831,7 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
 
 	spin_lock(&glob->lru_lock);
 	list_for_each_entry(bo, &man->lru, lru) {
-		ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+		ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
 		if (!ret)
 			break;
 	}
@@ -1796,7 +1812,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
 
 	spin_lock(&glob->lru_lock);
 	list_for_each_entry(bo, &glob->swap_lru, swap) {
-		ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+		ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
 		if (!ret)
 			break;
 	}
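The reserve fast path above now leans on atomic_xchg() as a try-lock: swapping
1 into bo->reserved and reading back 0 means the reservation was taken, while
reading back 1 means another thread holds it and we must wait. A minimal
userspace analogue using C11 atomics (stand-in names, not the kernel's
atomic_t API):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int reserved; /* hypothetical stand-in for bo->reserved */

/* The exchange tests and sets the flag in one atomic step, so no outer
 * spinlock (like the old lru_lock) is needed to make it race-free. */
static bool try_reserve(void)
{
	return atomic_exchange(&reserved, 1) == 0;
}

static void unreserve(void)
{
	atomic_store(&reserved, 0); /* the kernel also wakes waiters here */
}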
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -153,7 +153,7 @@ retry:
 		struct ttm_buffer_object *bo = entry->bo;
 
 retry_this_bo:
-		ret = ttm_bo_reserve_locked(bo, true, true, true, val_seq);
+		ret = ttm_bo_reserve_nolru(bo, true, true, true, val_seq);
 		switch (ret) {
 		case 0:
 			break;
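The val_seq passed here is the per-acquisition sequence number used for
multi-buffer deadlock avoidance; the patch's wake-up check compares sequence
numbers with the usual wraparound-safe unsigned subtraction,
(bo->val_seq - sequence < (1 << 31)). A tiny self-contained illustration of
that idiom (hypothetical helper name):

#include <stdint.h>
#include <stdio.h>

/* Wraparound-safe "a is at least as new as b" for 32-bit sequence
 * counters, the same idiom as (bo->val_seq - sequence < (1 << 31)). */
static int seq_ge(uint32_t a, uint32_t b)
{
	return (a - b) < (1u << 31);
}

int main(void)
{
	printf("%d\n", seq_ge(5, 3));           /* 1: 5 is newer */
	printf("%d\n", seq_ge(3, 5));           /* 0: 3 is older */
	printf("%d\n", seq_ge(2, 0xfffffffeu)); /* 1: 2 wrapped past 2^32-2 */
	return 0;
}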
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -790,16 +790,7 @@ extern void ttm_mem_io_unlock(struct ttm_mem_type_manager *man);
  * to make room for a buffer already reserved. (Buffers are reserved before
  * they are evicted). The following algorithm prevents such deadlocks from
  * occurring:
- * 1) Buffers are reserved with the lru spinlock held. Upon successful
- * reservation they are removed from the lru list. This stops a reserved buffer
- * from being evicted. However the lru spinlock is released between the time
- * a buffer is selected for eviction and the time it is reserved.
- * Therefore a check is made when a buffer is reserved for eviction, that it
- * is still the first buffer in the lru list, before it is removed from the
- * list. @check_lru == 1 forces this check. If it fails, the function returns
- * -EINVAL, and the caller should then choose a new buffer to evict and repeat
- * the procedure.
- * 2) Processes attempting to reserve multiple buffers other than for eviction,
+ * Processes attempting to reserve multiple buffers other than for eviction,
  * (typically execbuf), should first obtain a unique 32-bit
  * validation sequence number,
  * and call this function with @use_sequence == 1 and @sequence == the unique
@@ -832,7 +823,7 @@ extern int ttm_bo_reserve(struct ttm_buffer_object *bo,
 
 
 /**
- * ttm_bo_reserve_locked:
+ * ttm_bo_reserve_nolru:
  *
  * @bo: A pointer to a struct ttm_buffer_object.
 * @interruptible: Sleep interruptible if waiting.
@@ -840,9 +831,7 @@ extern int ttm_bo_reserve(struct ttm_buffer_object *bo,
 * @use_sequence: If @bo is already reserved, Only sleep waiting for
 * it to become unreserved if @sequence < (@bo)->sequence.
 *
- * Must be called with struct ttm_bo_global::lru_lock held,
- * and will not remove reserved buffers from the lru lists.
- * The function may release the LRU spinlock if it needs to sleep.
+ * Will not remove reserved buffers from the lru lists.
 * Otherwise identical to ttm_bo_reserve.
 *
 * Returns:
@@ -855,7 +844,7 @@ extern int ttm_bo_reserve(struct ttm_buffer_object *bo,
 * -EDEADLK: Bo already reserved using @sequence. This error code will only
 * be returned if @use_sequence is set to true.
 */
-extern int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
+extern int ttm_bo_reserve_nolru(struct ttm_buffer_object *bo,
 				bool interruptible,
 				bool no_wait, bool use_sequence,
 				uint32_t sequence);