Mirror of https://github.com/torvalds/linux.git (synced 2024-11-12 23:23:03 +00:00)
drm/amdgpu: use scheduler dependencies for VM updates
Track these fences as scheduler dependencies instead of putting them into the job sync object.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Luben Tuikov <luben.tuikov@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20221014084641.128280-9-christian.koenig@amd.com
This commit is contained in:
parent 1b2d5eda5a
commit aab9cf7b69
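In short: rather than stashing the fences to wait for in the job's embedded sync object, callers now collect them into a temporary amdgpu_sync and push the unsignaled ones into the scheduler job as dependencies. Below is a condensed sketch of the resulting flow, mirroring the amdgpu_vm_sdma_prepare() hunk further down (the wrapper name prepare_vm_update is illustrative only; includes and surrounding job setup are trimmed):

	/* Hypothetical wrapper illustrating the new dependency flow. */
	static int prepare_vm_update(struct amdgpu_vm_update_params *p,
				     struct dma_resv *resv,
				     enum amdgpu_sync_mode sync_mode)
	{
		struct amdgpu_sync sync;
		int r;

		/* Gather the fences we must wait for from the reservation object. */
		amdgpu_sync_create(&sync);
		r = amdgpu_sync_resv(p->adev, &sync, resv, sync_mode, p->vm);

		/* Hand every unsignaled fence to the scheduler as a job dependency. */
		if (!r)
			r = amdgpu_sync_push_to_job(&sync, p->job);

		amdgpu_sync_free(&sync);
		return r;
	}

The scheduler then waits for these dependencies itself before running the job, so the job no longer needs to carry its own sync container for this purpose.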
drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -259,6 +259,14 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
 	return 0;
 }
 
+/* Free the entry back to the slab */
+static void amdgpu_sync_entry_free(struct amdgpu_sync_entry *e)
+{
+	hash_del(&e->node);
+	dma_fence_put(e->fence);
+	kmem_cache_free(amdgpu_sync_slab, e);
+}
+
 /**
  * amdgpu_sync_peek_fence - get the next fence not signaled yet
  *
@@ -280,9 +288,7 @@ struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
 		struct drm_sched_fence *s_fence = to_drm_sched_fence(f);
 
 		if (dma_fence_is_signaled(f)) {
-			hash_del(&e->node);
-			dma_fence_put(f);
-			kmem_cache_free(amdgpu_sync_slab, e);
+			amdgpu_sync_entry_free(e);
 			continue;
 		}
 		if (ring && s_fence) {
@@ -355,15 +361,42 @@ int amdgpu_sync_clone(struct amdgpu_sync *source, struct amdgpu_sync *clone)
 			if (r)
 				return r;
 		} else {
-			hash_del(&e->node);
-			dma_fence_put(f);
-			kmem_cache_free(amdgpu_sync_slab, e);
+			amdgpu_sync_entry_free(e);
 		}
 	}
 
 	return 0;
 }
 
+/**
+ * amdgpu_sync_push_to_job - push fences into job
+ * @sync: sync object to get the fences from
+ * @job: job to push the fences into
+ *
+ * Add all unsignaled fences from sync to job.
+ */
+int amdgpu_sync_push_to_job(struct amdgpu_sync *sync, struct amdgpu_job *job)
+{
+	struct amdgpu_sync_entry *e;
+	struct hlist_node *tmp;
+	struct dma_fence *f;
+	int i, r;
+
+	hash_for_each_safe(sync->fences, i, tmp, e, node) {
+		f = e->fence;
+		if (dma_fence_is_signaled(f)) {
+			amdgpu_sync_entry_free(e);
+			continue;
+		}
+
+		dma_fence_get(f);
+		r = drm_sched_job_add_dependency(&job->base, f);
+		if (r)
+			return r;
+	}
+	return 0;
+}
+
 int amdgpu_sync_wait(struct amdgpu_sync *sync, bool intr)
 {
 	struct amdgpu_sync_entry *e;
@@ -375,9 +408,7 @@ int amdgpu_sync_wait(struct amdgpu_sync *sync, bool intr)
 		if (r)
 			return r;
 
-		hash_del(&e->node);
-		dma_fence_put(e->fence);
-		kmem_cache_free(amdgpu_sync_slab, e);
+		amdgpu_sync_entry_free(e);
 	}
 
 	return 0;
@@ -396,11 +427,8 @@ void amdgpu_sync_free(struct amdgpu_sync *sync)
 	struct hlist_node *tmp;
 	unsigned int i;
 
-	hash_for_each_safe(sync->fences, i, tmp, e, node) {
-		hash_del(&e->node);
-		dma_fence_put(e->fence);
-		kmem_cache_free(amdgpu_sync_slab, e);
-	}
+	hash_for_each_safe(sync->fences, i, tmp, e, node)
+		amdgpu_sync_entry_free(e);
 }
 
 /**
drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
@@ -30,6 +30,7 @@ struct dma_fence;
 struct dma_resv;
 struct amdgpu_device;
 struct amdgpu_ring;
+struct amdgpu_job;
 
 enum amdgpu_sync_mode {
 	AMDGPU_SYNC_ALWAYS,
@@ -54,6 +55,7 @@ struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
 				     struct amdgpu_ring *ring);
 struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync);
 int amdgpu_sync_clone(struct amdgpu_sync *source, struct amdgpu_sync *clone);
+int amdgpu_sync_push_to_job(struct amdgpu_sync *sync, struct amdgpu_job *job);
 int amdgpu_sync_wait(struct amdgpu_sync *sync, bool intr);
 void amdgpu_sync_free(struct amdgpu_sync *sync);
 int amdgpu_sync_init(void);
drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
@@ -87,6 +87,7 @@ static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p,
 				  struct dma_resv *resv,
 				  enum amdgpu_sync_mode sync_mode)
 {
+	struct amdgpu_sync sync;
 	int r;
 
 	r = amdgpu_vm_sdma_alloc_job(p, 0);
@@ -96,7 +97,12 @@ static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p,
 	if (!resv)
 		return 0;
 
-	return amdgpu_sync_resv(p->adev, &p->job->sync, resv, sync_mode, p->vm);
+	amdgpu_sync_create(&sync);
+	r = amdgpu_sync_resv(p->adev, &sync, resv, sync_mode, p->vm);
+	if (!r)
+		r = amdgpu_sync_push_to_job(&sync, p->job);
+	amdgpu_sync_free(&sync);
+	return r;
 }
 
 /**
@@ -225,7 +231,7 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
 	/* Wait for PD/PT moves to be completed */
 	dma_resv_iter_begin(&cursor, bo->tbo.base.resv, DMA_RESV_USAGE_KERNEL);
 	dma_resv_for_each_fence_unlocked(&cursor, fence) {
-		r = amdgpu_sync_fence(&p->job->sync, fence);
+		r = drm_sched_job_add_dependency(&p->job->base, fence);
 		if (r) {
 			dma_resv_iter_end(&cursor);
 			return r;
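A note on reference ownership in the new amdgpu_sync_push_to_job() above: drm_sched_job_add_dependency() consumes one fence reference in both the success and the error case, while the sync entry keeps holding its own reference until amdgpu_sync_entry_free() drops it. That is why the loop takes an extra reference before adding the dependency; annotated excerpt from the hunk above:

	hash_for_each_safe(sync->fences, i, tmp, e, node) {
		f = e->fence;
		if (dma_fence_is_signaled(f)) {
			/* Already signaled: no dependency needed, just free the entry. */
			amdgpu_sync_entry_free(e);
			continue;
		}

		/* +1 reference for the scheduler; the entry keeps its own. */
		dma_fence_get(f);
		r = drm_sched_job_add_dependency(&job->base, f);
		if (r)
			return r;
	}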