forked from Minki/linux
drm/msm: Add a way for userspace to allocate GPU iova
The motivation at this point is mainly native userspace mesa driver in a VM guest. The one remaining synchronous "hotpath" is buffer allocation, because guest needs to wait to know the bo's iova before it can start emitting cmdstream/state that references the new bo. By allocating the iova in the guest userspace, we no longer need to wait for a response from the host, but can just rely on the allocation request being processed before the cmdstream submission. Allocation failures (OoM, etc) would just be treated as context-lost (ie. GL_GUILTY_CONTEXT_RESET) or subsequent allocations (or readpix, etc) can raise GL_OUT_OF_MEMORY. v2: Fix inuse check v3: Change mismatched iova case to -EBUSY Signed-off-by: Rob Clark <robdclark@chromium.org> Reviewed-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org> Reviewed-by: Dmitry Osipenko <dmitry.osipenko@collabora.com> Link: https://lore.kernel.org/r/20220411215849.297838-11-robdclark@gmail.com Signed-off-by: Rob Clark <robdclark@chromium.org>
This commit is contained in:
parent
95d1deb02a
commit
a636a0ff11
@ -281,6 +281,16 @@ int adreno_get_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
|
||||
case MSM_PARAM_SUSPENDS:
|
||||
*value = gpu->suspend_count;
|
||||
return 0;
|
||||
case MSM_PARAM_VA_START:
|
||||
if (ctx->aspace == gpu->aspace)
|
||||
return -EINVAL;
|
||||
*value = ctx->aspace->va_start;
|
||||
return 0;
|
||||
case MSM_PARAM_VA_SIZE:
|
||||
if (ctx->aspace == gpu->aspace)
|
||||
return -EINVAL;
|
||||
*value = ctx->aspace->va_size;
|
||||
return 0;
|
||||
default:
|
||||
DBG("%s: invalid param: %u", gpu->name, param);
|
||||
return -EINVAL;
|
||||
|
@ -722,6 +722,23 @@ static int msm_ioctl_gem_info_iova(struct drm_device *dev,
|
||||
return msm_gem_get_iova(obj, ctx->aspace, iova);
|
||||
}
|
||||
|
||||
static int msm_ioctl_gem_info_set_iova(struct drm_device *dev,
|
||||
struct drm_file *file, struct drm_gem_object *obj,
|
||||
uint64_t iova)
|
||||
{
|
||||
struct msm_drm_private *priv = dev->dev_private;
|
||||
struct msm_file_private *ctx = file->driver_priv;
|
||||
|
||||
if (!priv->gpu)
|
||||
return -EINVAL;
|
||||
|
||||
/* Only supported if per-process address space is supported: */
|
||||
if (priv->gpu->aspace == ctx->aspace)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
return msm_gem_set_iova(obj, ctx->aspace, iova);
|
||||
}
|
||||
|
||||
static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
|
||||
struct drm_file *file)
|
||||
{
|
||||
@ -736,6 +753,7 @@ static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
|
||||
switch (args->info) {
|
||||
case MSM_INFO_GET_OFFSET:
|
||||
case MSM_INFO_GET_IOVA:
|
||||
case MSM_INFO_SET_IOVA:
|
||||
/* value returned as immediate, not pointer, so len==0: */
|
||||
if (args->len)
|
||||
return -EINVAL;
|
||||
@ -760,6 +778,9 @@ static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
|
||||
case MSM_INFO_GET_IOVA:
|
||||
ret = msm_ioctl_gem_info_iova(dev, file, obj, &args->value);
|
||||
break;
|
||||
case MSM_INFO_SET_IOVA:
|
||||
ret = msm_ioctl_gem_info_set_iova(dev, file, obj, args->value);
|
||||
break;
|
||||
case MSM_INFO_SET_NAME:
|
||||
/* length check should leave room for terminating null: */
|
||||
if (args->len >= sizeof(msm_obj->name)) {
|
||||
|
@ -525,6 +525,54 @@ int msm_gem_get_iova(struct drm_gem_object *obj,
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
 * Tear down this object's vma in @aspace, if any.  Fails with -EBUSY if
 * the mapping is still in use (pinned / referenced by the GPU).
 */
static int clear_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_vma *vma = lookup_vma(obj, aspace);

	/* No mapping in this aspace: nothing to do. */
	if (!vma)
		return 0;

	/* Refuse to pull the mapping out from under an active user: */
	if (msm_gem_vma_inuse(vma))
		return -EBUSY;

	/*
	 * Teardown order matters: unmap the pages from the MMU first,
	 * then release the iova range back to the allocator, and only
	 * then free the vma tracking structure itself.
	 */
	msm_gem_purge_vma(vma->aspace, vma);
	msm_gem_close_vma(vma->aspace, vma);
	del_vma(vma);

	return 0;
}
|
||||
|
||||
/*
 * Get the requested iova but don't pin it. Fails if the requested iova is
 * not available. Doesn't need a put because iovas are currently valid for
 * the life of the object.
 *
 * Setting an iova of zero will clear the vma.
 */
int msm_gem_set_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t iova)
{
	int ret = 0;

	msm_gem_lock(obj);
	if (!iova) {
		/* iova == 0 means "unmap": tear down any existing vma. */
		ret = clear_iova(obj, aspace);
	} else {
		struct msm_gem_vma *vma;
		/*
		 * Ask for the exact range [iova, iova + size).  If a vma
		 * already exists, get_vma_locked() returns it rather than
		 * allocating a new one.
		 */
		vma = get_vma_locked(obj, aspace, iova, iova + obj->size);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
		} else if (GEM_WARN_ON(vma->iova != iova)) {
			/*
			 * A pre-existing vma at a different address than
			 * requested: drop it if possible and report -EBUSY
			 * so userspace knows the iova was not honored.
			 */
			clear_iova(obj, aspace);
			ret = -EBUSY;
		}
	}
	msm_gem_unlock(obj);

	return ret;
}
|
||||
|
||||
/*
|
||||
* Unpin a iova by updating the reference counts. The memory isn't actually
|
||||
* purged until something else (shrinker, mm_notifier, destroy, etc) decides
|
||||
|
@ -38,6 +38,12 @@ struct msm_gem_address_space {
|
||||
|
||||
/* @faults: the number of GPU hangs associated with this address space */
|
||||
int faults;
|
||||
|
||||
/** @va_start: lowest possible address to allocate */
|
||||
uint64_t va_start;
|
||||
|
||||
/** @va_size: the size of the address space (in bytes) */
|
||||
uint64_t va_size;
|
||||
};
|
||||
|
||||
struct msm_gem_address_space *
|
||||
@ -144,6 +150,8 @@ struct msm_gem_vma *msm_gem_get_vma_locked(struct drm_gem_object *obj,
|
||||
struct msm_gem_address_space *aspace);
|
||||
int msm_gem_get_iova(struct drm_gem_object *obj,
|
||||
struct msm_gem_address_space *aspace, uint64_t *iova);
|
||||
int msm_gem_set_iova(struct drm_gem_object *obj,
|
||||
struct msm_gem_address_space *aspace, uint64_t iova);
|
||||
int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
|
||||
struct msm_gem_address_space *aspace, uint64_t *iova,
|
||||
u64 range_start, u64 range_end);
|
||||
|
@ -184,6 +184,8 @@ msm_gem_address_space_create(struct msm_mmu *mmu, const char *name,
|
||||
spin_lock_init(&aspace->lock);
|
||||
aspace->name = name;
|
||||
aspace->mmu = mmu;
|
||||
aspace->va_start = va_start;
|
||||
aspace->va_size = size;
|
||||
|
||||
drm_mm_init(&aspace->mm, va_start, size);
|
||||
|
||||
|
@ -84,6 +84,8 @@ struct drm_msm_timespec {
|
||||
#define MSM_PARAM_SYSPROF 0x0b /* WO: 1 preserves perfcntrs, 2 also disables suspend */
|
||||
#define MSM_PARAM_COMM 0x0c /* WO: override for task->comm */
|
||||
#define MSM_PARAM_CMDLINE 0x0d /* WO: override for task cmdline */
|
||||
#define MSM_PARAM_VA_START 0x0e /* RO: start of valid GPU iova range */
|
||||
#define MSM_PARAM_VA_SIZE 0x0f /* RO: size of valid GPU iova range (bytes) */
|
||||
|
||||
/* For backwards compat. The original support for preemption was based on
|
||||
* a single ring per priority level so # of priority levels equals the #
|
||||
@ -135,6 +137,7 @@ struct drm_msm_gem_new {
|
||||
#define MSM_INFO_GET_IOVA 0x01 /* get iova, returned by value */
|
||||
#define MSM_INFO_SET_NAME 0x02 /* set the debug name (by pointer) */
|
||||
#define MSM_INFO_GET_NAME 0x03 /* get debug name, returned by pointer */
|
||||
#define MSM_INFO_SET_IOVA 0x04 /* set the iova, passed by value */
|
||||
|
||||
struct drm_msm_gem_info {
|
||||
__u32 handle; /* in */
|
||||
|
Loading…
Reference in New Issue
Block a user