mirror of
https://github.com/torvalds/linux.git
synced 2024-11-11 14:42:24 +00:00
drm/nouveau/svm: new ioctl to migrate process memory to GPU memory
This adds an ioctl to migrate a range of process address space to device memory. On platforms without a cache-coherent bus (x86, ARM, ...) this means the CPU cannot access that range directly; instead, the CPU will fault, which migrates the memory back to system memory. This is behind a staging flag so that we can evolve the API. Signed-off-by: Jérôme Glisse <jglisse@redhat.com>
This commit is contained in:
parent
5be73b6908
commit
f180bf12ac
@ -1043,6 +1043,7 @@ nouveau_ioctls[] = {
|
||||
DRM_IOCTL_DEF_DRV(NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_abi16_ioctl_notifierobj_alloc, DRM_AUTH|DRM_RENDER_ALLOW),
|
||||
DRM_IOCTL_DEF_DRV(NOUVEAU_GPUOBJ_FREE, nouveau_abi16_ioctl_gpuobj_free, DRM_AUTH|DRM_RENDER_ALLOW),
|
||||
DRM_IOCTL_DEF_DRV(NOUVEAU_SVM_INIT, nouveau_svmm_init, DRM_AUTH|DRM_RENDER_ALLOW),
|
||||
DRM_IOCTL_DEF_DRV(NOUVEAU_SVM_BIND, nouveau_svmm_bind, DRM_AUTH|DRM_RENDER_ALLOW),
|
||||
DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_AUTH|DRM_RENDER_ALLOW),
|
||||
DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_AUTH|DRM_RENDER_ALLOW),
|
||||
DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_AUTH|DRM_RENDER_ALLOW),
|
||||
|
@ -22,6 +22,7 @@
|
||||
#include "nouveau_svm.h"
|
||||
#include "nouveau_drv.h"
|
||||
#include "nouveau_chan.h"
|
||||
#include "nouveau_dmem.h"
|
||||
|
||||
#include <nvif/notify.h>
|
||||
#include <nvif/object.h>
|
||||
@ -104,6 +105,101 @@ struct nouveau_svmm {
|
||||
#define SVMM_ERR(s,f,a...) \
|
||||
NV_WARN((s)->vmm->cli->drm, "svm-%p: "f"\n", (s), ##a)
|
||||
|
||||
int
|
||||
nouveau_svmm_bind(struct drm_device *dev, void *data,
|
||||
struct drm_file *file_priv)
|
||||
{
|
||||
struct nouveau_cli *cli = nouveau_cli(file_priv);
|
||||
struct drm_nouveau_svm_bind *args = data;
|
||||
unsigned target, cmd, priority;
|
||||
unsigned long addr, end, size;
|
||||
struct mm_struct *mm;
|
||||
|
||||
args->va_start &= PAGE_MASK;
|
||||
args->va_end &= PAGE_MASK;
|
||||
|
||||
/* Sanity check arguments */
|
||||
if (args->reserved0 || args->reserved1)
|
||||
return -EINVAL;
|
||||
if (args->header & (~NOUVEAU_SVM_BIND_VALID_MASK))
|
||||
return -EINVAL;
|
||||
if (args->va_start >= args->va_end)
|
||||
return -EINVAL;
|
||||
if (!args->npages)
|
||||
return -EINVAL;
|
||||
|
||||
cmd = args->header >> NOUVEAU_SVM_BIND_COMMAND_SHIFT;
|
||||
cmd &= NOUVEAU_SVM_BIND_COMMAND_MASK;
|
||||
switch (cmd) {
|
||||
case NOUVEAU_SVM_BIND_COMMAND__MIGRATE:
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
priority = args->header >> NOUVEAU_SVM_BIND_PRIORITY_SHIFT;
|
||||
priority &= NOUVEAU_SVM_BIND_PRIORITY_MASK;
|
||||
|
||||
/* FIXME support CPU target ie all target value < GPU_VRAM */
|
||||
target = args->header >> NOUVEAU_SVM_BIND_TARGET_SHIFT;
|
||||
target &= NOUVEAU_SVM_BIND_TARGET_MASK;
|
||||
switch (target) {
|
||||
case NOUVEAU_SVM_BIND_TARGET__GPU_VRAM:
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/*
|
||||
* FIXME: For now refuse non 0 stride, we need to change the migrate
|
||||
* kernel function to handle stride to avoid to create a mess within
|
||||
* each device driver.
|
||||
*/
|
||||
if (args->stride)
|
||||
return -EINVAL;
|
||||
|
||||
size = ((unsigned long)args->npages) << PAGE_SHIFT;
|
||||
if ((args->va_start + size) <= args->va_start)
|
||||
return -EINVAL;
|
||||
if ((args->va_start + size) > args->va_end)
|
||||
return -EINVAL;
|
||||
|
||||
/*
|
||||
* Ok we are ask to do something sane, for now we only support migrate
|
||||
* commands but we will add things like memory policy (what to do on
|
||||
* page fault) and maybe some other commands.
|
||||
*/
|
||||
|
||||
mm = get_task_mm(current);
|
||||
down_read(&mm->mmap_sem);
|
||||
|
||||
for (addr = args->va_start, end = args->va_start + size; addr < end;) {
|
||||
struct vm_area_struct *vma;
|
||||
unsigned long next;
|
||||
|
||||
vma = find_vma_intersection(mm, addr, end);
|
||||
if (!vma)
|
||||
break;
|
||||
|
||||
next = min(vma->vm_end, end);
|
||||
/* This is a best effort so we ignore errors */
|
||||
nouveau_dmem_migrate_vma(cli->drm, vma, addr, next);
|
||||
addr = next;
|
||||
}
|
||||
|
||||
/*
|
||||
* FIXME Return the number of page we have migrated, again we need to
|
||||
* update the migrate API to return that information so that we can
|
||||
* report it to user space.
|
||||
*/
|
||||
args->result = 0;
|
||||
|
||||
up_read(&mm->mmap_sem);
|
||||
mmput(mm);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Unlink channel instance from SVMM. */
|
||||
void
|
||||
nouveau_svmm_part(struct nouveau_svmm *svmm, u64 inst)
|
||||
|
@ -17,6 +17,7 @@ int nouveau_svmm_init(struct drm_device *, void *, struct drm_file *);
|
||||
void nouveau_svmm_fini(struct nouveau_svmm **);
|
||||
int nouveau_svmm_join(struct nouveau_svmm *, u64 inst);
|
||||
void nouveau_svmm_part(struct nouveau_svmm *, u64 inst);
|
||||
int nouveau_svmm_bind(struct drm_device *, void *, struct drm_file *);
|
||||
#else /* IS_ENABLED(CONFIG_DRM_NOUVEAU_SVM) */
|
||||
static inline void nouveau_svm_init(struct nouveau_drm *drm) {}
|
||||
static inline void nouveau_svm_fini(struct nouveau_drm *drm) {}
|
||||
@ -37,5 +38,11 @@ static inline int nouveau_svmm_join(struct nouveau_svmm *svmm, u64 inst)
|
||||
}
|
||||
|
||||
static inline void nouveau_svmm_part(struct nouveau_svmm *svmm, u64 inst) {}
|
||||
|
||||
static inline int nouveau_svmm_bind(struct drm_device *device, void *p,
|
||||
struct drm_file *file)
|
||||
{
|
||||
return -ENOSYS;
|
||||
}
|
||||
#endif /* IS_ENABLED(CONFIG_DRM_NOUVEAU_SVM) */
|
||||
#endif
|
||||
|
@ -134,6 +134,7 @@ struct drm_nouveau_gem_cpu_fini {
|
||||
#define DRM_NOUVEAU_GPUOBJ_FREE 0x06 /* deprecated */
|
||||
#define DRM_NOUVEAU_NVIF 0x07
|
||||
#define DRM_NOUVEAU_SVM_INIT 0x08
|
||||
#define DRM_NOUVEAU_SVM_BIND 0x09
|
||||
#define DRM_NOUVEAU_GEM_NEW 0x40
|
||||
#define DRM_NOUVEAU_GEM_PUSHBUF 0x41
|
||||
#define DRM_NOUVEAU_GEM_CPU_PREP 0x42
|
||||
@ -145,7 +146,49 @@ struct drm_nouveau_svm_init {
|
||||
__u64 unmanaged_size;
|
||||
};
|
||||
|
||||
struct drm_nouveau_svm_bind {
|
||||
__u64 header;
|
||||
__u64 va_start;
|
||||
__u64 va_end;
|
||||
__u64 npages;
|
||||
__u64 stride;
|
||||
__u64 result;
|
||||
__u64 reserved0;
|
||||
__u64 reserved1;
|
||||
};
|
||||
|
||||
#define NOUVEAU_SVM_BIND_COMMAND_SHIFT 0
|
||||
#define NOUVEAU_SVM_BIND_COMMAND_BITS 8
|
||||
#define NOUVEAU_SVM_BIND_COMMAND_MASK ((1 << 8) - 1)
|
||||
#define NOUVEAU_SVM_BIND_PRIORITY_SHIFT 8
|
||||
#define NOUVEAU_SVM_BIND_PRIORITY_BITS 8
|
||||
#define NOUVEAU_SVM_BIND_PRIORITY_MASK ((1 << 8) - 1)
|
||||
#define NOUVEAU_SVM_BIND_TARGET_SHIFT 16
|
||||
#define NOUVEAU_SVM_BIND_TARGET_BITS 32
|
||||
#define NOUVEAU_SVM_BIND_TARGET_MASK 0xffffffff
|
||||
|
||||
/*
|
||||
 * Below is used to validate ioctl arguments; userspace can also use it to
 * make sure that no bits are set beyond known fields for a given kernel
 * version.
|
||||
*/
|
||||
#define NOUVEAU_SVM_BIND_VALID_BITS 48
|
||||
#define NOUVEAU_SVM_BIND_VALID_MASK ((1ULL << NOUVEAU_SVM_BIND_VALID_BITS) - 1)
|
||||
|
||||
|
||||
/*
|
||||
 * NOUVEAU_SVM_BIND_COMMAND__MIGRATE: synchronous migrate to target memory.
 * result: number of pages successfully migrated to the target memory.
|
||||
*/
|
||||
#define NOUVEAU_SVM_BIND_COMMAND__MIGRATE 0
|
||||
|
||||
/*
|
||||
 * NOUVEAU_SVM_BIND_TARGET__GPU_VRAM: target the GPU VRAM memory.
|
||||
*/
|
||||
#define NOUVEAU_SVM_BIND_TARGET__GPU_VRAM (1UL << 31)
|
||||
|
||||
|
||||
#define DRM_IOCTL_NOUVEAU_SVM_INIT DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_SVM_INIT, struct drm_nouveau_svm_init)
|
||||
#define DRM_IOCTL_NOUVEAU_SVM_BIND DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_SVM_BIND, struct drm_nouveau_svm_bind)
|
||||
|
||||
#define DRM_IOCTL_NOUVEAU_GEM_NEW DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_NEW, struct drm_nouveau_gem_new)
|
||||
#define DRM_IOCTL_NOUVEAU_GEM_PUSHBUF DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_PUSHBUF, struct drm_nouveau_gem_pushbuf)
|
||||
|
Loading…
Reference in New Issue
Block a user