drm/gem: Split drm_gem_mmap() into object search and object mapping
The drm_gem_mmap() function first finds the GEM object to be mapped based on
the fake mmap offset and then maps the object. Split the object mapping code
into a standalone drm_gem_mmap_obj() function that can be used to implement
dma-buf mmap() operations.

Signed-off-by: Laurent Pinchart <laurent.pinchart+renesas@ideasonboard.com>
Reviewed-by: Rob Clark <robdclark@gmail.com>
commit 1c5aafa6ee
parent 328a4719b6
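The new helper is exported so that drivers can call it from their dma-buf mmap() handlers instead of going through the fake-offset lookup. As a rough illustration of the intended use (this sketch is not part of the commit: the function name my_gem_prime_mmap is hypothetical, it assumes a GEM-based exporter that stored its drm_gem_object in dma_buf->priv, and it follows the same dev->struct_mutex locking that drm_gem_mmap() uses around the helper), a driver-side callback might look like this:

/*
 * Illustrative sketch only, not part of this commit. Assumes the dma-buf
 * exporter placed the GEM object in dma_buf->priv and that drm_gem_mmap_obj()
 * is called under dev->struct_mutex, as drm_gem_mmap() does below.
 */
#include <linux/dma-buf.h>
#include <drm/drmP.h>

static int my_gem_prime_mmap(struct dma_buf *dma_buf,
                             struct vm_area_struct *vma)
{
        struct drm_gem_object *obj = dma_buf->priv;
        struct drm_device *dev = obj->dev;
        int ret;

        mutex_lock(&dev->struct_mutex);
        /* Map the whole object; drm_gem_mmap_obj() rejects larger VMAs. */
        ret = drm_gem_mmap_obj(obj, obj->size, vma);
        mutex_unlock(&dev->struct_mutex);

        return ret;
}

Keeping the size check and the gem_vm_ops check inside the helper means the fake-offset path and a dma-buf path like the sketch above enforce the same constraints.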
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -644,6 +644,55 @@ void drm_gem_vm_close(struct vm_area_struct *vma)
 }
 EXPORT_SYMBOL(drm_gem_vm_close);
 
+/**
+ * drm_gem_mmap_obj - memory map a GEM object
+ * @obj: the GEM object to map
+ * @obj_size: the object size to be mapped, in bytes
+ * @vma: VMA for the area to be mapped
+ *
+ * Set up the VMA to prepare mapping of the GEM object using the gem_vm_ops
+ * provided by the driver. Depending on their requirements, drivers can either
+ * provide a fault handler in their gem_vm_ops (in which case any accesses to
+ * the object will be trapped, to perform migration, GTT binding, surface
+ * register allocation, or performance monitoring), or mmap the buffer memory
+ * synchronously after calling drm_gem_mmap_obj.
+ *
+ * This function is mainly intended to implement the DMABUF mmap operation, when
+ * the GEM object is not looked up based on its fake offset. To implement the
+ * DRM mmap operation, drivers should use the drm_gem_mmap() function.
+ *
+ * Return 0 or success or -EINVAL if the object size is smaller than the VMA
+ * size, or if no gem_vm_ops are provided.
+ */
+int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
+		     struct vm_area_struct *vma)
+{
+	struct drm_device *dev = obj->dev;
+
+	/* Check for valid size. */
+	if (obj_size < vma->vm_end - vma->vm_start)
+		return -EINVAL;
+
+	if (!dev->driver->gem_vm_ops)
+		return -EINVAL;
+
+	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
+	vma->vm_ops = dev->driver->gem_vm_ops;
+	vma->vm_private_data = obj;
+	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+
+	/* Take a ref for this mapping of the object, so that the fault
+	 * handler can dereference the mmap offset's pointer to the object.
+	 * This reference is cleaned up by the corresponding vm_close
+	 * (which should happen whether the vma was created by this call, or
+	 * by a vm_open due to mremap or partial unmap or whatever).
+	 */
+	drm_gem_object_reference(obj);
+
+	drm_vm_open_locked(dev, vma);
+	return 0;
+}
+EXPORT_SYMBOL(drm_gem_mmap_obj);
+
 /**
  * drm_gem_mmap - memory map routine for GEM objects
@@ -653,11 +702,9 @@ EXPORT_SYMBOL(drm_gem_vm_close);
  * If a driver supports GEM object mapping, mmap calls on the DRM file
  * descriptor will end up here.
  *
- * If we find the object based on the offset passed in (vma->vm_pgoff will
+ * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
  * contain the fake offset we created when the GTT map ioctl was called on
- * the object), we set up the driver fault handler so that any accesses
- * to the object can be trapped, to perform migration, GTT binding, surface
- * register allocation, or performance monitoring.
+ * the object) and map it with a call to drm_gem_mmap_obj().
  */
 int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 {
@@ -665,7 +712,6 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 	struct drm_device *dev = priv->minor->dev;
 	struct drm_gem_mm *mm = dev->mm_private;
 	struct drm_local_map *map = NULL;
-	struct drm_gem_object *obj;
 	struct drm_hash_item *hash;
 	int ret = 0;
 
@@ -686,32 +732,7 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 		goto out_unlock;
 	}
 
-	/* Check for valid size. */
-	if (map->size < vma->vm_end - vma->vm_start) {
-		ret = -EINVAL;
-		goto out_unlock;
-	}
-
-	obj = map->handle;
-	if (!obj->dev->driver->gem_vm_ops) {
-		ret = -EINVAL;
-		goto out_unlock;
-	}
-
-	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
-	vma->vm_ops = obj->dev->driver->gem_vm_ops;
-	vma->vm_private_data = map->handle;
-	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
-
-	/* Take a ref for this mapping of the object, so that the fault
-	 * handler can dereference the mmap offset's pointer to the object.
-	 * This reference is cleaned up by the corresponding vm_close
-	 * (which should happen whether the vma was created by this call, or
-	 * by a vm_open due to mremap or partial unmap or whatever).
-	 */
-	drm_gem_object_reference(obj);
-
-	drm_vm_open_locked(dev, vma);
+	ret = drm_gem_mmap_obj(map->handle, map->size, vma);
 
 out_unlock:
 	mutex_unlock(&dev->struct_mutex);
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -1616,6 +1616,8 @@ int drm_gem_private_object_init(struct drm_device *dev,
 void drm_gem_object_handle_free(struct drm_gem_object *obj);
 void drm_gem_vm_open(struct vm_area_struct *vma);
 void drm_gem_vm_close(struct vm_area_struct *vma);
+int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
+		     struct vm_area_struct *vma);
 int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
 
 #include <drm/drm_global.h>