linux/drivers/gpu/drm/i915/gem/i915_gem_mman.c

/*
* SPDX-License-Identifier: MIT
*
* Copyright © 2014-2016 Intel Corporation
*/
#include <linux/anon_inodes.h>
#include <linux/mman.h>
#include <linux/pfn_t.h>
#include <linux/sizes.h>
#include <drm/drm_cache.h>
#include "gt/intel_gt.h"
#include "gt/intel_gt_requests.h"
#include "i915_drv.h"
#include "i915_gem_evict.h"
#include "i915_gem_gtt.h"
#include "i915_gem_ioctls.h"
#include "i915_gem_object.h"
#include "i915_gem_mman.h"
#include "i915_mm.h"
#include "i915_trace.h"
#include "i915_user_extensions.h"
#include "i915_gem_ttm.h"
#include "i915_vma.h"
static inline bool
__vma_matches(struct vm_area_struct *vma, struct file *filp,
unsigned long addr, unsigned long size)
{
if (vma->vm_file != filp)
return false;
return vma->vm_start == addr &&
(vma->vm_end - vma->vm_start) == PAGE_ALIGN(size);
}
/**
* i915_gem_mmap_ioctl - Maps the contents of an object, returning the address
* it is mapped to.
* @dev: drm device
* @data: ioctl data blob
* @file: drm file
*
* While the mapping holds a reference on the contents of the object, it doesn't
* imply a ref on the object itself.
*
* IMPORTANT:
*
* DRM driver writers who look at this function as an example for how to do GEM
* mmap support, please don't implement mmap support like here. The modern way
* to implement DRM mmap support is with an mmap offset ioctl (like
* i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly.
* That way debug tooling like valgrind will understand what's going on; hiding
* the mmap call in a driver-private ioctl breaks that. The i915 driver only
* does cpu mmaps this way because we didn't know better.
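*
* An illustrative userspace sketch of that recommended flow (uapi names from
* include/uapi/drm/i915_drm.h; drm_fd, handle and size assumed, error
* handling omitted):
*
*	struct drm_i915_gem_mmap_offset arg = {
*		.handle = handle,
*		.flags = I915_MMAP_OFFSET_WC,
*	};
*	ioctl(drm_fd, DRM_IOCTL_I915_GEM_MMAP_OFFSET, &arg);
*	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
*		   drm_fd, arg.offset);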
*/
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_private *i915 = to_i915(dev);
struct drm_i915_gem_mmap *args = data;
struct drm_i915_gem_object *obj;
unsigned long addr;
/*
* mmap ioctl is disallowed for all discrete platforms,
* and for all platforms with GRAPHICS_VER_FULL > IP_VER(12, 0).
*/
if (IS_DGFX(i915) || GRAPHICS_VER_FULL(i915) > IP_VER(12, 0))
return -EOPNOTSUPP;
if (args->flags & ~(I915_MMAP_WC))
return -EINVAL;
if (args->flags & I915_MMAP_WC && !pat_enabled())
return -ENODEV;
obj = i915_gem_object_lookup(file, args->handle);
if (!obj)
return -ENOENT;
/* prime objects have no backing filp to GEM mmap
* pages from.
*/
if (!obj->base.filp) {
addr = -ENXIO;
goto err;
}
if (range_overflows(args->offset, args->size, (u64)obj->base.size)) {
addr = -EINVAL;
goto err;
}
addr = vm_mmap(obj->base.filp, 0, args->size,
PROT_READ | PROT_WRITE, MAP_SHARED,
args->offset);
if (IS_ERR_VALUE(addr))
goto err;
if (args->flags & I915_MMAP_WC) {
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
if (mmap_write_lock_killable(mm)) {
addr = -EINTR;
goto err;
}
vma = find_vma(mm, addr);
if (vma && __vma_matches(vma, obj->base.filp, addr, args->size))
vma->vm_page_prot =
pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
else
addr = -ENOMEM;
mmap_write_unlock(mm);
if (IS_ERR_VALUE(addr))
goto err;
}
i915_gem_object_put(obj);
args->addr_ptr = (u64)addr;
return 0;
err:
i915_gem_object_put(obj);
return addr;
}
static unsigned int tile_row_pages(const struct drm_i915_gem_object *obj)
{
return i915_gem_object_get_tile_row_size(obj) >> PAGE_SHIFT;
}
/**
* i915_gem_mmap_gtt_version - report the current feature set for GTT mmaps
*
* A history of the GTT mmap interface:
*
* 0 - Everything had to fit into the GTT. Both parties of a memcpy had to be
* aligned and suitable for fencing, and still fit into the available
* mappable space left by the pinned display objects. A classic problem
* we called the page-fault-of-doom where we would ping-pong between
* two objects that could not fit inside the GTT and so the memcpy
* would page one object in at the expense of the other between every
* single byte.
*
* 1 - Objects can be any size, and have any compatible fencing (X, Y, or none
* as set via i915_gem_set_tiling() [DRM_I915_GEM_SET_TILING]). If the
* object is too large for the available space (or simply too large
* for the mappable aperture!), a view is created instead and faulted
* into userspace. (This view is aligned and sized appropriately for
* fenced access.)
*
* 2 - Recognise WC as a separate cache domain so that we can flush the
* delayed writes via GTT before performing direct access via WC.
*
* 3 - Remove implicit set-domain(GTT) and synchronisation on initial
* pagefault; swapin remains transparent.
*
* 4 - Support multiple fault handlers per object depending on object's
* backing storage (a.k.a. MMAP_OFFSET).
*
* Restrictions:
*
* * snoopable objects cannot be accessed via the GTT. It can cause machine
* hangs on some architectures, corruption on others. An attempt to service
* a GTT page fault from a snoopable object will generate a SIGBUS.
*
* * the object must be able to fit into RAM (physical memory, though not
* limited to the mappable aperture).
*
*
* Caveats:
*
* * a new GTT page fault will synchronize rendering from the GPU and flush
* all data to system memory. Subsequent access will not be synchronized.
*
* * all mappings are revoked on runtime device suspend.
*
* * there are only 8, 16 or 32 fence registers to share between all users
* (older machines require a fence register for display and blitter access
* as well). Contention of the fence registers will cause the previous users
* to be unmapped and any new access will generate new page faults.
*
* * running out of memory while servicing a fault may generate a SIGBUS,
* rather than the expected SIGSEGV.
*/
int i915_gem_mmap_gtt_version(void)
{
return 4;
}
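/*
 * The version above is reported to userspace via GETPARAM; a minimal query
 * sketch (uapi names from include/uapi/drm/i915_drm.h; drm_fd assumed,
 * error handling omitted):
 *
 *	int gtt_version = 0;
 *	struct drm_i915_getparam gp = {
 *		.param = I915_PARAM_MMAP_GTT_VERSION,
 *		.value = &gtt_version,
 *	};
 *	ioctl(drm_fd, DRM_IOCTL_I915_GETPARAM, &gp);
 */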
static inline struct i915_ggtt_view
compute_partial_view(const struct drm_i915_gem_object *obj,
pgoff_t page_offset,
unsigned int chunk)
{
struct i915_ggtt_view view;
if (i915_gem_object_is_tiled(obj))
chunk = roundup(chunk, tile_row_pages(obj) ?: 1);
view.type = I915_GGTT_VIEW_PARTIAL;
view.partial.offset = rounddown(page_offset, chunk);
view.partial.size =
min_t(unsigned int, chunk,
(obj->base.size >> PAGE_SHIFT) - view.partial.offset);
/* If the partial covers the entire object, just create a normal VMA. */
if (chunk >= obj->base.size >> PAGE_SHIFT)
view.type = I915_GGTT_VIEW_NORMAL;
return view;
}
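/*
 * Worked example (illustrative, assuming 4K pages and an untiled object):
 * a fault at page_offset 1000 into a 16 MiB object (4096 pages) with
 * chunk = MIN_CHUNK_PAGES = 256 yields a partial view covering pages
 * 768..1023, i.e. a 1 MiB window rounded down to a chunk boundary around
 * the faulting page. Objects of 1 MiB or less take the
 * I915_GGTT_VIEW_NORMAL path instead.
 */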
static vm_fault_t i915_error_to_vmf_fault(int err)
{
switch (err) {
default:
WARN_ONCE(err, "unhandled error in %s: %i\n", __func__, err);
fallthrough;
case -EIO: /* shmemfs failure from swap device */
case -EFAULT: /* purged object */
case -ENODEV: /* bad object, how did you get here! */
case -ENXIO: /* unable to access backing store (on device) */
return VM_FAULT_SIGBUS;
case -ENOMEM: /* our allocation failure */
return VM_FAULT_OOM;
case 0:
case -EAGAIN:
case -ENOSPC: /* transient failure to evict? */
case -ERESTARTSYS:
case -EINTR:
case -EBUSY:
/*
* EBUSY is ok: this just means that another thread
* already did the job.
*/
return VM_FAULT_NOPAGE;
}
}
static vm_fault_t vm_fault_cpu(struct vm_fault *vmf)
{
struct vm_area_struct *area = vmf->vma;
struct i915_mmap_offset *mmo = area->vm_private_data;
struct drm_i915_gem_object *obj = mmo->obj;
resource_size_t iomap;
int err;
/* Sanity check that we allow writing into this object */
if (unlikely(i915_gem_object_is_readonly(obj) &&
area->vm_flags & VM_WRITE))
return VM_FAULT_SIGBUS;
if (i915_gem_object_lock_interruptible(obj, NULL))
return VM_FAULT_NOPAGE;
err = i915_gem_object_pin_pages(obj);
if (err)
goto out;
iomap = -1;
if (!i915_gem_object_has_struct_page(obj)) {
iomap = obj->mm.region->iomap.base;
iomap -= obj->mm.region->region.start;
}
/* PTEs are revoked in obj->ops->put_pages() */
err = remap_io_sg(area,
area->vm_start, area->vm_end - area->vm_start,
obj->mm.pages->sgl, iomap);
if (area->vm_flags & VM_WRITE) {
GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
obj->mm.dirty = true;
}
i915_gem_object_unpin_pages(obj);
out:
i915_gem_object_unlock(obj);
return i915_error_to_vmf_fault(err);
}
static vm_fault_t vm_fault_gtt(struct vm_fault *vmf)
{
#define MIN_CHUNK_PAGES (SZ_1M >> PAGE_SHIFT)
struct vm_area_struct *area = vmf->vma;
struct i915_mmap_offset *mmo = area->vm_private_data;
struct drm_i915_gem_object *obj = mmo->obj;
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *i915 = to_i915(dev);
struct intel_runtime_pm *rpm = &i915->runtime_pm;
struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
bool write = area->vm_flags & VM_WRITE;
struct i915_gem_ww_ctx ww;
intel_wakeref_t wakeref;
struct i915_vma *vma;
pgoff_t page_offset;
int srcu;
int ret;
/* We don't use vmf->pgoff since that has the fake offset */
page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT;
trace_i915_gem_object_fault(obj, page_offset, true, write);
wakeref = intel_runtime_pm_get(rpm);
i915_gem_ww_ctx_init(&ww, true);
retry:
ret = i915_gem_object_lock(obj, &ww);
if (ret)
goto err_rpm;
/* Sanity check that we allow writing into this object */
if (i915_gem_object_is_readonly(obj) && write) {
ret = -EFAULT;
goto err_rpm;
}
ret = i915_gem_object_pin_pages(obj);
if (ret)
goto err_rpm;
ret = intel_gt_reset_trylock(ggtt->vm.gt, &srcu);
if (ret)
goto err_pages;
/* Now pin it into the GTT as needed */
vma = i915_gem_object_ggtt_pin_ww(obj, &ww, NULL, 0, 0,
PIN_MAPPABLE |
PIN_NONBLOCK /* NOWARN */ |
PIN_NOEVICT);
if (IS_ERR(vma) && vma != ERR_PTR(-EDEADLK)) {
/* Use a partial view if it is bigger than available space */
struct i915_ggtt_view view =
compute_partial_view(obj, page_offset, MIN_CHUNK_PAGES);
unsigned int flags;
flags = PIN_MAPPABLE | PIN_NOSEARCH;
if (view.type == I915_GGTT_VIEW_NORMAL)
flags |= PIN_NONBLOCK; /* avoid warnings for pinned */
/*
* Userspace is now writing through an untracked VMA, abandon
* all hope that the hardware is able to track future writes.
*/
vma = i915_gem_object_ggtt_pin_ww(obj, &ww, &view, 0, 0, flags);
if (IS_ERR(vma) && vma != ERR_PTR(-EDEADLK)) {
flags = PIN_MAPPABLE;
view.type = I915_GGTT_VIEW_PARTIAL;
vma = i915_gem_object_ggtt_pin_ww(obj, &ww, &view, 0, 0, flags);
}
/*
* The entire mappable GGTT is pinned? Unexpected!
* Try to evict the object we locked too, as normally we skip it
* due to lack of short term pinning inside execbuf.
*/
if (vma == ERR_PTR(-ENOSPC)) {
ret = mutex_lock_interruptible(&ggtt->vm.mutex);
if (!ret) {
ret = i915_gem_evict_vm(&ggtt->vm, &ww);
mutex_unlock(&ggtt->vm.mutex);
}
if (ret)
goto err_reset;
vma = i915_gem_object_ggtt_pin_ww(obj, &ww, &view, 0, 0, flags);
}
}
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto err_reset;
}
/* Access to snoopable pages through the GTT is incoherent. */
if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(i915)) {
ret = -EFAULT;
goto err_unpin;
}
ret = i915_vma_pin_fence(vma);
if (ret)
goto err_unpin;
/* Finally, remap it using the new GTT offset */
Revert "i915: use io_mapping_map_user" This reverts commit b739f125e4ebd73d10ed30a856574e13649119ed. We are unfortunately seeing more issues like we did in 293837b9ac8d ("Revert "i915: fix remap_io_sg to verify the pgprot""), except this is now for the vm_fault_gtt path, where we are now hitting the same BUG_ON(!pte_none(*pte)): [10887.466150] kernel BUG at mm/memory.c:2183! [10887.466162] invalid opcode: 0000 [#1] PREEMPT SMP PTI [10887.466168] CPU: 0 PID: 7775 Comm: ffmpeg Tainted: G U 5.13.0-rc3-CI-Nightly #1 [10887.466174] Hardware name: To Be Filled By O.E.M. To Be Filled By O.E.M./J4205-ITX, BIOS P1.40 07/14/2017 [10887.466177] RIP: 0010:remap_pfn_range_notrack+0x30f/0x440 [10887.466188] Code: e8 96 d7 e0 ff 84 c0 0f 84 27 01 00 00 48 ba 00 f0 ff ff ff ff 0f 00 4c 89 e0 48 c1 e0 0c 4d 85 ed 75 96 48 21 d0 31 f6 eb a9 <0f> 0b 48 39 37 0f 85 0e 01 00 00 48 8b 0c 24 48 39 4f 08 0f 85 00 [10887.466193] RSP: 0018:ffffc90006e33c50 EFLAGS: 00010286 [10887.466198] RAX: 800000000000002f RBX: 00007f5e01800000 RCX: 0000000000000028 [10887.466201] RDX: 0000000000000001 RSI: ffffea0000000000 RDI: 0000000000000000 [10887.466204] RBP: ffffea000033fea8 R08: 800000000000002f R09: ffff8881072256e0 [10887.466207] R10: ffffc9000b84fff8 R11: 0000000017dab000 R12: 0000000000089f9f [10887.466210] R13: 800000000000002f R14: 00007f5e017e4000 R15: ffff88800cffaf20 [10887.466213] FS: 00007f5e04849640(0000) GS:ffff888278000000(0000) knlGS:0000000000000000 [10887.466216] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [10887.466220] CR2: 00007fd9b191a2ac CR3: 00000001829ac000 CR4: 00000000003506f0 [10887.466223] Call Trace: [10887.466233] vm_fault_gtt+0x1ca/0x5d0 [i915] [10887.466381] ? ktime_get+0x38/0x90 [10887.466389] __do_fault+0x37/0x90 [10887.466395] __handle_mm_fault+0xc46/0x1200 [10887.466402] handle_mm_fault+0xce/0x2a0 [10887.466407] do_user_addr_fault+0x1c5/0x660 Reverting this commit is reported to fix the issue. Reported-by: Eero Tamminen <eero.t.tamminen@intel.com> References: https://gitlab.freedesktop.org/drm/intel/-/issues/3519 Fixes: b739f125e4eb ("i915: use io_mapping_map_user") Cc: Christoph Hellwig <hch@lst.de> Cc: Daniel Vetter <daniel.vetter@ffwll.ch> Signed-off-by: Matthew Auld <matthew.auld@intel.com> Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch> Link: https://patchwork.freedesktop.org/patch/msgid/20210527185145.458021-1-matthew.auld@intel.com
2021-05-27 18:51:45 +00:00
ret = remap_io_mapping(area,
area->vm_start + (vma->ggtt_view.partial.offset << PAGE_SHIFT),
(ggtt->gmadr.start + vma->node.start) >> PAGE_SHIFT,
min_t(u64, vma->size, area->vm_end - area->vm_start),
&ggtt->iomap);
if (ret)
goto err_fence;
assert_rpm_wakelock_held(rpm);
/* Mark as being mmapped into userspace for later revocation */
mutex_lock(&to_gt(i915)->ggtt->vm.mutex);
if (!i915_vma_set_userfault(vma) && !obj->userfault_count++)
list_add(&obj->userfault_link, &to_gt(i915)->ggtt->userfault_list);
mutex_unlock(&to_gt(i915)->ggtt->vm.mutex);
/* Track the mmo associated with the fenced vma */
vma->mmo = mmo;
if (CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)
intel_wakeref_auto(&to_gt(i915)->ggtt->userfault_wakeref,
msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND));
if (write) {
GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
i915_vma_set_ggtt_write(vma);
obj->mm.dirty = true;
}
err_fence:
i915_vma_unpin_fence(vma);
err_unpin:
__i915_vma_unpin(vma);
err_reset:
intel_gt_reset_unlock(ggtt->vm.gt, srcu);
err_pages:
i915_gem_object_unpin_pages(obj);
err_rpm:
if (ret == -EDEADLK) {
ret = i915_gem_ww_ctx_backoff(&ww);
if (!ret)
goto retry;
}
i915_gem_ww_ctx_fini(&ww);
intel_runtime_pm_put(rpm, wakeref);
return i915_error_to_vmf_fault(ret);
}
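/*
 * vm_access() below backs debugger access to these mappings (e.g. gdb via
 * ptrace, or reads and writes of /proc/<pid>/mem): the vmas are
 * VM_IO | VM_PFNMAP, so the core mm cannot use get_user_pages() and falls
 * back to vma->vm_ops->access() instead.
 */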
static int
vm_access(struct vm_area_struct *area, unsigned long addr,
void *buf, int len, int write)
{
struct i915_mmap_offset *mmo = area->vm_private_data;
struct drm_i915_gem_object *obj = mmo->obj;
struct i915_gem_ww_ctx ww;
void *vaddr;
int err = 0;
if (i915_gem_object_is_readonly(obj) && write)
return -EACCES;
addr -= area->vm_start;
if (range_overflows_t(u64, addr, len, obj->base.size))
return -EINVAL;
i915_gem_ww_ctx_init(&ww, true);
retry:
err = i915_gem_object_lock(obj, &ww);
if (err)
goto out;
/* As this is primarily for debugging, let's focus on simplicity */
vaddr = i915_gem_object_pin_map(obj, I915_MAP_FORCE_WC);
if (IS_ERR(vaddr)) {
err = PTR_ERR(vaddr);
goto out;
}
if (write) {
memcpy(vaddr + addr, buf, len);
__i915_gem_object_flush_map(obj, addr, len);
} else {
memcpy(buf, vaddr + addr, len);
}
i915_gem_object_unpin_map(obj);
out:
if (err == -EDEADLK) {
err = i915_gem_ww_ctx_backoff(&ww);
if (!err)
goto retry;
}
i915_gem_ww_ctx_fini(&ww);
if (err)
return err;
return len;
}
void __i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj)
{
struct i915_vma *vma;
GEM_BUG_ON(!obj->userfault_count);
for_each_ggtt_vma(vma, obj)
i915_vma_revoke_mmap(vma);
GEM_BUG_ON(obj->userfault_count);
}
/*
* It is vital that we remove the page mapping if we have mapped a tiled
* object through the GTT and then lose the fence register due to
* resource pressure. Similarly if the object has been moved out of the
* aperture, then pages mapped into userspace must be revoked. Removing the
* mapping will then trigger a page fault on the next user access, allowing
* fixup by vm_fault_gtt().
*/
void i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
intel_wakeref_t wakeref;
/*
* Serialisation between user GTT access and our code depends upon
* revoking the CPU's PTE whilst the mutex is held. The next user
* pagefault then has to wait until we release the mutex.
*
* Note that RPM complicates somewhat by adding an additional
* requirement that operations to the GGTT be made holding the RPM
* wakeref.
*/
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
mutex_lock(&to_gt(i915)->ggtt->vm.mutex);
if (!obj->userfault_count)
goto out;
__i915_gem_object_release_mmap_gtt(obj);
/*
* Ensure that the CPU's PTEs are revoked and there are no outstanding
* memory transactions from userspace before we return. The TLB
* flushing implied by changing the PTEs above *should* be
* sufficient, an extra barrier here just provides us with a bit
* of paranoid documentation about our requirement to serialise
* memory writes before touching registers / GSM.
*/
wmb();
out:
mutex_unlock(&to_gt(i915)->ggtt->vm.mutex);
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}
void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj)
{
struct i915_mmap_offset *mmo, *mn;
if (obj->ops->unmap_virtual)
obj->ops->unmap_virtual(obj);
spin_lock(&obj->mmo.lock);
rbtree_postorder_for_each_entry_safe(mmo, mn,
&obj->mmo.offsets, offset) {
/*
* vma_node_unmap for GTT mmaps handled already in
* __i915_gem_object_release_mmap_gtt
*/
if (mmo->mmap_type == I915_MMAP_TYPE_GTT)
continue;
spin_unlock(&obj->mmo.lock);
drm_vma_node_unmap(&mmo->vma_node,
obj->base.dev->anon_inode->i_mapping);
spin_lock(&obj->mmo.lock);
}
spin_unlock(&obj->mmo.lock);
}
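/*
 * Each object keeps an rbtree of i915_mmap_offset nodes under obj->mmo.lock,
 * keyed by mmap_type, so a single GEM object can expose several fake mmap
 * offsets (GTT, WC, WB, UC, FIXED) at once. lookup_mmo() and insert_mmo()
 * below walk and populate that tree.
 */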
static struct i915_mmap_offset *
lookup_mmo(struct drm_i915_gem_object *obj,
enum i915_mmap_type mmap_type)
{
struct rb_node *rb;
spin_lock(&obj->mmo.lock);
rb = obj->mmo.offsets.rb_node;
while (rb) {
struct i915_mmap_offset *mmo =
rb_entry(rb, typeof(*mmo), offset);
if (mmo->mmap_type == mmap_type) {
spin_unlock(&obj->mmo.lock);
return mmo;
}
if (mmo->mmap_type < mmap_type)
rb = rb->rb_right;
else
rb = rb->rb_left;
}
spin_unlock(&obj->mmo.lock);
return NULL;
}
static struct i915_mmap_offset *
insert_mmo(struct drm_i915_gem_object *obj, struct i915_mmap_offset *mmo)
{
struct rb_node *rb, **p;
spin_lock(&obj->mmo.lock);
rb = NULL;
p = &obj->mmo.offsets.rb_node;
while (*p) {
struct i915_mmap_offset *pos;
rb = *p;
pos = rb_entry(rb, typeof(*pos), offset);
if (pos->mmap_type == mmo->mmap_type) {
spin_unlock(&obj->mmo.lock);
drm_vma_offset_remove(obj->base.dev->vma_offset_manager,
&mmo->vma_node);
kfree(mmo);
return pos;
}
if (pos->mmap_type < mmo->mmap_type)
p = &rb->rb_right;
else
p = &rb->rb_left;
}
rb_link_node(&mmo->offset, rb, p);
rb_insert_color(&mmo->offset, &obj->mmo.offsets);
spin_unlock(&obj->mmo.lock);
return mmo;
}
static struct i915_mmap_offset *
mmap_offset_attach(struct drm_i915_gem_object *obj,
enum i915_mmap_type mmap_type,
struct drm_file *file)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct i915_mmap_offset *mmo;
int err;
GEM_BUG_ON(obj->ops->mmap_offset || obj->ops->mmap_ops);
mmo = lookup_mmo(obj, mmap_type);
if (mmo)
goto out;
mmo = kmalloc(sizeof(*mmo), GFP_KERNEL);
if (!mmo)
return ERR_PTR(-ENOMEM);
mmo->obj = obj;
mmo->mmap_type = mmap_type;
drm_vma_node_reset(&mmo->vma_node);
err = drm_vma_offset_add(obj->base.dev->vma_offset_manager,
&mmo->vma_node, obj->base.size / PAGE_SIZE);
if (likely(!err))
goto insert;
/* Attempt to reap some mmap space from dead objects */
err = intel_gt_retire_requests_timeout(to_gt(i915), MAX_SCHEDULE_TIMEOUT,
NULL);
if (err)
goto err;
i915_gem_drain_freed_objects(i915);
err = drm_vma_offset_add(obj->base.dev->vma_offset_manager,
&mmo->vma_node, obj->base.size / PAGE_SIZE);
if (err)
goto err;
insert:
mmo = insert_mmo(obj, mmo);
GEM_BUG_ON(lookup_mmo(obj, mmap_type) != mmo);
out:
if (file)
drm_vma_node_allow(&mmo->vma_node, file);
return mmo;
err:
kfree(mmo);
return ERR_PTR(err);
}
static int
__assign_mmap_offset(struct drm_i915_gem_object *obj,
enum i915_mmap_type mmap_type,
u64 *offset, struct drm_file *file)
{
struct i915_mmap_offset *mmo;
if (i915_gem_object_never_mmap(obj))
return -ENODEV;
if (obj->ops->mmap_offset) {
if (mmap_type != I915_MMAP_TYPE_FIXED)
return -ENODEV;
*offset = obj->ops->mmap_offset(obj);
return 0;
}
if (mmap_type == I915_MMAP_TYPE_FIXED)
return -ENODEV;
if (mmap_type != I915_MMAP_TYPE_GTT &&
!i915_gem_object_has_struct_page(obj) &&
!i915_gem_object_has_iomem(obj))
return -ENODEV;
mmo = mmap_offset_attach(obj, mmap_type, file);
if (IS_ERR(mmo))
return PTR_ERR(mmo);
*offset = drm_vma_node_offset_addr(&mmo->vma_node);
return 0;
}
static int
__assign_mmap_offset_handle(struct drm_file *file,
u32 handle,
enum i915_mmap_type mmap_type,
u64 *offset)
{
struct drm_i915_gem_object *obj;
int err;
obj = i915_gem_object_lookup(file, handle);
if (!obj)
return -ENOENT;
err = i915_gem_object_lock_interruptible(obj, NULL);
if (err)
goto out_put;
err = __assign_mmap_offset(obj, mmap_type, offset, file);
i915_gem_object_unlock(obj);
out_put:
i915_gem_object_put(obj);
return err;
}
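/*
 * i915_gem_dumb_mmap_offset() is wired up as the drm_driver dumb_map_offset
 * hook (reached via DRM_IOCTL_MODE_MAP_DUMB): pick a sensible default
 * mapping type for the platform and hand back the fake offset for the dumb
 * buffer handle.
 */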
int
i915_gem_dumb_mmap_offset(struct drm_file *file,
struct drm_device *dev,
u32 handle,
u64 *offset)
{
struct drm_i915_private *i915 = to_i915(dev);
enum i915_mmap_type mmap_type;
if (HAS_LMEM(to_i915(dev)))
mmap_type = I915_MMAP_TYPE_FIXED;
else if (pat_enabled())
mmap_type = I915_MMAP_TYPE_WC;
else if (!i915_ggtt_has_aperture(to_gt(i915)->ggtt))
return -ENODEV;
else
mmap_type = I915_MMAP_TYPE_GTT;
return __assign_mmap_offset_handle(file, handle, mmap_type, offset);
}
/**
* i915_gem_mmap_offset_ioctl - prepare an object for GTT mmap'ing
* @dev: DRM device
* @data: GTT mapping ioctl data
* @file: GEM object info
*
* Simply returns the fake offset to userspace so it can mmap it.
* The mmap call will end up in drm_gem_mmap(), which will set things
* up so we can get faults in the handler above.
*
* The fault handler will take care of binding the object into the GTT
* (since it may have been evicted to make room for something), allocating
* a fence register, and mapping the appropriate aperture address into
* userspace.
*/
int
i915_gem_mmap_offset_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_private *i915 = to_i915(dev);
struct drm_i915_gem_mmap_offset *args = data;
enum i915_mmap_type type;
int err;
/*
* Historically we failed to check args.pad and args.offset
* and so we cannot use those fields for user input and we cannot
* add -EINVAL for them as the ABI is fixed, i.e. old userspace
* may be feeding in garbage in those fields.
*
* if (args->pad) return -EINVAL; is verboten!
*/
err = i915_user_extensions(u64_to_user_ptr(args->extensions),
NULL, 0, NULL);
if (err)
return err;
switch (args->flags) {
case I915_MMAP_OFFSET_GTT:
if (!i915_ggtt_has_aperture(to_gt(i915)->ggtt))
return -ENODEV;
type = I915_MMAP_TYPE_GTT;
break;
case I915_MMAP_OFFSET_WC:
if (!pat_enabled())
return -ENODEV;
type = I915_MMAP_TYPE_WC;
break;
case I915_MMAP_OFFSET_WB:
type = I915_MMAP_TYPE_WB;
break;
case I915_MMAP_OFFSET_UC:
if (!pat_enabled())
return -ENODEV;
type = I915_MMAP_TYPE_UC;
break;
case I915_MMAP_OFFSET_FIXED:
type = I915_MMAP_TYPE_FIXED;
break;
default:
return -EINVAL;
}
return __assign_mmap_offset_handle(file, args->handle, type, &args->offset);
}
static void vm_open(struct vm_area_struct *vma)
{
struct i915_mmap_offset *mmo = vma->vm_private_data;
struct drm_i915_gem_object *obj = mmo->obj;
GEM_BUG_ON(!obj);
i915_gem_object_get(obj);
}
static void vm_close(struct vm_area_struct *vma)
{
struct i915_mmap_offset *mmo = vma->vm_private_data;
struct drm_i915_gem_object *obj = mmo->obj;
GEM_BUG_ON(!obj);
i915_gem_object_put(obj);
}
static const struct vm_operations_struct vm_ops_gtt = {
.fault = vm_fault_gtt,
.access = vm_access,
.open = vm_open,
.close = vm_close,
};
static const struct vm_operations_struct vm_ops_cpu = {
.fault = vm_fault_cpu,
.access = vm_access,
.open = vm_open,
.close = vm_close,
};
static int singleton_release(struct inode *inode, struct file *file)
{
struct drm_i915_private *i915 = file->private_data;
cmpxchg(&i915->gem.mmap_singleton, file, NULL);
drm_dev_put(&i915->drm);
return 0;
}
static const struct file_operations singleton_fops = {
.owner = THIS_MODULE,
.release = singleton_release,
};
static struct file *mmap_singleton(struct drm_i915_private *i915)
{
struct file *file;
rcu_read_lock();
file = READ_ONCE(i915->gem.mmap_singleton);
if (file && !get_file_rcu(file))
file = NULL;
rcu_read_unlock();
if (file)
return file;
file = anon_inode_getfile("i915.gem", &singleton_fops, i915, O_RDWR);
if (IS_ERR(file))
return file;
/* Everyone shares a single global address space */
file->f_mapping = i915->drm.anon_inode->i_mapping;
smp_store_mb(i915->gem.mmap_singleton, file);
drm_dev_get(&i915->drm);
return file;
}
/*
* This overcomes the limitation in drm_gem_mmap's assignment of a
* drm_gem_object as the vma->vm_private_data, since we need to
* be able to resolve multiple mmap offsets which could be tied
* to a single gem object.
*/
int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct drm_vma_offset_node *node;
struct drm_file *priv = filp->private_data;
struct drm_device *dev = priv->minor->dev;
struct drm_i915_gem_object *obj = NULL;
struct i915_mmap_offset *mmo = NULL;
struct file *anon;
if (drm_dev_is_unplugged(dev))
return -ENODEV;
rcu_read_lock();
drm_vma_offset_lock_lookup(dev->vma_offset_manager);
node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
vma->vm_pgoff,
vma_pages(vma));
if (node && drm_vma_node_is_allowed(node, priv)) {
/*
* Skip 0-refcnted objects as they are in the process of being
* destroyed and will be invalid when the vma manager lock
* is released.
*/
if (!node->driver_private) {
mmo = container_of(node, struct i915_mmap_offset, vma_node);
obj = i915_gem_object_get_rcu(mmo->obj);
GEM_BUG_ON(obj && obj->ops->mmap_ops);
} else {
obj = i915_gem_object_get_rcu
(container_of(node, struct drm_i915_gem_object,
base.vma_node));
GEM_BUG_ON(obj && !obj->ops->mmap_ops);
}
}
drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
rcu_read_unlock();
if (!obj)
return node ? -EACCES : -EINVAL;
if (i915_gem_object_is_readonly(obj)) {
if (vma->vm_flags & VM_WRITE) {
i915_gem_object_put(obj);
return -EINVAL;
}
vma->vm_flags &= ~VM_MAYWRITE;
}
anon = mmap_singleton(to_i915(dev));
if (IS_ERR(anon)) {
i915_gem_object_put(obj);
return PTR_ERR(anon);
}
vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | VM_IO;
/*
* We keep the ref on mmo->obj, not vm_file, but we require
* vma->vm_file->f_mapping, see vma_link(), for later revocation.
* Our userspace is accustomed to having per-file resource cleanup
* (i.e. contexts, objects and requests) on their close(fd), which
* requires avoiding extraneous references to their filp, hence why
* we prefer to use an anonymous file for their mmaps.
*/
vma_set_file(vma, anon);
/* Drop the initial creation reference, the vma is now holding one. */
fput(anon);
if (obj->ops->mmap_ops) {
vma->vm_page_prot = pgprot_decrypted(vm_get_page_prot(vma->vm_flags));
vma->vm_ops = obj->ops->mmap_ops;
vma->vm_private_data = node->driver_private;
return 0;
}
vma->vm_private_data = mmo;
switch (mmo->mmap_type) {
case I915_MMAP_TYPE_WC:
vma->vm_page_prot =
pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
vma->vm_ops = &vm_ops_cpu;
break;
case I915_MMAP_TYPE_FIXED:
GEM_WARN_ON(1);
fallthrough;
case I915_MMAP_TYPE_WB:
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
vma->vm_ops = &vm_ops_cpu;
break;
case I915_MMAP_TYPE_UC:
vma->vm_page_prot =
pgprot_noncached(vm_get_page_prot(vma->vm_flags));
vma->vm_ops = &vm_ops_cpu;
break;
case I915_MMAP_TYPE_GTT:
vma->vm_page_prot =
pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
vma->vm_ops = &vm_ops_gtt;
break;
}
vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
return 0;
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_gem_mman.c"
#endif