/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */

#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"
#include "i915_gem_lmem.h"
#include "i915_gem_mman.h"

#include "gt/intel_gt.h"

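/*
 * Attach a populated sg_table to the object as its backing store, work out
 * which GTT page sizes the layout supports, and register the object with
 * the shrinker lists where appropriate.
 */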
void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
				 struct sg_table *pages,
				 unsigned int sg_page_sizes)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	unsigned long supported = INTEL_INFO(i915)->page_sizes;
	bool shrinkable;
	int i;

	assert_object_held_shared(obj);

	if (i915_gem_object_is_volatile(obj))
		obj->mm.madv = I915_MADV_DONTNEED;

	/* Make the pages coherent with the GPU (flushing any swapin). */
	if (obj->cache_dirty) {
		WARN_ON_ONCE(IS_DGFX(i915));
		obj->write_domain = 0;
		if (i915_gem_object_has_struct_page(obj))
			drm_clflush_sg(pages);
		obj->cache_dirty = false;
	}

	obj->mm.get_page.sg_pos = pages->sgl;
	obj->mm.get_page.sg_idx = 0;
	obj->mm.get_dma_page.sg_pos = pages->sgl;
	obj->mm.get_dma_page.sg_idx = 0;

	obj->mm.pages = pages;

	GEM_BUG_ON(!sg_page_sizes);
	obj->mm.page_sizes.phys = sg_page_sizes;

	/*
	 * Calculate the supported page-sizes which fit into the given
	 * sg_page_sizes. This will give us the page-sizes which we may be able
	 * to use opportunistically when later inserting into the GTT. For
	 * example if phys=2G, then in theory we should be able to use 1G, 2M,
	 * 64K or 4K pages, although in practice this will depend on a number of
	 * other factors.
	 */
	obj->mm.page_sizes.sg = 0;
	for_each_set_bit(i, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
		if (obj->mm.page_sizes.phys & ~0u << i)
			obj->mm.page_sizes.sg |= BIT(i);
	}
	GEM_BUG_ON(!HAS_PAGE_SIZES(i915, obj->mm.page_sizes.sg));

	shrinkable = i915_gem_object_is_shrinkable(obj);

	if (i915_gem_object_is_tiled(obj) &&
	    i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
		GEM_BUG_ON(i915_gem_object_has_tiling_quirk(obj));
		i915_gem_object_set_tiling_quirk(obj);
		GEM_BUG_ON(!list_empty(&obj->mm.link));
		atomic_inc(&obj->mm.shrink_pin);
		shrinkable = false;
	}

	if (shrinkable && !i915_gem_object_has_self_managed_shrink_list(obj)) {
		struct list_head *list;
		unsigned long flags;

		assert_object_held(obj);
		spin_lock_irqsave(&i915->mm.obj_lock, flags);

		i915->mm.shrink_count++;
		i915->mm.shrink_memory += obj->base.size;

		if (obj->mm.madv != I915_MADV_WILLNEED)
			list = &i915->mm.purge_list;
		else
			list = &i915->mm.shrink_list;
		list_add_tail(&obj->mm.link, list);

		atomic_set(&obj->mm.shrink_pin, 0);
		spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
	}
}

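/*
 * Lowest-level helper: reject purgeable objects and ask the backend to
 * populate obj->mm.pages.
 */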
int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	int err;

	assert_object_held_shared(obj);

	if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
		drm_dbg(&i915->drm,
			"Attempting to obtain a purgeable object\n");
		return -EFAULT;
	}

	err = obj->ops->get_pages(obj);
	GEM_BUG_ON(!err && !i915_gem_object_has_pages(obj));

	return err;
}

/* Ensure that the associated pages are gathered from the backing storage
 * and pinned into our object. i915_gem_object_pin_pages() may be called
 * multiple times before they are released by a single call to
 * i915_gem_object_unpin_pages() - once the pages are no longer referenced
 * either as a result of memory pressure (reaping pages under the shrinker)
 * or as the object is itself released.
 */
int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
	int err;

	assert_object_held(obj);

	assert_object_held_shared(obj);

	if (unlikely(!i915_gem_object_has_pages(obj))) {
		GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));

		err = ____i915_gem_object_get_pages(obj);
		if (err)
			return err;

		smp_mb__before_atomic();
	}
	atomic_inc(&obj->mm.pages_pin_count);

	return 0;
}

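/*
 * Take the ww object lock around i915_gem_object_pin_pages(), backing off
 * and retrying on -EDEADLK.
 *
 * A minimal usage sketch (assuming a ready obj); callers pair this with
 * i915_gem_object_unpin_pages() once they are done with the backing store:
 *
 *	err = i915_gem_object_pin_pages_unlocked(obj);
 *	if (err)
 *		return err;
 *	...access obj->mm.pages...
 *	i915_gem_object_unpin_pages(obj);
 */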
int i915_gem_object_pin_pages_unlocked(struct drm_i915_gem_object *obj)
{
	struct i915_gem_ww_ctx ww;
	int err;

	i915_gem_ww_ctx_init(&ww, true);
retry:
	err = i915_gem_object_lock(obj, &ww);
	if (!err)
		err = i915_gem_object_pin_pages(obj);

	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);
	return err;
}

/* Immediately discard the backing storage */
int i915_gem_object_truncate(struct drm_i915_gem_object *obj)
{
	if (obj->ops->truncate)
		return obj->ops->truncate(obj);

	return 0;
}

/* Try to discard unwanted pages */
void i915_gem_object_writeback(struct drm_i915_gem_object *obj)
{
	assert_object_held_shared(obj);
	GEM_BUG_ON(i915_gem_object_has_pages(obj));

	if (obj->ops->writeback)
		obj->ops->writeback(obj);
}

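/*
 * Drop all cached radix-tree entries for both the struct page and the
 * DMA address iterators, so stale lookups cannot outlive the pages.
 */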
static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
{
	struct radix_tree_iter iter;
	void __rcu **slot;

	rcu_read_lock();
	radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0)
		radix_tree_delete(&obj->mm.get_page.radix, iter.index);
	radix_tree_for_each_slot(slot, &obj->mm.get_dma_page.radix, &iter, 0)
		radix_tree_delete(&obj->mm.get_dma_page.radix, iter.index);
	rcu_read_unlock();
}

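/*
 * Only mappings created with vmap()/vmap_pfn() need explicit teardown;
 * single-page WB mappings came straight from page_address().
 */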
static void unmap_object(struct drm_i915_gem_object *obj, void *ptr)
{
	if (is_vmalloc_addr(ptr))
		vunmap(ptr);
}

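/*
 * Detach the backing store from the object without releasing it: the
 * caller owns the returned sg_table and is responsible for handing it
 * back to the backend's put_pages().
 */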
struct sg_table *
__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages;

	assert_object_held_shared(obj);

	pages = fetch_and_zero(&obj->mm.pages);
	if (IS_ERR_OR_NULL(pages))
		return pages;

	if (i915_gem_object_is_volatile(obj))
		obj->mm.madv = I915_MADV_WILLNEED;

	if (!i915_gem_object_has_self_managed_shrink_list(obj))
		i915_gem_object_make_unshrinkable(obj);

	if (obj->mm.mapping) {
		unmap_object(obj, page_mask_bits(obj->mm.mapping));
		obj->mm.mapping = NULL;
	}

	__i915_gem_object_reset_page_iter(obj);
	obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;

	if (test_and_clear_bit(I915_BO_WAS_BOUND_BIT, &obj->flags)) {
		struct drm_i915_private *i915 = to_i915(obj->base.dev);
		intel_wakeref_t wakeref;

		with_intel_runtime_pm_if_active(&i915->runtime_pm, wakeref)
			intel_gt_invalidate_tlbs(to_gt(i915));
	}

	return pages;
}

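/*
 * Release the backing store: fails with -EBUSY while the pages are still
 * pinned, otherwise detaches them and hands them to the backend.
 */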
int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages;

	if (i915_gem_object_has_pinned_pages(obj))
		return -EBUSY;

	/* May be called by shrinker from within get_pages() (on another bo) */
	assert_object_held_shared(obj);

	i915_gem_object_release_mmap_offset(obj);

	/*
	 * ->put_pages might need to allocate memory for the bit17 swizzle
	 * array, hence protect them from being reaped by removing them from gtt
	 * lists early.
	 */
	pages = __i915_gem_object_unset_pages(obj);

	/*
	 * XXX Temporary hijinx to avoid updating all backends to handle
	 * NULL pages. In the future, when we have more asynchronous
	 * get_pages backends we should be better able to handle the
	 * cancellation of the async task in a more uniform manner.
	 */
	if (!IS_ERR_OR_NULL(pages))
		obj->ops->put_pages(obj, pages);

	return 0;
}

/* The 'mapping' part of i915_gem_object_pin_map() below */
static void *i915_gem_object_map_page(struct drm_i915_gem_object *obj,
				      enum i915_map_type type)
{
	unsigned long n_pages = obj->base.size >> PAGE_SHIFT, i;
	struct page *stack[32], **pages = stack, *page;
	struct sgt_iter iter;
	pgprot_t pgprot;
	void *vaddr;

	switch (type) {
	default:
		MISSING_CASE(type);
		fallthrough; /* to use PAGE_KERNEL anyway */
	case I915_MAP_WB:
		/*
		 * On 32b, highmem uses a finite set of indirect PTE (i.e.
		 * vmap) to provide virtual mappings of the high pages.
		 * As these are finite, map_new_virtual() must wait for some
		 * other kmap() to finish when it runs out. If we map a large
		 * number of objects, there is no method for it to tell us
		 * to release the mappings, and we deadlock.
		 *
		 * However, if we make an explicit vmap of the page, that
		 * uses a larger vmalloc arena, and also has the ability
		 * to tell us to release unwanted mappings. Most importantly,
		 * it will fail and propagate an error instead of waiting
		 * forever.
		 *
		 * So if the page is beyond the 32b boundary, make an explicit
		 * vmap.
		 */
		if (n_pages == 1 && !PageHighMem(sg_page(obj->mm.pages->sgl)))
			return page_address(sg_page(obj->mm.pages->sgl));
		pgprot = PAGE_KERNEL;
		break;
	case I915_MAP_WC:
		pgprot = pgprot_writecombine(PAGE_KERNEL_IO);
		break;
	}

	if (n_pages > ARRAY_SIZE(stack)) {
		/* Too big for stack -- allocate temporary array instead */
		pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);
		if (!pages)
			return ERR_PTR(-ENOMEM);
	}

	i = 0;
	for_each_sgt_page(page, iter, obj->mm.pages)
		pages[i++] = page;
	vaddr = vmap(pages, n_pages, 0, pgprot);
	if (pages != stack)
		kvfree(pages);

	return vaddr ?: ERR_PTR(-ENOMEM);
}

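/*
 * Map an iomem-backed object by PFN: translate each DMA address into a PFN
 * within the memory region's I/O aperture and build a write-combined vmap
 * of the result.
 */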
static void *i915_gem_object_map_pfn(struct drm_i915_gem_object *obj,
				     enum i915_map_type type)
{
	resource_size_t iomap = obj->mm.region->iomap.base -
		obj->mm.region->region.start;
	unsigned long n_pfn = obj->base.size >> PAGE_SHIFT;
	unsigned long stack[32], *pfns = stack, i;
	struct sgt_iter iter;
	dma_addr_t addr;
	void *vaddr;

	GEM_BUG_ON(type != I915_MAP_WC);

	if (n_pfn > ARRAY_SIZE(stack)) {
		/* Too big for stack -- allocate temporary array instead */
		pfns = kvmalloc_array(n_pfn, sizeof(*pfns), GFP_KERNEL);
		if (!pfns)
			return ERR_PTR(-ENOMEM);
	}

	i = 0;
	for_each_sgt_daddr(addr, iter, obj->mm.pages)
		pfns[i++] = (iomap + addr) >> PAGE_SHIFT;
	vaddr = vmap_pfn(pfns, n_pfn, pgprot_writecombine(PAGE_KERNEL_IO));
	if (pfns != stack)
		kvfree(pfns);

	return vaddr ?: ERR_PTR(-ENOMEM);
}

/* get, pin, and map the pages of the object into kernel space */
void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
			      enum i915_map_type type)
{
	enum i915_map_type has_type;
	bool pinned;
	void *ptr;
	int err;

	if (!i915_gem_object_has_struct_page(obj) &&
	    !i915_gem_object_has_iomem(obj))
		return ERR_PTR(-ENXIO);

	assert_object_held(obj);

	pinned = !(type & I915_MAP_OVERRIDE);
	type &= ~I915_MAP_OVERRIDE;

	if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
		if (unlikely(!i915_gem_object_has_pages(obj))) {
			GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));

			err = ____i915_gem_object_get_pages(obj);
			if (err)
				return ERR_PTR(err);

			smp_mb__before_atomic();
		}
		atomic_inc(&obj->mm.pages_pin_count);
		pinned = false;
	}
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));

	/*
	 * For discrete, our CPU mappings need to be consistent in order to
	 * function correctly on !x86. When mapping things through TTM, we use
	 * the same rules to determine the caching type.
	 *
	 * The caching rules, starting from DG1:
	 *
	 * - If the object can be placed in device local-memory, then the
	 *   pages should be allocated and mapped as write-combined only.
	 *
	 * - Everything else is always allocated and mapped as write-back,
	 *   with the guarantee that everything is also coherent with the
	 *   GPU.
	 *
	 * Internal users of lmem are already expected to get this right, so no
	 * fudging needed there.
	 */
	if (i915_gem_object_placement_possible(obj, INTEL_MEMORY_LOCAL)) {
		if (type != I915_MAP_WC && !obj->mm.n_placements) {
			ptr = ERR_PTR(-ENODEV);
			goto err_unpin;
		}

		type = I915_MAP_WC;
	} else if (IS_DGFX(to_i915(obj->base.dev))) {
		type = I915_MAP_WB;
	}

	ptr = page_unpack_bits(obj->mm.mapping, &has_type);
	if (ptr && has_type != type) {
		if (pinned) {
			ptr = ERR_PTR(-EBUSY);
			goto err_unpin;
		}

		unmap_object(obj, ptr);

		ptr = obj->mm.mapping = NULL;
	}

	if (!ptr) {
		err = i915_gem_object_wait_moving_fence(obj, true);
		if (err) {
			ptr = ERR_PTR(err);
			goto err_unpin;
		}

		if (GEM_WARN_ON(type == I915_MAP_WC && !pat_enabled()))
			ptr = ERR_PTR(-ENODEV);
		else if (i915_gem_object_has_struct_page(obj))
			ptr = i915_gem_object_map_page(obj, type);
		else
			ptr = i915_gem_object_map_pfn(obj, type);
		if (IS_ERR(ptr))
			goto err_unpin;

		obj->mm.mapping = page_pack_bits(ptr, type);
	}

	return ptr;

err_unpin:
	atomic_dec(&obj->mm.pages_pin_count);
	return ptr;
}

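/*
 * Convenience wrapper that takes and drops the object lock around
 * i915_gem_object_pin_map().
 *
 * A minimal usage sketch (assuming a suitably placed obj); the mapping is
 * released again with i915_gem_object_unpin_map():
 *
 *	void *vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	memset(vaddr, 0, obj->base.size);
 *	__i915_gem_object_flush_map(obj, 0, obj->base.size);
 *	i915_gem_object_unpin_map(obj);
 */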
void *i915_gem_object_pin_map_unlocked(struct drm_i915_gem_object *obj,
				       enum i915_map_type type)
{
	void *ret;

	i915_gem_object_lock(obj, NULL);
	ret = i915_gem_object_pin_map(obj, type);
	i915_gem_object_unlock(obj);

	return ret;
}

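/*
 * Flush CPU writes in the given range of a pinned kernel mapping so they
 * are visible to the GPU; the clflush is skipped for WC mappings and for
 * objects that are already coherent for writes.
 */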
void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
				 unsigned long offset,
				 unsigned long size)
{
	enum i915_map_type has_type;
	void *ptr;

	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
	GEM_BUG_ON(range_overflows_t(typeof(obj->base.size),
				     offset, size, obj->base.size));

	wmb(); /* let all previous writes be visible to coherent partners */
	obj->mm.dirty = true;

	if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE)
		return;

	ptr = page_unpack_bits(obj->mm.mapping, &has_type);
	if (has_type == I915_MAP_WC)
		return;

	drm_clflush_virt_range(ptr + offset, size);
	if (size == obj->base.size) {
		obj->write_domain &= ~I915_GEM_DOMAIN_CPU;
		obj->cache_dirty = false;
	}
}

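/* Discard the kernel mapping and drop the pin taken by i915_gem_object_pin_map(). */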
void __i915_gem_object_release_map(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!obj->mm.mapping);

	/*
	 * We allow removing the mapping from underneath pinned pages!
	 *
	 * Furthermore, since this is an unsafe operation reserved only
	 * for construction time manipulation, we ignore locking prudence.
	 */
	unmap_object(obj, page_mask_bits(fetch_and_zero(&obj->mm.mapping)));

	i915_gem_object_unpin_map(obj);
}

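/*
 * Walk the object's scatterlist to find the sg entry covering page (or
 * DMA page) index n, caching intermediate entries in a radix tree so that
 * repeated and backwards lookups stay cheap.
 */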
struct scatterlist *
__i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
			 struct i915_gem_object_page_iter *iter,
			 unsigned int n,
			 unsigned int *offset,
			 bool dma)
{
	struct scatterlist *sg;
	unsigned int idx, count;

	might_sleep();
	GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT);
	if (!i915_gem_object_has_pinned_pages(obj))
		assert_object_held(obj);

	/* As we iterate forward through the sg, we record each entry in a
	 * radixtree for quick repeated (backwards) lookups. If we have seen
	 * this index previously, we will have an entry for it.
	 *
	 * Initial lookup is O(N), but this is amortized to O(1) for
	 * sequential page access (where each new request is consecutive
	 * to the previous one). Repeated lookups are O(lg(obj->base.size)),
	 * i.e. O(1) with a large constant!
	 */
	if (n < READ_ONCE(iter->sg_idx))
		goto lookup;

	mutex_lock(&iter->lock);

	/* We prefer to reuse the last sg so that repeated lookup of this
	 * (or the subsequent) sg are fast - comparing against the last
	 * sg is faster than going through the radixtree.
	 */

	sg = iter->sg_pos;
	idx = iter->sg_idx;
	count = dma ? __sg_dma_page_count(sg) : __sg_page_count(sg);

	while (idx + count <= n) {
		void *entry;
		unsigned long i;
		int ret;

		/* If we cannot allocate and insert this entry, or the
		 * individual pages from this range, cancel updating the
		 * sg_idx so that on this lookup we are forced to linearly
		 * scan onwards, but on future lookups we will try the
		 * insertion again (in which case we need to be careful of
		 * the error return reporting that we have already inserted
		 * this index).
		 */
		ret = radix_tree_insert(&iter->radix, idx, sg);
		if (ret && ret != -EEXIST)
			goto scan;

		entry = xa_mk_value(idx);
		for (i = 1; i < count; i++) {
			ret = radix_tree_insert(&iter->radix, idx + i, entry);
			if (ret && ret != -EEXIST)
				goto scan;
		}

		idx += count;
		sg = ____sg_next(sg);
		count = dma ? __sg_dma_page_count(sg) : __sg_page_count(sg);
	}

scan:
	iter->sg_pos = sg;
	iter->sg_idx = idx;

	mutex_unlock(&iter->lock);

	if (unlikely(n < idx)) /* insertion completed by another thread */
		goto lookup;

	/* In case we failed to insert the entry into the radixtree, we need
	 * to look beyond the current sg.
	 */
	while (idx + count <= n) {
		idx += count;
		sg = ____sg_next(sg);
		count = dma ? __sg_dma_page_count(sg) : __sg_page_count(sg);
	}

	*offset = n - idx;
	return sg;

lookup:
	rcu_read_lock();

	sg = radix_tree_lookup(&iter->radix, n);
	GEM_BUG_ON(!sg);

	/* If this index is in the middle of a multi-page sg entry,
	 * the radix tree will contain a value entry that points
	 * to the start of that range. We will return the pointer to
	 * the base page and the offset of this page within the
	 * sg entry's range.
	 */
	*offset = 0;
	if (unlikely(xa_is_value(sg))) {
		unsigned long base = xa_to_value(sg);

		sg = radix_tree_lookup(&iter->radix, base);
		GEM_BUG_ON(!sg);

		*offset = n - base;
	}

	rcu_read_unlock();

	return sg;
}

struct page *
i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n)
{
	struct scatterlist *sg;
	unsigned int offset;

	GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));

	sg = i915_gem_object_get_sg(obj, n, &offset);
	return nth_page(sg_page(sg), offset);
}

/* Like i915_gem_object_get_page(), but mark the returned page dirty */
struct page *
i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
			       unsigned int n)
{
	struct page *page;

	page = i915_gem_object_get_page(obj, n);
	if (!obj->mm.dirty)
		set_page_dirty(page);

	return page;
}

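/*
 * Look up the DMA address of page n, optionally reporting how many bytes
 * of the containing sg entry remain from that point.
 */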
dma_addr_t
i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
				    unsigned long n,
				    unsigned int *len)
{
	struct scatterlist *sg;
	unsigned int offset;

	sg = i915_gem_object_get_sg_dma(obj, n, &offset);

	if (len)
		*len = sg_dma_len(sg) - (offset << PAGE_SHIFT);

	return sg_dma_address(sg) + (offset << PAGE_SHIFT);
}

dma_addr_t
i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
				unsigned long n)
{
	return i915_gem_object_get_dma_address_len(obj, n, NULL);
}