/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2017 Intel Corporation
 */

#include <linux/prime_numbers.h>

#include "i915_selftest.h"

#include "gem/i915_gem_region.h"
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_pm.h"

#include "gt/intel_gt.h"

#include "igt_gem_utils.h"
#include "mock_context.h"

#include "selftests/mock_drm.h"
#include "selftests/mock_gem_device.h"
#include "selftests/mock_region.h"
#include "selftests/i915_random.h"

static const unsigned int page_sizes[] = {
	I915_GTT_PAGE_SIZE_2M,
	I915_GTT_PAGE_SIZE_64K,
	I915_GTT_PAGE_SIZE_4K,
};

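/* Return the largest device-supported page size that still fits in @rem (0 if none). */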
static unsigned int get_largest_page_size(struct drm_i915_private *i915,
					  u64 rem)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(page_sizes); ++i) {
		unsigned int page_size = page_sizes[i];

		if (HAS_PAGE_SIZES(i915, page_size) && rem >= page_size)
			return page_size;
	}

	return 0;
}

static void huge_pages_free_pages(struct sg_table *st)
{
	struct scatterlist *sg;

	for (sg = st->sgl; sg; sg = __sg_next(sg)) {
		if (sg_page(sg))
			__free_pages(sg_page(sg), get_order(sg->length));
	}

	sg_free_table(st);
	kfree(st);
}

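/*
 * Backend for the "huge-gem" mock object: backs the object with real pages so
 * the mapping paths see genuine huge allocations, per obj->mm.page_mask.
 */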
static int get_huge_pages(struct drm_i915_gem_object *obj)
{
#define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
	unsigned int page_mask = obj->mm.page_mask;
	struct sg_table *st;
	struct scatterlist *sg;
	unsigned int sg_page_sizes;
	u64 rem;

	st = kmalloc(sizeof(*st), GFP);
	if (!st)
		return -ENOMEM;

	if (sg_alloc_table(st, obj->base.size >> PAGE_SHIFT, GFP)) {
		kfree(st);
		return -ENOMEM;
	}

	rem = obj->base.size;
	sg = st->sgl;
	st->nents = 0;
	sg_page_sizes = 0;

	/*
	 * Our goal here is simple, we want to greedily fill the object from
	 * largest to smallest page-size, while ensuring that we use *every*
	 * page-size as per the given page-mask.
	 */
	do {
		unsigned int bit = ilog2(page_mask);
		unsigned int page_size = BIT(bit);
		int order = get_order(page_size);

		do {
			struct page *page;

			GEM_BUG_ON(order >= MAX_ORDER);
			page = alloc_pages(GFP | __GFP_ZERO, order);
			if (!page)
				goto err;

			sg_set_page(sg, page, page_size, 0);
			sg_page_sizes |= page_size;
			st->nents++;

			rem -= page_size;
			if (!rem) {
				sg_mark_end(sg);
				break;
			}

			sg = __sg_next(sg);
		} while ((rem - ((page_size-1) & page_mask)) >= page_size);

		page_mask &= (page_size-1);
	} while (page_mask);

	if (i915_gem_gtt_prepare_pages(obj, st))
		goto err;

	GEM_BUG_ON(sg_page_sizes != obj->mm.page_mask);
	__i915_gem_object_set_pages(obj, st, sg_page_sizes);

	return 0;

err:
	sg_set_page(sg, NULL, 0, 0);
	sg_mark_end(sg);
	huge_pages_free_pages(st);

	return -ENOMEM;
}

static void put_huge_pages(struct drm_i915_gem_object *obj,
			   struct sg_table *pages)
{
	i915_gem_gtt_finish_pages(obj, pages);
	huge_pages_free_pages(pages);

	obj->mm.dirty = false;
}

static const struct drm_i915_gem_object_ops huge_page_ops = {
	.name = "huge-gem",
	.flags = I915_GEM_OBJECT_IS_SHRINKABLE,
	.get_pages = get_huge_pages,
	.put_pages = put_huge_pages,
};

static struct drm_i915_gem_object *
huge_pages_object(struct drm_i915_private *i915,
		  u64 size,
		  unsigned int page_mask)
{
	static struct lock_class_key lock_class;
	struct drm_i915_gem_object *obj;

	GEM_BUG_ON(!size);
	GEM_BUG_ON(!IS_ALIGNED(size, BIT(__ffs(page_mask))));

	if (size >> PAGE_SHIFT > INT_MAX)
		return ERR_PTR(-E2BIG);

	if (overflows_type(size, obj->base.size))
		return ERR_PTR(-E2BIG);

	obj = i915_gem_object_alloc();
	if (!obj)
		return ERR_PTR(-ENOMEM);

	drm_gem_private_object_init(&i915->drm, &obj->base, size);
	i915_gem_object_init(obj, &huge_page_ops, &lock_class,
			     I915_BO_ALLOC_STRUCT_PAGE);

	i915_gem_object_set_volatile(obj);

	obj->write_domain = I915_GEM_DOMAIN_CPU;
	obj->read_domains = I915_GEM_DOMAIN_CPU;
	obj->cache_level = I915_CACHE_NONE;

	obj->mm.page_mask = page_mask;

	return obj;
}

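/*
 * The "fake" backends never touch real memory: sg_dma_address() is simply
 * stuffed with the page size so only the GTT insertion paths are exercised.
 */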
static int fake_get_huge_pages(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	const u64 max_len = rounddown_pow_of_two(UINT_MAX);
	struct sg_table *st;
	struct scatterlist *sg;
	unsigned int sg_page_sizes;
	u64 rem;

	st = kmalloc(sizeof(*st), GFP);
	if (!st)
		return -ENOMEM;

	if (sg_alloc_table(st, obj->base.size >> PAGE_SHIFT, GFP)) {
		kfree(st);
		return -ENOMEM;
	}

	/* Use optimal page sized chunks to fill in the sg table */
	rem = obj->base.size;
	sg = st->sgl;
	st->nents = 0;
	sg_page_sizes = 0;
	do {
		unsigned int page_size = get_largest_page_size(i915, rem);
		unsigned int len = min(page_size * div_u64(rem, page_size),
				       max_len);

		GEM_BUG_ON(!page_size);

		sg->offset = 0;
		sg->length = len;
		sg_dma_len(sg) = len;
		sg_dma_address(sg) = page_size;

		sg_page_sizes |= len;

		st->nents++;

		rem -= len;
		if (!rem) {
			sg_mark_end(sg);
			break;
		}

		sg = sg_next(sg);
	} while (1);

	i915_sg_trim(st);

	__i915_gem_object_set_pages(obj, st, sg_page_sizes);

	return 0;
}

static int fake_get_huge_pages_single(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct sg_table *st;
	struct scatterlist *sg;
	unsigned int page_size;

	st = kmalloc(sizeof(*st), GFP);
	if (!st)
		return -ENOMEM;

	if (sg_alloc_table(st, 1, GFP)) {
		kfree(st);
		return -ENOMEM;
	}

	sg = st->sgl;
	st->nents = 1;

	page_size = get_largest_page_size(i915, obj->base.size);
	GEM_BUG_ON(!page_size);

	sg->offset = 0;
	sg->length = obj->base.size;
	sg_dma_len(sg) = obj->base.size;
	sg_dma_address(sg) = page_size;

	__i915_gem_object_set_pages(obj, st, sg->length);

	return 0;
#undef GFP
}

static void fake_free_huge_pages(struct drm_i915_gem_object *obj,
				 struct sg_table *pages)
{
	sg_free_table(pages);
	kfree(pages);
}

static void fake_put_huge_pages(struct drm_i915_gem_object *obj,
				struct sg_table *pages)
{
	fake_free_huge_pages(obj, pages);
	obj->mm.dirty = false;
}

static const struct drm_i915_gem_object_ops fake_ops = {
	.name = "fake-gem",
	.flags = I915_GEM_OBJECT_IS_SHRINKABLE,
	.get_pages = fake_get_huge_pages,
	.put_pages = fake_put_huge_pages,
};

static const struct drm_i915_gem_object_ops fake_ops_single = {
	.name = "fake-gem",
	.flags = I915_GEM_OBJECT_IS_SHRINKABLE,
	.get_pages = fake_get_huge_pages_single,
	.put_pages = fake_put_huge_pages,
};

static struct drm_i915_gem_object *
fake_huge_pages_object(struct drm_i915_private *i915, u64 size, bool single)
{
	static struct lock_class_key lock_class;
	struct drm_i915_gem_object *obj;

	GEM_BUG_ON(!size);
	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));

	if (size >> PAGE_SHIFT > UINT_MAX)
		return ERR_PTR(-E2BIG);

	if (overflows_type(size, obj->base.size))
		return ERR_PTR(-E2BIG);

	obj = i915_gem_object_alloc();
	if (!obj)
		return ERR_PTR(-ENOMEM);

	drm_gem_private_object_init(&i915->drm, &obj->base, size);

	if (single)
		i915_gem_object_init(obj, &fake_ops_single, &lock_class, 0);
	else
		i915_gem_object_init(obj, &fake_ops, &lock_class, 0);

	i915_gem_object_set_volatile(obj);

	obj->write_domain = I915_GEM_DOMAIN_CPU;
	obj->read_domains = I915_GEM_DOMAIN_CPU;
	obj->cache_level = I915_CACHE_NONE;

	return obj;
}

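/*
 * Cross-check the page-size bookkeeping: everything reported on the vma and
 * the object must be a size the device supports, and the two must agree.
 */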
static int igt_check_page_sizes(struct i915_vma *vma)
{
	struct drm_i915_private *i915 = vma->vm->i915;
	unsigned int supported = INTEL_INFO(i915)->page_sizes;
	struct drm_i915_gem_object *obj = vma->obj;
	int err;

	/* We have to wait for the async bind to complete before our asserts */
	err = i915_vma_sync(vma);
	if (err)
		return err;

	if (!HAS_PAGE_SIZES(i915, vma->page_sizes.sg)) {
		pr_err("unsupported page_sizes.sg=%u, supported=%u\n",
		       vma->page_sizes.sg & ~supported, supported);
		err = -EINVAL;
	}

	if (!HAS_PAGE_SIZES(i915, vma->page_sizes.gtt)) {
		pr_err("unsupported page_sizes.gtt=%u, supported=%u\n",
		       vma->page_sizes.gtt & ~supported, supported);
		err = -EINVAL;
	}

	if (vma->page_sizes.phys != obj->mm.page_sizes.phys) {
		pr_err("vma->page_sizes.phys(%u) != obj->mm.page_sizes.phys(%u)\n",
		       vma->page_sizes.phys, obj->mm.page_sizes.phys);
		err = -EINVAL;
	}

	if (vma->page_sizes.sg != obj->mm.page_sizes.sg) {
		pr_err("vma->page_sizes.sg(%u) != obj->mm.page_sizes.sg(%u)\n",
		       vma->page_sizes.sg, obj->mm.page_sizes.sg);
		err = -EINVAL;
	}

	/*
	 * The dma-api is like a box of chocolates when it comes to the
	 * alignment of dma addresses, however for LMEM we have total control
	 * and so can guarantee alignment, likewise when we allocate our blocks
	 * they should appear in descending order, and if we know that we align
	 * to the largest page size for the GTT address, we should be able to
	 * assert that if we see 2M physical pages then we should also get 2M
	 * GTT pages. If we don't then something might be wrong in our
	 * construction of the backing pages.
	 *
	 * Maintaining alignment is required to utilise huge pages in the ppGGT.
	 */
	if (i915_gem_object_is_lmem(obj) &&
	    IS_ALIGNED(vma->node.start, SZ_2M) &&
	    vma->page_sizes.sg & SZ_2M &&
	    vma->page_sizes.gtt < SZ_2M) {
		pr_err("gtt pages mismatch for LMEM, expected 2M GTT pages, sg(%u), gtt(%u)\n",
		       vma->page_sizes.sg, vma->page_sizes.gtt);
		err = -EINVAL;
	}

	if (obj->mm.page_sizes.gtt) {
		pr_err("obj->page_sizes.gtt(%u) should never be set\n",
		       obj->mm.page_sizes.gtt);
		err = -EINVAL;
	}

	return err;
}

static int igt_mock_exhaust_device_supported_pages(void *arg)
{
	struct i915_ppgtt *ppgtt = arg;
	struct drm_i915_private *i915 = ppgtt->vm.i915;
	unsigned int saved_mask = INTEL_INFO(i915)->page_sizes;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int i, j, single;
	int err;

	/*
	 * Sanity check creating objects with every valid page support
	 * combination for our mock device.
	 */

	for (i = 1; i < BIT(ARRAY_SIZE(page_sizes)); i++) {
		unsigned int combination = SZ_4K; /* Required for ppGTT */

		for (j = 0; j < ARRAY_SIZE(page_sizes); j++) {
			if (i & BIT(j))
				combination |= page_sizes[j];
		}

		mkwrite_device_info(i915)->page_sizes = combination;

		for (single = 0; single <= 1; ++single) {
			obj = fake_huge_pages_object(i915, combination, !!single);
			if (IS_ERR(obj)) {
				err = PTR_ERR(obj);
				goto out_device;
			}

			if (obj->base.size != combination) {
				pr_err("obj->base.size=%zu, expected=%u\n",
				       obj->base.size, combination);
				err = -EINVAL;
				goto out_put;
			}

			vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
			if (IS_ERR(vma)) {
				err = PTR_ERR(vma);
				goto out_put;
			}

			err = i915_vma_pin(vma, 0, 0, PIN_USER);
			if (err)
				goto out_put;

			err = igt_check_page_sizes(vma);

			if (vma->page_sizes.sg != combination) {
				pr_err("page_sizes.sg=%u, expected=%u\n",
				       vma->page_sizes.sg, combination);
				err = -EINVAL;
			}

			i915_vma_unpin(vma);
			i915_gem_object_put(obj);

			if (err)
				goto out_device;
		}
	}

	goto out_device;

out_put:
	i915_gem_object_put(obj);
out_device:
	mkwrite_device_info(i915)->page_sizes = saved_mask;

	return err;
}

static int igt_mock_memory_region_huge_pages(void *arg)
{
	const unsigned int flags[] = { 0, I915_BO_ALLOC_CONTIGUOUS };
	struct i915_ppgtt *ppgtt = arg;
	struct drm_i915_private *i915 = ppgtt->vm.i915;
	unsigned long supported = INTEL_INFO(i915)->page_sizes;
	struct intel_memory_region *mem;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int bit;
	int err = 0;

	mem = mock_region_create(i915, 0, SZ_2G, I915_GTT_PAGE_SIZE_4K, 0);
	if (IS_ERR(mem)) {
		pr_err("%s failed to create memory region\n", __func__);
		return PTR_ERR(mem);
	}

	for_each_set_bit(bit, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
		unsigned int page_size = BIT(bit);
		resource_size_t phys;
		int i;

		for (i = 0; i < ARRAY_SIZE(flags); ++i) {
			obj = i915_gem_object_create_region(mem, page_size,
							    flags[i]);
			if (IS_ERR(obj)) {
				err = PTR_ERR(obj);
				goto out_region;
			}

			vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
			if (IS_ERR(vma)) {
				err = PTR_ERR(vma);
				goto out_put;
			}

			err = i915_vma_pin(vma, 0, 0, PIN_USER);
			if (err)
				goto out_put;

			err = igt_check_page_sizes(vma);
			if (err)
				goto out_unpin;

			phys = i915_gem_object_get_dma_address(obj, 0);
			if (!IS_ALIGNED(phys, page_size)) {
				pr_err("%s addr misaligned(%pa) page_size=%u\n",
				       __func__, &phys, page_size);
				err = -EINVAL;
				goto out_unpin;
			}

			if (vma->page_sizes.gtt != page_size) {
				pr_err("%s page_sizes.gtt=%u, expected=%u\n",
				       __func__, vma->page_sizes.gtt,
				       page_size);
				err = -EINVAL;
				goto out_unpin;
			}

			i915_vma_unpin(vma);
			__i915_gem_object_put_pages(obj);
			i915_gem_object_put(obj);
		}
	}

	goto out_region;

out_unpin:
	i915_vma_unpin(vma);
out_put:
	i915_gem_object_put(obj);
out_region:
	intel_memory_region_put(mem);
	return err;
}

static int igt_mock_ppgtt_misaligned_dma(void *arg)
{
	struct i915_ppgtt *ppgtt = arg;
	struct drm_i915_private *i915 = ppgtt->vm.i915;
	unsigned long supported = INTEL_INFO(i915)->page_sizes;
	struct drm_i915_gem_object *obj;
	int bit;
	int err;

	/*
	 * Sanity check dma misalignment for huge pages -- the dma addresses we
	 * insert into the paging structures need to always respect the page
	 * size alignment.
	 */

	bit = ilog2(I915_GTT_PAGE_SIZE_64K);

	for_each_set_bit_from(bit, &supported,
			      ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
		IGT_TIMEOUT(end_time);
		unsigned int page_size = BIT(bit);
		unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
		unsigned int offset;
		unsigned int size =
			round_up(page_size, I915_GTT_PAGE_SIZE_2M) << 1;
		struct i915_vma *vma;

		obj = fake_huge_pages_object(i915, size, true);
		if (IS_ERR(obj))
			return PTR_ERR(obj);

		if (obj->base.size != size) {
			pr_err("obj->base.size=%zu, expected=%u\n",
			       obj->base.size, size);
			err = -EINVAL;
			goto out_put;
		}

		err = i915_gem_object_pin_pages_unlocked(obj);
		if (err)
			goto out_put;

		/* Force the page size for this object */
		obj->mm.page_sizes.sg = page_size;

		vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out_unpin;
		}

		err = i915_vma_pin(vma, 0, 0, flags);
		if (err)
			goto out_unpin;


		err = igt_check_page_sizes(vma);

		if (vma->page_sizes.gtt != page_size) {
			pr_err("page_sizes.gtt=%u, expected %u\n",
			       vma->page_sizes.gtt, page_size);
			err = -EINVAL;
		}

		i915_vma_unpin(vma);

		if (err)
			goto out_unpin;

		/*
		 * Try all the other valid offsets until the next
		 * boundary -- should always fall back to using 4K
		 * pages.
		 */
		for (offset = 4096; offset < page_size; offset += 4096) {
			err = i915_vma_unbind(vma);
			if (err)
				goto out_unpin;

			err = i915_vma_pin(vma, 0, 0, flags | offset);
			if (err)
				goto out_unpin;

			err = igt_check_page_sizes(vma);

			if (vma->page_sizes.gtt != I915_GTT_PAGE_SIZE_4K) {
				pr_err("page_sizes.gtt=%u, expected %llu\n",
				       vma->page_sizes.gtt, I915_GTT_PAGE_SIZE_4K);
				err = -EINVAL;
			}

			i915_vma_unpin(vma);

			if (err)
				goto out_unpin;

			if (igt_timeout(end_time,
					"%s timed out at offset %x with page-size %x\n",
					__func__, offset, page_size))
				break;
		}

		i915_gem_object_lock(obj, NULL);
		i915_gem_object_unpin_pages(obj);
		__i915_gem_object_put_pages(obj);
		i915_gem_object_unlock(obj);
		i915_gem_object_put(obj);
	}

	return 0;

out_unpin:
	i915_gem_object_lock(obj, NULL);
	i915_gem_object_unpin_pages(obj);
	i915_gem_object_unlock(obj);
out_put:
	i915_gem_object_put(obj);

	return err;
}

static void close_object_list(struct list_head *objects,
			      struct i915_ppgtt *ppgtt)
{
	struct drm_i915_gem_object *obj, *on;

	list_for_each_entry_safe(obj, on, objects, st_link) {
		list_del(&obj->st_link);
		i915_gem_object_lock(obj, NULL);
		i915_gem_object_unpin_pages(obj);
		__i915_gem_object_put_pages(obj);
		i915_gem_object_unlock(obj);
		i915_gem_object_put(obj);
	}
}

static int igt_mock_ppgtt_huge_fill(void *arg)
{
	struct i915_ppgtt *ppgtt = arg;
	struct drm_i915_private *i915 = ppgtt->vm.i915;
	unsigned long max_pages = ppgtt->vm.total >> PAGE_SHIFT;
	unsigned long page_num;
	bool single = false;
	LIST_HEAD(objects);
	IGT_TIMEOUT(end_time);
	int err = -ENODEV;

	for_each_prime_number_from(page_num, 1, max_pages) {
		struct drm_i915_gem_object *obj;
		u64 size = page_num << PAGE_SHIFT;
		struct i915_vma *vma;
		unsigned int expected_gtt = 0;
		int i;

		obj = fake_huge_pages_object(i915, size, single);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			break;
		}

		if (obj->base.size != size) {
			pr_err("obj->base.size=%zd, expected=%llu\n",
			       obj->base.size, size);
			i915_gem_object_put(obj);
			err = -EINVAL;
			break;
		}

		err = i915_gem_object_pin_pages_unlocked(obj);
		if (err) {
			i915_gem_object_put(obj);
			break;
		}

		list_add(&obj->st_link, &objects);

		vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			break;
		}

		err = i915_vma_pin(vma, 0, 0, PIN_USER);
		if (err)
			break;

		err = igt_check_page_sizes(vma);
		if (err) {
			i915_vma_unpin(vma);
			break;
		}

		/*
		 * Figure out the expected gtt page size knowing that we go from
		 * largest to smallest page size sg chunks, and that we align to
		 * the largest page size.
		 */
		for (i = 0; i < ARRAY_SIZE(page_sizes); ++i) {
			unsigned int page_size = page_sizes[i];

			if (HAS_PAGE_SIZES(i915, page_size) &&
			    size >= page_size) {
				expected_gtt |= page_size;
				size &= page_size-1;
			}
		}

		GEM_BUG_ON(!expected_gtt);
		GEM_BUG_ON(size);

		if (expected_gtt & I915_GTT_PAGE_SIZE_4K)
			expected_gtt &= ~I915_GTT_PAGE_SIZE_64K;

		i915_vma_unpin(vma);

		if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K) {
			if (!IS_ALIGNED(vma->node.start,
					I915_GTT_PAGE_SIZE_2M)) {
				pr_err("node.start(%llx) not aligned to 2M\n",
				       vma->node.start);
				err = -EINVAL;
				break;
			}

			if (!IS_ALIGNED(vma->node.size,
					I915_GTT_PAGE_SIZE_2M)) {
				pr_err("node.size(%llx) not aligned to 2M\n",
				       vma->node.size);
				err = -EINVAL;
				break;
			}
		}

		if (vma->page_sizes.gtt != expected_gtt) {
			pr_err("gtt=%u, expected=%u, size=%zd, single=%s\n",
			       vma->page_sizes.gtt, expected_gtt,
			       obj->base.size, yesno(!!single));
			err = -EINVAL;
			break;
		}

		if (igt_timeout(end_time,
				"%s timed out at size %zd\n",
				__func__, obj->base.size))
			break;

		single = !single;
	}

	close_object_list(&objects, ppgtt);

	if (err == -ENOMEM || err == -ENOSPC)
		err = 0;

	return err;
}

static int igt_mock_ppgtt_64K(void *arg)
{
	struct i915_ppgtt *ppgtt = arg;
	struct drm_i915_private *i915 = ppgtt->vm.i915;
	struct drm_i915_gem_object *obj;
	const struct object_info {
		unsigned int size;
		unsigned int gtt;
		unsigned int offset;
	} objects[] = {
		/* Cases with forced padding/alignment */
		{
			.size = SZ_64K,
			.gtt = I915_GTT_PAGE_SIZE_64K,
			.offset = 0,
		},
		{
			.size = SZ_64K + SZ_4K,
			.gtt = I915_GTT_PAGE_SIZE_4K,
			.offset = 0,
		},
		{
			.size = SZ_64K - SZ_4K,
			.gtt = I915_GTT_PAGE_SIZE_4K,
			.offset = 0,
		},
		{
			.size = SZ_2M,
			.gtt = I915_GTT_PAGE_SIZE_64K,
			.offset = 0,
		},
		{
			.size = SZ_2M - SZ_4K,
			.gtt = I915_GTT_PAGE_SIZE_4K,
			.offset = 0,
		},
		{
			.size = SZ_2M + SZ_4K,
			.gtt = I915_GTT_PAGE_SIZE_64K | I915_GTT_PAGE_SIZE_4K,
			.offset = 0,
		},
		{
			.size = SZ_2M + SZ_64K,
			.gtt = I915_GTT_PAGE_SIZE_64K,
			.offset = 0,
		},
		{
			.size = SZ_2M - SZ_64K,
			.gtt = I915_GTT_PAGE_SIZE_64K,
			.offset = 0,
		},
		/* Try without any forced padding/alignment */
		{
			.size = SZ_64K,
			.offset = SZ_2M,
			.gtt = I915_GTT_PAGE_SIZE_4K,
		},
		{
			.size = SZ_128K,
			.offset = SZ_2M - SZ_64K,
			.gtt = I915_GTT_PAGE_SIZE_4K,
		},
	};
	struct i915_vma *vma;
	int i, single;
	int err;

	/*
	 * Sanity check some of the trickiness with 64K pages -- either we can
	 * safely mark the whole page-table(2M block) as 64K, or we have to
	 * always fallback to 4K.
	 */

	if (!HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_64K))
		return 0;

	for (i = 0; i < ARRAY_SIZE(objects); ++i) {
		unsigned int size = objects[i].size;
		unsigned int expected_gtt = objects[i].gtt;
		unsigned int offset = objects[i].offset;
		unsigned int flags = PIN_USER;

		for (single = 0; single <= 1; single++) {
			obj = fake_huge_pages_object(i915, size, !!single);
			if (IS_ERR(obj))
				return PTR_ERR(obj);

			err = i915_gem_object_pin_pages_unlocked(obj);
			if (err)
				goto out_object_put;

			/*
			 * Disable 2M pages -- We only want to use 64K/4K pages
			 * for this test.
			 */
			obj->mm.page_sizes.sg &= ~I915_GTT_PAGE_SIZE_2M;

			vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
			if (IS_ERR(vma)) {
				err = PTR_ERR(vma);
				goto out_object_unpin;
			}

			if (offset)
				flags |= PIN_OFFSET_FIXED | offset;

			err = i915_vma_pin(vma, 0, 0, flags);
			if (err)
				goto out_object_unpin;

			err = igt_check_page_sizes(vma);
			if (err)
				goto out_vma_unpin;

			if (!offset && vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K) {
				if (!IS_ALIGNED(vma->node.start,
						I915_GTT_PAGE_SIZE_2M)) {
					pr_err("node.start(%llx) not aligned to 2M\n",
					       vma->node.start);
					err = -EINVAL;
					goto out_vma_unpin;
				}

				if (!IS_ALIGNED(vma->node.size,
						I915_GTT_PAGE_SIZE_2M)) {
					pr_err("node.size(%llx) not aligned to 2M\n",
					       vma->node.size);
					err = -EINVAL;
					goto out_vma_unpin;
				}
			}

			if (vma->page_sizes.gtt != expected_gtt) {
				pr_err("gtt=%u, expected=%u, i=%d, single=%s\n",
				       vma->page_sizes.gtt, expected_gtt, i,
				       yesno(!!single));
				err = -EINVAL;
				goto out_vma_unpin;
			}

			i915_vma_unpin(vma);
			i915_gem_object_lock(obj, NULL);
			i915_gem_object_unpin_pages(obj);
			__i915_gem_object_put_pages(obj);
			i915_gem_object_unlock(obj);
			i915_gem_object_put(obj);
		}
	}

	return 0;

out_vma_unpin:
	i915_vma_unpin(vma);
out_object_unpin:
	i915_gem_object_lock(obj, NULL);
	i915_gem_object_unpin_pages(obj);
	i915_gem_object_unlock(obj);
out_object_put:
	i915_gem_object_put(obj);

	return err;
}

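/* Write @val into dword @dw of every page of the vma using the GPU. */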
static int gpu_write(struct intel_context *ce,
		     struct i915_vma *vma,
		     u32 dw,
		     u32 val)
{
	int err;

	i915_gem_object_lock(vma->obj, NULL);
	err = i915_gem_object_set_to_gtt_domain(vma->obj, true);
	i915_gem_object_unlock(vma->obj);
	if (err)
		return err;

	return igt_gpu_fill_dw(ce, vma, dw * sizeof(u32),
			       vma->size >> PAGE_SHIFT, val);
}

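/*
 * Read back what the GPU wrote: shmem objects are checked page-by-page via
 * kmap, anything else through a WC vmap of the whole object.
 */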
static int
__cpu_check_shmem(struct drm_i915_gem_object *obj, u32 dword, u32 val)
{
	unsigned int needs_flush;
	unsigned long n;
	int err;

	i915_gem_object_lock(obj, NULL);
	err = i915_gem_object_prepare_read(obj, &needs_flush);
	if (err)
		goto err_unlock;

	for (n = 0; n < obj->base.size >> PAGE_SHIFT; ++n) {
		u32 *ptr = kmap_atomic(i915_gem_object_get_page(obj, n));

		if (needs_flush & CLFLUSH_BEFORE)
			drm_clflush_virt_range(ptr, PAGE_SIZE);

		if (ptr[dword] != val) {
			pr_err("n=%lu ptr[%u]=%u, val=%u\n",
			       n, dword, ptr[dword], val);
			kunmap_atomic(ptr);
			err = -EINVAL;
			break;
		}

		kunmap_atomic(ptr);
	}

	i915_gem_object_finish_access(obj);
err_unlock:
	i915_gem_object_unlock(obj);

	return err;
}

static int __cpu_check_vmap(struct drm_i915_gem_object *obj, u32 dword, u32 val)
{
	unsigned long n = obj->base.size >> PAGE_SHIFT;
	u32 *ptr;
	int err;

	err = i915_gem_object_wait(obj, 0, MAX_SCHEDULE_TIMEOUT);
	if (err)
		return err;

	ptr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	ptr += dword;
	while (n--) {
		if (*ptr != val) {
			pr_err("base[%u]=%08x, val=%08x\n",
			       dword, *ptr, val);
			err = -EINVAL;
			break;
		}

		ptr += PAGE_SIZE / sizeof(*ptr);
	}

	i915_gem_object_unpin_map(obj);
	return err;
}

static int cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
{
	if (i915_gem_object_has_struct_page(obj))
		return __cpu_check_shmem(obj, dword, val);
	else
		return __cpu_check_vmap(obj, dword, val);
}

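/*
 * Bind the object at the requested offset, do a single GPU dword write and
 * then verify it from the CPU.
 */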
static int __igt_write_huge(struct intel_context *ce,
			    struct drm_i915_gem_object *obj,
			    u64 size, u64 offset,
			    u32 dword, u32 val)
{
	unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
	struct i915_vma *vma;
	int err;

	vma = i915_vma_instance(obj, ce->vm, NULL);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	err = i915_vma_unbind(vma);
	if (err)
		return err;

	err = i915_vma_pin(vma, size, 0, flags | offset);
	if (err) {
		/*
		 * The ggtt may have some pages reserved so
		 * refrain from erroring out.
		 */
		if (err == -ENOSPC && i915_is_ggtt(ce->vm))
			err = 0;

		return err;
	}

	err = igt_check_page_sizes(vma);
	if (err)
		goto out_vma_unpin;

	err = gpu_write(ce, vma, dword, val);
	if (err) {
		pr_err("gpu-write failed at offset=%llx\n", offset);
		goto out_vma_unpin;
	}

	err = cpu_check(obj, dword, val);
	if (err) {
		pr_err("cpu-check failed at offset=%llx\n", offset);
		goto out_vma_unpin;
	}

out_vma_unpin:
	i915_vma_unpin(vma);
	return err;
}

static int igt_write_huge(struct i915_gem_context *ctx,
|
|
|
|
struct drm_i915_gem_object *obj)
|
2017-10-06 22:18:29 +00:00
|
|
|
{
|
2019-08-23 23:51:41 +00:00
|
|
|
struct i915_gem_engines *engines;
|
|
|
|
struct i915_gem_engines_iter it;
|
|
|
|
struct intel_context *ce;
|
2017-11-23 13:54:20 +00:00
|
|
|
I915_RND_STATE(prng);
|
|
|
|
IGT_TIMEOUT(end_time);
|
2017-10-06 22:18:29 +00:00
|
|
|
unsigned int max_page_size;
|
2019-08-23 23:51:41 +00:00
|
|
|
unsigned int count;
|
2017-10-06 22:18:29 +00:00
|
|
|
u64 max;
|
|
|
|
u64 num;
|
|
|
|
u64 size;
|
2017-11-23 13:54:20 +00:00
|
|
|
int *order;
|
|
|
|
int i, n;
|
2017-10-06 22:18:29 +00:00
|
|
|
int err = 0;
|
|
|
|
|
|
|
|
GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
|
|
|
|
|
|
|
|
size = obj->base.size;
|
|
|
|
if (obj->mm.page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
|
|
|
|
size = round_up(size, I915_GTT_PAGE_SIZE_2M);
|
|
|
|
|
2017-11-23 13:54:20 +00:00
|
|
|
n = 0;
|
2019-08-23 23:51:41 +00:00
|
|
|
count = 0;
|
|
|
|
max = U64_MAX;
|
|
|
|
for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
|
|
|
|
count++;
|
|
|
|
if (!intel_engine_can_store_dword(ce->engine))
|
2017-10-06 22:18:29 +00:00
|
|
|
continue;
|
|
|
|
|
2019-08-23 23:51:41 +00:00
|
|
|
max = min(max, ce->vm->total);
|
|
|
|
n++;
|
|
|
|
}
|
|
|
|
i915_gem_context_unlock_engines(ctx);
|
2017-11-23 13:54:20 +00:00
|
|
|
if (!n)
|
|
|
|
return 0;
|
2017-10-06 22:18:29 +00:00
|
|
|
|
2017-11-23 13:54:20 +00:00
|
|
|
/*
|
|
|
|
* To keep things interesting when alternating between engines in our
|
|
|
|
* randomized order, lets also make feeding to the same engine a few
|
|
|
|
* times in succession a possibility by enlarging the permutation array.
|
|
|
|
*/
|
2019-08-23 23:51:41 +00:00
|
|
|
order = i915_random_order(count * count, &prng);
|
2017-11-23 13:54:20 +00:00
|
|
|
if (!order)
|
|
|
|
return -ENOMEM;
|
2017-10-06 22:18:29 +00:00
|
|
|
|
2019-08-23 23:51:41 +00:00
|
|
|
max_page_size = rounddown_pow_of_two(obj->mm.page_sizes.sg);
|
|
|
|
max = div_u64(max - size, max_page_size);
|
|
|
|
|
2017-11-23 13:54:20 +00:00
|
|
|
/*
|
2017-11-23 13:54:21 +00:00
|
|
|
* Try various offsets in an ascending/descending fashion until we
|
|
|
|
* timeout -- we want to avoid issues hidden by effectively always using
|
|
|
|
* offset = 0.
|
2017-11-23 13:54:20 +00:00
|
|
|
*/
|
|
|
|
i = 0;
|
2019-08-23 23:51:41 +00:00
|
|
|
engines = i915_gem_context_lock_engines(ctx);
|
2017-11-23 13:54:20 +00:00
|
|
|
for_each_prime_number_from(num, 0, max) {
|
2017-11-23 13:54:21 +00:00
|
|
|
u64 offset_low = num * max_page_size;
|
|
|
|
u64 offset_high = (max - num) * max_page_size;
|
|
|
|
u32 dword = offset_in_page(num) / 4;
|
2019-08-23 23:51:41 +00:00
|
|
|
struct intel_context *ce;
|
2017-10-06 22:18:29 +00:00
|
|
|
|
2019-08-23 23:51:41 +00:00
|
|
|
ce = engines->engines[order[i] % engines->num_engines];
|
|
|
|
i = (i + 1) % (count * count);
|
|
|
|
if (!ce || !intel_engine_can_store_dword(ce->engine))
|
|
|
|
continue;
|
2017-10-06 22:18:29 +00:00
|
|
|
|
2018-10-29 20:37:34 +00:00
|
|
|
/*
|
|
|
|
* In order to utilize 64K pages we need to both pad the vma
|
|
|
|
* size and ensure the vma offset is at the start of the pt
|
|
|
|
* boundary, however to improve coverage we opt for testing both
|
|
|
|
* aligned and unaligned offsets.
|
|
|
|
*/
|
|
|
|
if (obj->mm.page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
|
|
|
|
offset_low = round_down(offset_low,
|
|
|
|
I915_GTT_PAGE_SIZE_2M);
|
|
|
|
|
2019-08-23 23:51:41 +00:00
|
|
|
err = __igt_write_huge(ce, obj, size, offset_low,
|
2018-10-29 20:37:34 +00:00
|
|
|
dword, num + 1);
|
2017-11-23 13:54:21 +00:00
|
|
|
if (err)
|
|
|
|
break;
|
2017-11-23 13:54:20 +00:00
|
|
|
|
2019-08-23 23:51:41 +00:00
|
|
|
err = __igt_write_huge(ce, obj, size, offset_high,
|
2018-10-29 20:37:34 +00:00
|
|
|
dword, num + 1);
|
2017-11-23 13:54:21 +00:00
|
|
|
if (err)
|
|
|
|
break;
|
2017-11-23 13:54:20 +00:00
|
|
|
|
|
|
|
if (igt_timeout(end_time,
|
2019-08-23 23:51:41 +00:00
|
|
|
"%s timed out on %s, offset_low=%llx offset_high=%llx, max_page_size=%x\n",
|
|
|
|
__func__, ce->engine->name, offset_low, offset_high,
|
2018-10-29 20:37:34 +00:00
|
|
|
max_page_size))
|
2017-11-23 13:54:20 +00:00
|
|
|
break;
|
2017-10-06 22:18:29 +00:00
|
|
|
}
|
2019-08-23 23:51:41 +00:00
|
|
|
i915_gem_context_unlock_engines(ctx);
|
2017-10-06 22:18:29 +00:00
|
|
|
|
2017-11-23 13:54:20 +00:00
|
|
|
kfree(order);
|
2017-10-06 22:18:29 +00:00
|
|
|
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
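/*
 * Backend constructors used by the smoke and sanity tests below: each returns
 * a GEM object of the requested size (or an ERR_PTR) from a different backing
 * store -- internal pages, shmem/THP, plain system pages or device local
 * memory.
 */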
typedef struct drm_i915_gem_object *
(*igt_create_fn)(struct drm_i915_private *i915, u32 size, u32 flags);

static inline bool igt_can_allocate_thp(struct drm_i915_private *i915)
{
	return i915->mm.gemfs && has_transparent_hugepage();
}

static struct drm_i915_gem_object *
igt_create_shmem(struct drm_i915_private *i915, u32 size, u32 flags)
{
	if (!igt_can_allocate_thp(i915)) {
		pr_info("%s missing THP support, skipping\n", __func__);
		return ERR_PTR(-ENODEV);
	}

	return i915_gem_object_create_shmem(i915, size);
}

static struct drm_i915_gem_object *
igt_create_internal(struct drm_i915_private *i915, u32 size, u32 flags)
{
	return i915_gem_object_create_internal(i915, size);
}

static struct drm_i915_gem_object *
igt_create_system(struct drm_i915_private *i915, u32 size, u32 flags)
{
	return huge_pages_object(i915, size, size);
}

static struct drm_i915_gem_object *
igt_create_local(struct drm_i915_private *i915, u32 size, u32 flags)
{
	return i915_gem_object_create_lmem(i915, size, flags);
}

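/*
 * Pick a random page-aligned size that is at least min_page_size and below
 * twice max_page_size.
 */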
static u32 igt_random_size(struct rnd_state *prng,
			   u32 min_page_size,
			   u32 max_page_size)
{
	u64 mask;
	u32 size;

	GEM_BUG_ON(!is_power_of_2(min_page_size));
	GEM_BUG_ON(!is_power_of_2(max_page_size));
	GEM_BUG_ON(min_page_size < PAGE_SIZE);
	GEM_BUG_ON(min_page_size > max_page_size);

	mask = ((max_page_size << 1ULL) - 1) & PAGE_MASK;
	size = prandom_u32_state(prng) & mask;
	if (size < min_page_size)
		size |= min_page_size;

	return size;
}

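/*
 * Smoke test: for each backend allocate a randomly sized object, halving the
 * size and retrying whenever the backend or the system cannot satisfy the
 * request, then exercise it with igt_write_huge().
 */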
static int igt_ppgtt_smoke_huge(void *arg)
{
	struct i915_gem_context *ctx = arg;
	struct drm_i915_private *i915 = ctx->i915;
	struct drm_i915_gem_object *obj;
	I915_RND_STATE(prng);
	struct {
		igt_create_fn fn;
		u32 min;
		u32 max;
	} backends[] = {
		{ igt_create_internal, SZ_64K, SZ_2M,  },
		{ igt_create_shmem,    SZ_64K, SZ_32M, },
		{ igt_create_local,    SZ_64K, SZ_1G,  },
	};
	int err;
	int i;

	/*
	 * Sanity check that the HW uses huge pages correctly through our
	 * various backends -- ensure that our writes land in the right place.
	 */

	for (i = 0; i < ARRAY_SIZE(backends); ++i) {
		u32 min = backends[i].min;
		u32 max = backends[i].max;
		u32 size = max;
try_again:
		size = igt_random_size(&prng, min, rounddown_pow_of_two(size));

		obj = backends[i].fn(i915, size, 0);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			if (err == -E2BIG) {
				size >>= 1;
				goto try_again;
			} else if (err == -ENODEV) {
				err = 0;
				continue;
			}

			return err;
		}

		err = i915_gem_object_pin_pages_unlocked(obj);
		if (err) {
			if (err == -ENXIO || err == -E2BIG) {
				i915_gem_object_put(obj);
				size >>= 1;
				goto try_again;
			}
			goto out_put;
		}

		if (obj->mm.page_sizes.phys < min) {
			pr_info("%s unable to allocate huge-page(s) with size=%u, i=%d\n",
				__func__, size, i);
			err = -ENOMEM;
			goto out_unpin;
		}

		err = igt_write_huge(ctx, obj);
		if (err) {
			pr_err("%s write-huge failed with size=%u, i=%d\n",
			       __func__, size, i);
		}
out_unpin:
		i915_gem_object_lock(obj, NULL);
		i915_gem_object_unpin_pages(obj);
		__i915_gem_object_put_pages(obj);
		i915_gem_object_unlock(obj);
out_put:
		i915_gem_object_put(obj);

		if (err == -ENOMEM || err == -ENXIO)
			err = 0;

		if (err)
			break;

		cond_resched();
	}

	return err;
}

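/*
 * Walk a fixed table of object-size/page-size combinations across the system
 * and local-memory backends, overriding the sg page sizes where the HW
 * supports them, and check that the GPU writes land where expected.
 */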
static int igt_ppgtt_sanity_check(void *arg)
{
	struct i915_gem_context *ctx = arg;
	struct drm_i915_private *i915 = ctx->i915;
	unsigned int supported = INTEL_INFO(i915)->page_sizes;
	struct {
		igt_create_fn fn;
		unsigned int flags;
	} backends[] = {
		{ igt_create_system, 0,                        },
		{ igt_create_local,  0,                        },
		{ igt_create_local,  I915_BO_ALLOC_CONTIGUOUS, },
	};
	struct {
		u32 size;
		u32 pages;
	} combos[] = {
		{ SZ_64K,		SZ_64K		},
		{ SZ_2M,		SZ_2M		},
		{ SZ_2M,		SZ_64K		},
		{ SZ_2M - SZ_64K,	SZ_64K		},
		{ SZ_2M - SZ_4K,	SZ_64K | SZ_4K	},
		{ SZ_2M + SZ_4K,	SZ_64K | SZ_4K	},
		{ SZ_2M + SZ_4K,	SZ_2M  | SZ_4K	},
		{ SZ_2M + SZ_64K,	SZ_2M  | SZ_64K },
	};
	int i, j;
	int err;

	if (supported == I915_GTT_PAGE_SIZE_4K)
		return 0;

	/*
	 * Sanity check that the HW behaves with a limited set of combinations.
	 * We already have a bunch of randomised testing, which should give us
	 * a decent amount of variation between runs, however we should keep
	 * this to limit the chances of introducing a temporary regression, by
	 * testing the most obvious cases that might make something blow up.
	 */

	for (i = 0; i < ARRAY_SIZE(backends); ++i) {
		for (j = 0; j < ARRAY_SIZE(combos); ++j) {
			struct drm_i915_gem_object *obj;
			u32 size = combos[j].size;
			u32 pages = combos[j].pages;

			obj = backends[i].fn(i915, size, backends[i].flags);
			if (IS_ERR(obj)) {
				err = PTR_ERR(obj);
				if (err == -ENODEV) {
					pr_info("Device lacks local memory, skipping\n");
					err = 0;
					break;
				}

				return err;
			}

			err = i915_gem_object_pin_pages_unlocked(obj);
			if (err) {
				i915_gem_object_put(obj);
				goto out;
			}

			GEM_BUG_ON(pages > obj->base.size);
			pages = pages & supported;

			if (pages)
				obj->mm.page_sizes.sg = pages;

			err = igt_write_huge(ctx, obj);

			i915_gem_object_lock(obj, NULL);
			i915_gem_object_unpin_pages(obj);
			__i915_gem_object_put_pages(obj);
			i915_gem_object_unlock(obj);
			i915_gem_object_put(obj);

			if (err) {
				pr_err("%s write-huge failed with size=%u pages=%u i=%d, j=%d\n",
				       __func__, size, pages, i, j);
				goto out;
			}
		}

		cond_resched();
	}

out:
	if (err == -ENOMEM)
		err = 0;

	return err;
}

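/*
 * Drop the gemfs mount behind the shmem backend and make sure object creation
 * and binding still work via plain tmpfs, i.e. the fallback path holds up.
 */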
static int igt_tmpfs_fallback(void *arg)
{
	struct i915_gem_context *ctx = arg;
	struct drm_i915_private *i915 = ctx->i915;
	struct vfsmount *gemfs = i915->mm.gemfs;
	struct i915_address_space *vm = i915_gem_context_get_vm_rcu(ctx);
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	u32 *vaddr;
	int err = 0;

	/*
	 * Make sure that we don't burst into a ball of flames upon falling
	 * back to tmpfs, which we rely on if, on the off-chance, we encounter
	 * a failure when setting up gemfs.
	 */

	i915->mm.gemfs = NULL;

	obj = i915_gem_object_create_shmem(i915, PAGE_SIZE);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto out_restore;
	}

	vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto out_put;
	}
	*vaddr = 0xdeadbeaf;

	__i915_gem_object_flush_map(obj, 0, 64);
	i915_gem_object_unpin_map(obj);

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto out_put;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		goto out_put;

	err = igt_check_page_sizes(vma);

	i915_vma_unpin(vma);
out_put:
	i915_gem_object_put(obj);
out_restore:
	i915->mm.gemfs = gemfs;

	i915_vm_put(vm);
	return err;
}

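/*
 * Fill a THP-backed object from every capable engine, let the shrinker
 * reclaim the unpinned pages, then repopulate the object and verify that the
 * earlier writes survived.
 */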
static int igt_shrink_thp(void *arg)
{
	struct i915_gem_context *ctx = arg;
	struct drm_i915_private *i915 = ctx->i915;
	struct i915_address_space *vm = i915_gem_context_get_vm_rcu(ctx);
	struct drm_i915_gem_object *obj;
	struct i915_gem_engines_iter it;
	struct intel_context *ce;
	struct i915_vma *vma;
	unsigned int flags = PIN_USER;
	unsigned int n;
	int err = 0;

	/*
	 * Sanity check shrinking huge-paged object -- make sure nothing blows
	 * up.
	 */

	if (!igt_can_allocate_thp(i915)) {
		pr_info("missing THP support, skipping\n");
		goto out_vm;
	}

	obj = i915_gem_object_create_shmem(i915, SZ_2M);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto out_vm;
	}

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto out_put;
	}

	err = i915_vma_pin(vma, 0, 0, flags);
	if (err)
		goto out_put;

	if (obj->mm.page_sizes.phys < I915_GTT_PAGE_SIZE_2M) {
		pr_info("failed to allocate THP, finishing test early\n");
		goto out_unpin;
	}

	err = igt_check_page_sizes(vma);
	if (err)
		goto out_unpin;

	n = 0;

	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
		if (!intel_engine_can_store_dword(ce->engine))
			continue;

		err = gpu_write(ce, vma, n++, 0xdeadbeaf);
		if (err)
			break;
	}
	i915_gem_context_unlock_engines(ctx);
	i915_vma_unpin(vma);
	if (err)
		goto out_put;

	/*
	 * Now that the pages are *unpinned* shrink-all should invoke
	 * shmem to truncate our pages.
	 */
	i915_gem_shrink_all(i915);
	if (i915_gem_object_has_pages(obj)) {
		pr_err("shrink-all didn't truncate the pages\n");
		err = -EINVAL;
		goto out_put;
	}

	if (obj->mm.page_sizes.sg || obj->mm.page_sizes.phys) {
		pr_err("residual page-size bits left\n");
		err = -EINVAL;
		goto out_put;
	}

	err = i915_vma_pin(vma, 0, 0, flags);
	if (err)
		goto out_put;

	while (n--) {
		err = cpu_check(obj, n, 0xdeadbeaf);
		if (err)
			break;
	}

out_unpin:
	i915_vma_unpin(vma);
out_put:
	i915_gem_object_put(obj);
out_vm:
	i915_vm_put(vm);

	return err;
}

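/*
 * Mock-device entry point: fake a 48b full-PPGTT device and run the
 * page-table centric subtests without requiring real hardware.
 */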
int i915_gem_huge_page_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_mock_exhaust_device_supported_pages),
		SUBTEST(igt_mock_memory_region_huge_pages),
		SUBTEST(igt_mock_ppgtt_misaligned_dma),
		SUBTEST(igt_mock_ppgtt_huge_fill),
		SUBTEST(igt_mock_ppgtt_64K),
	};
	struct drm_i915_private *dev_priv;
	struct i915_ppgtt *ppgtt;
	int err;

	dev_priv = mock_gem_device();
	if (!dev_priv)
		return -ENOMEM;

	/* Pretend to be a device which supports the 48b PPGTT */
	mkwrite_device_info(dev_priv)->ppgtt_type = INTEL_PPGTT_FULL;
	mkwrite_device_info(dev_priv)->ppgtt_size = 48;

	ppgtt = i915_ppgtt_create(&dev_priv->gt);
	if (IS_ERR(ppgtt)) {
		err = PTR_ERR(ppgtt);
		goto out_unlock;
	}

	if (!i915_vm_is_4lvl(&ppgtt->vm)) {
		pr_err("failed to create 48b PPGTT\n");
		err = -EINVAL;
		goto out_put;
	}

	/* If we ever hit this then it's time to mock the 64K scratch */
	if (!i915_vm_has_scratch_64K(&ppgtt->vm)) {
		pr_err("PPGTT missing 64K scratch page\n");
		err = -EINVAL;
		goto out_put;
	}

	err = i915_subtests(tests, ppgtt);

out_put:
	i915_vm_put(&ppgtt->vm);
out_unlock:
	mock_destroy_device(dev_priv);
	return err;
}

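/*
 * Live entry point: run the huge-page subtests against real hardware in a
 * fresh context, setting vm->scrub_64K on the context's VM beforehand.
 */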
int i915_gem_huge_page_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_shrink_thp),
		SUBTEST(igt_tmpfs_fallback),
		SUBTEST(igt_ppgtt_smoke_huge),
		SUBTEST(igt_ppgtt_sanity_check),
	};
	struct i915_gem_context *ctx;
	struct i915_address_space *vm;
	struct file *file;
	int err;

	if (!HAS_PPGTT(i915)) {
		pr_info("PPGTT not supported, skipping live-selftests\n");
		return 0;
	}

	if (intel_gt_is_wedged(&i915->gt))
		return 0;

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	ctx = live_context(i915, file);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto out_file;
	}

	mutex_lock(&ctx->mutex);
	vm = i915_gem_context_vm(ctx);
	if (vm)
		WRITE_ONCE(vm->scrub_64K, true);
	mutex_unlock(&ctx->mutex);

	err = i915_subtests(tests, ctx);

out_file:
	fput(file);
	return err;
}