/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#include <linux/highmem.h>
#include <linux/prime_numbers.h>

#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_region.h"
#include "gem/i915_gem_ttm.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_migrate.h"
#include "i915_ttm_buddy_manager.h"

#include "huge_gem_object.h"
#include "i915_selftest.h"
#include "selftests/i915_random.h"
#include "selftests/igt_flush_test.h"
#include "selftests/igt_mmap.h"

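/*
 * Geometry of one tiling configuration under test: width and stride are in
 * bytes, height is in rows, size is log2 of the tile size in bytes, and
 * tiling/swizzle carry the I915_TILING_* / I915_BIT_6_SWIZZLE_* values
 * applied to the object.
 */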
struct tile {
	unsigned int width;
	unsigned int height;
	unsigned int stride;
	unsigned int size;
	unsigned int tiling;
	unsigned int swizzle;
};

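/*
 * Extract address bit @bit and shift it down to bit 6, mirroring how the
 * hardware folds upper address bits into bit 6 when swizzling is enabled.
 */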
static u64 swizzle_bit(unsigned int bit, u64 offset)
{
	return (offset & BIT_ULL(bit)) >> (bit - 6);
}

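/*
 * Translate a linear byte offset into the object into the byte offset at
 * which a write through a fenced GTT mapping is expected to land, given
 * the tiling geometry and bit-6 swizzle described by @tile.
 */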
static u64 tiled_offset(const struct tile *tile, u64 v)
{
	u64 x, y;

	if (tile->tiling == I915_TILING_NONE)
		return v;

	y = div64_u64_rem(v, tile->stride, &x);
	v = div64_u64_rem(y, tile->height, &y) * tile->stride * tile->height;

	if (tile->tiling == I915_TILING_X) {
		v += y * tile->width;
		v += div64_u64_rem(x, tile->width, &x) << tile->size;
		v += x;
	} else if (tile->width == 128) {
		const unsigned int ytile_span = 16;
		const unsigned int ytile_height = 512;

		v += y * ytile_span;
		v += div64_u64_rem(x, ytile_span, &x) * ytile_height;
		v += x;
	} else {
		const unsigned int ytile_span = 32;
		const unsigned int ytile_height = 256;

		v += y * ytile_span;
		v += div64_u64_rem(x, ytile_span, &x) * ytile_height;
		v += x;
	}

	switch (tile->swizzle) {
	case I915_BIT_6_SWIZZLE_9:
		v ^= swizzle_bit(9, v);
		break;
	case I915_BIT_6_SWIZZLE_9_10:
		v ^= swizzle_bit(9, v) ^ swizzle_bit(10, v);
		break;
	case I915_BIT_6_SWIZZLE_9_11:
		v ^= swizzle_bit(9, v) ^ swizzle_bit(11, v);
		break;
	case I915_BIT_6_SWIZZLE_9_10_11:
		v ^= swizzle_bit(9, v) ^ swizzle_bit(10, v) ^ swizzle_bit(11, v);
		break;
	}

	return v;
}

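/*
 * Check a single randomly chosen partial mapping: pin the chunk of the
 * object around one random page into the mappable aperture, write the page
 * index through the fenced iomap, then verify on the CPU side that the
 * value landed at the offset predicted by tiled_offset().
 */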
static int check_partial_mapping(struct drm_i915_gem_object *obj,
				 const struct tile *tile,
				 struct rnd_state *prng)
{
	const unsigned long npages = obj->base.size / PAGE_SIZE;
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_ggtt_view view;
	struct i915_vma *vma;
	unsigned long page;
	u32 __iomem *io;
	struct page *p;
	unsigned int n;
	u64 offset;
	u32 *cpu;
	int err;

	err = i915_gem_object_set_tiling(obj, tile->tiling, tile->stride);
	if (err) {
		pr_err("Failed to set tiling mode=%u, stride=%u, err=%d\n",
		       tile->tiling, tile->stride, err);
		return err;
	}

	GEM_BUG_ON(i915_gem_object_get_tiling(obj) != tile->tiling);
	GEM_BUG_ON(i915_gem_object_get_stride(obj) != tile->stride);

	i915_gem_object_lock(obj, NULL);
	err = i915_gem_object_set_to_gtt_domain(obj, true);
	i915_gem_object_unlock(obj);
	if (err) {
		pr_err("Failed to flush to GTT write domain; err=%d\n", err);
		return err;
	}

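	/* Pick one page at random and map only the chunk around it. */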
	page = i915_prandom_u32_max_state(npages, prng);
	view = compute_partial_view(obj, page, MIN_CHUNK_PAGES);

	vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
	if (IS_ERR(vma)) {
		pr_err("Failed to pin partial view: offset=%lu; err=%d\n",
		       page, (int)PTR_ERR(vma));
		return PTR_ERR(vma);
	}

	n = page - view.partial.offset;
	GEM_BUG_ON(n >= view.partial.size);

	io = i915_vma_pin_iomap(vma);
	i915_vma_unpin(vma);
	if (IS_ERR(io)) {
		pr_err("Failed to iomap partial view: offset=%lu; err=%d\n",
		       page, (int)PTR_ERR(io));
		err = PTR_ERR(io);
		goto out;
	}

	iowrite32(page, io + n * PAGE_SIZE / sizeof(*io));
	i915_vma_unpin_iomap(vma);

	offset = tiled_offset(tile, page << PAGE_SHIFT);
	if (offset >= obj->base.size)
		goto out;

	intel_gt_flush_ggtt_writes(to_gt(i915));

	p = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
	cpu = kmap(p) + offset_in_page(offset);
	drm_clflush_virt_range(cpu, sizeof(*cpu));
	if (*cpu != (u32)page) {
		pr_err("Partial view for %lu [%u] (offset=%llu, size=%u [%llu, row size %u], fence=%d, tiling=%d, stride=%d) misalignment, expected write to page (%llu + %u [0x%llx]) of 0x%x, found 0x%x\n",
		       page, n,
		       view.partial.offset,
		       view.partial.size,
		       vma->size >> PAGE_SHIFT,
		       tile->tiling ? tile_row_pages(obj) : 0,
		       vma->fence ? vma->fence->id : -1, tile->tiling, tile->stride,
		       offset >> PAGE_SHIFT,
		       (unsigned int)offset_in_page(offset),
		       offset,
		       (u32)page, *cpu);
		err = -EINVAL;
	}
	*cpu = 0;
	drm_clflush_virt_range(cpu, sizeof(*cpu));
	kunmap(p);

out:
	i915_gem_object_lock(obj, NULL);
	i915_vma_destroy(vma);
	i915_gem_object_unlock(obj);
	return err;
}

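/*
 * Exhaustive variant of check_partial_mapping(): walk many chunks of the
 * object for the given tiling, verifying each partial GGTT write, and bail
 * out with -EINTR once the caller's timeout expires.
 */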
static int check_partial_mappings(struct drm_i915_gem_object *obj,
				  const struct tile *tile,
				  unsigned long end_time)
{
	const unsigned int nreal = obj->scratch / PAGE_SIZE;
	const unsigned long npages = obj->base.size / PAGE_SIZE;
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_vma *vma;
	unsigned long page;
	int err;

	err = i915_gem_object_set_tiling(obj, tile->tiling, tile->stride);
	if (err) {
		pr_err("Failed to set tiling mode=%u, stride=%u, err=%d\n",
		       tile->tiling, tile->stride, err);
		return err;
	}

	GEM_BUG_ON(i915_gem_object_get_tiling(obj) != tile->tiling);
	GEM_BUG_ON(i915_gem_object_get_stride(obj) != tile->stride);

	i915_gem_object_lock(obj, NULL);
	err = i915_gem_object_set_to_gtt_domain(obj, true);
	i915_gem_object_unlock(obj);
	if (err) {
		pr_err("Failed to flush to GTT write domain; err=%d\n", err);
		return err;
	}

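	/* Step through prime page offsets to sample a spread of chunks. */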
	for_each_prime_number_from(page, 1, npages) {
		struct i915_ggtt_view view =
			compute_partial_view(obj, page, MIN_CHUNK_PAGES);
		u32 __iomem *io;
		struct page *p;
		unsigned int n;
		u64 offset;
		u32 *cpu;

		GEM_BUG_ON(view.partial.size > nreal);
		cond_resched();

		vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
		if (IS_ERR(vma)) {
			pr_err("Failed to pin partial view: offset=%lu; err=%d\n",
			       page, (int)PTR_ERR(vma));
			return PTR_ERR(vma);
		}

		n = page - view.partial.offset;
		GEM_BUG_ON(n >= view.partial.size);

		io = i915_vma_pin_iomap(vma);
		i915_vma_unpin(vma);
		if (IS_ERR(io)) {
			pr_err("Failed to iomap partial view: offset=%lu; err=%d\n",
			       page, (int)PTR_ERR(io));
			return PTR_ERR(io);
		}

		iowrite32(page, io + n * PAGE_SIZE / sizeof(*io));
		i915_vma_unpin_iomap(vma);

		offset = tiled_offset(tile, page << PAGE_SHIFT);
		if (offset >= obj->base.size)
			continue;

		intel_gt_flush_ggtt_writes(to_gt(i915));

		p = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
		cpu = kmap(p) + offset_in_page(offset);
		drm_clflush_virt_range(cpu, sizeof(*cpu));
		if (*cpu != (u32)page) {
			pr_err("Partial view for %lu [%u] (offset=%llu, size=%u [%llu, row size %u], fence=%d, tiling=%d, stride=%d) misalignment, expected write to page (%llu + %u [0x%llx]) of 0x%x, found 0x%x\n",
			       page, n,
			       view.partial.offset,
			       view.partial.size,
			       vma->size >> PAGE_SHIFT,
			       tile->tiling ? tile_row_pages(obj) : 0,
			       vma->fence ? vma->fence->id : -1, tile->tiling, tile->stride,
			       offset >> PAGE_SHIFT,
			       (unsigned int)offset_in_page(offset),
			       offset,
			       (u32)page, *cpu);
			err = -EINVAL;
		}
		*cpu = 0;
		drm_clflush_virt_range(cpu, sizeof(*cpu));
		kunmap(p);
		if (err)
			return err;

		i915_gem_object_lock(obj, NULL);
		i915_vma_destroy(vma);
		i915_gem_object_unlock(obj);

		if (igt_timeout(end_time,
				"%s: timed out after tiling=%d stride=%d\n",
				__func__, tile->tiling, tile->stride))
			return -EINTR;
	}

	return 0;
}

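/*
 * Fill in the tile geometry for the requested tiling mode on this platform
 * and return the maximum pitch, expressed in tile widths, that the fence
 * registers can describe.
 */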
static unsigned int
setup_tile_size(struct tile *tile, struct drm_i915_private *i915)
{
	if (GRAPHICS_VER(i915) <= 2) {
		tile->height = 16;
		tile->width = 128;
		tile->size = 11;
	} else if (tile->tiling == I915_TILING_Y &&
		   HAS_128_BYTE_Y_TILING(i915)) {
		tile->height = 32;
		tile->width = 128;
		tile->size = 12;
	} else {
		tile->height = 8;
		tile->width = 512;
		tile->size = 12;
	}

	if (GRAPHICS_VER(i915) < 4)
		return 8192 / tile->width;
	else if (GRAPHICS_VER(i915) < 7)
		return 128 * I965_FENCE_MAX_PITCH_VAL / tile->width;
	else
		return 128 * GEN7_FENCE_MAX_PITCH_VAL / tile->width;
}

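/*
 * Time-bounded exhaustive sweep: for each tiling mode and a selection of
 * strides, verify partial GGTT mmaps of an object too large to map whole.
 */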
static int igt_partial_tiling(void *arg)
{
	const unsigned int nreal = 1 << 12; /* largest tile row x2 */
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	intel_wakeref_t wakeref;
	int tiling;
	int err;

	if (!i915_ggtt_has_aperture(to_gt(i915)->ggtt))
		return 0;

	/* We want to check the page mapping and fencing of a large object
	 * mmapped through the GTT. The object we create is larger than can
	 * possibly be mmapped as a whole, and so we must use partial GGTT vma.
	 * We then check that a write through each partial GGTT vma ends up
	 * in the right set of pages within the object, and with the expected
	 * tiling, which we verify by manual swizzling.
	 */

	obj = huge_gem_object(i915,
			      nreal << PAGE_SHIFT,
			      (1 + next_prime_number(to_gt(i915)->ggtt->vm.total >> PAGE_SHIFT)) << PAGE_SHIFT);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	err = i915_gem_object_pin_pages_unlocked(obj);
	if (err) {
		pr_err("Failed to allocate %u pages (%lu total), err=%d\n",
		       nreal, obj->base.size / PAGE_SIZE, err);
		goto out;
	}

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

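	/* First check an untiled (linear) mapping as a baseline. */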
	if (1) {
		IGT_TIMEOUT(end);
		struct tile tile;

		tile.height = 1;
		tile.width = 1;
		tile.size = 0;
		tile.stride = 0;
		tile.swizzle = I915_BIT_6_SWIZZLE_NONE;
		tile.tiling = I915_TILING_NONE;

		err = check_partial_mappings(obj, &tile, end);
		if (err && err != -EINTR)
			goto out_unlock;
	}

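	/*
	 * Then sweep X and Y tiling over a range of strides: successive
	 * halvings of the maximum pitch, their immediate neighbours on
	 * gen4+, and every prime pitch up to the maximum on gen4+.
	 */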
	for (tiling = I915_TILING_X; tiling <= I915_TILING_Y; tiling++) {
		IGT_TIMEOUT(end);
		unsigned int max_pitch;
		unsigned int pitch;
		struct tile tile;

		if (i915->quirks & QUIRK_PIN_SWIZZLED_PAGES)
			/*
			 * The swizzling pattern is actually unknown as it
			 * varies based on physical address of each page.
			 * See i915_gem_detect_bit_6_swizzle().
			 */
			break;

		tile.tiling = tiling;
		switch (tiling) {
		case I915_TILING_X:
			tile.swizzle = to_gt(i915)->ggtt->bit_6_swizzle_x;
			break;
		case I915_TILING_Y:
			tile.swizzle = to_gt(i915)->ggtt->bit_6_swizzle_y;
			break;
		}

		GEM_BUG_ON(tile.swizzle == I915_BIT_6_SWIZZLE_UNKNOWN);
		if (tile.swizzle == I915_BIT_6_SWIZZLE_9_17 ||
		    tile.swizzle == I915_BIT_6_SWIZZLE_9_10_17)
			continue;

		max_pitch = setup_tile_size(&tile, i915);

		for (pitch = max_pitch; pitch; pitch >>= 1) {
			tile.stride = tile.width * pitch;
			err = check_partial_mappings(obj, &tile, end);
			if (err == -EINTR)
				goto next_tiling;
			if (err)
				goto out_unlock;

			if (pitch > 2 && GRAPHICS_VER(i915) >= 4) {
				tile.stride = tile.width * (pitch - 1);
				err = check_partial_mappings(obj, &tile, end);
				if (err == -EINTR)
					goto next_tiling;
				if (err)
					goto out_unlock;
			}

			if (pitch < max_pitch && GRAPHICS_VER(i915) >= 4) {
				tile.stride = tile.width * (pitch + 1);
				err = check_partial_mappings(obj, &tile, end);
				if (err == -EINTR)
					goto next_tiling;
				if (err)
					goto out_unlock;
			}
		}

		if (GRAPHICS_VER(i915) >= 4) {
			for_each_prime_number(pitch, max_pitch) {
				tile.stride = tile.width * pitch;
				err = check_partial_mappings(obj, &tile, end);
				if (err == -EINTR)
					goto next_tiling;
				if (err)
					goto out_unlock;
			}
		}

next_tiling: ;
	}

out_unlock:
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	i915_gem_object_unpin_pages(obj);
out:
	i915_gem_object_put(obj);
	return err;
}

static int igt_smoke_tiling(void *arg)
|
|
|
|
|
{
|
|
|
|
|
const unsigned int nreal = 1 << 12; /* largest tile row x2 */
|
|
|
|
|
struct drm_i915_private *i915 = arg;
|
|
|
|
|
struct drm_i915_gem_object *obj;
|
|
|
|
|
intel_wakeref_t wakeref;
|
|
|
|
|
I915_RND_STATE(prng);
|
|
|
|
|
unsigned long count;
|
|
|
|
|
IGT_TIMEOUT(end);
|
|
|
|
|
int err;
|
|
|
|
|
|
2021-12-19 23:24:57 +02:00
|
|
|
if (!i915_ggtt_has_aperture(to_gt(i915)->ggtt))
|
2019-10-29 09:58:56 +00:00
|
|
|
return 0;
|
|
|
|
|
|
2019-09-10 13:10:09 +01:00
|
|
|
/*
|
|
|
|
|
* igt_partial_tiling() does an exhaustive check of partial tiling
|
|
|
|
|
* chunking, but will undoubtedly run out of time. Here, we do a
|
|
|
|
|
* randomised search and hope that over many 1s runs with different
|
|
|
|
|
* seeds we will achieve a thorough check.
|
|
|
|
|
*
|
|
|
|
|
* Remember to look at the st_seed if we see a flip-flop in BAT!
|
|
|
|
|
*/
|
|
|
|
|
|
|
|
|
|
if (i915->quirks & QUIRK_PIN_SWIZZLED_PAGES)
|
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
|
|
obj = huge_gem_object(i915,
|
|
|
|
|
nreal << PAGE_SHIFT,
|
2021-12-19 23:24:57 +02:00
|
|
|
(1 + next_prime_number(to_gt(i915)->ggtt->vm.total >> PAGE_SHIFT)) << PAGE_SHIFT);
|
2019-09-10 13:10:09 +01:00
|
|
|
if (IS_ERR(obj))
|
|
|
|
|
return PTR_ERR(obj);
|
|
|
|
|
|
2021-03-23 16:50:36 +01:00
|
|
|
err = i915_gem_object_pin_pages_unlocked(obj);
|
2019-09-10 13:10:09 +01:00
|
|
|
if (err) {
|
|
|
|
|
pr_err("Failed to allocate %u pages (%lu total), err=%d\n",
|
|
|
|
|
nreal, obj->base.size / PAGE_SIZE, err);
|
|
|
|
|
goto out;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
|
|
|
|
|
|
|
|
|
|
count = 0;
|
|
|
|
|
do {
|
|
|
|
|
struct tile tile;
|
|
|
|
|
|
|
|
|
|
tile.tiling =
|
|
|
|
|
i915_prandom_u32_max_state(I915_TILING_Y + 1, &prng);
|
|
|
|
|
switch (tile.tiling) {
|
|
|
|
|
case I915_TILING_NONE:
|
|
|
|
|
tile.height = 1;
|
|
|
|
|
tile.width = 1;
|
|
|
|
|
tile.size = 0;
|
|
|
|
|
tile.stride = 0;
|
|
|
|
|
tile.swizzle = I915_BIT_6_SWIZZLE_NONE;
|
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
case I915_TILING_X:
|
2021-12-19 23:24:57 +02:00
|
|
|
tile.swizzle = to_gt(i915)->ggtt->bit_6_swizzle_x;
|
2019-09-10 13:10:09 +01:00
|
|
|
break;
|
|
|
|
|
case I915_TILING_Y:
|
2021-12-19 23:24:57 +02:00
|
|
|
tile.swizzle = to_gt(i915)->ggtt->bit_6_swizzle_y;
|
2019-09-10 13:10:09 +01:00
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (tile.swizzle == I915_BIT_6_SWIZZLE_9_17 ||
|
|
|
|
|
tile.swizzle == I915_BIT_6_SWIZZLE_9_10_17)
|
|
|
|
|
continue;
|
|
|
|
|
|
|
|
|
|
if (tile.tiling != I915_TILING_NONE) {
|
|
|
|
|
unsigned int max_pitch = setup_tile_size(&tile, i915);
|
|
|
|
|
|
|
|
|
|
tile.stride =
|
|
|
|
|
i915_prandom_u32_max_state(max_pitch, &prng);
|
|
|
|
|
tile.stride = (1 + tile.stride) * tile.width;
|
2021-06-05 08:53:54 -07:00
|
|
|
if (GRAPHICS_VER(i915) < 4)
|
2019-09-10 13:10:09 +01:00
|
|
|
tile.stride = rounddown_pow_of_two(tile.stride);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
err = check_partial_mapping(obj, &tile, &prng);
|
|
|
|
|
if (err)
|
|
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
count++;
|
|
|
|
|
} while (!__igt_timeout(end, NULL));
|
|
|
|
|
|
|
|
|
|
pr_info("%s: Completed %lu trials\n", __func__, count);
|
|
|
|
|
|
|
|
|
|
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
|
|
|
|
|
i915_gem_object_unpin_pages(obj);
|
|
|
|
|
out:
|
|
|
|
|
i915_gem_object_put(obj);
|
|
|
|
|
return err;
|
|
|
|
|
}
|
|
|
|
|
|
2019-05-28 10:29:47 +01:00
|
|
|
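/*
 * Keep the object busy: submit a write to it from every uabi engine and
 * then drop our reference, leaving it alive only via its active refs.
 */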
static int make_obj_busy(struct drm_i915_gem_object *obj)
|
|
|
|
|
{
|
|
|
|
|
struct drm_i915_private *i915 = to_i915(obj->base.dev);
|
2019-07-04 22:23:43 +01:00
|
|
|
struct intel_engine_cs *engine;
|
2019-05-28 10:29:47 +01:00
|
|
|
|
2019-10-22 11:17:04 +01:00
|
|
|
for_each_uabi_engine(engine, i915) {
|
|
|
|
|
struct i915_request *rq;
|
|
|
|
|
struct i915_vma *vma;
|
2020-08-19 16:09:01 +02:00
|
|
|
struct i915_gem_ww_ctx ww;
|
2019-10-22 11:17:04 +01:00
|
|
|
int err;
|
2019-05-28 10:29:47 +01:00
|
|
|
|
2019-10-22 11:17:04 +01:00
|
|
|
vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
|
|
|
|
|
if (IS_ERR(vma))
|
|
|
|
|
return PTR_ERR(vma);
|
2019-05-28 10:29:47 +01:00
|
|
|
|
2020-08-19 16:09:01 +02:00
|
|
|
i915_gem_ww_ctx_init(&ww, false);
|
|
|
|
|
retry:
|
|
|
|
|
err = i915_gem_object_lock(obj, &ww);
|
|
|
|
|
if (!err)
|
|
|
|
|
err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_USER);
|
2019-10-22 11:17:04 +01:00
|
|
|
if (err)
|
2020-08-19 16:09:01 +02:00
|
|
|
goto err;
|
2019-05-28 10:29:47 +01:00
|
|
|
|
2019-11-25 10:58:56 +00:00
|
|
|
rq = intel_engine_create_kernel_request(engine);
|
2019-07-04 22:23:43 +01:00
|
|
|
if (IS_ERR(rq)) {
|
2020-08-19 16:09:01 +02:00
|
|
|
err = PTR_ERR(rq);
|
|
|
|
|
goto err_unpin;
|
2019-07-04 22:23:43 +01:00
|
|
|
}
|
2019-05-28 10:29:47 +01:00
|
|
|
|
2019-08-19 12:20:33 +01:00
|
|
|
err = i915_request_await_object(rq, vma->obj, true);
|
|
|
|
|
if (err == 0)
|
|
|
|
|
err = i915_vma_move_to_active(vma, rq,
|
|
|
|
|
EXEC_OBJECT_WRITE);
|
2019-07-04 22:23:43 +01:00
|
|
|
|
|
|
|
|
i915_request_add(rq);
|
2020-08-19 16:09:01 +02:00
|
|
|
err_unpin:
|
2019-10-22 11:17:04 +01:00
|
|
|
i915_vma_unpin(vma);
|
2020-08-19 16:09:01 +02:00
|
|
|
err:
|
|
|
|
|
if (err == -EDEADLK) {
|
|
|
|
|
err = i915_gem_ww_ctx_backoff(&ww);
|
|
|
|
|
if (!err)
|
|
|
|
|
goto retry;
|
|
|
|
|
}
|
|
|
|
|
i915_gem_ww_ctx_fini(&ww);
|
2019-10-22 11:17:04 +01:00
|
|
|
if (err)
|
|
|
|
|
return err;
|
2019-07-04 22:23:43 +01:00
|
|
|
}
|
2019-05-28 10:29:47 +01:00
|
|
|
|
2019-05-28 10:29:56 +01:00
|
|
|
i915_gem_object_put(obj); /* leave it only alive via its active ref */
|
2019-10-22 11:17:04 +01:00
|
|
|
return 0;
|
2019-05-28 10:29:47 +01:00
|
|
|
}
|
|
|
|
|
|
2021-07-14 14:28:33 +02:00
|
|
|
static enum i915_mmap_type default_mapping(struct drm_i915_private *i915)
|
|
|
|
|
{
|
|
|
|
|
if (HAS_LMEM(i915))
|
|
|
|
|
return I915_MMAP_TYPE_FIXED;
|
|
|
|
|
|
|
|
|
|
return I915_MMAP_TYPE_GTT;
|
|
|
|
|
}
|
|
|
|
|
|
2021-08-31 14:29:31 +02:00
|
|
|
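/*
 * On platforms with LMEM, create the object as a user object backed by
 * system memory; otherwise a plain internal object is sufficient.
 */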
static struct drm_i915_gem_object *
|
|
|
|
|
create_sys_or_internal(struct drm_i915_private *i915,
|
|
|
|
|
unsigned long size)
|
|
|
|
|
{
|
|
|
|
|
if (HAS_LMEM(i915)) {
|
|
|
|
|
struct intel_memory_region *sys_region =
|
|
|
|
|
i915->mm.regions[INTEL_REGION_SMEM];
|
|
|
|
|
|
|
|
|
|
return __i915_gem_object_create_user(i915, size, &sys_region, 1);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return i915_gem_object_create_internal(i915, size);
|
|
|
|
|
}
|
|
|
|
|
|
2019-05-28 10:29:47 +01:00
|
|
|
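/*
 * Try to assign an mmap offset to an object of the given size and check
 * that the outcome (0 or an error code) matches what the caller expects.
 */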
static bool assert_mmap_offset(struct drm_i915_private *i915,
|
|
|
|
|
unsigned long size,
|
|
|
|
|
int expected)
|
|
|
|
|
{
|
|
|
|
|
struct drm_i915_gem_object *obj;
|
2021-06-10 09:01:52 +02:00
|
|
|
u64 offset;
|
|
|
|
|
int ret;
|
2019-05-28 10:29:47 +01:00
|
|
|
|
2021-08-31 14:29:31 +02:00
|
|
|
obj = create_sys_or_internal(i915, size);
|
2019-05-28 10:29:47 +01:00
|
|
|
if (IS_ERR(obj))
|
2021-06-10 09:01:52 +02:00
|
|
|
return expected && expected == PTR_ERR(obj);
|
2019-05-28 10:29:47 +01:00
|
|
|
|
2021-07-14 14:28:33 +02:00
|
|
|
ret = __assign_mmap_offset(obj, default_mapping(i915), &offset, NULL);
|
2019-05-28 10:29:47 +01:00
|
|
|
i915_gem_object_put(obj);
|
|
|
|
|
|
2021-06-10 09:01:52 +02:00
|
|
|
return ret == expected;
|
2019-05-28 10:29:47 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
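/*
 * Unregister the shrinker and hold a GT wakeref so the retire worker cannot
 * reclaim objects behind the test's back; restore_retire_worker() undoes
 * this after flushing outstanding work.
 */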
static void disable_retire_worker(struct drm_i915_private *i915)
|
|
|
|
|
{
|
2019-08-06 13:42:59 +01:00
|
|
|
i915_gem_driver_unregister__shrinker(i915);
|
2021-12-14 21:33:35 +02:00
|
|
|
intel_gt_pm_get(to_gt(i915));
|
|
|
|
|
cancel_delayed_work_sync(&to_gt(i915)->requests.retire_work);
|
2019-05-28 10:29:47 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void restore_retire_worker(struct drm_i915_private *i915)
|
|
|
|
|
{
|
2019-10-04 14:40:02 +01:00
|
|
|
igt_flush_test(i915);
|
2021-12-14 21:33:35 +02:00
|
|
|
intel_gt_pm_put(to_gt(i915));
|
2019-08-06 13:42:59 +01:00
|
|
|
i915_gem_driver_register__shrinker(i915);
|
2019-05-28 10:29:47 +01:00
|
|
|
}
|
|
|
|
|
|
2019-07-11 07:51:59 +01:00
|
|
|
static void mmap_offset_lock(struct drm_i915_private *i915)
|
|
|
|
|
__acquires(&i915->drm.vma_offset_manager->vm_lock)
|
|
|
|
|
{
|
|
|
|
|
write_lock(&i915->drm.vma_offset_manager->vm_lock);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void mmap_offset_unlock(struct drm_i915_private *i915)
|
|
|
|
|
__releases(&i915->drm.vma_offset_manager->vm_lock)
|
|
|
|
|
{
|
|
|
|
|
write_unlock(&i915->drm.vma_offset_manager->vm_lock);
|
|
|
|
|
}
|
|
|
|
|
|
2019-05-28 10:29:47 +01:00
|
|
|
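/*
 * Trim the mmap offset space down to a single page, then check that a
 * one page object still fits, a larger one does not, and that busy but
 * dead objects are reaped to free up their offsets.
 */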
static int igt_mmap_offset_exhaustion(void *arg)
|
|
|
|
|
{
|
|
|
|
|
struct drm_i915_private *i915 = arg;
|
|
|
|
|
struct drm_mm *mm = &i915->drm.vma_offset_manager->vm_addr_space_mm;
|
|
|
|
|
struct drm_i915_gem_object *obj;
|
2019-11-11 12:27:06 +00:00
|
|
|
struct drm_mm_node *hole, *next;
|
2019-12-04 12:00:32 +00:00
|
|
|
int loop, err = 0;
|
2021-06-10 09:01:52 +02:00
|
|
|
u64 offset;
|
2021-08-31 14:29:31 +02:00
|
|
|
int enospc = HAS_LMEM(i915) ? -ENXIO : -ENOSPC;
|
2019-05-28 10:29:47 +01:00
|
|
|
|
|
|
|
|
/* Disable background reaper */
|
|
|
|
|
disable_retire_worker(i915);
|
2021-12-14 21:33:35 +02:00
|
|
|
GEM_BUG_ON(!to_gt(i915)->awake);
|
|
|
|
|
intel_gt_retire_requests(to_gt(i915));
|
2019-11-11 12:27:06 +00:00
|
|
|
i915_gem_drain_freed_objects(i915);
|
2019-05-28 10:29:47 +01:00
|
|
|
|
|
|
|
|
/* Trim the device mmap space to only a page */
|
2019-11-11 12:27:06 +00:00
|
|
|
mmap_offset_lock(i915);
|
|
|
|
|
loop = 1; /* PAGE_SIZE units */
|
|
|
|
|
list_for_each_entry_safe(hole, next, &mm->hole_stack, hole_stack) {
|
|
|
|
|
struct drm_mm_node *resv;
|
|
|
|
|
|
|
|
|
|
resv = kzalloc(sizeof(*resv), GFP_NOWAIT);
|
|
|
|
|
if (!resv) {
|
|
|
|
|
err = -ENOMEM;
|
|
|
|
|
goto out_park;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
resv->start = drm_mm_hole_node_start(hole) + loop;
|
|
|
|
|
resv->size = hole->hole_size - loop;
|
|
|
|
|
resv->color = -1ul;
|
|
|
|
|
loop = 0;
|
|
|
|
|
|
|
|
|
|
if (!resv->size) {
|
|
|
|
|
kfree(resv);
|
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
pr_debug("Reserving hole [%llx + %llx]\n",
|
|
|
|
|
resv->start, resv->size);
|
|
|
|
|
|
|
|
|
|
err = drm_mm_reserve_node(mm, resv);
|
2019-05-28 10:29:47 +01:00
|
|
|
if (err) {
|
|
|
|
|
pr_err("Failed to trim VMA manager, err=%d\n", err);
|
2019-11-11 12:27:06 +00:00
|
|
|
kfree(resv);
|
2019-05-28 10:29:47 +01:00
|
|
|
goto out_park;
|
|
|
|
|
}
|
|
|
|
|
}
|
2019-11-11 12:27:06 +00:00
|
|
|
GEM_BUG_ON(!list_is_singular(&mm->hole_stack));
|
|
|
|
|
mmap_offset_unlock(i915);
|
2019-05-28 10:29:47 +01:00
|
|
|
|
|
|
|
|
/* Just fits! */
|
|
|
|
|
if (!assert_mmap_offset(i915, PAGE_SIZE, 0)) {
|
|
|
|
|
pr_err("Unable to insert object into single page hole\n");
|
|
|
|
|
err = -EINVAL;
|
|
|
|
|
goto out;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Too large */
|
2021-08-31 14:29:31 +02:00
|
|
|
if (!assert_mmap_offset(i915, 2 * PAGE_SIZE, enospc)) {
|
2019-05-28 10:29:47 +01:00
|
|
|
pr_err("Unexpectedly succeeded in inserting too large object into single page hole\n");
|
|
|
|
|
err = -EINVAL;
|
|
|
|
|
goto out;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Fill the hole, further allocation attempts should then fail */
|
2021-08-31 14:29:31 +02:00
|
|
|
obj = create_sys_or_internal(i915, PAGE_SIZE);
|
2019-05-28 10:29:47 +01:00
|
|
|
if (IS_ERR(obj)) {
|
|
|
|
|
err = PTR_ERR(obj);
|
2021-06-10 09:01:52 +02:00
|
|
|
pr_err("Unable to create object for reclaimed hole\n");
|
2019-05-28 10:29:47 +01:00
|
|
|
goto out;
|
|
|
|
|
}
|
|
|
|
|
|
2021-07-14 14:28:33 +02:00
|
|
|
err = __assign_mmap_offset(obj, default_mapping(i915), &offset, NULL);
|
2021-06-10 09:01:52 +02:00
|
|
|
if (err) {
|
2019-05-28 10:29:47 +01:00
|
|
|
pr_err("Unable to insert object into reclaimed hole\n");
|
|
|
|
|
goto err_obj;
|
|
|
|
|
}
|
|
|
|
|
|
2021-08-31 14:29:31 +02:00
|
|
|
if (!assert_mmap_offset(i915, PAGE_SIZE, enospc)) {
|
2019-05-28 10:29:47 +01:00
|
|
|
pr_err("Unexpectedly succeeded in inserting object into no holes!\n");
|
|
|
|
|
err = -EINVAL;
|
|
|
|
|
goto err_obj;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
i915_gem_object_put(obj);
|
|
|
|
|
|
|
|
|
|
/* Now fill with busy dead objects that we expect to reap */
|
|
|
|
|
for (loop = 0; loop < 3; loop++) {
|
2021-12-14 21:33:35 +02:00
|
|
|
if (intel_gt_is_wedged(to_gt(i915)))
|
2019-05-28 10:29:47 +01:00
|
|
|
break;
|
|
|
|
|
|
|
|
|
|
obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
|
|
|
|
|
if (IS_ERR(obj)) {
|
|
|
|
|
err = PTR_ERR(obj);
|
|
|
|
|
goto out;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
err = make_obj_busy(obj);
|
|
|
|
|
if (err) {
|
|
|
|
|
pr_err("[loop %d] Failed to busy the object\n", loop);
|
|
|
|
|
goto err_obj;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
out:
|
2019-07-11 07:51:59 +01:00
|
|
|
mmap_offset_lock(i915);
|
2019-05-28 10:29:47 +01:00
|
|
|
out_park:
|
2019-11-11 12:27:06 +00:00
|
|
|
drm_mm_for_each_node_safe(hole, next, mm) {
|
|
|
|
|
if (hole->color != -1ul)
|
|
|
|
|
continue;
|
|
|
|
|
|
|
|
|
|
drm_mm_remove_node(hole);
|
|
|
|
|
kfree(hole);
|
|
|
|
|
}
|
|
|
|
|
mmap_offset_unlock(i915);
|
2019-05-28 10:29:47 +01:00
|
|
|
restore_retire_worker(i915);
|
|
|
|
|
return err;
|
|
|
|
|
err_obj:
|
|
|
|
|
i915_gem_object_put(obj);
|
|
|
|
|
goto out;
|
|
|
|
|
}
|
|
|
|
|
|
2020-01-03 20:41:36 +00:00
|
|
|
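/*
 * gtt_set() fills the object with POISON_INUSE through a mappable GGTT
 * iomap; gtt_check() later verifies that POISON_FREE written via the user
 * mmap actually reached the backing store.
 */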
static int gtt_set(struct drm_i915_gem_object *obj)
|
2019-11-07 18:06:00 +00:00
|
|
|
{
|
2020-01-03 20:41:36 +00:00
|
|
|
struct i915_vma *vma;
|
|
|
|
|
void __iomem *map;
|
|
|
|
|
int err = 0;
|
2019-11-07 18:06:00 +00:00
|
|
|
|
2020-01-03 20:41:36 +00:00
|
|
|
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
|
|
|
|
|
if (IS_ERR(vma))
|
|
|
|
|
return PTR_ERR(vma);
|
2019-11-07 18:06:00 +00:00
|
|
|
|
2020-01-03 20:41:36 +00:00
|
|
|
intel_gt_pm_get(vma->vm->gt);
|
|
|
|
|
map = i915_vma_pin_iomap(vma);
|
|
|
|
|
i915_vma_unpin(vma);
|
|
|
|
|
if (IS_ERR(map)) {
|
|
|
|
|
err = PTR_ERR(map);
|
|
|
|
|
goto out;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
memset_io(map, POISON_INUSE, obj->base.size);
|
|
|
|
|
i915_vma_unpin_iomap(vma);
|
|
|
|
|
|
|
|
|
|
out:
|
|
|
|
|
intel_gt_pm_put(vma->vm->gt);
|
|
|
|
|
return err;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static int gtt_check(struct drm_i915_gem_object *obj)
|
|
|
|
|
{
|
|
|
|
|
struct i915_vma *vma;
|
|
|
|
|
void __iomem *map;
|
|
|
|
|
int err = 0;
|
|
|
|
|
|
|
|
|
|
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
|
|
|
|
|
if (IS_ERR(vma))
|
|
|
|
|
return PTR_ERR(vma);
|
2019-11-07 18:06:00 +00:00
|
|
|
|
2020-01-03 20:41:36 +00:00
|
|
|
intel_gt_pm_get(vma->vm->gt);
|
|
|
|
|
map = i915_vma_pin_iomap(vma);
|
|
|
|
|
i915_vma_unpin(vma);
|
|
|
|
|
if (IS_ERR(map)) {
|
|
|
|
|
err = PTR_ERR(map);
|
2019-11-07 18:06:00 +00:00
|
|
|
goto out;
|
|
|
|
|
}
|
2020-01-03 20:41:36 +00:00
|
|
|
|
|
|
|
|
if (memchr_inv((void __force *)map, POISON_FREE, obj->base.size)) {
|
|
|
|
|
pr_err("%s: Write via mmap did not land in backing store (GTT)\n",
|
|
|
|
|
obj->mm.region->name);
|
|
|
|
|
err = -EINVAL;
|
|
|
|
|
}
|
|
|
|
|
i915_vma_unpin_iomap(vma);
|
|
|
|
|
|
|
|
|
|
out:
|
|
|
|
|
intel_gt_pm_put(vma->vm->gt);
|
|
|
|
|
return err;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
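/*
 * wc_set()/wc_check() mirror gtt_set()/gtt_check() but go through a WC
 * CPU mapping of the object's pages instead of the GGTT aperture.
 */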
static int wc_set(struct drm_i915_gem_object *obj)
|
|
|
|
|
{
|
|
|
|
|
void *vaddr;
|
|
|
|
|
|
2021-03-23 16:50:36 +01:00
|
|
|
vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
|
2020-01-03 20:41:36 +00:00
|
|
|
if (IS_ERR(vaddr))
|
|
|
|
|
return PTR_ERR(vaddr);
|
|
|
|
|
|
|
|
|
|
memset(vaddr, POISON_INUSE, obj->base.size);
|
2019-11-07 18:06:00 +00:00
|
|
|
i915_gem_object_flush_map(obj);
|
|
|
|
|
i915_gem_object_unpin_map(obj);
|
|
|
|
|
|
2020-01-03 20:41:36 +00:00
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static int wc_check(struct drm_i915_gem_object *obj)
|
|
|
|
|
{
|
|
|
|
|
void *vaddr;
|
|
|
|
|
int err = 0;
|
|
|
|
|
|
2021-03-23 16:50:36 +01:00
|
|
|
vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
|
2020-01-03 20:41:36 +00:00
|
|
|
if (IS_ERR(vaddr))
|
|
|
|
|
return PTR_ERR(vaddr);
|
|
|
|
|
|
|
|
|
|
if (memchr_inv(vaddr, POISON_FREE, obj->base.size)) {
|
|
|
|
|
pr_err("%s: Write via mmap did not land in backing store (WC)\n",
|
|
|
|
|
obj->mm.region->name);
|
|
|
|
|
err = -EINVAL;
|
2019-12-04 12:00:32 +00:00
|
|
|
}
|
2020-01-03 20:41:36 +00:00
|
|
|
i915_gem_object_unpin_map(obj);
|
|
|
|
|
|
|
|
|
|
return err;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
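/*
 * Not every mmap type is valid for every object: objects providing their
 * own mmap_offset hook only support the FIXED mode, GTT mmaps need a
 * mappable aperture, and the other CPU modes need either struct pages or
 * iomem behind the object.
 */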
static bool can_mmap(struct drm_i915_gem_object *obj, enum i915_mmap_type type)
|
|
|
|
|
{
|
2021-12-19 23:24:57 +02:00
|
|
|
struct drm_i915_private *i915 = to_i915(obj->base.dev);
|
2021-06-24 10:42:38 +02:00
|
|
|
bool no_map;
|
|
|
|
|
|
2021-08-31 14:29:31 +02:00
|
|
|
if (obj->ops->mmap_offset)
|
2021-07-14 14:28:33 +02:00
|
|
|
return type == I915_MMAP_TYPE_FIXED;
|
|
|
|
|
else if (type == I915_MMAP_TYPE_FIXED)
|
|
|
|
|
return false;
|
|
|
|
|
|
2020-01-03 20:41:36 +00:00
|
|
|
if (type == I915_MMAP_TYPE_GTT &&
|
2021-12-19 23:24:57 +02:00
|
|
|
!i915_ggtt_has_aperture(to_gt(i915)->ggtt))
|
2020-01-03 20:41:36 +00:00
|
|
|
return false;
|
|
|
|
|
|
2021-06-24 10:42:38 +02:00
|
|
|
i915_gem_object_lock(obj, NULL);
|
|
|
|
|
no_map = (type != I915_MMAP_TYPE_GTT &&
|
|
|
|
|
!i915_gem_object_has_struct_page(obj) &&
|
|
|
|
|
!i915_gem_object_has_iomem(obj));
|
|
|
|
|
i915_gem_object_unlock(obj);
|
2020-01-03 20:41:36 +00:00
|
|
|
|
2021-06-24 10:42:38 +02:00
|
|
|
return !no_map;
|
2020-01-03 20:41:36 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
#define expand32(x) (((x) << 0) | ((x) << 8) | ((x) << 16) | ((x) << 24))
|
|
|
|
|
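/*
 * Poison the object through the kernel (WC or GGTT), then mmap it with the
 * requested type and check that userspace reads back the poison and that
 * the values it writes land in the backing store.
 */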
static int __igt_mmap(struct drm_i915_private *i915,
|
|
|
|
|
struct drm_i915_gem_object *obj,
|
|
|
|
|
enum i915_mmap_type type)
|
|
|
|
|
{
|
|
|
|
|
struct vm_area_struct *area;
|
|
|
|
|
unsigned long addr;
|
|
|
|
|
int err, i;
|
2021-06-10 09:01:52 +02:00
|
|
|
u64 offset;
|
2020-01-03 20:41:36 +00:00
|
|
|
|
|
|
|
|
if (!can_mmap(obj, type))
|
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
|
|
err = wc_set(obj);
|
|
|
|
|
if (err == -ENXIO)
|
|
|
|
|
err = gtt_set(obj);
|
|
|
|
|
if (err)
|
|
|
|
|
return err;
|
|
|
|
|
|
2021-06-10 09:01:52 +02:00
|
|
|
err = __assign_mmap_offset(obj, type, &offset, NULL);
|
|
|
|
|
if (err)
|
|
|
|
|
return err;
|
2019-11-07 18:06:00 +00:00
|
|
|
|
2021-06-10 09:01:52 +02:00
|
|
|
addr = igt_mmap_offset(i915, offset, obj->base.size, PROT_WRITE, MAP_SHARED);
|
2020-01-03 20:41:36 +00:00
|
|
|
if (IS_ERR_VALUE(addr))
|
|
|
|
|
return addr;
|
2019-11-07 18:06:00 +00:00
|
|
|
|
2020-01-03 20:41:36 +00:00
|
|
|
pr_debug("igt_mmap(%s, %d) @ %lx\n", obj->mm.region->name, type, addr);
|
2019-11-07 18:06:00 +00:00
|
|
|
|
2021-09-15 12:59:46 +02:00
|
|
|
mmap_read_lock(current->mm);
|
2021-06-28 19:38:53 -07:00
|
|
|
area = vma_lookup(current->mm, addr);
|
2021-09-15 12:59:46 +02:00
|
|
|
mmap_read_unlock(current->mm);
|
2019-11-07 18:06:00 +00:00
|
|
|
if (!area) {
|
2020-01-03 20:41:36 +00:00
|
|
|
pr_err("%s: Did not create a vm_area_struct for the mmap\n",
|
|
|
|
|
obj->mm.region->name);
|
2019-11-07 18:06:00 +00:00
|
|
|
err = -EINVAL;
|
|
|
|
|
goto out_unmap;
|
|
|
|
|
}
|
|
|
|
|
|
2020-01-03 20:41:36 +00:00
|
|
|
for (i = 0; i < obj->base.size / sizeof(u32); i++) {
|
2019-11-07 18:06:00 +00:00
|
|
|
u32 __user *ux = u64_to_user_ptr((u64)(addr + i * sizeof(*ux)));
|
|
|
|
|
u32 x;
|
|
|
|
|
|
|
|
|
|
if (get_user(x, ux)) {
|
2020-01-03 20:41:36 +00:00
|
|
|
pr_err("%s: Unable to read from mmap, offset:%zd\n",
|
|
|
|
|
obj->mm.region->name, i * sizeof(x));
|
2019-11-07 18:06:00 +00:00
|
|
|
err = -EFAULT;
|
2020-01-03 20:41:36 +00:00
|
|
|
goto out_unmap;
|
2019-11-07 18:06:00 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (x != expand32(POISON_INUSE)) {
|
2020-01-03 20:41:36 +00:00
|
|
|
pr_err("%s: Read incorrect value from mmap, offset:%zd, found:%x, expected:%x\n",
|
|
|
|
|
obj->mm.region->name,
|
2019-11-07 18:06:00 +00:00
|
|
|
i * sizeof(x), x, expand32(POISON_INUSE));
|
|
|
|
|
err = -EINVAL;
|
2020-01-03 20:41:36 +00:00
|
|
|
goto out_unmap;
|
2019-11-07 18:06:00 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
x = expand32(POISON_FREE);
|
|
|
|
|
if (put_user(x, ux)) {
|
2020-01-03 20:41:36 +00:00
|
|
|
pr_err("%s: Unable to write to mmap, offset:%zd\n",
|
|
|
|
|
obj->mm.region->name, i * sizeof(x));
|
2019-11-07 18:06:00 +00:00
|
|
|
err = -EFAULT;
|
2020-01-03 20:41:36 +00:00
|
|
|
goto out_unmap;
|
2019-11-07 18:06:00 +00:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2020-01-03 20:41:36 +00:00
|
|
|
if (type == I915_MMAP_TYPE_GTT)
|
2021-12-14 21:33:35 +02:00
|
|
|
intel_gt_flush_ggtt_writes(to_gt(i915));
|
2019-11-07 18:06:00 +00:00
|
|
|
|
2020-01-03 20:41:36 +00:00
|
|
|
err = wc_check(obj);
|
|
|
|
|
if (err == -ENXIO)
|
|
|
|
|
err = gtt_check(obj);
|
|
|
|
|
out_unmap:
|
|
|
|
|
vm_munmap(addr, obj->base.size);
|
2019-11-07 18:06:00 +00:00
|
|
|
return err;
|
|
|
|
|
}
|
|
|
|
|
|
2020-01-03 20:41:36 +00:00
|
|
|
static int igt_mmap(void *arg)
|
2019-12-04 12:00:32 +00:00
|
|
|
{
|
2020-01-03 20:41:36 +00:00
|
|
|
struct drm_i915_private *i915 = arg;
|
|
|
|
|
struct intel_memory_region *mr;
|
|
|
|
|
enum intel_region_id id;
|
2019-12-04 12:00:32 +00:00
|
|
|
|
2020-01-03 20:41:36 +00:00
|
|
|
for_each_memory_region(mr, i915, id) {
|
|
|
|
|
unsigned long sizes[] = {
|
|
|
|
|
PAGE_SIZE,
|
|
|
|
|
mr->min_page_size,
|
|
|
|
|
SZ_4M,
|
|
|
|
|
};
|
|
|
|
|
int i;
|
|
|
|
|
|
|
|
|
|
for (i = 0; i < ARRAY_SIZE(sizes); i++) {
|
|
|
|
|
struct drm_i915_gem_object *obj;
|
|
|
|
|
int err;
|
|
|
|
|
|
2021-07-29 10:47:31 +01:00
|
|
|
obj = __i915_gem_object_create_user(i915, sizes[i], &mr, 1);
|
2020-01-03 20:41:36 +00:00
|
|
|
if (obj == ERR_PTR(-ENODEV))
|
|
|
|
|
continue;
|
|
|
|
|
|
|
|
|
|
if (IS_ERR(obj))
|
|
|
|
|
return PTR_ERR(obj);
|
|
|
|
|
|
|
|
|
|
err = __igt_mmap(i915, obj, I915_MMAP_TYPE_GTT);
|
|
|
|
|
if (err == 0)
|
|
|
|
|
err = __igt_mmap(i915, obj, I915_MMAP_TYPE_WC);
|
2021-07-14 14:28:33 +02:00
|
|
|
if (err == 0)
|
|
|
|
|
err = __igt_mmap(i915, obj, I915_MMAP_TYPE_FIXED);
|
2020-01-03 20:41:36 +00:00
|
|
|
|
|
|
|
|
i915_gem_object_put(obj);
|
|
|
|
|
if (err)
|
|
|
|
|
return err;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return 0;
|
2019-12-04 12:00:32 +00:00
|
|
|
}
|
|
|
|
|
|
2022-02-28 12:36:07 +00:00
|
|
|
static void igt_close_objects(struct drm_i915_private *i915,
|
|
|
|
|
struct list_head *objects)
|
|
|
|
|
{
|
|
|
|
|
struct drm_i915_gem_object *obj, *on;
|
|
|
|
|
|
|
|
|
|
list_for_each_entry_safe(obj, on, objects, st_link) {
|
|
|
|
|
i915_gem_object_lock(obj, NULL);
|
|
|
|
|
if (i915_gem_object_has_pinned_pages(obj))
|
|
|
|
|
i915_gem_object_unpin_pages(obj);
|
|
|
|
|
/* No polluting the memory region between tests */
|
|
|
|
|
__i915_gem_object_put_pages(obj);
|
|
|
|
|
i915_gem_object_unlock(obj);
|
|
|
|
|
list_del(&obj->st_link);
|
|
|
|
|
i915_gem_object_put(obj);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
cond_resched();
|
|
|
|
|
|
|
|
|
|
i915_gem_drain_freed_objects(i915);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void igt_make_evictable(struct list_head *objects)
|
|
|
|
|
{
|
|
|
|
|
struct drm_i915_gem_object *obj;
|
|
|
|
|
|
|
|
|
|
list_for_each_entry(obj, objects, st_link) {
|
|
|
|
|
i915_gem_object_lock(obj, NULL);
|
|
|
|
|
if (i915_gem_object_has_pinned_pages(obj))
|
|
|
|
|
i915_gem_object_unpin_pages(obj);
|
|
|
|
|
i915_gem_object_unlock(obj);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
cond_resched();
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
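/*
 * Fill the CPU visible (io_size) portion of the region with pinned
 * objects, halving the allocation size on failure until even the minimum
 * page size no longer fits.
 */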
static int igt_fill_mappable(struct intel_memory_region *mr,
|
|
|
|
|
struct list_head *objects)
|
|
|
|
|
{
|
|
|
|
|
u64 size, total;
|
|
|
|
|
int err;
|
|
|
|
|
|
|
|
|
|
total = 0;
|
|
|
|
|
size = mr->io_size;
|
|
|
|
|
do {
|
|
|
|
|
struct drm_i915_gem_object *obj;
|
|
|
|
|
|
|
|
|
|
obj = i915_gem_object_create_region(mr, size, 0, 0);
|
|
|
|
|
if (IS_ERR(obj)) {
|
|
|
|
|
err = PTR_ERR(obj);
|
|
|
|
|
goto err_close;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
list_add(&obj->st_link, objects);
|
|
|
|
|
|
|
|
|
|
err = i915_gem_object_pin_pages_unlocked(obj);
|
|
|
|
|
if (err) {
|
|
|
|
|
if (err != -ENXIO && err != -ENOMEM)
|
|
|
|
|
goto err_close;
|
|
|
|
|
|
|
|
|
|
if (size == mr->min_page_size) {
|
|
|
|
|
err = 0;
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
size >>= 1;
|
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
total += obj->base.size;
|
|
|
|
|
} while (1);
|
|
|
|
|
|
|
|
|
|
pr_info("%s filled=%lluMiB\n", __func__, total >> 20);
|
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
|
|
err_close:
|
|
|
|
|
igt_close_objects(mr->i915, objects);
|
|
|
|
|
return err;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
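/*
 * Fault the object through the CPU mmap at @addr and verify the poison
 * pattern round-trips; in the unfaultable case every access is instead
 * expected to fail with -EFAULT.
 */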
static int ___igt_mmap_migrate(struct drm_i915_private *i915,
|
|
|
|
|
struct drm_i915_gem_object *obj,
|
|
|
|
|
unsigned long addr,
|
|
|
|
|
bool unfaultable)
|
|
|
|
|
{
|
|
|
|
|
struct vm_area_struct *area;
|
|
|
|
|
int err = 0, i;
|
|
|
|
|
|
|
|
|
|
pr_info("igt_mmap(%s, %d) @ %lx\n",
|
|
|
|
|
obj->mm.region->name, I915_MMAP_TYPE_FIXED, addr);
|
|
|
|
|
|
|
|
|
|
mmap_read_lock(current->mm);
|
|
|
|
|
area = vma_lookup(current->mm, addr);
|
|
|
|
|
mmap_read_unlock(current->mm);
|
|
|
|
|
if (!area) {
|
|
|
|
|
pr_err("%s: Did not create a vm_area_struct for the mmap\n",
|
|
|
|
|
obj->mm.region->name);
|
|
|
|
|
err = -EINVAL;
|
|
|
|
|
goto out_unmap;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
for (i = 0; i < obj->base.size / sizeof(u32); i++) {
|
|
|
|
|
u32 __user *ux = u64_to_user_ptr((u64)(addr + i * sizeof(*ux)));
|
|
|
|
|
u32 x;
|
|
|
|
|
|
|
|
|
|
if (get_user(x, ux)) {
|
|
|
|
|
err = -EFAULT;
|
|
|
|
|
if (!unfaultable) {
|
|
|
|
|
pr_err("%s: Unable to read from mmap, offset:%zd\n",
|
|
|
|
|
obj->mm.region->name, i * sizeof(x));
|
|
|
|
|
goto out_unmap;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (unfaultable) {
|
|
|
|
|
pr_err("%s: Faulted unmappable memory\n",
|
|
|
|
|
obj->mm.region->name);
|
|
|
|
|
err = -EINVAL;
|
|
|
|
|
goto out_unmap;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (x != expand32(POISON_INUSE)) {
|
|
|
|
|
pr_err("%s: Read incorrect value from mmap, offset:%zd, found:%x, expected:%x\n",
|
|
|
|
|
obj->mm.region->name,
|
|
|
|
|
i * sizeof(x), x, expand32(POISON_INUSE));
|
|
|
|
|
err = -EINVAL;
|
|
|
|
|
goto out_unmap;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
x = expand32(POISON_FREE);
|
|
|
|
|
if (put_user(x, ux)) {
|
|
|
|
|
pr_err("%s: Unable to write to mmap, offset:%zd\n",
|
|
|
|
|
obj->mm.region->name, i * sizeof(x));
|
|
|
|
|
err = -EFAULT;
|
|
|
|
|
goto out_unmap;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (unfaultable) {
|
|
|
|
|
if (err == -EFAULT)
|
|
|
|
|
err = 0;
|
|
|
|
|
} else {
|
|
|
|
|
obj->flags &= ~I915_BO_ALLOC_GPU_ONLY;
|
|
|
|
|
err = wc_check(obj);
|
|
|
|
|
}
|
|
|
|
|
out_unmap:
|
|
|
|
|
vm_munmap(addr, obj->base.size);
|
|
|
|
|
return err;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
#define IGT_MMAP_MIGRATE_TOPDOWN (1 << 0)
|
|
|
|
|
#define IGT_MMAP_MIGRATE_FILL (1 << 1)
|
|
|
|
|
#define IGT_MMAP_MIGRATE_EVICTABLE (1 << 2)
|
|
|
|
|
#define IGT_MMAP_MIGRATE_UNFAULTABLE (1 << 3)
|
|
|
|
|
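/*
 * Create a small object with the given placements, expose it via a FIXED
 * mmap, optionally fill the mappable portion of the region and clear the
 * object on the GPU so its pages are allocated (in the non-mappable portion
 * when GPU_ONLY is set), then fault it from the CPU and check it ends up in
 * @expected_mr -- or that the fault fails when it cannot be made mappable.
 */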
static int __igt_mmap_migrate(struct intel_memory_region **placements,
|
|
|
|
|
int n_placements,
|
|
|
|
|
struct intel_memory_region *expected_mr,
|
|
|
|
|
unsigned int flags)
|
|
|
|
|
{
|
|
|
|
|
struct drm_i915_private *i915 = placements[0]->i915;
|
|
|
|
|
struct drm_i915_gem_object *obj;
|
|
|
|
|
struct i915_request *rq = NULL;
|
|
|
|
|
unsigned long addr;
|
|
|
|
|
LIST_HEAD(objects);
|
|
|
|
|
u64 offset;
|
|
|
|
|
int err;
|
|
|
|
|
|
|
|
|
|
obj = __i915_gem_object_create_user(i915, PAGE_SIZE,
|
|
|
|
|
placements,
|
|
|
|
|
n_placements);
|
|
|
|
|
if (IS_ERR(obj))
|
|
|
|
|
return PTR_ERR(obj);
|
|
|
|
|
|
|
|
|
|
if (flags & IGT_MMAP_MIGRATE_TOPDOWN)
|
|
|
|
|
obj->flags |= I915_BO_ALLOC_GPU_ONLY;
|
|
|
|
|
|
|
|
|
|
err = __assign_mmap_offset(obj, I915_MMAP_TYPE_FIXED, &offset, NULL);
|
|
|
|
|
if (err)
|
|
|
|
|
goto out_put;
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* This will eventually create a GEM context, due to opening a dummy drm
|
|
|
|
|
* file, which needs a tiny amount of mappable device memory for the top
|
|
|
|
|
* level paging structures (and perhaps scratch), so make sure we
|
|
|
|
|
* allocate early, to avoid tears.
|
|
|
|
|
*/
|
|
|
|
|
addr = igt_mmap_offset(i915, offset, obj->base.size,
|
|
|
|
|
PROT_WRITE, MAP_SHARED);
|
|
|
|
|
if (IS_ERR_VALUE(addr)) {
|
|
|
|
|
err = addr;
|
|
|
|
|
goto out_put;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (flags & IGT_MMAP_MIGRATE_FILL) {
|
|
|
|
|
err = igt_fill_mappable(placements[0], &objects);
|
|
|
|
|
if (err)
|
|
|
|
|
goto out_put;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
err = i915_gem_object_lock(obj, NULL);
|
|
|
|
|
if (err)
|
|
|
|
|
goto out_put;
|
|
|
|
|
|
|
|
|
|
err = i915_gem_object_pin_pages(obj);
|
|
|
|
|
if (err) {
|
|
|
|
|
i915_gem_object_unlock(obj);
|
|
|
|
|
goto out_put;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
err = intel_context_migrate_clear(to_gt(i915)->migrate.context, NULL,
|
|
|
|
|
obj->mm.pages->sgl, obj->cache_level,
|
|
|
|
|
i915_gem_object_is_lmem(obj),
|
|
|
|
|
expand32(POISON_INUSE), &rq);
|
|
|
|
|
i915_gem_object_unpin_pages(obj);
|
|
|
|
|
if (rq) {
|
2021-11-09 11:08:18 +01:00
|
|
|
dma_resv_add_fence(obj->base.resv, &rq->fence,
|
2021-12-22 11:23:56 +01:00
|
|
|
DMA_RESV_USAGE_KERNEL);
|
2022-02-28 12:36:07 +00:00
|
|
|
i915_request_put(rq);
|
|
|
|
|
}
|
|
|
|
|
i915_gem_object_unlock(obj);
|
|
|
|
|
if (err)
|
|
|
|
|
goto out_put;
|
|
|
|
|
|
|
|
|
|
if (flags & IGT_MMAP_MIGRATE_EVICTABLE)
|
|
|
|
|
igt_make_evictable(&objects);
|
|
|
|
|
|
|
|
|
|
err = ___igt_mmap_migrate(i915, obj, addr,
|
|
|
|
|
flags & IGT_MMAP_MIGRATE_UNFAULTABLE);
|
|
|
|
|
if (!err && obj->mm.region != expected_mr) {
|
|
|
|
|
pr_err("%s region mismatch %s\n", __func__, expected_mr->name);
|
|
|
|
|
err = -EINVAL;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
out_put:
|
|
|
|
|
i915_gem_object_put(obj);
|
|
|
|
|
igt_close_objects(i915, &objects);
|
|
|
|
|
return err;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static int igt_mmap_migrate(void *arg)
|
|
|
|
|
{
|
|
|
|
|
struct drm_i915_private *i915 = arg;
|
|
|
|
|
struct intel_memory_region *system = i915->mm.regions[INTEL_REGION_SMEM];
|
|
|
|
|
struct intel_memory_region *mr;
|
|
|
|
|
enum intel_region_id id;
|
|
|
|
|
|
|
|
|
|
for_each_memory_region(mr, i915, id) {
|
|
|
|
|
struct intel_memory_region *mixed[] = { mr, system };
|
|
|
|
|
struct intel_memory_region *single[] = { mr };
|
|
|
|
|
struct ttm_resource_manager *man = mr->region_private;
|
|
|
|
|
resource_size_t saved_io_size;
|
|
|
|
|
int err;
|
|
|
|
|
|
|
|
|
|
if (mr->private)
|
|
|
|
|
continue;
|
|
|
|
|
|
|
|
|
|
if (!mr->io_size)
|
|
|
|
|
continue;
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* For testing purposes let's force small BAR, if not already
|
|
|
|
|
* present.
|
|
|
|
|
*/
|
|
|
|
|
saved_io_size = mr->io_size;
|
|
|
|
|
if (mr->io_size == mr->total) {
|
|
|
|
|
resource_size_t io_size = mr->io_size;
|
|
|
|
|
|
|
|
|
|
io_size = rounddown_pow_of_two(io_size >> 1);
|
|
|
|
|
if (io_size < PAGE_SIZE)
|
|
|
|
|
continue;
|
|
|
|
|
|
|
|
|
|
mr->io_size = io_size;
|
|
|
|
|
i915_ttm_buddy_man_force_visible_size(man,
|
|
|
|
|
io_size >> PAGE_SHIFT);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* Allocate in the mappable portion, there should be no surprises here.
|
|
|
|
|
*/
|
|
|
|
|
err = __igt_mmap_migrate(mixed, ARRAY_SIZE(mixed), mr, 0);
|
|
|
|
|
if (err)
|
|
|
|
|
goto out_io_size;
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* Allocate in the non-mappable portion, but force migrating to
|
|
|
|
|
* the mappable portion on fault (LMEM -> LMEM)
|
|
|
|
|
*/
|
|
|
|
|
err = __igt_mmap_migrate(single, ARRAY_SIZE(single), mr,
|
|
|
|
|
IGT_MMAP_MIGRATE_TOPDOWN |
|
|
|
|
|
IGT_MMAP_MIGRATE_FILL |
|
|
|
|
|
IGT_MMAP_MIGRATE_EVICTABLE);
|
|
|
|
|
if (err)
|
|
|
|
|
goto out_io_size;
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* Allocate in the non-mappable portion, but force spilling into
|
|
|
|
|
* system memory on fault (LMEM -> SMEM)
|
|
|
|
|
*/
|
|
|
|
|
err = __igt_mmap_migrate(mixed, ARRAY_SIZE(mixed), system,
|
|
|
|
|
IGT_MMAP_MIGRATE_TOPDOWN |
|
|
|
|
|
IGT_MMAP_MIGRATE_FILL);
|
|
|
|
|
if (err)
|
|
|
|
|
goto out_io_size;
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* Allocate in the non-mappable portion, but since the mappable
|
|
|
|
|
* portion is already full, and we can't spill to system memory,
|
|
|
|
|
* then we should expect the fault to fail.
|
|
|
|
|
*/
|
|
|
|
|
err = __igt_mmap_migrate(single, ARRAY_SIZE(single), mr,
|
|
|
|
|
IGT_MMAP_MIGRATE_TOPDOWN |
|
|
|
|
|
IGT_MMAP_MIGRATE_FILL |
|
|
|
|
|
IGT_MMAP_MIGRATE_UNFAULTABLE);
|
|
|
|
|
out_io_size:
|
|
|
|
|
mr->io_size = saved_io_size;
|
|
|
|
|
i915_ttm_buddy_man_force_visible_size(man,
|
|
|
|
|
mr->io_size >> PAGE_SHIFT);
|
|
|
|
|
if (err)
|
|
|
|
|
return err;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
|
2020-05-01 15:51:20 +01:00
|
|
|
static const char *repr_mmap_type(enum i915_mmap_type type)
|
|
|
|
|
{
|
|
|
|
|
switch (type) {
|
|
|
|
|
case I915_MMAP_TYPE_GTT: return "gtt";
|
|
|
|
|
case I915_MMAP_TYPE_WB: return "wb";
|
|
|
|
|
case I915_MMAP_TYPE_WC: return "wc";
|
|
|
|
|
case I915_MMAP_TYPE_UC: return "uc";
|
2021-07-14 14:28:33 +02:00
|
|
|
case I915_MMAP_TYPE_FIXED: return "fixed";
|
2020-05-01 15:51:20 +01:00
|
|
|
default: return "unknown";
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2021-06-24 10:42:38 +02:00
|
|
|
static bool can_access(struct drm_i915_gem_object *obj)
|
2020-05-01 15:51:20 +01:00
|
|
|
{
|
2021-06-24 10:42:38 +02:00
|
|
|
bool access;
|
|
|
|
|
|
|
|
|
|
i915_gem_object_lock(obj, NULL);
|
|
|
|
|
access = i915_gem_object_has_struct_page(obj) ||
|
|
|
|
|
i915_gem_object_has_iomem(obj);
|
|
|
|
|
i915_gem_object_unlock(obj);
|
|
|
|
|
|
|
|
|
|
return access;
|
2020-05-01 15:51:20 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
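/*
 * Check ptrace-style access: write through the user mmap, then read and
 * write the same page via access_process_vm(), and verify both views stay
 * coherent.
 */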
static int __igt_mmap_access(struct drm_i915_private *i915,
|
|
|
|
|
struct drm_i915_gem_object *obj,
|
|
|
|
|
enum i915_mmap_type type)
|
|
|
|
|
{
|
|
|
|
|
unsigned long __user *ptr;
|
|
|
|
|
unsigned long A, B;
|
|
|
|
|
unsigned long x, y;
|
|
|
|
|
unsigned long addr;
|
|
|
|
|
int err;
|
2021-06-10 09:01:52 +02:00
|
|
|
u64 offset;
|
2020-05-01 15:51:20 +01:00
|
|
|
|
|
|
|
|
memset(&A, 0xAA, sizeof(A));
|
|
|
|
|
memset(&B, 0xBB, sizeof(B));
|
|
|
|
|
|
|
|
|
|
if (!can_mmap(obj, type) || !can_access(obj))
|
|
|
|
|
return 0;
|
|
|
|
|
|
2021-06-10 09:01:52 +02:00
|
|
|
err = __assign_mmap_offset(obj, type, &offset, NULL);
|
|
|
|
|
if (err)
|
|
|
|
|
return err;
|
2020-05-01 15:51:20 +01:00
|
|
|
|
2021-06-10 09:01:52 +02:00
|
|
|
addr = igt_mmap_offset(i915, offset, obj->base.size, PROT_WRITE, MAP_SHARED);
|
2020-05-01 15:51:20 +01:00
|
|
|
if (IS_ERR_VALUE(addr))
|
|
|
|
|
return addr;
|
|
|
|
|
ptr = (unsigned long __user *)addr;
|
|
|
|
|
|
|
|
|
|
err = __put_user(A, ptr);
|
|
|
|
|
if (err) {
|
|
|
|
|
pr_err("%s(%s): failed to write into user mmap\n",
|
|
|
|
|
obj->mm.region->name, repr_mmap_type(type));
|
|
|
|
|
goto out_unmap;
|
|
|
|
|
}
|
|
|
|
|
|
2021-12-14 21:33:35 +02:00
|
|
|
intel_gt_flush_ggtt_writes(to_gt(i915));
|
2020-05-01 15:51:20 +01:00
|
|
|
|
|
|
|
|
err = access_process_vm(current, addr, &x, sizeof(x), 0);
|
|
|
|
|
if (err != sizeof(x)) {
|
|
|
|
|
pr_err("%s(%s): access_process_vm() read failed\n",
|
|
|
|
|
obj->mm.region->name, repr_mmap_type(type));
|
|
|
|
|
goto out_unmap;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
err = access_process_vm(current, addr, &B, sizeof(B), FOLL_WRITE);
|
|
|
|
|
if (err != sizeof(B)) {
|
|
|
|
|
pr_err("%s(%s): access_process_vm() write failed\n",
|
|
|
|
|
obj->mm.region->name, repr_mmap_type(type));
|
|
|
|
|
goto out_unmap;
|
|
|
|
|
}
|
|
|
|
|
|
2021-12-14 21:33:35 +02:00
|
|
|
intel_gt_flush_ggtt_writes(to_gt(i915));
|
2020-05-01 15:51:20 +01:00
|
|
|
|
|
|
|
|
err = __get_user(y, ptr);
|
|
|
|
|
if (err) {
|
|
|
|
|
pr_err("%s(%s): failed to read from user mmap\n",
|
|
|
|
|
obj->mm.region->name, repr_mmap_type(type));
|
|
|
|
|
goto out_unmap;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (x != A || y != B) {
|
|
|
|
|
pr_err("%s(%s): failed to read/write values, found (%lx, %lx)\n",
|
|
|
|
|
obj->mm.region->name, repr_mmap_type(type),
|
|
|
|
|
x, y);
|
|
|
|
|
err = -EINVAL;
|
|
|
|
|
goto out_unmap;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
out_unmap:
|
|
|
|
|
vm_munmap(addr, obj->base.size);
|
|
|
|
|
return err;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static int igt_mmap_access(void *arg)
|
|
|
|
|
{
|
|
|
|
|
struct drm_i915_private *i915 = arg;
|
|
|
|
|
struct intel_memory_region *mr;
|
|
|
|
|
enum intel_region_id id;
|
|
|
|
|
|
|
|
|
|
for_each_memory_region(mr, i915, id) {
|
|
|
|
|
struct drm_i915_gem_object *obj;
|
|
|
|
|
int err;
|
|
|
|
|
|
2021-07-29 10:47:31 +01:00
|
|
|
obj = __i915_gem_object_create_user(i915, PAGE_SIZE, &mr, 1);
|
2020-05-01 15:51:20 +01:00
|
|
|
if (obj == ERR_PTR(-ENODEV))
|
|
|
|
|
continue;
|
|
|
|
|
|
|
|
|
|
if (IS_ERR(obj))
|
|
|
|
|
return PTR_ERR(obj);
|
|
|
|
|
|
|
|
|
|
err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_GTT);
|
|
|
|
|
if (err == 0)
|
|
|
|
|
err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_WB);
|
|
|
|
|
if (err == 0)
|
|
|
|
|
err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_WC);
|
|
|
|
|
if (err == 0)
|
|
|
|
|
err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_UC);
|
2021-07-14 14:28:33 +02:00
|
|
|
if (err == 0)
|
|
|
|
|
err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_FIXED);
|
2020-05-01 15:51:20 +01:00
|
|
|
|
|
|
|
|
i915_gem_object_put(obj);
|
|
|
|
|
if (err)
|
|
|
|
|
return err;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
|
2020-01-03 20:41:37 +00:00
|
|
|
static int __igt_mmap_gpu(struct drm_i915_private *i915,
|
|
|
|
|
struct drm_i915_gem_object *obj,
|
|
|
|
|
enum i915_mmap_type type)
|
|
|
|
|
{
|
|
|
|
|
struct intel_engine_cs *engine;
|
|
|
|
|
unsigned long addr;
|
2020-01-06 11:42:29 +00:00
|
|
|
u32 __user *ux;
|
|
|
|
|
u32 bbe;
|
2020-01-03 20:41:37 +00:00
|
|
|
int err;
|
2021-06-10 09:01:52 +02:00
|
|
|
u64 offset;
|
2020-01-03 20:41:37 +00:00
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* Verify that the mmap access into the backing store aligns with
|
|
|
|
|
* that of the GPU, i.e. that mmap is indeed writing into the same
|
|
|
|
|
* page as being read by the GPU.
|
|
|
|
|
*/
|
|
|
|
|
|
|
|
|
|
if (!can_mmap(obj, type))
|
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
|
|
err = wc_set(obj);
|
|
|
|
|
if (err == -ENXIO)
|
|
|
|
|
err = gtt_set(obj);
|
|
|
|
|
if (err)
|
|
|
|
|
return err;
|
|
|
|
|
|
2021-06-10 09:01:52 +02:00
|
|
|
err = __assign_mmap_offset(obj, type, &offset, NULL);
|
|
|
|
|
if (err)
|
|
|
|
|
return err;
|
2020-01-03 20:41:37 +00:00
|
|
|
|
2021-06-10 09:01:52 +02:00
|
|
|
addr = igt_mmap_offset(i915, offset, obj->base.size, PROT_WRITE, MAP_SHARED);
|
2020-01-03 20:41:37 +00:00
|
|
|
if (IS_ERR_VALUE(addr))
|
|
|
|
|
return addr;
|
|
|
|
|
|
|
|
|
|
ux = u64_to_user_ptr((u64)addr);
|
|
|
|
|
bbe = MI_BATCH_BUFFER_END;
|
|
|
|
|
if (put_user(bbe, ux)) {
|
|
|
|
|
pr_err("%s: Unable to write to mmap\n", obj->mm.region->name);
|
|
|
|
|
err = -EFAULT;
|
|
|
|
|
goto out_unmap;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (type == I915_MMAP_TYPE_GTT)
|
2021-12-14 21:33:35 +02:00
|
|
|
intel_gt_flush_ggtt_writes(to_gt(i915));
|
2020-01-03 20:41:37 +00:00
|
|
|
|
|
|
|
|
for_each_uabi_engine(engine, i915) {
|
|
|
|
|
struct i915_request *rq;
|
|
|
|
|
struct i915_vma *vma;
|
2020-08-19 16:09:01 +02:00
|
|
|
struct i915_gem_ww_ctx ww;
|
2020-01-03 20:41:37 +00:00
|
|
|
|
|
|
|
|
vma = i915_vma_instance(obj, engine->kernel_context->vm, NULL);
|
|
|
|
|
if (IS_ERR(vma)) {
|
|
|
|
|
err = PTR_ERR(vma);
|
|
|
|
|
goto out_unmap;
|
|
|
|
|
}
|
|
|
|
|
|
2020-08-19 16:09:01 +02:00
|
|
|
i915_gem_ww_ctx_init(&ww, false);
|
|
|
|
|
retry:
|
|
|
|
|
err = i915_gem_object_lock(obj, &ww);
|
|
|
|
|
if (!err)
|
|
|
|
|
err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_USER);
|
2020-01-03 20:41:37 +00:00
|
|
|
if (err)
|
2020-08-19 16:09:01 +02:00
|
|
|
goto out_ww;
|
2020-01-03 20:41:37 +00:00
|
|
|
|
|
|
|
|
rq = i915_request_create(engine->kernel_context);
|
|
|
|
|
if (IS_ERR(rq)) {
|
|
|
|
|
err = PTR_ERR(rq);
|
|
|
|
|
goto out_unpin;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
err = i915_request_await_object(rq, vma->obj, false);
|
|
|
|
|
if (err == 0)
|
|
|
|
|
err = i915_vma_move_to_active(vma, rq, 0);
|
|
|
|
|
|
|
|
|
|
err = engine->emit_bb_start(rq, vma->node.start, 0, 0);
|
|
|
|
|
i915_request_get(rq);
|
|
|
|
|
i915_request_add(rq);
|
|
|
|
|
|
|
|
|
|
if (i915_request_wait(rq, 0, HZ / 5) < 0) {
|
|
|
|
|
struct drm_printer p =
|
|
|
|
|
drm_info_printer(engine->i915->drm.dev);
|
|
|
|
|
|
|
|
|
|
pr_err("%s(%s, %s): Failed to execute batch\n",
|
|
|
|
|
__func__, engine->name, obj->mm.region->name);
|
|
|
|
|
intel_engine_dump(engine, &p,
|
|
|
|
|
"%s\n", engine->name);
|
|
|
|
|
|
|
|
|
|
intel_gt_set_wedged(engine->gt);
|
|
|
|
|
err = -EIO;
|
|
|
|
|
}
|
|
|
|
|
i915_request_put(rq);
|
|
|
|
|
|
|
|
|
|
out_unpin:
|
|
|
|
|
i915_vma_unpin(vma);
|
2020-08-19 16:09:01 +02:00
|
|
|
out_ww:
|
|
|
|
|
if (err == -EDEADLK) {
|
|
|
|
|
err = i915_gem_ww_ctx_backoff(&ww);
|
|
|
|
|
if (!err)
|
|
|
|
|
goto retry;
|
|
|
|
|
}
|
|
|
|
|
i915_gem_ww_ctx_fini(&ww);
|
2020-01-03 20:41:37 +00:00
|
|
|
if (err)
|
|
|
|
|
goto out_unmap;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
out_unmap:
|
|
|
|
|
vm_munmap(addr, obj->base.size);
|
|
|
|
|
return err;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static int igt_mmap_gpu(void *arg)
|
|
|
|
|
{
|
|
|
|
|
struct drm_i915_private *i915 = arg;
|
|
|
|
|
struct intel_memory_region *mr;
|
|
|
|
|
enum intel_region_id id;
|
|
|
|
|
|
|
|
|
|
for_each_memory_region(mr, i915, id) {
|
|
|
|
|
struct drm_i915_gem_object *obj;
|
|
|
|
|
int err;
|
|
|
|
|
|
2021-07-29 10:47:31 +01:00
|
|
|
obj = __i915_gem_object_create_user(i915, PAGE_SIZE, &mr, 1);
|
2020-01-03 20:41:37 +00:00
|
|
|
if (obj == ERR_PTR(-ENODEV))
|
|
|
|
|
continue;
|
|
|
|
|
|
|
|
|
|
if (IS_ERR(obj))
|
|
|
|
|
return PTR_ERR(obj);
|
|
|
|
|
|
|
|
|
|
err = __igt_mmap_gpu(i915, obj, I915_MMAP_TYPE_GTT);
|
|
|
|
|
if (err == 0)
|
|
|
|
|
err = __igt_mmap_gpu(i915, obj, I915_MMAP_TYPE_WC);
|
2021-07-14 14:28:33 +02:00
|
|
|
if (err == 0)
|
|
|
|
|
err = __igt_mmap_gpu(i915, obj, I915_MMAP_TYPE_FIXED);
|
2020-01-03 20:41:37 +00:00
|
|
|
|
|
|
|
|
i915_gem_object_put(obj);
|
|
|
|
|
if (err)
|
|
|
|
|
return err;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
|
2019-11-07 18:06:01 +00:00
|
|
|
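/*
 * Page table walkers for the user mmap: check_present() expects every PTE
 * to be populated after prefaulting, while check_absent() expects them all
 * to have been zapped once the mmap is revoked.
 */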
static int check_present_pte(pte_t *pte, unsigned long addr, void *data)
|
|
|
|
|
{
|
|
|
|
|
if (!pte_present(*pte) || pte_none(*pte)) {
|
|
|
|
|
pr_err("missing PTE:%lx\n",
|
|
|
|
|
(addr - (unsigned long)data) >> PAGE_SHIFT);
|
|
|
|
|
return -EINVAL;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static int check_absent_pte(pte_t *pte, unsigned long addr, void *data)
|
|
|
|
|
{
|
|
|
|
|
if (pte_present(*pte) && !pte_none(*pte)) {
|
|
|
|
|
pr_err("present PTE:%lx; expected to be revoked\n",
|
|
|
|
|
(addr - (unsigned long)data) >> PAGE_SHIFT);
|
|
|
|
|
return -EINVAL;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static int check_present(unsigned long addr, unsigned long len)
|
|
|
|
|
{
|
|
|
|
|
return apply_to_page_range(current->mm, addr, len,
|
|
|
|
|
check_present_pte, (void *)addr);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static int check_absent(unsigned long addr, unsigned long len)
|
|
|
|
|
{
|
|
|
|
|
return apply_to_page_range(current->mm, addr, len,
|
|
|
|
|
check_absent_pte, (void *)addr);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static int prefault_range(u64 start, u64 len)
|
|
|
|
|
{
|
|
|
|
|
const char __user *addr, *end;
|
|
|
|
|
char __maybe_unused c;
|
|
|
|
|
int err;
|
|
|
|
|
|
|
|
|
|
addr = u64_to_user_ptr(start);
|
|
|
|
|
end = addr + len;
|
|
|
|
|
|
|
|
|
|
for (; addr < end; addr += PAGE_SIZE) {
|
|
|
|
|
err = __get_user(c, addr);
|
|
|
|
|
if (err)
|
|
|
|
|
return err;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return __get_user(c, end - 1);
|
|
|
|
|
}
|
|
|
|
|
|
2020-01-03 20:41:36 +00:00
|
|
|
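/*
 * Prefault the user mmap and verify its PTEs are present, then unbind the
 * object (and drop its pages for non-GTT mmaps) and verify that all the
 * PTEs have been revoked.
 */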
static int __igt_mmap_revoke(struct drm_i915_private *i915,
|
|
|
|
|
struct drm_i915_gem_object *obj,
|
|
|
|
|
enum i915_mmap_type type)
|
2019-11-07 18:06:01 +00:00
|
|
|
{
|
|
|
|
|
unsigned long addr;
|
|
|
|
|
int err;
|
2021-06-10 09:01:52 +02:00
|
|
|
u64 offset;
|
2019-11-07 18:06:01 +00:00
|
|
|
|
2020-01-03 20:41:36 +00:00
|
|
|
if (!can_mmap(obj, type))
|
2019-11-07 18:06:01 +00:00
|
|
|
return 0;
|
|
|
|
|
|
2021-06-10 09:01:52 +02:00
|
|
|
err = __assign_mmap_offset(obj, type, &offset, NULL);
|
|
|
|
|
if (err)
|
|
|
|
|
return err;
|
2019-11-07 18:06:01 +00:00
|
|
|
|
2021-06-10 09:01:52 +02:00
|
|
|
addr = igt_mmap_offset(i915, offset, obj->base.size, PROT_WRITE, MAP_SHARED);
|
2020-01-03 20:41:36 +00:00
|
|
|
if (IS_ERR_VALUE(addr))
|
|
|
|
|
return addr;
|
2019-11-07 18:06:01 +00:00
|
|
|
|
|
|
|
|
err = prefault_range(addr, obj->base.size);
|
|
|
|
|
if (err)
|
|
|
|
|
goto out_unmap;
|
|
|
|
|
|
|
|
|
|
err = check_present(addr, obj->base.size);
|
2020-01-03 20:41:36 +00:00
|
|
|
if (err) {
|
|
|
|
|
pr_err("%s: was not present\n", obj->mm.region->name);
|
2019-11-07 18:06:01 +00:00
|
|
|
goto out_unmap;
|
2020-01-03 20:41:36 +00:00
|
|
|
}
|
2019-11-07 18:06:01 +00:00
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* After unbinding the object from the GGTT, its address may be reused
|
|
|
|
|
* for other objects. Ergo we have to revoke the previous mmap PTE
|
|
|
|
|
* access as it no longer points to the same object.
|
|
|
|
|
*/
|
2022-01-14 14:23:18 +01:00
|
|
|
i915_gem_object_lock(obj, NULL);
|
2019-11-07 18:06:01 +00:00
|
|
|
err = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
|
2022-01-14 14:23:18 +01:00
|
|
|
i915_gem_object_unlock(obj);
|
2019-11-07 18:06:01 +00:00
|
|
|
if (err) {
|
|
|
|
|
pr_err("Failed to unbind object!\n");
|
|
|
|
|
goto out_unmap;
|
|
|
|
|
}
|
|
|
|
|
|
2019-12-04 12:00:32 +00:00
|
|
|
if (type != I915_MMAP_TYPE_GTT) {
|
2021-03-23 16:50:36 +01:00
|
|
|
i915_gem_object_lock(obj, NULL);
|
2019-12-04 12:00:32 +00:00
|
|
|
__i915_gem_object_put_pages(obj);
|
2021-03-23 16:50:36 +01:00
|
|
|
i915_gem_object_unlock(obj);
|
2019-12-04 12:00:32 +00:00
|
|
|
if (i915_gem_object_has_pages(obj)) {
|
|
|
|
|
pr_err("Failed to put-pages object!\n");
|
|
|
|
|
err = -EINVAL;
|
|
|
|
|
goto out_unmap;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2022-01-06 17:49:09 +00:00
|
|
|
err = check_absent(addr, obj->base.size);
|
|
|
|
|
if (err) {
|
|
|
|
|
pr_err("%s: was not absent\n", obj->mm.region->name);
|
|
|
|
|
goto out_unmap;
|
2020-01-03 20:41:36 +00:00
|
|
|
}
|
2019-11-07 18:06:01 +00:00
|
|
|
|
|
|
|
|
out_unmap:
|
|
|
|
|
vm_munmap(addr, obj->base.size);
|
|
|
|
|
return err;
|
|
|
|
|
}
|
|
|
|
|
|
2020-01-03 20:41:36 +00:00
|
|
|
static int igt_mmap_revoke(void *arg)
|
2019-12-04 12:00:32 +00:00
|
|
|
{
|
2020-01-03 20:41:36 +00:00
|
|
|
struct drm_i915_private *i915 = arg;
|
|
|
|
|
struct intel_memory_region *mr;
|
|
|
|
|
enum intel_region_id id;
|
2019-12-04 12:00:32 +00:00
|
|
|
|
2020-01-03 20:41:36 +00:00
|
|
|
for_each_memory_region(mr, i915, id) {
|
|
|
|
|
struct drm_i915_gem_object *obj;
|
|
|
|
|
int err;
|
|
|
|
|
|
2021-07-29 10:47:31 +01:00
|
|
|
obj = __i915_gem_object_create_user(i915, PAGE_SIZE, &mr, 1);
|
2020-01-03 20:41:36 +00:00
|
|
|
if (obj == ERR_PTR(-ENODEV))
|
|
|
|
|
continue;
|
|
|
|
|
|
|
|
|
|
if (IS_ERR(obj))
|
|
|
|
|
return PTR_ERR(obj);
|
|
|
|
|
|
|
|
|
|
err = __igt_mmap_revoke(i915, obj, I915_MMAP_TYPE_GTT);
|
|
|
|
|
if (err == 0)
|
|
|
|
|
err = __igt_mmap_revoke(i915, obj, I915_MMAP_TYPE_WC);
|
2021-07-14 14:28:33 +02:00
|
|
|
if (err == 0)
|
|
|
|
|
err = __igt_mmap_revoke(i915, obj, I915_MMAP_TYPE_FIXED);
|
2020-01-03 20:41:36 +00:00
|
|
|
|
|
|
|
|
i915_gem_object_put(obj);
|
|
|
|
|
if (err)
|
|
|
|
|
return err;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return 0;
|
2019-12-04 12:00:32 +00:00
|
|
|
}
|
|
|
|
|
|
2019-05-28 10:29:47 +01:00
|
|
|
int i915_gem_mman_live_selftests(struct drm_i915_private *i915)
|
|
|
|
|
{
|
|
|
|
|
static const struct i915_subtest tests[] = {
|
|
|
|
|
SUBTEST(igt_partial_tiling),
|
2019-09-10 13:10:09 +01:00
|
|
|
SUBTEST(igt_smoke_tiling),
|
2019-05-28 10:29:47 +01:00
|
|
|
SUBTEST(igt_mmap_offset_exhaustion),
|
2020-01-03 20:41:36 +00:00
|
|
|
SUBTEST(igt_mmap),
|
2022-02-28 12:36:07 +00:00
|
|
|
SUBTEST(igt_mmap_migrate),
|
2020-05-01 15:51:20 +01:00
|
|
|
SUBTEST(igt_mmap_access),
|
2020-01-03 20:41:36 +00:00
|
|
|
SUBTEST(igt_mmap_revoke),
|
2020-01-03 20:41:37 +00:00
|
|
|
SUBTEST(igt_mmap_gpu),
|
2019-05-28 10:29:47 +01:00
|
|
|
};
|
|
|
|
|
|
|
|
|
|
return i915_subtests(tests, i915);
|
|
|
|
|
}
|