UAPI Changes:

- GuC hwconfig support and query (John Harrison, Rodrigo Vivi, Tvrtko Ursulin)
- Sysfs support for multi-tile devices (Andi Shyti, Sujaritha Sundaresan)
- Per client GPU utilisation via fdinfo (Tvrtko Ursulin, Ashutosh Dixit)
- Add DRM_I915_QUERY_GEOMETRY_SUBSLICES (Matt Atwood)

Cross-subsystem Changes:

- Add GSC as a MEI auxiliary device (Tomas Winkler, Alexander Usyskin)

Core Changes:

- Document fdinfo format specification (Tvrtko Ursulin)

Driver Changes:

- Fix prime_mmap to work when using LMEM (Gwan-gyeong Mun)
- Fix vm open count and remove vma refcount (Thomas Hellström)
- Fixup setting screen_size (Matthew Auld)
- Opportunistically apply ALLOC_CONTIGIOUS (Matthew Auld)
- Limit where we apply TTM_PL_FLAG_CONTIGUOUS (Matthew Auld)
- Drop aux table invalidation on FlatCCS platforms (Matt Roper)
- Add missing boundary check in vm_access (Mastan Katragadda)
- Update topology dumps for Xe_HP (Matt Roper)
- Add support for steered register writes (Matt Roper)
- Add steering info to GuC register save/restore list (Daniele Ceraolo Spurio)
- Small PCI BAR enabling (Matthew Auld, Akeem G Abodunrin, CQ Tang)
- Add preemption changes for Wa_14015141709 (Akeem G Abodunrin)
- Add logical mapping for video decode engines (Matthew Brost)
- Don't evict unmappable VMAs when pinning with PIN_MAPPABLE (v2) (Vivek Kasireddy)
- GuC error capture support (Alan Previn, Daniele Ceraolo Spurio)
- avoid concurrent writes to aux_inv (Fei Yang)
- Add Wa_22014226127 (José Roberto de Souza)
- Sunset igpu legacy mmap support based on GRAPHICS_VER_FULL (Matt Roper)
- Evict and restore of compressed objects (Ramalingam C)
- Update to GuC version 70.1.1 (John Harrison)
- Add Wa_22011802037 force cs halt (Tilak Tangudu)
- Enable Wa_22011802037 for gen12 GuC based platforms (Umesh Nerlige Ramappa)
- GuC based workarounds for DG2 (Vinay Belgaumkar, John Harrison, Matthew Brost, José Roberto de Souza)
- consider min_page_size when migrating (Matthew Auld)
- Prep work for next GuC firmware release (John Harrison)
- Support platforms with CCS engines but no RCS (Matt Roper, Stuart Summers)
- Don't overallocate subslice storage (Matt Roper)
- Reduce stack usage in debugfs due to SSEU (John Harrison)
- Report steering details in debugfs (Matt Roper)
- Refactor some x86-ism out to prepare for non-x86 builds (Michael Cheng)
- add lmem_size modparam (CQ Tang)
- Refactor for non-x86 driver builds (Casey Bowman)
- Centralize computation of freq caps (Ashutosh Dixit)
- Update dma_buf_ops.unmap_dma_buf callback to use drm_gem_unmap_dma_buf() (Gwan-gyeong Mun)
- Limit the async bind to bind_async_flags (Matthew Auld)
- Stop checking for NULL vma->obj (Matthew Auld)
- Reduce overzealous alignment constraints for GGTT (Matthew Auld)
- Remove GEN12_SFC_DONE_MAX from register defs header (Matt Roper)
- Fix renamed struct field (Lucas De Marchi)
- Do not return '0' if there is nothing to return (Andi Shyti)
- fix i915_reg_t initialization (Jani Nikula)
- move the migration sanity check (Matthew Auld)
- handle more rounding in selftests (Matthew Auld)
- Perf and i915 query kerneldoc updates (Matt Roper)
- Use i915_probe_error instead of drm_err (Vinay Belgaumkar)
- sanity check object size in the buddy allocator (Matthew Auld)
- fixup selftests min_alignment usage (Matthew Auld)
- tweak selftests misaligned_case (Matthew Auld)

Signed-off-by: Dave Airlie <airlied@redhat.com>

# Conflicts:
#	drivers/gpu/drm/i915/i915_vma.c

From: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/Ymkfy8FjsG2JrodK@tursulin-mobl2
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#include "i915_drv.h"
#include "i915_selftest.h"

#include "mock_dmabuf.h"
#include "selftests/mock_gem_device.h"
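/*
 * Sanity check the simplest export path: create a shmem object, export it
 * as a dma-buf, and verify the export (and subsequent put) succeeds.
 */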
static int igt_dmabuf_export(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	struct dma_buf *dmabuf;

	obj = i915_gem_object_create_shmem(i915, PAGE_SIZE);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	dmabuf = i915_gem_prime_export(&obj->base, 0);
	i915_gem_object_put(obj);
	if (IS_ERR(dmabuf)) {
		pr_err("i915_gem_prime_export failed with err=%d\n",
		       (int)PTR_ERR(dmabuf));
		return PTR_ERR(dmabuf);
	}

	dma_buf_put(dmabuf);
	return 0;
}
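/*
 * Export a shmem object and import it back into the same device: the
 * import must hand back the original GEM object rather than wrapping it
 * in a new one, and its backing pages must still be reachable.
 */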
static int igt_dmabuf_import_self(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj, *import_obj;
	struct drm_gem_object *import;
	struct dma_buf *dmabuf;
	int err;

	obj = i915_gem_object_create_shmem(i915, PAGE_SIZE);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	dmabuf = i915_gem_prime_export(&obj->base, 0);
	if (IS_ERR(dmabuf)) {
		pr_err("i915_gem_prime_export failed with err=%d\n",
		       (int)PTR_ERR(dmabuf));
		err = PTR_ERR(dmabuf);
		goto out;
	}

	import = i915_gem_prime_import(&i915->drm, dmabuf);
	if (IS_ERR(import)) {
		pr_err("i915_gem_prime_import failed with err=%d\n",
		       (int)PTR_ERR(import));
		err = PTR_ERR(import);
		goto out_dmabuf;
	}
	import_obj = to_intel_bo(import);

	if (import != &obj->base) {
		pr_err("i915_gem_prime_import created a new object!\n");
		err = -EINVAL;
		goto out_import;
	}

	i915_gem_object_lock(import_obj, NULL);
	err = __i915_gem_object_get_pages(import_obj);
	i915_gem_object_unlock(import_obj);
	if (err) {
		pr_err("Same object dma-buf get_pages failed!\n");
		goto out_import;
	}

	err = 0;
out_import:
	i915_gem_object_put(import_obj);
out_dmabuf:
	dma_buf_put(dmabuf);
out:
	i915_gem_object_put(obj);
	return err;
}
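/*
 * With force_different_devices set, importing a dma-buf backed by an
 * LMEM-only object must fail with -EOPNOTSUPP, since the object cannot
 * be migrated to system memory for sharing.
 */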
static int igt_dmabuf_import_same_driver_lmem(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_memory_region *lmem = i915->mm.regions[INTEL_REGION_LMEM_0];
	struct drm_i915_gem_object *obj;
	struct drm_gem_object *import;
	struct dma_buf *dmabuf;
	int err;

	if (!lmem)
		return 0;

	force_different_devices = true;

	obj = __i915_gem_object_create_user(i915, PAGE_SIZE, &lmem, 1);
	if (IS_ERR(obj)) {
		pr_err("__i915_gem_object_create_user failed with err=%ld\n",
		       PTR_ERR(obj));
		err = PTR_ERR(obj);
		goto out_ret;
	}

	dmabuf = i915_gem_prime_export(&obj->base, 0);
	if (IS_ERR(dmabuf)) {
		pr_err("i915_gem_prime_export failed with err=%ld\n",
		       PTR_ERR(dmabuf));
		err = PTR_ERR(dmabuf);
		goto out;
	}

	/*
	 * We expect an import of an LMEM-only object to fail with
	 * -EOPNOTSUPP because it can't be migrated to SMEM.
	 */
	import = i915_gem_prime_import(&i915->drm, dmabuf);
	if (!IS_ERR(import)) {
		drm_gem_object_put(import);
		pr_err("i915_gem_prime_import succeeded when it shouldn't have\n");
		err = -EINVAL;
	} else if (PTR_ERR(import) != -EOPNOTSUPP) {
		pr_err("i915_gem_prime_import failed with the wrong err=%ld\n",
		       PTR_ERR(import));
		err = PTR_ERR(import);
	} else {
		err = 0;
	}

	dma_buf_put(dmabuf);
out:
	i915_gem_object_put(obj);
out_ret:
	force_different_devices = false;
	return err;
}
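/*
 * Common helper: export an object placed in @regions and re-import it
 * while pretending the importer is a different device. A distinct GEM
 * object must be created, its pages must land in system memory, and a
 * fake importer must be able to attach, map and wait on the dma-buf.
 */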
static int igt_dmabuf_import_same_driver(struct drm_i915_private *i915,
					 struct intel_memory_region **regions,
					 unsigned int num_regions)
{
	struct drm_i915_gem_object *obj, *import_obj;
	struct drm_gem_object *import;
	struct dma_buf *dmabuf;
	struct dma_buf_attachment *import_attach;
	struct sg_table *st;
	long timeout;
	int err;

	force_different_devices = true;

	obj = __i915_gem_object_create_user(i915, PAGE_SIZE,
					    regions, num_regions);
	if (IS_ERR(obj)) {
		pr_err("__i915_gem_object_create_user failed with err=%ld\n",
		       PTR_ERR(obj));
		err = PTR_ERR(obj);
		goto out_ret;
	}

	dmabuf = i915_gem_prime_export(&obj->base, 0);
	if (IS_ERR(dmabuf)) {
		pr_err("i915_gem_prime_export failed with err=%ld\n",
		       PTR_ERR(dmabuf));
		err = PTR_ERR(dmabuf);
		goto out;
	}

	import = i915_gem_prime_import(&i915->drm, dmabuf);
	if (IS_ERR(import)) {
		pr_err("i915_gem_prime_import failed with err=%ld\n",
		       PTR_ERR(import));
		err = PTR_ERR(import);
		goto out_dmabuf;
	}
	import_obj = to_intel_bo(import);

	if (import == &obj->base) {
		pr_err("i915_gem_prime_import reused gem object!\n");
		err = -EINVAL;
		goto out_import;
	}

	i915_gem_object_lock(import_obj, NULL);
	err = __i915_gem_object_get_pages(import_obj);
	if (err) {
		pr_err("Different objects dma-buf get_pages failed!\n");
		i915_gem_object_unlock(import_obj);
		goto out_import;
	}

	/*
	 * If the exported object is not in system memory, something
	 * weird is going on. TODO: When p2p is supported, this is no
	 * longer considered weird.
	 */
	if (obj->mm.region != i915->mm.regions[INTEL_REGION_SMEM]) {
		pr_err("Exported dma-buf is not in system memory\n");
		err = -EINVAL;
	}

	i915_gem_object_unlock(import_obj);

	/* Now try a fake importer */
	import_attach = dma_buf_attach(dmabuf, obj->base.dev->dev);
	if (IS_ERR(import_attach)) {
		err = PTR_ERR(import_attach);
		goto out_import;
	}

	st = dma_buf_map_attachment(import_attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(st)) {
		err = PTR_ERR(st);
		goto out_detach;
	}

	timeout = dma_resv_wait_timeout(dmabuf->resv, DMA_RESV_USAGE_WRITE,
					true, 5 * HZ);
	if (!timeout) {
		pr_err("dmabuf wait for exclusive fence timed out.\n");
		timeout = -ETIME;
	}
	err = timeout > 0 ? 0 : timeout;
	dma_buf_unmap_attachment(import_attach, st, DMA_BIDIRECTIONAL);
out_detach:
	dma_buf_detach(dmabuf, import_attach);
out_import:
	i915_gem_object_put(import_obj);
out_dmabuf:
	dma_buf_put(dmabuf);
out:
	i915_gem_object_put(obj);
out_ret:
	force_different_devices = false;
	return err;
}
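/* Same-driver import test for an object restricted to system memory. */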
static int igt_dmabuf_import_same_driver_smem(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_memory_region *smem = i915->mm.regions[INTEL_REGION_SMEM];

	return igt_dmabuf_import_same_driver(i915, &smem, 1);
}
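/* Same-driver import test for an object allowed in both LMEM and SMEM. */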
static int igt_dmabuf_import_same_driver_lmem_smem(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_memory_region *regions[2];

	if (!i915->mm.regions[INTEL_REGION_LMEM_0])
		return 0;

	regions[0] = i915->mm.regions[INTEL_REGION_LMEM_0];
	regions[1] = i915->mm.regions[INTEL_REGION_SMEM];
	return igt_dmabuf_import_same_driver(i915, regions, 2);
}
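/*
 * Import a mock dma-buf and check that the resulting GEM object belongs
 * to this device and has the expected size. The pattern-writing check of
 * the two vmaps is compiled out until imported dma-bufs can be mapped.
 */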
static int igt_dmabuf_import(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	struct dma_buf *dmabuf;
	void *obj_map, *dma_map;
	struct iosys_map map;
	u32 pattern[] = { 0, 0xaa, 0xcc, 0x55, 0xff };
	int err, i;

	dmabuf = mock_dmabuf(1);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	obj = to_intel_bo(i915_gem_prime_import(&i915->drm, dmabuf));
	if (IS_ERR(obj)) {
		pr_err("i915_gem_prime_import failed with err=%d\n",
		       (int)PTR_ERR(obj));
		err = PTR_ERR(obj);
		goto out_dmabuf;
	}

	if (obj->base.dev != &i915->drm) {
		pr_err("i915_gem_prime_import created a non-i915 object!\n");
		err = -EINVAL;
		goto out_obj;
	}

	if (obj->base.size != PAGE_SIZE) {
		pr_err("i915_gem_prime_import is wrong size found %lld, expected %ld\n",
		       (long long)obj->base.size, PAGE_SIZE);
		err = -EINVAL;
		goto out_obj;
	}

	err = dma_buf_vmap(dmabuf, &map);
	dma_map = err ? NULL : map.vaddr;
	if (!dma_map) {
		pr_err("dma_buf_vmap failed\n");
		err = -ENOMEM;
		goto out_obj;
	}

	if (0) { /* Can not yet map dmabuf */
		obj_map = i915_gem_object_pin_map(obj, I915_MAP_WB);
		if (IS_ERR(obj_map)) {
			err = PTR_ERR(obj_map);
			pr_err("i915_gem_object_pin_map failed with err=%d\n", err);
			goto out_dma_map;
		}

		for (i = 0; i < ARRAY_SIZE(pattern); i++) {
			memset(dma_map, pattern[i], PAGE_SIZE);
			if (memchr_inv(obj_map, pattern[i], PAGE_SIZE)) {
				err = -EINVAL;
				pr_err("imported vmap not all set to %x!\n", pattern[i]);
				i915_gem_object_unpin_map(obj);
				goto out_dma_map;
			}
		}

		for (i = 0; i < ARRAY_SIZE(pattern); i++) {
			memset(obj_map, pattern[i], PAGE_SIZE);
			if (memchr_inv(dma_map, pattern[i], PAGE_SIZE)) {
				err = -EINVAL;
				pr_err("exported vmap not all set to %x!\n", pattern[i]);
				i915_gem_object_unpin_map(obj);
				goto out_dma_map;
			}
		}

		i915_gem_object_unpin_map(obj);
	}

	err = 0;
out_dma_map:
	dma_buf_vunmap(dmabuf, &map);
out_obj:
	i915_gem_object_put(obj);
out_dmabuf:
	dma_buf_put(dmabuf);
	return err;
}
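/*
 * Check that an imported object keeps its backing store alive: drop our
 * dma-buf reference immediately after import and make sure the object's
 * pages can still be pinned.
 */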
static int igt_dmabuf_import_ownership(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	struct dma_buf *dmabuf;
	struct iosys_map map;
	void *ptr;
	int err;

	dmabuf = mock_dmabuf(1);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	err = dma_buf_vmap(dmabuf, &map);
	ptr = err ? NULL : map.vaddr;
	if (!ptr) {
		pr_err("dma_buf_vmap failed\n");
		err = -ENOMEM;
		goto err_dmabuf;
	}

	memset(ptr, 0xc5, PAGE_SIZE);
	dma_buf_vunmap(dmabuf, &map);

	obj = to_intel_bo(i915_gem_prime_import(&i915->drm, dmabuf));
	if (IS_ERR(obj)) {
		pr_err("i915_gem_prime_import failed with err=%d\n",
		       (int)PTR_ERR(obj));
		err = PTR_ERR(obj);
		goto err_dmabuf;
	}

	dma_buf_put(dmabuf);

	err = i915_gem_object_pin_pages_unlocked(obj);
	if (err) {
		pr_err("i915_gem_object_pin_pages failed with err=%d\n", err);
		goto out_obj;
	}

	err = 0;
	i915_gem_object_unpin_pages(obj);
out_obj:
	i915_gem_object_put(obj);
	return err;

err_dmabuf:
	dma_buf_put(dmabuf);
	return err;
}
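/*
 * Export a freshly created object and vmap it through the dma-buf: the
 * mapping must be zero-initialised and writable.
 */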
static int igt_dmabuf_export_vmap(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	struct dma_buf *dmabuf;
	struct iosys_map map;
	void *ptr;
	int err;

	obj = i915_gem_object_create_shmem(i915, PAGE_SIZE);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	dmabuf = i915_gem_prime_export(&obj->base, 0);
	if (IS_ERR(dmabuf)) {
		pr_err("i915_gem_prime_export failed with err=%d\n",
		       (int)PTR_ERR(dmabuf));
		err = PTR_ERR(dmabuf);
		goto err_obj;
	}
	i915_gem_object_put(obj);

	err = dma_buf_vmap(dmabuf, &map);
	ptr = err ? NULL : map.vaddr;
	if (!ptr) {
		pr_err("dma_buf_vmap failed\n");
		err = -ENOMEM;
		goto out;
	}

	if (memchr_inv(ptr, 0, dmabuf->size)) {
		pr_err("Exported object not initialised to zero!\n");
		err = -EINVAL;
		goto out;
	}

	memset(ptr, 0xc5, dmabuf->size);

	err = 0;
	dma_buf_vunmap(dmabuf, &map);
out:
	dma_buf_put(dmabuf);
	return err;

err_obj:
	i915_gem_object_put(obj);
	return err;
}
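/* Subtests run against a mock device, no hardware required. */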
int i915_gem_dmabuf_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_dmabuf_export),
		SUBTEST(igt_dmabuf_import_self),
		SUBTEST(igt_dmabuf_import),
		SUBTEST(igt_dmabuf_import_ownership),
		SUBTEST(igt_dmabuf_export_vmap),
	};
	struct drm_i915_private *i915;
	int err;

	i915 = mock_gem_device();
	if (!i915)
		return -ENOMEM;

	err = i915_subtests(tests, i915);

	mock_destroy_device(i915);
	return err;
}
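/* Subtests requiring a live device, including LMEM where present. */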
int i915_gem_dmabuf_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_dmabuf_export),
		SUBTEST(igt_dmabuf_import_same_driver_lmem),
		SUBTEST(igt_dmabuf_import_same_driver_smem),
		SUBTEST(igt_dmabuf_import_same_driver_lmem_smem),
	};

	return i915_subtests(tests, i915);
}