linux/drivers/gpu/drm/i915/display/intel_dpt.c

// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "gem/i915_gem_domain.h"
#include "gt/gen8_ppgtt.h"

#include "i915_drv.h"
#include "intel_display_types.h"
#include "intel_dpt.h"
#include "intel_fb.h"

struct i915_dpt {
	struct i915_address_space vm;

	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	void __iomem *iomem;
};

#define i915_is_dpt(vm) ((vm)->is_dpt)

static inline struct i915_dpt *
i915_vm_to_dpt(struct i915_address_space *vm)
{
	BUILD_BUG_ON(offsetof(struct i915_dpt, vm));
	GEM_BUG_ON(!i915_is_dpt(vm));
	return container_of(vm, struct i915_dpt, vm);
}

#define dpt_total_entries(dpt) ((dpt)->vm.total >> PAGE_SHIFT)

static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
{
	writeq(pte, addr);
}

static void dpt_insert_page(struct i915_address_space *vm,
			    dma_addr_t addr,
			    u64 offset,
			    enum i915_cache_level level,
			    u32 flags)
{
	struct i915_dpt *dpt = i915_vm_to_dpt(vm);
	gen8_pte_t __iomem *base = dpt->iomem;

	gen8_set_pte(base + offset / I915_GTT_PAGE_SIZE,
		     vm->pte_encode(addr, level, flags));
}
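
/*
 * Worked example (illustrative only, not part of the driver): each
 * gen8_pte_t is 8 bytes and maps one I915_GTT_PAGE_SIZE (4 KiB) page,
 * so a DPT offset of 0x3000 selects slot 0x3000 / 4096 = 3, i.e. the
 * write above lands at dpt->iomem + 3 * sizeof(gen8_pte_t) = +0x18.
 */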

static void dpt_insert_entries(struct i915_address_space *vm,
			       struct i915_vma_resource *vma_res,
			       enum i915_cache_level level,
			       u32 flags)
{
	struct i915_dpt *dpt = i915_vm_to_dpt(vm);
	gen8_pte_t __iomem *base = dpt->iomem;
	const gen8_pte_t pte_encode = vm->pte_encode(0, level, flags);
	struct sgt_iter sgt_iter;
	dma_addr_t addr;
	int i;

	/*
	 * Note that we ignore PTE_READ_ONLY here. The caller must be careful
	 * not to allow the user to override access to a read only page.
	 */
	i = vma_res->start / I915_GTT_PAGE_SIZE;
	for_each_sgt_daddr(addr, sgt_iter, vma_res->bi.pages)
		gen8_set_pte(&base[i++], pte_encode | addr);
}

static void dpt_clear_range(struct i915_address_space *vm,
			    u64 start, u64 length)
{
}

static void dpt_bind_vma(struct i915_address_space *vm,
			 struct i915_vm_pt_stash *stash,
			 struct i915_vma_resource *vma_res,
			 enum i915_cache_level cache_level,
			 u32 flags)
{
	u32 pte_flags;

	if (vma_res->bound_flags)
		return;

	/* Applicable to VLV (gen8+ do not support RO in the GGTT) */
	pte_flags = 0;
	if (vm->has_read_only && vma_res->bi.readonly)
		pte_flags |= PTE_READ_ONLY;
	if (vma_res->bi.lmem)
		pte_flags |= PTE_LM;

	vm->insert_entries(vm, vma_res, cache_level, pte_flags);

	vma_res->page_sizes_gtt = I915_GTT_PAGE_SIZE;

	/*
	 * Without aliasing PPGTT there's no difference between
	 * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally
	 * upgrade to both bound if we bind either to avoid double-binding.
	 */
	vma_res->bound_flags = I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
}

static void dpt_unbind_vma(struct i915_address_space *vm,
			   struct i915_vma_resource *vma_res)
{
	vm->clear_range(vm, vma_res->start, vma_res->vma_size);
}

static void dpt_cleanup(struct i915_address_space *vm)
{
	struct i915_dpt *dpt = i915_vm_to_dpt(vm);

	i915_gem_object_put(dpt->obj);
}

struct i915_vma *intel_dpt_pin(struct i915_address_space *vm)
{
	struct drm_i915_private *i915 = vm->i915;
	struct i915_dpt *dpt = i915_vm_to_dpt(vm);
	intel_wakeref_t wakeref;
	struct i915_vma *vma;
	void __iomem *iomem;
	struct i915_gem_ww_ctx ww;
	int err;

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
	atomic_inc(&i915->gpu_error.pending_fb_pin);

	for_i915_gem_ww(&ww, err, true) {
		err = i915_gem_object_lock(dpt->obj, &ww);
		if (err)
			continue;

		vma = i915_gem_object_ggtt_pin_ww(dpt->obj, &ww, NULL, 0, 4096,
						  HAS_LMEM(i915) ? 0 : PIN_MAPPABLE);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			continue;
		}

		iomem = i915_vma_pin_iomap(vma);
		i915_vma_unpin(vma);
		if (IS_ERR(iomem)) {
			err = PTR_ERR(iomem);
			continue;
		}

		dpt->vma = vma;
		dpt->iomem = iomem;

		i915_vma_get(vma);
	}

	atomic_dec(&i915->gpu_error.pending_fb_pin);
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);

	return err ? ERR_PTR(err) : vma;
}

void intel_dpt_unpin(struct i915_address_space *vm)
{
	struct i915_dpt *dpt = i915_vm_to_dpt(vm);

	i915_vma_unpin_iomap(dpt->vma);
	i915_vma_put(dpt->vma);
}
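
/*
 * Illustrative usage (a sketch, not code from this driver): a caller
 * that needs the DPT mapped before programming a display plane might
 * pair the two entry points above like so, where fb->dpt_vm is the
 * per-framebuffer DPT address space created by intel_dpt_create():
 *
 *	struct i915_vma *vma = intel_dpt_pin(fb->dpt_vm);
 *
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 *	... program the plane surface address from vma ...
 *	intel_dpt_unpin(fb->dpt_vm);
 */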

/**
 * intel_dpt_resume - restore the memory mapping for all DPT FBs during system resume
 * @i915: device instance
 *
 * Restore the memory mapping during system resume for all framebuffers which
 * are mapped to HW via a GGTT->DPT page table. The contents of these page
 * tables are not stored in the hibernation image during S4 and S3RST->S4
 * transitions, so here we reprogram the PTE entries in those tables.
 *
 * This function must be called after the mappings in GGTT have been restored
 * by calling i915_ggtt_resume().
 */
void intel_dpt_resume(struct drm_i915_private *i915)
{
	struct drm_framebuffer *drm_fb;

	if (!HAS_DISPLAY(i915))
		return;

	mutex_lock(&i915->drm.mode_config.fb_lock);
	drm_for_each_fb(drm_fb, &i915->drm) {
		struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);

		if (fb->dpt_vm)
			i915_ggtt_resume_vm(fb->dpt_vm);
	}
	mutex_unlock(&i915->drm.mode_config.fb_lock);
}

/**
 * intel_dpt_suspend - suspend the memory mapping for all DPT FBs during system suspend
 * @i915: device instance
 *
 * Suspend the memory mapping during system suspend for all framebuffers which
 * are mapped to HW via a GGTT->DPT page table.
 *
 * This function must be called before the mappings in GGTT are suspended by
 * calling i915_ggtt_suspend().
 */
void intel_dpt_suspend(struct drm_i915_private *i915)
{
	struct drm_framebuffer *drm_fb;

	if (!HAS_DISPLAY(i915))
		return;

	mutex_lock(&i915->drm.mode_config.fb_lock);
	drm_for_each_fb(drm_fb, &i915->drm) {
		struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);

		if (fb->dpt_vm)
			i915_ggtt_suspend_vm(fb->dpt_vm);
	}
	mutex_unlock(&i915->drm.mode_config.fb_lock);
}
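
/*
 * Illustrative ordering (a sketch under the constraints documented in
 * the two kernel-docs above; the exact GGTT helper invocations are an
 * assumption): a suspend/resume sequence brackets the GGTT helpers,
 * suspending the DPTs first and resuming them last:
 *
 *	intel_dpt_suspend(i915);
 *	i915_ggtt_suspend(ggtt);
 *	... system sleeps ...
 *	i915_ggtt_resume(ggtt);
 *	intel_dpt_resume(i915);
 */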

struct i915_address_space *
intel_dpt_create(struct intel_framebuffer *fb)
{
	struct drm_gem_object *obj = &intel_fb_obj(&fb->base)->base;
	struct drm_i915_private *i915 = to_i915(obj->dev);
	struct drm_i915_gem_object *dpt_obj;
	struct i915_address_space *vm;
	struct i915_dpt *dpt;
	size_t size;
	int ret;

	if (intel_fb_needs_pot_stride_remap(fb))
		size = intel_remapped_info_size(&fb->remapped_view.gtt.remapped);
	else
		size = DIV_ROUND_UP_ULL(obj->size, I915_GTT_PAGE_SIZE);

	size = round_up(size * sizeof(gen8_pte_t), I915_GTT_PAGE_SIZE);

	if (HAS_LMEM(i915))
		dpt_obj = i915_gem_object_create_lmem(i915, size, I915_BO_ALLOC_CONTIGUOUS);
	else
		dpt_obj = i915_gem_object_create_stolen(i915, size);
	if (IS_ERR(dpt_obj))
		return ERR_CAST(dpt_obj);

	ret = i915_gem_object_lock_interruptible(dpt_obj, NULL);
	if (!ret) {
		ret = i915_gem_object_set_cache_level(dpt_obj, I915_CACHE_NONE);
		i915_gem_object_unlock(dpt_obj);
	}
	if (ret) {
		i915_gem_object_put(dpt_obj);
		return ERR_PTR(ret);
	}

	dpt = kzalloc(sizeof(*dpt), GFP_KERNEL);
	if (!dpt) {
		i915_gem_object_put(dpt_obj);
		return ERR_PTR(-ENOMEM);
	}

	vm = &dpt->vm;

	vm->gt = to_gt(i915);
	vm->i915 = i915;
	vm->dma = i915->drm.dev;
	vm->total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE;
	vm->is_dpt = true;

	i915_address_space_init(vm, VM_CLASS_DPT);

	vm->insert_page = dpt_insert_page;
	vm->clear_range = dpt_clear_range;
	vm->insert_entries = dpt_insert_entries;
	vm->cleanup = dpt_cleanup;

	vm->vma_ops.bind_vma = dpt_bind_vma;
	vm->vma_ops.unbind_vma = dpt_unbind_vma;

	vm->pte_encode = gen8_ggtt_pte_encode;

	dpt->obj = dpt_obj;

	return &dpt->vm;
}
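
/*
 * Worked example for the size computation above (illustrative only):
 * a linear 8 MiB framebuffer object needs
 * 8 MiB / I915_GTT_PAGE_SIZE = 2048 PTEs, which occupy
 * 2048 * sizeof(gen8_pte_t) = 16 KiB of DPT backing store after the
 * round_up(), and vm->total maps that back to the 8 MiB of address
 * space the DPT can translate.
 */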

void intel_dpt_destroy(struct i915_address_space *vm)
{
	struct i915_dpt *dpt = i915_vm_to_dpt(vm);

	i915_vm_put(&dpt->vm);
}
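
/*
 * Illustrative lifecycle (a sketch, not code from this driver): a DPT
 * follows its framebuffer from creation to teardown, roughly:
 *
 *	vm = intel_dpt_create(fb);	// addfb: allocate the page table
 *	vma = intel_dpt_pin(vm);	// modeset: map it for the hardware
 *	...
 *	intel_dpt_unpin(vm);		// plane disabled
 *	intel_dpt_destroy(vm);		// rmfb: drop the final vm reference
 */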