mirror of https://github.com/torvalds/linux.git
drm fixes for 5.5-rc2
dma-buf:
 - memory leak fix
 - expand MAINTAINERS scope

core:
 - fix mode matching for drivers not using picture_aspect_ratio

nouveau:
 - panel scaling fix
 - MST BPC fix
 - atomic fixes

i915:
 - GPU hang on idle transition
 - GLK+ FBC corruption fix
 - non-priv OA access on Tigerlake
 - HDCP state fix
 - CI found race fixes

amdgpu:
 - renoir DC fixes
 - GFX8 fence flush alignment with userspace
 - Arcturus power profile fix
 - DC aux + i2c over aux fixes
 - GPUVM invalidation semaphore fixes
 - gfx10 golden registers update

mgag200:
 - expand startadd fix

panfrost:
 - devfreq fix
 - memory fixes

mcde:
 - DSI pointer deref fix

-----BEGIN PGP SIGNATURE-----

iQIcBAABAgAGBQJd8zcPAAoJEAx081l5xIa+6E4P/Aj21PFPaKv6IVS0cFHxuwUS
Cv+zlaKJFUxcj286nZGp4UsnAc2jHTJAjY9D48yHkyRjBvHdmNlh3DqCSsr4g25L
UfuFppvzXR1fasapLoXYaxT3R+iDKMp+uLFNqchL+J6w6UwaIhIaSorcCqR6pxX1
YYO+v20RTubvdqUqov/k6/rUXvxDwPsbNZnC2c3+2DKAROLvp1bBdw8v/8V9VLNu
2g85UO8Ck2Eut81B0oiTK5UVtVXpQ4besq1Y9k0NR7DagiG4tDVKin8bjne8Ftn2
GsMuWiN2Kiof/1JfKZkmnZiulDzPmBeaozx22CoBBhMkU5u9bSSTShKlIUOSFik7
/37w/8bKPOiyUVC4xF3/pvh2yVKorGYu2jw2BYGc5PSwAa6e3VmMlgrRUdEUtAHU
zvYeSdpRMyA4xwo+knPRM93phqydriBvMaXTxf2GJZ5fzOoDH8C+PigFhJKkfSSw
FZfNY3uqMk8/VrPT9uYjDhM13PhK44PLOZDSMuwkNys+kfnz0+Q51ixgOT1BBrKe
605VHwjF/fdEtmZSICtM60GnowfjxNaY61zWY7J5T4E07kqy2etuoTdn9e3OZPPr
oZxkc+LMdGPrC5Wu3+A4ZGdks0uhJCEoG+xK/xFqXQOGZWzvGcLdkgPNJO6DuHbt
KNPNvSgg31wI6yBP9OZw
=qiTA
-----END PGP SIGNATURE-----

Merge tag 'drm-fixes-2019-12-13' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
 "Usual round of rc2 fixes. i915 and amdgpu leading the charge, but a
  few others in here, including some nouveau fixes. All seems pretty
  normal for rc2, but hey, it's a Fri 13th pull so I'm sure it'll cause
  untold bad fortune.

  dma-buf:
   - memory leak fix
   - expand MAINTAINERS scope

  core:
   - fix mode matching for drivers not using picture_aspect_ratio

  nouveau:
   - panel scaling fix
   - MST BPC fix
   - atomic fixes

  i915:
   - GPU hang on idle transition
   - GLK+ FBC corruption fix
   - non-priv OA access on Tigerlake
   - HDCP state fix
   - CI found race fixes

  amdgpu:
   - renoir DC fixes
   - GFX8 fence flush alignment with userspace
   - Arcturus power profile fix
   - DC aux + i2c over aux fixes
   - GPUVM invalidation semaphore fixes
   - gfx10 golden registers update

  mgag200:
   - expand startadd fix

  panfrost:
   - devfreq fix
   - memory fixes

  mcde:
   - DSI pointer deref fix"

* tag 'drm-fixes-2019-12-13' of git://anongit.freedesktop.org/drm/drm: (51 commits)
  drm/amdgpu: add invalidate semaphore limit for SRIOV in gmc10
  drm/amdgpu: add invalidate semaphore limit for SRIOV and picasso in gmc9
  drm/amdgpu: avoid using invalidate semaphore for picasso
  Revert "drm/amdgpu: dont schedule jobs while in reset"
  drm/amdgpu: fix license on Kconfig and Makefiles
  drm/amdgpu/gfx10: update gfx golden settings for navi14
  drm/amdgpu/gfx10: update gfx golden settings
  drm/amdgpu/gfx10: update gfx golden settings for navi14
  drm/amdgpu/gfx10: update gfx golden settings
  drm/i915: Serialise with remote retirement
  drm/amd/display: include linux/slab.h where needed
  drm/amd/display: fix undefined struct member reference
  drm/nouveau/kms/nv50-: fix panel scaling
  drm/nouveau/kms/nv50-: Limit MST BPC to 8
  drm/nouveau/kms/nv50-: Store the bpc we're using in nv50_head_atom
  drm/nouveau/kms/nv50-: Call outp_atomic_check_view() before handling PBN
  drm/nouveau: Fix drm-core using atomic code-paths on pre-nv50 hardware
  drm/nouveau: Move the declaration of struct nouveau_conn_atom up a bit
  drm/i915/gt: Detect if we miss WaIdleLiteRestore
  drm/i915/hdcp: Nuke intel_hdcp_transcoder_config()
  ...
commit b2cb931d72
@@ -4970,6 +4970,7 @@ F:	include/linux/dma-buf*
 F:	include/linux/reservation.h
 F:	include/linux/*fence.h
 F:	Documentation/driver-api/dma-buf.rst
+K:	dma_(buf|fence|resv)
 T:	git git://anongit.freedesktop.org/drm/drm-misc

 DMA GENERIC OFFLOAD ENGINE SUBSYSTEM
@@ -221,7 +221,7 @@ static struct sync_file *sync_file_merge(const char *name, struct sync_file *a,
 	a_fences = get_fences(a, &a_num_fences);
 	b_fences = get_fences(b, &b_num_fences);
 	if (a_num_fences > INT_MAX - b_num_fences)
-		return NULL;
+		goto err;

 	num_fences = a_num_fences + b_num_fences;
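Note: the `a_num_fences > INT_MAX - b_num_fences` test above is the standard way to reject signed-integer addition overflow before it happens, and the fix proper is that the failure path now takes `goto err` (which frees the already-fetched fence arrays) instead of leaking them with a bare `return NULL`. A minimal standalone sketch of the overflow guard (hypothetical helper, not part of the patch):

	#include <limits.h>

	/* Returns -1 if a + b would overflow an int, else stores the sum.
	 * Valid for non-negative a and b (fence counts here); rearranged as
	 * a > INT_MAX - b so the check itself cannot overflow. */
	static int checked_add(int a, int b, int *sum)
	{
		if (a > INT_MAX - b)
			return -1;
		*sum = a + b;
		return 0;
	}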
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0-only
+# SPDX-License-Identifier: MIT
 menu "ACP (Audio CoProcessor) Configuration"

 config DRM_AMD_ACP
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0-only
+# SPDX-License-Identifier: MIT
 config DRM_AMDGPU_SI
 	bool "Enable amdgpu support for SI parts"
 	depends on DRM_AMDGPU
@@ -604,11 +604,8 @@ void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
 			continue;
 		}

-		for (i = 0; i < num_entities; i++) {
-			mutex_lock(&ctx->adev->lock_reset);
+		for (i = 0; i < num_entities; i++)
 			drm_sched_entity_fini(&ctx->entities[0][i].entity);
-			mutex_unlock(&ctx->adev->lock_reset);
-		}
 	}
 }
@@ -268,23 +268,29 @@ static void df_v3_6_update_medium_grain_clock_gating(struct amdgpu_device *adev,
 {
 	u32 tmp;

-	/* Put DF on broadcast mode */
-	adev->df_funcs->enable_broadcast_mode(adev, true);
+	if (adev->cg_flags & AMD_CG_SUPPORT_DF_MGCG) {
+		/* Put DF on broadcast mode */
+		adev->df_funcs->enable_broadcast_mode(adev, true);

-	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DF_MGCG)) {
-		tmp = RREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater);
-		tmp &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK;
-		tmp |= DF_V3_6_MGCG_ENABLE_15_CYCLE_DELAY;
-		WREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater, tmp);
-	} else {
-		tmp = RREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater);
-		tmp &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK;
-		tmp |= DF_V3_6_MGCG_DISABLE;
-		WREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater, tmp);
+		if (enable) {
+			tmp = RREG32_SOC15(DF, 0,
+					mmDF_PIE_AON0_DfGlobalClkGater);
+			tmp &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK;
+			tmp |= DF_V3_6_MGCG_ENABLE_15_CYCLE_DELAY;
+			WREG32_SOC15(DF, 0,
+					mmDF_PIE_AON0_DfGlobalClkGater, tmp);
+		} else {
+			tmp = RREG32_SOC15(DF, 0,
+					mmDF_PIE_AON0_DfGlobalClkGater);
+			tmp &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK;
+			tmp |= DF_V3_6_MGCG_DISABLE;
+			WREG32_SOC15(DF, 0,
+					mmDF_PIE_AON0_DfGlobalClkGater, tmp);
+		}

-	/* Exit broadcast mode */
-	adev->df_funcs->enable_broadcast_mode(adev, false);
+		/* Exit broadcast mode */
+		adev->df_funcs->enable_broadcast_mode(adev, false);
 	}
 }

 static void df_v3_6_get_clockgating_state(struct amdgpu_device *adev,
@@ -117,10 +117,13 @@ static const struct soc15_reg_golden golden_settings_gc_10_1[] =
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CGTT_SCLK_CTRL, 0x10000000, 0x10000100),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CTRL2, 0xffffffff, 0x1402002f),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CTRL3, 0xffff9fff, 0x00001188),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_TIMEOUT_COUNTER, 0xffffffff, 0x00000800),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x08000009),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0x00400000, 0x04440000),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_2, 0x00000800, 0x00000820),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_SPARE, 0xffffffff, 0xffff3101),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL, 0x001f0000, 0x00070104),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ALU_CLK_CTRL, 0xffffffff, 0xffffffff),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ARB_CONFIG, 0x00000100, 0x00000130),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_LDS_CLK_CTRL, 0xffffffff, 0xffffffff),
@@ -162,10 +165,13 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_1[] =
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CGTT_SCLK_CTRL, 0xffff0fff, 0x10000100),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CTRL2, 0xffffffff, 0x1402002f),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CTRL3, 0xffffbfff, 0x00000188),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_TIMEOUT_COUNTER, 0xffffffff, 0x00000800),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x08000009),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0x00400000, 0x04440000),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_2, 0x00000800, 0x00000820),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_SPARE, 0xffffffff, 0xffff3101),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL, 0x001f0000, 0x00070105),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ALU_CLK_CTRL, 0xffffffff, 0xffffffff),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ARB_CONFIG, 0x00000133, 0x00000130),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_LDS_CLK_CTRL, 0xffffffff, 0xffffffff),
@@ -6146,7 +6146,23 @@ static void gfx_v8_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
 	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
 	bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;

-	/* EVENT_WRITE_EOP - flush caches, send int */
+	/* Workaround for cache flush problems. First send a dummy EOP
+	 * event down the pipe with seq one below.
+	 */
+	amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
+	amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
+				 EOP_TC_ACTION_EN |
+				 EOP_TC_WB_ACTION_EN |
+				 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
+				 EVENT_INDEX(5)));
+	amdgpu_ring_write(ring, addr & 0xfffffffc);
+	amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
+				DATA_SEL(1) | INT_SEL(0));
+	amdgpu_ring_write(ring, lower_32_bits(seq - 1));
+	amdgpu_ring_write(ring, upper_32_bits(seq - 1));
+
+	/* Then send the real EOP event down the pipe:
+	 * EVENT_WRITE_EOP - flush caches, send int */
 	amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
 	amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
 				 EOP_TC_ACTION_EN |
@@ -6888,7 +6904,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
 		5 +  /* COND_EXEC */
 		7 +  /* PIPELINE_SYNC */
 		VI_FLUSH_GPU_TLB_NUM_WREG * 5 + 9 + /* VM_FLUSH */
-		8 +  /* FENCE for VM_FLUSH */
+		12 +  /* FENCE for VM_FLUSH */
 		20 + /* GDS switch */
 		4 + /* double SWITCH_BUFFER,
 		       the first COND_EXEC jump to the place just
@@ -6900,7 +6916,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
 		31 + /* DE_META */
 		3 + /* CNTX_CTRL */
 		5 + /* HDP_INVL */
-		8 + 8 + /* FENCE x2 */
+		12 + 12 + /* FENCE x2 */
 		2, /* SWITCH_BUFFER */
 	.emit_ib_size =	4, /* gfx_v8_0_ring_emit_ib_gfx */
 	.emit_ib = gfx_v8_0_ring_emit_ib_gfx,
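Note: the workaround above writes the fence twice, first a throwaway EOP event carrying `seq - 1` and then the real one carrying `seq`. Since fence waiters only ever test whether the written value has reached `seq`, the dummy write can never be mistaken for completion; it just forces an extra cache flush down the pipe (which is why the ring-size estimates above grow from 8 to 12 dwords per fence). A hedged sketch of the idea, with hypothetical helper names standing in for the PACKET3 plumbing:

	struct ring;	/* opaque, illustrative */
	void emit_eop_flush(struct ring *ring, unsigned long long addr,
			    unsigned long long seq);	/* assumed primitive */

	/* Sketch: the seq - 1 write is flush-only and invisible to waiters,
	 * because no waiter can satisfy "value reached seq" from it. */
	static void emit_fence_with_flush_workaround(struct ring *ring,
						     unsigned long long addr,
						     unsigned long long seq)
	{
		emit_eop_flush(ring, addr, seq - 1);	/* dummy, flush only */
		emit_eop_flush(ring, addr, seq);	/* the real fence */
	}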
@@ -219,6 +219,21 @@ static uint32_t gmc_v10_0_get_invalidate_req(unsigned int vmid,
 	return req;
 }

+/**
+ * gmc_v10_0_use_invalidate_semaphore - judge whether to use semaphore
+ *
+ * @adev: amdgpu_device pointer
+ * @vmhub: vmhub type
+ *
+ */
+static bool gmc_v10_0_use_invalidate_semaphore(struct amdgpu_device *adev,
+				       uint32_t vmhub)
+{
+	return ((vmhub == AMDGPU_MMHUB_0 ||
+		 vmhub == AMDGPU_MMHUB_1) &&
+		(!amdgpu_sriov_vf(adev)));
+}
+
 /*
  * GART
  * VMID 0 is the physical GPU addresses as used by the kernel.
@@ -229,6 +244,7 @@ static uint32_t gmc_v10_0_get_invalidate_req(unsigned int vmid,
 static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
 				   unsigned int vmhub, uint32_t flush_type)
 {
+	bool use_semaphore = gmc_v10_0_use_invalidate_semaphore(adev, vmhub);
 	struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
 	u32 tmp = gmc_v10_0_get_invalidate_req(vmid, flush_type);
 	/* Use register 17 for GART */
@@ -244,8 +260,7 @@ static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
 	 */

 	/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
-	if (vmhub == AMDGPU_MMHUB_0 ||
-	    vmhub == AMDGPU_MMHUB_1) {
+	if (use_semaphore) {
 		for (i = 0; i < adev->usec_timeout; i++) {
 			/* a read return value of 1 means semaphore acuqire */
 			tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_sem + eng);
@@ -278,8 +293,7 @@ static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
 	}

 	/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
-	if (vmhub == AMDGPU_MMHUB_0 ||
-	    vmhub == AMDGPU_MMHUB_1)
+	if (use_semaphore)
 		/*
 		 * add semaphore release after invalidation,
 		 * write with 0 means semaphore release
@@ -369,6 +383,7 @@ error_alloc:
 static uint64_t gmc_v10_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
 					     unsigned vmid, uint64_t pd_addr)
 {
+	bool use_semaphore = gmc_v10_0_use_invalidate_semaphore(ring->adev, ring->funcs->vmhub);
 	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
 	uint32_t req = gmc_v10_0_get_invalidate_req(vmid, 0);
 	unsigned eng = ring->vm_inv_eng;
@@ -381,8 +396,7 @@ static uint64_t gmc_v10_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
 	 */

 	/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
-	if (ring->funcs->vmhub == AMDGPU_MMHUB_0 ||
-	    ring->funcs->vmhub == AMDGPU_MMHUB_1)
+	if (use_semaphore)
 		/* a read return value of 1 means semaphore acuqire */
 		amdgpu_ring_emit_reg_wait(ring,
 					  hub->vm_inv_eng0_sem + eng, 0x1, 0x1);
@@ -398,8 +412,7 @@ static uint64_t gmc_v10_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
 				    req, 1 << vmid);

 	/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
-	if (ring->funcs->vmhub == AMDGPU_MMHUB_0 ||
-	    ring->funcs->vmhub == AMDGPU_MMHUB_1)
+	if (use_semaphore)
 		/*
 		 * add semaphore release after invalidation,
 		 * write with 0 means semaphore release
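Note: these hunks (and the matching gmc9 ones below) replace four open-coded `vmhub == AMDGPU_MMHUB_0 || vmhub == AMDGPU_MMHUB_1` tests with one predicate computed at the top of each function. The actual fix lives in the predicate: it now also rules out SR-IOV virtual functions (and, in the gmc9 variant, early-revision Raven/Picasso parts), so those configurations skip the semaphore entirely at both the acquire and the release site, consistently. A condensed sketch of the acquire/invalidate/release shape, with hypothetical helpers:

	struct hub;					/* opaque, illustrative */
	void sem_acquire(struct hub *h, int eng);	/* poll until a read returns 1 */
	void sem_release(struct hub *h, int eng);	/* write 0 to release */
	void write_invalidate_req(struct hub *h, int eng, unsigned int req);

	/* Sketch: decide the policy once, then bracket the TLB invalidation
	 * with acquire/release only when the semaphore applies to this hub. */
	static void flush_hub(struct hub *h, int eng, unsigned int req, bool use_sem)
	{
		if (use_sem)
			sem_acquire(h, eng);
		write_invalidate_req(h, eng, req);
		if (use_sem)
			sem_release(h, eng);
	}

Hoisting the condition into a single boolean is what makes the acquire and release legs impossible to get out of sync, which is the bug class the original open-coded checks invited.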
@@ -416,6 +416,24 @@ static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid,
 	return req;
 }

+/**
+ * gmc_v9_0_use_invalidate_semaphore - judge whether to use semaphore
+ *
+ * @adev: amdgpu_device pointer
+ * @vmhub: vmhub type
+ *
+ */
+static bool gmc_v9_0_use_invalidate_semaphore(struct amdgpu_device *adev,
+				       uint32_t vmhub)
+{
+	return ((vmhub == AMDGPU_MMHUB_0 ||
+		 vmhub == AMDGPU_MMHUB_1) &&
+		(!amdgpu_sriov_vf(adev)) &&
+		(!(adev->asic_type == CHIP_RAVEN &&
+		   adev->rev_id < 0x8 &&
+		   adev->pdev->device == 0x15d8)));
+}
+
 /*
  * GART
  * VMID 0 is the physical GPU addresses as used by the kernel.
@@ -435,6 +453,7 @@ static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid,
 static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
 					uint32_t vmhub, uint32_t flush_type)
 {
+	bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(adev, vmhub);
 	const unsigned eng = 17;
 	u32 j, tmp;
 	struct amdgpu_vmhub *hub;
@@ -468,8 +487,7 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
 	 */

 	/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
-	if (vmhub == AMDGPU_MMHUB_0 ||
-	    vmhub == AMDGPU_MMHUB_1) {
+	if (use_semaphore) {
 		for (j = 0; j < adev->usec_timeout; j++) {
 			/* a read return value of 1 means semaphore acuqire */
 			tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_sem + eng);
@@ -499,8 +517,7 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
 	}

 	/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
-	if (vmhub == AMDGPU_MMHUB_0 ||
-	    vmhub == AMDGPU_MMHUB_1)
+	if (use_semaphore)
 		/*
 		 * add semaphore release after invalidation,
 		 * write with 0 means semaphore release
@@ -518,6 +535,7 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
 static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
 					    unsigned vmid, uint64_t pd_addr)
 {
+	bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(ring->adev, ring->funcs->vmhub);
 	struct amdgpu_device *adev = ring->adev;
 	struct amdgpu_vmhub *hub = &adev->vmhub[ring->funcs->vmhub];
 	uint32_t req = gmc_v9_0_get_invalidate_req(vmid, 0);
@@ -531,8 +549,7 @@ static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
 	 */

 	/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
-	if (ring->funcs->vmhub == AMDGPU_MMHUB_0 ||
-	    ring->funcs->vmhub == AMDGPU_MMHUB_1)
+	if (use_semaphore)
 		/* a read return value of 1 means semaphore acuqire */
 		amdgpu_ring_emit_reg_wait(ring,
 					  hub->vm_inv_eng0_sem + eng, 0x1, 0x1);
@@ -548,8 +565,7 @@ static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
 				    req, 1 << vmid);

 	/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
-	if (ring->funcs->vmhub == AMDGPU_MMHUB_0 ||
-	    ring->funcs->vmhub == AMDGPU_MMHUB_1)
+	if (use_semaphore)
 		/*
 		 * add semaphore release after invalidation,
 		 * write with 0 means semaphore release
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0-only
+# SPDX-License-Identifier: MIT
 #
 # Heterogenous system architecture configuration
 #
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0-only
+# SPDX-License-Identifier: MIT
 menu "Display Engine Configuration"
 	depends on DRM && DRM_AMDGPU
@@ -1625,6 +1625,7 @@ static enum bp_result construct_integrated_info(
 	/* Don't need to check major revision as they are all 1 */
 	switch (revision.minor) {
 	case 11:
+	case 12:
 		result = get_integrated_info_v11(bp, info);
 		break;
 	default:
@@ -471,12 +471,28 @@ static void rn_notify_wm_ranges(struct clk_mgr *clk_mgr_base)

 }

+static bool rn_are_clock_states_equal(struct dc_clocks *a,
+		struct dc_clocks *b)
+{
+	if (a->dispclk_khz != b->dispclk_khz)
+		return false;
+	else if (a->dppclk_khz != b->dppclk_khz)
+		return false;
+	else if (a->dcfclk_khz != b->dcfclk_khz)
+		return false;
+	else if (a->dcfclk_deep_sleep_khz != b->dcfclk_deep_sleep_khz)
+		return false;
+
+	return true;
+}
+
 static struct clk_mgr_funcs dcn21_funcs = {
 	.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
 	.update_clocks = rn_update_clocks,
 	.init_clocks = rn_init_clocks,
 	.enable_pme_wa = rn_enable_pme_wa,
 	/* .dump_clk_registers = rn_dump_clk_registers, */
+	.are_clock_states_equal = rn_are_clock_states_equal,
 	.notify_wm_ranges = rn_notify_wm_ranges
 };

@@ -518,36 +534,83 @@ struct clk_bw_params rn_bw_params = {
 		.num_entries = 4,
 	},

-	.wm_table = {
-		.entries = {
-			{
-				.wm_inst = WM_A,
-				.wm_type = WM_TYPE_PSTATE_CHG,
-				.pstate_latency_us = 23.84,
-				.valid = true,
-			},
-			{
-				.wm_inst = WM_B,
-				.wm_type = WM_TYPE_PSTATE_CHG,
-				.pstate_latency_us = 23.84,
-				.valid = true,
-			},
-			{
-				.wm_inst = WM_C,
-				.wm_type = WM_TYPE_PSTATE_CHG,
-				.pstate_latency_us = 23.84,
-				.valid = true,
-			},
-			{
-				.wm_inst = WM_D,
-				.wm_type = WM_TYPE_PSTATE_CHG,
-				.pstate_latency_us = 23.84,
-				.valid = true,
-			},
-		},
-	}
 };
+
+struct wm_table ddr4_wm_table = {
+	.entries = {
+		{
+			.wm_inst = WM_A,
+			.wm_type = WM_TYPE_PSTATE_CHG,
+			.pstate_latency_us = 11.72,
+			.sr_exit_time_us = 6.09,
+			.sr_enter_plus_exit_time_us = 7.14,
+			.valid = true,
+		},
+		{
+			.wm_inst = WM_B,
+			.wm_type = WM_TYPE_PSTATE_CHG,
+			.pstate_latency_us = 11.72,
+			.sr_exit_time_us = 10.12,
+			.sr_enter_plus_exit_time_us = 11.48,
+			.valid = true,
+		},
+		{
+			.wm_inst = WM_C,
+			.wm_type = WM_TYPE_PSTATE_CHG,
+			.pstate_latency_us = 11.72,
+			.sr_exit_time_us = 10.12,
+			.sr_enter_plus_exit_time_us = 11.48,
+			.valid = true,
+		},
+		{
+			.wm_inst = WM_D,
+			.wm_type = WM_TYPE_PSTATE_CHG,
+			.pstate_latency_us = 11.72,
+			.sr_exit_time_us = 10.12,
+			.sr_enter_plus_exit_time_us = 11.48,
+			.valid = true,
+		},
+	}
+};
+
+struct wm_table lpddr4_wm_table = {
+	.entries = {
+		{
+			.wm_inst = WM_A,
+			.wm_type = WM_TYPE_PSTATE_CHG,
+			.pstate_latency_us = 23.84,
+			.sr_exit_time_us = 12.5,
+			.sr_enter_plus_exit_time_us = 17.0,
+			.valid = true,
+		},
+		{
+			.wm_inst = WM_B,
+			.wm_type = WM_TYPE_PSTATE_CHG,
+			.pstate_latency_us = 23.84,
+			.sr_exit_time_us = 12.5,
+			.sr_enter_plus_exit_time_us = 17.0,
+			.valid = true,
+		},
+		{
+			.wm_inst = WM_C,
+			.wm_type = WM_TYPE_PSTATE_CHG,
+			.pstate_latency_us = 23.84,
+			.sr_exit_time_us = 12.5,
+			.sr_enter_plus_exit_time_us = 17.0,
+			.valid = true,
+		},
+		{
+			.wm_inst = WM_D,
+			.wm_type = WM_TYPE_PSTATE_CHG,
+			.pstate_latency_us = 23.84,
+			.sr_exit_time_us = 12.5,
+			.sr_enter_plus_exit_time_us = 17.0,
+			.valid = true,
+		},
+	}
+};
+

 static unsigned int find_dcfclk_for_voltage(struct dpm_clocks *clock_table, unsigned int voltage)
 {
 	int i;
@@ -561,7 +624,7 @@ static unsigned int find_dcfclk_for_voltage(struct dpm_clocks *clock_table, unsi
 	return 0;
 }

-static void rn_clk_mgr_helper_populate_bw_params(struct clk_bw_params *bw_params, struct dpm_clocks *clock_table, struct hw_asic_id *asic_id)
+static void rn_clk_mgr_helper_populate_bw_params(struct clk_bw_params *bw_params, struct dpm_clocks *clock_table, struct integrated_info *bios_info)
 {
 	int i, j = 0;

@@ -593,8 +656,8 @@ static void rn_clk_mgr_helper_populate_bw_params(struct clk_bw_params *bw_params
 		bw_params->clk_table.entries[i].dcfclk_mhz = find_dcfclk_for_voltage(clock_table, clock_table->FClocks[j].Vol);
 	}

-	bw_params->vram_type = asic_id->vram_type;
-	bw_params->num_channels = asic_id->vram_width / DDR4_DRAM_WIDTH;
+	bw_params->vram_type = bios_info->memory_type;
+	bw_params->num_channels = bios_info->ma_channel_number;

 	for (i = 0; i < WM_SET_COUNT; i++) {
 		bw_params->wm_table.entries[i].wm_inst = i;
@@ -669,15 +732,24 @@ void rn_clk_mgr_construct(
 		ASSERT(clk_mgr->base.dprefclk_khz == 600000);
 		clk_mgr->base.dprefclk_khz = 600000;
 		}
+
+		if (ctx->dc_bios->integrated_info->memory_type == LpDdr4MemType) {
+			rn_bw_params.wm_table = lpddr4_wm_table;
+		} else {
+			rn_bw_params.wm_table = ddr4_wm_table;
+		}
 	}

 	dce_clock_read_ss_info(clk_mgr);

 	clk_mgr->base.bw_params = &rn_bw_params;

 	if (pp_smu && pp_smu->rn_funcs.get_dpm_clock_table) {
 		pp_smu->rn_funcs.get_dpm_clock_table(&pp_smu->rn_funcs.pp_smu, &clock_table);
-		rn_clk_mgr_helper_populate_bw_params(clk_mgr->base.bw_params, &clock_table, &ctx->asic_id);
+		if (ctx->dc_bios && ctx->dc_bios->integrated_info) {
+			rn_clk_mgr_helper_populate_bw_params(clk_mgr->base.bw_params, &clock_table, ctx->dc_bios->integrated_info);
+		}
 	}

 	if (!IS_FPGA_MAXIMUS_DC(ctx->dce_environment) && clk_mgr->smu_ver >= 0x00371500) {
@@ -372,7 +372,7 @@ bool dc_link_is_dp_sink_present(struct dc_link *link)

 	if (GPIO_RESULT_OK != dal_ddc_open(
 		ddc, GPIO_MODE_INPUT, GPIO_DDC_CONFIG_TYPE_MODE_I2C)) {
-		dal_gpio_destroy_ddc(&ddc);
+		dal_ddc_close(ddc);

 		return present;
 	}
@@ -586,7 +586,7 @@ bool dal_ddc_service_query_ddc_data(
 bool dal_ddc_submit_aux_command(struct ddc_service *ddc,
 		struct aux_payload *payload)
 {
-	uint8_t retrieved = 0;
+	uint32_t retrieved = 0;
 	bool ret = 0;

 	if (!ddc)
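Note: widening `retrieved` from uint8_t to uint32_t matters because the counter tracks how many bytes of the payload have been transferred so far across successive AUX chunks (and the `length` field in struct aux_payload gets the same widening further down). An 8-bit accumulator silently wraps once a transfer passes 255 bytes, e.g. on a multi-block EDID read; an illustrative fragment:

	uint8_t retrieved = 250;
	retrieved += 16;	/* wraps to 10: a "while (retrieved < length)" loop misbehaves */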
@@ -3522,7 +3522,14 @@ void dp_set_fec_enable(struct dc_link *link, bool enable)
 	if (link_enc->funcs->fec_set_enable &&
 			link->dpcd_caps.fec_cap.bits.FEC_CAPABLE) {
 		if (link->fec_state == dc_link_fec_ready && enable) {
-			msleep(1);
+			/* Accord to DP spec, FEC enable sequence can first
+			 * be transmitted anytime after 1000 LL codes have
+			 * been transmitted on the link after link training
+			 * completion. Using 1 lane RBR should have the maximum
+			 * time for transmitting 1000 LL codes which is 6.173 us.
+			 * So use 7 microseconds delay instead.
+			 */
+			udelay(7);
 			link_enc->funcs->fec_set_enable(link_enc, true);
 			link->fec_state = dc_link_fec_enabled;
 		} else if (link->fec_state == dc_link_fec_enabled && !enable) {
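Note: the 6.173 us figure in the comment checks out as a worst case. At RBR a DisplayPort lane runs at 1.62 Gb/s, and with 8b/10b channel coding each link-layer code is a 10-bit symbol, so 1000 codes take 1000 x 10 bits / 1.62x10^9 bit/s ~= 6.17 us on a single lane (more lanes or higher rates only make it shorter). Rounding up to udelay(7) keeps a small safety margin while replacing an msleep(1) that slept at least ~1000 us, roughly 140x longer than necessary.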
@@ -583,6 +583,8 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
 	uint8_t reply;
 	bool payload_reply = true;
 	enum aux_channel_operation_result operation_result;
+	bool retry_on_defer = false;
+
 	int aux_ack_retries = 0,
 	    aux_defer_retries = 0,
 	    aux_i2c_defer_retries = 0,
@@ -613,8 +615,10 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
 			break;

 		case AUX_TRANSACTION_REPLY_AUX_DEFER:
-		case AUX_TRANSACTION_REPLY_I2C_OVER_AUX_NACK:
 		case AUX_TRANSACTION_REPLY_I2C_OVER_AUX_DEFER:
+			retry_on_defer = true;
+			/* fall through */
+		case AUX_TRANSACTION_REPLY_I2C_OVER_AUX_NACK:
 			if (++aux_defer_retries >= AUX_MAX_DEFER_RETRIES) {
 				goto fail;
 			} else {
@@ -647,15 +651,24 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
 			break;

 		case AUX_CHANNEL_OPERATION_FAILED_TIMEOUT:
-			if (++aux_timeout_retries >= AUX_MAX_TIMEOUT_RETRIES)
-				goto fail;
-			else {
-				/*
-				 * DP 1.4, 2.8.2: AUX Transaction Response/Reply Timeouts
-				 * According to the DP spec there should be 3 retries total
-				 * with a 400us wait inbetween each. Hardware already waits
-				 * for 550us therefore no wait is required here.
-				 */
+			// Check whether a DEFER had occurred before the timeout.
+			// If so, treat timeout as a DEFER.
+			if (retry_on_defer) {
+				if (++aux_defer_retries >= AUX_MAX_DEFER_RETRIES)
+					goto fail;
+				else if (payload->defer_delay > 0)
+					msleep(payload->defer_delay);
+			} else {
+				if (++aux_timeout_retries >= AUX_MAX_TIMEOUT_RETRIES)
+					goto fail;
+				else {
+					/*
+					 * DP 1.4, 2.8.2: AUX Transaction Response/Reply Timeouts
+					 * According to the DP spec there should be 3 retries total
+					 * with a 400us wait inbetween each. Hardware already waits
+					 * for 550us therefore no wait is required here.
+					 */
+				}
+			}
 			break;
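Note: the behavioural change here is that a timeout which follows an earlier DEFER reply is charged to the (larger) defer-retry budget and honours the sink's requested delay, instead of consuming one of the three spec-mandated timeout retries on a sink that is merely slow. A compact fragment of that accounting, with hypothetical names:

	/* Hypothetical names throughout; fragment only. */
	if (result == OP_FAILED_TIMEOUT) {
		if (saw_defer) {		/* a slow sink, not a dead one */
			if (++defer_retries >= MAX_DEFER_RETRIES)
				return false;
			if (defer_delay_ms > 0)
				msleep(defer_delay_ms);
		} else if (++timeout_retries >= MAX_TIMEOUT_RETRIES) {
			return false;	/* HW already waited 550 us per attempt */
		}
	}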
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
 #
 # Makefile for DCN.
@@ -923,7 +923,9 @@ static const struct resource_caps res_cap_nv14 = {
 	.num_dwb = 1,
 	.num_ddc = 5,
 	.num_vmid = 16,
+#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
+	.num_dsc = 5,
+#endif
 };

 static const struct dc_debug_options debug_defaults_drv = {
@@ -1536,13 +1538,20 @@ enum dc_status dcn20_build_mapped_resource(const struct dc *dc, struct dc_state

 static void acquire_dsc(struct resource_context *res_ctx,
 			const struct resource_pool *pool,
-			struct display_stream_compressor **dsc)
+			struct display_stream_compressor **dsc,
+			int pipe_idx)
 {
 	int i;

 	ASSERT(*dsc == NULL);
 	*dsc = NULL;

+	if (pool->res_cap->num_dsc == pool->res_cap->num_opp) {
+		*dsc = pool->dscs[pipe_idx];
+		res_ctx->is_dsc_acquired[pipe_idx] = true;
+		return;
+	}
+
 	/* Find first free DSC */
 	for (i = 0; i < pool->res_cap->num_dsc; i++)
 		if (!res_ctx->is_dsc_acquired[i]) {
@@ -1585,7 +1594,7 @@ static enum dc_status add_dsc_to_stream_resource(struct dc *dc,
 		if (pipe_ctx->stream != dc_stream)
 			continue;

-		acquire_dsc(&dc_ctx->res_ctx, pool, &pipe_ctx->stream_res.dsc);
+		acquire_dsc(&dc_ctx->res_ctx, pool, &pipe_ctx->stream_res.dsc, i);

 		/* The number of DSCs can be less than the number of pipes */
 		if (!pipe_ctx->stream_res.dsc) {
@@ -1785,7 +1794,7 @@ bool dcn20_split_stream_for_odm(
 	next_odm_pipe->stream_res.opp = pool->opps[next_odm_pipe->pipe_idx];
 #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 	if (next_odm_pipe->stream->timing.flags.DSC == 1) {
-		acquire_dsc(res_ctx, pool, &next_odm_pipe->stream_res.dsc);
+		acquire_dsc(res_ctx, pool, &next_odm_pipe->stream_res.dsc, next_odm_pipe->pipe_idx);
 		ASSERT(next_odm_pipe->stream_res.dsc);
 		if (next_odm_pipe->stream_res.dsc == NULL)
 			return false;
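Note: acquire_dsc() now takes the pipe index so that when the pool has exactly one DSC per OPP (as on Navi14 above, where num_dsc is set to 5), each pipe deterministically gets its own engine rather than whichever slot a scan finds free; the first-free scan remains the fallback for pools with fewer DSCs than pipes. A sketch of that allocation policy with hypothetical structures:

	/* Sketch: prefer an identity pipe->engine mapping when the resource
	 * is not oversubscribed; fall back to first-free scanning otherwise. */
	static int acquire_engine(bool *acquired, int num_engines, int num_pipes,
				  int pipe_idx)
	{
		int i;

		if (num_engines == num_pipes) {	/* 1:1 -- take our own slot */
			acquired[pipe_idx] = true;
			return pipe_idx;
		}
		for (i = 0; i < num_engines; i++)	/* oversubscribed pool */
			if (!acquired[i]) {
				acquired[i] = true;
				return i;
			}
		return -1;			/* none free */
	}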
@@ -492,15 +492,23 @@ void enc2_stream_encoder_dp_unblank(
 				DP_VID_N_MUL, n_multiply);
 	}

+	/* make sure stream is disabled before resetting steer fifo */
+	REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, false);
+	REG_WAIT(DP_VID_STREAM_CNTL, DP_VID_STREAM_STATUS, 0, 10, 5000);
+
 	/* set DIG_START to 0x1 to reset FIFO */
 	REG_UPDATE(DIG_FE_CNTL, DIG_START, 1);
+	udelay(1);

 	/* write 0 to take the FIFO out of reset */

 	REG_UPDATE(DIG_FE_CNTL, DIG_START, 0);

-	/* switch DP encoder to CRTC data */
+	/* switch DP encoder to CRTC data, but reset it the fifo first. It may happen
+	 * that it overflows during mode transition, and sometimes doesn't recover.
+	 */
+	REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, 1);
+	udelay(10);
+
 	REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, 0);
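Note: assuming DC's REG_WAIT takes a per-attempt delay and a retry count (its usual convention), the added REG_WAIT(DP_VID_STREAM_CNTL, DP_VID_STREAM_STATUS, 0, 10, 5000) polls for the stream to actually report disabled for up to roughly 10 us x 5000 ~= 50 ms before the steer-FIFO reset is toggled. Ordering matters here: resetting the FIFO while the stream is still live is exactly the mode-transition overflow the new comment warns "sometimes doesn't recover".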
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
 #
 # Makefile for DCN21.
@@ -23,6 +23,8 @@
  *
  */

+#include <linux/slab.h>
+
 #include "dm_services.h"
 #include "dc.h"

@@ -257,7 +259,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = {
 	.vmm_page_size_bytes = 4096,
 	.dram_clock_change_latency_us = 23.84,
 	.return_bus_width_bytes = 64,
-	.dispclk_dppclk_vco_speed_mhz = 3550,
+	.dispclk_dppclk_vco_speed_mhz = 3600,
 	.xfc_bus_transport_time_us = 4,
 	.xfc_xbuf_latency_tolerance_us = 4,
 	.use_urgent_burst_bw = 1,
@@ -1000,6 +1002,8 @@ static void calculate_wm_set_for_vlevel(
 	pipes[0].clks_cfg.socclk_mhz = dml->soc.clock_limits[vlevel].socclk_mhz;

 	dml->soc.dram_clock_change_latency_us = table_entry->pstate_latency_us;
+	dml->soc.sr_exit_time_us = table_entry->sr_exit_time_us;
+	dml->soc.sr_enter_plus_exit_time_us = table_entry->sr_enter_plus_exit_time_us;

 	wm_set->urgent_ns = get_wm_urgent(dml, pipes, pipe_cnt) * 1000;
 	wm_set->cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(dml, pipes, pipe_cnt) * 1000;
@@ -1017,14 +1021,21 @@ static void calculate_wm_set_for_vlevel(

 static void patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb)
 {
+	int i;
+
 	kernel_fpu_begin();
 	if (dc->bb_overrides.sr_exit_time_ns) {
-		bb->sr_exit_time_us = dc->bb_overrides.sr_exit_time_ns / 1000.0;
+		for (i = 0; i < WM_SET_COUNT; i++) {
+			dc->clk_mgr->bw_params->wm_table.entries[i].sr_exit_time_us =
+					dc->bb_overrides.sr_exit_time_ns / 1000.0;
+		}
 	}

 	if (dc->bb_overrides.sr_enter_plus_exit_time_ns) {
-		bb->sr_enter_plus_exit_time_us =
-				dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0;
+		for (i = 0; i < WM_SET_COUNT; i++) {
+			dc->clk_mgr->bw_params->wm_table.entries[i].sr_enter_plus_exit_time_us =
+					dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0;
+		}
 	}

 	if (dc->bb_overrides.urgent_latency_ns) {
@@ -1032,9 +1043,12 @@ static void patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_s
 	}

 	if (dc->bb_overrides.dram_clock_change_latency_ns) {
-		bb->dram_clock_change_latency_us =
+		for (i = 0; i < WM_SET_COUNT; i++) {
+			dc->clk_mgr->bw_params->wm_table.entries[i].pstate_latency_us =
 				dc->bb_overrides.dram_clock_change_latency_ns / 1000.0;
+		}
 	}

 	kernel_fpu_end();
 }
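Note: patch_bounding_box() does double-precision arithmetic (the /1000.0 conversions), and in kernel context that is only legal between kernel_fpu_begin() and kernel_fpu_end(); the hunk keeps the new per-watermark loops inside that existing bracket. A minimal sketch of the constraint (x86 API, for illustration only):

	#include <asm/fpu/api.h>	/* kernel_fpu_begin()/kernel_fpu_end() on x86 */

	/* FP register state is not saved on kernel entry, so floating-point
	 * use must be bracketed; preemption is disabled inside the section,
	 * so keep such regions short. */
	static unsigned int ns_to_us(unsigned int ns)
	{
		unsigned int us;

		kernel_fpu_begin();
		us = (unsigned int)(ns / 1000.0);
		kernel_fpu_end();
		return us;
	}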
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: MIT
 #
 # Makefile for the 'dsc' sub-component of DAL.
@@ -69,6 +69,8 @@ struct wm_range_table_entry {
 	unsigned int wm_inst;
 	unsigned int wm_type;
 	double pstate_latency_us;
+	double sr_exit_time_us;
+	double sr_enter_plus_exit_time_us;
 	bool valid;
 };
@@ -42,7 +42,7 @@ struct aux_payload {
 	bool write;
 	bool mot;
 	uint32_t address;
-	uint8_t length;
+	uint32_t length;
 	uint8_t *data;
 	/*
 	 * used to return the reply type of the transaction
@@ -37,8 +37,8 @@
 #define STATIC_SCREEN_RAMP_DELTA_REFRESH_RATE_PER_FRAME ((1000 / 60) * 65)
 /* Number of elements in the render times cache array */
 #define RENDER_TIMES_MAX_COUNT 10
-/* Threshold to exit/exit BTR (to avoid frequent enter-exits at the lower limit) */
-#define BTR_MAX_MARGIN 2500
+/* Threshold to exit BTR (to avoid frequent enter-exits at the lower limit) */
+#define BTR_EXIT_MARGIN 2000
 /* Threshold to change BTR multiplier (to avoid frequent changes) */
 #define BTR_DRIFT_MARGIN 2000
 /*Threshold to exit fixed refresh rate*/
@@ -254,22 +254,24 @@ static void apply_below_the_range(struct core_freesync *core_freesync,
 	unsigned int delta_from_mid_point_in_us_1 = 0xFFFFFFFF;
 	unsigned int delta_from_mid_point_in_us_2 = 0xFFFFFFFF;
 	unsigned int frames_to_insert = 0;
 	unsigned int min_frame_duration_in_ns = 0;
+	unsigned int max_render_time_in_us = in_out_vrr->max_duration_in_us;
 	unsigned int delta_from_mid_point_delta_in_us;
-	unsigned int max_render_time_in_us =
-			in_out_vrr->max_duration_in_us - in_out_vrr->btr.margin_in_us;

 	min_frame_duration_in_ns = ((unsigned int) (div64_u64(
 					(1000000000ULL * 1000000),
 					in_out_vrr->max_refresh_in_uhz)));

 	/* Program BTR */
-	if ((last_render_time_in_us + in_out_vrr->btr.margin_in_us / 2) < max_render_time_in_us) {
+	if (last_render_time_in_us + BTR_EXIT_MARGIN < max_render_time_in_us) {
 		/* Exit Below the Range */
 		if (in_out_vrr->btr.btr_active) {
 			in_out_vrr->btr.frame_counter = 0;
 			in_out_vrr->btr.btr_active = false;
 		}
-	} else if (last_render_time_in_us > (max_render_time_in_us + in_out_vrr->btr.margin_in_us / 2)) {
+	} else if (last_render_time_in_us > max_render_time_in_us) {
 		/* Enter Below the Range */
-		in_out_vrr->btr.btr_active = true;
+		if (!in_out_vrr->btr.btr_active) {
+			in_out_vrr->btr.btr_active = true;
+		}
 	}

 	/* BTR set to "not active" so disengage */
@@ -325,9 +327,7 @@ static void apply_below_the_range(struct core_freesync *core_freesync,
 		/* Choose number of frames to insert based on how close it
 		 * can get to the mid point of the variable range.
 		 */
-		if ((frame_time_in_us / mid_point_frames_ceil) > in_out_vrr->min_duration_in_us &&
-				(delta_from_mid_point_in_us_1 < delta_from_mid_point_in_us_2 ||
-				mid_point_frames_floor < 2)) {
+		if (delta_from_mid_point_in_us_1 < delta_from_mid_point_in_us_2) {
 			frames_to_insert = mid_point_frames_ceil;
 			delta_from_mid_point_delta_in_us = delta_from_mid_point_in_us_2 -
 					delta_from_mid_point_in_us_1;
@@ -343,7 +343,7 @@ static void apply_below_the_range(struct core_freesync *core_freesync,
 		if (in_out_vrr->btr.frames_to_insert != 0 &&
 				delta_from_mid_point_delta_in_us < BTR_DRIFT_MARGIN) {
 			if (((last_render_time_in_us / in_out_vrr->btr.frames_to_insert) <
-					max_render_time_in_us) &&
+					in_out_vrr->max_duration_in_us) &&
 				((last_render_time_in_us / in_out_vrr->btr.frames_to_insert) >
 					in_out_vrr->min_duration_in_us))
 				frames_to_insert = in_out_vrr->btr.frames_to_insert;
@@ -796,11 +796,6 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
 		refresh_range = in_out_vrr->max_refresh_in_uhz -
 				in_out_vrr->min_refresh_in_uhz;

-		in_out_vrr->btr.margin_in_us = in_out_vrr->max_duration_in_us -
-				2 * in_out_vrr->min_duration_in_us;
-		if (in_out_vrr->btr.margin_in_us > BTR_MAX_MARGIN)
-			in_out_vrr->btr.margin_in_us = BTR_MAX_MARGIN;
-
 		in_out_vrr->supported = true;
 	}

@@ -816,7 +811,6 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
 	in_out_vrr->btr.inserted_duration_in_us = 0;
 	in_out_vrr->btr.frames_to_insert = 0;
 	in_out_vrr->btr.frame_counter = 0;

 	in_out_vrr->btr.mid_point_in_us =
 			(in_out_vrr->min_duration_in_us +
 			 in_out_vrr->max_duration_in_us) / 2;
@@ -92,7 +92,6 @@ struct mod_vrr_params_btr {
 	uint32_t inserted_duration_in_us;
 	uint32_t frames_to_insert;
 	uint32_t frame_counter;
-	uint32_t margin_in_us;
 };

 struct mod_vrr_params_fixed_refresh {
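Note: a worked example of the mid-point insertion logic this revert returns to. For a 40-120 Hz panel, min_duration ~= 8.33 ms and max_duration = 25 ms, so mid_point_in_us = (min + max) / 2 ~= 16.7 ms. A 30 ms render (33 fps, below the range) gives mid_point_frames_ceil = 2, i.e. the frame is shown over two refreshes of 15 ms each, an effective 66 Hz that is back inside the panel's range. BTR then exits only once render time drops BTR_EXIT_MARGIN (2 ms) below max_render_time, which is the hysteresis that avoids rapid enter/exit flapping right at the boundary.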
@@ -1313,12 +1313,17 @@ static int arcturus_get_power_profile_mode(struct smu_context *smu,
 					"VR",
 					"COMPUTE",
 					"CUSTOM"};
+	static const char *title[] = {
+			"PROFILE_INDEX(NAME)"};
 	uint32_t i, size = 0;
 	int16_t workload_type = 0;

 	if (!smu->pm_enabled || !buf)
 		return -EINVAL;

+	size += sprintf(buf + size, "%16s\n",
+			title[0]);
+
 	for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) {
 		/*
 		 * Conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT
@@ -3986,6 +3986,7 @@ static void intel_enable_ddi(struct intel_encoder *encoder,
 	if (conn_state->content_protection ==
 	    DRM_MODE_CONTENT_PROTECTION_DESIRED)
 		intel_hdcp_enable(to_intel_connector(conn_state->connector),
+				  crtc_state->cpu_transcoder,
 				  (u8)conn_state->hdcp_content_type);
 }

@@ -4089,7 +4090,9 @@ static void intel_ddi_update_pipe(struct intel_encoder *encoder,
 	if (conn_state->content_protection ==
 	    DRM_MODE_CONTENT_PROTECTION_DESIRED ||
 	    content_protection_type_changed)
-		intel_hdcp_enable(connector, (u8)conn_state->hdcp_content_type);
+		intel_hdcp_enable(connector,
+				  crtc_state->cpu_transcoder,
+				  (u8)conn_state->hdcp_content_type);
 }

 static void
@@ -2414,9 +2414,6 @@ intel_dp_compute_config(struct intel_encoder *encoder,

 	intel_psr_compute_config(intel_dp, pipe_config);

-	intel_hdcp_transcoder_config(intel_connector,
-				     pipe_config->cpu_transcoder);
-
 	return 0;
 }
@@ -1284,7 +1284,7 @@ static int intel_sanitize_fbc_option(struct drm_i915_private *dev_priv)
 		return 0;

 	/* https://bugs.freedesktop.org/show_bug.cgi?id=108085 */
-	if (IS_GEMINILAKE(dev_priv))
+	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
 		return 0;

 	if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9)
@@ -1821,23 +1821,6 @@ enum mei_fw_tc intel_get_mei_fw_tc(enum transcoder cpu_transcoder)
 	}
 }

-void intel_hdcp_transcoder_config(struct intel_connector *connector,
-				  enum transcoder cpu_transcoder)
-{
-	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
-	struct intel_hdcp *hdcp = &connector->hdcp;
-
-	if (!hdcp->shim)
-		return;
-
-	if (INTEL_GEN(dev_priv) >= 12) {
-		mutex_lock(&hdcp->mutex);
-		hdcp->cpu_transcoder = cpu_transcoder;
-		hdcp->port_data.fw_tc = intel_get_mei_fw_tc(cpu_transcoder);
-		mutex_unlock(&hdcp->mutex);
-	}
-}
-
 static inline int initialize_hdcp_port_data(struct intel_connector *connector,
 					    const struct intel_hdcp_shim *shim)
 {
@@ -1959,8 +1942,10 @@ int intel_hdcp_init(struct intel_connector *connector,
 	return 0;
 }

-int intel_hdcp_enable(struct intel_connector *connector, u8 content_type)
+int intel_hdcp_enable(struct intel_connector *connector,
+		      enum transcoder cpu_transcoder, u8 content_type)
 {
 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
 	struct intel_hdcp *hdcp = &connector->hdcp;
 	unsigned long check_link_interval = DRM_HDCP_CHECK_PERIOD_MS;
 	int ret = -EINVAL;
@@ -1972,6 +1957,11 @@ int intel_hdcp_enable(struct intel_connector *connector, u8 content_type)
 	WARN_ON(hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED);
 	hdcp->content_type = content_type;

+	if (INTEL_GEN(dev_priv) >= 12) {
+		hdcp->cpu_transcoder = cpu_transcoder;
+		hdcp->port_data.fw_tc = intel_get_mei_fw_tc(cpu_transcoder);
+	}
+
 	/*
 	 * Considering that HDCP2.2 is more secure than HDCP1.4, If the setup
 	 * is capable of HDCP2.2, it is preferred to use HDCP2.2.
@@ -21,11 +21,10 @@ enum transcoder;
 void intel_hdcp_atomic_check(struct drm_connector *connector,
 			     struct drm_connector_state *old_state,
 			     struct drm_connector_state *new_state);
-void intel_hdcp_transcoder_config(struct intel_connector *connector,
-				  enum transcoder cpu_transcoder);
 int intel_hdcp_init(struct intel_connector *connector,
 		    const struct intel_hdcp_shim *hdcp_shim);
-int intel_hdcp_enable(struct intel_connector *connector, u8 content_type);
+int intel_hdcp_enable(struct intel_connector *connector,
+		      enum transcoder cpu_transcoder, u8 content_type);
 int intel_hdcp_disable(struct intel_connector *connector);
 bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port);
 bool intel_hdcp_capable(struct intel_connector *connector);
@@ -2489,9 +2489,6 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
 		return -EINVAL;
 	}

-	intel_hdcp_transcoder_config(intel_hdmi->attached_connector,
-				     pipe_config->cpu_transcoder);
-
 	return 0;
 }
@@ -845,12 +845,6 @@ static const u8 *reg_offsets(const struct intel_engine_cs *engine)
 	}
 }

-static void unwind_wa_tail(struct i915_request *rq)
-{
-	rq->tail = intel_ring_wrap(rq->ring, rq->wa_tail - WA_TAIL_BYTES);
-	assert_ring_tail_valid(rq->ring, rq->tail);
-}
-
 static struct i915_request *
 __unwind_incomplete_requests(struct intel_engine_cs *engine)
 {
@@ -863,12 +857,10 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine)
 	list_for_each_entry_safe_reverse(rq, rn,
 					 &engine->active.requests,
 					 sched.link) {
-
 		if (i915_request_completed(rq))
 			continue; /* XXX */

 		__i915_request_unsubmit(rq);
-		unwind_wa_tail(rq);

 		/*
 		 * Push the request back into the queue for later resubmission.
@@ -1161,13 +1153,29 @@ execlists_schedule_out(struct i915_request *rq)
 	i915_request_put(rq);
 }

-static u64 execlists_update_context(const struct i915_request *rq)
+static u64 execlists_update_context(struct i915_request *rq)
 {
 	struct intel_context *ce = rq->hw_context;
-	u64 desc;
+	u64 desc = ce->lrc_desc;
+	u32 tail;

-	ce->lrc_reg_state[CTX_RING_TAIL] =
-		intel_ring_set_tail(rq->ring, rq->tail);
+	/*
+	 * WaIdleLiteRestore:bdw,skl
+	 *
+	 * We should never submit the context with the same RING_TAIL twice
+	 * just in case we submit an empty ring, which confuses the HW.
+	 *
+	 * We append a couple of NOOPs (gen8_emit_wa_tail) after the end of
+	 * the normal request to be able to always advance the RING_TAIL on
+	 * subsequent resubmissions (for lite restore). Should that fail us,
+	 * and we try and submit the same tail again, force the context
+	 * reload.
+	 */
+	tail = intel_ring_set_tail(rq->ring, rq->tail);
+	if (unlikely(ce->lrc_reg_state[CTX_RING_TAIL] == tail))
+		desc |= CTX_DESC_FORCE_RESTORE;
+	ce->lrc_reg_state[CTX_RING_TAIL] = tail;
+	rq->tail = rq->wa_tail;

 	/*
 	 * Make sure the context image is complete before we submit it to HW.
@@ -1186,13 +1194,11 @@ static u64 execlists_update_context(const struct i915_request *rq)
 	 */
 	mb();

-	desc = ce->lrc_desc;
-	ce->lrc_desc &= ~CTX_DESC_FORCE_RESTORE;
-
 	/* Wa_1607138340:tgl */
 	if (IS_TGL_REVID(rq->i915, TGL_REVID_A0, TGL_REVID_A0))
 		desc |= CTX_DESC_FORCE_RESTORE;

+	ce->lrc_desc &= ~CTX_DESC_FORCE_RESTORE;
 	return desc;
 }

@@ -1703,16 +1709,6 @@ static void execlists_dequeue(struct intel_engine_cs *engine)

 			return;
 		}
-
-		/*
-		 * WaIdleLiteRestore:bdw,skl
-		 * Apply the wa NOOPs to prevent
-		 * ring:HEAD == rq:TAIL as we resubmit the
-		 * request. See gen8_emit_fini_breadcrumb() for
-		 * where we prepare the padding after the
-		 * end of the request.
-		 */
-		last->tail = last->wa_tail;
 	}
 }

@@ -4120,17 +4116,18 @@ static void virtual_context_destroy(struct kref *kref)
 	for (n = 0; n < ve->num_siblings; n++) {
 		struct intel_engine_cs *sibling = ve->siblings[n];
 		struct rb_node *node = &ve->nodes[sibling->id].rb;
+		unsigned long flags;

 		if (RB_EMPTY_NODE(node))
 			continue;

-		spin_lock_irq(&sibling->active.lock);
+		spin_lock_irqsave(&sibling->active.lock, flags);

 		/* Detachment is lazily performed in the execlists tasklet */
 		if (!RB_EMPTY_NODE(node))
 			rb_erase_cached(node, &sibling->execlists.virtual);

-		spin_unlock_irq(&sibling->active.lock);
+		spin_unlock_irqrestore(&sibling->active.lock, flags);
 	}
 	GEM_BUG_ON(__tasklet_is_scheduled(&ve->base.execlists.tasklet));
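Note: the WaIdleLiteRestore handling moves from dequeue time (rewinding the tail via unwind_wa_tail) to submission time: if the tail about to be submitted equals the one already in the saved context image, a "lite restore" would hand the hardware an empty ring, so the descriptor's force-restore bit is set instead. A minimal sketch of just that decision, with hypothetical types and an illustrative flag bit:

	#define CTX_FORCE_RESTORE_BIT (1ULL << 2)	/* illustrative bit */

	struct ctx_image { unsigned int ring_tail; };

	/* Sketch: same tail twice would confuse the HW on a lite restore;
	 * request a full context reload for that submission instead. */
	static unsigned long long prepare_desc(struct ctx_image *img,
					       unsigned long long desc,
					       unsigned int new_tail)
	{
		if (img->ring_tail == new_tail)
			desc |= CTX_FORCE_RESTORE_BIT;
		img->ring_tail = new_tail;
		return desc;
	}

The switch from spin_lock_irq() to spin_lock_irqsave() in virtual_context_destroy() is a separate race fix: the saved-flags variant is safe regardless of whether the caller already runs with interrupts disabled.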
@@ -45,6 +45,7 @@
 #include "gem/i915_gem_context.h"
 #include "gem/i915_gem_ioctls.h"
 #include "gem/i915_gem_pm.h"
+#include "gt/intel_context.h"
 #include "gt/intel_engine_user.h"
 #include "gt/intel_gt.h"
 #include "gt/intel_gt_pm.h"
@@ -1053,6 +1054,18 @@ out:
 	return err;
 }

+static int __intel_context_flush_retire(struct intel_context *ce)
+{
+	struct intel_timeline *tl;
+
+	tl = intel_context_timeline_lock(ce);
+	if (IS_ERR(tl))
+		return PTR_ERR(tl);
+
+	intel_context_timeline_unlock(tl);
+	return 0;
+}
+
 static int __intel_engines_record_defaults(struct intel_gt *gt)
 {
 	struct i915_request *requests[I915_NUM_ENGINES] = {};
@@ -1121,13 +1134,20 @@ err_rq:
 		if (!rq)
 			continue;

-		/* We want to be able to unbind the state from the GGTT */
-		GEM_BUG_ON(intel_context_is_pinned(rq->hw_context));
-
 		GEM_BUG_ON(!test_bit(CONTEXT_ALLOC_BIT,
 				     &rq->hw_context->flags));
 		state = rq->hw_context->state;
 		if (!state)
 			continue;

+		/* Serialise with retirement on another CPU */
+		err = __intel_context_flush_retire(rq->hw_context);
+		if (err)
+			goto out;
+
+		/* We want to be able to unbind the state from the GGTT */
+		GEM_BUG_ON(intel_context_is_pinned(rq->hw_context));
+
 		/*
 		 * As we will hold a reference to the logical state, it will
 		 * not be torn down with the context, and importantly the
@ -2078,20 +2078,12 @@ gen8_update_reg_state_unlocked(const struct intel_context *ce,
|
||||
u32 *reg_state = ce->lrc_reg_state;
|
||||
int i;
|
||||
|
||||
if (IS_GEN(stream->perf->i915, 12)) {
|
||||
u32 format = stream->oa_buffer.format;
|
||||
reg_state[ctx_oactxctrl + 1] =
|
||||
(stream->period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
|
||||
(stream->periodic ? GEN8_OA_TIMER_ENABLE : 0) |
|
||||
GEN8_OA_COUNTER_RESUME;
|
||||
|
||||
reg_state[ctx_oactxctrl + 1] =
|
||||
(format << GEN12_OAR_OACONTROL_COUNTER_FORMAT_SHIFT) |
|
||||
(stream->oa_config ? GEN12_OAR_OACONTROL_COUNTER_ENABLE : 0);
|
||||
} else {
|
||||
reg_state[ctx_oactxctrl + 1] =
|
||||
(stream->period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
|
||||
(stream->periodic ? GEN8_OA_TIMER_ENABLE : 0) |
|
||||
GEN8_OA_COUNTER_RESUME;
|
||||
}
|
||||
|
||||
for (i = 0; !!ctx_flexeu0 && i < ARRAY_SIZE(flex_regs); i++)
|
||||
for (i = 0; i < ARRAY_SIZE(flex_regs); i++)
|
||||
reg_state[ctx_flexeu0 + i * 2 + 1] =
|
||||
oa_config_flex_reg(stream->oa_config, flex_regs[i]);
|
||||
|
||||
@ -2224,34 +2216,51 @@ static int gen8_configure_context(struct i915_gem_context *ctx,
|
||||
return err;
|
||||
}
|
||||
|
||||
static int gen12_emit_oar_config(struct intel_context *ce, bool enable)
|
||||
static int gen12_configure_oar_context(struct i915_perf_stream *stream, bool enable)
|
||||
{
|
||||
struct i915_request *rq;
|
||||
u32 *cs;
|
||||
int err = 0;
|
||||
int err;
|
||||
struct intel_context *ce = stream->pinned_ctx;
|
||||
u32 format = stream->oa_buffer.format;
|
||||
struct flex regs_context[] = {
|
||||
{
|
||||
GEN8_OACTXCONTROL,
|
||||
stream->perf->ctx_oactxctrl_offset + 1,
|
||||
enable ? GEN8_OA_COUNTER_RESUME : 0,
|
||||
},
|
||||
};
|
||||
/* Offsets in regs_lri are not used since this configuration is only
|
||||
* applied using LRI. Initialize the correct offsets for posterity.
|
||||
*/
|
||||
#define GEN12_OAR_OACONTROL_OFFSET 0x5B0
|
||||
struct flex regs_lri[] = {
|
||||
{
|
||||
GEN12_OAR_OACONTROL,
|
||||
GEN12_OAR_OACONTROL_OFFSET + 1,
|
||||
(format << GEN12_OAR_OACONTROL_COUNTER_FORMAT_SHIFT) |
|
||||
(enable ? GEN12_OAR_OACONTROL_COUNTER_ENABLE : 0)
|
||||
},
|
||||
{
|
||||
RING_CONTEXT_CONTROL(ce->engine->mmio_base),
|
||||
CTX_CONTEXT_CONTROL,
|
||||
_MASKED_FIELD(GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE,
|
||||
enable ?
|
||||
GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE :
|
||||
0)
|
||||
},
|
||||
};
|
||||
|
||||
rq = i915_request_create(ce);
|
||||
if (IS_ERR(rq))
|
||||
return PTR_ERR(rq);
|
||||
/* Modify the context image of pinned context with regs_context*/
|
||||
err = intel_context_lock_pinned(ce);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
cs = intel_ring_begin(rq, 4);
|
||||
if (IS_ERR(cs)) {
|
||||
err = PTR_ERR(cs);
|
||||
goto out;
|
||||
}
|
||||
err = gen8_modify_context(ce, regs_context, ARRAY_SIZE(regs_context));
|
||||
intel_context_unlock_pinned(ce);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
*cs++ = MI_LOAD_REGISTER_IMM(1);
|
||||
*cs++ = i915_mmio_reg_offset(RING_CONTEXT_CONTROL(ce->engine->mmio_base));
|
||||
*cs++ = _MASKED_FIELD(GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE,
|
||||
enable ? GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE : 0);
|
||||
*cs++ = MI_NOOP;
|
||||
|
||||
intel_ring_advance(rq, cs);
|
||||
|
||||
out:
|
||||
i915_request_add(rq);
|
||||
|
||||
return err;
|
||||
/* Apply regs_lri using LRI with pinned context */
|
||||
return gen8_modify_self(ce, regs_lri, ARRAY_SIZE(regs_lri));
|
||||
}
|
||||
|
||||
/*
|
||||
@ -2277,53 +2286,16 @@ out:
|
||||
* per-context OA state.
|
||||
*
|
||||
* Note: it's only the RCS/Render context that has any OA state.
|
||||
* Note: the first flex register passed must always be R_PWR_CLK_STATE
|
||||
*/
|
||||
static int lrc_configure_all_contexts(struct i915_perf_stream *stream,
|
||||
const struct i915_oa_config *oa_config)
|
||||
static int oa_configure_all_contexts(struct i915_perf_stream *stream,
|
||||
struct flex *regs,
|
||||
size_t num_regs)
|
||||
{
|
||||
struct drm_i915_private *i915 = stream->perf->i915;
|
||||
/* The MMIO offsets for Flex EU registers aren't contiguous */
|
||||
const u32 ctx_flexeu0 = stream->perf->ctx_flexeu0_offset;
|
||||
#define ctx_flexeuN(N) (ctx_flexeu0 + 2 * (N) + 1)
|
||||
struct flex regs[] = {
|
||||
{
|
||||
GEN8_R_PWR_CLK_STATE,
|
||||
CTX_R_PWR_CLK_STATE,
|
||||
},
|
||||
{
|
||||
IS_GEN(i915, 12) ?
|
||||
GEN12_OAR_OACONTROL : GEN8_OACTXCONTROL,
|
||||
stream->perf->ctx_oactxctrl_offset + 1,
|
||||
},
|
||||
{ EU_PERF_CNTL0, ctx_flexeuN(0) },
|
||||
{ EU_PERF_CNTL1, ctx_flexeuN(1) },
|
||||
{ EU_PERF_CNTL2, ctx_flexeuN(2) },
|
||||
{ EU_PERF_CNTL3, ctx_flexeuN(3) },
|
||||
{ EU_PERF_CNTL4, ctx_flexeuN(4) },
|
||||
{ EU_PERF_CNTL5, ctx_flexeuN(5) },
|
||||
{ EU_PERF_CNTL6, ctx_flexeuN(6) },
|
||||
};
|
||||
#undef ctx_flexeuN
|
||||
struct intel_engine_cs *engine;
|
||||
struct i915_gem_context *ctx, *cn;
|
||||
size_t array_size = IS_GEN(i915, 12) ? 2 : ARRAY_SIZE(regs);
|
||||
int i, err;
|
||||
|
||||
if (IS_GEN(i915, 12)) {
|
||||
u32 format = stream->oa_buffer.format;
|
||||
|
||||
regs[1].value =
|
||||
(format << GEN12_OAR_OACONTROL_COUNTER_FORMAT_SHIFT) |
|
||||
(oa_config ? GEN12_OAR_OACONTROL_COUNTER_ENABLE : 0);
|
||||
} else {
|
||||
regs[1].value =
|
||||
(stream->period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
|
||||
(stream->periodic ? GEN8_OA_TIMER_ENABLE : 0) |
|
||||
GEN8_OA_COUNTER_RESUME;
|
||||
}
|
||||
|
||||
for (i = 2; !!ctx_flexeu0 && i < array_size; i++)
|
||||
regs[i].value = oa_config_flex_reg(oa_config, regs[i].reg);
|
||||
int err;
|
||||
|
||||
lockdep_assert_held(&stream->perf->lock);
|
||||
|
||||
@ -2353,7 +2325,7 @@ static int lrc_configure_all_contexts(struct i915_perf_stream *stream,
|
||||
|
||||
spin_unlock(&i915->gem.contexts.lock);
|
||||
|
||||
err = gen8_configure_context(ctx, regs, array_size);
|
||||
err = gen8_configure_context(ctx, regs, num_regs);
|
||||
if (err) {
|
||||
i915_gem_context_put(ctx);
|
||||
return err;
|
||||
@ -2378,7 +2350,7 @@ static int lrc_configure_all_contexts(struct i915_perf_stream *stream,
|
||||
|
||||
regs[0].value = intel_sseu_make_rpcs(i915, &ce->sseu);
|
||||
|
||||
err = gen8_modify_self(ce, regs, array_size);
|
||||
err = gen8_modify_self(ce, regs, num_regs);
|
||||
if (err)
|
||||
return err;
|
||||
}
|
||||
@@ -2386,6 +2358,56 @@ static int lrc_configure_all_contexts(struct i915_perf_stream *stream,
 	return 0;
 }
 
+static int gen12_configure_all_contexts(struct i915_perf_stream *stream,
+					const struct i915_oa_config *oa_config)
+{
+	struct flex regs[] = {
+		{
+			GEN8_R_PWR_CLK_STATE,
+			CTX_R_PWR_CLK_STATE,
+		},
+	};
+
+	return oa_configure_all_contexts(stream, regs, ARRAY_SIZE(regs));
+}
+
+static int lrc_configure_all_contexts(struct i915_perf_stream *stream,
+				      const struct i915_oa_config *oa_config)
+{
+	/* The MMIO offsets for Flex EU registers aren't contiguous */
+	const u32 ctx_flexeu0 = stream->perf->ctx_flexeu0_offset;
+#define ctx_flexeuN(N) (ctx_flexeu0 + 2 * (N) + 1)
+	struct flex regs[] = {
+		{
+			GEN8_R_PWR_CLK_STATE,
+			CTX_R_PWR_CLK_STATE,
+		},
+		{
+			GEN8_OACTXCONTROL,
+			stream->perf->ctx_oactxctrl_offset + 1,
+		},
+		{ EU_PERF_CNTL0, ctx_flexeuN(0) },
+		{ EU_PERF_CNTL1, ctx_flexeuN(1) },
+		{ EU_PERF_CNTL2, ctx_flexeuN(2) },
+		{ EU_PERF_CNTL3, ctx_flexeuN(3) },
+		{ EU_PERF_CNTL4, ctx_flexeuN(4) },
+		{ EU_PERF_CNTL5, ctx_flexeuN(5) },
+		{ EU_PERF_CNTL6, ctx_flexeuN(6) },
+	};
+#undef ctx_flexeuN
+	int i;
+
+	regs[1].value =
+		(stream->period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
+		(stream->periodic ? GEN8_OA_TIMER_ENABLE : 0) |
+		GEN8_OA_COUNTER_RESUME;
+
+	for (i = 2; i < ARRAY_SIZE(regs); i++)
+		regs[i].value = oa_config_flex_reg(oa_config, regs[i].reg);
+
+	return oa_configure_all_contexts(stream, regs, ARRAY_SIZE(regs));
+}
+
 static int gen8_enable_metric_set(struct i915_perf_stream *stream)
 {
 	struct intel_uncore *uncore = stream->uncore;
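
The refactor above splits the gen12 and gen8-11 paths into separate functions instead of branching on IS_GEN() inside one body, but both variants keep the same table-driven shape: build an array of {mmio register, context-image offset} pairs, patch in the runtime values, then hand the whole table to one helper. A minimal standalone sketch of that shape follows; every register number, offset, and bit layout in it is made up for illustration and is not i915's:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct flex_sketch {
	uint32_t mmio;   /* register to program */
	uint32_t offset; /* slot in the saved context image */
	uint32_t value;  /* value to write into that slot */
};

/* one helper consumes the whole table, whatever generation built it */
static void apply_table(const struct flex_sketch *regs, size_t n)
{
	for (size_t i = 0; i < n; i++)
		printf("ctx[0x%x] = 0x%x (mmio 0x%x)\n",
		       (unsigned)regs[i].offset,
		       (unsigned)regs[i].value,
		       (unsigned)regs[i].mmio);
}

int main(void)
{
	/* hypothetical entries, standing in for GEN8_OACTXCONTROL etc. */
	struct flex_sketch regs[] = {
		{ 0x2000, 0x10, 0 },
		{ 0x2400, 0x12, 0 },
	};

	/* patch values before applying, as the driver does with regs[1].value */
	regs[1].value = (7u << 2) | 1u; /* e.g. timer exponent | enable bit */

	apply_table(regs, sizeof(regs) / sizeof(regs[0]));
	return 0;
}
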
@@ -2464,7 +2486,7 @@ static int gen12_enable_metric_set(struct i915_perf_stream *stream)
 	 * to make sure all slices/subslices are ON before writing to NOA
 	 * registers.
 	 */
-	ret = lrc_configure_all_contexts(stream, oa_config);
+	ret = gen12_configure_all_contexts(stream, oa_config);
 	if (ret)
 		return ret;
 
@@ -2474,8 +2496,7 @@ static int gen12_enable_metric_set(struct i915_perf_stream *stream)
 	 * requested this.
 	 */
 	if (stream->ctx) {
-		ret = gen12_emit_oar_config(stream->pinned_ctx,
-					    oa_config != NULL);
+		ret = gen12_configure_oar_context(stream, true);
 		if (ret)
 			return ret;
 	}
@@ -2509,11 +2530,11 @@ static void gen12_disable_metric_set(struct i915_perf_stream *stream)
 	struct intel_uncore *uncore = stream->uncore;
 
 	/* Reset all contexts' slices/subslices configurations. */
-	lrc_configure_all_contexts(stream, NULL);
+	gen12_configure_all_contexts(stream, NULL);
 
 	/* disable the context save/restore or OAR counters */
 	if (stream->ctx)
-		gen12_emit_oar_config(stream->pinned_ctx, false);
+		gen12_configure_oar_context(stream, false);
 
 	/* Make sure we disable noa to save power. */
 	intel_uncore_rmw(uncore, RPM_CONFIG1, GEN10_GT_NOA_ENABLE, 0);
@@ -2713,7 +2734,8 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
 		return -EINVAL;
 	}
 
-	if (!(props->sample_flags & SAMPLE_OA_REPORT)) {
+	if (!(props->sample_flags & SAMPLE_OA_REPORT) &&
+	    (INTEL_GEN(perf->i915) < 12 || !stream->ctx)) {
 		DRM_DEBUG("Only OA report sampling supported\n");
 		return -EINVAL;
 	}
@@ -2745,7 +2767,7 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
 
 	format_size = perf->oa_formats[props->oa_format].size;
 
-	stream->sample_flags |= SAMPLE_OA_REPORT;
+	stream->sample_flags = props->sample_flags;
 	stream->sample_size += format_size;
 
 	stream->oa_buffer.format_size = format_size;
@@ -2854,7 +2876,11 @@ void i915_oa_init_reg_state(const struct intel_context *ce,
 		return;
 
 	stream = engine->i915->perf.exclusive_stream;
-	if (stream)
+	/*
+	 * For gen12, only CTX_R_PWR_CLK_STATE needs update, but the caller
+	 * is already doing that, so nothing to be done for gen12 here.
+	 */
+	if (stream && INTEL_GEN(stream->perf->i915) < 12)
 		gen8_update_reg_state_unlocked(ce, stream);
 }
 
@@ -935,11 +935,13 @@ static int mcde_dsi_bind(struct device *dev, struct device *master,
 	for_each_available_child_of_node(dev->of_node, child) {
 		panel = of_drm_find_panel(child);
 		if (IS_ERR(panel)) {
-			dev_err(dev, "failed to find panel try bridge (%lu)\n",
+			dev_err(dev, "failed to find panel try bridge (%ld)\n",
 				PTR_ERR(panel));
+			panel = NULL;
+
 			bridge = of_drm_find_bridge(child);
 			if (IS_ERR(bridge)) {
-				dev_err(dev, "failed to find bridge (%lu)\n",
+				dev_err(dev, "failed to find bridge (%ld)\n",
 					PTR_ERR(bridge));
 				return PTR_ERR(bridge);
 			}
 
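
Besides the %ld format fix, the hunk above restores the panel-then-bridge fallback: the stale error-encoded pointer has to be cleared before the second lookup, or later code would treat an IS_ERR() value as a live panel. A standalone sketch of that fallback shape, where ERR_PTR_SKETCH and both lookup functions are invented stand-ins for the IS_ERR()/of_drm_find_*() machinery:

#include <stdio.h>

struct dev_sketch { int id; };

#define ERR_PTR_SKETCH ((struct dev_sketch *)-1) /* stand-in for IS_ERR() */

static struct dev_sketch *find_panel(int node)
{
	(void)node;
	return ERR_PTR_SKETCH; /* pretend the panel lookup failed */
}

static struct dev_sketch *find_bridge(int node)
{
	static struct dev_sketch b;
	(void)node;
	return &b; /* pretend a bridge exists */
}

static int bind_sketch(int node)
{
	struct dev_sketch *panel, *bridge = NULL;

	panel = find_panel(node);
	if (panel == ERR_PTR_SKETCH) {
		fprintf(stderr, "failed to find panel, trying bridge\n");
		panel = NULL; /* the fix: don't keep the stale error value */

		bridge = find_bridge(node);
		if (bridge == ERR_PTR_SKETCH)
			return -1;
	}

	printf("bound %s\n", panel ? "panel" : "bridge");
	return 0;
}

int main(void)
{
	return bind_sketch(0);
}
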
@@ -64,6 +64,25 @@ struct meson_cvbs_mode meson_cvbs_modes[MESON_CVBS_MODES_COUNT] = {
 	},
 };
 
+static const struct meson_cvbs_mode *
+meson_cvbs_get_mode(const struct drm_display_mode *req_mode)
+{
+	int i;
+
+	for (i = 0; i < MESON_CVBS_MODES_COUNT; ++i) {
+		struct meson_cvbs_mode *meson_mode = &meson_cvbs_modes[i];
+
+		if (drm_mode_match(req_mode, &meson_mode->mode,
+				   DRM_MODE_MATCH_TIMINGS |
+				   DRM_MODE_MATCH_CLOCK |
+				   DRM_MODE_MATCH_FLAGS |
+				   DRM_MODE_MATCH_3D_FLAGS))
+			return meson_mode;
+	}
+
+	return NULL;
+}
+
 /* Connector */
 
 static void meson_cvbs_connector_destroy(struct drm_connector *connector)
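
The helper above centralizes the mode lookup that was previously duplicated in the atomic_check and mode_set callbacks (see the two hunks that follow). A standalone sketch of the same refactor, with a simplified match on timings standing in for drm_mode_match():

#include <stddef.h>
#include <stdio.h>

struct mode_sketch { int hdisplay, vdisplay, clock; };

static const struct mode_sketch cvbs_modes[] = {
	{ 720, 480, 13500 }, /* stand-in for the 480i entry */
	{ 720, 576, 13500 }, /* stand-in for the 576i entry */
};

/* single lookup used by both the validate and the program paths */
static const struct mode_sketch *get_mode(const struct mode_sketch *req)
{
	for (size_t i = 0; i < sizeof(cvbs_modes) / sizeof(cvbs_modes[0]); i++) {
		const struct mode_sketch *m = &cvbs_modes[i];

		if (m->hdisplay == req->hdisplay &&
		    m->vdisplay == req->vdisplay &&
		    m->clock == req->clock)
			return m;
	}
	return NULL;
}

int main(void)
{
	struct mode_sketch req = { 720, 576, 13500 };

	/* atomic_check-style use: a mode is valid iff the lookup succeeds */
	printf("mode %s\n", get_mode(&req) ? "supported" : "rejected");
	return 0;
}
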
@@ -136,14 +155,8 @@ static int meson_venc_cvbs_encoder_atomic_check(struct drm_encoder *encoder,
 					struct drm_crtc_state *crtc_state,
 					struct drm_connector_state *conn_state)
 {
-	int i;
-
-	for (i = 0; i < MESON_CVBS_MODES_COUNT; ++i) {
-		struct meson_cvbs_mode *meson_mode = &meson_cvbs_modes[i];
-
-		if (drm_mode_equal(&crtc_state->mode, &meson_mode->mode))
-			return 0;
-	}
+	if (meson_cvbs_get_mode(&crtc_state->mode))
+		return 0;
 
 	return -EINVAL;
 }
@@ -191,24 +204,17 @@ static void meson_venc_cvbs_encoder_mode_set(struct drm_encoder *encoder,
 				   struct drm_display_mode *mode,
 				   struct drm_display_mode *adjusted_mode)
 {
+	const struct meson_cvbs_mode *meson_mode = meson_cvbs_get_mode(mode);
 	struct meson_venc_cvbs *meson_venc_cvbs =
 					encoder_to_meson_venc_cvbs(encoder);
 	struct meson_drm *priv = meson_venc_cvbs->priv;
-	int i;
 
-	for (i = 0; i < MESON_CVBS_MODES_COUNT; ++i) {
-		struct meson_cvbs_mode *meson_mode = &meson_cvbs_modes[i];
-
-		if (drm_mode_equal(mode, &meson_mode->mode)) {
-			meson_venci_cvbs_mode_set(priv,
-						  meson_mode->enci);
+	if (meson_mode) {
+		meson_venci_cvbs_mode_set(priv, meson_mode->enci);
 
-			/* Setup 27MHz vclk2 for ENCI and VDAC */
-			meson_vclk_setup(priv, MESON_VCLK_TARGET_CVBS,
-					 MESON_VCLK_CVBS, MESON_VCLK_CVBS,
-					 MESON_VCLK_CVBS, true);
-			break;
-		}
+		/* Setup 27MHz vclk2 for ENCI and VDAC */
+		meson_vclk_setup(priv, MESON_VCLK_TARGET_CVBS, MESON_VCLK_CVBS,
+				 MESON_VCLK_CVBS, MESON_VCLK_CVBS, true);
 	}
 }
 
@@ -30,9 +30,8 @@ module_param_named(modeset, mgag200_modeset, int, 0400);
 static struct drm_driver driver;
 
 static const struct pci_device_id pciidlist[] = {
-	{ PCI_VENDOR_ID_MATROX, 0x522, PCI_VENDOR_ID_SUN, 0x4852, 0, 0,
+	{ PCI_VENDOR_ID_MATROX, 0x522, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
 		G200_SE_A | MGAG200_FLAG_HW_BUG_NO_STARTADD},
-	{ PCI_VENDOR_ID_MATROX, 0x522, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_SE_A },
 	{ PCI_VENDOR_ID_MATROX, 0x524, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_SE_B },
 	{ PCI_VENDOR_ID_MATROX, 0x530, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_EV },
 	{ PCI_VENDOR_ID_MATROX, 0x532, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_WB },
 
@@ -114,6 +114,7 @@ struct nv50_head_atom {
 			u8 nhsync:1;
 			u8 nvsync:1;
 			u8 depth:4;
+			u8 bpc;
 		} or;
 
 		/* Currently only used for MST */
 
@@ -326,9 +326,9 @@ nv50_outp_atomic_check_view(struct drm_encoder *encoder,
 			 * same size as the native one (e.g. different
 			 * refresh rate)
 			 */
-			if (adjusted_mode->hdisplay == native_mode->hdisplay &&
-			    adjusted_mode->vdisplay == native_mode->vdisplay &&
-			    adjusted_mode->type & DRM_MODE_TYPE_DRIVER)
+			if (mode->hdisplay == native_mode->hdisplay &&
+			    mode->vdisplay == native_mode->vdisplay &&
+			    mode->type & DRM_MODE_TYPE_DRIVER)
 				break;
 			mode = native_mode;
 			asyc->scaler.full = true;
@@ -353,10 +353,20 @@ nv50_outp_atomic_check(struct drm_encoder *encoder,
 		       struct drm_crtc_state *crtc_state,
 		       struct drm_connector_state *conn_state)
 {
-	struct nouveau_connector *nv_connector =
-		nouveau_connector(conn_state->connector);
-	return nv50_outp_atomic_check_view(encoder, crtc_state, conn_state,
-					   nv_connector->native_mode);
+	struct drm_connector *connector = conn_state->connector;
+	struct nouveau_connector *nv_connector = nouveau_connector(connector);
+	struct nv50_head_atom *asyh = nv50_head_atom(crtc_state);
+	int ret;
+
+	ret = nv50_outp_atomic_check_view(encoder, crtc_state, conn_state,
+					  nv_connector->native_mode);
+	if (ret)
+		return ret;
+
+	if (crtc_state->mode_changed || crtc_state->connectors_changed)
+		asyh->or.bpc = connector->display_info.bpc;
+
+	return 0;
 }
 
 /******************************************************************************
@@ -770,32 +780,54 @@ nv50_msto_atomic_check(struct drm_encoder *encoder,
 	struct nv50_mstm *mstm = mstc->mstm;
 	struct nv50_head_atom *asyh = nv50_head_atom(crtc_state);
 	int slots;
+	int ret;
+
+	ret = nv50_outp_atomic_check_view(encoder, crtc_state, conn_state,
+					  mstc->native);
+	if (ret)
+		return ret;
+
+	if (!crtc_state->mode_changed && !crtc_state->connectors_changed)
+		return 0;
 
-	if (crtc_state->mode_changed || crtc_state->connectors_changed) {
-		/*
-		 * When restoring duplicated states, we need to make sure that
-		 * the bw remains the same and avoid recalculating it, as the
-		 * connector's bpc may have changed after the state was
-		 * duplicated
-		 */
-		if (!state->duplicated) {
-			const int bpp = connector->display_info.bpc * 3;
-			const int clock = crtc_state->adjusted_mode.clock;
-
-			asyh->dp.pbn = drm_dp_calc_pbn_mode(clock, bpp);
-		}
-
-		slots = drm_dp_atomic_find_vcpi_slots(state, &mstm->mgr,
-						      mstc->port,
-						      asyh->dp.pbn);
-		if (slots < 0)
-			return slots;
-
-		asyh->dp.tu = slots;
+	/*
+	 * When restoring duplicated states, we need to make sure that the bw
+	 * remains the same and avoid recalculating it, as the connector's bpc
+	 * may have changed after the state was duplicated
+	 */
+	if (!state->duplicated) {
+		const int clock = crtc_state->adjusted_mode.clock;
+
+		/*
+		 * XXX: Since we don't use HDR in userspace quite yet, limit
+		 * the bpc to 8 to save bandwidth on the topology. In the
+		 * future, we'll want to properly fix this by dynamically
+		 * selecting the highest possible bpc that would fit in the
+		 * topology
+		 */
+		asyh->or.bpc = min(connector->display_info.bpc, 8U);
+		asyh->dp.pbn = drm_dp_calc_pbn_mode(clock, asyh->or.bpc * 3);
 	}
 
-	return nv50_outp_atomic_check_view(encoder, crtc_state, conn_state,
-					   mstc->native);
+	slots = drm_dp_atomic_find_vcpi_slots(state, &mstm->mgr, mstc->port,
+					      asyh->dp.pbn);
+	if (slots < 0)
+		return slots;
+
+	asyh->dp.tu = slots;
+
+	return 0;
 }
 
+static u8
+nv50_dp_bpc_to_depth(unsigned int bpc)
+{
+	switch (bpc) {
+	case 6: return 0x2;
+	case 8: return 0x5;
+	case 10: /* fall-through */
+	default: return 0x6;
+	}
+}
+
 static void
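
On the bandwidth math behind the XXX comment above: the payload bandwidth number (PBN) grows linearly with pixel clock and bpp, which is why clamping bpc to 8 directly shrinks the VCPI allocation a mode consumes. A rough standalone estimate of that relationship; the 64/54 factor follows the DisplayPort MST convention that drm_dp_calc_pbn_mode() is documented to use, but the kernel helper also applies a small margin and rounds up, so treat these numbers as approximations only:

#include <stdio.h>

static long pbn_estimate(long clock_khz, int bpp)
{
	/* kHz * bits -> bytes (/8), MST 64/54 overhead, scale to PBN units */
	return (clock_khz * bpp / 8) * 64 / 54 / 1000;
}

int main(void)
{
	long clock = 297000; /* a 4k-class pixel clock, in kHz */

	printf("pbn @ 10 bpc: %ld\n", pbn_estimate(clock, 10 * 3));
	printf("pbn @  8 bpc: %ld\n", pbn_estimate(clock, 8 * 3));
	return 0;
}
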
@@ -808,7 +840,7 @@ nv50_msto_enable(struct drm_encoder *encoder)
 	struct nv50_mstm *mstm = NULL;
 	struct drm_connector *connector;
 	struct drm_connector_list_iter conn_iter;
-	u8 proto, depth;
+	u8 proto;
 	bool r;
 
 	drm_connector_list_iter_begin(encoder->dev, &conn_iter);
@@ -837,14 +869,8 @@ nv50_msto_enable(struct drm_encoder *encoder)
 	else
 		proto = 0x9;
 
-	switch (mstc->connector.display_info.bpc) {
-	case 6: depth = 0x2; break;
-	case 8: depth = 0x5; break;
-	case 10:
-	default: depth = 0x6; break;
-	}
-
-	mstm->outp->update(mstm->outp, head->base.index, armh, proto, depth);
+	mstm->outp->update(mstm->outp, head->base.index, armh, proto,
+			   nv50_dp_bpc_to_depth(armh->or.bpc));
 
 	msto->head = head;
 	msto->mstc = mstc;
@@ -1498,20 +1524,14 @@ nv50_sor_enable(struct drm_encoder *encoder)
 					lvds.lvds.script |= 0x0200;
 			}
 
-			if (nv_connector->base.display_info.bpc == 8)
+			if (asyh->or.bpc == 8)
 				lvds.lvds.script |= 0x0200;
 		}
 
 		nvif_mthd(&disp->disp->object, 0, &lvds, sizeof(lvds));
 		break;
 	case DCB_OUTPUT_DP:
-		if (nv_connector->base.display_info.bpc == 6)
-			depth = 0x2;
-		else
-		if (nv_connector->base.display_info.bpc == 8)
-			depth = 0x5;
-		else
-			depth = 0x6;
+		depth = nv50_dp_bpc_to_depth(asyh->or.bpc);
 
 		if (nv_encoder->link & 1)
 			proto = 0x8;
@@ -1662,7 +1682,7 @@ nv50_pior_enable(struct drm_encoder *encoder)
 	nv50_outp_acquire(nv_encoder);
 
 	nv_connector = nouveau_encoder_connector_get(nv_encoder);
-	switch (nv_connector->base.display_info.bpc) {
+	switch (asyh->or.bpc) {
 	case 10: asyh->or.depth = 0x6; break;
 	case 8: asyh->or.depth = 0x5; break;
 	case 6: asyh->or.depth = 0x2; break;
 
@@ -81,18 +81,17 @@ nv50_head_atomic_check_dither(struct nv50_head_atom *armh,
 			      struct nv50_head_atom *asyh,
 			      struct nouveau_conn_atom *asyc)
 {
-	struct drm_connector *connector = asyc->state.connector;
 	u32 mode = 0x00;
 
 	if (asyc->dither.mode == DITHERING_MODE_AUTO) {
-		if (asyh->base.depth > connector->display_info.bpc * 3)
+		if (asyh->base.depth > asyh->or.bpc * 3)
 			mode = DITHERING_MODE_DYNAMIC2X2;
 	} else {
 		mode = asyc->dither.mode;
 	}
 
 	if (asyc->dither.depth == DITHERING_DEPTH_AUTO) {
-		if (connector->display_info.bpc >= 8)
+		if (asyh->or.bpc >= 8)
 			mode |= DITHERING_DEPTH_8BPC;
 	} else {
 		mode |= asyc->dither.depth;
 
@@ -245,14 +245,22 @@ nouveau_conn_atomic_duplicate_state(struct drm_connector *connector)
 void
 nouveau_conn_reset(struct drm_connector *connector)
 {
+	struct nouveau_connector *nv_connector = nouveau_connector(connector);
 	struct nouveau_conn_atom *asyc;
 
-	if (WARN_ON(!(asyc = kzalloc(sizeof(*asyc), GFP_KERNEL))))
-		return;
+	if (drm_drv_uses_atomic_modeset(connector->dev)) {
+		if (WARN_ON(!(asyc = kzalloc(sizeof(*asyc), GFP_KERNEL))))
+			return;
+
+		if (connector->state)
+			nouveau_conn_atomic_destroy_state(connector,
+							  connector->state);
+
+		__drm_atomic_helper_connector_reset(connector, &asyc->state);
+	} else {
+		asyc = &nv_connector->properties_state;
+	}
 
-	if (connector->state)
-		nouveau_conn_atomic_destroy_state(connector, connector->state);
-	__drm_atomic_helper_connector_reset(connector, &asyc->state);
 	asyc->dither.mode = DITHERING_MODE_AUTO;
 	asyc->dither.depth = DITHERING_DEPTH_AUTO;
 	asyc->scaler.mode = DRM_MODE_SCALE_NONE;
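
The branch added above is an instance of an embedded-fallback-state pattern: when the atomic path's heap-allocated state cannot be used (here, pre-nv50 hardware without atomic modeset), property code is pointed at a state struct embedded in the long-lived connector instead, so it can always dereference something valid. A standalone sketch, with all names and fields invented for illustration:

#include <stdbool.h>
#include <stdlib.h>

struct conn_state_sketch { int dither_mode; };

struct connector_sketch {
	bool atomic;                       /* does this device use atomic? */
	struct conn_state_sketch *state;   /* heap state, atomic path only */
	struct conn_state_sketch embedded; /* fallback, non-atomic path */
};

static struct conn_state_sketch *conn_reset_sketch(struct connector_sketch *c)
{
	struct conn_state_sketch *s;

	if (c->atomic) {
		s = calloc(1, sizeof(*s));
		if (!s)
			return NULL;
		free(c->state); /* drop any previous per-commit state */
		c->state = s;
	} else {
		s = &c->embedded; /* always valid, never freed */
	}

	s->dither_mode = 0; /* stand-in for DITHERING_MODE_AUTO */
	return s;
}

int main(void)
{
	struct connector_sketch c = { .atomic = false };

	return conn_reset_sketch(&c) ? 0 : 1;
}
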
@@ -276,8 +284,14 @@ void
 nouveau_conn_attach_properties(struct drm_connector *connector)
 {
 	struct drm_device *dev = connector->dev;
-	struct nouveau_conn_atom *armc = nouveau_conn_atom(connector->state);
 	struct nouveau_display *disp = nouveau_display(dev);
+	struct nouveau_connector *nv_connector = nouveau_connector(connector);
+	struct nouveau_conn_atom *armc;
+
+	if (drm_drv_uses_atomic_modeset(connector->dev))
+		armc = nouveau_conn_atom(connector->state);
+	else
+		armc = &nv_connector->properties_state;
 
 	/* Init DVI-I specific properties. */
 	if (connector->connector_type == DRM_MODE_CONNECTOR_DVII)
@@ -748,9 +762,9 @@ static int
 nouveau_connector_set_property(struct drm_connector *connector,
 			       struct drm_property *property, uint64_t value)
 {
-	struct nouveau_conn_atom *asyc = nouveau_conn_atom(connector->state);
 	struct nouveau_connector *nv_connector = nouveau_connector(connector);
 	struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder;
+	struct nouveau_conn_atom *asyc = &nv_connector->properties_state;
 	struct drm_encoder *encoder = to_drm_encoder(nv_encoder);
 	int ret;
 
@@ -29,6 +29,7 @@
 
 #include <nvif/notify.h>
 
+#include <drm/drm_crtc.h>
 #include <drm/drm_edid.h>
 #include <drm/drm_encoder.h>
 #include <drm/drm_dp_helper.h>
@@ -44,6 +45,60 @@ struct dcb_output;
 struct nouveau_backlight;
 #endif
 
+#define nouveau_conn_atom(p) \
+	container_of((p), struct nouveau_conn_atom, state)
+
+struct nouveau_conn_atom {
+	struct drm_connector_state state;
+
+	struct {
+		/* The enum values specifically defined here match nv50/gf119
+		 * hw values, and the code relies on this.
+		 */
+		enum {
+			DITHERING_MODE_OFF = 0x00,
+			DITHERING_MODE_ON = 0x01,
+			DITHERING_MODE_DYNAMIC2X2 = 0x10 | DITHERING_MODE_ON,
+			DITHERING_MODE_STATIC2X2 = 0x18 | DITHERING_MODE_ON,
+			DITHERING_MODE_TEMPORAL = 0x20 | DITHERING_MODE_ON,
+			DITHERING_MODE_AUTO
+		} mode;
+		enum {
+			DITHERING_DEPTH_6BPC = 0x00,
+			DITHERING_DEPTH_8BPC = 0x02,
+			DITHERING_DEPTH_AUTO
+		} depth;
+	} dither;
+
+	struct {
+		int mode;	/* DRM_MODE_SCALE_* */
+		struct {
+			enum {
+				UNDERSCAN_OFF,
+				UNDERSCAN_ON,
+				UNDERSCAN_AUTO,
+			} mode;
+			u32 hborder;
+			u32 vborder;
+		} underscan;
+		bool full;
+	} scaler;
+
+	struct {
+		int color_vibrance;
+		int vibrant_hue;
+	} procamp;
+
+	union {
+		struct {
+			bool dither:1;
+			bool scaler:1;
+			bool procamp:1;
+		};
+		u8 mask;
+	} set;
+};
+
 struct nouveau_connector {
 	struct drm_connector base;
 	enum dcb_connector_type type;
@@ -63,6 +118,12 @@ struct nouveau_connector {
 #ifdef CONFIG_DRM_NOUVEAU_BACKLIGHT
 	struct nouveau_backlight *backlight;
 #endif
+	/*
+	 * Our connector property code expects a nouveau_conn_atom struct
+	 * even on pre-nv50 where we do not support atomic. This embedded
+	 * version gets used in the non atomic modeset case.
+	 */
+	struct nouveau_conn_atom properties_state;
 };
 
 static inline struct nouveau_connector *nouveau_connector(
@@ -121,61 +182,6 @@ extern int nouveau_ignorelid;
 extern int nouveau_duallink;
 extern int nouveau_hdmimhz;
 
-#include <drm/drm_crtc.h>
-#define nouveau_conn_atom(p) \
-	container_of((p), struct nouveau_conn_atom, state)
-
-struct nouveau_conn_atom {
-	struct drm_connector_state state;
-
-	struct {
-		/* The enum values specifically defined here match nv50/gf119
-		 * hw values, and the code relies on this.
-		 */
-		enum {
-			DITHERING_MODE_OFF = 0x00,
-			DITHERING_MODE_ON = 0x01,
-			DITHERING_MODE_DYNAMIC2X2 = 0x10 | DITHERING_MODE_ON,
-			DITHERING_MODE_STATIC2X2 = 0x18 | DITHERING_MODE_ON,
-			DITHERING_MODE_TEMPORAL = 0x20 | DITHERING_MODE_ON,
-			DITHERING_MODE_AUTO
-		} mode;
-		enum {
-			DITHERING_DEPTH_6BPC = 0x00,
-			DITHERING_DEPTH_8BPC = 0x02,
-			DITHERING_DEPTH_AUTO
-		} depth;
-	} dither;
-
-	struct {
-		int mode;	/* DRM_MODE_SCALE_* */
-		struct {
-			enum {
-				UNDERSCAN_OFF,
-				UNDERSCAN_ON,
-				UNDERSCAN_AUTO,
-			} mode;
-			u32 hborder;
-			u32 vborder;
-		} underscan;
-		bool full;
-	} scaler;
-
-	struct {
-		int color_vibrance;
-		int vibrant_hue;
-	} procamp;
-
-	union {
-		struct {
-			bool dither:1;
-			bool scaler:1;
-			bool procamp:1;
-		};
-		u8 mask;
-	} set;
-};
-
 void nouveau_conn_attach_properties(struct drm_connector *);
 void nouveau_conn_reset(struct drm_connector *);
 struct drm_connector_state *
 
@@ -18,15 +18,18 @@ static void panfrost_devfreq_update_utilization(struct panfrost_device *pfdev);
 static int panfrost_devfreq_target(struct device *dev, unsigned long *freq,
 				   u32 flags)
 {
 	struct panfrost_device *pfdev = dev_get_drvdata(dev);
+	struct dev_pm_opp *opp;
 	int err;
 
+	opp = devfreq_recommended_opp(dev, freq, flags);
+	if (IS_ERR(opp))
+		return PTR_ERR(opp);
+	dev_pm_opp_put(opp);
+
 	err = dev_pm_opp_set_rate(dev, *freq);
 	if (err)
 		return err;
 
 	*freq = clk_get_rate(pfdev->clock);
 
 	return 0;
 }
@@ -60,20 +63,10 @@ static int panfrost_devfreq_get_dev_status(struct device *dev,
 	return 0;
 }
 
-static int panfrost_devfreq_get_cur_freq(struct device *dev, unsigned long *freq)
-{
-	struct panfrost_device *pfdev = platform_get_drvdata(to_platform_device(dev));
-
-	*freq = clk_get_rate(pfdev->clock);
-
-	return 0;
-}
-
 static struct devfreq_dev_profile panfrost_devfreq_profile = {
 	.polling_ms = 50, /* ~3 frames */
 	.target = panfrost_devfreq_target,
 	.get_dev_status = panfrost_devfreq_get_dev_status,
-	.get_cur_freq = panfrost_devfreq_get_cur_freq,
 };
 
 int panfrost_devfreq_init(struct panfrost_device *pfdev)
 
@@ -303,14 +303,17 @@ static int panfrost_ioctl_mmap_bo(struct drm_device *dev, void *data,
 	}
 
 	/* Don't allow mmapping of heap objects as pages are not pinned. */
-	if (to_panfrost_bo(gem_obj)->is_heap)
-		return -EINVAL;
+	if (to_panfrost_bo(gem_obj)->is_heap) {
+		ret = -EINVAL;
+		goto out;
+	}
 
 	ret = drm_gem_create_mmap_offset(gem_obj);
 	if (ret == 0)
 		args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);
-	drm_gem_object_put_unlocked(gem_obj);
 
+out:
+	drm_gem_object_put_unlocked(gem_obj);
 	return ret;
 }
@@ -347,20 +350,19 @@ static int panfrost_ioctl_madvise(struct drm_device *dev, void *data,
 		return -ENOENT;
 	}
 
+	mutex_lock(&pfdev->shrinker_lock);
 	args->retained = drm_gem_shmem_madvise(gem_obj, args->madv);
 
 	if (args->retained) {
 		struct panfrost_gem_object *bo = to_panfrost_bo(gem_obj);
 
-		mutex_lock(&pfdev->shrinker_lock);
-
 		if (args->madv == PANFROST_MADV_DONTNEED)
-			list_add_tail(&bo->base.madv_list, &pfdev->shrinker_list);
+			list_add_tail(&bo->base.madv_list,
+				      &pfdev->shrinker_list);
 		else if (args->madv == PANFROST_MADV_WILLNEED)
 			list_del_init(&bo->base.madv_list);
-
-		mutex_unlock(&pfdev->shrinker_lock);
 	}
+	mutex_unlock(&pfdev->shrinker_lock);
 
 	drm_gem_object_put_unlocked(gem_obj);
 	return 0;
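
The fix above widens the shrinker_lock critical section so the madvise status and the shrinker-list membership can never be observed out of sync. A generic userspace sketch of the same rule, using pthreads as stand-ins for the kernel primitives (not kernel code; the WILLNEED removal path is elided for brevity):

#include <pthread.h>

struct obj_sketch {
	int dontneed;            /* stand-in for the madv state */
	struct obj_sketch *next; /* stand-in for the shrinker list */
};

static pthread_mutex_t shrinker_lock = PTHREAD_MUTEX_INITIALIZER;
static struct obj_sketch *reclaim_list;

static void madvise_sketch(struct obj_sketch *o, int dontneed)
{
	pthread_mutex_lock(&shrinker_lock);
	o->dontneed = dontneed;         /* the state change ...        */
	if (dontneed) {                 /* ... and the list update     */
		o->next = reclaim_list; /* stay paired under one lock, */
		reclaim_list = o;       /* so a scan sees both or none */
	}
	pthread_mutex_unlock(&shrinker_lock);
}

int main(void)
{
	struct obj_sketch o = { 0, 0 };

	madvise_sketch(&o, 1);
	return 0;
}
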
@@ -443,7 +445,7 @@ panfrost_postclose(struct drm_device *dev, struct drm_file *file)
 {
 	struct panfrost_file_priv *panfrost_priv = file->driver_priv;
 
-	panfrost_perfcnt_close(panfrost_priv);
+	panfrost_perfcnt_close(file);
 	panfrost_job_close(panfrost_priv);
 
 	panfrost_mmu_pgtable_free(panfrost_priv);
 
@@ -19,6 +19,16 @@ static void panfrost_gem_free_object(struct drm_gem_object *obj)
 	struct panfrost_gem_object *bo = to_panfrost_bo(obj);
 	struct panfrost_device *pfdev = obj->dev->dev_private;
 
+	/*
+	 * Make sure the BO is no longer inserted in the shrinker list before
+	 * taking care of the destruction itself. If we don't do that we have a
+	 * race condition between this function and what's done in
+	 * panfrost_gem_shrinker_scan().
+	 */
+	mutex_lock(&pfdev->shrinker_lock);
+	list_del_init(&bo->base.madv_list);
+	mutex_unlock(&pfdev->shrinker_lock);
+
 	if (bo->sgts) {
 		int i;
 		int n_sgt = bo->base.base.size / SZ_2M;
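
The comment in the hunk above spells out the ordering rule: unpublish the BO from the shrinker list first, under the same lock the shrinker scan takes, and only then tear the object down. A standalone sketch of that two-step teardown (illustrative stubs, not driver code):

#include <pthread.h>
#include <stdlib.h>

struct node { struct node *prev, *next; };

static pthread_mutex_t shrinker_lock = PTHREAD_MUTEX_INITIALIZER;

static void list_unlink(struct node *n)
{
	/* doubly-linked self-removal; harmless if already unlinked */
	if (n->prev)
		n->prev->next = n->next;
	if (n->next)
		n->next->prev = n->prev;
	n->prev = n->next = NULL;
}

static void free_object_sketch(struct node *n)
{
	pthread_mutex_lock(&shrinker_lock);
	list_unlink(n);                 /* step 1: make it unreachable */
	pthread_mutex_unlock(&shrinker_lock);

	free(n);                        /* step 2: tear it down */
}

int main(void)
{
	struct node *n = calloc(1, sizeof(*n));

	if (n)
		free_object_sketch(n);
	return 0;
}
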
@@ -33,15 +43,10 @@ static void panfrost_gem_free_object(struct drm_gem_object *obj)
 		kfree(bo->sgts);
 	}
 
-	mutex_lock(&pfdev->shrinker_lock);
-	if (!list_empty(&bo->base.madv_list))
-		list_del(&bo->base.madv_list);
-	mutex_unlock(&pfdev->shrinker_lock);
-
 	drm_gem_shmem_free_object(obj);
 }
 
-static int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv)
+int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv)
 {
 	int ret;
 	size_t size = obj->size;
@@ -80,7 +85,7 @@ static int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_p
 	return ret;
 }
 
-static void panfrost_gem_close(struct drm_gem_object *obj, struct drm_file *file_priv)
+void panfrost_gem_close(struct drm_gem_object *obj, struct drm_file *file_priv)
 {
 	struct panfrost_gem_object *bo = to_panfrost_bo(obj);
 	struct panfrost_file_priv *priv = file_priv->driver_priv;
 
@@ -45,6 +45,10 @@ panfrost_gem_create_with_handle(struct drm_file *file_priv,
 		u32 flags,
 		uint32_t *handle);
 
+int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv);
+void panfrost_gem_close(struct drm_gem_object *obj,
+			struct drm_file *file_priv);
+
 void panfrost_gem_shrinker_init(struct drm_device *dev);
 void panfrost_gem_shrinker_cleanup(struct drm_device *dev);
 
@@ -67,9 +67,10 @@ static int panfrost_perfcnt_dump_locked(struct panfrost_device *pfdev)
 }
 
 static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev,
-					  struct panfrost_file_priv *user,
+					  struct drm_file *file_priv,
 					  unsigned int counterset)
 {
+	struct panfrost_file_priv *user = file_priv->driver_priv;
 	struct panfrost_perfcnt *perfcnt = pfdev->perfcnt;
 	struct drm_gem_shmem_object *bo;
 	u32 cfg;
@@ -91,14 +92,14 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev,
 	perfcnt->bo = to_panfrost_bo(&bo->base);
 
 	/* Map the perfcnt buf in the address space attached to file_priv. */
-	ret = panfrost_mmu_map(perfcnt->bo);
+	ret = panfrost_gem_open(&perfcnt->bo->base.base, file_priv);
 	if (ret)
 		goto err_put_bo;
 
 	perfcnt->buf = drm_gem_shmem_vmap(&bo->base);
 	if (IS_ERR(perfcnt->buf)) {
 		ret = PTR_ERR(perfcnt->buf);
-		goto err_put_bo;
+		goto err_close_bo;
 	}
 
 	/*
@@ -157,14 +158,17 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev,
 
 err_vunmap:
 	drm_gem_shmem_vunmap(&perfcnt->bo->base.base, perfcnt->buf);
+err_close_bo:
+	panfrost_gem_close(&perfcnt->bo->base.base, file_priv);
 err_put_bo:
 	drm_gem_object_put_unlocked(&bo->base);
 	return ret;
 }
 
 static int panfrost_perfcnt_disable_locked(struct panfrost_device *pfdev,
-					   struct panfrost_file_priv *user)
+					   struct drm_file *file_priv)
 {
+	struct panfrost_file_priv *user = file_priv->driver_priv;
 	struct panfrost_perfcnt *perfcnt = pfdev->perfcnt;
 
 	if (user != perfcnt->user)
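
The new err_close_bo label above slots into the usual kernel goto-unwind ladder: each acquisition gets a matching label, and an error jumps to the label that releases everything taken so far, in reverse order, exactly how err_close_bo lands between err_vunmap and err_put_bo. A self-contained sketch of the idiom, with stand-in resources rather than the driver's:

#include <stdio.h>

static int open_resource(const char *name)  { printf("open %s\n", name); return 0; }
static void close_resource(const char *name) { printf("close %s\n", name); }

static int setup_sketch(void)
{
	int ret;

	ret = open_resource("bo");
	if (ret)
		return ret;

	ret = open_resource("mapping");
	if (ret)
		goto err_put_bo;

	ret = open_resource("vmap");
	if (ret)
		goto err_close_mapping;

	return 0;

err_close_mapping:
	close_resource("mapping"); /* undo step 2 */
err_put_bo:
	close_resource("bo");      /* undo step 1 */
	return ret;
}

int main(void)
{
	return setup_sketch();
}
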
@@ -180,6 +184,7 @@ static int panfrost_perfcnt_disable_locked(struct panfrost_device *pfdev,
 	perfcnt->user = NULL;
 	drm_gem_shmem_vunmap(&perfcnt->bo->base.base, perfcnt->buf);
 	perfcnt->buf = NULL;
+	panfrost_gem_close(&perfcnt->bo->base.base, file_priv);
 	drm_gem_object_put_unlocked(&perfcnt->bo->base.base);
 	perfcnt->bo = NULL;
 	pm_runtime_mark_last_busy(pfdev->dev);
@@ -191,7 +196,6 @@ static int panfrost_perfcnt_disable_locked(struct panfrost_device *pfdev,
 int panfrost_ioctl_perfcnt_enable(struct drm_device *dev, void *data,
 				  struct drm_file *file_priv)
 {
-	struct panfrost_file_priv *pfile = file_priv->driver_priv;
 	struct panfrost_device *pfdev = dev->dev_private;
 	struct panfrost_perfcnt *perfcnt = pfdev->perfcnt;
 	struct drm_panfrost_perfcnt_enable *req = data;
@@ -207,10 +211,10 @@ int panfrost_ioctl_perfcnt_enable(struct drm_device *dev, void *data,
 
 	mutex_lock(&perfcnt->lock);
 	if (req->enable)
-		ret = panfrost_perfcnt_enable_locked(pfdev, pfile,
+		ret = panfrost_perfcnt_enable_locked(pfdev, file_priv,
 						     req->counterset);
 	else
-		ret = panfrost_perfcnt_disable_locked(pfdev, pfile);
+		ret = panfrost_perfcnt_disable_locked(pfdev, file_priv);
 	mutex_unlock(&perfcnt->lock);
 
 	return ret;
@@ -248,15 +252,16 @@ out:
 	return ret;
 }
 
-void panfrost_perfcnt_close(struct panfrost_file_priv *pfile)
+void panfrost_perfcnt_close(struct drm_file *file_priv)
 {
+	struct panfrost_file_priv *pfile = file_priv->driver_priv;
 	struct panfrost_device *pfdev = pfile->pfdev;
 	struct panfrost_perfcnt *perfcnt = pfdev->perfcnt;
 
 	pm_runtime_get_sync(pfdev->dev);
 	mutex_lock(&perfcnt->lock);
 	if (perfcnt->user == pfile)
-		panfrost_perfcnt_disable_locked(pfdev, pfile);
+		panfrost_perfcnt_disable_locked(pfdev, file_priv);
 	mutex_unlock(&perfcnt->lock);
 	pm_runtime_mark_last_busy(pfdev->dev);
 	pm_runtime_put_autosuspend(pfdev->dev);
 
@@ -9,7 +9,7 @@ void panfrost_perfcnt_sample_done(struct panfrost_device *pfdev);
 void panfrost_perfcnt_clean_cache_done(struct panfrost_device *pfdev);
 int panfrost_perfcnt_init(struct panfrost_device *pfdev);
 void panfrost_perfcnt_fini(struct panfrost_device *pfdev);
-void panfrost_perfcnt_close(struct panfrost_file_priv *pfile);
+void panfrost_perfcnt_close(struct drm_file *file_priv);
 int panfrost_ioctl_perfcnt_enable(struct drm_device *dev, void *data,
 				  struct drm_file *file_priv);
 int panfrost_ioctl_perfcnt_dump(struct drm_device *dev, void *data,