drm fixes for 5.11-rc4
Merge tag 'drm-fixes-2021-01-15' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
 "Regular fixes for rc4: a bunch of fixes across i915, amdgpu and
  nouveau, along with a couple of TTM fixes, a dma-buf fix, and one
  core pageflip/modifier interaction fix. One notable i915 fix is a
  HSW GT1 regression fix that has been outstanding for quite a while
  (thanks to Matt Turner for kicking Intel into getting it fixed).

  dma-buf:
   - Fix a memory leak in CMA heap

  core:
   - Fix format check for legacy pageflips

  ttm:
   - Pass correct address to dma_mapping_error()
   - Use mutex in pool shrinker

  i915:
   - Allow the sysadmin to override security mitigations
   - Restore clear-residual mitigations for ivb/byt
   - Limit VFE threads based on GT
   - GVT: fix vfio edid and full display detection
   - Fix DSI DSC power refcounting
   - Fix LPT CPU mode backlight takeover
   - Disable RPM wakeref assertions during driver shutdown
   - Fix DSI sequence sleeps

  amdgpu:
   - Update repo location in MAINTAINERS
   - Add some new Renoir PCI IDs
   - Revert CRC UAPI changes
   - Revert OLED display fix which causes clocking problems on some systems
   - Misc Vangogh fixes
   - GFX fix for Sienna Cichlid
   - DCN1.0 fix for pipe split
   - Fix incorrect PSP command

  amdkfd:
   - Fix possible out-of-bounds read in vcrat creation

  nouveau:
   - irq handling fix
   - expansion ROM fix
   - hw init dpcd disable
   - aux semaphore owner field fix
   - vram heap sizing fix
   - notifier at offset 0 is valid fix"

* tag 'drm-fixes-2021-01-15' of git://anongit.freedesktop.org/drm/drm: (37 commits)
  drm/nouveau/kms/nv50-: fix case where notifier buffer is at offset 0
  drm/nouveau/mmu: fix vram heap sizing
  drm/nouveau/i2c/gm200: increase width of aux semaphore owner fields
  drm/nouveau/i2c/gk110-: disable hw-initiated dpcd reads
  drm/nouveau/i2c/gk110: split out from i2c/gk104
  drm/nouveau/privring: ack interrupts the same way as RM
  drm/nouveau/bios: fix issue shadowing expansion ROMs
  drm/amd/display: Fix to be able to stop crc calculation
  Revert "drm/amd/display: Expose new CRC window property"
  Revert "drm/amdgpu/disply: fix documentation warnings in display manager"
  Revert "drm/amd/display: Fix unused variable warning"
  drm/amdgpu: set power brake sequence
  drm/amdgpu: add new device id for Renior
  drm/amdgpu: add green_sardine device id (v2)
  drm/amdgpu: fix vram type and bandwidth error for DDR5 and DDR4
  drm/amdgpu/gfx10: add updated GOLDEN_TSC_COUNT_UPPER/LOWER register offsets for VGH
  drm/amdkfd: Fix out-of-bounds read in kdf_create_vcrat_image_cpu()
  Revert "drm/amd/display: Fixed Intermittent blue screen on OLED panel"
  drm/amd/display: disable dcn10 pipe split by default
  drm/amd/display: Add a missing DCN3.01 API mapping
  ...
commit 5ee8805788
@@ -906,7 +906,7 @@ AMD KFD
 M:	Felix Kuehling <Felix.Kuehling@amd.com>
 L:	amd-gfx@lists.freedesktop.org
 S:	Supported
-T:	git git://people.freedesktop.org/~agd5f/linux
+T:	git https://gitlab.freedesktop.org/agd5f/linux.git
 F:	drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd*.[ch]
 F:	drivers/gpu/drm/amd/amdkfd/
 F:	drivers/gpu/drm/amd/include/cik_structs.h
@@ -14812,7 +14812,7 @@ M:	Alex Deucher <alexander.deucher@amd.com>
 M:	Christian König <christian.koenig@amd.com>
 L:	amd-gfx@lists.freedesktop.org
 S:	Supported
-T:	git git://people.freedesktop.org/~agd5f/linux
+T:	git https://gitlab.freedesktop.org/agd5f/linux.git
 F:	drivers/gpu/drm/amd/
 F:	drivers/gpu/drm/radeon/
 F:	include/uapi/drm/amdgpu_drm.h
@@ -251,6 +251,9 @@ static void cma_heap_dma_buf_release(struct dma_buf *dmabuf)
 		buffer->vaddr = NULL;
 	}
 
+	/* free page list */
+	kfree(buffer->pages);
+	/* release memory */
 	cma_release(cma_heap->cma, buffer->cma_pages, buffer->pagecount);
 	kfree(buffer);
 }
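The leak fixed above exists because the buffer's page-pointer array is a separate slab allocation from the CMA block it describes, so releasing the CMA pages alone is not enough. A minimal sketch of the paired allocate/release lifecycle; the struct and function names here are illustrative stand-ins, not the driver's own:

	#include <linux/slab.h>
	#include <linux/cma.h>

	/* Illustrative stand-in for the heap's buffer bookkeeping. */
	struct demo_buffer {
		struct cma *cma;
		struct page *cma_pages;   /* base of the contiguous CMA allocation */
		struct page **pages;      /* kcalloc'd array of per-page pointers */
		unsigned long pagecount;
	};

	static void demo_buffer_release(struct demo_buffer *buf)
	{
		/* The pointer array is its own allocation and needs its own free... */
		kfree(buf->pages);
		/* ...separately from the CMA region the pointers referred to. */
		cma_release(buf->cma, buf->cma_pages, buf->pagecount);
		kfree(buf);
	}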
@@ -112,6 +112,7 @@ int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev)
 union igp_info {
 	struct atom_integrated_system_info_v1_11 v11;
 	struct atom_integrated_system_info_v1_12 v12;
+	struct atom_integrated_system_info_v2_1 v21;
 };
 
 union umc_info {
@@ -209,24 +210,42 @@ amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev,
 	if (adev->flags & AMD_IS_APU) {
 		igp_info = (union igp_info *)
 			(mode_info->atom_context->bios + data_offset);
-		switch (crev) {
-		case 11:
-			mem_channel_number = igp_info->v11.umachannelnumber;
-			/* channel width is 64 */
-			if (vram_width)
-				*vram_width = mem_channel_number * 64;
-			mem_type = igp_info->v11.memorytype;
-			if (vram_type)
-				*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
+		switch (frev) {
+		case 1:
+			switch (crev) {
+			case 11:
+			case 12:
+				mem_channel_number = igp_info->v11.umachannelnumber;
+				if (!mem_channel_number)
+					mem_channel_number = 1;
+				/* channel width is 64 */
+				if (vram_width)
+					*vram_width = mem_channel_number * 64;
+				mem_type = igp_info->v11.memorytype;
+				if (vram_type)
+					*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
+				break;
+			default:
+				return -EINVAL;
+			}
 			break;
-		case 12:
-			mem_channel_number = igp_info->v12.umachannelnumber;
-			/* channel width is 64 */
-			if (vram_width)
-				*vram_width = mem_channel_number * 64;
-			mem_type = igp_info->v12.memorytype;
-			if (vram_type)
-				*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
+		case 2:
+			switch (crev) {
+			case 1:
+			case 2:
+				mem_channel_number = igp_info->v21.umachannelnumber;
+				if (!mem_channel_number)
+					mem_channel_number = 1;
+				/* channel width is 64 */
+				if (vram_width)
+					*vram_width = mem_channel_number * 64;
+				mem_type = igp_info->v21.memorytype;
+				if (vram_type)
+					*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
+				break;
+			default:
+				return -EINVAL;
+			}
 			break;
 		default:
 			return -EINVAL;
@@ -3034,7 +3034,7 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
 #endif
 	default:
 		if (amdgpu_dc > 0)
-			DRM_INFO("Display Core has been requested via kernel parameter "
+			DRM_INFO_ONCE("Display Core has been requested via kernel parameter "
 				 "but isn't supported by ASIC, ignoring\n");
 		return false;
 	}
@@ -1085,6 +1085,8 @@ static const struct pci_device_id pciidlist[] = {
 
 	/* Renoir */
 	{0x1002, 0x1636, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU},
+	{0x1002, 0x1638, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU},
+	{0x1002, 0x164C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU},
 
 	/* Navi12 */
 	{0x1002, 0x7360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI12},
@@ -99,6 +99,10 @@
 #define mmGCR_GENERAL_CNTL_Sienna_Cichlid		0x1580
 #define mmGCR_GENERAL_CNTL_Sienna_Cichlid_BASE_IDX	0
 
+#define mmGOLDEN_TSC_COUNT_UPPER_Vangogh		0x0025
+#define mmGOLDEN_TSC_COUNT_UPPER_Vangogh_BASE_IDX	1
+#define mmGOLDEN_TSC_COUNT_LOWER_Vangogh		0x0026
+#define mmGOLDEN_TSC_COUNT_LOWER_Vangogh_BASE_IDX	1
 #define mmSPI_CONFIG_CNTL_1_Vangogh			0x2441
 #define mmSPI_CONFIG_CNTL_1_Vangogh_BASE_IDX		1
 #define mmVGT_TF_MEMORY_BASE_HI_Vangogh			0x2261
@@ -160,6 +164,9 @@
 #define mmGCVM_L2_CGTT_CLK_CTRL_Sienna_Cichlid		0x15db
 #define mmGCVM_L2_CGTT_CLK_CTRL_Sienna_Cichlid_BASE_IDX	0
 
+#define mmGC_THROTTLE_CTRL_Sienna_Cichlid		0x2030
+#define mmGC_THROTTLE_CTRL_Sienna_Cichlid_BASE_IDX	0
+
 MODULE_FIRMWARE("amdgpu/navi10_ce.bin");
 MODULE_FIRMWARE("amdgpu/navi10_pfp.bin");
 MODULE_FIRMWARE("amdgpu/navi10_me.bin");
@@ -3324,6 +3331,7 @@ static void gfx_v10_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume);
 static void gfx_v10_0_ring_emit_frame_cntl(struct amdgpu_ring *ring, bool start, bool secure);
 static u32 gfx_v10_3_get_disabled_sa(struct amdgpu_device *adev);
 static void gfx_v10_3_program_pbb_mode(struct amdgpu_device *adev);
+static void gfx_v10_3_set_power_brake_sequence(struct amdgpu_device *adev);
 
 static void gfx10_kiq_set_resources(struct amdgpu_ring *kiq_ring, uint64_t queue_mask)
 {
@@ -7192,6 +7200,9 @@ static int gfx_v10_0_hw_init(void *handle)
 	if (adev->asic_type == CHIP_SIENNA_CICHLID)
 		gfx_v10_3_program_pbb_mode(adev);
 
+	if (adev->asic_type >= CHIP_SIENNA_CICHLID)
+		gfx_v10_3_set_power_brake_sequence(adev);
+
 	return r;
 }
 
@@ -7377,8 +7388,16 @@ static uint64_t gfx_v10_0_get_gpu_clock_counter(struct amdgpu_device *adev)
 
 	amdgpu_gfx_off_ctrl(adev, false);
 	mutex_lock(&adev->gfx.gpu_clock_mutex);
-	clock = (uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER) |
-		((uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER) << 32ULL);
+	switch (adev->asic_type) {
+	case CHIP_VANGOGH:
+		clock = (uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_Vangogh) |
+			((uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Vangogh) << 32ULL);
+		break;
+	default:
+		clock = (uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER) |
+			((uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER) << 32ULL);
+		break;
+	}
 	mutex_unlock(&adev->gfx.gpu_clock_mutex);
 	amdgpu_gfx_off_ctrl(adev, true);
 	return clock;
@@ -9169,6 +9188,31 @@ static void gfx_v10_3_program_pbb_mode(struct amdgpu_device *adev)
 	}
 }
 
+static void gfx_v10_3_set_power_brake_sequence(struct amdgpu_device *adev)
+{
+	WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX,
+		     (0x1 << GRBM_GFX_INDEX__SA_BROADCAST_WRITES__SHIFT) |
+		     (0x1 << GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES__SHIFT) |
+		     (0x1 << GRBM_GFX_INDEX__SE_BROADCAST_WRITES__SHIFT));
+
+	WREG32_SOC15(GC, 0, mmGC_CAC_IND_INDEX, ixPWRBRK_STALL_PATTERN_CTRL);
+	WREG32_SOC15(GC, 0, mmGC_CAC_IND_DATA,
+		     (0x1 << PWRBRK_STALL_PATTERN_CTRL__PWRBRK_STEP_INTERVAL__SHIFT) |
+		     (0x12 << PWRBRK_STALL_PATTERN_CTRL__PWRBRK_BEGIN_STEP__SHIFT) |
+		     (0x13 << PWRBRK_STALL_PATTERN_CTRL__PWRBRK_END_STEP__SHIFT) |
+		     (0xf << PWRBRK_STALL_PATTERN_CTRL__PWRBRK_THROTTLE_PATTERN_BIT_NUMS__SHIFT));
+
+	WREG32_SOC15(GC, 0, mmGC_THROTTLE_CTRL_Sienna_Cichlid,
+		     (0x1 << GC_THROTTLE_CTRL__PWRBRK_STALL_EN__SHIFT) |
+		     (0x1 << GC_THROTTLE_CTRL__PATTERN_MODE__SHIFT) |
+		     (0x5 << GC_THROTTLE_CTRL__RELEASE_STEP_INTERVAL__SHIFT));
+
+	WREG32_SOC15(GC, 0, mmDIDT_IND_INDEX, ixDIDT_SQ_THROTTLE_CTRL);
+
+	WREG32_SOC15(GC, 0, mmDIDT_IND_DATA,
+		     (0x1 << DIDT_SQ_THROTTLE_CTRL__PWRBRK_STALL_EN__SHIFT));
+}
+
 const struct amdgpu_ip_block_version gfx_v10_0_ip_block =
 {
 	.type = AMD_IP_BLOCK_TYPE_GFX,
@@ -47,7 +47,7 @@ enum psp_gfx_crtl_cmd_id
     GFX_CTRL_CMD_ID_DISABLE_INT         = 0x00060000,   /* disable PSP-to-Gfx interrupt */
     GFX_CTRL_CMD_ID_MODE1_RST           = 0x00070000,   /* trigger the Mode 1 reset */
     GFX_CTRL_CMD_ID_GBR_IH_SET          = 0x00080000,   /* set Gbr IH_RB_CNTL registers */
-    GFX_CTRL_CMD_ID_CONSUME_CMD         = 0x000A0000,   /* send interrupt to psp for updating write pointer of vf */
+    GFX_CTRL_CMD_ID_CONSUME_CMD         = 0x00090000,   /* send interrupt to psp for updating write pointer of vf */
    GFX_CTRL_CMD_ID_DESTROY_GPCOM_RING  = 0x000C0000,   /* destroy GPCOM ring */
 
     GFX_CTRL_CMD_ID_MAX                 = 0x000F0000,   /* max command ID */
@@ -1239,7 +1239,8 @@ static int soc15_common_early_init(void *handle)
 		break;
 	case CHIP_RENOIR:
 		adev->asic_funcs = &soc15_asic_funcs;
-		if (adev->pdev->device == 0x1636)
+		if ((adev->pdev->device == 0x1636) ||
+		    (adev->pdev->device == 0x164c))
 			adev->apu_flags |= AMD_APU_IS_RENOIR;
 		else
 			adev->apu_flags |= AMD_APU_IS_GREEN_SARDINE;
@@ -1040,11 +1040,14 @@ static int kfd_create_vcrat_image_cpu(void *pcrat_image, size_t *size)
 			(struct crat_subtype_iolink *)sub_type_hdr);
 	if (ret < 0)
 		return ret;
-	crat_table->length += (sub_type_hdr->length * entries);
-	crat_table->total_entries += entries;
 
-	sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
-			sub_type_hdr->length * entries);
+	if (entries) {
+		crat_table->length += (sub_type_hdr->length * entries);
+		crat_table->total_entries += entries;
+
+		sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
+				sub_type_hdr->length * entries);
+	}
 #else
 	pr_info("IO link not available for non x86 platforms\n");
 #endif
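The out-of-bounds read occurred when zero I/O-link entries were generated: `sub_type_hdr` can then point past the initialized part of the image, so even reading its `length` field is an invalid access. A hedged sketch of the general pattern, with made-up names, advancing a cursor through a packed table only when a sub-table actually contributed entries:

	#include <linux/types.h>

	/* Hypothetical packed sub-table header, for illustration only. */
	struct demo_subhdr {
		u8 type;
		u8 length;	/* size of one entry, in bytes */
	};

	/*
	 * Advance a write cursor over 'entries' just-emitted records, but only
	 * dereference the header when something was actually written: with
	 * entries == 0 the bytes at the cursor may be uninitialized.
	 */
	static struct demo_subhdr *demo_advance(struct demo_subhdr *hdr,
						size_t entries, size_t *total_len)
	{
		if (entries) {
			*total_len += (size_t)hdr->length * entries;
			hdr = (struct demo_subhdr *)((char *)hdr +
						     (size_t)hdr->length * entries);
		}
		return hdr;
	}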
@@ -939,41 +939,6 @@ static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_
 }
 #endif
 
-#ifdef CONFIG_DEBUG_FS
-static int create_crtc_crc_properties(struct amdgpu_display_manager *dm)
-{
-	dm->crc_win_x_start_property =
-		drm_property_create_range(adev_to_drm(dm->adev),
-					  DRM_MODE_PROP_ATOMIC,
-					  "AMD_CRC_WIN_X_START", 0, U16_MAX);
-	if (!dm->crc_win_x_start_property)
-		return -ENOMEM;
-
-	dm->crc_win_y_start_property =
-		drm_property_create_range(adev_to_drm(dm->adev),
-					  DRM_MODE_PROP_ATOMIC,
-					  "AMD_CRC_WIN_Y_START", 0, U16_MAX);
-	if (!dm->crc_win_y_start_property)
-		return -ENOMEM;
-
-	dm->crc_win_x_end_property =
-		drm_property_create_range(adev_to_drm(dm->adev),
-					  DRM_MODE_PROP_ATOMIC,
-					  "AMD_CRC_WIN_X_END", 0, U16_MAX);
-	if (!dm->crc_win_x_end_property)
-		return -ENOMEM;
-
-	dm->crc_win_y_end_property =
-		drm_property_create_range(adev_to_drm(dm->adev),
-					  DRM_MODE_PROP_ATOMIC,
-					  "AMD_CRC_WIN_Y_END", 0, U16_MAX);
-	if (!dm->crc_win_y_end_property)
-		return -ENOMEM;
-
-	return 0;
-}
-#endif
-
 static int amdgpu_dm_init(struct amdgpu_device *adev)
 {
 	struct dc_init_data init_data;
@@ -1120,10 +1085,6 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 
 		dc_init_callbacks(adev->dm.dc, &init_params);
 	}
 #endif
-#ifdef CONFIG_DEBUG_FS
-	if (create_crtc_crc_properties(&adev->dm))
-		DRM_ERROR("amdgpu: failed to create crc property.\n");
-#endif
 	if (amdgpu_dm_initialize_drm_device(adev)) {
 		DRM_ERROR(
@@ -5333,64 +5294,12 @@ dm_crtc_duplicate_state(struct drm_crtc *crtc)
 	state->crc_src = cur->crc_src;
 	state->cm_has_degamma = cur->cm_has_degamma;
 	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
-#ifdef CONFIG_DEBUG_FS
-	state->crc_window = cur->crc_window;
-#endif
 
 	/* TODO Duplicate dc_stream after objects are stream object is flattened */
 
 	return &state->base;
 }
 
-#ifdef CONFIG_DEBUG_FS
-static int amdgpu_dm_crtc_atomic_set_property(struct drm_crtc *crtc,
-					      struct drm_crtc_state *crtc_state,
-					      struct drm_property *property,
-					      uint64_t val)
-{
-	struct drm_device *dev = crtc->dev;
-	struct amdgpu_device *adev = drm_to_adev(dev);
-	struct dm_crtc_state *dm_new_state =
-		to_dm_crtc_state(crtc_state);
-
-	if (property == adev->dm.crc_win_x_start_property)
-		dm_new_state->crc_window.x_start = val;
-	else if (property == adev->dm.crc_win_y_start_property)
-		dm_new_state->crc_window.y_start = val;
-	else if (property == adev->dm.crc_win_x_end_property)
-		dm_new_state->crc_window.x_end = val;
-	else if (property == adev->dm.crc_win_y_end_property)
-		dm_new_state->crc_window.y_end = val;
-	else
-		return -EINVAL;
-
-	return 0;
-}
-
-static int amdgpu_dm_crtc_atomic_get_property(struct drm_crtc *crtc,
-					      const struct drm_crtc_state *state,
-					      struct drm_property *property,
-					      uint64_t *val)
-{
-	struct drm_device *dev = crtc->dev;
-	struct amdgpu_device *adev = drm_to_adev(dev);
-	struct dm_crtc_state *dm_state =
-		to_dm_crtc_state(state);
-
-	if (property == adev->dm.crc_win_x_start_property)
-		*val = dm_state->crc_window.x_start;
-	else if (property == adev->dm.crc_win_y_start_property)
-		*val = dm_state->crc_window.y_start;
-	else if (property == adev->dm.crc_win_x_end_property)
-		*val = dm_state->crc_window.x_end;
-	else if (property == adev->dm.crc_win_y_end_property)
-		*val = dm_state->crc_window.y_end;
-	else
-		return -EINVAL;
-
-	return 0;
-}
-#endif
-
 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
 {
 	enum dc_irq_source irq_source;
@@ -5457,10 +5366,6 @@ static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
 	.enable_vblank = dm_enable_vblank,
 	.disable_vblank = dm_disable_vblank,
 	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
-#ifdef CONFIG_DEBUG_FS
-	.atomic_set_property = amdgpu_dm_crtc_atomic_set_property,
-	.atomic_get_property = amdgpu_dm_crtc_atomic_get_property,
-#endif
 };
 
 static enum drm_connector_status
@@ -6662,25 +6567,6 @@ static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
 	return 0;
 }
 
-#ifdef CONFIG_DEBUG_FS
-static void attach_crtc_crc_properties(struct amdgpu_display_manager *dm,
-				       struct amdgpu_crtc *acrtc)
-{
-	drm_object_attach_property(&acrtc->base.base,
-				   dm->crc_win_x_start_property,
-				   0);
-	drm_object_attach_property(&acrtc->base.base,
-				   dm->crc_win_y_start_property,
-				   0);
-	drm_object_attach_property(&acrtc->base.base,
-				   dm->crc_win_x_end_property,
-				   0);
-	drm_object_attach_property(&acrtc->base.base,
-				   dm->crc_win_y_end_property,
-				   0);
-}
-#endif
-
 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
 			       struct drm_plane *plane,
 			       uint32_t crtc_index)
@@ -6728,9 +6614,7 @@ static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
 	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
 				   true, MAX_COLOR_LUT_ENTRIES);
 	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
-#ifdef CONFIG_DEBUG_FS
-	attach_crtc_crc_properties(dm, acrtc);
-#endif
 
 	return 0;
 
 fail:
@@ -8367,7 +8251,6 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
 	 */
 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
-		bool configure_crc = false;
 
 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
 
@@ -8377,27 +8260,20 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
 			dc_stream_retain(dm_new_crtc_state->stream);
 			acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
 			manage_dm_interrupts(adev, acrtc, true);
 		}
-#ifdef CONFIG_DEBUG_FS
+		if (IS_ENABLED(CONFIG_DEBUG_FS) && new_crtc_state->active &&
+		    amdgpu_dm_is_valid_crc_source(dm_new_crtc_state->crc_src)) {
 			/**
 			 * Frontend may have changed so reapply the CRC capture
 			 * settings for the stream.
 			 */
 			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
-		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
-
-		if (amdgpu_dm_crc_window_is_default(dm_new_crtc_state)) {
-			if (!old_crtc_state->active || drm_atomic_crtc_needs_modeset(new_crtc_state))
-				configure_crc = true;
-		} else {
-			if (amdgpu_dm_crc_window_changed(dm_new_crtc_state, dm_old_crtc_state))
-				configure_crc = true;
-		}
-
-		if (configure_crc)
-		if (amdgpu_dm_is_valid_crc_source(dm_new_crtc_state->crc_src)) {
 			amdgpu_dm_crtc_configure_crc_source(
-				crtc, dm_new_crtc_state, dm_new_crtc_state->crc_src);
+				crtc, dm_new_crtc_state,
+				dm_new_crtc_state->crc_src);
 		}
-#endif
 	}
 }
@@ -336,32 +336,6 @@ struct amdgpu_display_manager {
 	 */
 	const struct gpu_info_soc_bounding_box_v1_0 *soc_bounding_box;
 
-#ifdef CONFIG_DEBUG_FS
-	/**
-	 * @crc_win_x_start_property:
-	 *
-	 * X start of the crc calculation window
-	 */
-	struct drm_property *crc_win_x_start_property;
-	/**
-	 * @crc_win_y_start_property:
-	 *
-	 * Y start of the crc calculation window
-	 */
-	struct drm_property *crc_win_y_start_property;
-	/**
-	 * @crc_win_x_end_property:
-	 *
-	 * X end of the crc calculation window
-	 */
-	struct drm_property *crc_win_x_end_property;
-	/**
-	 * @crc_win_y_end_property:
-	 *
-	 * Y end of the crc calculation window
-	 */
-	struct drm_property *crc_win_y_end_property;
-#endif
 	/**
 	 * @mst_encoders:
 	 *
@@ -448,15 +422,6 @@ struct dm_plane_state {
 	struct dc_plane_state *dc_state;
 };
 
-#ifdef CONFIG_DEBUG_FS
-struct crc_rec {
-	uint16_t x_start;
-	uint16_t y_start;
-	uint16_t x_end;
-	uint16_t y_end;
-};
-#endif
-
 struct dm_crtc_state {
 	struct drm_crtc_state base;
 	struct dc_stream_state *stream;
@@ -479,9 +444,6 @@ struct dm_crtc_state {
 	struct dc_info_packet vrr_infopacket;
 
 	int abm_level;
-#ifdef CONFIG_DEBUG_FS
-	struct crc_rec crc_window;
-#endif
 };
 
 #define to_dm_crtc_state(x) container_of(x, struct dm_crtc_state, base)
@@ -81,41 +81,6 @@ const char *const *amdgpu_dm_crtc_get_crc_sources(struct drm_crtc *crtc,
 	return pipe_crc_sources;
 }
 
-static void amdgpu_dm_set_crc_window_default(struct dm_crtc_state *dm_crtc_state)
-{
-	dm_crtc_state->crc_window.x_start = 0;
-	dm_crtc_state->crc_window.y_start = 0;
-	dm_crtc_state->crc_window.x_end = 0;
-	dm_crtc_state->crc_window.y_end = 0;
-}
-
-bool amdgpu_dm_crc_window_is_default(struct dm_crtc_state *dm_crtc_state)
-{
-	bool ret = true;
-
-	if ((dm_crtc_state->crc_window.x_start != 0) ||
-	    (dm_crtc_state->crc_window.y_start != 0) ||
-	    (dm_crtc_state->crc_window.x_end != 0) ||
-	    (dm_crtc_state->crc_window.y_end != 0))
-		ret = false;
-
-	return ret;
-}
-
-bool amdgpu_dm_crc_window_changed(struct dm_crtc_state *dm_new_crtc_state,
-				  struct dm_crtc_state *dm_old_crtc_state)
-{
-	bool ret = false;
-
-	if ((dm_new_crtc_state->crc_window.x_start != dm_old_crtc_state->crc_window.x_start) ||
-	    (dm_new_crtc_state->crc_window.y_start != dm_old_crtc_state->crc_window.y_start) ||
-	    (dm_new_crtc_state->crc_window.x_end != dm_old_crtc_state->crc_window.x_end) ||
-	    (dm_new_crtc_state->crc_window.y_end != dm_old_crtc_state->crc_window.y_end))
-		ret = true;
-
-	return ret;
-}
-
 int
 amdgpu_dm_crtc_verify_crc_source(struct drm_crtc *crtc, const char *src_name,
 				 size_t *values_cnt)
@@ -140,7 +105,6 @@ int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc *crtc,
 	struct dc_stream_state *stream_state = dm_crtc_state->stream;
 	bool enable = amdgpu_dm_is_valid_crc_source(source);
 	int ret = 0;
-	struct crc_params *crc_window = NULL, tmp_window;
 
 	/* Configuration will be deferred to stream enable. */
 	if (!stream_state)
@@ -150,24 +114,8 @@ int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc *crtc,
 
 	/* Enable CRTC CRC generation if necessary. */
 	if (dm_is_crc_source_crtc(source) || source == AMDGPU_DM_PIPE_CRC_SOURCE_NONE) {
-		if (!enable)
-			amdgpu_dm_set_crc_window_default(dm_crtc_state);
-
-		if (!amdgpu_dm_crc_window_is_default(dm_crtc_state)) {
-			crc_window = &tmp_window;
-
-			tmp_window.windowa_x_start = dm_crtc_state->crc_window.x_start;
-			tmp_window.windowa_y_start = dm_crtc_state->crc_window.y_start;
-			tmp_window.windowa_x_end = dm_crtc_state->crc_window.x_end;
-			tmp_window.windowa_y_end = dm_crtc_state->crc_window.y_end;
-			tmp_window.windowb_x_start = dm_crtc_state->crc_window.x_start;
-			tmp_window.windowb_y_start = dm_crtc_state->crc_window.y_start;
-			tmp_window.windowb_x_end = dm_crtc_state->crc_window.x_end;
-			tmp_window.windowb_y_end = dm_crtc_state->crc_window.y_end;
-		}
-
 		if (!dc_stream_configure_crc(stream_state->ctx->dc,
-					     stream_state, crc_window, enable, enable)) {
+					     stream_state, NULL, enable, enable)) {
 			ret = -EINVAL;
 			goto unlock;
 		}
@@ -46,13 +46,10 @@ static inline bool amdgpu_dm_is_valid_crc_source(enum amdgpu_dm_pipe_crc_source
 }
 
 /* amdgpu_dm_crc.c */
-bool amdgpu_dm_crc_window_is_default(struct dm_crtc_state *dm_crtc_state);
-bool amdgpu_dm_crc_window_changed(struct dm_crtc_state *dm_new_crtc_state,
-				  struct dm_crtc_state *dm_old_crtc_state);
+#ifdef CONFIG_DEBUG_FS
 int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc *crtc,
 					struct dm_crtc_state *dm_crtc_state,
 					enum amdgpu_dm_pipe_crc_source source);
-#ifdef CONFIG_DEBUG_FS
 int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name);
 int amdgpu_dm_crtc_verify_crc_source(struct drm_crtc *crtc,
 				     const char *src_name,
@@ -3992,7 +3992,7 @@ bool dc_link_dp_set_test_pattern(
 	unsigned int cust_pattern_size)
 {
 	struct pipe_ctx *pipes = link->dc->current_state->res_ctx.pipe_ctx;
-	struct pipe_ctx *pipe_ctx = &pipes[0];
+	struct pipe_ctx *pipe_ctx = NULL;
 	unsigned int lane;
 	unsigned int i;
 	unsigned char link_qual_pattern[LANE_COUNT_DP_MAX] = {0};
@@ -4002,12 +4002,18 @@ bool dc_link_dp_set_test_pattern(
 	memset(&training_pattern, 0, sizeof(training_pattern));
 
 	for (i = 0; i < MAX_PIPES; i++) {
+		if (pipes[i].stream == NULL)
+			continue;
+
 		if (pipes[i].stream->link == link && !pipes[i].top_pipe && !pipes[i].prev_odm_pipe) {
 			pipe_ctx = &pipes[i];
 			break;
 		}
 	}
 
+	if (pipe_ctx == NULL)
+		return false;
+
 	/* Reset CRTC Test Pattern if it is currently running and request is VideoMode */
 	if (link->test_pattern_enabled && test_pattern ==
 			DP_TEST_PATTERN_VIDEO_MODE) {
@@ -470,7 +470,7 @@ void mpc1_cursor_lock(struct mpc *mpc, int opp_id, bool lock)
 unsigned int mpc1_get_mpc_out_mux(struct mpc *mpc, int opp_id)
 {
 	struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc);
-	uint32_t val = 0;
+	uint32_t val = 0xf;
 
 	if (opp_id < MAX_OPP && REG(MUX[opp_id]))
 		REG_GET(MUX[opp_id], MPC_OUT_MUX, &val);
@@ -608,8 +608,8 @@ static const struct dc_debug_options debug_defaults_drv = {
 		.disable_pplib_clock_request = false,
 		.disable_pplib_wm_range = false,
 		.pplib_wm_report_mode = WM_REPORT_DEFAULT,
-		.pipe_split_policy = MPC_SPLIT_DYNAMIC,
-		.force_single_disp_pipe_split = true,
+		.pipe_split_policy = MPC_SPLIT_AVOID,
+		.force_single_disp_pipe_split = false,
 		.disable_dcc = DCC_ENABLE,
 		.voltage_align_fclk = true,
 		.disable_stereo_support = true,
@@ -1731,6 +1731,7 @@ static struct resource_funcs dcn301_res_pool_funcs = {
 	.populate_dml_pipes = dcn30_populate_dml_pipes_from_context,
 	.acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer,
 	.add_stream_to_ctx = dcn30_add_stream_to_ctx,
+	.add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource,
 	.remove_stream_from_ctx = dcn20_remove_stream_from_ctx,
 	.populate_dml_writeback_from_context = dcn30_populate_dml_writeback_from_context,
 	.set_mcif_arb_params = dcn30_set_mcif_arb_params,
@@ -2635,14 +2635,15 @@ static void dml20v2_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndP
 	}
 
 	if (mode_lib->vba.DRAMClockChangeSupportsVActive &&
-			mode_lib->vba.MinActiveDRAMClockChangeMargin > 60 &&
-			mode_lib->vba.PrefetchMode[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb] == 0) {
+			mode_lib->vba.MinActiveDRAMClockChangeMargin > 60) {
 		mode_lib->vba.DRAMClockChangeWatermark += 25;
 
 		for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
-			if (mode_lib->vba.DRAMClockChangeWatermark >
-			dml_max(mode_lib->vba.StutterEnterPlusExitWatermark, mode_lib->vba.UrgentWatermark))
-				mode_lib->vba.MinTTUVBlank[k] += 25;
+			if (mode_lib->vba.PrefetchMode[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb] == 0) {
+				if (mode_lib->vba.DRAMClockChangeWatermark >
+				dml_max(mode_lib->vba.StutterEnterPlusExitWatermark, mode_lib->vba.UrgentWatermark))
+					mode_lib->vba.MinTTUVBlank[k] += 25;
+			}
 		}
 
 		mode_lib->vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vactive;
@@ -1163,7 +1163,14 @@ retry:
 	if (ret)
 		goto out;
 
-	if (old_fb->format != fb->format) {
+	/*
+	 * Only check the FOURCC format code, excluding modifiers. This is
+	 * enough for all legacy drivers. Atomic drivers have their own
+	 * checks in their ->atomic_check implementation, which will
+	 * return -EINVAL if any hw or driver constraint is violated due
+	 * to modifier changes.
+	 */
+	if (old_fb->format->format != fb->format->format) {
 		DRM_DEBUG_KMS("Page flip is not allowed to change frame buffer format.\n");
 		ret = -EINVAL;
 		goto out;
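For context, `drm_framebuffer::format` points at a `struct drm_format_info`, and drivers may hand back distinct info structs for the same FOURCC once modifiers are involved, so comparing the pointers rejects flips that only change buffer layout. A minimal sketch of the relaxed check the hunk lands on; the helper name is ours, not the DRM core's:

	#include <drm/drm_framebuffer.h>
	#include <drm/drm_fourcc.h>

	/*
	 * Legacy page-flips may change tiling/compression (modifiers) but never
	 * the pixel format itself, so compare only the 32-bit FOURCC codes.
	 */
	static bool demo_flip_format_ok(const struct drm_framebuffer *old_fb,
					const struct drm_framebuffer *new_fb)
	{
		return old_fb->format->format == new_fb->format->format;
	}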
@@ -38,6 +38,7 @@ i915-y += i915_drv.o \
 	  i915_config.o \
 	  i915_irq.o \
 	  i915_getparam.o \
+	  i915_mitigations.o \
 	  i915_params.o \
 	  i915_pci.o \
 	  i915_scatterlist.o \
@@ -1616,10 +1616,6 @@ static void gen11_dsi_get_power_domains(struct intel_encoder *encoder,
 
 	get_dsi_io_power_domains(i915,
 				 enc_to_intel_dsi(encoder));
-
-	if (crtc_state->dsc.compression_enable)
-		intel_display_power_get(i915,
-					intel_dsc_power_domain(crtc_state));
 }
 
 static bool gen11_dsi_get_hw_state(struct intel_encoder *encoder,
@@ -1650,16 +1650,13 @@ static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unus
 		val = pch_get_backlight(connector);
 	else
 		val = lpt_get_backlight(connector);
-	val = intel_panel_compute_brightness(connector, val);
-	panel->backlight.level = clamp(val, panel->backlight.min,
-				       panel->backlight.max);
 
 	if (cpu_mode) {
 		drm_dbg_kms(&dev_priv->drm,
 			    "CPU backlight register was enabled, switching to PCH override\n");
 
 		/* Write converted CPU PWM value to PCH override register */
-		lpt_set_backlight(connector->base.state, panel->backlight.level);
+		lpt_set_backlight(connector->base.state, val);
 		intel_de_write(dev_priv, BLC_PWM_PCH_CTL1,
 			       pch_ctl1 | BLM_PCH_OVERRIDE_ENABLE);
 
@@ -1667,6 +1664,10 @@ static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unus
 			       cpu_ctl2 & ~BLM_PWM_ENABLE);
 	}
 
+	val = intel_panel_compute_brightness(connector, val);
+	panel->backlight.level = clamp(val, panel->backlight.min,
+				       panel->backlight.max);
+
 	return 0;
 }
@@ -812,10 +812,20 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state,
 	intel_dsi_prepare(encoder, pipe_config);
 
 	intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_POWER_ON);
-	intel_dsi_msleep(intel_dsi, intel_dsi->panel_on_delay);
 
-	/* Deassert reset */
-	intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DEASSERT_RESET);
+	/*
+	 * Give the panel time to power-on and then deassert its reset.
+	 * Depending on the VBT MIPI sequences version the deassert-seq
+	 * may contain the necessary delay, intel_dsi_msleep() will skip
+	 * the delay in that case. If there is no deassert-seq, then an
+	 * unconditional msleep is used to give the panel time to power-on.
+	 */
+	if (dev_priv->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET]) {
+		intel_dsi_msleep(intel_dsi, intel_dsi->panel_on_delay);
+		intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DEASSERT_RESET);
+	} else {
+		msleep(intel_dsi->panel_on_delay);
+	}
 
 	if (IS_GEMINILAKE(dev_priv)) {
 		glk_cold_boot = glk_dsi_enable_io(encoder);
@@ -7,8 +7,6 @@
 #include "i915_drv.h"
 #include "intel_gpu_commands.h"
 
-#define MAX_URB_ENTRIES 64
-#define STATE_SIZE (4 * 1024)
 #define GT3_INLINE_DATA_DELAYS 0x1E00
 #define batch_advance(Y, CS) GEM_BUG_ON((Y)->end != (CS))
 
@@ -34,38 +32,59 @@ struct batch_chunk {
 };
 
 struct batch_vals {
-	u32 max_primitives;
-	u32 max_urb_entries;
-	u32 cmd_size;
-	u32 state_size;
+	u32 max_threads;
 	u32 state_start;
-	u32 batch_size;
+	u32 surface_start;
 	u32 surface_height;
 	u32 surface_width;
-	u32 scratch_size;
-	u32 max_size;
+	u32 size;
 };
 
+static inline int num_primitives(const struct batch_vals *bv)
+{
+	/*
+	 * We need to saturate the GPU with work in order to dispatch
	 * a shader on every HW thread, and clear the thread-local registers.
	 * In short, we have to dispatch work faster than the shaders can
	 * run in order to fill the EU and occupy each HW thread.
+	 */
+	return bv->max_threads;
+}
+
 static void
 batch_get_defaults(struct drm_i915_private *i915, struct batch_vals *bv)
 {
 	if (IS_HASWELL(i915)) {
-		bv->max_primitives = 280;
-		bv->max_urb_entries = MAX_URB_ENTRIES;
+		switch (INTEL_INFO(i915)->gt) {
+		default:
+		case 1:
+			bv->max_threads = 70;
+			break;
+		case 2:
+			bv->max_threads = 140;
+			break;
+		case 3:
+			bv->max_threads = 280;
+			break;
+		}
 		bv->surface_height = 16 * 16;
 		bv->surface_width = 32 * 2 * 16;
 	} else {
-		bv->max_primitives = 128;
-		bv->max_urb_entries = MAX_URB_ENTRIES / 2;
+		switch (INTEL_INFO(i915)->gt) {
+		default:
+		case 1: /* including vlv */
+			bv->max_threads = 36;
+			break;
+		case 2:
+			bv->max_threads = 128;
+			break;
+		}
 		bv->surface_height = 16 * 8;
 		bv->surface_width = 32 * 16;
 	}
-	bv->cmd_size = bv->max_primitives * 4096;
-	bv->state_size = STATE_SIZE;
-	bv->state_start = bv->cmd_size;
-	bv->batch_size = bv->cmd_size + bv->state_size;
-	bv->scratch_size = bv->surface_height * bv->surface_width;
-	bv->max_size = bv->batch_size + bv->scratch_size;
+	bv->state_start = round_up(SZ_1K + num_primitives(bv) * 64, SZ_4K);
+	bv->surface_start = bv->state_start + SZ_4K;
+	bv->size = bv->surface_start + bv->surface_height * bv->surface_width;
 }
 
 static void batch_init(struct batch_chunk *bc,
@@ -155,7 +174,8 @@ static u32
 gen7_fill_binding_table(struct batch_chunk *state,
 			const struct batch_vals *bv)
 {
-	u32 surface_start = gen7_fill_surface_state(state, bv->batch_size, bv);
+	u32 surface_start =
+		gen7_fill_surface_state(state, bv->surface_start, bv);
 	u32 *cs = batch_alloc_items(state, 32, 8);
 	u32 offset = batch_offset(state, cs);
 
@@ -214,9 +234,9 @@ static void
 gen7_emit_state_base_address(struct batch_chunk *batch,
 			     u32 surface_state_base)
 {
-	u32 *cs = batch_alloc_items(batch, 0, 12);
+	u32 *cs = batch_alloc_items(batch, 0, 10);
 
-	*cs++ = STATE_BASE_ADDRESS | (12 - 2);
+	*cs++ = STATE_BASE_ADDRESS | (10 - 2);
 	/* general */
 	*cs++ = batch_addr(batch) | BASE_ADDRESS_MODIFY;
 	/* surface */
@@ -233,8 +253,6 @@ gen7_emit_state_base_address(struct batch_chunk *batch,
 	*cs++ = BASE_ADDRESS_MODIFY;
 	*cs++ = 0;
 	*cs++ = BASE_ADDRESS_MODIFY;
-	*cs++ = 0;
-	*cs++ = 0;
 	batch_advance(batch, cs);
 }
 
@@ -244,8 +262,7 @@ gen7_emit_vfe_state(struct batch_chunk *batch,
 		    u32 urb_size, u32 curbe_size,
 		    u32 mode)
 {
-	u32 urb_entries = bv->max_urb_entries;
-	u32 threads = bv->max_primitives - 1;
+	u32 threads = bv->max_threads - 1;
 	u32 *cs = batch_alloc_items(batch, 32, 8);
 
 	*cs++ = MEDIA_VFE_STATE | (8 - 2);
@@ -254,7 +271,7 @@ gen7_emit_vfe_state(struct batch_chunk *batch,
 	*cs++ = 0;
 
 	/* number of threads & urb entries for GPGPU vs Media Mode */
-	*cs++ = threads << 16 | urb_entries << 8 | mode << 2;
+	*cs++ = threads << 16 | 1 << 8 | mode << 2;
 
 	*cs++ = 0;
 
@@ -293,17 +310,12 @@ gen7_emit_media_object(struct batch_chunk *batch,
 {
 	unsigned int x_offset = (media_object_index % 16) * 64;
 	unsigned int y_offset = (media_object_index / 16) * 16;
-	unsigned int inline_data_size;
-	unsigned int media_batch_size;
-	unsigned int i;
+	unsigned int pkt = 6 + 3;
 	u32 *cs;
 
-	inline_data_size = 112 * 8;
-	media_batch_size = inline_data_size + 6;
-
-	cs = batch_alloc_items(batch, 8, media_batch_size);
+	cs = batch_alloc_items(batch, 8, pkt);
 
-	*cs++ = MEDIA_OBJECT | (media_batch_size - 2);
+	*cs++ = MEDIA_OBJECT | (pkt - 2);
 
 	/* interface descriptor offset */
 	*cs++ = 0;
@@ -317,25 +329,44 @@ gen7_emit_media_object(struct batch_chunk *batch,
 	*cs++ = 0;
 
 	/* inline */
-	*cs++ = (y_offset << 16) | (x_offset);
+	*cs++ = y_offset << 16 | x_offset;
 	*cs++ = 0;
 	*cs++ = GT3_INLINE_DATA_DELAYS;
-	for (i = 3; i < inline_data_size; i++)
-		*cs++ = 0;
 
 	batch_advance(batch, cs);
 }
 
 static void gen7_emit_pipeline_flush(struct batch_chunk *batch)
 {
-	u32 *cs = batch_alloc_items(batch, 0, 5);
+	u32 *cs = batch_alloc_items(batch, 0, 4);
 
-	*cs++ = GFX_OP_PIPE_CONTROL(5);
-	*cs++ = PIPE_CONTROL_STATE_CACHE_INVALIDATE |
-		PIPE_CONTROL_GLOBAL_GTT_IVB;
+	*cs++ = GFX_OP_PIPE_CONTROL(4);
+	*cs++ = PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
+		PIPE_CONTROL_DEPTH_CACHE_FLUSH |
+		PIPE_CONTROL_DC_FLUSH_ENABLE |
+		PIPE_CONTROL_CS_STALL;
 	*cs++ = 0;
 	*cs++ = 0;
 
 	batch_advance(batch, cs);
 }
 
+static void gen7_emit_pipeline_invalidate(struct batch_chunk *batch)
+{
+	u32 *cs = batch_alloc_items(batch, 0, 8);
+
+	/* ivb: Stall before STATE_CACHE_INVALIDATE */
+	*cs++ = GFX_OP_PIPE_CONTROL(4);
+	*cs++ = PIPE_CONTROL_STALL_AT_SCOREBOARD |
+		PIPE_CONTROL_CS_STALL;
+	*cs++ = 0;
+	*cs++ = 0;
+
+	*cs++ = GFX_OP_PIPE_CONTROL(4);
+	*cs++ = PIPE_CONTROL_STATE_CACHE_INVALIDATE;
+	*cs++ = 0;
+	*cs++ = 0;
+
+	batch_advance(batch, cs);
+}
+
@@ -344,34 +375,34 @@ static void emit_batch(struct i915_vma * const vma,
 		       const struct batch_vals *bv)
 {
 	struct drm_i915_private *i915 = vma->vm->i915;
-	unsigned int desc_count = 64;
-	const u32 urb_size = 112;
+	const unsigned int desc_count = 1;
+	const unsigned int urb_size = 1;
 	struct batch_chunk cmds, state;
-	u32 interface_descriptor;
+	u32 descriptors;
 	unsigned int i;
 
-	batch_init(&cmds, vma, start, 0, bv->cmd_size);
-	batch_init(&state, vma, start, bv->state_start, bv->state_size);
+	batch_init(&cmds, vma, start, 0, bv->state_start);
+	batch_init(&state, vma, start, bv->state_start, SZ_4K);
 
-	interface_descriptor =
-		gen7_fill_interface_descriptor(&state, bv,
-					       IS_HASWELL(i915) ?
-					       &cb_kernel_hsw :
-					       &cb_kernel_ivb,
-					       desc_count);
-	gen7_emit_pipeline_flush(&cmds);
+	descriptors = gen7_fill_interface_descriptor(&state, bv,
+						     IS_HASWELL(i915) ?
+						     &cb_kernel_hsw :
+						     &cb_kernel_ivb,
+						     desc_count);
+
+	gen7_emit_pipeline_invalidate(&cmds);
 	batch_add(&cmds, PIPELINE_SELECT | PIPELINE_SELECT_MEDIA);
 	batch_add(&cmds, MI_NOOP);
-	gen7_emit_state_base_address(&cmds, interface_descriptor);
+	gen7_emit_pipeline_invalidate(&cmds);
 
 	gen7_emit_pipeline_flush(&cmds);
+	gen7_emit_state_base_address(&cmds, descriptors);
+	gen7_emit_pipeline_invalidate(&cmds);
 
 	gen7_emit_vfe_state(&cmds, bv, urb_size - 1, 0, 0);
+	gen7_emit_interface_descriptor_load(&cmds, descriptors, desc_count);
 
-	gen7_emit_interface_descriptor_load(&cmds,
-					    interface_descriptor,
-					    desc_count);
-
-	for (i = 0; i < bv->max_primitives; i++)
+	for (i = 0; i < num_primitives(bv); i++)
 		gen7_emit_media_object(&cmds, i);
 
 	batch_add(&cmds, MI_BATCH_BUFFER_END);
@@ -385,15 +416,15 @@ int gen7_setup_clear_gpr_bb(struct intel_engine_cs * const engine,
 
 	batch_get_defaults(engine->i915, &bv);
 	if (!vma)
-		return bv.max_size;
+		return bv.size;
 
-	GEM_BUG_ON(vma->obj->base.size < bv.max_size);
+	GEM_BUG_ON(vma->obj->base.size < bv.size);
 
 	batch = i915_gem_object_pin_map(vma->obj, I915_MAP_WC);
 	if (IS_ERR(batch))
 		return PTR_ERR(batch);
 
-	emit_batch(vma, memset(batch, 0, bv.max_size), &bv);
+	emit_batch(vma, memset(batch, 0, bv.size), &bv);
 
 	i915_gem_object_flush_map(vma->obj);
 	__i915_gem_object_release_map(vma->obj);
@@ -32,6 +32,7 @@
 #include "gen6_ppgtt.h"
 #include "gen7_renderclear.h"
 #include "i915_drv.h"
+#include "i915_mitigations.h"
 #include "intel_breadcrumbs.h"
 #include "intel_context.h"
 #include "intel_gt.h"
@@ -886,7 +887,8 @@ static int switch_context(struct i915_request *rq)
 	GEM_BUG_ON(HAS_EXECLISTS(engine->i915));
 
 	if (engine->wa_ctx.vma && ce != engine->kernel_context) {
-		if (engine->wa_ctx.vma->private != ce) {
+		if (engine->wa_ctx.vma->private != ce &&
+		    i915_mitigate_clear_residuals()) {
 			ret = clear_residuals(rq);
 			if (ret)
 				return ret;
@@ -1290,7 +1292,7 @@ int intel_ring_submission_setup(struct intel_engine_cs *engine)
 
 	GEM_BUG_ON(timeline->hwsp_ggtt != engine->status_page.vma);
 
-	if (IS_HASWELL(engine->i915) && engine->class == RENDER_CLASS) {
+	if (IS_GEN(engine->i915, 7) && engine->class == RENDER_CLASS) {
 		err = gen7_ctx_switch_bb_init(engine);
 		if (err)
 			goto err_ring_unpin;
@@ -217,6 +217,15 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
 				DDI_BUF_CTL_ENABLE);
 		vgpu_vreg_t(vgpu, DDI_BUF_CTL(port)) |= DDI_BUF_IS_IDLE;
 	}
+	vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &=
+		~(PORTA_HOTPLUG_ENABLE | PORTA_HOTPLUG_STATUS_MASK);
+	vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &=
+		~(PORTB_HOTPLUG_ENABLE | PORTB_HOTPLUG_STATUS_MASK);
+	vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &=
+		~(PORTC_HOTPLUG_ENABLE | PORTC_HOTPLUG_STATUS_MASK);
+	/* No hpd_invert set in vgpu vbt, need to clear invert mask */
+	vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &= ~BXT_DDI_HPD_INVERT_MASK;
+	vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &= ~BXT_DE_PORT_HOTPLUG_MASK;
 
 	vgpu_vreg_t(vgpu, BXT_P_CR_GT_DISP_PWRON) &= ~(BIT(0) | BIT(1));
 	vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) &=
@@ -273,6 +282,8 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
 		vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_EDP)) |=
 			(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
 			 TRANS_DDI_FUNC_ENABLE);
+		vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |=
+			PORTA_HOTPLUG_ENABLE;
 		vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
 			GEN8_DE_PORT_HOTPLUG(HPD_PORT_A);
 	}
@@ -301,6 +312,8 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
 			(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
			 (PORT_B << TRANS_DDI_PORT_SHIFT) |
 			 TRANS_DDI_FUNC_ENABLE);
+		vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |=
+			PORTB_HOTPLUG_ENABLE;
 		vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
 			GEN8_DE_PORT_HOTPLUG(HPD_PORT_B);
 	}
@@ -329,6 +342,8 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
 			(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
 			 (PORT_B << TRANS_DDI_PORT_SHIFT) |
 			 TRANS_DDI_FUNC_ENABLE);
+		vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |=
+			PORTC_HOTPLUG_ENABLE;
 		vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
 			GEN8_DE_PORT_HOTPLUG(HPD_PORT_C);
 	}
@@ -661,44 +676,62 @@ void intel_vgpu_emulate_hotplug(struct intel_vgpu *vgpu, bool connected)
 				PORTD_HOTPLUG_STATUS_MASK;
 		intel_vgpu_trigger_virtual_event(vgpu, DP_D_HOTPLUG);
 	} else if (IS_BROXTON(i915)) {
-		if (connected) {
-			if (intel_vgpu_has_monitor_on_port(vgpu, PORT_A)) {
+		if (intel_vgpu_has_monitor_on_port(vgpu, PORT_A)) {
+			if (connected) {
 				vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
 					GEN8_DE_PORT_HOTPLUG(HPD_PORT_A);
-			}
-			if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) {
-				vgpu_vreg_t(vgpu, SFUSE_STRAP) |=
-					SFUSE_STRAP_DDIB_DETECTED;
-				vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
-					GEN8_DE_PORT_HOTPLUG(HPD_PORT_B);
-			}
-			if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) {
-				vgpu_vreg_t(vgpu, SFUSE_STRAP) |=
-					SFUSE_STRAP_DDIC_DETECTED;
-				vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
-					GEN8_DE_PORT_HOTPLUG(HPD_PORT_C);
-			}
-		} else {
-			if (intel_vgpu_has_monitor_on_port(vgpu, PORT_A)) {
+			} else {
 				vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &=
 					~GEN8_DE_PORT_HOTPLUG(HPD_PORT_A);
 			}
-			if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) {
-				vgpu_vreg_t(vgpu, SFUSE_STRAP) &=
-					~SFUSE_STRAP_DDIB_DETECTED;
+			vgpu_vreg_t(vgpu, GEN8_DE_PORT_IIR) |=
+				GEN8_DE_PORT_HOTPLUG(HPD_PORT_A);
+			vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &=
+				~PORTA_HOTPLUG_STATUS_MASK;
+			vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |=
+				PORTA_HOTPLUG_LONG_DETECT;
+			intel_vgpu_trigger_virtual_event(vgpu, DP_A_HOTPLUG);
+		}
+		if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) {
+			if (connected) {
+				vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
+					GEN8_DE_PORT_HOTPLUG(HPD_PORT_B);
+				vgpu_vreg_t(vgpu, SFUSE_STRAP) |=
+					SFUSE_STRAP_DDIB_DETECTED;
+			} else {
 				vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &=
 					~GEN8_DE_PORT_HOTPLUG(HPD_PORT_B);
-			}
-			if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) {
 				vgpu_vreg_t(vgpu, SFUSE_STRAP) &=
-					~SFUSE_STRAP_DDIC_DETECTED;
+					~SFUSE_STRAP_DDIB_DETECTED;
 			}
+			vgpu_vreg_t(vgpu, GEN8_DE_PORT_IIR) |=
+				GEN8_DE_PORT_HOTPLUG(HPD_PORT_B);
+			vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &=
+				~PORTB_HOTPLUG_STATUS_MASK;
+			vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |=
+				PORTB_HOTPLUG_LONG_DETECT;
+			intel_vgpu_trigger_virtual_event(vgpu, DP_B_HOTPLUG);
+		}
+		if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) {
+			if (connected) {
+				vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
+					GEN8_DE_PORT_HOTPLUG(HPD_PORT_C);
+				vgpu_vreg_t(vgpu, SFUSE_STRAP) |=
+					SFUSE_STRAP_DDIC_DETECTED;
+			} else {
+				vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &=
+					~GEN8_DE_PORT_HOTPLUG(HPD_PORT_C);
+				vgpu_vreg_t(vgpu, SFUSE_STRAP) &=
+					~SFUSE_STRAP_DDIC_DETECTED;
+			}
+			vgpu_vreg_t(vgpu, GEN8_DE_PORT_IIR) |=
+				GEN8_DE_PORT_HOTPLUG(HPD_PORT_C);
+			vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &=
+				~PORTC_HOTPLUG_STATUS_MASK;
+			vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |=
+				PORTC_HOTPLUG_LONG_DETECT;
+			intel_vgpu_trigger_virtual_event(vgpu, DP_C_HOTPLUG);
+		}
-		vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |=
-			PORTB_HOTPLUG_STATUS_MASK;
-		intel_vgpu_trigger_virtual_event(vgpu, DP_B_HOTPLUG);
 	}
 }
@@ -437,10 +437,9 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
 	if (ret)
 		goto out_clean_sched_policy;
 
-	if (IS_BROADWELL(dev_priv))
+	if (IS_BROADWELL(dev_priv) || IS_BROXTON(dev_priv))
 		ret = intel_gvt_hypervisor_set_edid(vgpu, PORT_B);
-	/* FixMe: Re-enable APL/BXT once vfio_edid enabled */
-	else if (!IS_BROXTON(dev_priv))
+	else
 		ret = intel_gvt_hypervisor_set_edid(vgpu, PORT_D);
 	if (ret)
 		goto out_clean_sched_policy;
@@ -1047,6 +1047,8 @@ static void intel_shutdown_encoders(struct drm_i915_private *dev_priv)
 
 void i915_driver_shutdown(struct drm_i915_private *i915)
 {
+	disable_rpm_wakeref_asserts(&i915->runtime_pm);
+
 	i915_gem_suspend(i915);
 
 	drm_kms_helper_poll_disable(&i915->drm);
@@ -1060,6 +1062,8 @@ void i915_driver_shutdown(struct drm_i915_private *i915)
 
 	intel_suspend_encoders(i915);
 	intel_shutdown_encoders(i915);
+
+	enable_rpm_wakeref_asserts(&i915->runtime_pm);
 }
 
 static bool suspend_to_idle(struct drm_i915_private *dev_priv)
drivers/gpu/drm/i915/i915_mitigations.c (new file, 146 lines)
@@ -0,0 +1,146 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#include <linux/kernel.h>
+#include <linux/moduleparam.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+
+#include "i915_drv.h"
+#include "i915_mitigations.h"
+
+static unsigned long mitigations __read_mostly = ~0UL;
+
+enum {
+	CLEAR_RESIDUALS = 0,
+};
+
+static const char * const names[] = {
+	[CLEAR_RESIDUALS] = "residuals",
+};
+
+bool i915_mitigate_clear_residuals(void)
+{
+	return READ_ONCE(mitigations) & BIT(CLEAR_RESIDUALS);
+}
+
+static int mitigations_set(const char *val, const struct kernel_param *kp)
+{
+	unsigned long new = ~0UL;
+	char *str, *sep, *tok;
+	bool first = true;
+	int err = 0;
+
+	BUILD_BUG_ON(ARRAY_SIZE(names) >= BITS_PER_TYPE(mitigations));
+
+	str = kstrdup(val, GFP_KERNEL);
+	if (!str)
+		return -ENOMEM;
+
+	for (sep = str; (tok = strsep(&sep, ","));) {
+		bool enable = true;
+		int i;
+
+		/* Be tolerant of leading/trailing whitespace */
+		tok = strim(tok);
+
+		if (first) {
+			first = false;
+
+			if (!strcmp(tok, "auto"))
+				continue;
+
+			new = 0;
+			if (!strcmp(tok, "off"))
+				continue;
+		}
+
+		if (*tok == '!') {
+			enable = !enable;
+			tok++;
+		}
+
+		if (!strncmp(tok, "no", 2)) {
+			enable = !enable;
+			tok += 2;
+		}
+
+		if (*tok == '\0')
+			continue;
+
+		for (i = 0; i < ARRAY_SIZE(names); i++) {
+			if (!strcmp(tok, names[i])) {
+				if (enable)
+					new |= BIT(i);
+				else
+					new &= ~BIT(i);
+				break;
+			}
+		}
+		if (i == ARRAY_SIZE(names)) {
+			pr_err("Bad \"%s.mitigations=%s\", '%s' is unknown\n",
+			       DRIVER_NAME, val, tok);
+			err = -EINVAL;
+			break;
+		}
+	}
+	kfree(str);
+	if (err)
+		return err;
+
+	WRITE_ONCE(mitigations, new);
+	return 0;
+}
+
+static int mitigations_get(char *buffer, const struct kernel_param *kp)
+{
+	unsigned long local = READ_ONCE(mitigations);
+	int count, i;
+	bool enable;
+
+	if (!local)
+		return scnprintf(buffer, PAGE_SIZE, "%s\n", "off");
+
+	if (local & BIT(BITS_PER_LONG - 1)) {
+		count = scnprintf(buffer, PAGE_SIZE, "%s,", "auto");
+		enable = false;
+	} else {
+		enable = true;
+		count = 0;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(names); i++) {
+		if ((local & BIT(i)) != enable)
+			continue;
+
+		count += scnprintf(buffer + count, PAGE_SIZE - count,
+				   "%s%s,", enable ? "" : "!", names[i]);
+	}
+
+	buffer[count - 1] = '\n';
+	return count;
+}
+
+static const struct kernel_param_ops ops = {
+	.set = mitigations_set,
+	.get = mitigations_get,
+};
+
+module_param_cb_unsafe(mitigations, &ops, NULL, 0600);
+MODULE_PARM_DESC(mitigations,
+"Selectively enable security mitigations for all Intel® GPUs in the system.\n"
+"\n"
+"  auto -- enables all mitigations required for the platform [default]\n"
+"  off  -- disables all mitigations\n"
+"\n"
+"Individual mitigations can be enabled by passing a comma-separated string,\n"
+"e.g. mitigations=residuals to enable only clearing residuals or\n"
+"mitigations=auto,noresiduals to disable only the clear residual mitigation.\n"
+"Either '!' or 'no' may be used to switch from enabling the mitigation to\n"
+"disabling it.\n"
+"\n"
+"Active mitigations for Ivybridge, Baytrail, Haswell:\n"
+"  residuals -- clear all thread-local registers between contexts"
+);
drivers/gpu/drm/i915/i915_mitigations.h (new file, 13 lines)
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef __I915_MITIGATIONS_H__
+#define __I915_MITIGATIONS_H__
+
+#include <linux/types.h>
+
+bool i915_mitigate_clear_residuals(void);
+
+#endif /* __I915_MITIGATIONS_H__ */
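To show how this knob is consumed, here is a hedged sketch of a caller gating a workaround on the predicate, mirroring the switch_context() change above. The wrapper function is invented for illustration; clear_residuals() and struct i915_request are assumed from the surrounding ring-submission code:

	#include "i915_drv.h"
	#include "i915_mitigations.h"

	/*
	 * Hypothetical call site: only emit the residual-clearing batch when
	 * the sysadmin has left the "residuals" mitigation enabled (the
	 * default, i.e. i915.mitigations=auto).
	 */
	static int demo_maybe_clear_residuals(struct i915_request *rq)
	{
		if (!i915_mitigate_clear_residuals())
			return 0;	/* switched off via i915.mitigations= */

		return clear_residuals(rq);	/* assumed helper from the ring code */
	}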
@@ -222,7 +222,7 @@ nv50_dmac_wait(struct nvif_push *push, u32 size)
 
 int
 nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
-		 const s32 *oclass, u8 head, void *data, u32 size, u64 syncbuf,
+		 const s32 *oclass, u8 head, void *data, u32 size, s64 syncbuf,
		 struct nv50_dmac *dmac)
 {
 	struct nouveau_cli *cli = (void *)device->object.client;
@@ -271,7 +271,7 @@ nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
 	if (ret)
 		return ret;
 
-	if (!syncbuf)
+	if (syncbuf < 0)
 		return 0;
 
 	ret = nvif_object_ctor(&dmac->base.user, "kmsSyncCtxDma", NV50_DISP_HANDLE_SYNCBUF,
@@ -95,7 +95,7 @@ struct nv50_outp_atom {
 
 int nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
 		     const s32 *oclass, u8 head, void *data, u32 size,
-		     u64 syncbuf, struct nv50_dmac *dmac);
+		     s64 syncbuf, struct nv50_dmac *dmac);
 void nv50_dmac_destroy(struct nv50_dmac *);
 
 /*
@@ -76,7 +76,7 @@ wimmc37b_init_(const struct nv50_wimm_func *func, struct nouveau_drm *drm,
 	int ret;
 
 	ret = nv50_dmac_create(&drm->client.device, &disp->disp->object,
-			       &oclass, 0, &args, sizeof(args), 0,
+			       &oclass, 0, &args, sizeof(args), -1,
 			       &wndw->wimm);
 	if (ret) {
 		NV_ERROR(drm, "wimm%04x allocation failed: %d\n", oclass, ret);
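The type change is the whole fix: a notifier buffer at VRAM offset 0 is legal, so 0 can no longer mean "no sync buffer". The sentinel becomes any negative s64, which is why the wimm constructor above now passes -1. A call-site sketch (the locals are illustrative, not driver code):

/* 0 is a valid notifier offset; "no syncbuf" must be out of range. */
s64 syncbuf = need_syncbuf ? notifier_offset : -1;	/* hypothetical locals */
ret = nv50_dmac_create(&drm->client.device, &disp->disp->object,
		       &oclass, 0, &args, sizeof(args), syncbuf, &wndw->wimm);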
@@ -92,6 +92,7 @@ int g94_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
 int gf117_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
 int gf119_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
 int gk104_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
+int gk110_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
 int gm200_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
 
 static inline int
@@ -1815,7 +1815,7 @@ nvf0_chipset = {
 	.fb = gk110_fb_new,
 	.fuse = gf100_fuse_new,
 	.gpio = gk104_gpio_new,
-	.i2c = gk104_i2c_new,
+	.i2c = gk110_i2c_new,
 	.ibus = gk104_ibus_new,
 	.iccsense = gf100_iccsense_new,
 	.imem = nv50_instmem_new,
@@ -1853,7 +1853,7 @@ nvf1_chipset = {
 	.fb = gk110_fb_new,
 	.fuse = gf100_fuse_new,
 	.gpio = gk104_gpio_new,
-	.i2c = gk104_i2c_new,
+	.i2c = gk110_i2c_new,
 	.ibus = gk104_ibus_new,
 	.iccsense = gf100_iccsense_new,
 	.imem = nv50_instmem_new,
@@ -1891,7 +1891,7 @@ nv106_chipset = {
 	.fb = gk110_fb_new,
 	.fuse = gf100_fuse_new,
 	.gpio = gk104_gpio_new,
-	.i2c = gk104_i2c_new,
+	.i2c = gk110_i2c_new,
 	.ibus = gk104_ibus_new,
 	.iccsense = gf100_iccsense_new,
 	.imem = nv50_instmem_new,
@@ -1929,7 +1929,7 @@ nv108_chipset = {
 	.fb = gk110_fb_new,
 	.fuse = gf100_fuse_new,
 	.gpio = gk104_gpio_new,
-	.i2c = gk104_i2c_new,
+	.i2c = gk110_i2c_new,
 	.ibus = gk104_ibus_new,
 	.iccsense = gf100_iccsense_new,
 	.imem = nv50_instmem_new,
@@ -1967,7 +1967,7 @@ nv117_chipset = {
 	.fb = gm107_fb_new,
 	.fuse = gm107_fuse_new,
 	.gpio = gk104_gpio_new,
-	.i2c = gk104_i2c_new,
+	.i2c = gk110_i2c_new,
 	.ibus = gk104_ibus_new,
 	.iccsense = gf100_iccsense_new,
 	.imem = nv50_instmem_new,
@@ -2003,7 +2003,7 @@ nv118_chipset = {
 	.fb = gm107_fb_new,
 	.fuse = gm107_fuse_new,
 	.gpio = gk104_gpio_new,
-	.i2c = gk104_i2c_new,
+	.i2c = gk110_i2c_new,
 	.ibus = gk104_ibus_new,
 	.iccsense = gf100_iccsense_new,
 	.imem = nv50_instmem_new,
@@ -75,7 +75,7 @@ shadow_image(struct nvkm_bios *bios, int idx, u32 offset, struct shadow *mthd)
 		nvkm_debug(subdev, "%08x: type %02x, %d bytes\n",
 			   image.base, image.type, image.size);
 
-		if (!shadow_fetch(bios, mthd, image.size)) {
+		if (!shadow_fetch(bios, mthd, image.base + image.size)) {
 			nvkm_debug(subdev, "%08x: fetch failed\n", image.base);
 			return 0;
 		}
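shadow_fetch() takes an absolute "shadow at least this many bytes" limit within the BIOS, not a per-image length; for an expansion ROM that begins at image.base, passing only image.size left its tail unfetched. Illustrative numbers (made-up offsets, not taken from any real VBIOS):

/* Sketch of the bounds arithmetic behind the one-line fix above. */
u32 base  = 0x10000;		/* image.base: offset within the shadowed BIOS */
u32 size  = 0x08000;		/* image.size: length of this expansion ROM */
u32 limit = base + size;	/* must shadow up to 0x18000, not just 0x8000 */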
@@ -7,6 +7,7 @@ nvkm-y += nvkm/subdev/i2c/g94.o
 nvkm-y += nvkm/subdev/i2c/gf117.o
 nvkm-y += nvkm/subdev/i2c/gf119.o
 nvkm-y += nvkm/subdev/i2c/gk104.o
+nvkm-y += nvkm/subdev/i2c/gk110.o
 nvkm-y += nvkm/subdev/i2c/gm200.o
 
 nvkm-y += nvkm/subdev/i2c/pad.o
@@ -3,6 +3,13 @@
 #define __NVKM_I2C_AUX_H__
 #include "pad.h"
 
+static inline void
+nvkm_i2c_aux_autodpcd(struct nvkm_i2c *i2c, int aux, bool enable)
+{
+	if (i2c->func->aux_autodpcd)
+		i2c->func->aux_autodpcd(i2c, aux, enable);
+}
+
 struct nvkm_i2c_aux_func {
 	bool address_only;
 	int (*xfer)(struct nvkm_i2c_aux *, bool retry, u8 type,
@@ -77,7 +77,8 @@ g94_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
 		 u8 type, u32 addr, u8 *data, u8 *size)
 {
 	struct g94_i2c_aux *aux = g94_i2c_aux(obj);
-	struct nvkm_device *device = aux->base.pad->i2c->subdev.device;
+	struct nvkm_i2c *i2c = aux->base.pad->i2c;
+	struct nvkm_device *device = i2c->subdev.device;
 	const u32 base = aux->ch * 0x50;
 	u32 ctrl, stat, timeout, retries = 0;
 	u32 xbuf[4] = {};
@@ -96,6 +97,8 @@ g94_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
 		goto out;
 	}
 
+	nvkm_i2c_aux_autodpcd(i2c, aux->ch, false);
+
 	if (!(type & 1)) {
 		memcpy(xbuf, data, *size);
 		for (i = 0; i < 16; i += 4) {
@@ -128,7 +131,7 @@ g94_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
 			if (!timeout--) {
 				AUX_ERR(&aux->base, "timeout %08x", ctrl);
 				ret = -EIO;
-				goto out;
+				goto out_err;
 			}
 		} while (ctrl & 0x00010000);
 		ret = 0;
@@ -154,7 +157,9 @@ g94_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
 		memcpy(data, xbuf, *size);
 		*size = stat & 0x0000001f;
 	}
 
+out_err:
+	nvkm_i2c_aux_autodpcd(i2c, aux->ch, true);
 out:
 	g94_i2c_aux_fini(aux);
 	return ret < 0 ? ret : (stat & 0x000f0000) >> 16;
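Both xfer implementations get the same shape: HW-initiated DPCD reads are parked for the duration of the manual AUX transfer and restored on every exit path, including the timeout that previously jumped straight to out. A structural sketch of that bracket (do_transfer() is hypothetical):

	nvkm_i2c_aux_autodpcd(i2c, aux->ch, false);	/* park HW DPCD polling */
	ret = do_transfer(aux, type, addr, data, size);	/* failures goto out_err */
out_err:
	nvkm_i2c_aux_autodpcd(i2c, aux->ch, true);	/* always restored */
out:
	g94_i2c_aux_fini(aux);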
@@ -33,7 +33,7 @@ static void
 gm200_i2c_aux_fini(struct gm200_i2c_aux *aux)
 {
 	struct nvkm_device *device = aux->base.pad->i2c->subdev.device;
-	nvkm_mask(device, 0x00d954 + (aux->ch * 0x50), 0x00310000, 0x00000000);
+	nvkm_mask(device, 0x00d954 + (aux->ch * 0x50), 0x00710000, 0x00000000);
 }
 
 static int
@@ -54,10 +54,10 @@ gm200_i2c_aux_init(struct gm200_i2c_aux *aux)
 			AUX_ERR(&aux->base, "begin idle timeout %08x", ctrl);
 			return -EBUSY;
 		}
-	} while (ctrl & 0x03010000);
+	} while (ctrl & 0x07010000);
 
 	/* set some magic, and wait up to 1ms for it to appear */
-	nvkm_mask(device, 0x00d954 + (aux->ch * 0x50), 0x00300000, ureq);
+	nvkm_mask(device, 0x00d954 + (aux->ch * 0x50), 0x00700000, ureq);
 	timeout = 1000;
 	do {
 		ctrl = nvkm_rd32(device, 0x00d954 + (aux->ch * 0x50));
@@ -67,7 +67,7 @@ gm200_i2c_aux_init(struct gm200_i2c_aux *aux)
 			gm200_i2c_aux_fini(aux);
 			return -EBUSY;
 		}
-	} while ((ctrl & 0x03000000) != urep);
+	} while ((ctrl & 0x07000000) != urep);
 
 	return 0;
 }
@@ -77,7 +77,8 @@ gm200_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
 		 u8 type, u32 addr, u8 *data, u8 *size)
 {
 	struct gm200_i2c_aux *aux = gm200_i2c_aux(obj);
-	struct nvkm_device *device = aux->base.pad->i2c->subdev.device;
+	struct nvkm_i2c *i2c = aux->base.pad->i2c;
+	struct nvkm_device *device = i2c->subdev.device;
 	const u32 base = aux->ch * 0x50;
 	u32 ctrl, stat, timeout, retries = 0;
 	u32 xbuf[4] = {};
@@ -96,6 +97,8 @@ gm200_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
 		goto out;
 	}
 
+	nvkm_i2c_aux_autodpcd(i2c, aux->ch, false);
+
 	if (!(type & 1)) {
 		memcpy(xbuf, data, *size);
 		for (i = 0; i < 16; i += 4) {
@@ -128,7 +131,7 @@ gm200_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
 			if (!timeout--) {
 				AUX_ERR(&aux->base, "timeout %08x", ctrl);
 				ret = -EIO;
-				goto out;
+				goto out_err;
 			}
 		} while (ctrl & 0x00010000);
 		ret = 0;
@@ -155,6 +158,8 @@ gm200_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
 		*size = stat & 0x0000001f;
 	}
 
+out_err:
+	nvkm_i2c_aux_autodpcd(i2c, aux->ch, true);
 out:
 	gm200_i2c_aux_fini(aux);
 	return ret < 0 ? ret : (stat & 0x000f0000) >> 16;
drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gk110.c (new file, 45 lines)
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2021 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+#include "pad.h"
+
+static void
+gk110_aux_autodpcd(struct nvkm_i2c *i2c, int aux, bool enable)
+{
+	nvkm_mask(i2c->subdev.device, 0x00e4f8 + (aux * 0x50), 0x00010000, enable << 16);
+}
+
+static const struct nvkm_i2c_func
+gk110_i2c = {
+	.pad_x_new = gf119_i2c_pad_x_new,
+	.pad_s_new = gf119_i2c_pad_s_new,
+	.aux = 4,
+	.aux_stat = gk104_aux_stat,
+	.aux_mask = gk104_aux_mask,
+	.aux_autodpcd = gk110_aux_autodpcd,
+};
+
+int
+gk110_i2c_new(struct nvkm_device *device, int index, struct nvkm_i2c **pi2c)
+{
+	return nvkm_i2c_new_(&gk110_i2c, device, index, pi2c);
+}
@@ -24,6 +24,12 @@
 #include "priv.h"
 #include "pad.h"
 
+static void
+gm200_aux_autodpcd(struct nvkm_i2c *i2c, int aux, bool enable)
+{
+	nvkm_mask(i2c->subdev.device, 0x00d968 + (aux * 0x50), 0x00010000, enable << 16);
+}
+
 static const struct nvkm_i2c_func
 gm200_i2c = {
 	.pad_x_new = gf119_i2c_pad_x_new,
@@ -31,6 +37,7 @@ gm200_i2c = {
 	.aux = 8,
 	.aux_stat = gk104_aux_stat,
 	.aux_mask = gk104_aux_mask,
+	.aux_autodpcd = gm200_aux_autodpcd,
 };
 
 int
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: MIT */
 #ifndef __NVKM_I2C_PAD_H__
 #define __NVKM_I2C_PAD_H__
-#include <subdev/i2c.h>
+#include "priv.h"
 
 struct nvkm_i2c_pad {
 	const struct nvkm_i2c_pad_func *func;
@@ -23,6 +23,10 @@ struct nvkm_i2c_func {
 	/* mask on/off interrupt types for a given set of auxch
 	 */
 	void (*aux_mask)(struct nvkm_i2c *, u32, u32, u32);
+
+	/* enable/disable HW-initiated DPCD reads
+	 */
+	void (*aux_autodpcd)(struct nvkm_i2c *, int aux, bool enable);
 };
 
 void g94_aux_stat(struct nvkm_i2c *, u32 *, u32 *, u32 *, u32 *);
@@ -22,6 +22,7 @@
  * Authors: Ben Skeggs
  */
 #include "priv.h"
+#include <subdev/timer.h>
 
 static void
 gf100_ibus_intr_hub(struct nvkm_subdev *ibus, int i)
@@ -31,7 +32,6 @@ gf100_ibus_intr_hub(struct nvkm_subdev *ibus, int i)
 	u32 data = nvkm_rd32(device, 0x122124 + (i * 0x0400));
 	u32 stat = nvkm_rd32(device, 0x122128 + (i * 0x0400));
 	nvkm_debug(ibus, "HUB%d: %06x %08x (%08x)\n", i, addr, data, stat);
-	nvkm_mask(device, 0x122128 + (i * 0x0400), 0x00000200, 0x00000000);
 }
 
 static void
@@ -42,7 +42,6 @@ gf100_ibus_intr_rop(struct nvkm_subdev *ibus, int i)
 	u32 data = nvkm_rd32(device, 0x124124 + (i * 0x0400));
 	u32 stat = nvkm_rd32(device, 0x124128 + (i * 0x0400));
 	nvkm_debug(ibus, "ROP%d: %06x %08x (%08x)\n", i, addr, data, stat);
-	nvkm_mask(device, 0x124128 + (i * 0x0400), 0x00000200, 0x00000000);
 }
 
 static void
@@ -53,7 +52,6 @@ gf100_ibus_intr_gpc(struct nvkm_subdev *ibus, int i)
 	u32 data = nvkm_rd32(device, 0x128124 + (i * 0x0400));
 	u32 stat = nvkm_rd32(device, 0x128128 + (i * 0x0400));
 	nvkm_debug(ibus, "GPC%d: %06x %08x (%08x)\n", i, addr, data, stat);
-	nvkm_mask(device, 0x128128 + (i * 0x0400), 0x00000200, 0x00000000);
 }
 
 void
@@ -90,6 +88,12 @@ gf100_ibus_intr(struct nvkm_subdev *ibus)
 			intr1 &= ~stat;
 		}
 	}
+
+	nvkm_mask(device, 0x121c4c, 0x0000003f, 0x00000002);
+	nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x121c4c) & 0x0000003f))
+			break;
+	);
 }
 
 static int
@@ -22,6 +22,7 @@
  * Authors: Ben Skeggs
 */
 #include "priv.h"
+#include <subdev/timer.h>
 
 static void
 gk104_ibus_intr_hub(struct nvkm_subdev *ibus, int i)
@@ -31,7 +32,6 @@ gk104_ibus_intr_hub(struct nvkm_subdev *ibus, int i)
 	u32 data = nvkm_rd32(device, 0x122124 + (i * 0x0800));
 	u32 stat = nvkm_rd32(device, 0x122128 + (i * 0x0800));
 	nvkm_debug(ibus, "HUB%d: %06x %08x (%08x)\n", i, addr, data, stat);
-	nvkm_mask(device, 0x122128 + (i * 0x0800), 0x00000200, 0x00000000);
 }
 
 static void
@@ -42,7 +42,6 @@ gk104_ibus_intr_rop(struct nvkm_subdev *ibus, int i)
 	u32 data = nvkm_rd32(device, 0x124124 + (i * 0x0800));
 	u32 stat = nvkm_rd32(device, 0x124128 + (i * 0x0800));
 	nvkm_debug(ibus, "ROP%d: %06x %08x (%08x)\n", i, addr, data, stat);
-	nvkm_mask(device, 0x124128 + (i * 0x0800), 0x00000200, 0x00000000);
 }
 
 static void
@@ -53,7 +52,6 @@ gk104_ibus_intr_gpc(struct nvkm_subdev *ibus, int i)
 	u32 data = nvkm_rd32(device, 0x128124 + (i * 0x0800));
 	u32 stat = nvkm_rd32(device, 0x128128 + (i * 0x0800));
 	nvkm_debug(ibus, "GPC%d: %06x %08x (%08x)\n", i, addr, data, stat);
-	nvkm_mask(device, 0x128128 + (i * 0x0800), 0x00000200, 0x00000000);
 }
 
 void
@@ -90,6 +88,12 @@ gk104_ibus_intr(struct nvkm_subdev *ibus)
 			intr1 &= ~stat;
 		}
 	}
+
+	nvkm_mask(device, 0x12004c, 0x0000003f, 0x00000002);
+	nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x12004c) & 0x0000003f))
+			break;
+	);
 }
 
 static int
@@ -316,9 +316,9 @@ nvkm_mmu_vram(struct nvkm_mmu *mmu)
 {
 	struct nvkm_device *device = mmu->subdev.device;
 	struct nvkm_mm *mm = &device->fb->ram->vram;
-	const u32 sizeN = nvkm_mm_heap_size(mm, NVKM_RAM_MM_NORMAL);
-	const u32 sizeU = nvkm_mm_heap_size(mm, NVKM_RAM_MM_NOMAP);
-	const u32 sizeM = nvkm_mm_heap_size(mm, NVKM_RAM_MM_MIXED);
+	const u64 sizeN = nvkm_mm_heap_size(mm, NVKM_RAM_MM_NORMAL);
+	const u64 sizeU = nvkm_mm_heap_size(mm, NVKM_RAM_MM_NOMAP);
+	const u64 sizeM = nvkm_mm_heap_size(mm, NVKM_RAM_MM_MIXED);
 	u8 type = NVKM_MEM_KIND * !!mmu->func->kind;
 	u8 heap = NVKM_MEM_VRAM;
 	int heapM, heapN, heapU;
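The widening matters on boards with 4GiB or more of VRAM: assuming the heap sizes here are byte counts, anything at or above 1ull << 32 truncates in a u32 and the heap is created with a bogus (possibly zero) size:

/* Truncation sketch: an 8GiB heap size does not survive a u32. */
const u64 size64 = 8ull << 30;	/* 8589934592 bytes */
const u32 size32 = (u32)size64;	/* 0 — the heap would look empty */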
@@ -66,7 +66,7 @@ static struct ttm_pool_type global_uncached[MAX_ORDER];
 static struct ttm_pool_type global_dma32_write_combined[MAX_ORDER];
 static struct ttm_pool_type global_dma32_uncached[MAX_ORDER];
 
-static spinlock_t shrinker_lock;
+static struct mutex shrinker_lock;
 static struct list_head shrinker_list;
 static struct shrinker mm_shrinker;
 
@@ -190,7 +190,7 @@ static int ttm_pool_map(struct ttm_pool *pool, unsigned int order,
 		size_t size = (1ULL << order) * PAGE_SIZE;
 
 		addr = dma_map_page(pool->dev, p, 0, size, DMA_BIDIRECTIONAL);
-		if (dma_mapping_error(pool->dev, **dma_addr))
+		if (dma_mapping_error(pool->dev, addr))
 			return -EFAULT;
 	}
 
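The old check dereferenced the caller's output slot before the fresh mapping had been stored into it; the DMA API wants the value dma_map_page() just returned. The canonical pattern, as a self-contained sketch (the helper name is hypothetical):

#include <linux/dma-mapping.h>

/* Map one page and verify the mapping before handing it to anyone. */
static int map_one(struct device *dev, struct page *page, size_t size,
		   dma_addr_t *out)
{
	dma_addr_t addr = dma_map_page(dev, page, 0, size, DMA_BIDIRECTIONAL);

	if (dma_mapping_error(dev, addr))
		return -EFAULT;	/* never use an unchecked DMA address */
	*out = addr;
	return 0;
}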
@@ -249,9 +249,9 @@ static void ttm_pool_type_init(struct ttm_pool_type *pt, struct ttm_pool *pool,
 	spin_lock_init(&pt->lock);
 	INIT_LIST_HEAD(&pt->pages);
 
-	spin_lock(&shrinker_lock);
+	mutex_lock(&shrinker_lock);
 	list_add_tail(&pt->shrinker_list, &shrinker_list);
-	spin_unlock(&shrinker_lock);
+	mutex_unlock(&shrinker_lock);
 }
 
 /* Remove a pool_type from the global shrinker list and free all pages */
@@ -259,9 +259,9 @@ static void ttm_pool_type_fini(struct ttm_pool_type *pt)
 {
 	struct page *p, *tmp;
 
-	spin_lock(&shrinker_lock);
+	mutex_lock(&shrinker_lock);
 	list_del(&pt->shrinker_list);
-	spin_unlock(&shrinker_lock);
+	mutex_unlock(&shrinker_lock);
 
 	list_for_each_entry_safe(p, tmp, &pt->pages, lru)
 		ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
@@ -302,7 +302,7 @@ static unsigned int ttm_pool_shrink(void)
 	unsigned int num_freed;
 	struct page *p;
 
-	spin_lock(&shrinker_lock);
+	mutex_lock(&shrinker_lock);
 	pt = list_first_entry(&shrinker_list, typeof(*pt), shrinker_list);
 
 	p = ttm_pool_type_take(pt);
@@ -314,7 +314,7 @@ static unsigned int ttm_pool_shrink(void)
 	}
 
 	list_move_tail(&pt->shrinker_list, &shrinker_list);
-	spin_unlock(&shrinker_lock);
+	mutex_unlock(&shrinker_lock);
 
 	return num_freed;
 }
@@ -564,7 +564,7 @@ int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m)
 {
 	unsigned int i;
 
-	spin_lock(&shrinker_lock);
+	mutex_lock(&shrinker_lock);
 
 	seq_puts(m, "\t ");
 	for (i = 0; i < MAX_ORDER; ++i)
@@ -600,7 +600,7 @@ int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m)
 	seq_printf(m, "\ntotal\t: %8lu of %8lu\n",
 		   atomic_long_read(&allocated_pages), page_pool_size);
 
-	spin_unlock(&shrinker_lock);
+	mutex_unlock(&shrinker_lock);
 
 	return 0;
 }
@@ -644,7 +644,7 @@ int ttm_pool_mgr_init(unsigned long num_pages)
 	if (!page_pool_size)
 		page_pool_size = num_pages;
 
-	spin_lock_init(&shrinker_lock);
+	mutex_init(&shrinker_lock);
 	INIT_LIST_HEAD(&shrinker_list);
 
 	for (i = 0; i < MAX_ORDER; ++i) {
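The lock-type swap is the substance of these hunks: the shrinker frees pages while holding shrinker_lock, and the page-free path can sleep (DMA unmap, caching-attribute changes), which is forbidden under a spinlock. A minimal sketch of the resulting pattern, assuming only the generic locking API (names are illustrative):

#include <linux/list.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(example_lock);	/* sketch; ttm_pool uses mutex_init() */
static LIST_HEAD(example_list);

static void example_shrink_one(void)
{
	mutex_lock(&example_lock);	/* may sleep: fine in shrinker context */
	/* rotate example_list and free a page; the free path may sleep,
	 * which is legal under a mutex but a bug under a spinlock */
	mutex_unlock(&example_lock);
}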