drm fixes for 5.10-rc3

fonts:
 - constify font structures.

MAINTAINERS:
 - Fix path for amdgpu power management

amdgpu:
 - Add support for more navi1x SKUs
 - Fix for suspend on CI dGPUs
 - VCN DPG fix for Picasso
 - Sienna Cichlid fixes
 - Polaris DPM fix
 - Add support for Green Sardine

amdkfd:
 - Fix an allocation failure check

i915:
 - Fix set domain's cache coherency
 - Fixes around breadcrumbs
 - Fix encoder lookup during PSR atomic check
 - Hold onto an explicit ref to i915_vma_work.pinned
 - gvt: HWSP reset handling fix
 - gvt: flush workaround
 - gvt: vGPU context pin/unpin
 - gvt: mmio cmd access fix for bxt/apl

imx:
 - drop unused functions and callbacks
 - reuse imx_drm_encoder_parse_of
 - spinlock rework
 - memory leak fix
 - minor cleanups

vc4:
 - resource cleanup fix

panfrost:
 - madvise/shrinker fix

-----BEGIN PGP SIGNATURE-----

iQIcBAABAgAGBQJfpM3LAAoJEAx081l5xIa++3UP/R/kUk6lYb0eMLCp9YHS8a7k
2eO5uJ+z4enGxuTOWDDnb1hwpJLRIPY/r6k5t/F7li2Y9xdfyEySmJtZ+eMyfClo
AtVRWhyxrytXkWHqsL21XIBY4TGrNYPuXpX1o1SwgZRJl2ijMs3s/jay/JXuHXce
2OKG3jwRxfVKOSlJBkAGqYJ25bfJSEg0lX37+Rajc5t6cU07w7DhgHY7wOv+FajI
RWwWWpXed1/pQS8fGctbZlqoNQm1MnPgd8yYxPyhmqHN1B7eKkjSezfecsrF+49Q
8sZkHPW43yPYl4zD9CTfsItI18JMwplQEDCJCNtPBGYCxW89H7P54poPMUwsjkRn
0eJcrt4kIdx57IP6fUdD2aJ+FHLpV964V1rFfkEXF58yosb3t+G894l1U0SD7jor
qv8E8xPKKw4uzUOeRBcz4d56wkhx0MPPkNf40BYZRRMUJ8ooUrdFViAaDjLwHMjl
fHQtafsGA6Q8FMOtYAu72PTAKepRSvUf8rYMjn6cJtM/VK9ZOYgShDIsYZcRO/Pa
WSvikPIYG6ggHmqElO9CWCaOByZcFMot7SYwcaK4heq5r+fjr0QmcE9D7Tbfsd03
V0RmyXevDahOW8jHKnp7Fzy/oPjFby+eWis6Hq/IxPgsZQo8mED1OnfqwRrWDiWf
dfGAGkIcXYNIF4Y8Gz5I
=6eUB
-----END PGP SIGNATURE-----

Merge tag 'drm-fixes-2020-11-06-1' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
 "It's Friday here so that means another installment of drm fixes to
  distract you from the counting process. Changes all over the place;
  the amdgpu changes contain support for a new GPU that is close to a
  current one already in the tree (Green Sardine), so it shouldn't have
  many side effects. Otherwise imx has a few cleanup patches and fixes,
  amdgpu and i915 have around the usual smattering of fixes, fonts got
  constified, and vc4/panfrost have some minor fixes. All in all a
  fairly regular rc3.

  We have an outstanding nouveau regression, but the author is looking
  into the fix, so it should be here next week. I now return you to
  counting.

  fonts:
   - constify font structures.

  MAINTAINERS:
   - Fix path for amdgpu power management

  amdgpu:
   - Add support for more navi1x SKUs
   - Fix for suspend on CI dGPUs
   - VCN DPG fix for Picasso
   - Sienna Cichlid fixes
   - Polaris DPM fix
   - Add support for Green Sardine

  amdkfd:
   - Fix an allocation failure check

  i915:
   - Fix set domain's cache coherency
   - Fixes around breadcrumbs
   - Fix encoder lookup during PSR atomic check
   - Hold onto an explicit ref to i915_vma_work.pinned
   - gvt: HWSP reset handling fix
   - gvt: flush workaround
   - gvt: vGPU context pin/unpin
   - gvt: mmio cmd access fix for bxt/apl

  imx:
   - drop unused functions and callbacks
   - reuse imx_drm_encoder_parse_of
   - spinlock rework
   - memory leak fix
   - minor cleanups

  vc4:
   - resource cleanup fix

  panfrost:
   - madvise/shrinker fix"

* tag 'drm-fixes-2020-11-06-1' of git://anongit.freedesktop.org/drm/drm: (55 commits)
  drm/amdgpu/display: remove DRM_AMD_DC_GREEN_SARDINE
  drm/amd/display: Add green_sardine support to DM
  drm/amd/display: Add green_sardine support to DC
  drm/amdgpu: enable vcn support for green_sardine (v2)
  drm/amdgpu: enable green_sardine_asd.bin loading (v2)
  drm/amdgpu/sdma: add sdma engine support for green_sardine (v2)
  drm/amdgpu: add gfx support for green_sardine (v2)
  drm/amdgpu: add soc15 common ip block support for green_sardine (v3)
  drm/amdgpu: add green_sardine support for gpu_info and ip block setting (v2)
  drm/amdgpu: add Green_Sardine APU flag
  drm/amdgpu: resolved ASD loading issue on sienna
  amdkfd: Check kvmalloc return before memcpy
  drm/amdgpu: update golden setting for sienna_cichlid
  amd/amdgpu: Disable VCN DPG mode for Picasso
  drm/amdgpu/swsmu: remove duplicate call to smu_set_default_dpm_table
  drm/i915: Hold onto an explicit ref to i915_vma_work.pinned
  drm/i915/gt: Flush xcs before tgl breadcrumbs
  drm/i915/gt: Expose more parameters for emitting writes into the ring
  drm/i915: Fix encoder lookup during PSR atomic check
  drm/i915/gt: Use the local HWSP offset during submission
  ...
commit fc7b66ef07
@@ -934,7 +934,7 @@ M:	Evan Quan <evan.quan@amd.com>
 L:	amd-gfx@lists.freedesktop.org
 S:	Supported
 T:	git git://people.freedesktop.org/~agd5f/linux
-F:	drivers/gpu/drm/amd/powerplay/
+F:	drivers/gpu/drm/amd/pm/powerplay/
 
 AMD SEATTLE DEVICE TREE SUPPORT
 M:	Brijesh Singh <brijeshkumar.singh@amd.com>
@@ -80,6 +80,7 @@ MODULE_FIRMWARE("amdgpu/renoir_gpu_info.bin");
 MODULE_FIRMWARE("amdgpu/navi10_gpu_info.bin");
 MODULE_FIRMWARE("amdgpu/navi14_gpu_info.bin");
 MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
+MODULE_FIRMWARE("amdgpu/green_sardine_gpu_info.bin");
 
 #define AMDGPU_RESUME_MS		2000
 
@@ -1805,7 +1806,10 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
 		chip_name = "arcturus";
 		break;
 	case CHIP_RENOIR:
-		chip_name = "renoir";
+		if (adev->apu_flags & AMD_APU_IS_RENOIR)
+			chip_name = "renoir";
+		else
+			chip_name = "green_sardine";
 		break;
 	case CHIP_NAVI10:
 		chip_name = "navi10";
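This CHIP_RENOIR-plus-flag dispatch recurs throughout the Green Sardine series: both APUs enumerate as CHIP_RENOIR and adev->apu_flags selects the firmware set. A minimal, self-contained sketch of the pattern, with hypothetical names that are not from the kernel tree:

	/* Hypothetical illustration of the flag-dispatch pattern above. */
	#include <stdio.h>

	enum chip { CHIP_EXAMPLE };
	#define APU_IS_VARIANT_A 0x1u

	static const char *pick_fw(enum chip chip, unsigned int apu_flags)
	{
		switch (chip) {
		case CHIP_EXAMPLE:
			/* One chip enum, two silicon variants: the flag decides. */
			return (apu_flags & APU_IS_VARIANT_A) ? "variant_a.bin"
							      : "variant_b.bin";
		}
		return NULL;
	}

	int main(void)
	{
		printf("%s\n", pick_fw(CHIP_EXAMPLE, APU_IS_VARIANT_A)); /* variant_a.bin */
		printf("%s\n", pick_fw(CHIP_EXAMPLE, 0));                /* variant_b.bin */
		return 0;
	}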
@@ -2524,6 +2524,7 @@ int parse_ta_bin_descriptor(struct psp_context *psp,
 		psp->asd_feature_version = le32_to_cpu(desc->fw_version);
 		psp->asd_ucode_size = le32_to_cpu(desc->size_bytes);
 		psp->asd_start_addr = ucode_start_addr;
+		psp->asd_fw = psp->ta_fw;
 		break;
 	case TA_FW_TYPE_PSP_XGMI:
 		psp->ta_xgmi_ucode_version = le32_to_cpu(desc->fw_version);
@@ -39,6 +39,7 @@
 #define FIRMWARE_RAVEN2		"amdgpu/raven2_vcn.bin"
 #define FIRMWARE_ARCTURUS	"amdgpu/arcturus_vcn.bin"
 #define FIRMWARE_RENOIR		"amdgpu/renoir_vcn.bin"
+#define FIRMWARE_GREEN_SARDINE	"amdgpu/green_sardine_vcn.bin"
 #define FIRMWARE_NAVI10		"amdgpu/navi10_vcn.bin"
 #define FIRMWARE_NAVI14		"amdgpu/navi14_vcn.bin"
 #define FIRMWARE_NAVI12		"amdgpu/navi12_vcn.bin"
@@ -50,6 +51,7 @@ MODULE_FIRMWARE(FIRMWARE_PICASSO);
 MODULE_FIRMWARE(FIRMWARE_RAVEN2);
 MODULE_FIRMWARE(FIRMWARE_ARCTURUS);
 MODULE_FIRMWARE(FIRMWARE_RENOIR);
+MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE);
 MODULE_FIRMWARE(FIRMWARE_NAVI10);
 MODULE_FIRMWARE(FIRMWARE_NAVI14);
 MODULE_FIRMWARE(FIRMWARE_NAVI12);
@@ -89,7 +91,11 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
 			adev->vcn.indirect_sram = true;
 		break;
 	case CHIP_RENOIR:
-		fw_name = FIRMWARE_RENOIR;
+		if (adev->apu_flags & AMD_APU_IS_RENOIR)
+			fw_name = FIRMWARE_RENOIR;
+		else
+			fw_name = FIRMWARE_GREEN_SARDINE;
+
 		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
 		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
 			adev->vcn.indirect_sram = true;
@@ -1336,11 +1336,13 @@ cik_asic_reset_method(struct amdgpu_device *adev)
 
 	switch (adev->asic_type) {
 	case CHIP_BONAIRE:
-	case CHIP_HAWAII:
 		/* disable baco reset until it works */
 		/* smu7_asic_get_baco_capability(adev, &baco_reset); */
 		baco_reset = false;
 		break;
+	case CHIP_HAWAII:
+		baco_reset = cik_asic_supports_baco(adev);
+		break;
 	default:
 		baco_reset = false;
 		break;
@@ -1071,22 +1071,19 @@ static int cik_sdma_soft_reset(void *handle)
 {
 	u32 srbm_soft_reset = 0;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	u32 tmp = RREG32(mmSRBM_STATUS2);
+	u32 tmp;
 
-	if (tmp & SRBM_STATUS2__SDMA_BUSY_MASK) {
-		/* sdma0 */
-		tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
-		tmp |= SDMA0_F32_CNTL__HALT_MASK;
-		WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp);
-		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK;
-	}
-	if (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK) {
-		/* sdma1 */
-		tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
-		tmp |= SDMA0_F32_CNTL__HALT_MASK;
-		WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp);
-		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK;
-	}
+	/* sdma0 */
+	tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
+	tmp |= SDMA0_F32_CNTL__HALT_MASK;
+	WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp);
+	srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK;
+
+	/* sdma1 */
+	tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
+	tmp |= SDMA0_F32_CNTL__HALT_MASK;
+	WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp);
+	srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK;
 
 	if (srbm_soft_reset) {
 		tmp = RREG32(mmSRBM_SOFT_RESET);
@@ -128,6 +128,9 @@
 #define PA_SC_ENHANCE_3__FORCE_PBB_WORKLOAD_MODE_TO_ZERO__SHIFT	0x3
 #define PA_SC_ENHANCE_3__FORCE_PBB_WORKLOAD_MODE_TO_ZERO_MASK	0x00000008L
 
+#define mmCGTT_SPI_CS_CLK_CTRL			0x507c
+#define mmCGTT_SPI_CS_CLK_CTRL_BASE_IDX		1
+
 MODULE_FIRMWARE("amdgpu/navi10_ce.bin");
 MODULE_FIRMWARE("amdgpu/navi10_pfp.bin");
 MODULE_FIRMWARE("amdgpu/navi10_me.bin");
@@ -3094,6 +3097,7 @@ static const struct soc15_reg_golden golden_settings_gc_rlc_spm_10_1_2_nv12[] =
 
 static const struct soc15_reg_golden golden_settings_gc_10_3[] =
 {
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CS_CLK_CTRL, 0x78000000, 0x78000100),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_PS_CLK_CTRL, 0xff7f0fff, 0x78000100),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_RA0_CLK_CTRL, 0xff7f0fff, 0x30000100),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_RA1_CLK_CTRL, 0xff7f0fff, 0x7e000100),
@@ -117,6 +117,13 @@ MODULE_FIRMWARE("amdgpu/renoir_mec.bin");
 MODULE_FIRMWARE("amdgpu/renoir_mec2.bin");
 MODULE_FIRMWARE("amdgpu/renoir_rlc.bin");
 
+MODULE_FIRMWARE("amdgpu/green_sardine_ce.bin");
+MODULE_FIRMWARE("amdgpu/green_sardine_pfp.bin");
+MODULE_FIRMWARE("amdgpu/green_sardine_me.bin");
+MODULE_FIRMWARE("amdgpu/green_sardine_mec.bin");
+MODULE_FIRMWARE("amdgpu/green_sardine_mec2.bin");
+MODULE_FIRMWARE("amdgpu/green_sardine_rlc.bin");
+
 #define mmTCP_CHAN_STEER_0_ARCT		0x0b03
 #define mmTCP_CHAN_STEER_0_ARCT_BASE_IDX	0
 #define mmTCP_CHAN_STEER_1_ARCT		0x0b04
@@ -1630,7 +1637,10 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
 		chip_name = "arcturus";
 		break;
 	case CHIP_RENOIR:
-		chip_name = "renoir";
+		if (adev->apu_flags & AMD_APU_IS_RENOIR)
+			chip_name = "renoir";
+		else
+			chip_name = "green_sardine";
 		break;
 	default:
 		BUG();
@@ -455,10 +455,11 @@ void nv_set_virt_ops(struct amdgpu_device *adev)
 	adev->virt.ops = &xgpu_nv_virt_ops;
 }
 
-static bool nv_is_blockchain_sku(struct pci_dev *pdev)
+static bool nv_is_headless_sku(struct pci_dev *pdev)
 {
-	if (pdev->device == 0x731E &&
-	    (pdev->revision == 0xC6 || pdev->revision == 0xC7))
+	if ((pdev->device == 0x731E &&
+	    (pdev->revision == 0xC6 || pdev->revision == 0xC7)) ||
+	    (pdev->device == 0x7340 && pdev->revision == 0xC9))
 		return true;
 	return false;
 }
@@ -492,7 +493,7 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
 		amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
 #if defined(CONFIG_DRM_AMD_DC)
 	else if (amdgpu_device_has_dc_support(adev) &&
-		 !nv_is_blockchain_sku(adev->pdev))
+		 !nv_is_headless_sku(adev->pdev))
 		amdgpu_device_ip_block_add(adev, &dm_ip_block);
 #endif
 	amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
@@ -500,7 +501,7 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
 	    !amdgpu_sriov_vf(adev))
 		amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
-	if (!nv_is_blockchain_sku(adev->pdev))
+	if (!nv_is_headless_sku(adev->pdev))
 		amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
 	amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
 	if (adev->enable_mes)
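nv_is_headless_sku() is a plain PCI (device, revision) allow-list; the rename reflects that the listed 0x731E/0x7340 SKUs ship without the display and VCN blocks being gated here. A small user-space sketch of the same matching logic, using only the values visible above (struct and function names are hypothetical):

	#include <stdbool.h>

	struct pci_id { unsigned short device; unsigned char revision; };

	/* Hypothetical analogue of nv_is_headless_sku(). */
	static bool is_headless(const struct pci_id *id)
	{
		if (id->device == 0x731E &&
		    (id->revision == 0xC6 || id->revision == 0xC7))
			return true;
		if (id->device == 0x7340 && id->revision == 0xC9)
			return true;
		return false;
	}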
@@ -39,6 +39,7 @@
 
 MODULE_FIRMWARE("amdgpu/renoir_asd.bin");
 MODULE_FIRMWARE("amdgpu/renoir_ta.bin");
+MODULE_FIRMWARE("amdgpu/green_sardine_asd.bin");
 
 /* address block */
 #define smnMP1_FIRMWARE_FLAGS		0x3010024
@@ -54,7 +55,10 @@ static int psp_v12_0_init_microcode(struct psp_context *psp)
 
 	switch (adev->asic_type) {
 	case CHIP_RENOIR:
-		chip_name = "renoir";
+		if (adev->apu_flags & AMD_APU_IS_RENOIR)
+			chip_name = "renoir";
+		else
+			chip_name = "green_sardine";
 		break;
 	default:
 		BUG();
@@ -69,6 +69,7 @@ MODULE_FIRMWARE("amdgpu/picasso_sdma.bin");
 MODULE_FIRMWARE("amdgpu/raven2_sdma.bin");
 MODULE_FIRMWARE("amdgpu/arcturus_sdma.bin");
 MODULE_FIRMWARE("amdgpu/renoir_sdma.bin");
+MODULE_FIRMWARE("amdgpu/green_sardine_sdma.bin");
 
 #define SDMA0_POWER_CNTL__ON_OFF_CONDITION_HOLD_TIME_MASK	0x000000F8L
 #define SDMA0_POWER_CNTL__ON_OFF_STATUS_DURATION_TIME_MASK	0xFC000000L
@@ -619,7 +620,10 @@ static int sdma_v4_0_init_microcode(struct amdgpu_device *adev)
 		chip_name = "arcturus";
 		break;
 	case CHIP_RENOIR:
-		chip_name = "renoir";
+		if (adev->apu_flags & AMD_APU_IS_RENOIR)
+			chip_name = "renoir";
+		else
+			chip_name = "green_sardine";
 		break;
 	default:
 		BUG();
@@ -1195,8 +1195,7 @@ static int soc15_common_early_init(void *handle)
 
 			adev->pg_flags = AMD_PG_SUPPORT_SDMA |
 				AMD_PG_SUPPORT_MMHUB |
-				AMD_PG_SUPPORT_VCN |
-				AMD_PG_SUPPORT_VCN_DPG;
+				AMD_PG_SUPPORT_VCN;
 		} else {
 			adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
 				AMD_CG_SUPPORT_GFX_MGLS |
@@ -1243,7 +1242,15 @@ static int soc15_common_early_init(void *handle)
 		break;
 	case CHIP_RENOIR:
 		adev->asic_funcs = &soc15_asic_funcs;
-		adev->apu_flags |= AMD_APU_IS_RENOIR;
+		if (adev->pdev->device == 0x1636)
+			adev->apu_flags |= AMD_APU_IS_RENOIR;
+		else
+			adev->apu_flags |= AMD_APU_IS_GREEN_SARDINE;
+
+		if (adev->apu_flags & AMD_APU_IS_RENOIR)
+			adev->external_rev_id = adev->rev_id + 0x91;
+		else
+			adev->external_rev_id = adev->rev_id + 0xa1;
 		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
 			AMD_CG_SUPPORT_GFX_MGLS |
 			AMD_CG_SUPPORT_GFX_3D_CGCG |
@@ -1268,7 +1275,6 @@ static int soc15_common_early_init(void *handle)
 			AMD_PG_SUPPORT_VCN |
 			AMD_PG_SUPPORT_JPEG |
 			AMD_PG_SUPPORT_VCN_DPG;
-		adev->external_rev_id = adev->rev_id + 0x91;
 		break;
 	default:
 		/* FIXME: not supported yet */
@@ -798,10 +798,10 @@ int kfd_create_crat_image_acpi(void **crat_image, size_t *size)
 	}
 
 	pcrat_image = kvmalloc(crat_table->length, GFP_KERNEL);
-	memcpy(pcrat_image, crat_table, crat_table->length);
 	if (!pcrat_image)
 		return -ENOMEM;
 
+	memcpy(pcrat_image, crat_table, crat_table->length);
 	*crat_image = pcrat_image;
 	*size = crat_table->length;
 
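The amdkfd fix above is the classic allocation-check ordering bug: the memcpy() ran before the kvmalloc() result was tested. A minimal user-space sketch of the corrected ordering (the helper name is hypothetical):

	#include <stdlib.h>
	#include <string.h>

	/* Hypothetical helper showing check-before-use:
	 * malloc() stands in for kvmalloc(). */
	static void *dup_table(const void *table, size_t len)
	{
		void *image = malloc(len);

		if (!image)          /* check BEFORE memcpy() */
			return NULL;

		memcpy(image, table, len);
		return image;
	}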
@@ -100,6 +100,8 @@ MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
 #endif
+#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
+MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
 
 #define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
@@ -973,6 +975,8 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 	case CHIP_RAVEN:
 	case CHIP_RENOIR:
 		init_data.flags.gpu_vm_support = true;
+		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
+			init_data.flags.disable_dmcu = true;
 		break;
 	default:
 		break;
@@ -1267,6 +1271,8 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
 	case CHIP_RENOIR:
 		dmub_asic = DMUB_ASIC_DCN21;
 		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
+		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
+			fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
 		break;
 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
 	case CHIP_SIENNA_CICHLID:
@@ -166,6 +166,11 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *pp_smu, struct dccg *dccg)
 			rn_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
 			break;
 		}
+
+		if (ASICREV_IS_GREEN_SARDINE(asic_id.hw_internal_rev)) {
+			rn_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
+			break;
+		}
 		if (ASICREV_IS_RAVEN2(asic_id.hw_internal_rev)) {
 			rv2_clk_mgr_construct(ctx, clk_mgr, pp_smu);
 			break;
@@ -120,6 +120,8 @@ enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id)
 			dc_version = DCN_VERSION_1_01;
 		if (ASICREV_IS_RENOIR(asic_id.hw_internal_rev))
 			dc_version = DCN_VERSION_2_1;
+		if (ASICREV_IS_GREEN_SARDINE(asic_id.hw_internal_rev))
+			dc_version = DCN_VERSION_2_1;
 		break;
 #endif
 
@@ -205,6 +205,10 @@ enum {
 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
 #define ASICREV_IS_SIENNA_CICHLID_P(eChipRev)        ((eChipRev >= NV_SIENNA_CICHLID_P_A0))
 #endif
+#define GREEN_SARDINE_A0 0xA1
+#ifndef ASICREV_IS_GREEN_SARDINE
+#define ASICREV_IS_GREEN_SARDINE(eChipRev) ((eChipRev >= GREEN_SARDINE_A0) && (eChipRev < 0xFF))
+#endif
 
 /*
  * ASIC chip ID
@@ -45,6 +45,7 @@ enum amd_apu_flags {
 	AMD_APU_IS_RAVEN2 = 0x00000002UL,
 	AMD_APU_IS_PICASSO = 0x00000004UL,
 	AMD_APU_IS_RENOIR = 0x00000008UL,
+	AMD_APU_IS_GREEN_SARDINE = 0x00000010UL,
 };
 
 /**
@@ -229,6 +229,7 @@ struct pp_smumgr_func {
 	bool (*is_hw_avfs_present)(struct pp_hwmgr *hwmgr);
 	int (*update_dpm_settings)(struct pp_hwmgr *hwmgr, void *profile_setting);
 	int (*smc_table_manager)(struct pp_hwmgr *hwmgr, uint8_t *table, uint16_t table_id, bool rw); /*rw: true for read, false for write */
+	int (*stop_smc)(struct pp_hwmgr *hwmgr);
 };
 
 struct pp_hwmgr_func {
@@ -113,4 +113,6 @@ extern int smum_update_dpm_settings(struct pp_hwmgr *hwmgr, void *profile_setting);
 
 extern int smum_smc_table_manager(struct pp_hwmgr *hwmgr, uint8_t *table, uint16_t table_id, bool rw);
 
+extern int smum_stop_smc(struct pp_hwmgr *hwmgr);
+
 #endif
@@ -142,12 +142,12 @@ static const struct baco_cmd_entry exit_baco_tbl[] =
 	{ CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, BACO_CNTL__BACO_BCLK_OFF__SHIFT, 0, 0x00 },
 	{ CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0x00 },
 	{ CMD_DELAY_MS, 0, 0, 0, 20, 0 },
-	{ CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_BF_MASK, 0, 0xffffffff, 0x20 },
+	{ CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_BF_MASK, 0, 0xffffffff, 0x200 },
 	{ CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, BACO_CNTL__BACO_ISO_DIS__SHIFT, 0, 0x01 },
-	{ CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_MASK, 0, 5, 0x1c },
+	{ CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_MASK, 0, 5, 0x1c00 },
 	{ CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, BACO_CNTL__BACO_ANA_ISO_DIS__SHIFT, 0, 0x01 },
 	{ CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x00 },
-	{ CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__RCU_BIF_CONFIG_DONE_MASK, 0, 5, 0x10 },
+	{ CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__RCU_BIF_CONFIG_DONE_MASK, 0, 5, 0x100 },
 	{ CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 0x00 },
 	{ CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0x00 }
 };
@@ -155,6 +155,7 @@ static const struct baco_cmd_entry exit_baco_tbl[] =
 static const struct baco_cmd_entry clean_baco_tbl[] =
 {
 	{ CMD_WRITE, mmBIOS_SCRATCH_6, 0, 0, 0, 0 },
 	{ CMD_WRITE, mmBIOS_SCRATCH_7, 0, 0, 0, 0 },
+	{ CMD_WRITE, mmCP_PFP_UCODE_ADDR, 0, 0, 0, 0 }
 };
 
@@ -1541,6 +1541,10 @@ static int smu7_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
 	PP_ASSERT_WITH_CODE((tmp_result == 0),
 			"Failed to reset to default!", result = tmp_result);
 
+	tmp_result = smum_stop_smc(hwmgr);
+	PP_ASSERT_WITH_CODE((tmp_result == 0),
+			"Failed to stop smc!", result = tmp_result);
+
 	tmp_result = smu7_force_switch_to_arbf0(hwmgr);
 	PP_ASSERT_WITH_CODE((tmp_result == 0),
 			"Failed to force to switch arbf0!", result = tmp_result);
@@ -1585,18 +1589,24 @@ static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)
 	data->current_profile_setting.sclk_down_hyst = 100;
 	data->current_profile_setting.sclk_activity = SMU7_SCLK_TARGETACTIVITY_DFLT;
 	data->current_profile_setting.bupdate_mclk = 1;
-	if (adev->gmc.vram_width == 256) {
-		data->current_profile_setting.mclk_up_hyst = 10;
-		data->current_profile_setting.mclk_down_hyst = 60;
-		data->current_profile_setting.mclk_activity = 25;
-	} else if (adev->gmc.vram_width == 128) {
-		data->current_profile_setting.mclk_up_hyst = 5;
-		data->current_profile_setting.mclk_down_hyst = 16;
-		data->current_profile_setting.mclk_activity = 20;
-	} else if (adev->gmc.vram_width == 64) {
-		data->current_profile_setting.mclk_up_hyst = 3;
-		data->current_profile_setting.mclk_down_hyst = 16;
-		data->current_profile_setting.mclk_activity = 20;
+	if (hwmgr->chip_id >= CHIP_POLARIS10) {
+		if (adev->gmc.vram_width == 256) {
+			data->current_profile_setting.mclk_up_hyst = 10;
+			data->current_profile_setting.mclk_down_hyst = 60;
+			data->current_profile_setting.mclk_activity = 25;
+		} else if (adev->gmc.vram_width == 128) {
+			data->current_profile_setting.mclk_up_hyst = 5;
+			data->current_profile_setting.mclk_down_hyst = 16;
+			data->current_profile_setting.mclk_activity = 20;
+		} else if (adev->gmc.vram_width == 64) {
+			data->current_profile_setting.mclk_up_hyst = 3;
+			data->current_profile_setting.mclk_down_hyst = 16;
+			data->current_profile_setting.mclk_activity = 20;
+		}
 	} else {
 		data->current_profile_setting.mclk_up_hyst = 0;
 		data->current_profile_setting.mclk_down_hyst = 100;
 		data->current_profile_setting.mclk_activity = SMU7_MCLK_TARGETACTIVITY_DFLT;
 	}
 	hwmgr->workload_mask = 1 << hwmgr->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D];
 	hwmgr->power_profile_mode = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
@@ -2726,10 +2726,7 @@ static int ci_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
 
 static bool ci_is_dpm_running(struct pp_hwmgr *hwmgr)
 {
-	return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
-					     CGS_IND_REG__SMC, FEATURE_STATUS,
-					     VOLTAGE_CONTROLLER_ON))
-		? true : false;
+	return ci_is_smc_ram_running(hwmgr);
 }
 
 static int ci_smu_init(struct pp_hwmgr *hwmgr)
@@ -2939,6 +2936,29 @@ static int ci_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
 	return 0;
 }
 
+static void ci_reset_smc(struct pp_hwmgr *hwmgr)
+{
+	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+				  SMC_SYSCON_RESET_CNTL,
+				  rst_reg, 1);
+}
+
+
+static void ci_stop_smc_clock(struct pp_hwmgr *hwmgr)
+{
+	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+				  SMC_SYSCON_CLOCK_CNTL_0,
+				  ck_disable, 1);
+}
+
+static int ci_stop_smc(struct pp_hwmgr *hwmgr)
+{
+	ci_reset_smc(hwmgr);
+	ci_stop_smc_clock(hwmgr);
+
+	return 0;
+}
+
 const struct pp_smumgr_func ci_smu_funcs = {
 	.name = "ci_smu",
 	.smu_init = ci_smu_init,
@@ -2964,4 +2984,5 @@ const struct pp_smumgr_func ci_smu_funcs = {
 	.is_dpm_running = ci_is_dpm_running,
 	.update_dpm_settings = ci_update_dpm_settings,
 	.update_smc_table = ci_update_smc_table,
+	.stop_smc = ci_stop_smc,
 };
@@ -245,3 +245,11 @@ int smum_smc_table_manager(struct pp_hwmgr *hwmgr, uint8_t *table, uint16_t table_id, bool rw)
 
 	return -EINVAL;
 }
+
+int smum_stop_smc(struct pp_hwmgr *hwmgr)
+{
+	if (hwmgr->smumgr_funcs->stop_smc)
+		return hwmgr->smumgr_funcs->stop_smc(hwmgr);
+
+	return 0;
+}
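smum_stop_smc() follows the optional-callback convention used throughout powerplay: an absent hook means there is nothing to stop, so only the CI backend has to implement it. A short sketch of that convention (hypothetical names, not the powerplay API):

	/* Hypothetical optional-callback dispatch: NULL hook == success. */
	struct smu_funcs {
		int (*stop_smc)(void *ctx);	/* may be NULL */
	};

	static int smu_stop(const struct smu_funcs *funcs, void *ctx)
	{
		if (funcs->stop_smc)
			return funcs->stop_smc(ctx);

		return 0;	/* nothing to do on hardware without the hook */
	}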
@@ -1029,17 +1029,6 @@ static int smu_smc_hw_setup(struct smu_context *smu)
 		return ret;
 	}
 
-	/*
-	 * Set initialized values (get from vbios) to dpm tables context such as
-	 * gfxclk, memclk, dcefclk, and etc. And enable the DPM feature for each
-	 * type of clks.
-	 */
-	ret = smu_set_default_dpm_table(smu);
-	if (ret) {
-		dev_err(adev->dev, "Failed to setup default dpm clock tables!\n");
-		return ret;
-	}
-
 	ret = smu_notify_display_change(smu);
 	if (ret)
 		return ret;
@@ -1754,7 +1754,7 @@ void intel_psr_atomic_check(struct drm_connector *connector,
 		return;
 
 	intel_connector = to_intel_connector(connector);
-	dig_port = enc_to_dig_port(intel_attached_encoder(intel_connector));
+	dig_port = enc_to_dig_port(to_intel_encoder(new_state->best_encoder));
 	if (dev_priv->psr.dp != &dig_port->dp)
 		return;
 
@@ -508,21 +508,6 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
 	if (!obj)
 		return -ENOENT;
 
-	/*
-	 * Already in the desired write domain? Nothing for us to do!
-	 *
-	 * We apply a little bit of cunning here to catch a broader set of
-	 * no-ops. If obj->write_domain is set, we must be in the same
-	 * obj->read_domains, and only that domain. Therefore, if that
-	 * obj->write_domain matches the request read_domains, we are
-	 * already in the same read/write domain and can skip the operation,
-	 * without having to further check the requested write_domain.
-	 */
-	if (READ_ONCE(obj->write_domain) == read_domains) {
-		err = 0;
-		goto out;
-	}
-
 	/*
 	 * Try to flush the object off the GPU without holding the lock.
 	 * We will repeat the flush holding the lock in the normal manner
@@ -560,6 +545,19 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
 	if (err)
 		goto out;
 
+	/*
+	 * Already in the desired write domain? Nothing for us to do!
+	 *
+	 * We apply a little bit of cunning here to catch a broader set of
+	 * no-ops. If obj->write_domain is set, we must be in the same
+	 * obj->read_domains, and only that domain. Therefore, if that
+	 * obj->write_domain matches the request read_domains, we are
+	 * already in the same read/write domain and can skip the operation,
+	 * without having to further check the requested write_domain.
+	 */
+	if (READ_ONCE(obj->write_domain) == read_domains)
+		goto out_unpin;
+
 	err = i915_gem_object_lock_interruptible(obj, NULL);
 	if (err)
 		goto out_unpin;
@@ -245,22 +245,14 @@ static inline u32 *gen12_emit_pipe_control(u32 *batch, u32 flags0, u32 flags1, u32 offset)
 }
 
 static inline u32 *
-__gen8_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset, u32 flags0, u32 flags1)
+__gen8_emit_write_rcs(u32 *cs, u32 value, u32 offset, u32 flags0, u32 flags1)
 {
-	/* We're using qword write, offset should be aligned to 8 bytes. */
-	GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));
-
-	/* w/a for post sync ops following a GPGPU operation we
-	 * need a prior CS_STALL, which is emitted by the flush
-	 * following the batch.
-	 */
 	*cs++ = GFX_OP_PIPE_CONTROL(6) | flags0;
-	*cs++ = flags1 | PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_GLOBAL_GTT_IVB;
-	*cs++ = gtt_offset;
+	*cs++ = flags1 | PIPE_CONTROL_QW_WRITE;
+	*cs++ = offset;
 	*cs++ = 0;
 	*cs++ = value;
-	/* We're thrashing one dword of HWS. */
-	*cs++ = 0;
+	*cs++ = 0; /* We're thrashing one extra dword. */
 
 	return cs;
 }
@@ -268,13 +260,38 @@ __gen8_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset, u32 flags0, u32 flags1)
 static inline u32*
 gen8_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset, u32 flags)
 {
-	return __gen8_emit_ggtt_write_rcs(cs, value, gtt_offset, 0, flags);
+	/* We're using qword write, offset should be aligned to 8 bytes. */
+	GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));
+
+	return __gen8_emit_write_rcs(cs,
+				     value,
+				     gtt_offset,
+				     0,
+				     flags | PIPE_CONTROL_GLOBAL_GTT_IVB);
 }
 
 static inline u32*
 gen12_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset, u32 flags0, u32 flags1)
 {
-	return __gen8_emit_ggtt_write_rcs(cs, value, gtt_offset, flags0, flags1);
+	/* We're using qword write, offset should be aligned to 8 bytes. */
+	GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));
+
+	return __gen8_emit_write_rcs(cs,
+				     value,
+				     gtt_offset,
+				     flags0,
+				     flags1 | PIPE_CONTROL_GLOBAL_GTT_IVB);
+}
+
+static inline u32 *
+__gen8_emit_flush_dw(u32 *cs, u32 value, u32 gtt_offset, u32 flags)
+{
+	*cs++ = (MI_FLUSH_DW + 1) | flags;
+	*cs++ = gtt_offset;
+	*cs++ = 0;
+	*cs++ = value;
+
+	return cs;
 }
 
 static inline u32 *
@@ -285,12 +302,10 @@ gen8_emit_ggtt_write(u32 *cs, u32 value, u32 gtt_offset, u32 flags)
 	/* Offset should be aligned to 8 bytes for both (QW/DW) write types */
 	GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));
 
-	*cs++ = (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW | flags;
-	*cs++ = gtt_offset | MI_FLUSH_DW_USE_GTT;
-	*cs++ = 0;
-	*cs++ = value;
-
-	return cs;
+	return __gen8_emit_flush_dw(cs,
+				    value,
+				    gtt_offset | MI_FLUSH_DW_USE_GTT,
+				    flags | MI_FLUSH_DW_OP_STOREDW);
 }
 
 static inline void __intel_engine_reset(struct intel_engine_cs *engine,
@@ -3547,6 +3547,19 @@ static const struct intel_context_ops execlists_context_ops = {
 	.destroy = execlists_context_destroy,
 };
 
+static u32 hwsp_offset(const struct i915_request *rq)
+{
+	const struct intel_timeline_cacheline *cl;
+
+	/* Before the request is executed, the timeline/cachline is fixed */
+
+	cl = rcu_dereference_protected(rq->hwsp_cacheline, 1);
+	if (cl)
+		return cl->ggtt_offset;
+
+	return rcu_dereference_protected(rq->timeline, 1)->hwsp_offset;
+}
+
 static int gen8_emit_init_breadcrumb(struct i915_request *rq)
 {
 	u32 *cs;
@@ -3569,7 +3582,7 @@ static int gen8_emit_init_breadcrumb(struct i915_request *rq)
 	*cs++ = MI_NOOP;
 
 	*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
-	*cs++ = i915_request_timeline(rq)->hwsp_offset;
+	*cs++ = hwsp_offset(rq);
 	*cs++ = 0;
 	*cs++ = rq->fence.seqno - 1;
 
@@ -4886,11 +4899,9 @@ gen8_emit_fini_breadcrumb_tail(struct i915_request *request, u32 *cs)
 	return gen8_emit_wa_tail(request, cs);
 }
 
-static u32 *emit_xcs_breadcrumb(struct i915_request *request, u32 *cs)
+static u32 *emit_xcs_breadcrumb(struct i915_request *rq, u32 *cs)
 {
-	u32 addr = i915_request_active_timeline(request)->hwsp_offset;
-
-	return gen8_emit_ggtt_write(cs, request->fence.seqno, addr, 0);
+	return gen8_emit_ggtt_write(cs, rq->fence.seqno, hwsp_offset(rq), 0);
 }
 
 static u32 *gen8_emit_fini_breadcrumb(struct i915_request *rq, u32 *cs)
@@ -4909,7 +4920,7 @@ static u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs)
 	/* XXX flush+write+CS_STALL all in one upsets gem_concurrent_blt:kbl */
 	cs = gen8_emit_ggtt_write_rcs(cs,
 				      request->fence.seqno,
-				      i915_request_active_timeline(request)->hwsp_offset,
+				      hwsp_offset(request),
 				      PIPE_CONTROL_FLUSH_ENABLE |
 				      PIPE_CONTROL_CS_STALL);
 
@@ -4921,7 +4932,7 @@ gen11_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs)
 {
 	cs = gen8_emit_ggtt_write_rcs(cs,
 				      request->fence.seqno,
-				      i915_request_active_timeline(request)->hwsp_offset,
+				      hwsp_offset(request),
 				      PIPE_CONTROL_CS_STALL |
 				      PIPE_CONTROL_TILE_CACHE_FLUSH |
 				      PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
@@ -4983,7 +4994,9 @@ gen12_emit_fini_breadcrumb_tail(struct i915_request *request, u32 *cs)
 
 static u32 *gen12_emit_fini_breadcrumb(struct i915_request *rq, u32 *cs)
 {
-	return gen12_emit_fini_breadcrumb_tail(rq, emit_xcs_breadcrumb(rq, cs));
+	/* XXX Stalling flush before seqno write; post-sync not */
+	cs = emit_xcs_breadcrumb(rq, __gen8_emit_flush_dw(cs, 0, 0, 0));
+	return gen12_emit_fini_breadcrumb_tail(rq, cs);
 }
 
 static u32 *
@@ -4991,7 +5004,7 @@ gen12_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs)
 {
 	cs = gen12_emit_ggtt_write_rcs(cs,
 				       request->fence.seqno,
-				       i915_request_active_timeline(request)->hwsp_offset,
+				       hwsp_offset(request),
 				       PIPE_CONTROL0_HDC_PIPELINE_FLUSH,
 				       PIPE_CONTROL_CS_STALL |
 				       PIPE_CONTROL_TILE_CACHE_FLUSH |
@@ -188,10 +188,14 @@ cacheline_alloc(struct intel_timeline_hwsp *hwsp, unsigned int cacheline)
 	return cl;
 }
 
-static void cacheline_acquire(struct intel_timeline_cacheline *cl)
+static void cacheline_acquire(struct intel_timeline_cacheline *cl,
+			      u32 ggtt_offset)
 {
-	if (cl)
-		i915_active_acquire(&cl->active);
+	if (!cl)
+		return;
+
+	cl->ggtt_offset = ggtt_offset;
+	i915_active_acquire(&cl->active);
 }
 
 static void cacheline_release(struct intel_timeline_cacheline *cl)
@@ -340,7 +344,7 @@ int intel_timeline_pin(struct intel_timeline *tl, struct i915_gem_ww_ctx *ww)
 	GT_TRACE(tl->gt, "timeline:%llx using HWSP offset:%x\n",
 		 tl->fence_context, tl->hwsp_offset);
 
-	cacheline_acquire(tl->hwsp_cacheline);
+	cacheline_acquire(tl->hwsp_cacheline, tl->hwsp_offset);
 	if (atomic_fetch_inc(&tl->pin_count)) {
 		cacheline_release(tl->hwsp_cacheline);
 		__i915_vma_unpin(tl->hwsp_ggtt);
@@ -515,7 +519,7 @@ __intel_timeline_get_seqno(struct intel_timeline *tl,
 	GT_TRACE(tl->gt, "timeline:%llx using HWSP offset:%x\n",
 		 tl->fence_context, tl->hwsp_offset);
 
-	cacheline_acquire(cl);
+	cacheline_acquire(cl, tl->hwsp_offset);
 	tl->hwsp_cacheline = cl;
 
 	*seqno = timeline_advance(tl);
@@ -573,9 +577,7 @@ int intel_timeline_read_hwsp(struct i915_request *from,
 	if (err)
 		goto out;
 
-	*hwsp = i915_ggtt_offset(cl->hwsp->vma) +
-		ptr_unmask_bits(cl->vaddr, CACHELINE_BITS) * CACHELINE_BYTES;
-
+	*hwsp = cl->ggtt_offset;
 out:
 	i915_active_release(&cl->active);
 	return err;
@@ -94,6 +94,8 @@ struct intel_timeline_cacheline {
 	struct intel_timeline_hwsp *hwsp;
 	void *vaddr;
 
+	u32 ggtt_offset;
+
 	struct rcu_head rcu;
 };
 
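The new ggtt_offset field is the heart of the HWSP fix: the offset is snapshotted once, at cacheline_acquire() time, so later readers such as intel_timeline_read_hwsp() see the value the request was actually emitted with, rather than recomputing it from state that may have moved. A reduced sketch of the idea (hypothetical types, not the i915 structures):

	/* Hypothetical snapshot-at-acquire pattern. */
	struct cacheline {
		unsigned int ggtt_offset;	/* valid while acquired */
		int active;
	};

	static void cacheline_acquire(struct cacheline *cl,
				      unsigned int ggtt_offset)
	{
		if (!cl)
			return;

		cl->ggtt_offset = ggtt_offset;	/* record before publishing */
		cl->active++;
	}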
@@ -1489,7 +1489,8 @@ static int hws_pga_write(struct intel_vgpu *vgpu, unsigned int offset,
 	const struct intel_engine_cs *engine =
 		intel_gvt_render_mmio_to_engine(vgpu->gvt, offset);
 
-	if (!intel_gvt_ggtt_validate_range(vgpu, value, I915_GTT_PAGE_SIZE)) {
+	if (value != 0 &&
+	    !intel_gvt_ggtt_validate_range(vgpu, value, I915_GTT_PAGE_SIZE)) {
 		gvt_vgpu_err("write invalid HWSP address, reg:0x%x, value:0x%x\n",
 			     offset, value);
 		return -EINVAL;
@@ -1650,6 +1651,34 @@ static int edp_psr_imr_iir_write(struct intel_vgpu *vgpu,
 	return 0;
 }
 
+/**
+ * FixMe:
+ * If guest fills non-priv batch buffer on ApolloLake/Broxton as Mesa i965 did:
+ * 717e7539124d (i965: Use a WC map and memcpy for the batch instead of pwrite.)
+ * Due to the missing flush of bb filled by VM vCPU, host GPU hangs on executing
+ * these MI_BATCH_BUFFER.
+ * Temporarily workaround this by setting SNOOP bit for PAT3 used by PPGTT
+ * PML4 PTE: PAT(0) PCD(1) PWT(1).
+ * The performance is still expected to be low, will need further improvement.
+ */
+static int bxt_ppat_low_write(struct intel_vgpu *vgpu, unsigned int offset,
+			      void *p_data, unsigned int bytes)
+{
+	u64 pat =
+		GEN8_PPAT(0, CHV_PPAT_SNOOP) |
+		GEN8_PPAT(1, 0) |
+		GEN8_PPAT(2, 0) |
+		GEN8_PPAT(3, CHV_PPAT_SNOOP) |
+		GEN8_PPAT(4, CHV_PPAT_SNOOP) |
+		GEN8_PPAT(5, CHV_PPAT_SNOOP) |
+		GEN8_PPAT(6, CHV_PPAT_SNOOP) |
+		GEN8_PPAT(7, CHV_PPAT_SNOOP);
+
+	vgpu_vreg(vgpu, offset) = lower_32_bits(pat);
+
+	return 0;
+}
+
 static int guc_status_read(struct intel_vgpu *vgpu,
 			   unsigned int offset, void *p_data,
 			   unsigned int bytes)
@@ -2812,7 +2841,7 @@ static int init_bdw_mmio_info(struct intel_gvt *gvt)
 
 	MMIO_DH(GEN6_PCODE_MAILBOX, D_BDW_PLUS, NULL, mailbox_write);
 
-	MMIO_D(GEN8_PRIVATE_PAT_LO, D_BDW_PLUS);
+	MMIO_D(GEN8_PRIVATE_PAT_LO, D_BDW_PLUS & ~D_BXT);
 	MMIO_D(GEN8_PRIVATE_PAT_HI, D_BDW_PLUS);
 
 	MMIO_D(GAMTARBMODE, D_BDW_PLUS);
@@ -3139,7 +3168,7 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
 		 NULL, NULL);
 
 	MMIO_DFH(GAMT_CHKN_BIT_REG, D_KBL | D_CFL, F_CMD_ACCESS, NULL, NULL);
-	MMIO_D(GEN9_CTX_PREEMPT_REG, D_SKL_PLUS);
+	MMIO_D(GEN9_CTX_PREEMPT_REG, D_SKL_PLUS & ~D_BXT);
 
 	return 0;
 }
@@ -3313,9 +3342,21 @@ static int init_bxt_mmio_info(struct intel_gvt *gvt)
 	MMIO_D(GEN8_PUSHBUS_SHIFT, D_BXT);
 	MMIO_D(GEN6_GFXPAUSE, D_BXT);
 	MMIO_DFH(GEN8_L3SQCREG1, D_BXT, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(GEN8_L3CNTLREG, D_BXT, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(_MMIO(0x20D8), D_BXT, F_CMD_ACCESS, NULL, NULL);
+	MMIO_F(GEN8_RING_CS_GPR(RENDER_RING_BASE, 0), 0x40, F_CMD_ACCESS,
+	       0, 0, D_BXT, NULL, NULL);
+	MMIO_F(GEN8_RING_CS_GPR(GEN6_BSD_RING_BASE, 0), 0x40, F_CMD_ACCESS,
+	       0, 0, D_BXT, NULL, NULL);
+	MMIO_F(GEN8_RING_CS_GPR(BLT_RING_BASE, 0), 0x40, F_CMD_ACCESS,
+	       0, 0, D_BXT, NULL, NULL);
+	MMIO_F(GEN8_RING_CS_GPR(VEBOX_RING_BASE, 0), 0x40, F_CMD_ACCESS,
+	       0, 0, D_BXT, NULL, NULL);
+
+	MMIO_DFH(GEN9_CTX_PREEMPT_REG, D_BXT, F_CMD_ACCESS, NULL, NULL);
+
+	MMIO_DH(GEN8_PRIVATE_PAT_LO, D_BXT, NULL, bxt_ppat_low_write);
 
 	return 0;
 }
@@ -1277,7 +1277,7 @@ void intel_vgpu_clean_submission(struct intel_vgpu *vgpu)
 
 	i915_context_ppgtt_root_restore(s, i915_vm_to_ppgtt(s->shadow[0]->vm));
 	for_each_engine(engine, vgpu->gvt->gt, id)
-		intel_context_unpin(s->shadow[id]);
+		intel_context_put(s->shadow[id]);
 
 	kmem_cache_destroy(s->workloads);
 }
@@ -1369,11 +1369,6 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
 			ce->ring = __intel_context_ring_size(ring_size);
 		}
 
-		ret = intel_context_pin(ce);
-		intel_context_put(ce);
-		if (ret)
-			goto out_shadow_ctx;
-
 		s->shadow[i] = ce;
 	}
 
@@ -1405,7 +1400,6 @@ out_shadow_ctx:
 		if (IS_ERR(s->shadow[i]))
 			break;
 
-		intel_context_unpin(s->shadow[i]);
 		intel_context_put(s->shadow[i]);
 	}
 	i915_vm_put(&ppgtt->vm);
@@ -1479,6 +1473,7 @@ void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload)
 {
 	struct intel_vgpu_submission *s = &workload->vgpu->submission;
 
+	intel_context_unpin(s->shadow[workload->engine->id]);
 	release_shadow_batch_buffer(workload);
 	release_shadow_wa_ctx(&workload->wa_ctx);
 
@@ -1724,6 +1719,12 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu,
 		return ERR_PTR(ret);
 	}
 
+	ret = intel_context_pin(s->shadow[engine->id]);
+	if (ret) {
+		intel_vgpu_destroy_workload(workload);
+		return ERR_PTR(ret);
+	}
+
 	return workload;
 }
@@ -314,8 +314,10 @@ static void __vma_release(struct dma_fence_work *work)
 {
 	struct i915_vma_work *vw = container_of(work, typeof(*vw), base);
 
-	if (vw->pinned)
+	if (vw->pinned) {
 		__i915_gem_object_unpin_pages(vw->pinned);
+		i915_gem_object_put(vw->pinned);
+	}
 
 	i915_vm_free_pt_stash(vw->vm, &vw->stash);
 	i915_vm_put(vw->vm);
@@ -431,7 +433,7 @@ int i915_vma_bind(struct i915_vma *vma,
 
 		if (vma->obj) {
 			__i915_gem_object_pin_pages(vma->obj);
-			work->pinned = vma->obj;
+			work->pinned = i915_gem_object_get(vma->obj);
 		}
 	} else {
 		vma->ops->bind_vma(vma->vm, NULL, vma, cache_level, bind_flags);
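The i915_vma_work fix enforces a general lifetime rule: asynchronous work that dereferences an object later must hold its own reference, taken when the work is queued and dropped when the work releases the object, instead of borrowing the caller's pointer. A compact sketch of the rule (hypothetical refcounting, not the i915 API):

	/* Hypothetical get-at-queue / put-at-release pairing. */
	struct obj { int refcount; int pinned_pages; };

	static struct obj *obj_get(struct obj *o) { o->refcount++; return o; }
	static void obj_put(struct obj *o)        { o->refcount--; }

	static void work_queue(struct obj **slot, struct obj *o)
	{
		o->pinned_pages++;
		*slot = obj_get(o);	/* explicit ref, not a borrowed pointer */
	}

	static void work_release(struct obj **slot)
	{
		if (*slot) {
			(*slot)->pinned_pages--;
			obj_put(*slot);	/* pairs with the get at queue time */
			*slot = NULL;
		}
	}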
@@ -111,10 +111,6 @@ static int dw_hdmi_imx_parse_dt(struct imx_hdmi *hdmi)
 	return 0;
 }
 
-static void dw_hdmi_imx_encoder_disable(struct drm_encoder *encoder)
-{
-}
-
 static void dw_hdmi_imx_encoder_enable(struct drm_encoder *encoder)
 {
 	struct imx_hdmi *hdmi = enc_to_imx_hdmi(encoder);
@@ -140,7 +136,6 @@ static int dw_hdmi_imx_atomic_check(struct drm_encoder *encoder,
 
 static const struct drm_encoder_helper_funcs dw_hdmi_imx_encoder_helper_funcs = {
 	.enable = dw_hdmi_imx_encoder_enable,
-	.disable = dw_hdmi_imx_encoder_disable,
 	.atomic_check = dw_hdmi_imx_atomic_check,
 };
 
@@ -219,15 +214,9 @@ static int dw_hdmi_imx_bind(struct device *dev, struct device *master, void *data)
 	hdmi->dev = &pdev->dev;
 	encoder = &hdmi->encoder;
 
-	encoder->possible_crtcs = drm_of_find_possible_crtcs(drm, dev->of_node);
-	/*
-	 * If we failed to find the CRTC(s) which this encoder is
-	 * supposed to be connected to, it's because the CRTC has
-	 * not been registered yet. Defer probing, and hope that
-	 * the required CRTC is added later.
-	 */
-	if (encoder->possible_crtcs == 0)
-		return -EPROBE_DEFER;
+	ret = imx_drm_encoder_parse_of(drm, encoder, dev->of_node);
+	if (ret)
+		return ret;
 
 	ret = dw_hdmi_imx_parse_dt(hdmi);
 	if (ret < 0)
@@ -20,6 +20,7 @@
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_managed.h>
 #include <drm/drm_of.h>
 #include <drm/drm_plane_helper.h>
 #include <drm/drm_probe_helper.h>
@@ -212,7 +213,9 @@ static int imx_drm_bind(struct device *dev)
 	drm->mode_config.allow_fb_modifiers = true;
 	drm->mode_config.normalize_zpos = true;
 
-	drm_mode_config_init(drm);
+	ret = drmm_mode_config_init(drm);
+	if (ret)
+		return ret;
 
 	ret = drm_vblank_init(drm, MAX_CRTC);
 	if (ret)
@@ -251,7 +254,6 @@ err_poll_fini:
 	drm_kms_helper_poll_fini(drm);
 	component_unbind_all(drm->dev, drm);
 err_kms:
-	drm_mode_config_cleanup(drm);
 	drm_dev_put(drm);
 
 	return ret;
@@ -267,11 +269,9 @@ static void imx_drm_unbind(struct device *dev)
 
 	component_unbind_all(drm->dev, drm);
 
-	drm_mode_config_cleanup(drm);
-	drm_dev_put(drm);
-
 	dev_set_drvdata(dev, NULL);
+
+	drm_dev_put(drm);
 }
 
 static const struct component_master_ops imx_drm_ops = {
@@ -62,7 +62,6 @@ struct imx_ldb_channel {
 	struct i2c_adapter *ddc;
 	int chno;
 	void *edid;
-	int edid_len;
 	struct drm_display_mode mode;
 	int mode_valid;
 	u32 bus_format;
@@ -536,15 +535,14 @@ static int imx_ldb_panel_ddc(struct device *dev,
 	}
 
 	if (!channel->ddc) {
+		int edid_len;
+
 		/* if no DDC available, fallback to hardcoded EDID */
 		dev_dbg(dev, "no ddc available\n");
 
-		edidp = of_get_property(child, "edid",
-					&channel->edid_len);
+		edidp = of_get_property(child, "edid", &edid_len);
 		if (edidp) {
-			channel->edid = kmemdup(edidp,
-						channel->edid_len,
-						GFP_KERNEL);
+			channel->edid = kmemdup(edidp, edid_len, GFP_KERNEL);
 		} else if (!channel->panel) {
 			/* fallback to display-timings node */
 			ret = of_get_drm_display_mode(child,
@@ -13,7 +13,6 @@
 #include <linux/platform_device.h>
 #include <linux/regmap.h>
 #include <linux/regulator/consumer.h>
-#include <linux/spinlock.h>
 #include <linux/videodev2.h>
 
 #include <video/imx-ipu-v3.h>
@@ -104,8 +103,6 @@ struct imx_tve {
 	struct drm_connector connector;
 	struct drm_encoder encoder;
 	struct device *dev;
-	spinlock_t lock;	/* register lock */
-	bool enabled;
 	int mode;
 	int di_hsync_pin;
 	int di_vsync_pin;
@@ -129,30 +126,10 @@ static inline struct imx_tve *enc_to_tve(struct drm_encoder *e)
 	return container_of(e, struct imx_tve, encoder);
 }
 
-static void tve_lock(void *__tve)
-__acquires(&tve->lock)
-{
-	struct imx_tve *tve = __tve;
-
-	spin_lock(&tve->lock);
-}
-
-static void tve_unlock(void *__tve)
-__releases(&tve->lock)
-{
-	struct imx_tve *tve = __tve;
-
-	spin_unlock(&tve->lock);
-}
-
 static void tve_enable(struct imx_tve *tve)
 {
-	if (!tve->enabled) {
-		tve->enabled = true;
-		clk_prepare_enable(tve->clk);
-		regmap_update_bits(tve->regmap, TVE_COM_CONF_REG,
-				   TVE_EN, TVE_EN);
-	}
+	clk_prepare_enable(tve->clk);
+	regmap_update_bits(tve->regmap, TVE_COM_CONF_REG, TVE_EN, TVE_EN);
 
 	/* clear interrupt status register */
 	regmap_write(tve->regmap, TVE_STAT_REG, 0xffffffff);
@@ -169,11 +146,8 @@ static void tve_enable(struct imx_tve *tve)
 
 static void tve_disable(struct imx_tve *tve)
 {
-	if (tve->enabled) {
-		tve->enabled = false;
-		regmap_update_bits(tve->regmap, TVE_COM_CONF_REG, TVE_EN, 0);
-		clk_disable_unprepare(tve->clk);
-	}
+	regmap_update_bits(tve->regmap, TVE_COM_CONF_REG, TVE_EN, 0);
+	clk_disable_unprepare(tve->clk);
 }
 
 static int tve_setup_tvout(struct imx_tve *tve)
@@ -500,8 +474,7 @@ static struct regmap_config tve_regmap_config = {
 
 	.readable_reg = imx_tve_readable_reg,
 
-	.lock = tve_lock,
-	.unlock = tve_unlock,
+	.fast_io = true,
 
 	.max_register = 0xdc,
 };
@@ -511,7 +484,7 @@ static const char * const imx_tve_modes[] = {
 	[TVE_MODE_VGA] = "vga",
 };
 
-static const int of_get_tve_mode(struct device_node *np)
+static int of_get_tve_mode(struct device_node *np)
 {
 	const char *bm;
 	int ret, i;
@@ -544,7 +517,6 @@ static int imx_tve_bind(struct device *dev, struct device *master, void *data)
 	memset(tve, 0, sizeof(*tve));
 
 	tve->dev = dev;
-	spin_lock_init(&tve->lock);
 
 	ddc_node = of_parse_phandle(np, "ddc-i2c-bus", 0);
 	if (ddc_node) {
@@ -28,7 +28,6 @@ struct imx_parallel_display {
 	struct drm_bridge bridge;
 	struct device *dev;
 	void *edid;
-	int edid_len;
 	u32 bus_format;
 	u32 bus_flags;
 	struct drm_display_mode mode;
@@ -41,11 +40,6 @@ static inline struct imx_parallel_display *con_to_imxpd(struct drm_connector *c)
 	return container_of(c, struct imx_parallel_display, connector);
 }
 
-static inline struct imx_parallel_display *enc_to_imxpd(struct drm_encoder *e)
-{
-	return container_of(e, struct imx_parallel_display, encoder);
-}
-
 static inline struct imx_parallel_display *bridge_to_imxpd(struct drm_bridge *b)
 {
 	return container_of(b, struct imx_parallel_display, bridge);
@@ -310,6 +304,7 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data)
 	struct device_node *np = dev->of_node;
 	const u8 *edidp;
 	struct imx_parallel_display *imxpd;
+	int edid_len;
 	int ret;
 	u32 bus_format = 0;
 	const char *fmt;
@@ -323,9 +318,9 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data)
 	if (ret && ret != -ENODEV)
 		return ret;
 
-	edidp = of_get_property(np, "edid", &imxpd->edid_len);
+	edidp = of_get_property(np, "edid", &edid_len);
 	if (edidp)
-		imxpd->edid = kmemdup(edidp, imxpd->edid_len, GFP_KERNEL);
+		imxpd->edid = devm_kmemdup(dev, edidp, edid_len, GFP_KERNEL);
 
 	ret = of_property_read_string(np, "interface-pix-fmt", &fmt);
 	if (!ret) {
@@ -349,17 +344,8 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data)
 	return 0;
 }
 
-static void imx_pd_unbind(struct device *dev, struct device *master,
-	void *data)
-{
-	struct imx_parallel_display *imxpd = dev_get_drvdata(dev);
-
-	kfree(imxpd->edid);
-}
-
 static const struct component_ops imx_pd_ops = {
 	.bind = imx_pd_bind,
-	.unbind = imx_pd_unbind,
 };
 
 static int imx_pd_probe(struct platform_device *pdev)
@@ -626,6 +626,7 @@ static int panfrost_probe(struct platform_device *pdev)
 err_out1:
 	pm_runtime_disable(pfdev->dev);
 	panfrost_device_fini(pfdev);
+	pm_runtime_set_suspended(pfdev->dev);
 err_out0:
 	drm_dev_put(ddev);
 	return err;
@@ -640,9 +641,9 @@ static int panfrost_remove(struct platform_device *pdev)
 	panfrost_gem_shrinker_cleanup(ddev);
 
 	pm_runtime_get_sync(pfdev->dev);
-	panfrost_device_fini(pfdev);
-	pm_runtime_put_sync_suspend(pfdev->dev);
+	pm_runtime_disable(pfdev->dev);
+	panfrost_device_fini(pfdev);
+	pm_runtime_set_suspended(pfdev->dev);
 
 	drm_dev_put(ddev);
 	return 0;
@@ -105,14 +105,12 @@ void panfrost_gem_mapping_put(struct panfrost_gem_mapping *mapping)
 	kref_put(&mapping->refcount, panfrost_gem_mapping_release);
 }
 
-void panfrost_gem_teardown_mappings(struct panfrost_gem_object *bo)
+void panfrost_gem_teardown_mappings_locked(struct panfrost_gem_object *bo)
 {
 	struct panfrost_gem_mapping *mapping;
 
-	mutex_lock(&bo->mappings.lock);
 	list_for_each_entry(mapping, &bo->mappings.list, node)
 		panfrost_gem_teardown_mapping(mapping);
-	mutex_unlock(&bo->mappings.lock);
 }
 
 int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv)
@@ -82,7 +82,7 @@ struct panfrost_gem_mapping *
 panfrost_gem_mapping_get(struct panfrost_gem_object *bo,
			 struct panfrost_file_priv *priv);
 void panfrost_gem_mapping_put(struct panfrost_gem_mapping *mapping);
-void panfrost_gem_teardown_mappings(struct panfrost_gem_object *bo);
+void panfrost_gem_teardown_mappings_locked(struct panfrost_gem_object *bo);
 
 void panfrost_gem_shrinker_init(struct drm_device *dev);
 void panfrost_gem_shrinker_cleanup(struct drm_device *dev);
@@ -40,18 +40,26 @@ static bool panfrost_gem_purge(struct drm_gem_object *obj)
 {
 	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
 	struct panfrost_gem_object *bo = to_panfrost_bo(obj);
+	bool ret = false;
 
 	if (atomic_read(&bo->gpu_usecount))
 		return false;
 
-	if (!mutex_trylock(&shmem->pages_lock))
+	if (!mutex_trylock(&bo->mappings.lock))
 		return false;
 
-	panfrost_gem_teardown_mappings(bo);
+	if (!mutex_trylock(&shmem->pages_lock))
+		goto unlock_mappings;
+
+	panfrost_gem_teardown_mappings_locked(bo);
 	drm_gem_shmem_purge_locked(obj);
+	ret = true;
 
 	mutex_unlock(&shmem->pages_lock);
-	return true;
+
+unlock_mappings:
+	mutex_unlock(&bo->mappings.lock);
+	return ret;
 }
 
 static unsigned long
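The reworked panfrost_gem_purge() shows the usual shrinker discipline: a shrinker must never sleep on a lock, so both locks are taken with trylock in a fixed order, and the outer lock is unwound through a single exit label when the inner one is contended. A user-space sketch of the same shape using pthreads; note pthread_mutex_trylock() returns non-zero when the lock is busy:

	#include <stdbool.h>
	#include <pthread.h>

	/* Hypothetical purge path: trylock both, unwind outer on failure. */
	static bool purge(pthread_mutex_t *mappings, pthread_mutex_t *pages)
	{
		bool ret = false;

		if (pthread_mutex_trylock(mappings))
			return false;

		if (pthread_mutex_trylock(pages))
			goto unlock_mappings;

		/* ... tear down mappings and purge pages here ... */
		ret = true;

		pthread_mutex_unlock(pages);

	unlock_mappings:
		pthread_mutex_unlock(mappings);
		return ret;
	}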
@@ -449,7 +449,7 @@ struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t unaligned_size,
 	}
 
 	if (IS_ERR(cma_obj)) {
-		struct drm_printer p = drm_info_printer(vc4->dev->dev);
+		struct drm_printer p = drm_info_printer(vc4->base.dev);
 		DRM_ERROR("Failed to allocate from CMA:\n");
 		vc4_bo_stats_print(&p, vc4);
 		return ERR_PTR(-ENOMEM);
@@ -590,7 +590,7 @@ static void vc4_bo_cache_time_work(struct work_struct *work)
 {
 	struct vc4_dev *vc4 =
 		container_of(work, struct vc4_dev, bo_cache.time_work);
-	struct drm_device *dev = vc4->dev;
+	struct drm_device *dev = &vc4->base;
 
 	mutex_lock(&vc4->bo_lock);
 	vc4_bo_cache_free_old(dev);
@@ -1005,6 +1005,7 @@ int vc4_get_tiling_ioctl(struct drm_device *dev, void *data,
 	return 0;
 }
 
+static void vc4_bo_cache_destroy(struct drm_device *dev, void *unused);
 int vc4_bo_cache_init(struct drm_device *dev)
 {
 	struct vc4_dev *vc4 = to_vc4_dev(dev);
@@ -1033,10 +1034,10 @@ int vc4_bo_cache_init(struct drm_device *dev)
 	INIT_WORK(&vc4->bo_cache.time_work, vc4_bo_cache_time_work);
 	timer_setup(&vc4->bo_cache.time_timer, vc4_bo_cache_time_timer, 0);
 
-	return 0;
+	return drmm_add_action_or_reset(dev, vc4_bo_cache_destroy, NULL);
 }
 
-void vc4_bo_cache_destroy(struct drm_device *dev)
+static void vc4_bo_cache_destroy(struct drm_device *dev, void *unused)
 {
 	struct vc4_dev *vc4 = to_vc4_dev(dev);
 	int i;
@@ -257,37 +257,37 @@ static int vc4_drm_bind(struct device *dev)

 	dev->coherent_dma_mask = DMA_BIT_MASK(32);

-	vc4 = devm_kzalloc(dev, sizeof(*vc4), GFP_KERNEL);
-	if (!vc4)
-		return -ENOMEM;
-
 	/* If VC4 V3D is missing, don't advertise render nodes. */
 	node = of_find_matching_node_and_match(NULL, vc4_v3d_dt_match, NULL);
 	if (!node || !of_device_is_available(node))
 		vc4_drm_driver.driver_features &= ~DRIVER_RENDER;
 	of_node_put(node);

-	drm = drm_dev_alloc(&vc4_drm_driver, dev);
-	if (IS_ERR(drm))
-		return PTR_ERR(drm);
+	vc4 = devm_drm_dev_alloc(dev, &vc4_drm_driver, struct vc4_dev, base);
+	if (IS_ERR(vc4))
+		return PTR_ERR(vc4);
+
+	drm = &vc4->base;
 	platform_set_drvdata(pdev, drm);
-	vc4->dev = drm;
-	drm->dev_private = vc4;
 	INIT_LIST_HEAD(&vc4->debugfs_list);

 	mutex_init(&vc4->bin_bo_lock);

 	ret = vc4_bo_cache_init(drm);
 	if (ret)
-		goto dev_put;
+		return ret;

-	drm_mode_config_init(drm);
+	ret = drmm_mode_config_init(drm);
+	if (ret)
+		return ret;

-	vc4_gem_init(drm);
+	ret = vc4_gem_init(drm);
+	if (ret)
+		return ret;

 	ret = component_bind_all(dev, drm);
 	if (ret)
-		goto gem_destroy;
+		return ret;

 	ret = vc4_plane_create_additional_planes(drm);
 	if (ret)
@@ -312,30 +312,17 @@ static int vc4_drm_bind(struct device *dev)

 unbind_all:
 	component_unbind_all(dev, drm);
-gem_destroy:
-	vc4_gem_destroy(drm);
-	drm_mode_config_cleanup(drm);
-	vc4_bo_cache_destroy(drm);
-dev_put:
-	drm_dev_put(drm);

 	return ret;
 }

 static void vc4_drm_unbind(struct device *dev)
 {
 	struct drm_device *drm = dev_get_drvdata(dev);
-	struct vc4_dev *vc4 = to_vc4_dev(drm);

 	drm_dev_unregister(drm);

 	drm_atomic_helper_shutdown(drm);
-
-	drm_mode_config_cleanup(drm);
-
-	drm_atomic_private_obj_fini(&vc4->load_tracker);
-	drm_atomic_private_obj_fini(&vc4->ctm_manager);
-
-	drm_dev_put(drm);
 }

 static const struct component_master_ops vc4_drm_ops = {
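The vc4_drm_bind() rewrite above hinges on devm_drm_dev_alloc(), which allocates the driver structure with the drm_device embedded at a named member and hands ownership to devres, so every goto-based unwind label collapses into a plain "return ret;". A sketch of the shape (the foo_* names are illustrative, and the drm_driver ops are elided):

	#include <drm/drm_drv.h>

	struct foo_device {
		struct drm_device base;	/* must be embedded, not a pointer */
		/* driver-private state ... */
	};

	static struct drm_driver foo_drm_driver;	/* ops elided for brevity */

	static int foo_bind(struct device *dev)
	{
		struct foo_device *foo;

		foo = devm_drm_dev_alloc(dev, &foo_drm_driver,
					 struct foo_device, base);
		if (IS_ERR(foo))
			return PTR_ERR(foo);

		/* No drm_dev_put() needed on any failure path past this point. */
		return drm_dev_register(&foo->base, 0);
	}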
@@ -14,6 +14,7 @@
 #include <drm/drm_device.h>
 #include <drm/drm_encoder.h>
 #include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_managed.h>
 #include <drm/drm_mm.h>
 #include <drm/drm_modeset_lock.h>

@@ -71,7 +72,7 @@ struct vc4_perfmon {
 };

 struct vc4_dev {
-	struct drm_device *dev;
+	struct drm_device base;

 	struct vc4_hvs *hvs;
 	struct vc4_v3d *v3d;
@@ -234,7 +235,7 @@ struct vc4_dev {
 static inline struct vc4_dev *
 to_vc4_dev(struct drm_device *dev)
 {
-	return (struct vc4_dev *)dev->dev_private;
+	return container_of(dev, struct vc4_dev, base);
 }

 struct vc4_bo {
@@ -809,7 +810,6 @@ struct drm_gem_object *vc4_prime_import_sg_table(struct drm_device *dev,
 						 struct sg_table *sgt);
 void *vc4_prime_vmap(struct drm_gem_object *obj);
 int vc4_bo_cache_init(struct drm_device *dev);
-void vc4_bo_cache_destroy(struct drm_device *dev);
 int vc4_bo_inc_usecnt(struct vc4_bo *bo);
 void vc4_bo_dec_usecnt(struct vc4_bo *bo);
 void vc4_bo_add_to_purgeable_pool(struct vc4_bo *bo);
@@ -874,8 +874,7 @@ extern struct platform_driver vc4_dsi_driver;
 extern const struct dma_fence_ops vc4_fence_ops;

 /* vc4_gem.c */
-void vc4_gem_init(struct drm_device *dev);
-void vc4_gem_destroy(struct drm_device *dev);
+int vc4_gem_init(struct drm_device *dev);
 int vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
 			struct drm_file *file_priv);
 int vc4_wait_seqno_ioctl(struct drm_device *dev, void *data,
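With drm_device embedded as vc4_dev::base, the new to_vc4_dev() is the standard container_of() subclassing idiom: a constant-offset pointer adjustment that works even before dev_private would have been assigned, removing the ordering hazard of the old cast. In isolation (foo_* names hypothetical):

	#include <linux/kernel.h>	/* container_of() */
	#include <drm/drm_device.h>

	struct foo_device {
		struct drm_device base;
		int private_state;
	};

	static inline struct foo_device *to_foo_device(struct drm_device *dev)
	{
		/* Pure pointer arithmetic; dev->dev_private is not involved. */
		return container_of(dev, struct foo_device, base);
	}

Downcasting is simply taking &foo->base, which is why the hunks below replace every "vc4->dev" with "&vc4->base".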
@@ -314,16 +314,16 @@ vc4_reset_work(struct work_struct *work)
 	struct vc4_dev *vc4 =
 		container_of(work, struct vc4_dev, hangcheck.reset_work);

-	vc4_save_hang_state(vc4->dev);
+	vc4_save_hang_state(&vc4->base);

-	vc4_reset(vc4->dev);
+	vc4_reset(&vc4->base);
 }

 static void
 vc4_hangcheck_elapsed(struct timer_list *t)
 {
 	struct vc4_dev *vc4 = from_timer(vc4, t, hangcheck.timer);
-	struct drm_device *dev = vc4->dev;
+	struct drm_device *dev = &vc4->base;
 	uint32_t ct0ca, ct1ca;
 	unsigned long irqflags;
 	struct vc4_exec_info *bin_exec, *render_exec;
@@ -1000,7 +1000,7 @@ vc4_job_handle_completed(struct vc4_dev *vc4)
 		list_del(&exec->head);

 		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
-		vc4_complete_exec(vc4->dev, exec);
+		vc4_complete_exec(&vc4->base, exec);
 		spin_lock_irqsave(&vc4->job_lock, irqflags);
 	}

@@ -1258,13 +1258,13 @@ vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
 	return 0;

 fail:
-	vc4_complete_exec(vc4->dev, exec);
+	vc4_complete_exec(&vc4->base, exec);

 	return ret;
 }

-void
-vc4_gem_init(struct drm_device *dev)
+static void vc4_gem_destroy(struct drm_device *dev, void *unused);
+int vc4_gem_init(struct drm_device *dev)
 {
 	struct vc4_dev *vc4 = to_vc4_dev(dev);

@@ -1285,10 +1285,11 @@ vc4_gem_init(struct drm_device *dev)

 	INIT_LIST_HEAD(&vc4->purgeable.list);
 	mutex_init(&vc4->purgeable.lock);
+
+	return drmm_add_action_or_reset(dev, vc4_gem_destroy, NULL);
 }

-void
-vc4_gem_destroy(struct drm_device *dev)
+static void vc4_gem_destroy(struct drm_device *dev, void *unused)
 {
 	struct vc4_dev *vc4 = to_vc4_dev(dev);
@@ -560,7 +560,7 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
 {
 	struct platform_device *pdev = to_platform_device(dev);
 	struct drm_device *drm = dev_get_drvdata(master);
-	struct vc4_dev *vc4 = drm->dev_private;
+	struct vc4_dev *vc4 = to_vc4_dev(drm);
 	struct vc4_hvs *hvs = NULL;
 	int ret;
 	u32 dispctrl;
@@ -679,7 +679,7 @@ static void vc4_hvs_unbind(struct device *dev, struct device *master,
 			   void *data)
 {
 	struct drm_device *drm = dev_get_drvdata(master);
-	struct vc4_dev *vc4 = drm->dev_private;
+	struct vc4_dev *vc4 = to_vc4_dev(drm);
 	struct vc4_hvs *hvs = vc4->hvs;

 	if (drm_mm_node_allocated(&vc4->hvs->mitchell_netravali_filter))
@@ -51,7 +51,7 @@ static struct vc4_ctm_state *vc4_get_ctm_state(struct drm_atomic_state *state,
 					       struct drm_private_obj *manager)
 {
 	struct drm_device *dev = state->dev;
-	struct vc4_dev *vc4 = dev->dev_private;
+	struct vc4_dev *vc4 = to_vc4_dev(dev);
 	struct drm_private_state *priv_state;
 	int ret;

@@ -93,6 +93,29 @@ static const struct drm_private_state_funcs vc4_ctm_state_funcs = {
 	.atomic_destroy_state = vc4_ctm_destroy_state,
 };

+static void vc4_ctm_obj_fini(struct drm_device *dev, void *unused)
+{
+	struct vc4_dev *vc4 = to_vc4_dev(dev);
+
+	drm_atomic_private_obj_fini(&vc4->ctm_manager);
+}
+
+static int vc4_ctm_obj_init(struct vc4_dev *vc4)
+{
+	struct vc4_ctm_state *ctm_state;
+
+	drm_modeset_lock_init(&vc4->ctm_state_lock);
+
+	ctm_state = kzalloc(sizeof(*ctm_state), GFP_KERNEL);
+	if (!ctm_state)
+		return -ENOMEM;
+
+	drm_atomic_private_obj_init(&vc4->base, &vc4->ctm_manager, &ctm_state->base,
+				    &vc4_ctm_state_funcs);
+
+	return drmm_add_action(&vc4->base, vc4_ctm_obj_fini, NULL);
+}
+
 /* Converts a DRM S31.32 value to the HW S0.9 format. */
 static u16 vc4_ctm_s31_32_to_s0_9(u64 in)
 {
@@ -609,6 +632,34 @@ static const struct drm_private_state_funcs vc4_load_tracker_state_funcs = {
 	.atomic_destroy_state = vc4_load_tracker_destroy_state,
 };

+static void vc4_load_tracker_obj_fini(struct drm_device *dev, void *unused)
+{
+	struct vc4_dev *vc4 = to_vc4_dev(dev);
+
+	if (!vc4->load_tracker_available)
+		return;
+
+	drm_atomic_private_obj_fini(&vc4->load_tracker);
+}
+
+static int vc4_load_tracker_obj_init(struct vc4_dev *vc4)
+{
+	struct vc4_load_tracker_state *load_state;
+
+	if (!vc4->load_tracker_available)
+		return 0;
+
+	load_state = kzalloc(sizeof(*load_state), GFP_KERNEL);
+	if (!load_state)
+		return -ENOMEM;
+
+	drm_atomic_private_obj_init(&vc4->base, &vc4->load_tracker,
+				    &load_state->base,
+				    &vc4_load_tracker_state_funcs);
+
+	return drmm_add_action(&vc4->base, vc4_load_tracker_obj_fini, NULL);
+}
+
 #define NUM_OUTPUTS 6
 #define NUM_CHANNELS 3

@@ -711,8 +762,6 @@ static const struct drm_mode_config_funcs vc4_mode_funcs = {
 int vc4_kms_load(struct drm_device *dev)
 {
 	struct vc4_dev *vc4 = to_vc4_dev(dev);
-	struct vc4_ctm_state *ctm_state;
-	struct vc4_load_tracker_state *load_state;
 	bool is_vc5 = of_device_is_compatible(dev->dev->of_node,
 					      "brcm,bcm2711-vc5");
 	int ret;
@@ -751,26 +800,13 @@ int vc4_kms_load(struct drm_device *dev)
 	dev->mode_config.async_page_flip = true;
 	dev->mode_config.allow_fb_modifiers = true;

-	drm_modeset_lock_init(&vc4->ctm_state_lock);
+	ret = vc4_ctm_obj_init(vc4);
+	if (ret)
+		return ret;

-	ctm_state = kzalloc(sizeof(*ctm_state), GFP_KERNEL);
-	if (!ctm_state)
-		return -ENOMEM;
-
-	drm_atomic_private_obj_init(dev, &vc4->ctm_manager, &ctm_state->base,
-				    &vc4_ctm_state_funcs);
-
-	if (vc4->load_tracker_available) {
-		load_state = kzalloc(sizeof(*load_state), GFP_KERNEL);
-		if (!load_state) {
-			drm_atomic_private_obj_fini(&vc4->ctm_manager);
-			return -ENOMEM;
-		}
-
-		drm_atomic_private_obj_init(dev, &vc4->load_tracker,
-					    &load_state->base,
-					    &vc4_load_tracker_state_funcs);
-	}
+	ret = vc4_load_tracker_obj_init(vc4);
+	if (ret)
+		return ret;

 	drm_mode_config_reset(dev);
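The vc4_kms.c hunks fold each atomic private object's allocation and cleanup into a self-contained init helper that registers its own managed fini action, which is why vc4_kms_load() loses its hand-rolled unwinding (the old code had to fini the CTM manager by hand when the load-tracker allocation failed). The general shape, with hypothetical foo_* names and state funcs:

	static void foo_obj_fini(struct drm_device *dev, void *unused)
	{
		struct foo_device *foo = to_foo_device(dev);

		drm_atomic_private_obj_fini(&foo->manager);
	}

	static int foo_obj_init(struct foo_device *foo)
	{
		struct foo_state *state;

		state = kzalloc(sizeof(*state), GFP_KERNEL);
		if (!state)
			return -ENOMEM;

		drm_atomic_private_obj_init(&foo->base, &foo->manager,
					    &state->base, &foo_state_funcs);

		/* fini now runs at device release, no matter what fails later. */
		return drmm_add_action(&foo->base, foo_obj_fini, NULL);
	}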
@@ -168,7 +168,7 @@ static void vc4_v3d_init_hw(struct drm_device *dev)

 int vc4_v3d_get_bin_slot(struct vc4_dev *vc4)
 {
-	struct drm_device *dev = vc4->dev;
+	struct drm_device *dev = &vc4->base;
 	unsigned long irqflags;
 	int slot;
 	uint64_t seqno = 0;
@@ -246,7 +246,7 @@ static int bin_bo_alloc(struct vc4_dev *vc4)
 	INIT_LIST_HEAD(&list);

 	while (true) {
-		struct vc4_bo *bo = vc4_bo_create(vc4->dev, size, true,
+		struct vc4_bo *bo = vc4_bo_create(&vc4->base, size, true,
 						  VC4_BO_TYPE_BIN);

 		if (IS_ERR(bo)) {
@@ -361,7 +361,7 @@ static int vc4_v3d_runtime_suspend(struct device *dev)
 	struct vc4_v3d *v3d = dev_get_drvdata(dev);
 	struct vc4_dev *vc4 = v3d->vc4;

-	vc4_irq_uninstall(vc4->dev);
+	vc4_irq_uninstall(&vc4->base);

 	clk_disable_unprepare(v3d->clk);

@@ -378,11 +378,11 @@ static int vc4_v3d_runtime_resume(struct device *dev)
 	if (ret != 0)
 		return ret;

-	vc4_v3d_init_hw(vc4->dev);
+	vc4_v3d_init_hw(&vc4->base);

 	/* We disabled the IRQ as part of vc4_irq_uninstall in suspend. */
-	enable_irq(vc4->dev->irq);
-	vc4_irq_postinstall(vc4->dev);
+	enable_irq(vc4->base.irq);
+	vc4_irq_postinstall(&vc4->base);

 	return 0;
 }
@@ -133,73 +133,6 @@ enum ipu_color_space ipu_pixelformat_to_colorspace(u32 pixelformat)
 }
 EXPORT_SYMBOL_GPL(ipu_pixelformat_to_colorspace);

-bool ipu_pixelformat_is_planar(u32 pixelformat)
-{
-	switch (pixelformat) {
-	case V4L2_PIX_FMT_YUV420:
-	case V4L2_PIX_FMT_YVU420:
-	case V4L2_PIX_FMT_YUV422P:
-	case V4L2_PIX_FMT_NV12:
-	case V4L2_PIX_FMT_NV21:
-	case V4L2_PIX_FMT_NV16:
-	case V4L2_PIX_FMT_NV61:
-		return true;
-	}
-
-	return false;
-}
-EXPORT_SYMBOL_GPL(ipu_pixelformat_is_planar);
-
-enum ipu_color_space ipu_mbus_code_to_colorspace(u32 mbus_code)
-{
-	switch (mbus_code & 0xf000) {
-	case 0x1000:
-		return IPUV3_COLORSPACE_RGB;
-	case 0x2000:
-		return IPUV3_COLORSPACE_YUV;
-	default:
-		return IPUV3_COLORSPACE_UNKNOWN;
-	}
-}
-EXPORT_SYMBOL_GPL(ipu_mbus_code_to_colorspace);
-
-int ipu_stride_to_bytes(u32 pixel_stride, u32 pixelformat)
-{
-	switch (pixelformat) {
-	case V4L2_PIX_FMT_YUV420:
-	case V4L2_PIX_FMT_YVU420:
-	case V4L2_PIX_FMT_YUV422P:
-	case V4L2_PIX_FMT_NV12:
-	case V4L2_PIX_FMT_NV21:
-	case V4L2_PIX_FMT_NV16:
-	case V4L2_PIX_FMT_NV61:
-		/*
-		 * for the planar YUV formats, the stride passed to
-		 * cpmem must be the stride in bytes of the Y plane.
-		 * And all the planar YUV formats have an 8-bit
-		 * Y component.
-		 */
-		return (8 * pixel_stride) >> 3;
-	case V4L2_PIX_FMT_RGB565:
-	case V4L2_PIX_FMT_YUYV:
-	case V4L2_PIX_FMT_UYVY:
-		return (16 * pixel_stride) >> 3;
-	case V4L2_PIX_FMT_BGR24:
-	case V4L2_PIX_FMT_RGB24:
-		return (24 * pixel_stride) >> 3;
-	case V4L2_PIX_FMT_BGR32:
-	case V4L2_PIX_FMT_RGB32:
-	case V4L2_PIX_FMT_XBGR32:
-	case V4L2_PIX_FMT_XRGB32:
-		return (32 * pixel_stride) >> 3;
-	default:
-		break;
-	}
-
-	return -EINVAL;
-}
-EXPORT_SYMBOL_GPL(ipu_stride_to_bytes);
-
 int ipu_degrees_to_rot_mode(enum ipu_rotate_mode *mode, int degrees,
 			    bool hflip, bool vflip)
 {
@@ -484,9 +484,6 @@ int ipu_smfc_set_watermark(struct ipu_smfc *smfc, u32 set_level, u32 clr_level);

 enum ipu_color_space ipu_drm_fourcc_to_colorspace(u32 drm_fourcc);
 enum ipu_color_space ipu_pixelformat_to_colorspace(u32 pixelformat);
-enum ipu_color_space ipu_mbus_code_to_colorspace(u32 mbus_code);
-int ipu_stride_to_bytes(u32 pixel_stride, u32 pixelformat);
-bool ipu_pixelformat_is_planar(u32 pixelformat);
 int ipu_degrees_to_rot_mode(enum ipu_rotate_mode *mode, int degrees,
 			    bool hflip, bool vflip);
 int ipu_rot_mode_to_degrees(int *degrees, enum ipu_rotate_mode mode,
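The ipu-v3 hunks delete helpers that no longer have any in-tree callers, together with their declarations in the header. For reference, the stride conversion being dropped was plain bits-to-bytes arithmetic: a 640-pixel stride in RGB565 (16 bits per pixel) is (16 * 640) >> 3 = 1280 bytes. A stand-alone restatement, not kernel API:

	/* bits-per-pixel times pixels, divided by 8 bits per byte */
	static unsigned int stride_to_bytes(unsigned int bpp, unsigned int stride_px)
	{
		return (bpp * stride_px) >> 3;	/* (16 * 640) >> 3 == 1280 */
	}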
@@ -8,7 +8,7 @@

 #define FONTDATAMAX 9216

-static struct font_data fontdata_10x18 = {
+static const struct font_data fontdata_10x18 = {
 	{ 0, 0, FONTDATAMAX, 0 }, {
 	/* 0 0x00 '^@' */
 	0x00, 0x00, /* 0000000000 */
@@ -3,7 +3,7 @@

 #define FONTDATAMAX 2560

-static struct font_data fontdata_6x10 = {
+static const struct font_data fontdata_6x10 = {
 	{ 0, 0, FONTDATAMAX, 0 }, {
 	/* 0 0x00 '^@' */
 	0x00, /* 00000000 */
@@ -9,7 +9,7 @@

 #define FONTDATAMAX (11*256)

-static struct font_data fontdata_6x11 = {
+static const struct font_data fontdata_6x11 = {
 	{ 0, 0, FONTDATAMAX, 0 }, {
 	/* 0 0x00 '^@' */
 	0x00, /* 00000000 */
@@ -3,7 +3,7 @@

 #define FONTDATAMAX 2048

-static struct font_data fontdata_6x8 = {
+static const struct font_data fontdata_6x8 = {
 	{ 0, 0, FONTDATAMAX, 0 }, {
 	/* 0 0x00 '^@' */
 	0x00, /* 000000 */
@@ -8,7 +8,7 @@

 #define FONTDATAMAX 3584

-static struct font_data fontdata_7x14 = {
+static const struct font_data fontdata_7x14 = {
 	{ 0, 0, FONTDATAMAX, 0 }, {
 	/* 0 0x00 '^@' */
 	0x00, /* 0000000 */
@@ -10,7 +10,7 @@

 #define FONTDATAMAX 4096

-static struct font_data fontdata_8x16 = {
+static const struct font_data fontdata_8x16 = {
 	{ 0, 0, FONTDATAMAX, 0 }, {
 	/* 0 0x00 '^@' */
 	0x00, /* 00000000 */
@@ -9,7 +9,7 @@

 #define FONTDATAMAX 2048

-static struct font_data fontdata_8x8 = {
+static const struct font_data fontdata_8x8 = {
 	{ 0, 0, FONTDATAMAX, 0 }, {
 	/* 0 0x00 '^@' */
 	0x00, /* 00000000 */
@@ -5,7 +5,7 @@

 #define FONTDATAMAX 2048

-static struct font_data acorndata_8x8 = {
+static const struct font_data acorndata_8x8 = {
 	{ 0, 0, FONTDATAMAX, 0 }, {
 	/* 00 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* ^@ */
 	/* 01 */ 0x7e, 0x81, 0xa5, 0x81, 0xbd, 0x99, 0x81, 0x7e, /* ^A */
@@ -43,7 +43,7 @@ __END__;

 #define FONTDATAMAX 1536

-static struct font_data fontdata_mini_4x6 = {
+static const struct font_data fontdata_mini_4x6 = {
 	{ 0, 0, FONTDATAMAX, 0 }, {
 	/*{*/
 	/* Char 0: ' ' */
@@ -14,7 +14,7 @@

 #define FONTDATAMAX 2048

-static struct font_data fontdata_pearl8x8 = {
+static const struct font_data fontdata_pearl8x8 = {
 	{ 0, 0, FONTDATAMAX, 0 }, {
 	/* 0 0x00 '^@' */
 	0x00, /* 00000000 */
@@ -3,7 +3,7 @@

 #define FONTDATAMAX 11264

-static struct font_data fontdata_sun12x22 = {
+static const struct font_data fontdata_sun12x22 = {
 	{ 0, 0, FONTDATAMAX, 0 }, {
 	/* 0 0x00 '^@' */
 	0x00, 0x00, /* 000000000000 */
@@ -3,7 +3,7 @@

 #define FONTDATAMAX 4096

-static struct font_data fontdata_sun8x16 = {
+static const struct font_data fontdata_sun8x16 = {
 	{ 0, 0, FONTDATAMAX, 0 }, {
 	/* */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
 	/* */ 0x00,0x00,0x7e,0x81,0xa5,0x81,0x81,0xbd,0x99,0x81,0x81,0x7e,0x00,0x00,0x00,0x00,
@@ -4,7 +4,7 @@

 #define FONTDATAMAX 16384

-static struct font_data fontdata_ter16x32 = {
+static const struct font_data fontdata_ter16x32 = {
 	{ 0, 0, FONTDATAMAX, 0 }, {
 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
 	0x00, 0x00, 0x00, 0x00, 0x7f, 0xfc, 0x7f, 0xfc,
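The font hunks are pure constification: each blob is only ever read through the const struct font_desc interface, so marking the backing struct font_data const lets the compiler emit the glyph bitmaps into .rodata, where they can be write-protected. In miniature (a two-glyph toy table; assumes the struct font_data layout from include/linux/font.h in this series):

	#include <linux/font.h>

	#define FONTDATAMAX 2

	/* const: placed in .rodata rather than writable .data */
	static const struct font_data fontdata_example = {
		{ 0, 0, FONTDATAMAX, 0 }, {
		0x00,	/* glyph 0 */
		0xff,	/* glyph 1 */
	} };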