// SPDX-License-Identifier: GPL-2.0-only
// Miscellaneous Arm SMMU implementation and integration quirks
// Copyright (C) 2019 Arm Limited

#define pr_fmt(fmt) "arm-smmu: " fmt

#include <linux/bitfield.h>
#include <linux/of.h>

#include "arm-smmu.h"
static int arm_smmu_gr0_ns(int offset)
{
	switch (offset) {
	case ARM_SMMU_GR0_sCR0:
	case ARM_SMMU_GR0_sACR:
	case ARM_SMMU_GR0_sGFSR:
	case ARM_SMMU_GR0_sGFSYNR0:
	case ARM_SMMU_GR0_sGFSYNR1:
	case ARM_SMMU_GR0_sGFSYNR2:
		return offset + 0x400;
	default:
		return offset;
	}
}

static u32 arm_smmu_read_ns(struct arm_smmu_device *smmu, int page,
			    int offset)
{
	if (page == ARM_SMMU_GR0)
		offset = arm_smmu_gr0_ns(offset);
	return readl_relaxed(arm_smmu_page(smmu, page) + offset);
}

static void arm_smmu_write_ns(struct arm_smmu_device *smmu, int page,
			      int offset, u32 val)
{
	if (page == ARM_SMMU_GR0)
		offset = arm_smmu_gr0_ns(offset);
	writel_relaxed(val, arm_smmu_page(smmu, page) + offset);
}

/* Since we don't care for sGFAR, we can do without 64-bit accessors */
static const struct arm_smmu_impl calxeda_impl = {
	.read_reg = arm_smmu_read_ns,
	.write_reg = arm_smmu_write_ns,
};
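
/*
 * Cavium SMMUs are subclassed by embedding the core's struct arm_smmu_device,
 * so a plain smmu pointer still works everywhere while container_of()
 * recovers the implementation-specific state.
 */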
struct cavium_smmu {
	struct arm_smmu_device smmu;
	u32 id_base;
};

static int cavium_cfg_probe(struct arm_smmu_device *smmu)
{
	static atomic_t context_count = ATOMIC_INIT(0);
	struct cavium_smmu *cs = container_of(smmu, struct cavium_smmu, smmu);
	/*
	 * Cavium CN88xx erratum #27704.
	 * Ensure ASID and VMID allocation is unique across all SMMUs in
	 * the system.
	 */
	cs->id_base = atomic_fetch_add(smmu->num_context_banks, &context_count);
	dev_notice(smmu->dev, "\tenabling workaround for Cavium erratum 27704\n");

	return 0;
}
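
/*
 * The core allocates ASIDs and VMIDs per SMMU instance starting from zero;
 * shifting them by this SMMU's id_base keeps the combined ID space unique
 * across the system, as the erratum 27704 workaround requires.
 */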
static int cavium_init_context(struct arm_smmu_domain *smmu_domain,
		struct io_pgtable_cfg *pgtbl_cfg, struct device *dev)
{
	struct cavium_smmu *cs = container_of(smmu_domain->smmu,
					      struct cavium_smmu, smmu);

	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S2)
		smmu_domain->cfg.vmid += cs->id_base;
	else
		smmu_domain->cfg.asid += cs->id_base;

	return 0;
}

static const struct arm_smmu_impl cavium_impl = {
	.cfg_probe = cavium_cfg_probe,
	.init_context = cavium_init_context,
};
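
/*
 * The probe path hands over a plain struct arm_smmu_device from the core's
 * devres allocation; devm_krealloc() resizes that same allocation, preserving
 * its contents and devres lifetime, so the subclass field fits on the end.
 */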
static struct arm_smmu_device *cavium_smmu_impl_init(struct arm_smmu_device *smmu)
{
	struct cavium_smmu *cs;

	cs = devm_krealloc(smmu->dev, smmu, sizeof(*cs), GFP_KERNEL);
	if (!cs)
		return ERR_PTR(-ENOMEM);

	cs->smmu.impl = &cavium_impl;

	return &cs->smmu;
}
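
/* Bits in the MMU-500 context bank ACTLR and global sACR registers */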
#define ARM_MMU500_ACTLR_CPRE		(1 << 1)

#define ARM_MMU500_ACR_CACHE_LOCK	(1 << 26)
#define ARM_MMU500_ACR_S2CRB_TLBEN	(1 << 10)
#define ARM_MMU500_ACR_SMTNMB_TLBEN	(1 << 8)

int arm_mmu500_reset(struct arm_smmu_device *smmu)
{
	u32 reg, major;
	int i;
	/*
	 * On MMU-500 r2p0 onwards we need to clear ACR.CACHE_LOCK before
	 * writes to the context bank ACTLRs will stick. And we just hope that
	 * Secure has also cleared SACR.CACHE_LOCK for this to take effect...
	 */
	reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID7);
	major = FIELD_GET(ARM_SMMU_ID7_MAJOR, reg);
	reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sACR);
	if (major >= 2)
		reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
	/*
	 * Allow unmatched Stream IDs to allocate bypass
	 * TLB entries for reduced latency.
	 */
	reg |= ARM_MMU500_ACR_SMTNMB_TLBEN | ARM_MMU500_ACR_S2CRB_TLBEN;
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sACR, reg);

	/*
	 * Disable MMU-500's not-particularly-beneficial next-page
	 * prefetcher for the sake of errata #841119 and #826419.
	 */
	for (i = 0; i < smmu->num_context_banks; ++i) {
		reg = arm_smmu_cb_read(smmu, i, ARM_SMMU_CB_ACTLR);
		reg &= ~ARM_MMU500_ACTLR_CPRE;
		arm_smmu_cb_write(smmu, i, ARM_SMMU_CB_ACTLR, reg);
		/* Read back to check the clear stuck; it won't have if CACHE_LOCK is still set */
		reg = arm_smmu_cb_read(smmu, i, ARM_SMMU_CB_ACTLR);
		if (reg & ARM_MMU500_ACTLR_CPRE)
			dev_warn_once(smmu->dev, "Failed to disable prefetcher [errata #841119 and #826419], check ACR.CACHE_LOCK\n");
	}

	return 0;
}

static const struct arm_smmu_impl arm_mmu500_impl = {
	.reset = arm_mmu500_reset,
};

static u64 mrvl_mmu500_readq(struct arm_smmu_device *smmu, int page, int off)
{
	/*
	 * Marvell Armada-AP806 erratum #582743.
	 * Split each readq into a pair of readl accesses.
	 */
	return hi_lo_readq_relaxed(arm_smmu_page(smmu, page) + off);
}

static void mrvl_mmu500_writeq(struct arm_smmu_device *smmu, int page, int off,
			       u64 val)
{
	/*
	 * Marvell Armada-AP806 erratum #582743.
	 * Split each writeq into a pair of writel accesses.
	 */
	hi_lo_writeq_relaxed(val, arm_smmu_page(smmu, page) + off);
}

static int mrvl_mmu500_cfg_probe(struct arm_smmu_device *smmu)
{
	/*
	 * Armada-AP806 erratum #582743.
	 * Hide the SMMU_IDR2.PTFSv8 fields to sidestep the AArch64
	 * formats altogether and allow using 32-bit accesses on the
	 * interconnect.
	 */
	smmu->features &= ~(ARM_SMMU_FEAT_FMT_AARCH64_4K |
			    ARM_SMMU_FEAT_FMT_AARCH64_16K |
			    ARM_SMMU_FEAT_FMT_AARCH64_64K);

	return 0;
}

static const struct arm_smmu_impl mrvl_mmu500_impl = {
	.read_reg64 = mrvl_mmu500_readq,
	.write_reg64 = mrvl_mmu500_writeq,
	.cfg_probe = mrvl_mmu500_cfg_probe,
	.reset = arm_mmu500_reset,
};
struct arm_smmu_device *arm_smmu_impl_init(struct arm_smmu_device *smmu)
{
	const struct device_node *np = smmu->dev->of_node;

	/*
	 * Set the impl for model-specific implementation quirks first,
	 * such that platform integration quirks can pick it up and
	 * inherit from it if necessary.
	 */
	switch (smmu->model) {
	case ARM_MMU500:
		smmu->impl = &arm_mmu500_impl;
		break;
	case CAVIUM_SMMUV2:
		return cavium_smmu_impl_init(smmu);
	default:
		break;
	}

	/* This is implicitly MMU-400 */
	if (of_property_read_bool(np, "calxeda,smmu-secure-config-access"))
		smmu->impl = &calxeda_impl;

	if (of_device_is_compatible(np, "nvidia,tegra234-smmu") ||
	    of_device_is_compatible(np, "nvidia,tegra194-smmu") ||
	    of_device_is_compatible(np, "nvidia,tegra186-smmu"))
		return nvidia_smmu_impl_init(smmu);

	if (IS_ENABLED(CONFIG_ARM_SMMU_QCOM))
		smmu = qcom_smmu_impl_init(smmu);

	if (of_device_is_compatible(np, "marvell,ap806-smmu-500"))
		smmu->impl = &mrvl_mmu500_impl;

	return smmu;
}