Arm SMMU fixes for 6.8

- Fix CD allocation from atomic context when using SVA with SMMUv3
- Revert the conversion of SMMUv2 to domain_alloc_paging(), as it breaks
  the boot for Qualcomm MSM8996 devices

Merge tag 'arm-smmu-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/will/linux into iommu/fixes
commit 16b1b39126
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c

@@ -292,10 +292,8 @@ arm_smmu_mmu_notifier_get(struct arm_smmu_domain *smmu_domain,
 			  struct mm_struct *mm)
 {
 	int ret;
-	unsigned long flags;
 	struct arm_smmu_ctx_desc *cd;
 	struct arm_smmu_mmu_notifier *smmu_mn;
-	struct arm_smmu_master *master;
 
 	list_for_each_entry(smmu_mn, &smmu_domain->mmu_notifiers, list) {
 		if (smmu_mn->mn.mm == mm) {
@@ -325,28 +323,9 @@ arm_smmu_mmu_notifier_get(struct arm_smmu_domain *smmu_domain,
 		goto err_free_cd;
 	}
 
-	spin_lock_irqsave(&smmu_domain->devices_lock, flags);
-	list_for_each_entry(master, &smmu_domain->devices, domain_head) {
-		ret = arm_smmu_write_ctx_desc(master, mm_get_enqcmd_pasid(mm),
-					      cd);
-		if (ret) {
-			list_for_each_entry_from_reverse(
-				master, &smmu_domain->devices, domain_head)
-				arm_smmu_write_ctx_desc(
-					master, mm_get_enqcmd_pasid(mm), NULL);
-			break;
-		}
-	}
-	spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
-	if (ret)
-		goto err_put_notifier;
-
 	list_add(&smmu_mn->list, &smmu_domain->mmu_notifiers);
 	return smmu_mn;
 
-err_put_notifier:
-	/* Frees smmu_mn */
-	mmu_notifier_put(&smmu_mn->mn);
 err_free_cd:
 	arm_smmu_free_shared_cd(cd);
 	return ERR_PTR(ret);
@@ -363,9 +342,6 @@ static void arm_smmu_mmu_notifier_put(struct arm_smmu_mmu_notifier *smmu_mn)
 
 	list_del(&smmu_mn->list);
 
-	arm_smmu_update_ctx_desc_devices(smmu_domain, mm_get_enqcmd_pasid(mm),
-					 NULL);
-
 	/*
 	 * If we went through clear(), we've already invalidated, and no
 	 * new TLB entry can have been formed.
@@ -381,7 +357,8 @@ static void arm_smmu_mmu_notifier_put(struct arm_smmu_mmu_notifier *smmu_mn)
 	arm_smmu_free_shared_cd(cd);
 }
 
-static int __arm_smmu_sva_bind(struct device *dev, struct mm_struct *mm)
+static int __arm_smmu_sva_bind(struct device *dev, ioasid_t pasid,
+			       struct mm_struct *mm)
 {
 	int ret;
 	struct arm_smmu_bond *bond;
@@ -404,9 +381,15 @@ static int __arm_smmu_sva_bind(struct device *dev, struct mm_struct *mm)
 		goto err_free_bond;
 	}
 
+	ret = arm_smmu_write_ctx_desc(master, pasid, bond->smmu_mn->cd);
+	if (ret)
+		goto err_put_notifier;
+
 	list_add(&bond->list, &master->bonds);
 	return 0;
 
+err_put_notifier:
+	arm_smmu_mmu_notifier_put(bond->smmu_mn);
 err_free_bond:
 	kfree(bond);
 	return ret;
@@ -568,6 +551,9 @@ void arm_smmu_sva_remove_dev_pasid(struct iommu_domain *domain,
 	struct arm_smmu_master *master = dev_iommu_priv_get(dev);
 
 	mutex_lock(&sva_lock);
+
+	arm_smmu_write_ctx_desc(master, id, NULL);
+
 	list_for_each_entry(t, &master->bonds, list) {
 		if (t->mm == mm) {
 			bond = t;
@@ -590,7 +576,7 @@ static int arm_smmu_sva_set_dev_pasid(struct iommu_domain *domain,
 	struct mm_struct *mm = domain->mm;
 
 	mutex_lock(&sva_lock);
-	ret = __arm_smmu_sva_bind(dev, mm);
+	ret = __arm_smmu_sva_bind(dev, id, mm);
 	mutex_unlock(&sva_lock);
 
 	return ret;
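The hunks above implement the first fix: arm_smmu_write_ctx_desc(), which can end up allocating CD table memory with GFP_KERNEL, is no longer called inside the spin_lock_irqsave(&smmu_domain->devices_lock, ...) region of arm_smmu_mmu_notifier_get(); the CD write now happens in __arm_smmu_sva_bind(), which runs under the sva_lock mutex where sleeping is allowed. The snippet below is a minimal, hypothetical sketch of the underlying rule, not the driver's code (my_ctx, my_domain and my_attach_* are invented names): a GFP_KERNEL allocation may sleep, so it must not sit inside a spinlock critical section with interrupts disabled; the usual fix is to do the sleeping work first and only publish the result under the lock.

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_ctx {
	struct list_head list;
	int pasid;
};

struct my_domain {
	spinlock_t lock;		/* may be taken from IRQ context */
	struct list_head contexts;	/* assume the caller initialised lock and contexts */
};

/*
 * BROKEN: kzalloc(GFP_KERNEL) may sleep, but a spinlock is held with
 * interrupts disabled, so this can deadlock or trigger
 * "BUG: sleeping function called from invalid context".
 */
static int my_attach_broken(struct my_domain *d, int pasid)
{
	unsigned long flags;
	struct my_ctx *ctx;

	spin_lock_irqsave(&d->lock, flags);
	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);	/* sleeping allocation under a spinlock */
	if (!ctx) {
		spin_unlock_irqrestore(&d->lock, flags);
		return -ENOMEM;
	}
	ctx->pasid = pasid;
	list_add(&ctx->list, &d->contexts);
	spin_unlock_irqrestore(&d->lock, flags);
	return 0;
}

/*
 * FIXED: do the sleeping work first, then take the spinlock only long
 * enough to publish the result.
 */
static int my_attach_fixed(struct my_domain *d, int pasid)
{
	unsigned long flags;
	struct my_ctx *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);	/* safe: no locks held */
	if (!ctx)
		return -ENOMEM;
	ctx->pasid = pasid;

	spin_lock_irqsave(&d->lock, flags);
	list_add(&ctx->list, &d->contexts);
	spin_unlock_irqrestore(&d->lock, flags);
	return 0;
}

Whether the sleeping work is hoisted before the lock (as in the sketch) or deferred to a sleepable context (as the SVA fix does by moving the CD write under sva_lock), the invariant is the same: nothing that can sleep runs between spin_lock_irqsave() and spin_unlock_irqrestore().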
drivers/iommu/arm/arm-smmu/arm-smmu.c

@@ -859,10 +859,14 @@ static void arm_smmu_destroy_domain_context(struct arm_smmu_domain *smmu_domain)
 	arm_smmu_rpm_put(smmu);
 }
 
-static struct iommu_domain *arm_smmu_domain_alloc_paging(struct device *dev)
+static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
 {
 	struct arm_smmu_domain *smmu_domain;
 
+	if (type != IOMMU_DOMAIN_UNMANAGED) {
+		if (using_legacy_binding || type != IOMMU_DOMAIN_DMA)
+			return NULL;
+	}
 	/*
 	 * Allocate the domain and initialise some of its data structures.
 	 * We can't really do anything meaningful until we've added a
@@ -875,15 +879,6 @@ static struct iommu_domain *arm_smmu_domain_alloc_paging(struct device *dev)
 	mutex_init(&smmu_domain->init_mutex);
 	spin_lock_init(&smmu_domain->cb_lock);
 
-	if (dev) {
-		struct arm_smmu_master_cfg *cfg = dev_iommu_priv_get(dev);
-
-		if (arm_smmu_init_domain_context(smmu_domain, cfg->smmu, dev)) {
-			kfree(smmu_domain);
-			return NULL;
-		}
-	}
-
 	return &smmu_domain->domain;
 }
 
@@ -1600,7 +1595,7 @@ static struct iommu_ops arm_smmu_ops = {
 	.identity_domain = &arm_smmu_identity_domain,
 	.blocked_domain = &arm_smmu_blocked_domain,
 	.capable = arm_smmu_capable,
-	.domain_alloc_paging = arm_smmu_domain_alloc_paging,
+	.domain_alloc = arm_smmu_domain_alloc,
 	.probe_device = arm_smmu_probe_device,
 	.release_device = arm_smmu_release_device,
 	.probe_finalize = arm_smmu_probe_finalize,
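These arm-smmu.c hunks revert the SMMUv2 driver to the legacy ->domain_alloc(type) callback: the domain is allocated without knowing the device, and the context is once again initialised lazily at attach time under init_mutex rather than eagerly from ->domain_alloc_paging(dev) via arm_smmu_init_domain_context(). The snippet below is a simplified, hypothetical sketch of that lazy-initialisation shape (my_domain, my_domain_alloc and my_attach are invented names), not the driver's actual code.

#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct my_domain {
	struct mutex init_mutex;	/* serialises first-attach initialisation */
	bool initialised;
	/* hardware context state would live here */
};

/* Allocation knows nothing about the device yet. */
static struct my_domain *my_domain_alloc(void)
{
	struct my_domain *d = kzalloc(sizeof(*d), GFP_KERNEL);

	if (!d)
		return NULL;
	mutex_init(&d->init_mutex);
	return d;
}

/* The hardware context is set up only when a device is first attached. */
static int my_attach(struct my_domain *d, struct device *dev)
{
	mutex_lock(&d->init_mutex);
	if (!d->initialised) {
		/* program the context for the SMMU that owns @dev here */
		d->initialised = true;
	}
	mutex_unlock(&d->init_mutex);
	return 0;
}

Deferring the initialisation fits the driver's own comment kept in the hunk above ("We can't really do anything meaningful until we've added a master"): with the legacy callback there is no device at allocation time, so the SMMU instance to program only becomes known at attach.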