forked from Minki/linux
iommu/amd: Lock code paths traversing protection_domain->dev_list
The traversing of this list requires protection_domain->lock to be taken
to avoid nasty races with attach/detach code. Make sure the lock is held
on all code-paths traversing this list.
Reported-by: Filippo Sironi <sironi@amazon.de>
Fixes: 92d420ec02 ("iommu/amd: Relax locking in dma_ops path")
Reviewed-by: Filippo Sironi <sironi@amazon.de>
Reviewed-by: Jerry Snitselaar <jsnitsel@redhat.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
This commit is contained in: parent ab7b2577f0 · commit 2a78f99625
@@ -1334,8 +1334,12 @@ static void domain_flush_np_cache(struct protection_domain *domain,
 				      dma_addr_t iova, size_t size)
 {
 	if (unlikely(amd_iommu_np_cache)) {
+		unsigned long flags;
+
+		spin_lock_irqsave(&domain->lock, flags);
 		domain_flush_pages(domain, iova, size);
 		domain_flush_complete(domain);
+		spin_unlock_irqrestore(&domain->lock, flags);
 	}
 }
 
@@ -1700,8 +1704,13 @@ static int iommu_map_page(struct protection_domain *dom,
 	ret = 0;
 
 out:
-	if (updated)
+	if (updated) {
+		unsigned long flags;
+
+		spin_lock_irqsave(&dom->lock, flags);
 		update_domain(dom);
+		spin_unlock_irqrestore(&dom->lock, flags);
+	}
 
 	/* Everything flushed out, free pages now */
 	free_page_list(freelist);
@@ -1857,8 +1866,12 @@ static void free_gcr3_table(struct protection_domain *domain)
 
 static void dma_ops_domain_flush_tlb(struct dma_ops_domain *dom)
 {
+	unsigned long flags;
+
+	spin_lock_irqsave(&dom->domain.lock, flags);
 	domain_flush_tlb(&dom->domain);
 	domain_flush_complete(&dom->domain);
+	spin_unlock_irqrestore(&dom->domain.lock, flags);
 }
 
 static void iova_domain_flush_tlb(struct iova_domain *iovad)
@@ -2414,6 +2427,7 @@ static dma_addr_t __map_single(struct device *dev,
 {
 	dma_addr_t offset = paddr & ~PAGE_MASK;
 	dma_addr_t address, start, ret;
+	unsigned long flags;
 	unsigned int pages;
 	int prot = 0;
 	int i;
@@ -2451,8 +2465,10 @@ out_unmap:
 		iommu_unmap_page(&dma_dom->domain, start, PAGE_SIZE);
 	}
 
+	spin_lock_irqsave(&dma_dom->domain.lock, flags);
 	domain_flush_tlb(&dma_dom->domain);
 	domain_flush_complete(&dma_dom->domain);
+	spin_unlock_irqrestore(&dma_dom->domain.lock, flags);
 
 	dma_ops_free_iova(dma_dom, address, pages);
 
@@ -2481,8 +2497,12 @@ static void __unmap_single(struct dma_ops_domain *dma_dom,
 	}
 
 	if (amd_iommu_unmap_flush) {
+		unsigned long flags;
+
+		spin_lock_irqsave(&dma_dom->domain.lock, flags);
 		domain_flush_tlb(&dma_dom->domain);
 		domain_flush_complete(&dma_dom->domain);
+		spin_unlock_irqrestore(&dma_dom->domain.lock, flags);
 		dma_ops_free_iova(dma_dom, dma_addr, pages);
 	} else {
 		pages = __roundup_pow_of_two(pages);
@@ -3246,9 +3266,12 @@ static bool amd_iommu_is_attach_deferred(struct iommu_domain *domain,
 static void amd_iommu_flush_iotlb_all(struct iommu_domain *domain)
 {
 	struct protection_domain *dom = to_pdomain(domain);
+	unsigned long flags;
+
+	spin_lock_irqsave(&dom->lock, flags);
 	domain_flush_tlb_pde(dom);
 	domain_flush_complete(dom);
+	spin_unlock_irqrestore(&dom->lock, flags);
 }
 
 static void amd_iommu_iotlb_sync(struct iommu_domain *domain,
|
Loading…
Reference in New Issue
Block a user