mirror of https://github.com/torvalds/linux.git
[IA64] fix SBA IOMMU to handle allocation failure properly
It's possible for the SBA IOMMU to fail to find I/O space under heavy I/O load. The SBA IOMMU panics on allocation failure, but it shouldn't: drivers can handle the failure, and the majority of other IOMMU drivers don't panic on allocation failure. This patch fixes the SBA IOMMU paths to handle allocation failure properly.

Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Tony Luck <tony.luck@intel.com>
parent 9ee27c7639
commit e2a465675d
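Taken together, the hunks below make every allocation path report failure upward instead of panicking: sba_alloc_range() returns -1, sba_map_page() returns a 0 DMA address, and sba_map_sg_attrs() returns 0 mapped entries. As a minimal, hypothetical sketch of how a driver then observes such a failure through the generic DMA API (example_map_buffer is illustrative and not part of this patch; dma_map_single() and dma_mapping_error() are the standard kernel interfaces):

    #include <linux/dma-mapping.h>
    #include <linux/errno.h>

    static int example_map_buffer(struct device *dev, void *buf, size_t len,
                                  dma_addr_t *handle)
    {
            *handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
            if (dma_mapping_error(dev, *handle))
                    return -ENOMEM; /* recoverable: retry or fail the request */
            return 0;
    }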
@@ -677,12 +677,19 @@ sba_alloc_range(struct ioc *ioc, struct device *dev, size_t size)
 			spin_unlock_irqrestore(&ioc->saved_lock, flags);
 
 			pide = sba_search_bitmap(ioc, dev, pages_needed, 0);
-			if (unlikely(pide >= (ioc->res_size << 3)))
-				panic(__FILE__ ": I/O MMU @ %p is out of mapping resources\n",
-				      ioc->ioc_hpa);
+			if (unlikely(pide >= (ioc->res_size << 3))) {
+				printk(KERN_WARNING "%s: I/O MMU @ %p is"
+				       "out of mapping resources, %u %u %lx\n",
+				       __func__, ioc->ioc_hpa, ioc->res_size,
+				       pages_needed, dma_get_seg_boundary(dev));
+				return -1;
+			}
 #else
-		panic(__FILE__ ": I/O MMU @ %p is out of mapping resources\n",
-		      ioc->ioc_hpa);
+		printk(KERN_WARNING "%s: I/O MMU @ %p is"
+		       "out of mapping resources, %u %u %lx\n",
+		       __func__, ioc->ioc_hpa, ioc->res_size,
+		       pages_needed, dma_get_seg_boundary(dev));
+		return -1;
 #endif
 	}
 }
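The hunk above replaces the panic with a warning plus an error return. A condensed sketch of the same pattern, with find_free_range() as a hypothetical stand-in for sba_search_bitmap() and its ioc bookkeeping:

    #include <linux/kernel.h>

    /* Hypothetical stand-in for sba_search_bitmap(): returns a negative
     * value when no suitable run of IOVA pages is free. */
    extern int find_free_range(unsigned int pages_needed);

    static int alloc_range_or_fail(unsigned int pages_needed)
    {
            int pide = find_free_range(pages_needed);

            if (pide < 0) {
                    /* Warn instead of panicking; the caller can back off. */
                    printk(KERN_WARNING "%s: out of mapping resources\n",
                           __func__);
                    return -1;
            }
            return pide;
    }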
@@ -965,6 +972,8 @@ static dma_addr_t sba_map_page(struct device *dev, struct page *page,
 #endif
 
 	pide = sba_alloc_range(ioc, dev, size);
+	if (pide < 0)
+		return 0;
 
 	iovp = (dma_addr_t) pide << iovp_shift;
 
@@ -1320,6 +1329,7 @@ sba_coalesce_chunks(struct ioc *ioc, struct device *dev,
 	unsigned long dma_offset, dma_len; /* start/len of DMA stream */
 	int n_mappings = 0;
 	unsigned int max_seg_size = dma_get_max_seg_size(dev);
+	int idx;
 
 	while (nents > 0) {
 		unsigned long vaddr = (unsigned long) sba_sg_address(startsg);
@@ -1418,16 +1428,22 @@ sba_coalesce_chunks(struct ioc *ioc, struct device *dev,
 		vcontig_sg->dma_length = vcontig_len;
 		dma_len = (dma_len + dma_offset + ~iovp_mask) & iovp_mask;
 		ASSERT(dma_len <= DMA_CHUNK_SIZE);
-		dma_sg->dma_address = (dma_addr_t) (PIDE_FLAG
-			| (sba_alloc_range(ioc, dev, dma_len) << iovp_shift)
-			| dma_offset);
+		idx = sba_alloc_range(ioc, dev, dma_len);
+		if (idx < 0) {
+			dma_sg->dma_length = 0;
+			return -1;
+		}
+		dma_sg->dma_address = (dma_addr_t)(PIDE_FLAG | (idx << iovp_shift)
+						   | dma_offset);
 		n_mappings++;
 	}
 
 	return n_mappings;
 }
 
+static void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
+			       int nents, enum dma_data_direction dir,
+			       struct dma_attrs *attrs);
 /**
  * sba_map_sg - map Scatter/Gather list
  * @dev: instance of PCI owned by the driver that's asking.
@@ -1493,6 +1509,10 @@ static int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist,
 	** Access to the virtual address is what forces a two pass algorithm.
 	*/
 	coalesced = sba_coalesce_chunks(ioc, dev, sglist, nents);
+	if (coalesced < 0) {
+		sba_unmap_sg_attrs(dev, sglist, nents, dir, attrs);
+		return 0;
+	}
 
 	/*
 	** Program the I/O Pdir
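With sba_coalesce_chunks() now able to fail, sba_map_sg_attrs() unmaps whatever it had already set up and returns 0, which is how the generic DMA API reports scatter-gather mapping failure. A hypothetical driver-side sketch (example_map_sg is illustrative, not part of the patch; dma_map_sg() is the standard interface and returns the number of mapped segments, or 0 on failure):

    #include <linux/dma-mapping.h>
    #include <linux/errno.h>
    #include <linux/scatterlist.h>

    static int example_map_sg(struct device *dev, struct scatterlist *sgl,
                              int nents)
    {
            int count = dma_map_sg(dev, sgl, nents, DMA_BIDIRECTIONAL);

            if (count == 0)
                    return -ENOMEM; /* mapping space exhausted; retry later */
            return count;           /* number of DMA segments actually mapped */
    }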