iommu sg merging: parisc: make iommu respect the segment size limits
This patch makes the iommu respect segment size limits when merging sg lists.

Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Cc: Jeff Garzik <jeff@garzik.org>
Cc: James Bottomley <James.Bottomley@steeleye.com>
Acked-by: Jens Axboe <jens.axboe@oracle.com>
Cc: Kyle McMartin <kyle@mcmartin.ca>
Acked-by: Grant Grundler <grundler@parisc-linux.org>
Cc: Matthew Wilcox <willy@debian.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent fde6a3c82d
commit d1b5163206
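The change threads the struct device down into the scatterlist-coalescing helper so the merge loop can consult dma_get_max_seg_size() and stop growing a segment before it exceeds what the device advertises. As a rough illustration, here is a minimal userspace model of that decision; the types, values, and the coalesce()/main() scaffolding are illustrative stand-ins, not the kernel code:

#include <stdio.h>

/* Illustrative stand-in for the kernel definition, not the real value. */
#define DMA_CHUNK_SIZE 65536u

struct chunk { unsigned int length; };

/*
 * Sketch of the coalescing decision: keep appending scatterlist chunks
 * to the current DMA segment until a limit would be exceeded.  The test
 * added by this patch is the max_seg_size comparison.
 */
static unsigned int coalesce(const struct chunk *sg, int nents,
                             unsigned int max_seg_size)
{
	unsigned int n_mappings = 0;
	int i = 0;

	while (i < nents) {
		unsigned int dma_len = sg[i++].length;

		while (i < nents) {
			/* pre-existing per-mapping limit */
			if (dma_len + sg[i].length > DMA_CHUNK_SIZE)
				break;
			/* new: the device's maximum segment size */
			if (sg[i].length + dma_len > max_seg_size)
				break;
			dma_len += sg[i++].length;
		}
		n_mappings++;
	}
	return n_mappings;
}

int main(void)
{
	struct chunk sg[4] = { {4096}, {4096}, {4096}, {4096} };

	/* A device limited to 8 KiB segments gets two mappings, not one. */
	printf("mappings: %u\n", coalesce(sg, 4, 8192));
	return 0;
}

Run against four 4 KiB chunks with an 8 KiB limit, the model produces two mappings where an unlimited merge would have produced one oversized segment, which is exactly the overrun the patch prevents.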
--- a/drivers/parisc/ccio-dma.c
+++ b/drivers/parisc/ccio-dma.c
@@ -941,7 +941,7 @@ ccio_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
 	** w/o this association, we wouldn't have coherent DMA!
 	** Access to the virtual address is what forces a two pass algorithm.
 	*/
-	coalesced = iommu_coalesce_chunks(ioc, sglist, nents, ccio_alloc_range);
+	coalesced = iommu_coalesce_chunks(ioc, dev, sglist, nents, ccio_alloc_range);
 
 	/*
 	** Program the I/O Pdir
--- a/drivers/parisc/iommu-helpers.h
+++ b/drivers/parisc/iommu-helpers.h
@@ -95,12 +95,14 @@ iommu_fill_pdir(struct ioc *ioc, struct scatterlist *startsg, int nents,
 */
 
 static inline unsigned int
-iommu_coalesce_chunks(struct ioc *ioc, struct scatterlist *startsg, int nents,
+iommu_coalesce_chunks(struct ioc *ioc, struct device *dev,
+		      struct scatterlist *startsg, int nents,
 		      int (*iommu_alloc_range)(struct ioc *, size_t))
 {
 	struct scatterlist *contig_sg;	/* contig chunk head */
 	unsigned long dma_offset, dma_len; /* start/len of DMA stream */
 	unsigned int n_mappings = 0;
+	unsigned int max_seg_size = dma_get_max_seg_size(dev);
 
 	while (nents > 0) {
 
@@ -142,6 +144,9 @@ iommu_coalesce_chunks(struct ioc *ioc, struct scatterlist *startsg, int nents,
 			     IOVP_SIZE) > DMA_CHUNK_SIZE))
 			break;
 
+		if (startsg->length + dma_len > max_seg_size)
+			break;
+
 		/*
 		** Next see if we can append the next chunk (i.e.
 		** it must end on one page and begin on another
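For context, dma_get_max_seg_size() reads the limit a driver publishes through dev->dma_parms and falls back to 64 KiB when none has been set. A hedged sketch of that driver side follows; example_probe and the 32 KiB figure are hypothetical:

#include <linux/device.h>
#include <linux/dma-mapping.h>

/*
 * Hypothetical probe excerpt.  The driver owns the dma_parms storage;
 * dma_set_max_seg_size() fails with -EIO if dev->dma_parms is NULL.
 * A real driver would usually embed device_dma_parameters in its
 * private per-device structure rather than using a static.
 */
static int example_probe(struct device *dev)
{
	static struct device_dma_parameters example_dma_parms;

	dev->dma_parms = &example_dma_parms;
	return dma_set_max_seg_size(dev, 32 * 1024); /* hardware limit: 32 KiB */
}

With such a value in place, both the ccio and sba mapping paths now refuse to coalesce a segment past it.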
--- a/drivers/parisc/sba_iommu.c
+++ b/drivers/parisc/sba_iommu.c
@@ -946,7 +946,7 @@ sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
 	** w/o this association, we wouldn't have coherent DMA!
 	** Access to the virtual address is what forces a two pass algorithm.
 	*/
-	coalesced = iommu_coalesce_chunks(ioc, sglist, nents, sba_alloc_range);
+	coalesced = iommu_coalesce_chunks(ioc, dev, sglist, nents, sba_alloc_range);
 
 	/*
 	** Program the I/O Pdir