intel-iommu sg chaining support

x86_64 defines ARCH_HAS_SG_CHAIN, so the scatterlists handed to the
DMA mapping API may be chained across several array segments rather
than laid out as one contiguous array. An IOMMU implementation that
still walks the list with plain pointer arithmetic (sg++) instead of
for_each_sg() runs off the end of a segment and causes data
corruption (see the sketch below).
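
To illustrate the failure mode, here is a minimal, self-contained
userspace sketch (not kernel code: the struct layout, the chain
marker, and the sg_next() helper below are simplified stand-ins for
the real struct scatterlist machinery):

    #include <stdio.h>

    /* A chained scatterlist is several small arrays; the last slot
     * of each array links to the next array instead of holding data. */
    struct sg_entry {
            int is_chain;            /* set on link slots only */
            struct sg_entry *chain;  /* next segment, valid on link slots */
            unsigned int length;     /* payload length on data slots */
    };

    /* Chain-aware successor, modeled on the kernel's sg_next():
     * step forward, and hop through a link slot if we land on one. */
    static struct sg_entry *sg_next(struct sg_entry *sg)
    {
            sg++;
            if (sg->is_chain)
                    sg = sg->chain;
            return sg;
    }

    int main(void)
    {
            struct sg_entry seg2[2] = {
                    { .length = 30 }, { .length = 40 },
            };
            struct sg_entry seg1[3] = {
                    { .length = 10 }, { .length = 20 },
                    { .is_chain = 1, .chain = seg2 },  /* link slot */
            };
            struct sg_entry *sg = seg1;
            unsigned int total = 0;
            int i;

            /* for_each_sg-style walk: visits the 4 data entries across
             * both segments.  A plain "sg++" walk would read seg1's
             * link slot (and then out-of-bounds memory) as if it were
             * data -- the corruption this patch prevents. */
            for (i = 0; i < 4; i++) {
                    total += sg->length;
                    if (i + 1 < 4)
                            sg = sg_next(sg);
            }
            printf("total length = %u\n", total);  /* prints 100 */
            return 0;
    }

The real for_each_sg() folds the sg_next() call into the loop
increment; the guard above only exists to keep this toy example from
stepping past its last array.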

Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Acked-by: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
Cc: Jens Axboe <jens.axboe@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Author: FUJITA Tomonori
Date:   2007-10-21 16:42:00 -07:00 (committed by Linus Torvalds)
commit c03ab37cbe, parent 358dd8ac53

diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c

@@ -1963,7 +1963,7 @@ static void intel_free_coherent(struct device *hwdev, size_t size,
 }
 
 #define SG_ENT_VIRT_ADDRESS(sg)        (page_address((sg)->page) + (sg)->offset)
-static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sg,
+static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
         int nelems, int dir)
 {
         int i;
@@ -1973,16 +1973,17 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sg,
         struct iova *iova;
         size_t size = 0;
         void *addr;
+        struct scatterlist *sg;
 
         if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
                 return;
 
         domain = find_domain(pdev);
 
-        iova = find_iova(&domain->iovad, IOVA_PFN(sg[0].dma_address));
+        iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
         if (!iova)
                 return;
-        for (i = 0; i < nelems; i++, sg++) {
+        for_each_sg(sglist, sg, nelems, i) {
                 addr = SG_ENT_VIRT_ADDRESS(sg);
                 size += aligned_size((u64)addr, sg->length);
         }
@@ -2003,21 +2004,21 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sg,
 }
 
 static int intel_nontranslate_map_sg(struct device *hddev,
-        struct scatterlist *sg, int nelems, int dir)
+        struct scatterlist *sglist, int nelems, int dir)
 {
         int i;
+        struct scatterlist *sg;
 
-        for (i = 0; i < nelems; i++) {
-                struct scatterlist *s = &sg[i];
-                BUG_ON(!s->page);
-                s->dma_address = virt_to_bus(SG_ENT_VIRT_ADDRESS(s));
-                s->dma_length = s->length;
+        for_each_sg(sglist, sg, nelems, i) {
+                BUG_ON(!sg->page);
+                sg->dma_address = virt_to_bus(SG_ENT_VIRT_ADDRESS(sg));
+                sg->dma_length = sg->length;
         }
         return nelems;
 }
 
-static int intel_map_sg(struct device *hwdev, struct scatterlist *sg,
-        int nelems, int dir)
+static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist,
+                                int nelems, int dir)
 {
         void *addr;
         int i;
@@ -2028,18 +2029,18 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sg,
         size_t offset = 0;
         struct iova *iova = NULL;
         int ret;
-        struct scatterlist *orig_sg = sg;
+        struct scatterlist *sg;
         unsigned long start_addr;
 
         BUG_ON(dir == DMA_NONE);
         if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
-                return intel_nontranslate_map_sg(hwdev, sg, nelems, dir);
+                return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);
 
         domain = get_valid_domain_for_dev(pdev);
         if (!domain)
                 return 0;
 
-        for (i = 0; i < nelems; i++, sg++) {
+        for_each_sg(sglist, sg, nelems, i) {
                 addr = SG_ENT_VIRT_ADDRESS(sg);
                 addr = (void *)virt_to_phys(addr);
                 size += aligned_size((u64)addr, sg->length);
@@ -2047,7 +2048,7 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sg,
 
         iova = __intel_alloc_iova(hwdev, domain, size);
         if (!iova) {
-                orig_sg->dma_length = 0;
+                sglist->dma_length = 0;
                 return 0;
         }
 
@@ -2063,8 +2064,7 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sg,
 
         start_addr = iova->pfn_lo << PAGE_SHIFT_4K;
         offset = 0;
-        sg = orig_sg;
-        for (i = 0; i < nelems; i++, sg++) {
+        for_each_sg(sglist, sg, nelems, i) {
                 addr = SG_ENT_VIRT_ADDRESS(sg);
                 addr = (void *)virt_to_phys(addr);
                 size = aligned_size((u64)addr, sg->length);
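
Taken together, the intel_map_sg() hunks above preserve the driver's
two-pass scheme, only with chain-safe iteration: pass one sums the
4K-aligned size of every entry and grabs a single IOVA range for the
whole list, pass two maps each entry at successive offsets inside
that range.  A minimal userspace model of the arithmetic (the
aligned_size() here only mimics the driver's page rounding; the
addresses and lengths are made up):

    #include <stdio.h>

    #define PAGE_SIZE_4K 4096UL

    /* span of whole 4K pages covering [addr, addr + len) */
    static unsigned long aligned_size(unsigned long addr, unsigned long len)
    {
            unsigned long start = addr & ~(PAGE_SIZE_4K - 1);
            unsigned long end = (addr + len + PAGE_SIZE_4K - 1)
                                    & ~(PAGE_SIZE_4K - 1);
            return end - start;
    }

    int main(void)
    {
            /* (addr, len) pairs standing in for scatterlist entries */
            unsigned long addr[3] = { 0x1000, 0x2800, 0x5ff0 };
            unsigned long len[3]  = { 0x1000, 0x0400, 0x0020 };
            unsigned long size = 0, offset = 0;
            int i;

            /* pass 1: total IOVA space the whole list needs */
            for (i = 0; i < 3; i++)
                    size += aligned_size(addr[i], len[i]);
            printf("one IOVA allocation of 0x%lx bytes\n", size);

            /* pass 2: lay the entries out back to back in the range */
            for (i = 0; i < 3; i++) {
                    printf("entry %d -> IOVA offset 0x%lx\n", i, offset);
                    offset += aligned_size(addr[i], len[i]);
            }
            return 0;
    }

Allocating once for the aggregate size is what makes the head-entry
conventions in the diff work: sglist->dma_length = 0 flags failure on
the first entry, and intel_unmap_sg() recovers the whole range with a
single find_iova() on sglist[0].dma_address.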