dma-mapping updates for Linux 6.12

  - support DMA zones for arm64 systems where memory starts at > 4GB
    (Baruch Siach, Catalin Marinas)
  - support direct calls into dma-iommu and thus obsolete dma_map_ops for
    many common configurations (Leon Romanovsky)
  - add DMA-API tracing (Sean Anderson)
  - remove the not very useful return value from various dma_set_* APIs
    (Christoph Hellwig)
  - misc cleanups and minor optimizations (Chen Y, Yosry Ahmed,
    Christoph Hellwig)
 -----BEGIN PGP SIGNATURE-----
 
 iQI/BAABCgApFiEEgdbnc3r/njty3Iq9D55TZVIEUYMFAmbr2BALHGhjaEBsc3Qu
 ZGUACgkQD55TZVIEUYNheA/6A453SQy2kFvspFRvEp8ztEqtvxwxGLAUMIyvmU+a
 9b37KlMwUnpbMsXK5+KtYdTLRoIvtl89uIkdZq7pYYKj0uoPZvF9QVnKtrJWAvqK
 fFuauokZznuD3ZSd6v6uY4ijb29ImGfx5kZopQf1zWoYLENxM7mWqRU+eqxDozev
 FbyfYhJzMBhpHveen9+Q7PEfi/90ZdEqtJhSK2AOzuV9ZvbYiSFCrcnT/4wM30DS
 2OxjGa8tKcGYZ9ah0rF2V5hboaRuYedTFgXoKfUSJINJkzmBlTXdxVx5Xr3kQtyC
 7S/xv2y79CXkDKck2+IY7xkhwwBsXPrTAyTzWAIJqOEmaMJ4KqEW54JOsK+VHfmO
 29UKBnASOK0xvfCzakm2631iOzEZF743RgpQiOGeMcnph789Mwu8EUCcqeEW/fJy
 Xh7B0z3/XgJz8BtTG/64IhmqO63Cwa/o7DSQdLr9dh5F/mPBzqrnRov97KL7mH1q
 VSO0Z7+8J0x9ALcYutpth/IzG/lXtXn/pfR1sj6dBHvjf5SwjuT8MKUHgh0l6N+C
 BWZn8swwrZaJ2Li2Gv3CpnCzVQZCkL6ns9VqAWiWq7VfGhDLndMqfi/jHCyGH83i
 E3dMtqf81XaQ7JRDPCs7Jx/4Zkn/iNkkZe8IQsByMc1BY4oeD7/Z2s8mkK8MbNla
 /CA=
 =DZVc
 -----END PGP SIGNATURE-----

Merge tag 'dma-mapping-6.12-2024-09-19' of git://git.infradead.org/users/hch/dma-mapping

Pull dma-mapping updates from Christoph Hellwig:

 - support DMA zones for arm64 systems where memory starts at > 4GB
   (Baruch Siach, Catalin Marinas)

 - support direct calls into dma-iommu and thus obsolete dma_map_ops for
   many common configurations (Leon Romanovsky)

 - add DMA-API tracing (Sean Anderson)

 - remove the not very useful return value from various dma_set_* APIs
   (Christoph Hellwig)

 - misc cleanups and minor optimizations (Chen Y, Yosry Ahmed, Christoph
   Hellwig)

* tag 'dma-mapping-6.12-2024-09-19' of git://git.infradead.org/users/hch/dma-mapping:
  dma-mapping: reflow dma_supported
  dma-mapping: reliably inform about DMA support for IOMMU
  dma-mapping: add tracing for dma-mapping API calls
  dma-mapping: use IOMMU DMA calls for common alloc/free page calls
  dma-direct: optimize page freeing when it is not addressable
  dma-mapping: clearly mark DMA ops as an architecture feature
  vdpa_sim: don't select DMA_OPS
  arm64: mm: keep low RAM dma zone
  dma-mapping: don't return errors from dma_set_max_seg_size
  dma-mapping: don't return errors from dma_set_seg_boundary
  dma-mapping: don't return errors from dma_set_min_align_mask
  scsi: check that busses support the DMA API before setting dma parameters
  arm64: mm: fix DMA zone when dma-ranges is missing
  dma-mapping: direct calls for dma-iommu
  dma-mapping: call ->unmap_page and ->unmap_sg unconditionally
  arm64: support DMA zone above 4GB
  dma-mapping: replace zone_dma_bits by zone_dma_limit
  dma-mapping: use bit masking to check VM_DMA_COHERENT
commit 726e2d0cf2
Linus Torvalds, 2024-09-19 11:12:49 +02:00
49 changed files with 782 additions and 226 deletions

@@ -11841,6 +11841,7 @@ T:	git git://git.kernel.org/pub/scm/linux/kernel/git/iommu/linux.git
 F:	drivers/iommu/dma-iommu.c
 F:	drivers/iommu/dma-iommu.h
 F:	drivers/iommu/iova.c
+F:	include/linux/iommu-dma.h
 F:	include/linux/iova.h
 
 IOMMU SUBSYSTEM

@@ -17,6 +17,15 @@ config CPU_MITIGATIONS
 	def_bool y
 endif
 
+#
+# Selected by architectures that need custom DMA operations for e.g. legacy
+# IOMMUs not handled by dma-iommu. Drivers must never select this symbol.
+#
+config ARCH_HAS_DMA_OPS
+	depends on HAS_DMA
+	select DMA_OPS_HELPERS
+	bool
+
 menu "General architecture-dependent options"
 
 config ARCH_HAS_SUBPAGE_FAULTS

@@ -4,12 +4,12 @@ config ALPHA
 	default y
 	select ARCH_32BIT_USTAT_F_TINODE
 	select ARCH_HAS_CURRENT_STACK_POINTER
+	select ARCH_HAS_DMA_OPS if PCI
 	select ARCH_MIGHT_HAVE_PC_PARPORT
 	select ARCH_MIGHT_HAVE_PC_SERIO
 	select ARCH_NO_PREEMPT
 	select ARCH_NO_SG_CHAIN
 	select ARCH_USE_CMPXCHG_LOCKREF
-	select DMA_OPS if PCI
 	select FORCE_PCI
 	select PCI_DOMAINS if PCI
 	select PCI_SYSCALL if PCI

@@ -10,6 +10,7 @@ config ARM
 	select ARCH_HAS_CURRENT_STACK_POINTER
 	select ARCH_HAS_DEBUG_VIRTUAL if MMU
 	select ARCH_HAS_DMA_ALLOC if MMU
+	select ARCH_HAS_DMA_OPS
 	select ARCH_HAS_DMA_WRITE_COMBINE if !ARM_DMA_MEM_BUFFERABLE
 	select ARCH_HAS_ELF_RANDOMIZE
 	select ARCH_HAS_FORTIFY_SOURCE
@@ -54,7 +55,6 @@ config ARM
 	select DCACHE_WORD_ACCESS if HAVE_EFFICIENT_UNALIGNED_ACCESS
 	select DMA_DECLARE_COHERENT
 	select DMA_GLOBAL_POOL if !MMU
-	select DMA_OPS
 	select DMA_NONCOHERENT_MMAP if MMU
 	select EDAC_SUPPORT
 	select EDAC_ATOMIC_SCRUB

@@ -24,6 +24,7 @@ config ARM64
 	select ARCH_HAS_CURRENT_STACK_POINTER
 	select ARCH_HAS_DEBUG_VIRTUAL
 	select ARCH_HAS_DEBUG_VM_PGTABLE
+	select ARCH_HAS_DMA_OPS if XEN
 	select ARCH_HAS_DMA_PREP_COHERENT
 	select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI
 	select ARCH_HAS_FAST_MULTIPLIER

@@ -114,36 +114,33 @@ static void __init arch_reserve_crashkernel(void)
 				      low_size, high);
 }
 
-/*
- * Return the maximum physical address for a zone accessible by the given bits
- * limit. If DRAM starts above 32-bit, expand the zone to the maximum
- * available memory, otherwise cap it at 32-bit.
- */
-static phys_addr_t __init max_zone_phys(unsigned int zone_bits)
+static phys_addr_t __init max_zone_phys(phys_addr_t zone_limit)
 {
-	phys_addr_t zone_mask = DMA_BIT_MASK(zone_bits);
-	phys_addr_t phys_start = memblock_start_of_DRAM();
-
-	if (phys_start > U32_MAX)
-		zone_mask = PHYS_ADDR_MAX;
-	else if (phys_start > zone_mask)
-		zone_mask = U32_MAX;
+	/**
+	 * Information we get from firmware (e.g. DT dma-ranges) describe DMA
+	 * bus constraints. Devices using DMA might have their own limitations.
+	 * Some of them rely on DMA zone in low 32-bit memory. Keep low RAM
+	 * DMA zone on platforms that have RAM there.
+	 */
+	if (memblock_start_of_DRAM() < U32_MAX)
+		zone_limit = min(zone_limit, U32_MAX);
 
-	return min(zone_mask, memblock_end_of_DRAM() - 1) + 1;
+	return min(zone_limit, memblock_end_of_DRAM() - 1) + 1;
 }
 
 static void __init zone_sizes_init(void)
 {
 	unsigned long max_zone_pfns[MAX_NR_ZONES] = {0};
-	unsigned int __maybe_unused acpi_zone_dma_bits;
-	unsigned int __maybe_unused dt_zone_dma_bits;
-	phys_addr_t __maybe_unused dma32_phys_limit = max_zone_phys(32);
+	phys_addr_t __maybe_unused acpi_zone_dma_limit;
+	phys_addr_t __maybe_unused dt_zone_dma_limit;
+	phys_addr_t __maybe_unused dma32_phys_limit =
+		max_zone_phys(DMA_BIT_MASK(32));
 
 #ifdef CONFIG_ZONE_DMA
-	acpi_zone_dma_bits = fls64(acpi_iort_dma_get_max_cpu_address());
-	dt_zone_dma_bits = fls64(of_dma_get_max_cpu_address(NULL));
-	zone_dma_bits = min3(32U, dt_zone_dma_bits, acpi_zone_dma_bits);
-	arm64_dma_phys_limit = max_zone_phys(zone_dma_bits);
+	acpi_zone_dma_limit = acpi_iort_dma_get_max_cpu_address();
+	dt_zone_dma_limit = of_dma_get_max_cpu_address(NULL);
+	zone_dma_limit = min(dt_zone_dma_limit, acpi_zone_dma_limit);
+	arm64_dma_phys_limit = max_zone_phys(zone_dma_limit);
 	max_zone_pfns[ZONE_DMA] = PFN_DOWN(arm64_dma_phys_limit);
 #endif
 #ifdef CONFIG_ZONE_DMA32
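
The rewritten max_zone_phys() above keeps a low 32-bit DMA zone only when there is actually RAM below 4GB, and always clamps the zone to the end of RAM. A standalone user-space sketch of that arithmetic, with invented memory layouts standing in for memblock_start_of_DRAM()/memblock_end_of_DRAM():

    /* Toy model of the new max_zone_phys() logic; dram_start/dram_end are
     * made-up example layouts, not values read from firmware. */
    #include <inttypes.h>
    #include <stdio.h>

    #define U32_MAX 0xffffffffULL

    static uint64_t min_u64(uint64_t a, uint64_t b) { return a < b ? a : b; }

    static uint64_t max_zone_phys(uint64_t zone_limit, uint64_t dram_start,
                                  uint64_t dram_end)
    {
            /* keep the low 32-bit DMA zone only if RAM exists down there */
            if (dram_start < U32_MAX)
                    zone_limit = min_u64(zone_limit, U32_MAX);
            /* never let the zone extend past the end of RAM */
            return min_u64(zone_limit, dram_end - 1) + 1;
    }

    int main(void)
    {
            /* RAM from 2GB to 4GB: the DMA zone stays capped at 4GB */
            printf("%#" PRIx64 "\n",
                   max_zone_phys(U32_MAX, 0x80000000ULL, 0x100000000ULL));
            /* RAM from 32GB to 64GB with a 36-bit bus limit: the zone now
             * follows the bus limit instead of collapsing to 4GB */
            printf("%#" PRIx64 "\n",
                   max_zone_phys((1ULL << 36) - 1, 0x800000000ULL,
                                 0x1000000000ULL));
            return 0;
    }

The old zone_dma_bits representation forced everything through a bit count, which could not describe a DMA zone that begins above 4GB.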

@@ -8,6 +8,7 @@ config MIPS
 	select ARCH_HAS_CPU_FINALIZE_INIT
 	select ARCH_HAS_CURRENT_STACK_POINTER if !CC_IS_CLANG || CLANG_VERSION >= 140000
 	select ARCH_HAS_DEBUG_VIRTUAL if !64BIT
+	select ARCH_HAS_DMA_OPS if MACH_JAZZ
 	select ARCH_HAS_FORTIFY_SOURCE
 	select ARCH_HAS_KCOV
 	select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE if !EVA
@@ -393,7 +394,6 @@ config MACH_JAZZ
 	select ARC_PROMLIB
 	select ARCH_MIGHT_HAVE_PC_PARPORT
 	select ARCH_MIGHT_HAVE_PC_SERIO
-	select DMA_OPS
 	select FW_ARC
 	select FW_ARC32
 	select ARCH_MAY_HAVE_PC_FDC

@@ -10,6 +10,7 @@ config PARISC
 	select ARCH_WANT_FRAME_POINTERS
 	select ARCH_HAS_CPU_CACHE_ALIASING
 	select ARCH_HAS_DMA_ALLOC if PA11
+	select ARCH_HAS_DMA_OPS
 	select ARCH_HAS_ELF_RANDOMIZE
 	select ARCH_HAS_STRICT_KERNEL_RWX
 	select ARCH_HAS_STRICT_MODULE_RWX
@@ -23,7 +24,6 @@ config PARISC
 	select ARCH_HAS_CACHE_LINE_SIZE
 	select ARCH_HAS_DEBUG_VM_PGTABLE
 	select HAVE_RELIABLE_STACKTRACE
-	select DMA_OPS
 	select RTC_CLASS
 	select RTC_DRV_GENERIC
 	select INIT_ALL_POSSIBLE

@@ -133,6 +133,7 @@ config PPC
 	select ARCH_HAS_DEBUG_WX		if STRICT_KERNEL_RWX
 	select ARCH_HAS_DEVMEM_IS_ALLOWED
 	select ARCH_HAS_DMA_MAP_DIRECT		if PPC_PSERIES
+	select ARCH_HAS_DMA_OPS			if PPC64
 	select ARCH_HAS_FORTIFY_SOURCE
 	select ARCH_HAS_GCOV_PROFILE_ALL
 	select ARCH_HAS_KCOV
@@ -185,7 +186,6 @@ config PPC
 	select CPUMASK_OFFSTACK			if NR_CPUS >= 8192
 	select DCACHE_WORD_ACCESS		if PPC64 && CPU_LITTLE_ENDIAN
 	select DMA_OPS_BYPASS			if PPC64
-	select DMA_OPS				if PPC64
 	select DYNAMIC_FTRACE			if FUNCTION_TRACER
 	select EDAC_ATOMIC_SCRUB
 	select EDAC_SUPPORT

@@ -216,7 +216,7 @@ static int __init mark_nonram_nosave(void)
  * everything else. GFP_DMA32 page allocations automatically fall back to
  * ZONE_DMA.
  *
- * By using 31-bit unconditionally, we can exploit zone_dma_bits to inform the
+ * By using 31-bit unconditionally, we can exploit zone_dma_limit to inform the
  * generic DMA mapping code.  32-bit only devices (if not handled by an IOMMU
  * anyway) will take a first dip into ZONE_NORMAL and get otherwise served by
  * ZONE_DMA.
@@ -230,6 +230,7 @@ void __init paging_init(void)
 {
 	unsigned long long total_ram = memblock_phys_mem_size();
 	phys_addr_t top_of_ram = memblock_end_of_DRAM();
+	int zone_dma_bits;
 
 #ifdef CONFIG_HIGHMEM
 	unsigned long v = __fix_to_virt(FIX_KMAP_END);
@@ -256,6 +257,8 @@ void __init paging_init(void)
 	else
 		zone_dma_bits = 31;
 
+	zone_dma_limit = DMA_BIT_MASK(zone_dma_bits);
+
 #ifdef CONFIG_ZONE_DMA
 	max_zone_pfns[ZONE_DMA]	= min(max_low_pfn,
 				      1UL << (zone_dma_bits - PAGE_SHIFT));

@@ -70,6 +70,7 @@ config S390
 	select ARCH_HAS_DEBUG_VM_PGTABLE
 	select ARCH_HAS_DEBUG_WX
 	select ARCH_HAS_DEVMEM_IS_ALLOWED
+	select ARCH_HAS_DMA_OPS if PCI
 	select ARCH_HAS_ELF_RANDOMIZE
 	select ARCH_HAS_FORCE_DMA_UNENCRYPTED
 	select ARCH_HAS_FORTIFY_SOURCE
@@ -137,7 +138,6 @@ config S390
 	select BUILDTIME_TABLE_SORT
 	select CLONE_BACKWARDS2
 	select DCACHE_WORD_ACCESS if !KMSAN
-	select DMA_OPS if PCI
 	select DYNAMIC_FTRACE if FUNCTION_TRACER
 	select FUNCTION_ALIGNMENT_8B if CC_IS_GCC
 	select FUNCTION_ALIGNMENT_16B if !CC_IS_GCC

@@ -97,7 +97,7 @@ void __init paging_init(void)
 
 	vmem_map_init();
 	sparse_init();
-	zone_dma_bits = 31;
+	zone_dma_limit = DMA_BIT_MASK(31);
 	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
 	max_zone_pfns[ZONE_DMA] = virt_to_pfn(MAX_DMA_ADDRESS);
 	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;

@@ -14,9 +14,9 @@ config SPARC
 	bool
 	default y
 	select ARCH_HAS_CPU_CACHE_ALIASING
+	select ARCH_HAS_DMA_OPS
 	select ARCH_MIGHT_HAVE_PC_PARPORT if SPARC64 && PCI
 	select ARCH_MIGHT_HAVE_PC_SERIO
-	select DMA_OPS
 	select OF
 	select OF_PROMTREE
 	select HAVE_ASM_MODVERSIONS

@@ -79,6 +79,7 @@ config X86
 	select ARCH_HAS_DEBUG_VIRTUAL
 	select ARCH_HAS_DEBUG_VM_PGTABLE	if !X86_PAE
 	select ARCH_HAS_DEVMEM_IS_ALLOWED
+	select ARCH_HAS_DMA_OPS			if GART_IOMMU || XEN
 	select ARCH_HAS_EARLY_DEBUG		if KGDB
 	select ARCH_HAS_ELF_RANDOMIZE
 	select ARCH_HAS_FAST_MULTIPLIER
@@ -944,7 +945,6 @@ config DMI
 
 config GART_IOMMU
 	bool "Old AMD GART IOMMU support"
-	select DMA_OPS
 	select IOMMU_HELPER
 	select SWIOTLB
 	depends on X86_64 && PCI && AMD_NB

@@ -447,9 +447,7 @@ static int init_pci(struct qaic_device *qdev, struct pci_dev *pdev)
 	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
 	if (ret)
 		return ret;
-	ret = dma_set_max_seg_size(&pdev->dev, UINT_MAX);
-	if (ret)
-		return ret;
+	dma_set_max_seg_size(&pdev->dev, UINT_MAX);
 
 	qdev->bar_0 = devm_ioremap_resource(&pdev->dev, &pdev->resource[0]);
 	if (IS_ERR(qdev->bar_0))

@@ -598,9 +598,7 @@ static int idma64_probe(struct idma64_chip *chip)
 
 	idma64->dma.dev = chip->sysdev;
 
-	ret = dma_set_max_seg_size(idma64->dma.dev, IDMA64C_CTLH_BLOCK_TS_MASK);
-	if (ret)
-		return ret;
+	dma_set_max_seg_size(idma64->dma.dev, IDMA64C_CTLH_BLOCK_TS_MASK);
 
 	ret = dma_async_device_register(&idma64->dma);
 	if (ret)

@@ -3163,10 +3163,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
 	 * This is the limit for transfers with a buswidth of 1, larger
 	 * buswidths will have larger limits.
 	 */
-	ret = dma_set_max_seg_size(&adev->dev, 1900800);
-	if (ret)
-		dev_err(&adev->dev, "unable to set the seg size\n");
-
+	dma_set_max_seg_size(&adev->dev, 1900800);
 
 	init_pl330_debugfs(pl330);
 	dev_info(&adev->dev,

@@ -1325,11 +1325,7 @@ static int bam_dma_probe(struct platform_device *pdev)
 
 	/* set max dma segment size */
 	bdev->common.dev = bdev->dev;
-	ret = dma_set_max_seg_size(bdev->common.dev, BAM_FIFO_SIZE);
-	if (ret) {
-		dev_err(bdev->dev, "cannot set maximum segment size\n");
-		goto err_bam_channel_exit;
-	}
+	dma_set_max_seg_size(bdev->common.dev, BAM_FIFO_SIZE);
 
 	platform_set_drvdata(pdev, bdev);
 

@@ -1868,9 +1868,7 @@ static int rcar_dmac_probe(struct platform_device *pdev)
 	dmac->dev = &pdev->dev;
 	platform_set_drvdata(pdev, dmac);
 
-	ret = dma_set_max_seg_size(dmac->dev, RCAR_DMATCR_MASK);
-	if (ret)
-		return ret;
+	dma_set_max_seg_size(dmac->dev, RCAR_DMATCR_MASK);
 
 	ret = dma_set_mask_and_coherent(dmac->dev, DMA_BIT_MASK(40));
 	if (ret)

@@ -3632,11 +3632,7 @@ static int __init d40_probe(struct platform_device *pdev)
 	if (ret)
 		goto destroy_cache;
 
-	ret = dma_set_max_seg_size(base->dev, STEDMA40_MAX_SEG_SIZE);
-	if (ret) {
-		d40_err(dev, "Failed to set dma max seg size\n");
-		goto destroy_cache;
-	}
+	dma_set_max_seg_size(base->dev, STEDMA40_MAX_SEG_SIZE);
 
 	d40_hw_init(base);
 

@@ -559,11 +559,7 @@ static int mtk_drm_kms_init(struct drm_device *drm)
 	 * Configure the DMA segment size to make sure we get contiguous IOVA
 	 * when importing PRIME buffers.
 	 */
-	ret = dma_set_max_seg_size(dma_dev, UINT_MAX);
-	if (ret) {
-		dev_err(dma_dev, "Failed to set DMA segment size\n");
-		goto err_component_unbind;
-	}
+	dma_set_max_seg_size(dma_dev, UINT_MAX);
 
 	ret = drm_vblank_init(drm, MAX_CRTC);
 	if (ret < 0)

@@ -151,7 +151,7 @@ config OF_IOMMU
 # IOMMU-agnostic DMA-mapping layer
 config IOMMU_DMA
 	def_bool ARM64 || X86 || S390
-	select DMA_OPS
+	select DMA_OPS_HELPERS
 	select IOMMU_API
 	select IOMMU_IOVA
 	select IRQ_MSI_IOMMU

@@ -17,6 +17,7 @@
 #include <linux/gfp.h>
 #include <linux/huge_mm.h>
 #include <linux/iommu.h>
+#include <linux/iommu-dma.h>
 #include <linux/iova.h>
 #include <linux/irq.h>
 #include <linux/list_sort.h>
@@ -1037,9 +1038,8 @@ out_unmap:
 	return NULL;
 }
 
-static struct sg_table *iommu_dma_alloc_noncontiguous(struct device *dev,
-		size_t size, enum dma_data_direction dir, gfp_t gfp,
-		unsigned long attrs)
+struct sg_table *iommu_dma_alloc_noncontiguous(struct device *dev, size_t size,
+		enum dma_data_direction dir, gfp_t gfp, unsigned long attrs)
 {
 	struct dma_sgt_handle *sh;
 
@@ -1055,7 +1055,7 @@ static struct sg_table *iommu_dma_alloc_noncontiguous(struct device *dev,
 	return &sh->sgt;
 }
 
-static void iommu_dma_free_noncontiguous(struct device *dev, size_t size,
+void iommu_dma_free_noncontiguous(struct device *dev, size_t size,
 		struct sg_table *sgt, enum dma_data_direction dir)
 {
 	struct dma_sgt_handle *sh = sgt_handle(sgt);
@@ -1066,8 +1066,8 @@ static void iommu_dma_free_noncontiguous(struct device *dev, size_t size,
 	kfree(sh);
 }
 
-static void iommu_dma_sync_single_for_cpu(struct device *dev,
-		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
+void iommu_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
+		size_t size, enum dma_data_direction dir)
 {
 	phys_addr_t phys;
 
@@ -1081,8 +1081,8 @@ static void iommu_dma_sync_single_for_cpu(struct device *dev,
 		swiotlb_sync_single_for_cpu(dev, phys, size, dir);
 }
 
-static void iommu_dma_sync_single_for_device(struct device *dev,
-		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
+void iommu_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
+		size_t size, enum dma_data_direction dir)
 {
 	phys_addr_t phys;
 
@@ -1096,9 +1096,8 @@ static void iommu_dma_sync_single_for_device(struct device *dev,
 	arch_sync_dma_for_device(phys, size, dir);
 }
 
-static void iommu_dma_sync_sg_for_cpu(struct device *dev,
-		struct scatterlist *sgl, int nelems,
-		enum dma_data_direction dir)
+void iommu_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
+		int nelems, enum dma_data_direction dir)
 {
 	struct scatterlist *sg;
 	int i;
@@ -1112,9 +1111,8 @@ static void iommu_dma_sync_sg_for_cpu(struct device *dev,
 		arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
 }
 
-static void iommu_dma_sync_sg_for_device(struct device *dev,
-		struct scatterlist *sgl, int nelems,
-		enum dma_data_direction dir)
+void iommu_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
+		int nelems, enum dma_data_direction dir)
 {
 	struct scatterlist *sg;
 	int i;
@@ -1129,9 +1127,9 @@ static void iommu_dma_sync_sg_for_device(struct device *dev,
 		arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);
 }
 
-static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
+dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
 		unsigned long offset, size_t size, enum dma_data_direction dir,
 		unsigned long attrs)
 {
 	phys_addr_t phys = page_to_phys(page) + offset;
 	bool coherent = dev_is_dma_coherent(dev);
@@ -1189,7 +1187,7 @@ static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
 	return iova;
 }
 
-static void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
+void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
 		size_t size, enum dma_data_direction dir, unsigned long attrs)
 {
 	struct iommu_domain *domain = iommu_get_dma_domain(dev);
@@ -1342,8 +1340,8 @@ out_unmap:
  * impedance-matching, to be able to hand off a suitably-aligned list,
  * but still preserve the original offsets and sizes for the caller.
  */
-static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
-		int nents, enum dma_data_direction dir, unsigned long attrs)
+int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+		enum dma_data_direction dir, unsigned long attrs)
 {
 	struct iommu_domain *domain = iommu_get_dma_domain(dev);
 	struct iommu_dma_cookie *cookie = domain->iova_cookie;
@@ -1462,8 +1460,8 @@ out:
 	return ret;
 }
 
-static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
-		int nents, enum dma_data_direction dir, unsigned long attrs)
+void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
+		enum dma_data_direction dir, unsigned long attrs)
 {
 	dma_addr_t end = 0, start;
 	struct scatterlist *tmp;
@@ -1512,7 +1510,7 @@ static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
 		__iommu_dma_unmap(dev, start, end - start);
 }
 
-static dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
+dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
 		size_t size, enum dma_data_direction dir, unsigned long attrs)
 {
 	return __iommu_dma_map(dev, phys, size,
@@ -1520,7 +1518,7 @@ static dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
 			dma_get_mask(dev));
 }
 
-static void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
+void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
 		size_t size, enum dma_data_direction dir, unsigned long attrs)
 {
 	__iommu_dma_unmap(dev, handle, size);
@@ -1557,7 +1555,7 @@ static void __iommu_dma_free(struct device *dev, size_t size, void *cpu_addr)
 	dma_free_contiguous(dev, page, alloc_size);
 }
 
-static void iommu_dma_free(struct device *dev, size_t size, void *cpu_addr,
+void iommu_dma_free(struct device *dev, size_t size, void *cpu_addr,
 		dma_addr_t handle, unsigned long attrs)
 {
 	__iommu_dma_unmap(dev, handle, size);
@@ -1601,8 +1599,8 @@ out_free_pages:
 	return NULL;
 }
 
-static void *iommu_dma_alloc(struct device *dev, size_t size,
-		dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
+void *iommu_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
+		gfp_t gfp, unsigned long attrs)
 {
 	bool coherent = dev_is_dma_coherent(dev);
 	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
@@ -1635,7 +1633,7 @@ static void *iommu_dma_alloc(struct device *dev, size_t size,
 	return cpu_addr;
 }
 
-static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
 		void *cpu_addr, dma_addr_t dma_addr, size_t size,
 		unsigned long attrs)
 {
@@ -1666,7 +1664,7 @@ static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
 			vma->vm_page_prot);
 }
 
-static int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
+int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
 		void *cpu_addr, dma_addr_t dma_addr, size_t size,
 		unsigned long attrs)
 {
@@ -1693,19 +1691,19 @@ static int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
 	return ret;
 }
 
-static unsigned long iommu_dma_get_merge_boundary(struct device *dev)
+unsigned long iommu_dma_get_merge_boundary(struct device *dev)
 {
 	struct iommu_domain *domain = iommu_get_dma_domain(dev);
 
 	return (1UL << __ffs(domain->pgsize_bitmap)) - 1;
 }
 
-static size_t iommu_dma_opt_mapping_size(void)
+size_t iommu_dma_opt_mapping_size(void)
 {
 	return iova_rcache_range();
 }
 
-static size_t iommu_dma_max_mapping_size(struct device *dev)
+size_t iommu_dma_max_mapping_size(struct device *dev)
 {
 	if (dev_is_untrusted(dev))
 		return swiotlb_max_mapping_size(dev);
@@ -1713,32 +1711,6 @@ static size_t iommu_dma_max_mapping_size(struct device *dev)
 	return SIZE_MAX;
 }
 
-static const struct dma_map_ops iommu_dma_ops = {
-	.flags			= DMA_F_PCI_P2PDMA_SUPPORTED |
-				  DMA_F_CAN_SKIP_SYNC,
-	.alloc			= iommu_dma_alloc,
-	.free			= iommu_dma_free,
-	.alloc_pages_op		= dma_common_alloc_pages,
-	.free_pages		= dma_common_free_pages,
-	.alloc_noncontiguous	= iommu_dma_alloc_noncontiguous,
-	.free_noncontiguous	= iommu_dma_free_noncontiguous,
-	.mmap			= iommu_dma_mmap,
-	.get_sgtable		= iommu_dma_get_sgtable,
-	.map_page		= iommu_dma_map_page,
-	.unmap_page		= iommu_dma_unmap_page,
-	.map_sg			= iommu_dma_map_sg,
-	.unmap_sg		= iommu_dma_unmap_sg,
-	.sync_single_for_cpu	= iommu_dma_sync_single_for_cpu,
-	.sync_single_for_device = iommu_dma_sync_single_for_device,
-	.sync_sg_for_cpu	= iommu_dma_sync_sg_for_cpu,
-	.sync_sg_for_device	= iommu_dma_sync_sg_for_device,
-	.map_resource		= iommu_dma_map_resource,
-	.unmap_resource		= iommu_dma_unmap_resource,
-	.get_merge_boundary	= iommu_dma_get_merge_boundary,
-	.opt_mapping_size	= iommu_dma_opt_mapping_size,
-	.max_mapping_size	= iommu_dma_max_mapping_size,
-};
-
 void iommu_setup_dma_ops(struct device *dev)
 {
 	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
@@ -1746,19 +1718,15 @@ void iommu_setup_dma_ops(struct device *dev)
 	if (dev_is_pci(dev))
 		dev->iommu->pci_32bit_workaround = !iommu_dma_forcedac;
 
-	if (iommu_is_dma_domain(domain)) {
-		if (iommu_dma_init_domain(domain, dev))
-			goto out_err;
-		dev->dma_ops = &iommu_dma_ops;
-	} else if (dev->dma_ops == &iommu_dma_ops) {
-		/* Clean up if we've switched *from* a DMA domain */
-		dev->dma_ops = NULL;
-	}
+	dev->dma_iommu = iommu_is_dma_domain(domain);
+	if (dev->dma_iommu && iommu_dma_init_domain(domain, dev))
+		goto out_err;
 
 	return;
 out_err:
 	pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
 		dev_name(dev));
+	dev->dma_iommu = false;
 }
 
 static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
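
With iommu_dma_ops removed, the dma-iommu entry points become ordinary exported functions and the new dev->dma_iommu flag selects them. The actual dispatch lives in the generic mapping code, which is not part of this excerpt; the sketch below only illustrates the idea, reusing use_dma_iommu() and iommu_dma_map_page() from this series while the surrounding helper name is invented:

    /* Illustrative only: roughly how a map_page path can branch once the
     * IOMMU case is a direct call rather than a dma_map_ops instance.
     * Assumes the usual kernel/dma internal headers are available. */
    static dma_addr_t example_map_page(struct device *dev, struct page *page,
                    unsigned long offset, size_t size,
                    enum dma_data_direction dir, unsigned long attrs)
    {
            const struct dma_map_ops *ops = get_dma_ops(dev);

            if (!ops && !use_dma_iommu(dev))
                    /* dma-direct path */
                    return dma_direct_map_page(dev, page, offset, size, dir, attrs);
            if (use_dma_iommu(dev))
                    /* direct call into dma-iommu, no indirect branch */
                    return iommu_dma_map_page(dev, page, offset, size, dir, attrs);
            return ops->map_page(dev, page, offset, size, dir, attrs);
    }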

@@ -12,7 +12,6 @@ config DMAR_DEBUG
 config INTEL_IOMMU
 	bool "Support for Intel IOMMU using DMA Remapping Devices"
 	depends on PCI_MSI && ACPI && X86
-	select DMA_OPS
 	select IOMMU_API
 	select IOMMU_IOVA
 	select IOMMUFD_DRIVER if IOMMUFD

@@ -387,7 +387,7 @@ static struct macio_dev * macio_add_one_device(struct macio_chip *chip,
 	dma_set_max_seg_size(&dev->ofdev.dev, 65536);
 	dma_set_seg_boundary(&dev->ofdev.dev, 0xffffffff);
 
-#if defined(CONFIG_PCI) && defined(CONFIG_DMA_OPS)
+#if defined(CONFIG_PCI) && defined(CONFIG_ARCH_HAS_DMA_OPS)
 	/* Set the DMA ops to the ones from the PCI device, this could be
 	 * fishy if we didn't know that on PowerMac it's always direct ops
 	 * or iommu ops that will work fine
@@ -396,7 +396,7 @@ static struct macio_dev * macio_add_one_device(struct macio_chip *chip,
 	 */
 	dev->ofdev.dev.archdata = chip->lbus.pdev->dev.archdata;
 	dev->ofdev.dev.dma_ops = chip->lbus.pdev->dev.dma_ops;
-#endif /* CONFIG_PCI && CONFIG_DMA_OPS */
+#endif /* CONFIG_PCI && CONFIG_ARCH_HAS_DMA_OPS */
 
 #ifdef DEBUG
 	printk("preparing mdev @%p, ofdev @%p, dev @%p, kobj @%p\n",

@@ -854,8 +854,7 @@ int vb2_dma_contig_set_max_seg_size(struct device *dev, unsigned int size)
 		return -ENODEV;
 	}
 	if (dma_get_max_seg_size(dev) < size)
-		return dma_set_max_seg_size(dev, size);
-
+		dma_set_max_seg_size(dev, size);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(vb2_dma_contig_set_max_seg_size);

@@ -4,8 +4,13 @@ config VIDEO_INTEL_IPU6
 	depends on VIDEO_DEV
 	depends on X86 && X86_64 && HAS_DMA
 	depends on IPU_BRIDGE || !IPU_BRIDGE
+	#
+	# This driver incorrectly tries to override the dma_ops. It should
+	# never have done that, but for now keep it working on architectures
+	# that use dma ops
+	#
+	depends on ARCH_HAS_DMA_OPS
 	select AUXILIARY_BUS
-	select DMA_OPS
 	select IOMMU_IOVA
 	select VIDEO_V4L2_SUBDEV_API
 	select MEDIA_CONTROLLER

@@ -576,9 +576,7 @@ static int ipu6_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	if (ret)
 		return dev_err_probe(dev, ret, "Failed to set DMA mask\n");
 
-	ret = dma_set_max_seg_size(dev, UINT_MAX);
-	if (ret)
-		return dev_err_probe(dev, ret, "Failed to set max_seg_size\n");
+	dma_set_max_seg_size(dev, UINT_MAX);
 
 	ret = ipu6_pci_config_setup(pdev, isp->hw_ver);
 	if (ret)

@@ -213,7 +213,8 @@ static int sdmmc_idma_setup(struct mmci_host *host)
 		host->mmc->max_seg_size = host->mmc->max_req_size;
 	}
 
-	return dma_set_max_seg_size(dev, host->mmc->max_seg_size);
+	dma_set_max_seg_size(dev, host->mmc->max_seg_size);
+	return 0;
 }
 
 static int sdmmc_idma_start(struct mmci_host *host, unsigned int *datactrl)

@@ -1496,11 +1496,7 @@ static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (err)
 		goto release_region;
 
-	err = dma_set_max_seg_size(&pdev->dev, UINT_MAX);
-	if (err) {
-		dev_err(&pdev->dev, "Failed to set dma device segment size\n");
-		goto release_region;
-	}
+	dma_set_max_seg_size(&pdev->dev, UINT_MAX);
 
 	err = -ENOMEM;
 	gc = vzalloc(sizeof(*gc));

@@ -13861,12 +13861,7 @@ fcponly:
 	if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
 		sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
 
-	rc = dma_set_max_seg_size(&phba->pcidev->dev, sli4_params->sge_supp_len);
-	if (unlikely(rc)) {
-		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-				"6400 Can't set dma maximum segment size\n");
-		return rc;
-	}
+	dma_set_max_seg_size(&phba->pcidev->dev, sli4_params->sge_supp_len);
 
 	/*
 	 * Check whether the adapter supports an embedded copy of the

@@ -1988,8 +1988,15 @@ void scsi_init_limits(struct Scsi_Host *shost, struct queue_limits *lim)
 	if (shost->no_highmem)
 		lim->features |= BLK_FEAT_BOUNCE_HIGH;
 
-	dma_set_seg_boundary(dev, shost->dma_boundary);
-	dma_set_max_seg_size(dev, shost->max_segment_size);
+	/*
+	 * Propagate the DMA formation properties to the dma-mapping layer as
+	 * a courtesy service to the LLDDs. This needs to check that the buses
+	 * actually support the DMA API first, though.
+	 */
+	if (dev->dma_parms) {
+		dma_set_seg_boundary(dev, shost->dma_boundary);
+		dma_set_max_seg_size(dev, shost->max_segment_size);
+	}
 }
 EXPORT_SYMBOL_GPL(scsi_init_limits);

@@ -11,8 +11,7 @@ if VDPA
 
 config VDPA_SIM
 	tristate "vDPA device simulator core"
-	depends on RUNTIME_TESTING_MENU && HAS_DMA
-	select DMA_OPS
+	depends on RUNTIME_TESTING_MENU
 	select VHOST_RING
 	select IOMMU_IOVA
 	help
@@ -36,7 +35,12 @@ config VDPA_SIM_BLOCK
 config VDPA_USER
 	tristate "VDUSE (vDPA Device in Userspace) support"
 	depends on EVENTFD && MMU && HAS_DMA
-	select DMA_OPS
+	#
+	# This driver incorrectly tries to override the dma_ops. It should
+	# never have done that, but for now keep it working on architectures
+	# that use dma ops
+	#
+	depends on ARCH_HAS_DMA_OPS
 	select VHOST_IOTLB
 	select IOMMU_IOVA
 	help

@@ -177,8 +177,8 @@ config XEN_GRANT_DMA_ALLOC
 
 config SWIOTLB_XEN
 	def_bool y
+	depends on ARCH_HAS_DMA_OPS
 	depends on XEN_PV || ARM || ARM64
-	select DMA_OPS
 	select SWIOTLB
 
 config XEN_PCI_STUB
@@ -348,10 +348,10 @@ config XEN_GRANT_DMA_IOMMU
 
 config XEN_GRANT_DMA_OPS
 	bool
-	select DMA_OPS
 
 config XEN_VIRTIO
 	bool "Xen virtio support"
+	depends on ARCH_HAS_DMA_OPS
 	depends on VIRTIO
 	select XEN_GRANT_DMA_OPS
 	select XEN_GRANT_DMA_IOMMU if OF

@@ -707,6 +707,8 @@ struct device_physical_location {
  *		for dma allocations. This flag is managed by the dma ops
  *		instance from ->dma_supported.
  * @dma_skip_sync: DMA sync operations can be skipped for coherent buffers.
+ * @dma_iommu: Device is using default IOMMU implementation for DMA and
+ *		doesn't rely on dma_ops structure.
  *
  * At the lowest level, every device in a Linux system is represented by an
  * instance of struct device. The device structure contains the information
@@ -748,7 +750,7 @@ struct device {
 	struct dev_pin_info	*pins;
 #endif
 	struct dev_msi_info	msi;
-#ifdef CONFIG_DMA_OPS
+#ifdef CONFIG_ARCH_HAS_DMA_OPS
 	const struct dma_map_ops *dma_ops;
 #endif
 	u64		*dma_mask;	/* dma mask (if dma'able device) */
@@ -822,6 +824,9 @@ struct device {
 #ifdef CONFIG_DMA_NEED_SYNC
 	bool			dma_skip_sync:1;
 #endif
+#ifdef CONFIG_IOMMU_DMA
+	bool			dma_iommu:1;
+#endif
 };
 
 /**

@@ -12,7 +12,7 @@
 #include <linux/mem_encrypt.h>
 #include <linux/swiotlb.h>
 
-extern unsigned int zone_dma_bits;
+extern u64 zone_dma_limit;
 
 /*
  * Record the mapping of CPU physical to DMA addresses for a given region.

@@ -13,20 +13,7 @@
 struct cma;
 struct iommu_ops;
 
-/*
- * Values for struct dma_map_ops.flags:
- *
- * DMA_F_PCI_P2PDMA_SUPPORTED: Indicates the dma_map_ops implementation can
- * handle PCI P2PDMA pages in the map_sg/unmap_sg operation.
- * DMA_F_CAN_SKIP_SYNC: DMA sync operations can be skipped if the device is
- * coherent and it's not an SWIOTLB buffer.
- */
-#define DMA_F_PCI_P2PDMA_SUPPORTED	(1 << 0)
-#define DMA_F_CAN_SKIP_SYNC		(1 << 1)
-
 struct dma_map_ops {
-	unsigned int flags;
-
 	void *(*alloc)(struct device *dev, size_t size,
 			dma_addr_t *dma_handle, gfp_t gfp,
 			unsigned long attrs);
@@ -88,7 +75,7 @@ struct dma_map_ops {
 	unsigned long (*get_merge_boundary)(struct device *dev);
 };
 
-#ifdef CONFIG_DMA_OPS
+#ifdef CONFIG_ARCH_HAS_DMA_OPS
 #include <asm/dma-mapping.h>
 
 static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
@@ -103,7 +90,7 @@ static inline void set_dma_ops(struct device *dev,
 {
 	dev->dma_ops = dma_ops;
 }
-#else /* CONFIG_DMA_OPS */
+#else /* CONFIG_ARCH_HAS_DMA_OPS */
 static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
 {
 	return NULL;
@@ -112,7 +99,7 @@ static inline void set_dma_ops(struct device *dev,
 		const struct dma_map_ops *dma_ops)
 {
 }
-#endif /* CONFIG_DMA_OPS */
+#endif /* CONFIG_ARCH_HAS_DMA_OPS */
 
 #ifdef CONFIG_DMA_CMA
 extern struct cma *dma_contiguous_default_area;

@@ -524,13 +524,11 @@ static inline unsigned int dma_get_max_seg_size(struct device *dev)
 	return SZ_64K;
 }
 
-static inline int dma_set_max_seg_size(struct device *dev, unsigned int size)
+static inline void dma_set_max_seg_size(struct device *dev, unsigned int size)
 {
-	if (dev->dma_parms) {
-		dev->dma_parms->max_segment_size = size;
-		return 0;
-	}
-	return -EIO;
+	if (WARN_ON_ONCE(!dev->dma_parms))
+		return;
+	dev->dma_parms->max_segment_size = size;
 }
 
 static inline unsigned long dma_get_seg_boundary(struct device *dev)
@@ -559,13 +557,11 @@ static inline unsigned long dma_get_seg_boundary_nr_pages(struct device *dev,
 	return (dma_get_seg_boundary(dev) >> page_shift) + 1;
 }
 
-static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
+static inline void dma_set_seg_boundary(struct device *dev, unsigned long mask)
 {
-	if (dev->dma_parms) {
-		dev->dma_parms->segment_boundary_mask = mask;
-		return 0;
-	}
-	return -EIO;
+	if (WARN_ON_ONCE(!dev->dma_parms))
+		return;
+	dev->dma_parms->segment_boundary_mask = mask;
 }
 
 static inline unsigned int dma_get_min_align_mask(struct device *dev)
@@ -575,13 +571,12 @@ static inline unsigned int dma_get_min_align_mask(struct device *dev)
 	return 0;
 }
 
-static inline int dma_set_min_align_mask(struct device *dev,
+static inline void dma_set_min_align_mask(struct device *dev,
 		unsigned int min_align_mask)
 {
 	if (WARN_ON_ONCE(!dev->dma_parms))
-		return -EIO;
+		return;
 	dev->dma_parms->min_align_mask = min_align_mask;
-	return 0;
 }
 
 #ifndef dma_get_cache_alignment
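
Since dma_set_max_seg_size(), dma_set_seg_boundary() and dma_set_min_align_mask() now return void and only WARN once when dev->dma_parms is missing, callers lose the error plumbing removed in the driver hunks earlier in this merge. A hedged sketch of the new calling convention in a probe path (the foo_probe name and the values are invented):

    #include <linux/dma-mapping.h>
    #include <linux/platform_device.h>
    #include <linux/sizes.h>

    static int foo_probe(struct platform_device *pdev)
    {
            struct device *dev = &pdev->dev;
            int ret;

            /* setting the mask can still fail and must still be checked */
            ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
            if (ret)
                    return ret;

            /* no return value left to check; a missing dma_parms only WARNs */
            dma_set_max_seg_size(dev, SZ_64K);
            dma_set_seg_boundary(dev, 0xffffffff);
            return 0;
    }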

new file: include/linux/iommu-dma.h (155 lines)

@@ -0,0 +1,155 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved
*
* DMA operations that map physical memory through IOMMU.
*/
#ifndef _LINUX_IOMMU_DMA_H
#define _LINUX_IOMMU_DMA_H
#include <linux/dma-direction.h>
#ifdef CONFIG_IOMMU_DMA
static inline bool use_dma_iommu(struct device *dev)
{
return dev->dma_iommu;
}
dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size, enum dma_data_direction dir,
unsigned long attrs);
void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
size_t size, enum dma_data_direction dir, unsigned long attrs);
int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
enum dma_data_direction dir, unsigned long attrs);
void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
enum dma_data_direction dir, unsigned long attrs);
void *iommu_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
gfp_t gfp, unsigned long attrs);
int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
unsigned long attrs);
int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
unsigned long attrs);
unsigned long iommu_dma_get_merge_boundary(struct device *dev);
size_t iommu_dma_opt_mapping_size(void);
size_t iommu_dma_max_mapping_size(struct device *dev);
void iommu_dma_free(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t handle, unsigned long attrs);
dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
size_t size, enum dma_data_direction dir, unsigned long attrs);
void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
size_t size, enum dma_data_direction dir, unsigned long attrs);
struct sg_table *iommu_dma_alloc_noncontiguous(struct device *dev, size_t size,
enum dma_data_direction dir, gfp_t gfp, unsigned long attrs);
void iommu_dma_free_noncontiguous(struct device *dev, size_t size,
struct sg_table *sgt, enum dma_data_direction dir);
void iommu_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
size_t size, enum dma_data_direction dir);
void iommu_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
size_t size, enum dma_data_direction dir);
void iommu_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
int nelems, enum dma_data_direction dir);
void iommu_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
int nelems, enum dma_data_direction dir);
#else
static inline bool use_dma_iommu(struct device *dev)
{
return false;
}
static inline dma_addr_t iommu_dma_map_page(struct device *dev,
struct page *page, unsigned long offset, size_t size,
enum dma_data_direction dir, unsigned long attrs)
{
return DMA_MAPPING_ERROR;
}
static inline void iommu_dma_unmap_page(struct device *dev,
dma_addr_t dma_handle, size_t size, enum dma_data_direction dir,
unsigned long attrs)
{
}
static inline int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir, unsigned long attrs)
{
return -EINVAL;
}
static inline void iommu_dma_unmap_sg(struct device *dev,
struct scatterlist *sg, int nents, enum dma_data_direction dir,
unsigned long attrs)
{
}
static inline void *iommu_dma_alloc(struct device *dev, size_t size,
dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
{
return NULL;
}
static inline int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
unsigned long attrs)
{
return -EINVAL;
}
static inline int iommu_dma_get_sgtable(struct device *dev,
struct sg_table *sgt, void *cpu_addr, dma_addr_t dma_addr,
size_t size, unsigned long attrs)
{
return -EINVAL;
}
static inline unsigned long iommu_dma_get_merge_boundary(struct device *dev)
{
return 0;
}
static inline size_t iommu_dma_opt_mapping_size(void)
{
return 0;
}
static inline size_t iommu_dma_max_mapping_size(struct device *dev)
{
return 0;
}
static inline void iommu_dma_free(struct device *dev, size_t size,
void *cpu_addr, dma_addr_t handle, unsigned long attrs)
{
}
static inline dma_addr_t iommu_dma_map_resource(struct device *dev,
phys_addr_t phys, size_t size, enum dma_data_direction dir,
unsigned long attrs)
{
return DMA_MAPPING_ERROR;
}
static inline void iommu_dma_unmap_resource(struct device *dev,
dma_addr_t handle, size_t size, enum dma_data_direction dir,
unsigned long attrs)
{
}
static inline struct sg_table *
iommu_dma_alloc_noncontiguous(struct device *dev, size_t size,
enum dma_data_direction dir, gfp_t gfp, unsigned long attrs)
{
return NULL;
}
static inline void iommu_dma_free_noncontiguous(struct device *dev, size_t size,
struct sg_table *sgt, enum dma_data_direction dir)
{
}
static inline void iommu_dma_sync_single_for_cpu(struct device *dev,
dma_addr_t dma_handle, size_t size,
enum dma_data_direction dir)
{
}
static inline void iommu_dma_sync_single_for_device(struct device *dev,
dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
}
static inline void iommu_dma_sync_sg_for_cpu(struct device *dev,
struct scatterlist *sgl, int nelems,
enum dma_data_direction dir)
{
}
static inline void iommu_dma_sync_sg_for_device(struct device *dev,
struct scatterlist *sgl, int nelems,
enum dma_data_direction dir)
{
}
#endif /* CONFIG_IOMMU_DMA */
#endif /* _LINUX_IOMMU_DMA_H */

new file: include/trace/events/dma.h (341 lines)

@@ -0,0 +1,341 @@
/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM dma
#if !defined(_TRACE_DMA_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_DMA_H
#include <linux/tracepoint.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <trace/events/mmflags.h>
TRACE_DEFINE_ENUM(DMA_BIDIRECTIONAL);
TRACE_DEFINE_ENUM(DMA_TO_DEVICE);
TRACE_DEFINE_ENUM(DMA_FROM_DEVICE);
TRACE_DEFINE_ENUM(DMA_NONE);
#define decode_dma_data_direction(dir) \
__print_symbolic(dir, \
{ DMA_BIDIRECTIONAL, "BIDIRECTIONAL" }, \
{ DMA_TO_DEVICE, "TO_DEVICE" }, \
{ DMA_FROM_DEVICE, "FROM_DEVICE" }, \
{ DMA_NONE, "NONE" })
#define decode_dma_attrs(attrs) \
__print_flags(attrs, "|", \
{ DMA_ATTR_WEAK_ORDERING, "WEAK_ORDERING" }, \
{ DMA_ATTR_WRITE_COMBINE, "WRITE_COMBINE" }, \
{ DMA_ATTR_NO_KERNEL_MAPPING, "NO_KERNEL_MAPPING" }, \
{ DMA_ATTR_SKIP_CPU_SYNC, "SKIP_CPU_SYNC" }, \
{ DMA_ATTR_FORCE_CONTIGUOUS, "FORCE_CONTIGUOUS" }, \
{ DMA_ATTR_ALLOC_SINGLE_PAGES, "ALLOC_SINGLE_PAGES" }, \
{ DMA_ATTR_NO_WARN, "NO_WARN" }, \
{ DMA_ATTR_PRIVILEGED, "PRIVILEGED" })
DECLARE_EVENT_CLASS(dma_map,
TP_PROTO(struct device *dev, phys_addr_t phys_addr, dma_addr_t dma_addr,
size_t size, enum dma_data_direction dir, unsigned long attrs),
TP_ARGS(dev, phys_addr, dma_addr, size, dir, attrs),
TP_STRUCT__entry(
__string(device, dev_name(dev))
__field(u64, phys_addr)
__field(u64, dma_addr)
__field(size_t, size)
__field(enum dma_data_direction, dir)
__field(unsigned long, attrs)
),
TP_fast_assign(
__assign_str(device);
__entry->phys_addr = phys_addr;
__entry->dma_addr = dma_addr;
__entry->size = size;
__entry->dir = dir;
__entry->attrs = attrs;
),
TP_printk("%s dir=%s dma_addr=%llx size=%zu phys_addr=%llx attrs=%s",
__get_str(device),
decode_dma_data_direction(__entry->dir),
__entry->dma_addr,
__entry->size,
__entry->phys_addr,
decode_dma_attrs(__entry->attrs))
);
DEFINE_EVENT(dma_map, dma_map_page,
TP_PROTO(struct device *dev, phys_addr_t phys_addr, dma_addr_t dma_addr,
size_t size, enum dma_data_direction dir, unsigned long attrs),
TP_ARGS(dev, phys_addr, dma_addr, size, dir, attrs));
DEFINE_EVENT(dma_map, dma_map_resource,
TP_PROTO(struct device *dev, phys_addr_t phys_addr, dma_addr_t dma_addr,
size_t size, enum dma_data_direction dir, unsigned long attrs),
TP_ARGS(dev, phys_addr, dma_addr, size, dir, attrs));
DECLARE_EVENT_CLASS(dma_unmap,
TP_PROTO(struct device *dev, dma_addr_t addr, size_t size,
enum dma_data_direction dir, unsigned long attrs),
TP_ARGS(dev, addr, size, dir, attrs),
TP_STRUCT__entry(
__string(device, dev_name(dev))
__field(u64, addr)
__field(size_t, size)
__field(enum dma_data_direction, dir)
__field(unsigned long, attrs)
),
TP_fast_assign(
__assign_str(device);
__entry->addr = addr;
__entry->size = size;
__entry->dir = dir;
__entry->attrs = attrs;
),
TP_printk("%s dir=%s dma_addr=%llx size=%zu attrs=%s",
__get_str(device),
decode_dma_data_direction(__entry->dir),
__entry->addr,
__entry->size,
decode_dma_attrs(__entry->attrs))
);
DEFINE_EVENT(dma_unmap, dma_unmap_page,
TP_PROTO(struct device *dev, dma_addr_t addr, size_t size,
enum dma_data_direction dir, unsigned long attrs),
TP_ARGS(dev, addr, size, dir, attrs));
DEFINE_EVENT(dma_unmap, dma_unmap_resource,
TP_PROTO(struct device *dev, dma_addr_t addr, size_t size,
enum dma_data_direction dir, unsigned long attrs),
TP_ARGS(dev, addr, size, dir, attrs));
TRACE_EVENT(dma_alloc,
TP_PROTO(struct device *dev, void *virt_addr, dma_addr_t dma_addr,
size_t size, gfp_t flags, unsigned long attrs),
TP_ARGS(dev, virt_addr, dma_addr, size, flags, attrs),
TP_STRUCT__entry(
__string(device, dev_name(dev))
__field(u64, phys_addr)
__field(u64, dma_addr)
__field(size_t, size)
__field(gfp_t, flags)
__field(unsigned long, attrs)
),
TP_fast_assign(
__assign_str(device);
__entry->phys_addr = virt_to_phys(virt_addr);
__entry->dma_addr = dma_addr;
__entry->size = size;
__entry->flags = flags;
__entry->attrs = attrs;
),
TP_printk("%s dma_addr=%llx size=%zu phys_addr=%llx flags=%s attrs=%s",
__get_str(device),
__entry->dma_addr,
__entry->size,
__entry->phys_addr,
show_gfp_flags(__entry->flags),
decode_dma_attrs(__entry->attrs))
);
TRACE_EVENT(dma_free,
TP_PROTO(struct device *dev, void *virt_addr, dma_addr_t dma_addr,
size_t size, unsigned long attrs),
TP_ARGS(dev, virt_addr, dma_addr, size, attrs),
TP_STRUCT__entry(
__string(device, dev_name(dev))
__field(u64, phys_addr)
__field(u64, dma_addr)
__field(size_t, size)
__field(unsigned long, attrs)
),
TP_fast_assign(
__assign_str(device);
__entry->phys_addr = virt_to_phys(virt_addr);
__entry->dma_addr = dma_addr;
__entry->size = size;
__entry->attrs = attrs;
),
TP_printk("%s dma_addr=%llx size=%zu phys_addr=%llx attrs=%s",
__get_str(device),
__entry->dma_addr,
__entry->size,
__entry->phys_addr,
decode_dma_attrs(__entry->attrs))
);
TRACE_EVENT(dma_map_sg,
TP_PROTO(struct device *dev, struct scatterlist *sg, int nents,
int ents, enum dma_data_direction dir, unsigned long attrs),
TP_ARGS(dev, sg, nents, ents, dir, attrs),
TP_STRUCT__entry(
__string(device, dev_name(dev))
__dynamic_array(u64, phys_addrs, nents)
__dynamic_array(u64, dma_addrs, ents)
__dynamic_array(unsigned int, lengths, ents)
__field(enum dma_data_direction, dir)
__field(unsigned long, attrs)
),
TP_fast_assign(
int i;
__assign_str(device);
for (i = 0; i < nents; i++)
((u64 *)__get_dynamic_array(phys_addrs))[i] =
sg_phys(sg + i);
for (i = 0; i < ents; i++) {
((u64 *)__get_dynamic_array(dma_addrs))[i] =
sg_dma_address(sg + i);
((unsigned int *)__get_dynamic_array(lengths))[i] =
sg_dma_len(sg + i);
}
__entry->dir = dir;
__entry->attrs = attrs;
),
TP_printk("%s dir=%s dma_addrs=%s sizes=%s phys_addrs=%s attrs=%s",
__get_str(device),
decode_dma_data_direction(__entry->dir),
__print_array(__get_dynamic_array(dma_addrs),
__get_dynamic_array_len(dma_addrs) /
sizeof(u64), sizeof(u64)),
__print_array(__get_dynamic_array(lengths),
__get_dynamic_array_len(lengths) /
sizeof(unsigned int), sizeof(unsigned int)),
__print_array(__get_dynamic_array(phys_addrs),
__get_dynamic_array_len(phys_addrs) /
sizeof(u64), sizeof(u64)),
decode_dma_attrs(__entry->attrs))
);
TRACE_EVENT(dma_unmap_sg,
TP_PROTO(struct device *dev, struct scatterlist *sg, int nents,
enum dma_data_direction dir, unsigned long attrs),
TP_ARGS(dev, sg, nents, dir, attrs),
TP_STRUCT__entry(
__string(device, dev_name(dev))
__dynamic_array(u64, addrs, nents)
__field(enum dma_data_direction, dir)
__field(unsigned long, attrs)
),
TP_fast_assign(
int i;
__assign_str(device);
for (i = 0; i < nents; i++)
((u64 *)__get_dynamic_array(addrs))[i] =
sg_phys(sg + i);
__entry->dir = dir;
__entry->attrs = attrs;
),
TP_printk("%s dir=%s phys_addrs=%s attrs=%s",
__get_str(device),
decode_dma_data_direction(__entry->dir),
__print_array(__get_dynamic_array(addrs),
__get_dynamic_array_len(addrs) /
sizeof(u64), sizeof(u64)),
decode_dma_attrs(__entry->attrs))
);
DECLARE_EVENT_CLASS(dma_sync_single,
TP_PROTO(struct device *dev, dma_addr_t dma_addr, size_t size,
enum dma_data_direction dir),
TP_ARGS(dev, dma_addr, size, dir),
TP_STRUCT__entry(
__string(device, dev_name(dev))
__field(u64, dma_addr)
__field(size_t, size)
__field(enum dma_data_direction, dir)
),
TP_fast_assign(
__assign_str(device);
__entry->dma_addr = dma_addr;
__entry->size = size;
__entry->dir = dir;
),
TP_printk("%s dir=%s dma_addr=%llx size=%zu",
__get_str(device),
decode_dma_data_direction(__entry->dir),
__entry->dma_addr,
__entry->size)
);
DEFINE_EVENT(dma_sync_single, dma_sync_single_for_cpu,
TP_PROTO(struct device *dev, dma_addr_t dma_addr, size_t size,
enum dma_data_direction dir),
TP_ARGS(dev, dma_addr, size, dir));
DEFINE_EVENT(dma_sync_single, dma_sync_single_for_device,
TP_PROTO(struct device *dev, dma_addr_t dma_addr, size_t size,
enum dma_data_direction dir),
TP_ARGS(dev, dma_addr, size, dir));
DECLARE_EVENT_CLASS(dma_sync_sg,
TP_PROTO(struct device *dev, struct scatterlist *sg, int nents,
enum dma_data_direction dir),
TP_ARGS(dev, sg, nents, dir),
TP_STRUCT__entry(
__string(device, dev_name(dev))
__dynamic_array(u64, dma_addrs, nents)
__dynamic_array(unsigned int, lengths, nents)
__field(enum dma_data_direction, dir)
),
TP_fast_assign(
int i;
__assign_str(device);
for (i = 0; i < nents; i++) {
((u64 *)__get_dynamic_array(dma_addrs))[i] =
sg_dma_address(sg + i);
((unsigned int *)__get_dynamic_array(lengths))[i] =
sg_dma_len(sg + i);
}
__entry->dir = dir;
),
TP_printk("%s dir=%s dma_addrs=%s sizes=%s",
__get_str(device),
decode_dma_data_direction(__entry->dir),
__print_array(__get_dynamic_array(dma_addrs),
__get_dynamic_array_len(dma_addrs) /
sizeof(u64), sizeof(u64)),
__print_array(__get_dynamic_array(lengths),
__get_dynamic_array_len(lengths) /
sizeof(unsigned int), sizeof(unsigned int)))
);
DEFINE_EVENT(dma_sync_sg, dma_sync_sg_for_cpu,
TP_PROTO(struct device *dev, struct scatterlist *sg, int nents,
enum dma_data_direction dir),
TP_ARGS(dev, sg, nents, dir));
DEFINE_EVENT(dma_sync_sg, dma_sync_sg_for_device,
TP_PROTO(struct device *dev, struct scatterlist *sg, int nents,
enum dma_data_direction dir),
TP_ARGS(dev, sg, nents, dir));
#endif /* _TRACE_DMA_H */
/* This part must be outside protection */
#include <trace/define_trace.h>
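
The new events are ordinary tracepoints, so they can be consumed through tracefs or by attaching a probe in code. The snippet below is a purely illustrative sketch of a probe for the dma_sync_single_for_cpu event defined above; it assumes the tracepoint is reachable from where the probe is registered (e.g. built-in code or an exported tracepoint), and all foo_* names are invented:

#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <trace/events/dma.h>

/* Probe signature: a cookie pointer first, then the TP_PROTO arguments. */
static void foo_probe_sync_for_cpu(void *ignore, struct device *dev,
                dma_addr_t dma_addr, size_t size, enum dma_data_direction dir)
{
        dev_dbg(dev, "sync-for-cpu dma_addr=%pad size=%zu\n", &dma_addr, size);
}

static int __init foo_dma_trace_init(void)
{
        return register_trace_dma_sync_single_for_cpu(foo_probe_sync_for_cpu,
                                                      NULL);
}

static void __exit foo_dma_trace_exit(void)
{
        unregister_trace_dma_sync_single_for_cpu(foo_probe_sync_for_cpu, NULL);
        tracepoint_synchronize_unregister();
}

module_init(foo_dma_trace_init);
module_exit(foo_dma_trace_exit);
MODULE_DESCRIPTION("Illustrative DMA tracepoint probe");
MODULE_LICENSE("GPL");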

@@ -8,8 +8,7 @@ config HAS_DMA
 	depends on !NO_DMA
 	default y
-config DMA_OPS
-	depends on HAS_DMA
+config DMA_OPS_HELPERS
 	bool
 #
@@ -109,8 +108,8 @@ config DMA_BOUNCE_UNALIGNED_KMALLOC
 config DMA_NEED_SYNC
 	def_bool ARCH_HAS_SYNC_DMA_FOR_DEVICE || ARCH_HAS_SYNC_DMA_FOR_CPU || \
-		ARCH_HAS_SYNC_DMA_FOR_CPU_ALL || DMA_API_DEBUG || DMA_OPS || \
-		SWIOTLB
+		ARCH_HAS_SYNC_DMA_FOR_CPU_ALL || DMA_API_DEBUG || \
+		ARCH_HAS_DMA_OPS || SWIOTLB
 config DMA_RESTRICTED_POOL
 	bool "DMA Restricted Pool"

@@ -1,8 +1,8 @@
 # SPDX-License-Identifier: GPL-2.0
 obj-$(CONFIG_HAS_DMA)			+= mapping.o direct.o
-obj-$(CONFIG_DMA_OPS)			+= ops_helpers.o
-obj-$(CONFIG_DMA_OPS)			+= dummy.o
+obj-$(CONFIG_DMA_OPS_HELPERS)		+= ops_helpers.o
+obj-$(CONFIG_ARCH_HAS_DMA_OPS)		+= dummy.o
 obj-$(CONFIG_DMA_CMA)			+= contiguous.o
 obj-$(CONFIG_DMA_DECLARE_COHERENT)	+= coherent.o
 obj-$(CONFIG_DMA_API_DEBUG)		+= debug.o

@@ -20,7 +20,7 @@
  * it for entirely different regions. In that case the arch code needs to
  * override the variable below for dma-direct to work properly.
  */
-unsigned int zone_dma_bits __ro_after_init = 24;
+u64 zone_dma_limit __ro_after_init = DMA_BIT_MASK(24);
 static inline dma_addr_t phys_to_dma_direct(struct device *dev,
 		phys_addr_t phys)
@@ -59,7 +59,7 @@ static gfp_t dma_direct_optimal_gfp_mask(struct device *dev, u64 *phys_limit)
 	 * zones.
 	 */
 	*phys_limit = dma_to_phys(dev, dma_limit);
-	if (*phys_limit <= DMA_BIT_MASK(zone_dma_bits))
+	if (*phys_limit <= zone_dma_limit)
 		return GFP_DMA;
 	if (*phys_limit <= DMA_BIT_MASK(32))
 		return GFP_DMA32;
@@ -140,7 +140,7 @@ again:
 	if (!page)
 		page = alloc_pages_node(node, gfp, get_order(size));
 	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
-		dma_free_contiguous(dev, page, size);
+		__free_pages(page, get_order(size));
 		page = NULL;
 		if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
@@ -580,7 +580,7 @@ int dma_direct_supported(struct device *dev, u64 mask)
 	 * part of the check.
 	 */
 	if (IS_ENABLED(CONFIG_ZONE_DMA))
-		min_mask = min_t(u64, min_mask, DMA_BIT_MASK(zone_dma_bits));
+		min_mask = min_t(u64, min_mask, zone_dma_limit);
 	return mask >= phys_to_dma_unencrypted(dev, min_mask);
 }
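
Replacing the zone_dma_bits bit count with an absolute zone_dma_limit is what allows ZONE_DMA to sit above 4GB: a DMA_BIT_MASK(n) limit always starts at physical address 0, while a u64 limit can land anywhere. The fragment below is only a sketch of how early arch code could use the new variable; it is not the actual arm64 implementation, and example_init_zone_dma() is made up:

#include <linux/dma-direct.h>	/* zone_dma_limit, changed above */
#include <linux/memblock.h>
#include <linux/sizes.h>

static void __init example_init_zone_dma(void)
{
        phys_addr_t base = memblock_start_of_DRAM();

        /*
         * Describe a DMA zone covering the first 4GB of RAM wherever RAM
         * starts; a power-of-two mask could not express this once the
         * base is above 4GB.
         */
        zone_dma_limit = base + SZ_4G - 1;
}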

@@ -17,6 +17,15 @@ static dma_addr_t dma_dummy_map_page(struct device *dev, struct page *page,
 {
 	return DMA_MAPPING_ERROR;
 }
+static void dma_dummy_unmap_page(struct device *dev, dma_addr_t dma_handle,
+		size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+	/*
+	 * Dummy ops doesn't support map_page, so unmap_page should never be
+	 * called.
+	 */
+	WARN_ON_ONCE(true);
+}
 static int dma_dummy_map_sg(struct device *dev, struct scatterlist *sgl,
 		int nelems, enum dma_data_direction dir,
@@ -25,6 +34,16 @@ static int dma_dummy_map_sg(struct device *dev, struct scatterlist *sgl,
 	return -EINVAL;
 }
+static void dma_dummy_unmap_sg(struct device *dev, struct scatterlist *sgl,
+		int nelems, enum dma_data_direction dir,
+		unsigned long attrs)
+{
+	/*
+	 * Dummy ops doesn't support map_sg, so unmap_sg should never be called.
+	 */
+	WARN_ON_ONCE(true);
+}
 static int dma_dummy_supported(struct device *hwdev, u64 mask)
 {
 	return 0;
@@ -33,6 +52,8 @@ static int dma_dummy_supported(struct device *hwdev, u64 mask)
 const struct dma_map_ops dma_dummy_ops = {
 	.mmap = dma_dummy_mmap,
 	.map_page = dma_dummy_map_page,
+	.unmap_page = dma_dummy_unmap_page,
 	.map_sg = dma_dummy_map_sg,
+	.unmap_sg = dma_dummy_unmap_sg,
 	.dma_supported = dma_dummy_supported,
 };
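
For context, dma_dummy_ops is what gets installed for devices that must not do DMA at all; with ->unmap_page and ->unmap_sg now present, the unconditional unmap calls in mapping.c land in the WARN_ON_ONCE() stubs above instead of dereferencing a missing method. A hedged sketch of how a hypothetical bus could use it (foo_bus_forbid_dma() is invented; set_dma_ops() and dma_dummy_ops are the existing interfaces):

#include <linux/dma-map-ops.h>

/* Illustrative only: mark a child device as unable to perform DMA. */
static void foo_bus_forbid_dma(struct device *child)
{
        set_dma_ops(child, &dma_dummy_ops);
}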

@@ -10,6 +10,7 @@
 #include <linux/dma-map-ops.h>
 #include <linux/export.h>
 #include <linux/gfp.h>
+#include <linux/iommu-dma.h>
 #include <linux/kmsan.h>
 #include <linux/of_device.h>
 #include <linux/slab.h>
@@ -17,6 +18,9 @@
 #include "debug.h"
 #include "direct.h"
+#define CREATE_TRACE_POINTS
+#include <trace/events/dma.h>
 #if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
     defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
     defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
@@ -116,8 +120,12 @@ EXPORT_SYMBOL(dmam_alloc_attrs);
 static bool dma_go_direct(struct device *dev, dma_addr_t mask,
 		const struct dma_map_ops *ops)
 {
+	if (use_dma_iommu(dev))
+		return false;
 	if (likely(!ops))
 		return true;
 #ifdef CONFIG_DMA_OPS_BYPASS
 	if (dev->dma_ops_bypass)
 		return min_not_zero(mask, dev->bus_dma_limit) >=
@@ -159,9 +167,13 @@ dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
 	if (dma_map_direct(dev, ops) ||
 	    arch_dma_map_page_direct(dev, page_to_phys(page) + offset + size))
 		addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
+	else if (use_dma_iommu(dev))
+		addr = iommu_dma_map_page(dev, page, offset, size, dir, attrs);
 	else
 		addr = ops->map_page(dev, page, offset, size, dir, attrs);
 	kmsan_handle_dma(page, offset, size, dir);
+	trace_dma_map_page(dev, page_to_phys(page) + offset, addr, size, dir,
+			attrs);
 	debug_dma_map_page(dev, page, offset, size, dir, addr, attrs);
 	return addr;
@@ -177,8 +189,11 @@ void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size,
 	if (dma_map_direct(dev, ops) ||
 	    arch_dma_unmap_page_direct(dev, addr + size))
 		dma_direct_unmap_page(dev, addr, size, dir, attrs);
-	else if (ops->unmap_page)
+	else if (use_dma_iommu(dev))
+		iommu_dma_unmap_page(dev, addr, size, dir, attrs);
+	else
 		ops->unmap_page(dev, addr, size, dir, attrs);
+	trace_dma_unmap_page(dev, addr, size, dir, attrs);
 	debug_dma_unmap_page(dev, addr, size, dir);
 }
 EXPORT_SYMBOL(dma_unmap_page_attrs);
@@ -197,11 +212,14 @@ static int __dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
 	if (dma_map_direct(dev, ops) ||
 	    arch_dma_map_sg_direct(dev, sg, nents))
 		ents = dma_direct_map_sg(dev, sg, nents, dir, attrs);
+	else if (use_dma_iommu(dev))
+		ents = iommu_dma_map_sg(dev, sg, nents, dir, attrs);
 	else
 		ents = ops->map_sg(dev, sg, nents, dir, attrs);
 	if (ents > 0) {
 		kmsan_handle_dma_sg(sg, nents, dir);
+		trace_dma_map_sg(dev, sg, nents, ents, dir, attrs);
 		debug_dma_map_sg(dev, sg, nents, ents, dir, attrs);
 	} else if (WARN_ON_ONCE(ents != -EINVAL && ents != -ENOMEM &&
 			ents != -EIO && ents != -EREMOTEIO)) {
@@ -287,10 +305,13 @@ void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
 	const struct dma_map_ops *ops = get_dma_ops(dev);
 	BUG_ON(!valid_dma_direction(dir));
+	trace_dma_unmap_sg(dev, sg, nents, dir, attrs);
 	debug_dma_unmap_sg(dev, sg, nents, dir);
 	if (dma_map_direct(dev, ops) ||
 	    arch_dma_unmap_sg_direct(dev, sg, nents))
 		dma_direct_unmap_sg(dev, sg, nents, dir, attrs);
+	else if (use_dma_iommu(dev))
+		iommu_dma_unmap_sg(dev, sg, nents, dir, attrs);
 	else if (ops->unmap_sg)
 		ops->unmap_sg(dev, sg, nents, dir, attrs);
 }
@@ -309,9 +330,12 @@ dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr,
 	if (dma_map_direct(dev, ops))
 		addr = dma_direct_map_resource(dev, phys_addr, size, dir, attrs);
+	else if (use_dma_iommu(dev))
+		addr = iommu_dma_map_resource(dev, phys_addr, size, dir, attrs);
 	else if (ops->map_resource)
 		addr = ops->map_resource(dev, phys_addr, size, dir, attrs);
+	trace_dma_map_resource(dev, phys_addr, addr, size, dir, attrs);
 	debug_dma_map_resource(dev, phys_addr, size, dir, addr, attrs);
 	return addr;
 }
@@ -323,8 +347,13 @@ void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size,
 	const struct dma_map_ops *ops = get_dma_ops(dev);
 	BUG_ON(!valid_dma_direction(dir));
-	if (!dma_map_direct(dev, ops) && ops->unmap_resource)
+	if (dma_map_direct(dev, ops))
+		; /* nothing to do: uncached and no swiotlb */
+	else if (use_dma_iommu(dev))
+		iommu_dma_unmap_resource(dev, addr, size, dir, attrs);
+	else if (ops->unmap_resource)
 		ops->unmap_resource(dev, addr, size, dir, attrs);
+	trace_dma_unmap_resource(dev, addr, size, dir, attrs);
 	debug_dma_unmap_resource(dev, addr, size, dir);
 }
 EXPORT_SYMBOL(dma_unmap_resource);
@@ -338,8 +367,11 @@ void __dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
 	BUG_ON(!valid_dma_direction(dir));
 	if (dma_map_direct(dev, ops))
 		dma_direct_sync_single_for_cpu(dev, addr, size, dir);
+	else if (use_dma_iommu(dev))
+		iommu_dma_sync_single_for_cpu(dev, addr, size, dir);
 	else if (ops->sync_single_for_cpu)
 		ops->sync_single_for_cpu(dev, addr, size, dir);
+	trace_dma_sync_single_for_cpu(dev, addr, size, dir);
 	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
 }
 EXPORT_SYMBOL(__dma_sync_single_for_cpu);
@@ -352,8 +384,11 @@ void __dma_sync_single_for_device(struct device *dev, dma_addr_t addr,
 	BUG_ON(!valid_dma_direction(dir));
 	if (dma_map_direct(dev, ops))
 		dma_direct_sync_single_for_device(dev, addr, size, dir);
+	else if (use_dma_iommu(dev))
+		iommu_dma_sync_single_for_device(dev, addr, size, dir);
 	else if (ops->sync_single_for_device)
 		ops->sync_single_for_device(dev, addr, size, dir);
+	trace_dma_sync_single_for_device(dev, addr, size, dir);
 	debug_dma_sync_single_for_device(dev, addr, size, dir);
 }
 EXPORT_SYMBOL(__dma_sync_single_for_device);
@@ -366,8 +401,11 @@ void __dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
 	BUG_ON(!valid_dma_direction(dir));
 	if (dma_map_direct(dev, ops))
 		dma_direct_sync_sg_for_cpu(dev, sg, nelems, dir);
+	else if (use_dma_iommu(dev))
+		iommu_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
 	else if (ops->sync_sg_for_cpu)
 		ops->sync_sg_for_cpu(dev, sg, nelems, dir);
+	trace_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
 	debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
 }
 EXPORT_SYMBOL(__dma_sync_sg_for_cpu);
@@ -380,8 +418,11 @@ void __dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 	BUG_ON(!valid_dma_direction(dir));
 	if (dma_map_direct(dev, ops))
 		dma_direct_sync_sg_for_device(dev, sg, nelems, dir);
+	else if (use_dma_iommu(dev))
+		iommu_dma_sync_sg_for_device(dev, sg, nelems, dir);
 	else if (ops->sync_sg_for_device)
 		ops->sync_sg_for_device(dev, sg, nelems, dir);
+	trace_dma_sync_sg_for_device(dev, sg, nelems, dir);
 	debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
 }
 EXPORT_SYMBOL(__dma_sync_sg_for_device);
@@ -405,7 +446,7 @@ static void dma_setup_need_sync(struct device *dev)
 {
 	const struct dma_map_ops *ops = get_dma_ops(dev);
-	if (dma_map_direct(dev, ops) || (ops->flags & DMA_F_CAN_SKIP_SYNC))
+	if (dma_map_direct(dev, ops) || use_dma_iommu(dev))
 		/*
 		 * dma_skip_sync will be reset to %false on first SWIOTLB buffer
 		 * mapping, if any. During the device initialization, it's
@@ -446,6 +487,9 @@ int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
 	if (dma_alloc_direct(dev, ops))
 		return dma_direct_get_sgtable(dev, sgt, cpu_addr, dma_addr,
 				size, attrs);
+	if (use_dma_iommu(dev))
+		return iommu_dma_get_sgtable(dev, sgt, cpu_addr, dma_addr,
+				size, attrs);
 	if (!ops->get_sgtable)
 		return -ENXIO;
 	return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size, attrs);
@@ -482,6 +526,8 @@ bool dma_can_mmap(struct device *dev)
 	if (dma_alloc_direct(dev, ops))
 		return dma_direct_can_mmap(dev);
+	if (use_dma_iommu(dev))
+		return true;
 	return ops->mmap != NULL;
 }
 EXPORT_SYMBOL_GPL(dma_can_mmap);
@@ -508,6 +554,9 @@ int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
 	if (dma_alloc_direct(dev, ops))
 		return dma_direct_mmap(dev, vma, cpu_addr, dma_addr, size,
 				attrs);
+	if (use_dma_iommu(dev))
+		return iommu_dma_mmap(dev, vma, cpu_addr, dma_addr, size,
+				attrs);
 	if (!ops->mmap)
 		return -ENXIO;
 	return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
@@ -559,11 +608,14 @@ void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
 	if (dma_alloc_direct(dev, ops))
 		cpu_addr = dma_direct_alloc(dev, size, dma_handle, flag, attrs);
+	else if (use_dma_iommu(dev))
+		cpu_addr = iommu_dma_alloc(dev, size, dma_handle, flag, attrs);
 	else if (ops->alloc)
 		cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
 	else
 		return NULL;
+	trace_dma_alloc(dev, cpu_addr, *dma_handle, size, flag, attrs);
 	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr, attrs);
 	return cpu_addr;
 }
@@ -588,9 +640,12 @@ void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
 	if (!cpu_addr)
 		return;
+	trace_dma_free(dev, cpu_addr, dma_handle, size, attrs);
 	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
 	if (dma_alloc_direct(dev, ops))
 		dma_direct_free(dev, size, cpu_addr, dma_handle, attrs);
+	else if (use_dma_iommu(dev))
+		iommu_dma_free(dev, size, cpu_addr, dma_handle, attrs);
 	else if (ops->free)
 		ops->free(dev, size, cpu_addr, dma_handle, attrs);
 }
@@ -611,6 +666,8 @@ static struct page *__dma_alloc_pages(struct device *dev, size_t size,
 	size = PAGE_ALIGN(size);
 	if (dma_alloc_direct(dev, ops))
 		return dma_direct_alloc_pages(dev, size, dma_handle, dir, gfp);
+	if (use_dma_iommu(dev))
+		return dma_common_alloc_pages(dev, size, dma_handle, dir, gfp);
 	if (!ops->alloc_pages_op)
 		return NULL;
 	return ops->alloc_pages_op(dev, size, dma_handle, dir, gfp);
@@ -621,8 +678,11 @@ struct page *dma_alloc_pages(struct device *dev, size_t size,
 {
 	struct page *page = __dma_alloc_pages(dev, size, dma_handle, dir, gfp);
-	if (page)
+	if (page) {
+		trace_dma_map_page(dev, page_to_phys(page), *dma_handle, size,
+				dir, 0);
 		debug_dma_map_page(dev, page, 0, size, dir, *dma_handle, 0);
+	}
 	return page;
 }
 EXPORT_SYMBOL_GPL(dma_alloc_pages);
@@ -635,6 +695,8 @@ static void __dma_free_pages(struct device *dev, size_t size, struct page *page,
 	size = PAGE_ALIGN(size);
 	if (dma_alloc_direct(dev, ops))
 		dma_direct_free_pages(dev, size, page, dma_handle, dir);
+	else if (use_dma_iommu(dev))
+		dma_common_free_pages(dev, size, page, dma_handle, dir);
 	else if (ops->free_pages)
 		ops->free_pages(dev, size, page, dma_handle, dir);
 }
@@ -642,6 +704,7 @@ static void __dma_free_pages(struct device *dev, size_t size, struct page *page,
 void dma_free_pages(struct device *dev, size_t size, struct page *page,
 		dma_addr_t dma_handle, enum dma_data_direction dir)
 {
+	trace_dma_unmap_page(dev, dma_handle, size, dir, 0);
 	debug_dma_unmap_page(dev, dma_handle, size, dir);
 	__dma_free_pages(dev, size, page, dma_handle, dir);
 }
@@ -697,11 +760,14 @@ struct sg_table *dma_alloc_noncontiguous(struct device *dev, size_t size,
 	if (ops && ops->alloc_noncontiguous)
 		sgt = ops->alloc_noncontiguous(dev, size, dir, gfp, attrs);
+	else if (use_dma_iommu(dev))
+		sgt = iommu_dma_alloc_noncontiguous(dev, size, dir, gfp, attrs);
 	else
 		sgt = alloc_single_sgt(dev, size, dir, gfp);
 	if (sgt) {
 		sgt->nents = 1;
+		trace_dma_map_sg(dev, sgt->sgl, sgt->orig_nents, 1, dir, attrs);
 		debug_dma_map_sg(dev, sgt->sgl, sgt->orig_nents, 1, dir, attrs);
 	}
 	return sgt;
@@ -722,9 +788,12 @@ void dma_free_noncontiguous(struct device *dev, size_t size,
 {
 	const struct dma_map_ops *ops = get_dma_ops(dev);
+	trace_dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir, 0);
 	debug_dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
 	if (ops && ops->free_noncontiguous)
 		ops->free_noncontiguous(dev, size, sgt, dir);
+	else if (use_dma_iommu(dev))
+		iommu_dma_free_noncontiguous(dev, size, sgt, dir);
 	else
 		free_single_sgt(dev, size, sgt, dir);
 }
@@ -772,32 +841,37 @@ static int dma_supported(struct device *dev, u64 mask)
 {
 	const struct dma_map_ops *ops = get_dma_ops(dev);
+	if (use_dma_iommu(dev)) {
+		if (WARN_ON(ops))
+			return false;
+		return true;
+	}
 	/*
-	 * ->dma_supported sets the bypass flag, so we must always call
-	 * into the method here unless the device is truly direct mapped.
+	 * ->dma_supported sets and clears the bypass flag, so ignore it here
+	 * and always call into the method if there is one.
 	 */
-	if (!ops)
-		return dma_direct_supported(dev, mask);
-	if (!ops->dma_supported)
-		return 1;
-	return ops->dma_supported(dev, mask);
+	if (ops) {
+		if (!ops->dma_supported)
+			return true;
+		return ops->dma_supported(dev, mask);
+	}
+	return dma_direct_supported(dev, mask);
 }
 bool dma_pci_p2pdma_supported(struct device *dev)
 {
 	const struct dma_map_ops *ops = get_dma_ops(dev);
-	/* if ops is not set, dma direct will be used which supports P2PDMA */
-	if (!ops)
-		return true;
 	/*
 	 * Note: dma_ops_bypass is not checked here because P2PDMA should
 	 * not be used with dma mapping ops that do not have support even
 	 * if the specific device is bypassing them.
	 */
-	return ops->flags & DMA_F_PCI_P2PDMA_SUPPORTED;
+	/* if ops is not set, dma direct and default IOMMU support P2PDMA */
+	return !ops;
 }
 EXPORT_SYMBOL_GPL(dma_pci_p2pdma_supported);
@@ -865,6 +939,8 @@ size_t dma_max_mapping_size(struct device *dev)
 	if (dma_map_direct(dev, ops))
 		size = dma_direct_max_mapping_size(dev);
+	else if (use_dma_iommu(dev))
+		size = iommu_dma_max_mapping_size(dev);
 	else if (ops && ops->max_mapping_size)
 		size = ops->max_mapping_size(dev);
@@ -877,7 +953,9 @@ size_t dma_opt_mapping_size(struct device *dev)
 	const struct dma_map_ops *ops = get_dma_ops(dev);
 	size_t size = SIZE_MAX;
-	if (ops && ops->opt_mapping_size)
+	if (use_dma_iommu(dev))
+		size = iommu_dma_opt_mapping_size();
+	else if (ops && ops->opt_mapping_size)
 		size = ops->opt_mapping_size();
 	return min(dma_max_mapping_size(dev), size);
@@ -888,6 +966,9 @@ unsigned long dma_get_merge_boundary(struct device *dev)
 {
 	const struct dma_map_ops *ops = get_dma_ops(dev);
+	if (use_dma_iommu(dev))
+		return iommu_dma_get_merge_boundary(dev);
 	if (!ops || !ops->get_merge_boundary)
 		return 0;	/* can't merge */
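
None of the above changes the driver-facing API: callers still go through the usual dma_map_page()/dma_unmap_page() style wrappers, and the direct/IOMMU/ops dispatch plus the new trace_dma_* hooks all happen inside the core functions shown in this diff. A minimal, purely illustrative caller (the foo_* names are invented) looks like this:

#include <linux/dma-mapping.h>

struct foo_dev {			/* hypothetical driver state */
        struct device *dev;
        dma_addr_t buf_dma;
};

static int foo_map_rx_buffer(struct foo_dev *fd, struct page *page, size_t len)
{
        /* Dispatched by dma_map_page_attrs() above; emits trace_dma_map_page. */
        fd->buf_dma = dma_map_page(fd->dev, page, 0, len, DMA_FROM_DEVICE);
        if (dma_mapping_error(fd->dev, fd->buf_dma))
                return -ENOMEM;
        return 0;
}

static void foo_unmap_rx_buffer(struct foo_dev *fd, size_t len)
{
        /* Dispatched by dma_unmap_page_attrs(); emits trace_dma_unmap_page. */
        dma_unmap_page(fd->dev, fd->buf_dma, len, DMA_FROM_DEVICE);
}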

@@ -4,6 +4,7 @@
  * the allocated memory contains normal pages in the direct kernel mapping.
  */
 #include <linux/dma-map-ops.h>
+#include <linux/iommu-dma.h>
 static struct page *dma_common_vaddr_to_page(void *cpu_addr)
 {
@@ -70,8 +71,12 @@ struct page *dma_common_alloc_pages(struct device *dev, size_t size,
 	if (!page)
 		return NULL;
-	*dma_handle = ops->map_page(dev, page, 0, size, dir,
-			DMA_ATTR_SKIP_CPU_SYNC);
+	if (use_dma_iommu(dev))
+		*dma_handle = iommu_dma_map_page(dev, page, 0, size, dir,
+				DMA_ATTR_SKIP_CPU_SYNC);
+	else
+		*dma_handle = ops->map_page(dev, page, 0, size, dir,
+				DMA_ATTR_SKIP_CPU_SYNC);
 	if (*dma_handle == DMA_MAPPING_ERROR) {
 		dma_free_contiguous(dev, page, size);
 		return NULL;
@@ -86,7 +91,10 @@ void dma_common_free_pages(struct device *dev, size_t size, struct page *page,
 {
 	const struct dma_map_ops *ops = get_dma_ops(dev);
-	if (ops->unmap_page)
+	if (use_dma_iommu(dev))
+		iommu_dma_unmap_page(dev, dma_handle, size, dir,
+				DMA_ATTR_SKIP_CPU_SYNC);
+	else if (ops->unmap_page)
 		ops->unmap_page(dev, dma_handle, size, dir,
 				DMA_ATTR_SKIP_CPU_SYNC);
 	dma_free_contiguous(dev, page, size);

@@ -70,9 +70,9 @@ static bool cma_in_zone(gfp_t gfp)
 	/* CMA can't cross zone boundaries, see cma_activate_area() */
 	end = cma_get_base(cma) + size - 1;
 	if (IS_ENABLED(CONFIG_ZONE_DMA) && (gfp & GFP_DMA))
-		return end <= DMA_BIT_MASK(zone_dma_bits);
+		return end <= zone_dma_limit;
 	if (IS_ENABLED(CONFIG_ZONE_DMA32) && (gfp & GFP_DMA32))
-		return end <= DMA_BIT_MASK(32);
+		return end <= max(DMA_BIT_MASK(32), zone_dma_limit);
 	return true;
 }

@@ -10,8 +10,10 @@ struct page **dma_common_find_pages(void *cpu_addr)
 {
 	struct vm_struct *area = find_vm_area(cpu_addr);
-	if (!area || area->flags != VM_DMA_COHERENT)
+	if (!area || !(area->flags & VM_DMA_COHERENT))
 		return NULL;
+	WARN(area->flags != VM_DMA_COHERENT,
+	     "unexpected flags in area: %p\n", cpu_addr);
 	return area->pages;
 }
@@ -61,7 +63,7 @@ void dma_common_free_remap(void *cpu_addr, size_t size)
 {
 	struct vm_struct *area = find_vm_area(cpu_addr);
-	if (!area || area->flags != VM_DMA_COHERENT) {
+	if (!area || !(area->flags & VM_DMA_COHERENT)) {
 		WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
 		return;
 	}

@@ -450,9 +450,9 @@ int swiotlb_init_late(size_t size, gfp_t gfp_mask,
 	if (!remap)
 		io_tlb_default_mem.can_grow = true;
 	if (IS_ENABLED(CONFIG_ZONE_DMA) && (gfp_mask & __GFP_DMA))
-		io_tlb_default_mem.phys_limit = DMA_BIT_MASK(zone_dma_bits);
+		io_tlb_default_mem.phys_limit = zone_dma_limit;
 	else if (IS_ENABLED(CONFIG_ZONE_DMA32) && (gfp_mask & __GFP_DMA32))
-		io_tlb_default_mem.phys_limit = DMA_BIT_MASK(32);
+		io_tlb_default_mem.phys_limit = max(DMA_BIT_MASK(32), zone_dma_limit);
 	else
 		io_tlb_default_mem.phys_limit = virt_to_phys(high_memory - 1);
 #endif
@@ -629,7 +629,7 @@ static struct page *swiotlb_alloc_tlb(struct device *dev, size_t bytes,
 	}
 	gfp &= ~GFP_ZONEMASK;
-	if (phys_limit <= DMA_BIT_MASK(zone_dma_bits))
+	if (phys_limit <= zone_dma_limit)
 		gfp |= __GFP_DMA;
 	else if (phys_limit <= DMA_BIT_MASK(32))
 		gfp |= __GFP_DMA32;