Merge branch 'for-3.15' of git://git.linaro.org/people/mszyprowski/linux-dma-mapping
Pull DMA-mapping updates from Marek Szyprowski:
 "This contains extension for more efficient handling of io address
  space for dma-mapping subsystem for ARM architecture"

* 'for-3.15' of git://git.linaro.org/people/mszyprowski/linux-dma-mapping:
  arm: dma-mapping: remove order parameter from arm_iommu_create_mapping()
  arm: dma-mapping: Add support to extend DMA IOMMU mappings
commit 7474043eff
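The only caller-visible change is the signature of arm_iommu_create_mapping(): the 'order' argument is gone, and the IO address space is now tracked with per-page bitmaps that are extended on demand instead of being sized up front. A minimal before/after sketch of a driver call site; the base address 0x20000000 and the SZ_1G size are illustrative values, not taken from this commit (the pattern follows the exynos and shmobile call sites updated below):

        /* Before (up to 3.14): allocation granularity chosen via 'order'. */
        mapping = arm_iommu_create_mapping(&platform_bus_type,
                                           0x20000000, SZ_1G, 0);

        /*
         * After (3.15): no 'order' parameter; the allocator works on
         * per-page bitmaps and grows them on demand.
         */
        mapping = arm_iommu_create_mapping(&platform_bus_type,
                                           0x20000000, SZ_1G);
        if (IS_ERR(mapping))
                return PTR_ERR(mapping);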

arch/arm/include/asm/dma-iommu.h
@@ -13,9 +13,12 @@ struct dma_iommu_mapping {
 	/* iommu specific data */
 	struct iommu_domain	*domain;
 
-	void			*bitmap;
-	size_t			bits;
-	unsigned int		order;
+	unsigned long		**bitmaps;	/* array of bitmaps */
+	unsigned int		nr_bitmaps;	/* nr of elements in array */
+	unsigned int		extensions;
+	size_t			bitmap_size;	/* size of a single bitmap */
+	size_t			bits;		/* per bitmap */
+	unsigned int		size;		/* per bitmap */
 	dma_addr_t		base;
 
 	spinlock_t		lock;
@@ -23,8 +26,7 @@ struct dma_iommu_mapping {
 };
 
 struct dma_iommu_mapping *
-arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size,
-			 int order);
+arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size);
 
 void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping);

arch/arm/mm/dma-mapping.c
@@ -1069,6 +1069,8 @@ fs_initcall(dma_debug_do_init);
 
 /* IOMMU */
 
+static int extend_iommu_mapping(struct dma_iommu_mapping *mapping);
+
 static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
 				      size_t size)
 {
@@ -1076,41 +1078,87 @@ static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
 	unsigned int align = 0;
 	unsigned int count, start;
 	unsigned long flags;
+	dma_addr_t iova;
+	int i;
 
 	if (order > CONFIG_ARM_DMA_IOMMU_ALIGNMENT)
 		order = CONFIG_ARM_DMA_IOMMU_ALIGNMENT;
 
-	count = ((PAGE_ALIGN(size) >> PAGE_SHIFT) +
-		 (1 << mapping->order) - 1) >> mapping->order;
-
-	if (order > mapping->order)
-		align = (1 << (order - mapping->order)) - 1;
+	count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+	align = (1 << order) - 1;
 
 	spin_lock_irqsave(&mapping->lock, flags);
-	start = bitmap_find_next_zero_area(mapping->bitmap, mapping->bits, 0,
-					   count, align);
-	if (start > mapping->bits) {
-		spin_unlock_irqrestore(&mapping->lock, flags);
-		return DMA_ERROR_CODE;
+	for (i = 0; i < mapping->nr_bitmaps; i++) {
+		start = bitmap_find_next_zero_area(mapping->bitmaps[i],
+				mapping->bits, 0, count, align);
+
+		if (start > mapping->bits)
+			continue;
+
+		bitmap_set(mapping->bitmaps[i], start, count);
+		break;
 	}
 
-	bitmap_set(mapping->bitmap, start, count);
+	/*
+	 * No unused range found. Try to extend the existing mapping
+	 * and perform a second attempt to reserve an IO virtual
+	 * address range of size bytes.
+	 */
+	if (i == mapping->nr_bitmaps) {
+		if (extend_iommu_mapping(mapping)) {
+			spin_unlock_irqrestore(&mapping->lock, flags);
+			return DMA_ERROR_CODE;
+		}
+
+		start = bitmap_find_next_zero_area(mapping->bitmaps[i],
+				mapping->bits, 0, count, align);
+
+		if (start > mapping->bits) {
+			spin_unlock_irqrestore(&mapping->lock, flags);
+			return DMA_ERROR_CODE;
+		}
+
+		bitmap_set(mapping->bitmaps[i], start, count);
+	}
 	spin_unlock_irqrestore(&mapping->lock, flags);
 
-	return mapping->base + (start << (mapping->order + PAGE_SHIFT));
+	iova = mapping->base + (mapping->size * i);
+	iova += start << PAGE_SHIFT;
+
+	return iova;
 }
 
 static inline void __free_iova(struct dma_iommu_mapping *mapping,
 			       dma_addr_t addr, size_t size)
 {
-	unsigned int start = (addr - mapping->base) >>
-			     (mapping->order + PAGE_SHIFT);
-	unsigned int count = ((size >> PAGE_SHIFT) +
-			      (1 << mapping->order) - 1) >> mapping->order;
+	unsigned int start, count;
 	unsigned long flags;
+	dma_addr_t bitmap_base;
+	u32 bitmap_index;
+
+	if (!size)
+		return;
+
+	bitmap_index = (u32) (addr - mapping->base) / (u32) mapping->size;
+	BUG_ON(addr < mapping->base || bitmap_index > mapping->extensions);
+
+	bitmap_base = mapping->base + mapping->size * bitmap_index;
+
+	start = (addr - bitmap_base) >> PAGE_SHIFT;
+
+	if (addr + size > bitmap_base + mapping->size) {
+		/*
+		 * The address range to be freed reaches into the iova
+		 * range of the next bitmap. This should not happen as
+		 * we don't allow this in __alloc_iova (at the
+		 * moment).
+		 */
+		BUG();
+	} else
+		count = size >> PAGE_SHIFT;
 
 	spin_lock_irqsave(&mapping->lock, flags);
-	bitmap_clear(mapping->bitmap, start, count);
+	bitmap_clear(mapping->bitmaps[bitmap_index], start, count);
 	spin_unlock_irqrestore(&mapping->lock, flags);
 }
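The hunk above is the core of the change: instead of one large bitmap indexed directly by (iova - base), the allocator walks an array of equally sized bitmaps and, if none has a free range, extends the array before retrying. A simplified, self-contained sketch of the address arithmetic; the names and types are hypothetical and locking, alignment, and the extension path are deliberately left out:

        #include <stddef.h>
        #include <stdint.h>

        #define PAGE_SHIFT	12

        /* Hypothetical, stripped-down mirror of struct dma_iommu_mapping. */
        struct toy_mapping {
                uint64_t	base;		/* first IO virtual address */
                size_t		size;		/* IO space covered per bitmap */
                unsigned int	nr_bitmaps;	/* bitmaps allocated so far */
        };

        /* iova = base + i * size + (start << PAGE_SHIFT), as in __alloc_iova(). */
        static uint64_t toy_iova(const struct toy_mapping *m,
                                 unsigned int i, unsigned int start)
        {
                return m->base + (uint64_t)i * m->size +
                       ((uint64_t)start << PAGE_SHIFT);
        }

        /* Reverse step used by __free_iova(): which bitmap owns this address? */
        static unsigned int toy_bitmap_index(const struct toy_mapping *m,
                                             uint64_t addr)
        {
                return (unsigned int)((addr - m->base) / m->size);
        }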
@@ -1875,8 +1923,7 @@ struct dma_map_ops iommu_coherent_ops = {
  * arm_iommu_create_mapping
  * @bus: pointer to the bus holding the client device (for IOMMU calls)
  * @base: start address of the valid IO address space
- * @size: size of the valid IO address space
- * @order: accuracy of the IO addresses allocations
+ * @size: maximum size of the valid IO address space
  *
  * Creates a mapping structure which holds information about used/unused
  * IO address ranges, which is required to perform memory allocation and
@@ -1886,38 +1933,54 @@ struct dma_map_ops iommu_coherent_ops = {
  * arm_iommu_attach_device function.
  */
 struct dma_iommu_mapping *
-arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size,
-			 int order)
+arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size)
 {
-	unsigned int count = size >> (PAGE_SHIFT + order);
-	unsigned int bitmap_size = BITS_TO_LONGS(count) * sizeof(long);
+	unsigned int bits = size >> PAGE_SHIFT;
+	unsigned int bitmap_size = BITS_TO_LONGS(bits) * sizeof(long);
 	struct dma_iommu_mapping *mapping;
+	int extensions = 1;
 	int err = -ENOMEM;
 
-	if (!count)
+	if (!bitmap_size)
 		return ERR_PTR(-EINVAL);
 
+	if (bitmap_size > PAGE_SIZE) {
+		extensions = bitmap_size / PAGE_SIZE;
+		bitmap_size = PAGE_SIZE;
+	}
+
 	mapping = kzalloc(sizeof(struct dma_iommu_mapping), GFP_KERNEL);
 	if (!mapping)
 		goto err;
 
-	mapping->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
-	if (!mapping->bitmap)
+	mapping->bitmap_size = bitmap_size;
+	mapping->bitmaps = kzalloc(extensions * sizeof(unsigned long *),
+				GFP_KERNEL);
+	if (!mapping->bitmaps)
 		goto err2;
 
+	mapping->bitmaps[0] = kzalloc(bitmap_size, GFP_KERNEL);
+	if (!mapping->bitmaps[0])
+		goto err3;
+
+	mapping->nr_bitmaps = 1;
+	mapping->extensions = extensions;
 	mapping->base = base;
+	mapping->size = bitmap_size << PAGE_SHIFT;
 	mapping->bits = BITS_PER_BYTE * bitmap_size;
-	mapping->order = order;
 
 	spin_lock_init(&mapping->lock);
 
 	mapping->domain = iommu_domain_alloc(bus);
 	if (!mapping->domain)
-		goto err3;
+		goto err4;
 
 	kref_init(&mapping->kref);
 	return mapping;
+err4:
+	kfree(mapping->bitmaps[0]);
 err3:
-	kfree(mapping->bitmap);
+	kfree(mapping->bitmaps);
 err2:
 	kfree(mapping);
 err:
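To make the sizing above concrete, here is a worked example of how arm_iommu_create_mapping() splits the bitmap; the 1 GiB space, 4 KiB pages, and 64-bit longs are assumptions for illustration only, not values from this commit:

        /*
         * Worked example (illustrative only): size = 1 GiB, 4 KiB pages,
         * 64-bit longs.
         *
         *   bits        = SZ_1G >> PAGE_SHIFT              = 262144
         *   bitmap_size = BITS_TO_LONGS(262144) * 8        = 32768 bytes
         *
         * Since 32768 > PAGE_SIZE (4096):
         *
         *   extensions  = 32768 / 4096                     = 8
         *   bitmap_size = PAGE_SIZE                        = 4096 bytes
         *   bits        = BITS_PER_BYTE * 4096             = 32768 per bitmap
         *
         * Only bitmaps[0] is allocated here; the remaining bitmaps are
         * allocated lazily by extend_iommu_mapping() when __alloc_iova()
         * runs out of space in the existing ones.
         */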
@@ -1927,14 +1990,35 @@ EXPORT_SYMBOL_GPL(arm_iommu_create_mapping);
 
 static void release_iommu_mapping(struct kref *kref)
 {
+	int i;
 	struct dma_iommu_mapping *mapping =
 		container_of(kref, struct dma_iommu_mapping, kref);
 
 	iommu_domain_free(mapping->domain);
-	kfree(mapping->bitmap);
+	for (i = 0; i < mapping->nr_bitmaps; i++)
+		kfree(mapping->bitmaps[i]);
+	kfree(mapping->bitmaps);
 	kfree(mapping);
 }
 
+static int extend_iommu_mapping(struct dma_iommu_mapping *mapping)
+{
+	int next_bitmap;
+
+	if (mapping->nr_bitmaps > mapping->extensions)
+		return -EINVAL;
+
+	next_bitmap = mapping->nr_bitmaps;
+	mapping->bitmaps[next_bitmap] = kzalloc(mapping->bitmap_size,
+						GFP_ATOMIC);
+	if (!mapping->bitmaps[next_bitmap])
+		return -ENOMEM;
+
+	mapping->nr_bitmaps++;
+
+	return 0;
+}
+
 void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping)
 {
 	if (mapping)
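Note that extend_iommu_mapping() above is only called from __alloc_iova() while mapping->lock is held via spin_lock_irqsave(), which is why the new bitmap is allocated with GFP_ATOMIC, and growth is bounded by the 'extensions' value computed at create time. A toy sketch of that policy in plain C; the names are hypothetical and the bound check uses '>=' for simplicity rather than mirroring the kernel line for line:

        #include <stdbool.h>
        #include <stddef.h>
        #include <stdlib.h>

        struct toy_space {
                unsigned char	**slots;	/* like mapping->bitmaps[], 'limit' entries */
                unsigned int	nr_slots;	/* like mapping->nr_bitmaps */
                unsigned int	limit;		/* like mapping->extensions */
                size_t		slot_bytes;	/* like mapping->bitmap_size */
        };

        /* Grow by one zeroed slot, refusing to exceed the precomputed limit. */
        static bool toy_extend(struct toy_space *s)
        {
                if (s->nr_slots >= s->limit)
                        return false;
                s->slots[s->nr_slots] = calloc(1, s->slot_bytes);
                if (!s->slots[s->nr_slots])
                        return false;
                s->nr_slots++;
                return true;
        }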

drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -237,7 +237,6 @@ struct drm_exynos_file_private {
  *	otherwise default one.
  * @da_space_size: size of device address space.
  *	if 0 then default value is used for it.
- * @da_space_order: order to device address space.
  */
 struct exynos_drm_private {
 	struct drm_fb_helper *fb_helper;
@@ -255,7 +254,6 @@ struct exynos_drm_private {
 
 	unsigned long da_start;
 	unsigned long da_space_size;
-	unsigned long da_space_order;
 };
 
 /*

drivers/gpu/drm/exynos/exynos_drm_iommu.c
@@ -36,12 +36,10 @@ int drm_create_iommu_mapping(struct drm_device *drm_dev)
 		priv->da_start = EXYNOS_DEV_ADDR_START;
 	if (!priv->da_space_size)
 		priv->da_space_size = EXYNOS_DEV_ADDR_SIZE;
-	if (!priv->da_space_order)
-		priv->da_space_order = EXYNOS_DEV_ADDR_ORDER;
 
 	mapping = arm_iommu_create_mapping(&platform_bus_type, priv->da_start,
-						priv->da_space_size,
-						priv->da_space_order);
+						priv->da_space_size);
 
 	if (IS_ERR(mapping))
 		return PTR_ERR(mapping);

drivers/gpu/drm/exynos/exynos_drm_iommu.h
@@ -14,7 +14,6 @@
 
 #define EXYNOS_DEV_ADDR_START	0x20000000
 #define EXYNOS_DEV_ADDR_SIZE	0x40000000
-#define EXYNOS_DEV_ADDR_ORDER	0x0
 
 #ifdef CONFIG_DRM_EXYNOS_IOMMU

drivers/iommu/shmobile-iommu.c
@@ -343,7 +343,7 @@ static int shmobile_iommu_add_device(struct device *dev)
 	mapping = archdata->iommu_mapping;
 	if (!mapping) {
 		mapping = arm_iommu_create_mapping(&platform_bus_type, 0,
-						   L1_LEN << 20, 0);
+						   L1_LEN << 20);
 		if (IS_ERR(mapping))
 			return PTR_ERR(mapping);
 		archdata->iommu_mapping = mapping;