From 141e9d4b5492499c4735d764b599c21e83dac154 Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Mon, 3 Dec 2007 11:57:48 -0500 Subject: [PATCH 1/7] Move dmapool.c to mm/ directory Signed-off-by: Matthew Wilcox --- drivers/base/Makefile | 2 +- mm/Makefile | 1 + {drivers/base => mm}/dmapool.c | 0 3 files changed, 2 insertions(+), 1 deletion(-) rename {drivers/base => mm}/dmapool.c (100%) diff --git a/drivers/base/Makefile b/drivers/base/Makefile index b39ea3f59c9b..ed0a722c38ca 100644 --- a/drivers/base/Makefile +++ b/drivers/base/Makefile @@ -5,7 +5,7 @@ obj-y := core.o sys.o bus.o dd.o \ cpu.o firmware.o init.o map.o devres.o \ attribute_container.o transport_class.o obj-y += power/ -obj-$(CONFIG_HAS_DMA) += dma-mapping.o dmapool.o +obj-$(CONFIG_HAS_DMA) += dma-mapping.o obj-$(CONFIG_ISA) += isa.o obj-$(CONFIG_FW_LOADER) += firmware_class.o obj-$(CONFIG_NUMA) += node.o diff --git a/mm/Makefile b/mm/Makefile index 5c0b0ea7572d..e222cc5a79cd 100644 --- a/mm/Makefile +++ b/mm/Makefile @@ -15,6 +15,7 @@ obj-y := bootmem.o filemap.o mempool.o oom_kill.o fadvise.o \ obj-$(CONFIG_BOUNCE) += bounce.o obj-$(CONFIG_SWAP) += page_io.o swap_state.o swapfile.o thrash.o +obj-$(CONFIG_HAS_DMA) += dmapool.o obj-$(CONFIG_HUGETLBFS) += hugetlb.o obj-$(CONFIG_NUMA) += mempolicy.o obj-$(CONFIG_SPARSEMEM) += sparse.o diff --git a/drivers/base/dmapool.c b/mm/dmapool.c similarity index 100% rename from drivers/base/dmapool.c rename to mm/dmapool.c From e87aa773747fb5e4217d716ea22a573c03b6693a Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Mon, 3 Dec 2007 12:04:31 -0500 Subject: [PATCH 2/7] dmapool: Fix style problems Run Lindent and fix all issues reported by checkpatch.pl Signed-off-by: Matthew Wilcox --- mm/dmapool.c | 286 +++++++++++++++++++++++++-------------------------- 1 file changed, 141 insertions(+), 145 deletions(-) diff --git a/mm/dmapool.c b/mm/dmapool.c index b5034dc72a05..92e886d37e90 100644 --- a/mm/dmapool.c +++ b/mm/dmapool.c @@ -15,32 +15,32 @@ * This should probably be sharing the guts of the slab allocator. 
*/ -struct dma_pool { /* the pool */ - struct list_head page_list; - spinlock_t lock; - size_t blocks_per_page; - size_t size; - struct device *dev; - size_t allocation; - char name [32]; - wait_queue_head_t waitq; - struct list_head pools; +struct dma_pool { /* the pool */ + struct list_head page_list; + spinlock_t lock; + size_t blocks_per_page; + size_t size; + struct device *dev; + size_t allocation; + char name[32]; + wait_queue_head_t waitq; + struct list_head pools; }; -struct dma_page { /* cacheable header for 'allocation' bytes */ - struct list_head page_list; - void *vaddr; - dma_addr_t dma; - unsigned in_use; - unsigned long bitmap [0]; +struct dma_page { /* cacheable header for 'allocation' bytes */ + struct list_head page_list; + void *vaddr; + dma_addr_t dma; + unsigned in_use; + unsigned long bitmap[0]; }; #define POOL_TIMEOUT_JIFFIES ((100 /* msec */ * HZ) / 1000) -static DEFINE_MUTEX (pools_lock); +static DEFINE_MUTEX(pools_lock); static ssize_t -show_pools (struct device *dev, struct device_attribute *attr, char *buf) +show_pools(struct device *dev, struct device_attribute *attr, char *buf) { unsigned temp; unsigned size; @@ -67,9 +67,9 @@ show_pools (struct device *dev, struct device_attribute *attr, char *buf) /* per-pool info, no real statistics yet */ temp = scnprintf(next, size, "%-16s %4u %4Zu %4Zu %2u\n", - pool->name, - blocks, pages * pool->blocks_per_page, - pool->size, pages); + pool->name, + blocks, pages * pool->blocks_per_page, + pool->size, pages); size -= temp; next += temp; } @@ -77,7 +77,8 @@ show_pools (struct device *dev, struct device_attribute *attr, char *buf) return PAGE_SIZE - size; } -static DEVICE_ATTR (pools, S_IRUGO, show_pools, NULL); + +static DEVICE_ATTR(pools, S_IRUGO, show_pools, NULL); /** * dma_pool_create - Creates a pool of consistent memory blocks, for dma. @@ -100,11 +101,10 @@ static DEVICE_ATTR (pools, S_IRUGO, show_pools, NULL); * addressing restrictions on individual DMA transfers, such as not crossing * boundaries of 4KBytes. */ -struct dma_pool * -dma_pool_create (const char *name, struct device *dev, - size_t size, size_t align, size_t allocation) +struct dma_pool *dma_pool_create(const char *name, struct device *dev, + size_t size, size_t align, size_t allocation) { - struct dma_pool *retval; + struct dma_pool *retval; if (align == 0) align = 1; @@ -122,81 +122,79 @@ dma_pool_create (const char *name, struct device *dev, allocation = size; else allocation = PAGE_SIZE; - // FIXME: round up for less fragmentation + /* FIXME: round up for less fragmentation */ } else if (allocation < size) return NULL; - if (!(retval = kmalloc_node (sizeof *retval, GFP_KERNEL, dev_to_node(dev)))) + if (! 
+ (retval = + kmalloc_node(sizeof *retval, GFP_KERNEL, dev_to_node(dev)))) return retval; - strlcpy (retval->name, name, sizeof retval->name); + strlcpy(retval->name, name, sizeof retval->name); retval->dev = dev; - INIT_LIST_HEAD (&retval->page_list); - spin_lock_init (&retval->lock); + INIT_LIST_HEAD(&retval->page_list); + spin_lock_init(&retval->lock); retval->size = size; retval->allocation = allocation; retval->blocks_per_page = allocation / size; - init_waitqueue_head (&retval->waitq); + init_waitqueue_head(&retval->waitq); if (dev) { int ret; mutex_lock(&pools_lock); - if (list_empty (&dev->dma_pools)) - ret = device_create_file (dev, &dev_attr_pools); + if (list_empty(&dev->dma_pools)) + ret = device_create_file(dev, &dev_attr_pools); else ret = 0; /* note: not currently insisting "name" be unique */ if (!ret) - list_add (&retval->pools, &dev->dma_pools); + list_add(&retval->pools, &dev->dma_pools); else { kfree(retval); retval = NULL; } mutex_unlock(&pools_lock); } else - INIT_LIST_HEAD (&retval->pools); + INIT_LIST_HEAD(&retval->pools); return retval; } +EXPORT_SYMBOL(dma_pool_create); - -static struct dma_page * -pool_alloc_page (struct dma_pool *pool, gfp_t mem_flags) +static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags) { - struct dma_page *page; - int mapsize; + struct dma_page *page; + int mapsize; mapsize = pool->blocks_per_page; mapsize = (mapsize + BITS_PER_LONG - 1) / BITS_PER_LONG; - mapsize *= sizeof (long); + mapsize *= sizeof(long); page = kmalloc(mapsize + sizeof *page, mem_flags); if (!page) return NULL; - page->vaddr = dma_alloc_coherent (pool->dev, - pool->allocation, - &page->dma, - mem_flags); + page->vaddr = dma_alloc_coherent(pool->dev, + pool->allocation, + &page->dma, mem_flags); if (page->vaddr) { - memset (page->bitmap, 0xff, mapsize); // bit set == free + memset(page->bitmap, 0xff, mapsize); /* bit set == free */ #ifdef CONFIG_DEBUG_SLAB - memset (page->vaddr, POOL_POISON_FREED, pool->allocation); + memset(page->vaddr, POOL_POISON_FREED, pool->allocation); #endif - list_add (&page->page_list, &pool->page_list); + list_add(&page->page_list, &pool->page_list); page->in_use = 0; } else { - kfree (page); + kfree(page); page = NULL; } return page; } - -static inline int -is_page_busy (int blocks, unsigned long *bitmap) +static inline int is_page_busy(int blocks, unsigned long *bitmap) { while (blocks > 0) { if (*bitmap++ != ~0UL) @@ -206,20 +204,18 @@ is_page_busy (int blocks, unsigned long *bitmap) return 0; } -static void -pool_free_page (struct dma_pool *pool, struct dma_page *page) +static void pool_free_page(struct dma_pool *pool, struct dma_page *page) { - dma_addr_t dma = page->dma; + dma_addr_t dma = page->dma; #ifdef CONFIG_DEBUG_SLAB - memset (page->vaddr, POOL_POISON_FREED, pool->allocation); + memset(page->vaddr, POOL_POISON_FREED, pool->allocation); #endif - dma_free_coherent (pool->dev, pool->allocation, page->vaddr, dma); - list_del (&page->page_list); - kfree (page); + dma_free_coherent(pool->dev, pool->allocation, page->vaddr, dma); + list_del(&page->page_list); + kfree(page); } - /** * dma_pool_destroy - destroys a pool of dma memory blocks. * @pool: dma pool that will be destroyed @@ -228,36 +224,37 @@ pool_free_page (struct dma_pool *pool, struct dma_page *page) * Caller guarantees that no more memory from the pool is in use, * and that nothing will try to use the pool after this call. 
*/ -void -dma_pool_destroy (struct dma_pool *pool) +void dma_pool_destroy(struct dma_pool *pool) { mutex_lock(&pools_lock); - list_del (&pool->pools); - if (pool->dev && list_empty (&pool->dev->dma_pools)) - device_remove_file (pool->dev, &dev_attr_pools); + list_del(&pool->pools); + if (pool->dev && list_empty(&pool->dev->dma_pools)) + device_remove_file(pool->dev, &dev_attr_pools); mutex_unlock(&pools_lock); - while (!list_empty (&pool->page_list)) { - struct dma_page *page; - page = list_entry (pool->page_list.next, - struct dma_page, page_list); - if (is_page_busy (pool->blocks_per_page, page->bitmap)) { + while (!list_empty(&pool->page_list)) { + struct dma_page *page; + page = list_entry(pool->page_list.next, + struct dma_page, page_list); + if (is_page_busy(pool->blocks_per_page, page->bitmap)) { if (pool->dev) - dev_err(pool->dev, "dma_pool_destroy %s, %p busy\n", + dev_err(pool->dev, + "dma_pool_destroy %s, %p busy\n", pool->name, page->vaddr); else - printk (KERN_ERR "dma_pool_destroy %s, %p busy\n", - pool->name, page->vaddr); + printk(KERN_ERR + "dma_pool_destroy %s, %p busy\n", + pool->name, page->vaddr); /* leak the still-in-use consistent memory */ - list_del (&page->page_list); - kfree (page); + list_del(&page->page_list); + kfree(page); } else - pool_free_page (pool, page); + pool_free_page(pool, page); } - kfree (pool); + kfree(pool); } - +EXPORT_SYMBOL(dma_pool_destroy); /** * dma_pool_alloc - get a block of consistent memory @@ -269,73 +266,72 @@ dma_pool_destroy (struct dma_pool *pool) * and reports its dma address through the handle. * If such a memory block can't be allocated, null is returned. */ -void * -dma_pool_alloc (struct dma_pool *pool, gfp_t mem_flags, dma_addr_t *handle) +void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags, + dma_addr_t *handle) { - unsigned long flags; - struct dma_page *page; - int map, block; - size_t offset; - void *retval; + unsigned long flags; + struct dma_page *page; + int map, block; + size_t offset; + void *retval; -restart: - spin_lock_irqsave (&pool->lock, flags); + restart: + spin_lock_irqsave(&pool->lock, flags); list_for_each_entry(page, &pool->page_list, page_list) { - int i; + int i; /* only cachable accesses here ... 
*/ for (map = 0, i = 0; - i < pool->blocks_per_page; - i += BITS_PER_LONG, map++) { - if (page->bitmap [map] == 0) + i < pool->blocks_per_page; i += BITS_PER_LONG, map++) { + if (page->bitmap[map] == 0) continue; - block = ffz (~ page->bitmap [map]); + block = ffz(~page->bitmap[map]); if ((i + block) < pool->blocks_per_page) { - clear_bit (block, &page->bitmap [map]); + clear_bit(block, &page->bitmap[map]); offset = (BITS_PER_LONG * map) + block; offset *= pool->size; goto ready; } } } - if (!(page = pool_alloc_page (pool, GFP_ATOMIC))) { + page = pool_alloc_page(pool, GFP_ATOMIC); + if (!page) { if (mem_flags & __GFP_WAIT) { - DECLARE_WAITQUEUE (wait, current); + DECLARE_WAITQUEUE(wait, current); __set_current_state(TASK_INTERRUPTIBLE); - add_wait_queue (&pool->waitq, &wait); - spin_unlock_irqrestore (&pool->lock, flags); + add_wait_queue(&pool->waitq, &wait); + spin_unlock_irqrestore(&pool->lock, flags); - schedule_timeout (POOL_TIMEOUT_JIFFIES); + schedule_timeout(POOL_TIMEOUT_JIFFIES); - remove_wait_queue (&pool->waitq, &wait); + remove_wait_queue(&pool->waitq, &wait); goto restart; } retval = NULL; goto done; } - clear_bit (0, &page->bitmap [0]); + clear_bit(0, &page->bitmap[0]); offset = 0; -ready: + ready: page->in_use++; retval = offset + page->vaddr; *handle = offset + page->dma; #ifdef CONFIG_DEBUG_SLAB - memset (retval, POOL_POISON_ALLOCATED, pool->size); + memset(retval, POOL_POISON_ALLOCATED, pool->size); #endif -done: - spin_unlock_irqrestore (&pool->lock, flags); + done: + spin_unlock_irqrestore(&pool->lock, flags); return retval; } +EXPORT_SYMBOL(dma_pool_alloc); - -static struct dma_page * -pool_find_page (struct dma_pool *pool, dma_addr_t dma) +static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma) { - unsigned long flags; - struct dma_page *page; + unsigned long flags; + struct dma_page *page; - spin_lock_irqsave (&pool->lock, flags); + spin_lock_irqsave(&pool->lock, flags); list_for_each_entry(page, &pool->page_list, page_list) { if (dma < page->dma) continue; @@ -343,12 +339,11 @@ pool_find_page (struct dma_pool *pool, dma_addr_t dma) goto done; } page = NULL; -done: - spin_unlock_irqrestore (&pool->lock, flags); + done: + spin_unlock_irqrestore(&pool->lock, flags); return page; } - /** * dma_pool_free - put block back into dma pool * @pool: the dma pool holding the block @@ -358,20 +353,21 @@ done: * Caller promises neither device nor driver will again touch this block * unless it is first re-allocated. 
*/ -void -dma_pool_free (struct dma_pool *pool, void *vaddr, dma_addr_t dma) +void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma) { - struct dma_page *page; - unsigned long flags; - int map, block; + struct dma_page *page; + unsigned long flags; + int map, block; - if ((page = pool_find_page(pool, dma)) == NULL) { + page = pool_find_page(pool, dma); + if (!page) { if (pool->dev) - dev_err(pool->dev, "dma_pool_free %s, %p/%lx (bad dma)\n", - pool->name, vaddr, (unsigned long) dma); + dev_err(pool->dev, + "dma_pool_free %s, %p/%lx (bad dma)\n", + pool->name, vaddr, (unsigned long)dma); else - printk (KERN_ERR "dma_pool_free %s, %p/%lx (bad dma)\n", - pool->name, vaddr, (unsigned long) dma); + printk(KERN_ERR "dma_pool_free %s, %p/%lx (bad dma)\n", + pool->name, vaddr, (unsigned long)dma); return; } @@ -383,37 +379,42 @@ dma_pool_free (struct dma_pool *pool, void *vaddr, dma_addr_t dma) #ifdef CONFIG_DEBUG_SLAB if (((dma - page->dma) + (void *)page->vaddr) != vaddr) { if (pool->dev) - dev_err(pool->dev, "dma_pool_free %s, %p (bad vaddr)/%Lx\n", - pool->name, vaddr, (unsigned long long) dma); + dev_err(pool->dev, + "dma_pool_free %s, %p (bad vaddr)/%Lx\n", + pool->name, vaddr, (unsigned long long)dma); else - printk (KERN_ERR "dma_pool_free %s, %p (bad vaddr)/%Lx\n", - pool->name, vaddr, (unsigned long long) dma); + printk(KERN_ERR + "dma_pool_free %s, %p (bad vaddr)/%Lx\n", + pool->name, vaddr, (unsigned long long)dma); return; } - if (page->bitmap [map] & (1UL << block)) { + if (page->bitmap[map] & (1UL << block)) { if (pool->dev) - dev_err(pool->dev, "dma_pool_free %s, dma %Lx already free\n", + dev_err(pool->dev, + "dma_pool_free %s, dma %Lx already free\n", pool->name, (unsigned long long)dma); else - printk (KERN_ERR "dma_pool_free %s, dma %Lx already free\n", - pool->name, (unsigned long long)dma); + printk(KERN_ERR + "dma_pool_free %s, dma %Lx already free\n", + pool->name, (unsigned long long)dma); return; } - memset (vaddr, POOL_POISON_FREED, pool->size); + memset(vaddr, POOL_POISON_FREED, pool->size); #endif - spin_lock_irqsave (&pool->lock, flags); + spin_lock_irqsave(&pool->lock, flags); page->in_use--; - set_bit (block, &page->bitmap [map]); - if (waitqueue_active (&pool->waitq)) - wake_up (&pool->waitq); + set_bit(block, &page->bitmap[map]); + if (waitqueue_active(&pool->waitq)) + wake_up(&pool->waitq); /* * Resist a temptation to do * if (!is_page_busy(bpp, page->bitmap)) pool_free_page(pool, page); * Better have a few empty pages hang around. 
 */
-	spin_unlock_irqrestore (&pool->lock, flags);
+	spin_unlock_irqrestore(&pool->lock, flags);
 }
+EXPORT_SYMBOL(dma_pool_free);
 
 /*
  * Managed DMA pool
  */
@@ -458,6 +459,7 @@ struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
 
 	return pool;
 }
+EXPORT_SYMBOL(dmam_pool_create);
 
 /**
  * dmam_pool_destroy - Managed dma_pool_destroy()
@@ -472,10 +474,4 @@ void dmam_pool_destroy(struct dma_pool *pool)
 	dma_pool_destroy(pool);
 	WARN_ON(devres_destroy(dev, dmam_pool_release, dmam_pool_match, pool));
 }
-
-EXPORT_SYMBOL (dma_pool_create);
-EXPORT_SYMBOL (dma_pool_destroy);
-EXPORT_SYMBOL (dma_pool_alloc);
-EXPORT_SYMBOL (dma_pool_free);
-EXPORT_SYMBOL (dmam_pool_create);
-EXPORT_SYMBOL (dmam_pool_destroy);
+EXPORT_SYMBOL(dmam_pool_destroy);

From 2cae367e4854ff055c4f5e8aacd56b0eeec9f6cb Mon Sep 17 00:00:00 2001
From: Matthew Wilcox
Date: Mon, 3 Dec 2007 12:09:33 -0500
Subject: [PATCH 3/7] Avoid taking waitqueue lock in dmapool

With one trivial change (taking the lock slightly earlier on wakeup
from schedule), all uses of the waitq are under the pool lock, so
we can use the locked (or __) versions of the wait queue functions,
and avoid the extra spinlock.

Signed-off-by: Matthew Wilcox
Acked-by: David S. Miller
---
 mm/dmapool.c | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/mm/dmapool.c b/mm/dmapool.c
index 92e886d37e90..b5ff9ce8765b 100644
--- a/mm/dmapool.c
+++ b/mm/dmapool.c
@@ -275,8 +275,8 @@ void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
 	size_t offset;
 	void *retval;
 
- restart:
 	spin_lock_irqsave(&pool->lock, flags);
+ restart:
 	list_for_each_entry(page, &pool->page_list, page_list) {
 		int i;
 		/* only cachable accesses here ... */
@@ -299,12 +299,13 @@ void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
 			DECLARE_WAITQUEUE(wait, current);
 
 			__set_current_state(TASK_INTERRUPTIBLE);
-			add_wait_queue(&pool->waitq, &wait);
+			__add_wait_queue(&pool->waitq, &wait);
 			spin_unlock_irqrestore(&pool->lock, flags);
 
 			schedule_timeout(POOL_TIMEOUT_JIFFIES);
 
-			remove_wait_queue(&pool->waitq, &wait);
+			spin_lock_irqsave(&pool->lock, flags);
+			__remove_wait_queue(&pool->waitq, &wait);
 			goto restart;
 		}
 		retval = NULL;
@@ -406,7 +407,7 @@ void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
 	page->in_use--;
 	set_bit(block, &page->bitmap[map]);
 	if (waitqueue_active(&pool->waitq))
-		wake_up(&pool->waitq);
+		wake_up_locked(&pool->waitq);
 	/*
 	 * Resist a temptation to do
 	 *    if (!is_page_busy(bpp, page->bitmap)) pool_free_page(pool, page);

From 399154be2dcb6a58dbde9682162c38113cf3e40b Mon Sep 17 00:00:00 2001
From: Matthew Wilcox
Date: Mon, 3 Dec 2007 12:10:24 -0500
Subject: [PATCH 4/7] dmapool: Validate parameters to dma_pool_create

Check that 'align' is a power of two, like the API specifies.
Align 'size' to 'align' correctly -- the current code has an
off-by-one: it adds 'align + 1' where rounding up needs 'align - 1'
before masking.  The ALIGN macro in kernel.h doesn't have this bug,
so use it instead.
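For illustration, the parameter checks this patch adds boil down to
the following (a minimal userspace sketch, not the kernel code; the
ALIGN definition mirrors the one in include/linux/kernel.h):

	#include <stddef.h>
	#include <stdio.h>

	/* Round x up to the next multiple of the power-of-two a */
	#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

	/* Returns 0 if the (size, align) pair would be rejected */
	static size_t checked_size(size_t size, size_t align)
	{
		if (align == 0)
			align = 1;
		else if (align & (align - 1))	/* not a power of two */
			return 0;
		if (size == 0)
			return 0;
		return ALIGN(size, align);
	}

	int main(void)
	{
		/* 15 rounded up to an 8-byte alignment is 16; the old
		 * 'size += align + 1' arithmetic produced 24. */
		printf("%zu\n", checked_size(15, 8));	/* prints 16 */
		printf("%zu\n", checked_size(15, 6));	/* prints 0: rejected */
		return 0;
	}

A request such as align = 6, which the old code accepted and then
mangled with the mask arithmetic, is now refused outright.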
Signed-off-by: Matthew Wilcox
Acked-by: David S. Miller
---
 mm/dmapool.c | 15 ++++++++-------
 1 file changed, 8 insertions(+), 7 deletions(-)

diff --git a/mm/dmapool.c b/mm/dmapool.c
index b5ff9ce8765b..744d541df866 100644
--- a/mm/dmapool.c
+++ b/mm/dmapool.c
@@ -106,16 +106,17 @@ struct dma_pool *dma_pool_create(const char *name, struct device *dev,
 {
 	struct dma_pool *retval;
 
-	if (align == 0)
+	if (align == 0) {
 		align = 1;
+	} else if (align & (align - 1)) {
+		return NULL;
+	}
+
 	if (size == 0)
 		return NULL;
-	else if (size < align)
-		size = align;
-	else if ((size % align) != 0) {
-		size += align + 1;
-		size &= ~(align - 1);
-	}
+
+	if ((size % align) != 0)
+		size = ALIGN(size, align);
 
 	if (allocation == 0) {
 		if (PAGE_SIZE < size)

From 6182a0943af2235756836ed7e021fa22b93ec68b Mon Sep 17 00:00:00 2001
From: Matthew Wilcox
Date: Mon, 3 Dec 2007 12:16:57 -0500
Subject: [PATCH 5/7] dmapool: Tidy up includes and add comments

We were missing a copyright statement and license, so add GPLv2,
David Brownell's copyright and my copyright.

The asm/io.h include was superfluous, but we were missing a few other
necessary includes.

Signed-off-by: Matthew Wilcox
---
 mm/dmapool.c | 40 ++++++++++++++++++++++++++++++----------
 1 file changed, 30 insertions(+), 10 deletions(-)

diff --git a/mm/dmapool.c b/mm/dmapool.c
index 744d541df866..e2ea4543abb4 100644
--- a/mm/dmapool.c
+++ b/mm/dmapool.c
@@ -1,19 +1,39 @@
+/*
+ * DMA Pool allocator
+ *
+ * Copyright 2001 David Brownell
+ * Copyright 2007 Intel Corporation
+ *   Author: Matthew Wilcox
+ *
+ * This software may be redistributed and/or modified under the terms of
+ * the GNU General Public License ("GPL") version 2 as published by the
+ * Free Software Foundation.
+ *
+ * This allocator returns small blocks of a given size which are DMA-able by
+ * the given device.  It uses the dma_alloc_coherent page allocator to get
+ * new pages, then splits them up into blocks of the required size.
+ * Many older drivers still have their own code to do this.
+ *
+ * The current design of this allocator is fairly simple.  The pool is
+ * represented by the 'struct dma_pool' which keeps a doubly-linked list of
+ * allocated pages.  Each page in the page_list is split into blocks of at
+ * least 'size' bytes.
+ */
+
 #include <linux/device.h>
-#include <linux/mm.h>
-#include <asm/io.h>		/* Needed for i386 to build */
 #include <linux/dma-mapping.h>
 #include <linux/dmapool.h>
-#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
 #include <linux/module.h>
+#include <linux/mutex.h>
 #include <linux/poison.h>
 #include <linux/sched.h>
-
-/*
- * Pool allocator ... wraps the dma_alloc_coherent page allocator, so
- * small blocks are easily used by drivers for bus mastering controllers.
- * This should probably be sharing the guts of the slab allocator.
- */
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/wait.h>
 
 struct dma_pool {		/* the pool */
 	struct list_head page_list;
@@ -265,7 +285,7 @@ EXPORT_SYMBOL(dma_pool_destroy);
  *
  * This returns the kernel virtual address of a currently unused block,
  * and reports its dma address through the handle.
- * If such a memory block can't be allocated, null is returned.
+ * If such a memory block can't be allocated, %NULL is returned.
  */
 void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
 		     dma_addr_t *handle)

From a35a3455142976e3fffdf27027f3082cbaba6e8c Mon Sep 17 00:00:00 2001
From: Matthew Wilcox
Date: Mon, 3 Dec 2007 14:08:28 -0500
Subject: [PATCH 6/7] Change dmapool free block management

Use a list of free blocks within a page instead of using a bitmap.
Update documentation to reflect this.

As well as slightly reducing memory allocation, locked ops and lines
of code, this speeds up a transaction processing benchmark by 0.4%.
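The scheme is a classic intrusive free list: the first bytes of each
free block store the offset of the next free block, and the page
header keeps only the offset of the chain's head, so free blocks cost
no storage beyond the blocks themselves.  A minimal userspace sketch
of the idea (names and constants are invented for illustration; the
kernel code below does the same thing via page->vaddr and
page->offset):

	#include <assert.h>
	#include <stdlib.h>

	#define ALLOCATION	4096	/* bytes per pool page */
	#define BLOCK_SIZE	64	/* bytes per block */

	/* Chain all blocks together; the value ALLOCATION means
	 * "no more free blocks in this page". */
	static void initialise_page(char *vaddr, unsigned int *head)
	{
		unsigned int off;

		for (off = 0; off < ALLOCATION; off += BLOCK_SIZE)
			*(unsigned int *)(vaddr + off) = off + BLOCK_SIZE;
		*head = 0;
	}

	/* Pop the head of the list, as dma_pool_alloc() now does */
	static void *block_alloc(char *vaddr, unsigned int *head)
	{
		unsigned int off = *head;

		if (off >= ALLOCATION)
			return NULL;	/* page is full */
		*head = *(unsigned int *)(vaddr + off);
		return vaddr + off;
	}

	/* Push a block back on the head, as dma_pool_free() now does */
	static void block_free(char *vaddr, unsigned int *head, void *block)
	{
		*(unsigned int *)block = *head;
		*head = (char *)block - vaddr;
	}

	int main(void)
	{
		char *page = malloc(ALLOCATION);
		unsigned int head;
		void *a, *b;

		initialise_page(page, &head);
		a = block_alloc(page, &head);	/* offset 0 */
		b = block_alloc(page, &head);	/* offset 64 */
		block_free(page, &head, a);
		/* LIFO: the block just freed is handed out again first */
		assert(block_alloc(page, &head) == a && b != a);
		free(page);
		return 0;
	}

This is also why the patch below bumps the minimum block size to 4
bytes: a free block must be large enough to hold the next-free
offset.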
Signed-off-by: Matthew Wilcox --- mm/dmapool.c | 119 +++++++++++++++++++++++++-------------------------- 1 file changed, 58 insertions(+), 61 deletions(-) diff --git a/mm/dmapool.c b/mm/dmapool.c index e2ea4543abb4..72e7ece7ee9d 100644 --- a/mm/dmapool.c +++ b/mm/dmapool.c @@ -17,7 +17,9 @@ * The current design of this allocator is fairly simple. The pool is * represented by the 'struct dma_pool' which keeps a doubly-linked list of * allocated pages. Each page in the page_list is split into blocks of at - * least 'size' bytes. + * least 'size' bytes. Free blocks are tracked in an unsorted singly-linked + * list of free blocks within the page. Used blocks aren't tracked, but we + * keep a count of how many are currently allocated from each page. */ #include @@ -38,7 +40,6 @@ struct dma_pool { /* the pool */ struct list_head page_list; spinlock_t lock; - size_t blocks_per_page; size_t size; struct device *dev; size_t allocation; @@ -51,8 +52,8 @@ struct dma_page { /* cacheable header for 'allocation' bytes */ struct list_head page_list; void *vaddr; dma_addr_t dma; - unsigned in_use; - unsigned long bitmap[0]; + unsigned int in_use; + unsigned int offset; }; #define POOL_TIMEOUT_JIFFIES ((100 /* msec */ * HZ) / 1000) @@ -87,8 +88,8 @@ show_pools(struct device *dev, struct device_attribute *attr, char *buf) /* per-pool info, no real statistics yet */ temp = scnprintf(next, size, "%-16s %4u %4Zu %4Zu %2u\n", - pool->name, - blocks, pages * pool->blocks_per_page, + pool->name, blocks, + pages * (pool->allocation / pool->size), pool->size, pages); size -= temp; next += temp; @@ -132,8 +133,11 @@ struct dma_pool *dma_pool_create(const char *name, struct device *dev, return NULL; } - if (size == 0) + if (size == 0) { return NULL; + } else if (size < 4) { + size = 4; + } if ((size % align) != 0) size = ALIGN(size, align); @@ -160,7 +164,6 @@ struct dma_pool *dma_pool_create(const char *name, struct device *dev, spin_lock_init(&retval->lock); retval->size = size; retval->allocation = allocation; - retval->blocks_per_page = allocation / size; init_waitqueue_head(&retval->waitq); if (dev) { @@ -186,28 +189,36 @@ struct dma_pool *dma_pool_create(const char *name, struct device *dev, } EXPORT_SYMBOL(dma_pool_create); +static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page) +{ + unsigned int offset = 0; + + do { + unsigned int next = offset + pool->size; + if (unlikely((next + pool->size) >= pool->allocation)) + next = pool->allocation; + *(int *)(page->vaddr + offset) = next; + offset = next; + } while (offset < pool->allocation); +} + static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags) { struct dma_page *page; - int mapsize; - mapsize = pool->blocks_per_page; - mapsize = (mapsize + BITS_PER_LONG - 1) / BITS_PER_LONG; - mapsize *= sizeof(long); - - page = kmalloc(mapsize + sizeof *page, mem_flags); + page = kmalloc(sizeof(*page), mem_flags); if (!page) return NULL; - page->vaddr = dma_alloc_coherent(pool->dev, - pool->allocation, + page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation, &page->dma, mem_flags); if (page->vaddr) { - memset(page->bitmap, 0xff, mapsize); /* bit set == free */ #ifdef CONFIG_DEBUG_SLAB memset(page->vaddr, POOL_POISON_FREED, pool->allocation); #endif + pool_initialise_page(pool, page); list_add(&page->page_list, &pool->page_list); page->in_use = 0; + page->offset = 0; } else { kfree(page); page = NULL; @@ -215,14 +226,9 @@ static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags) return page; } 
-static inline int is_page_busy(int blocks, unsigned long *bitmap) +static inline int is_page_busy(struct dma_page *page) { - while (blocks > 0) { - if (*bitmap++ != ~0UL) - return 1; - blocks -= BITS_PER_LONG; - } - return 0; + return page->in_use != 0; } static void pool_free_page(struct dma_pool *pool, struct dma_page *page) @@ -257,7 +263,7 @@ void dma_pool_destroy(struct dma_pool *pool) struct dma_page *page; page = list_entry(pool->page_list.next, struct dma_page, page_list); - if (is_page_busy(pool->blocks_per_page, page->bitmap)) { + if (is_page_busy(page)) { if (pool->dev) dev_err(pool->dev, "dma_pool_destroy %s, %p busy\n", @@ -292,27 +298,14 @@ void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags, { unsigned long flags; struct dma_page *page; - int map, block; size_t offset; void *retval; spin_lock_irqsave(&pool->lock, flags); restart: list_for_each_entry(page, &pool->page_list, page_list) { - int i; - /* only cachable accesses here ... */ - for (map = 0, i = 0; - i < pool->blocks_per_page; i += BITS_PER_LONG, map++) { - if (page->bitmap[map] == 0) - continue; - block = ffz(~page->bitmap[map]); - if ((i + block) < pool->blocks_per_page) { - clear_bit(block, &page->bitmap[map]); - offset = (BITS_PER_LONG * map) + block; - offset *= pool->size; - goto ready; - } - } + if (page->offset < pool->allocation) + goto ready; } page = pool_alloc_page(pool, GFP_ATOMIC); if (!page) { @@ -333,10 +326,10 @@ void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags, goto done; } - clear_bit(0, &page->bitmap[0]); - offset = 0; ready: page->in_use++; + offset = page->offset; + page->offset = *(int *)(page->vaddr + offset); retval = offset + page->vaddr; *handle = offset + page->dma; #ifdef CONFIG_DEBUG_SLAB @@ -379,7 +372,7 @@ void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma) { struct dma_page *page; unsigned long flags; - int map, block; + unsigned int offset; page = pool_find_page(pool, dma); if (!page) { @@ -393,13 +386,9 @@ void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma) return; } - block = dma - page->dma; - block /= pool->size; - map = block / BITS_PER_LONG; - block %= BITS_PER_LONG; - + offset = vaddr - page->vaddr; #ifdef CONFIG_DEBUG_SLAB - if (((dma - page->dma) + (void *)page->vaddr) != vaddr) { + if ((dma - page->dma) != offset) { if (pool->dev) dev_err(pool->dev, "dma_pool_free %s, %p (bad vaddr)/%Lx\n", @@ -410,28 +399,36 @@ void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma) pool->name, vaddr, (unsigned long long)dma); return; } - if (page->bitmap[map] & (1UL << block)) { - if (pool->dev) - dev_err(pool->dev, - "dma_pool_free %s, dma %Lx already free\n", - pool->name, (unsigned long long)dma); - else - printk(KERN_ERR - "dma_pool_free %s, dma %Lx already free\n", - pool->name, (unsigned long long)dma); - return; + { + unsigned int chain = page->offset; + while (chain < pool->allocation) { + if (chain != offset) { + chain = *(int *)(page->vaddr + chain); + continue; + } + if (pool->dev) + dev_err(pool->dev, "dma_pool_free %s, dma %Lx " + "already free\n", pool->name, + (unsigned long long)dma); + else + printk(KERN_ERR "dma_pool_free %s, dma %Lx " + "already free\n", pool->name, + (unsigned long long)dma); + return; + } } memset(vaddr, POOL_POISON_FREED, pool->size); #endif spin_lock_irqsave(&pool->lock, flags); page->in_use--; - set_bit(block, &page->bitmap[map]); + *(int *)vaddr = page->offset; + page->offset = offset; if (waitqueue_active(&pool->waitq)) wake_up_locked(&pool->waitq); /* * Resist a temptation 
to do - * if (!is_page_busy(bpp, page->bitmap)) pool_free_page(pool, page); + * if (!is_page_busy(page)) pool_free_page(pool, page); * Better have a few empty pages hang around. */ spin_unlock_irqrestore(&pool->lock, flags); From e34f44b3517fe545f7fd45a8c2f6ee1e5e4432d3 Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Mon, 3 Dec 2007 14:16:24 -0500 Subject: [PATCH 7/7] pool: Improve memory usage for devices which can't cross boundaries The previous implementation simply refused to allocate more than a boundary's worth of data from an entire page. Some users didn't know this, so specified things like SMP_CACHE_BYTES, not realising the horrible waste of memory that this was. It's fairly easy to correct this problem, just by ensuring we don't cross a boundary within a page. This even helps drivers like EHCI (which can't cross a 4k boundary) on machines with larger page sizes. Signed-off-by: Matthew Wilcox Acked-by: David S. Miller --- mm/dmapool.c | 38 +++++++++++++++++++++----------------- 1 file changed, 21 insertions(+), 17 deletions(-) diff --git a/mm/dmapool.c b/mm/dmapool.c index 72e7ece7ee9d..34aaac451a96 100644 --- a/mm/dmapool.c +++ b/mm/dmapool.c @@ -43,6 +43,7 @@ struct dma_pool { /* the pool */ size_t size; struct device *dev; size_t allocation; + size_t boundary; char name[32]; wait_queue_head_t waitq; struct list_head pools; @@ -107,7 +108,7 @@ static DEVICE_ATTR(pools, S_IRUGO, show_pools, NULL); * @dev: device that will be doing the DMA * @size: size of the blocks in this pool. * @align: alignment requirement for blocks; must be a power of two - * @allocation: returned blocks won't cross this boundary (or zero) + * @boundary: returned blocks won't cross this power of two boundary * Context: !in_interrupt() * * Returns a dma allocation pool with the requested characteristics, or @@ -117,15 +118,16 @@ static DEVICE_ATTR(pools, S_IRUGO, show_pools, NULL); * cache flushing primitives. The actual size of blocks allocated may be * larger than requested because of alignment. * - * If allocation is nonzero, objects returned from dma_pool_alloc() won't + * If @boundary is nonzero, objects returned from dma_pool_alloc() won't * cross that size boundary. This is useful for devices which have * addressing restrictions on individual DMA transfers, such as not crossing * boundaries of 4KBytes. */ struct dma_pool *dma_pool_create(const char *name, struct device *dev, - size_t size, size_t align, size_t allocation) + size_t size, size_t align, size_t boundary) { struct dma_pool *retval; + size_t allocation; if (align == 0) { align = 1; @@ -142,27 +144,26 @@ struct dma_pool *dma_pool_create(const char *name, struct device *dev, if ((size % align) != 0) size = ALIGN(size, align); - if (allocation == 0) { - if (PAGE_SIZE < size) - allocation = size; - else - allocation = PAGE_SIZE; - /* FIXME: round up for less fragmentation */ - } else if (allocation < size) - return NULL; + allocation = max_t(size_t, size, PAGE_SIZE); - if (! 
- (retval = - kmalloc_node(sizeof *retval, GFP_KERNEL, dev_to_node(dev)))) + if (!boundary) { + boundary = allocation; + } else if ((boundary < size) || (boundary & (boundary - 1))) { + return NULL; + } + + retval = kmalloc_node(sizeof(*retval), GFP_KERNEL, dev_to_node(dev)); + if (!retval) return retval; - strlcpy(retval->name, name, sizeof retval->name); + strlcpy(retval->name, name, sizeof(retval->name)); retval->dev = dev; INIT_LIST_HEAD(&retval->page_list); spin_lock_init(&retval->lock); retval->size = size; + retval->boundary = boundary; retval->allocation = allocation; init_waitqueue_head(&retval->waitq); @@ -192,11 +193,14 @@ EXPORT_SYMBOL(dma_pool_create); static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page) { unsigned int offset = 0; + unsigned int next_boundary = pool->boundary; do { unsigned int next = offset + pool->size; - if (unlikely((next + pool->size) >= pool->allocation)) - next = pool->allocation; + if (unlikely((next + pool->size) >= next_boundary)) { + next = next_boundary; + next_boundary += pool->boundary; + } *(int *)(page->vaddr + offset) = next; offset = next; } while (offset < pool->allocation);
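To see what the boundary-aware chain in pool_initialise_page() buys,
the arithmetic can be run in isolation.  With 96-byte blocks and a
128-byte boundary (numbers invented for illustration), the old code
would have handed out one 96-byte block per 128-byte allocation,
while the new code packs 32 such blocks into each 4096-byte page with
none of them straddling a 128-byte boundary:

	#include <stdio.h>

	/* Walk the free chain the way pool_initialise_page() builds it,
	 * printing the offset of every block in the page. */
	int main(void)
	{
		unsigned int size = 96, boundary = 128, allocation = 4096;
		unsigned int offset = 0, next_boundary = boundary, blocks = 0;

		while (offset < allocation) {
			unsigned int next = offset + size;

			/* Clip to the boundary if the following block
			 * would otherwise cross it */
			if ((next + size) >= next_boundary) {
				next = next_boundary;
				next_boundary += boundary;
			}
			printf("block %2u at offset %4u\n", blocks++, offset);
			offset = next;
		}
		printf("%u blocks per page\n", blocks);	/* prints 32 */
		return 0;
	}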