/* linux-apfs/mm/dmapool.c */
#include <linux/device.h>
#include <linux/mm.h>
#include <asm/io.h> /* Needed for i386 to build */
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/poison.h>
#include <linux/sched.h>
/*
* Pool allocator ... wraps the dma_alloc_coherent page allocator, so
* small blocks are easily used by drivers for bus mastering controllers.
* This should probably be sharing the guts of the slab allocator.
*/
struct dma_pool { /* the pool */
struct list_head page_list;
spinlock_t lock;
size_t blocks_per_page;
size_t size;
struct device *dev;
size_t allocation;
char name[32];
wait_queue_head_t waitq;
struct list_head pools;
};
struct dma_page { /* cacheable header for 'allocation' bytes */
struct list_head page_list;
void *vaddr;
dma_addr_t dma;
unsigned in_use;
unsigned long bitmap[0];
};
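/*
 * Illustrative note (not from the original source): the free-block bitmap
 * is stored immediately after this struct, one bit per block, with a set
 * bit meaning "free" (see pool_alloc_page() below).  For example, assuming
 * size = 64 and allocation = PAGE_SIZE = 4096, blocks_per_page is 64, so
 * the bitmap occupies one unsigned long on 64-bit hosts and two on 32-bit.
 */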
#define POOL_TIMEOUT_JIFFIES ((100 /* msec */ * HZ) / 1000)
static DEFINE_MUTEX(pools_lock);
static ssize_t
show_pools(struct device *dev, struct device_attribute *attr, char *buf)
{
unsigned temp;
unsigned size;
char *next;
struct dma_page *page;
struct dma_pool *pool;
next = buf;
size = PAGE_SIZE;
temp = scnprintf(next, size, "poolinfo - 0.1\n");
size -= temp;
next += temp;
mutex_lock(&pools_lock);
list_for_each_entry(pool, &dev->dma_pools, pools) {
unsigned pages = 0;
unsigned blocks = 0;
list_for_each_entry(page, &pool->page_list, page_list) {
pages++;
blocks += page->in_use;
}
/* per-pool info, no real statistics yet */
temp = scnprintf(next, size, "%-16s %4u %4Zu %4Zu %2u\n",
pool->name,
blocks, pages * pool->blocks_per_page,
pool->size, pages);
size -= temp;
next += temp;
}
mutex_unlock(&pools_lock);
return PAGE_SIZE - size;
}
static DEVICE_ATTR(pools, S_IRUGO, show_pools, NULL);
/**
* dma_pool_create - Creates a pool of consistent memory blocks, for dma.
* @name: name of pool, for diagnostics
* @dev: device that will be doing the DMA
* @size: size of the blocks in this pool.
* @align: alignment requirement for blocks; must be a power of two
* @allocation: returned blocks won't cross this boundary (or zero)
* Context: !in_interrupt()
*
* Returns a dma allocation pool with the requested characteristics, or
* null if one can't be created. Given one of these pools, dma_pool_alloc()
* may be used to allocate memory. Such memory will all have "consistent"
* DMA mappings, accessible by the device and its driver without using
* cache flushing primitives. The actual size of blocks allocated may be
* larger than requested because of alignment.
*
* If allocation is nonzero, objects returned from dma_pool_alloc() won't
* cross that size boundary. This is useful for devices which have
* addressing restrictions on individual DMA transfers, such as not crossing
* boundaries of 4KBytes.
*/
struct dma_pool *dma_pool_create(const char *name, struct device *dev,
size_t size, size_t align, size_t allocation)
{
struct dma_pool *retval;
if (align == 0) {
align = 1;
} else if (align & (align - 1)) {
return NULL;
}
if (size == 0)
return NULL;
if ((size % align) != 0)
size = ALIGN(size, align);
if (allocation == 0) {
if (PAGE_SIZE < size)
allocation = size;
else
allocation = PAGE_SIZE;
/* FIXME: round up for less fragmentation */
} else if (allocation < size)
return NULL;
	retval = kmalloc_node(sizeof *retval, GFP_KERNEL, dev_to_node(dev));
	if (!retval)
		return NULL;
strlcpy(retval->name, name, sizeof retval->name);
retval->dev = dev;
INIT_LIST_HEAD(&retval->page_list);
spin_lock_init(&retval->lock);
retval->size = size;
retval->allocation = allocation;
retval->blocks_per_page = allocation / size;
init_waitqueue_head(&retval->waitq);
if (dev) {
int ret;
mutex_lock(&pools_lock);
if (list_empty(&dev->dma_pools))
ret = device_create_file(dev, &dev_attr_pools);
else
ret = 0;
/* note: not currently insisting "name" be unique */
if (!ret)
list_add(&retval->pools, &dev->dma_pools);
else {
kfree(retval);
retval = NULL;
}
mutex_unlock(&pools_lock);
} else
INIT_LIST_HEAD(&retval->pools);
return retval;
}
EXPORT_SYMBOL(dma_pool_create);
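/*
 * Usage sketch (illustrative, not part of this file): a driver typically
 * creates one pool per fixed-size hardware structure in its probe routine
 * and destroys it on remove, after every block has been freed.  The names
 * below (xmit_pool, foo_probe, foo_remove) are hypothetical.
 *
 *	static struct dma_pool *xmit_pool;
 *
 *	static int foo_probe(struct device *dev)
 *	{
 *		// 32-byte blocks, 16-byte aligned, no boundary restriction
 *		xmit_pool = dma_pool_create("foo-xmit", dev, 32, 16, 0);
 *		if (!xmit_pool)
 *			return -ENOMEM;
 *		return 0;
 *	}
 *
 *	static void foo_remove(struct device *dev)
 *	{
 *		// all blocks must already have been returned via dma_pool_free()
 *		dma_pool_destroy(xmit_pool);
 *	}
 */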
static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
{
struct dma_page *page;
int mapsize;
mapsize = pool->blocks_per_page;
mapsize = (mapsize + BITS_PER_LONG - 1) / BITS_PER_LONG;
mapsize *= sizeof(long);
page = kmalloc(mapsize + sizeof *page, mem_flags);
if (!page)
return NULL;
page->vaddr = dma_alloc_coherent(pool->dev,
pool->allocation,
&page->dma, mem_flags);
if (page->vaddr) {
memset(page->bitmap, 0xff, mapsize); /* bit set == free */
#ifdef CONFIG_DEBUG_SLAB
memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
list_add(&page->page_list, &pool->page_list);
page->in_use = 0;
} else {
kfree(page);
page = NULL;
}
return page;
}
static inline int is_page_busy(int blocks, unsigned long *bitmap)
{
while (blocks > 0) {
if (*bitmap++ != ~0UL)
return 1;
blocks -= BITS_PER_LONG;
}
return 0;
}
static void pool_free_page(struct dma_pool *pool, struct dma_page *page)
{
dma_addr_t dma = page->dma;
#ifdef CONFIG_DEBUG_SLAB
memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
dma_free_coherent(pool->dev, pool->allocation, page->vaddr, dma);
list_del(&page->page_list);
kfree(page);
}
/**
* dma_pool_destroy - destroys a pool of dma memory blocks.
* @pool: dma pool that will be destroyed
* Context: !in_interrupt()
*
* Caller guarantees that no more memory from the pool is in use,
* and that nothing will try to use the pool after this call.
*/
void dma_pool_destroy(struct dma_pool *pool)
{
mutex_lock(&pools_lock);
list_del(&pool->pools);
if (pool->dev && list_empty(&pool->dev->dma_pools))
device_remove_file(pool->dev, &dev_attr_pools);
mutex_unlock(&pools_lock);
while (!list_empty(&pool->page_list)) {
struct dma_page *page;
page = list_entry(pool->page_list.next,
struct dma_page, page_list);
if (is_page_busy(pool->blocks_per_page, page->bitmap)) {
if (pool->dev)
dev_err(pool->dev,
"dma_pool_destroy %s, %p busy\n",
pool->name, page->vaddr);
else
printk(KERN_ERR
"dma_pool_destroy %s, %p busy\n",
pool->name, page->vaddr);
/* leak the still-in-use consistent memory */
list_del(&page->page_list);
kfree(page);
} else
pool_free_page(pool, page);
}
kfree(pool);
}
EXPORT_SYMBOL(dma_pool_destroy);
/**
* dma_pool_alloc - get a block of consistent memory
* @pool: dma pool that will produce the block
* @mem_flags: GFP_* bitmask
* @handle: pointer to dma address of block
*
* This returns the kernel virtual address of a currently unused block,
* and reports its dma address through the handle.
* If such a memory block can't be allocated, null is returned.
*/
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
dma_addr_t *handle)
{
unsigned long flags;
struct dma_page *page;
int map, block;
size_t offset;
void *retval;
spin_lock_irqsave(&pool->lock, flags);
restart:
list_for_each_entry(page, &pool->page_list, page_list) {
int i;
		/* only cacheable accesses here ... */
for (map = 0, i = 0;
i < pool->blocks_per_page; i += BITS_PER_LONG, map++) {
if (page->bitmap[map] == 0)
continue;
block = ffz(~page->bitmap[map]);
if ((i + block) < pool->blocks_per_page) {
clear_bit(block, &page->bitmap[map]);
offset = (BITS_PER_LONG * map) + block;
offset *= pool->size;
goto ready;
}
}
}
page = pool_alloc_page(pool, GFP_ATOMIC);
if (!page) {
if (mem_flags & __GFP_WAIT) {
DECLARE_WAITQUEUE(wait, current);
__set_current_state(TASK_INTERRUPTIBLE);
__add_wait_queue(&pool->waitq, &wait);
spin_unlock_irqrestore(&pool->lock, flags);
schedule_timeout(POOL_TIMEOUT_JIFFIES);
spin_lock_irqsave(&pool->lock, flags);
__remove_wait_queue(&pool->waitq, &wait);
goto restart;
}
retval = NULL;
goto done;
}
clear_bit(0, &page->bitmap[0]);
offset = 0;
ready:
page->in_use++;
retval = offset + page->vaddr;
*handle = offset + page->dma;
#ifdef CONFIG_DEBUG_SLAB
memset(retval, POOL_POISON_ALLOCATED, pool->size);
#endif
done:
spin_unlock_irqrestore(&pool->lock, flags);
return retval;
}
EXPORT_SYMBOL(dma_pool_alloc);
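/*
 * Usage sketch (illustrative): dma_pool_alloc() returns a CPU pointer for
 * the driver and fills in the dma_addr_t handle that the device should be
 * given.  xmit_pool, struct foo_desc, ioaddr and FOO_DESC_REG below are
 * hypothetical.
 *
 *	struct foo_desc *desc;
 *	dma_addr_t desc_dma;
 *
 *	desc = dma_pool_alloc(xmit_pool, GFP_KERNEL, &desc_dma);
 *	if (!desc)
 *		return -ENOMEM;
 *	desc->status = 0;				// CPU writes via the vaddr
 *	writel(desc_dma, ioaddr + FOO_DESC_REG);	// device sees the handle
 */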
static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
{
unsigned long flags;
struct dma_page *page;
spin_lock_irqsave(&pool->lock, flags);
list_for_each_entry(page, &pool->page_list, page_list) {
if (dma < page->dma)
continue;
if (dma < (page->dma + pool->allocation))
goto done;
}
page = NULL;
done:
spin_unlock_irqrestore(&pool->lock, flags);
return page;
}
/**
* dma_pool_free - put block back into dma pool
* @pool: the dma pool holding the block
* @vaddr: virtual address of block
* @dma: dma address of block
*
* Caller promises neither device nor driver will again touch this block
* unless it is first re-allocated.
*/
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
struct dma_page *page;
unsigned long flags;
int map, block;
page = pool_find_page(pool, dma);
if (!page) {
if (pool->dev)
dev_err(pool->dev,
"dma_pool_free %s, %p/%lx (bad dma)\n",
pool->name, vaddr, (unsigned long)dma);
else
printk(KERN_ERR "dma_pool_free %s, %p/%lx (bad dma)\n",
pool->name, vaddr, (unsigned long)dma);
return;
}
block = dma - page->dma;
block /= pool->size;
map = block / BITS_PER_LONG;
block %= BITS_PER_LONG;
#ifdef CONFIG_DEBUG_SLAB
if (((dma - page->dma) + (void *)page->vaddr) != vaddr) {
if (pool->dev)
dev_err(pool->dev,
"dma_pool_free %s, %p (bad vaddr)/%Lx\n",
pool->name, vaddr, (unsigned long long)dma);
else
printk(KERN_ERR
"dma_pool_free %s, %p (bad vaddr)/%Lx\n",
pool->name, vaddr, (unsigned long long)dma);
return;
}
if (page->bitmap[map] & (1UL << block)) {
if (pool->dev)
dev_err(pool->dev,
"dma_pool_free %s, dma %Lx already free\n",
pool->name, (unsigned long long)dma);
else
printk(KERN_ERR
"dma_pool_free %s, dma %Lx already free\n",
pool->name, (unsigned long long)dma);
return;
}
memset(vaddr, POOL_POISON_FREED, pool->size);
#endif
spin_lock_irqsave(&pool->lock, flags);
page->in_use--;
set_bit(block, &page->bitmap[map]);
if (waitqueue_active(&pool->waitq))
wake_up_locked(&pool->waitq);
/*
* Resist a temptation to do
* if (!is_page_busy(bpp, page->bitmap)) pool_free_page(pool, page);
* Better have a few empty pages hang around.
*/
spin_unlock_irqrestore(&pool->lock, flags);
}
EXPORT_SYMBOL(dma_pool_free);
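/*
 * Usage sketch (illustrative): the block from the example above would later
 * be handed back with the same vaddr/handle pair, once neither the CPU nor
 * the device will touch it again:
 *
 *	dma_pool_free(xmit_pool, desc, desc_dma);
 */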
/*
* Managed DMA pool
*/
static void dmam_pool_release(struct device *dev, void *res)
{
struct dma_pool *pool = *(struct dma_pool **)res;
dma_pool_destroy(pool);
}
static int dmam_pool_match(struct device *dev, void *res, void *match_data)
{
return *(struct dma_pool **)res == match_data;
}
/**
* dmam_pool_create - Managed dma_pool_create()
* @name: name of pool, for diagnostics
* @dev: device that will be doing the DMA
* @size: size of the blocks in this pool.
* @align: alignment requirement for blocks; must be a power of two
* @allocation: returned blocks won't cross this boundary (or zero)
*
* Managed dma_pool_create(). DMA pool created with this function is
* automatically destroyed on driver detach.
*/
struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
size_t size, size_t align, size_t allocation)
{
struct dma_pool **ptr, *pool;
ptr = devres_alloc(dmam_pool_release, sizeof(*ptr), GFP_KERNEL);
if (!ptr)
return NULL;
pool = *ptr = dma_pool_create(name, dev, size, align, allocation);
if (pool)
devres_add(dev, ptr);
else
devres_free(ptr);
return pool;
}
EXPORT_SYMBOL(dmam_pool_create);
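/*
 * Usage sketch (illustrative): because the managed pool is registered as a
 * device resource, a driver can create it in probe() and omit the explicit
 * destroy in its remove path; devres releases it on driver detach.  The
 * "foo-rx" pool name below is hypothetical.
 *
 *	pool = dmam_pool_create("foo-rx", dev, 64, 64, 0);
 *	if (!pool)
 *		return -ENOMEM;
 *	// no explicit destroy needed on driver detach
 */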
/**
* dmam_pool_destroy - Managed dma_pool_destroy()
* @pool: dma pool that will be destroyed
*
* Managed dma_pool_destroy().
*/
void dmam_pool_destroy(struct dma_pool *pool)
{
struct device *dev = pool->dev;
dma_pool_destroy(pool);
WARN_ON(devres_destroy(dev, dmam_pool_release, dmam_pool_match, pool));
}
EXPORT_SYMBOL(dmam_pool_destroy);