/*
 * DMA Pool allocator
 *
 * Copyright 2001 David Brownell
 * Copyright 2007 Intel Corporation
 *   Author: Matthew Wilcox <willy@linux.intel.com>
 *
 * This software may be redistributed and/or modified under the terms of
 * the GNU General Public License ("GPL") version 2 as published by the
 * Free Software Foundation.
 *
 * This allocator returns small blocks of a given size which are DMA-able by
 * the given device.  It uses the dma_alloc_coherent page allocator to get
 * new pages, then splits them up into blocks of the required size.
 * Many older drivers still have their own code to do this.
 *
 * The current design of this allocator is fairly simple.  The pool is
 * represented by the 'struct dma_pool' which keeps a doubly-linked list of
 * allocated pages.  Each page in the page_list is split into blocks of at
 * least 'size' bytes.  Free blocks are tracked in an unsorted singly-linked
 * list within each page.  Used blocks aren't tracked, but we keep a count
 * of how many are currently allocated from each page.
 */
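
/*
 * A minimal usage sketch (hypothetical driver code, not part of this
 * file; the "frames" pool name, the 64-byte block size and the 'dev'
 * pointer are made up for illustration):
 *
 *	struct dma_pool *pool;
 *	dma_addr_t dma;
 *	void *buf;
 *
 *	pool = dma_pool_create("frames", dev, 64, 8, 0);
 *	if (!pool)
 *		return -ENOMEM;
 *	buf = dma_pool_alloc(pool, GFP_KERNEL, &dma);
 *	if (buf) {
 *		... hand 'dma' to the device, access 'buf' from the CPU ...
 *		dma_pool_free(pool, buf, dma);
 *	}
 *	dma_pool_destroy(pool);
 */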

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/wait.h>

#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
#define DMAPOOL_DEBUG 1
#endif

struct dma_pool {		/* the pool */
	struct list_head page_list;
	spinlock_t lock;
	size_t size;
	struct device *dev;
	size_t allocation;
	size_t boundary;
	char name[32];
	wait_queue_head_t waitq;
	struct list_head pools;
};

struct dma_page {		/* cacheable header for 'allocation' bytes */
	struct list_head page_list;
	void *vaddr;
	dma_addr_t dma;
	unsigned int in_use;
	unsigned int offset;
};

#define	POOL_TIMEOUT_JIFFIES	((100 /* msec */ * HZ) / 1000)

static DEFINE_MUTEX(pools_lock);

static ssize_t
show_pools(struct device *dev, struct device_attribute *attr, char *buf)
{
	unsigned temp;
	unsigned size;
	char *next;
	struct dma_page *page;
	struct dma_pool *pool;

	next = buf;
	size = PAGE_SIZE;

	temp = scnprintf(next, size, "poolinfo - 0.1\n");
	size -= temp;
	next += temp;

	mutex_lock(&pools_lock);
	list_for_each_entry(pool, &dev->dma_pools, pools) {
		unsigned pages = 0;
		unsigned blocks = 0;

		spin_lock_irq(&pool->lock);
		list_for_each_entry(page, &pool->page_list, page_list) {
			pages++;
			blocks += page->in_use;
		}
		spin_unlock_irq(&pool->lock);

		/* per-pool info, no real statistics yet */
		temp = scnprintf(next, size, "%-16s %4u %4Zu %4Zu %2u\n",
				 pool->name, blocks,
				 pages * (pool->allocation / pool->size),
				 pool->size, pages);
		size -= temp;
		next += temp;
	}
	mutex_unlock(&pools_lock);

	return PAGE_SIZE - size;
}

static DEVICE_ATTR(pools, S_IRUGO, show_pools, NULL);

/**
 * dma_pool_create - Creates a pool of consistent memory blocks for DMA.
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @boundary: returned blocks won't cross this power of two boundary
 * Context: !in_interrupt()
 *
 * Returns a dma allocation pool with the requested characteristics, or
 * null if one can't be created.  Given one of these pools, dma_pool_alloc()
 * may be used to allocate memory.  Such memory will all have "consistent"
 * DMA mappings, accessible by the device and its driver without using
 * cache flushing primitives.  The actual size of blocks allocated may be
 * larger than requested because of alignment.
 *
 * If @boundary is nonzero, objects returned from dma_pool_alloc() won't
 * cross that size boundary.  This is useful for devices which have
 * addressing restrictions on individual DMA transfers, such as not crossing
 * boundaries of 4KBytes.
 */
struct dma_pool *dma_pool_create(const char *name, struct device *dev,
				 size_t size, size_t align, size_t boundary)
{
	struct dma_pool *retval;
	size_t allocation;

	if (align == 0) {
		align = 1;
	} else if (align & (align - 1)) {
		return NULL;
	}

	if (size == 0) {
		return NULL;
	} else if (size < 4) {
		size = 4;
	}

	if ((size % align) != 0)
		size = ALIGN(size, align);

	allocation = max_t(size_t, size, PAGE_SIZE);

	if (!boundary) {
		boundary = allocation;
	} else if ((boundary < size) || (boundary & (boundary - 1))) {
		return NULL;
	}

	retval = kmalloc_node(sizeof(*retval), GFP_KERNEL, dev_to_node(dev));
	if (!retval)
		return retval;

	strlcpy(retval->name, name, sizeof(retval->name));

	retval->dev = dev;

	INIT_LIST_HEAD(&retval->page_list);
	spin_lock_init(&retval->lock);
	retval->size = size;
	retval->boundary = boundary;
	retval->allocation = allocation;
	init_waitqueue_head(&retval->waitq);

	if (dev) {
		int ret;

		mutex_lock(&pools_lock);
		if (list_empty(&dev->dma_pools))
			ret = device_create_file(dev, &dev_attr_pools);
		else
			ret = 0;
		/* note:  not currently insisting "name" be unique */
		if (!ret)
			list_add(&retval->pools, &dev->dma_pools);
		else {
			kfree(retval);
			retval = NULL;
		}
		mutex_unlock(&pools_lock);
	} else
		INIT_LIST_HEAD(&retval->pools);

	return retval;
}
EXPORT_SYMBOL(dma_pool_create);
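
/*
 * Example (hypothetical values): a pool of 512-byte descriptors which a
 * device requires not to straddle 4KByte address boundaries could be
 * created with
 *
 *	pool = dma_pool_create("desc", dev, 512, 512, 4096);
 *
 * Every block handed out by dma_pool_alloc() then lies entirely within
 * one 4KByte-aligned window.
 */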

static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
{
	unsigned int offset = 0;
	unsigned int next_boundary = pool->boundary;

	do {
		unsigned int next = offset + pool->size;
		if (unlikely((next + pool->size) >= next_boundary)) {
			next = next_boundary;
			next_boundary += pool->boundary;
		}
		*(int *)(page->vaddr + offset) = next;
		offset = next;
	} while (offset < pool->allocation);
}
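
/*
 * Worked example for pool_initialise_page() (hypothetical numbers): with
 * size 96 and boundary 256, offset 0 is chained to 96; a block at 192
 * would run past the 256-byte boundary, so offset 96 is chained straight
 * to 256, then 256 to 352, and so on.  Each free block thus starts with
 * the offset of the next free block, forming the per-page free list that
 * dma_pool_alloc() pops and dma_pool_free() pushes back.
 */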

static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
{
	struct dma_page *page;

	page = kmalloc(sizeof(*page), mem_flags);
	if (!page)
		return NULL;
	page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
					 &page->dma, mem_flags);
	if (page->vaddr) {
#ifdef	DMAPOOL_DEBUG
		memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
		pool_initialise_page(pool, page);
		list_add(&page->page_list, &pool->page_list);
		page->in_use = 0;
		page->offset = 0;
	} else {
		kfree(page);
		page = NULL;
	}
	return page;
}

static inline int is_page_busy(struct dma_page *page)
{
	return page->in_use != 0;
}

static void pool_free_page(struct dma_pool *pool, struct dma_page *page)
{
	dma_addr_t dma = page->dma;

#ifdef	DMAPOOL_DEBUG
	memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
	dma_free_coherent(pool->dev, pool->allocation, page->vaddr, dma);
	list_del(&page->page_list);
	kfree(page);
}

/**
 * dma_pool_destroy - destroys a pool of dma memory blocks.
 * @pool: dma pool that will be destroyed
 * Context: !in_interrupt()
 *
 * Caller guarantees that no more memory from the pool is in use,
 * and that nothing will try to use the pool after this call.
 */
void dma_pool_destroy(struct dma_pool *pool)
{
	mutex_lock(&pools_lock);
	list_del(&pool->pools);
	if (pool->dev && list_empty(&pool->dev->dma_pools))
		device_remove_file(pool->dev, &dev_attr_pools);
	mutex_unlock(&pools_lock);

	while (!list_empty(&pool->page_list)) {
		struct dma_page *page;
		page = list_entry(pool->page_list.next,
				  struct dma_page, page_list);
		if (is_page_busy(page)) {
			if (pool->dev)
				dev_err(pool->dev,
					"dma_pool_destroy %s, %p busy\n",
					pool->name, page->vaddr);
			else
				printk(KERN_ERR
				       "dma_pool_destroy %s, %p busy\n",
				       pool->name, page->vaddr);
			/* leak the still-in-use consistent memory */
			list_del(&page->page_list);
			kfree(page);
		} else
			pool_free_page(pool, page);
	}

	kfree(pool);
}
EXPORT_SYMBOL(dma_pool_destroy);

/**
 * dma_pool_alloc - get a block of consistent memory
 * @pool: dma pool that will produce the block
 * @mem_flags: GFP_* bitmask
 * @handle: pointer to dma address of block
 *
 * This returns the kernel virtual address of a currently unused block,
 * and reports its dma address through the handle.
 * If such a memory block can't be allocated, %NULL is returned.
 */
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
		     dma_addr_t *handle)
{
	unsigned long flags;
	struct dma_page *page;
	size_t offset;
	void *retval;

	might_sleep_if(mem_flags & __GFP_WAIT);

	spin_lock_irqsave(&pool->lock, flags);
 restart:
	list_for_each_entry(page, &pool->page_list, page_list) {
		if (page->offset < pool->allocation)
			goto ready;
	}
	page = pool_alloc_page(pool, GFP_ATOMIC);
	if (!page) {
		if (mem_flags & __GFP_WAIT) {
			DECLARE_WAITQUEUE(wait, current);

			__set_current_state(TASK_UNINTERRUPTIBLE);
			__add_wait_queue(&pool->waitq, &wait);
			spin_unlock_irqrestore(&pool->lock, flags);

			schedule_timeout(POOL_TIMEOUT_JIFFIES);

			spin_lock_irqsave(&pool->lock, flags);
			__remove_wait_queue(&pool->waitq, &wait);
			goto restart;
		}
		retval = NULL;
		goto done;
	}

 ready:
	page->in_use++;
	offset = page->offset;
	page->offset = *(int *)(page->vaddr + offset);
	retval = offset + page->vaddr;
	*handle = offset + page->dma;
#ifdef	DMAPOOL_DEBUG
	memset(retval, POOL_POISON_ALLOCATED, pool->size);
#endif
 done:
	spin_unlock_irqrestore(&pool->lock, flags);
	return retval;
}
EXPORT_SYMBOL(dma_pool_alloc);
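
/*
 * Example (hypothetical caller): from atomic context, e.g. a completion
 * handler, a driver must pass GFP_ATOMIC and accept failure rather than
 * sleeping:
 *
 *	desc = dma_pool_alloc(pool, GFP_ATOMIC, &desc_dma);
 *	if (!desc)
 *		return -ENOMEM;
 */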

static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
{
	struct dma_page *page;

	list_for_each_entry(page, &pool->page_list, page_list) {
		if (dma < page->dma)
			continue;
		if (dma < (page->dma + pool->allocation))
			return page;
	}
	return NULL;
}

/**
 * dma_pool_free - put block back into dma pool
 * @pool: the dma pool holding the block
 * @vaddr: virtual address of block
 * @dma: dma address of block
 *
 * Caller promises neither device nor driver will again touch this block
 * unless it is first re-allocated.
 */
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
	struct dma_page *page;
	unsigned long flags;
	unsigned int offset;

	spin_lock_irqsave(&pool->lock, flags);
	page = pool_find_page(pool, dma);
	if (!page) {
		spin_unlock_irqrestore(&pool->lock, flags);
		if (pool->dev)
			dev_err(pool->dev,
				"dma_pool_free %s, %p/%lx (bad dma)\n",
				pool->name, vaddr, (unsigned long)dma);
		else
			printk(KERN_ERR "dma_pool_free %s, %p/%lx (bad dma)\n",
			       pool->name, vaddr, (unsigned long)dma);
		return;
	}

	offset = vaddr - page->vaddr;
#ifdef	DMAPOOL_DEBUG
	if ((dma - page->dma) != offset) {
		spin_unlock_irqrestore(&pool->lock, flags);
		if (pool->dev)
			dev_err(pool->dev,
				"dma_pool_free %s, %p (bad vaddr)/%Lx\n",
				pool->name, vaddr, (unsigned long long)dma);
		else
			printk(KERN_ERR
			       "dma_pool_free %s, %p (bad vaddr)/%Lx\n",
			       pool->name, vaddr, (unsigned long long)dma);
		return;
	}
	{
		unsigned int chain = page->offset;
		while (chain < pool->allocation) {
			if (chain != offset) {
				chain = *(int *)(page->vaddr + chain);
				continue;
			}
			spin_unlock_irqrestore(&pool->lock, flags);
			if (pool->dev)
				dev_err(pool->dev, "dma_pool_free %s, dma %Lx "
					"already free\n", pool->name,
					(unsigned long long)dma);
			else
				printk(KERN_ERR "dma_pool_free %s, dma %Lx "
				       "already free\n", pool->name,
				       (unsigned long long)dma);
			return;
		}
	}
	memset(vaddr, POOL_POISON_FREED, pool->size);
#endif

	page->in_use--;
	*(int *)vaddr = page->offset;
	page->offset = offset;
	if (waitqueue_active(&pool->waitq))
		wake_up_locked(&pool->waitq);
	/*
	 * Resist a temptation to do
	 *    if (!is_page_busy(page)) pool_free_page(pool, page);
	 * Better have a few empty pages hang around.
	 */
	spin_unlock_irqrestore(&pool->lock, flags);
}
EXPORT_SYMBOL(dma_pool_free);

/*
 * Managed DMA pool
 */
static void dmam_pool_release(struct device *dev, void *res)
{
	struct dma_pool *pool = *(struct dma_pool **)res;

	dma_pool_destroy(pool);
}

static int dmam_pool_match(struct device *dev, void *res, void *match_data)
{
	return *(struct dma_pool **)res == match_data;
}

/**
 * dmam_pool_create - Managed dma_pool_create()
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @allocation: returned blocks won't cross this boundary (or zero)
 *
 * Managed dma_pool_create().  DMA pool created with this function is
 * automatically destroyed on driver detach.
 */
struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
				  size_t size, size_t align, size_t allocation)
{
	struct dma_pool **ptr, *pool;

	ptr = devres_alloc(dmam_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	pool = *ptr = dma_pool_create(name, dev, size, align, allocation);
	if (pool)
		devres_add(dev, ptr);
	else
		devres_free(ptr);

	return pool;
}
EXPORT_SYMBOL(dmam_pool_create);
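
/*
 * Example (hypothetical probe routine): with the managed API the pool is
 * torn down automatically on driver detach, so error paths and the
 * remove routine need no dmam_pool_destroy() call:
 *
 *	static int foo_probe(struct device *dev)
 *	{
 *		struct dma_pool *pool;
 *
 *		pool = dmam_pool_create("foo-desc", dev, 64, 8, 0);
 *		if (!pool)
 *			return -ENOMEM;
 *		... use the pool for the lifetime of the binding ...
 *		return 0;
 *	}
 */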

/**
 * dmam_pool_destroy - Managed dma_pool_destroy()
 * @pool: dma pool that will be destroyed
 *
 * Managed dma_pool_destroy().
 */
void dmam_pool_destroy(struct dma_pool *pool)
{
	struct device *dev = pool->dev;

	dma_pool_destroy(pool);
	WARN_ON(devres_destroy(dev, dmam_pool_release, dmam_pool_match, pool));
}
EXPORT_SYMBOL(dmam_pool_destroy);