/*
 *  linux/arch/arm/mm/consistent.c
 *
 *  Copyright (C) 2000-2004 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  DMA uncached mapping support.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>

#include <asm/memory.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/sizes.h>

/* Sanity check size */
#if (CONSISTENT_DMA_SIZE % SZ_2M)
#error "CONSISTENT_DMA_SIZE must be multiple of 2MiB"
#endif

#define CONSISTENT_END	(0xffe00000)
#define CONSISTENT_BASE	(CONSISTENT_END - CONSISTENT_DMA_SIZE)

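/*
 * CONSISTENT_OFFSET() gives the page index of a consistent-region
 * address relative to CONSISTENT_BASE; CONSISTENT_PTE_INDEX() gives
 * the index into consistent_pte[] of the PGDIR-sized (2MiB) section
 * covering that address.
 */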
#define CONSISTENT_OFFSET(x)	(((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT)
#define CONSISTENT_PTE_INDEX(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PGDIR_SHIFT)
#define NUM_CONSISTENT_PTES (CONSISTENT_DMA_SIZE >> PGDIR_SHIFT)


/*
 * These are the page tables (2MB each) covering uncached, DMA consistent allocations
 */
static pte_t *consistent_pte[NUM_CONSISTENT_PTES];
static DEFINE_SPINLOCK(consistent_lock);

/*
 * VM region handling support.
 *
 * This should become something generic, handling VM region allocations for
 * vmalloc and similar (ioremap, module space, etc).
 *
 * I envisage vmalloc()'s supporting vm_struct becoming:
 *
 *  struct vm_struct {
 *    struct vm_region	region;
 *    unsigned long	flags;
 *    struct page	**pages;
 *    unsigned int	nr_pages;
 *    unsigned long	phys_addr;
 *  };
 *
 * get_vm_area() would then call vm_region_alloc with an appropriate
 * struct vm_region head (eg):
 *
 *  struct vm_region vmalloc_head = {
 *	.vm_list	= LIST_HEAD_INIT(vmalloc_head.vm_list),
 *	.vm_start	= VMALLOC_START,
 *	.vm_end		= VMALLOC_END,
 *  };
 *
 * However, vmalloc_head.vm_start is variable (typically, it is dependent on
 * the amount of RAM found at boot time.)  I would imagine that get_vm_area()
 * would have to initialise this each time prior to calling vm_region_alloc().
 */
struct vm_region {
	struct list_head	vm_list;
	unsigned long		vm_start;
	unsigned long		vm_end;
	struct page		*vm_pages;
	int			vm_active;
};

static struct vm_region consistent_head = {
	.vm_list	= LIST_HEAD_INIT(consistent_head.vm_list),
	.vm_start	= CONSISTENT_BASE,
	.vm_end		= CONSISTENT_END,
};

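/*
 * Carve a free hole of 'size' bytes out of [head->vm_start, head->vm_end)
 * using a first-fit walk of the address-ordered region list, and insert
 * a new vm_region describing it.  Returns NULL if no hole is big enough.
 */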
static struct vm_region *
vm_region_alloc(struct vm_region *head, size_t size, gfp_t gfp)
{
	unsigned long addr = head->vm_start, end = head->vm_end - size;
	unsigned long flags;
	struct vm_region *c, *new;

	new = kmalloc(sizeof(struct vm_region), gfp);
	if (!new)
		goto out;

	spin_lock_irqsave(&consistent_lock, flags);

	list_for_each_entry(c, &head->vm_list, vm_list) {
		if ((addr + size) < addr)
			goto nospc;
		if ((addr + size) <= c->vm_start)
			goto found;
		addr = c->vm_end;
		if (addr > end)
			goto nospc;
	}

 found:
	/*
	 * Insert this entry _before_ the one we found.
	 */
	list_add_tail(&new->vm_list, &c->vm_list);
	new->vm_start = addr;
	new->vm_end = addr + size;
	new->vm_active = 1;

	spin_unlock_irqrestore(&consistent_lock, flags);
	return new;

 nospc:
	spin_unlock_irqrestore(&consistent_lock, flags);
	kfree(new);
 out:
	return NULL;
}

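/*
 * Look up the active region whose start address is exactly 'addr'.
 * The caller must hold consistent_lock.
 */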
static struct vm_region *vm_region_find(struct vm_region *head, unsigned long addr)
{
	struct vm_region *c;

	list_for_each_entry(c, &head->vm_list, vm_list) {
		if (c->vm_active && c->vm_start == addr)
			goto out;
	}
	c = NULL;
 out:
	return c;
}

#ifdef CONFIG_HUGETLB_PAGE
#error ARM Coherent DMA allocator does not (yet) support huge TLB
#endif

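/*
 * Back-end for the dma_alloc_* functions: allocate pages, flush any
 * stale data from the kernel direct mapping, then remap the pages into
 * the consistent region with the caller-supplied pgprot (uncached or
 * write-combining).
 */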
static void *
__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
	    pgprot_t prot)
{
	struct page *page;
	struct vm_region *c;
	unsigned long order;
	u64 mask = ISA_DMA_THRESHOLD, limit;

	if (!consistent_pte[0]) {
		printk(KERN_ERR "%s: not initialised\n", __func__);
		dump_stack();
		return NULL;
	}

	if (dev) {
		mask = dev->coherent_dma_mask;

		/*
		 * Sanity check the DMA mask - it must be non-zero, and
		 * must be able to be satisfied by a DMA allocation.
		 */
		if (mask == 0) {
			dev_warn(dev, "coherent DMA mask is unset\n");
			goto no_page;
		}

		if ((~mask) & ISA_DMA_THRESHOLD) {
			dev_warn(dev, "coherent DMA mask %#llx is smaller "
				 "than system GFP_DMA mask %#llx\n",
				 mask, (unsigned long long)ISA_DMA_THRESHOLD);
			goto no_page;
		}
	}

	/*
	 * Sanity check the allocation size.
	 */
	size = PAGE_ALIGN(size);
	limit = (mask + 1) & ~mask;
	if ((limit && size >= limit) ||
	    size >= (CONSISTENT_END - CONSISTENT_BASE)) {
		printk(KERN_WARNING "coherent allocation too big "
		       "(requested %#zx mask %#llx)\n", size, mask);
		goto no_page;
	}

	order = get_order(size);

	if (mask != 0xffffffff)
		gfp |= GFP_DMA;

	page = alloc_pages(gfp, order);
	if (!page)
		goto no_page;

	/*
	 * Invalidate any data that might be lurking in the
	 * kernel direct-mapped region for device DMA.
	 */
	{
		void *ptr = page_address(page);
		memset(ptr, 0, size);
		dmac_flush_range(ptr, ptr + size);
		outer_flush_range(__pa(ptr), __pa(ptr) + size);
	}

	/*
	 * Allocate a virtual address in the consistent mapping region.
	 */
	c = vm_region_alloc(&consistent_head, size,
			    gfp & ~(__GFP_DMA | __GFP_HIGHMEM));
	if (c) {
		pte_t *pte;
		struct page *end = page + (1 << order);
		int idx = CONSISTENT_PTE_INDEX(c->vm_start);
		u32 off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);

		pte = consistent_pte[idx] + off;
		c->vm_pages = page;

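		/*
		 * Split the high-order allocation into independent
		 * order-0 pages, so that the pages beyond 'size' can
		 * be handed back to the allocator one by one below.
		 */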
		split_page(page, order);

		/*
		 * Set the "dma handle"
		 */
		*handle = page_to_dma(dev, page);

		do {
			BUG_ON(!pte_none(*pte));

			/*
			 * x86 does not mark the pages reserved...
			 */
			SetPageReserved(page);
			set_pte_ext(pte, mk_pte(page, prot), 0);
			page++;
			pte++;
			off++;
			if (off >= PTRS_PER_PTE) {
				off = 0;
				pte = consistent_pte[++idx];
			}
		} while (size -= PAGE_SIZE);

		/*
		 * Free the otherwise unused pages.
		 */
		while (page < end) {
			__free_page(page);
			page++;
		}

		return (void *)c->vm_start;
	}

	if (page)
		__free_pages(page, order);
 no_page:
	*handle = ~0;
	return NULL;
}

/*
 * Allocate DMA-coherent memory space and return both the kernel remapped
 * virtual and bus address for that space.
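 *
 * Typical driver usage (illustrative sketch only):
 *
 *	dma_addr_t dma;
 *	void *cpu = dma_alloc_coherent(dev, PAGE_SIZE, &dma, GFP_KERNEL);
 *
 * 'cpu' is the uncached kernel virtual address; 'dma' is the bus
 * address to program into the device.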
 */
void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
{
	if (arch_is_coherent()) {
		void *virt;

		virt = kmalloc(size, gfp);
		if (!virt)
			return NULL;
		*handle = virt_to_dma(dev, virt);

		return virt;
	}

	return __dma_alloc(dev, size, handle, gfp,
			   pgprot_noncached(pgprot_kernel));
}
EXPORT_SYMBOL(dma_alloc_coherent);

/*
 * Allocate a writecombining region, in much the same way as
 * dma_alloc_coherent above.
 */
void *
dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
{
	return __dma_alloc(dev, size, handle, gfp,
			   pgprot_writecombine(pgprot_kernel));
}
EXPORT_SYMBOL(dma_alloc_writecombine);

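/*
 * Common helper for the dma_mmap_* functions: look up the vm_region
 * backing 'cpu_addr' and remap its pages into the caller's VMA,
 * honouring the mmap offset.  Returns -ENXIO if 'cpu_addr' is not a
 * live coherent allocation or the requested window does not fit.
 */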
static int dma_mmap(struct device *dev, struct vm_area_struct *vma,
		    void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	unsigned long flags, user_size, kern_size;
	struct vm_region *c;
	int ret = -ENXIO;

	user_size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;

	spin_lock_irqsave(&consistent_lock, flags);
	c = vm_region_find(&consistent_head, (unsigned long)cpu_addr);
	spin_unlock_irqrestore(&consistent_lock, flags);

	if (c) {
		unsigned long off = vma->vm_pgoff;

		kern_size = (c->vm_end - c->vm_start) >> PAGE_SHIFT;

		if (off < kern_size &&
		    user_size <= (kern_size - off)) {
			ret = remap_pfn_range(vma, vma->vm_start,
					      page_to_pfn(c->vm_pages) + off,
					      user_size << PAGE_SHIFT,
					      vma->vm_page_prot);
		}
	}

	return ret;
}

int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
		      void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
}
EXPORT_SYMBOL(dma_mmap_coherent);

int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
			  void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
}
EXPORT_SYMBOL(dma_mmap_writecombine);

/*
 * Free a page as defined by the above mapping.
 * Must not be called with IRQs disabled.
 */
void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle)
{
	struct vm_region *c;
	unsigned long flags, addr;
	pte_t *ptep;
	int idx;
	u32 off;

	WARN_ON(irqs_disabled());

	if (arch_is_coherent()) {
		kfree(cpu_addr);
		return;
	}

	size = PAGE_ALIGN(size);

	spin_lock_irqsave(&consistent_lock, flags);
	c = vm_region_find(&consistent_head, (unsigned long)cpu_addr);
	if (!c)
		goto no_area;

	c->vm_active = 0;
	spin_unlock_irqrestore(&consistent_lock, flags);

	if ((c->vm_end - c->vm_start) != size) {
		printk(KERN_ERR "%s: freeing wrong coherent size (%lu != %zu)\n",
		       __func__, c->vm_end - c->vm_start, size);
		dump_stack();
		size = c->vm_end - c->vm_start;
	}

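	/*
	 * Walk the page table entries covering this region, clearing
	 * each PTE and releasing the page it mapped back to the system,
	 * stepping into the next consistent_pte[] table at each 2MiB
	 * section boundary.
	 */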
	idx = CONSISTENT_PTE_INDEX(c->vm_start);
	off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);
	ptep = consistent_pte[idx] + off;
	addr = c->vm_start;
	do {
		pte_t pte = ptep_get_and_clear(&init_mm, addr, ptep);
		unsigned long pfn;

		ptep++;
		addr += PAGE_SIZE;
		off++;
		if (off >= PTRS_PER_PTE) {
			off = 0;
			ptep = consistent_pte[++idx];
		}

		if (!pte_none(pte) && pte_present(pte)) {
			pfn = pte_pfn(pte);

			if (pfn_valid(pfn)) {
				struct page *page = pfn_to_page(pfn);

				/*
				 * x86 does not mark the pages reserved...
				 */
				ClearPageReserved(page);

				__free_page(page);
				continue;
			}
		}

		printk(KERN_CRIT "%s: bad page in kernel page table\n",
		       __func__);
	} while (size -= PAGE_SIZE);

	flush_tlb_kernel_range(c->vm_start, c->vm_end);

	spin_lock_irqsave(&consistent_lock, flags);
	list_del(&c->vm_list);
	spin_unlock_irqrestore(&consistent_lock, flags);

	kfree(c);
	return;

 no_area:
	spin_unlock_irqrestore(&consistent_lock, flags);
	printk(KERN_ERR "%s: trying to free invalid coherent area: %p\n",
	       __func__, cpu_addr);
	dump_stack();
}
EXPORT_SYMBOL(dma_free_coherent);

/*
 * Initialise the consistent memory allocation.
 */
static int __init consistent_init(void)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int ret = 0, i = 0;
	u32 base = CONSISTENT_BASE;

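	/*
	 * Pre-allocate one kernel page table for each 2MiB section of
	 * the consistent region, recording it in consistent_pte[] so
	 * that __dma_alloc() never has to allocate page tables itself.
	 */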
	do {
		pgd = pgd_offset(&init_mm, base);
		pmd = pmd_alloc(&init_mm, pgd, base);
		if (!pmd) {
			printk(KERN_ERR "%s: no pmd tables\n", __func__);
			ret = -ENOMEM;
			break;
		}
		WARN_ON(!pmd_none(*pmd));

		pte = pte_alloc_kernel(pmd, base);
		if (!pte) {
			printk(KERN_ERR "%s: no pte tables\n", __func__);
			ret = -ENOMEM;
			break;
		}

		consistent_pte[i++] = pte;
		base += (1 << PGDIR_SHIFT);
	} while (base < CONSISTENT_END);

	return ret;
}

core_initcall(consistent_init);

/*
 * Make an area consistent for devices.
 * Note: Drivers should NOT use this function directly, as it will break
 * platforms with CONFIG_DMABOUNCE.
 * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
 */
void dma_cache_maint(const void *start, size_t size, int direction)
{
	const void *end = start + size;

	BUG_ON(!virt_addr_valid(start) || !virt_addr_valid(end - 1));

	switch (direction) {
	case DMA_FROM_DEVICE:		/* invalidate only */
		dmac_inv_range(start, end);
		outer_inv_range(__pa(start), __pa(end));
		break;
	case DMA_TO_DEVICE:		/* writeback only */
		dmac_clean_range(start, end);
		outer_clean_range(__pa(start), __pa(end));
		break;
	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
		dmac_flush_range(start, end);
		outer_flush_range(__pa(start), __pa(end));
		break;
	default:
		BUG();
	}
}
EXPORT_SYMBOL(dma_cache_maint);