/*
 *  linux/arch/arm/mm/dma-mapping.c
 *
 *  Copyright (C) 2000-2004 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  DMA uncached mapping support.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>

#include <asm/memory.h>
#include <asm/highmem.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/sizes.h>

static u64 get_coherent_dma_mask(struct device *dev)
{
	u64 mask = ISA_DMA_THRESHOLD;

	if (dev) {
		mask = dev->coherent_dma_mask;

		/*
		 * Sanity check the DMA mask - it must be non-zero, and
		 * must be able to be satisfied by a DMA allocation.
		 */
		if (mask == 0) {
			dev_warn(dev, "coherent DMA mask is unset\n");
			return 0;
		}

		if ((~mask) & ISA_DMA_THRESHOLD) {
			dev_warn(dev, "coherent DMA mask %#llx is smaller "
				 "than system GFP_DMA mask %#llx\n",
				 mask, (unsigned long long)ISA_DMA_THRESHOLD);
			return 0;
		}
	}

	return mask;
}
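
/*
 * Illustrative sketch (hypothetical names, not part of this file): a
 * driver is expected to declare its addressing capability before
 * allocating, so that the checks above can succeed.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)))
 *			return -ENODEV;
 *		...
 *	}
 */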

/*
 * Allocate a DMA buffer for 'dev' of size 'size' using the
 * specified gfp mask.  Note that 'size' must be page aligned.
 */
static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gfp)
{
	unsigned long order = get_order(size);
	struct page *page, *p, *e;
	void *ptr;
	u64 mask = get_coherent_dma_mask(dev);

#ifdef CONFIG_DMA_API_DEBUG
	u64 limit = (mask + 1) & ~mask;
	if (limit && size >= limit) {
		dev_warn(dev, "coherent allocation too big (requested %#x mask %#llx)\n",
			size, mask);
		return NULL;
	}
#endif

	if (!mask)
		return NULL;

	if (mask < 0xffffffffULL)
		gfp |= GFP_DMA;

	page = alloc_pages(gfp, order);
	if (!page)
		return NULL;

	/*
	 * Now split the huge page and free the excess pages
	 */
	split_page(page, order);
	for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
		__free_page(p);

	/*
	 * Ensure that the allocated pages are zeroed, and that any data
	 * lurking in the kernel direct-mapped region is invalidated.
	 */
	ptr = page_address(page);
	memset(ptr, 0, size);
	dmac_flush_range(ptr, ptr + size);
	outer_flush_range(__pa(ptr), __pa(ptr) + size);

	return page;
}
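
/*
 * For example, a 12KiB request: get_order() rounds up to order 2, so
 * alloc_pages() hands back a 16KiB block; split_page() makes its four
 * pages individually refcounted, and the loop above returns the unused
 * trailing 4KiB page to the allocator.
 */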

/*
 * Free a DMA buffer.  'size' must be page aligned.
 */
static void __dma_free_buffer(struct page *page, size_t size)
{
	struct page *e = page + (size >> PAGE_SHIFT);

	while (page < e) {
		__free_page(page);
		page++;
	}
}

#ifdef CONFIG_MMU
/* Sanity check size */
#if (CONSISTENT_DMA_SIZE % SZ_2M)
#error "CONSISTENT_DMA_SIZE must be multiple of 2MiB"
#endif

#define CONSISTENT_OFFSET(x)	(((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT)
#define CONSISTENT_PTE_INDEX(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PGDIR_SHIFT)
#define NUM_CONSISTENT_PTES (CONSISTENT_DMA_SIZE >> PGDIR_SHIFT)
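
/*
 * With the default 2MiB CONSISTENT_DMA_SIZE and ARM's 2MiB-wide pgd
 * entries, NUM_CONSISTENT_PTES is 1: a single L2 page table covers the
 * whole consistent region.  CONSISTENT_PTE_INDEX() picks the table for
 * a given address; CONSISTENT_OFFSET() gives its page index within the
 * region.
 */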

/*
 * These are the page tables (2MB each) covering uncached, DMA consistent allocations
 */
static pte_t *consistent_pte[NUM_CONSISTENT_PTES];

#include "vmregion.h"

static struct arm_vmregion_head consistent_head = {
	.vm_lock	= __SPIN_LOCK_UNLOCKED(&consistent_head.vm_lock),
	.vm_list	= LIST_HEAD_INIT(consistent_head.vm_list),
	.vm_start	= CONSISTENT_BASE,
	.vm_end		= CONSISTENT_END,
};

#ifdef CONFIG_HUGETLB_PAGE
#error ARM Coherent DMA allocator does not (yet) support huge TLB
#endif

/*
 * Initialise the consistent memory allocation.
 */
static int __init consistent_init(void)
{
	int ret = 0;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int i = 0;
	u32 base = CONSISTENT_BASE;

	do {
		pgd = pgd_offset(&init_mm, base);
		pmd = pmd_alloc(&init_mm, pgd, base);
		if (!pmd) {
			printk(KERN_ERR "%s: no pmd tables\n", __func__);
			ret = -ENOMEM;
			break;
		}
		WARN_ON(!pmd_none(*pmd));

		pte = pte_alloc_kernel(pmd, base);
		if (!pte) {
			printk(KERN_ERR "%s: no pte tables\n", __func__);
			ret = -ENOMEM;
			break;
		}

		consistent_pte[i++] = pte;
		base += (1 << PGDIR_SHIFT);
	} while (base < CONSISTENT_END);

	return ret;
}

core_initcall(consistent_init);

static void *
__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot)
{
	struct arm_vmregion *c;
	size_t align;
	int bit;

	if (!consistent_pte[0]) {
		printk(KERN_ERR "%s: not initialised\n", __func__);
		dump_stack();
		return NULL;
	}

	/*
	 * Align the virtual region allocation - maximum alignment is
	 * a section size, minimum is a page size.  This helps reduce
	 * fragmentation of the DMA space, and also prevents allocations
	 * smaller than a section from crossing a section boundary.
	 */
	bit = fls(size - 1) + 1;
	if (bit > SECTION_SHIFT)
		bit = SECTION_SHIFT;
	align = 1 << bit;

	/*
	 * Allocate a virtual address in the consistent mapping region.
	 */
	c = arm_vmregion_alloc(&consistent_head, align, size,
			    gfp & ~(__GFP_DMA | __GFP_HIGHMEM));
	if (c) {
		pte_t *pte;
		int idx = CONSISTENT_PTE_INDEX(c->vm_start);
		u32 off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);

		pte = consistent_pte[idx] + off;
		c->vm_pages = page;

		do {
			BUG_ON(!pte_none(*pte));

			set_pte_ext(pte, mk_pte(page, prot), 0);
			page++;
			pte++;
			off++;
			if (off >= PTRS_PER_PTE) {
				off = 0;
				pte = consistent_pte[++idx];
			}
		} while (size -= PAGE_SIZE);

		return (void *)c->vm_start;
	}
	return NULL;
}

static void __dma_free_remap(void *cpu_addr, size_t size)
{
	struct arm_vmregion *c;
	unsigned long addr;
	pte_t *ptep;
	int idx;
	u32 off;

	c = arm_vmregion_find_remove(&consistent_head, (unsigned long)cpu_addr);
	if (!c) {
		printk(KERN_ERR "%s: trying to free invalid coherent area: %p\n",
		       __func__, cpu_addr);
		dump_stack();
		return;
	}

	if ((c->vm_end - c->vm_start) != size) {
		printk(KERN_ERR "%s: freeing wrong coherent size (%ld != %d)\n",
		       __func__, c->vm_end - c->vm_start, size);
		dump_stack();
		size = c->vm_end - c->vm_start;
	}

	idx = CONSISTENT_PTE_INDEX(c->vm_start);
	off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);
	ptep = consistent_pte[idx] + off;
	addr = c->vm_start;
	do {
		pte_t pte = ptep_get_and_clear(&init_mm, addr, ptep);

		ptep++;
		addr += PAGE_SIZE;
		off++;
		if (off >= PTRS_PER_PTE) {
			off = 0;
			ptep = consistent_pte[++idx];
		}

		if (pte_none(pte) || !pte_present(pte))
			printk(KERN_CRIT "%s: bad page in kernel page table\n",
			       __func__);
	} while (size -= PAGE_SIZE);

	flush_tlb_kernel_range(c->vm_start, c->vm_end);

	arm_vmregion_free(&consistent_head, c);
}

#else	/* !CONFIG_MMU */

#define __dma_alloc_remap(page, size, gfp, prot)	page_address(page)
#define __dma_free_remap(addr, size)			do { } while (0)

#endif	/* CONFIG_MMU */

static void *
__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
	    pgprot_t prot)
{
	struct page *page;
	void *addr;

	*handle = ~0;
	size = PAGE_ALIGN(size);

	page = __dma_alloc_buffer(dev, size, gfp);
	if (!page)
		return NULL;

	if (!arch_is_coherent())
		addr = __dma_alloc_remap(page, size, gfp, prot);
	else
		addr = page_address(page);

	if (addr)
		*handle = page_to_dma(dev, page);

	return addr;
}

/*
 * Allocate DMA-coherent memory space and return both the kernel remapped
 * virtual and bus address for that space.
 */
void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
{
	void *memory;

	if (dma_alloc_from_coherent(dev, size, handle, &memory))
		return memory;

	return __dma_alloc(dev, size, handle, gfp,
			   pgprot_dmacoherent(pgprot_kernel));
}
EXPORT_SYMBOL(dma_alloc_coherent);
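
/*
 * Illustrative use from a driver (hypothetical names; a sketch, not part
 * of this file).  The returned CPU pointer and bus address remain valid
 * until the matching dma_free_coherent():
 *
 *	dma_addr_t ring_dma;
 *	void *ring = dma_alloc_coherent(dev, SZ_4K, &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	...hand ring_dma to the device, access ring from the CPU...
 *	dma_free_coherent(dev, SZ_4K, ring, ring_dma);
 */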

/*
 * Allocate a writecombining region, in much the same way as
 * dma_alloc_coherent above.
 */
void *
dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
{
	return __dma_alloc(dev, size, handle, gfp,
			   pgprot_writecombine(pgprot_kernel));
}
EXPORT_SYMBOL(dma_alloc_writecombine);

static int dma_mmap(struct device *dev, struct vm_area_struct *vma,
		    void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	int ret = -ENXIO;
#ifdef CONFIG_MMU
	unsigned long user_size, kern_size;
	struct arm_vmregion *c;

	user_size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;

	c = arm_vmregion_find(&consistent_head, (unsigned long)cpu_addr);
	if (c) {
		unsigned long off = vma->vm_pgoff;

		kern_size = (c->vm_end - c->vm_start) >> PAGE_SHIFT;

		if (off < kern_size &&
		    user_size <= (kern_size - off)) {
			ret = remap_pfn_range(vma, vma->vm_start,
					      page_to_pfn(c->vm_pages) + off,
					      user_size << PAGE_SHIFT,
					      vma->vm_page_prot);
		}
	}
#endif	/* CONFIG_MMU */

	return ret;
}

int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
		      void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	vma->vm_page_prot = pgprot_dmacoherent(vma->vm_page_prot);
	return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
}
EXPORT_SYMBOL(dma_mmap_coherent);

int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
			  void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
}
EXPORT_SYMBOL(dma_mmap_writecombine);
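
/*
 * Typical caller (a sketch with hypothetical names, not part of this
 * file): a driver exposing a writecombined buffer through its mmap file
 * operation, where 'buf' and 'buf_dma' came from dma_alloc_writecombine().
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct foo_dev *foo = file->private_data;
 *
 *		return dma_mmap_writecombine(foo->dev, vma, foo->buf,
 *					     foo->buf_dma, foo->buf_size);
 *	}
 */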

/*
 * free a page as defined by the above mapping.
 * Must not be called with IRQs disabled.
 */
void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle)
{
	WARN_ON(irqs_disabled());

	if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
		return;

	size = PAGE_ALIGN(size);

	if (!arch_is_coherent())
		__dma_free_remap(cpu_addr, size);

	__dma_free_buffer(dma_to_page(dev, handle), size);
}
EXPORT_SYMBOL(dma_free_coherent);

/*
 * Make an area consistent for devices.
 * Note: Drivers should NOT use this function directly, as it will break
 * platforms with CONFIG_DMABOUNCE.
 * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
 */
void ___dma_single_cpu_to_dev(const void *kaddr, size_t size,
	enum dma_data_direction dir)
{
	unsigned long paddr;

	BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1));

	dmac_map_area(kaddr, size, dir);

	paddr = __pa(kaddr);
	if (dir == DMA_FROM_DEVICE) {
		outer_inv_range(paddr, paddr + size);
	} else {
		outer_clean_range(paddr, paddr + size);
	}
	/* FIXME: non-speculating: flush on bidirectional mappings? */
}
EXPORT_SYMBOL(___dma_single_cpu_to_dev);

void ___dma_single_dev_to_cpu(const void *kaddr, size_t size,
	enum dma_data_direction dir)
{
	BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1));

	/* FIXME: non-speculating: not required */
	/* don't bother invalidating if DMA to device */
	if (dir != DMA_TO_DEVICE) {
		unsigned long paddr = __pa(kaddr);
		outer_inv_range(paddr, paddr + size);
	}

	dmac_unmap_area(kaddr, size, dir);
}
EXPORT_SYMBOL(___dma_single_dev_to_cpu);
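
/*
 * As the comment above says, drivers reach these helpers through the
 * generic streaming API.  A sketch (hypothetical names, not part of this
 * file) of the ownership handshake for a buffer mapped with
 * dma_map_single():
 *
 *	dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);
 *	...the CPU may now read the buffer...
 *	dma_sync_single_for_device(dev, dma, len, DMA_FROM_DEVICE);
 *	...the device owns the buffer again...
 */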

static void dma_cache_maint_page(struct page *page, unsigned long offset,
	size_t size, enum dma_data_direction dir,
	void (*op)(const void *, size_t, int))
{
	/*
	 * A single sg entry may refer to multiple physically contiguous
	 * pages.  But we still need to process highmem pages individually.
	 * If highmem is not configured then the bulk of this loop gets
	 * optimized out.
	 */
	size_t left = size;
	do {
		size_t len = left;
		void *vaddr;

		if (PageHighMem(page)) {
			if (len + offset > PAGE_SIZE) {
				if (offset >= PAGE_SIZE) {
					page += offset / PAGE_SIZE;
					offset %= PAGE_SIZE;
				}
				len = PAGE_SIZE - offset;
			}
			vaddr = kmap_high_get(page);
			if (vaddr) {
				vaddr += offset;
				op(vaddr, len, dir);
				kunmap_high(page);
			} else if (cache_is_vipt()) {
				pte_t saved_pte;
				vaddr = kmap_high_l1_vipt(page, &saved_pte);
				op(vaddr + offset, len, dir);
				kunmap_high_l1_vipt(page, saved_pte);
			}
		} else {
			vaddr = page_address(page) + offset;
			op(vaddr, len, dir);
		}
		offset = 0;
		page++;
		left -= len;
	} while (left);
}

void ___dma_page_cpu_to_dev(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	unsigned long paddr;

	dma_cache_maint_page(page, off, size, dir, dmac_map_area);

	paddr = page_to_phys(page) + off;
	if (dir == DMA_FROM_DEVICE) {
		outer_inv_range(paddr, paddr + size);
	} else {
		outer_clean_range(paddr, paddr + size);
	}
	/* FIXME: non-speculating: flush on bidirectional mappings? */
}
EXPORT_SYMBOL(___dma_page_cpu_to_dev);

void ___dma_page_dev_to_cpu(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	unsigned long paddr = page_to_phys(page) + off;

	/* FIXME: non-speculating: not required */
	/* don't bother invalidating if DMA to device */
	if (dir != DMA_TO_DEVICE)
		outer_inv_range(paddr, paddr + size);

	dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);
}
EXPORT_SYMBOL(___dma_page_dev_to_cpu);

/**
 * dma_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the dma_map_single interface.
 * Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length.  They are obtained via
 * sg_dma_{address,length}.
 *
 * Device ownership issues as mentioned for dma_map_single are the same
 * here.
 */
int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i, j;

	for_each_sg(sg, s, nents, i) {
		s->dma_address = dma_map_page(dev, sg_page(s), s->offset,
						s->length, dir);
		if (dma_mapping_error(dev, s->dma_address))
			goto bad_mapping;
	}
	return nents;

 bad_mapping:
	for_each_sg(sg, s, i, j)
		dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
	return 0;
}
EXPORT_SYMBOL(dma_map_sg);
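
/*
 * Illustrative caller (hypothetical names; a sketch, not part of this
 * file).  dma_map_sg() returns the number of entries mapped, or 0 on
 * failure; dma_unmap_sg() must be called with the original nents:
 *
 *	struct scatterlist sg[2];
 *	int count;
 *
 *	sg_init_table(sg, 2);
 *	sg_set_buf(&sg[0], buf0, len0);
 *	sg_set_buf(&sg[1], buf1, len1);
 *	count = dma_map_sg(dev, sg, 2, DMA_TO_DEVICE);
 *	if (!count)
 *		return -ENOMEM;
 *	...program the device using sg_dma_address()/sg_dma_len()...
 *	dma_unmap_sg(dev, sg, 2, DMA_TO_DEVICE);
 */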

/**
 * dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to unmap (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 *
 * Unmap a set of streaming mode DMA translations.  Again, CPU access
 * rules concerning calls here are the same as for dma_unmap_single().
 */
void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
}
EXPORT_SYMBOL(dma_unmap_sg);

/**
 * dma_sync_sg_for_cpu
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (!dmabounce_sync_for_cpu(dev, sg_dma_address(s), 0,
					    sg_dma_len(s), dir))
			continue;

		__dma_page_dev_to_cpu(sg_page(s), s->offset,
				      s->length, dir);
	}
}
EXPORT_SYMBOL(dma_sync_sg_for_cpu);

/**
 * dma_sync_sg_for_device
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (!dmabounce_sync_for_device(dev, sg_dma_address(s), 0,
					sg_dma_len(s), dir))
			continue;

		__dma_page_cpu_to_dev(sg_page(s), s->offset,
				      s->length, dir);
	}
}
EXPORT_SYMBOL(dma_sync_sg_for_device);