/*
 * PowerPC version derived from arch/arm/mm/consistent.c
 *    Copyright (C) 2001 Dan Malek (dmalek@jlc.net)
 *
 * Copyright (C) 2000 Russell King
 *
 * Consistent memory allocators.  Used for DMA devices that want to
 * share uncached memory with the processor core.  The function
 * returns the kernel virtual address and sets 'dma_handle' to the
 * physical (bus) address.  Mostly stolen from the ARM port, with
 * some changes for PowerPC.
 * -- Dan
 *
 * Reorganized to get rid of the arch-specific consistent_* functions
 * and provide non-coherent implementations for the DMA API. -Matt
 *
 * Added in_interrupt() safe dma_alloc_coherent()/dma_free_coherent()
 * implementation. This is pulled straight from ARM and barely
 * modified. -Matt
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/highmem.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>

#include <asm/tlbflush.h>

/*
 * Allocate DMA-coherent memory space and return both the kernel remapped
 * virtual and bus address for that space.
 */
void *
__dma_alloc_coherent(size_t size, dma_addr_t *handle, gfp_t gfp)
{
	struct page *page;
	unsigned long order;
	int i;
	unsigned int nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned int array_size = nr_pages * sizeof(struct page *);
	struct page **pages;
	struct page *end;
	u64 mask = 0x00ffffff, limit; /* ISA default */
	struct vm_struct *area;

	BUG_ON(!mem_init_done);
	size = PAGE_ALIGN(size);
	limit = (mask + 1) & ~mask;
	if (limit && size >= limit) {
		printk(KERN_WARNING "coherent allocation too big (requested "
		       "%#zx mask %#llx)\n", size, mask);
		return NULL;
	}

	order = get_order(size);

	if (mask != 0xffffffff)
		gfp |= GFP_DMA;

	page = alloc_pages(gfp, order);
	if (!page)
		goto no_page;

	end = page + (1 << order);

	/*
	 * Invalidate any data that might be lurking in the
	 * kernel direct-mapped region for device DMA.
	 */
	{
		unsigned long kaddr = (unsigned long)page_address(page);
		memset(page_address(page), 0, size);
		flush_dcache_range(kaddr, kaddr + size);
	}

	split_page(page, order);

	/*
	 * Set the "dma handle"
	 */
	*handle = page_to_phys(page);

	area = get_vm_area_caller(size, VM_IOREMAP,
			__builtin_return_address(1));
	if (!area)
		goto out_free_pages;

	if (array_size > PAGE_SIZE) {
		pages = vmalloc(array_size);
		area->flags |= VM_VPAGES;
	} else {
		pages = kmalloc(array_size, GFP_KERNEL);
	}
	if (!pages)
		goto out_free_area;

	area->pages = pages;
	area->nr_pages = nr_pages;

	for (i = 0; i < nr_pages; i++)
		pages[i] = page + i;

	if (map_vm_area(area, pgprot_noncached(PAGE_KERNEL), &pages))
		goto out_unmap;

	/*
	 * Free the otherwise unused pages.
	 */
	page += nr_pages;
	while (page < end) {
		__free_page(page);
		page++;
	}

	return area->addr;
out_unmap:
	vunmap(area->addr);
	if (array_size > PAGE_SIZE)
		vfree(pages);
	else
		kfree(pages);
	goto out_free_pages;
out_free_area:
	free_vm_area(area);
out_free_pages:
	/*
	 * split_page() has already run, so the block must be released
	 * page by page; freeing it as a single order-sized unit with
	 * __free_pages(page, order) would leave the tail pages with
	 * stale reference counts.
	 */
	while (page < end)
		__free_page(page++);
no_page:
	return NULL;
}
EXPORT_SYMBOL(__dma_alloc_coherent);
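
/*
 * Illustrative usage sketch (not from this file): a driver needing a
 * small uncached buffer shared with a device could pair the allocator
 * with its free routine as below.  The buffer size and the device
 * programming step are hypothetical; most drivers reach this code via
 * dma_alloc_coherent() rather than calling it directly.
 *
 *	dma_addr_t bus_addr;
 *	void *cpu_addr;
 *
 *	cpu_addr = __dma_alloc_coherent(PAGE_SIZE, &bus_addr, GFP_KERNEL);
 *	if (!cpu_addr)
 *		return -ENOMEM;
 *	// hand 'bus_addr' to the device; the CPU accesses 'cpu_addr'
 *	__dma_free_coherent(PAGE_SIZE, cpu_addr);
 */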

/*
 * Free memory that was allocated with __dma_alloc_coherent() above.
 * vfree() both removes the uncached mapping and, because area->pages
 * was filled in at allocation time, releases the underlying pages.
 */
void __dma_free_coherent(size_t size, void *vaddr)
{
	vfree(vaddr);
}
EXPORT_SYMBOL(__dma_free_coherent);

/*
 * Make an area of memory consistent for a DMA transfer in the given
 * direction.
 */
void __dma_sync(void *vaddr, size_t size, int direction)
{
	unsigned long start = (unsigned long)vaddr;
	unsigned long end = start + size;

	switch (direction) {
	case DMA_NONE:
		BUG();
	case DMA_FROM_DEVICE:
		/*
		 * Invalidate only when the buffer is cache-line aligned;
		 * otherwise flush (writeback + invalidate) so that dirty
		 * data sharing a cache line with the buffer is not lost.
		 */
		if ((start & (L1_CACHE_BYTES - 1)) ||
		    (size & (L1_CACHE_BYTES - 1)))
			flush_dcache_range(start, end);
		else
			invalidate_dcache_range(start, end);
		break;
	case DMA_TO_DEVICE:		/* writeback only */
		clean_dcache_range(start, end);
		break;
	case DMA_BIDIRECTIONAL:	/* writeback and invalidate */
		flush_dcache_range(start, end);
		break;
	}
}
EXPORT_SYMBOL(__dma_sync);
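
/*
 * Illustrative sketch (not from this file): how a caller might bracket
 * transfers with __dma_sync().  'buf' and 'len' are hypothetical.
 *
 *	// CPU filled 'buf'; write dirty lines back before the device reads:
 *	__dma_sync(buf, len, DMA_TO_DEVICE);
 *	// ... start the device's read of the buffer ...
 *
 *	// Device will write into 'buf'; toss stale cached lines first:
 *	__dma_sync(buf, len, DMA_FROM_DEVICE);
 *	// ... start the device's write, wait for it, then read 'buf' ...
 */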

#ifdef CONFIG_HIGHMEM
/*
 * __dma_sync_page() implementation for systems using highmem.
 * In this case, each page of a buffer must be kmapped/kunmapped
 * in order to have a virtual address for __dma_sync(). This must
 * not sleep so kmap_atomic()/kunmap_atomic() are used.
 *
 * Note: yes, it is possible and correct to have a buffer extend
 * beyond the first page.
 */
static inline void __dma_sync_page_highmem(struct page *page,
		unsigned long offset, size_t size, int direction)
{
	size_t seg_size = min((size_t)(PAGE_SIZE - offset), size);
	size_t cur_size = seg_size;
	unsigned long flags, start, seg_offset = offset;
	int nr_segs = 1 + ((size - seg_size) + PAGE_SIZE - 1)/PAGE_SIZE;
	int seg_nr = 0;

	local_irq_save(flags);

	do {
		start = (unsigned long)kmap_atomic(page + seg_nr,
				KM_PPC_SYNC_PAGE) + seg_offset;

		/* Sync this buffer segment */
		__dma_sync((void *)start, seg_size, direction);
		kunmap_atomic((void *)start, KM_PPC_SYNC_PAGE);
		seg_nr++;

		/* Calculate next buffer segment size */
		seg_size = min((size_t)PAGE_SIZE, size - cur_size);

		/* Add the segment size to our running total */
		cur_size += seg_size;
		seg_offset = 0;
	} while (seg_nr < nr_segs);

	local_irq_restore(flags);
}
#endif /* CONFIG_HIGHMEM */

/*
 * __dma_sync_page() makes memory consistent.  It is identical to
 * __dma_sync(), but takes a struct page (plus an offset into it)
 * instead of a kernel virtual address.
 */
void __dma_sync_page(struct page *page, unsigned long offset,
	size_t size, int direction)
{
#ifdef CONFIG_HIGHMEM
	__dma_sync_page_highmem(page, offset, size, direction);
#else
	unsigned long start = (unsigned long)page_address(page) + offset;
	__dma_sync((void *)start, size, direction);
#endif
}
EXPORT_SYMBOL(__dma_sync_page);
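
/*
 * Illustrative sketch (hypothetical caller, not part of this file):
 * a scatter-gather mapping routine might sync each segment like so,
 * assuming a 'struct scatterlist *sg' describing the segment:
 *
 *	__dma_sync_page(sg_page(sg), sg->offset, sg->length, DMA_TO_DEVICE);
 */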