/*
 *  linux/arch/arm/mm/dma-mapping.c
 *
 *  Copyright (C) 2000-2004 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  DMA uncached mapping support.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h>
#include <linux/highmem.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/sizes.h>

#include <asm/memory.h>
#include <asm/highmem.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/mach/arch.h>
#include <asm/dma-iommu.h>
#include <asm/mach/map.h>
#include <asm/system_info.h>
#include <asm/dma-contiguous.h>

#include "mm.h"

/*
 * The DMA API is built upon the notion of "buffer ownership".  A buffer
 * is either exclusively owned by the CPU (and therefore may be accessed
 * by it) or exclusively owned by the DMA device.  These helper functions
 * represent the transitions between these two ownership states.
 *
 * Note, however, that on later ARMs, this notion does not work due to
 * speculative prefetches.  We model our approach on the assumption that
 * the CPU does do speculative prefetches, which means we clean caches
 * before transfers and delay cache invalidation until transfer completion.
 */
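/*
 * Illustrative sketch only (not part of this file): a typical driver-side
 * streaming DMA sequence that exercises the ownership transitions above.
 * The device, page and direction used below are hypothetical.
 *
 *	dma_addr_t dma;
 *
 *	dma = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	... program the device with 'dma' and wait for completion ...
 *	dma_unmap_page(dev, dma, PAGE_SIZE, DMA_TO_DEVICE);
 */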
static void __dma_page_cpu_to_dev(struct page *, unsigned long,
		size_t, enum dma_data_direction);
static void __dma_page_dev_to_cpu(struct page *, unsigned long,
		size_t, enum dma_data_direction);

/**
 * arm_dma_map_page - map a portion of a page for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed.  The CPU
 * can regain ownership by calling dma_unmap_page().
 */
static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
	     struct dma_attrs *attrs)
{
	if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
		__dma_page_cpu_to_dev(page, offset, size, dir);
	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
}

static dma_addr_t arm_coherent_dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
	     struct dma_attrs *attrs)
{
	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
}

/**
 * arm_dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * Unmap a page streaming mode DMA translation.  The handle and size
 * must match what was provided in the previous dma_map_page() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static void arm_dma_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs)
{
	if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
		__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
				      handle & ~PAGE_MASK, size, dir);
}

static void arm_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	unsigned int offset = handle & (PAGE_SIZE - 1);
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
	__dma_page_dev_to_cpu(page, offset, size, dir);
}

static void arm_dma_sync_single_for_device(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	unsigned int offset = handle & (PAGE_SIZE - 1);
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
	__dma_page_cpu_to_dev(page, offset, size, dir);
}

struct dma_map_ops arm_dma_ops = {
	.alloc			= arm_dma_alloc,
	.free			= arm_dma_free,
	.mmap			= arm_dma_mmap,
	.get_sgtable		= arm_dma_get_sgtable,
	.map_page		= arm_dma_map_page,
	.unmap_page		= arm_dma_unmap_page,
	.map_sg			= arm_dma_map_sg,
	.unmap_sg		= arm_dma_unmap_sg,
	.sync_single_for_cpu	= arm_dma_sync_single_for_cpu,
	.sync_single_for_device	= arm_dma_sync_single_for_device,
	.sync_sg_for_cpu	= arm_dma_sync_sg_for_cpu,
	.sync_sg_for_device	= arm_dma_sync_sg_for_device,
	.set_dma_mask		= arm_dma_set_mask,
};
EXPORT_SYMBOL(arm_dma_ops);

static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
	dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs);
static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
				  dma_addr_t handle, struct dma_attrs *attrs);

struct dma_map_ops arm_coherent_dma_ops = {
	.alloc			= arm_coherent_dma_alloc,
	.free			= arm_coherent_dma_free,
	.mmap			= arm_dma_mmap,
	.get_sgtable		= arm_dma_get_sgtable,
	.map_page		= arm_coherent_dma_map_page,
	.map_sg			= arm_dma_map_sg,
	.set_dma_mask		= arm_dma_set_mask,
};
EXPORT_SYMBOL(arm_coherent_dma_ops);
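/*
 * Illustrative sketch only: platform or bus code that knows a device is
 * hardware cache-coherent could install the coherent ops instead of the
 * default arm_dma_ops, for example via set_dma_ops().  The device pointer
 * below is a hypothetical example; this is not code from this file.
 *
 *	set_dma_ops(dev, &arm_coherent_dma_ops);
 */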
static u64 get_coherent_dma_mask(struct device *dev)
{
	u64 mask = (u64)arm_dma_limit;

	if (dev) {
		mask = dev->coherent_dma_mask;

		/*
		 * Sanity check the DMA mask - it must be non-zero, and
		 * must be able to be satisfied by a DMA allocation.
		 */
		if (mask == 0) {
			dev_warn(dev, "coherent DMA mask is unset\n");
			return 0;
		}

		if ((~mask) & (u64)arm_dma_limit) {
			dev_warn(dev, "coherent DMA mask %#llx is smaller "
				 "than system GFP_DMA mask %#llx\n",
				 mask, (u64)arm_dma_limit);
			return 0;
		}
	}

	return mask;
}

static void __dma_clear_buffer(struct page *page, size_t size)
{
	void *ptr;
	/*
	 * Ensure that the allocated pages are zeroed, and that any data
	 * lurking in the kernel direct-mapped region is invalidated.
	 */
	ptr = page_address(page);
	if (ptr) {
		memset(ptr, 0, size);
		dmac_flush_range(ptr, ptr + size);
		outer_flush_range(__pa(ptr), __pa(ptr) + size);
	}
}

/*
 * Allocate a DMA buffer for 'dev' of size 'size' using the
 * specified gfp mask.  Note that 'size' must be page aligned.
 */
static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gfp)
{
	unsigned long order = get_order(size);
	struct page *page, *p, *e;

	page = alloc_pages(gfp, order);
	if (!page)
		return NULL;

	/*
	 * Now split the huge page and free the excess pages
	 */
	split_page(page, order);
	for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
		__free_page(p);

	__dma_clear_buffer(page, size);

	return page;
}

/*
 * Free a DMA buffer.  'size' must be page aligned.
 */
static void __dma_free_buffer(struct page *page, size_t size)
{
	struct page *e = page + (size >> PAGE_SHIFT);

	while (page < e) {
		__free_page(page);
		page++;
	}
}

#ifdef CONFIG_MMU
#ifdef CONFIG_HUGETLB_PAGE
#error ARM Coherent DMA allocator does not (yet) support huge TLB
#endif

static void *__alloc_from_contiguous(struct device *dev, size_t size,
				     pgprot_t prot, struct page **ret_page);

static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
				  pgprot_t prot, struct page **ret_page,
				  const void *caller);

static void *
__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
	const void *caller)
{
	struct vm_struct *area;
	unsigned long addr;

	/*
	 * DMA allocation can be mapped to user space, so lets
	 * set VM_USERMAP flags too.
	 */
	area = get_vm_area_caller(size, VM_ARM_DMA_CONSISTENT | VM_USERMAP,
				  caller);
	if (!area)
		return NULL;
	addr = (unsigned long)area->addr;
	area->phys_addr = __pfn_to_phys(page_to_pfn(page));

	if (ioremap_page_range(addr, addr + size, area->phys_addr, prot)) {
		vunmap((void *)addr);
		return NULL;
	}
	return (void *)addr;
}

static void __dma_free_remap(void *cpu_addr, size_t size)
{
	unsigned int flags = VM_ARM_DMA_CONSISTENT | VM_USERMAP;
	struct vm_struct *area = find_vm_area(cpu_addr);
	if (!area || (area->flags & flags) != flags) {
		WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
		return;
	}
	unmap_kernel_range((unsigned long)cpu_addr, size);
	vunmap(cpu_addr);
}

#define DEFAULT_DMA_COHERENT_POOL_SIZE	SZ_256K

struct dma_pool {
	size_t size;
	spinlock_t lock;
	unsigned long *bitmap;
	unsigned long nr_pages;
	void *vaddr;
	struct page **pages;
};

static struct dma_pool atomic_pool = {
	.size = DEFAULT_DMA_COHERENT_POOL_SIZE,
};

static int __init early_coherent_pool(char *p)
{
	atomic_pool.size = memparse(p, &p);
	return 0;
}
early_param("coherent_pool", early_coherent_pool);
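/*
 * Usage note: the atomic pool size can be overridden on the kernel command
 * line; the value is parsed by memparse(), so K/M/G suffixes work, e.g.:
 *
 *	coherent_pool=2M
 */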

void __init init_dma_coherent_pool_size(unsigned long size)
{
	/*
	 * Catch any attempt to set the pool size too late.
	 */
	BUG_ON(atomic_pool.vaddr);

	/*
	 * Set architecture specific coherent pool size only if
	 * it has not been changed by kernel command line parameter.
	 */
	if (atomic_pool.size == DEFAULT_DMA_COHERENT_POOL_SIZE)
		atomic_pool.size = size;
}

/*
 * Initialise the coherent pool for atomic allocations.
 */
static int __init atomic_pool_init(void)
{
	struct dma_pool *pool = &atomic_pool;
	pgprot_t prot = pgprot_dmacoherent(pgprot_kernel);
	unsigned long nr_pages = pool->size >> PAGE_SHIFT;
	unsigned long *bitmap;
	struct page *page;
	struct page **pages;
	void *ptr;
	int bitmap_size = BITS_TO_LONGS(nr_pages) * sizeof(long);

	bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!bitmap)
		goto no_bitmap;

	pages = kzalloc(nr_pages * sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		goto no_pages;

	if (IS_ENABLED(CONFIG_CMA))
		ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page);
	else
		ptr = __alloc_remap_buffer(NULL, pool->size, GFP_KERNEL, prot,
					   &page, NULL);
	if (ptr) {
		int i;

		for (i = 0; i < nr_pages; i++)
			pages[i] = page + i;

		spin_lock_init(&pool->lock);
		pool->vaddr = ptr;
		pool->pages = pages;
		pool->bitmap = bitmap;
		pool->nr_pages = nr_pages;
		pr_info("DMA: preallocated %u KiB pool for atomic coherent allocations\n",
			(unsigned)pool->size / 1024);
		return 0;
	}

	kfree(pages);
no_pages:
	kfree(bitmap);
no_bitmap:
	pr_err("DMA: failed to allocate %u KiB pool for atomic coherent allocation\n",
	       (unsigned)pool->size / 1024);
	return -ENOMEM;
}
/*
 * CMA is activated by core_initcall, so we must be called after it.
 */
postcore_initcall(atomic_pool_init);

struct dma_contig_early_reserve {
	phys_addr_t base;
	unsigned long size;
};

static struct dma_contig_early_reserve dma_mmu_remap[MAX_CMA_AREAS] __initdata;

static int dma_mmu_remap_num __initdata;

void __init dma_contiguous_early_fixup(phys_addr_t base, unsigned long size)
{
	dma_mmu_remap[dma_mmu_remap_num].base = base;
	dma_mmu_remap[dma_mmu_remap_num].size = size;
	dma_mmu_remap_num++;
}

void __init dma_contiguous_remap(void)
{
	int i;
	for (i = 0; i < dma_mmu_remap_num; i++) {
		phys_addr_t start = dma_mmu_remap[i].base;
		phys_addr_t end = start + dma_mmu_remap[i].size;
		struct map_desc map;
		unsigned long addr;

		if (end > arm_lowmem_limit)
			end = arm_lowmem_limit;
		if (start >= end)
			continue;

		map.pfn = __phys_to_pfn(start);
		map.virtual = __phys_to_virt(start);
		map.length = end - start;
		map.type = MT_MEMORY_DMA_READY;

		/*
		 * Clear previous low-memory mapping
		 */
		for (addr = __phys_to_virt(start); addr < __phys_to_virt(end);
		     addr += PMD_SIZE)
			pmd_clear(pmd_off_k(addr));

		iotable_init(&map, 1);
	}
}

static int __dma_update_pte(pte_t *pte, pgtable_t token, unsigned long addr,
			    void *data)
{
	struct page *page = virt_to_page(addr);
	pgprot_t prot = *(pgprot_t *)data;

	set_pte_ext(pte, mk_pte(page, prot), 0);
	return 0;
}

static void __dma_remap(struct page *page, size_t size, pgprot_t prot)
{
	unsigned long start = (unsigned long) page_address(page);
	unsigned end = start + size;

	apply_to_page_range(&init_mm, start, size, __dma_update_pte, &prot);
	dsb();
	flush_tlb_kernel_range(start, end);
}

static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
				  pgprot_t prot, struct page **ret_page,
				  const void *caller)
{
	struct page *page;
	void *ptr;
	page = __dma_alloc_buffer(dev, size, gfp);
	if (!page)
		return NULL;

	ptr = __dma_alloc_remap(page, size, gfp, prot, caller);
	if (!ptr) {
		__dma_free_buffer(page, size);
		return NULL;
	}

	*ret_page = page;
	return ptr;
}

static void *__alloc_from_pool(size_t size, struct page **ret_page)
{
	struct dma_pool *pool = &atomic_pool;
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned int pageno;
	unsigned long flags;
	void *ptr = NULL;
	unsigned long align_mask;

	if (!pool->vaddr) {
		WARN(1, "coherent pool not initialised!\n");
		return NULL;
	}

	/*
	 * Align the region allocation - allocations from pool are rather
	 * small, so align them to their order in pages, minimum is a page
	 * size. This helps reduce fragmentation of the DMA space.
	 */
	align_mask = (1 << get_order(size)) - 1;

	spin_lock_irqsave(&pool->lock, flags);
	pageno = bitmap_find_next_zero_area(pool->bitmap, pool->nr_pages,
					    0, count, align_mask);
	if (pageno < pool->nr_pages) {
		bitmap_set(pool->bitmap, pageno, count);
		ptr = pool->vaddr + PAGE_SIZE * pageno;
		*ret_page = pool->pages[pageno];
	} else {
		pr_err_once("ERROR: %u KiB atomic DMA coherent pool is too small!\n"
			    "Please increase it with coherent_pool= kernel parameter!\n",
			    (unsigned)pool->size / 1024);
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	return ptr;
}

static bool __in_atomic_pool(void *start, size_t size)
{
	struct dma_pool *pool = &atomic_pool;
	void *end = start + size;
	void *pool_start = pool->vaddr;
	void *pool_end = pool->vaddr + pool->size;

	if (start < pool_start || start >= pool_end)
		return false;

	if (end <= pool_end)
		return true;

	WARN(1, "Wrong coherent size(%p-%p) from atomic pool(%p-%p)\n",
	     start, end - 1, pool_start, pool_end - 1);

	return false;
}

static int __free_from_pool(void *start, size_t size)
{
	struct dma_pool *pool = &atomic_pool;
	unsigned long pageno, count;
	unsigned long flags;

	if (!__in_atomic_pool(start, size))
		return 0;

	pageno = (start - pool->vaddr) >> PAGE_SHIFT;
	count = size >> PAGE_SHIFT;

	spin_lock_irqsave(&pool->lock, flags);
	bitmap_clear(pool->bitmap, pageno, count);
	spin_unlock_irqrestore(&pool->lock, flags);

	return 1;
}

static void *__alloc_from_contiguous(struct device *dev, size_t size,
				     pgprot_t prot, struct page **ret_page)
{
	unsigned long order = get_order(size);
	size_t count = size >> PAGE_SHIFT;
	struct page *page;

	page = dma_alloc_from_contiguous(dev, count, order);
	if (!page)
		return NULL;

	__dma_clear_buffer(page, size);
	__dma_remap(page, size, prot);

	*ret_page = page;
	return page_address(page);
}

static void __free_from_contiguous(struct device *dev, struct page *page,
				   size_t size)
{
	__dma_remap(page, size, pgprot_kernel);
	dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
}

static inline pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot)
{
	prot = dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs) ?
			    pgprot_writecombine(prot) :
			    pgprot_dmacoherent(prot);
	return prot;
}

#define nommu() 0

#else	/* !CONFIG_MMU */

#define nommu() 1

#define __get_dma_pgprot(attrs, prot)	__pgprot(0)
#define __alloc_remap_buffer(dev, size, gfp, prot, ret, c)	NULL
#define __alloc_from_pool(size, ret_page)			NULL
#define __alloc_from_contiguous(dev, size, prot, ret)		NULL
#define __free_from_pool(cpu_addr, size)			0
#define __free_from_contiguous(dev, page, size)			do { } while (0)
#define __dma_free_remap(cpu_addr, size)			do { } while (0)

#endif	/* CONFIG_MMU */

static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp,
				   struct page **ret_page)
{
	struct page *page;
	page = __dma_alloc_buffer(dev, size, gfp);
	if (!page)
		return NULL;

	*ret_page = page;
	return page_address(page);
}

static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
			 gfp_t gfp, pgprot_t prot, bool is_coherent, const void *caller)
{
	u64 mask = get_coherent_dma_mask(dev);
	struct page *page = NULL;
	void *addr;

#ifdef CONFIG_DMA_API_DEBUG
	u64 limit = (mask + 1) & ~mask;
	if (limit && size >= limit) {
		dev_warn(dev, "coherent allocation too big (requested %#x mask %#llx)\n",
			size, mask);
		return NULL;
	}
#endif

	if (!mask)
		return NULL;

	if (mask < 0xffffffffULL)
		gfp |= GFP_DMA;

	/*
	 * Following is a work-around (a.k.a. hack) to prevent pages
	 * with __GFP_COMP being passed to split_page() which cannot
	 * handle them.  The real problem is that this flag probably
	 * should be 0 on ARM as it is not supported on this
	 * platform; see CONFIG_HUGETLBFS.
	 */
	gfp &= ~(__GFP_COMP);

	*handle = DMA_ERROR_CODE;
	size = PAGE_ALIGN(size);

	if (is_coherent || nommu())
		addr = __alloc_simple_buffer(dev, size, gfp, &page);
	else if (gfp & GFP_ATOMIC)
		addr = __alloc_from_pool(size, &page);
	else if (!IS_ENABLED(CONFIG_CMA))
		addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller);
	else
		addr = __alloc_from_contiguous(dev, size, prot, &page);

	if (addr)
		*handle = pfn_to_dma(dev, page_to_pfn(page));

	return addr;
}

/*
 * Allocate DMA-coherent memory space and return both the kernel remapped
 * virtual and bus address for that space.
 */
void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
		    gfp_t gfp, struct dma_attrs *attrs)
{
	pgprot_t prot = __get_dma_pgprot(attrs, pgprot_kernel);
	void *memory;

	if (dma_alloc_from_coherent(dev, size, handle, &memory))
		return memory;

	return __dma_alloc(dev, size, handle, gfp, prot, false,
			   __builtin_return_address(0));
}

static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
	dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
{
	pgprot_t prot = __get_dma_pgprot(attrs, pgprot_kernel);
	void *memory;

	if (dma_alloc_from_coherent(dev, size, handle, &memory))
		return memory;

	return __dma_alloc(dev, size, handle, gfp, prot, true,
			   __builtin_return_address(0));
}

/*
 * Create userspace mapping for the DMA-coherent memory.
 */
int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		 void *cpu_addr, dma_addr_t dma_addr, size_t size,
		 struct dma_attrs *attrs)
{
	int ret = -ENXIO;
#ifdef CONFIG_MMU
	unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn = dma_to_pfn(dev, dma_addr);
	unsigned long off = vma->vm_pgoff;

	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);

	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
		ret = remap_pfn_range(vma, vma->vm_start,
				      pfn + off,
				      vma->vm_end - vma->vm_start,
				      vma->vm_page_prot);
	}
#endif	/* CONFIG_MMU */

	return ret;
}
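/*
 * Illustrative sketch only: a driver would normally reach arm_dma_mmap()
 * through the generic helper rather than calling it directly, e.g. from its
 * file_operations ->mmap handler.  cpu_addr, dma_handle and size below are
 * assumed to come from an earlier dma_alloc_coherent() call.
 *
 *	return dma_mmap_coherent(dev, vma, cpu_addr, dma_handle, size);
 */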

/*
 * Free a buffer as defined by the above mapping.
 */
static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
			   dma_addr_t handle, struct dma_attrs *attrs,
			   bool is_coherent)
{
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle));

	if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
		return;

	size = PAGE_ALIGN(size);

	if (is_coherent || nommu()) {
		__dma_free_buffer(page, size);
	} else if (__free_from_pool(cpu_addr, size)) {
		return;
	} else if (!IS_ENABLED(CONFIG_CMA)) {
		__dma_free_remap(cpu_addr, size);
		__dma_free_buffer(page, size);
	} else {
		/*
		 * Non-atomic allocations cannot be freed with IRQs disabled
		 */
		WARN_ON(irqs_disabled());
		__free_from_contiguous(dev, page, size);
	}
}

void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
		  dma_addr_t handle, struct dma_attrs *attrs)
{
	__arm_dma_free(dev, size, cpu_addr, handle, attrs, false);
}

static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
				  dma_addr_t handle, struct dma_attrs *attrs)
{
	__arm_dma_free(dev, size, cpu_addr, handle, attrs, true);
}

int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
		 void *cpu_addr, dma_addr_t handle, size_t size,
		 struct dma_attrs *attrs)
{
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
	int ret;

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (unlikely(ret))
		return ret;

	sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return 0;
}

static void dma_cache_maint_page(struct page *page, unsigned long offset,
	size_t size, enum dma_data_direction dir,
	void (*op)(const void *, size_t, int))
{
	/*
	 * A single sg entry may refer to multiple physically contiguous
	 * pages.  But we still need to process highmem pages individually.
	 * If highmem is not configured then the bulk of this loop gets
	 * optimized out.
	 */
	size_t left = size;
	do {
		size_t len = left;
		void *vaddr;

		if (PageHighMem(page)) {
			if (len + offset > PAGE_SIZE) {
				if (offset >= PAGE_SIZE) {
					page += offset / PAGE_SIZE;
					offset %= PAGE_SIZE;
				}
				len = PAGE_SIZE - offset;
			}
			vaddr = kmap_high_get(page);
			if (vaddr) {
				vaddr += offset;
				op(vaddr, len, dir);
				kunmap_high(page);
			} else if (cache_is_vipt()) {
				/* unmapped pages might still be cached */
				vaddr = kmap_atomic(page);
				op(vaddr + offset, len, dir);
				kunmap_atomic(vaddr);
			}
		} else {
			vaddr = page_address(page) + offset;
			op(vaddr, len, dir);
		}
		offset = 0;
		page++;
		left -= len;
	} while (left);
}

/*
 * Make an area consistent for devices.
 * Note: Drivers should NOT use this function directly, as it will break
 * platforms with CONFIG_DMABOUNCE.
 * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
 */
static void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	unsigned long paddr;

	dma_cache_maint_page(page, off, size, dir, dmac_map_area);

	paddr = page_to_phys(page) + off;
	if (dir == DMA_FROM_DEVICE) {
		outer_inv_range(paddr, paddr + size);
	} else {
		outer_clean_range(paddr, paddr + size);
	}
	/* FIXME: non-speculating: flush on bidirectional mappings? */
}

static void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	unsigned long paddr = page_to_phys(page) + off;

	/* FIXME: non-speculating: not required */
	/* don't bother invalidating if DMA to device */
	if (dir != DMA_TO_DEVICE)
		outer_inv_range(paddr, paddr + size);

	dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);

	/*
	 * Mark the D-cache clean for this page to avoid extra flushing.
	 */
	if (dir != DMA_TO_DEVICE && off == 0 && size >= PAGE_SIZE)
		set_bit(PG_dcache_clean, &page->flags);
}

/**
 * arm_dma_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the dma_map_single interface.
 * Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length.  They are obtained via
 * sg_dma_{address,length}.
 *
 * Device ownership issues as mentioned for dma_map_single are the same
 * here.
 */
int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;
	int i, j;

	for_each_sg(sg, s, nents, i) {
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		s->dma_length = s->length;
#endif
		s->dma_address = ops->map_page(dev, sg_page(s), s->offset,
						s->length, dir, attrs);
		if (dma_mapping_error(dev, s->dma_address))
			goto bad_mapping;
	}
	return nents;

 bad_mapping:
	for_each_sg(sg, s, i, j)
		ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
	return 0;
}
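/*
 * Illustrative sketch only: typical driver-side use of the scatter-gather
 * interface wrapped by this function.  'sglist' and 'nents' are hypothetical
 * and assumed to have been set up with sg_init_table()/sg_set_page();
 * program_hw_descriptor() stands in for driver-specific hardware setup.
 *
 *	struct scatterlist *s;
 *	int i, count;
 *
 *	count = dma_map_sg(dev, sglist, nents, DMA_FROM_DEVICE);
 *	if (!count)
 *		return -ENOMEM;
 *	for_each_sg(sglist, s, count, i)
 *		program_hw_descriptor(sg_dma_address(s), sg_dma_len(s));
 *	... run the transfer ...
 *	dma_unmap_sg(dev, sglist, nents, DMA_FROM_DEVICE);
 */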
| Russell King | afd1a32 | 2008-09-25 16:30:57 +0100 | [diff] [blame] | 897 |  | 
|  | 898 | /** | 
| Marek Szyprowski | 2a550e7 | 2012-02-10 19:55:20 +0100 | [diff] [blame] | 899 | * arm_dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg | 
| Russell King | afd1a32 | 2008-09-25 16:30:57 +0100 | [diff] [blame] | 900 | * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices | 
|  | 901 | * @sg: list of buffers | 
| Linus Walleij | 0adfca6 | 2011-01-12 18:50:37 +0100 | [diff] [blame] | 902 | * @nents: number of buffers to unmap (same as was passed to dma_map_sg) | 
| Russell King | afd1a32 | 2008-09-25 16:30:57 +0100 | [diff] [blame] | 903 | * @dir: DMA transfer direction (same as was passed to dma_map_sg) | 
|  | 904 | * | 
|  | 905 | * Unmap a set of streaming mode DMA translations.  Again, CPU access | 
|  | 906 | * rules concerning calls here are the same as for dma_unmap_single(). | 
|  | 907 | */ | 
| Marek Szyprowski | 2dc6a01 | 2012-02-10 19:55:20 +0100 | [diff] [blame] | 908 | void arm_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, | 
|  | 909 | enum dma_data_direction dir, struct dma_attrs *attrs) | 
| Russell King | afd1a32 | 2008-09-25 16:30:57 +0100 | [diff] [blame] | 910 | { | 
| Marek Szyprowski | 2a550e7 | 2012-02-10 19:55:20 +0100 | [diff] [blame] | 911 | struct dma_map_ops *ops = get_dma_ops(dev); | 
| Russell King | 01135d92 | 2008-09-25 21:05:02 +0100 | [diff] [blame] | 912 | struct scatterlist *s; | 
| Russell King | 01135d92 | 2008-09-25 21:05:02 +0100 | [diff] [blame] | 913 |  | 
| Russell King | 01135d92 | 2008-09-25 21:05:02 +0100 | [diff] [blame] | 914 | int i; | 
| Russell King | 24056f5 | 2011-01-03 11:29:28 +0000 | [diff] [blame] | 915 |  | 
| Russell King | 01135d92 | 2008-09-25 21:05:02 +0100 | [diff] [blame] | 916 | for_each_sg(sg, s, nents, i) | 
| Marek Szyprowski | 2a550e7 | 2012-02-10 19:55:20 +0100 | [diff] [blame] | 917 | ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs); | 
| Russell King | afd1a32 | 2008-09-25 16:30:57 +0100 | [diff] [blame] | 918 | } | 
| Russell King | afd1a32 | 2008-09-25 16:30:57 +0100 | [diff] [blame] | 919 |  | 
|  | 920 | /** | 
| Marek Szyprowski | 2a550e7 | 2012-02-10 19:55:20 +0100 | [diff] [blame] | 921 | * arm_dma_sync_sg_for_cpu | 
| Russell King | afd1a32 | 2008-09-25 16:30:57 +0100 | [diff] [blame] | 922 | * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices | 
|  | 923 | * @sg: list of buffers | 
|  | 924 | * @nents: number of buffers to map (returned from dma_map_sg) | 
|  | 925 | * @dir: DMA transfer direction (same as was passed to dma_map_sg) | 
|  | 926 | */ | 
| Marek Szyprowski | 2dc6a01 | 2012-02-10 19:55:20 +0100 | [diff] [blame] | 927 | void arm_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, | 
| Russell King | afd1a32 | 2008-09-25 16:30:57 +0100 | [diff] [blame] | 928 | int nents, enum dma_data_direction dir) | 
|  | 929 | { | 
| Marek Szyprowski | 2a550e7 | 2012-02-10 19:55:20 +0100 | [diff] [blame] | 930 | struct dma_map_ops *ops = get_dma_ops(dev); | 
| Russell King | afd1a32 | 2008-09-25 16:30:57 +0100 | [diff] [blame] | 931 | struct scatterlist *s; | 
|  | 932 | int i; | 
|  | 933 |  | 
| Marek Szyprowski | 2a550e7 | 2012-02-10 19:55:20 +0100 | [diff] [blame] | 934 | for_each_sg(sg, s, nents, i) | 
|  | 935 | ops->sync_single_for_cpu(dev, sg_dma_address(s), s->length, | 
|  | 936 | dir); | 
| Russell King | afd1a32 | 2008-09-25 16:30:57 +0100 | [diff] [blame] | 937 | } | 
| Russell King | afd1a32 | 2008-09-25 16:30:57 +0100 | [diff] [blame] | 938 |  | 
|  | 939 | /** | 
| Marek Szyprowski | 2a550e7 | 2012-02-10 19:55:20 +0100 | [diff] [blame] | 940 | * arm_dma_sync_sg_for_device | 
| Russell King | afd1a32 | 2008-09-25 16:30:57 +0100 | [diff] [blame] | 941 | * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices | 
|  | 942 | * @sg: list of buffers | 
|  | 943 | * @nents: number of buffers to map (returned from dma_map_sg) | 
|  | 944 | * @dir: DMA transfer direction (same as was passed to dma_map_sg) | 
|  | 945 | */ | 
| Marek Szyprowski | 2dc6a01 | 2012-02-10 19:55:20 +0100 | [diff] [blame] | 946 | void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, | 
| Russell King | afd1a32 | 2008-09-25 16:30:57 +0100 | [diff] [blame] | 947 | int nents, enum dma_data_direction dir) | 
|  | 948 | { | 
| Marek Szyprowski | 2a550e7 | 2012-02-10 19:55:20 +0100 | [diff] [blame] | 949 | struct dma_map_ops *ops = get_dma_ops(dev); | 
| Russell King | afd1a32 | 2008-09-25 16:30:57 +0100 | [diff] [blame] | 950 | struct scatterlist *s; | 
|  | 951 | int i; | 
|  | 952 |  | 
| Marek Szyprowski | 2a550e7 | 2012-02-10 19:55:20 +0100 | [diff] [blame] | 953 | for_each_sg(sg, s, nents, i) | 
|  | 954 | ops->sync_single_for_device(dev, sg_dma_address(s), s->length, | 
|  | 955 | dir); | 
| Russell King | afd1a32 | 2008-09-25 16:30:57 +0100 | [diff] [blame] | 956 | } | 
| Russell King | 24056f5 | 2011-01-03 11:29:28 +0000 | [diff] [blame] | 957 |  | 
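|  |  | /* | 
|  |  | * Example (illustrative sketch, not part of this file): the arm_dma_*_sg | 
|  |  | * helpers above back the generic dma_map_sg()/dma_sync_sg_*()/ | 
|  |  | * dma_unmap_sg() calls made by a driver.  Assuming <linux/dma-mapping.h> | 
|  |  | * and <linux/scatterlist.h>, and a hypothetical already-populated | 
|  |  | * scatterlist 'sgl' with 'nents' entries for device 'dev', a receive | 
|  |  | * path might look like: | 
|  |  | * | 
|  |  | *	int count = dma_map_sg(dev, sgl, nents, DMA_FROM_DEVICE); | 
|  |  | *	if (!count) | 
|  |  | *		return -ENOMEM; | 
|  |  | * | 
|  |  | *	... program the device using sg_dma_address()/sg_dma_len() ... | 
|  |  | * | 
|  |  | *	dma_sync_sg_for_cpu(dev, sgl, nents, DMA_FROM_DEVICE); | 
|  |  | *	... CPU reads the received data ... | 
|  |  | *	dma_sync_sg_for_device(dev, sgl, nents, DMA_FROM_DEVICE); | 
|  |  | * | 
|  |  | *	dma_unmap_sg(dev, sgl, nents, DMA_FROM_DEVICE); | 
|  |  | * | 
|  |  | * Note that dma_unmap_sg() takes the same 'nents' that was passed to | 
|  |  | * dma_map_sg(), not the count it returned. | 
|  |  | */ | 
|  |  |  | 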
| Russell King | 022ae53 | 2011-07-08 21:26:59 +0100 | [diff] [blame] | 958 | /* | 
|  | 959 | * Return whether the given device DMA address mask can be supported | 
|  | 960 | * properly.  For example, if your device can only drive the low 24-bits | 
|  | 961 | * during bus mastering, then you would pass 0x00ffffff as the mask | 
|  | 962 | * to this function. | 
|  | 963 | */ | 
|  | 964 | int dma_supported(struct device *dev, u64 mask) | 
|  | 965 | { | 
|  | 966 | if (mask < (u64)arm_dma_limit) | 
|  | 967 | return 0; | 
|  | 968 | return 1; | 
|  | 969 | } | 
|  | 970 | EXPORT_SYMBOL(dma_supported); | 
|  | 971 |  | 
| Gregory CLEMENT | 87b54e7 | 2012-11-21 09:39:19 +0100 | [diff] [blame] | 972 | int arm_dma_set_mask(struct device *dev, u64 dma_mask) | 
| Russell King | 022ae53 | 2011-07-08 21:26:59 +0100 | [diff] [blame] | 973 | { | 
|  | 974 | if (!dev->dma_mask || !dma_supported(dev, dma_mask)) | 
|  | 975 | return -EIO; | 
|  | 976 |  | 
| Russell King | 022ae53 | 2011-07-08 21:26:59 +0100 | [diff] [blame] | 977 | *dev->dma_mask = dma_mask; | 
| Russell King | 022ae53 | 2011-07-08 21:26:59 +0100 | [diff] [blame] | 978 |  | 
|  | 979 | return 0; | 
|  | 980 | } | 
| Russell King | 022ae53 | 2011-07-08 21:26:59 +0100 | [diff] [blame] | 981 |  | 
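|  |  | /* | 
|  |  | * Example (illustrative, not from the original source): a driver whose | 
|  |  | * device can only drive the low 24 address bits would set its mask before | 
|  |  | * performing any mappings; the request is ultimately validated against | 
|  |  | * arm_dma_limit by dma_supported() above: | 
|  |  | * | 
|  |  | *	if (dma_set_mask(dev, DMA_BIT_MASK(24))) | 
|  |  | *		return -EIO; | 
|  |  | * | 
|  |  | * where -EIO indicates the mask cannot be satisfied on this platform. | 
|  |  | */ | 
|  |  |  | 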
| Russell King | 24056f5 | 2011-01-03 11:29:28 +0000 | [diff] [blame] | 982 | #define PREALLOC_DMA_DEBUG_ENTRIES	4096 | 
|  | 983 |  | 
|  | 984 | static int __init dma_debug_do_init(void) | 
|  | 985 | { | 
|  | 986 | dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES); | 
|  | 987 | return 0; | 
|  | 988 | } | 
|  | 989 | fs_initcall(dma_debug_do_init); | 
| Marek Szyprowski | 4ce63fc | 2012-05-16 15:48:21 +0200 | [diff] [blame] | 990 |  | 
|  | 991 | #ifdef CONFIG_ARM_DMA_USE_IOMMU | 
|  | 992 |  | 
|  | 993 | /* IOMMU */ | 
|  | 994 |  | 
|  | 995 | static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping, | 
|  | 996 | size_t size) | 
|  | 997 | { | 
|  | 998 | unsigned int order = get_order(size); | 
|  | 999 | unsigned int align = 0; | 
|  | 1000 | unsigned int count, start; | 
|  | 1001 | unsigned long flags; | 
|  | 1002 |  | 
|  | 1003 | count = ((PAGE_ALIGN(size) >> PAGE_SHIFT) + | 
|  | 1004 | (1 << mapping->order) - 1) >> mapping->order; | 
|  | 1005 |  | 
|  | 1006 | if (order > mapping->order) | 
|  | 1007 | align = (1 << (order - mapping->order)) - 1; | 
|  | 1008 |  | 
|  | 1009 | spin_lock_irqsave(&mapping->lock, flags); | 
|  | 1010 | start = bitmap_find_next_zero_area(mapping->bitmap, mapping->bits, 0, | 
|  | 1011 | count, align); | 
|  | 1012 | if (start > mapping->bits) { | 
|  | 1013 | spin_unlock_irqrestore(&mapping->lock, flags); | 
|  | 1014 | return DMA_ERROR_CODE; | 
|  | 1015 | } | 
|  | 1016 |  | 
|  | 1017 | bitmap_set(mapping->bitmap, start, count); | 
|  | 1018 | spin_unlock_irqrestore(&mapping->lock, flags); | 
|  | 1019 |  | 
|  | 1020 | return mapping->base + (start << (mapping->order + PAGE_SHIFT)); | 
|  | 1021 | } | 
|  | 1022 |  | 
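|  |  | /* | 
|  |  | * Worked example of the arithmetic above (illustrative): with | 
|  |  | * mapping->order == 1 every bitmap bit covers two pages, so a request for | 
|  |  | * five pages needs count = (5 + 1) >> 1 = 3 bits, and because | 
|  |  | * get_order(5 * PAGE_SIZE) == 3 is larger than mapping->order the search | 
|  |  | * uses align = (1 << (3 - 1)) - 1 = 3, i.e. the allocation starts on a | 
|  |  | * four-bit (eight page) boundary. | 
|  |  | */ | 
|  |  |  | 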
|  | 1023 | static inline void __free_iova(struct dma_iommu_mapping *mapping, | 
|  | 1024 | dma_addr_t addr, size_t size) | 
|  | 1025 | { | 
|  | 1026 | unsigned int start = (addr - mapping->base) >> | 
|  | 1027 | (mapping->order + PAGE_SHIFT); | 
|  | 1028 | unsigned int count = ((size >> PAGE_SHIFT) + | 
|  | 1029 | (1 << mapping->order) - 1) >> mapping->order; | 
|  | 1030 | unsigned long flags; | 
|  | 1031 |  | 
|  | 1032 | spin_lock_irqsave(&mapping->lock, flags); | 
|  | 1033 | bitmap_clear(mapping->bitmap, start, count); | 
|  | 1034 | spin_unlock_irqrestore(&mapping->lock, flags); | 
|  | 1035 | } | 
|  | 1036 |  | 
| Marek Szyprowski | 549a17e | 2012-10-15 16:03:52 +0200 | [diff] [blame] | 1037 | static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, | 
|  | 1038 | gfp_t gfp, struct dma_attrs *attrs) | 
| Marek Szyprowski | 4ce63fc | 2012-05-16 15:48:21 +0200 | [diff] [blame] | 1039 | { | 
|  | 1040 | struct page **pages; | 
|  | 1041 | int count = size >> PAGE_SHIFT; | 
|  | 1042 | int array_size = count * sizeof(struct page *); | 
|  | 1043 | int i = 0; | 
|  | 1044 |  | 
|  | 1045 | if (array_size <= PAGE_SIZE) | 
|  | 1046 | pages = kzalloc(array_size, gfp); | 
|  | 1047 | else | 
|  | 1048 | pages = vzalloc(array_size); | 
|  | 1049 | if (!pages) | 
|  | 1050 | return NULL; | 
|  | 1051 |  | 
| Marek Szyprowski | 549a17e | 2012-10-15 16:03:52 +0200 | [diff] [blame] | 1052 | if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs)) { | 
|  | 1054 | unsigned long order = get_order(size); | 
|  | 1055 | struct page *page; | 
|  | 1056 |  | 
|  | 1057 | page = dma_alloc_from_contiguous(dev, count, order); | 
|  | 1058 | if (!page) | 
|  | 1059 | goto error; | 
|  | 1060 |  | 
|  | 1061 | __dma_clear_buffer(page, size); | 
|  | 1062 |  | 
|  | 1063 | for (i = 0; i < count; i++) | 
|  | 1064 | pages[i] = page + i; | 
|  | 1065 |  | 
|  | 1066 | return pages; | 
|  | 1067 | } | 
|  | 1068 |  | 
| Marek Szyprowski | 4ce63fc | 2012-05-16 15:48:21 +0200 | [diff] [blame] | 1069 | while (count) { | 
| Marek Szyprowski | 593f473 | 2012-06-21 11:48:11 +0200 | [diff] [blame] | 1070 | int j, order = __fls(count); | 
| Marek Szyprowski | 4ce63fc | 2012-05-16 15:48:21 +0200 | [diff] [blame] | 1071 |  | 
|  | 1072 | pages[i] = alloc_pages(gfp | __GFP_NOWARN, order); | 
|  | 1073 | while (!pages[i] && order) | 
|  | 1074 | pages[i] = alloc_pages(gfp | __GFP_NOWARN, --order); | 
|  | 1075 | if (!pages[i]) | 
|  | 1076 | goto error; | 
|  | 1077 |  | 
| Hiroshi Doyu | 5a796ee | 2012-09-11 07:39:39 +0200 | [diff] [blame] | 1078 | if (order) { | 
| Marek Szyprowski | 4ce63fc | 2012-05-16 15:48:21 +0200 | [diff] [blame] | 1079 | split_page(pages[i], order); | 
| Hiroshi Doyu | 5a796ee | 2012-09-11 07:39:39 +0200 | [diff] [blame] | 1080 | j = 1 << order; | 
|  | 1081 | while (--j) | 
|  | 1082 | pages[i + j] = pages[i] + j; | 
|  | 1083 | } | 
| Marek Szyprowski | 4ce63fc | 2012-05-16 15:48:21 +0200 | [diff] [blame] | 1084 |  | 
|  | 1085 | __dma_clear_buffer(pages[i], PAGE_SIZE << order); | 
|  | 1086 | i += 1 << order; | 
|  | 1087 | count -= 1 << order; | 
|  | 1088 | } | 
|  | 1089 |  | 
|  | 1090 | return pages; | 
|  | 1091 | error: | 
| Marek Szyprowski | 9fa8af9 | 2012-07-27 17:12:50 +0200 | [diff] [blame] | 1092 | while (i--) | 
| Marek Szyprowski | 4ce63fc | 2012-05-16 15:48:21 +0200 | [diff] [blame] | 1093 | if (pages[i]) | 
|  | 1094 | __free_pages(pages[i], 0); | 
| Prathyush K | 46c8785 | 2012-07-16 08:59:55 +0200 | [diff] [blame] | 1095 | if (array_size <= PAGE_SIZE) | 
| Marek Szyprowski | 4ce63fc | 2012-05-16 15:48:21 +0200 | [diff] [blame] | 1096 | kfree(pages); | 
|  | 1097 | else | 
|  | 1098 | vfree(pages); | 
|  | 1099 | return NULL; | 
|  | 1100 | } | 
|  | 1101 |  | 
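|  |  | /* | 
|  |  | * Illustration of the allocation loop above (not from the original | 
|  |  | * source): for a 13 page buffer the first iteration tries order | 
|  |  | * __fls(13) = 3 (eight pages), falling back to smaller orders only if | 
|  |  | * that allocation fails; the remaining five pages are then covered by an | 
|  |  | * order-2 (four page) and an order-0 (single page) allocation.  Each | 
|  |  | * higher-order block is broken up with split_page() so that every slot | 
|  |  | * of pages[] holds an order-0 page which can later be released with | 
|  |  | * __free_pages(page, 0). | 
|  |  | */ | 
|  |  |  | 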
| Marek Szyprowski | 549a17e | 2012-10-15 16:03:52 +0200 | [diff] [blame] | 1102 | static int __iommu_free_buffer(struct device *dev, struct page **pages, | 
|  | 1103 | size_t size, struct dma_attrs *attrs) | 
| Marek Szyprowski | 4ce63fc | 2012-05-16 15:48:21 +0200 | [diff] [blame] | 1104 | { | 
|  | 1105 | int count = size >> PAGE_SHIFT; | 
|  | 1106 | int array_size = count * sizeof(struct page *); | 
|  | 1107 | int i; | 
| Marek Szyprowski | 549a17e | 2012-10-15 16:03:52 +0200 | [diff] [blame] | 1108 |  | 
|  | 1109 | if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs)) { | 
|  | 1110 | dma_release_from_contiguous(dev, pages[0], count); | 
|  | 1111 | } else { | 
|  | 1112 | for (i = 0; i < count; i++) | 
|  | 1113 | if (pages[i]) | 
|  | 1114 | __free_pages(pages[i], 0); | 
|  | 1115 | } | 
|  | 1116 |  | 
| Prathyush K | 46c8785 | 2012-07-16 08:59:55 +0200 | [diff] [blame] | 1117 | if (array_size <= PAGE_SIZE) | 
| Marek Szyprowski | 4ce63fc | 2012-05-16 15:48:21 +0200 | [diff] [blame] | 1118 | kfree(pages); | 
|  | 1119 | else | 
|  | 1120 | vfree(pages); | 
|  | 1121 | return 0; | 
|  | 1122 | } | 
|  | 1123 |  | 
|  | 1124 | /* | 
|  | 1125 | * Create a CPU mapping for the specified pages | 
|  | 1126 | */ | 
|  | 1127 | static void * | 
| Marek Szyprowski | e9da6e9 | 2012-07-30 09:11:33 +0200 | [diff] [blame] | 1128 | __iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot, | 
|  | 1129 | const void *caller) | 
| Marek Szyprowski | 4ce63fc | 2012-05-16 15:48:21 +0200 | [diff] [blame] | 1130 | { | 
| Marek Szyprowski | e9da6e9 | 2012-07-30 09:11:33 +0200 | [diff] [blame] | 1131 | unsigned int i, nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT; | 
|  | 1132 | struct vm_struct *area; | 
|  | 1133 | unsigned long p; | 
| Marek Szyprowski | 4ce63fc | 2012-05-16 15:48:21 +0200 | [diff] [blame] | 1134 |  | 
| Marek Szyprowski | e9da6e9 | 2012-07-30 09:11:33 +0200 | [diff] [blame] | 1135 | area = get_vm_area_caller(size, VM_ARM_DMA_CONSISTENT | VM_USERMAP, | 
|  | 1136 | caller); | 
|  | 1137 | if (!area) | 
| Marek Szyprowski | 4ce63fc | 2012-05-16 15:48:21 +0200 | [diff] [blame] | 1138 | return NULL; | 
| Marek Szyprowski | e9da6e9 | 2012-07-30 09:11:33 +0200 | [diff] [blame] | 1139 |  | 
|  | 1140 | area->pages = pages; | 
|  | 1141 | area->nr_pages = nr_pages; | 
|  | 1142 | p = (unsigned long)area->addr; | 
|  | 1143 |  | 
|  | 1144 | for (i = 0; i < nr_pages; i++) { | 
|  | 1145 | phys_addr_t phys = __pfn_to_phys(page_to_pfn(pages[i])); | 
|  | 1146 | if (ioremap_page_range(p, p + PAGE_SIZE, phys, prot)) | 
|  | 1147 | goto err; | 
|  | 1148 | p += PAGE_SIZE; | 
| Marek Szyprowski | 4ce63fc | 2012-05-16 15:48:21 +0200 | [diff] [blame] | 1149 | } | 
| Marek Szyprowski | e9da6e9 | 2012-07-30 09:11:33 +0200 | [diff] [blame] | 1150 | return area->addr; | 
|  | 1151 | err: | 
|  | 1152 | unmap_kernel_range((unsigned long)area->addr, size); | 
|  | 1153 | vunmap(area->addr); | 
| Marek Szyprowski | 4ce63fc | 2012-05-16 15:48:21 +0200 | [diff] [blame] | 1154 | return NULL; | 
|  | 1155 | } | 
|  | 1156 |  | 
|  | 1157 | /* | 
|  | 1158 | * Create a mapping in device IO address space for the specified pages | 
|  | 1159 | */ | 
|  | 1160 | static dma_addr_t | 
|  | 1161 | __iommu_create_mapping(struct device *dev, struct page **pages, size_t size) | 
|  | 1162 | { | 
|  | 1163 | struct dma_iommu_mapping *mapping = dev->archdata.mapping; | 
|  | 1164 | unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; | 
|  | 1165 | dma_addr_t dma_addr, iova; | 
|  | 1166 | int i, ret = DMA_ERROR_CODE; | 
|  | 1167 |  | 
|  | 1168 | dma_addr = __alloc_iova(mapping, size); | 
|  | 1169 | if (dma_addr == DMA_ERROR_CODE) | 
|  | 1170 | return dma_addr; | 
|  | 1171 |  | 
|  | 1172 | iova = dma_addr; | 
|  | 1173 | for (i = 0; i < count; ) { | 
|  | 1174 | unsigned int next_pfn = page_to_pfn(pages[i]) + 1; | 
|  | 1175 | phys_addr_t phys = page_to_phys(pages[i]); | 
|  | 1176 | unsigned int len, j; | 
|  | 1177 |  | 
|  | 1178 | for (j = i + 1; j < count; j++, next_pfn++) | 
|  | 1179 | if (page_to_pfn(pages[j]) != next_pfn) | 
|  | 1180 | break; | 
|  | 1181 |  | 
|  | 1182 | len = (j - i) << PAGE_SHIFT; | 
|  | 1183 | ret = iommu_map(mapping->domain, iova, phys, len, 0); | 
|  | 1184 | if (ret < 0) | 
|  | 1185 | goto fail; | 
|  | 1186 | iova += len; | 
|  | 1187 | i = j; | 
|  | 1188 | } | 
|  | 1189 | return dma_addr; | 
|  | 1190 | fail: | 
|  | 1191 | iommu_unmap(mapping->domain, dma_addr, iova-dma_addr); | 
|  | 1192 | __free_iova(mapping, dma_addr, size); | 
|  | 1193 | return DMA_ERROR_CODE; | 
|  | 1194 | } | 
|  | 1195 |  | 
|  | 1196 | static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t size) | 
|  | 1197 | { | 
|  | 1198 | struct dma_iommu_mapping *mapping = dev->archdata.mapping; | 
|  | 1199 |  | 
|  | 1200 | /* | 
|  | 1201 | * add optional in-page offset from iova to size and align | 
|  | 1202 | * result to page size | 
|  | 1203 | */ | 
|  | 1204 | size = PAGE_ALIGN((iova & ~PAGE_MASK) + size); | 
|  | 1205 | iova &= PAGE_MASK; | 
|  | 1206 |  | 
|  | 1207 | iommu_unmap(mapping->domain, iova, size); | 
|  | 1208 | __free_iova(mapping, iova, size); | 
|  | 1209 | return 0; | 
|  | 1210 | } | 
|  | 1211 |  | 
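|  |  | /* | 
|  |  | * For example (illustrative, assuming 4K pages): unmapping 0x1000 bytes | 
|  |  | * at iova 0x10000200 becomes a 0x2000 byte unmap at 0x10000000, since | 
|  |  | * the buffer straddles two pages once the in-page offset is added back. | 
|  |  | */ | 
|  |  |  | 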
| Hiroshi Doyu | 665bad7 | 2012-08-28 08:13:03 +0300 | [diff] [blame] | 1212 | static struct page **__atomic_get_pages(void *addr) | 
|  | 1213 | { | 
|  | 1214 | struct dma_pool *pool = &atomic_pool; | 
|  | 1215 | struct page **pages = pool->pages; | 
|  | 1216 | int offs = (addr - pool->vaddr) >> PAGE_SHIFT; | 
|  | 1217 |  | 
|  | 1218 | return pages + offs; | 
|  | 1219 | } | 
|  | 1220 |  | 
| Marek Szyprowski | 955c757 | 2012-05-16 19:38:58 +0100 | [diff] [blame] | 1221 | static struct page **__iommu_get_pages(void *cpu_addr, struct dma_attrs *attrs) | 
| Marek Szyprowski | e9da6e9 | 2012-07-30 09:11:33 +0200 | [diff] [blame] | 1222 | { | 
|  | 1223 | struct vm_struct *area; | 
|  | 1224 |  | 
| Hiroshi Doyu | 665bad7 | 2012-08-28 08:13:03 +0300 | [diff] [blame] | 1225 | if (__in_atomic_pool(cpu_addr, PAGE_SIZE)) | 
|  | 1226 | return __atomic_get_pages(cpu_addr); | 
|  | 1227 |  | 
| Marek Szyprowski | 955c757 | 2012-05-16 19:38:58 +0100 | [diff] [blame] | 1228 | if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) | 
|  | 1229 | return cpu_addr; | 
|  | 1230 |  | 
| Marek Szyprowski | e9da6e9 | 2012-07-30 09:11:33 +0200 | [diff] [blame] | 1231 | area = find_vm_area(cpu_addr); | 
|  | 1232 | if (area && (area->flags & VM_ARM_DMA_CONSISTENT)) | 
|  | 1233 | return area->pages; | 
|  | 1234 | return NULL; | 
|  | 1235 | } | 
|  | 1236 |  | 
| Hiroshi Doyu | 479ed93 | 2012-08-28 08:13:04 +0300 | [diff] [blame] | 1237 | static void *__iommu_alloc_atomic(struct device *dev, size_t size, | 
|  | 1238 | dma_addr_t *handle) | 
|  | 1239 | { | 
|  | 1240 | struct page *page; | 
|  | 1241 | void *addr; | 
|  | 1242 |  | 
|  | 1243 | addr = __alloc_from_pool(size, &page); | 
|  | 1244 | if (!addr) | 
|  | 1245 | return NULL; | 
|  | 1246 |  | 
|  | 1247 | *handle = __iommu_create_mapping(dev, &page, size); | 
|  | 1248 | if (*handle == DMA_ERROR_CODE) | 
|  | 1249 | goto err_mapping; | 
|  | 1250 |  | 
|  | 1251 | return addr; | 
|  | 1252 |  | 
|  | 1253 | err_mapping: | 
|  | 1254 | __free_from_pool(addr, size); | 
|  | 1255 | return NULL; | 
|  | 1256 | } | 
|  | 1257 |  | 
|  | 1258 | static void __iommu_free_atomic(struct device *dev, struct page **pages, | 
|  | 1259 | dma_addr_t handle, size_t size) | 
|  | 1260 | { | 
|  | 1261 | __iommu_remove_mapping(dev, handle, size); | 
|  | 1262 | __free_from_pool(page_address(pages[0]), size); | 
|  | 1263 | } | 
|  | 1264 |  | 
| Marek Szyprowski | 4ce63fc | 2012-05-16 15:48:21 +0200 | [diff] [blame] | 1265 | static void *arm_iommu_alloc_attrs(struct device *dev, size_t size, | 
|  | 1266 | dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs) | 
|  | 1267 | { | 
|  | 1268 | pgprot_t prot = __get_dma_pgprot(attrs, pgprot_kernel); | 
|  | 1269 | struct page **pages; | 
|  | 1270 | void *addr = NULL; | 
|  | 1271 |  | 
|  | 1272 | *handle = DMA_ERROR_CODE; | 
|  | 1273 | size = PAGE_ALIGN(size); | 
|  | 1274 |  | 
| Hiroshi Doyu | 479ed93 | 2012-08-28 08:13:04 +0300 | [diff] [blame] | 1275 | if (gfp & GFP_ATOMIC) | 
|  | 1276 | return __iommu_alloc_atomic(dev, size, handle); | 
|  | 1277 |  | 
| Marek Szyprowski | 549a17e | 2012-10-15 16:03:52 +0200 | [diff] [blame] | 1278 | pages = __iommu_alloc_buffer(dev, size, gfp, attrs); | 
| Marek Szyprowski | 4ce63fc | 2012-05-16 15:48:21 +0200 | [diff] [blame] | 1279 | if (!pages) | 
|  | 1280 | return NULL; | 
|  | 1281 |  | 
|  | 1282 | *handle = __iommu_create_mapping(dev, pages, size); | 
|  | 1283 | if (*handle == DMA_ERROR_CODE) | 
|  | 1284 | goto err_buffer; | 
|  | 1285 |  | 
| Marek Szyprowski | 955c757 | 2012-05-16 19:38:58 +0100 | [diff] [blame] | 1286 | if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) | 
|  | 1287 | return pages; | 
|  | 1288 |  | 
| Marek Szyprowski | e9da6e9 | 2012-07-30 09:11:33 +0200 | [diff] [blame] | 1289 | addr = __iommu_alloc_remap(pages, size, gfp, prot, | 
|  | 1290 | __builtin_return_address(0)); | 
| Marek Szyprowski | 4ce63fc | 2012-05-16 15:48:21 +0200 | [diff] [blame] | 1291 | if (!addr) | 
|  | 1292 | goto err_mapping; | 
|  | 1293 |  | 
|  | 1294 | return addr; | 
|  | 1295 |  | 
|  | 1296 | err_mapping: | 
|  | 1297 | __iommu_remove_mapping(dev, *handle, size); | 
|  | 1298 | err_buffer: | 
| Marek Szyprowski | 549a17e | 2012-10-15 16:03:52 +0200 | [diff] [blame] | 1299 | __iommu_free_buffer(dev, pages, size, attrs); | 
| Marek Szyprowski | 4ce63fc | 2012-05-16 15:48:21 +0200 | [diff] [blame] | 1300 | return NULL; | 
|  | 1301 | } | 
|  | 1302 |  | 
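|  |  | /* | 
|  |  | * Example (illustrative sketch, not part of this file): once a device has | 
|  |  | * been attached to an IOMMU mapping (see arm_iommu_attach_device() below), | 
|  |  | * ordinary dma_alloc_attrs()/dma_free_attrs() calls from a driver end up | 
|  |  | * here.  Assuming <linux/dma-mapping.h>, <linux/sizes.h> and a | 
|  |  | * hypothetical device 'dev', a buffer that only the hardware ever touches | 
|  |  | * could skip the kernel mapping entirely: | 
|  |  | * | 
|  |  | *	DEFINE_DMA_ATTRS(attrs); | 
|  |  | *	dma_addr_t iova; | 
|  |  | *	void *cookie; | 
|  |  | * | 
|  |  | *	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs); | 
|  |  | *	cookie = dma_alloc_attrs(dev, SZ_1M, &iova, GFP_KERNEL, &attrs); | 
|  |  | *	if (!cookie) | 
|  |  | *		return -ENOMEM; | 
|  |  | *	... hand 'iova' to the device; 'cookie' is an opaque value here, | 
|  |  | *	    not a kernel virtual address ... | 
|  |  | *	dma_free_attrs(dev, SZ_1M, cookie, iova, &attrs); | 
|  |  | */ | 
|  |  |  | 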
|  | 1303 | static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma, | 
|  | 1304 | void *cpu_addr, dma_addr_t dma_addr, size_t size, | 
|  | 1305 | struct dma_attrs *attrs) | 
|  | 1306 | { | 
| Marek Szyprowski | e9da6e9 | 2012-07-30 09:11:33 +0200 | [diff] [blame] | 1307 | unsigned long uaddr = vma->vm_start; | 
|  | 1308 | unsigned long usize = vma->vm_end - vma->vm_start; | 
| Marek Szyprowski | 955c757 | 2012-05-16 19:38:58 +0100 | [diff] [blame] | 1309 | struct page **pages = __iommu_get_pages(cpu_addr, attrs); | 
| Marek Szyprowski | 4ce63fc | 2012-05-16 15:48:21 +0200 | [diff] [blame] | 1310 |  | 
|  | 1311 | vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot); | 
| Marek Szyprowski | 4ce63fc | 2012-05-16 15:48:21 +0200 | [diff] [blame] | 1312 |  | 
| Marek Szyprowski | e9da6e9 | 2012-07-30 09:11:33 +0200 | [diff] [blame] | 1313 | if (!pages) | 
|  | 1314 | return -ENXIO; | 
| Marek Szyprowski | 4ce63fc | 2012-05-16 15:48:21 +0200 | [diff] [blame] | 1315 |  | 
| Marek Szyprowski | e9da6e9 | 2012-07-30 09:11:33 +0200 | [diff] [blame] | 1316 | do { | 
|  | 1317 | int ret = vm_insert_page(vma, uaddr, *pages++); | 
|  | 1318 | if (ret) { | 
|  | 1319 | pr_err("Remapping memory failed: %d\n", ret); | 
|  | 1320 | return ret; | 
|  | 1321 | } | 
|  | 1322 | uaddr += PAGE_SIZE; | 
|  | 1323 | usize -= PAGE_SIZE; | 
|  | 1324 | } while (usize > 0); | 
| Marek Szyprowski | 4ce63fc | 2012-05-16 15:48:21 +0200 | [diff] [blame] | 1325 |  | 
| Marek Szyprowski | 4ce63fc | 2012-05-16 15:48:21 +0200 | [diff] [blame] | 1326 | return 0; | 
|  | 1327 | } | 
|  | 1328 |  | 
|  | 1329 | /* | 
|  | 1330 | * Free a buffer as allocated by arm_iommu_alloc_attrs() above. | 
|  | 1331 | * Must not be called with IRQs disabled. | 
|  | 1332 | */ | 
|  | 1333 | void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr, | 
|  | 1334 | dma_addr_t handle, struct dma_attrs *attrs) | 
|  | 1335 | { | 
| Marek Szyprowski | 955c757 | 2012-05-16 19:38:58 +0100 | [diff] [blame] | 1336 | struct page **pages = __iommu_get_pages(cpu_addr, attrs); | 
| Marek Szyprowski | 4ce63fc | 2012-05-16 15:48:21 +0200 | [diff] [blame] | 1337 | size = PAGE_ALIGN(size); | 
|  | 1338 |  | 
| Marek Szyprowski | e9da6e9 | 2012-07-30 09:11:33 +0200 | [diff] [blame] | 1339 | if (!pages) { | 
|  | 1340 | WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr); | 
|  | 1341 | return; | 
| Marek Szyprowski | 4ce63fc | 2012-05-16 15:48:21 +0200 | [diff] [blame] | 1342 | } | 
| Marek Szyprowski | e9da6e9 | 2012-07-30 09:11:33 +0200 | [diff] [blame] | 1343 |  | 
| Hiroshi Doyu | 479ed93 | 2012-08-28 08:13:04 +0300 | [diff] [blame] | 1344 | if (__in_atomic_pool(cpu_addr, size)) { | 
|  | 1345 | __iommu_free_atomic(dev, pages, handle, size); | 
|  | 1346 | return; | 
|  | 1347 | } | 
|  | 1348 |  | 
| Marek Szyprowski | 955c757 | 2012-05-16 19:38:58 +0100 | [diff] [blame] | 1349 | if (!dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) { | 
|  | 1350 | unmap_kernel_range((unsigned long)cpu_addr, size); | 
|  | 1351 | vunmap(cpu_addr); | 
|  | 1352 | } | 
| Marek Szyprowski | e9da6e9 | 2012-07-30 09:11:33 +0200 | [diff] [blame] | 1353 |  | 
|  | 1354 | __iommu_remove_mapping(dev, handle, size); | 
| Marek Szyprowski | 549a17e | 2012-10-15 16:03:52 +0200 | [diff] [blame] | 1355 | __iommu_free_buffer(dev, pages, size, attrs); | 
| Marek Szyprowski | 4ce63fc | 2012-05-16 15:48:21 +0200 | [diff] [blame] | 1356 | } | 
|  | 1357 |  | 
| Marek Szyprowski | dc2832e | 2012-06-13 10:01:15 +0200 | [diff] [blame] | 1358 | static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt, | 
|  | 1359 | void *cpu_addr, dma_addr_t dma_addr, | 
|  | 1360 | size_t size, struct dma_attrs *attrs) | 
|  | 1361 | { | 
|  | 1362 | unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; | 
|  | 1363 | struct page **pages = __iommu_get_pages(cpu_addr, attrs); | 
|  | 1364 |  | 
|  | 1365 | if (!pages) | 
|  | 1366 | return -ENXIO; | 
|  | 1367 |  | 
|  | 1368 | return sg_alloc_table_from_pages(sgt, pages, count, 0, size, | 
|  | 1369 | GFP_KERNEL); | 
| Marek Szyprowski | 4ce63fc | 2012-05-16 15:48:21 +0200 | [diff] [blame] | 1370 | } | 
|  | 1371 |  | 
|  | 1372 | /* | 
|  | 1373 | * Map a part of the scatter-gather list into contiguous io address space | 
|  | 1374 | */ | 
|  | 1375 | static int __map_sg_chunk(struct device *dev, struct scatterlist *sg, | 
|  | 1376 | size_t size, dma_addr_t *handle, | 
| Rob Herring | 0fa478d | 2012-08-21 12:23:23 +0200 | [diff] [blame] | 1377 | enum dma_data_direction dir, struct dma_attrs *attrs, | 
|  | 1378 | bool is_coherent) | 
| Marek Szyprowski | 4ce63fc | 2012-05-16 15:48:21 +0200 | [diff] [blame] | 1379 | { | 
|  | 1380 | struct dma_iommu_mapping *mapping = dev->archdata.mapping; | 
|  | 1381 | dma_addr_t iova, iova_base; | 
|  | 1382 | int ret = 0; | 
|  | 1383 | unsigned int count; | 
|  | 1384 | struct scatterlist *s; | 
|  | 1385 |  | 
|  | 1386 | size = PAGE_ALIGN(size); | 
|  | 1387 | *handle = DMA_ERROR_CODE; | 
|  | 1388 |  | 
|  | 1389 | iova_base = iova = __alloc_iova(mapping, size); | 
|  | 1390 | if (iova == DMA_ERROR_CODE) | 
|  | 1391 | return -ENOMEM; | 
|  | 1392 |  | 
|  | 1393 | for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) { | 
|  | 1394 | phys_addr_t phys = page_to_phys(sg_page(s)); | 
|  | 1395 | unsigned int len = PAGE_ALIGN(s->offset + s->length); | 
|  | 1396 |  | 
| Rob Herring | 0fa478d | 2012-08-21 12:23:23 +0200 | [diff] [blame] | 1397 | if (!is_coherent && | 
|  | 1398 | !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) | 
| Marek Szyprowski | 4ce63fc | 2012-05-16 15:48:21 +0200 | [diff] [blame] | 1399 | __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir); | 
|  | 1400 |  | 
|  | 1401 | ret = iommu_map(mapping->domain, iova, phys, len, 0); | 
|  | 1402 | if (ret < 0) | 
|  | 1403 | goto fail; | 
|  | 1404 | count += len >> PAGE_SHIFT; | 
|  | 1405 | iova += len; | 
|  | 1406 | } | 
|  | 1407 | *handle = iova_base; | 
|  | 1408 |  | 
|  | 1409 | return 0; | 
|  | 1410 | fail: | 
|  | 1411 | iommu_unmap(mapping->domain, iova_base, count * PAGE_SIZE); | 
|  | 1412 | __free_iova(mapping, iova_base, size); | 
|  | 1413 | return ret; | 
|  | 1414 | } | 
|  | 1415 |  | 
| Rob Herring | 0fa478d | 2012-08-21 12:23:23 +0200 | [diff] [blame] | 1416 | static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents, | 
|  | 1417 | enum dma_data_direction dir, struct dma_attrs *attrs, | 
|  | 1418 | bool is_coherent) | 
| Marek Szyprowski | 4ce63fc | 2012-05-16 15:48:21 +0200 | [diff] [blame] | 1419 | { | 
|  | 1420 | struct scatterlist *s = sg, *dma = sg, *start = sg; | 
|  | 1421 | int i, count = 0; | 
|  | 1422 | unsigned int offset = s->offset; | 
|  | 1423 | unsigned int size = s->offset + s->length; | 
|  | 1424 | unsigned int max = dma_get_max_seg_size(dev); | 
|  | 1425 |  | 
|  | 1426 | for (i = 1; i < nents; i++) { | 
|  | 1427 | s = sg_next(s); | 
|  | 1428 |  | 
|  | 1429 | s->dma_address = DMA_ERROR_CODE; | 
|  | 1430 | s->dma_length = 0; | 
|  | 1431 |  | 
|  | 1432 | if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) { | 
|  | 1433 | if (__map_sg_chunk(dev, start, size, &dma->dma_address, | 
| Rob Herring | 0fa478d | 2012-08-21 12:23:23 +0200 | [diff] [blame] | 1434 | dir, attrs, is_coherent) < 0) | 
| Marek Szyprowski | 4ce63fc | 2012-05-16 15:48:21 +0200 | [diff] [blame] | 1435 | goto bad_mapping; | 
|  | 1436 |  | 
|  | 1437 | dma->dma_address += offset; | 
|  | 1438 | dma->dma_length = size - offset; | 
|  | 1439 |  | 
|  | 1440 | size = offset = s->offset; | 
|  | 1441 | start = s; | 
|  | 1442 | dma = sg_next(dma); | 
|  | 1443 | count += 1; | 
|  | 1444 | } | 
|  | 1445 | size += s->length; | 
|  | 1446 | } | 
| Rob Herring | 0fa478d | 2012-08-21 12:23:23 +0200 | [diff] [blame] | 1447 | if (__map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs, | 
|  | 1448 | is_coherent) < 0) | 
| Marek Szyprowski | 4ce63fc | 2012-05-16 15:48:21 +0200 | [diff] [blame] | 1449 | goto bad_mapping; | 
|  | 1450 |  | 
|  | 1451 | dma->dma_address += offset; | 
|  | 1452 | dma->dma_length = size - offset; | 
|  | 1453 |  | 
|  | 1454 | return count+1; | 
|  | 1455 |  | 
|  | 1456 | bad_mapping: | 
|  | 1457 | for_each_sg(sg, s, count, i) | 
|  | 1458 | __iommu_remove_mapping(dev, sg_dma_address(s), sg_dma_len(s)); | 
|  | 1459 | return 0; | 
|  | 1460 | } | 
|  | 1461 |  | 
|  | 1462 | /** | 
| Rob Herring | 0fa478d | 2012-08-21 12:23:23 +0200 | [diff] [blame] | 1463 | * arm_coherent_iommu_map_sg - map a set of SG buffers for streaming mode DMA | 
|  | 1464 | * @dev: valid struct device pointer | 
|  | 1465 | * @sg: list of buffers | 
|  | 1466 | * @nents: number of buffers to map | 
|  | 1467 | * @dir: DMA transfer direction | 
|  | 1468 | * | 
|  | 1469 | * Map a set of i/o coherent buffers described by scatterlist in streaming | 
|  | 1470 | * mode for DMA. The scatter gather list elements are merged together (if | 
|  | 1471 | * possible) and tagged with the appropriate dma address and length. They are | 
|  | 1472 | * obtained via sg_dma_{address,length}. | 
|  | 1473 | */ | 
|  | 1474 | int arm_coherent_iommu_map_sg(struct device *dev, struct scatterlist *sg, | 
|  | 1475 | int nents, enum dma_data_direction dir, struct dma_attrs *attrs) | 
|  | 1476 | { | 
|  | 1477 | return __iommu_map_sg(dev, sg, nents, dir, attrs, true); | 
|  | 1478 | } | 
|  | 1479 |  | 
|  | 1480 | /** | 
|  | 1481 | * arm_iommu_map_sg - map a set of SG buffers for streaming mode DMA | 
|  | 1482 | * @dev: valid struct device pointer | 
|  | 1483 | * @sg: list of buffers | 
|  | 1484 | * @nents: number of buffers to map | 
|  | 1485 | * @dir: DMA transfer direction | 
|  | 1486 | * | 
|  | 1487 | * Map a set of buffers described by scatterlist in streaming mode for DMA. | 
|  | 1488 | * The scatter gather list elements are merged together (if possible) and | 
|  | 1489 | * tagged with the appropriate dma address and length. They are obtained via | 
|  | 1490 | * sg_dma_{address,length}. | 
|  | 1491 | */ | 
|  | 1492 | int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg, | 
|  | 1493 | int nents, enum dma_data_direction dir, struct dma_attrs *attrs) | 
|  | 1494 | { | 
|  | 1495 | return __iommu_map_sg(dev, sg, nents, dir, attrs, false); | 
|  | 1496 | } | 
|  | 1497 |  | 
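|  |  | /* | 
|  |  | * Example (illustrative, hypothetical names): because the IOMMU variant | 
|  |  | * merges adjacent entries where possible, the value returned by | 
|  |  | * dma_map_sg() on a device attached to an IOMMU mapping may be smaller | 
|  |  | * than nents, and it is the number of segments the device actually needs | 
|  |  | * to be programmed with: | 
|  |  | * | 
|  |  | *	int count = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE); | 
|  |  | *	struct scatterlist *s; | 
|  |  | *	int i; | 
|  |  | * | 
|  |  | *	if (!count) | 
|  |  | *		return -ENOMEM; | 
|  |  | *	for_each_sg(sgl, s, count, i) | 
|  |  | *		my_hw_queue_segment(dev, sg_dma_address(s), sg_dma_len(s)); | 
|  |  | * | 
|  |  | * where my_hw_queue_segment() stands in for whatever the driver uses to | 
|  |  | * program one contiguous IO-address segment; dma_unmap_sg() is later | 
|  |  | * called with the original nents. | 
|  |  | */ | 
|  |  |  | 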
|  | 1498 | static void __iommu_unmap_sg(struct device *dev, struct scatterlist *sg, | 
|  | 1499 | int nents, enum dma_data_direction dir, struct dma_attrs *attrs, | 
|  | 1500 | bool is_coherent) | 
|  | 1501 | { | 
|  | 1502 | struct scatterlist *s; | 
|  | 1503 | int i; | 
|  | 1504 |  | 
|  | 1505 | for_each_sg(sg, s, nents, i) { | 
|  | 1506 | if (sg_dma_len(s)) | 
|  | 1507 | __iommu_remove_mapping(dev, sg_dma_address(s), | 
|  | 1508 | sg_dma_len(s)); | 
|  | 1509 | if (!is_coherent && | 
|  | 1510 | !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) | 
|  | 1511 | __dma_page_dev_to_cpu(sg_page(s), s->offset, | 
|  | 1512 | s->length, dir); | 
|  | 1513 | } | 
|  | 1514 | } | 
|  | 1515 |  | 
|  | 1516 | /** | 
|  | 1517 | * arm_coherent_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg | 
|  | 1518 | * @dev: valid struct device pointer | 
|  | 1519 | * @sg: list of buffers | 
|  | 1520 | * @nents: number of buffers to unmap (same as was passed to dma_map_sg) | 
|  | 1521 | * @dir: DMA transfer direction (same as was passed to dma_map_sg) | 
|  | 1522 | * | 
|  | 1523 | * Unmap a set of streaming mode DMA translations.  Again, CPU access | 
|  | 1524 | * rules concerning calls here are the same as for dma_unmap_single(). | 
|  | 1525 | */ | 
|  | 1526 | void arm_coherent_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, | 
|  | 1527 | int nents, enum dma_data_direction dir, struct dma_attrs *attrs) | 
|  | 1528 | { | 
|  | 1529 | __iommu_unmap_sg(dev, sg, nents, dir, attrs, true); | 
|  | 1530 | } | 
|  | 1531 |  | 
|  | 1532 | /** | 
| Marek Szyprowski | 4ce63fc | 2012-05-16 15:48:21 +0200 | [diff] [blame] | 1533 | * arm_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg | 
|  | 1534 | * @dev: valid struct device pointer | 
|  | 1535 | * @sg: list of buffers | 
|  | 1536 | * @nents: number of buffers to unmap (same as was passed to dma_map_sg) | 
|  | 1537 | * @dir: DMA transfer direction (same as was passed to dma_map_sg) | 
|  | 1538 | * | 
|  | 1539 | * Unmap a set of streaming mode DMA translations.  Again, CPU access | 
|  | 1540 | * rules concerning calls here are the same as for dma_unmap_single(). | 
|  | 1541 | */ | 
|  | 1542 | void arm_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, | 
|  | 1543 | enum dma_data_direction dir, struct dma_attrs *attrs) | 
|  | 1544 | { | 
| Rob Herring | 0fa478d | 2012-08-21 12:23:23 +0200 | [diff] [blame] | 1545 | __iommu_unmap_sg(dev, sg, nents, dir, attrs, false); | 
| Marek Szyprowski | 4ce63fc | 2012-05-16 15:48:21 +0200 | [diff] [blame] | 1546 | } | 
|  | 1547 |  | 
|  | 1548 | /** | 
|  | 1549 | * arm_iommu_sync_sg_for_cpu | 
|  | 1550 | * @dev: valid struct device pointer | 
|  | 1551 | * @sg: list of buffers | 
|  | 1552 | * @nents: number of buffers to map (returned from dma_map_sg) | 
|  | 1553 | * @dir: DMA transfer direction (same as was passed to dma_map_sg) | 
|  | 1554 | */ | 
|  | 1555 | void arm_iommu_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, | 
|  | 1556 | int nents, enum dma_data_direction dir) | 
|  | 1557 | { | 
|  | 1558 | struct scatterlist *s; | 
|  | 1559 | int i; | 
|  | 1560 |  | 
|  | 1561 | for_each_sg(sg, s, nents, i) | 
| Rob Herring | 0fa478d | 2012-08-21 12:23:23 +0200 | [diff] [blame] | 1562 | __dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir); | 
| Marek Szyprowski | 4ce63fc | 2012-05-16 15:48:21 +0200 | [diff] [blame] | 1563 |  | 
|  | 1564 | } | 
|  | 1565 |  | 
|  | 1566 | /** | 
|  | 1567 | * arm_iommu_sync_sg_for_device | 
|  | 1568 | * @dev: valid struct device pointer | 
|  | 1569 | * @sg: list of buffers | 
|  | 1570 | * @nents: number of buffers to map (returned from dma_map_sg) | 
|  | 1571 | * @dir: DMA transfer direction (same as was passed to dma_map_sg) | 
|  | 1572 | */ | 
|  | 1573 | void arm_iommu_sync_sg_for_device(struct device *dev, struct scatterlist *sg, | 
|  | 1574 | int nents, enum dma_data_direction dir) | 
|  | 1575 | { | 
|  | 1576 | struct scatterlist *s; | 
|  | 1577 | int i; | 
|  | 1578 |  | 
|  | 1579 | for_each_sg(sg, s, nents, i) | 
| Rob Herring | 0fa478d | 2012-08-21 12:23:23 +0200 | [diff] [blame] | 1580 | __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir); | 
| Marek Szyprowski | 4ce63fc | 2012-05-16 15:48:21 +0200 | [diff] [blame] | 1581 | } | 
|  | 1582 |  | 
|  | 1583 |  | 
|  | 1584 | /** | 
| Rob Herring | 0fa478d | 2012-08-21 12:23:23 +0200 | [diff] [blame] | 1585 | * arm_coherent_iommu_map_page | 
|  | 1586 | * @dev: valid struct device pointer | 
|  | 1587 | * @page: page that buffer resides in | 
|  | 1588 | * @offset: offset into page for start of buffer | 
|  | 1589 | * @size: size of buffer to map | 
|  | 1590 | * @dir: DMA transfer direction | 
|  | 1591 | * | 
|  | 1592 | * Coherent IOMMU aware version of arm_dma_map_page() | 
|  | 1593 | */ | 
|  | 1594 | static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *page, | 
|  | 1595 | unsigned long offset, size_t size, enum dma_data_direction dir, | 
|  | 1596 | struct dma_attrs *attrs) | 
|  | 1597 | { | 
|  | 1598 | struct dma_iommu_mapping *mapping = dev->archdata.mapping; | 
|  | 1599 | dma_addr_t dma_addr; | 
|  | 1600 | int ret, len = PAGE_ALIGN(size + offset); | 
|  | 1601 |  | 
|  | 1602 | dma_addr = __alloc_iova(mapping, len); | 
|  | 1603 | if (dma_addr == DMA_ERROR_CODE) | 
|  | 1604 | return dma_addr; | 
|  | 1605 |  | 
|  | 1606 | ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, 0); | 
|  | 1607 | if (ret < 0) | 
|  | 1608 | goto fail; | 
|  | 1609 |  | 
|  | 1610 | return dma_addr + offset; | 
|  | 1611 | fail: | 
|  | 1612 | __free_iova(mapping, dma_addr, len); | 
|  | 1613 | return DMA_ERROR_CODE; | 
|  | 1614 | } | 
|  | 1615 |  | 
|  | 1616 | /** | 
| Marek Szyprowski | 4ce63fc | 2012-05-16 15:48:21 +0200 | [diff] [blame] | 1617 | * arm_iommu_map_page | 
|  | 1618 | * @dev: valid struct device pointer | 
|  | 1619 | * @page: page that buffer resides in | 
|  | 1620 | * @offset: offset into page for start of buffer | 
|  | 1621 | * @size: size of buffer to map | 
|  | 1622 | * @dir: DMA transfer direction | 
|  | 1623 | * | 
|  | 1624 | * IOMMU aware version of arm_dma_map_page() | 
|  | 1625 | */ | 
|  | 1626 | static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page, | 
|  | 1627 | unsigned long offset, size_t size, enum dma_data_direction dir, | 
|  | 1628 | struct dma_attrs *attrs) | 
|  | 1629 | { | 
| Rob Herring | 0fa478d | 2012-08-21 12:23:23 +0200 | [diff] [blame] | 1630 | if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) | 
| Marek Szyprowski | 4ce63fc | 2012-05-16 15:48:21 +0200 | [diff] [blame] | 1631 | __dma_page_cpu_to_dev(page, offset, size, dir); | 
|  | 1632 |  | 
| Rob Herring | 0fa478d | 2012-08-21 12:23:23 +0200 | [diff] [blame] | 1633 | return arm_coherent_iommu_map_page(dev, page, offset, size, dir, attrs); | 
|  | 1634 | } | 
| Marek Szyprowski | 4ce63fc | 2012-05-16 15:48:21 +0200 | [diff] [blame] | 1635 |  | 
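|  |  | /* | 
|  |  | * Example (illustrative sketch): with the IOMMU ops installed on a device, | 
|  |  | * a streaming single-page mapping from a driver goes through | 
|  |  | * arm_iommu_map_page() above: | 
|  |  | * | 
|  |  | *	dma_addr_t dma = dma_map_page(dev, page, offset, len, DMA_TO_DEVICE); | 
|  |  | * | 
|  |  | *	if (dma_mapping_error(dev, dma)) | 
|  |  | *		return -ENOMEM; | 
|  |  | *	... let the device DMA from 'dma' ... | 
|  |  | *	dma_unmap_page(dev, dma, len, DMA_TO_DEVICE); | 
|  |  | */ | 
|  |  |  | 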
| Rob Herring | 0fa478d | 2012-08-21 12:23:23 +0200 | [diff] [blame] | 1636 | /** | 
|  | 1637 | * arm_coherent_iommu_unmap_page | 
|  | 1638 | * @dev: valid struct device pointer | 
|  | 1639 | * @handle: DMA address of buffer | 
|  | 1640 | * @size: size of buffer (same as passed to dma_map_page) | 
|  | 1641 | * @dir: DMA transfer direction (same as passed to dma_map_page) | 
|  | 1642 | * | 
|  | 1643 | * Coherent IOMMU aware version of arm_dma_unmap_page() | 
|  | 1644 | */ | 
|  | 1645 | static void arm_coherent_iommu_unmap_page(struct device *dev, dma_addr_t handle, | 
|  | 1646 | size_t size, enum dma_data_direction dir, | 
|  | 1647 | struct dma_attrs *attrs) | 
|  | 1648 | { | 
|  | 1649 | struct dma_iommu_mapping *mapping = dev->archdata.mapping; | 
|  | 1650 | dma_addr_t iova = handle & PAGE_MASK; | 
| Rob Herring | 0fa478d | 2012-08-21 12:23:23 +0200 | [diff] [blame] | 1651 | int offset = handle & ~PAGE_MASK; | 
|  | 1652 | int len = PAGE_ALIGN(size + offset); | 
| Marek Szyprowski | 4ce63fc | 2012-05-16 15:48:21 +0200 | [diff] [blame] | 1653 |  | 
| Rob Herring | 0fa478d | 2012-08-21 12:23:23 +0200 | [diff] [blame] | 1654 | if (!iova) | 
|  | 1655 | return; | 
|  | 1656 |  | 
|  | 1657 | iommu_unmap(mapping->domain, iova, len); | 
|  | 1658 | __free_iova(mapping, iova, len); | 
| Marek Szyprowski | 4ce63fc | 2012-05-16 15:48:21 +0200 | [diff] [blame] | 1659 | } | 
|  | 1660 |  | 
|  | 1661 | /** | 
|  | 1662 | * arm_iommu_unmap_page | 
|  | 1663 | * @dev: valid struct device pointer | 
|  | 1664 | * @handle: DMA address of buffer | 
|  | 1665 | * @size: size of buffer (same as passed to dma_map_page) | 
|  | 1666 | * @dir: DMA transfer direction (same as passed to dma_map_page) | 
|  | 1667 | * | 
|  | 1668 | * IOMMU aware version of arm_dma_unmap_page() | 
|  | 1669 | */ | 
|  | 1670 | static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle, | 
|  | 1671 | size_t size, enum dma_data_direction dir, | 
|  | 1672 | struct dma_attrs *attrs) | 
|  | 1673 | { | 
|  | 1674 | struct dma_iommu_mapping *mapping = dev->archdata.mapping; | 
|  | 1675 | dma_addr_t iova = handle & PAGE_MASK; | 
|  | 1676 | struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova)); | 
|  | 1677 | int offset = handle & ~PAGE_MASK; | 
|  | 1678 | int len = PAGE_ALIGN(size + offset); | 
|  | 1679 |  | 
|  | 1680 | if (!iova) | 
|  | 1681 | return; | 
|  | 1682 |  | 
| Rob Herring | 0fa478d | 2012-08-21 12:23:23 +0200 | [diff] [blame] | 1683 | if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) | 
| Marek Szyprowski | 4ce63fc | 2012-05-16 15:48:21 +0200 | [diff] [blame] | 1684 | __dma_page_dev_to_cpu(page, offset, size, dir); | 
|  | 1685 |  | 
|  | 1686 | iommu_unmap(mapping->domain, iova, len); | 
|  | 1687 | __free_iova(mapping, iova, len); | 
|  | 1688 | } | 
|  | 1689 |  | 
|  | 1690 | static void arm_iommu_sync_single_for_cpu(struct device *dev, | 
|  | 1691 | dma_addr_t handle, size_t size, enum dma_data_direction dir) | 
|  | 1692 | { | 
|  | 1693 | struct dma_iommu_mapping *mapping = dev->archdata.mapping; | 
|  | 1694 | dma_addr_t iova = handle & PAGE_MASK; | 
|  | 1695 | struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova)); | 
|  | 1696 | unsigned int offset = handle & ~PAGE_MASK; | 
|  | 1697 |  | 
|  | 1698 | if (!iova) | 
|  | 1699 | return; | 
|  | 1700 |  | 
| Rob Herring | 0fa478d | 2012-08-21 12:23:23 +0200 | [diff] [blame] | 1701 | __dma_page_dev_to_cpu(page, offset, size, dir); | 
| Marek Szyprowski | 4ce63fc | 2012-05-16 15:48:21 +0200 | [diff] [blame] | 1702 | } | 
|  | 1703 |  | 
|  | 1704 | static void arm_iommu_sync_single_for_device(struct device *dev, | 
|  | 1705 | dma_addr_t handle, size_t size, enum dma_data_direction dir) | 
|  | 1706 | { | 
|  | 1707 | struct dma_iommu_mapping *mapping = dev->archdata.mapping; | 
|  | 1708 | dma_addr_t iova = handle & PAGE_MASK; | 
|  | 1709 | struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova)); | 
|  | 1710 | unsigned int offset = handle & ~PAGE_MASK; | 
|  | 1711 |  | 
|  | 1712 | if (!iova) | 
|  | 1713 | return; | 
|  | 1714 |  | 
|  | 1715 | __dma_page_cpu_to_dev(page, offset, size, dir); | 
|  | 1716 | } | 
|  | 1717 |  | 
|  | 1718 | struct dma_map_ops iommu_ops = { | 
|  | 1719 | .alloc		= arm_iommu_alloc_attrs, | 
|  | 1720 | .free		= arm_iommu_free_attrs, | 
|  | 1721 | .mmap		= arm_iommu_mmap_attrs, | 
| Marek Szyprowski | dc2832e | 2012-06-13 10:01:15 +0200 | [diff] [blame] | 1722 | .get_sgtable	= arm_iommu_get_sgtable, | 
| Marek Szyprowski | 4ce63fc | 2012-05-16 15:48:21 +0200 | [diff] [blame] | 1723 |  | 
|  | 1724 | .map_page		= arm_iommu_map_page, | 
|  | 1725 | .unmap_page		= arm_iommu_unmap_page, | 
|  | 1726 | .sync_single_for_cpu	= arm_iommu_sync_single_for_cpu, | 
|  | 1727 | .sync_single_for_device	= arm_iommu_sync_single_for_device, | 
|  | 1728 |  | 
|  | 1729 | .map_sg			= arm_iommu_map_sg, | 
|  | 1730 | .unmap_sg		= arm_iommu_unmap_sg, | 
|  | 1731 | .sync_sg_for_cpu	= arm_iommu_sync_sg_for_cpu, | 
|  | 1732 | .sync_sg_for_device	= arm_iommu_sync_sg_for_device, | 
|  | 1733 | }; | 
|  | 1734 |  | 
| Rob Herring | 0fa478d | 2012-08-21 12:23:23 +0200 | [diff] [blame] | 1735 | struct dma_map_ops iommu_coherent_ops = { | 
|  | 1736 | .alloc		= arm_iommu_alloc_attrs, | 
|  | 1737 | .free		= arm_iommu_free_attrs, | 
|  | 1738 | .mmap		= arm_iommu_mmap_attrs, | 
|  | 1739 | .get_sgtable	= arm_iommu_get_sgtable, | 
|  | 1740 |  | 
|  | 1741 | .map_page	= arm_coherent_iommu_map_page, | 
|  | 1742 | .unmap_page	= arm_coherent_iommu_unmap_page, | 
|  | 1743 |  | 
|  | 1744 | .map_sg		= arm_coherent_iommu_map_sg, | 
|  | 1745 | .unmap_sg	= arm_coherent_iommu_unmap_sg, | 
|  | 1746 | }; | 
|  | 1747 |  | 
| Marek Szyprowski | 4ce63fc | 2012-05-16 15:48:21 +0200 | [diff] [blame] | 1748 | /** | 
|  | 1749 | * arm_iommu_create_mapping | 
|  | 1750 | * @bus: pointer to the bus holding the client device (for IOMMU calls) | 
|  | 1751 | * @base: start address of the valid IO address space | 
|  | 1752 | * @size: size of the valid IO address space | 
|  | 1753 | * @order: granularity of the IO address allocations (each allocation unit covers 2^order pages) | 
|  | 1754 | * | 
|  | 1755 | * Creates a mapping structure which holds information about used/unused | 
|  | 1756 | * IO address ranges; this is required to perform memory allocation and | 
|  | 1757 | * mapping with the IOMMU aware functions. | 
|  | 1758 | * | 
|  | 1759 | * The client device needs to be attached to the mapping with the | 
|  | 1760 | * arm_iommu_attach_device() function. | 
|  | 1761 | */ | 
|  | 1762 | struct dma_iommu_mapping * | 
|  | 1763 | arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size, | 
|  | 1764 | int order) | 
|  | 1765 | { | 
|  | 1766 | unsigned int count = size >> (PAGE_SHIFT + order); | 
|  | 1767 | unsigned int bitmap_size = BITS_TO_LONGS(count) * sizeof(long); | 
|  | 1768 | struct dma_iommu_mapping *mapping; | 
|  | 1769 | int err = -ENOMEM; | 
|  | 1770 |  | 
|  | 1771 | if (!count) | 
|  | 1772 | return ERR_PTR(-EINVAL); | 
|  | 1773 |  | 
|  | 1774 | mapping = kzalloc(sizeof(struct dma_iommu_mapping), GFP_KERNEL); | 
|  | 1775 | if (!mapping) | 
|  | 1776 | goto err; | 
|  | 1777 |  | 
|  | 1778 | mapping->bitmap = kzalloc(bitmap_size, GFP_KERNEL); | 
|  | 1779 | if (!mapping->bitmap) | 
|  | 1780 | goto err2; | 
|  | 1781 |  | 
|  | 1782 | mapping->base = base; | 
|  | 1783 | mapping->bits = BITS_PER_BYTE * bitmap_size; | 
|  | 1784 | mapping->order = order; | 
|  | 1785 | spin_lock_init(&mapping->lock); | 
|  | 1786 |  | 
|  | 1787 | mapping->domain = iommu_domain_alloc(bus); | 
|  | 1788 | if (!mapping->domain) | 
|  | 1789 | goto err3; | 
|  | 1790 |  | 
|  | 1791 | kref_init(&mapping->kref); | 
|  | 1792 | return mapping; | 
|  | 1793 | err3: | 
|  | 1794 | kfree(mapping->bitmap); | 
|  | 1795 | err2: | 
|  | 1796 | kfree(mapping); | 
|  | 1797 | err: | 
|  | 1798 | return ERR_PTR(err); | 
|  | 1799 | } | 
|  | 1800 |  | 
|  | 1801 | static void release_iommu_mapping(struct kref *kref) | 
|  | 1802 | { | 
|  | 1803 | struct dma_iommu_mapping *mapping = | 
|  | 1804 | container_of(kref, struct dma_iommu_mapping, kref); | 
|  | 1805 |  | 
|  | 1806 | iommu_domain_free(mapping->domain); | 
|  | 1807 | kfree(mapping->bitmap); | 
|  | 1808 | kfree(mapping); | 
|  | 1809 | } | 
|  | 1810 |  | 
|  | 1811 | void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping) | 
|  | 1812 | { | 
|  | 1813 | if (mapping) | 
|  | 1814 | kref_put(&mapping->kref, release_iommu_mapping); | 
|  | 1815 | } | 
|  | 1816 |  | 
|  | 1817 | /** | 
|  | 1818 | * arm_iommu_attach_device | 
|  | 1819 | * @dev: valid struct device pointer | 
|  | 1820 | * @mapping: io address space mapping structure (returned from | 
|  | 1821 | *	arm_iommu_create_mapping) | 
|  | 1822 | * | 
|  | 1823 | * Attaches the specified io address space mapping to the provided device. | 
|  | 1824 | * This replaces the dma operations (dma_map_ops pointer) with the | 
|  | 1825 | * IOMMU aware version. More than one client might be attached to | 
|  | 1826 | * the same io address space mapping. | 
|  | 1827 | */ | 
|  | 1828 | int arm_iommu_attach_device(struct device *dev, | 
|  | 1829 | struct dma_iommu_mapping *mapping) | 
|  | 1830 | { | 
|  | 1831 | int err; | 
|  | 1832 |  | 
|  | 1833 | err = iommu_attach_device(mapping->domain, dev); | 
|  | 1834 | if (err) | 
|  | 1835 | return err; | 
|  | 1836 |  | 
|  | 1837 | kref_get(&mapping->kref); | 
|  | 1838 | dev->archdata.mapping = mapping; | 
|  | 1839 | set_dma_ops(dev, &iommu_ops); | 
|  | 1840 |  | 
| Hiroshi Doyu | 75c5971 | 2012-09-11 07:39:48 +0200 | [diff] [blame] | 1841 | pr_debug("Attached IOMMU controller to %s device.\n", dev_name(dev)); | 
| Marek Szyprowski | 4ce63fc | 2012-05-16 15:48:21 +0200 | [diff] [blame] | 1842 | return 0; | 
|  | 1843 | } | 
|  | 1844 |  | 
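|  |  | /* | 
|  |  | * Example (illustrative sketch, not part of this file): platform code | 
|  |  | * typically creates one mapping per IOMMU instance and attaches the | 
|  |  | * client device(s) to it.  The base/size/order values below are purely | 
|  |  | * illustrative; <asm/dma-iommu.h>, <linux/sizes.h> and | 
|  |  | * <linux/platform_device.h> are assumed: | 
|  |  | * | 
|  |  | *	struct dma_iommu_mapping *mapping; | 
|  |  | * | 
|  |  | *	mapping = arm_iommu_create_mapping(&platform_bus_type, | 
|  |  | *					   0x80000000, SZ_128M, 0); | 
|  |  | *	if (IS_ERR(mapping)) | 
|  |  | *		return PTR_ERR(mapping); | 
|  |  | * | 
|  |  | *	if (arm_iommu_attach_device(dev, mapping)) { | 
|  |  | *		arm_iommu_release_mapping(mapping); | 
|  |  | *		return -ENODEV; | 
|  |  | *	} | 
|  |  | * | 
|  |  | * From that point on dma_alloc_coherent() and dma_map_sg() on 'dev' are | 
|  |  | * routed through iommu_ops above. | 
|  |  | */ | 
|  |  |  | 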
|  | 1845 | #endif |