/*
 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
 *
 * Rewrite, cleanup, new allocation schemes, virtual merging:
 * Copyright (C) 2004 Olof Johansson, IBM Corporation
 *               and  Ben. Herrenschmidt, IBM Corporation
 *
 * Dynamic DMA mapping support, bus-independent parts.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/iommu.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/kdump.h>

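/* Debug trace points below are compiled out by default.  To follow the
 * mapping paths one could locally redefine this as a printk() wrapper,
 * e.g. "#define DBG(fmt...) printk(fmt)".
 */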
#define DBG(...)

#ifdef CONFIG_IOMMU_VMERGE
static int novmerge = 0;
#else
static int novmerge = 1;
#endif

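/* When set (the default), the last TCE entry below each 4 GB boundary
 * is reserved in iommu_init_table() so that no mapping can cross a
 * 4 GB boundary; "protect4gb=off" on the command line disables this.
 */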
static int protect4gb = 1;

static inline unsigned long iommu_num_pages(unsigned long vaddr,
                                            unsigned long slen)
{
        unsigned long npages;

        npages = IOMMU_PAGE_ALIGN(vaddr + slen) - (vaddr & IOMMU_PAGE_MASK);
        npages >>= IOMMU_PAGE_SHIFT;

        return npages;
}

static int __init setup_protect4gb(char *str)
{
        if (strcmp(str, "on") == 0)
                protect4gb = 1;
        else if (strcmp(str, "off") == 0)
                protect4gb = 0;

        return 1;
}

static int __init setup_iommu(char *str)
{
        if (!strcmp(str, "novmerge"))
                novmerge = 1;
        else if (!strcmp(str, "vmerge"))
                novmerge = 0;
        return 1;
}

__setup("protect4gb=", setup_protect4gb);
__setup("iommu=", setup_iommu);

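/* Allocate a contiguous run of npages TCE entries from the table's
 * bitmap.  Small allocations (15 pages or less) are served from the
 * lower part of the table and large ones from the upper quarter, each
 * with its own search hint, to limit fragmentation.  "mask" constrains
 * the highest acceptable entry (for devices with limited DMA ranges),
 * and the search retries up to two more passes over the two regions
 * before giving up.
 */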
static unsigned long iommu_range_alloc(struct iommu_table *tbl,
                                       unsigned long npages,
                                       unsigned long *handle,
                                       unsigned long mask,
                                       unsigned int align_order)
{
        unsigned long n, end, i, start;
        unsigned long limit;
        int largealloc = npages > 15;
        int pass = 0;
        unsigned long align_mask;

        /* Mask of the low align_order bits; written this way rather
         * than as a right shift of ~0ul to avoid an undefined shift
         * by 64 when align_order is 0.
         */
        align_mask = (1ull << align_order) - 1;

        /* This allocator was derived from x86_64's bit string search */

        /* Sanity check */
        if (unlikely(npages == 0)) {
                if (printk_ratelimit())
                        WARN_ON(1);
                return DMA_ERROR_CODE;
        }

        if (handle && *handle)
                start = *handle;
        else
                start = largealloc ? tbl->it_largehint : tbl->it_hint;

        /* Use only half of the table for small allocs (15 pages or less) */
        limit = largealloc ? tbl->it_size : tbl->it_halfpoint;

        if (largealloc && start < tbl->it_halfpoint)
                start = tbl->it_halfpoint;

        /* The case below can happen if we have a small segment appended
         * to a large one, or when the previous alloc was at the very end
         * of the available space. If so, go back to the initial start.
         */
        if (start >= limit)
                start = largealloc ? tbl->it_largehint : tbl->it_hint;

 again:

        if (limit + tbl->it_offset > mask) {
                limit = mask - tbl->it_offset + 1;
                /* If we're constrained on address range, first try
                 * at the masked hint to avoid O(n) search complexity,
                 * but on second pass, start at 0.
                 */
                if ((start & mask) >= limit || pass > 0)
                        start = 0;
                else
                        start &= mask;
        }

        n = find_next_zero_bit(tbl->it_map, limit, start);

        /* Align allocation */
        n = (n + align_mask) & ~align_mask;

        end = n + npages;

        if (unlikely(end >= limit)) {
                if (likely(pass < 2)) {
                        /* First failure, just rescan the half of the table.
                         * Second failure, rescan the other half of the table.
                         */
                        start = (largealloc ^ pass) ? tbl->it_halfpoint : 0;
                        limit = pass ? tbl->it_size : limit;
                        pass++;
                        goto again;
                } else {
                        /* Third failure, give up */
                        return DMA_ERROR_CODE;
                }
        }

        for (i = n; i < end; i++)
                if (test_bit(i, tbl->it_map)) {
                        start = i + 1;
                        goto again;
                }

        for (i = n; i < end; i++)
                __set_bit(i, tbl->it_map);

        /* Update the hints */
        if (largealloc) {
                /* Don't bump the hint to a new block, to avoid
                 * fragmenting the large allocation area.
                 */
                tbl->it_largehint = end;
        } else {
                /* Bump the hint to a new block for small allocs;
                 * overflow will be taken care of at the next allocation.
                 */
                tbl->it_hint = (end + tbl->it_blocksize - 1) &
                               ~(tbl->it_blocksize - 1);
        }

        /* Update handle for SG allocations */
        if (handle)
                *handle = end;

        return n;
}

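/* Map npages starting at "page" into the TCE table and return the
 * resulting bus address, or DMA_ERROR_CODE on failure.  Used by
 * iommu_map_single() and iommu_alloc_coherent(); sg lists go through
 * iommu_map_sg() instead.
 */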
static dma_addr_t iommu_alloc(struct iommu_table *tbl, void *page,
                unsigned int npages, enum dma_data_direction direction,
                unsigned long mask, unsigned int align_order)
{
        unsigned long entry, flags;
        dma_addr_t ret = DMA_ERROR_CODE;

        spin_lock_irqsave(&(tbl->it_lock), flags);

        entry = iommu_range_alloc(tbl, npages, NULL, mask, align_order);

        if (unlikely(entry == DMA_ERROR_CODE)) {
                spin_unlock_irqrestore(&(tbl->it_lock), flags);
                return DMA_ERROR_CODE;
        }

        entry += tbl->it_offset;        /* Offset into real TCE table */
        ret = entry << IOMMU_PAGE_SHIFT;        /* Set the return dma address */

        /* Put the TCEs in the HW table */
        ppc_md.tce_build(tbl, entry, npages, (unsigned long)page & IOMMU_PAGE_MASK,
                         direction);

        /* Flush/invalidate TLB caches if necessary */
        if (ppc_md.tce_flush)
                ppc_md.tce_flush(tbl);

        spin_unlock_irqrestore(&(tbl->it_lock), flags);

        /* Make sure updates are seen by hardware */
        mb();

        return ret;
}

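/* Unlocked helper: clears the TCE entries and the bitmap bits for a
 * previously mapped range.  Callers must hold tbl->it_lock and flush
 * the TCE TLB themselves afterwards.
 */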
static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
                         unsigned int npages)
{
        unsigned long entry, free_entry;
        unsigned long i;

        entry = dma_addr >> IOMMU_PAGE_SHIFT;
        free_entry = entry - tbl->it_offset;

        if (((free_entry + npages) > tbl->it_size) ||
            (entry < tbl->it_offset)) {
                if (printk_ratelimit()) {
                        printk(KERN_INFO "iommu_free: invalid entry\n");
                        printk(KERN_INFO "\tentry     = 0x%lx\n", entry);
                        printk(KERN_INFO "\tdma_addr  = 0x%lx\n", (u64)dma_addr);
                        printk(KERN_INFO "\tTable     = 0x%lx\n", (u64)tbl);
                        printk(KERN_INFO "\tbus#      = 0x%lx\n", (u64)tbl->it_busno);
                        printk(KERN_INFO "\tsize      = 0x%lx\n", (u64)tbl->it_size);
                        printk(KERN_INFO "\tstartOff  = 0x%lx\n", (u64)tbl->it_offset);
                        printk(KERN_INFO "\tindex     = 0x%lx\n", (u64)tbl->it_index);
                        WARN_ON(1);
                }
                return;
        }

        ppc_md.tce_free(tbl, entry, npages);

        for (i = 0; i < npages; i++)
                __clear_bit(free_entry + i, tbl->it_map);
}

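/* Locked counterpart of __iommu_free(): takes the table lock and
 * flushes the TCE TLB on platforms that need it.
 */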
static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
                       unsigned int npages)
{
        unsigned long flags;

        spin_lock_irqsave(&(tbl->it_lock), flags);

        __iommu_free(tbl, dma_addr, npages);

        /* Make sure TLB cache is flushed if the HW needs it. We do
         * not do an mb() here on purpose, it is not needed on any of
         * the current platforms.
         */
        if (ppc_md.tce_flush)
                ppc_md.tce_flush(tbl);

        spin_unlock_irqrestore(&(tbl->it_lock), flags);
}

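/* Map a scatterlist of nelems entries.  Adjacent elements whose
 * allocated DMA addresses turn out to be contiguous are merged into a
 * single DMA segment ("virtual merging") unless novmerge is set.
 * Returns the number of DMA segments produced, or 0 on failure.
 */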
int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist,
                 int nelems, unsigned long mask,
                 enum dma_data_direction direction)
{
        dma_addr_t dma_next = 0, dma_addr;
        unsigned long flags;
        struct scatterlist *s, *outs, *segstart;
        int outcount, incount;
        unsigned long handle;

        BUG_ON(direction == DMA_NONE);

        if ((nelems == 0) || !tbl)
                return 0;

        outs = s = segstart = &sglist[0];
        outcount = 1;
        incount = nelems;
        handle = 0;

        /* Init first segment length for backout at failure */
        outs->dma_length = 0;

        DBG("sg mapping %d elements:\n", nelems);

        spin_lock_irqsave(&(tbl->it_lock), flags);

        for (s = outs; nelems; nelems--, s++) {
                unsigned long vaddr, npages, entry, slen;

                slen = s->length;
                /* Sanity check */
                if (slen == 0) {
                        dma_next = 0;
                        continue;
                }
                /* Allocate iommu entries for that segment */
                vaddr = (unsigned long)page_address(s->page) + s->offset;
                npages = iommu_num_pages(vaddr, slen);
                entry = iommu_range_alloc(tbl, npages, &handle,
                                          mask >> IOMMU_PAGE_SHIFT, 0);

                DBG("  - vaddr: %lx, size: %lx\n", vaddr, slen);

                /* Handle failure */
                if (unlikely(entry == DMA_ERROR_CODE)) {
                        if (printk_ratelimit())
                                printk(KERN_INFO "iommu_alloc failed, tbl %p vaddr %lx"
                                       " npages %lx\n", tbl, vaddr, npages);
                        goto failure;
                }

                /* Convert entry to a dma_addr_t */
                entry += tbl->it_offset;
                dma_addr = entry << IOMMU_PAGE_SHIFT;
                dma_addr |= (s->offset & ~IOMMU_PAGE_MASK);

                DBG("  - %lu pages, entry: %lx, dma_addr: %lx\n",
                    npages, entry, dma_addr);

                /* Insert into HW table */
                ppc_md.tce_build(tbl, entry, npages, vaddr & IOMMU_PAGE_MASK, direction);

                /* If we are in an open segment, try merging */
                if (segstart != s) {
                        DBG("  - trying merge...\n");
                        /* We cannot merge if the allocated dma_addr isn't
                         * contiguous to the previous allocation.
                         */
                        if (novmerge || (dma_addr != dma_next)) {
                                /* Can't merge: create a new segment */
                                segstart = s;
                                outcount++;
                                outs++;
                                DBG("    can't merge, new segment.\n");
                        } else {
                                outs->dma_length += s->length;
                                DBG("    merged, new len: %x\n", outs->dma_length);
                        }
                }

                if (segstart == s) {
                        /* This is a new segment, fill entries */
                        DBG("  - filling new segment.\n");
                        outs->dma_address = dma_addr;
                        outs->dma_length = slen;
                }

                /* Calculate next page pointer for contiguous check */
                dma_next = dma_addr + slen;

                DBG("  - dma next is: %lx\n", dma_next);
        }

        /* Flush/invalidate TLB caches if necessary */
        if (ppc_md.tce_flush)
                ppc_md.tce_flush(tbl);

        spin_unlock_irqrestore(&(tbl->it_lock), flags);

        DBG("mapped %d elements:\n", outcount);

        /* For the sake of iommu_unmap_sg, we clear out the length in the
         * next entry of the sglist if we didn't fill the list completely
         */
        if (outcount < incount) {
                outs++;
                outs->dma_address = DMA_ERROR_CODE;
                outs->dma_length = 0;
        }

        /* Make sure updates are seen by hardware */
        mb();

        return outcount;

 failure:
        for (s = &sglist[0]; s <= outs; s++) {
                if (s->dma_length != 0) {
                        unsigned long dma_start, npages;

                        /* Note: dma_address is a bus address here,
                         * not a kernel virtual address.
                         */
                        dma_start = s->dma_address & IOMMU_PAGE_MASK;
                        npages = iommu_num_pages(s->dma_address, s->dma_length);
                        __iommu_free(tbl, dma_start, npages);
                        s->dma_address = DMA_ERROR_CODE;
                        s->dma_length = 0;
                }
        }
        spin_unlock_irqrestore(&(tbl->it_lock), flags);
        return 0;
}

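/* Undo a previous iommu_map_sg().  Walks the list until the first
 * zero-length entry (which iommu_map_sg() uses as a terminator when
 * segments were merged) and frees each mapping.
 */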
void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
                    int nelems, enum dma_data_direction direction)
{
        unsigned long flags;

        BUG_ON(direction == DMA_NONE);

        if (!tbl)
                return;

        spin_lock_irqsave(&(tbl->it_lock), flags);

        while (nelems--) {
                unsigned int npages;
                dma_addr_t dma_handle = sglist->dma_address;

                if (sglist->dma_length == 0)
                        break;
                npages = iommu_num_pages(dma_handle, sglist->dma_length);
                __iommu_free(tbl, dma_handle, npages);
                sglist++;
        }

        /* Flush/invalidate TLBs if necessary. As for iommu_free(), we
         * do not do an mb() here, the affected platforms do not need it
         * when freeing.
         */
        if (ppc_md.tce_flush)
                ppc_md.tce_flush(tbl);

        spin_unlock_irqrestore(&(tbl->it_lock), flags);
}

/*
 * Build an iommu_table structure.  This contains a bitmap which
 * is used to manage allocation of the TCE space.
 */
struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
{
        unsigned long sz;
        unsigned long start_index, end_index;
        unsigned long entries_per_4g;
        unsigned long index;
        static int welcomed = 0;
        struct page *page;

        /* Set aside 1/4 of the table for large allocations. */
        tbl->it_halfpoint = tbl->it_size * 3 / 4;

        /* number of bytes needed for the bitmap */
        sz = (tbl->it_size + 7) >> 3;

        page = alloc_pages_node(nid, GFP_ATOMIC, get_order(sz));
        if (!page)
                panic("iommu_init_table: Can't allocate %ld bytes\n", sz);
        tbl->it_map = page_address(page);
        memset(tbl->it_map, 0, sz);

        tbl->it_hint = 0;
        tbl->it_largehint = tbl->it_halfpoint;
        spin_lock_init(&tbl->it_lock);

#ifdef CONFIG_CRASH_DUMP
        if (ppc_md.tce_get) {
                unsigned long tceval;
                unsigned long tcecount = 0;

                /*
                 * Reserve the existing mappings left by the first kernel.
                 */
                for (index = 0; index < tbl->it_size; index++) {
                        tceval = ppc_md.tce_get(tbl, index + tbl->it_offset);
                        /*
                         * Freed TCE entry contains 0x7fffffffffffffff on JS20
                         */
                        if (tceval && (tceval != 0x7fffffffffffffffUL)) {
                                __set_bit(index, tbl->it_map);
                                tcecount++;
                        }
                }
                if ((tbl->it_size - tcecount) < KDUMP_MIN_TCE_ENTRIES) {
                        printk(KERN_WARNING "TCE table is full; freeing %d entries for the kdump boot\n",
                               KDUMP_MIN_TCE_ENTRIES);
                        for (index = tbl->it_size - KDUMP_MIN_TCE_ENTRIES;
                             index < tbl->it_size; index++)
                                __clear_bit(index, tbl->it_map);
                }
        }
#else
        /* Clear the hardware table in case firmware left allocations in it */
        ppc_md.tce_free(tbl, tbl->it_offset, tbl->it_size);
#endif

        /*
         * DMA cannot cross a 4 GB boundary.  Mark the last entry of
         * each 4 GB chunk as reserved.
         */
        if (protect4gb) {
                entries_per_4g = 0x100000000l >> IOMMU_PAGE_SHIFT;

                /* Mark the last bit before a 4GB boundary as used */
                start_index = tbl->it_offset | (entries_per_4g - 1);
                start_index -= tbl->it_offset;

                end_index = tbl->it_size;

                for (index = start_index; index < end_index - 1;
                     index += entries_per_4g)
                        __set_bit(index, tbl->it_map);
        }

        if (!welcomed) {
                printk(KERN_INFO "IOMMU table initialized, virtual merging %s\n",
                       novmerge ? "disabled" : "enabled");
                welcomed = 1;
        }

        return tbl;
}

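/* Tear down the iommu_table hanging off a device node: warn if live
 * TCE entries remain, then free the allocation bitmap and the table
 * itself.
 */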
void iommu_free_table(struct device_node *dn)
{
        struct pci_dn *pdn = dn->data;
        struct iommu_table *tbl = pdn->iommu_table;
        unsigned long bitmap_sz, i;
        unsigned int order;

        if (!tbl || !tbl->it_map) {
                printk(KERN_ERR "%s: expected TCE map for %s\n", __FUNCTION__,
                       dn->full_name);
                return;
        }

        /* verify that table contains no entries */
        /* it_size is in entries, and we're examining 64 at a time */
        for (i = 0; i < (tbl->it_size/64); i++) {
                if (tbl->it_map[i] != 0) {
                        printk(KERN_WARNING "%s: Unexpected TCEs for %s\n",
                               __FUNCTION__, dn->full_name);
                        break;
                }
        }

        /* calculate bitmap size in bytes */
        bitmap_sz = (tbl->it_size + 7) / 8;

        /* free bitmap */
        order = get_order(bitmap_sz);
        free_pages((unsigned long) tbl->it_map, order);

        /* free table */
        kfree(tbl);
}

/* Creates TCEs for a user-provided buffer.  The buffer must be
 * contiguous real kernel storage (not vmalloc).  The address passed
 * here is the kernel (virtual) address of the buffer.  The buffer
 * need not be page aligned; the dma_addr_t returned will point to
 * the same byte within the page as vaddr.
 */
dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr,
                            size_t size, unsigned long mask,
                            enum dma_data_direction direction)
{
        dma_addr_t dma_handle = DMA_ERROR_CODE;
        unsigned long uaddr;
        unsigned int npages;

        BUG_ON(direction == DMA_NONE);

        uaddr = (unsigned long)vaddr;
        npages = iommu_num_pages(uaddr, size);

        if (tbl) {
                dma_handle = iommu_alloc(tbl, vaddr, npages, direction,
                                         mask >> IOMMU_PAGE_SHIFT, 0);
                if (dma_handle == DMA_ERROR_CODE) {
                        if (printk_ratelimit())  {
                                printk(KERN_INFO "iommu_alloc failed, "
                                       "tbl %p vaddr %p npages %d\n",
                                       tbl, vaddr, npages);
                        }
                } else
                        dma_handle |= (uaddr & ~IOMMU_PAGE_MASK);
        }

        return dma_handle;
}

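/* Undo iommu_map_single(): frees the TCE entries covering the
 * mapping.  "size" must match the size passed when mapping.
 */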
void iommu_unmap_single(struct iommu_table *tbl, dma_addr_t dma_handle,
                        size_t size, enum dma_data_direction direction)
{
        unsigned int npages;

        BUG_ON(direction == DMA_NONE);

        if (tbl) {
                npages = iommu_num_pages(dma_handle, size);
                iommu_free(tbl, dma_handle, npages);
        }
}

/* Allocates a contiguous real buffer and creates mappings over it.
 * Returns the virtual address of the buffer and sets dma_handle
 * to the dma address (mapping) of the first page.
 */
void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size,
                dma_addr_t *dma_handle, unsigned long mask, gfp_t flag, int node)
{
        void *ret = NULL;
        dma_addr_t mapping;
        unsigned int order;
        unsigned int nio_pages, io_order;
        struct page *page;

        size = PAGE_ALIGN(size);
        order = get_order(size);

        /*
         * Client asked for way too much space.  This is checked later
         * anyway.  It is easier to debug here for the drivers than in
         * the tce tables.
         */
        if (order >= IOMAP_MAX_ORDER) {
                printk("iommu_alloc_coherent size too large: 0x%lx\n", size);
                return NULL;
        }

        if (!tbl)
                return NULL;

        /* Alloc enough pages (and possibly more) */
        page = alloc_pages_node(node, flag, order);
        if (!page)
                return NULL;
        ret = page_address(page);
        memset(ret, 0, size);

        /* Set up tces to cover the allocated range */
        nio_pages = size >> IOMMU_PAGE_SHIFT;
        io_order = get_iommu_order(size);
        mapping = iommu_alloc(tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
                              mask >> IOMMU_PAGE_SHIFT, io_order);
        if (mapping == DMA_ERROR_CODE) {
                free_pages((unsigned long)ret, order);
                return NULL;
        }
        *dma_handle = mapping;
        return ret;
}

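/* Free a buffer obtained from iommu_alloc_coherent(): tears down the
 * TCE mappings, then returns the pages to the page allocator.
 */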
void iommu_free_coherent(struct iommu_table *tbl, size_t size,
                         void *vaddr, dma_addr_t dma_handle)
{
        if (tbl) {
                unsigned int nio_pages;

                size = PAGE_ALIGN(size);
                nio_pages = size >> IOMMU_PAGE_SHIFT;
                iommu_free(tbl, dma_handle, nio_pages);
                free_pages((unsigned long)vaddr, get_order(size));
        }
}