/*
 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
 *
 * Rewrite, cleanup, new allocation schemes, virtual merging:
 * Copyright (C) 2004 Olof Johansson, IBM Corporation
 *               and  Ben. Herrenschmidt, IBM Corporation
 *
 * Dynamic DMA mapping support, bus-independent parts.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/iommu.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/kdump.h>

#define DBG(...)

#ifdef CONFIG_IOMMU_VMERGE
static int novmerge = 0;
#else
static int novmerge = 1;
#endif

static int protect4gb = 1;

static inline unsigned long iommu_num_pages(unsigned long vaddr,
					    unsigned long slen)
{
	unsigned long npages;

	npages = IOMMU_PAGE_ALIGN(vaddr + slen) - (vaddr & IOMMU_PAGE_MASK);
	npages >>= IOMMU_PAGE_SHIFT;
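
	/* Worked example (illustrative): with 4K IOMMU pages
	 * (IOMMU_PAGE_SHIFT == 12), vaddr == 0x1001 and slen == 0x2000
	 * touch bytes 0x1001..0x3000, so the computation above yields
	 * npages == (0x4000 - 0x1000) >> 12 == 3.
	 */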

	return npages;
}

static int __init setup_protect4gb(char *str)
{
	if (strcmp(str, "on") == 0)
		protect4gb = 1;
	else if (strcmp(str, "off") == 0)
		protect4gb = 0;

	return 1;
}

static int __init setup_iommu(char *str)
{
	if (!strcmp(str, "novmerge"))
		novmerge = 1;
	else if (!strcmp(str, "vmerge"))
		novmerge = 0;
	return 1;
}

__setup("protect4gb=", setup_protect4gb);
__setup("iommu=", setup_iommu);
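
/* Usage note (illustrative): these __setup() handlers parse kernel
 * command-line options, e.g. booting with "iommu=novmerge protect4gb=off"
 * disables virtual merging and the 4GB boundary protection respectively.
 */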

static unsigned long iommu_range_alloc(struct iommu_table *tbl,
				       unsigned long npages,
				       unsigned long *handle,
				       unsigned long mask,
				       unsigned int align_order)
{
	unsigned long n, end, i, start;
	unsigned long limit;
	int largealloc = npages > 15;
	int pass = 0;
	unsigned long align_mask;
	align_mask = (1ull << align_order) - 1;

	/* This allocator was derived from x86_64's bit string search */

	/* Sanity check */
	if (unlikely(npages == 0)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return DMA_ERROR_CODE;
	}

	if (handle && *handle)
		start = *handle;
	else
		start = largealloc ? tbl->it_largehint : tbl->it_hint;

	/* Use only half of the table for small allocs (15 pages or less) */
	limit = largealloc ? tbl->it_size : tbl->it_halfpoint;

	if (largealloc && start < tbl->it_halfpoint)
		start = tbl->it_halfpoint;

	/* The case below can happen if we have a small segment appended
	 * to a large one, or when the previous alloc was at the very end
	 * of the available space. If so, go back to the initial start.
	 */
	if (start >= limit)
		start = largealloc ? tbl->it_largehint : tbl->it_hint;

again:

	if (limit + tbl->it_offset > mask) {
		limit = mask - tbl->it_offset + 1;
		/* If we're constrained on address range, first try
		 * at the masked hint to avoid O(n) search complexity,
		 * but on second pass, start at 0.
		 */
		if ((start & mask) >= limit || pass > 0)
			start = 0;
		else
			start &= mask;
	}

	n = find_next_zero_bit(tbl->it_map, limit, start);

	/* Align allocation */
	n = (n + align_mask) & ~align_mask;

	end = n + npages;

	if (unlikely(end >= limit)) {
		if (likely(pass < 2)) {
			/* First failure, just rescan the half of the table.
			 * Second failure, rescan the other half of the table.
			 */
			start = (largealloc ^ pass) ? tbl->it_halfpoint : 0;
			limit = pass ? tbl->it_size : limit;
			pass++;
			goto again;
		} else {
			/* Third failure, give up */
			return DMA_ERROR_CODE;
		}
	}

	for (i = n; i < end; i++)
		if (test_bit(i, tbl->it_map)) {
			start = i + 1;
			goto again;
		}

	for (i = n; i < end; i++)
		__set_bit(i, tbl->it_map);

	/* Bump the hint to a new block for small allocs. */
	if (largealloc) {
		/* Don't bump to new block to avoid fragmentation */
		tbl->it_largehint = end;
	} else {
		/* Overflow will be taken care of at the next allocation */
		tbl->it_hint = (end + tbl->it_blocksize - 1) &
		               ~(tbl->it_blocksize - 1);
	}

	/* Update handle for SG allocations */
	if (handle)
		*handle = end;

	return n;
}

static dma_addr_t iommu_alloc(struct iommu_table *tbl, void *page,
		unsigned int npages, enum dma_data_direction direction,
		unsigned long mask, unsigned int align_order)
{
	unsigned long entry, flags;
	dma_addr_t ret = DMA_ERROR_CODE;

	spin_lock_irqsave(&(tbl->it_lock), flags);

	entry = iommu_range_alloc(tbl, npages, NULL, mask, align_order);

	if (unlikely(entry == DMA_ERROR_CODE)) {
		spin_unlock_irqrestore(&(tbl->it_lock), flags);
		return DMA_ERROR_CODE;
	}

	entry += tbl->it_offset;	/* Offset into real TCE table */
	ret = entry << IOMMU_PAGE_SHIFT;	/* Set the return dma address */

	/* Put the TCEs in the HW table */
	ppc_md.tce_build(tbl, entry, npages, (unsigned long)page & IOMMU_PAGE_MASK,
			 direction);

	/* Flush/invalidate TLB caches if necessary */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	spin_unlock_irqrestore(&(tbl->it_lock), flags);

	/* Make sure updates are seen by hardware */
	mb();

	return ret;
}

static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
			 unsigned int npages)
{
	unsigned long entry, free_entry;
	unsigned long i;

	entry = dma_addr >> IOMMU_PAGE_SHIFT;
	free_entry = entry - tbl->it_offset;

	if (((free_entry + npages) > tbl->it_size) ||
	    (entry < tbl->it_offset)) {
		if (printk_ratelimit()) {
			printk(KERN_INFO "iommu_free: invalid entry\n");
			printk(KERN_INFO "\tentry = 0x%lx\n", entry);
			printk(KERN_INFO "\tdma_addr = 0x%lx\n", (u64)dma_addr);
			printk(KERN_INFO "\tTable = 0x%lx\n", (u64)tbl);
			printk(KERN_INFO "\tbus# = 0x%lx\n", (u64)tbl->it_busno);
			printk(KERN_INFO "\tsize = 0x%lx\n", (u64)tbl->it_size);
			printk(KERN_INFO "\tstartOff = 0x%lx\n", (u64)tbl->it_offset);
			printk(KERN_INFO "\tindex = 0x%lx\n", (u64)tbl->it_index);
			WARN_ON(1);
		}
		return;
	}

	ppc_md.tce_free(tbl, entry, npages);

	for (i = 0; i < npages; i++)
		__clear_bit(free_entry + i, tbl->it_map);
}

static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
		       unsigned int npages)
{
	unsigned long flags;

	spin_lock_irqsave(&(tbl->it_lock), flags);

	__iommu_free(tbl, dma_addr, npages);

	/* Make sure TLB cache is flushed if the HW needs it. We do
	 * not do an mb() here on purpose, it is not needed on any of
	 * the current platforms.
	 */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	spin_unlock_irqrestore(&(tbl->it_lock), flags);
}

int iommu_map_sg(struct device *dev, struct scatterlist *sglist,
		 int nelems, unsigned long mask,
		 enum dma_data_direction direction)
{
	struct iommu_table *tbl = dev->archdata.dma_data;
	dma_addr_t dma_next = 0, dma_addr;
	unsigned long flags;
	struct scatterlist *s, *outs, *segstart;
	int outcount, incount, i;
	unsigned int align;
	unsigned long handle;
	unsigned int max_seg_size;

	BUG_ON(direction == DMA_NONE);

	if ((nelems == 0) || !tbl)
		return 0;

	outs = s = segstart = &sglist[0];
	outcount = 1;
	incount = nelems;
	handle = 0;

	/* Init first segment length for backout at failure */
	outs->dma_length = 0;

	DBG("sg mapping %d elements:\n", nelems);

	spin_lock_irqsave(&(tbl->it_lock), flags);

	max_seg_size = dma_get_max_seg_size(dev);
	for_each_sg(sglist, s, nelems, i) {
		unsigned long vaddr, npages, entry, slen;

		slen = s->length;
		/* Sanity check */
		if (slen == 0) {
			dma_next = 0;
			continue;
		}
		/* Allocate iommu entries for that segment */
		vaddr = (unsigned long) sg_virt(s);
		npages = iommu_num_pages(vaddr, slen);
		align = 0;
		if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && slen >= PAGE_SIZE &&
		    (vaddr & ~PAGE_MASK) == 0)
			align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;
		entry = iommu_range_alloc(tbl, npages, &handle,
					  mask >> IOMMU_PAGE_SHIFT, align);

		DBG(" - vaddr: %lx, size: %lx\n", vaddr, slen);

		/* Handle failure */
		if (unlikely(entry == DMA_ERROR_CODE)) {
			if (printk_ratelimit())
				printk(KERN_INFO "iommu_alloc failed, tbl %p vaddr %lx"
				       " npages %lx\n", tbl, vaddr, npages);
			goto failure;
		}

		/* Convert entry to a dma_addr_t */
		entry += tbl->it_offset;
		dma_addr = entry << IOMMU_PAGE_SHIFT;
		dma_addr |= (s->offset & ~IOMMU_PAGE_MASK);

		DBG(" - %lu pages, entry: %lx, dma_addr: %lx\n",
		    npages, entry, dma_addr);

		/* Insert into HW table */
		ppc_md.tce_build(tbl, entry, npages, vaddr & IOMMU_PAGE_MASK, direction);

		/* If we are in an open segment, try merging */
		if (segstart != s) {
			DBG(" - trying merge...\n");
			/* We cannot merge if:
			 * - the allocated dma_addr isn't contiguous with the
			 *   previous allocation, or
			 * - merging would push the segment past the device's
			 *   max segment size.
			 */
			if (novmerge || (dma_addr != dma_next) ||
			    (outs->dma_length + s->length > max_seg_size)) {
				/* Can't merge: create a new segment */
				segstart = s;
				outcount++;
				outs = sg_next(outs);
				DBG(" can't merge, new segment.\n");
			} else {
				outs->dma_length += s->length;
				DBG(" merged, new len: %u\n", outs->dma_length);
			}
		}

		if (segstart == s) {
			/* This is a new segment, fill entries */
			DBG(" - filling new segment.\n");
			outs->dma_address = dma_addr;
			outs->dma_length = slen;
		}

		/* Calculate next page pointer for contiguous check */
		dma_next = dma_addr + slen;

		DBG(" - dma next is: %lx\n", dma_next);
	}

	/* Flush/invalidate TLB caches if necessary */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	spin_unlock_irqrestore(&(tbl->it_lock), flags);

	DBG("mapped %d elements:\n", outcount);

	/* For the sake of iommu_unmap_sg, we clear out the length in the
	 * next entry of the sglist if we didn't fill the list completely
	 */
	if (outcount < incount) {
		outs = sg_next(outs);
		outs->dma_address = DMA_ERROR_CODE;
		outs->dma_length = 0;
	}

	/* Make sure updates are seen by hardware */
	mb();

	return outcount;

failure:
	for_each_sg(sglist, s, nelems, i) {
		if (s->dma_length != 0) {
			unsigned long vaddr, npages;

			vaddr = s->dma_address & IOMMU_PAGE_MASK;
			npages = iommu_num_pages(s->dma_address, s->dma_length);
			__iommu_free(tbl, vaddr, npages);
			s->dma_address = DMA_ERROR_CODE;
			s->dma_length = 0;
		}
		if (s == outs)
			break;
	}
	spin_unlock_irqrestore(&(tbl->it_lock), flags);
	return 0;
}
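
/* Merge behaviour sketch (illustrative): with virtual merging enabled
 * (novmerge == 0), consecutive scatterlist entries whose TCE allocations
 * come back back-to-back (dma_addr == dma_next) are coalesced into a single
 * dma segment, provided the combined length stays within
 * dma_get_max_seg_size(dev).
 */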

void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
		    int nelems, enum dma_data_direction direction)
{
	struct scatterlist *sg;
	unsigned long flags;

	BUG_ON(direction == DMA_NONE);

	if (!tbl)
		return;

	spin_lock_irqsave(&(tbl->it_lock), flags);

	sg = sglist;
	while (nelems--) {
		unsigned int npages;
		dma_addr_t dma_handle = sg->dma_address;

		if (sg->dma_length == 0)
			break;
		npages = iommu_num_pages(dma_handle, sg->dma_length);
		__iommu_free(tbl, dma_handle, npages);
		sg = sg_next(sg);
	}

	/* Flush/invalidate TLBs if necessary. As for iommu_free(), we
	 * do not do an mb() here, the affected platforms do not need it
	 * when freeing.
	 */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	spin_unlock_irqrestore(&(tbl->it_lock), flags);
}

/*
 * Build an iommu_table structure. This contains a bitmap which
 * is used to manage allocation of the TCE space.
 */
struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
{
	unsigned long sz;
	unsigned long start_index, end_index;
	unsigned long entries_per_4g;
	unsigned long index;
	static int welcomed = 0;
	struct page *page;

	/* Set aside 1/4 of the table for large allocations. */
	tbl->it_halfpoint = tbl->it_size * 3 / 4;

	/* number of bytes needed for the bitmap */
	sz = (tbl->it_size + 7) >> 3;

	page = alloc_pages_node(nid, GFP_ATOMIC, get_order(sz));
	if (!page)
		panic("iommu_init_table: Can't allocate %ld bytes\n", sz);
	tbl->it_map = page_address(page);
	memset(tbl->it_map, 0, sz);

	tbl->it_hint = 0;
	tbl->it_largehint = tbl->it_halfpoint;
	spin_lock_init(&tbl->it_lock);

#ifdef CONFIG_CRASH_DUMP
	if (ppc_md.tce_get) {
		unsigned long tceval;
		unsigned long tcecount = 0;

		/*
		 * Reserve the existing mappings left by the first kernel.
		 */
		for (index = 0; index < tbl->it_size; index++) {
			tceval = ppc_md.tce_get(tbl, index + tbl->it_offset);
			/*
			 * Freed TCE entry contains 0x7fffffffffffffff on JS20
			 */
			if (tceval && (tceval != 0x7fffffffffffffffUL)) {
				__set_bit(index, tbl->it_map);
				tcecount++;
			}
		}
		if ((tbl->it_size - tcecount) < KDUMP_MIN_TCE_ENTRIES) {
			printk(KERN_WARNING "TCE table is full; ");
			printk(KERN_WARNING "freeing %d entries for the kdump boot\n",
			       KDUMP_MIN_TCE_ENTRIES);
			for (index = tbl->it_size - KDUMP_MIN_TCE_ENTRIES;
			     index < tbl->it_size; index++)
				__clear_bit(index, tbl->it_map);
		}
	}
#else
	/* Clear the hardware table in case firmware left allocations in it */
	ppc_md.tce_free(tbl, tbl->it_offset, tbl->it_size);
#endif

	/*
	 * DMA cannot cross 4 GB boundary.  Mark last entry of each 4
	 * GB chunk as reserved.
	 */
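	/* Illustration (assuming 4K IOMMU pages, IOMMU_PAGE_SHIFT == 12):
	 * entries_per_4g is then 0x100000, so with it_offset == 0 the loop
	 * below reserves bitmap indices 0xfffff, 0x1fffff, ... i.e. the
	 * last TCE slot before each 4GB boundary.
	 */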
	if (protect4gb) {
		entries_per_4g = 0x100000000l >> IOMMU_PAGE_SHIFT;

		/* Mark the last bit before a 4GB boundary as used */
		start_index = tbl->it_offset | (entries_per_4g - 1);
		start_index -= tbl->it_offset;

		end_index = tbl->it_size;

		for (index = start_index; index < end_index - 1; index += entries_per_4g)
			__set_bit(index, tbl->it_map);
	}

	if (!welcomed) {
		printk(KERN_INFO "IOMMU table initialized, virtual merging %s\n",
		       novmerge ? "disabled" : "enabled");
		welcomed = 1;
	}

	return tbl;
}

void iommu_free_table(struct iommu_table *tbl, const char *node_name)
{
	unsigned long bitmap_sz, i;
	unsigned int order;

	if (!tbl || !tbl->it_map) {
		printk(KERN_ERR "%s: expected TCE map for %s\n", __FUNCTION__,
		       node_name);
		return;
	}

	/* verify that table contains no entries */
	/* it_size is in entries, and we're examining 64 at a time */
	for (i = 0; i < (tbl->it_size/64); i++) {
		if (tbl->it_map[i] != 0) {
			printk(KERN_WARNING "%s: Unexpected TCEs for %s\n",
			       __FUNCTION__, node_name);
			break;
		}
	}

	/* calculate bitmap size in bytes */
	bitmap_sz = (tbl->it_size + 7) / 8;

	/* free bitmap */
	order = get_order(bitmap_sz);
	free_pages((unsigned long) tbl->it_map, order);

	/* free table */
	kfree(tbl);
}

/* Creates TCEs for a user-provided buffer. The buffer must be contiguous
 * real kernel storage (not vmalloc). The address passed here is the
 * kernel (virtual) address of the buffer. The buffer need not be page
 * aligned; the dma_addr_t returned will point to the same byte within
 * the page as vaddr.
 */
dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr,
			    size_t size, unsigned long mask,
			    enum dma_data_direction direction)
{
	dma_addr_t dma_handle = DMA_ERROR_CODE;
	unsigned long uaddr;
	unsigned int npages, align;

	BUG_ON(direction == DMA_NONE);

	uaddr = (unsigned long)vaddr;
	npages = iommu_num_pages(uaddr, size);

	if (tbl) {
		align = 0;
		if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && size >= PAGE_SIZE &&
		    ((unsigned long)vaddr & ~PAGE_MASK) == 0)
			align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;

		dma_handle = iommu_alloc(tbl, vaddr, npages, direction,
					 mask >> IOMMU_PAGE_SHIFT, align);
		if (dma_handle == DMA_ERROR_CODE) {
			if (printk_ratelimit()) {
				printk(KERN_INFO "iommu_alloc failed, "
				       "tbl %p vaddr %p npages %d\n",
				       tbl, vaddr, npages);
			}
		} else
			dma_handle |= (uaddr & ~IOMMU_PAGE_MASK);
	}

	return dma_handle;
}

void iommu_unmap_single(struct iommu_table *tbl, dma_addr_t dma_handle,
			size_t size, enum dma_data_direction direction)
{
	unsigned int npages;

	BUG_ON(direction == DMA_NONE);

	if (tbl) {
		npages = iommu_num_pages(dma_handle, size);
		iommu_free(tbl, dma_handle, npages);
	}
}

/* Allocates a contiguous real buffer and creates mappings over it.
 * Returns the virtual address of the buffer and sets dma_handle
 * to the dma address (mapping) of the first page.
 */
void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size,
		dma_addr_t *dma_handle, unsigned long mask, gfp_t flag, int node)
{
	void *ret = NULL;
	dma_addr_t mapping;
	unsigned int order;
	unsigned int nio_pages, io_order;
	struct page *page;

	size = PAGE_ALIGN(size);
	order = get_order(size);

	/*
	 * Client asked for way too much space. This is checked later
	 * anyway. It is easier to debug here for the drivers than in
	 * the tce tables.
	 */
	if (order >= IOMAP_MAX_ORDER) {
		printk(KERN_INFO "iommu_alloc_coherent: size too large: 0x%lx\n",
		       size);
		return NULL;
	}

	if (!tbl)
		return NULL;

	/* Alloc enough pages (and possibly more) */
	page = alloc_pages_node(node, flag, order);
	if (!page)
		return NULL;
	ret = page_address(page);
	memset(ret, 0, size);

	/* Set up tces to cover the allocated range */
	nio_pages = size >> IOMMU_PAGE_SHIFT;
	io_order = get_iommu_order(size);
	mapping = iommu_alloc(tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
			      mask >> IOMMU_PAGE_SHIFT, io_order);
	if (mapping == DMA_ERROR_CODE) {
		free_pages((unsigned long)ret, order);
		return NULL;
	}
	*dma_handle = mapping;
	return ret;
}

void iommu_free_coherent(struct iommu_table *tbl, size_t size,
			 void *vaddr, dma_addr_t dma_handle)
{
	if (tbl) {
		unsigned int nio_pages;

		size = PAGE_ALIGN(size);
		nio_pages = size >> IOMMU_PAGE_SHIFT;
		iommu_free(tbl, dma_handle, nio_pages);
		free_pages((unsigned long)vaddr, get_order(size));
	}
}