/*
 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
 *
 * Rewrite, cleanup, new allocation schemes, virtual merging:
 * Copyright (C) 2004 Olof Johansson, IBM Corporation
 *               and  Ben. Herrenschmidt, IBM Corporation
 *
 * Dynamic DMA mapping support, bus-independent parts.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/bitmap.h>
#include <linux/iommu-helper.h>
#include <linux/crash_dump.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/iommu.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/kdump.h>
#include <asm/fadump.h>

#define DBG(...)

static int novmerge;

static void __iommu_free(struct iommu_table *, dma_addr_t, unsigned int);

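/*
 * Parse the "iommu=" boot parameter: "novmerge" disables virtual merging
 * of scatterlist entries, "vmerge" (the default) enables it.
 */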
static int __init setup_iommu(char *str)
{
	if (!strcmp(str, "novmerge"))
		novmerge = 1;
	else if (!strcmp(str, "vmerge"))
		novmerge = 0;
	return 1;
}

__setup("iommu=", setup_iommu);

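/*
 * Allocate a contiguous run of @npages entries from @tbl's bitmap.
 * Small allocations (15 pages or less) are confined to the region below
 * it_halfpoint and large ones to the region above it, each with its own
 * search hint, to limit fragmentation.  The search honours @mask,
 * @align_order and the device's segment boundary, and rescans each half
 * once before giving up.  Returns the entry index, or DMA_ERROR_CODE on
 * failure.  The caller must hold tbl->it_lock.
 */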
static unsigned long iommu_range_alloc(struct device *dev,
				       struct iommu_table *tbl,
				       unsigned long npages,
				       unsigned long *handle,
				       unsigned long mask,
				       unsigned int align_order)
{
	unsigned long n, end, start;
	unsigned long limit;
	int largealloc = npages > 15;
	int pass = 0;
	unsigned long align_mask;
	unsigned long boundary_size;

	align_mask = 0xffffffffffffffffl >> (64 - align_order);

	/* This allocator was derived from x86_64's bit string search */

	/* Sanity check */
	if (unlikely(npages == 0)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return DMA_ERROR_CODE;
	}

	if (handle && *handle)
		start = *handle;
	else
		start = largealloc ? tbl->it_largehint : tbl->it_hint;

	/* Use only half of the table for small allocs (15 pages or less) */
	limit = largealloc ? tbl->it_size : tbl->it_halfpoint;

	if (largealloc && start < tbl->it_halfpoint)
		start = tbl->it_halfpoint;

	/* The case below can happen if we have a small segment appended
	 * to a large, or when the previous alloc was at the very end of
	 * the available space. If so, go back to the initial start.
	 */
	if (start >= limit)
		start = largealloc ? tbl->it_largehint : tbl->it_hint;

 again:

	if (limit + tbl->it_offset > mask) {
		limit = mask - tbl->it_offset + 1;
		/* If we're constrained on address range, first try
		 * at the masked hint to avoid O(n) search complexity,
		 * but on second pass, start at 0.
		 */
		if ((start & mask) >= limit || pass > 0)
			start = 0;
		else
			start &= mask;
	}

	if (dev)
		boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
				      1 << IOMMU_PAGE_SHIFT);
	else
		boundary_size = ALIGN(1UL << 32, 1 << IOMMU_PAGE_SHIFT);
	/* 4GB boundary for iseries_hv_alloc and iseries_hv_map */

	n = iommu_area_alloc(tbl->it_map, limit, start, npages,
			     tbl->it_offset, boundary_size >> IOMMU_PAGE_SHIFT,
			     align_mask);
	if (n == -1) {
		if (likely(pass < 2)) {
			/* First failure, just rescan the half of the table.
			 * Second failure, rescan the other half of the table.
			 */
			start = (largealloc ^ pass) ? tbl->it_halfpoint : 0;
			limit = pass ? tbl->it_size : limit;
			pass++;
			goto again;
		} else {
			/* Third failure, give up */
			return DMA_ERROR_CODE;
		}
	}

	end = n + npages;

	/* Bump the hint to a new block for small allocs. */
	if (largealloc) {
		/* Don't bump to new block to avoid fragmentation */
		tbl->it_largehint = end;
	} else {
		/* Overflow will be taken care of at the next allocation */
		tbl->it_hint = (end + tbl->it_blocksize - 1) &
				~(tbl->it_blocksize - 1);
	}

	/* Update handle for SG allocations */
	if (handle)
		*handle = end;

	return n;
}

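/*
 * Allocate an IOMMU range for @page and program the hardware TCEs via
 * ppc_md.tce_build().  On a transient build failure the bitmap is
 * cleaned up and DMA_ERROR_CODE is returned.
 */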
static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
			      void *page, unsigned int npages,
			      enum dma_data_direction direction,
			      unsigned long mask, unsigned int align_order,
			      struct dma_attrs *attrs)
{
	unsigned long entry, flags;
	dma_addr_t ret = DMA_ERROR_CODE;
	int build_fail;

	spin_lock_irqsave(&(tbl->it_lock), flags);
	entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order);
	spin_unlock_irqrestore(&(tbl->it_lock), flags);

	if (unlikely(entry == DMA_ERROR_CODE))
		return DMA_ERROR_CODE;

	entry += tbl->it_offset;	/* Offset into real TCE table */
	ret = entry << IOMMU_PAGE_SHIFT;	/* Set the return dma address */

	/* Put the TCEs in the HW table */
	build_fail = ppc_md.tce_build(tbl, entry, npages,
				      (unsigned long)page & IOMMU_PAGE_MASK,
				      direction, attrs);

	/* ppc_md.tce_build() only returns non-zero for transient errors.
	 * Clean up the table bitmap in this case and return
	 * DMA_ERROR_CODE. For all other errors the functionality is
	 * not altered.
	 */
	if (unlikely(build_fail)) {
		__iommu_free(tbl, ret, npages);
		return DMA_ERROR_CODE;
	}

	/* Flush/invalidate TLB caches if necessary */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	/* Make sure updates are seen by hardware */
	mb();

	return ret;
}

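/*
 * Sanity check a (dma_addr, npages) pair against the table bounds before
 * freeing; logs the offending entry and returns false if it is invalid.
 */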
static bool iommu_free_check(struct iommu_table *tbl, dma_addr_t dma_addr,
			     unsigned int npages)
{
	unsigned long entry, free_entry;

	entry = dma_addr >> IOMMU_PAGE_SHIFT;
	free_entry = entry - tbl->it_offset;

	if (((free_entry + npages) > tbl->it_size) ||
	    (entry < tbl->it_offset)) {
		if (printk_ratelimit()) {
			printk(KERN_INFO "iommu_free: invalid entry\n");
			printk(KERN_INFO "\tentry    = 0x%lx\n", entry);
			printk(KERN_INFO "\tdma_addr = 0x%llx\n", (u64)dma_addr);
			printk(KERN_INFO "\tTable    = 0x%llx\n", (u64)tbl);
			printk(KERN_INFO "\tbus#     = 0x%llx\n", (u64)tbl->it_busno);
			printk(KERN_INFO "\tsize     = 0x%llx\n", (u64)tbl->it_size);
			printk(KERN_INFO "\tstartOff = 0x%llx\n", (u64)tbl->it_offset);
			printk(KERN_INFO "\tindex    = 0x%llx\n", (u64)tbl->it_index);
			WARN_ON(1);
		}

		return false;
	}

	return true;
}

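/*
 * Free a range with tbl->it_lock already held by the caller (enforced by
 * the BUG_ON below); used by iommu_unmap_sg() and on iommu_map_sg()'s
 * failure path.
 */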
static void __iommu_free_locked(struct iommu_table *tbl, dma_addr_t dma_addr,
			 unsigned int npages)
{
	unsigned long entry, free_entry;

	BUG_ON(!spin_is_locked(&tbl->it_lock));

	entry = dma_addr >> IOMMU_PAGE_SHIFT;
	free_entry = entry - tbl->it_offset;

	if (!iommu_free_check(tbl, dma_addr, npages))
		return;

	ppc_md.tce_free(tbl, entry, npages);
	bitmap_clear(tbl->it_map, free_entry, npages);
}

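/*
 * Variant for callers that do not hold tbl->it_lock: the hardware TCEs
 * are cleared first, then the lock is taken only to update the
 * allocation bitmap.
 */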
static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
			 unsigned int npages)
{
	unsigned long entry, free_entry;
	unsigned long flags;

	entry = dma_addr >> IOMMU_PAGE_SHIFT;
	free_entry = entry - tbl->it_offset;

	if (!iommu_free_check(tbl, dma_addr, npages))
		return;

	ppc_md.tce_free(tbl, entry, npages);

	spin_lock_irqsave(&(tbl->it_lock), flags);
	bitmap_clear(tbl->it_map, free_entry, npages);
	spin_unlock_irqrestore(&(tbl->it_lock), flags);
}

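/*
 * Free a mapped range, flushing the TCE table if the platform needs it.
 */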
static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
		unsigned int npages)
{
	__iommu_free(tbl, dma_addr, npages);

	/* Make sure TLB cache is flushed if the HW needs it. We do
	 * not do an mb() here on purpose, it is not needed on any of
	 * the current platforms.
	 */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);
}

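/*
 * Map a scatterlist for DMA.  Each segment gets its own IOMMU allocation,
 * and adjacent segments whose DMA addresses turn out contiguous are
 * merged into a single output segment, unless booted with iommu=novmerge
 * or the device's max segment size would be exceeded.  Returns the number
 * of output segments, or 0 on failure with all partial mappings undone.
 */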
int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
		 struct scatterlist *sglist, int nelems,
		 unsigned long mask, enum dma_data_direction direction,
		 struct dma_attrs *attrs)
{
	dma_addr_t dma_next = 0, dma_addr;
	unsigned long flags;
	struct scatterlist *s, *outs, *segstart;
	int outcount, incount, i, build_fail = 0;
	unsigned int align;
	unsigned long handle;
	unsigned int max_seg_size;

	BUG_ON(direction == DMA_NONE);

	if ((nelems == 0) || !tbl)
		return 0;

	outs = s = segstart = &sglist[0];
	outcount = 1;
	incount = nelems;
	handle = 0;

	/* Init first segment length for backout at failure */
	outs->dma_length = 0;

	DBG("sg mapping %d elements:\n", nelems);

	spin_lock_irqsave(&(tbl->it_lock), flags);

	max_seg_size = dma_get_max_seg_size(dev);
	for_each_sg(sglist, s, nelems, i) {
		unsigned long vaddr, npages, entry, slen;

		slen = s->length;
		/* Sanity check */
		if (slen == 0) {
			dma_next = 0;
			continue;
		}
		/* Allocate iommu entries for that segment */
		vaddr = (unsigned long) sg_virt(s);
		npages = iommu_num_pages(vaddr, slen, IOMMU_PAGE_SIZE);
		align = 0;
		if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && slen >= PAGE_SIZE &&
		    (vaddr & ~PAGE_MASK) == 0)
			align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;
		entry = iommu_range_alloc(dev, tbl, npages, &handle,
					  mask >> IOMMU_PAGE_SHIFT, align);

		DBG("  - vaddr: %lx, size: %lx\n", vaddr, slen);

		/* Handle failure */
		if (unlikely(entry == DMA_ERROR_CODE)) {
			if (printk_ratelimit())
				dev_info(dev, "iommu_alloc failed, tbl %p "
					 "vaddr %lx npages %lu\n", tbl, vaddr,
					 npages);
			goto failure;
		}

		/* Convert entry to a dma_addr_t */
		entry += tbl->it_offset;
		dma_addr = entry << IOMMU_PAGE_SHIFT;
		dma_addr |= (s->offset & ~IOMMU_PAGE_MASK);

		DBG("  - %lu pages, entry: %lx, dma_addr: %lx\n",
			    npages, entry, dma_addr);

		/* Insert into HW table */
		build_fail = ppc_md.tce_build(tbl, entry, npages,
					      vaddr & IOMMU_PAGE_MASK,
					      direction, attrs);
		if (unlikely(build_fail))
			goto failure;

		/* If we are in an open segment, try merging */
		if (segstart != s) {
			DBG("  - trying merge...\n");
			/* We cannot merge if:
			 * - allocated dma_addr isn't contiguous to previous allocation
			 */
			if (novmerge || (dma_addr != dma_next) ||
			    (outs->dma_length + s->length > max_seg_size)) {
				/* Can't merge: create a new segment */
				segstart = s;
				outcount++;
				outs = sg_next(outs);
				DBG("    can't merge, new segment.\n");
			} else {
				outs->dma_length += s->length;
				DBG("    merged, new len: %ux\n", outs->dma_length);
			}
		}

		if (segstart == s) {
			/* This is a new segment, fill entries */
			DBG("  - filling new segment.\n");
			outs->dma_address = dma_addr;
			outs->dma_length = slen;
		}

		/* Calculate next page pointer for contiguous check */
		dma_next = dma_addr + slen;

		DBG("  - dma next is: %lx\n", dma_next);
	}

	/* Flush/invalidate TLB caches if necessary */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	spin_unlock_irqrestore(&(tbl->it_lock), flags);

	DBG("mapped %d elements:\n", outcount);

	/* For the sake of iommu_unmap_sg, we clear out the length in the
	 * next entry of the sglist if we didn't fill the list completely
	 */
	if (outcount < incount) {
		outs = sg_next(outs);
		outs->dma_address = DMA_ERROR_CODE;
		outs->dma_length = 0;
	}

	/* Make sure updates are seen by hardware */
	mb();

	return outcount;

 failure:
	for_each_sg(sglist, s, nelems, i) {
		if (s->dma_length != 0) {
			unsigned long vaddr, npages;

			vaddr = s->dma_address & IOMMU_PAGE_MASK;
			npages = iommu_num_pages(s->dma_address, s->dma_length,
						 IOMMU_PAGE_SIZE);
			__iommu_free_locked(tbl, vaddr, npages);
			s->dma_address = DMA_ERROR_CODE;
			s->dma_length = 0;
		}
		if (s == outs)
			break;
	}
	spin_unlock_irqrestore(&(tbl->it_lock), flags);
	return 0;
}


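/*
 * Unmap a scatterlist previously mapped with iommu_map_sg().  Walks the
 * list until the first zero-length entry (the terminator written by
 * iommu_map_sg) and frees each segment's IOMMU range.
 */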
void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
		int nelems, enum dma_data_direction direction,
		struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	unsigned long flags;

	BUG_ON(direction == DMA_NONE);

	if (!tbl)
		return;

	spin_lock_irqsave(&(tbl->it_lock), flags);

	sg = sglist;
	while (nelems--) {
		unsigned int npages;
		dma_addr_t dma_handle = sg->dma_address;

		if (sg->dma_length == 0)
			break;
		npages = iommu_num_pages(dma_handle, sg->dma_length,
					 IOMMU_PAGE_SIZE);
		__iommu_free_locked(tbl, dma_handle, npages);
		sg = sg_next(sg);
	}

	/* Flush/invalidate TLBs if necessary. As for iommu_free(), we
	 * do not do an mb() here, the affected platforms do not need it
	 * when freeing.
	 */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	spin_unlock_irqrestore(&(tbl->it_lock), flags);
}

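/*
 * Decide what to do with TCE entries left over from a previous kernel:
 * clear them on a normal or fadump boot, but preserve them in a kdump
 * kernel so that in-flight DMA set up by the crashed kernel cannot
 * corrupt memory, freeing only a minimal number of entries for the new
 * boot if the table is nearly full.
 */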
static void iommu_table_clear(struct iommu_table *tbl)
{
	/*
	 * With firmware-assisted dump, the system goes through a clean
	 * reboot at the time of the crash, so it is safe to clear the
	 * TCE entries when it is active.
	 */
	if (!is_kdump_kernel() || is_fadump_active()) {
		/* Clear the table in case firmware left allocations in it */
		ppc_md.tce_free(tbl, tbl->it_offset, tbl->it_size);
		return;
	}

#ifdef CONFIG_CRASH_DUMP
	if (ppc_md.tce_get) {
		unsigned long index, tceval, tcecount = 0;

		/* Reserve the existing mappings left by the first kernel. */
		for (index = 0; index < tbl->it_size; index++) {
			tceval = ppc_md.tce_get(tbl, index + tbl->it_offset);
			/*
			 * Freed TCE entry contains 0x7fffffffffffffff on JS20
			 */
			if (tceval && (tceval != 0x7fffffffffffffffUL)) {
				__set_bit(index, tbl->it_map);
				tcecount++;
			}
		}

		if ((tbl->it_size - tcecount) < KDUMP_MIN_TCE_ENTRIES) {
			printk(KERN_WARNING "TCE table is full; freeing ");
			printk(KERN_WARNING "%d entries for the kdump boot\n",
				KDUMP_MIN_TCE_ENTRIES);
			for (index = tbl->it_size - KDUMP_MIN_TCE_ENTRIES;
				index < tbl->it_size; index++)
				__clear_bit(index, tbl->it_map);
		}
	}
#endif
}

/*
 * Build an iommu_table structure.  This contains a bitmap which
 * is used to manage allocation of the tce space.
 */
struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
{
	unsigned long sz;
	static int welcomed = 0;
	struct page *page;

	/* Set aside 1/4 of the table for large allocations. */
	tbl->it_halfpoint = tbl->it_size * 3 / 4;

	/* number of bytes needed for the bitmap */
	sz = (tbl->it_size + 7) >> 3;

	page = alloc_pages_node(nid, GFP_ATOMIC, get_order(sz));
	if (!page)
		panic("iommu_init_table: Can't allocate %ld bytes\n", sz);
	tbl->it_map = page_address(page);
	memset(tbl->it_map, 0, sz);

	/*
	 * Reserve page 0 so it will not be used for any mappings.
	 * This avoids buggy drivers that consider page 0 to be invalid
	 * to crash the machine or even lose data.
	 */
	if (tbl->it_offset == 0)
		set_bit(0, tbl->it_map);

	tbl->it_hint = 0;
	tbl->it_largehint = tbl->it_halfpoint;
	spin_lock_init(&tbl->it_lock);

	iommu_table_clear(tbl);

	if (!welcomed) {
		printk(KERN_INFO "IOMMU table initialized, virtual merging %s\n",
		       novmerge ? "disabled" : "enabled");
		welcomed = 1;
	}

	return tbl;
}

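/*
 * Tear down a table built by iommu_init_table(): warn if any TCEs are
 * still mapped, then free the allocation bitmap and the table itself.
 */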
void iommu_free_table(struct iommu_table *tbl, const char *node_name)
{
	unsigned long bitmap_sz, i;
	unsigned int order;

	if (!tbl || !tbl->it_map) {
		printk(KERN_ERR "%s: expected TCE map for %s\n", __func__,
				node_name);
		return;
	}

	/* verify that table contains no entries */
	/* it_size is in entries, and we're examining 64 at a time */
	for (i = 0; i < (tbl->it_size/64); i++) {
		if (tbl->it_map[i] != 0) {
			printk(KERN_WARNING "%s: Unexpected TCEs for %s\n",
				__func__, node_name);
			break;
		}
	}

	/* calculate bitmap size in bytes */
	bitmap_sz = (tbl->it_size + 7) / 8;

	/* free bitmap */
	order = get_order(bitmap_sz);
	free_pages((unsigned long) tbl->it_map, order);

	/* free table */
	kfree(tbl);
}

/* Creates TCEs for a user provided buffer. The user buffer must be
 * contiguous real kernel storage (not vmalloc). The address passed here
 * comprises a page address and offset into that page. The dma_addr_t
 * returned will point to the same byte within the page as was passed in.
 */
dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
			  struct page *page, unsigned long offset, size_t size,
			  unsigned long mask, enum dma_data_direction direction,
			  struct dma_attrs *attrs)
{
	dma_addr_t dma_handle = DMA_ERROR_CODE;
	void *vaddr;
	unsigned long uaddr;
	unsigned int npages, align;

	BUG_ON(direction == DMA_NONE);

	vaddr = page_address(page) + offset;
	uaddr = (unsigned long)vaddr;
	npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE);

	if (tbl) {
		align = 0;
		if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && size >= PAGE_SIZE &&
		    ((unsigned long)vaddr & ~PAGE_MASK) == 0)
			align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;

		dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction,
					 mask >> IOMMU_PAGE_SHIFT, align,
					 attrs);
		if (dma_handle == DMA_ERROR_CODE) {
			if (printk_ratelimit()) {
				dev_info(dev, "iommu_alloc failed, tbl %p "
					 "vaddr %p npages %d\n", tbl, vaddr,
					 npages);
			}
		} else
			dma_handle |= (uaddr & ~IOMMU_PAGE_MASK);
	}

	return dma_handle;
}

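/*
 * Undo iommu_map_page(): free the IOMMU pages backing @dma_handle.
 */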
void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle,
		      size_t size, enum dma_data_direction direction,
		      struct dma_attrs *attrs)
{
	unsigned int npages;

	BUG_ON(direction == DMA_NONE);

	if (tbl) {
		npages = iommu_num_pages(dma_handle, size, IOMMU_PAGE_SIZE);
		iommu_free(tbl, dma_handle, npages);
	}
}

/* Allocates a contiguous real buffer and creates mappings over it.
 * Returns the virtual address of the buffer and sets dma_handle
 * to the dma address (mapping) of the first page.
 */
void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
			   size_t size, dma_addr_t *dma_handle,
			   unsigned long mask, gfp_t flag, int node)
{
	void *ret = NULL;
	dma_addr_t mapping;
	unsigned int order;
	unsigned int nio_pages, io_order;
	struct page *page;

	size = PAGE_ALIGN(size);
	order = get_order(size);

	/*
	 * Client asked for way too much space. This is checked later
	 * anyway. It is easier to debug here for the drivers than in
	 * the tce tables.
	 */
	if (order >= IOMAP_MAX_ORDER) {
		dev_info(dev, "iommu_alloc_consistent size too large: 0x%lx\n",
			 size);
		return NULL;
	}

	if (!tbl)
		return NULL;

	/* Alloc enough pages (and possibly more) */
	page = alloc_pages_node(node, flag, order);
	if (!page)
		return NULL;
	ret = page_address(page);
	memset(ret, 0, size);

	/* Set up tces to cover the allocated range */
	nio_pages = size >> IOMMU_PAGE_SHIFT;
	io_order = get_iommu_order(size);
	mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
			      mask >> IOMMU_PAGE_SHIFT, io_order, NULL);
	if (mapping == DMA_ERROR_CODE) {
		free_pages((unsigned long)ret, order);
		return NULL;
	}
	*dma_handle = mapping;
	return ret;
}

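/*
 * Counterpart to iommu_alloc_coherent(): unmap the IOMMU range, then
 * return the underlying pages to the page allocator.
 */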
void iommu_free_coherent(struct iommu_table *tbl, size_t size,
			 void *vaddr, dma_addr_t dma_handle)
{
	if (tbl) {
		unsigned int nio_pages;

		size = PAGE_ALIGN(size);
		nio_pages = size >> IOMMU_PAGE_SHIFT;
		iommu_free(tbl, dma_handle, nio_pages);
		free_pages((unsigned long)vaddr, get_order(size));
	}
}