/*
 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
 *
 * Rewrite, cleanup, new allocation schemes, virtual merging:
 * Copyright (C) 2004 Olof Johansson, IBM Corporation
 * and Ben. Herrenschmidt, IBM Corporation
 *
 * Dynamic DMA mapping support, bus-independent parts.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/iommu.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/kdump.h>

#define DBG(...)

#ifdef CONFIG_IOMMU_VMERGE
static int novmerge = 0;
#else
static int novmerge = 1;
#endif

static int protect4gb = 1;

static inline unsigned long iommu_num_pages(unsigned long vaddr,
					    unsigned long slen)
{
	unsigned long npages;

	npages = IOMMU_PAGE_ALIGN(vaddr + slen) - (vaddr & IOMMU_PAGE_MASK);
	npages >>= IOMMU_PAGE_SHIFT;

	return npages;
}

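/*
 * Worked example (assuming 4K IOMMU pages, i.e. IOMMU_PAGE_SHIFT == 12):
 * a buffer at vaddr 0x1ff0 of length 0x20 straddles the 0x2000 page
 * boundary, so iommu_num_pages(0x1ff0, 0x20) aligns the end (0x2010) up
 * to 0x3000, rounds the start down to 0x1000, and returns 2 pages.
 */
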
static int __init setup_protect4gb(char *str)
{
	if (strcmp(str, "on") == 0)
		protect4gb = 1;
	else if (strcmp(str, "off") == 0)
		protect4gb = 0;

	return 1;
}

static int __init setup_iommu(char *str)
{
	if (!strcmp(str, "novmerge"))
		novmerge = 1;
	else if (!strcmp(str, "vmerge"))
		novmerge = 0;
	return 1;
}

__setup("protect4gb=", setup_protect4gb);
__setup("iommu=", setup_iommu);

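/*
 * Example: booting with "iommu=novmerge protect4gb=off" on the kernel
 * command line disables both virtual merging and the 4 GB boundary
 * reservation performed in iommu_init_table() below.
 */
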
static unsigned long iommu_range_alloc(struct iommu_table *tbl,
				       unsigned long npages,
				       unsigned long *handle,
				       unsigned long mask,
				       unsigned int align_order)
{
	unsigned long n, end, i, start;
	unsigned long start_addr, end_addr;
	unsigned long limit;
	int largealloc = npages > 15;
	int pass = 0;
	unsigned long align_mask;

	align_mask = (1ul << align_order) - 1;

	/* This allocator was derived from x86_64's bit string search */

	/* Sanity check */
	if (unlikely(npages == 0)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return DMA_ERROR_CODE;
	}

	if (handle && *handle)
		start = *handle;
	else
		start = largealloc ? tbl->it_largehint : tbl->it_hint;

	/* Use only half of the table for small allocs (15 pages or less) */
	limit = largealloc ? tbl->it_size : tbl->it_halfpoint;

	if (largealloc && start < tbl->it_halfpoint)
		start = tbl->it_halfpoint;

	/* The case below can happen if we have a small segment appended
	 * to a large, or when the previous alloc was at the very end of
	 * the available space. If so, go back to the initial start.
	 */
	if (start >= limit)
		start = largealloc ? tbl->it_largehint : tbl->it_hint;

 again:

	if (limit + tbl->it_offset > mask) {
		limit = mask - tbl->it_offset + 1;
		/* If we're constrained on address range, first try
		 * at the masked hint to avoid O(n) search complexity,
		 * but on second pass, start at 0.
		 */
		if ((start & mask) >= limit || pass > 0)
			start = 0;
		else
			start &= mask;
	}

	n = find_next_zero_bit(tbl->it_map, limit, start);

	/* Align allocation */
	n = (n + align_mask) & ~align_mask;
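	/* (e.g. align_order == 2 gives align_mask == 0x3, so n is rounded
	 * up to the next multiple of 4 IOMMU pages) */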

	end = n + npages;

	if (unlikely(end >= limit)) {
		if (likely(pass < 2)) {
			/* First failure, just rescan this half of the
			 * table. Second failure, rescan the other half.
			 */
			start = (largealloc ^ pass) ? tbl->it_halfpoint : 0;
			limit = pass ? tbl->it_size : limit;
			pass++;
			goto again;
		} else {
			/* Third failure, give up */
			return DMA_ERROR_CODE;
		}
	}

	/* DMA cannot cross a 4 GB boundary */
	start_addr = (n + tbl->it_offset) << IOMMU_PAGE_SHIFT;
	end_addr = (end + tbl->it_offset) << IOMMU_PAGE_SHIFT;
	if ((start_addr >> 32) != (end_addr >> 32)) {
		end_addr &= 0xffffffff00000000l;
		start = (end_addr >> IOMMU_PAGE_SHIFT) - tbl->it_offset;
		goto again;
	}

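	/* Worked example (4K IOMMU pages, it_offset == 0): n == 0xfffff,
	 * npages == 2 would map 0xfffff000..0x100000fff, crossing 4 GB,
	 * so start is bumped to entry 0x100000 and the search retries. */
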
	for (i = n; i < end; i++)
		if (test_bit(i, tbl->it_map)) {
			start = i+1;
			goto again;
		}

	for (i = n; i < end; i++)
		__set_bit(i, tbl->it_map);

	/* Bump the hint to a new block for small allocs. */
	if (largealloc) {
		/* Don't bump to new block to avoid fragmentation */
		tbl->it_largehint = end;
	} else {
		/* Overflow will be taken care of at the next allocation */
		tbl->it_hint = (end + tbl->it_blocksize - 1) &
				~(tbl->it_blocksize - 1);
	}

	/* Update handle for SG allocations */
	if (handle)
		*handle = end;

	return n;
}

static dma_addr_t iommu_alloc(struct iommu_table *tbl, void *page,
		unsigned int npages, enum dma_data_direction direction,
		unsigned long mask, unsigned int align_order)
{
	unsigned long entry, flags;
	dma_addr_t ret = DMA_ERROR_CODE;

	spin_lock_irqsave(&(tbl->it_lock), flags);

	entry = iommu_range_alloc(tbl, npages, NULL, mask, align_order);

	if (unlikely(entry == DMA_ERROR_CODE)) {
		spin_unlock_irqrestore(&(tbl->it_lock), flags);
		return DMA_ERROR_CODE;
	}

	entry += tbl->it_offset;	/* Offset into real TCE table */
	ret = entry << IOMMU_PAGE_SHIFT;	/* Set the return dma address */

	/* Put the TCEs in the HW table */
	ppc_md.tce_build(tbl, entry, npages, (unsigned long)page & IOMMU_PAGE_MASK,
			 direction);

	/* Flush/invalidate TLB caches if necessary */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	spin_unlock_irqrestore(&(tbl->it_lock), flags);

	/* Make sure updates are seen by hardware */
	mb();

	return ret;
}

static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
			 unsigned int npages)
{
	unsigned long entry, free_entry;
	unsigned long i;

	entry = dma_addr >> IOMMU_PAGE_SHIFT;
	free_entry = entry - tbl->it_offset;

	if (((free_entry + npages) > tbl->it_size) ||
	    (entry < tbl->it_offset)) {
		if (printk_ratelimit()) {
			printk(KERN_INFO "iommu_free: invalid entry\n");
			printk(KERN_INFO "\tentry = 0x%lx\n", entry);
			printk(KERN_INFO "\tdma_addr = 0x%lx\n", (u64)dma_addr);
			printk(KERN_INFO "\tTable = 0x%lx\n", (u64)tbl);
			printk(KERN_INFO "\tbus# = 0x%lx\n", (u64)tbl->it_busno);
			printk(KERN_INFO "\tsize = 0x%lx\n", (u64)tbl->it_size);
			printk(KERN_INFO "\tstartOff = 0x%lx\n", (u64)tbl->it_offset);
			printk(KERN_INFO "\tindex = 0x%lx\n", (u64)tbl->it_index);
			WARN_ON(1);
		}
		return;
	}

	ppc_md.tce_free(tbl, entry, npages);

	for (i = 0; i < npages; i++)
		__clear_bit(free_entry+i, tbl->it_map);
}

static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
		unsigned int npages)
{
	unsigned long flags;

	spin_lock_irqsave(&(tbl->it_lock), flags);

	__iommu_free(tbl, dma_addr, npages);

	/* Make sure TLB cache is flushed if the HW needs it. We do
	 * not do an mb() here on purpose, it is not needed on any of
	 * the current platforms.
	 */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	spin_unlock_irqrestore(&(tbl->it_lock), flags);
}

int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist,
		 int nelems, unsigned long mask,
		 enum dma_data_direction direction)
{
	dma_addr_t dma_next = 0, dma_addr;
	unsigned long flags;
	struct scatterlist *s, *outs, *segstart;
	int outcount, incount;
	unsigned long handle;

	BUG_ON(direction == DMA_NONE);

	if ((nelems == 0) || !tbl)
		return 0;

	outs = s = segstart = &sglist[0];
	outcount = 1;
	incount = nelems;
	handle = 0;

	/* Init first segment length for backout at failure */
	outs->dma_length = 0;

	DBG("sg mapping %d elements:\n", nelems);

	spin_lock_irqsave(&(tbl->it_lock), flags);

	for (s = outs; nelems; nelems--, s++) {
		unsigned long vaddr, npages, entry, slen;

		slen = s->length;
		/* Sanity check */
		if (slen == 0) {
			dma_next = 0;
			continue;
		}
		/* Allocate iommu entries for that segment */
		vaddr = (unsigned long)page_address(s->page) + s->offset;
		npages = iommu_num_pages(vaddr, slen);
		entry = iommu_range_alloc(tbl, npages, &handle, mask >> IOMMU_PAGE_SHIFT, 0);

		DBG(" - vaddr: %lx, size: %lx\n", vaddr, slen);

		/* Handle failure */
		if (unlikely(entry == DMA_ERROR_CODE)) {
			if (printk_ratelimit())
				printk(KERN_INFO "iommu_alloc failed, tbl %p vaddr %lx"
				       " npages %lx\n", tbl, vaddr, npages);
			goto failure;
		}

		/* Convert entry to a dma_addr_t */
		entry += tbl->it_offset;
		dma_addr = entry << IOMMU_PAGE_SHIFT;
		dma_addr |= (s->offset & ~IOMMU_PAGE_MASK);

		DBG(" - %lu pages, entry: %lx, dma_addr: %lx\n",
			    npages, entry, dma_addr);

		/* Insert into HW table */
		ppc_md.tce_build(tbl, entry, npages, vaddr & IOMMU_PAGE_MASK, direction);

		/* If we are in an open segment, try merging */
		if (segstart != s) {
			DBG(" - trying merge...\n");
			/* We cannot merge if:
			 * - allocated dma_addr isn't contiguous to previous allocation
			 */
			if (novmerge || (dma_addr != dma_next)) {
				/* Can't merge: create a new segment */
				segstart = s;
				outcount++; outs++;
				DBG(" can't merge, new segment.\n");
			} else {
				outs->dma_length += s->length;
				DBG(" merged, new len: %x\n", outs->dma_length);
			}
		}

		if (segstart == s) {
			/* This is a new segment, fill entries */
			DBG(" - filling new segment.\n");
			outs->dma_address = dma_addr;
			outs->dma_length = slen;
		}

		/* Calculate next page pointer for contiguous check */
		dma_next = dma_addr + slen;

		DBG(" - dma next is: %lx\n", dma_next);
	}

	/* Flush/invalidate TLB caches if necessary */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	spin_unlock_irqrestore(&(tbl->it_lock), flags);

	DBG("mapped %d elements:\n", outcount);

	/* For the sake of iommu_unmap_sg, we clear out the length in the
	 * next entry of the sglist if we didn't fill the list completely
	 */
	if (outcount < incount) {
		outs++;
		outs->dma_address = DMA_ERROR_CODE;
		outs->dma_length = 0;
	}

	/* Make sure updates are seen by hardware */
	mb();

	return outcount;

 failure:
	for (s = &sglist[0]; s <= outs; s++) {
		if (s->dma_length != 0) {
			unsigned long vaddr, npages;

			vaddr = s->dma_address & IOMMU_PAGE_MASK;
			npages = iommu_num_pages(s->dma_address, s->dma_length);
			__iommu_free(tbl, vaddr, npages);
			s->dma_address = DMA_ERROR_CODE;
			s->dma_length = 0;
		}
	}
	spin_unlock_irqrestore(&(tbl->it_lock), flags);
	return 0;
}

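/*
 * Hedged usage sketch (illustrative only, not taken from this file):
 * map a scatterlist and walk the possibly merged segments. "dev", "sg",
 * "nelems" and program_desc() are placeholders for whatever the caller
 * provides:
 *
 *	int i, n;
 *
 *	n = iommu_map_sg(tbl, sg, nelems, *dev->dma_mask, DMA_TO_DEVICE);
 *	if (n == 0)
 *		return -EIO;
 *	for (i = 0; i < n; i++)
 *		program_desc(sg[i].dma_address, sg[i].dma_length);
 *	...
 *	iommu_unmap_sg(tbl, sg, nelems, DMA_TO_DEVICE);
 */
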

void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
		int nelems, enum dma_data_direction direction)
{
	unsigned long flags;

	BUG_ON(direction == DMA_NONE);

	if (!tbl)
		return;

	spin_lock_irqsave(&(tbl->it_lock), flags);

	while (nelems--) {
		unsigned int npages;
		dma_addr_t dma_handle = sglist->dma_address;

		if (sglist->dma_length == 0)
			break;
		npages = iommu_num_pages(dma_handle, sglist->dma_length);
		__iommu_free(tbl, dma_handle, npages);
		sglist++;
	}

	/* Flush/invalidate TLBs if necessary. As for iommu_free(), we
	 * do not do an mb() here, the affected platforms do not need it
	 * when freeing.
	 */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	spin_unlock_irqrestore(&(tbl->it_lock), flags);
}

/*
 * Build an iommu_table structure.  This contains a bitmap which
 * is used to manage allocation of the TCE space.
 */
struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
{
	unsigned long sz;
	unsigned long start_index, end_index;
	unsigned long entries_per_4g;
	unsigned long index;
	static int welcomed = 0;
	struct page *page;

	/* Set aside 1/4 of the table for large allocations. */
	tbl->it_halfpoint = tbl->it_size * 3 / 4;

	/* number of bytes needed for the bitmap */
	sz = (tbl->it_size + 7) >> 3;

	page = alloc_pages_node(nid, GFP_ATOMIC, get_order(sz));
	if (!page)
		panic("iommu_init_table: Can't allocate %ld bytes\n", sz);
	tbl->it_map = page_address(page);
	memset(tbl->it_map, 0, sz);

	tbl->it_hint = 0;
	tbl->it_largehint = tbl->it_halfpoint;
	spin_lock_init(&tbl->it_lock);

#ifdef CONFIG_CRASH_DUMP
	if (ppc_md.tce_get) {
		unsigned long tceval;
		unsigned long tcecount = 0;

		/*
		 * Reserve the existing mappings left by the first kernel.
		 */
		for (index = 0; index < tbl->it_size; index++) {
			tceval = ppc_md.tce_get(tbl, index + tbl->it_offset);
			/*
			 * Freed TCE entry contains 0x7fffffffffffffff on JS20
			 */
			if (tceval && (tceval != 0x7fffffffffffffffUL)) {
				__set_bit(index, tbl->it_map);
				tcecount++;
			}
		}
		if ((tbl->it_size - tcecount) < KDUMP_MIN_TCE_ENTRIES) {
			printk(KERN_WARNING "TCE table is full; ");
			printk(KERN_WARNING "freeing %d entries for the kdump boot\n",
				KDUMP_MIN_TCE_ENTRIES);
			for (index = tbl->it_size - KDUMP_MIN_TCE_ENTRIES;
				index < tbl->it_size; index++)
				__clear_bit(index, tbl->it_map);
		}
	}
#else
	/* Clear the hardware table in case firmware left allocations in it */
	ppc_md.tce_free(tbl, tbl->it_offset, tbl->it_size);
#endif

	/*
	 * DMA cannot cross 4 GB boundary.  Mark last entry of each 4
	 * GB chunk as reserved.
	 */
	if (protect4gb) {
		entries_per_4g = 0x100000000l >> IOMMU_PAGE_SHIFT;

		/* Mark the last bit before a 4GB boundary as used */
		start_index = tbl->it_offset | (entries_per_4g - 1);
		start_index -= tbl->it_offset;

		end_index = tbl->it_size;

		for (index = start_index; index < end_index - 1; index += entries_per_4g)
			__set_bit(index, tbl->it_map);
	}

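	/*
	 * Worked example (assuming 4K IOMMU pages): entries_per_4g is
	 * 0x100000000 >> 12 == 0x100000, so with it_offset == 0 the bits
	 * at indices 0xfffff, 0x1fffff, ... are reserved, and no
	 * contiguous allocation can span a 4 GB boundary.
	 */
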
	if (!welcomed) {
		printk(KERN_INFO "IOMMU table initialized, virtual merging %s\n",
		       novmerge ? "disabled" : "enabled");
		welcomed = 1;
	}

	return tbl;
}

void iommu_free_table(struct device_node *dn)
{
	struct pci_dn *pdn = dn->data;
	struct iommu_table *tbl = pdn->iommu_table;
	unsigned long bitmap_sz, i;
	unsigned int order;

	if (!tbl || !tbl->it_map) {
		printk(KERN_ERR "%s: expected TCE map for %s\n", __FUNCTION__,
				dn->full_name);
		return;
	}

	/* verify that table contains no entries */
	/* it_size is in entries, and we're examining 64 at a time */
	for (i = 0; i < (tbl->it_size/64); i++) {
		if (tbl->it_map[i] != 0) {
			printk(KERN_WARNING "%s: Unexpected TCEs for %s\n",
				__FUNCTION__, dn->full_name);
			break;
		}
	}

	/* calculate bitmap size in bytes */
	bitmap_sz = (tbl->it_size + 7) / 8;

	/* free bitmap */
	order = get_order(bitmap_sz);
	free_pages((unsigned long) tbl->it_map, order);

	/* free table */
	kfree(tbl);
}

/* Creates TCEs for a user provided buffer. The user buffer must be
 * contiguous real kernel storage (not vmalloc). The address of the buffer
 * passed here is the kernel (virtual) address of the buffer. The buffer
 * need not be page aligned, the dma_addr_t returned will point to the same
 * byte within the page as vaddr.
 */
dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr,
		size_t size, unsigned long mask,
		enum dma_data_direction direction)
{
	dma_addr_t dma_handle = DMA_ERROR_CODE;
	unsigned long uaddr;
	unsigned int npages;

	BUG_ON(direction == DMA_NONE);

	uaddr = (unsigned long)vaddr;
	npages = iommu_num_pages(uaddr, size);

	if (tbl) {
		dma_handle = iommu_alloc(tbl, vaddr, npages, direction,
					 mask >> IOMMU_PAGE_SHIFT, 0);
		if (dma_handle == DMA_ERROR_CODE) {
			if (printk_ratelimit()) {
				printk(KERN_INFO "iommu_alloc failed, "
						"tbl %p vaddr %p npages %d\n",
						tbl, vaddr, npages);
			}
		} else
			dma_handle |= (uaddr & ~IOMMU_PAGE_MASK);
	}

	return dma_handle;
}

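/*
 * Hedged usage sketch (illustrative only, not taken from this file):
 * a bus glue layer might wrap the single-buffer mapping roughly like
 * this, where "dev", "buf" and "len" are placeholders:
 *
 *	dma_addr_t handle;
 *
 *	handle = iommu_map_single(tbl, buf, len, *dev->dma_mask,
 *				  DMA_TO_DEVICE);
 *	if (handle == DMA_ERROR_CODE)
 *		return -EIO;
 *	...
 *	iommu_unmap_single(tbl, handle, len, DMA_TO_DEVICE);
 */
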
void iommu_unmap_single(struct iommu_table *tbl, dma_addr_t dma_handle,
		size_t size, enum dma_data_direction direction)
{
	unsigned int npages;

	BUG_ON(direction == DMA_NONE);

	if (tbl) {
		npages = iommu_num_pages(dma_handle, size);
		iommu_free(tbl, dma_handle, npages);
	}
}

/* Allocates a contiguous real buffer and creates mappings over it.
 * Returns the virtual address of the buffer and sets dma_handle
 * to the dma address (mapping) of the first page.
 */
void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size,
		dma_addr_t *dma_handle, unsigned long mask, gfp_t flag, int node)
{
	void *ret = NULL;
	dma_addr_t mapping;
	unsigned int order;
	unsigned int nio_pages, io_order;
	struct page *page;

	size = PAGE_ALIGN(size);
	order = get_order(size);

	/*
	 * Client asked for way too much space.  This is checked later
	 * anyway.  It is easier to debug here for the drivers than in
	 * the tce tables.
	 */
	if (order >= IOMAP_MAX_ORDER) {
		printk("iommu_alloc_coherent: size too large: 0x%lx\n", size);
		return NULL;
	}

	if (!tbl)
		return NULL;

	/* Alloc enough pages (and possibly more) */
	page = alloc_pages_node(node, flag, order);
	if (!page)
		return NULL;
	ret = page_address(page);
	memset(ret, 0, size);

	/* Set up tces to cover the allocated range */
	nio_pages = size >> IOMMU_PAGE_SHIFT;
	io_order = get_iommu_order(size);
	mapping = iommu_alloc(tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
			      mask >> IOMMU_PAGE_SHIFT, io_order);
	if (mapping == DMA_ERROR_CODE) {
		free_pages((unsigned long)ret, order);
		return NULL;
	}
	*dma_handle = mapping;
	return ret;
}

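/*
 * Hedged usage sketch (illustrative only): allocate and later release
 * a coherent buffer through a table "tbl"; "dev" is a placeholder and
 * -1 requests pages from any NUMA node:
 *
 *	dma_addr_t dma;
 *	void *cpu_addr;
 *
 *	cpu_addr = iommu_alloc_coherent(tbl, size, &dma, *dev->dma_mask,
 *					GFP_KERNEL, -1);
 *	if (!cpu_addr)
 *		return -ENOMEM;
 *	...
 *	iommu_free_coherent(tbl, size, cpu_addr, dma);
 */
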
void iommu_free_coherent(struct iommu_table *tbl, size_t size,
			 void *vaddr, dma_addr_t dma_handle)
{
	if (tbl) {
		unsigned int nio_pages;

		size = PAGE_ALIGN(size);
		nio_pages = size >> IOMMU_PAGE_SHIFT;
		iommu_free(tbl, dma_handle, nio_pages);
		free_pages((unsigned long)vaddr, get_order(size));
	}
}