/*
 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
 *
 * Rewrite, cleanup, new allocation schemes, virtual merging:
 * Copyright (C) 2004 Olof Johansson, IBM Corporation
 *               and  Ben. Herrenschmidt, IBM Corporation
 *
 * Dynamic DMA mapping support, bus-independent parts.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */


#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/iommu.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/kdump.h>

#define DBG(...)

#ifdef CONFIG_IOMMU_VMERGE
static int novmerge = 0;
#else
static int novmerge = 1;
#endif

static inline unsigned long iommu_num_pages(unsigned long vaddr,
					    unsigned long slen)
{
	unsigned long npages;

	npages = IOMMU_PAGE_ALIGN(vaddr + slen) - (vaddr & IOMMU_PAGE_MASK);
	npages >>= IOMMU_PAGE_SHIFT;

	return npages;
}
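
/*
 * Worked example (illustrative, assuming a 4K IOMMU page size): a
 * 100-byte buffer starting 8 bytes before a page boundary spans two
 * IOMMU pages:
 *
 *	iommu_num_pages(0x1ff8, 100)
 *		= (IOMMU_PAGE_ALIGN(0x1ff8 + 100) - (0x1ff8 & IOMMU_PAGE_MASK))
 *			>> IOMMU_PAGE_SHIFT
 *		= (0x3000 - 0x1000) >> 12 = 2
 */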

static int __init setup_iommu(char *str)
{
	if (!strcmp(str, "novmerge"))
		novmerge = 1;
	else if (!strcmp(str, "vmerge"))
		novmerge = 0;
	return 1;
}

__setup("iommu=", setup_iommu);
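
/*
 * Usage note: virtual merging can be toggled at boot time, e.g. the
 * kernel command line argument "iommu=novmerge" forces one DMA segment
 * per scatterlist element regardless of CONFIG_IOMMU_VMERGE.
 */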

static unsigned long iommu_range_alloc(struct iommu_table *tbl,
				       unsigned long npages,
				       unsigned long *handle,
				       unsigned long mask,
				       unsigned int align_order)
{
	unsigned long n, end, i, start;
	unsigned long start_addr, end_addr;
	unsigned long limit;
	int largealloc = npages > 15;
	int pass = 0;
	unsigned long align_mask;

	/* Avoid an undefined 64-bit shift when align_order == 0 */
	align_mask = (1ull << align_order) - 1;

	/* This allocator was derived from x86_64's bit string search */

	/* Sanity check */
	if (unlikely(npages == 0)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return DMA_ERROR_CODE;
	}

	if (handle && *handle)
		start = *handle;
	else
		start = largealloc ? tbl->it_largehint : tbl->it_hint;

	/* Use only the lower 3/4 of the table for small allocs
	 * (15 pages or less); the rest is set aside for large ones */
	limit = largealloc ? tbl->it_size : tbl->it_halfpoint;

	if (largealloc && start < tbl->it_halfpoint)
		start = tbl->it_halfpoint;

	/* The case below can happen if we have a small segment appended
	 * to a large one, or when the previous alloc was at the very end
	 * of the available space. If so, go back to the initial start.
	 */
	if (start >= limit)
		start = largealloc ? tbl->it_largehint : tbl->it_hint;

 again:

	if (limit + tbl->it_offset > mask) {
		limit = mask - tbl->it_offset + 1;
		/* If we're constrained on address range, first try
		 * at the masked hint to avoid O(n) search complexity,
		 * but on second pass, start at 0.
		 */
		if ((start & mask) >= limit || pass > 0)
			start = 0;
		else
			start &= mask;
	}

	n = find_next_zero_bit(tbl->it_map, limit, start);

	/* Align allocation */
	n = (n + align_mask) & ~align_mask;

	end = n + npages;

	if (unlikely(end >= limit)) {
		if (likely(pass < 2)) {
			/* First failure, just rescan the half of the table.
			 * Second failure, rescan the other half of the table.
			 */
			start = (largealloc ^ pass) ? tbl->it_halfpoint : 0;
			limit = pass ? tbl->it_size : limit;
			pass++;
			goto again;
		} else {
			/* Third failure, give up */
			return DMA_ERROR_CODE;
		}
	}

	/* DMA cannot cross a 4 GB boundary.  Table entries and it_offset
	 * are in IOMMU pages, so convert with IOMMU_PAGE_SHIFT.
	 */
	start_addr = (n + tbl->it_offset) << IOMMU_PAGE_SHIFT;
	end_addr = (end + tbl->it_offset) << IOMMU_PAGE_SHIFT;
	if ((start_addr >> 32) != (end_addr >> 32)) {
		end_addr &= 0xffffffff00000000ul;
		start = (end_addr >> IOMMU_PAGE_SHIFT) - tbl->it_offset;
		goto again;
	}

	for (i = n; i < end; i++)
		if (test_bit(i, tbl->it_map)) {
			start = i+1;
			goto again;
		}

	for (i = n; i < end; i++)
		__set_bit(i, tbl->it_map);

	/* Bump the hint to a new block for small allocs. */
	if (largealloc) {
		/* Don't bump to new block to avoid fragmentation */
		tbl->it_largehint = end;
	} else {
		/* Overflow will be taken care of at the next allocation */
		tbl->it_hint = (end + tbl->it_blocksize - 1) &
				~(tbl->it_blocksize - 1);
	}

	/* Update handle for SG allocations */
	if (handle)
		*handle = end;

	return n;
}
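
/*
 * Retry ladder used above (descriptive): the first scan runs from the
 * hint to the caller's region limit; on failure it rescans that region
 * from its start; on a second failure it falls through to the rest of
 * the table; a third failure returns DMA_ERROR_CODE.
 */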

static dma_addr_t iommu_alloc(struct iommu_table *tbl, void *page,
			      unsigned int npages, enum dma_data_direction direction,
			      unsigned long mask, unsigned int align_order)
{
	unsigned long entry, flags;
	dma_addr_t ret = DMA_ERROR_CODE;

	spin_lock_irqsave(&(tbl->it_lock), flags);

	entry = iommu_range_alloc(tbl, npages, NULL, mask, align_order);

	if (unlikely(entry == DMA_ERROR_CODE)) {
		spin_unlock_irqrestore(&(tbl->it_lock), flags);
		return DMA_ERROR_CODE;
	}

	entry += tbl->it_offset;	/* Offset into real TCE table */
	ret = entry << IOMMU_PAGE_SHIFT;	/* Set the return dma address */

	/* Put the TCEs in the HW table */
	ppc_md.tce_build(tbl, entry, npages, (unsigned long)page & IOMMU_PAGE_MASK,
			 direction);

	/* Flush/invalidate TLB caches if necessary */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	spin_unlock_irqrestore(&(tbl->it_lock), flags);

	/* Make sure updates are seen by hardware */
	mb();

	return ret;
}
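
/*
 * Address formation example (illustrative values): with it_offset =
 * 0x100 and 4K IOMMU pages, an allocation landing on bitmap bit 3
 * programs TCE entry 0x103 and returns the bus address
 * (0x100 + 3) << 12 = 0x103000; callers such as iommu_map_single()
 * then OR in the intra-page offset.
 */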

static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
			 unsigned int npages)
{
	unsigned long entry, free_entry;
	unsigned long i;

	entry = dma_addr >> IOMMU_PAGE_SHIFT;
	free_entry = entry - tbl->it_offset;

	if (((free_entry + npages) > tbl->it_size) ||
	    (entry < tbl->it_offset)) {
		if (printk_ratelimit()) {
			printk(KERN_INFO "iommu_free: invalid entry\n");
			printk(KERN_INFO "\tentry    = 0x%lx\n", entry);
			printk(KERN_INFO "\tdma_addr = 0x%lx\n", (u64)dma_addr);
			printk(KERN_INFO "\tTable    = 0x%lx\n", (u64)tbl);
			printk(KERN_INFO "\tbus#     = 0x%lx\n", (u64)tbl->it_busno);
			printk(KERN_INFO "\tsize     = 0x%lx\n", (u64)tbl->it_size);
			printk(KERN_INFO "\tstartOff = 0x%lx\n", (u64)tbl->it_offset);
			printk(KERN_INFO "\tindex    = 0x%lx\n", (u64)tbl->it_index);
			WARN_ON(1);
		}
		return;
	}

	ppc_md.tce_free(tbl, entry, npages);

	for (i = 0; i < npages; i++)
		__clear_bit(free_entry+i, tbl->it_map);
}

static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
		       unsigned int npages)
{
	unsigned long flags;

	spin_lock_irqsave(&(tbl->it_lock), flags);

	__iommu_free(tbl, dma_addr, npages);

	/* Make sure TLB cache is flushed if the HW needs it. We do
	 * not do an mb() here on purpose, it is not needed on any of
	 * the current platforms.
	 */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	spin_unlock_irqrestore(&(tbl->it_lock), flags);
}
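
/*
 * __iommu_free() requires tbl->it_lock to be held; iommu_map_sg()'s
 * failure path and iommu_unmap_sg() use that to batch several frees
 * under one lock acquisition.  iommu_free() is the self-locking wrapper
 * used by the single-mapping and coherent paths.
 */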

int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist,
		 int nelems, unsigned long mask,
		 enum dma_data_direction direction)
{
	dma_addr_t dma_next = 0, dma_addr;
	unsigned long flags;
	struct scatterlist *s, *outs, *segstart;
	int outcount, incount;
	unsigned long handle;

	BUG_ON(direction == DMA_NONE);

	if ((nelems == 0) || !tbl)
		return 0;

	outs = s = segstart = &sglist[0];
	outcount = 1;
	incount = nelems;
	handle = 0;

	/* Init first segment length for backout at failure */
	outs->dma_length = 0;

	DBG("sg mapping %d elements:\n", nelems);

	spin_lock_irqsave(&(tbl->it_lock), flags);

	for (s = outs; nelems; nelems--, s++) {
		unsigned long vaddr, npages, entry, slen;

		slen = s->length;
		/* Sanity check */
		if (slen == 0) {
			dma_next = 0;
			continue;
		}
		/* Allocate iommu entries for that segment */
		vaddr = (unsigned long)page_address(s->page) + s->offset;
		npages = iommu_num_pages(vaddr, slen);
		entry = iommu_range_alloc(tbl, npages, &handle, mask >> IOMMU_PAGE_SHIFT, 0);

		DBG("  - vaddr: %lx, size: %lx\n", vaddr, slen);

		/* Handle failure */
		if (unlikely(entry == DMA_ERROR_CODE)) {
			if (printk_ratelimit())
				printk(KERN_INFO "iommu_alloc failed, tbl %p vaddr %lx"
				       " npages %lx\n", tbl, vaddr, npages);
			goto failure;
		}

		/* Convert entry to a dma_addr_t */
		entry += tbl->it_offset;
		dma_addr = entry << IOMMU_PAGE_SHIFT;
		dma_addr |= (s->offset & ~IOMMU_PAGE_MASK);

		DBG("  - %lu pages, entry: %lx, dma_addr: %lx\n",
		    npages, entry, dma_addr);

		/* Insert into HW table */
		ppc_md.tce_build(tbl, entry, npages, vaddr & IOMMU_PAGE_MASK, direction);

		/* If we are in an open segment, try merging */
		if (segstart != s) {
			DBG("  - trying merge...\n");
			/* We cannot merge if:
			 * - allocated dma_addr isn't contiguous to previous allocation
			 */
			if (novmerge || (dma_addr != dma_next)) {
				/* Can't merge: create a new segment */
				segstart = s;
				outcount++; outs++;
				DBG("    can't merge, new segment.\n");
			} else {
				outs->dma_length += s->length;
				DBG("    merged, new len: 0x%x\n", outs->dma_length);
			}
		}

		if (segstart == s) {
			/* This is a new segment, fill entries */
			DBG("  - filling new segment.\n");
			outs->dma_address = dma_addr;
			outs->dma_length = slen;
		}

		/* Calculate next page pointer for contiguous check */
		dma_next = dma_addr + slen;

		DBG("  - dma next is: %lx\n", dma_next);
	}

	/* Flush/invalidate TLB caches if necessary */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	spin_unlock_irqrestore(&(tbl->it_lock), flags);

	DBG("mapped %d elements:\n", outcount);

	/* For the sake of iommu_unmap_sg, we clear out the length in the
	 * next entry of the sglist if we didn't fill the list completely
	 */
	if (outcount < incount) {
		outs++;
		outs->dma_address = DMA_ERROR_CODE;
		outs->dma_length = 0;
	}

	/* Make sure updates are seen by hardware */
	mb();

	return outcount;

 failure:
	for (s = &sglist[0]; s <= outs; s++) {
		if (s->dma_length != 0) {
			unsigned long vaddr, npages;

			vaddr = s->dma_address & IOMMU_PAGE_MASK;
			npages = iommu_num_pages(s->dma_address, s->dma_length);
			__iommu_free(tbl, vaddr, npages);
			s->dma_address = DMA_ERROR_CODE;
			s->dma_length = 0;
		}
	}
	spin_unlock_irqrestore(&(tbl->it_lock), flags);
	return 0;
}
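
/*
 * Virtual merging example (illustrative values): two consecutive 4K
 * elements whose TCE entries happen to be allocated back to back map
 * to bus addresses 0x103000 and 0x104000; since the second starts
 * exactly where the first ends (dma_addr == dma_next) and novmerge is
 * clear, they come back as a single 8K DMA segment.
 */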


void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
		    int nelems, enum dma_data_direction direction)
{
	unsigned long flags;

	BUG_ON(direction == DMA_NONE);

	if (!tbl)
		return;

	spin_lock_irqsave(&(tbl->it_lock), flags);

	while (nelems--) {
		unsigned int npages;
		dma_addr_t dma_handle = sglist->dma_address;

		if (sglist->dma_length == 0)
			break;
		npages = iommu_num_pages(dma_handle, sglist->dma_length);
		__iommu_free(tbl, dma_handle, npages);
		sglist++;
	}

	/* Flush/invalidate TLBs if necessary. As for iommu_free(), we
	 * do not do an mb() here, the affected platforms do not need it
	 * when freeing.
	 */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	spin_unlock_irqrestore(&(tbl->it_lock), flags);
}
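
/*
 * Note: the early break above relies on the dma_length == 0 sentinel
 * that iommu_map_sg() writes into the entry following the last merged
 * segment whenever outcount < incount.
 */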

/*
 * Build an iommu_table structure.  This contains a bitmap which
 * is used to manage allocation of the TCE space.
 */
struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
{
	unsigned long sz;
	static int welcomed = 0;
	struct page *page;

	/* Set aside 1/4 of the table for large allocations. */
	tbl->it_halfpoint = tbl->it_size * 3 / 4;

	/* number of bytes needed for the bitmap */
	sz = (tbl->it_size + 7) >> 3;

	page = alloc_pages_node(nid, GFP_ATOMIC, get_order(sz));
	if (!page)
		panic("iommu_init_table: Can't allocate %ld bytes\n", sz);
	tbl->it_map = page_address(page);
	memset(tbl->it_map, 0, sz);

	tbl->it_hint = 0;
	tbl->it_largehint = tbl->it_halfpoint;
	spin_lock_init(&tbl->it_lock);

#ifdef CONFIG_CRASH_DUMP
	if (ppc_md.tce_get) {
		unsigned long index, tceval;
		unsigned long tcecount = 0;

		/*
		 * Reserve the existing mappings left by the first kernel.
		 */
		for (index = 0; index < tbl->it_size; index++) {
			tceval = ppc_md.tce_get(tbl, index + tbl->it_offset);
			/*
			 * Freed TCE entry contains 0x7fffffffffffffff on JS20
			 */
			if (tceval && (tceval != 0x7fffffffffffffffUL)) {
				__set_bit(index, tbl->it_map);
				tcecount++;
			}
		}
		if ((tbl->it_size - tcecount) < KDUMP_MIN_TCE_ENTRIES) {
			printk(KERN_WARNING "TCE table is full; ");
			printk(KERN_WARNING "freeing %d entries for the kdump boot\n",
			       KDUMP_MIN_TCE_ENTRIES);
			for (index = tbl->it_size - KDUMP_MIN_TCE_ENTRIES;
			     index < tbl->it_size; index++)
				__clear_bit(index, tbl->it_map);
		}
	}
#else
	/* Clear the hardware table in case firmware left allocations in it */
	ppc_md.tce_free(tbl, tbl->it_offset, tbl->it_size);
#endif

	if (!welcomed) {
		printk(KERN_INFO "IOMMU table initialized, virtual merging %s\n",
		       novmerge ? "disabled" : "enabled");
		welcomed = 1;
	}

	return tbl;
}
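
/*
 * Sizing example (illustrative): a 256MB DMA window with 4K IOMMU pages
 * gives it_size = 65536 entries, so the bitmap needs
 * (65536 + 7) >> 3 = 8192 bytes, an order-1 allocation with 4K kernel
 * pages; it_halfpoint then lands at entry 49152.
 */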

void iommu_free_table(struct device_node *dn)
{
	struct pci_dn *pdn = dn->data;
	struct iommu_table *tbl = pdn->iommu_table;
	unsigned long bitmap_sz, i;
	unsigned int order;

	if (!tbl || !tbl->it_map) {
		printk(KERN_ERR "%s: expected TCE map for %s\n", __FUNCTION__,
		       dn->full_name);
		return;
	}

	/* verify that table contains no entries */
	/* it_size is in entries, and we're examining 64 at a time */
	for (i = 0; i < (tbl->it_size/64); i++) {
		if (tbl->it_map[i] != 0) {
			printk(KERN_WARNING "%s: Unexpected TCEs for %s\n",
			       __FUNCTION__, dn->full_name);
			break;
		}
	}

	/* calculate bitmap size in bytes */
	bitmap_sz = (tbl->it_size + 7) / 8;

	/* free bitmap */
	order = get_order(bitmap_sz);
	free_pages((unsigned long) tbl->it_map, order);

	/* free table */
	kfree(tbl);
}

/* Creates TCEs for a user provided buffer.  The user buffer must be
 * contiguous real kernel storage (not vmalloc).  The address passed here
 * is the kernel (virtual) address of the buffer.  The buffer need not be
 * page aligned; the dma_addr_t returned will point to the same byte
 * within the page as vaddr.
 */
dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr,
			    size_t size, unsigned long mask,
			    enum dma_data_direction direction)
{
	dma_addr_t dma_handle = DMA_ERROR_CODE;
	unsigned long uaddr;
	unsigned int npages;

	BUG_ON(direction == DMA_NONE);

	uaddr = (unsigned long)vaddr;
	npages = iommu_num_pages(uaddr, size);

	if (tbl) {
		dma_handle = iommu_alloc(tbl, vaddr, npages, direction,
					 mask >> IOMMU_PAGE_SHIFT, 0);
		if (dma_handle == DMA_ERROR_CODE) {
			if (printk_ratelimit()) {
				printk(KERN_INFO "iommu_alloc failed, "
				       "tbl %p vaddr %p npages %d\n",
				       tbl, vaddr, npages);
			}
		} else
			dma_handle |= (uaddr & ~IOMMU_PAGE_MASK);
	}

	return dma_handle;
}
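
/*
 * Typical call sequence (illustrative; the buffer, length, and 32-bit
 * device mask are hypothetical, error handling elided):
 *
 *	dma_addr_t handle;
 *
 *	handle = iommu_map_single(tbl, buf, len, 0xffffffffUL,
 *				  DMA_TO_DEVICE);
 *	if (handle == DMA_ERROR_CODE)
 *		return -EIO;
 *	... device DMAs to "handle" ...
 *	iommu_unmap_single(tbl, handle, len, DMA_TO_DEVICE);
 */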

void iommu_unmap_single(struct iommu_table *tbl, dma_addr_t dma_handle,
			size_t size, enum dma_data_direction direction)
{
	unsigned int npages;

	BUG_ON(direction == DMA_NONE);

	if (tbl) {
		npages = iommu_num_pages(dma_handle, size);
		iommu_free(tbl, dma_handle, npages);
	}
}

/* Allocates a contiguous real buffer and creates mappings over it.
 * Returns the virtual address of the buffer and sets dma_handle
 * to the dma address (mapping) of the first page.
 */
void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size,
		dma_addr_t *dma_handle, unsigned long mask, gfp_t flag, int node)
{
	void *ret = NULL;
	dma_addr_t mapping;
	unsigned int order;
	unsigned int nio_pages, io_order;
	struct page *page;

	size = PAGE_ALIGN(size);
	order = get_order(size);

	/*
	 * Client asked for way too much space.  This is checked later
	 * anyway.  It is easier to debug here for the drivers than in
	 * the TCE tables.
	 */
	if (order >= IOMAP_MAX_ORDER) {
		printk("iommu_alloc_coherent size too large: 0x%lx\n", size);
		return NULL;
	}

	if (!tbl)
		return NULL;

	/* Alloc enough pages (and possibly more) */
	page = alloc_pages_node(node, flag, order);
	if (!page)
		return NULL;
	ret = page_address(page);
	memset(ret, 0, size);

	/* Set up tces to cover the allocated range */
	nio_pages = size >> IOMMU_PAGE_SHIFT;
	io_order = get_iommu_order(size);
	mapping = iommu_alloc(tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
			      mask >> IOMMU_PAGE_SHIFT, io_order);
	if (mapping == DMA_ERROR_CODE) {
		free_pages((unsigned long)ret, order);
		return NULL;
	}
	*dma_handle = mapping;
	return ret;
}
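
/*
 * Worked example (illustrative, assuming 64K kernel pages and 4K IOMMU
 * pages): a 64K request is a single order-0 page allocation, but covers
 * nio_pages = 16 TCE entries; io_order = get_iommu_order(64K) = 4 asks
 * iommu_range_alloc() for a 16-entry-aligned range.
 */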

void iommu_free_coherent(struct iommu_table *tbl, size_t size,
			 void *vaddr, dma_addr_t dma_handle)
{
	if (tbl) {
		unsigned int nio_pages;

		size = PAGE_ALIGN(size);
		nio_pages = size >> IOMMU_PAGE_SHIFT;
		iommu_free(tbl, dma_handle, nio_pages);
		free_pages((unsigned long)vaddr, get_order(size));
	}
}
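
/*
 * Illustrative pairing (arguments hypothetical; a node of -1 lets the
 * page allocator pick any NUMA node):
 *
 *	dma_addr_t bus;
 *	void *va = iommu_alloc_coherent(tbl, 0x2000, &bus, 0xffffffffUL,
 *					GFP_KERNEL, -1);
 *	if (va)
 *		iommu_free_coherent(tbl, 0x2000, va, bus);
 */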