/*
 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
 *
 * Rewrite, cleanup, new allocation schemes, virtual merging:
 * Copyright (C) 2004 Olof Johansson, IBM Corporation
 *               and  Ben. Herrenschmidt, IBM Corporation
 *
 * Dynamic DMA mapping support, bus-independent parts.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */


#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/iommu.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/kdump.h>

#define DBG(...)

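/*
 * "Virtual merging" lets iommu_map_sg() coalesce scatterlist entries
 * whose IOMMU bus addresses come out back to back into a single DMA
 * segment, so the device sees fewer, larger segments.  The default is
 * set at build time (CONFIG_IOMMU_VMERGE) and can be overridden on the
 * command line with iommu=vmerge / iommu=novmerge (see setup_iommu()).
 */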
#ifdef CONFIG_IOMMU_VMERGE
static int novmerge = 0;
#else
static int novmerge = 1;
#endif

static int protect4gb = 1;

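/*
 * Number of IOMMU pages spanned by [vaddr, vaddr + slen).  Worked
 * example, assuming 4K IOMMU pages (IOMMU_PAGE_SHIFT == 12): for
 * vaddr = 0x1003 and slen = 0x2000 the end rounds up to 0x4000 and
 * the start rounds down to 0x1000, so the buffer spans 0x3000 bytes,
 * i.e. 3 IOMMU pages, even though the length alone would fit in 2.
 */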
static inline unsigned long iommu_num_pages(unsigned long vaddr,
					    unsigned long slen)
{
	unsigned long npages;

	npages = IOMMU_PAGE_ALIGN(vaddr + slen) - (vaddr & IOMMU_PAGE_MASK);
	npages >>= IOMMU_PAGE_SHIFT;

	return npages;
}

static int __init setup_protect4gb(char *str)
{
	if (strcmp(str, "on") == 0)
		protect4gb = 1;
	else if (strcmp(str, "off") == 0)
		protect4gb = 0;

	return 1;
}

static int __init setup_iommu(char *str)
{
	if (!strcmp(str, "novmerge"))
		novmerge = 1;
	else if (!strcmp(str, "vmerge"))
		novmerge = 0;
	return 1;
}

__setup("protect4gb=", setup_protect4gb);
__setup("iommu=", setup_iommu);

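/*
 * Bitmap allocator for TCE entries.  The table is split at it_halfpoint:
 * allocations of more than 15 pages search only the upper quarter (the
 * "large" region) so that big, long-lived mappings don't fragment the
 * space used by small ones.  it_hint and it_largehint cache where the
 * previous allocation ended.  On failure the search first rescans its
 * own region from the beginning, then the rest of the table, before
 * giving up.
 */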
static unsigned long iommu_range_alloc(struct iommu_table *tbl,
				       unsigned long npages,
				       unsigned long *handle,
				       unsigned long mask,
				       unsigned int align_order)
{
	unsigned long n, end, i, start;
	unsigned long limit;
	int largealloc = npages > 15;
	int pass = 0;
	unsigned long align_mask;

	/* Low-order bits that must be clear in an aligned allocation;
	 * computed this way so align_order == 0 doesn't become an
	 * undefined 64-bit shift.
	 */
	align_mask = (1ull << align_order) - 1;

	/* This allocator was derived from x86_64's bit string search */

	/* Sanity check */
	if (unlikely(npages == 0)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return DMA_ERROR_CODE;
	}

	if (handle && *handle)
		start = *handle;
	else
		start = largealloc ? tbl->it_largehint : tbl->it_hint;

	/* Use only half of the table for small allocs (15 pages or less) */
	limit = largealloc ? tbl->it_size : tbl->it_halfpoint;

	if (largealloc && start < tbl->it_halfpoint)
		start = tbl->it_halfpoint;

	/* The case below can happen if we have a small segment appended
	 * to a large, or when the previous alloc was at the very end of
	 * the available space. If so, go back to the initial start.
	 */
	if (start >= limit)
		start = largealloc ? tbl->it_largehint : tbl->it_hint;

 again:

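	/* Clamp the search so the resulting bus address stays below the
	 * device's DMA mask.  Both limit and mask count IOMMU pages here;
	 * callers shift byte masks right by IOMMU_PAGE_SHIFT.
	 */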
	if (limit + tbl->it_offset > mask) {
		limit = mask - tbl->it_offset + 1;
		/* If we're constrained on address range, first try
		 * at the masked hint to avoid O(n) search complexity,
		 * but on second pass, start at 0.
		 */
		if ((start & mask) >= limit || pass > 0)
			start = 0;
		else
			start &= mask;
	}

	n = find_next_zero_bit(tbl->it_map, limit, start);

	/* Align allocation */
	n = (n + align_mask) & ~align_mask;

	end = n + npages;

	if (unlikely(end >= limit)) {
		if (likely(pass < 2)) {
			/* First failure, just rescan the half of the table.
			 * Second failure, rescan the other half of the table.
			 */
			start = (largealloc ^ pass) ? tbl->it_halfpoint : 0;
			limit = pass ? tbl->it_size : limit;
			pass++;
			goto again;
		} else {
			/* Third failure, give up */
			return DMA_ERROR_CODE;
		}
	}

	for (i = n; i < end; i++)
		if (test_bit(i, tbl->it_map)) {
			start = i+1;
			goto again;
		}

	for (i = n; i < end; i++)
		__set_bit(i, tbl->it_map);

	/* Bump the hint to a new block for small allocs. */
	if (largealloc) {
		/* Don't bump to new block to avoid fragmentation */
		tbl->it_largehint = end;
	} else {
		/* Overflow will be taken care of at the next allocation */
		tbl->it_hint = (end + tbl->it_blocksize - 1) &
			       ~(tbl->it_blocksize - 1);
	}

	/* Update handle for SG allocations */
	if (handle)
		*handle = end;

	return n;
}

static dma_addr_t iommu_alloc(struct iommu_table *tbl, void *page,
			      unsigned int npages, enum dma_data_direction direction,
			      unsigned long mask, unsigned int align_order)
{
	unsigned long entry, flags;
	dma_addr_t ret = DMA_ERROR_CODE;

	spin_lock_irqsave(&(tbl->it_lock), flags);

	entry = iommu_range_alloc(tbl, npages, NULL, mask, align_order);

	if (unlikely(entry == DMA_ERROR_CODE)) {
		spin_unlock_irqrestore(&(tbl->it_lock), flags);
		return DMA_ERROR_CODE;
	}

	entry += tbl->it_offset;	/* Offset into real TCE table */
	ret = entry << IOMMU_PAGE_SHIFT;	/* Set the return dma address */

	/* Put the TCEs in the HW table */
	ppc_md.tce_build(tbl, entry, npages, (unsigned long)page & IOMMU_PAGE_MASK,
			 direction);


	/* Flush/invalidate TLB caches if necessary */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	spin_unlock_irqrestore(&(tbl->it_lock), flags);

	/* Make sure updates are seen by hardware */
	mb();

	return ret;
}

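/*
 * Release npages of IOMMU space starting at dma_addr: validate that the
 * range lies within this table, clear the hardware TCEs, then clear the
 * corresponding bits in the allocation bitmap.  The caller must hold
 * tbl->it_lock and do any tce_flush that the platform requires.
 */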
static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
			 unsigned int npages)
{
	unsigned long entry, free_entry;
	unsigned long i;

	entry = dma_addr >> IOMMU_PAGE_SHIFT;
	free_entry = entry - tbl->it_offset;

	if (((free_entry + npages) > tbl->it_size) ||
	    (entry < tbl->it_offset)) {
		if (printk_ratelimit()) {
			printk(KERN_INFO "iommu_free: invalid entry\n");
			printk(KERN_INFO "\tentry    = 0x%lx\n", entry);
			printk(KERN_INFO "\tdma_addr = 0x%lx\n", (u64)dma_addr);
			printk(KERN_INFO "\tTable    = 0x%lx\n", (u64)tbl);
			printk(KERN_INFO "\tbus#     = 0x%lx\n", (u64)tbl->it_busno);
			printk(KERN_INFO "\tsize     = 0x%lx\n", (u64)tbl->it_size);
			printk(KERN_INFO "\tstartOff = 0x%lx\n", (u64)tbl->it_offset);
			printk(KERN_INFO "\tindex    = 0x%lx\n", (u64)tbl->it_index);
			WARN_ON(1);
		}
		return;
	}

	ppc_md.tce_free(tbl, entry, npages);

	for (i = 0; i < npages; i++)
		__clear_bit(free_entry+i, tbl->it_map);
}

static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
		       unsigned int npages)
{
	unsigned long flags;

	spin_lock_irqsave(&(tbl->it_lock), flags);

	__iommu_free(tbl, dma_addr, npages);

	/* Make sure TLB cache is flushed if the HW needs it. We do
	 * not do an mb() here on purpose, it is not needed on any of
	 * the current platforms.
	 */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	spin_unlock_irqrestore(&(tbl->it_lock), flags);
}

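/*
 * Map a scatterlist for DMA.  Every element gets its own TCE allocation,
 * but consecutive elements whose resulting bus addresses turn out to be
 * contiguous are merged into a single output segment, unless novmerge is
 * set or the merge would exceed the device's maximum segment size.
 * Returns the number of output segments, or 0 on failure, in which case
 * all mappings made so far have been backed out.
 */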
int iommu_map_sg(struct device *dev, struct scatterlist *sglist,
		 int nelems, unsigned long mask,
		 enum dma_data_direction direction)
{
	struct iommu_table *tbl = dev->archdata.dma_data;
	dma_addr_t dma_next = 0, dma_addr;
	unsigned long flags;
	struct scatterlist *s, *outs, *segstart;
	int outcount, incount, i;
	unsigned int align;
	unsigned long handle;
	unsigned int max_seg_size;

	BUG_ON(direction == DMA_NONE);

	if ((nelems == 0) || !tbl)
		return 0;

	outs = s = segstart = &sglist[0];
	outcount = 1;
	incount = nelems;
	handle = 0;

	/* Init first segment length for backout at failure */
	outs->dma_length = 0;

	DBG("sg mapping %d elements:\n", nelems);

	spin_lock_irqsave(&(tbl->it_lock), flags);

	max_seg_size = dma_get_max_seg_size(dev);
	for_each_sg(sglist, s, nelems, i) {
		unsigned long vaddr, npages, entry, slen;

		slen = s->length;
		/* Sanity check */
		if (slen == 0) {
			dma_next = 0;
			continue;
		}
		/* Allocate iommu entries for that segment */
		vaddr = (unsigned long) sg_virt(s);
		npages = iommu_num_pages(vaddr, slen);
		align = 0;
		if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && slen >= PAGE_SIZE &&
		    (vaddr & ~PAGE_MASK) == 0)
			align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;
		entry = iommu_range_alloc(tbl, npages, &handle,
					  mask >> IOMMU_PAGE_SHIFT, align);

		DBG("  - vaddr: %lx, size: %lx\n", vaddr, slen);

		/* Handle failure */
		if (unlikely(entry == DMA_ERROR_CODE)) {
			if (printk_ratelimit())
				printk(KERN_INFO "iommu_alloc failed, tbl %p vaddr %lx"
				       " npages %lx\n", tbl, vaddr, npages);
			goto failure;
		}

		/* Convert entry to a dma_addr_t */
		entry += tbl->it_offset;
		dma_addr = entry << IOMMU_PAGE_SHIFT;
		dma_addr |= (s->offset & ~IOMMU_PAGE_MASK);

		DBG("  - %lu pages, entry: %lx, dma_addr: %lx\n",
			    npages, entry, dma_addr);

		/* Insert into HW table */
		ppc_md.tce_build(tbl, entry, npages, vaddr & IOMMU_PAGE_MASK, direction);

		/* If we are in an open segment, try merging */
		if (segstart != s) {
			DBG("  - trying merge...\n");
			/* We cannot merge if:
			 * - allocated dma_addr isn't contiguous to previous allocation
			 */
			if (novmerge || (dma_addr != dma_next) ||
			    (outs->dma_length + s->length > max_seg_size)) {
				/* Can't merge: create a new segment */
				segstart = s;
				outcount++;
				outs = sg_next(outs);
				DBG("    can't merge, new segment.\n");
			} else {
				outs->dma_length += s->length;
				DBG("    merged, new len: %u\n", outs->dma_length);
			}
		}

		if (segstart == s) {
			/* This is a new segment, fill entries */
			DBG("  - filling new segment.\n");
			outs->dma_address = dma_addr;
			outs->dma_length = slen;
		}

		/* Calculate next page pointer for contiguous check */
		dma_next = dma_addr + slen;

		DBG("  - dma next is: %lx\n", dma_next);
	}

	/* Flush/invalidate TLB caches if necessary */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	spin_unlock_irqrestore(&(tbl->it_lock), flags);

	DBG("mapped %d elements:\n", outcount);

	/* For the sake of iommu_unmap_sg, we clear out the length in the
	 * next entry of the sglist if we didn't fill the list completely
	 */
	if (outcount < incount) {
		outs = sg_next(outs);
		outs->dma_address = DMA_ERROR_CODE;
		outs->dma_length = 0;
	}

	/* Make sure updates are seen by hardware */
	mb();

	return outcount;

 failure:
	for_each_sg(sglist, s, nelems, i) {
		if (s->dma_length != 0) {
			unsigned long vaddr, npages;

			vaddr = s->dma_address & IOMMU_PAGE_MASK;
			npages = iommu_num_pages(s->dma_address, s->dma_length);
			__iommu_free(tbl, vaddr, npages);
			s->dma_address = DMA_ERROR_CODE;
			s->dma_length = 0;
		}
		if (s == outs)
			break;
	}
	spin_unlock_irqrestore(&(tbl->it_lock), flags);
	return 0;
}


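/*
 * Undo iommu_map_sg().  The walk stops at the first zero-length entry,
 * which is the terminator iommu_map_sg() writes when merging left the
 * output list shorter than the input list.
 */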
void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
		    int nelems, enum dma_data_direction direction)
{
	struct scatterlist *sg;
	unsigned long flags;

	BUG_ON(direction == DMA_NONE);

	if (!tbl)
		return;

	spin_lock_irqsave(&(tbl->it_lock), flags);

	sg = sglist;
	while (nelems--) {
		unsigned int npages;
		dma_addr_t dma_handle = sg->dma_address;

		if (sg->dma_length == 0)
			break;
		npages = iommu_num_pages(dma_handle, sg->dma_length);
		__iommu_free(tbl, dma_handle, npages);
		sg = sg_next(sg);
	}

	/* Flush/invalidate TLBs if necessary. As for iommu_free(), we
	 * do not do an mb() here, the affected platforms do not need it
	 * when freeing.
	 */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	spin_unlock_irqrestore(&(tbl->it_lock), flags);
}

/*
 * Build an iommu_table structure.  This contains a bitmap which
 * is used to manage allocation of the TCE space.
 */
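/*
 * The caller allocates the structure and fills in the geometry before
 * calling in.  A minimal sketch, with illustrative values rather than
 * those of any particular platform:
 *
 *	tbl = kzalloc_node(sizeof(*tbl), GFP_KERNEL, nid);
 *	tbl->it_busno = busno;
 *	tbl->it_offset = 0;
 *	tbl->it_size = window_size >> IOMMU_PAGE_SHIFT;
 *	tbl->it_blocksize = 16;
 *	iommu_init_table(tbl, nid);
 */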
struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
{
	unsigned long sz;
	unsigned long start_index, end_index;
	unsigned long entries_per_4g;
	unsigned long index;
	static int welcomed = 0;
	struct page *page;

	/* Set aside 1/4 of the table for large allocations. */
	tbl->it_halfpoint = tbl->it_size * 3 / 4;

	/* number of bytes needed for the bitmap */
	sz = (tbl->it_size + 7) >> 3;

	page = alloc_pages_node(nid, GFP_ATOMIC, get_order(sz));
	if (!page)
		panic("iommu_init_table: Can't allocate %ld bytes\n", sz);
	tbl->it_map = page_address(page);
	memset(tbl->it_map, 0, sz);

	tbl->it_hint = 0;
	tbl->it_largehint = tbl->it_halfpoint;
	spin_lock_init(&tbl->it_lock);

#ifdef CONFIG_CRASH_DUMP
	if (ppc_md.tce_get) {
		unsigned long tceval;
		unsigned long tcecount = 0;

		/*
		 * Reserve the existing mappings left by the first kernel.
		 */
		for (index = 0; index < tbl->it_size; index++) {
			tceval = ppc_md.tce_get(tbl, index + tbl->it_offset);
			/*
			 * Freed TCE entry contains 0x7fffffffffffffff on JS20
			 */
			if (tceval && (tceval != 0x7fffffffffffffffUL)) {
				__set_bit(index, tbl->it_map);
				tcecount++;
			}
		}
		if ((tbl->it_size - tcecount) < KDUMP_MIN_TCE_ENTRIES) {
			printk(KERN_WARNING "TCE table is full; ");
			printk(KERN_WARNING "freeing %d entries for the kdump boot\n",
				KDUMP_MIN_TCE_ENTRIES);
			for (index = tbl->it_size - KDUMP_MIN_TCE_ENTRIES;
				index < tbl->it_size; index++)
				__clear_bit(index, tbl->it_map);
		}
	}
#else
	/* Clear the hardware table in case firmware left allocations in it */
	ppc_md.tce_free(tbl, tbl->it_offset, tbl->it_size);
#endif

	/*
	 * DMA cannot cross a 4 GB boundary.  Mark the last entry of each
	 * 4 GB chunk as reserved.
	 */
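	/* Worked example, assuming 4K IOMMU pages and it_offset = 0x1000:
	 * entries_per_4g = 0x100000, so start_index = (0x1000 | 0xfffff)
	 * - 0x1000 = 0xfefff, the table-relative index of absolute entry
	 * 0xfffff, i.e. the last entry below the first 4 GB boundary.
	 */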
	if (protect4gb) {
		entries_per_4g = 0x100000000l >> IOMMU_PAGE_SHIFT;

		/* Mark the last bit before a 4GB boundary as used */
		start_index = tbl->it_offset | (entries_per_4g - 1);
		start_index -= tbl->it_offset;

		end_index = tbl->it_size;

		for (index = start_index; index < end_index - 1; index += entries_per_4g)
			__set_bit(index, tbl->it_map);
	}

	if (!welcomed) {
		printk(KERN_INFO "IOMMU table initialized, virtual merging %s\n",
		       novmerge ? "disabled" : "enabled");
		welcomed = 1;
	}

	return tbl;
}

void iommu_free_table(struct iommu_table *tbl, const char *node_name)
{
	unsigned long bitmap_sz, i;
	unsigned int order;

	if (!tbl || !tbl->it_map) {
		printk(KERN_ERR "%s: expected TCE map for %s\n", __FUNCTION__,
				node_name);
		return;
	}

	/* verify that table contains no entries */
	/* it_size is in entries, and we're examining 64 at a time */
	for (i = 0; i < (tbl->it_size/64); i++) {
		if (tbl->it_map[i] != 0) {
			printk(KERN_WARNING "%s: Unexpected TCEs for %s\n",
				__FUNCTION__, node_name);
			break;
		}
	}

	/* calculate bitmap size in bytes */
	bitmap_sz = (tbl->it_size + 7) / 8;

	/* free bitmap */
	order = get_order(bitmap_sz);
	free_pages((unsigned long) tbl->it_map, order);

	/* free table */
	kfree(tbl);
}

/* Creates TCEs for a user-provided buffer.  The user buffer must be
 * contiguous real kernel storage (not vmalloc).  The address passed here
 * is the kernel (virtual) address of the buffer.  The buffer need not be
 * page aligned; the dma_addr_t returned will point to the same byte
 * within the page as vaddr.
 */
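/*
 * Sketch of a typical call from a dma_mapping_ops implementation (the
 * surrounding plumbing here is illustrative, not part of this file):
 *
 *	dma_addr_t h = iommu_map_single(tbl, ptr, len, *dev->dma_mask,
 *					DMA_TO_DEVICE);
 *	if (h == DMA_ERROR_CODE)
 *		...fail; a good mapping is later undone with
 *		iommu_unmap_single(tbl, h, len, DMA_TO_DEVICE);
 */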
dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr,
			    size_t size, unsigned long mask,
			    enum dma_data_direction direction)
{
	dma_addr_t dma_handle = DMA_ERROR_CODE;
	unsigned long uaddr;
	unsigned int npages, align;

	BUG_ON(direction == DMA_NONE);

	uaddr = (unsigned long)vaddr;
	npages = iommu_num_pages(uaddr, size);

	if (tbl) {
		align = 0;
		if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && size >= PAGE_SIZE &&
		    ((unsigned long)vaddr & ~PAGE_MASK) == 0)
			align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;

		dma_handle = iommu_alloc(tbl, vaddr, npages, direction,
					 mask >> IOMMU_PAGE_SHIFT, align);
		if (dma_handle == DMA_ERROR_CODE) {
			if (printk_ratelimit()) {
				printk(KERN_INFO "iommu_alloc failed, "
						"tbl %p vaddr %p npages %d\n",
						tbl, vaddr, npages);
			}
		} else
			dma_handle |= (uaddr & ~IOMMU_PAGE_MASK);
	}

	return dma_handle;
}

void iommu_unmap_single(struct iommu_table *tbl, dma_addr_t dma_handle,
			size_t size, enum dma_data_direction direction)
{
	unsigned int npages;

	BUG_ON(direction == DMA_NONE);

	if (tbl) {
		npages = iommu_num_pages(dma_handle, size);
		iommu_free(tbl, dma_handle, npages);
	}
}

/* Allocates a contiguous real buffer and creates mappings over it.
 * Returns the virtual address of the buffer and sets dma_handle
 * to the dma address (mapping) of the first page.
 */
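/*
 * Allocation and free must pair up with the same size.  Sketch, with
 * illustrative variable names:
 *
 *	void *cpu = iommu_alloc_coherent(tbl, size, &bus_addr, mask,
 *					 GFP_KERNEL, node);
 *	...
 *	iommu_free_coherent(tbl, size, cpu, bus_addr);
 */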
void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size,
		dma_addr_t *dma_handle, unsigned long mask, gfp_t flag, int node)
{
	void *ret = NULL;
	dma_addr_t mapping;
	unsigned int order;
	unsigned int nio_pages, io_order;
	struct page *page;

	size = PAGE_ALIGN(size);
	order = get_order(size);

	/*
	 * Client asked for way too much space.  This is checked later
	 * anyway.  It is easier to debug here for the drivers than in
	 * the tce tables.
	 */
	if (order >= IOMAP_MAX_ORDER) {
		printk(KERN_INFO "iommu_alloc_coherent size too large: 0x%lx\n", size);
		return NULL;
	}

	if (!tbl)
		return NULL;

	/* Alloc enough pages (and possibly more) */
	page = alloc_pages_node(node, flag, order);
	if (!page)
		return NULL;
	ret = page_address(page);
	memset(ret, 0, size);

	/* Set up tces to cover the allocated range */
	nio_pages = size >> IOMMU_PAGE_SHIFT;
	io_order = get_iommu_order(size);
	mapping = iommu_alloc(tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
			      mask >> IOMMU_PAGE_SHIFT, io_order);
	if (mapping == DMA_ERROR_CODE) {
		free_pages((unsigned long)ret, order);
		return NULL;
	}
	*dma_handle = mapping;
	return ret;
}

void iommu_free_coherent(struct iommu_table *tbl, size_t size,
			 void *vaddr, dma_addr_t dma_handle)
{
	if (tbl) {
		unsigned int nio_pages;

		size = PAGE_ALIGN(size);
		nio_pages = size >> IOMMU_PAGE_SHIFT;
		iommu_free(tbl, dma_handle, nio_pages);
		free_pages((unsigned long)vaddr, get_order(size));
	}
}