/*
 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
 *
 * Rewrite, cleanup, new allocation schemes, virtual merging:
 * Copyright (C) 2004 Olof Johansson, IBM Corporation
 *               and  Ben. Herrenschmidt, IBM Corporation
 *
 * Dynamic DMA mapping support, bus-independent parts.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/iommu-helper.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/iommu.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/kdump.h>

#define DBG(...)

#ifdef CONFIG_IOMMU_VMERGE
static int novmerge = 0;
#else
static int novmerge = 1;
#endif

static int protect4gb = 1;

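/*
 * Number of IOMMU pages spanned by a buffer, taking into account the
 * offset of vaddr within its first IOMMU page.  Worked example
 * (assuming a 4K IOMMU page size): vaddr = 0x1003, slen = 0x2000
 * touches pages 0x1000, 0x2000 and 0x3000; IOMMU_PAGE_ALIGN(0x3003)
 * is 0x4000, and 0x4000 - 0x1000 = 0x3000, so npages = 3.
 */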
static inline unsigned long iommu_num_pages(unsigned long vaddr,
					    unsigned long slen)
{
	unsigned long npages;

	npages = IOMMU_PAGE_ALIGN(vaddr + slen) - (vaddr & IOMMU_PAGE_MASK);
	npages >>= IOMMU_PAGE_SHIFT;

	return npages;
}

static int __init setup_protect4gb(char *str)
{
	if (strcmp(str, "on") == 0)
		protect4gb = 1;
	else if (strcmp(str, "off") == 0)
		protect4gb = 0;

	return 1;
}

static int __init setup_iommu(char *str)
{
	if (!strcmp(str, "novmerge"))
		novmerge = 1;
	else if (!strcmp(str, "vmerge"))
		novmerge = 0;
	return 1;
}

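/*
 * Boot-time switches: "protect4gb=on|off" and "iommu=vmerge|novmerge"
 * on the kernel command line feed the handlers above.
 */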
__setup("protect4gb=", setup_protect4gb);
__setup("iommu=", setup_iommu);

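/*
 * Find a free run of @npages entries in @tbl's allocation bitmap.
 * Honours the requested alignment (@align_order), the device's DMA
 * @mask and segment boundary, and keeps separate hints for large
 * (more than 15 pages) and small allocations so the two classes are
 * packed into different regions of the table.  Returns a table index,
 * or DMA_ERROR_CODE on failure.  Caller must hold tbl->it_lock.
 */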
static unsigned long iommu_range_alloc(struct device *dev,
				       struct iommu_table *tbl,
				       unsigned long npages,
				       unsigned long *handle,
				       unsigned long mask,
				       unsigned int align_order)
{
	unsigned long n, end, start;
	unsigned long limit;
	int largealloc = npages > 15;
	int pass = 0;
	unsigned long align_mask;
	unsigned long boundary_size;

	align_mask = 0xffffffffffffffffl >> (64 - align_order);

	/* This allocator was derived from x86_64's bit string search */

	/* Sanity check */
	if (unlikely(npages == 0)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return DMA_ERROR_CODE;
	}

	if (handle && *handle)
		start = *handle;
	else
		start = largealloc ? tbl->it_largehint : tbl->it_hint;

	/* Use only the lower 3/4 of the table for small allocs (15 pages
	 * or less); the top quarter is set aside for large allocations.
	 */
	limit = largealloc ? tbl->it_size : tbl->it_halfpoint;

	if (largealloc && start < tbl->it_halfpoint)
		start = tbl->it_halfpoint;

	/* The case below can happen if we have a small segment appended
	 * to a large, or when the previous alloc was at the very end of
	 * the available space. If so, go back to the initial start.
	 */
	if (start >= limit)
		start = largealloc ? tbl->it_largehint : tbl->it_hint;

 again:

	if (limit + tbl->it_offset > mask) {
		limit = mask - tbl->it_offset + 1;
		/* If we're constrained on address range, first try
		 * at the masked hint to avoid O(n) search complexity,
		 * but on second pass, start at 0.
		 */
		if ((start & mask) >= limit || pass > 0)
			start = 0;
		else
			start &= mask;
	}

	if (dev)
		boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
				      1 << IOMMU_PAGE_SHIFT);
	else
		boundary_size = ALIGN(1UL << 32, 1 << IOMMU_PAGE_SHIFT);
	/* 4GB boundary for iseries_hv_alloc and iseries_hv_map */

	n = iommu_area_alloc(tbl->it_map, limit, start, npages,
			     tbl->it_offset, boundary_size >> IOMMU_PAGE_SHIFT,
			     align_mask);
	if (n == -1) {
		if (likely(pass < 2)) {
			/* First failure: rescan this allocation's region
			 * of the table.  Second failure: try the other
			 * region as well.
			 */
			start = (largealloc ^ pass) ? tbl->it_halfpoint : 0;
			limit = pass ? tbl->it_size : limit;
			pass++;
			goto again;
		} else {
			/* Third failure, give up */
			return DMA_ERROR_CODE;
		}
	}

	end = n + npages;

	/* Bump the hint to a new block for small allocs. */
	if (largealloc) {
		/* Don't bump to new block to avoid fragmentation */
		tbl->it_largehint = end;
	} else {
		/* Overflow will be taken care of at the next allocation */
		tbl->it_hint = (end + tbl->it_blocksize - 1) &
			       ~(tbl->it_blocksize - 1);
	}

	/* Update handle for SG allocations */
	if (handle)
		*handle = end;

	return n;
}

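/*
 * Allocate table entries for @page, program the hardware TCEs and
 * return the resulting DMA address, or DMA_ERROR_CODE on failure.
 * Takes tbl->it_lock internally.
 */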
static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
			      void *page, unsigned int npages,
			      enum dma_data_direction direction,
			      unsigned long mask, unsigned int align_order)
{
	unsigned long entry, flags;
	dma_addr_t ret = DMA_ERROR_CODE;

	spin_lock_irqsave(&(tbl->it_lock), flags);

	entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order);

	if (unlikely(entry == DMA_ERROR_CODE)) {
		spin_unlock_irqrestore(&(tbl->it_lock), flags);
		return DMA_ERROR_CODE;
	}

	entry += tbl->it_offset;	/* Offset into real TCE table */
	ret = entry << IOMMU_PAGE_SHIFT;	/* Set the return dma address */

	/* Put the TCEs in the HW table */
	ppc_md.tce_build(tbl, entry, npages, (unsigned long)page & IOMMU_PAGE_MASK,
			 direction);

	/* Flush/invalidate TLB caches if necessary */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	spin_unlock_irqrestore(&(tbl->it_lock), flags);

	/* Make sure updates are seen by hardware */
	mb();

	return ret;
}

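/*
 * Clear @npages hardware TCEs and their allocation bits, starting at
 * @dma_addr.  An out-of-range address is logged (ratelimited) and
 * ignored.  Caller must hold tbl->it_lock and do any TLB flush.
 */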
static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
			 unsigned int npages)
{
	unsigned long entry, free_entry;

	entry = dma_addr >> IOMMU_PAGE_SHIFT;
	free_entry = entry - tbl->it_offset;

	if (((free_entry + npages) > tbl->it_size) ||
	    (entry < tbl->it_offset)) {
		if (printk_ratelimit()) {
			printk(KERN_INFO "iommu_free: invalid entry\n");
			printk(KERN_INFO "\tentry    = 0x%lx\n", entry);
			printk(KERN_INFO "\tdma_addr = 0x%lx\n", (u64)dma_addr);
			printk(KERN_INFO "\tTable    = 0x%lx\n", (u64)tbl);
			printk(KERN_INFO "\tbus#     = 0x%lx\n", (u64)tbl->it_busno);
			printk(KERN_INFO "\tsize     = 0x%lx\n", (u64)tbl->it_size);
			printk(KERN_INFO "\tstartOff = 0x%lx\n", (u64)tbl->it_offset);
			printk(KERN_INFO "\tindex    = 0x%lx\n", (u64)tbl->it_index);
			WARN_ON(1);
		}
		return;
	}

	ppc_md.tce_free(tbl, entry, npages);
	iommu_area_free(tbl->it_map, free_entry, npages);
}

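/*
 * Locked wrapper around __iommu_free() that also flushes the
 * hardware TLB when the platform requires one.
 */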
static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
		       unsigned int npages)
{
	unsigned long flags;

	spin_lock_irqsave(&(tbl->it_lock), flags);

	__iommu_free(tbl, dma_addr, npages);

	/* Make sure TLB cache is flushed if the HW needs it. We do
	 * not do an mb() here on purpose, it is not needed on any of
	 * the current platforms.
	 */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	spin_unlock_irqrestore(&(tbl->it_lock), flags);
}

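/*
 * Map a scatterlist for DMA.  Each segment gets its own IOMMU
 * allocation; unless "iommu=novmerge" is in effect, entries whose
 * DMA addresses come out adjacent are merged into one segment, up to
 * the device's maximum segment size.  Returns the number of mapped
 * segments, or 0 on failure (with any partial mappings backed out).
 */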
int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
		 struct scatterlist *sglist, int nelems,
		 unsigned long mask, enum dma_data_direction direction)
{
	dma_addr_t dma_next = 0, dma_addr;
	unsigned long flags;
	struct scatterlist *s, *outs, *segstart;
	int outcount, incount, i;
	unsigned int align;
	unsigned long handle;
	unsigned int max_seg_size;

	BUG_ON(direction == DMA_NONE);

	if ((nelems == 0) || !tbl)
		return 0;

	outs = s = segstart = &sglist[0];
	outcount = 1;
	incount = nelems;
	handle = 0;

	/* Init first segment length for backout at failure */
	outs->dma_length = 0;

	DBG("sg mapping %d elements:\n", nelems);

	spin_lock_irqsave(&(tbl->it_lock), flags);

	max_seg_size = dma_get_max_seg_size(dev);
	for_each_sg(sglist, s, nelems, i) {
		unsigned long vaddr, npages, entry, slen;

		slen = s->length;
		/* Sanity check */
		if (slen == 0) {
			dma_next = 0;
			continue;
		}
		/* Allocate iommu entries for that segment */
		vaddr = (unsigned long) sg_virt(s);
		npages = iommu_num_pages(vaddr, slen);
		align = 0;
		if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && slen >= PAGE_SIZE &&
		    (vaddr & ~PAGE_MASK) == 0)
			align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;
		entry = iommu_range_alloc(dev, tbl, npages, &handle,
					  mask >> IOMMU_PAGE_SHIFT, align);

		DBG("  - vaddr: %lx, size: %lx\n", vaddr, slen);

		/* Handle failure */
		if (unlikely(entry == DMA_ERROR_CODE)) {
			if (printk_ratelimit())
				printk(KERN_INFO "iommu_alloc failed, tbl %p vaddr %lx"
				       " npages %lx\n", tbl, vaddr, npages);
			goto failure;
		}

		/* Convert entry to a dma_addr_t */
		entry += tbl->it_offset;
		dma_addr = entry << IOMMU_PAGE_SHIFT;
		dma_addr |= (s->offset & ~IOMMU_PAGE_MASK);

		DBG("  - %lu pages, entry: %lx, dma_addr: %lx\n",
			    npages, entry, dma_addr);

		/* Insert into HW table */
		ppc_md.tce_build(tbl, entry, npages, vaddr & IOMMU_PAGE_MASK, direction);

		/* If we are in an open segment, try merging */
		if (segstart != s) {
			DBG("  - trying merge...\n");
			/* We cannot merge if:
			 * - allocated dma_addr isn't contiguous to previous allocation
			 * - merging would push outs past the device's max segment size
			 */
			if (novmerge || (dma_addr != dma_next) ||
			    (outs->dma_length + s->length > max_seg_size)) {
				/* Can't merge: create a new segment */
				segstart = s;
				outcount++;
				outs = sg_next(outs);
				DBG("    can't merge, new segment.\n");
			} else {
				outs->dma_length += s->length;
				DBG("    merged, new len: %u\n", outs->dma_length);
			}
		}

		if (segstart == s) {
			/* This is a new segment, fill entries */
			DBG("  - filling new segment.\n");
			outs->dma_address = dma_addr;
			outs->dma_length = slen;
		}

		/* Calculate next page pointer for contiguous check */
		dma_next = dma_addr + slen;

		DBG("  - dma next is: %lx\n", dma_next);
	}

	/* Flush/invalidate TLB caches if necessary */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	spin_unlock_irqrestore(&(tbl->it_lock), flags);

	DBG("mapped %d elements:\n", outcount);

	/* For the sake of iommu_unmap_sg, we clear out the length in the
	 * next entry of the sglist if we didn't fill the list completely
	 */
	if (outcount < incount) {
		outs = sg_next(outs);
		outs->dma_address = DMA_ERROR_CODE;
		outs->dma_length = 0;
	}

	/* Make sure updates are seen by hardware */
	mb();

	return outcount;

 failure:
	for_each_sg(sglist, s, nelems, i) {
		if (s->dma_length != 0) {
			unsigned long vaddr, npages;

			vaddr = s->dma_address & IOMMU_PAGE_MASK;
			npages = iommu_num_pages(s->dma_address, s->dma_length);
			__iommu_free(tbl, vaddr, npages);
			s->dma_address = DMA_ERROR_CODE;
			s->dma_length = 0;
		}
		if (s == outs)
			break;
	}
	spin_unlock_irqrestore(&(tbl->it_lock), flags);
	return 0;
}

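/*
 * Undo iommu_map_sg(): walk the list up to the first zero-length
 * entry (see the backout logic above) and free each mapping.
 */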
void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
		    int nelems, enum dma_data_direction direction)
{
	struct scatterlist *sg;
	unsigned long flags;

	BUG_ON(direction == DMA_NONE);

	if (!tbl)
		return;

	spin_lock_irqsave(&(tbl->it_lock), flags);

	sg = sglist;
	while (nelems--) {
		unsigned int npages;
		dma_addr_t dma_handle = sg->dma_address;

		if (sg->dma_length == 0)
			break;
		npages = iommu_num_pages(dma_handle, sg->dma_length);
		__iommu_free(tbl, dma_handle, npages);
		sg = sg_next(sg);
	}

	/* Flush/invalidate TLBs if necessary. As for iommu_free(), we
	 * do not do an mb() here, the affected platforms do not need it
	 * when freeing.
	 */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	spin_unlock_irqrestore(&(tbl->it_lock), flags);
}

/*
 * Build an iommu_table structure.  This contains a bitmap which
 * is used to manage allocation of the TCE space.
 */
struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
{
	unsigned long sz;
	static int welcomed = 0;
	struct page *page;

	/* Set aside 1/4 of the table for large allocations. */
	tbl->it_halfpoint = tbl->it_size * 3 / 4;

	/* number of bytes needed for the bitmap */
	sz = (tbl->it_size + 7) >> 3;

	page = alloc_pages_node(nid, GFP_ATOMIC, get_order(sz));
	if (!page)
		panic("iommu_init_table: Can't allocate %ld bytes\n", sz);
	tbl->it_map = page_address(page);
	memset(tbl->it_map, 0, sz);

	tbl->it_hint = 0;
	tbl->it_largehint = tbl->it_halfpoint;
	spin_lock_init(&tbl->it_lock);

#ifdef CONFIG_CRASH_DUMP
	if (ppc_md.tce_get) {
		unsigned long index;
		unsigned long tceval;
		unsigned long tcecount = 0;

		/*
		 * Reserve the existing mappings left by the first kernel.
		 */
		for (index = 0; index < tbl->it_size; index++) {
			tceval = ppc_md.tce_get(tbl, index + tbl->it_offset);
			/*
			 * Freed TCE entry contains 0x7fffffffffffffff on JS20
			 */
			if (tceval && (tceval != 0x7fffffffffffffffUL)) {
				__set_bit(index, tbl->it_map);
				tcecount++;
			}
		}
		if ((tbl->it_size - tcecount) < KDUMP_MIN_TCE_ENTRIES) {
			printk(KERN_WARNING "TCE table is full; ");
			printk(KERN_WARNING "freeing %d entries for the kdump boot\n",
				KDUMP_MIN_TCE_ENTRIES);
			for (index = tbl->it_size - KDUMP_MIN_TCE_ENTRIES;
				index < tbl->it_size; index++)
				__clear_bit(index, tbl->it_map);
		}
	}
#else
	/* Clear the hardware table in case firmware left allocations in it */
	ppc_md.tce_free(tbl, tbl->it_offset, tbl->it_size);
#endif

	if (!welcomed) {
		printk(KERN_INFO "IOMMU table initialized, virtual merging %s\n",
		       novmerge ? "disabled" : "enabled");
		welcomed = 1;
	}

	return tbl;
}

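/*
 * Tear down a table built by iommu_init_table(): warn about any TCEs
 * still allocated, then free the bitmap and the table itself.
 */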
void iommu_free_table(struct iommu_table *tbl, const char *node_name)
{
	unsigned long bitmap_sz, i;
	unsigned int order;

	if (!tbl || !tbl->it_map) {
		printk(KERN_ERR "%s: expected TCE map for %s\n", __func__,
				node_name);
		return;
	}

	/* verify that table contains no entries */
	/* it_size is in entries, and we're examining 64 at a time */
	for (i = 0; i < (tbl->it_size/64); i++) {
		if (tbl->it_map[i] != 0) {
			printk(KERN_WARNING "%s: Unexpected TCEs for %s\n",
				__func__, node_name);
			break;
		}
	}

	/* calculate bitmap size in bytes */
	bitmap_sz = (tbl->it_size + 7) / 8;

	/* free bitmap */
	order = get_order(bitmap_sz);
	free_pages((unsigned long) tbl->it_map, order);

	/* free table */
	kfree(tbl);
}

/* Creates TCEs for a user provided buffer. The user buffer must be
 * contiguous real kernel storage (not vmalloc). The address of the buffer
 * passed here is the kernel (virtual) address of the buffer. The buffer
 * need not be page aligned, the dma_addr_t returned will point to the same
 * byte within the page as vaddr.
 */
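/*
 * Illustrative sketch only; "dev", "tbl", "buf" and "len" below are
 * hypothetical caller state, not part of this file:
 *
 *	dma_addr_t handle;
 *
 *	handle = iommu_map_single(dev, tbl, buf, len, *dev->dma_mask,
 *				  DMA_TO_DEVICE);
 *	if (handle == DMA_ERROR_CODE)
 *		return -EIO;
 *	(later: iommu_unmap_single(tbl, handle, len, DMA_TO_DEVICE);)
 */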
dma_addr_t iommu_map_single(struct device *dev, struct iommu_table *tbl,
			    void *vaddr, size_t size, unsigned long mask,
			    enum dma_data_direction direction)
{
	dma_addr_t dma_handle = DMA_ERROR_CODE;
	unsigned long uaddr;
	unsigned int npages, align;

	BUG_ON(direction == DMA_NONE);

	uaddr = (unsigned long)vaddr;
	npages = iommu_num_pages(uaddr, size);

	if (tbl) {
		align = 0;
		if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && size >= PAGE_SIZE &&
		    ((unsigned long)vaddr & ~PAGE_MASK) == 0)
			align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;

		dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction,
					 mask >> IOMMU_PAGE_SHIFT, align);
		if (dma_handle == DMA_ERROR_CODE) {
			if (printk_ratelimit()) {
				printk(KERN_INFO "iommu_alloc failed, "
						"tbl %p vaddr %p npages %d\n",
						tbl, vaddr, npages);
			}
		} else
			dma_handle |= (uaddr & ~IOMMU_PAGE_MASK);
	}

	return dma_handle;
}

void iommu_unmap_single(struct iommu_table *tbl, dma_addr_t dma_handle,
			size_t size, enum dma_data_direction direction)
{
	unsigned int npages;

	BUG_ON(direction == DMA_NONE);

	if (tbl) {
		npages = iommu_num_pages(dma_handle, size);
		iommu_free(tbl, dma_handle, npages);
	}
}

/* Allocates a contiguous real buffer and creates mappings over it.
 * Returns the virtual address of the buffer and sets dma_handle
 * to the dma address (mapping) of the first page.
 */
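/*
 * Illustrative sketch of the alloc/free pairing; "dev", "tbl" and
 * "node" below are hypothetical caller state:
 *
 *	dma_addr_t handle;
 *	void *buf;
 *
 *	buf = iommu_alloc_coherent(dev, tbl, PAGE_SIZE, &handle,
 *				   *dev->dma_mask, GFP_KERNEL, node);
 *	...
 *	iommu_free_coherent(tbl, PAGE_SIZE, buf, handle);
 */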
void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
			   size_t size, dma_addr_t *dma_handle,
			   unsigned long mask, gfp_t flag, int node)
{
	void *ret = NULL;
	dma_addr_t mapping;
	unsigned int order;
	unsigned int nio_pages, io_order;
	struct page *page;

	size = PAGE_ALIGN(size);
	order = get_order(size);

	/*
	 * Client asked for way too much space. This is checked later
	 * anyway. It is easier to debug here for the drivers than in
	 * the tce tables.
	 */
	if (order >= IOMAP_MAX_ORDER) {
		printk("iommu_alloc_coherent size too large: 0x%lx\n", size);
		return NULL;
	}

	if (!tbl)
		return NULL;

	/* Alloc enough pages (and possibly more) */
	page = alloc_pages_node(node, flag, order);
	if (!page)
		return NULL;
	ret = page_address(page);
	memset(ret, 0, size);

	/* Set up tces to cover the allocated range */
	nio_pages = size >> IOMMU_PAGE_SHIFT;
	io_order = get_iommu_order(size);
	mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
			      mask >> IOMMU_PAGE_SHIFT, io_order);
	if (mapping == DMA_ERROR_CODE) {
		free_pages((unsigned long)ret, order);
		return NULL;
	}
	*dma_handle = mapping;
	return ret;
}

void iommu_free_coherent(struct iommu_table *tbl, size_t size,
			 void *vaddr, dma_addr_t dma_handle)
{
	if (tbl) {
		unsigned int nio_pages;

		size = PAGE_ALIGN(size);
		nio_pages = size >> IOMMU_PAGE_SHIFT;
		iommu_free(tbl, dma_handle, nio_pages);
		free_pages((unsigned long)vaddr, get_order(size));
	}
}