/*
 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
 *
 * Rewrite, cleanup, new allocation schemes, virtual merging:
 * Copyright (C) 2004 Olof Johansson, IBM Corporation
 *               and  Ben. Herrenschmidt, IBM Corporation
 *
 * Dynamic DMA mapping support, bus-independent parts.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */


#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/bitmap.h>
#include <linux/iommu-helper.h>
#include <linux/crash_dump.h>
#include <linux/hash.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/iommu.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/kdump.h>
#include <asm/fadump.h>

#define DBG(...)

static int novmerge;

static void __iommu_free(struct iommu_table *, dma_addr_t, unsigned int);

static int __init setup_iommu(char *str)
{
	if (!strcmp(str, "novmerge"))
		novmerge = 1;
	else if (!strcmp(str, "vmerge"))
		novmerge = 0;
	return 1;
}

__setup("iommu=", setup_iommu);
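
/*
 * Usage note: booting with "iommu=novmerge" on the kernel command line
 * disables virtual merging of scatterlist segments; "iommu=vmerge"
 * restores the default (merging enabled).
 */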

static DEFINE_PER_CPU(unsigned int, iommu_pool_hash);

/*
 * We precalculate the hash to avoid doing it on every allocation.
 *
 * The hash is important to spread CPUs across all the pools. For example,
 * on a POWER7 with 4 way SMT we want interrupts on the primary threads,
 * and with 4 pools a simple linear cpu-to-pool mapping would put all
 * primary threads into the same pool.
 */
static int __init setup_iommu_pool_hash(void)
{
	unsigned int i;

	for_each_possible_cpu(i)
		per_cpu(iommu_pool_hash, i) = hash_32(i, IOMMU_POOL_HASHBITS);

	return 0;
}
subsys_initcall(setup_iommu_pool_hash);

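/*
 * Find a run of @npages free entries in @tbl's bitmap and mark it
 * allocated.  Small requests (15 pages or fewer) are served from one of
 * the per-CPU-hashed pools, large ones from the dedicated large pool;
 * if the chosen pool is exhausted the other pools are scanned before
 * giving up.  Returns the table-relative entry index, or DMA_ERROR_CODE
 * on failure.
 */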
static unsigned long iommu_range_alloc(struct device *dev,
				       struct iommu_table *tbl,
				       unsigned long npages,
				       unsigned long *handle,
				       unsigned long mask,
				       unsigned int align_order)
{
	unsigned long n, end, start;
	unsigned long limit;
	int largealloc = npages > 15;
	int pass = 0;
	unsigned long align_mask;
	unsigned long boundary_size;
	unsigned long flags;
	unsigned int pool_nr;
	struct iommu_pool *pool;

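	/*
	 * Build a mask of the low align_order bits; e.g. align_order == 2
	 * gives 0x3, so only entry indices that are multiples of 4 will
	 * be returned by iommu_area_alloc() below.
	 */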
	align_mask = 0xffffffffffffffffl >> (64 - align_order);

	/* This allocator was derived from x86_64's bit string search */

	/* Sanity check */
	if (unlikely(npages == 0)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return DMA_ERROR_CODE;
	}

	/*
	 * We don't need to disable preemption here because any CPU can
	 * safely use any IOMMU pool.
	 */
	pool_nr = __raw_get_cpu_var(iommu_pool_hash) & (tbl->nr_pools - 1);

	if (largealloc)
		pool = &(tbl->large_pool);
	else
		pool = &(tbl->pools[pool_nr]);

	spin_lock_irqsave(&(pool->lock), flags);

again:
	if ((pass == 0) && handle && *handle)
		start = *handle;
	else
		start = pool->hint;

	limit = pool->end;

	/* The case below can happen if we have a small segment appended
	 * to a large one, or when the previous alloc was at the very end
	 * of the available space. If so, go back to the initial start.
	 */
	if (start >= limit)
		start = pool->start;

	if (limit + tbl->it_offset > mask) {
		limit = mask - tbl->it_offset + 1;
		/* If we're constrained on address range, first try
		 * at the masked hint to avoid O(n) search complexity,
		 * but on second pass, start at 0 in pool 0.
		 */
		if ((start & mask) >= limit || pass > 0) {
			pool = &(tbl->pools[0]);
			start = pool->start;
		} else {
			start &= mask;
		}
	}

	if (dev)
		boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
				      1 << IOMMU_PAGE_SHIFT);
	else
		boundary_size = ALIGN(1UL << 32, 1 << IOMMU_PAGE_SHIFT);
	/* 4GB boundary for iseries_hv_alloc and iseries_hv_map */

	n = iommu_area_alloc(tbl->it_map, limit, start, npages,
			     tbl->it_offset, boundary_size >> IOMMU_PAGE_SHIFT,
			     align_mask);
	if (n == -1) {
		if (likely(pass == 0)) {
			/* First failure: retry the same pool from its start */
			pool->hint = pool->start;
			pass++;
			goto again;

		} else if (pass <= tbl->nr_pools) {
			/* Now try scanning all the other pools */
			spin_unlock(&(pool->lock));
			pool_nr = (pool_nr + 1) & (tbl->nr_pools - 1);
			pool = &tbl->pools[pool_nr];
			spin_lock(&(pool->lock));
			pool->hint = pool->start;
			pass++;
			goto again;

		} else {
			/* Give up */
			spin_unlock_irqrestore(&(pool->lock), flags);
			return DMA_ERROR_CODE;
		}
	}

	end = n + npages;

	/* Bump the hint to a new block for small allocs. */
	if (largealloc) {
		/* Don't bump to new block to avoid fragmentation */
		pool->hint = end;
	} else {
		/* Overflow will be taken care of at the next allocation */
		pool->hint = (end + tbl->it_blocksize - 1) &
				~(tbl->it_blocksize - 1);
	}

	/* Update handle for SG allocations */
	if (handle)
		*handle = end;

	spin_unlock_irqrestore(&(pool->lock), flags);

	return n;
}

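/*
 * Allocate a range of table entries for @page and program the hardware
 * TCEs through ppc_md.tce_build().  Returns the resulting DMA address,
 * or DMA_ERROR_CODE if no range was free or the build failed.
 */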
static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
			      void *page, unsigned int npages,
			      enum dma_data_direction direction,
			      unsigned long mask, unsigned int align_order,
			      struct dma_attrs *attrs)
{
	unsigned long entry;
	dma_addr_t ret = DMA_ERROR_CODE;
	int build_fail;

	entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order);

	if (unlikely(entry == DMA_ERROR_CODE))
		return DMA_ERROR_CODE;

	entry += tbl->it_offset;	/* Offset into real TCE table */
	ret = entry << IOMMU_PAGE_SHIFT;	/* Set the return dma address */

	/* Put the TCEs in the HW table */
	build_fail = ppc_md.tce_build(tbl, entry, npages,
				      (unsigned long)page & IOMMU_PAGE_MASK,
				      direction, attrs);

	/* ppc_md.tce_build() only returns non-zero for transient errors.
	 * Clean up the table bitmap in this case and return
	 * DMA_ERROR_CODE. For all other errors the functionality is
	 * not altered.
	 */
	if (unlikely(build_fail)) {
		__iommu_free(tbl, ret, npages);
		return DMA_ERROR_CODE;
	}

	/* Flush/invalidate TLB caches if necessary */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	/* Make sure updates are seen by hardware */
	mb();

	return ret;
}

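/*
 * Validate that @dma_addr/@npages actually lie within @tbl before we
 * touch the bitmap.  Returns false (after a rate-limited warning) when
 * a caller passes an address outside the table.
 */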
static bool iommu_free_check(struct iommu_table *tbl, dma_addr_t dma_addr,
			     unsigned int npages)
{
	unsigned long entry, free_entry;

	entry = dma_addr >> IOMMU_PAGE_SHIFT;
	free_entry = entry - tbl->it_offset;

	if (((free_entry + npages) > tbl->it_size) ||
	    (entry < tbl->it_offset)) {
		if (printk_ratelimit()) {
			printk(KERN_INFO "iommu_free: invalid entry\n");
			printk(KERN_INFO "\tentry    = 0x%lx\n", entry);
			printk(KERN_INFO "\tdma_addr = 0x%llx\n", (u64)dma_addr);
			printk(KERN_INFO "\tTable    = 0x%llx\n", (u64)tbl);
			printk(KERN_INFO "\tbus#     = 0x%llx\n", (u64)tbl->it_busno);
			printk(KERN_INFO "\tsize     = 0x%llx\n", (u64)tbl->it_size);
			printk(KERN_INFO "\tstartOff = 0x%llx\n", (u64)tbl->it_offset);
			printk(KERN_INFO "\tindex    = 0x%llx\n", (u64)tbl->it_index);
			WARN_ON(1);
		}

		return false;
	}

	return true;
}

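/*
 * Map a table-relative entry index back to the pool that owns it.
 * Entries at or above large_pool.start belong to the large pool;
 * everything below is divided evenly, so e.g. with a poolsize of 49152
 * entry 100000 falls in pool 2.
 */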
static struct iommu_pool *get_pool(struct iommu_table *tbl,
				   unsigned long entry)
{
	struct iommu_pool *p;
	unsigned long largepool_start = tbl->large_pool.start;

	/* The large pool is the last pool at the top of the table */
	if (entry >= largepool_start) {
		p = &tbl->large_pool;
	} else {
		unsigned int pool_nr = entry / tbl->poolsize;

		BUG_ON(pool_nr > tbl->nr_pools);
		p = &tbl->pools[pool_nr];
	}

	return p;
}

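/*
 * Clear the hardware TCEs and release the bitmap range, taking only the
 * lock of the pool that owns the entries.  Callers are responsible for
 * any TLB flush (see iommu_free()).
 */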
static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
			 unsigned int npages)
{
	unsigned long entry, free_entry;
	unsigned long flags;
	struct iommu_pool *pool;

	entry = dma_addr >> IOMMU_PAGE_SHIFT;
	free_entry = entry - tbl->it_offset;

	pool = get_pool(tbl, free_entry);

	if (!iommu_free_check(tbl, dma_addr, npages))
		return;

	ppc_md.tce_free(tbl, entry, npages);

	spin_lock_irqsave(&(pool->lock), flags);
	bitmap_clear(tbl->it_map, free_entry, npages);
	spin_unlock_irqrestore(&(pool->lock), flags);
}

static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
		unsigned int npages)
{
	__iommu_free(tbl, dma_addr, npages);

	/* Make sure TLB cache is flushed if the HW needs it. We do
	 * not do an mb() here on purpose, it is not needed on any of
	 * the current platforms.
	 */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);
}

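/*
 * Map a scatterlist for DMA.  Adjacent segments whose IOMMU allocations
 * happen to be contiguous are merged into a single DMA segment (unless
 * booted with "iommu=novmerge" or the merged length would exceed the
 * device's max segment size).  Returns the number of DMA segments
 * produced, or 0 on failure.
 */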
int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
		 struct scatterlist *sglist, int nelems,
		 unsigned long mask, enum dma_data_direction direction,
		 struct dma_attrs *attrs)
{
	dma_addr_t dma_next = 0, dma_addr;
	struct scatterlist *s, *outs, *segstart;
	int outcount, incount, i, build_fail = 0;
	unsigned int align;
	unsigned long handle;
	unsigned int max_seg_size;

	BUG_ON(direction == DMA_NONE);

	if ((nelems == 0) || !tbl)
		return 0;

	outs = s = segstart = &sglist[0];
	outcount = 1;
	incount = nelems;
	handle = 0;

	/* Init first segment length for backout at failure */
	outs->dma_length = 0;

	DBG("sg mapping %d elements:\n", nelems);

	max_seg_size = dma_get_max_seg_size(dev);
	for_each_sg(sglist, s, nelems, i) {
		unsigned long vaddr, npages, entry, slen;

		slen = s->length;
		/* Sanity check */
		if (slen == 0) {
			dma_next = 0;
			continue;
		}
		/* Allocate iommu entries for that segment */
		vaddr = (unsigned long) sg_virt(s);
		npages = iommu_num_pages(vaddr, slen, IOMMU_PAGE_SIZE);
		align = 0;
		if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && slen >= PAGE_SIZE &&
		    (vaddr & ~PAGE_MASK) == 0)
			align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;
		entry = iommu_range_alloc(dev, tbl, npages, &handle,
					  mask >> IOMMU_PAGE_SHIFT, align);

		DBG("  - vaddr: %lx, size: %lx\n", vaddr, slen);

		/* Handle failure */
		if (unlikely(entry == DMA_ERROR_CODE)) {
			if (printk_ratelimit())
				dev_info(dev, "iommu_alloc failed, tbl %p "
					 "vaddr %lx npages %lu\n", tbl, vaddr,
					 npages);
			goto failure;
		}

		/* Convert entry to a dma_addr_t */
		entry += tbl->it_offset;
		dma_addr = entry << IOMMU_PAGE_SHIFT;
		dma_addr |= (s->offset & ~IOMMU_PAGE_MASK);

		DBG("  - %lu pages, entry: %lx, dma_addr: %lx\n",
			    npages, entry, dma_addr);

		/* Insert into HW table */
		build_fail = ppc_md.tce_build(tbl, entry, npages,
					      vaddr & IOMMU_PAGE_MASK,
					      direction, attrs);
		if (unlikely(build_fail))
			goto failure;

		/* If we are in an open segment, try merging */
		if (segstart != s) {
			DBG("  - trying merge...\n");
			/* We cannot merge if:
			 * - allocated dma_addr isn't contiguous to previous allocation
			 */
			if (novmerge || (dma_addr != dma_next) ||
			    (outs->dma_length + s->length > max_seg_size)) {
				/* Can't merge: create a new segment */
				segstart = s;
				outcount++;
				outs = sg_next(outs);
				DBG("    can't merge, new segment.\n");
			} else {
				outs->dma_length += s->length;
				DBG("    merged, new len: %ux\n", outs->dma_length);
			}
		}

		if (segstart == s) {
			/* This is a new segment, fill entries */
			DBG("  - filling new segment.\n");
			outs->dma_address = dma_addr;
			outs->dma_length = slen;
		}

		/* Calculate next page pointer for contiguous check */
		dma_next = dma_addr + slen;

		DBG("  - dma next is: %lx\n", dma_next);
	}

	/* Flush/invalidate TLB caches if necessary */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	DBG("mapped %d elements:\n", outcount);

	/* For the sake of iommu_unmap_sg, we clear out the length in the
	 * next entry of the sglist if we didn't fill the list completely
	 */
	if (outcount < incount) {
		outs = sg_next(outs);
		outs->dma_address = DMA_ERROR_CODE;
		outs->dma_length = 0;
	}

	/* Make sure updates are seen by hardware */
	mb();

	return outcount;

 failure:
	for_each_sg(sglist, s, nelems, i) {
		if (s->dma_length != 0) {
			unsigned long vaddr, npages;

			vaddr = s->dma_address & IOMMU_PAGE_MASK;
			npages = iommu_num_pages(s->dma_address, s->dma_length,
						 IOMMU_PAGE_SIZE);
			__iommu_free(tbl, vaddr, npages);
			s->dma_address = DMA_ERROR_CODE;
			s->dma_length = 0;
		}
		if (s == outs)
			break;
	}
	return 0;
}


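/*
 * Undo iommu_map_sg(): walk the mapped segments (the list is terminated
 * by a zero dma_length) and release their table entries, then flush the
 * TLB once at the end.
 */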
void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
		int nelems, enum dma_data_direction direction,
		struct dma_attrs *attrs)
{
	struct scatterlist *sg;

	BUG_ON(direction == DMA_NONE);

	if (!tbl)
		return;

	sg = sglist;
	while (nelems--) {
		unsigned int npages;
		dma_addr_t dma_handle = sg->dma_address;

		if (sg->dma_length == 0)
			break;
		npages = iommu_num_pages(dma_handle, sg->dma_length,
					 IOMMU_PAGE_SIZE);
		__iommu_free(tbl, dma_handle, npages);
		sg = sg_next(sg);
	}

	/* Flush/invalidate TLBs if necessary. As for iommu_free(), we
	 * do not do an mb() here, the affected platforms do not need it
	 * when freeing.
	 */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);
}

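/*
 * Prepare a new table's TCE entries.  In a normal or fadump boot the
 * table is simply wiped; in a kdump kernel the first kernel's live
 * mappings are preserved instead, keeping at least KDUMP_MIN_TCE_ENTRIES
 * free for the capture kernel itself.
 */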
static void iommu_table_clear(struct iommu_table *tbl)
{
	/*
	 * In case of firmware assisted dump, the system goes through a
	 * clean reboot at the time of the crash. Hence it's safe to
	 * clear the TCE entries if firmware assisted dump is active.
	 */
	if (!is_kdump_kernel() || is_fadump_active()) {
		/* Clear the table in case firmware left allocations in it */
		ppc_md.tce_free(tbl, tbl->it_offset, tbl->it_size);
		return;
	}

#ifdef CONFIG_CRASH_DUMP
	if (ppc_md.tce_get) {
		unsigned long index, tceval, tcecount = 0;

		/* Reserve the existing mappings left by the first kernel. */
		for (index = 0; index < tbl->it_size; index++) {
			tceval = ppc_md.tce_get(tbl, index + tbl->it_offset);
			/*
			 * Freed TCE entry contains 0x7fffffffffffffff on JS20
			 */
			if (tceval && (tceval != 0x7fffffffffffffffUL)) {
				__set_bit(index, tbl->it_map);
				tcecount++;
			}
		}

		if ((tbl->it_size - tcecount) < KDUMP_MIN_TCE_ENTRIES) {
			printk(KERN_WARNING "TCE table is full; freeing ");
			printk(KERN_WARNING "%d entries for the kdump boot\n",
				KDUMP_MIN_TCE_ENTRIES);
			for (index = tbl->it_size - KDUMP_MIN_TCE_ENTRIES;
				index < tbl->it_size; index++)
				__clear_bit(index, tbl->it_map);
		}
	}
#endif
}

/*
 * Build an iommu_table structure.  This contains a bit map which
 * is used to manage allocation of the tce space.
 */
struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
{
	unsigned long sz;
	static int welcomed = 0;
	struct page *page;
	unsigned int i;
	struct iommu_pool *p;

	/* number of bytes needed for the bitmap */
	sz = (tbl->it_size + 7) >> 3;

	page = alloc_pages_node(nid, GFP_ATOMIC, get_order(sz));
	if (!page)
		panic("iommu_init_table: Can't allocate %ld bytes\n", sz);
	tbl->it_map = page_address(page);
	memset(tbl->it_map, 0, sz);

	/*
	 * Reserve page 0 so it will not be used for any mappings.
	 * This prevents buggy drivers that consider page 0 invalid
	 * from crashing the machine or even losing data.
	 */
	if (tbl->it_offset == 0)
		set_bit(0, tbl->it_map);

	/* We only split the IOMMU table if we have 1GB or more of space */
	if ((tbl->it_size << IOMMU_PAGE_SHIFT) >= (1UL * 1024 * 1024 * 1024))
		tbl->nr_pools = IOMMU_NR_POOLS;
	else
		tbl->nr_pools = 1;

	/* We reserve the top 1/4 of the table for large allocations */
	tbl->poolsize = (tbl->it_size * 3 / 4) / IOMMU_NR_POOLS;
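	/*
	 * e.g. (assuming IOMMU_NR_POOLS == 4) a 1GB table of 4K IOMMU
	 * pages has it_size == 262144, so each small pool covers 49152
	 * entries and the large pool covers the top 65536.
	 */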

	for (i = 0; i < IOMMU_NR_POOLS; i++) {
		p = &tbl->pools[i];
		spin_lock_init(&(p->lock));
		p->start = tbl->poolsize * i;
		p->hint = p->start;
		p->end = p->start + tbl->poolsize;
	}

	p = &tbl->large_pool;
	spin_lock_init(&(p->lock));
	p->start = tbl->poolsize * i;
	p->hint = p->start;
	p->end = tbl->it_size;

	iommu_table_clear(tbl);

	if (!welcomed) {
		printk(KERN_INFO "IOMMU table initialized, virtual merging %s\n",
		       novmerge ? "disabled" : "enabled");
		welcomed = 1;
	}

	return tbl;
}

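/*
 * Tear down a table built by iommu_init_table(): warn about any entries
 * still allocated for the node named @node_name, then free the bitmap
 * pages and the table itself.
 */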
void iommu_free_table(struct iommu_table *tbl, const char *node_name)
{
	unsigned long bitmap_sz, i;
	unsigned int order;

	if (!tbl || !tbl->it_map) {
		printk(KERN_ERR "%s: expected TCE map for %s\n", __func__,
				node_name);
		return;
	}

	/* verify that table contains no entries */
	/* it_size is in entries, and we're examining 64 at a time */
	for (i = 0; i < (tbl->it_size/64); i++) {
		if (tbl->it_map[i] != 0) {
			printk(KERN_WARNING "%s: Unexpected TCEs for %s\n",
				__func__, node_name);
			break;
		}
	}

	/* calculate bitmap size in bytes */
	bitmap_sz = (tbl->it_size + 7) / 8;

	/* free bitmap */
	order = get_order(bitmap_sz);
	free_pages((unsigned long) tbl->it_map, order);

	/* free table */
	kfree(tbl);
}

/* Creates TCEs for a user provided buffer.  The user buffer must be
 * contiguous real kernel storage (not vmalloc).  The address passed here
 * comprises a page address and offset into that page. The dma_addr_t
 * returned will point to the same byte within the page as was passed in.
 */
dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
			  struct page *page, unsigned long offset, size_t size,
			  unsigned long mask, enum dma_data_direction direction,
			  struct dma_attrs *attrs)
{
	dma_addr_t dma_handle = DMA_ERROR_CODE;
	void *vaddr;
	unsigned long uaddr;
	unsigned int npages, align;

	BUG_ON(direction == DMA_NONE);

	vaddr = page_address(page) + offset;
	uaddr = (unsigned long)vaddr;
	npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE);

	if (tbl) {
		align = 0;
		if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && size >= PAGE_SIZE &&
		    ((unsigned long)vaddr & ~PAGE_MASK) == 0)
			align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;

		dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction,
					 mask >> IOMMU_PAGE_SHIFT, align,
					 attrs);
		if (dma_handle == DMA_ERROR_CODE) {
			if (printk_ratelimit()) {
				dev_info(dev, "iommu_alloc failed, tbl %p "
					 "vaddr %p npages %d\n", tbl, vaddr,
					 npages);
			}
		} else
			dma_handle |= (uaddr & ~IOMMU_PAGE_MASK);
	}

	return dma_handle;
}

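/*
 * Undo iommu_map_page(): release the table entries backing @dma_handle.
 * The TLB flush happens inside iommu_free().
 */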
void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle,
		      size_t size, enum dma_data_direction direction,
		      struct dma_attrs *attrs)
{
	unsigned int npages;

	BUG_ON(direction == DMA_NONE);

	if (tbl) {
		npages = iommu_num_pages(dma_handle, size, IOMMU_PAGE_SIZE);
		iommu_free(tbl, dma_handle, npages);
	}
}

/* Allocates a contiguous real buffer and creates mappings over it.
 * Returns the virtual address of the buffer and sets dma_handle
 * to the dma address (mapping) of the first page.
 */
void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
			   size_t size, dma_addr_t *dma_handle,
			   unsigned long mask, gfp_t flag, int node)
{
	void *ret = NULL;
	dma_addr_t mapping;
	unsigned int order;
	unsigned int nio_pages, io_order;
	struct page *page;

	size = PAGE_ALIGN(size);
	order = get_order(size);

	/*
	 * Client asked for way too much space.  This is checked later
	 * anyway.  It is easier to debug here for the drivers than in
	 * the tce tables.
	 */
	if (order >= IOMAP_MAX_ORDER) {
		dev_info(dev, "iommu_alloc_consistent size too large: 0x%lx\n",
			 size);
		return NULL;
	}

	if (!tbl)
		return NULL;

	/* Alloc enough pages (and possibly more) */
	page = alloc_pages_node(node, flag, order);
	if (!page)
		return NULL;
	ret = page_address(page);
	memset(ret, 0, size);

	/* Set up tces to cover the allocated range */
	nio_pages = size >> IOMMU_PAGE_SHIFT;
	io_order = get_iommu_order(size);
	mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
			      mask >> IOMMU_PAGE_SHIFT, io_order, NULL);
	if (mapping == DMA_ERROR_CODE) {
		free_pages((unsigned long)ret, order);
		return NULL;
	}
	*dma_handle = mapping;
	return ret;
}

void iommu_free_coherent(struct iommu_table *tbl, size_t size,
			 void *vaddr, dma_addr_t dma_handle)
{
	if (tbl) {
		unsigned int nio_pages;

		size = PAGE_ALIGN(size);
		nio_pages = size >> IOMMU_PAGE_SHIFT;
		iommu_free(tbl, dma_handle, nio_pages);
		free_pages((unsigned long)vaddr, get_order(size));
	}
}