/* $Id: pci_iommu.c,v 1.17 2001/12/17 07:05:09 davem Exp $
 * pci_iommu.c: UltraSparc PCI controller IOM/STC support.
 *
 * Copyright (C) 1999 David S. Miller (davem@redhat.com)
 * Copyright (C) 1999, 2000 Jakub Jelinek (jakub@redhat.com)
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/delay.h>

#include <asm/pbm.h>

#include "iommu_common.h"

#define PCI_STC_CTXMATCH_ADDR(STC, CTX) \
	((STC)->strbuf_ctxmatch_base + ((CTX) << 3))

/* Accessing IOMMU and Streaming Buffer registers.
 * REG parameter is a physical address.  All registers
 * are 64-bits in size.
 */
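/* Both accessors go through the physical-bypass ASI
 * (ASI_PHYS_BYPASS_EC_E), so REG must be a physical address and the
 * access hits the device registers directly, uncached.  The "memory"
 * clobber on the read keeps the compiler from caching or reordering
 * register reads around other memory operations.
 */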
#define pci_iommu_read(__reg) \
({	u64 __ret; \
	__asm__ __volatile__("ldxa [%1] %2, %0" \
			     : "=r" (__ret) \
			     : "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \
			     : "memory"); \
	__ret; \
})
#define pci_iommu_write(__reg, __val) \
	__asm__ __volatile__("stxa %0, [%1] %2" \
			     : /* no outputs */ \
			     : "r" (__val), "r" (__reg), \
			       "i" (ASI_PHYS_BYPASS_EC_E))

/* Must be invoked under the IOMMU lock. */
static void __iommu_flushall(struct pci_iommu *iommu)
{
	unsigned long tag;
	int entry;

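	/* The constant difference below recovers the physical address
	 * of the first IOMMU TLB tag diagnostic register from the flush
	 * register address we already keep around; it assumes the flush
	 * register sits at offset 0x0210 and the tag diagnostics at
	 * 0xa580 within the same register bank, as on PSYCHO-style
	 * controllers.
	 */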
	tag = iommu->iommu_flush + (0xa580UL - 0x0210UL);
	for (entry = 0; entry < 16; entry++) {
		pci_iommu_write(tag, 0);
		tag += 8;
	}

	/* Ensure completion of previous PIO writes. */
	(void) pci_iommu_read(iommu->write_complete_reg);

	/* Now update everyone's flush point. */
	for (entry = 0; entry < PBM_NCLUSTERS; entry++) {
		iommu->alloc_info[entry].flush =
			iommu->alloc_info[entry].next;
	}
}

#define IOPTE_CONSISTENT(CTX) \
	(IOPTE_VALID | IOPTE_CACHE | \
	 (((CTX) << 47) & IOPTE_CONTEXT))

#define IOPTE_STREAMING(CTX) \
	(IOPTE_CONSISTENT(CTX) | IOPTE_STBUF)
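
/* The context number lives in bits 47 and up of the IOPTE, masked by
 * IOPTE_CONTEXT; the unmap and sync paths below recover it with
 * (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL.  A streaming mapping
 * differs from a consistent one only by IOPTE_STBUF, which routes the
 * transfer through the streaming buffer.
 */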

/* Existing mappings are never marked invalid, instead they
 * are pointed to a dummy page.
 */
#define IOPTE_IS_DUMMY(iommu, iopte)	\
	((iopte_val(*iopte) & IOPTE_PAGE) == (iommu)->dummy_page_pa)

static inline void iopte_make_dummy(struct pci_iommu *iommu, iopte_t *iopte)
{
	unsigned long val = iopte_val(*iopte);

	val &= ~IOPTE_PAGE;
	val |= iommu->dummy_page_pa;

	iopte_val(*iopte) = val;
}

void pci_iommu_table_init(struct pci_iommu *iommu, int tsbsize)
{
	int i;

	tsbsize /= sizeof(iopte_t);

	for (i = 0; i < tsbsize; i++)
		iopte_make_dummy(iommu, &iommu->page_table[i]);
}

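/* The streaming page table is carved into PBM_NCLUSTERS clusters; an
 * allocation of npages pages is rounded up to a power of two 1 << cnum
 * and satisfied from cluster cnum.  Each cluster keeps a rotating
 * "next" cursor and a "flush" point in iommu->alloc_info[]; when the
 * cursor catches up with the flush point the IOMMU TLB is flushed
 * before any entry can be reused.
 */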
static iopte_t *alloc_streaming_cluster(struct pci_iommu *iommu, unsigned long npages)
{
	iopte_t *iopte, *limit, *first;
	unsigned long cnum, ent, flush_point;

	cnum = 0;
	while ((1UL << cnum) < npages)
		cnum++;
	iopte = (iommu->page_table +
		 (cnum << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS)));

	if (cnum == 0)
		limit = (iommu->page_table +
			 iommu->lowest_consistent_map);
	else
		limit = (iopte +
			 (1 << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS)));

	iopte += ((ent = iommu->alloc_info[cnum].next) << cnum);
	flush_point = iommu->alloc_info[cnum].flush;

	first = iopte;
	for (;;) {
		if (IOPTE_IS_DUMMY(iommu, iopte)) {
			if ((iopte + (1 << cnum)) >= limit)
				ent = 0;
			else
				ent = ent + 1;
			iommu->alloc_info[cnum].next = ent;
			if (ent == flush_point)
				__iommu_flushall(iommu);
			break;
		}
		iopte += (1 << cnum);
		ent++;
		if (iopte >= limit) {
			iopte = (iommu->page_table +
				 (cnum <<
				  (iommu->page_table_sz_bits - PBM_LOGCLUSTERS)));
			ent = 0;
		}
		if (ent == flush_point)
			__iommu_flushall(iommu);
		if (iopte == first)
			goto bad;
	}

	/* I've got your streaming cluster right here buddy boy... */
	return iopte;

bad:
	printk(KERN_EMERG "pci_iommu: alloc_streaming_cluster of npages(%ld) failed!\n",
	       npages);
	return NULL;
}

static void free_streaming_cluster(struct pci_iommu *iommu, dma_addr_t base,
				   unsigned long npages, unsigned long ctx)
{
	unsigned long cnum, ent;

	cnum = 0;
	while ((1UL << cnum) < npages)
		cnum++;

	ent = (base << (32 - IO_PAGE_SHIFT + PBM_LOGCLUSTERS - iommu->page_table_sz_bits))
		>> (32 + PBM_LOGCLUSTERS + cnum - iommu->page_table_sz_bits);
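
	/* The shift pair above reduces to (base >> (IO_PAGE_SHIFT + cnum))
	 * with everything above the in-cluster slot index shifted out the
	 * top of the word (this relies on dma_addr_t being 32 bits here).
	 * In other words, ent is which 1 << cnum sized slot of cluster
	 * cnum this mapping occupied.
	 */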

	/* If the global flush might not have caught this entry,
	 * adjust the flush point such that we will flush before
	 * ever trying to reuse it.
	 */
#define between(X,Y,Z)	(((Z) - (Y)) >= ((X) - (Y)))
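	/* between(X,Y,Z) is a circular interval test done with unsigned
	 * wraparound: it is true when X lies in [Y, Z] walking upward
	 * from Y, even when Z has wrapped below Y.  For example,
	 * between(1, 10, 2) is true and between(5, 10, 2) is false,
	 * since 1 - 10 wraps to a value no larger than 2 - 10 while
	 * 5 - 10 does not.
	 */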
	if (between(ent, iommu->alloc_info[cnum].next, iommu->alloc_info[cnum].flush))
		iommu->alloc_info[cnum].flush = ent;
#undef between
}

/* We allocate consistent mappings from the end of cluster zero. */
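/* Streaming allocations grow upward from the front of cluster zero and
 * stop at lowest_consistent_map, while consistent mappings are carved
 * downward from the top of the cluster; the scan below walks backward
 * looking for a run of npages dummy entries.
 */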
static iopte_t *alloc_consistent_cluster(struct pci_iommu *iommu, unsigned long npages)
{
	iopte_t *iopte;

	iopte = iommu->page_table + (1 << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS));
	while (iopte > iommu->page_table) {
		iopte--;
		if (IOPTE_IS_DUMMY(iommu, iopte)) {
			unsigned long tmp = npages;

			while (--tmp) {
				iopte--;
				if (!IOPTE_IS_DUMMY(iommu, iopte))
					break;
			}
			if (tmp == 0) {
				u32 entry = (iopte - iommu->page_table);

				if (entry < iommu->lowest_consistent_map)
					iommu->lowest_consistent_map = entry;
				return iopte;
			}
		}
	}
	return NULL;
}

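/* Hardware DMA contexts are a limited resource, and context zero
 * doubles as "no context", so it is never handed out.  ctx_lowest_free
 * is only a search hint; if the bitmap is completely full we fall back
 * to context zero with a warning rather than failing the mapping.
 */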
static int iommu_alloc_ctx(struct pci_iommu *iommu)
{
	int lowest = iommu->ctx_lowest_free;
	int n = find_next_zero_bit(iommu->ctx_bitmap, IOMMU_NUM_CTXS,
				   lowest);

	if (unlikely(n == IOMMU_NUM_CTXS)) {
		n = find_next_zero_bit(iommu->ctx_bitmap, lowest, 1);
		if (unlikely(n == lowest)) {
			printk(KERN_WARNING "IOMMU: Ran out of contexts.\n");
			n = 0;
		}
	}
	if (n)
		__set_bit(n, iommu->ctx_bitmap);

	return n;
}

static inline void iommu_free_ctx(struct pci_iommu *iommu, int ctx)
{
	if (likely(ctx)) {
		__clear_bit(ctx, iommu->ctx_bitmap);
		if (ctx < iommu->ctx_lowest_free)
			iommu->ctx_lowest_free = ctx;
	}
}

/* Allocate and map kernel buffer of size SIZE using consistent mode
 * DMA for PCI device PDEV.  Return non-NULL cpu-side address if
 * successful and set *DMA_ADDRP to the PCI side dma address.
 */
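/* An illustrative caller (not from this file) would do roughly:
 *
 *	dma_addr_t dvma;
 *	void *ring = pci_alloc_consistent(pdev, RING_BYTES, &dvma);
 *	if (ring == NULL)
 *		goto no_mem;
 *	... tell the device about dvma, touch ring from the CPU ...
 *	pci_free_consistent(pdev, RING_BYTES, ring, dvma);
 *
 * where RING_BYTES stands in for whatever buffer size the driver needs.
 */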
void *pci_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	iopte_t *iopte;
	unsigned long flags, order, first_page, ctx;
	void *ret;
	int npages;

	size = IO_PAGE_ALIGN(size);
	order = get_order(size);
	if (order >= 10)
		return NULL;

	first_page = __get_free_pages(GFP_ATOMIC, order);
	if (first_page == 0UL)
		return NULL;
	memset((char *)first_page, 0, PAGE_SIZE << order);

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;

	spin_lock_irqsave(&iommu->lock, flags);
	iopte = alloc_consistent_cluster(iommu, size >> IO_PAGE_SHIFT);
	if (iopte == NULL) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		free_pages(first_page, order);
		return NULL;
	}

	*dma_addrp = (iommu->page_table_map_base +
		      ((iopte - iommu->page_table) << IO_PAGE_SHIFT));
	ret = (void *) first_page;
	npages = size >> IO_PAGE_SHIFT;
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = iommu_alloc_ctx(iommu);
	first_page = __pa(first_page);
	while (npages--) {
		iopte_val(*iopte) = (IOPTE_CONSISTENT(ctx) |
				     IOPTE_WRITE |
				     (first_page & IOPTE_PAGE));
		iopte++;
		first_page += IO_PAGE_SIZE;
	}

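	/* Kick out any stale IOMMU TLB entries covering the range we
	 * just rewrote, one page at a time through the flush register.
	 */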
	{
		int i;
		u32 daddr = *dma_addrp;

		npages = size >> IO_PAGE_SHIFT;
		for (i = 0; i < npages; i++) {
			pci_iommu_write(iommu->iommu_flush, daddr);
			daddr += IO_PAGE_SIZE;
		}
	}

	spin_unlock_irqrestore(&iommu->lock, flags);

	return ret;
}

/* Free and unmap a consistent DMA translation. */
void pci_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_t dvma)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	iopte_t *iopte;
	unsigned long flags, order, npages, i, ctx;

	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	iopte = iommu->page_table +
		((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

	spin_lock_irqsave(&iommu->lock, flags);

	if ((iopte - iommu->page_table) ==
	    iommu->lowest_consistent_map) {
		iopte_t *walk = iopte + npages;
		iopte_t *limit;

		limit = (iommu->page_table +
			 (1 << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS)));
		while (walk < limit) {
			if (!IOPTE_IS_DUMMY(iommu, walk))
				break;
			walk++;
		}
		iommu->lowest_consistent_map =
			(walk - iommu->page_table);
	}

	/* Data for consistent mappings cannot enter the streaming
	 * buffers, so we only need to update the TSB.  We flush
	 * the IOMMU here as well to prevent conflicts with the
	 * streaming mapping deferred tlb flush scheme.
	 */

	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;

	for (i = 0; i < npages; i++, iopte++)
		iopte_make_dummy(iommu, iopte);

	if (iommu->iommu_ctxflush) {
		pci_iommu_write(iommu->iommu_ctxflush, ctx);
	} else {
		for (i = 0; i < npages; i++) {
			u32 daddr = dvma + (i << IO_PAGE_SHIFT);

			pci_iommu_write(iommu->iommu_flush, daddr);
		}
	}

	iommu_free_ctx(iommu, ctx);

	spin_unlock_irqrestore(&iommu->lock, flags);

	order = get_order(size);
	if (order < 10)
		free_pages((unsigned long)cpu, order);
}

/* Map a single buffer at PTR of SZ bytes for PCI DMA
 * in streaming mode.
 */
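/* Illustrative driver-side usage (not from this file):
 *
 *	dma_addr_t mapping = pci_map_single(pdev, buf, len,
 *					    PCI_DMA_TODEVICE);
 *	if (mapping == PCI_DMA_ERROR_CODE)
 *		goto map_failed;
 *	... hand mapping to the device and run the DMA ...
 *	pci_unmap_single(pdev, mapping, len, PCI_DMA_TODEVICE);
 *
 * buf and len are stand-ins for the caller's buffer.
 */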
dma_addr_t pci_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	struct pci_strbuf *strbuf;
	iopte_t *base;
	unsigned long flags, npages, oaddr;
	unsigned long i, base_paddr, ctx;
	u32 bus_addr, ret;
	unsigned long iopte_protection;

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	strbuf = &pcp->pbm->stc;

	if (direction == PCI_DMA_NONE)
		BUG();

	oaddr = (unsigned long)ptr;
	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;

	spin_lock_irqsave(&iommu->lock, flags);

	base = alloc_streaming_cluster(iommu, npages);
	if (base == NULL)
		goto bad;
	bus_addr = (iommu->page_table_map_base +
		    ((base - iommu->page_table) << IO_PAGE_SHIFT));
	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
	base_paddr = __pa(oaddr & IO_PAGE_MASK);
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = iommu_alloc_ctx(iommu);
	if (strbuf->strbuf_enabled)
		iopte_protection = IOPTE_STREAMING(ctx);
	else
		iopte_protection = IOPTE_CONSISTENT(ctx);
	if (direction != PCI_DMA_TODEVICE)
		iopte_protection |= IOPTE_WRITE;

	for (i = 0; i < npages; i++, base++, base_paddr += IO_PAGE_SIZE)
		iopte_val(*base) = iopte_protection | base_paddr;

	spin_unlock_irqrestore(&iommu->lock, flags);

	return ret;

bad:
	spin_unlock_irqrestore(&iommu->lock, flags);
	return PCI_DMA_ERROR_CODE;
}

static void pci_strbuf_flush(struct pci_strbuf *strbuf, struct pci_iommu *iommu, u32 vaddr, unsigned long ctx, unsigned long npages, int direction)
{
	int limit;

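	/* Three stages, roughly: if both the streaming buffer and the
	 * IOMMU support context flushing, flush by context and poll the
	 * match register until no entries with this context remain,
	 * falling back to per-page flushes on timeout.  Otherwise flush
	 * page by page.  Finally, unless the transfer was CPU->device
	 * only, write the flush-sync flag address and spin until the
	 * hardware sets the flag, which is what guarantees dirty data
	 * has been drained to memory.
	 */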
	if (strbuf->strbuf_ctxflush &&
	    iommu->iommu_ctxflush) {
		unsigned long matchreg, flushreg;
		u64 val;

		flushreg = strbuf->strbuf_ctxflush;
		matchreg = PCI_STC_CTXMATCH_ADDR(strbuf, ctx);

		if (pci_iommu_read(matchreg) == 0)
			goto do_flush_sync;

		pci_iommu_write(flushreg, ctx);
		if ((val = pci_iommu_read(matchreg)) == 0)
			goto do_flush_sync;

		val &= 0xffff;
		while (val) {
			if (val & 0x1)
				pci_iommu_write(flushreg, ctx);
			val >>= 1;
		}
		val = pci_iommu_read(matchreg);
		if (unlikely(val)) {
			printk(KERN_WARNING "pci_strbuf_flush: ctx flush "
			       "timeout matchreg[%lx] ctx[%lx]\n",
			       val, ctx);
			goto do_page_flush;
		}
	} else {
		unsigned long i;

	do_page_flush:
		for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
			pci_iommu_write(strbuf->strbuf_pflush, vaddr);
	}

do_flush_sync:
	/* If the device could not have possibly put dirty data into
	 * the streaming cache, no flush-flag synchronization needs
	 * to be performed.
	 */
	if (direction == PCI_DMA_TODEVICE)
		return;

	PCI_STC_FLUSHFLAG_INIT(strbuf);
	pci_iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
	(void) pci_iommu_read(iommu->write_complete_reg);

	limit = 100000;
	while (!PCI_STC_FLUSHFLAG_SET(strbuf)) {
		limit--;
		if (!limit)
			break;
		udelay(1);
		membar("#LoadLoad");
	}
	if (!limit)
		printk(KERN_WARNING "pci_strbuf_flush: flushflag timeout "
		       "vaddr[%08x] ctx[%lx] npages[%ld]\n",
		       vaddr, ctx, npages);
}

/* Unmap a single streaming mode DMA translation. */
void pci_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	struct pci_strbuf *strbuf;
	iopte_t *base;
	unsigned long flags, npages, ctx;

	if (direction == PCI_DMA_NONE)
		BUG();

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	strbuf = &pcp->pbm->stc;

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	base = iommu->page_table +
		((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
#ifdef DEBUG_PCI_IOMMU
	if (IOPTE_IS_DUMMY(iommu, base))
		printk("pci_unmap_single called on non-mapped region %08x,%08x from %016lx\n",
		       bus_addr, sz, __builtin_return_address(0));
#endif
	bus_addr &= IO_PAGE_MASK;

	spin_lock_irqsave(&iommu->lock, flags);

	/* Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;

	/* Step 1: Kick data out of streaming buffers if necessary. */
	if (strbuf->strbuf_enabled)
		pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);

	/* Step 2: Clear out first TSB entry. */
	iopte_make_dummy(iommu, base);

	free_streaming_cluster(iommu, bus_addr - iommu->page_table_map_base,
			       npages, ctx);

	iommu_free_ctx(iommu, ctx);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

#define SG_ENT_PHYS_ADDRESS(SG)	\
	(__pa(page_address((SG)->page)) + (SG)->offset)

static inline void fill_sg(iopte_t *iopte, struct scatterlist *sg,
			   int nused, int nelems, unsigned long iopte_protection)
{
	struct scatterlist *dma_sg = sg;
	struct scatterlist *sg_end = sg + nelems;
	int i;

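	/* nused is the number of coalesced DMA segments produced by
	 * prepare_sg() (the entries whose dma_address/dma_length we
	 * honor), while nelems is the number of original scatterlist
	 * entries feeding them; each DMA segment may be stitched
	 * together from several physically adjacent source entries.
	 */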
	for (i = 0; i < nused; i++) {
		unsigned long pteval = ~0UL;
		u32 dma_npages;

		dma_npages = ((dma_sg->dma_address & (IO_PAGE_SIZE - 1UL)) +
			      dma_sg->dma_length +
			      ((IO_PAGE_SIZE - 1UL))) >> IO_PAGE_SHIFT;
		do {
			unsigned long offset;
			signed int len;

			/* If we are here, we know we have at least one
			 * more page to map.  So walk forward until we
			 * hit a page crossing, and begin creating new
			 * mappings from that spot.
			 */
			for (;;) {
				unsigned long tmp;

				tmp = SG_ENT_PHYS_ADDRESS(sg);
				len = sg->length;
				if (((tmp ^ pteval) >> IO_PAGE_SHIFT) != 0UL) {
					pteval = tmp & IO_PAGE_MASK;
					offset = tmp & (IO_PAGE_SIZE - 1UL);
					break;
				}
				if (((tmp ^ (tmp + len - 1UL)) >> IO_PAGE_SHIFT) != 0UL) {
					pteval = (tmp + IO_PAGE_SIZE) & IO_PAGE_MASK;
					offset = 0UL;
					len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL)));
					break;
				}
				sg++;
			}

			pteval = iopte_protection | (pteval & IOPTE_PAGE);
			while (len > 0) {
				*iopte++ = __iopte(pteval);
				pteval += IO_PAGE_SIZE;
				len -= (IO_PAGE_SIZE - offset);
				offset = 0;
				dma_npages--;
			}

			pteval = (pteval & IOPTE_PAGE) + len;
			sg++;

			/* Skip over any tail mappings we've fully mapped,
			 * adjusting pteval along the way.  Stop when we
			 * detect a page crossing event.
			 */
			while (sg < sg_end &&
			       (pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
			       (pteval == SG_ENT_PHYS_ADDRESS(sg)) &&
			       ((pteval ^
				 (SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) {
				pteval += sg->length;
				sg++;
			}
			if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL)
				pteval = ~0UL;
		} while (dma_npages != 0);
		dma_sg++;
	}
}

/* Map a set of buffers described by SGLIST with NELEMS array
 * elements in streaming mode for PCI DMA.
 * When making changes here, inspect the assembly output.  I was having
 * a hard time keeping this routine from using stack slots for holding
 * variables.
 */
int pci_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	struct pci_strbuf *strbuf;
	unsigned long flags, ctx, npages, iopte_protection;
	iopte_t *base;
	u32 dma_base;
	struct scatterlist *sgtmp;
	int used;

	/* Fast path single entry scatterlists. */
	if (nelems == 1) {
		sglist->dma_address =
			pci_map_single(pdev,
				       (page_address(sglist->page) + sglist->offset),
				       sglist->length, direction);
		sglist->dma_length = sglist->length;
		return 1;
	}

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	strbuf = &pcp->pbm->stc;

	if (direction == PCI_DMA_NONE)
		BUG();

	/* Step 1: Prepare scatter list. */

	npages = prepare_sg(sglist, nelems);

	/* Step 2: Allocate a cluster. */

	spin_lock_irqsave(&iommu->lock, flags);

	base = alloc_streaming_cluster(iommu, npages);
	if (base == NULL)
		goto bad;
	dma_base = iommu->page_table_map_base + ((base - iommu->page_table) << IO_PAGE_SHIFT);

	/* Step 3: Normalize DMA addresses. */
	used = nelems;

	sgtmp = sglist;
	while (used && sgtmp->dma_length) {
		sgtmp->dma_address += dma_base;
		sgtmp++;
		used--;
	}
	used = nelems - used;

	/* Step 4: Choose a context if necessary. */
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = iommu_alloc_ctx(iommu);

	/* Step 5: Create the mappings. */
	if (strbuf->strbuf_enabled)
		iopte_protection = IOPTE_STREAMING(ctx);
	else
		iopte_protection = IOPTE_CONSISTENT(ctx);
	if (direction != PCI_DMA_TODEVICE)
		iopte_protection |= IOPTE_WRITE;
	fill_sg(base, sglist, used, nelems, iopte_protection);
#ifdef VERIFY_SG
	verify_sglist(sglist, nelems, base, npages);
#endif

	spin_unlock_irqrestore(&iommu->lock, flags);

	return used;

bad:
	spin_unlock_irqrestore(&iommu->lock, flags);
	return PCI_DMA_ERROR_CODE;
}

/* Unmap a set of streaming mode DMA translations. */
void pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	struct pci_strbuf *strbuf;
	iopte_t *base;
	unsigned long flags, ctx, i, npages;
	u32 bus_addr;

	if (direction == PCI_DMA_NONE)
		BUG();

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	strbuf = &pcp->pbm->stc;

	bus_addr = sglist->dma_address & IO_PAGE_MASK;

	for (i = 1; i < nelems; i++)
		if (sglist[i].dma_length == 0)
			break;
	i--;
	npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) - bus_addr) >> IO_PAGE_SHIFT;

	base = iommu->page_table +
		((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

#ifdef DEBUG_PCI_IOMMU
	if (IOPTE_IS_DUMMY(iommu, base))
		printk("pci_unmap_sg called on non-mapped region %016lx,%d from %016lx\n", sglist->dma_address, nelems, __builtin_return_address(0));
#endif

	spin_lock_irqsave(&iommu->lock, flags);

	/* Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;

	/* Step 1: Kick data out of streaming buffers if necessary. */
	if (strbuf->strbuf_enabled)
		pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);

	/* Step 2: Clear out first TSB entry. */
	iopte_make_dummy(iommu, base);

	free_streaming_cluster(iommu, bus_addr - iommu->page_table_map_base,
			       npages, ctx);

	iommu_free_ctx(iommu, ctx);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

/* Make physical memory consistent for a single
 * streaming mode DMA translation after a transfer.
 */
void pci_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	struct pci_strbuf *strbuf;
	unsigned long flags, ctx, npages;

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	strbuf = &pcp->pbm->stc;

	if (!strbuf->strbuf_enabled)
		return;

	spin_lock_irqsave(&iommu->lock, flags);

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	bus_addr &= IO_PAGE_MASK;

	/* Step 1: Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush &&
	    strbuf->strbuf_ctxflush) {
		iopte_t *iopte;

		iopte = iommu->page_table +
			((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
	}

	/* Step 2: Kick data out of streaming buffers. */
	pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

/* Make physical memory consistent for a set of streaming
 * mode DMA translations after a transfer.
 */
void pci_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	struct pci_strbuf *strbuf;
	unsigned long flags, ctx, npages, i;
	u32 bus_addr;

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	strbuf = &pcp->pbm->stc;

	if (!strbuf->strbuf_enabled)
		return;

	spin_lock_irqsave(&iommu->lock, flags);

	/* Step 1: Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush &&
	    strbuf->strbuf_ctxflush) {
		iopte_t *iopte;

		iopte = iommu->page_table +
			((sglist[0].dma_address - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
	}

	/* Step 2: Kick data out of streaming buffers. */
	bus_addr = sglist[0].dma_address & IO_PAGE_MASK;
	for (i = 1; i < nelems; i++)
		if (!sglist[i].dma_length)
			break;
	i--;
	npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length)
		  - bus_addr) >> IO_PAGE_SHIFT;
	pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void ali_sound_dma_hack(struct pci_dev *pdev, int set_bit)
{
	struct pci_dev *ali_isa_bridge;
	u8 val;

	/* ALI sound chips generate 31-bits of DMA, a special register
	 * determines what bit 31 is emitted as.
	 */
	ali_isa_bridge = pci_get_device(PCI_VENDOR_ID_AL,
					PCI_DEVICE_ID_AL_M1533,
					NULL);

	pci_read_config_byte(ali_isa_bridge, 0x7e, &val);
	if (set_bit)
		val |= 0x01;
	else
		val &= ~0x01;
	pci_write_config_byte(ali_isa_bridge, 0x7e, val);
	pci_dev_put(ali_isa_bridge);
}

int pci_dma_supported(struct pci_dev *pdev, u64 device_mask)
{
	u64 dma_addr_mask;

	if (pdev == NULL) {
		dma_addr_mask = 0xffffffff;
	} else {
		struct pcidev_cookie *pcp = pdev->sysdata;
		struct pci_iommu *iommu = pcp->pbm->iommu;

		dma_addr_mask = iommu->dma_addr_mask;

		if (pdev->vendor == PCI_VENDOR_ID_AL &&
		    pdev->device == PCI_DEVICE_ID_AL_M5451 &&
		    device_mask == 0x7fffffff) {
			ali_sound_dma_hack(pdev,
					   (dma_addr_mask & 0x80000000) != 0);
			return 1;
		}
	}

	if (device_mask >= (1UL << 32UL))
		return 0;

	return (device_mask & dma_addr_mask) == dma_addr_mask;
}