/*
 * ioport.c: Simple io mapping allocator.
 *
 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1995 Miguel de Icaza (miguel@nuclecu.unam.mx)
 *
 * 1996: sparc_free_io, 1999: ioremap()/iounmap() by Pete Zaitcev.
 *
 * 2000/01/29
 * <rth> zait: as long as pci_alloc_consistent produces something addressable,
 *	things are ok.
 * <zaitcev> rth: no, it is relevant, because get_free_pages returns you a
 *	pointer into the big page mapping
 * <rth> zait: so what?
 * <rth> zait: remap_it_my_way(virt_to_phys(get_free_page()))
 * <zaitcev> Hmm
 * <zaitcev> Suppose I did this remap_it_my_way(virt_to_phys(get_free_page())).
 *	So far so good.
 * <zaitcev> Now, driver calls pci_free_consistent(with result of
 *	remap_it_my_way()).
 * <zaitcev> How do you find the address to pass to free_pages()?
 * <rth> zait: walk the page tables? It's only two or three level after all.
 * <rth> zait: you have to walk them anyway to remove the mapping.
 * <zaitcev> Hmm
 * <zaitcev> Sounds reasonable
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/ioport.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/pci.h>		/* struct pci_dev */
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/scatterlist.h>
#include <linux/of_device.h>

#include <asm/io.h>
#include <asm/vaddrs.h>
#include <asm/oplib.h>
#include <asm/prom.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/iommu.h>
#include <asm/io-unit.h>

#include "dma.h"

#define mmu_inval_dma_area(p, l)	/* Anton pulled it out for 2.4.0-xx */

static struct resource *_sparc_find_resource(struct resource *r,
					     unsigned long);

static void __iomem *_sparc_ioremap(struct resource *res, u32 bus, u32 pa, int sz);
static void __iomem *_sparc_alloc_io(unsigned int busno, unsigned long phys,
    unsigned long size, char *name);
static void _sparc_free_io(struct resource *res);

static void register_proc_sparc_ioport(void);

/* This points to the next virtual address to use for DVMA mappings */
static struct resource _sparc_dvma = {
	.name = "sparc_dvma", .start = DVMA_VADDR, .end = DVMA_END - 1
};
/* This points to the start of I/O mappings, visible from outside. */
/*ext*/ struct resource sparc_iomap = {
	.name = "sparc_iomap", .start = IOBASE_VADDR, .end = IOBASE_END - 1
};

/*
 * Our mini-allocator...
 * Boy this is gross! We need it because we must map I/O for
 * timers and interrupt controller before the kmalloc is available.
 */

#define XNMLN  15
#define XNRES  10	/* SS-10 uses 8 */

struct xresource {
	struct resource xres;	/* Must be first */
	int xflag;		/* 1 == used */
	char xname[XNMLN+1];
};

static struct xresource xresv[XNRES];

static struct xresource *xres_alloc(void) {
	struct xresource *xrp;
	int n;

	xrp = xresv;
	for (n = 0; n < XNRES; n++) {
		if (xrp->xflag == 0) {
			xrp->xflag = 1;
			return xrp;
		}
		xrp++;
	}
	return NULL;
}

static void xres_free(struct xresource *xrp) {
	xrp->xflag = 0;
}

/*
 * These are typically used in PCI drivers
 * which are trying to be cross-platform.
 *
 * Bus type is always zero on IIep.
 */
void __iomem *ioremap(unsigned long offset, unsigned long size)
{
	char name[14];

	sprintf(name, "phys_%08x", (u32)offset);
	return _sparc_alloc_io(0, offset, size, name);
}
EXPORT_SYMBOL(ioremap);

/*
 * Complementary to ioremap().
 */
void iounmap(volatile void __iomem *virtual)
{
	unsigned long vaddr = (unsigned long) virtual & PAGE_MASK;
	struct resource *res;

	if ((res = _sparc_find_resource(&sparc_iomap, vaddr)) == NULL) {
		printk("free_io/iounmap: cannot free %lx\n", vaddr);
		return;
	}
	_sparc_free_io(res);

	if ((char *)res >= (char*)xresv && (char *)res < (char *)&xresv[XNRES]) {
		xres_free((struct xresource *)res);
	} else {
		kfree(res);
	}
}
EXPORT_SYMBOL(iounmap);
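
/*
 * Usage sketch (illustrative only, hence #if 0): how a cross-platform
 * driver would typically pair ioremap()/iounmap().  The physical address,
 * size and register offsets below are made-up placeholders, not real
 * hardware values.
 */
#if 0
static int example_ioremap_usage(void)
{
	void __iomem *regs;

	regs = ioremap(0xf0000000UL, 0x1000);	/* placeholder phys addr/size */
	if (regs == NULL)
		return -ENOMEM;

	writel(0x1, regs);		/* poke a (hypothetical) control register */
	(void) readl(regs + 0x04);	/* read a (hypothetical) status word */

	iounmap(regs);
	return 0;
}
#endif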

void __iomem *of_ioremap(struct resource *res, unsigned long offset,
			 unsigned long size, char *name)
{
	return _sparc_alloc_io(res->flags & 0xF,
			       res->start + offset,
			       size, name);
}
EXPORT_SYMBOL(of_ioremap);

void of_iounmap(struct resource *res, void __iomem *base, unsigned long size)
{
	iounmap(base);
}
EXPORT_SYMBOL(of_iounmap);

/*
 * Meat of mapping
 */
static void __iomem *_sparc_alloc_io(unsigned int busno, unsigned long phys,
    unsigned long size, char *name)
{
	static int printed_full;
	struct xresource *xres;
	struct resource *res;
	char *tack;
	int tlen;
	void __iomem *va;	/* P3 diag */

	if (name == NULL) name = "???";

	if ((xres = xres_alloc()) != 0) {
		tack = xres->xname;
		res = &xres->xres;
	} else {
		if (!printed_full) {
			printk("ioremap: done with statics, switching to malloc\n");
			printed_full = 1;
		}
		tlen = strlen(name);
		tack = kmalloc(sizeof (struct resource) + tlen + 1, GFP_KERNEL);
		if (tack == NULL) return NULL;
		memset(tack, 0, sizeof(struct resource));
		res = (struct resource *) tack;
		tack += sizeof (struct resource);
	}

	strlcpy(tack, name, XNMLN+1);
	res->name = tack;

	va = _sparc_ioremap(res, busno, phys, size);
	/* printk("ioremap(0x%x:%08lx[0x%lx])=%p\n", busno, phys, size, va); */ /* P3 diag */
	return va;
}

/*
 */
static void __iomem *
_sparc_ioremap(struct resource *res, u32 bus, u32 pa, int sz)
{
	unsigned long offset = ((unsigned long) pa) & (~PAGE_MASK);

	if (allocate_resource(&sparc_iomap, res,
	    (offset + sz + PAGE_SIZE-1) & PAGE_MASK,
	    sparc_iomap.start, sparc_iomap.end, PAGE_SIZE, NULL, NULL) != 0) {
		/* Usually we cannot see printks in this case. */
		prom_printf("alloc_io_res(%s): cannot occupy\n",
		    (res->name != NULL)? res->name: "???");
		prom_halt();
	}

	pa &= PAGE_MASK;
	sparc_mapiorange(bus, pa, res->start, res->end - res->start + 1);

	return (void __iomem *)(unsigned long)(res->start + offset);
}

/*
 * Complementary to _sparc_ioremap().
 */
static void _sparc_free_io(struct resource *res)
{
	unsigned long plen;

	plen = res->end - res->start + 1;
	BUG_ON((plen & (PAGE_SIZE-1)) != 0);
	sparc_unmapiorange(res->start, plen);
	release_resource(res);
}

#ifdef CONFIG_SBUS

void sbus_set_sbus64(struct device *dev, int x)
{
	printk("sbus_set_sbus64: unsupported\n");
}
EXPORT_SYMBOL(sbus_set_sbus64);

/*
 * Allocate a chunk of memory suitable for DMA.
 * Typically devices use them for control blocks.
 * CPU may access them without any explicit flushing.
 */
void *sbus_alloc_consistent(struct device *dev, long len, u32 *dma_addrp)
{
	struct of_device *op = to_of_device(dev);
	unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK;
	unsigned long va;
	struct resource *res;
	int order;

	/* XXX why are some lengths signed, others unsigned? */
	if (len <= 0) {
		return NULL;
	}
	/* XXX So what is maxphys for us and how do drivers know it? */
	if (len > 256*1024) {			/* __get_free_pages() limit */
		return NULL;
	}

	order = get_order(len_total);
	if ((va = __get_free_pages(GFP_KERNEL|__GFP_COMP, order)) == 0)
		goto err_nopages;

	if ((res = kzalloc(sizeof(struct resource), GFP_KERNEL)) == NULL)
		goto err_nomem;

	if (allocate_resource(&_sparc_dvma, res, len_total,
	    _sparc_dvma.start, _sparc_dvma.end, PAGE_SIZE, NULL, NULL) != 0) {
		printk("sbus_alloc_consistent: cannot occupy 0x%lx", len_total);
		goto err_nova;
	}
	mmu_inval_dma_area(va, len_total);
	// XXX The mmu_map_dma_area does this for us below, see comments.
	// sparc_mapiorange(0, virt_to_phys(va), res->start, len_total);
	/*
	 * XXX That's where sdev would be used. Currently we load
	 * all iommu tables with the same translations.
	 */
	if (mmu_map_dma_area(dev, dma_addrp, va, res->start, len_total) != 0)
		goto err_noiommu;

	res->name = op->node->name;

	return (void *)(unsigned long)res->start;

err_noiommu:
	release_resource(res);
err_nova:
	free_pages(va, order);
err_nomem:
	kfree(res);
err_nopages:
	return NULL;
}

void sbus_free_consistent(struct device *dev, long n, void *p, u32 ba)
{
	struct resource *res;
	struct page *pgv;

	if ((res = _sparc_find_resource(&_sparc_dvma,
	    (unsigned long)p)) == NULL) {
		printk("sbus_free_consistent: cannot free %p\n", p);
		return;
	}

	if (((unsigned long)p & (PAGE_SIZE-1)) != 0) {
		printk("sbus_free_consistent: unaligned va %p\n", p);
		return;
	}

	n = (n + PAGE_SIZE-1) & PAGE_MASK;
	if ((res->end-res->start)+1 != n) {
		printk("sbus_free_consistent: region 0x%lx asked 0x%lx\n",
		    (long)((res->end-res->start)+1), n);
		return;
	}

	release_resource(res);
	kfree(res);

	/* mmu_inval_dma_area(va, n); */ /* it's consistent, isn't it */
	pgv = virt_to_page(p);
	mmu_unmap_dma_area(dev, ba, n);

	__free_pages(pgv, get_order(n));
}
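
/*
 * Usage sketch (illustrative only, hence #if 0): an SBUS driver allocating
 * a DMA-consistent control block with the helpers above.  "dev" stands for
 * the driver's struct device and the one-page length is a placeholder.
 */
#if 0
static int example_sbus_consistent(struct device *dev)
{
	u32 dma_handle;
	void *cpu_addr;

	cpu_addr = sbus_alloc_consistent(dev, PAGE_SIZE, &dma_handle);
	if (cpu_addr == NULL)
		return -ENOMEM;

	/* ... hand dma_handle to the device, touch cpu_addr from the CPU ... */

	sbus_free_consistent(dev, PAGE_SIZE, cpu_addr, dma_handle);
	return 0;
}
#endif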

/*
 * Map a chunk of memory so that devices can see it.
 * CPU view of this memory may be inconsistent with
 * a device view and explicit flushing is necessary.
 */
dma_addr_t sbus_map_single(struct device *dev, void *va, size_t len, int direction)
{
	/* XXX why are some lengths signed, others unsigned? */
	if (len <= 0) {
		return 0;
	}
	/* XXX So what is maxphys for us and how do drivers know it? */
	if (len > 256*1024) {			/* __get_free_pages() limit */
		return 0;
	}
	return mmu_get_scsi_one(dev, va, len);
}

void sbus_unmap_single(struct device *dev, dma_addr_t ba, size_t n, int direction)
{
	mmu_release_scsi_one(dev, ba, n);
}

int sbus_map_sg(struct device *dev, struct scatterlist *sg, int n, int direction)
{
	mmu_get_scsi_sgl(dev, sg, n);

	/*
	 * XXX sparc64 can return a partial length here. sun4c should do this
	 * but it currently panics if it can't fulfill the request - Anton
	 */
	return n;
}

void sbus_unmap_sg(struct device *dev, struct scatterlist *sg, int n, int direction)
{
	mmu_release_scsi_sgl(dev, sg, n);
}

void sbus_dma_sync_single_for_cpu(struct device *dev, dma_addr_t ba, size_t size, int direction)
{
}

void sbus_dma_sync_single_for_device(struct device *dev, dma_addr_t ba, size_t size, int direction)
{
}

static int __init sparc_register_ioport(void)
{
	register_proc_sparc_ioport();

	return 0;
}

arch_initcall(sparc_register_ioport);

#endif /* CONFIG_SBUS */

#ifdef CONFIG_PCI

/* Allocate and map kernel buffer using consistent mode DMA for a device.
 * hwdev should be a valid struct pci_dev pointer for PCI devices.
 */
void *pci_alloc_consistent(struct pci_dev *pdev, size_t len, dma_addr_t *pba)
{
	unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK;
	unsigned long va;
	struct resource *res;
	int order;

	if (len == 0) {
		return NULL;
	}
	if (len > 256*1024) {			/* __get_free_pages() limit */
		return NULL;
	}

	order = get_order(len_total);
	va = __get_free_pages(GFP_KERNEL, order);
	if (va == 0) {
		printk("pci_alloc_consistent: no %ld pages\n", len_total>>PAGE_SHIFT);
		return NULL;
	}

	if ((res = kzalloc(sizeof(struct resource), GFP_KERNEL)) == NULL) {
		free_pages(va, order);
		printk("pci_alloc_consistent: no core\n");
		return NULL;
	}

	if (allocate_resource(&_sparc_dvma, res, len_total,
	    _sparc_dvma.start, _sparc_dvma.end, PAGE_SIZE, NULL, NULL) != 0) {
		printk("pci_alloc_consistent: cannot occupy 0x%lx", len_total);
		free_pages(va, order);
		kfree(res);
		return NULL;
	}
	mmu_inval_dma_area(va, len_total);
#if 0
/* P3 */ printk("pci_alloc_consistent: kva %lx uncva %lx phys %lx size %lx\n",
	(long)va, (long)res->start, (long)virt_to_phys(va), len_total);
#endif
	sparc_mapiorange(0, virt_to_phys(va), res->start, len_total);

	*pba = virt_to_phys(va); /* equals virt_to_bus (R.I.P.) for us. */
	return (void *) res->start;
}
EXPORT_SYMBOL(pci_alloc_consistent);

/* Free and unmap a consistent DMA buffer.
 * cpu_addr is what was returned from pci_alloc_consistent,
 * size must be the same as what was passed into pci_alloc_consistent,
 * and likewise dma_addr must be the same as what *dma_addrp was set to.
 *
 * References to the memory and mappings associated with cpu_addr/dma_addr
 * past this call are illegal.
 */
void pci_free_consistent(struct pci_dev *pdev, size_t n, void *p, dma_addr_t ba)
{
	struct resource *res;
	unsigned long pgp;

	if ((res = _sparc_find_resource(&_sparc_dvma,
	    (unsigned long)p)) == NULL) {
		printk("pci_free_consistent: cannot free %p\n", p);
		return;
	}

	if (((unsigned long)p & (PAGE_SIZE-1)) != 0) {
		printk("pci_free_consistent: unaligned va %p\n", p);
		return;
	}

	n = (n + PAGE_SIZE-1) & PAGE_MASK;
	if ((res->end-res->start)+1 != n) {
		printk("pci_free_consistent: region 0x%lx asked 0x%lx\n",
		    (long)((res->end-res->start)+1), (long)n);
		return;
	}

	pgp = (unsigned long) phys_to_virt(ba);	/* bus_to_virt actually */
	mmu_inval_dma_area(pgp, n);
	sparc_unmapiorange((unsigned long)p, n);

	release_resource(res);
	kfree(res);

	free_pages(pgp, get_order(n));
}
EXPORT_SYMBOL(pci_free_consistent);
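
/*
 * Usage sketch (illustrative only, hence #if 0): a PCI driver allocating a
 * consistent descriptor area.  The one-page length is a placeholder and the
 * descriptor layout is the driver's business.
 */
#if 0
static int example_pci_consistent(struct pci_dev *pdev)
{
	dma_addr_t ring_dma;
	void *ring;

	ring = pci_alloc_consistent(pdev, PAGE_SIZE, &ring_dma);
	if (ring == NULL)
		return -ENOMEM;

	/* ... fill descriptors through "ring", hand ring_dma to the card ... */

	pci_free_consistent(pdev, PAGE_SIZE, ring, ring_dma);
	return 0;
}
#endif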

/* Map a single buffer of the indicated size for DMA in streaming mode.
 * The 32-bit bus address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory
 * until either pci_unmap_single or pci_dma_sync_single_* is performed.
 */
dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size,
    int direction)
{
	BUG_ON(direction == PCI_DMA_NONE);
	/* IIep is write-through, not flushing. */
	return virt_to_phys(ptr);
}
EXPORT_SYMBOL(pci_map_single);

/* Unmap a single streaming mode DMA translation. The dma_addr and size
 * must match what was provided for in a previous pci_map_single call. All
 * other usages are undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t ba, size_t size,
    int direction)
{
	BUG_ON(direction == PCI_DMA_NONE);
	if (direction != PCI_DMA_TODEVICE) {
		mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
		    (size + PAGE_SIZE-1) & PAGE_MASK);
	}
}
EXPORT_SYMBOL(pci_unmap_single);
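
/*
 * Usage sketch (illustrative only, hence #if 0): streaming DMA of a kernel
 * buffer to the device.  The buffer and length come from the caller; the
 * transfer itself is hand-waved away.
 */
#if 0
static void example_pci_streaming(struct pci_dev *pdev, void *buf, size_t len)
{
	dma_addr_t ba;

	ba = pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE);
	/* ... start the transfer using "ba" and wait for it to complete ... */
	pci_unmap_single(pdev, ba, len, PCI_DMA_TODEVICE);
}
#endif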

/*
 * Same as pci_map_single, but with pages.
 */
dma_addr_t pci_map_page(struct pci_dev *hwdev, struct page *page,
			unsigned long offset, size_t size, int direction)
{
	BUG_ON(direction == PCI_DMA_NONE);
	/* IIep is write-through, not flushing. */
	return page_to_phys(page) + offset;
}
EXPORT_SYMBOL(pci_map_page);

void pci_unmap_page(struct pci_dev *hwdev,
			dma_addr_t dma_address, size_t size, int direction)
{
	BUG_ON(direction == PCI_DMA_NONE);
	/* mmu_inval_dma_area XXX */
}
EXPORT_SYMBOL(pci_unmap_page);

/* Map a set of buffers described by scatterlist in streaming
 * mode for DMA. This is the scatter-gather version of the
 * above pci_map_single interface. Here the scatter gather list
 * elements are each tagged with the appropriate dma address
 * and length. They are obtained via sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 *       DMA address/length pairs than there are SG table elements.
 *       (for example via virtual mapping capabilities)
 *       The routine returns the number of addr/length pairs actually
 *       used, at most nents.
 *
 * Device ownership issues as mentioned above for pci_map_single are
 * the same here.
 */
int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents,
    int direction)
{
	struct scatterlist *sg;
	int n;

	BUG_ON(direction == PCI_DMA_NONE);
	/* IIep is write-through, not flushing. */
	for_each_sg(sgl, sg, nents, n) {
		BUG_ON(page_address(sg_page(sg)) == NULL);
		sg->dma_address = virt_to_phys(sg_virt(sg));
		sg->dma_length = sg->length;
	}
	return nents;
}
EXPORT_SYMBOL(pci_map_sg);

/* Unmap a set of streaming mode DMA translations.
 * Again, cpu read rules concerning calls here are the same as for
 * pci_unmap_single() above.
 */
void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents,
    int direction)
{
	struct scatterlist *sg;
	int n;

	BUG_ON(direction == PCI_DMA_NONE);
	if (direction != PCI_DMA_TODEVICE) {
		for_each_sg(sgl, sg, nents, n) {
			BUG_ON(page_address(sg_page(sg)) == NULL);
			mmu_inval_dma_area(
			    (unsigned long) page_address(sg_page(sg)),
			    (sg->length + PAGE_SIZE-1) & PAGE_MASK);
		}
	}
}
EXPORT_SYMBOL(pci_unmap_sg);
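
/*
 * Usage sketch (illustrative only, hence #if 0): mapping a scatterlist for
 * a device-to-memory transfer and walking the resulting address/length
 * pairs.  The scatterlist is assumed to have been set up elsewhere.
 */
#if 0
static void example_pci_map_sg(struct pci_dev *pdev,
			       struct scatterlist *sgl, int nents)
{
	struct scatterlist *sg;
	int i, count;

	count = pci_map_sg(pdev, sgl, nents, PCI_DMA_FROMDEVICE);
	for_each_sg(sgl, sg, count, i) {
		/* hand sg_dma_address(sg) and sg_dma_len(sg) to the device */
	}
	/* ... after the transfer has completed ... */
	pci_unmap_sg(pdev, sgl, nents, PCI_DMA_FROMDEVICE);
}
#endif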

/* Make physical memory consistent for a single
 * streaming mode DMA translation before or after a transfer.
 *
 * If you perform a pci_map_single() but wish to interrogate the
 * buffer using the cpu, yet do not wish to tear down the PCI dma
 * mapping, you must call this function before doing so. At the
 * next point you give the PCI dma address back to the card, you
 * must first perform a pci_dma_sync_for_device, and then the
 * device again owns the buffer.
 */
void pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t ba, size_t size, int direction)
{
	BUG_ON(direction == PCI_DMA_NONE);
	if (direction != PCI_DMA_TODEVICE) {
		mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
		    (size + PAGE_SIZE-1) & PAGE_MASK);
	}
}
EXPORT_SYMBOL(pci_dma_sync_single_for_cpu);

void pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t ba, size_t size, int direction)
{
	BUG_ON(direction == PCI_DMA_NONE);
	if (direction != PCI_DMA_TODEVICE) {
		mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
		    (size + PAGE_SIZE-1) & PAGE_MASK);
	}
}
EXPORT_SYMBOL(pci_dma_sync_single_for_device);
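
/*
 * Usage sketch (illustrative only, hence #if 0): keeping one long-lived
 * streaming mapping and passing ownership back and forth with the
 * sync_single helpers instead of remapping for every transfer.
 */
#if 0
static void example_pci_sync(struct pci_dev *pdev, dma_addr_t ba,
			     void *buf, size_t len)
{
	/* the device has written into the buffer; claim it for the CPU */
	pci_dma_sync_single_for_cpu(pdev, ba, len, PCI_DMA_FROMDEVICE);
	/* ... look at "buf" from the CPU side ... */
	/* hand the buffer back to the device for the next transfer */
	pci_dma_sync_single_for_device(pdev, ba, len, PCI_DMA_FROMDEVICE);
}
#endif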

/* Make physical memory consistent for a set of streaming
 * mode DMA translations after a transfer.
 *
 * The same as pci_dma_sync_single_* but for a scatter-gather list,
 * same rules and usage.
 */
void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, int direction)
{
	struct scatterlist *sg;
	int n;

	BUG_ON(direction == PCI_DMA_NONE);
	if (direction != PCI_DMA_TODEVICE) {
		for_each_sg(sgl, sg, nents, n) {
			BUG_ON(page_address(sg_page(sg)) == NULL);
			mmu_inval_dma_area(
			    (unsigned long) page_address(sg_page(sg)),
			    (sg->length + PAGE_SIZE-1) & PAGE_MASK);
		}
	}
}
EXPORT_SYMBOL(pci_dma_sync_sg_for_cpu);

void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, int direction)
{
	struct scatterlist *sg;
	int n;

	BUG_ON(direction == PCI_DMA_NONE);
	if (direction != PCI_DMA_TODEVICE) {
		for_each_sg(sgl, sg, nents, n) {
			BUG_ON(page_address(sg_page(sg)) == NULL);
			mmu_inval_dma_area(
			    (unsigned long) page_address(sg_page(sg)),
			    (sg->length + PAGE_SIZE-1) & PAGE_MASK);
		}
	}
}
EXPORT_SYMBOL(pci_dma_sync_sg_for_device);
#endif /* CONFIG_PCI */

#ifdef CONFIG_PROC_FS

static int sparc_io_proc_show(struct seq_file *m, void *v)
{
	struct resource *root = m->private, *r;
	const char *nm;

	for (r = root->child; r != NULL; r = r->sibling) {
		if ((nm = r->name) == 0) nm = "???";
		seq_printf(m, "%016llx-%016llx: %s\n",
				(unsigned long long)r->start,
				(unsigned long long)r->end, nm);
	}

	return 0;
}

static int sparc_io_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, sparc_io_proc_show, PDE(inode)->data);
}

static const struct file_operations sparc_io_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= sparc_io_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
#endif /* CONFIG_PROC_FS */

/*
 * This is a version of find_resource and it belongs to kernel/resource.c.
 * Until we have agreement with Linus and Martin, it lingers here.
 *
 * XXX Too slow. Can have 8192 DVMA pages on sun4m in the worst case.
 * This probably warrants some sort of hashing.
 */
static struct resource *_sparc_find_resource(struct resource *root,
					     unsigned long hit)
{
	struct resource *tmp;

	for (tmp = root->child; tmp != 0; tmp = tmp->sibling) {
		if (tmp->start <= hit && tmp->end >= hit)
			return tmp;
	}
	return NULL;
}

static void register_proc_sparc_ioport(void)
{
#ifdef CONFIG_PROC_FS
	proc_create_data("io_map", 0, NULL, &sparc_io_proc_fops, &sparc_iomap);
	proc_create_data("dvma_map", 0, NULL, &sparc_io_proc_fops, &_sparc_dvma);
#endif
}