/*
 * ioport.c:  Simple io mapping allocator.
 *
 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1995 Miguel de Icaza (miguel@nuclecu.unam.mx)
 *
 * 1996: sparc_free_io, 1999: ioremap()/iounmap() by Pete Zaitcev.
 *
 * 2000/01/29
 * <rth> zait: as long as pci_alloc_consistent produces something addressable,
 *	things are ok.
 * <zaitcev> rth: no, it is relevant, because get_free_pages returns you a
 *	pointer into the big page mapping
 * <rth> zait: so what?
 * <rth> zait: remap_it_my_way(virt_to_phys(get_free_page()))
 * <zaitcev> Hmm
 * <zaitcev> Suppose I did this remap_it_my_way(virt_to_phys(get_free_page())).
 *	So far so good.
 * <zaitcev> Now, driver calls pci_free_consistent(with result of
 *	remap_it_my_way()).
 * <zaitcev> How do you find the address to pass to free_pages()?
 * <rth> zait: walk the page tables?  It's only two or three level after all.
 * <rth> zait: you have to walk them anyway to remove the mapping.
 * <zaitcev> Hmm
 * <zaitcev> Sounds reasonable
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/ioport.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/pci.h>		/* struct pci_dev */
#include <linux/proc_fs.h>
#include <linux/scatterlist.h>
#include <linux/of_device.h>

#include <asm/io.h>
#include <asm/vaddrs.h>
#include <asm/oplib.h>
#include <asm/prom.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/iommu.h>
#include <asm/io-unit.h>

#include "dma.h"

#define mmu_inval_dma_area(p, l)	/* Anton pulled it out for 2.4.0-xx */

static struct resource *_sparc_find_resource(struct resource *r,
					     unsigned long);

static void __iomem *_sparc_ioremap(struct resource *res, u32 bus, u32 pa, int sz);
static void __iomem *_sparc_alloc_io(unsigned int busno, unsigned long phys,
    unsigned long size, char *name);
static void _sparc_free_io(struct resource *res);

static void register_proc_sparc_ioport(void);

/* This points to the next virtual address to use for DVMA mappings */
static struct resource _sparc_dvma = {
	.name = "sparc_dvma", .start = DVMA_VADDR, .end = DVMA_END - 1
};
/* This points to the start of I/O mappings, visible from outside. */
/*ext*/ struct resource sparc_iomap = {
	.name = "sparc_iomap", .start = IOBASE_VADDR, .end = IOBASE_END - 1
};

/*
 * Our mini-allocator...
 * Boy this is gross! We need it because we must map I/O for
 * timers and the interrupt controller before kmalloc is available.
 */

#define XNMLN  15
#define XNRES  10	/* SS-10 uses 8 */

struct xresource {
	struct resource xres;	/* Must be first */
	int xflag;		/* 1 == used */
	char xname[XNMLN+1];
};

static struct xresource xresv[XNRES];

static struct xresource *xres_alloc(void) {
	struct xresource *xrp;
	int n;

	xrp = xresv;
	for (n = 0; n < XNRES; n++) {
		if (xrp->xflag == 0) {
			xrp->xflag = 1;
			return xrp;
		}
		xrp++;
	}
	return NULL;
}

static void xres_free(struct xresource *xrp) {
	xrp->xflag = 0;
}

/*
 * These are typically used in PCI drivers
 * which are trying to be cross-platform.
 *
 * Bus type is always zero on IIep.
 */
void __iomem *ioremap(unsigned long offset, unsigned long size)
{
	char name[14];

	sprintf(name, "phys_%08x", (u32)offset);
	return _sparc_alloc_io(0, offset, size, name);
}
EXPORT_SYMBOL(ioremap);

/*
 * Complementary to ioremap().
 */
void iounmap(volatile void __iomem *virtual)
{
	unsigned long vaddr = (unsigned long) virtual & PAGE_MASK;
	struct resource *res;

	if ((res = _sparc_find_resource(&sparc_iomap, vaddr)) == NULL) {
		printk("free_io/iounmap: cannot free %lx\n", vaddr);
		return;
	}
	_sparc_free_io(res);

	if ((char *)res >= (char*)xresv && (char *)res < (char *)&xresv[XNRES]) {
		xres_free((struct xresource *)res);
	} else {
		kfree(res);
	}
}
EXPORT_SYMBOL(iounmap);
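
/*
 * A minimal usage sketch, assuming a hypothetical cross-platform driver
 * (this is illustrative only and not taken from this file): pairing
 * ioremap() with iounmap().  MY_REG_PHYS and MY_REG_SPAN are made-up
 * values standing in for the device's register window.
 *
 *	void __iomem *regs;
 *
 *	regs = ioremap(MY_REG_PHYS, MY_REG_SPAN);
 *	if (regs == NULL)
 *		return -ENOMEM;
 *	writel(0x1, regs);		// touch a (hypothetical) control register
 *	...
 *	iounmap(regs);
 */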

void __iomem *of_ioremap(struct resource *res, unsigned long offset,
			 unsigned long size, char *name)
{
	return _sparc_alloc_io(res->flags & 0xF,
			       res->start + offset,
			       size, name);
}
EXPORT_SYMBOL(of_ioremap);

void of_iounmap(struct resource *res, void __iomem *base, unsigned long size)
{
	iounmap(base);
}
EXPORT_SYMBOL(of_iounmap);
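
/*
 * A sketch of assumed caller usage (not from this file): a driver that
 * already holds an of_device "op" maps its first register resource.
 * "op", REG_SPAN and the "my-regs" label are hypothetical.
 *
 *	void __iomem *regs;
 *
 *	regs = of_ioremap(&op->resource[0], 0, REG_SPAN, "my-regs");
 *	if (regs == NULL)
 *		return -ENOMEM;
 *	...
 *	of_iounmap(&op->resource[0], regs, REG_SPAN);
 */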

/*
 * Meat of mapping
 */
static void __iomem *_sparc_alloc_io(unsigned int busno, unsigned long phys,
    unsigned long size, char *name)
{
	static int printed_full;
	struct xresource *xres;
	struct resource *res;
	char *tack;
	int tlen;
	void __iomem *va;	/* P3 diag */

	if (name == NULL) name = "???";

	if ((xres = xres_alloc()) != 0) {
		tack = xres->xname;
		res = &xres->xres;
	} else {
		if (!printed_full) {
			printk("ioremap: done with statics, switching to malloc\n");
			printed_full = 1;
		}
		tlen = strlen(name);
		tack = kmalloc(sizeof (struct resource) + tlen + 1, GFP_KERNEL);
		if (tack == NULL) return NULL;
		memset(tack, 0, sizeof(struct resource));
		res = (struct resource *) tack;
		tack += sizeof (struct resource);
	}

	strlcpy(tack, name, XNMLN+1);
	res->name = tack;

	va = _sparc_ioremap(res, busno, phys, size);
	/* printk("ioremap(0x%x:%08lx[0x%lx])=%p\n", busno, phys, size, va); */ /* P3 diag */
	return va;
}

/*
 */
static void __iomem *
_sparc_ioremap(struct resource *res, u32 bus, u32 pa, int sz)
{
	unsigned long offset = ((unsigned long) pa) & (~PAGE_MASK);

	if (allocate_resource(&sparc_iomap, res,
	    (offset + sz + PAGE_SIZE-1) & PAGE_MASK,
	    sparc_iomap.start, sparc_iomap.end, PAGE_SIZE, NULL, NULL) != 0) {
		/* Usually we cannot see printks in this case. */
		prom_printf("alloc_io_res(%s): cannot occupy\n",
		    (res->name != NULL)? res->name: "???");
		prom_halt();
	}

	pa &= PAGE_MASK;
	sparc_mapiorange(bus, pa, res->start, res->end - res->start + 1);

	return (void __iomem *)(unsigned long)(res->start + offset);
}

/*
 * Complementary to _sparc_ioremap().
 */
static void _sparc_free_io(struct resource *res)
{
	unsigned long plen;

	plen = res->end - res->start + 1;
	BUG_ON((plen & (PAGE_SIZE-1)) != 0);
	sparc_unmapiorange(res->start, plen);
	release_resource(res);
}

#ifdef CONFIG_SBUS

void sbus_set_sbus64(struct device *dev, int x)
{
	printk("sbus_set_sbus64: unsupported\n");
}
EXPORT_SYMBOL(sbus_set_sbus64);

/*
 * Allocate a chunk of memory suitable for DMA.
 * Typically devices use it for control blocks.
 * The CPU may access it without any explicit flushing.
 */
void *sbus_alloc_consistent(struct device *dev, long len, u32 *dma_addrp)
{
	struct of_device *op = to_of_device(dev);
	unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK;
	unsigned long va;
	struct resource *res;
	int order;

	/* XXX why are some lengths signed, others unsigned? */
	if (len <= 0) {
		return NULL;
	}
	/* XXX So what is maxphys for us and how do drivers know it? */
	if (len > 256*1024) {			/* __get_free_pages() limit */
		return NULL;
	}

	order = get_order(len_total);
	if ((va = __get_free_pages(GFP_KERNEL|__GFP_COMP, order)) == 0)
		goto err_nopages;

	if ((res = kzalloc(sizeof(struct resource), GFP_KERNEL)) == NULL)
		goto err_nomem;

	if (allocate_resource(&_sparc_dvma, res, len_total,
	    _sparc_dvma.start, _sparc_dvma.end, PAGE_SIZE, NULL, NULL) != 0) {
		printk("sbus_alloc_consistent: cannot occupy 0x%lx", len_total);
		goto err_nova;
	}
	mmu_inval_dma_area(va, len_total);
	// XXX The mmu_map_dma_area does this for us below, see comments.
	// sparc_mapiorange(0, virt_to_phys(va), res->start, len_total);
	/*
	 * XXX That's where sdev would be used. Currently we load
	 * all iommu tables with the same translations.
	 */
	if (mmu_map_dma_area(dev, dma_addrp, va, res->start, len_total) != 0)
		goto err_noiommu;

	res->name = op->node->name;

	return (void *)(unsigned long)res->start;

err_noiommu:
	release_resource(res);
err_nova:
	kfree(res);
err_nomem:
	free_pages(va, order);
err_nopages:
	return NULL;
}

void sbus_free_consistent(struct device *dev, long n, void *p, u32 ba)
{
	struct resource *res;
	struct page *pgv;

	if ((res = _sparc_find_resource(&_sparc_dvma,
	    (unsigned long)p)) == NULL) {
		printk("sbus_free_consistent: cannot free %p\n", p);
		return;
	}

	if (((unsigned long)p & (PAGE_SIZE-1)) != 0) {
		printk("sbus_free_consistent: unaligned va %p\n", p);
		return;
	}

	n = (n + PAGE_SIZE-1) & PAGE_MASK;
	if ((res->end-res->start)+1 != n) {
		printk("sbus_free_consistent: region 0x%lx asked 0x%lx\n",
		    (long)((res->end-res->start)+1), n);
		return;
	}

	release_resource(res);
	kfree(res);

	/* mmu_inval_dma_area(va, n); */ /* it's consistent, isn't it */
	pgv = virt_to_page(p);
	mmu_unmap_dma_area(dev, ba, n);

	__free_pages(pgv, get_order(n));
}
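
/*
 * A minimal sketch of an assumed caller (not code from this file): an
 * SBus driver putting a small descriptor ring in consistent memory.
 * "op", RING_BYTES and struct my_desc are hypothetical.
 *
 *	u32 ring_dvma;
 *	struct my_desc *ring;
 *
 *	ring = sbus_alloc_consistent(&op->dev, RING_BYTES, &ring_dvma);
 *	if (ring == NULL)
 *		return -ENOMEM;
 *	// CPU stores are visible to the device without explicit flushing
 *	ring[0].ctrl = 0;
 *	...
 *	sbus_free_consistent(&op->dev, RING_BYTES, ring, ring_dvma);
 */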

/*
 * Map a chunk of memory so that devices can see it.
 * The CPU's view of this memory may be inconsistent with
 * the device's view, and explicit flushing is necessary.
 */
dma_addr_t sbus_map_single(struct device *dev, void *va, size_t len, int direction)
{
	/* XXX why are some lengths signed, others unsigned? */
	if (len <= 0) {
		return 0;
	}
	/* XXX So what is maxphys for us and how do drivers know it? */
	if (len > 256*1024) {			/* __get_free_pages() limit */
		return 0;
	}
	return mmu_get_scsi_one(dev, va, len);
}

void sbus_unmap_single(struct device *dev, dma_addr_t ba, size_t n, int direction)
{
	mmu_release_scsi_one(dev, ba, n);
}
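
/*
 * A sketch of assumed usage (not from this file): streaming one buffer
 * to a device and unmapping it afterwards.  "op", buf and BUF_LEN are
 * hypothetical, and the direction constant is assumed to be the generic
 * DMA_TO_DEVICE value from linux/dma-mapping.h.
 *
 *	dma_addr_t ba;
 *
 *	ba = sbus_map_single(&op->dev, buf, BUF_LEN, DMA_TO_DEVICE);
 *	if (ba == 0)
 *		return -ENOMEM;
 *	// ... point the device at "ba" and start the transfer ...
 *	sbus_unmap_single(&op->dev, ba, BUF_LEN, DMA_TO_DEVICE);
 */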

int sbus_map_sg(struct device *dev, struct scatterlist *sg, int n, int direction)
{
	mmu_get_scsi_sgl(dev, sg, n);

	/*
	 * XXX sparc64 can return a partial length here. sun4c should do this
	 * but it currently panics if it can't fulfill the request - Anton
	 */
	return n;
}

void sbus_unmap_sg(struct device *dev, struct scatterlist *sg, int n, int direction)
{
	mmu_release_scsi_sgl(dev, sg, n);
}

void sbus_dma_sync_single_for_cpu(struct device *dev, dma_addr_t ba, size_t size, int direction)
{
}

void sbus_dma_sync_single_for_device(struct device *dev, dma_addr_t ba, size_t size, int direction)
{
}

static int __init sparc_register_ioport(void)
{
	register_proc_sparc_ioport();

	return 0;
}

arch_initcall(sparc_register_ioport);

#endif /* CONFIG_SBUS */

#ifdef CONFIG_PCI

/* Allocate and map kernel buffer using consistent mode DMA for a device.
 * hwdev should be a valid struct pci_dev pointer for PCI devices.
 */
void *pci_alloc_consistent(struct pci_dev *pdev, size_t len, dma_addr_t *pba)
{
	unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK;
	unsigned long va;
	struct resource *res;
	int order;

	if (len == 0) {
		return NULL;
	}
	if (len > 256*1024) {			/* __get_free_pages() limit */
		return NULL;
	}

	order = get_order(len_total);
	va = __get_free_pages(GFP_KERNEL, order);
	if (va == 0) {
		printk("pci_alloc_consistent: no %ld pages\n", len_total>>PAGE_SHIFT);
		return NULL;
	}

	if ((res = kzalloc(sizeof(struct resource), GFP_KERNEL)) == NULL) {
		free_pages(va, order);
		printk("pci_alloc_consistent: no core\n");
		return NULL;
	}

	if (allocate_resource(&_sparc_dvma, res, len_total,
	    _sparc_dvma.start, _sparc_dvma.end, PAGE_SIZE, NULL, NULL) != 0) {
		printk("pci_alloc_consistent: cannot occupy 0x%lx", len_total);
		free_pages(va, order);
		kfree(res);
		return NULL;
	}
	mmu_inval_dma_area(va, len_total);
#if 0
/* P3 */ printk("pci_alloc_consistent: kva %lx uncva %lx phys %lx size %lx\n",
  (long)va, (long)res->start, (long)virt_to_phys(va), len_total);
#endif
	sparc_mapiorange(0, virt_to_phys(va), res->start, len_total);

	*pba = virt_to_phys(va); /* equals virt_to_bus (R.I.P.) for us. */
	return (void *) res->start;
}
EXPORT_SYMBOL(pci_alloc_consistent);

/* Free and unmap a consistent DMA buffer.
 * cpu_addr is what was returned from pci_alloc_consistent,
 * size must be the same as what was passed into pci_alloc_consistent,
 * and likewise dma_addr must be the same as what *dma_addrp was set to.
 *
 * References to the memory and mappings associated with cpu_addr/dma_addr
 * past this call are illegal.
 */
void pci_free_consistent(struct pci_dev *pdev, size_t n, void *p, dma_addr_t ba)
{
	struct resource *res;
	unsigned long pgp;

	if ((res = _sparc_find_resource(&_sparc_dvma,
	    (unsigned long)p)) == NULL) {
		printk("pci_free_consistent: cannot free %p\n", p);
		return;
	}

	if (((unsigned long)p & (PAGE_SIZE-1)) != 0) {
		printk("pci_free_consistent: unaligned va %p\n", p);
		return;
	}

	n = (n + PAGE_SIZE-1) & PAGE_MASK;
	if ((res->end-res->start)+1 != n) {
		printk("pci_free_consistent: region 0x%lx asked 0x%lx\n",
		    (long)((res->end-res->start)+1), (long)n);
		return;
	}

	pgp = (unsigned long) phys_to_virt(ba);	/* bus_to_virt actually */
	mmu_inval_dma_area(pgp, n);
	sparc_unmapiorange((unsigned long)p, n);

	release_resource(res);
	kfree(res);

	free_pages(pgp, get_order(n));
}
EXPORT_SYMBOL(pci_free_consistent);
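
/*
 * A minimal sketch of an assumed caller (not part of this file): a PCI
 * driver allocating a shared status block.  "pdev", BLOCK_LEN and
 * struct my_status are hypothetical.
 *
 *	dma_addr_t block_dma;
 *	struct my_status *block;
 *
 *	block = pci_alloc_consistent(pdev, BLOCK_LEN, &block_dma);
 *	if (block == NULL)
 *		return -ENOMEM;
 *	// hand "block_dma" to the device; the CPU accesses it through "block"
 *	...
 *	pci_free_consistent(pdev, BLOCK_LEN, block, block_dma);
 */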

/* Map a single buffer of the indicated size for DMA in streaming mode.
 * The 32-bit bus address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory
 * until either pci_unmap_single or pci_dma_sync_single_* is performed.
 */
dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size,
    int direction)
{
	BUG_ON(direction == PCI_DMA_NONE);
	/* IIep is write-through, not flushing. */
	return virt_to_phys(ptr);
}
EXPORT_SYMBOL(pci_map_single);

/* Unmap a single streaming mode DMA translation.  The dma_addr and size
 * must match what was provided in a previous pci_map_single call.  All
 * other usages are undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t ba, size_t size,
    int direction)
{
	BUG_ON(direction == PCI_DMA_NONE);
	if (direction != PCI_DMA_TODEVICE) {
		mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
		    (size + PAGE_SIZE-1) & PAGE_MASK);
	}
}
EXPORT_SYMBOL(pci_unmap_single);
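
/*
 * A sketch of assumed usage (not from this file): streaming a receive
 * buffer for one DMA transfer.  "pdev", rx_buf and RX_LEN are
 * hypothetical.
 *
 *	dma_addr_t ba;
 *
 *	ba = pci_map_single(pdev, rx_buf, RX_LEN, PCI_DMA_FROMDEVICE);
 *	// ... device DMAs into the buffer ...
 *	pci_unmap_single(pdev, ba, RX_LEN, PCI_DMA_FROMDEVICE);
 *	// the CPU may now read what the device wrote
 */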

/*
 * Same as pci_map_single, but with pages.
 */
dma_addr_t pci_map_page(struct pci_dev *hwdev, struct page *page,
			unsigned long offset, size_t size, int direction)
{
	BUG_ON(direction == PCI_DMA_NONE);
	/* IIep is write-through, not flushing. */
	return page_to_phys(page) + offset;
}
EXPORT_SYMBOL(pci_map_page);

void pci_unmap_page(struct pci_dev *hwdev,
			dma_addr_t dma_address, size_t size, int direction)
{
	BUG_ON(direction == PCI_DMA_NONE);
	/* mmu_inval_dma_area XXX */
}
EXPORT_SYMBOL(pci_unmap_page);

/* Map a set of buffers described by scatterlist in streaming
 * mode for DMA.  This is the scatter-gather version of the
 * above pci_map_single interface.  Here the scatter-gather list
 * elements are each tagged with the appropriate dma address
 * and length.  They are obtained via sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 *       DMA address/length pairs than there are SG table elements.
 *       (for example via virtual mapping capabilities)
 *       The routine returns the number of addr/length pairs actually
 *       used, at most nents.
 *
 * Device ownership issues as mentioned above for pci_map_single are
 * the same here.
 */
int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents,
    int direction)
{
	struct scatterlist *sg;
	int n;

	BUG_ON(direction == PCI_DMA_NONE);
	/* IIep is write-through, not flushing. */
	for_each_sg(sgl, sg, nents, n) {
		BUG_ON(page_address(sg_page(sg)) == NULL);
		sg->dma_address = virt_to_phys(sg_virt(sg));
		sg->dma_length = sg->length;
	}
	return nents;
}
EXPORT_SYMBOL(pci_map_sg);
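
/*
 * A sketch of an assumed caller (not from this file): mapping a short
 * scatterlist built from two kernel buffers.  "pdev", buf0/buf1, their
 * lengths and setup_device_entry() are hypothetical.
 *
 *	struct scatterlist sgl[2];
 *	int i, mapped;
 *
 *	sg_init_table(sgl, 2);
 *	sg_set_buf(&sgl[0], buf0, len0);
 *	sg_set_buf(&sgl[1], buf1, len1);
 *	mapped = pci_map_sg(pdev, sgl, 2, PCI_DMA_TODEVICE);
 *	for (i = 0; i < mapped; i++)
 *		setup_device_entry(sg_dma_address(&sgl[i]),
 *				   sg_dma_len(&sgl[i]));
 *	...
 *	pci_unmap_sg(pdev, sgl, 2, PCI_DMA_TODEVICE);
 */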

/* Unmap a set of streaming mode DMA translations.
 * Again, cpu read rules concerning calls here are the same as for
 * pci_unmap_single() above.
 */
void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents,
    int direction)
{
	struct scatterlist *sg;
	int n;

	BUG_ON(direction == PCI_DMA_NONE);
	if (direction != PCI_DMA_TODEVICE) {
		for_each_sg(sgl, sg, nents, n) {
			BUG_ON(page_address(sg_page(sg)) == NULL);
			mmu_inval_dma_area(
			    (unsigned long) page_address(sg_page(sg)),
			    (sg->length + PAGE_SIZE-1) & PAGE_MASK);
		}
	}
}
EXPORT_SYMBOL(pci_unmap_sg);

/* Make physical memory consistent for a single
 * streaming mode DMA translation before or after a transfer.
 *
 * If you perform a pci_map_single() but wish to interrogate the
 * buffer using the cpu, yet do not wish to tear down the PCI dma
 * mapping, you must call this function before doing so.  At the
 * next point you give the PCI dma address back to the card, you
 * must first perform a pci_dma_sync_for_device, and then the
 * device again owns the buffer.
 */
void pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t ba, size_t size, int direction)
{
	BUG_ON(direction == PCI_DMA_NONE);
	if (direction != PCI_DMA_TODEVICE) {
		mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
		    (size + PAGE_SIZE-1) & PAGE_MASK);
	}
}
EXPORT_SYMBOL(pci_dma_sync_single_for_cpu);

void pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t ba, size_t size, int direction)
{
	BUG_ON(direction == PCI_DMA_NONE);
	if (direction != PCI_DMA_TODEVICE) {
		mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
		    (size + PAGE_SIZE-1) & PAGE_MASK);
	}
}
EXPORT_SYMBOL(pci_dma_sync_single_for_device);
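
/*
 * A sketch of assumed usage (not from this file): inspecting a mapped
 * buffer between transfers without unmapping it.  "pdev", ba, buf and
 * LEN are hypothetical.
 *
 *	pci_dma_sync_single_for_cpu(pdev, ba, LEN, PCI_DMA_FROMDEVICE);
 *	// the CPU may now safely read buf
 *	status = buf[0];
 *	pci_dma_sync_single_for_device(pdev, ba, LEN, PCI_DMA_FROMDEVICE);
 *	// the device owns the buffer again
 */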

/* Make physical memory consistent for a set of streaming
 * mode DMA translations after a transfer.
 *
 * The same as pci_dma_sync_single_* but for a scatter-gather list,
 * same rules and usage.
 */
void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, int direction)
{
	struct scatterlist *sg;
	int n;

	BUG_ON(direction == PCI_DMA_NONE);
	if (direction != PCI_DMA_TODEVICE) {
		for_each_sg(sgl, sg, nents, n) {
			BUG_ON(page_address(sg_page(sg)) == NULL);
			mmu_inval_dma_area(
			    (unsigned long) page_address(sg_page(sg)),
			    (sg->length + PAGE_SIZE-1) & PAGE_MASK);
		}
	}
}
EXPORT_SYMBOL(pci_dma_sync_sg_for_cpu);

void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, int direction)
{
	struct scatterlist *sg;
	int n;

	BUG_ON(direction == PCI_DMA_NONE);
	if (direction != PCI_DMA_TODEVICE) {
		for_each_sg(sgl, sg, nents, n) {
			BUG_ON(page_address(sg_page(sg)) == NULL);
			mmu_inval_dma_area(
			    (unsigned long) page_address(sg_page(sg)),
			    (sg->length + PAGE_SIZE-1) & PAGE_MASK);
		}
	}
}
EXPORT_SYMBOL(pci_dma_sync_sg_for_device);
#endif /* CONFIG_PCI */

#ifdef CONFIG_PROC_FS

static int
_sparc_io_get_info(char *buf, char **start, off_t fpos, int length, int *eof,
    void *data)
{
	char *p = buf, *e = buf + length;
	struct resource *r;
	const char *nm;

	for (r = ((struct resource *)data)->child; r != NULL; r = r->sibling) {
		if (p + 32 >= e)	/* Better than nothing */
			break;
		if ((nm = r->name) == 0) nm = "???";
		p += sprintf(p, "%016llx-%016llx: %s\n",
				(unsigned long long)r->start,
				(unsigned long long)r->end, nm);
	}

	return p-buf;
}

#endif /* CONFIG_PROC_FS */

/*
 * This is a version of find_resource and it belongs to kernel/resource.c.
 * Until we have agreement with Linus and Martin, it lingers here.
 *
 * XXX Too slow. Can have 8192 DVMA pages on sun4m in the worst case.
 * This probably warrants some sort of hashing.
 */
static struct resource *_sparc_find_resource(struct resource *root,
					     unsigned long hit)
{
	struct resource *tmp;

	for (tmp = root->child; tmp != 0; tmp = tmp->sibling) {
		if (tmp->start <= hit && tmp->end >= hit)
			return tmp;
	}
	return NULL;
}

static void register_proc_sparc_ioport(void)
{
#ifdef CONFIG_PROC_FS
	create_proc_read_entry("io_map", 0, NULL, _sparc_io_get_info, &sparc_iomap);
	create_proc_read_entry("dvma_map", 0, NULL, _sparc_io_get_info, &_sparc_dvma);
#endif
}