/*
 * ioport.c:  Simple io mapping allocator.
 *
 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1995 Miguel de Icaza (miguel@nuclecu.unam.mx)
 *
 * 1996: sparc_free_io, 1999: ioremap()/iounmap() by Pete Zaitcev.
 *
 * 2000/01/29
 * <rth> zait: as long as pci_alloc_consistent produces something addressable,
 *	things are ok.
 * <zaitcev> rth: no, it is relevant, because get_free_pages returns you a
 *	pointer into the big page mapping
 * <rth> zait: so what?
 * <rth> zait: remap_it_my_way(virt_to_phys(get_free_page()))
 * <zaitcev> Hmm
 * <zaitcev> Suppose I did this remap_it_my_way(virt_to_phys(get_free_page())).
 *	So far so good.
 * <zaitcev> Now, driver calls pci_free_consistent(with result of
 *	remap_it_my_way()).
 * <zaitcev> How do you find the address to pass to free_pages()?
 * <rth> zait: walk the page tables?  It's only two or three level after all.
 * <rth> zait: you have to walk them anyway to remove the mapping.
 * <zaitcev> Hmm
 * <zaitcev> Sounds reasonable
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/ioport.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/pci.h>		/* struct pci_dev */
#include <linux/proc_fs.h>
#include <linux/scatterlist.h>
#include <linux/of_device.h>

#include <asm/io.h>
#include <asm/vaddrs.h>
#include <asm/oplib.h>
#include <asm/prom.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/iommu.h>
#include <asm/io-unit.h>

#include "dma.h"

#define mmu_inval_dma_area(p, l)	/* Anton pulled it out for 2.4.0-xx */

static struct resource *_sparc_find_resource(struct resource *r,
					     unsigned long);

static void __iomem *_sparc_ioremap(struct resource *res, u32 bus, u32 pa, int sz);
static void __iomem *_sparc_alloc_io(unsigned int busno, unsigned long phys,
    unsigned long size, char *name);
static void _sparc_free_io(struct resource *res);

static void register_proc_sparc_ioport(void);

/* The virtual address range from which DVMA mappings are allocated. */
static struct resource _sparc_dvma = {
	.name = "sparc_dvma", .start = DVMA_VADDR, .end = DVMA_END - 1
};
/* The range used for I/O mappings; visible from outside this file. */
/*ext*/ struct resource sparc_iomap = {
	.name = "sparc_iomap", .start = IOBASE_VADDR, .end = IOBASE_END - 1
};

/*
 * Our mini-allocator...
 * Boy this is gross! We need it because we must map I/O for
 * timers and interrupt controller before kmalloc() is available.
 */

#define XNMLN  15
#define XNRES  10	/* SS-10 uses 8 */

struct xresource {
	struct resource xres;	/* Must be first */
	int xflag;		/* 1 == used */
	char xname[XNMLN+1];
};

static struct xresource xresv[XNRES];

static struct xresource *xres_alloc(void)
{
	struct xresource *xrp;
	int n;

	xrp = xresv;
	for (n = 0; n < XNRES; n++) {
		if (xrp->xflag == 0) {
			xrp->xflag = 1;
			return xrp;
		}
		xrp++;
	}
	return NULL;
}

static void xres_free(struct xresource *xrp)
{
	xrp->xflag = 0;
}

/*
 * These are typically used in PCI drivers
 * which are trying to be cross-platform.
 *
 * Bus type is always zero on IIep.
 */
void __iomem *ioremap(unsigned long offset, unsigned long size)
{
	char name[14];

	sprintf(name, "phys_%08x", (u32)offset);
	return _sparc_alloc_io(0, offset, size, name);
}

/*
 * Complementary to ioremap().
 */
void iounmap(volatile void __iomem *virtual)
{
	unsigned long vaddr = (unsigned long) virtual & PAGE_MASK;
	struct resource *res;

	if ((res = _sparc_find_resource(&sparc_iomap, vaddr)) == NULL) {
		printk("free_io/iounmap: cannot free %lx\n", vaddr);
		return;
	}
	_sparc_free_io(res);

	if ((char *)res >= (char*)xresv && (char *)res < (char *)&xresv[XNRES]) {
		xres_free((struct xresource *)res);
	} else {
		kfree(res);
	}
}

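/*
 * A minimal usage sketch for the pair above.  The device, its register
 * offset and the sizes are hypothetical stand-ins, not real constants:
 *
 *	void __iomem *regs = ioremap(MYDEV_PHYS_BASE, MYDEV_REG_SIZE);
 *	if (regs != NULL) {
 *		u32 status = readl(regs + MYDEV_STATUS);
 *		...
 *		iounmap(regs);
 *	}
 */
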
void __iomem *of_ioremap(struct resource *res, unsigned long offset,
			 unsigned long size, char *name)
{
	return _sparc_alloc_io(res->flags & 0xF,
			       res->start + offset,
			       size, name);
}
EXPORT_SYMBOL(of_ioremap);

void of_iounmap(struct resource *res, void __iomem *base, unsigned long size)
{
	iounmap(base);
}
EXPORT_SYMBOL(of_iounmap);

/*
 * Meat of mapping
 */
static void __iomem *_sparc_alloc_io(unsigned int busno, unsigned long phys,
    unsigned long size, char *name)
{
	static int printed_full;
	struct xresource *xres;
	struct resource *res;
	char *tack;
	int tlen;
	void __iomem *va;	/* P3 diag */

	if (name == NULL) name = "???";

	if ((xres = xres_alloc()) != NULL) {
		tack = xres->xname;
		res = &xres->xres;
	} else {
		if (!printed_full) {
			printk("ioremap: done with statics, switching to malloc\n");
			printed_full = 1;
		}
		tlen = strlen(name);
		tack = kmalloc(sizeof (struct resource) + tlen + 1, GFP_KERNEL);
		if (tack == NULL) return NULL;
		memset(tack, 0, sizeof(struct resource));
		res = (struct resource *) tack;
		tack += sizeof (struct resource);
	}

	strlcpy(tack, name, XNMLN+1);
	res->name = tack;

	va = _sparc_ioremap(res, busno, phys, size);
	/* printk("ioremap(0x%x:%08lx[0x%lx])=%p\n", busno, phys, size, va); */ /* P3 diag */
	return va;
}

/*
 * Map [pa, pa+sz) into the sparc_iomap window and record it in "res".
 */
static void __iomem *
_sparc_ioremap(struct resource *res, u32 bus, u32 pa, int sz)
{
	unsigned long offset = ((unsigned long) pa) & (~PAGE_MASK);

	if (allocate_resource(&sparc_iomap, res,
	    (offset + sz + PAGE_SIZE-1) & PAGE_MASK,
	    sparc_iomap.start, sparc_iomap.end, PAGE_SIZE, NULL, NULL) != 0) {
		/* Usually we cannot see printks in this case. */
		prom_printf("alloc_io_res(%s): cannot occupy\n",
		    (res->name != NULL)? res->name: "???");
		prom_halt();
	}

	pa &= PAGE_MASK;
	sparc_mapiorange(bus, pa, res->start, res->end - res->start + 1);

	return (void __iomem *)(unsigned long)(res->start + offset);
}

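/*
 * A worked example of the size rounding above, assuming 4K pages and
 * made-up numbers: for pa == 0x30000004 and sz == 0x2000 the offset
 * is 0x004, so allocate_resource() is asked for
 * (0x004 + 0x2000 + 0xFFF) & PAGE_MASK == 0x3000 bytes (three pages,
 * enough to cover the unaligned head and tail), and the caller gets
 * back res->start + 0x004.
 */
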
/*
 * Complementary to _sparc_ioremap().
 */
static void _sparc_free_io(struct resource *res)
{
	unsigned long plen;

	plen = res->end - res->start + 1;
	BUG_ON((plen & (PAGE_SIZE-1)) != 0);
	sparc_unmapiorange(res->start, plen);
	release_resource(res);
}

#ifdef CONFIG_SBUS

void sbus_set_sbus64(struct device *dev, int x)
{
	printk("sbus_set_sbus64: unsupported\n");
}

/*
 * Allocate a chunk of memory suitable for DMA.
 * Typically devices use them for control blocks.
 * CPU may access them without any explicit flushing.
 */
void *sbus_alloc_consistent(struct device *dev, long len, u32 *dma_addrp)
{
	struct of_device *op = to_of_device(dev);
	unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK;
	unsigned long va;
	struct resource *res;
	int order;

	/* XXX why are some lengths signed, others unsigned? */
	if (len <= 0) {
		return NULL;
	}
	/* XXX So what is maxphys for us and how do drivers know it? */
	if (len > 256*1024) {			/* __get_free_pages() limit */
		return NULL;
	}

	order = get_order(len_total);
	if ((va = __get_free_pages(GFP_KERNEL|__GFP_COMP, order)) == 0)
		goto err_nopages;

	if ((res = kzalloc(sizeof(struct resource), GFP_KERNEL)) == NULL)
		goto err_nomem;

	if (allocate_resource(&_sparc_dvma, res, len_total,
	    _sparc_dvma.start, _sparc_dvma.end, PAGE_SIZE, NULL, NULL) != 0) {
		printk("sbus_alloc_consistent: cannot occupy 0x%lx\n", len_total);
		goto err_nova;
	}
	mmu_inval_dma_area(va, len_total);
	/* XXX The mmu_map_dma_area does this for us below, see comments. */
	/* sparc_mapiorange(0, virt_to_phys(va), res->start, len_total); */
	/*
	 * XXX That's where sdev would be used. Currently we load
	 * all iommu tables with the same translations.
	 */
	if (mmu_map_dma_area(dev, dma_addrp, va, res->start, len_total) != 0)
		goto err_noiommu;

	res->name = op->node->name;

	return (void *)(unsigned long)res->start;

err_noiommu:
	release_resource(res);
err_nova:
	free_pages(va, order);
err_nomem:
	kfree(res);
err_nopages:
	return NULL;
}

void sbus_free_consistent(struct device *dev, long n, void *p, u32 ba)
{
	struct resource *res;
	struct page *pgv;

	if ((res = _sparc_find_resource(&_sparc_dvma,
	    (unsigned long)p)) == NULL) {
		printk("sbus_free_consistent: cannot free %p\n", p);
		return;
	}

	if (((unsigned long)p & (PAGE_SIZE-1)) != 0) {
		printk("sbus_free_consistent: unaligned va %p\n", p);
		return;
	}

	n = (n + PAGE_SIZE-1) & PAGE_MASK;
	if ((res->end-res->start)+1 != n) {
		printk("sbus_free_consistent: region 0x%lx asked 0x%lx\n",
		    (long)((res->end-res->start)+1), n);
		return;
	}

	release_resource(res);
	kfree(res);

	/* mmu_inval_dma_area(va, n); */ /* it's consistent, isn't it */
	pgv = virt_to_page(p);
	mmu_unmap_dma_area(dev, ba, n);

	__free_pages(pgv, get_order(n));
}

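/*
 * A rough usage sketch for the two routines above.  "mydev", its
 * control-block layout and the surrounding driver context (op, the
 * device registers) are illustrative assumptions, not a real driver:
 *
 *	u32 dvma;
 *	struct mydev_cb *cb;
 *
 *	cb = sbus_alloc_consistent(&op->dev, sizeof(*cb), &dvma);
 *	if (cb != NULL) {
 *		cb->cmd = MYDEV_CMD_INIT;  (plain CPU store, no flush needed)
 *		...  (hand `dvma' to the device and run the transfer)
 *		sbus_free_consistent(&op->dev, sizeof(*cb), cb, dvma);
 *	}
 */
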
/*
 * Map a chunk of memory so that devices can see it.
 * CPU view of this memory may be inconsistent with
 * a device view and explicit flushing is necessary.
 */
dma_addr_t sbus_map_single(struct device *dev, void *va, size_t len, int direction)
{
	/* XXX why are some lengths signed, others unsigned? */
	if (len <= 0) {
		return 0;
	}
	/* XXX So what is maxphys for us and how do drivers know it? */
	if (len > 256*1024) {			/* __get_free_pages() limit */
		return 0;
	}
	return mmu_get_scsi_one(dev, va, len);
}

void sbus_unmap_single(struct device *dev, dma_addr_t ba, size_t n, int direction)
{
	mmu_release_scsi_one(dev, ba, n);
}

int sbus_map_sg(struct device *dev, struct scatterlist *sg, int n, int direction)
{
	mmu_get_scsi_sgl(dev, sg, n);

	/*
	 * XXX sparc64 can return a partial length here. sun4c should do this
	 * but it currently panics if it can't fulfill the request - Anton
	 */
	return n;
}

void sbus_unmap_sg(struct device *dev, struct scatterlist *sg, int n, int direction)
{
	mmu_release_scsi_sgl(dev, sg, n);
}

void sbus_dma_sync_single_for_cpu(struct device *dev, dma_addr_t ba, size_t size, int direction)
{
}

void sbus_dma_sync_single_for_device(struct device *dev, dma_addr_t ba, size_t size, int direction)
{
}

static int __init sparc_register_ioport(void)
{
	register_proc_sparc_ioport();

	return 0;
}

arch_initcall(sparc_register_ioport);

#endif /* CONFIG_SBUS */

#ifdef CONFIG_PCI

/* Allocate and map kernel buffer using consistent mode DMA for a device.
 * hwdev should be valid struct pci_dev pointer for PCI devices.
 */
void *pci_alloc_consistent(struct pci_dev *pdev, size_t len, dma_addr_t *pba)
{
	unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK;
	unsigned long va;
	struct resource *res;
	int order;

	if (len == 0) {
		return NULL;
	}
	if (len > 256*1024) {			/* __get_free_pages() limit */
		return NULL;
	}

	order = get_order(len_total);
	va = __get_free_pages(GFP_KERNEL, order);
	if (va == 0) {
		printk("pci_alloc_consistent: no %ld pages\n", len_total>>PAGE_SHIFT);
		return NULL;
	}

	if ((res = kzalloc(sizeof(struct resource), GFP_KERNEL)) == NULL) {
		free_pages(va, order);
		printk("pci_alloc_consistent: no core\n");
		return NULL;
	}

	if (allocate_resource(&_sparc_dvma, res, len_total,
	    _sparc_dvma.start, _sparc_dvma.end, PAGE_SIZE, NULL, NULL) != 0) {
		printk("pci_alloc_consistent: cannot occupy 0x%lx\n", len_total);
		free_pages(va, order);
		kfree(res);
		return NULL;
	}
	mmu_inval_dma_area(va, len_total);
#if 0
/* P3 */ printk("pci_alloc_consistent: kva %lx uncva %lx phys %lx size %lx\n",
  (long)va, (long)res->start, (long)virt_to_phys(va), len_total);
#endif
	sparc_mapiorange(0, virt_to_phys(va), res->start, len_total);

	*pba = virt_to_phys(va); /* equals virt_to_bus (R.I.P.) for us. */
	return (void *) res->start;
}

/* Free and unmap a consistent DMA buffer.
 * cpu_addr is what was returned from pci_alloc_consistent,
 * size must be the same as what was passed into pci_alloc_consistent,
 * and likewise dma_addr must be the same as what *dma_addrp was set to.
 *
 * References to the memory and mappings associated with cpu_addr/dma_addr
 * past this call are illegal.
 */
void pci_free_consistent(struct pci_dev *pdev, size_t n, void *p, dma_addr_t ba)
{
	struct resource *res;
	unsigned long pgp;

	if ((res = _sparc_find_resource(&_sparc_dvma,
	    (unsigned long)p)) == NULL) {
		printk("pci_free_consistent: cannot free %p\n", p);
		return;
	}

	if (((unsigned long)p & (PAGE_SIZE-1)) != 0) {
		printk("pci_free_consistent: unaligned va %p\n", p);
		return;
	}

	n = (n + PAGE_SIZE-1) & PAGE_MASK;
	if ((res->end-res->start)+1 != n) {
		printk("pci_free_consistent: region 0x%lx asked 0x%lx\n",
		    (long)((res->end-res->start)+1), (long)n);
		return;
	}

	pgp = (unsigned long) phys_to_virt(ba);	/* bus_to_virt actually */
	mmu_inval_dma_area(pgp, n);
	sparc_unmapiorange((unsigned long)p, n);

	release_resource(res);
	kfree(res);

	free_pages(pgp, get_order(n));
}

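/*
 * A rough usage sketch for the consistent-DMA pair above.  The driver,
 * its ring layout and register names are assumptions for illustration:
 *
 *	dma_addr_t ring_ba;
 *	struct mydev_ring *ring;
 *
 *	ring = pci_alloc_consistent(pdev, sizeof(*ring), &ring_ba);
 *	if (ring != NULL) {
 *		writel(ring_ba, ioaddr + MYDEV_RING_BASE);
 *		...
 *		pci_free_consistent(pdev, sizeof(*ring), ring, ring_ba);
 *	}
 */
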
/* Map a single buffer of the indicated size for DMA in streaming mode.
 * The 32-bit bus address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory
 * until either pci_unmap_single or pci_dma_sync_single_* is performed.
 */
dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size,
    int direction)
{
	BUG_ON(direction == PCI_DMA_NONE);
	/* IIep is write-through, not flushing. */
	return virt_to_phys(ptr);
}

/* Unmap a single streaming mode DMA translation.  The dma_addr and size
 * must match what was provided for in a previous pci_map_single call.  All
 * other usages are undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t ba, size_t size,
    int direction)
{
	BUG_ON(direction == PCI_DMA_NONE);
	if (direction != PCI_DMA_TODEVICE) {
		mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
		    (size + PAGE_SIZE-1) & PAGE_MASK);
	}
}

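/*
 * Streaming-mode sketch for a receive buffer (illustrative only; the
 * buffer and length come from a hypothetical caller):
 *
 *	dma_addr_t ba = pci_map_single(pdev, buf, len, PCI_DMA_FROMDEVICE);
 *	...  (point the device at `ba' and let it DMA into the buffer)
 *	pci_unmap_single(pdev, ba, len, PCI_DMA_FROMDEVICE);
 *	...  (CPU reads of `buf' now see what the device wrote)
 */
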
/*
 * Same as pci_map_single, but with pages.
 */
dma_addr_t pci_map_page(struct pci_dev *hwdev, struct page *page,
			unsigned long offset, size_t size, int direction)
{
	BUG_ON(direction == PCI_DMA_NONE);
	/* IIep is write-through, not flushing. */
	return page_to_phys(page) + offset;
}

void pci_unmap_page(struct pci_dev *hwdev,
			dma_addr_t dma_address, size_t size, int direction)
{
	BUG_ON(direction == PCI_DMA_NONE);
	/* mmu_inval_dma_area XXX */
}

/* Map a set of buffers described by scatterlist in streaming
 * mode for DMA.  This is the scatter-gather version of the
 * above pci_map_single interface.  Here the scatter-gather list
 * elements are each tagged with the appropriate dma address
 * and length.  They are obtained via sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 *       DMA address/length pairs than there are SG table elements.
 *       (for example via virtual mapping capabilities)
 *       The routine returns the number of addr/length pairs actually
 *       used, at most nents.
 *
 * Device ownership issues as mentioned above for pci_map_single are
 * the same here.
 */
int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents,
    int direction)
{
	struct scatterlist *sg;
	int n;

	BUG_ON(direction == PCI_DMA_NONE);
	/* IIep is write-through, not flushing. */
	for_each_sg(sgl, sg, nents, n) {
		BUG_ON(page_address(sg_page(sg)) == NULL);
		sg->dvma_address = virt_to_phys(sg_virt(sg));
		sg->dvma_length = sg->length;
	}
	return nents;
}

/* Unmap a set of streaming mode DMA translations.
 * Again, cpu read rules concerning calls here are the same as for
 * pci_unmap_single() above.
 */
void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents,
    int direction)
{
	struct scatterlist *sg;
	int n;

	BUG_ON(direction == PCI_DMA_NONE);
	if (direction != PCI_DMA_TODEVICE) {
		for_each_sg(sgl, sg, nents, n) {
			BUG_ON(page_address(sg_page(sg)) == NULL);
			mmu_inval_dma_area(
			    (unsigned long) page_address(sg_page(sg)),
			    (sg->length + PAGE_SIZE-1) & PAGE_MASK);
		}
	}
}

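/*
 * Scatter-gather sketch mirroring the single-buffer pattern above
 * (mydev_load_seg() is a hypothetical device-programming helper):
 *
 *	struct scatterlist *sg;
 *	int i, count;
 *
 *	count = pci_map_sg(pdev, sgl, nents, PCI_DMA_TODEVICE);
 *	for_each_sg(sgl, sg, count, i)
 *		mydev_load_seg(i, sg_dma_address(sg), sg_dma_len(sg));
 *	...  (run the transfer)
 *	pci_unmap_sg(pdev, sgl, nents, PCI_DMA_TODEVICE);
 */
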
/* Make physical memory consistent for a single
 * streaming mode DMA translation before or after a transfer.
 *
 * If you perform a pci_map_single() but wish to interrogate the
 * buffer using the cpu, yet do not wish to tear down the PCI dma
 * mapping, you must call this function before doing so.  At the
 * next point you give the PCI dma address back to the card, you
 * must first perform a pci_dma_sync_for_device, and then the
 * device again owns the buffer.
 */
void pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t ba, size_t size, int direction)
{
	BUG_ON(direction == PCI_DMA_NONE);
	if (direction != PCI_DMA_TODEVICE) {
		mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
		    (size + PAGE_SIZE-1) & PAGE_MASK);
	}
}

void pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t ba, size_t size, int direction)
{
	BUG_ON(direction == PCI_DMA_NONE);
	if (direction != PCI_DMA_TODEVICE) {
		mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
		    (size + PAGE_SIZE-1) & PAGE_MASK);
	}
}

/* Make physical memory consistent for a set of streaming
 * mode DMA translations after a transfer.
 *
 * The same as pci_dma_sync_single_* but for a scatter-gather list,
 * same rules and usage.
 */
void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, int direction)
{
	struct scatterlist *sg;
	int n;

	BUG_ON(direction == PCI_DMA_NONE);
	if (direction != PCI_DMA_TODEVICE) {
		for_each_sg(sgl, sg, nents, n) {
			BUG_ON(page_address(sg_page(sg)) == NULL);
			mmu_inval_dma_area(
			    (unsigned long) page_address(sg_page(sg)),
			    (sg->length + PAGE_SIZE-1) & PAGE_MASK);
		}
	}
}

void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, int direction)
{
	struct scatterlist *sg;
	int n;

	BUG_ON(direction == PCI_DMA_NONE);
	if (direction != PCI_DMA_TODEVICE) {
		for_each_sg(sgl, sg, nents, n) {
			BUG_ON(page_address(sg_page(sg)) == NULL);
			mmu_inval_dma_area(
			    (unsigned long) page_address(sg_page(sg)),
			    (sg->length + PAGE_SIZE-1) & PAGE_MASK);
		}
	}
}
#endif /* CONFIG_PCI */

#ifdef CONFIG_PROC_FS

static int
_sparc_io_get_info(char *buf, char **start, off_t fpos, int length, int *eof,
    void *data)
{
	char *p = buf, *e = buf + length;
	struct resource *r;
	const char *nm;

	for (r = ((struct resource *)data)->child; r != NULL; r = r->sibling) {
		if (p + 32 >= e)	/* Better than nothing */
			break;
		if ((nm = r->name) == NULL) nm = "???";
		p += sprintf(p, "%016llx-%016llx: %s\n",
				(unsigned long long)r->start,
				(unsigned long long)r->end, nm);
	}

	return p-buf;
}

#endif /* CONFIG_PROC_FS */

/*
 * This is a version of find_resource and it belongs to kernel/resource.c.
 * Until we have agreement with Linus and Martin, it lingers here.
 *
 * XXX Too slow. Can have 8192 DVMA pages on sun4m in the worst case.
 * This probably warrants some sort of hashing.
 */
static struct resource *_sparc_find_resource(struct resource *root,
					     unsigned long hit)
{
	struct resource *tmp;

	for (tmp = root->child; tmp != NULL; tmp = tmp->sibling) {
		if (tmp->start <= hit && tmp->end >= hit)
			return tmp;
	}
	return NULL;
}

static void register_proc_sparc_ioport(void)
{
#ifdef CONFIG_PROC_FS
	create_proc_read_entry("io_map", 0, NULL, _sparc_io_get_info, &sparc_iomap);
	create_proc_read_entry("dvma_map", 0, NULL, _sparc_io_get_info, &_sparc_dvma);
#endif
}