/*
 * ioport.c:  Simple io mapping allocator.
 *
 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1995 Miguel de Icaza (miguel@nuclecu.unam.mx)
 *
 * 1996: sparc_free_io, 1999: ioremap()/iounmap() by Pete Zaitcev.
 *
 * 2000/01/29
 * <rth> zait: as long as pci_alloc_consistent produces something addressable,
 *	things are ok.
 * <zaitcev> rth: no, it is relevant, because get_free_pages returns you a
 *	pointer into the big page mapping
 * <rth> zait: so what?
 * <rth> zait: remap_it_my_way(virt_to_phys(get_free_page()))
 * <zaitcev> Hmm
 * <zaitcev> Suppose I did this remap_it_my_way(virt_to_phys(get_free_page())).
 *	So far so good.
 * <zaitcev> Now, driver calls pci_free_consistent(with result of
 *	remap_it_my_way()).
 * <zaitcev> How do you find the address to pass to free_pages()?
 * <rth> zait: walk the page tables?  It's only two or three level after all.
 * <rth> zait: you have to walk them anyway to remove the mapping.
 * <zaitcev> Hmm
 * <zaitcev> Sounds reasonable
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/ioport.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/pci.h>		/* struct pci_dev */
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/scatterlist.h>
#include <linux/of_device.h>

#include <asm/io.h>
#include <asm/vaddrs.h>
#include <asm/oplib.h>
#include <asm/prom.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/iommu.h>
#include <asm/io-unit.h>
#include <asm/leon.h>

#ifdef CONFIG_SPARC_LEON
#define mmu_inval_dma_area(p, l) leon_flush_dcache_all()
#else
#define mmu_inval_dma_area(p, l)	/* Anton pulled it out for 2.4.0-xx */
#endif

static struct resource *_sparc_find_resource(struct resource *r,
					     unsigned long);

static void __iomem *_sparc_ioremap(struct resource *res, u32 bus, u32 pa, int sz);
static void __iomem *_sparc_alloc_io(unsigned int busno, unsigned long phys,
    unsigned long size, char *name);
static void _sparc_free_io(struct resource *res);

static void register_proc_sparc_ioport(void);

/* This resource covers the virtual memory set aside for DVMA mappings */
static struct resource _sparc_dvma = {
	.name = "sparc_dvma", .start = DVMA_VADDR, .end = DVMA_END - 1
};
/* This marks the start of I/O mappings and is visible from outside. */
/*ext*/ struct resource sparc_iomap = {
	.name = "sparc_iomap", .start = IOBASE_VADDR, .end = IOBASE_END - 1
};

/*
 * Our mini-allocator...
 * Boy this is gross! We need it because we must map I/O for
 * timers and interrupt controller before the kmalloc is available.
 */

#define XNMLN  15
#define XNRES  10	/* SS-10 uses 8 */

struct xresource {
	struct resource xres;	/* Must be first */
	int xflag;		/* 1 == used */
	char xname[XNMLN+1];
};

static struct xresource xresv[XNRES];

static struct xresource *xres_alloc(void) {
	struct xresource *xrp;
	int n;

	xrp = xresv;
	for (n = 0; n < XNRES; n++) {
		if (xrp->xflag == 0) {
			xrp->xflag = 1;
			return xrp;
		}
		xrp++;
	}
	return NULL;
}

static void xres_free(struct xresource *xrp) {
	xrp->xflag = 0;
}

/*
 * These are typically used in PCI drivers
 * which are trying to be cross-platform.
 *
 * Bus type is always zero on IIep.
 */
void __iomem *ioremap(unsigned long offset, unsigned long size)
{
	char name[14];

	sprintf(name, "phys_%08x", (u32)offset);
	return _sparc_alloc_io(0, offset, size, name);
}
EXPORT_SYMBOL(ioremap);

/*
 * Complementary to ioremap().
 */
void iounmap(volatile void __iomem *virtual)
{
	unsigned long vaddr = (unsigned long) virtual & PAGE_MASK;
	struct resource *res;

	if ((res = _sparc_find_resource(&sparc_iomap, vaddr)) == NULL) {
		printk("free_io/iounmap: cannot free %lx\n", vaddr);
		return;
	}
	_sparc_free_io(res);

	if ((char *)res >= (char*)xresv && (char *)res < (char *)&xresv[XNRES]) {
		xres_free((struct xresource *)res);
	} else {
		kfree(res);
	}
}
EXPORT_SYMBOL(iounmap);
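
/*
 * Illustrative usage, not part of this file: a cross-platform driver
 * pairs ioremap() with iounmap() roughly as sketched below.  The base
 * address, window size and register offsets are made-up values.
 *
 *	void __iomem *regs = ioremap(phys_base, 0x1000);
 *	if (regs) {
 *		u32 id = readl(regs + 0x04);	.. read a device register ..
 *		writel(1, regs + 0x00);		.. poke a control register ..
 *		iounmap(regs);
 *	}
 */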

void __iomem *of_ioremap(struct resource *res, unsigned long offset,
			 unsigned long size, char *name)
{
	return _sparc_alloc_io(res->flags & 0xF,
			       res->start + offset,
			       size, name);
}
EXPORT_SYMBOL(of_ioremap);

void of_iounmap(struct resource *res, void __iomem *base, unsigned long size)
{
	iounmap(base);
}
EXPORT_SYMBOL(of_iounmap);
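
/*
 * Illustrative usage, not part of this file: an OF driver typically maps
 * a register window straight from its of_device resources.  "op" is the
 * probed struct of_device; that resource 0 holds the registers, and the
 * name string, are assumptions made for the sketch.
 *
 *	void __iomem *regs = of_ioremap(&op->resource[0], 0,
 *					resource_size(&op->resource[0]),
 *					"mydev-regs");
 *	...
 *	of_iounmap(&op->resource[0], regs, resource_size(&op->resource[0]));
 */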

/*
 * Meat of mapping
 */
static void __iomem *_sparc_alloc_io(unsigned int busno, unsigned long phys,
    unsigned long size, char *name)
{
	static int printed_full;
	struct xresource *xres;
	struct resource *res;
	char *tack;
	int tlen;
	void __iomem *va;	/* P3 diag */

	if (name == NULL) name = "???";

	if ((xres = xres_alloc()) != 0) {
		tack = xres->xname;
		res = &xres->xres;
	} else {
		if (!printed_full) {
			printk("ioremap: done with statics, switching to malloc\n");
			printed_full = 1;
		}
		tlen = strlen(name);
		tack = kmalloc(sizeof (struct resource) + tlen + 1, GFP_KERNEL);
		if (tack == NULL) return NULL;
		memset(tack, 0, sizeof(struct resource));
		res = (struct resource *) tack;
		tack += sizeof (struct resource);
	}

	strlcpy(tack, name, XNMLN+1);
	res->name = tack;

	va = _sparc_ioremap(res, busno, phys, size);
	/* printk("ioremap(0x%x:%08lx[0x%lx])=%p\n", busno, phys, size, va); */ /* P3 diag */
	return va;
}

/*
 */
static void __iomem *
_sparc_ioremap(struct resource *res, u32 bus, u32 pa, int sz)
{
	unsigned long offset = ((unsigned long) pa) & (~PAGE_MASK);

	if (allocate_resource(&sparc_iomap, res,
	    (offset + sz + PAGE_SIZE-1) & PAGE_MASK,
	    sparc_iomap.start, sparc_iomap.end, PAGE_SIZE, NULL, NULL) != 0) {
		/* Usually we cannot see printks in this case. */
		prom_printf("alloc_io_res(%s): cannot occupy\n",
		    (res->name != NULL)? res->name: "???");
		prom_halt();
	}

	pa &= PAGE_MASK;
	sparc_mapiorange(bus, pa, res->start, res->end - res->start + 1);

	return (void __iomem *)(unsigned long)(res->start + offset);
}

/*
 * Complementary to _sparc_ioremap().
 */
static void _sparc_free_io(struct resource *res)
{
	unsigned long plen;

	plen = res->end - res->start + 1;
	BUG_ON((plen & (PAGE_SIZE-1)) != 0);
	sparc_unmapiorange(res->start, plen);
	release_resource(res);
}

#ifdef CONFIG_SBUS

void sbus_set_sbus64(struct device *dev, int x)
{
	printk("sbus_set_sbus64: unsupported\n");
}
EXPORT_SYMBOL(sbus_set_sbus64);

/*
 * Allocate a chunk of memory suitable for DMA.
 * Typically devices use them for control blocks.
 * CPU may access them without any explicit flushing.
 */
static void *sbus_alloc_coherent(struct device *dev, size_t len,
				 dma_addr_t *dma_addrp, gfp_t gfp)
{
	struct of_device *op = to_of_device(dev);
	unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK;
	unsigned long va;
	struct resource *res;
	int order;

	/* XXX why are some lengths signed, others unsigned? */
	if (len <= 0) {
		return NULL;
	}
	/* XXX So what is maxphys for us and how do drivers know it? */
	if (len > 256*1024) {			/* __get_free_pages() limit */
		return NULL;
	}

	order = get_order(len_total);
	if ((va = __get_free_pages(GFP_KERNEL|__GFP_COMP, order)) == 0)
		goto err_nopages;

	if ((res = kzalloc(sizeof(struct resource), GFP_KERNEL)) == NULL)
		goto err_nomem;

	if (allocate_resource(&_sparc_dvma, res, len_total,
	    _sparc_dvma.start, _sparc_dvma.end, PAGE_SIZE, NULL, NULL) != 0) {
		printk("sbus_alloc_consistent: cannot occupy 0x%lx", len_total);
		goto err_nova;
	}
	mmu_inval_dma_area(va, len_total);
	// XXX The mmu_map_dma_area does this for us below, see comments.
	// sparc_mapiorange(0, virt_to_phys(va), res->start, len_total);
	/*
	 * XXX That's where sdev would be used. Currently we load
	 * all iommu tables with the same translations.
	 */
	if (mmu_map_dma_area(dev, dma_addrp, va, res->start, len_total) != 0)
		goto err_noiommu;

	res->name = op->node->name;

	return (void *)(unsigned long)res->start;

err_noiommu:
	release_resource(res);
err_nova:
	free_pages(va, order);
err_nomem:
	kfree(res);
err_nopages:
	return NULL;
}

static void sbus_free_coherent(struct device *dev, size_t n, void *p,
			       dma_addr_t ba)
{
	struct resource *res;
	struct page *pgv;

	if ((res = _sparc_find_resource(&_sparc_dvma,
	    (unsigned long)p)) == NULL) {
		printk("sbus_free_consistent: cannot free %p\n", p);
		return;
	}

	if (((unsigned long)p & (PAGE_SIZE-1)) != 0) {
		printk("sbus_free_consistent: unaligned va %p\n", p);
		return;
	}

	n = (n + PAGE_SIZE-1) & PAGE_MASK;
	if ((res->end-res->start)+1 != n) {
		printk("sbus_free_consistent: region 0x%lx asked 0x%zx\n",
		    (long)((res->end-res->start)+1), n);
		return;
	}

	release_resource(res);
	kfree(res);

	/* mmu_inval_dma_area(va, n); */ /* it's consistent, isn't it */
	pgv = virt_to_page(p);
	mmu_unmap_dma_area(dev, ba, n);

	__free_pages(pgv, get_order(n));
}
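
/*
 * Illustrative usage, not part of this file: drivers do not call the two
 * helpers above directly; they go through the generic DMA API, which
 * dispatches into sbus_dma_ops.  A control-block allocation would look
 * roughly like this (the device pointer and the 4096-byte size are
 * made-up for the sketch):
 *
 *	dma_addr_t dma_handle;
 *	void *cpu_addr = dma_alloc_coherent(&op->dev, 4096,
 *					    &dma_handle, GFP_KERNEL);
 *	if (cpu_addr) {
 *		.. hand dma_handle to the device, touch cpu_addr from the CPU ..
 *		dma_free_coherent(&op->dev, 4096, cpu_addr, dma_handle);
 *	}
 */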

/*
 * Map a chunk of memory so that devices can see it.
 * CPU view of this memory may be inconsistent with
 * a device view and explicit flushing is necessary.
 */
static dma_addr_t sbus_map_page(struct device *dev, struct page *page,
				unsigned long offset, size_t len,
				enum dma_data_direction dir,
				struct dma_attrs *attrs)
{
	void *va = page_address(page) + offset;

	/* XXX why are some lengths signed, others unsigned? */
	if (len <= 0) {
		return 0;
	}
	/* XXX So what is maxphys for us and how do drivers know it? */
	if (len > 256*1024) {			/* __get_free_pages() limit */
		return 0;
	}
	return mmu_get_scsi_one(dev, va, len);
}

static void sbus_unmap_page(struct device *dev, dma_addr_t ba, size_t n,
			    enum dma_data_direction dir, struct dma_attrs *attrs)
{
	mmu_release_scsi_one(dev, ba, n);
}
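
/*
 * Illustrative usage, not part of this file: streaming mappings are also
 * reached through the generic DMA API.  A driver handing a kernel buffer
 * to the device for one transfer might do roughly the following (buffer,
 * length and direction are made-up for the sketch):
 *
 *	dma_addr_t ba = dma_map_single(&op->dev, buf, len, DMA_TO_DEVICE);
 *	.. start the transfer using ba ..
 *	dma_unmap_single(&op->dev, ba, len, DMA_TO_DEVICE);
 */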

static int sbus_map_sg(struct device *dev, struct scatterlist *sg, int n,
		       enum dma_data_direction dir, struct dma_attrs *attrs)
{
	mmu_get_scsi_sgl(dev, sg, n);

	/*
	 * XXX sparc64 can return a partial length here. sun4c should do this
	 * but it currently panics if it can't fulfill the request - Anton
	 */
	return n;
}

static void sbus_unmap_sg(struct device *dev, struct scatterlist *sg, int n,
			  enum dma_data_direction dir, struct dma_attrs *attrs)
{
	mmu_release_scsi_sgl(dev, sg, n);
}

static void sbus_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
				 int n, enum dma_data_direction dir)
{
	BUG();
}

static void sbus_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
				    int n, enum dma_data_direction dir)
{
	BUG();
}

struct dma_map_ops sbus_dma_ops = {
	.alloc_coherent		= sbus_alloc_coherent,
	.free_coherent		= sbus_free_coherent,
	.map_page		= sbus_map_page,
	.unmap_page		= sbus_unmap_page,
	.map_sg			= sbus_map_sg,
	.unmap_sg		= sbus_unmap_sg,
	.sync_sg_for_cpu	= sbus_sync_sg_for_cpu,
	.sync_sg_for_device	= sbus_sync_sg_for_device,
};

struct dma_map_ops *dma_ops = &sbus_dma_ops;
EXPORT_SYMBOL(dma_ops);

static int __init sparc_register_ioport(void)
{
	register_proc_sparc_ioport();

	return 0;
}

arch_initcall(sparc_register_ioport);

#endif /* CONFIG_SBUS */

#ifdef CONFIG_PCI

/* Allocate and map kernel buffer using consistent mode DMA for a device.
 * hwdev should be valid struct pci_dev pointer for PCI devices.
 */
static void *pci32_alloc_coherent(struct device *dev, size_t len,
				  dma_addr_t *pba, gfp_t gfp)
{
	unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK;
	unsigned long va;
	struct resource *res;
	int order;

	if (len == 0) {
		return NULL;
	}
	if (len > 256*1024) {			/* __get_free_pages() limit */
		return NULL;
	}

	order = get_order(len_total);
	va = __get_free_pages(GFP_KERNEL, order);
	if (va == 0) {
		printk("pci_alloc_consistent: no %ld pages\n", len_total>>PAGE_SHIFT);
		return NULL;
	}

	if ((res = kzalloc(sizeof(struct resource), GFP_KERNEL)) == NULL) {
		free_pages(va, order);
		printk("pci_alloc_consistent: no core\n");
		return NULL;
	}

	if (allocate_resource(&_sparc_dvma, res, len_total,
	    _sparc_dvma.start, _sparc_dvma.end, PAGE_SIZE, NULL, NULL) != 0) {
		printk("pci_alloc_consistent: cannot occupy 0x%lx", len_total);
		free_pages(va, order);
		kfree(res);
		return NULL;
	}
	mmu_inval_dma_area(va, len_total);
#if 0
/* P3 */ printk("pci_alloc_consistent: kva %lx uncva %lx phys %lx size %lx\n",
  (long)va, (long)res->start, (long)virt_to_phys(va), len_total);
#endif
	sparc_mapiorange(0, virt_to_phys(va), res->start, len_total);

	*pba = virt_to_phys(va); /* equals virt_to_bus (R.I.P.) for us. */
	return (void *) res->start;
}

/* Free and unmap a consistent DMA buffer.
 * cpu_addr is what was returned from pci_alloc_consistent,
 * size must be the same as what was passed into pci_alloc_consistent,
 * and likewise dma_addr must be the same as what *dma_addrp was set to.
 *
 * References to the memory and mappings associated with cpu_addr/dma_addr
 * past this call are illegal.
 */
static void pci32_free_coherent(struct device *dev, size_t n, void *p,
				dma_addr_t ba)
{
	struct resource *res;
	unsigned long pgp;

	if ((res = _sparc_find_resource(&_sparc_dvma,
	    (unsigned long)p)) == NULL) {
		printk("pci_free_consistent: cannot free %p\n", p);
		return;
	}

	if (((unsigned long)p & (PAGE_SIZE-1)) != 0) {
		printk("pci_free_consistent: unaligned va %p\n", p);
		return;
	}

	n = (n + PAGE_SIZE-1) & PAGE_MASK;
	if ((res->end-res->start)+1 != n) {
		printk("pci_free_consistent: region 0x%lx asked 0x%lx\n",
		    (long)((res->end-res->start)+1), (long)n);
		return;
	}

	pgp = (unsigned long) phys_to_virt(ba);	/* bus_to_virt actually */
	mmu_inval_dma_area(pgp, n);
	sparc_unmapiorange((unsigned long)p, n);

	release_resource(res);
	kfree(res);

	free_pages(pgp, get_order(n));
}

/*
 * Same as pci_map_single, but with pages.
 */
static dma_addr_t pci32_map_page(struct device *dev, struct page *page,
				 unsigned long offset, size_t size,
				 enum dma_data_direction dir,
				 struct dma_attrs *attrs)
{
	/* IIep is write-through, not flushing. */
	return page_to_phys(page) + offset;
}

/* Map a set of buffers described by scatterlist in streaming
 * mode for DMA.  This is the scatter-gather version of the
 * above pci_map_single interface.  Here the scatter-gather list
 * elements are each tagged with the appropriate dma address
 * and length.  They are obtained via sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 *       DMA address/length pairs than there are SG table elements.
 *       (for example via virtual mapping capabilities)
 *       The routine returns the number of addr/length pairs actually
 *       used, at most nents.
 *
 * Device ownership issues as mentioned above for pci_map_single are
 * the same here.
 */
static int pci32_map_sg(struct device *device, struct scatterlist *sgl,
			int nents, enum dma_data_direction dir,
			struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int n;

	/* IIep is write-through, not flushing. */
	for_each_sg(sgl, sg, nents, n) {
		BUG_ON(page_address(sg_page(sg)) == NULL);
		sg->dma_address = virt_to_phys(sg_virt(sg));
		sg->dma_length = sg->length;
	}
	return nents;
}
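
/*
 * Illustrative usage, not part of this file: a driver builds a
 * scatterlist, maps it, then programs the per-entry DMA address and
 * length into the hardware.  The four-entry table and the direction
 * are made-up for the sketch.
 *
 *	struct scatterlist sgl[4], *sg;
 *	int i, count;
 *
 *	sg_init_table(sgl, 4);
 *	.. fill each entry with sg_set_page() or sg_set_buf() ..
 *	count = dma_map_sg(&pdev->dev, sgl, 4, DMA_FROM_DEVICE);
 *	for_each_sg(sgl, sg, count, i)
 *		.. program sg_dma_address(sg) and sg_dma_len(sg) ..
 *	dma_unmap_sg(&pdev->dev, sgl, 4, DMA_FROM_DEVICE);
 */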

/* Unmap a set of streaming mode DMA translations.
 * Again, cpu read rules concerning calls here are the same as for
 * pci_unmap_single() above.
 */
static void pci32_unmap_sg(struct device *dev, struct scatterlist *sgl,
			   int nents, enum dma_data_direction dir,
			   struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int n;

	if (dir != PCI_DMA_TODEVICE) {
		for_each_sg(sgl, sg, nents, n) {
			BUG_ON(page_address(sg_page(sg)) == NULL);
			mmu_inval_dma_area(
			    (unsigned long) page_address(sg_page(sg)),
			    (sg->length + PAGE_SIZE-1) & PAGE_MASK);
		}
	}
}

/* Make physical memory consistent for a single
 * streaming mode DMA translation before or after a transfer.
 *
 * If you perform a pci_map_single() but wish to interrogate the
 * buffer using the cpu, yet do not wish to teardown the PCI dma
 * mapping, you must call this function before doing so.  At the
 * next point you give the PCI dma address back to the card, you
 * must first perform a pci_dma_sync_for_device, and then the
 * device again owns the buffer.
 */
static void pci32_sync_single_for_cpu(struct device *dev, dma_addr_t ba,
				      size_t size, enum dma_data_direction dir)
{
	if (dir != PCI_DMA_TODEVICE) {
		mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
		    (size + PAGE_SIZE-1) & PAGE_MASK);
	}
}

static void pci32_sync_single_for_device(struct device *dev, dma_addr_t ba,
					 size_t size, enum dma_data_direction dir)
{
	if (dir != PCI_DMA_TODEVICE) {
		mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
		    (size + PAGE_SIZE-1) & PAGE_MASK);
	}
}
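
/*
 * Illustrative usage, not part of this file: the hand-off described in
 * the comment above looks roughly like this from a driver (ba, len and
 * the direction are made-up for the sketch):
 *
 *	dma_sync_single_for_cpu(&pdev->dev, ba, len, DMA_FROM_DEVICE);
 *	.. inspect or copy out the buffer from the CPU ..
 *	dma_sync_single_for_device(&pdev->dev, ba, len, DMA_FROM_DEVICE);
 *	.. the device may now DMA into the buffer again ..
 */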

/* Make physical memory consistent for a set of streaming
 * mode DMA translations after a transfer.
 *
 * The same as pci_dma_sync_single_* but for a scatter-gather list,
 * same rules and usage.
 */
static void pci32_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
				  int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int n;

	if (dir != PCI_DMA_TODEVICE) {
		for_each_sg(sgl, sg, nents, n) {
			BUG_ON(page_address(sg_page(sg)) == NULL);
			mmu_inval_dma_area(
			    (unsigned long) page_address(sg_page(sg)),
			    (sg->length + PAGE_SIZE-1) & PAGE_MASK);
		}
	}
}

static void pci32_sync_sg_for_device(struct device *device, struct scatterlist *sgl,
				     int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int n;

	if (dir != PCI_DMA_TODEVICE) {
		for_each_sg(sgl, sg, nents, n) {
			BUG_ON(page_address(sg_page(sg)) == NULL);
			mmu_inval_dma_area(
			    (unsigned long) page_address(sg_page(sg)),
			    (sg->length + PAGE_SIZE-1) & PAGE_MASK);
		}
	}
}

struct dma_map_ops pci32_dma_ops = {
	.alloc_coherent		= pci32_alloc_coherent,
	.free_coherent		= pci32_free_coherent,
	.map_page		= pci32_map_page,
	.map_sg			= pci32_map_sg,
	.unmap_sg		= pci32_unmap_sg,
	.sync_single_for_cpu	= pci32_sync_single_for_cpu,
	.sync_single_for_device	= pci32_sync_single_for_device,
	.sync_sg_for_cpu	= pci32_sync_sg_for_cpu,
	.sync_sg_for_device	= pci32_sync_sg_for_device,
};
EXPORT_SYMBOL(pci32_dma_ops);

#endif /* CONFIG_PCI */

/*
 * Return whether the given PCI device DMA address mask can be
 * supported properly.  For example, if your device can only drive the
 * low 24-bits during PCI bus mastering, then you would pass
 * 0x00ffffff as the mask to this function.
 */
int dma_supported(struct device *dev, u64 mask)
{
#ifdef CONFIG_PCI
	if (dev->bus == &pci_bus_type)
		return 1;
#endif
	return 0;
}
EXPORT_SYMBOL(dma_supported);
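
/*
 * Illustrative usage, not part of this file: a driver whose device can
 * only generate 24-bit bus addresses would check and set its mask early
 * in probe, e.g.:
 *
 *	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(24)))
 *		return -ENODEV;		.. mask not supported, give up ..
 *
 * dma_set_mask() is expected to consult dma_supported() above, so on this
 * platform such a request only succeeds for PCI devices.
 */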

#ifdef CONFIG_PROC_FS

static int sparc_io_proc_show(struct seq_file *m, void *v)
{
	struct resource *root = m->private, *r;
	const char *nm;

	for (r = root->child; r != NULL; r = r->sibling) {
		if ((nm = r->name) == 0) nm = "???";
		seq_printf(m, "%016llx-%016llx: %s\n",
				(unsigned long long)r->start,
				(unsigned long long)r->end, nm);
	}

	return 0;
}

static int sparc_io_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, sparc_io_proc_show, PDE(inode)->data);
}

static const struct file_operations sparc_io_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= sparc_io_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
#endif /* CONFIG_PROC_FS */

/*
 * This is a version of find_resource and it belongs to kernel/resource.c.
 * Until we have agreement with Linus and Martin, it lingers here.
 *
 * XXX Too slow. Can have 8192 DVMA pages on sun4m in the worst case.
 * This probably warrants some sort of hashing.
 */
static struct resource *_sparc_find_resource(struct resource *root,
					     unsigned long hit)
{
	struct resource *tmp;

	for (tmp = root->child; tmp != 0; tmp = tmp->sibling) {
		if (tmp->start <= hit && tmp->end >= hit)
			return tmp;
	}
	return NULL;
}

static void register_proc_sparc_ioport(void)
{
#ifdef CONFIG_PROC_FS
	proc_create_data("io_map", 0, NULL, &sparc_io_proc_fops, &sparc_iomap);
	proc_create_data("dvma_map", 0, NULL, &sparc_io_proc_fops, &_sparc_dvma);
#endif
}