/*
 *  linux/arch/m68k/mm/kmap.c
 *
 *  Copyright (C) 1997 Roman Hodek
 *
 *  10/01/99 cleaned up the code and changed to the same interface
 *	     used by other architectures		/Roman Zippel
 */

#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/setup.h>
#include <asm/segment.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/io.h>
#include <asm/system.h>

#undef DEBUG

#define PTRTREESIZE	(256*1024)
/*
 * For 040/060 we can use the virtual memory area like other architectures,
 * but for 020/030 we want to use early termination page descriptors and we
 * can't mix this with normal page descriptors, so we have to copy that code
 * (mm/vmalloc.c) and return appropriately aligned addresses.
 */
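
/*
 * Illustration (not from the original source): on a 020/030 a single
 * early termination pointer-table entry maps one PTRTREESIZE (256 KiB)
 * chunk directly, so a hypothetical 1 MiB mapping fills four consecutive
 * pmd slots instead of 256 individual page table entries:
 *
 *	pmd_dir->pmd[n + 0] = physaddr;			   first 256 KiB
 *	pmd_dir->pmd[n + 1] = physaddr + PTRTREESIZE;	   next 256 KiB
 *	...
 *
 * which is exactly what the 020/030 branch of the loop in __ioremap()
 * below does, one slot per iteration.
 */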

#ifdef CPU_M68040_OR_M68060_ONLY

#define IO_SIZE		PAGE_SIZE

static inline struct vm_struct *get_io_area(unsigned long size)
{
	return get_vm_area(size, VM_IOREMAP);
}

static inline void free_io_area(void *addr)
{
	vfree((void *)(PAGE_MASK & (unsigned long)addr));
}

#else

#define IO_SIZE		(256*1024)

static struct vm_struct *iolist;

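/*
 * First-fit allocator over the KMAP_START..KMAP_END window.  The iolist
 * is kept sorted by address, so the first gap between existing areas
 * (or after the last one) that is large enough is taken.  Note that
 * area->size is padded by one extra IO_SIZE chunk, presumably as a
 * guard gap in the same spirit as vmalloc()'s guard page.
 */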
static struct vm_struct *get_io_area(unsigned long size)
{
	unsigned long addr;
	struct vm_struct **p, *tmp, *area;

	area = kmalloc(sizeof(*area), GFP_KERNEL);
	if (!area)
		return NULL;
	addr = KMAP_START;
	for (p = &iolist; (tmp = *p) ; p = &tmp->next) {
		if (size + addr < (unsigned long)tmp->addr)
			break;
		if (addr > KMAP_END-size) {
			kfree(area);	/* don't leak the area on failure */
			return NULL;
		}
		addr = tmp->size + (unsigned long)tmp->addr;
	}
	area->addr = (void *)addr;
	area->size = size + IO_SIZE;
	area->next = *p;
	*p = area;
	return area;
}
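/*
 * Unlink the area that owns addr from the iolist, tear down its page
 * table entries via __iounmap() and free the bookkeeping structure.
 * addr is rounded down to the enclosing IO_SIZE slot first, so callers
 * may pass the offset address returned by __ioremap().
 */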
static inline void free_io_area(void *addr)
{
	struct vm_struct **p, *tmp;

	if (!addr)
		return;
	addr = (void *)((unsigned long)addr & -IO_SIZE);
	for (p = &iolist ; (tmp = *p) ; p = &tmp->next) {
		if (tmp->addr == addr) {
			*p = tmp->next;
			__iounmap(tmp->addr, tmp->size);
			kfree(tmp);
			return;
		}
	}
}

#endif

/*
 * Map some physical address range into the kernel address space. The
 * code is copied and adapted from map_chunk().
 */
/* Rewritten by Andreas Schwab to remove all races. */

void __iomem *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag)
{
	struct vm_struct *area;
	unsigned long virtaddr, retaddr;
	long offset;
	pgd_t *pgd_dir;
	pmd_t *pmd_dir;
	pte_t *pte_dir;

	/*
	 * Don't allow mappings that wrap..
	 */
	if (!size || size > physaddr + size)
		return NULL;

#ifdef CONFIG_AMIGA
	if (MACH_IS_AMIGA) {
		if ((physaddr >= 0x40000000) && (physaddr + size < 0x60000000)
		    && (cacheflag == IOMAP_NOCACHE_SER))
			return (void __iomem *)physaddr;
	}
#endif

#ifdef DEBUG
	printk("ioremap: 0x%lx,0x%lx(%d) - ", physaddr, size, cacheflag);
#endif
	/*
	 * Mappings have to be aligned
	 */
	offset = physaddr & (IO_SIZE - 1);
	physaddr &= -IO_SIZE;
	size = (size + offset + IO_SIZE - 1) & -IO_SIZE;
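	/*
	 * Worked example (illustrative values only): on a 020/030, where
	 * IO_SIZE is 256 KiB, physaddr = 0x00f08123 and size = 0x100 give
	 * offset = 0x8123, physaddr = 0x00f00000 and size = 0x40000, i.e.
	 * one full IO_SIZE chunk covering the requested range.
	 */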

	/*
	 * Ok, go for it..
	 */
	area = get_io_area(size);
	if (!area)
		return NULL;

	virtaddr = (unsigned long)area->addr;
	retaddr = virtaddr + offset;
#ifdef DEBUG
	printk("0x%lx,0x%lx,0x%lx", physaddr, virtaddr, retaddr);
#endif

	/*
	 * add cache and table flags to physical address
	 */
	if (CPU_IS_040_OR_060) {
		physaddr |= (_PAGE_PRESENT | _PAGE_GLOBAL040 |
			     _PAGE_ACCESSED | _PAGE_DIRTY);
		switch (cacheflag) {
		case IOMAP_FULL_CACHING:
			physaddr |= _PAGE_CACHE040;
			break;
		case IOMAP_NOCACHE_SER:
		default:
			physaddr |= _PAGE_NOCACHE_S;
			break;
		case IOMAP_NOCACHE_NONSER:
			physaddr |= _PAGE_NOCACHE;
			break;
		case IOMAP_WRITETHROUGH:
			physaddr |= _PAGE_CACHE040W;
			break;
		}
	} else {
		physaddr |= (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY);
		switch (cacheflag) {
		case IOMAP_NOCACHE_SER:
		case IOMAP_NOCACHE_NONSER:
		default:
			physaddr |= _PAGE_NOCACHE030;
			break;
		case IOMAP_FULL_CACHING:
		case IOMAP_WRITETHROUGH:
			break;
		}
	}

	while ((long)size > 0) {
#ifdef DEBUG
		if (!(virtaddr & (PTRTREESIZE-1)))
			printk("\npa=%#lx va=%#lx ", physaddr, virtaddr);
#endif
		pgd_dir = pgd_offset_k(virtaddr);
		pmd_dir = pmd_alloc(&init_mm, pgd_dir, virtaddr);
		if (!pmd_dir) {
			printk("ioremap: no mem for pmd_dir\n");
			return NULL;
		}

		if (CPU_IS_020_OR_030) {
			pmd_dir->pmd[(virtaddr/PTRTREESIZE) & 15] = physaddr;
			physaddr += PTRTREESIZE;
			virtaddr += PTRTREESIZE;
			size -= PTRTREESIZE;
		} else {
			pte_dir = pte_alloc_kernel(pmd_dir, virtaddr);
			if (!pte_dir) {
				printk("ioremap: no mem for pte_dir\n");
				return NULL;
			}

			pte_val(*pte_dir) = physaddr;
			virtaddr += PAGE_SIZE;
			physaddr += PAGE_SIZE;
			size -= PAGE_SIZE;
		}
	}
#ifdef DEBUG
	printk("\n");
#endif
	flush_tlb_all();

	return (void __iomem *)retaddr;
}
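
/*
 * Usage sketch (hypothetical driver, illustrative addresses only): most
 * callers go through the ioremap() wrappers from <asm/io.h> rather than
 * calling __ioremap() directly.
 *
 *	void __iomem *regs;
 *
 *	regs = __ioremap(0x00f00000, 0x4000, IOMAP_NOCACHE_SER);
 *	if (!regs)
 *		return -ENOMEM;
 *	out_8(regs + REG_CTRL, 0x01);	// REG_CTRL is made up here
 *	...
 *	iounmap(regs);
 */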

/*
 * Unmap an ioremap()ed region again
 */
void iounmap(void __iomem *addr)
{
#ifdef CONFIG_AMIGA
	if ((!MACH_IS_AMIGA) ||
	    (((unsigned long)addr < 0x40000000) ||
	     ((unsigned long)addr > 0x60000000)))
		free_io_area((__force void *)addr);
#else
	free_io_area((__force void *)addr);
#endif
}

/*
 * __iounmap unmaps nearly everything, so be careful.
 * Currently it no longer frees the pointer/page tables themselves,
 * but that wasn't used anyway and might be added back later.
 */
void __iounmap(void *addr, unsigned long size)
{
	unsigned long virtaddr = (unsigned long)addr;
	pgd_t *pgd_dir;
	pmd_t *pmd_dir;
	pte_t *pte_dir;

	while ((long)size > 0) {
		pgd_dir = pgd_offset_k(virtaddr);
		if (pgd_bad(*pgd_dir)) {
			printk("iounmap: bad pgd(%08lx)\n", pgd_val(*pgd_dir));
			pgd_clear(pgd_dir);
			return;
		}
		pmd_dir = pmd_offset(pgd_dir, virtaddr);

		if (CPU_IS_020_OR_030) {
			int pmd_off = (virtaddr/PTRTREESIZE) & 15;
			int pmd_type = pmd_dir->pmd[pmd_off] & _DESCTYPE_MASK;

			if (pmd_type == _PAGE_PRESENT) {
				pmd_dir->pmd[pmd_off] = 0;
				virtaddr += PTRTREESIZE;
				size -= PTRTREESIZE;
				continue;
			} else if (pmd_type == 0) {
				/* empty slot: skip it, or we'd loop forever */
				virtaddr += PTRTREESIZE;
				size -= PTRTREESIZE;
				continue;
			}
		}

		if (pmd_bad(*pmd_dir)) {
			printk("iounmap: bad pmd (%08lx)\n", pmd_val(*pmd_dir));
			pmd_clear(pmd_dir);
			return;
		}
		pte_dir = pte_offset_kernel(pmd_dir, virtaddr);

		pte_val(*pte_dir) = 0;
		virtaddr += PAGE_SIZE;
		size -= PAGE_SIZE;
	}

	flush_tlb_all();
}

/*
 * Set new cache mode for some kernel address space.
 * The caller must push data for that range itself, if such data may already
 * be in the cache.
 */
void kernel_set_cachemode(void *addr, unsigned long size, int cmode)
{
	unsigned long virtaddr = (unsigned long)addr;
	pgd_t *pgd_dir;
	pmd_t *pmd_dir;
	pte_t *pte_dir;

	if (CPU_IS_040_OR_060) {
		switch (cmode) {
		case IOMAP_FULL_CACHING:
			cmode = _PAGE_CACHE040;
			break;
		case IOMAP_NOCACHE_SER:
		default:
			cmode = _PAGE_NOCACHE_S;
			break;
		case IOMAP_NOCACHE_NONSER:
			cmode = _PAGE_NOCACHE;
			break;
		case IOMAP_WRITETHROUGH:
			cmode = _PAGE_CACHE040W;
			break;
		}
	} else {
		switch (cmode) {
		case IOMAP_NOCACHE_SER:
		case IOMAP_NOCACHE_NONSER:
		default:
			cmode = _PAGE_NOCACHE030;
			break;
		case IOMAP_FULL_CACHING:
		case IOMAP_WRITETHROUGH:
			cmode = 0;
			break;
		}
	}

	while ((long)size > 0) {
		pgd_dir = pgd_offset_k(virtaddr);
		if (pgd_bad(*pgd_dir)) {
			printk("iocachemode: bad pgd(%08lx)\n", pgd_val(*pgd_dir));
			pgd_clear(pgd_dir);
			return;
		}
		pmd_dir = pmd_offset(pgd_dir, virtaddr);

		if (CPU_IS_020_OR_030) {
			int pmd_off = (virtaddr/PTRTREESIZE) & 15;

			if ((pmd_dir->pmd[pmd_off] & _DESCTYPE_MASK) == _PAGE_PRESENT) {
				pmd_dir->pmd[pmd_off] = (pmd_dir->pmd[pmd_off] &
							 _CACHEMASK040) | cmode;
				virtaddr += PTRTREESIZE;
				size -= PTRTREESIZE;
				continue;
			}
		}

		if (pmd_bad(*pmd_dir)) {
			printk("iocachemode: bad pmd (%08lx)\n", pmd_val(*pmd_dir));
			pmd_clear(pmd_dir);
			return;
		}
		pte_dir = pte_offset_kernel(pmd_dir, virtaddr);

		pte_val(*pte_dir) = (pte_val(*pte_dir) & _CACHEMASK040) | cmode;
		virtaddr += PAGE_SIZE;
		size -= PAGE_SIZE;
	}

	flush_tlb_all();
}
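
/*
 * Example (hypothetical, illustrative only): a driver holding a
 * page-aligned kernel buffer that hardware will access might mark it
 * non-cacheable for the duration and restore caching afterwards.
 * "buf" and BUF_SIZE below are made-up names.
 *
 *	kernel_set_cachemode(buf, BUF_SIZE, IOMAP_NOCACHE_SER);
 *	...hardware uses the buffer...
 *	kernel_set_cachemode(buf, BUF_SIZE, IOMAP_FULL_CACHING);
 *
 * As the comment above kernel_set_cachemode() says, the caller must
 * push any data for the range itself before switching modes.
 */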