/*
 * arch/sh/mm/pmb.c
 *
 * Privileged Space Mapping Buffer (PMB) Support.
 *
 * Copyright (C) 2005 - 2011  Paul Mundt
 * Copyright (C) 2010  Matt Fleming
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/syscore_ops.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <asm/cacheflush.h>
#include <asm/sizes.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>

struct pmb_entry;

struct pmb_entry {
	unsigned long vpn;
	unsigned long ppn;
	unsigned long flags;
	unsigned long size;

	raw_spinlock_t lock;

	/*
	 * 0 .. NR_PMB_ENTRIES - 1 for specific entry selection, or
	 * PMB_NO_ENTRY to search for a free one
	 */
	int entry;

	/* Adjacent entry link for contiguous multi-entry mappings */
	struct pmb_entry *link;
};

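/*
 * Supported PMB page sizes, ordered largest to smallest so that the
 * mapping routines can greedily pick the biggest size that still fits.
 */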
static struct {
	unsigned long size;
	int flag;
} pmb_sizes[] = {
	{ .size = SZ_512M, .flag = PMB_SZ_512M, },
	{ .size = SZ_128M, .flag = PMB_SZ_128M, },
	{ .size = SZ_64M,  .flag = PMB_SZ_64M,  },
	{ .size = SZ_16M,  .flag = PMB_SZ_16M,  },
};

static void pmb_unmap_entry(struct pmb_entry *, int depth);

static DEFINE_RWLOCK(pmb_rwlock);
static struct pmb_entry pmb_entry_list[NR_PMB_ENTRIES];
static DECLARE_BITMAP(pmb_map, NR_PMB_ENTRIES);

static unsigned int pmb_iomapping_enabled;

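/*
 * Compute the memory-mapped register offsets for a given entry in the
 * PMB address and data arrays.
 */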
static __always_inline unsigned long mk_pmb_entry(unsigned int entry)
{
	return (entry & PMB_E_MASK) << PMB_E_SHIFT;
}

static __always_inline unsigned long mk_pmb_addr(unsigned int entry)
{
	return mk_pmb_entry(entry) | PMB_ADDR;
}

static __always_inline unsigned long mk_pmb_data(unsigned int entry)
{
	return mk_pmb_entry(entry) | PMB_DATA;
}

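/* Check whether a PPN falls within the kernel's physical memory range. */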
static __always_inline unsigned int pmb_ppn_in_range(unsigned long ppn)
{
	return ppn >= __pa(memory_start) && ppn < __pa(memory_end);
}

/*
 * Ensure that the PMB entries match our cache configuration.
 *
 * When we are in 32-bit address extended mode, CCR.CB becomes
 * invalid, so care must be taken to manually adjust cacheable
 * translations.
 */
static __always_inline unsigned long pmb_cache_flags(void)
{
	unsigned long flags = 0;

#if defined(CONFIG_CACHE_OFF)
	flags |= PMB_WT | PMB_UB;
#elif defined(CONFIG_CACHE_WRITETHROUGH)
	flags |= PMB_C | PMB_WT | PMB_UB;
#elif defined(CONFIG_CACHE_WRITEBACK)
	flags |= PMB_C;
#endif

	return flags;
}

/*
 * Convert typical pgprot value to the PMB equivalent
 */
static inline unsigned long pgprot_to_pmb_flags(pgprot_t prot)
{
	unsigned long pmb_flags = 0;
	u64 flags = pgprot_val(prot);

	if (flags & _PAGE_CACHABLE)
		pmb_flags |= PMB_C;
	if (flags & _PAGE_WT)
		pmb_flags |= PMB_WT | PMB_UB;

	return pmb_flags;
}

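/*
 * Two entries can be merged when the second begins exactly where the
 * first ends, both virtually and physically, with identical flags.
 */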
static inline bool pmb_can_merge(struct pmb_entry *a, struct pmb_entry *b)
{
	return (b->vpn == (a->vpn + a->size)) &&
	       (b->ppn == (a->ppn + a->size)) &&
	       (b->flags == a->flags);
}

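/*
 * Check whether the requested range is already covered by an existing
 * mapping, following entry links to account for compound (multi-entry)
 * mappings.
 */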
static bool pmb_mapping_exists(unsigned long vaddr, phys_addr_t phys,
			       unsigned long size)
{
	int i;

	read_lock(&pmb_rwlock);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		struct pmb_entry *pmbe, *iter;
		unsigned long span;

		if (!test_bit(i, pmb_map))
			continue;

		pmbe = &pmb_entry_list[i];

		/*
		 * See if VPN and PPN are bounded by an existing mapping.
		 */
		if ((vaddr < pmbe->vpn) || (vaddr >= (pmbe->vpn + pmbe->size)))
			continue;
		if ((phys < pmbe->ppn) || (phys >= (pmbe->ppn + pmbe->size)))
			continue;

		/*
		 * Now see if we're in range of a simple mapping.
		 */
		if (size <= pmbe->size) {
			read_unlock(&pmb_rwlock);
			return true;
		}

		span = pmbe->size;

		/*
		 * Finally for sizes that involve compound mappings, walk
		 * the chain.
		 */
		for (iter = pmbe->link; iter; iter = iter->link)
			span += iter->size;

		/*
		 * Nothing else to do if the range requirements are met.
		 */
		if (size <= span) {
			read_unlock(&pmb_rwlock);
			return true;
		}
	}

	read_unlock(&pmb_rwlock);
	return false;
}

static bool pmb_size_valid(unsigned long size)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
		if (pmb_sizes[i].size == size)
			return true;

	return false;
}

static inline bool pmb_addr_valid(unsigned long addr, unsigned long size)
{
	return (addr >= P1SEG && (addr + size - 1) < P3SEG);
}

static inline bool pmb_prot_valid(pgprot_t prot)
{
	return (pgprot_val(prot) & _PAGE_USER) == 0;
}

static int pmb_size_to_flags(unsigned long size)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
		if (pmb_sizes[i].size == size)
			return pmb_sizes[i].flag;

	return 0;
}

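/* Find and claim the first free slot in the PMB entry bitmap. */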
static int pmb_alloc_entry(void)
{
	int pos;

	pos = find_first_zero_bit(pmb_map, NR_PMB_ENTRIES);
	if (pos >= 0 && pos < NR_PMB_ENTRIES)
		__set_bit(pos, pmb_map);
	else
		pos = -ENOSPC;

	return pos;
}

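/*
 * Allocate a software PMB entry, either in a caller-specified slot or,
 * for PMB_NO_ENTRY, in the first free one. Returns an ERR_PTR() value
 * if the requested slot is taken or none are available.
 */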
static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
				   unsigned long flags, int entry)
{
	struct pmb_entry *pmbe;
	unsigned long irqflags;
	void *ret = NULL;
	int pos;

	write_lock_irqsave(&pmb_rwlock, irqflags);

	if (entry == PMB_NO_ENTRY) {
		pos = pmb_alloc_entry();
		if (unlikely(pos < 0)) {
			ret = ERR_PTR(pos);
			goto out;
		}
	} else {
		if (__test_and_set_bit(entry, pmb_map)) {
			ret = ERR_PTR(-ENOSPC);
			goto out;
		}

		pos = entry;
	}

	write_unlock_irqrestore(&pmb_rwlock, irqflags);

	pmbe = &pmb_entry_list[pos];

	memset(pmbe, 0, sizeof(struct pmb_entry));

	raw_spin_lock_init(&pmbe->lock);

	pmbe->vpn	= vpn;
	pmbe->ppn	= ppn;
	pmbe->flags	= flags;
	pmbe->entry	= pos;

	return pmbe;

out:
	write_unlock_irqrestore(&pmb_rwlock, irqflags);
	return ret;
}

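/* Release an entry's bitmap slot and detach it from any compound chain. */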
static void pmb_free(struct pmb_entry *pmbe)
{
	__clear_bit(pmbe->entry, pmb_map);

	pmbe->entry	= PMB_NO_ENTRY;
	pmbe->link	= NULL;
}

/*
 * Must be run uncached.
 */
static void __set_pmb_entry(struct pmb_entry *pmbe)
{
	unsigned long addr, data;

	addr = mk_pmb_addr(pmbe->entry);
	data = mk_pmb_data(pmbe->entry);

	jump_to_uncached();

	/* Set V-bit */
	__raw_writel(pmbe->vpn | PMB_V, addr);
	__raw_writel(pmbe->ppn | pmbe->flags | PMB_V, data);

	back_to_cached();
}

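/*
 * Invalidate a hardware entry by clearing the V-bit in both the address
 * and data arrays, writing through the uncached window.
 */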
static void __clear_pmb_entry(struct pmb_entry *pmbe)
{
	unsigned long addr, data;
	unsigned long addr_val, data_val;

	addr = mk_pmb_addr(pmbe->entry);
	data = mk_pmb_data(pmbe->entry);

	addr_val = __raw_readl(addr);
	data_val = __raw_readl(data);

	/* Clear V-bit */
	writel_uncached(addr_val & ~PMB_V, addr);
	writel_uncached(data_val & ~PMB_V, data);
}

#ifdef CONFIG_PM
static void set_pmb_entry(struct pmb_entry *pmbe)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&pmbe->lock, flags);
	__set_pmb_entry(pmbe);
	raw_spin_unlock_irqrestore(&pmbe->lock, flags);
}
#endif /* CONFIG_PM */

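/*
 * Establish a bolted (permanent) mapping, splitting the request across
 * as many PMB entries as needed, largest page sizes first, and linking
 * the entries together for later teardown.
 */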
int pmb_bolt_mapping(unsigned long vaddr, phys_addr_t phys,
		     unsigned long size, pgprot_t prot)
{
	struct pmb_entry *pmbp, *pmbe;
	unsigned long orig_addr, orig_size;
	unsigned long flags, pmb_flags;
	int i, mapped;

	if (size < SZ_16M)
		return -EINVAL;
	if (!pmb_addr_valid(vaddr, size))
		return -EFAULT;
	if (pmb_mapping_exists(vaddr, phys, size))
		return 0;

	orig_addr = vaddr;
	orig_size = size;

	flush_tlb_kernel_range(vaddr, vaddr + size);

	pmb_flags = pgprot_to_pmb_flags(prot);
	pmbp = NULL;

	do {
		for (i = mapped = 0; i < ARRAY_SIZE(pmb_sizes); i++) {
			if (size < pmb_sizes[i].size)
				continue;

			pmbe = pmb_alloc(vaddr, phys, pmb_flags |
					 pmb_sizes[i].flag, PMB_NO_ENTRY);
			if (IS_ERR(pmbe)) {
				pmb_unmap_entry(pmbp, mapped);
				return PTR_ERR(pmbe);
			}

			raw_spin_lock_irqsave(&pmbe->lock, flags);

			pmbe->size = pmb_sizes[i].size;

			__set_pmb_entry(pmbe);

			phys	+= pmbe->size;
			vaddr	+= pmbe->size;
			size	-= pmbe->size;

			/*
			 * Link adjacent entries that span multiple PMB
			 * entries for easier tear-down.
			 */
			if (likely(pmbp)) {
				raw_spin_lock_nested(&pmbp->lock,
						     SINGLE_DEPTH_NESTING);
				pmbp->link = pmbe;
				raw_spin_unlock(&pmbp->lock);
			}

			pmbp = pmbe;

			/*
			 * Instead of trying smaller sizes on every
			 * iteration (even if we succeed in allocating
			 * space), try using pmb_sizes[i].size again.
			 */
			i--;
			mapped++;

			raw_spin_unlock_irqrestore(&pmbe->lock, flags);
		}
	} while (size >= SZ_16M);

	flush_cache_vmap(orig_addr, orig_addr + orig_size);

	return 0;
}

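/*
 * Remap an arbitrary physical range through the PMB. The range is
 * aligned out to the selected PMB page size, a suitably sized area of
 * virtual space is reserved, and the mapping is then bolted in place.
 */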
void __iomem *pmb_remap_caller(phys_addr_t phys, unsigned long size,
			       pgprot_t prot, void *caller)
{
	unsigned long vaddr;
	phys_addr_t offset, last_addr;
	phys_addr_t align_mask;
	unsigned long aligned;
	struct vm_struct *area;
	int i, ret;

	if (!pmb_iomapping_enabled)
		return NULL;

	/*
	 * Small mappings need to go through the TLB.
	 */
	if (size < SZ_16M)
		return ERR_PTR(-EINVAL);
	if (!pmb_prot_valid(prot))
		return ERR_PTR(-EINVAL);

	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
		if (size >= pmb_sizes[i].size)
			break;

	last_addr = phys + size;
	align_mask = ~(pmb_sizes[i].size - 1);
	offset = phys & ~align_mask;
	phys &= align_mask;
	aligned = ALIGN(last_addr, pmb_sizes[i].size) - phys;

	/*
	 * XXX: This should really start from uncached_end, but this
	 * causes the MMU to reset, so for now we restrict it to the
	 * 0xb000...0xc000 range.
	 */
	area = __get_vm_area_caller(aligned, VM_IOREMAP, 0xb0000000,
				    P3SEG, caller);
	if (!area)
		return NULL;

	area->phys_addr = phys;
	vaddr = (unsigned long)area->addr;

	ret = pmb_bolt_mapping(vaddr, phys, size, prot);
	if (unlikely(ret != 0))
		return ERR_PTR(ret);

	return (void __iomem *)(offset + (char *)vaddr);
}

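/*
 * Tear down the mapping whose first entry begins at the given virtual
 * address, along with any linked entries in the compound chain.
 */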
int pmb_unmap(void __iomem *addr)
{
	struct pmb_entry *pmbe = NULL;
	unsigned long vaddr = (unsigned long __force)addr;
	int i, found = 0;

	read_lock(&pmb_rwlock);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		if (test_bit(i, pmb_map)) {
			pmbe = &pmb_entry_list[i];
			if (pmbe->vpn == vaddr) {
				found = 1;
				break;
			}
		}
	}

	read_unlock(&pmb_rwlock);

	if (found) {
		pmb_unmap_entry(pmbe, NR_PMB_ENTRIES);
		return 0;
	}

	return -EINVAL;
}

static void __pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
{
	do {
		struct pmb_entry *pmblink = pmbe;

		/*
		 * We may be called before this pmb_entry has been
		 * entered into the PMB table via set_pmb_entry(), but
		 * that's OK because we've allocated a unique slot for
		 * this entry in pmb_alloc() (even if we haven't filled
		 * it yet).
		 *
		 * Therefore, calling __clear_pmb_entry() is safe as no
		 * other mapping can be using that slot.
		 */
		__clear_pmb_entry(pmbe);

		flush_cache_vunmap(pmbe->vpn, pmbe->vpn + pmbe->size);

		pmbe = pmblink->link;

		pmb_free(pmblink);
	} while (pmbe && --depth);
}

static void pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
{
	unsigned long flags;

	if (unlikely(!pmbe))
		return;

	write_lock_irqsave(&pmb_rwlock, flags);
	__pmb_unmap_entry(pmbe, depth);
	write_unlock_irqrestore(&pmb_rwlock, flags);
}

static void __init pmb_notify(void)
{
	int i;

	pr_info("PMB: boot mappings:\n");

	read_lock(&pmb_rwlock);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		struct pmb_entry *pmbe;

		if (!test_bit(i, pmb_map))
			continue;

		pmbe = &pmb_entry_list[i];

		pr_info("       0x%08lx -> 0x%08lx [ %4ldMB %2scached ]\n",
			pmbe->vpn >> PAGE_SHIFT, pmbe->ppn >> PAGE_SHIFT,
			pmbe->size >> 20, (pmbe->flags & PMB_C) ? "" : "un");
	}

	read_unlock(&pmb_rwlock);
}

/*
 * Sync our software copy of the PMB mappings with those in hardware. The
 * mappings in the hardware PMB were either set up by the bootloader or
 * very early on by the kernel.
 */
static void __init pmb_synchronize(void)
{
	struct pmb_entry *pmbp = NULL;
	int i, j;

	/*
	 * Run through the initial boot mappings, log the established
	 * ones, and blow away anything that falls outside of the valid
	 * PPN range. Specifically, we only care about existing mappings
	 * that impact the cached/uncached sections.
	 *
	 * Note that touching these can be a bit of a minefield; the boot
	 * loader can establish multi-page mappings with the same caching
	 * attributes, so we need to ensure that we aren't modifying a
	 * mapping that we're presently executing from, or may execute
	 * from in the case of straddling page boundaries.
	 *
	 * In the future we will have to tidy up after the boot loader by
	 * jumping between the cached and uncached mappings and tearing
	 * down alternating mappings while executing from the other.
	 */
	for (i = 0; i < NR_PMB_ENTRIES; i++) {
		unsigned long addr, data;
		unsigned long addr_val, data_val;
		unsigned long ppn, vpn, flags;
		unsigned long irqflags;
		unsigned int size;
		struct pmb_entry *pmbe;

		addr = mk_pmb_addr(i);
		data = mk_pmb_data(i);

		addr_val = __raw_readl(addr);
		data_val = __raw_readl(data);

		/*
		 * Skip over any bogus entries
		 */
		if (!(data_val & PMB_V) || !(addr_val & PMB_V))
			continue;

		ppn = data_val & PMB_PFN_MASK;
		vpn = addr_val & PMB_PFN_MASK;

		/*
		 * Only preserve in-range mappings.
		 */
		if (!pmb_ppn_in_range(ppn)) {
			/*
			 * Invalidate anything out of bounds.
			 */
			writel_uncached(addr_val & ~PMB_V, addr);
			writel_uncached(data_val & ~PMB_V, data);
			continue;
		}

		/*
		 * Update the caching attributes if necessary
		 */
		if (data_val & PMB_C) {
			data_val &= ~PMB_CACHE_MASK;
			data_val |= pmb_cache_flags();

			writel_uncached(data_val, data);
		}

		size = data_val & PMB_SZ_MASK;
		flags = size | (data_val & PMB_CACHE_MASK);

		pmbe = pmb_alloc(vpn, ppn, flags, i);
		if (IS_ERR(pmbe)) {
			WARN_ON_ONCE(1);
			continue;
		}

		raw_spin_lock_irqsave(&pmbe->lock, irqflags);

		for (j = 0; j < ARRAY_SIZE(pmb_sizes); j++)
			if (pmb_sizes[j].flag == size)
				pmbe->size = pmb_sizes[j].size;

		if (pmbp) {
			raw_spin_lock_nested(&pmbp->lock, SINGLE_DEPTH_NESTING);
			/*
			 * Compare the previous entry against the current one to
			 * see if the entries span a contiguous mapping. If so,
			 * setup the entry links accordingly. Compound mappings
			 * are later coalesced.
			 */
			if (pmb_can_merge(pmbp, pmbe))
				pmbp->link = pmbe;
			raw_spin_unlock(&pmbp->lock);
		}

		pmbp = pmbe;

		raw_spin_unlock_irqrestore(&pmbe->lock, irqflags);
	}
}

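/*
 * Try to fold a chain of contiguous entries into a single larger one.
 * The chain is walked to find the longest prefix whose combined span
 * is itself a valid PMB page size; the head entry is then resized and
 * the now-redundant tail entries are torn down.
 */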
static void __init pmb_merge(struct pmb_entry *head)
{
	unsigned long span, newsize;
	struct pmb_entry *tail;
	int i = 1, depth = 0;

	span = newsize = head->size;

	tail = head->link;
	while (tail) {
		span += tail->size;

		if (pmb_size_valid(span)) {
			newsize = span;
			depth = i;
		}

		/* This is the end of the line.. */
		if (!tail->link)
			break;

		tail = tail->link;
		i++;
	}

	/*
	 * The merged page size must be valid.
	 */
	if (!depth || !pmb_size_valid(newsize))
		return;

	head->flags &= ~PMB_SZ_MASK;
	head->flags |= pmb_size_to_flags(newsize);

	head->size = newsize;

	__pmb_unmap_entry(head->link, depth);
	__set_pmb_entry(head);
}

static void __init pmb_coalesce(void)
{
	unsigned long flags;
	int i;

	write_lock_irqsave(&pmb_rwlock, flags);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		struct pmb_entry *pmbe;

		if (!test_bit(i, pmb_map))
			continue;

		pmbe = &pmb_entry_list[i];

		/*
		 * We're only interested in compound mappings
		 */
		if (!pmbe->link)
			continue;

		/*
		 * Nothing to do if it already uses the largest possible
		 * page size.
		 */
		if (pmbe->size == SZ_512M)
			continue;

		pmb_merge(pmbe);
	}

	write_unlock_irqrestore(&pmb_rwlock, flags);
}

#ifdef CONFIG_UNCACHED_MAPPING
static void __init pmb_resize(void)
{
	int i;

	/*
	 * If the uncached mapping was constructed by the kernel, it will
	 * already be a reasonable size.
	 */
	if (uncached_size == SZ_16M)
		return;

	read_lock(&pmb_rwlock);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		struct pmb_entry *pmbe;
		unsigned long flags;

		if (!test_bit(i, pmb_map))
			continue;

		pmbe = &pmb_entry_list[i];

		if (pmbe->vpn != uncached_start)
			continue;

		/*
		 * Found it, now resize it.
		 */
		raw_spin_lock_irqsave(&pmbe->lock, flags);

		pmbe->size = SZ_16M;
		pmbe->flags &= ~PMB_SZ_MASK;
		pmbe->flags |= pmb_size_to_flags(pmbe->size);

		uncached_resize(pmbe->size);

		__set_pmb_entry(pmbe);

		raw_spin_unlock_irqrestore(&pmbe->lock, flags);
	}

	read_unlock(&pmb_rwlock);
}
#endif

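/* Parse the "pmb" kernel parameter; "pmb=iomap" enables PMB-backed I/O mappings. */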
static int __init early_pmb(char *p)
{
	if (!p)
		return 0;

	if (strstr(p, "iomap"))
		pmb_iomapping_enabled = 1;

	return 0;
}
early_param("pmb", early_pmb);

void __init pmb_init(void)
{
	/* Synchronize software state */
	pmb_synchronize();

	/* Attempt to combine compound mappings */
	pmb_coalesce();

#ifdef CONFIG_UNCACHED_MAPPING
	/* Resize initial mappings, if necessary */
	pmb_resize();
#endif

	/* Log them */
	pmb_notify();

	writel_uncached(0, PMB_IRMCR);

	/* Flush out the TLB */
	local_flush_tlb_all();
	ctrl_barrier();
}

bool __in_29bit_mode(void)
{
	return (__raw_readl(PMB_PASCR) & PASCR_SE) == 0;
}

static int pmb_seq_show(struct seq_file *file, void *iter)
{
	int i;

	seq_printf(file, "V: Valid, C: Cacheable, WT: Write-Through\n"
			 "CB: Copy-Back, B: Buffered, UB: Unbuffered\n");
	seq_printf(file, "ety   vpn  ppn  size   flags\n");

	for (i = 0; i < NR_PMB_ENTRIES; i++) {
		unsigned long addr, data;
		unsigned int size;
		char *sz_str = NULL;

		addr = __raw_readl(mk_pmb_addr(i));
		data = __raw_readl(mk_pmb_data(i));

		size = data & PMB_SZ_MASK;
		sz_str = (size == PMB_SZ_16M)  ? " 16MB":
			 (size == PMB_SZ_64M)  ? " 64MB":
			 (size == PMB_SZ_128M) ? "128MB":
					         "512MB";

		/* 02: V 0x88 0x08 128MB C CB  B */
		seq_printf(file, "%02d: %c 0x%02lx 0x%02lx %s %c %s %s\n",
			   i, ((addr & PMB_V) && (data & PMB_V)) ? 'V' : ' ',
			   (addr >> 24) & 0xff, (data >> 24) & 0xff,
			   sz_str, (data & PMB_C) ? 'C' : ' ',
			   (data & PMB_WT) ? "WT" : "CB",
			   (data & PMB_UB) ? "UB" : " B");
	}

	return 0;
}

static int pmb_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, pmb_seq_show, NULL);
}

static const struct file_operations pmb_debugfs_fops = {
	.owner		= THIS_MODULE,
	.open		= pmb_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init pmb_debugfs_init(void)
{
	struct dentry *dentry;

	dentry = debugfs_create_file("pmb", S_IFREG | S_IRUGO,
				     arch_debugfs_dir, NULL, &pmb_debugfs_fops);
	if (!dentry)
		return -ENOMEM;

	return 0;
}
subsys_initcall(pmb_debugfs_init);

#ifdef CONFIG_PM
static void pmb_syscore_resume(void)
{
	struct pmb_entry *pmbe;
	int i;

	read_lock(&pmb_rwlock);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		if (test_bit(i, pmb_map)) {
			pmbe = &pmb_entry_list[i];
			set_pmb_entry(pmbe);
		}
	}

	read_unlock(&pmb_rwlock);
}

static struct syscore_ops pmb_syscore_ops = {
	.resume = pmb_syscore_resume,
};

static int __init pmb_sysdev_init(void)
{
	register_syscore_ops(&pmb_syscore_ops);
	return 0;
}
subsys_initcall(pmb_sysdev_init);
#endif