/*
 *  arch/sparc64/mm/init.c
 *
 *  Copyright (C) 1996-1999 David S. Miller (davem@caip.rutgers.edu)
 *  Copyright (C) 1997-1999 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/initrd.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/poison.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/kprobes.h>
#include <linux/cache.h>
#include <linux/sort.h>
#include <linux/percpu.h>
#include <linux/lmb.h>
#include <linux/mmzone.h>

#include <asm/head.h>
#include <asm/system.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/iommu.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/dma.h>
#include <asm/starfire.h>
#include <asm/tlb.h>
#include <asm/spitfire.h>
#include <asm/sections.h>
#include <asm/tsb.h>
#include <asm/hypervisor.h>
#include <asm/prom.h>
#include <asm/sstate.h>
#include <asm/mdesc.h>
#include <asm/cpudata.h>
#include <asm/irq.h>

#define MAX_PHYS_ADDRESS	(1UL << 42UL)
#define KPTE_BITMAP_CHUNK_SZ	(256UL * 1024UL * 1024UL)
#define KPTE_BITMAP_BYTES	\
	((MAX_PHYS_ADDRESS / KPTE_BITMAP_CHUNK_SZ) / 8)

unsigned long kern_linear_pte_xor[2] __read_mostly;

/* A bitmap, one bit for every 256MB of physical memory.  If the bit
 * is clear, we should use a 4MB page (via kern_linear_pte_xor[0]) else
 * if set we should use a 256MB page (via kern_linear_pte_xor[1]).
 */
unsigned long kpte_linear_bitmap[KPTE_BITMAP_BYTES / sizeof(unsigned long)];

#ifndef CONFIG_DEBUG_PAGEALLOC
/* A special kernel TSB for 4MB and 256MB linear mappings.
 * Space is allocated for this right after the trap table
 * in arch/sparc64/kernel/head.S
 */
extern struct tsb swapper_4m_tsb[KERNEL_TSB4M_NENTRIES];
#endif

#define MAX_BANKS	32

static struct linux_prom64_registers pavail[MAX_BANKS] __initdata;
static int pavail_ents __initdata;

static int cmp_p64(const void *a, const void *b)
{
	const struct linux_prom64_registers *x = a, *y = b;

	if (x->phys_addr > y->phys_addr)
		return 1;
	if (x->phys_addr < y->phys_addr)
		return -1;
	return 0;
}
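/* Read the named memory property (e.g. "available" or "reg") from
 * the OBP /memory node into 'regs', page align each entry, drop any
 * entries that become empty, and sort the result by physical address.
 */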
static void __init read_obp_memory(const char *property,
				   struct linux_prom64_registers *regs,
				   int *num_ents)
{
	int node = prom_finddevice("/memory");
	int prop_size = prom_getproplen(node, property);
	int ents, ret, i;

	ents = prop_size / sizeof(struct linux_prom64_registers);
	if (ents > MAX_BANKS) {
		prom_printf("The machine has more %s property entries than "
			    "this kernel can support (%d).\n",
			    property, MAX_BANKS);
		prom_halt();
	}

	ret = prom_getproperty(node, property, (char *) regs, prop_size);
	if (ret == -1) {
		prom_printf("Couldn't get %s property from /memory.\n",
			    property);
		prom_halt();
	}

	/* Sanitize what we got from the firmware, by page aligning
	 * everything.
	 */
	for (i = 0; i < ents; i++) {
		unsigned long base, size;

		base = regs[i].phys_addr;
		size = regs[i].reg_size;

		size &= PAGE_MASK;
		if (base & ~PAGE_MASK) {
			unsigned long new_base = PAGE_ALIGN(base);

			size -= new_base - base;
			if ((long) size < 0L)
				size = 0UL;
			base = new_base;
		}
		if (size == 0UL) {
			/* If it is empty, simply get rid of it.
			 * This simplifies the logic of the other
			 * functions that process these arrays.
			 */
			memmove(&regs[i], &regs[i + 1],
				(ents - i - 1) * sizeof(regs[0]));
			i--;
			ents--;
			continue;
		}
		regs[i].phys_addr = base;
		regs[i].reg_size = size;
	}

	*num_ents = ents;

	sort(regs, ents, sizeof(struct linux_prom64_registers),
	     cmp_p64, NULL);
}

unsigned long *sparc64_valid_addr_bitmap __read_mostly;

/* Kernel physical address base and size in bytes.  */
unsigned long kern_base __read_mostly;
unsigned long kern_size __read_mostly;

/* Initial ramdisk setup */
extern unsigned long sparc_ramdisk_image64;
extern unsigned int sparc_ramdisk_image;
extern unsigned int sparc_ramdisk_size;

struct page *mem_map_zero __read_mostly;
EXPORT_SYMBOL(mem_map_zero);

unsigned int sparc64_highest_unlocked_tlb_ent __read_mostly;

unsigned long sparc64_kern_pri_context __read_mostly;
unsigned long sparc64_kern_pri_nuc_bits __read_mostly;
unsigned long sparc64_kern_sec_context __read_mostly;

int num_kernel_image_mappings;

#ifdef CONFIG_DEBUG_DCFLUSH
atomic_t dcpage_flushes = ATOMIC_INIT(0);
#ifdef CONFIG_SMP
atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
#endif
#endif
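/* Flush the local cpu's D-cache for one page.  This must never be
 * called on sun4v (hypervisor) chips, whose caches are coherent.
 */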
inline void flush_dcache_page_impl(struct page *page)
{
	BUG_ON(tlb_type == hypervisor);
#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif

#ifdef DCACHE_ALIASING_POSSIBLE
	__flush_dcache_page(page_address(page),
			    ((tlb_type == spitfire) &&
			     page_mapping(page) != NULL));
#else
	if (page_mapping(page) != NULL &&
	    tlb_type == spitfire)
		__flush_icache_page(__pa(page_address(page)));
#endif
}

#define PG_dcache_dirty		PG_arch_1
#define PG_dcache_cpu_shift	32UL
#define PG_dcache_cpu_mask	\
	((1UL<<ilog2(roundup_pow_of_two(NR_CPUS)))-1UL)

#define dcache_dirty_cpu(page) \
	(((page)->flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask)
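/* Atomically mark the page D-cache dirty and record which cpu
 * dirtied it, using a compare-and-swap (casx) loop on page->flags.
 */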
static inline void set_dcache_dirty(struct page *page, int this_cpu)
{
	unsigned long mask = this_cpu;
	unsigned long non_cpu_bits;

	non_cpu_bits = ~(PG_dcache_cpu_mask << PG_dcache_cpu_shift);
	mask = (mask << PG_dcache_cpu_shift) | (1UL << PG_dcache_dirty);

	__asm__ __volatile__("1:\n\t"
			     "ldx	[%2], %%g7\n\t"
			     "and	%%g7, %1, %%g1\n\t"
			     "or	%%g1, %0, %%g1\n\t"
			     "casx	[%2], %%g7, %%g1\n\t"
			     "cmp	%%g7, %%g1\n\t"
			     "membar	#StoreLoad | #StoreStore\n\t"
			     "bne,pn	%%xcc, 1b\n\t"
			     " nop"
			     : /* no outputs */
			     : "r" (mask), "r" (non_cpu_bits), "r" (&page->flags)
			     : "g1", "g7");
}

static inline void clear_dcache_dirty_cpu(struct page *page, unsigned long cpu)
{
	unsigned long mask = (1UL << PG_dcache_dirty);

	__asm__ __volatile__("! test_and_clear_dcache_dirty\n"
			     "1:\n\t"
			     "ldx	[%2], %%g7\n\t"
			     "srlx	%%g7, %4, %%g1\n\t"
			     "and	%%g1, %3, %%g1\n\t"
			     "cmp	%%g1, %0\n\t"
			     "bne,pn	%%icc, 2f\n\t"
			     " andn	%%g7, %1, %%g1\n\t"
			     "casx	[%2], %%g7, %%g1\n\t"
			     "cmp	%%g7, %%g1\n\t"
			     "membar	#StoreLoad | #StoreStore\n\t"
			     "bne,pn	%%xcc, 1b\n\t"
			     " nop\n"
			     "2:"
			     : /* no outputs */
			     : "r" (cpu), "r" (mask), "r" (&page->flags),
			       "i" (PG_dcache_cpu_mask),
			       "i" (PG_dcache_cpu_shift)
			     : "g1", "g7");
}
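/* Insert one entry into a TSB.  cheetah_plus and sun4v take the TSB
 * entry address as a physical address, older chips take a virtual one.
 */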
static inline void tsb_insert(struct tsb *ent, unsigned long tag, unsigned long pte)
{
	unsigned long tsb_addr = (unsigned long) ent;

	if (tlb_type == cheetah_plus || tlb_type == hypervisor)
		tsb_addr = __pa(tsb_addr);

	__tsb_insert(tsb_addr, tag, pte);
}

unsigned long _PAGE_ALL_SZ_BITS __read_mostly;
unsigned long _PAGE_SZBITS __read_mostly;
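/* Called after a user TLB fill.  Resolve any pending D-cache dirty
 * state for the page, then preload the new translation into the
 * appropriate (base page size or huge page) TSB.
 */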
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
	struct mm_struct *mm;
	struct tsb *tsb;
	unsigned long tag, flags;
	unsigned long tsb_index, tsb_hash_shift;

	if (tlb_type != hypervisor) {
		unsigned long pfn = pte_pfn(pte);
		unsigned long pg_flags;
		struct page *page;

		if (pfn_valid(pfn) &&
		    (page = pfn_to_page(pfn), page_mapping(page)) &&
		    ((pg_flags = page->flags) & (1UL << PG_dcache_dirty))) {
			int cpu = ((pg_flags >> PG_dcache_cpu_shift) &
				   PG_dcache_cpu_mask);
			int this_cpu = get_cpu();

			/* This is just to optimize away some function calls
			 * in the SMP case.
			 */
			if (cpu == this_cpu)
				flush_dcache_page_impl(page);
			else
				smp_flush_dcache_page_impl(page, cpu);

			clear_dcache_dirty_cpu(page, cpu);

			put_cpu();
		}
	}

	mm = vma->vm_mm;

	tsb_index = MM_TSB_BASE;
	tsb_hash_shift = PAGE_SHIFT;

	spin_lock_irqsave(&mm->context.lock, flags);

#ifdef CONFIG_HUGETLB_PAGE
	if (mm->context.tsb_block[MM_TSB_HUGE].tsb != NULL) {
		if ((tlb_type == hypervisor &&
		     (pte_val(pte) & _PAGE_SZALL_4V) == _PAGE_SZHUGE_4V) ||
		    (tlb_type != hypervisor &&
		     (pte_val(pte) & _PAGE_SZALL_4U) == _PAGE_SZHUGE_4U)) {
			tsb_index = MM_TSB_HUGE;
			tsb_hash_shift = HPAGE_SHIFT;
		}
	}
#endif

	tsb = mm->context.tsb_block[tsb_index].tsb;
	tsb += ((address >> tsb_hash_shift) &
		(mm->context.tsb_block[tsb_index].tsb_nentries - 1UL));
	tag = (address >> 22UL);
	tsb_insert(tsb, tag, pte_val(pte));

	spin_unlock_irqrestore(&mm->context.lock, flags);
}
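/* Defer the actual flush where possible: for pages whose mapping is
 * not mmap()'d anywhere we just tag the page dirty and let
 * update_mmu_cache() resolve it on the next user fault.
 */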
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;
	int this_cpu;

	if (tlb_type == hypervisor)
		return;

	/* Do not bother with the expensive D-cache flush if it
	 * is merely the zero page.  The 'bigcore' testcase in GDB
	 * causes this case to run millions of times.
	 */
	if (page == ZERO_PAGE(0))
		return;

	this_cpu = get_cpu();

	mapping = page_mapping(page);
	if (mapping && !mapping_mapped(mapping)) {
		int dirty = test_bit(PG_dcache_dirty, &page->flags);
		if (dirty) {
			int dirty_cpu = dcache_dirty_cpu(page);

			if (dirty_cpu == this_cpu)
				goto out;
			smp_flush_dcache_page_impl(page, dirty_cpu);
		}
		set_dcache_dirty(page, this_cpu);
	} else {
		/* We could delay the flush for the !page_mapping
		 * case too.  But that case is for exec env/arg
		 * pages and those are 99% certainly going to get
		 * faulted into the tlb (and thus flushed) anyways.
		 */
		flush_dcache_page_impl(page);
	}

out:
	put_cpu();
}

void __kprobes flush_icache_range(unsigned long start, unsigned long end)
{
	/* Cheetah and Hypervisor platform cpus have coherent I-cache. */
	if (tlb_type == spitfire) {
		unsigned long kaddr;

		/* This code only runs on Spitfire cpus so this is
		 * why we can assume _PAGE_PADDR_4U.
		 */
		for (kaddr = start; kaddr < end; kaddr += PAGE_SIZE) {
			unsigned long paddr, mask = _PAGE_PADDR_4U;

			if (kaddr >= PAGE_OFFSET)
				paddr = kaddr & mask;
			else {
				pgd_t *pgdp = pgd_offset_k(kaddr);
				pud_t *pudp = pud_offset(pgdp, kaddr);
				pmd_t *pmdp = pmd_offset(pudp, kaddr);
				pte_t *ptep = pte_offset_kernel(pmdp, kaddr);

				paddr = pte_val(*ptep) & mask;
			}
			__flush_icache_page(paddr);
		}
	}
}

void mmu_info(struct seq_file *m)
{
	if (tlb_type == cheetah)
		seq_printf(m, "MMU Type\t: Cheetah\n");
	else if (tlb_type == cheetah_plus)
		seq_printf(m, "MMU Type\t: Cheetah+\n");
	else if (tlb_type == spitfire)
		seq_printf(m, "MMU Type\t: Spitfire\n");
	else if (tlb_type == hypervisor)
		seq_printf(m, "MMU Type\t: Hypervisor (sun4v)\n");
	else
		seq_printf(m, "MMU Type\t: ???\n");

#ifdef CONFIG_DEBUG_DCFLUSH
	seq_printf(m, "DCPageFlushes\t: %d\n",
		   atomic_read(&dcpage_flushes));
#ifdef CONFIG_SMP
	seq_printf(m, "DCPageFlushesXC\t: %d\n",
		   atomic_read(&dcpage_flushes_xcall));
#endif /* CONFIG_SMP */
#endif /* CONFIG_DEBUG_DCFLUSH */
}

struct linux_prom_translation {
	unsigned long virt;
	unsigned long size;
	unsigned long data;
};

/* Exported for kernel TLB miss handling in ktlb.S */
struct linux_prom_translation prom_trans[512] __read_mostly;
unsigned int prom_trans_ents __read_mostly;

/* Exported for SMP bootup purposes. */
unsigned long kern_locked_tte_data;

/* The obp translations are saved based on 8k pagesize, since obp can
 * use a mixture of pagesizes. Misses to the LOW_OBP_ADDRESS ->
 * HI_OBP_ADDRESS range are handled in ktlb.S.
 */
static inline int in_obp_range(unsigned long vaddr)
{
	return (vaddr >= LOW_OBP_ADDRESS &&
		vaddr < HI_OBP_ADDRESS);
}

static int cmp_ptrans(const void *a, const void *b)
{
	const struct linux_prom_translation *x = a, *y = b;

	if (x->virt > y->virt)
		return 1;
	if (x->virt < y->virt)
		return -1;
	return 0;
}

/* Read OBP translations property into 'prom_trans[]'.  */
static void __init read_obp_translations(void)
{
	int n, node, ents, first, last, i;

	node = prom_finddevice("/virtual-memory");
	n = prom_getproplen(node, "translations");
	if (unlikely(n == 0 || n == -1)) {
		prom_printf("prom_mappings: Couldn't get size.\n");
		prom_halt();
	}
	if (unlikely(n > sizeof(prom_trans))) {
		prom_printf("prom_mappings: Size %Zd is too big.\n", n);
		prom_halt();
	}

	if ((n = prom_getproperty(node, "translations",
				  (char *)&prom_trans[0],
				  sizeof(prom_trans))) == -1) {
		prom_printf("prom_mappings: Couldn't get property.\n");
		prom_halt();
	}

	n = n / sizeof(struct linux_prom_translation);

	ents = n;

	sort(prom_trans, ents, sizeof(struct linux_prom_translation),
	     cmp_ptrans, NULL);

	/* Now kick out all the non-OBP entries.  */
	for (i = 0; i < ents; i++) {
		if (in_obp_range(prom_trans[i].virt))
			break;
	}
	first = i;
	for (; i < ents; i++) {
		if (!in_obp_range(prom_trans[i].virt))
			break;
	}
	last = i;

	for (i = 0; i < (last - first); i++) {
		struct linux_prom_translation *src = &prom_trans[i + first];
		struct linux_prom_translation *dest = &prom_trans[i];

		*dest = *src;
	}
	for (; i < ents; i++) {
		struct linux_prom_translation *dest = &prom_trans[i];
		dest->virt = dest->size = dest->data = 0x0UL;
	}

	prom_trans_ents = last - first;

	if (tlb_type == spitfire) {
		/* Clear diag TTE bits. */
		for (i = 0; i < prom_trans_ents; i++)
			prom_trans[i].data &= ~0x0003fe0000000000UL;
	}
}
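/* Ask the hypervisor to install a permanent (locked) mapping for
 * 'vaddr' in the given MMU (D-MMU, I-MMU or both).  Any failure here
 * is fatal this early in boot, so just halt.
 */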
static void __init hypervisor_tlb_lock(unsigned long vaddr,
				       unsigned long pte,
				       unsigned long mmu)
{
	unsigned long ret = sun4v_mmu_map_perm_addr(vaddr, 0, pte, mmu);

	if (ret != 0) {
		prom_printf("hypervisor_tlb_lock[%lx:%lx:%lx:%lx]: "
			    "errors with %lx\n", vaddr, 0UL, pte, mmu, ret);
		prom_halt();
	}
}

static unsigned long kern_large_tte(unsigned long paddr);
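/* Re-establish the kernel's locked 4MB text/data mappings, either via
 * hypervisor calls on sun4v or by loading locked TLB entries through
 * OBP on older chips.
 */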
static void __init remap_kernel(void)
{
	unsigned long phys_page, tte_vaddr, tte_data;
	int i, tlb_ent = sparc64_highest_locked_tlbent();

	tte_vaddr = (unsigned long) KERNBASE;
	phys_page = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
	tte_data = kern_large_tte(phys_page);

	kern_locked_tte_data = tte_data;

	/* Now lock us into the TLBs via Hypervisor or OBP. */
	if (tlb_type == hypervisor) {
		for (i = 0; i < num_kernel_image_mappings; i++) {
			hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU);
			hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU);
			tte_vaddr += 0x400000;
			tte_data += 0x400000;
		}
	} else {
		for (i = 0; i < num_kernel_image_mappings; i++) {
			prom_dtlb_load(tlb_ent - i, tte_data, tte_vaddr);
			prom_itlb_load(tlb_ent - i, tte_data, tte_vaddr);
			tte_vaddr += 0x400000;
			tte_data += 0x400000;
		}
		sparc64_highest_unlocked_tlb_ent = tlb_ent - i;
	}
	if (tlb_type == cheetah_plus) {
		sparc64_kern_pri_context = (CTX_CHEETAH_PLUS_CTX0 |
					    CTX_CHEETAH_PLUS_NUC);
		sparc64_kern_pri_nuc_bits = CTX_CHEETAH_PLUS_NUC;
		sparc64_kern_sec_context = CTX_CHEETAH_PLUS_CTX0;
	}
}


static void __init inherit_prom_mappings(void)
{
	/* Now fixup OBP's idea about where we really are mapped. */
	printk("Remapping the kernel... ");
	remap_kernel();
	printk("done.\n");
}

void prom_world(int enter)
{
	if (!enter)
		set_fs((mm_segment_t) { get_thread_current_ds() });

	__asm__ __volatile__("flushw");
}
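/* Flush the D-cache over a virtual address range.  On spitfire the
 * cache is cleared through diagnostic tag writes (bounded at 512 line
 * flushes); on cheetah each line is invalidated by physical address
 * through ASI_DCACHE_INVALIDATE.
 */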
void __flush_dcache_range(unsigned long start, unsigned long end)
{
	unsigned long va;

	if (tlb_type == spitfire) {
		int n = 0;

		for (va = start; va < end; va += 32) {
			spitfire_put_dcache_tag(va & 0x3fe0, 0x0);
			if (++n >= 512)
				break;
		}
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		start = __pa(start);
		end = __pa(end);
		for (va = start; va < end; va += 32)
			__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
					     "membar #Sync"
					     : /* no outputs */
					     : "r" (va),
					       "i" (ASI_DCACHE_INVALIDATE));
	}
}

/* get_new_mmu_context() uses "cache + 1".  */
DEFINE_SPINLOCK(ctx_alloc_lock);
unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1;
#define MAX_CTX_NR	(1UL << CTX_NR_BITS)
#define CTX_BMAP_SLOTS	BITS_TO_LONGS(MAX_CTX_NR)
DECLARE_BITMAP(mmu_context_bmap, MAX_CTX_NR);

/* Caller does TLB context flushing on local CPU if necessary.
 * The caller also ensures that CTX_VALID(mm->context) is false.
 *
 * We must be careful about boundary cases so that we never
 * let the user have CTX 0 (nucleus) or we ever use a CTX
 * version of zero (and thus NO_CONTEXT would not be caught
 * by version mis-match tests in mmu_context.h).
 *
 * Always invoked with interrupts disabled.
 */
void get_new_mmu_context(struct mm_struct *mm)
{
	unsigned long ctx, new_ctx;
	unsigned long orig_pgsz_bits;
	unsigned long flags;
	int new_version;

	spin_lock_irqsave(&ctx_alloc_lock, flags);
	orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK);
	ctx = (tlb_context_cache + 1) & CTX_NR_MASK;
	new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx);
	new_version = 0;
	if (new_ctx >= (1 << CTX_NR_BITS)) {
		new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1);
		if (new_ctx >= ctx) {
			int i;
			new_ctx = (tlb_context_cache & CTX_VERSION_MASK) +
				CTX_FIRST_VERSION;
			if (new_ctx == 1)
				new_ctx = CTX_FIRST_VERSION;

			/* Don't call memset, for 16 entries that's just
			 * plain silly...
			 */
			mmu_context_bmap[0] = 3;
			mmu_context_bmap[1] = 0;
			mmu_context_bmap[2] = 0;
			mmu_context_bmap[3] = 0;
			for (i = 4; i < CTX_BMAP_SLOTS; i += 4) {
				mmu_context_bmap[i + 0] = 0;
				mmu_context_bmap[i + 1] = 0;
				mmu_context_bmap[i + 2] = 0;
				mmu_context_bmap[i + 3] = 0;
			}
			new_version = 1;
			goto out;
		}
	}
	mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63));
	new_ctx |= (tlb_context_cache & CTX_VERSION_MASK);
out:
	tlb_context_cache = new_ctx;
	mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits;
	spin_unlock_irqrestore(&ctx_alloc_lock, flags);

	if (unlikely(new_version))
		smp_new_mmu_context_version();
}

static int numa_enabled = 1;
static int numa_debug;

static int __init early_numa(char *p)
{
	if (!p)
		return 0;

	if (strstr(p, "off"))
		numa_enabled = 0;

	if (strstr(p, "debug"))
		numa_debug = 1;

	return 0;
}
early_param("numa", early_numa);

#define numadbg(f, a...) \
do {	if (numa_debug) \
		printk(KERN_INFO f, ## a); \
} while (0)

static void __init find_ramdisk(unsigned long phys_base)
{
#ifdef CONFIG_BLK_DEV_INITRD
	if (sparc_ramdisk_image || sparc_ramdisk_image64) {
		unsigned long ramdisk_image;

		/* Older versions of the bootloader only supported a
		 * 32-bit physical address for the ramdisk image
		 * location, stored at sparc_ramdisk_image.  Newer
		 * SILO versions set sparc_ramdisk_image to zero and
		 * provide a full 64-bit physical address at
		 * sparc_ramdisk_image64.
		 */
		ramdisk_image = sparc_ramdisk_image;
		if (!ramdisk_image)
			ramdisk_image = sparc_ramdisk_image64;

		/* Another bootloader quirk.  The bootloader normalizes
		 * the physical address to KERNBASE, so we have to
		 * factor that back out and add in the lowest valid
		 * physical page address to get the true physical address.
		 */
		ramdisk_image -= KERNBASE;
		ramdisk_image += phys_base;

		numadbg("Found ramdisk at physical address 0x%lx, size %u\n",
			ramdisk_image, sparc_ramdisk_size);

		initrd_start = ramdisk_image;
		initrd_end = ramdisk_image + sparc_ramdisk_size;

		lmb_reserve(initrd_start, sparc_ramdisk_size);

		initrd_start += PAGE_OFFSET;
		initrd_end += PAGE_OFFSET;
	}
#endif
}

struct node_mem_mask {
	unsigned long mask;
	unsigned long val;
	unsigned long bootmem_paddr;
};
static struct node_mem_mask node_masks[MAX_NUMNODES];
static int num_node_masks;

int numa_cpu_lookup_table[NR_CPUS];
cpumask_t numa_cpumask_lookup_table[MAX_NUMNODES];

#ifdef CONFIG_NEED_MULTIPLE_NODES

struct mdesc_mblock {
	u64	base;
	u64	size;
	u64	offset; /* RA-to-PA */
};
static struct mdesc_mblock *mblocks;
static int num_mblocks;
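/* Translate a sun4v "real address" into a physical address by
 * applying the offset of the mblock that contains it.
 */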
static unsigned long ra_to_pa(unsigned long addr)
{
	int i;

	for (i = 0; i < num_mblocks; i++) {
		struct mdesc_mblock *m = &mblocks[i];

		if (addr >= m->base &&
		    addr < (m->base + m->size)) {
			addr += m->offset;
			break;
		}
	}
	return addr;
}
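/* Return the NUMA node whose {mask,val} pair matches the given
 * address, or -1 if no node claims it.
 */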
static int find_node(unsigned long addr)
{
	int i;

	addr = ra_to_pa(addr);
	for (i = 0; i < num_node_masks; i++) {
		struct node_mem_mask *p = &node_masks[i];

		if ((addr & p->mask) == p->val)
			return i;
	}
	return -1;
}
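/* Walk [start, end) a page at a time.  Report in '*nid' the node of
 * 'start' and return the address where that node's coverage ends
 * (or 'end' if the whole range sits on one node).
 */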
static unsigned long nid_range(unsigned long start, unsigned long end,
			       int *nid)
{
	*nid = find_node(start);
	start += PAGE_SIZE;
	while (start < end) {
		int n = find_node(start);

		if (n != *nid)
			break;
		start += PAGE_SIZE;
	}

	if (start > end)
		start = end;

	return start;
}
#else
static unsigned long nid_range(unsigned long start, unsigned long end,
			       int *nid)
{
	*nid = 0;
	return end;
}
#endif

/* This must be invoked after performing all of the necessary
 * add_active_range() calls for 'nid'.  We need to be able to get
 * correct data from get_pfn_range_for_nid().
 */
static void __init allocate_node_data(int nid)
{
	unsigned long paddr, num_pages, start_pfn, end_pfn;
	struct pglist_data *p;

#ifdef CONFIG_NEED_MULTIPLE_NODES
	paddr = lmb_alloc_nid(sizeof(struct pglist_data),
			      SMP_CACHE_BYTES, nid, nid_range);
	if (!paddr) {
		prom_printf("Cannot allocate pglist_data for nid[%d]\n", nid);
		prom_halt();
	}
	NODE_DATA(nid) = __va(paddr);
	memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));

	NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
#endif

	p = NODE_DATA(nid);

	get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
	p->node_start_pfn = start_pfn;
	p->node_spanned_pages = end_pfn - start_pfn;

	if (p->node_spanned_pages) {
		num_pages = bootmem_bootmap_pages(p->node_spanned_pages);

		paddr = lmb_alloc_nid(num_pages << PAGE_SHIFT, PAGE_SIZE, nid,
				      nid_range);
		if (!paddr) {
			prom_printf("Cannot allocate bootmap for nid[%d]\n",
				    nid);
			prom_halt();
		}
		node_masks[nid].bootmem_paddr = paddr;
	}
}

static void init_node_masks_nonnuma(void)
{
	int i;

	numadbg("Initializing tables for non-numa.\n");

	node_masks[0].mask = node_masks[0].val = 0;
	num_node_masks = 1;

	for (i = 0; i < NR_CPUS; i++)
		numa_cpu_lookup_table[i] = 0;

	numa_cpumask_lookup_table[0] = CPU_MASK_ALL;
}

#ifdef CONFIG_NEED_MULTIPLE_NODES
struct pglist_data *node_data[MAX_NUMNODES];

EXPORT_SYMBOL(numa_cpu_lookup_table);
EXPORT_SYMBOL(numa_cpumask_lookup_table);
EXPORT_SYMBOL(node_data);

struct mdesc_mlgroup {
	u64	node;
	u64	latency;
	u64	match;
	u64	mask;
};
static struct mdesc_mlgroup *mlgroups;
static int num_mlgroups;
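/* Scan the forward arcs of a PIO latency group for a device node
 * whose "cfg-handle" matches; returns 0 if found, -ENODEV otherwise.
 */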
static int scan_pio_for_cfg_handle(struct mdesc_handle *md, u64 pio,
				   u32 cfg_handle)
{
	u64 arc;

	mdesc_for_each_arc(arc, md, pio, MDESC_ARC_TYPE_FWD) {
		u64 target = mdesc_arc_target(md, arc);
		const u64 *val;

		val = mdesc_get_property(md, target,
					 "cfg-handle", NULL);
		if (val && *val == cfg_handle)
			return 0;
	}
	return -ENODEV;
}

static int scan_arcs_for_cfg_handle(struct mdesc_handle *md, u64 grp,
				    u32 cfg_handle)
{
	u64 arc, candidate, best_latency = ~(u64)0;

	candidate = MDESC_NODE_NULL;
	mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) {
		u64 target = mdesc_arc_target(md, arc);
		const char *name = mdesc_node_name(md, target);
		const u64 *val;

		if (strcmp(name, "pio-latency-group"))
			continue;

		val = mdesc_get_property(md, target, "latency", NULL);
		if (!val)
			continue;

		if (*val < best_latency) {
			candidate = target;
			best_latency = *val;
		}
	}

	if (candidate == MDESC_NODE_NULL)
		return -ENODEV;

	return scan_pio_for_cfg_handle(md, candidate, cfg_handle);
}

int of_node_to_nid(struct device_node *dp)
{
	const struct linux_prom64_registers *regs;
	struct mdesc_handle *md;
	u32 cfg_handle;
	int count, nid;
	u64 grp;

	if (!mlgroups)
		return -1;

	regs = of_get_property(dp, "reg", NULL);
	if (!regs)
		return -1;

	cfg_handle = (regs->phys_addr >> 32UL) & 0x0fffffff;

	md = mdesc_grab();

	count = 0;
	nid = -1;
	mdesc_for_each_node_by_name(md, grp, "group") {
		if (!scan_arcs_for_cfg_handle(md, grp, cfg_handle)) {
			nid = count;
			break;
		}
		count++;
	}

	mdesc_release(md);

	return nid;
}
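/* Carve each lmb memory region into per-node pieces using nid_range()
 * and register them via add_active_range().
 */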
static void add_node_ranges(void)
{
	int i;

	for (i = 0; i < lmb.memory.cnt; i++) {
		unsigned long size = lmb_size_bytes(&lmb.memory, i);
		unsigned long start, end;

		start = lmb.memory.region[i].base;
		end = start + size;
		while (start < end) {
			unsigned long this_end;
			int nid;

			this_end = nid_range(start, end, &nid);

			numadbg("Adding active range nid[%d] "
				"start[%lx] end[%lx]\n",
				nid, start, this_end);

			add_active_range(nid,
					 start >> PAGE_SHIFT,
					 this_end >> PAGE_SHIFT);

			start = this_end;
		}
	}
}
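/* Copy the "memory-latency-group" nodes (and below, the "mblock"
 * nodes) out of the machine description into flat arrays that can be
 * searched quickly at runtime.
 */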
static int __init grab_mlgroups(struct mdesc_handle *md)
{
	unsigned long paddr;
	int count = 0;
	u64 node;

	mdesc_for_each_node_by_name(md, node, "memory-latency-group")
		count++;
	if (!count)
		return -ENOENT;

	paddr = lmb_alloc(count * sizeof(struct mdesc_mlgroup),
			  SMP_CACHE_BYTES);
	if (!paddr)
		return -ENOMEM;

	mlgroups = __va(paddr);
	num_mlgroups = count;

	count = 0;
	mdesc_for_each_node_by_name(md, node, "memory-latency-group") {
		struct mdesc_mlgroup *m = &mlgroups[count++];
		const u64 *val;

		m->node = node;

		val = mdesc_get_property(md, node, "latency", NULL);
		m->latency = *val;
		val = mdesc_get_property(md, node, "address-match", NULL);
		m->match = *val;
		val = mdesc_get_property(md, node, "address-mask", NULL);
		m->mask = *val;

		numadbg("MLGROUP[%d]: node[%lx] latency[%lx] "
			"match[%lx] mask[%lx]\n",
			count - 1, m->node, m->latency, m->match, m->mask);
	}

	return 0;
}

static int __init grab_mblocks(struct mdesc_handle *md)
{
	unsigned long paddr;
	int count = 0;
	u64 node;

	mdesc_for_each_node_by_name(md, node, "mblock")
		count++;
	if (!count)
		return -ENOENT;

	paddr = lmb_alloc(count * sizeof(struct mdesc_mblock),
			  SMP_CACHE_BYTES);
	if (!paddr)
		return -ENOMEM;

	mblocks = __va(paddr);
	num_mblocks = count;

	count = 0;
	mdesc_for_each_node_by_name(md, node, "mblock") {
		struct mdesc_mblock *m = &mblocks[count++];
		const u64 *val;

		val = mdesc_get_property(md, node, "base", NULL);
		m->base = *val;
		val = mdesc_get_property(md, node, "size", NULL);
		m->size = *val;
		val = mdesc_get_property(md, node,
					 "address-congruence-offset", NULL);
		m->offset = *val;

		numadbg("MBLOCK[%d]: base[%lx] size[%lx] offset[%lx]\n",
			count - 1, m->base, m->size, m->offset);
	}

	return 0;
}

static void __init numa_parse_mdesc_group_cpus(struct mdesc_handle *md,
					       u64 grp, cpumask_t *mask)
{
	u64 arc;

	cpus_clear(*mask);

	mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_BACK) {
		u64 target = mdesc_arc_target(md, arc);
		const char *name = mdesc_node_name(md, target);
		const u64 *id;

		if (strcmp(name, "cpu"))
			continue;
		id = mdesc_get_property(md, target, "id", NULL);
		if (*id < NR_CPUS)
			cpu_set(*id, *mask);
	}
}

static struct mdesc_mlgroup * __init find_mlgroup(u64 node)
{
	int i;

	for (i = 0; i < num_mlgroups; i++) {
		struct mdesc_mlgroup *m = &mlgroups[i];
		if (m->node == node)
			return m;
	}
	return NULL;
}
|  | 1108 | static int __init numa_attach_mlgroup(struct mdesc_handle *md, u64 grp, | 
|  | 1109 | int index) | 
|  | 1110 | { | 
|  | 1111 | struct mdesc_mlgroup *candidate = NULL; | 
|  | 1112 | u64 arc, best_latency = ~(u64)0; | 
|  | 1113 | struct node_mem_mask *n; | 
|  | 1114 |  | 
|  | 1115 | mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) { | 
|  | 1116 | u64 target = mdesc_arc_target(md, arc); | 
|  | 1117 | struct mdesc_mlgroup *m = find_mlgroup(target); | 
|  | 1118 | if (!m) | 
|  | 1119 | continue; | 
|  | 1120 | if (m->latency < best_latency) { | 
|  | 1121 | candidate = m; | 
|  | 1122 | best_latency = m->latency; | 
|  | 1123 | } | 
|  | 1124 | } | 
|  | 1125 | if (!candidate) | 
|  | 1126 | return -ENOENT; | 
|  | 1127 |  | 
|  | 1128 | if (num_node_masks != index) { | 
|  | 1129 | printk(KERN_ERR "Inconsistent NUMA state, " | 
|  | 1130 | "index[%d] != num_node_masks[%d]\n", | 
|  | 1131 | index, num_node_masks); | 
|  | 1132 | return -EINVAL; | 
|  | 1133 | } | 
|  | 1134 |  | 
|  | 1135 | n = &node_masks[num_node_masks++]; | 
|  | 1136 |  | 
|  | 1137 | n->mask = candidate->mask; | 
|  | 1138 | n->val = candidate->match; | 
|  | 1139 |  | 
|  | 1140 | numadbg("NUMA NODE[%d]: mask[%lx] val[%lx] (latency[%lx])\n", | 
|  | 1141 | index, n->mask, n->val, candidate->latency); | 
|  | 1142 |  | 
|  | 1143 | return 0; | 
|  | 1144 | } | 
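|  |  |  | 
|  |  | /* Illustrative sketch, not part of the original code: once | 
|  |  | * node_masks[] has been populated above, a physical address could | 
|  |  | * be classified by testing each mask/val pair in turn.  The helper | 
|  |  | * name is hypothetical, hence the #if 0. | 
|  |  | */ | 
|  |  | #if 0 | 
|  |  | static int example_paddr_to_nid(unsigned long paddr) | 
|  |  | { | 
|  |  | 	int i; | 
|  |  |  | 
|  |  | 	for (i = 0; i < num_node_masks; i++) { | 
|  |  | 		struct node_mem_mask *p = &node_masks[i]; | 
|  |  |  | 
|  |  | 		if ((paddr & p->mask) == p->val) | 
|  |  | 			return i; | 
|  |  | 	} | 
|  |  | 	return -1; | 
|  |  | } | 
|  |  | #endif | 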
|  | 1145 |  | 
|  | 1146 | static int __init numa_parse_mdesc_group(struct mdesc_handle *md, u64 grp, | 
|  | 1147 | int index) | 
|  | 1148 | { | 
|  | 1149 | cpumask_t mask; | 
|  | 1150 | int cpu; | 
|  | 1151 |  | 
|  | 1152 | numa_parse_mdesc_group_cpus(md, grp, &mask); | 
|  | 1153 |  | 
|  | 1154 | for_each_cpu_mask(cpu, mask) | 
|  | 1155 | numa_cpu_lookup_table[cpu] = index; | 
|  | 1156 | numa_cpumask_lookup_table[index] = mask; | 
|  | 1157 |  | 
|  | 1158 | if (numa_debug) { | 
|  | 1159 | printk(KERN_INFO "NUMA GROUP[%d]: cpus [ ", index); | 
|  | 1160 | for_each_cpu_mask(cpu, mask) | 
|  | 1161 | printk("%d ", cpu); | 
|  | 1162 | printk("]\n"); | 
|  | 1163 | } | 
|  | 1164 |  | 
|  | 1165 | return numa_attach_mlgroup(md, grp, index); | 
|  | 1166 | } | 
|  | 1167 |  | 
|  | 1168 | static int __init numa_parse_mdesc(void) | 
|  | 1169 | { | 
|  | 1170 | struct mdesc_handle *md = mdesc_grab(); | 
|  | 1171 | int i, err, count; | 
|  | 1172 | u64 node; | 
|  | 1173 |  | 
|  | 1174 | node = mdesc_node_by_name(md, MDESC_NODE_NULL, "latency-groups"); | 
|  | 1175 | if (node == MDESC_NODE_NULL) { | 
|  | 1176 | mdesc_release(md); | 
|  | 1177 | return -ENOENT; | 
|  | 1178 | } | 
|  | 1179 |  | 
|  | 1180 | err = grab_mblocks(md); | 
|  | 1181 | if (err < 0) | 
|  | 1182 | goto out; | 
|  | 1183 |  | 
|  | 1184 | err = grab_mlgroups(md); | 
|  | 1185 | if (err < 0) | 
|  | 1186 | goto out; | 
|  | 1187 |  | 
|  | 1188 | count = 0; | 
|  | 1189 | mdesc_for_each_node_by_name(md, node, "group") { | 
|  | 1190 | err = numa_parse_mdesc_group(md, node, count); | 
|  | 1191 | if (err < 0) | 
|  | 1192 | break; | 
|  | 1193 | count++; | 
|  | 1194 | } | 
|  | 1195 |  | 
|  | 1196 | add_node_ranges(); | 
|  | 1197 |  | 
|  | 1198 | for (i = 0; i < num_node_masks; i++) { | 
|  | 1199 | allocate_node_data(i); | 
|  | 1200 | node_set_online(i); | 
|  | 1201 | } | 
|  | 1202 |  | 
|  | 1203 | err = 0; | 
|  | 1204 | out: | 
|  | 1205 | mdesc_release(md); | 
|  | 1206 | return err; | 
|  | 1207 | } | 
|  | 1208 |  | 
|  | 1209 | static int __init numa_parse_sun4u(void) | 
|  | 1210 | { | 
|  | 1211 | return -1; | 
|  | 1212 | } | 
|  | 1213 |  | 
|  | 1214 | static int __init bootmem_init_numa(void) | 
|  | 1215 | { | 
|  | 1216 | int err = -1; | 
|  | 1217 |  | 
|  | 1218 | numadbg("bootmem_init_numa()\n"); | 
|  | 1219 |  | 
|  | 1220 | if (numa_enabled) { | 
|  | 1221 | if (tlb_type == hypervisor) | 
|  | 1222 | err = numa_parse_mdesc(); | 
|  | 1223 | else | 
|  | 1224 | err = numa_parse_sun4u(); | 
|  | 1225 | } | 
|  | 1226 | return err; | 
|  | 1227 | } | 
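|  |  |  | 
|  |  | /* A negative return here (numa_enabled clear, no "latency-groups" | 
|  |  | * node in the MDESC, or the sun4u stub above) makes bootmem_init() | 
|  |  | * fall back to the single-node bootmem_init_nonnuma() path below. | 
|  |  | */ | 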
|  | 1228 |  | 
|  | 1229 | #else | 
|  | 1230 |  | 
|  | 1231 | static int bootmem_init_numa(void) | 
|  | 1232 | { | 
|  | 1233 | return -1; | 
|  | 1234 | } | 
|  | 1235 |  | 
|  | 1236 | #endif | 
|  | 1237 |  | 
|  | 1238 | static void __init bootmem_init_nonnuma(void) | 
|  | 1239 | { | 
|  | 1240 | unsigned long top_of_ram = lmb_end_of_DRAM(); | 
|  | 1241 | unsigned long total_ram = lmb_phys_mem_size(); | 
|  | 1242 | unsigned int i; | 
|  | 1243 |  | 
|  | 1244 | numadbg("bootmem_init_nonnuma()\n"); | 
|  | 1245 |  | 
|  | 1246 | printk(KERN_INFO "Top of RAM: 0x%lx, Total RAM: 0x%lx\n", | 
|  | 1247 | top_of_ram, total_ram); | 
|  | 1248 | printk(KERN_INFO "Memory hole size: %ldMB\n", | 
|  | 1249 | (top_of_ram - total_ram) >> 20); | 
|  | 1250 |  | 
|  | 1251 | init_node_masks_nonnuma(); | 
|  | 1252 |  | 
|  | 1253 | for (i = 0; i < lmb.memory.cnt; i++) { | 
|  | 1254 | unsigned long size = lmb_size_bytes(&lmb.memory, i); | 
|  | 1255 | unsigned long start_pfn, end_pfn; | 
|  | 1256 |  | 
|  | 1257 | if (!size) | 
|  | 1258 | continue; | 
|  | 1259 |  | 
|  | 1260 | start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT; | 
|  | 1261 | end_pfn = start_pfn + lmb_size_pages(&lmb.memory, i); | 
|  | 1262 | add_active_range(0, start_pfn, end_pfn); | 
|  | 1263 | } | 
|  | 1264 |  | 
|  | 1265 | allocate_node_data(0); | 
|  | 1266 |  | 
|  | 1267 | node_set_online(0); | 
|  | 1268 | } | 
|  | 1269 |  | 
|  | 1270 | static void __init reserve_range_in_node(int nid, unsigned long start, | 
|  | 1271 | unsigned long end) | 
|  | 1272 | { | 
|  | 1273 | numadbg("    reserve_range_in_node(nid[%d],start[%lx],end[%lx])\n", | 
|  | 1274 | nid, start, end); | 
|  | 1275 | while (start < end) { | 
|  | 1276 | unsigned long this_end; | 
|  | 1277 | int n; | 
|  | 1278 |  | 
|  | 1279 | this_end = nid_range(start, end, &n); | 
|  | 1280 | if (n == nid) { | 
|  | 1281 | numadbg("      MATCH reserving range [%lx:%lx]\n", | 
|  | 1282 | start, this_end); | 
|  | 1283 | reserve_bootmem_node(NODE_DATA(nid), start, | 
|  | 1284 | (this_end - start), BOOTMEM_DEFAULT); | 
|  | 1285 | } else | 
|  | 1286 | numadbg("      NO MATCH, advancing start to %lx\n", | 
|  | 1287 | this_end); | 
|  | 1288 |  | 
|  | 1289 | start = this_end; | 
|  | 1290 | } | 
|  | 1291 | } | 
|  | 1292 |  | 
|  | 1293 | static void __init trim_reserved_in_node(int nid) | 
|  | 1294 | { | 
|  | 1295 | int i; | 
|  | 1296 |  | 
|  | 1297 | numadbg("  trim_reserved_in_node(%d)\n", nid); | 
|  | 1298 |  | 
|  | 1299 | for (i = 0; i < lmb.reserved.cnt; i++) { | 
|  | 1300 | unsigned long start = lmb.reserved.region[i].base; | 
|  | 1301 | unsigned long size = lmb_size_bytes(&lmb.reserved, i); | 
|  | 1302 | unsigned long end = start + size; | 
|  | 1303 |  | 
|  | 1304 | reserve_range_in_node(nid, start, end); | 
|  | 1305 | } | 
|  | 1306 | } | 
|  | 1307 |  | 
|  | 1308 | static void __init bootmem_init_one_node(int nid) | 
|  | 1309 | { | 
|  | 1310 | struct pglist_data *p; | 
|  | 1311 |  | 
|  | 1312 | numadbg("bootmem_init_one_node(%d)\n", nid); | 
|  | 1313 |  | 
|  | 1314 | p = NODE_DATA(nid); | 
|  | 1315 |  | 
|  | 1316 | if (p->node_spanned_pages) { | 
|  | 1317 | unsigned long paddr = node_masks[nid].bootmem_paddr; | 
|  | 1318 | unsigned long end_pfn; | 
|  | 1319 |  | 
|  | 1320 | end_pfn = p->node_start_pfn + p->node_spanned_pages; | 
|  | 1321 |  | 
|  | 1322 | numadbg("  init_bootmem_node(%d, %lx, %lx, %lx)\n", | 
|  | 1323 | nid, paddr >> PAGE_SHIFT, p->node_start_pfn, end_pfn); | 
|  | 1324 |  | 
|  | 1325 | init_bootmem_node(p, paddr >> PAGE_SHIFT, | 
|  | 1326 | p->node_start_pfn, end_pfn); | 
|  | 1327 |  | 
|  | 1328 | numadbg("  free_bootmem_with_active_regions(%d, %lx)\n", | 
|  | 1329 | nid, end_pfn); | 
|  | 1330 | free_bootmem_with_active_regions(nid, end_pfn); | 
|  | 1331 |  | 
|  | 1332 | trim_reserved_in_node(nid); | 
|  | 1333 |  | 
|  | 1334 | numadbg("  sparse_memory_present_with_active_regions(%d)\n", | 
|  | 1335 | nid); | 
|  | 1336 | sparse_memory_present_with_active_regions(nid); | 
|  | 1337 | } | 
|  | 1338 | } | 
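|  |  |  | 
|  |  | /* Per-node bringup is thus three steps: seed the node's bootmem | 
|  |  | * allocator from node_masks[nid].bootmem_paddr (presumably set up | 
|  |  | * by allocate_node_data()), release all of the node's active | 
|  |  | * regions into it, then walk lmb.reserved and re-reserve the | 
|  |  | * pieces intersecting this node so bootmem never hands them out. | 
|  |  | */ | 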
|  | 1339 |  | 
|  | 1340 | static unsigned long __init bootmem_init(unsigned long phys_base) | 
|  | 1341 | { | 
|  | 1342 | unsigned long end_pfn; | 
|  | 1343 | int nid; | 
|  | 1344 |  | 
|  | 1345 | end_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1346 | max_pfn = max_low_pfn = end_pfn; | 
| David S. Miller | d111201 | 2006-03-08 02:16:07 -0800 | [diff] [blame] | 1347 | min_low_pfn = (phys_base >> PAGE_SHIFT); | 
|  | 1348 |  | 
| David S. Miller | 919ee67 | 2008-04-23 05:40:25 -0700 | [diff] [blame] | 1349 | if (bootmem_init_numa() < 0) | 
|  | 1350 | bootmem_init_nonnuma(); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1351 |  | 
| David S. Miller | 919ee67 | 2008-04-23 05:40:25 -0700 | [diff] [blame] | 1352 | /* XXX cpu notifier XXX */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1353 |  | 
| David S. Miller | 919ee67 | 2008-04-23 05:40:25 -0700 | [diff] [blame] | 1354 | for_each_online_node(nid) | 
|  | 1355 | bootmem_init_one_node(nid); | 
| David S. Miller | d111201 | 2006-03-08 02:16:07 -0800 | [diff] [blame] | 1356 |  | 
|  | 1357 | sparse_init(); | 
|  | 1358 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1359 | return end_pfn; | 
|  | 1360 | } | 
|  | 1361 |  | 
| David S. Miller | 9cc3a1a | 2006-02-21 20:51:13 -0800 | [diff] [blame] | 1362 | static struct linux_prom64_registers pall[MAX_BANKS] __initdata; | 
|  | 1363 | static int pall_ents __initdata; | 
|  | 1364 |  | 
| David S. Miller | 5642530 | 2005-09-25 16:46:57 -0700 | [diff] [blame] | 1365 | #ifdef CONFIG_DEBUG_PAGEALLOC | 
| Sam Ravnborg | 896aef4 | 2008-02-24 19:49:52 -0800 | [diff] [blame] | 1366 | static unsigned long __ref kernel_map_range(unsigned long pstart, | 
|  | 1367 | unsigned long pend, pgprot_t prot) | 
| David S. Miller | 5642530 | 2005-09-25 16:46:57 -0700 | [diff] [blame] | 1368 | { | 
|  | 1369 | unsigned long vstart = PAGE_OFFSET + pstart; | 
|  | 1370 | unsigned long vend = PAGE_OFFSET + pend; | 
|  | 1371 | unsigned long alloc_bytes = 0UL; | 
|  | 1372 |  | 
|  | 1373 | if ((vstart & ~PAGE_MASK) || (vend & ~PAGE_MASK)) { | 
| David S. Miller | 13edad7 | 2005-09-29 17:58:26 -0700 | [diff] [blame] | 1374 | prom_printf("kernel_map: Unaligned virtual range[%lx:%lx]\n", | 
| David S. Miller | 5642530 | 2005-09-25 16:46:57 -0700 | [diff] [blame] | 1375 | vstart, vend); | 
|  | 1376 | prom_halt(); | 
|  | 1377 | } | 
|  | 1378 |  | 
|  | 1379 | while (vstart < vend) { | 
|  | 1380 | unsigned long this_end, paddr = __pa(vstart); | 
|  | 1381 | pgd_t *pgd = pgd_offset_k(vstart); | 
|  | 1382 | pud_t *pud; | 
|  | 1383 | pmd_t *pmd; | 
|  | 1384 | pte_t *pte; | 
|  | 1385 |  | 
|  | 1386 | pud = pud_offset(pgd, vstart); | 
|  | 1387 | if (pud_none(*pud)) { | 
|  | 1388 | pmd_t *new; | 
|  | 1389 |  | 
|  | 1390 | new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE); | 
|  | 1391 | alloc_bytes += PAGE_SIZE; | 
|  | 1392 | pud_populate(&init_mm, pud, new); | 
|  | 1393 | } | 
|  | 1394 |  | 
|  | 1395 | pmd = pmd_offset(pud, vstart); | 
|  | 1396 | if (!pmd_present(*pmd)) { | 
|  | 1397 | pte_t *new; | 
|  | 1398 |  | 
|  | 1399 | new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE); | 
|  | 1400 | alloc_bytes += PAGE_SIZE; | 
|  | 1401 | pmd_populate_kernel(&init_mm, pmd, new); | 
|  | 1402 | } | 
|  | 1403 |  | 
|  | 1404 | pte = pte_offset_kernel(pmd, vstart); | 
|  | 1405 | this_end = (vstart + PMD_SIZE) & PMD_MASK; | 
|  | 1406 | if (this_end > vend) | 
|  | 1407 | this_end = vend; | 
|  | 1408 |  | 
|  | 1409 | while (vstart < this_end) { | 
|  | 1410 | pte_val(*pte) = (paddr | pgprot_val(prot)); | 
|  | 1411 |  | 
|  | 1412 | vstart += PAGE_SIZE; | 
|  | 1413 | paddr += PAGE_SIZE; | 
|  | 1414 | pte++; | 
|  | 1415 | } | 
|  | 1416 | } | 
|  | 1417 |  | 
|  | 1418 | return alloc_bytes; | 
|  | 1419 | } | 
|  | 1420 |  | 
| David S. Miller | 5642530 | 2005-09-25 16:46:57 -0700 | [diff] [blame] | 1421 | extern unsigned int kvmap_linear_patch[1]; | 
| David S. Miller | 9cc3a1a | 2006-02-21 20:51:13 -0800 | [diff] [blame] | 1422 | #endif /* CONFIG_DEBUG_PAGEALLOC */ | 
|  | 1423 |  | 
|  | 1424 | static void __init mark_kpte_bitmap(unsigned long start, unsigned long end) | 
|  | 1425 | { | 
|  | 1426 | const unsigned long shift_256MB = 28; | 
|  | 1427 | const unsigned long mask_256MB = ((1UL << shift_256MB) - 1UL); | 
|  | 1428 | const unsigned long size_256MB = (1UL << shift_256MB); | 
|  | 1429 |  | 
|  | 1430 | while (start < end) { | 
|  | 1431 | long remains; | 
|  | 1432 |  | 
| David S. Miller | f7c0033 | 2006-03-05 22:18:50 -0800 | [diff] [blame] | 1433 | remains = end - start; | 
|  | 1434 | if (remains < size_256MB) | 
|  | 1435 | break; | 
|  | 1436 |  | 
| David S. Miller | 9cc3a1a | 2006-02-21 20:51:13 -0800 | [diff] [blame] | 1437 | if (start & mask_256MB) { | 
|  | 1438 | start = (start + size_256MB) & ~mask_256MB; | 
|  | 1439 | continue; | 
|  | 1440 | } | 
|  | 1441 |  | 
| David S. Miller | 9cc3a1a | 2006-02-21 20:51:13 -0800 | [diff] [blame] | 1442 | while (remains >= size_256MB) { | 
|  | 1443 | unsigned long index = start >> shift_256MB; | 
|  | 1444 |  | 
|  | 1445 | __set_bit(index, kpte_linear_bitmap); | 
|  | 1446 |  | 
|  | 1447 | start += size_256MB; | 
|  | 1448 | remains -= size_256MB; | 
|  | 1449 | } | 
|  | 1450 | } | 
|  | 1451 | } | 
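|  |  |  | 
|  |  | /* Each bit in kpte_linear_bitmap stands for one naturally aligned, | 
|  |  | * fully RAM-backed 256MB chunk of physical memory, so a lookup is | 
|  |  | * just a shift, e.g. (hypothetical snippet): | 
|  |  | * | 
|  |  | *	use_256MB = test_bit(paddr >> 28, kpte_linear_bitmap); | 
|  |  | * | 
|  |  | * The TLB miss path presumably consults this to choose between the | 
|  |  | * 4MB and 256MB linear mapping sizes (see kern_linear_pte_xor[] | 
|  |  | * further down). | 
|  |  | */ | 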
| David S. Miller | 5642530 | 2005-09-25 16:46:57 -0700 | [diff] [blame] | 1452 |  | 
| David S. Miller | 8f361453 | 2007-12-13 06:13:38 -0800 | [diff] [blame] | 1453 | static void __init init_kpte_bitmap(void) | 
| David S. Miller | 5642530 | 2005-09-25 16:46:57 -0700 | [diff] [blame] | 1454 | { | 
| David S. Miller | 9cc3a1a | 2006-02-21 20:51:13 -0800 | [diff] [blame] | 1455 | unsigned long i; | 
| David S. Miller | 13edad7 | 2005-09-29 17:58:26 -0700 | [diff] [blame] | 1456 |  | 
|  | 1457 | for (i = 0; i < pall_ents; i++) { | 
| David S. Miller | 5642530 | 2005-09-25 16:46:57 -0700 | [diff] [blame] | 1458 | unsigned long phys_start, phys_end; | 
|  | 1459 |  | 
| David S. Miller | 13edad7 | 2005-09-29 17:58:26 -0700 | [diff] [blame] | 1460 | phys_start = pall[i].phys_addr; | 
|  | 1461 | phys_end = phys_start + pall[i].reg_size; | 
| David S. Miller | 9cc3a1a | 2006-02-21 20:51:13 -0800 | [diff] [blame] | 1462 |  | 
|  | 1463 | mark_kpte_bitmap(phys_start, phys_end); | 
| David S. Miller | 8f361453 | 2007-12-13 06:13:38 -0800 | [diff] [blame] | 1464 | } | 
|  | 1465 | } | 
| David S. Miller | 9cc3a1a | 2006-02-21 20:51:13 -0800 | [diff] [blame] | 1466 |  | 
| David S. Miller | 8f361453 | 2007-12-13 06:13:38 -0800 | [diff] [blame] | 1467 | static void __init kernel_physical_mapping_init(void) | 
|  | 1468 | { | 
| David S. Miller | 9cc3a1a | 2006-02-21 20:51:13 -0800 | [diff] [blame] | 1469 | #ifdef CONFIG_DEBUG_PAGEALLOC | 
| David S. Miller | 8f361453 | 2007-12-13 06:13:38 -0800 | [diff] [blame] | 1470 | unsigned long i, mem_alloced = 0UL; | 
|  | 1471 |  | 
|  | 1472 | for (i = 0; i < pall_ents; i++) { | 
|  | 1473 | unsigned long phys_start, phys_end; | 
|  | 1474 |  | 
|  | 1475 | phys_start = pall[i].phys_addr; | 
|  | 1476 | phys_end = phys_start + pall[i].reg_size; | 
|  | 1477 |  | 
| David S. Miller | 5642530 | 2005-09-25 16:46:57 -0700 | [diff] [blame] | 1478 | mem_alloced += kernel_map_range(phys_start, phys_end, | 
|  | 1479 | PAGE_KERNEL); | 
| David S. Miller | 5642530 | 2005-09-25 16:46:57 -0700 | [diff] [blame] | 1480 | } | 
|  | 1481 |  | 
|  | 1482 | printk("Allocated %ld bytes for kernel page tables.\n", | 
|  | 1483 | mem_alloced); | 
|  | 1484 |  | 
|  | 1485 | kvmap_linear_patch[0] = 0x01000000; /* nop */ | 
|  | 1486 | flushi(&kvmap_linear_patch[0]); | 
|  | 1487 |  | 
|  | 1488 | __flush_tlb_all(); | 
| David S. Miller | 9cc3a1a | 2006-02-21 20:51:13 -0800 | [diff] [blame] | 1489 | #endif | 
| David S. Miller | 5642530 | 2005-09-25 16:46:57 -0700 | [diff] [blame] | 1490 | } | 
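|  |  |  | 
|  |  | /* With CONFIG_DEBUG_PAGEALLOC the linear area is backed by the | 
|  |  | * real page tables built above, and patching a nop into | 
|  |  | * kvmap_linear_patch[0] presumably disables the fast path that | 
|  |  | * would otherwise synthesize linear-mapping TTEs in the TLB miss | 
|  |  | * handler, forcing misses to walk these page tables instead. | 
|  |  | */ | 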
|  | 1491 |  | 
| David S. Miller | 9cc3a1a | 2006-02-21 20:51:13 -0800 | [diff] [blame] | 1492 | #ifdef CONFIG_DEBUG_PAGEALLOC | 
| David S. Miller | 5642530 | 2005-09-25 16:46:57 -0700 | [diff] [blame] | 1493 | void kernel_map_pages(struct page *page, int numpages, int enable) | 
|  | 1494 | { | 
|  | 1495 | unsigned long phys_start = page_to_pfn(page) << PAGE_SHIFT; | 
|  | 1496 | unsigned long phys_end = phys_start + (numpages * PAGE_SIZE); | 
|  | 1497 |  | 
|  | 1498 | kernel_map_range(phys_start, phys_end, | 
|  | 1499 | (enable ? PAGE_KERNEL : __pgprot(0))); | 
|  | 1500 |  | 
| David S. Miller | 74bf431 | 2006-01-31 18:29:18 -0800 | [diff] [blame] | 1501 | flush_tsb_kernel_range(PAGE_OFFSET + phys_start, | 
|  | 1502 | PAGE_OFFSET + phys_end); | 
|  | 1503 |  | 
| David S. Miller | 5642530 | 2005-09-25 16:46:57 -0700 | [diff] [blame] | 1504 | /* We should perform an IPI and flush all TLBs here, | 
|  | 1505 | * but that can deadlock, so we only flush the current cpu. | 
|  | 1506 | */ | 
|  | 1507 | __flush_tlb_kernel_range(PAGE_OFFSET + phys_start, | 
|  | 1508 | PAGE_OFFSET + phys_end); | 
|  | 1509 | } | 
|  | 1510 | #endif | 
|  | 1511 |  | 
| David S. Miller | 1014757 | 2005-09-28 21:46:43 -0700 | [diff] [blame] | 1512 | unsigned long __init find_ecache_flush_span(unsigned long size) | 
|  | 1513 | { | 
| David S. Miller | 13edad7 | 2005-09-29 17:58:26 -0700 | [diff] [blame] | 1514 | int i; | 
| David S. Miller | 1014757 | 2005-09-28 21:46:43 -0700 | [diff] [blame] | 1515 |  | 
| David S. Miller | 13edad7 | 2005-09-29 17:58:26 -0700 | [diff] [blame] | 1516 | for (i = 0; i < pavail_ents; i++) { | 
|  | 1517 | if (pavail[i].reg_size >= size) | 
|  | 1518 | return pavail[i].phys_addr; | 
| David S. Miller | 1014757 | 2005-09-28 21:46:43 -0700 | [diff] [blame] | 1519 | } | 
|  | 1520 |  | 
|  | 1521 | return ~0UL; | 
|  | 1522 | } | 
|  | 1523 |  | 
| David S. Miller | 517af33 | 2006-02-01 15:55:21 -0800 | [diff] [blame] | 1524 | static void __init tsb_phys_patch(void) | 
|  | 1525 | { | 
| David S. Miller | d257d5d | 2006-02-06 23:44:37 -0800 | [diff] [blame] | 1526 | struct tsb_ldquad_phys_patch_entry *pquad; | 
| David S. Miller | 517af33 | 2006-02-01 15:55:21 -0800 | [diff] [blame] | 1527 | struct tsb_phys_patch_entry *p; | 
|  | 1528 |  | 
| David S. Miller | d257d5d | 2006-02-06 23:44:37 -0800 | [diff] [blame] | 1529 | pquad = &__tsb_ldquad_phys_patch; | 
|  | 1530 | while (pquad < &__tsb_ldquad_phys_patch_end) { | 
|  | 1531 | unsigned long addr = pquad->addr; | 
|  | 1532 |  | 
|  | 1533 | if (tlb_type == hypervisor) | 
|  | 1534 | *(unsigned int *) addr = pquad->sun4v_insn; | 
|  | 1535 | else | 
|  | 1536 | *(unsigned int *) addr = pquad->sun4u_insn; | 
|  | 1537 | wmb(); | 
|  | 1538 | __asm__ __volatile__("flush	%0" | 
|  | 1539 | : /* no outputs */ | 
|  | 1540 | : "r" (addr)); | 
|  | 1541 |  | 
|  | 1542 | pquad++; | 
|  | 1543 | } | 
|  | 1544 |  | 
| David S. Miller | 517af33 | 2006-02-01 15:55:21 -0800 | [diff] [blame] | 1545 | p = &__tsb_phys_patch; | 
|  | 1546 | while (p < &__tsb_phys_patch_end) { | 
|  | 1547 | unsigned long addr = p->addr; | 
|  | 1548 |  | 
|  | 1549 | *(unsigned int *) addr = p->insn; | 
|  | 1550 | wmb(); | 
|  | 1551 | __asm__ __volatile__("flush	%0" | 
|  | 1552 | : /* no outputs */ | 
|  | 1553 | : "r" (addr)); | 
|  | 1554 |  | 
|  | 1555 | p++; | 
|  | 1556 | } | 
|  | 1557 | } | 
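|  |  |  | 
|  |  | /* Patching protocol used above: store the replacement instruction, | 
|  |  | * order the store with wmb(), then execute a "flush" on the | 
|  |  | * patched address so the instruction cache observes the new opcode | 
|  |  | * before the code can next run. | 
|  |  | */ | 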
|  | 1558 |  | 
| David S. Miller | 490384e | 2006-02-11 14:41:18 -0800 | [diff] [blame] | 1559 | /* Don't mark as init, we give this to the Hypervisor.  */ | 
| David S. Miller | d1acb42 | 2007-03-16 17:20:28 -0700 | [diff] [blame] | 1560 | #ifndef CONFIG_DEBUG_PAGEALLOC | 
|  | 1561 | #define NUM_KTSB_DESCR	2 | 
|  | 1562 | #else | 
|  | 1563 | #define NUM_KTSB_DESCR	1 | 
|  | 1564 | #endif | 
|  | 1565 | static struct hv_tsb_descr ktsb_descr[NUM_KTSB_DESCR]; | 
| David S. Miller | 490384e | 2006-02-11 14:41:18 -0800 | [diff] [blame] | 1566 | extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES]; | 
|  | 1567 |  | 
|  | 1568 | static void __init sun4v_ktsb_init(void) | 
|  | 1569 | { | 
|  | 1570 | unsigned long ktsb_pa; | 
|  | 1571 |  | 
| David S. Miller | d7744a0 | 2006-02-21 22:31:11 -0800 | [diff] [blame] | 1572 | /* First KTSB for PAGE_SIZE mappings.  */ | 
| David S. Miller | 490384e | 2006-02-11 14:41:18 -0800 | [diff] [blame] | 1573 | ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE); | 
|  | 1574 |  | 
|  | 1575 | switch (PAGE_SIZE) { | 
|  | 1576 | case 8 * 1024: | 
|  | 1577 | default: | 
|  | 1578 | ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_8K; | 
|  | 1579 | ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_8K; | 
|  | 1580 | break; | 
|  | 1581 |  | 
|  | 1582 | case 64 * 1024: | 
|  | 1583 | ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_64K; | 
|  | 1584 | ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_64K; | 
|  | 1585 | break; | 
|  | 1586 |  | 
|  | 1587 | case 512 * 1024: | 
|  | 1588 | ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_512K; | 
|  | 1589 | ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_512K; | 
|  | 1590 | break; | 
|  | 1591 |  | 
|  | 1592 | case 4 * 1024 * 1024: | 
|  | 1593 | ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_4MB; | 
|  | 1594 | ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_4MB; | 
|  | 1595 | break; | 
|  | 1596 | } | 
|  | 1597 |  | 
| David S. Miller | 3f19a84 | 2006-02-17 12:03:20 -0800 | [diff] [blame] | 1598 | ktsb_descr[0].assoc = 1; | 
| David S. Miller | 490384e | 2006-02-11 14:41:18 -0800 | [diff] [blame] | 1599 | ktsb_descr[0].num_ttes = KERNEL_TSB_NENTRIES; | 
|  | 1600 | ktsb_descr[0].ctx_idx = 0; | 
|  | 1601 | ktsb_descr[0].tsb_base = ktsb_pa; | 
|  | 1602 | ktsb_descr[0].resv = 0; | 
|  | 1603 |  | 
| David S. Miller | d1acb42 | 2007-03-16 17:20:28 -0700 | [diff] [blame] | 1604 | #ifndef CONFIG_DEBUG_PAGEALLOC | 
| David S. Miller | d7744a0 | 2006-02-21 22:31:11 -0800 | [diff] [blame] | 1605 | /* Second KTSB for 4MB/256MB mappings.  */ | 
|  | 1606 | ktsb_pa = (kern_base + | 
|  | 1607 | ((unsigned long)&swapper_4m_tsb[0] - KERNBASE)); | 
|  | 1608 |  | 
|  | 1609 | ktsb_descr[1].pgsz_idx = HV_PGSZ_IDX_4MB; | 
|  | 1610 | ktsb_descr[1].pgsz_mask = (HV_PGSZ_MASK_4MB | | 
|  | 1611 | HV_PGSZ_MASK_256MB); | 
|  | 1612 | ktsb_descr[1].assoc = 1; | 
|  | 1613 | ktsb_descr[1].num_ttes = KERNEL_TSB4M_NENTRIES; | 
|  | 1614 | ktsb_descr[1].ctx_idx = 0; | 
|  | 1615 | ktsb_descr[1].tsb_base = ktsb_pa; | 
|  | 1616 | ktsb_descr[1].resv = 0; | 
| David S. Miller | d1acb42 | 2007-03-16 17:20:28 -0700 | [diff] [blame] | 1617 | #endif | 
| David S. Miller | 490384e | 2006-02-11 14:41:18 -0800 | [diff] [blame] | 1618 | } | 
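|  |  |  | 
|  |  | /* Descriptor 0 always covers the base PAGE_SIZE translations; | 
|  |  | * unless CONFIG_DEBUG_PAGEALLOC shrinks NUM_KTSB_DESCR to 1, | 
|  |  | * descriptor 1 covers the 4MB/256MB linear kernel mappings.  Both | 
|  |  | * are handed to the hypervisor by sun4v_ktsb_register() below. | 
|  |  | */ | 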
|  | 1619 |  | 
|  | 1620 | void __cpuinit sun4v_ktsb_register(void) | 
|  | 1621 | { | 
| David S. Miller | 7db35f3 | 2007-05-29 02:22:14 -0700 | [diff] [blame] | 1622 | unsigned long pa, ret; | 
| David S. Miller | 490384e | 2006-02-11 14:41:18 -0800 | [diff] [blame] | 1623 |  | 
|  | 1624 | pa = kern_base + ((unsigned long)&ktsb_descr[0] - KERNBASE); | 
|  | 1625 |  | 
| David S. Miller | 7db35f3 | 2007-05-29 02:22:14 -0700 | [diff] [blame] | 1626 | ret = sun4v_mmu_tsb_ctx0(NUM_KTSB_DESCR, pa); | 
|  | 1627 | if (ret != 0) { | 
|  | 1628 | prom_printf("hypervisor_mmu_tsb_ctx0[%lx]: " | 
|  | 1629 | "failed with error %lx\n", pa, ret); | 
|  | 1630 | prom_halt(); | 
|  | 1631 | } | 
| David S. Miller | 490384e | 2006-02-11 14:41:18 -0800 | [diff] [blame] | 1632 | } | 
|  | 1633 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1634 | /* paging_init() sets up the page tables */ | 
|  | 1635 |  | 
| David S. Miller | 5cbc307 | 2007-05-25 15:49:59 -0700 | [diff] [blame] | 1636 | extern void central_probe(void); | 
|  | 1637 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1638 | static unsigned long last_valid_pfn; | 
| David S. Miller | 5642530 | 2005-09-25 16:46:57 -0700 | [diff] [blame] | 1639 | pgd_t swapper_pg_dir[2048]; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1640 |  | 
| David S. Miller | c4bce90 | 2006-02-11 21:57:54 -0800 | [diff] [blame] | 1641 | static void sun4u_pgprot_init(void); | 
|  | 1642 | static void sun4v_pgprot_init(void); | 
|  | 1643 |  | 
| travis@sgi.com | 3afc620 | 2008-01-30 23:27:58 +0100 | [diff] [blame] | 1644 | /* Dummy function */ | 
|  | 1645 | void __init setup_per_cpu_areas(void) | 
|  | 1646 | { | 
|  | 1647 | } | 
|  | 1648 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1649 | void __init paging_init(void) | 
|  | 1650 | { | 
| David S. Miller | 919ee67 | 2008-04-23 05:40:25 -0700 | [diff] [blame] | 1651 | unsigned long end_pfn, shift, phys_base; | 
| David S. Miller | 0836a0e | 2005-09-28 21:38:08 -0700 | [diff] [blame] | 1652 | unsigned long real_end, i; | 
|  | 1653 |  | 
| David S. Miller | 22adb35 | 2007-05-26 01:14:43 -0700 | [diff] [blame] | 1654 | /* These build time checks make sure that the dcache_dirty_cpu() | 
|  | 1655 | * page->flags usage will work. | 
|  | 1656 | * | 
|  | 1657 | * When a page gets marked as dcache-dirty, we store the | 
|  | 1658 | * cpu number starting at bit 32 in the page->flags.  Also, | 
|  | 1659 | * functions like clear_dcache_dirty_cpu use the cpu mask | 
|  | 1660 | * in 13-bit signed-immediate instruction fields. | 
|  | 1661 | */ | 
| Christoph Lameter | 9223b41 | 2008-04-28 02:12:48 -0700 | [diff] [blame] | 1662 |  | 
|  | 1663 | /* | 
|  | 1664 | * Page flags must not reach into upper 32 bits that are used | 
|  | 1665 | * for the cpu number | 
|  | 1666 | */ | 
|  | 1667 | BUILD_BUG_ON(NR_PAGEFLAGS > 32); | 
|  | 1668 |  | 
|  | 1669 | /* | 
|  | 1670 | * The bit fields placed in the high range must not reach below | 
|  | 1671 | * the 32 bit boundary. Otherwise we cannot place the cpu field | 
|  | 1672 | * at the 32 bit boundary. | 
|  | 1673 | */ | 
| David S. Miller | 22adb35 | 2007-05-26 01:14:43 -0700 | [diff] [blame] | 1674 | BUILD_BUG_ON(SECTIONS_WIDTH + NODES_WIDTH + ZONES_WIDTH + | 
| Christoph Lameter | 9223b41 | 2008-04-28 02:12:48 -0700 | [diff] [blame] | 1675 | ilog2(roundup_pow_of_two(NR_CPUS)) > 32); | 
|  | 1676 |  | 
| David S. Miller | 22adb35 | 2007-05-26 01:14:43 -0700 | [diff] [blame] | 1677 | BUILD_BUG_ON(NR_CPUS > 4096); | 
|  | 1678 |  | 
| David S. Miller | 481295f | 2006-02-07 21:51:08 -0800 | [diff] [blame] | 1679 | kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL; | 
|  | 1680 | kern_size = (unsigned long)&_end - (unsigned long)KERNBASE; | 
|  | 1681 |  | 
| David S. Miller | 22d6a1c | 2007-05-25 00:37:12 -0700 | [diff] [blame] | 1682 | sstate_booting(); | 
|  | 1683 |  | 
| David S. Miller | d7744a0 | 2006-02-21 22:31:11 -0800 | [diff] [blame] | 1684 | /* Invalidate both kernel TSBs.  */ | 
| David S. Miller | 8b23427 | 2006-02-17 18:01:02 -0800 | [diff] [blame] | 1685 | memset(swapper_tsb, 0x40, sizeof(swapper_tsb)); | 
| David S. Miller | d1acb42 | 2007-03-16 17:20:28 -0700 | [diff] [blame] | 1686 | #ifndef CONFIG_DEBUG_PAGEALLOC | 
| David S. Miller | d7744a0 | 2006-02-21 22:31:11 -0800 | [diff] [blame] | 1687 | memset(swapper_4m_tsb, 0x40, sizeof(swapper_4m_tsb)); | 
| David S. Miller | d1acb42 | 2007-03-16 17:20:28 -0700 | [diff] [blame] | 1688 | #endif | 
| David S. Miller | 8b23427 | 2006-02-17 18:01:02 -0800 | [diff] [blame] | 1689 |  | 
| David S. Miller | c4bce90 | 2006-02-11 21:57:54 -0800 | [diff] [blame] | 1690 | if (tlb_type == hypervisor) | 
|  | 1691 | sun4v_pgprot_init(); | 
|  | 1692 | else | 
|  | 1693 | sun4u_pgprot_init(); | 
|  | 1694 |  | 
| David S. Miller | d257d5d | 2006-02-06 23:44:37 -0800 | [diff] [blame] | 1695 | if (tlb_type == cheetah_plus || | 
|  | 1696 | tlb_type == hypervisor) | 
| David S. Miller | 517af33 | 2006-02-01 15:55:21 -0800 | [diff] [blame] | 1697 | tsb_phys_patch(); | 
|  | 1698 |  | 
| David S. Miller | 490384e | 2006-02-11 14:41:18 -0800 | [diff] [blame] | 1699 | if (tlb_type == hypervisor) { | 
| David S. Miller | d257d5d | 2006-02-06 23:44:37 -0800 | [diff] [blame] | 1700 | sun4v_patch_tlb_handlers(); | 
| David S. Miller | 490384e | 2006-02-11 14:41:18 -0800 | [diff] [blame] | 1701 | sun4v_ktsb_init(); | 
|  | 1702 | } | 
| David S. Miller | d257d5d | 2006-02-06 23:44:37 -0800 | [diff] [blame] | 1703 |  | 
| David S. Miller | 3b2a7e2 | 2008-02-13 18:13:20 -0800 | [diff] [blame] | 1704 | lmb_init(); | 
|  | 1705 |  | 
| David S. Miller | a94a172 | 2008-05-11 21:04:48 -0700 | [diff] [blame] | 1706 | /* Find available physical memory... | 
|  | 1707 | * | 
|  | 1708 | * Read it twice in order to work around a bug in openfirmware. | 
|  | 1709 | * The call to grab this table itself can cause openfirmware to | 
|  | 1710 | * allocate memory, which in turn can take away some space from | 
|  | 1711 | * the list of available memory.  Reading it twice makes sure | 
|  | 1712 | * we really do get the final value. | 
|  | 1713 | */ | 
|  | 1714 | read_obp_translations(); | 
|  | 1715 | read_obp_memory("reg", &pall[0], &pall_ents); | 
|  | 1716 | read_obp_memory("available", &pavail[0], &pavail_ents); | 
| David S. Miller | 13edad7 | 2005-09-29 17:58:26 -0700 | [diff] [blame] | 1717 | read_obp_memory("available", &pavail[0], &pavail_ents); | 
| David S. Miller | 0836a0e | 2005-09-28 21:38:08 -0700 | [diff] [blame] | 1718 |  | 
|  | 1719 | phys_base = 0xffffffffffffffffUL; | 
| David S. Miller | 3b2a7e2 | 2008-02-13 18:13:20 -0800 | [diff] [blame] | 1720 | for (i = 0; i < pavail_ents; i++) { | 
| David S. Miller | 13edad7 | 2005-09-29 17:58:26 -0700 | [diff] [blame] | 1721 | phys_base = min(phys_base, pavail[i].phys_addr); | 
| David S. Miller | 3b2a7e2 | 2008-02-13 18:13:20 -0800 | [diff] [blame] | 1722 | lmb_add(pavail[i].phys_addr, pavail[i].reg_size); | 
|  | 1723 | } | 
|  | 1724 |  | 
|  | 1725 | lmb_reserve(kern_base, kern_size); | 
| David S. Miller | 0836a0e | 2005-09-28 21:38:08 -0700 | [diff] [blame] | 1726 |  | 
| David S. Miller | 4e82c9a | 2008-02-13 18:00:03 -0800 | [diff] [blame] | 1727 | find_ramdisk(phys_base); | 
|  | 1728 |  | 
| David S. Miller | f2b6079 | 2008-08-14 01:45:41 -0700 | [diff] [blame] | 1729 | lmb_enforce_memory_limit(cmdline_memory_size); | 
| David S. Miller | 25b0c65 | 2008-02-13 18:20:14 -0800 | [diff] [blame] | 1730 |  | 
| David S. Miller | 3b2a7e2 | 2008-02-13 18:13:20 -0800 | [diff] [blame] | 1731 | lmb_analyze(); | 
|  | 1732 | lmb_dump_all(); | 
|  | 1733 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1734 | set_bit(0, mmu_context_bmap); | 
|  | 1735 |  | 
| David S. Miller | 2bdb3cb | 2005-09-22 01:08:57 -0700 | [diff] [blame] | 1736 | shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE); | 
|  | 1737 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1738 | real_end = (unsigned long)_end; | 
| David S. Miller | 6465874 | 2008-03-21 17:01:38 -0700 | [diff] [blame] | 1739 | num_kernel_image_mappings = DIV_ROUND_UP(real_end - KERNBASE, 1 << 22); | 
|  | 1740 | printk("Kernel: Using %d locked TLB entries for main kernel image.\n", | 
|  | 1741 | num_kernel_image_mappings); | 
| David S. Miller | 2bdb3cb | 2005-09-22 01:08:57 -0700 | [diff] [blame] | 1742 |  | 
|  | 1743 | /* Set kernel pgd to upper alias so physical page computations | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1744 | * work. | 
|  | 1745 | */ | 
|  | 1746 | init_mm.pgd += ((shift) / (sizeof(pgd_t))); | 
|  | 1747 |  | 
| David S. Miller | 5642530 | 2005-09-25 16:46:57 -0700 | [diff] [blame] | 1748 | memset(swapper_low_pmd_dir, 0, sizeof(swapper_low_pmd_dir)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1749 |  | 
|  | 1750 | /* Now we can init the kernel/bad page tables. */ | 
|  | 1751 | pud_set(pud_offset(&swapper_pg_dir[0], 0), | 
| David S. Miller | 5642530 | 2005-09-25 16:46:57 -0700 | [diff] [blame] | 1752 | swapper_low_pmd_dir + (shift / sizeof(pgd_t))); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1753 |  | 
| David S. Miller | c9c1083 | 2005-10-12 12:22:46 -0700 | [diff] [blame] | 1754 | inherit_prom_mappings(); | 
| David S. Miller | 5085b4a | 2005-09-22 00:45:41 -0700 | [diff] [blame] | 1755 |  | 
| David S. Miller | 8f361453 | 2007-12-13 06:13:38 -0800 | [diff] [blame] | 1756 | init_kpte_bitmap(); | 
|  | 1757 |  | 
| David S. Miller | a8b900d | 2006-01-31 18:33:37 -0800 | [diff] [blame] | 1758 | /* Ok, we can use our TLB miss and window trap handlers safely.  */ | 
|  | 1759 | setup_tba(); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1760 |  | 
| David S. Miller | c9c1083 | 2005-10-12 12:22:46 -0700 | [diff] [blame] | 1761 | __flush_tlb_all(); | 
| David S. Miller | 9ad98c5 | 2005-10-05 15:12:00 -0700 | [diff] [blame] | 1762 |  | 
| David S. Miller | 490384e | 2006-02-11 14:41:18 -0800 | [diff] [blame] | 1763 | if (tlb_type == hypervisor) | 
|  | 1764 | sun4v_ktsb_register(); | 
|  | 1765 |  | 
| David S. Miller | b970945 | 2008-02-13 19:20:45 -0800 | [diff] [blame] | 1766 | /* We must set up the per-cpu areas before we pull in the | 
|  | 1767 | * PROM and the MDESC.  The code there fills cpu and | 
|  | 1768 | * other information into per-cpu data structures. | 
|  | 1769 | */ | 
|  | 1770 | real_setup_per_cpu_areas(); | 
|  | 1771 |  | 
| David S. Miller | ad07200 | 2008-02-13 19:21:51 -0800 | [diff] [blame] | 1772 | prom_build_devicetree(); | 
|  | 1773 |  | 
| David S. Miller | 4a28333 | 2008-02-13 19:22:23 -0800 | [diff] [blame] | 1774 | if (tlb_type == hypervisor) | 
|  | 1775 | sun4v_mdesc_init(); | 
|  | 1776 |  | 
| David S. Miller | 4f70f7a | 2008-08-12 18:33:56 -0700 | [diff] [blame] | 1777 | /* Once the OF device tree and MDESC have been set up, we know | 
|  | 1778 | * the list of possible cpus.  Therefore we can allocate the | 
|  | 1779 | * IRQ stacks. | 
|  | 1780 | */ | 
|  | 1781 | for_each_possible_cpu(i) { | 
|  | 1782 | /* XXX Use node local allocations... XXX */ | 
|  | 1783 | softirq_stack[i] = __va(lmb_alloc(THREAD_SIZE, THREAD_SIZE)); | 
|  | 1784 | hardirq_stack[i] = __va(lmb_alloc(THREAD_SIZE, THREAD_SIZE)); | 
|  | 1785 | } | 
|  | 1786 |  | 
| David S. Miller | 2bdb3cb | 2005-09-22 01:08:57 -0700 | [diff] [blame] | 1787 | /* Setup bootmem... */ | 
| David S. Miller | 919ee67 | 2008-04-23 05:40:25 -0700 | [diff] [blame] | 1788 | last_valid_pfn = end_pfn = bootmem_init(phys_base); | 
| David S. Miller | d111201 | 2006-03-08 02:16:07 -0800 | [diff] [blame] | 1789 |  | 
| David S. Miller | 919ee67 | 2008-04-23 05:40:25 -0700 | [diff] [blame] | 1790 | #ifndef CONFIG_NEED_MULTIPLE_NODES | 
| David S. Miller | 17b0e19 | 2006-03-08 15:57:03 -0800 | [diff] [blame] | 1791 | max_mapnr = last_valid_pfn; | 
| David S. Miller | 919ee67 | 2008-04-23 05:40:25 -0700 | [diff] [blame] | 1792 | #endif | 
| David S. Miller | 5642530 | 2005-09-25 16:46:57 -0700 | [diff] [blame] | 1793 | kernel_physical_mapping_init(); | 
| David S. Miller | 5642530 | 2005-09-25 16:46:57 -0700 | [diff] [blame] | 1794 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1795 | { | 
| David S. Miller | 919ee67 | 2008-04-23 05:40:25 -0700 | [diff] [blame] | 1796 | unsigned long max_zone_pfns[MAX_NR_ZONES]; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1797 |  | 
| David S. Miller | 919ee67 | 2008-04-23 05:40:25 -0700 | [diff] [blame] | 1798 | memset(max_zone_pfns, 0, sizeof(max_zone_pfns)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1799 |  | 
| David S. Miller | 919ee67 | 2008-04-23 05:40:25 -0700 | [diff] [blame] | 1800 | max_zone_pfns[ZONE_NORMAL] = end_pfn; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1801 |  | 
| David S. Miller | 919ee67 | 2008-04-23 05:40:25 -0700 | [diff] [blame] | 1802 | free_area_init_nodes(max_zone_pfns); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1803 | } | 
|  | 1804 |  | 
| David S. Miller | 3c62a2d | 2008-02-17 23:22:50 -0800 | [diff] [blame] | 1805 | printk("Booting Linux...\n"); | 
| David S. Miller | 5cbc307 | 2007-05-25 15:49:59 -0700 | [diff] [blame] | 1806 |  | 
|  | 1807 | central_probe(); | 
|  | 1808 | cpu_probe(); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1809 | } | 
|  | 1810 |  | 
| David S. Miller | 919ee67 | 2008-04-23 05:40:25 -0700 | [diff] [blame] | 1811 | int __init page_in_phys_avail(unsigned long paddr) | 
|  | 1812 | { | 
|  | 1813 | int i; | 
|  | 1814 |  | 
|  | 1815 | paddr &= PAGE_MASK; | 
|  | 1816 |  | 
|  | 1817 | for (i = 0; i < pavail_ents; i++) { | 
|  | 1818 | unsigned long start, end; | 
|  | 1819 |  | 
|  | 1820 | start = pavail[i].phys_addr; | 
|  | 1821 | end = start + pavail[i].reg_size; | 
|  | 1822 |  | 
|  | 1823 | if (paddr >= start && paddr < end) | 
|  | 1824 | return 1; | 
|  | 1825 | } | 
|  | 1826 | if (paddr >= kern_base && paddr < (kern_base + kern_size)) | 
|  | 1827 | return 1; | 
|  | 1828 | #ifdef CONFIG_BLK_DEV_INITRD | 
|  | 1829 | if (paddr >= __pa(initrd_start) && | 
|  | 1830 | paddr < __pa(PAGE_ALIGN(initrd_end))) | 
|  | 1831 | return 1; | 
|  | 1832 | #endif | 
|  | 1833 |  | 
|  | 1834 | return 0; | 
|  | 1835 | } | 
|  | 1836 |  | 
|  | 1837 | static struct linux_prom64_registers pavail_rescan[MAX_BANKS] __initdata; | 
|  | 1838 | static int pavail_rescan_ents __initdata; | 
|  | 1839 |  | 
|  | 1840 | /* Certain OBP calls, such as fetching "available" properties, can | 
|  | 1841 | * claim physical memory.  So, along with initializing the valid | 
|  | 1842 | * address bitmap, we refetch the available physical memory | 
|  | 1843 | * list here and make sure it provides at least as much | 
|  | 1844 | * memory as 'pavail' does. | 
|  | 1845 | */ | 
| David S. Miller | dbb8c35 | 2008-08-30 02:04:45 -0700 | [diff] [blame] | 1846 | static void __init setup_valid_addr_bitmap_from_pavail(void) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1847 | { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1848 | int i; | 
|  | 1849 |  | 
| David S. Miller | 13edad7 | 2005-09-29 17:58:26 -0700 | [diff] [blame] | 1850 | read_obp_memory("available", &pavail_rescan[0], &pavail_rescan_ents); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1851 |  | 
| David S. Miller | 13edad7 | 2005-09-29 17:58:26 -0700 | [diff] [blame] | 1852 | for (i = 0; i < pavail_ents; i++) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1853 | unsigned long old_start, old_end; | 
|  | 1854 |  | 
| David S. Miller | 13edad7 | 2005-09-29 17:58:26 -0700 | [diff] [blame] | 1855 | old_start = pavail[i].phys_addr; | 
| David S. Miller | 919ee67 | 2008-04-23 05:40:25 -0700 | [diff] [blame] | 1856 | old_end = old_start + pavail[i].reg_size; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1857 | while (old_start < old_end) { | 
|  | 1858 | int n; | 
|  | 1859 |  | 
| David S. Miller | c2a5a46 | 2006-06-22 00:01:56 -0700 | [diff] [blame] | 1860 | for (n = 0; n < pavail_rescan_ents; n++) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1861 | unsigned long new_start, new_end; | 
|  | 1862 |  | 
| David S. Miller | 13edad7 | 2005-09-29 17:58:26 -0700 | [diff] [blame] | 1863 | new_start = pavail_rescan[n].phys_addr; | 
|  | 1864 | new_end = new_start + | 
|  | 1865 | pavail_rescan[n].reg_size; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1866 |  | 
|  | 1867 | if (new_start <= old_start && | 
|  | 1868 | new_end >= (old_start + PAGE_SIZE)) { | 
| David S. Miller | 13edad7 | 2005-09-29 17:58:26 -0700 | [diff] [blame] | 1869 | set_bit(old_start >> 22, | 
|  | 1870 | sparc64_valid_addr_bitmap); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1871 | goto do_next_page; | 
|  | 1872 | } | 
|  | 1873 | } | 
| David S. Miller | 919ee67 | 2008-04-23 05:40:25 -0700 | [diff] [blame] | 1874 |  | 
|  | 1875 | prom_printf("mem_init: Lost memory in pavail\n"); | 
|  | 1876 | prom_printf("mem_init: OLD start[%lx] size[%lx]\n", | 
|  | 1877 | pavail[i].phys_addr, | 
|  | 1878 | pavail[i].reg_size); | 
|  | 1879 | prom_printf("mem_init: NEW start[%lx] size[%lx]\n", | 
|  | 1880 | pavail_rescan[i].phys_addr, | 
|  | 1881 | pavail_rescan[i].reg_size); | 
|  | 1882 | prom_printf("mem_init: Cannot continue, aborting.\n"); | 
|  | 1883 | prom_halt(); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1884 |  | 
|  | 1885 | do_next_page: | 
|  | 1886 | old_start += PAGE_SIZE; | 
|  | 1887 | } | 
|  | 1888 | } | 
|  | 1889 | } | 
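|  |  |  | 
|  |  | /* The valid address bitmap holds one bit per 4MB (1 << 22) chunk | 
|  |  | * of physical memory.  mem_init() below sizes it accordingly: | 
|  |  | * last_valid_pfn >> (22 - PAGE_SHIFT) is the number of bits, the | 
|  |  | * further >> 6 (plus one) converts that to whole 64-bit words, and | 
|  |  | * i << 3 is the resulting byte count passed to alloc_bootmem(). | 
|  |  | */ | 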
|  | 1890 |  | 
|  | 1891 | void __init mem_init(void) | 
|  | 1892 | { | 
|  | 1893 | unsigned long codepages, datapages, initpages; | 
|  | 1894 | unsigned long addr, last; | 
|  | 1895 | int i; | 
|  | 1896 |  | 
|  | 1897 | i = last_valid_pfn >> ((22 - PAGE_SHIFT) + 6); | 
|  | 1898 | i += 1; | 
| David S. Miller | 2bdb3cb | 2005-09-22 01:08:57 -0700 | [diff] [blame] | 1899 | sparc64_valid_addr_bitmap = (unsigned long *) alloc_bootmem(i << 3); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1900 | if (sparc64_valid_addr_bitmap == NULL) { | 
|  | 1901 | prom_printf("mem_init: Cannot alloc valid_addr_bitmap.\n"); | 
|  | 1902 | prom_halt(); | 
|  | 1903 | } | 
|  | 1904 | memset(sparc64_valid_addr_bitmap, 0, i << 3); | 
|  | 1905 |  | 
|  | 1906 | addr = PAGE_OFFSET + kern_base; | 
|  | 1907 | last = PAGE_ALIGN(kern_size) + addr; | 
|  | 1908 | while (addr < last) { | 
|  | 1909 | set_bit(__pa(addr) >> 22, sparc64_valid_addr_bitmap); | 
|  | 1910 | addr += PAGE_SIZE; | 
|  | 1911 | } | 
|  | 1912 |  | 
| David S. Miller | 919ee67 | 2008-04-23 05:40:25 -0700 | [diff] [blame] | 1913 | setup_valid_addr_bitmap_from_pavail(); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1914 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1915 | high_memory = __va(last_valid_pfn << PAGE_SHIFT); | 
|  | 1916 |  | 
| David S. Miller | 919ee67 | 2008-04-23 05:40:25 -0700 | [diff] [blame] | 1917 | #ifdef CONFIG_NEED_MULTIPLE_NODES | 
|  | 1918 | for_each_online_node(i) { | 
|  | 1919 | if (NODE_DATA(i)->node_spanned_pages != 0) { | 
|  | 1920 | totalram_pages += | 
|  | 1921 | free_all_bootmem_node(NODE_DATA(i)); | 
|  | 1922 | } | 
|  | 1923 | } | 
|  | 1924 | #else | 
|  | 1925 | totalram_pages = free_all_bootmem(); | 
|  | 1926 | #endif | 
|  | 1927 |  | 
| David S. Miller | f1cfdb5 | 2007-03-15 22:52:18 -0700 | [diff] [blame] | 1928 | /* We subtract one to account for the mem_map_zero page | 
|  | 1929 | * allocated below. | 
|  | 1930 | */ | 
| David S. Miller | 919ee67 | 2008-04-23 05:40:25 -0700 | [diff] [blame] | 1931 | totalram_pages -= 1; | 
|  | 1932 | num_physpages = totalram_pages; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1933 |  | 
|  | 1934 | /* | 
|  | 1935 | * Set up the zero page, mark it reserved, so that page count | 
|  | 1936 | * is not manipulated when freeing the page from user ptes. | 
|  | 1937 | */ | 
|  | 1938 | mem_map_zero = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0); | 
|  | 1939 | if (mem_map_zero == NULL) { | 
|  | 1940 | prom_printf("paging_init: Cannot alloc zero page.\n"); | 
|  | 1941 | prom_halt(); | 
|  | 1942 | } | 
|  | 1943 | SetPageReserved(mem_map_zero); | 
|  | 1944 |  | 
|  | 1945 | codepages = (((unsigned long) _etext) - ((unsigned long) _start)); | 
|  | 1946 | codepages = PAGE_ALIGN(codepages) >> PAGE_SHIFT; | 
|  | 1947 | datapages = (((unsigned long) _edata) - ((unsigned long) _etext)); | 
|  | 1948 | datapages = PAGE_ALIGN(datapages) >> PAGE_SHIFT; | 
|  | 1949 | initpages = (((unsigned long) __init_end) - ((unsigned long) __init_begin)); | 
|  | 1950 | initpages = PAGE_ALIGN(initpages) >> PAGE_SHIFT; | 
|  | 1951 |  | 
| Christoph Lameter | 9617729 | 2007-02-10 01:43:03 -0800 | [diff] [blame] | 1952 | printk("Memory: %luk available (%ldk kernel code, %ldk data, %ldk init) [%016lx,%016lx]\n", | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1953 | nr_free_pages() << (PAGE_SHIFT-10), | 
|  | 1954 | codepages << (PAGE_SHIFT-10), | 
|  | 1955 | datapages << (PAGE_SHIFT-10), | 
|  | 1956 | initpages << (PAGE_SHIFT-10), | 
|  | 1957 | PAGE_OFFSET, (last_valid_pfn << PAGE_SHIFT)); | 
|  | 1958 |  | 
|  | 1959 | if (tlb_type == cheetah || tlb_type == cheetah_plus) | 
|  | 1960 | cheetah_ecache_flush_init(); | 
|  | 1961 | } | 
|  | 1962 |  | 
| David S. Miller | 898cf0e | 2005-09-23 11:59:44 -0700 | [diff] [blame] | 1963 | void free_initmem(void) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1964 | { | 
|  | 1965 | unsigned long addr, initend; | 
| David S. Miller | f2b6079 | 2008-08-14 01:45:41 -0700 | [diff] [blame] | 1966 | int do_free = 1; | 
|  | 1967 |  | 
|  | 1968 | /* If the physical memory maps were trimmed by kernel command | 
|  | 1969 | * line options, don't even try freeing this initmem stuff up. | 
|  | 1970 | * The kernel image could have been in the trimmed out region | 
|  | 1971 | * and if so the freeing below will free invalid page structs. | 
|  | 1972 | */ | 
|  | 1973 | if (cmdline_memory_size) | 
|  | 1974 | do_free = 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1975 |  | 
|  | 1976 | /* | 
|  | 1977 | * The init section is aligned to 8k in vmlinux.lds. Page align for >8k pagesizes. | 
|  | 1978 | */ | 
|  | 1979 | addr = PAGE_ALIGN((unsigned long)(__init_begin)); | 
|  | 1980 | initend = (unsigned long)(__init_end) & PAGE_MASK; | 
|  | 1981 | for (; addr < initend; addr += PAGE_SIZE) { | 
|  | 1982 | unsigned long page; | 
|  | 1983 | struct page *p; | 
|  | 1984 |  | 
|  | 1985 | page = (addr + | 
|  | 1986 | ((unsigned long) __va(kern_base)) - | 
|  | 1987 | ((unsigned long) KERNBASE)); | 
| Randy Dunlap | c9cf552 | 2006-06-27 02:53:52 -0700 | [diff] [blame] | 1988 | memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1989 |  | 
| David S. Miller | f2b6079 | 2008-08-14 01:45:41 -0700 | [diff] [blame] | 1990 | if (do_free) { | 
|  | 1991 | p = virt_to_page(page); | 
|  | 1992 |  | 
|  | 1993 | ClearPageReserved(p); | 
|  | 1994 | init_page_count(p); | 
|  | 1995 | __free_page(p); | 
|  | 1996 | num_physpages++; | 
|  | 1997 | totalram_pages++; | 
|  | 1998 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1999 | } | 
|  | 2000 | } | 
|  | 2001 |  | 
|  | 2002 | #ifdef CONFIG_BLK_DEV_INITRD | 
|  | 2003 | void free_initrd_mem(unsigned long start, unsigned long end) | 
|  | 2004 | { | 
|  | 2005 | if (start < end) | 
|  | 2006 | printk("Freeing initrd memory: %ldk freed\n", (end - start) >> 10); | 
|  | 2007 | for (; start < end; start += PAGE_SIZE) { | 
|  | 2008 | struct page *p = virt_to_page(start); | 
|  | 2009 |  | 
|  | 2010 | ClearPageReserved(p); | 
| Nick Piggin | 7835e98 | 2006-03-22 00:08:40 -0800 | [diff] [blame] | 2011 | init_page_count(p); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2012 | __free_page(p); | 
|  | 2013 | num_physpages++; | 
|  | 2014 | totalram_pages++; | 
|  | 2015 | } | 
|  | 2016 | } | 
|  | 2017 | #endif | 
| David S. Miller | c4bce90 | 2006-02-11 21:57:54 -0800 | [diff] [blame] | 2018 |  | 
| David S. Miller | c4bce90 | 2006-02-11 21:57:54 -0800 | [diff] [blame] | 2019 | #define _PAGE_CACHE_4U	(_PAGE_CP_4U | _PAGE_CV_4U) | 
|  | 2020 | #define _PAGE_CACHE_4V	(_PAGE_CP_4V | _PAGE_CV_4V) | 
|  | 2021 | #define __DIRTY_BITS_4U	 (_PAGE_MODIFIED_4U | _PAGE_WRITE_4U | _PAGE_W_4U) | 
|  | 2022 | #define __DIRTY_BITS_4V	 (_PAGE_MODIFIED_4V | _PAGE_WRITE_4V | _PAGE_W_4V) | 
|  | 2023 | #define __ACCESS_BITS_4U (_PAGE_ACCESSED_4U | _PAGE_READ_4U | _PAGE_R) | 
|  | 2024 | #define __ACCESS_BITS_4V (_PAGE_ACCESSED_4V | _PAGE_READ_4V | _PAGE_R) | 
|  | 2025 |  | 
|  | 2026 | pgprot_t PAGE_KERNEL __read_mostly; | 
|  | 2027 | EXPORT_SYMBOL(PAGE_KERNEL); | 
|  | 2028 |  | 
|  | 2029 | pgprot_t PAGE_KERNEL_LOCKED __read_mostly; | 
|  | 2030 | pgprot_t PAGE_COPY __read_mostly; | 
| David S. Miller | 0f15952 | 2006-02-18 12:43:16 -0800 | [diff] [blame] | 2031 |  | 
|  | 2032 | pgprot_t PAGE_SHARED __read_mostly; | 
|  | 2033 | EXPORT_SYMBOL(PAGE_SHARED); | 
|  | 2034 |  | 
| David S. Miller | c4bce90 | 2006-02-11 21:57:54 -0800 | [diff] [blame] | 2035 | pgprot_t PAGE_EXEC __read_mostly; | 
|  | 2036 | unsigned long pg_iobits __read_mostly; | 
|  | 2037 |  | 
|  | 2038 | unsigned long _PAGE_IE __read_mostly; | 
| David S. Miller | 987c74f | 2006-06-25 01:34:43 -0700 | [diff] [blame] | 2039 | EXPORT_SYMBOL(_PAGE_IE); | 
| David S. Miller | b2bef44 | 2006-02-23 01:55:55 -0800 | [diff] [blame] | 2040 |  | 
| David S. Miller | c4bce90 | 2006-02-11 21:57:54 -0800 | [diff] [blame] | 2041 | unsigned long _PAGE_E __read_mostly; | 
| David S. Miller | b2bef44 | 2006-02-23 01:55:55 -0800 | [diff] [blame] | 2042 | EXPORT_SYMBOL(_PAGE_E); | 
|  | 2043 |  | 
| David S. Miller | c4bce90 | 2006-02-11 21:57:54 -0800 | [diff] [blame] | 2044 | unsigned long _PAGE_CACHE __read_mostly; | 
| David S. Miller | b2bef44 | 2006-02-23 01:55:55 -0800 | [diff] [blame] | 2045 | EXPORT_SYMBOL(_PAGE_CACHE); | 
| David S. Miller | c4bce90 | 2006-02-11 21:57:54 -0800 | [diff] [blame] | 2046 |  | 
| David Miller | 46644c2 | 2007-10-16 01:24:16 -0700 | [diff] [blame] | 2047 | #ifdef CONFIG_SPARSEMEM_VMEMMAP | 
|  | 2048 |  | 
|  | 2049 | #define VMEMMAP_CHUNK_SHIFT	22 | 
|  | 2050 | #define VMEMMAP_CHUNK		(1UL << VMEMMAP_CHUNK_SHIFT) | 
|  | 2051 | #define VMEMMAP_CHUNK_MASK	(~(VMEMMAP_CHUNK - 1UL)) | 
|  | 2052 | #define VMEMMAP_ALIGN(x)	(((x)+VMEMMAP_CHUNK-1UL)&VMEMMAP_CHUNK_MASK) | 
|  | 2053 |  | 
|  | 2054 | #define VMEMMAP_SIZE	((((1UL << MAX_PHYSADDR_BITS) >> PAGE_SHIFT) * \ | 
|  | 2055 | sizeof(struct page *)) >> VMEMMAP_CHUNK_SHIFT) | 
|  | 2056 | unsigned long vmemmap_table[VMEMMAP_SIZE]; | 
|  | 2057 |  | 
|  | 2058 | int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node) | 
|  | 2059 | { | 
|  | 2060 | unsigned long vstart = (unsigned long) start; | 
|  | 2061 | unsigned long vend = (unsigned long) (start + nr); | 
|  | 2062 | unsigned long phys_start = (vstart - VMEMMAP_BASE); | 
|  | 2063 | unsigned long phys_end = (vend - VMEMMAP_BASE); | 
|  | 2064 | unsigned long addr = phys_start & VMEMMAP_CHUNK_MASK; | 
|  | 2065 | unsigned long end = VMEMMAP_ALIGN(phys_end); | 
|  | 2066 | unsigned long pte_base; | 
|  | 2067 |  | 
|  | 2068 | pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4U | | 
|  | 2069 | _PAGE_CP_4U | _PAGE_CV_4U | | 
|  | 2070 | _PAGE_P_4U | _PAGE_W_4U); | 
|  | 2071 | if (tlb_type == hypervisor) | 
|  | 2072 | pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4V | | 
|  | 2073 | _PAGE_CP_4V | _PAGE_CV_4V | | 
|  | 2074 | _PAGE_P_4V | _PAGE_W_4V); | 
|  | 2075 |  | 
|  | 2076 | for (; addr < end; addr += VMEMMAP_CHUNK) { | 
|  | 2077 | unsigned long *vmem_pp = | 
|  | 2078 | vmemmap_table + (addr >> VMEMMAP_CHUNK_SHIFT); | 
|  | 2079 | void *block; | 
|  | 2080 |  | 
|  | 2081 | if (!(*vmem_pp & _PAGE_VALID)) { | 
|  | 2082 | block = vmemmap_alloc_block(1UL << 22, node); | 
|  | 2083 | if (!block) | 
|  | 2084 | return -ENOMEM; | 
|  | 2085 |  | 
|  | 2086 | *vmem_pp = pte_base | __pa(block); | 
|  | 2087 |  | 
|  | 2088 | printk(KERN_INFO "[%p-%p] page_structs=%lu " | 
|  | 2089 | "node=%d entry=%lu/%lu\n", start, block, nr, | 
|  | 2090 | node, | 
|  | 2091 | addr >> VMEMMAP_CHUNK_SHIFT, | 
|  | 2092 | VMEMMAP_SIZE >> VMEMMAP_CHUNK_SHIFT); | 
|  | 2093 | } | 
|  | 2094 | } | 
|  | 2095 | return 0; | 
|  | 2096 | } | 
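|  |  |  | 
|  |  | /* vmemmap_table[] holds one 4MB TTE per VMEMMAP_CHUNK of the | 
|  |  | * virtual memmap; the TLB miss handler presumably indexes it with | 
|  |  | * (vaddr - VMEMMAP_BASE) >> VMEMMAP_CHUNK_SHIFT, the same index | 
|  |  | * computed for vmem_pp above. | 
|  |  | */ | 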
|  | 2097 | #endif /* CONFIG_SPARSEMEM_VMEMMAP */ | 
|  | 2098 |  | 
| David S. Miller | c4bce90 | 2006-02-11 21:57:54 -0800 | [diff] [blame] | 2099 | static void prot_init_common(unsigned long page_none, | 
|  | 2100 | unsigned long page_shared, | 
|  | 2101 | unsigned long page_copy, | 
|  | 2102 | unsigned long page_readonly, | 
|  | 2103 | unsigned long page_exec_bit) | 
|  | 2104 | { | 
|  | 2105 | PAGE_COPY = __pgprot(page_copy); | 
| David S. Miller | 0f15952 | 2006-02-18 12:43:16 -0800 | [diff] [blame] | 2106 | PAGE_SHARED = __pgprot(page_shared); | 
| David S. Miller | c4bce90 | 2006-02-11 21:57:54 -0800 | [diff] [blame] | 2107 |  | 
|  | 2108 | protection_map[0x0] = __pgprot(page_none); | 
|  | 2109 | protection_map[0x1] = __pgprot(page_readonly & ~page_exec_bit); | 
|  | 2110 | protection_map[0x2] = __pgprot(page_copy & ~page_exec_bit); | 
|  | 2111 | protection_map[0x3] = __pgprot(page_copy & ~page_exec_bit); | 
|  | 2112 | protection_map[0x4] = __pgprot(page_readonly); | 
|  | 2113 | protection_map[0x5] = __pgprot(page_readonly); | 
|  | 2114 | protection_map[0x6] = __pgprot(page_copy); | 
|  | 2115 | protection_map[0x7] = __pgprot(page_copy); | 
|  | 2116 | protection_map[0x8] = __pgprot(page_none); | 
|  | 2117 | protection_map[0x9] = __pgprot(page_readonly & ~page_exec_bit); | 
|  | 2118 | protection_map[0xa] = __pgprot(page_shared & ~page_exec_bit); | 
|  | 2119 | protection_map[0xb] = __pgprot(page_shared & ~page_exec_bit); | 
|  | 2120 | protection_map[0xc] = __pgprot(page_readonly); | 
|  | 2121 | protection_map[0xd] = __pgprot(page_readonly); | 
|  | 2122 | protection_map[0xe] = __pgprot(page_shared); | 
|  | 2123 | protection_map[0xf] = __pgprot(page_shared); | 
|  | 2124 | } | 
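|  |  |  | 
|  |  | /* protection_map[] is indexed by the low four vm_flags bits | 
|  |  | * (VM_READ, VM_WRITE, VM_EXEC, VM_SHARED), so entries 0x0-0x7 are | 
|  |  | * the private (copy-on-write) variants and 0x8-0xf the shared | 
|  |  | * ones; that is why page_copy fills the writable slots in the low | 
|  |  | * half and page_shared the corresponding slots in the high half. | 
|  |  | */ | 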
|  | 2125 |  | 
|  | 2126 | static void __init sun4u_pgprot_init(void) | 
|  | 2127 | { | 
|  | 2128 | unsigned long page_none, page_shared, page_copy, page_readonly; | 
|  | 2129 | unsigned long page_exec_bit; | 
|  | 2130 |  | 
|  | 2131 | PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID | | 
|  | 2132 | _PAGE_CACHE_4U | _PAGE_P_4U | | 
|  | 2133 | __ACCESS_BITS_4U | __DIRTY_BITS_4U | | 
|  | 2134 | _PAGE_EXEC_4U); | 
|  | 2135 | PAGE_KERNEL_LOCKED = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID | | 
|  | 2136 | _PAGE_CACHE_4U | _PAGE_P_4U | | 
|  | 2137 | __ACCESS_BITS_4U | __DIRTY_BITS_4U | | 
|  | 2138 | _PAGE_EXEC_4U | _PAGE_L_4U); | 
|  | 2139 | PAGE_EXEC = __pgprot(_PAGE_EXEC_4U); | 
|  | 2140 |  | 
|  | 2141 | _PAGE_IE = _PAGE_IE_4U; | 
|  | 2142 | _PAGE_E = _PAGE_E_4U; | 
|  | 2143 | _PAGE_CACHE = _PAGE_CACHE_4U; | 
|  | 2144 |  | 
|  | 2145 | pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4U | __DIRTY_BITS_4U | | 
|  | 2146 | __ACCESS_BITS_4U | _PAGE_E_4U); | 
|  | 2147 |  | 
| David S. Miller | d1acb42 | 2007-03-16 17:20:28 -0700 | [diff] [blame] | 2148 | #ifdef CONFIG_DEBUG_PAGEALLOC | 
|  | 2149 | kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZBITS_4U) ^ | 
|  | 2150 | 0xfffff80000000000; | 
|  | 2151 | #else | 
| David S. Miller | 9cc3a1a | 2006-02-21 20:51:13 -0800 | [diff] [blame] | 2152 | kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4U) ^ | 
| David S. Miller | c4bce90 | 2006-02-11 21:57:54 -0800 | [diff] [blame] | 2153 | 0xfffff80000000000; | 
| David S. Miller | d1acb42 | 2007-03-16 17:20:28 -0700 | [diff] [blame] | 2154 | #endif | 
| David S. Miller | 9cc3a1a | 2006-02-21 20:51:13 -0800 | [diff] [blame] | 2155 | kern_linear_pte_xor[0] |= (_PAGE_CP_4U | _PAGE_CV_4U | | 
|  | 2156 | _PAGE_P_4U | _PAGE_W_4U); | 
|  | 2157 |  | 
|  | 2158 | /* XXX Should use 256MB on Panther. XXX */ | 
|  | 2159 | kern_linear_pte_xor[1] = kern_linear_pte_xor[0]; | 
| David S. Miller | c4bce90 | 2006-02-11 21:57:54 -0800 | [diff] [blame] | 2160 |  | 
|  | 2161 | _PAGE_SZBITS = _PAGE_SZBITS_4U; | 
|  | 2162 | _PAGE_ALL_SZ_BITS =  (_PAGE_SZ4MB_4U | _PAGE_SZ512K_4U | | 
|  | 2163 | _PAGE_SZ64K_4U | _PAGE_SZ8K_4U | | 
|  | 2164 | _PAGE_SZ32MB_4U | _PAGE_SZ256MB_4U); | 
|  | 2165 |  | 
|  | 2166 |  | 
|  | 2167 | page_none = _PAGE_PRESENT_4U | _PAGE_ACCESSED_4U | _PAGE_CACHE_4U; | 
|  | 2168 | page_shared = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U | | 
|  | 2169 | __ACCESS_BITS_4U | _PAGE_WRITE_4U | _PAGE_EXEC_4U); | 
|  | 2170 | page_copy   = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U | | 
|  | 2171 | __ACCESS_BITS_4U | _PAGE_EXEC_4U); | 
|  | 2172 | page_readonly   = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U | | 
|  | 2173 | __ACCESS_BITS_4U | _PAGE_EXEC_4U); | 
|  | 2174 |  | 
|  | 2175 | page_exec_bit = _PAGE_EXEC_4U; | 
|  | 2176 |  | 
|  | 2177 | prot_init_common(page_none, page_shared, page_copy, page_readonly, | 
|  | 2178 | page_exec_bit); | 
|  | 2179 | } | 
|  | 2180 |  | 
|  | 2181 | static void __init sun4v_pgprot_init(void) | 
|  | 2182 | { | 
|  | 2183 | unsigned long page_none, page_shared, page_copy, page_readonly; | 
|  | 2184 | unsigned long page_exec_bit; | 
|  | 2185 |  | 
|  | 2186 | PAGE_KERNEL = __pgprot(_PAGE_PRESENT_4V | _PAGE_VALID | | 
|  | 2187 | _PAGE_CACHE_4V | _PAGE_P_4V | | 
|  | 2188 | __ACCESS_BITS_4V | __DIRTY_BITS_4V | | 
|  | 2189 | _PAGE_EXEC_4V); | 
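/* sun4v TTEs have no lock bit -- the hypervisor owns the TLB -- so
 * the "locked" kernel protection is simply PAGE_KERNEL here.
 */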
|  | 2190 | PAGE_KERNEL_LOCKED = PAGE_KERNEL; | 
|  | 2191 | PAGE_EXEC = __pgprot(_PAGE_EXEC_4V); | 
|  | 2192 |  | 
|  | 2193 | _PAGE_IE = _PAGE_IE_4V; | 
|  | 2194 | _PAGE_E = _PAGE_E_4V; | 
|  | 2195 | _PAGE_CACHE = _PAGE_CACHE_4V; | 
|  | 2196 |  | 
| David S. Miller | d1acb42 | 2007-03-16 17:20:28 -0700 | [diff] [blame] | 2197 | #ifdef CONFIG_DEBUG_PAGEALLOC | 
|  | 2198 | kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZBITS_4V) ^ | 
|  | 2199 | 0xfffff80000000000; | 
|  | 2200 | #else | 
| David S. Miller | 9cc3a1a | 2006-02-21 20:51:13 -0800 | [diff] [blame] | 2201 | kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4V) ^ | 
| David S. Miller | c4bce90 | 2006-02-11 21:57:54 -0800 | [diff] [blame] | 2202 | 0xfffff80000000000; | 
| David S. Miller | d1acb42 | 2007-03-16 17:20:28 -0700 | [diff] [blame] | 2203 | #endif | 
| David S. Miller | 9cc3a1a | 2006-02-21 20:51:13 -0800 | [diff] [blame] | 2204 | kern_linear_pte_xor[0] |= (_PAGE_CP_4V | _PAGE_CV_4V | | 
|  | 2205 | _PAGE_P_4V | _PAGE_W_4V); | 
|  | 2206 |  | 
| David S. Miller | d1acb42 | 2007-03-16 17:20:28 -0700 | [diff] [blame] | 2207 | #ifdef CONFIG_DEBUG_PAGEALLOC | 
|  | 2208 | kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZBITS_4V) ^ | 
|  | 2209 | 0xfffff80000000000; | 
|  | 2210 | #else | 
| David S. Miller | 9cc3a1a | 2006-02-21 20:51:13 -0800 | [diff] [blame] | 2211 | kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZ256MB_4V) ^ | 
|  | 2212 | 0xfffff80000000000; | 
| David S. Miller | d1acb42 | 2007-03-16 17:20:28 -0700 | [diff] [blame] | 2213 | #endif | 
| David S. Miller | 9cc3a1a | 2006-02-21 20:51:13 -0800 | [diff] [blame] | 2214 | kern_linear_pte_xor[1] |= (_PAGE_CP_4V | _PAGE_CV_4V | | 
|  | 2215 | _PAGE_P_4V | _PAGE_W_4V); | 
| David S. Miller | c4bce90 | 2006-02-11 21:57:54 -0800 | [diff] [blame] | 2216 |  | 
|  | 2217 | pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4V | __DIRTY_BITS_4V | | 
|  | 2218 | __ACCESS_BITS_4V | _PAGE_E_4V); | 
|  | 2219 |  | 
|  | 2220 | _PAGE_SZBITS = _PAGE_SZBITS_4V; | 
|  | 2221 | _PAGE_ALL_SZ_BITS = (_PAGE_SZ16GB_4V | _PAGE_SZ2GB_4V | | 
|  | 2222 | _PAGE_SZ256MB_4V | _PAGE_SZ32MB_4V | | 
|  | 2223 | _PAGE_SZ4MB_4V | _PAGE_SZ512K_4V | | 
|  | 2224 | _PAGE_SZ64K_4V | _PAGE_SZ8K_4V); | 
|  | 2225 |  | 
|  | 2226 | page_none = _PAGE_PRESENT_4V | _PAGE_ACCESSED_4V | _PAGE_CACHE_4V; | 
|  | 2227 | page_shared = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V | | 
|  | 2228 | __ACCESS_BITS_4V | _PAGE_WRITE_4V | _PAGE_EXEC_4V); | 
|  | 2229 | page_copy   = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V | | 
|  | 2230 | __ACCESS_BITS_4V | _PAGE_EXEC_4V); | 
|  | 2231 | page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V | | 
|  | 2232 | __ACCESS_BITS_4V | _PAGE_EXEC_4V); | 
|  | 2233 |  | 
|  | 2234 | page_exec_bit = _PAGE_EXEC_4V; | 
|  | 2235 |  | 
|  | 2236 | prot_init_common(page_none, page_shared, page_copy, page_readonly, | 
|  | 2237 | page_exec_bit); | 
|  | 2238 | } | 
|  | 2239 |  | 
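/* Map a page size in bytes to the matching TTE size-field encoding:
 * sun4v encodings under the hypervisor, sun4u encodings otherwise.
 * Unrecognized sizes fall back to the 8K encoding.
 */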
|  | 2240 | unsigned long pte_sz_bits(unsigned long sz) | 
|  | 2241 | { | 
|  | 2242 | if (tlb_type == hypervisor) { | 
|  | 2243 | switch (sz) { | 
|  | 2244 | case 8 * 1024: | 
|  | 2245 | default: | 
|  | 2246 | return _PAGE_SZ8K_4V; | 
|  | 2247 | case 64 * 1024: | 
|  | 2248 | return _PAGE_SZ64K_4V; | 
|  | 2249 | case 512 * 1024: | 
|  | 2250 | return _PAGE_SZ512K_4V; | 
|  | 2251 | case 4 * 1024 * 1024: | 
|  | 2252 | return _PAGE_SZ4MB_4V; | 
|  | 2253 | } | 
|  | 2254 | } else { | 
|  | 2255 | switch (sz) { | 
|  | 2256 | case 8 * 1024: | 
|  | 2257 | default: | 
|  | 2258 | return _PAGE_SZ8K_4U; | 
|  | 2259 | case 64 * 1024: | 
|  | 2260 | return _PAGE_SZ64K_4U; | 
|  | 2261 | case 512 * 1024: | 
|  | 2262 | return _PAGE_SZ512K_4U; | 
|  | 2263 | case 4 * 1024 * 1024: | 
|  | 2264 | return _PAGE_SZ4MB_4U; | 
|  | 2265 | } | 
|  | 2266 | } | 
|  | 2267 | } | 
|  | 2268 |  | 
|  | 2269 | pte_t mk_pte_io(unsigned long page, pgprot_t prot, int space, unsigned long page_size) | 
|  | 2270 | { | 
|  | 2271 | pte_t pte; | 
| David S. Miller | cf62715 | 2006-02-12 21:10:07 -0800 | [diff] [blame] | 2272 |  | 
|  | 2273 | pte_val(pte)  = page | pgprot_val(pgprot_noncached(prot)); | 
| David S. Miller | c4bce90 | 2006-02-11 21:57:54 -0800 | [diff] [blame] | 2274 | pte_val(pte) |= (((unsigned long)space) << 32); | 
|  | 2275 | pte_val(pte) |= pte_sz_bits(page_size); | 
| David S. Miller | cf62715 | 2006-02-12 21:10:07 -0800 | [diff] [blame] | 2276 |  | 
| David S. Miller | c4bce90 | 2006-02-11 21:57:54 -0800 | [diff] [blame] | 2277 | return pte; | 
|  | 2278 | } | 
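/* Hypothetical caller sketch; the bus address and size below are
 * illustrative only, not taken from this file.
 */
#if 0	/* example only, not compiled */
pte_t pte = mk_pte_io(0x1fe00000000UL,	/* physical/bus page address */
		      PAGE_KERNEL,	/* base protections, made non-cached */
		      0,		/* I/O space number, lands at bit 32 */
		      4 * 1024 * 1024);	/* page size in bytes */
#endif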
|  | 2279 |  | 
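/* Build the TTE used for the kernel's own large (4MB) mappings; the
 * sun4u variant carries the lock bit (_PAGE_L_4U), while sun4v leaves
 * residency to the hypervisor.
 */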
| David S. Miller | c4bce90 | 2006-02-11 21:57:54 -0800 | [diff] [blame] | 2280 | static unsigned long kern_large_tte(unsigned long paddr) | 
|  | 2281 | { | 
|  | 2282 | unsigned long val; | 
|  | 2283 |  | 
|  | 2284 | val = (_PAGE_VALID | _PAGE_SZ4MB_4U | | 
|  | 2285 | _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_P_4U | | 
|  | 2286 | _PAGE_EXEC_4U | _PAGE_L_4U | _PAGE_W_4U); | 
|  | 2287 | if (tlb_type == hypervisor) | 
|  | 2288 | val = (_PAGE_VALID | _PAGE_SZ4MB_4V | | 
|  | 2289 | _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_P_4V | | 
|  | 2290 | _PAGE_EXEC_4V | _PAGE_W_4V); | 
|  | 2291 |  | 
|  | 2292 | return val | paddr; | 
|  | 2293 | } | 
|  | 2294 |  | 
| David S. Miller | c4bce90 | 2006-02-11 21:57:54 -0800 | [diff] [blame] | 2295 | /* If not locked, zap it. */ | 
|  | 2296 | void __flush_tlb_all(void) | 
|  | 2297 | { | 
|  | 2298 | unsigned long pstate; | 
|  | 2299 | int i; | 
|  | 2300 |  | 
|  | 2301 | __asm__ __volatile__("flushw\n\t" | 
|  | 2302 | "rdpr	%%pstate, %0\n\t" | 
|  | 2303 | "wrpr	%0, %1, %%pstate" | 
|  | 2304 | : "=r" (pstate) | 
|  | 2305 | : "i" (PSTATE_IE)); | 
| David S. Miller | 8f361453 | 2007-12-13 06:13:38 -0800 | [diff] [blame] | 2306 | if (tlb_type == hypervisor) { | 
|  | 2307 | sun4v_mmu_demap_all(); | 
|  | 2308 | } else if (tlb_type == spitfire) { | 
| David S. Miller | c4bce90 | 2006-02-11 21:57:54 -0800 | [diff] [blame] | 2309 | for (i = 0; i < 64; i++) { | 
|  | 2310 | /* Spitfire Errata #32 workaround */ | 
|  | 2311 | /* NOTE: Always runs on spitfire, so no | 
|  | 2312 | *       cheetah+ page size encodings. | 
|  | 2313 | */ | 
|  | 2314 | __asm__ __volatile__("stxa	%0, [%1] %2\n\t" | 
|  | 2315 | "flush	%%g6" | 
|  | 2316 | : /* No outputs */ | 
|  | 2317 | : "r" (0), | 
|  | 2318 | "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU)); | 
|  | 2319 |  | 
|  | 2320 | if (!(spitfire_get_dtlb_data(i) & _PAGE_L_4U)) { | 
|  | 2321 | __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" | 
|  | 2322 | "membar #Sync" | 
|  | 2323 | : /* no outputs */ | 
|  | 2324 | : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU)); | 
|  | 2325 | spitfire_put_dtlb_data(i, 0x0UL); | 
|  | 2326 | } | 
|  | 2327 |  | 
|  | 2328 | /* Spitfire Errata #32 workaround */ | 
|  | 2329 | /* NOTE: Always runs on spitfire, so no | 
|  | 2330 | *       cheetah+ page size encodings. | 
|  | 2331 | */ | 
|  | 2332 | __asm__ __volatile__("stxa	%0, [%1] %2\n\t" | 
|  | 2333 | "flush	%%g6" | 
|  | 2334 | : /* No outputs */ | 
|  | 2335 | : "r" (0), | 
|  | 2336 | "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU)); | 
|  | 2337 |  | 
|  | 2338 | if (!(spitfire_get_itlb_data(i) & _PAGE_L_4U)) { | 
|  | 2339 | __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" | 
|  | 2340 | "membar #Sync" | 
|  | 2341 | : /* no outputs */ | 
|  | 2342 | : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU)); | 
|  | 2343 | spitfire_put_itlb_data(i, 0x0UL); | 
|  | 2344 | } | 
|  | 2345 | } | 
|  | 2346 | } else if (tlb_type == cheetah || tlb_type == cheetah_plus) { | 
|  | 2347 | cheetah_flush_dtlb_all(); | 
|  | 2348 | cheetah_flush_itlb_all(); | 
|  | 2349 | } | 
|  | 2350 | __asm__ __volatile__("wrpr	%0, 0, %%pstate" | 
|  | 2351 | : : "r" (pstate)); | 
|  | 2352 | } |