/*
 * PowerPC64 SLB support.
 *
 * Copyright (C) 2004 David Gibson <dwg@au.ibm.com>, IBM
 * Based on earlier code written by:
 * Dave Engebretsen and Mike Corrigan {engebret|mikejc}@us.ibm.com
 *    Copyright (c) 2001 Dave Engebretsen
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#include <linux/config.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/paca.h>
#include <asm/cputable.h>

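/* Implemented in assembler (slb_low.S): build and enter an SLB entry
 * for the faulting effective address. */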
extern void slb_allocate(unsigned long ea);

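/* Build the RB (ESID) doubleword for slbmte: effective segment id,
 * valid bit and SLB entry index. */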
static inline unsigned long mk_esid_data(unsigned long ea, unsigned long slot)
{
	return (ea & ESID_MASK) | SLB_ESID_V | slot;
}

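/* Build the RS (VSID) doubleword for slbmte: virtual segment id plus
 * protection and page-size flags. */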
static inline unsigned long mk_vsid_data(unsigned long ea, unsigned long flags)
{
	return (get_kernel_vsid(ea) << SLB_VSID_SHIFT) | flags;
}

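/* Enter one SLB entry.  The "memory" clobber keeps the compiler from
 * moving memory accesses across the insertion. */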
static inline void create_slbe(unsigned long ea, unsigned long flags,
			       unsigned long entry)
{
	asm volatile("slbmte  %0,%1" :
		     : "r" (mk_vsid_data(ea, flags)),
		       "r" (mk_esid_data(ea, entry))
		     : "memory" );
}

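/* Wipe the SLB and re-enter the bolted entries.  slbia leaves entry 0
 * (the bolted KERNELBASE segment) alone, so only the VMALLOC and
 * kernel stack entries need rebuilding. */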
static void slb_flush_and_rebolt(void)
{
	/* If you change this make sure you change SLB_NUM_BOLTED
	 * appropriately too. */
	unsigned long ksp_flags = SLB_VSID_KERNEL;
	unsigned long ksp_esid_data;

	WARN_ON(!irqs_disabled());

	if (cpu_has_feature(CPU_FTR_16M_PAGE))
		ksp_flags |= SLB_VSID_L;

	ksp_esid_data = mk_esid_data(get_paca()->kstack, 2);
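	/* If the kernel stack is in the KERNELBASE segment, slot 0
	 * already maps it; entering it again would give the SLB two
	 * entries for the same ESID, so clear the valid bit instead. */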
	if ((ksp_esid_data & ESID_MASK) == KERNELBASE)
		ksp_esid_data &= ~SLB_ESID_V;

	/* We need to do this all in asm, so we're sure we don't touch
	 * the stack between the slbia and rebolting it. */
	asm volatile("isync\n"
		     "slbia\n"
		     /* Slot 1 - first VMALLOC segment */
		     "slbmte	%0,%1\n"
		     /* Slot 2 - kernel stack */
		     "slbmte	%2,%3\n"
		     "isync"
		     :: "r"(mk_vsid_data(VMALLOCBASE, SLB_VSID_KERNEL)),
		        "r"(mk_esid_data(VMALLOCBASE, 1)),
		        "r"(mk_vsid_data(ksp_esid_data, ksp_flags)),
		        "r"(ksp_esid_data)
		     : "memory");
}

/* Flush all user entries from the segment table of the current processor. */
void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
{
	unsigned long offset = get_paca()->slb_cache_ptr;
	unsigned long esid_data = 0;
	unsigned long pc = KSTK_EIP(tsk);
	unsigned long stack = KSTK_ESP(tsk);
	unsigned long unmapped_base;

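	/* slb_cache records the user ESIDs entered since the last
	 * switch.  If it hasn't overflowed, invalidate just those
	 * entries; otherwise flush the whole SLB and rebolt. */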
	if (offset <= SLB_CACHE_ENTRIES) {
		int i;
		asm volatile("isync" : : : "memory");
		for (i = 0; i < offset; i++) {
			esid_data = ((unsigned long)get_paca()->slb_cache[i]
				<< SID_SHIFT) | SLBIE_C;
			asm volatile("slbie %0" : : "r" (esid_data));
		}
		asm volatile("isync" : : : "memory");
	} else {
		slb_flush_and_rebolt();
	}

	/* Workaround POWER5 < DD2.1 issue: issue one extra slbie,
	 * repeating the single cached invalidation, or a harmless
	 * "slbie 0" after a full flush-and-rebolt. */
	if (offset == 1 || offset > SLB_CACHE_ENTRIES)
		asm volatile("slbie %0" : : "r" (esid_data));

	get_paca()->slb_cache_ptr = 0;
	get_paca()->context = mm->context;

	/*
	 * Preload the userspace segments the task is most likely to
	 * touch on return: its text, its stack, and the segment at
	 * TASK_UNMAPPED_BASE where mmaps are placed.
	 */
	if (test_tsk_thread_flag(tsk, TIF_32BIT))
		unmapped_base = TASK_UNMAPPED_BASE_USER32;
	else
		unmapped_base = TASK_UNMAPPED_BASE_USER64;

	if (pc >= KERNELBASE)
		return;
	slb_allocate(pc);

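	/* If the stack shares the pc's segment, it is already covered. */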
	if (GET_ESID(pc) == GET_ESID(stack))
		return;

	if (stack >= KERNELBASE)
		return;
	slb_allocate(stack);

	if ((GET_ESID(pc) == GET_ESID(unmapped_base))
	    || (GET_ESID(stack) == GET_ESID(unmapped_base)))
		return;

	if (unmapped_base >= KERNELBASE)
		return;
	slb_allocate(unmapped_base);
}

void slb_initialize(void)
{
	/* On iSeries the bolted entries have already been set up by
	 * the hypervisor from the lparMap data in head.S */
#ifndef CONFIG_PPC_ISERIES
	unsigned long flags = SLB_VSID_KERNEL;

	if (cpu_has_feature(CPU_FTR_16M_PAGE))
		flags |= SLB_VSID_L;

	/* Invalidate the entire SLB (even slot 0, which slbia leaves
	 * alone) & all the ERATs */
	asm volatile("isync":::"memory");
	asm volatile("slbmte  %0,%0"::"r" (0) : "memory");
	asm volatile("isync; slbia; isync":::"memory");
	create_slbe(KERNELBASE, flags, 0);
	create_slbe(VMALLOCBASE, SLB_VSID_KERNEL, 1);
	/* We don't bolt the stack for the time being - we're in boot,
	 * so the stack is in the bolted segment.  By the time it goes
	 * elsewhere, we'll call _switch() which will bolt in the new
	 * one. */
	asm volatile("isync":::"memory");
#endif

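	/* Start the round-robin replacement pointer just past the
	 * bolted entries, so they are never chosen as victims. */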
	get_paca()->stab_rr = SLB_NUM_BOLTED;
}