/* smp.c: Sparc SMP support.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 * Copyright (C) 2004 Keith M Wesolowski (wesolows@foobazco.org)
 */

#include <asm/head.h>

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/cache.h>
#include <linux/delay.h>

#include <asm/ptrace.h>
#include <asm/atomic.h>

#include <asm/irq.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/cpudata.h>

volatile int smp_processors_ready = 0;
int smp_num_cpus = 1;
volatile unsigned long cpu_callin_map[NR_CPUS] __initdata = {0,};
unsigned char boot_cpu_id = 0;
unsigned char boot_cpu_id4 = 0; /* boot_cpu_id << 2 */
int smp_activated = 0;
volatile int __cpu_number_map[NR_CPUS];
volatile int __cpu_logical_map[NR_CPUS];

cpumask_t cpu_online_map = CPU_MASK_NONE;
cpumask_t phys_cpu_present_map = CPU_MASK_NONE;

/* The only locking primitive guaranteed to be available on all Sparc
 * processors is 'ldstub [%reg + immediate], %dest_reg', which atomically
 * loads the byte at the effective address into dest_reg and stores
 * 0xff there afterwards.  A pretty lame locking primitive compared
 * to the Alpha and Intel ones, no?  Most Sparcs have the 'swap'
 * instruction, which is much better...
 */

/* Used to make bitops atomic */
unsigned char bitops_spinlock = 0;
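
/* Purely illustrative, not used anywhere in this file: a minimal sketch
 * of how such an ldstub-based byte lock might be taken and released.
 * The example_* names are hypothetical.
 */
static inline void example_lock_byte(volatile unsigned char *lock)
{
	unsigned char tmp;

	do {
		__asm__ __volatile__("ldstub	[%1], %0"
				     : "=&r" (tmp)
				     : "r" (lock)
				     : "memory");
	} while (tmp);	/* nonzero: someone else had already stored 0xff */
}

static inline void example_unlock_byte(volatile unsigned char *lock)
{
	__asm__ __volatile__("stb	%%g0, [%0]" : : "r" (lock) : "memory");
}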

volatile unsigned long ipi_count;

volatile int smp_process_available = 0;
volatile int smp_commenced = 0;

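/* Record per-CPU data (delay-loop calibration, clock rate, PROM node
 * and hardware MID) for the given processor.
 */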
void __init smp_store_cpu_info(int id)
{
	int cpu_node;

	cpu_data(id).udelay_val = loops_per_jiffy;

	cpu_find_by_mid(id, &cpu_node);
	cpu_data(id).clock_tick = prom_getintdefault(cpu_node,
						     "clock-frequency", 0);
	cpu_data(id).prom_node = cpu_node;
	cpu_data(id).mid = cpu_get_hwmid(cpu_node);
	if (cpu_data(id).mid < 0)
		panic("No MID found for CPU%d at node 0x%08x", id, cpu_node);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

void cpu_panic(void)
{
	printk("CPU[%d]: Returns from cpu_idle!\n", smp_processor_id());
	panic("SMP bolixed\n");
}

struct linux_prom_registers smp_penguin_ctable __initdata = { 0 };

void __init smp_boot_cpus(void)
{
	extern void smp4m_boot_cpus(void);
	extern void smp4d_boot_cpus(void);

	if (sparc_cpu_model == sun4m)
		smp4m_boot_cpus();
	else
		smp4d_boot_cpus();
}

void smp_send_reschedule(int cpu)
{
	/* See sparc64 */
}

void smp_send_stop(void)
{
}

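/* Each smp_flush_*() routine below follows the same pattern: cross-call
 * the matching local_flush_*() on the other interested CPUs (xc0..xc3
 * pass zero to three arguments along to the called function), then run
 * the local variant on the calling CPU.
 */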
void smp_flush_cache_all(void)
{
	xc0((smpfunc_t) BTFIXUP_CALL(local_flush_cache_all));
	local_flush_cache_all();
}

void smp_flush_tlb_all(void)
{
	xc0((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_all));
	local_flush_tlb_all();
}

void smp_flush_cache_mm(struct mm_struct *mm)
{
	if (mm->context != NO_CONTEXT) {
		cpumask_t cpu_mask = mm->cpu_vm_mask;
		cpu_clear(smp_processor_id(), cpu_mask);
		if (!cpus_empty(cpu_mask))
			xc1((smpfunc_t) BTFIXUP_CALL(local_flush_cache_mm),
			    (unsigned long) mm);
		local_flush_cache_mm(mm);
	}
}

void smp_flush_tlb_mm(struct mm_struct *mm)
{
	if (mm->context != NO_CONTEXT) {
		cpumask_t cpu_mask = mm->cpu_vm_mask;
		cpu_clear(smp_processor_id(), cpu_mask);
		if (!cpus_empty(cpu_mask)) {
			xc1((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_mm),
			    (unsigned long) mm);
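			/* If we are the only user of this mm, shrink the
			 * CPU mask back to just ourselves so that later
			 * flushes can skip the cross-call entirely.
			 */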
			if (atomic_read(&mm->mm_users) == 1 &&
			    current->active_mm == mm)
				mm->cpu_vm_mask = cpumask_of_cpu(smp_processor_id());
		}
		local_flush_tlb_mm(mm);
	}
}

void smp_flush_cache_range(struct vm_area_struct *vma, unsigned long start,
			   unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	if (mm->context != NO_CONTEXT) {
		cpumask_t cpu_mask = mm->cpu_vm_mask;
		cpu_clear(smp_processor_id(), cpu_mask);
		if (!cpus_empty(cpu_mask))
			xc3((smpfunc_t) BTFIXUP_CALL(local_flush_cache_range),
			    (unsigned long) vma, start, end);
		local_flush_cache_range(vma, start, end);
	}
}

void smp_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			 unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	if (mm->context != NO_CONTEXT) {
		cpumask_t cpu_mask = mm->cpu_vm_mask;
		cpu_clear(smp_processor_id(), cpu_mask);
		if (!cpus_empty(cpu_mask))
			xc3((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_range),
			    (unsigned long) vma, start, end);
		local_flush_tlb_range(vma, start, end);
	}
}

void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
{
	struct mm_struct *mm = vma->vm_mm;

	if (mm->context != NO_CONTEXT) {
		cpumask_t cpu_mask = mm->cpu_vm_mask;
		cpu_clear(smp_processor_id(), cpu_mask);
		if (!cpus_empty(cpu_mask))
			xc2((smpfunc_t) BTFIXUP_CALL(local_flush_cache_page),
			    (unsigned long) vma, page);
		local_flush_cache_page(vma, page);
	}
}

void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	struct mm_struct *mm = vma->vm_mm;

	if (mm->context != NO_CONTEXT) {
		cpumask_t cpu_mask = mm->cpu_vm_mask;
		cpu_clear(smp_processor_id(), cpu_mask);
		if (!cpus_empty(cpu_mask))
			xc2((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_page),
			    (unsigned long) vma, page);
		local_flush_tlb_page(vma, page);
	}
}

void smp_reschedule_irq(void)
{
	set_need_resched();
}

void smp_flush_page_to_ram(unsigned long page)
{
	/* Current theory is that those who call this are the ones
	 * who have just dirtied their cache with the page's contents
	 * in kernel space, therefore we only run this on the local cpu.
	 *
	 * XXX This experiment failed, research further... -DaveM
	 */
#if 1
	xc1((smpfunc_t) BTFIXUP_CALL(local_flush_page_to_ram), page);
#endif
	local_flush_page_to_ram(page);
}

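/* Flush the instruction cache lines covering insn_addr on every CPU
 * that has used this mm; called after instructions (e.g. a signal
 * trampoline) have been written into user space.
 */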
void smp_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
{
	cpumask_t cpu_mask = mm->cpu_vm_mask;
	cpu_clear(smp_processor_id(), cpu_mask);
	if (!cpus_empty(cpu_mask))
		xc2((smpfunc_t) BTFIXUP_CALL(local_flush_sig_insns),
		    (unsigned long) mm, insn_addr);
	local_flush_sig_insns(mm, insn_addr);
}

extern unsigned int lvl14_resolution;

/* /proc/profile writes can call this, don't __init it please. */
static DEFINE_SPINLOCK(prof_setup_lock);

int setup_profiling_timer(unsigned int multiplier)
{
	int i;
	unsigned long flags;

	/* Prevent level14 ticker IRQ flooding. */
	if (!multiplier || (lvl14_resolution / multiplier) < 500)
		return -EINVAL;

	spin_lock_irqsave(&prof_setup_lock, flags);
	for (i = 0; i < NR_CPUS; i++) {
		if (cpu_possible(i))
			load_profile_irq(i, lvl14_resolution / multiplier);
		prof_multiplier(i) = multiplier;
	}
	spin_unlock_irqrestore(&prof_setup_lock, flags);

	return 0;
}

void __init smp_prepare_cpus(unsigned int maxcpus)
{
}

void __devinit smp_prepare_boot_cpu(void)
{
	current_thread_info()->cpu = hard_smp_processor_id();
	cpu_set(smp_processor_id(), cpu_online_map);
	cpu_set(smp_processor_id(), phys_cpu_present_map);
}

int __devinit __cpu_up(unsigned int cpu)
{
	panic("smp doesn't work\n");
}

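/* udelay_val holds this CPU's loops_per_jiffy, so loops_per_jiffy * HZ
 * / 500000 is the whole-number part of its BogoMIPS rating; the two
 * divisions below pick out the integer and two-digit fractional parts.
 */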
void smp_bogo(struct seq_file *m)
{
	int i;

	for (i = 0; i < NR_CPUS; i++) {
		if (cpu_online(i))
			seq_printf(m,
				   "Cpu%dBogo\t: %lu.%02lu\n",
				   i,
				   cpu_data(i).udelay_val/(500000/HZ),
				   (cpu_data(i).udelay_val/(5000/HZ))%100);
	}
}

void smp_info(struct seq_file *m)
{
	int i;

	seq_printf(m, "State:\n");
	for (i = 0; i < NR_CPUS; i++) {
		if (cpu_online(i))
			seq_printf(m, "CPU%d\t\t: online\n", i);
	}
}