#include <linux/types.h>
#include <asm/delay.h>
#include <irq.h>
#include <hwregs/intr_vect.h>
#include <hwregs/intr_vect_defs.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <hwregs/asm/mmu_defs_asm.h>
#include <hwregs/supp_reg.h>
#include <asm/atomic.h>

#include <linux/err.h>
#include <linux/init.h>
#include <linux/timex.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/module.h>

#define IPI_SCHEDULE 1
#define IPI_CALL 2
#define IPI_FLUSH_TLB 4
#define IPI_BOOT 8

#define FLUSH_ALL (void*)0xffffffff
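
/* The IPI_* values are bits that send_ipi() ORs into the target CPU's
 * rw_ipi register, so several requests can be pending at once.
 * FLUSH_ALL is a sentinel pointer meaning "flush everything" rather
 * than a real mm_struct or vm_area_struct. */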

/* Vector of locks used for various atomic operations */
spinlock_t cris_atomic_locks[] = {
	[0 ... LOCK_COUNT - 1] = __SPIN_LOCK_UNLOCKED(cris_atomic_locks)
};

/* CPU masks */
cpumask_t phys_cpu_present_map = CPU_MASK_NONE;
EXPORT_SYMBOL(phys_cpu_present_map);

/* Variables used during SMP boot */
volatile int cpu_now_booting = 0;
volatile struct thread_info *smp_init_current_idle_thread;

/* Variables used during IPI */
static DEFINE_SPINLOCK(call_lock);
static DEFINE_SPINLOCK(tlbstate_lock);

struct call_data_struct {
	void (*func) (void *info);
	void *info;
	int wait;
};

static struct call_data_struct *call_data;

static struct mm_struct *flush_mm;
static struct vm_area_struct *flush_vma;
static unsigned long flush_addr;
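
/* call_data and the flush_* variables above are single shared slots:
 * a sender fills them in under call_lock respectively tlbstate_lock
 * before raising the IPI, and the receiving CPUs read them from the
 * IPI interrupt handler. */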

/* Interrupt controller register blocks, one per CPU */
static unsigned long irq_regs[NR_CPUS] = {
	regi_irq,
	regi_irq2
};

static irqreturn_t crisv32_ipi_interrupt(int irq, void *dev_id);
static int send_ipi(int vector, int wait, cpumask_t cpu_mask);
static struct irqaction irq_ipi = {
	.handler = crisv32_ipi_interrupt,
	.flags = IRQF_DISABLED,
	.name = "ipi",
};

extern void cris_mmu_init(void);
extern void cris_timer_init(void);

/* SMP initialization */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	int i;

	/* From now on we can expect IPIs so set them up */
	setup_irq(IPI_INTR_VECT, &irq_ipi);

	/* Mark all possible CPUs as present */
	for (i = 0; i < max_cpus; i++)
		cpu_set(i, phys_cpu_present_map);
}

void __devinit smp_prepare_boot_cpu(void)
{
	/* PGD pointer has moved after per_cpu initialization so
	 * update the MMU.
	 */
	pgd_t **pgd;
	pgd = (pgd_t **)&per_cpu(current_pgd, smp_processor_id());

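	/* Write the PGD to both supervisor register banks; on CRISv32
	 * banks 1 and 2 are assumed to be the instruction and data MMU
	 * register banks (the BANK_IM/BANK_DM pair). */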
	SUPP_BANK_SEL(1);
	SUPP_REG_WR(RW_MM_TLB_PGD, pgd);
	SUPP_BANK_SEL(2);
	SUPP_REG_WR(RW_MM_TLB_PGD, pgd);

	set_cpu_online(0, true);
	cpu_set(0, phys_cpu_present_map);
	set_cpu_possible(0, true);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

/* Bring one CPU online. */
static int __init
smp_boot_one_cpu(int cpuid)
{
	unsigned timeout;
	struct task_struct *idle;
	cpumask_t cpu_mask = CPU_MASK_NONE;

	idle = fork_idle(cpuid);
	if (IS_ERR(idle))
		panic("SMP: fork failed for CPU:%d", cpuid);

	task_thread_info(idle)->cpu = cpuid;

	/* Information to the CPU that is about to boot */
	smp_init_current_idle_thread = task_thread_info(idle);
	cpu_now_booting = cpuid;

	/* Kick it. send_ipi() only delivers to CPUs in cpu_online_map,
	 * so temporarily mark the target online around the send. */
	cpu_set(cpuid, cpu_online_map);
	cpu_set(cpuid, cpu_mask);
	send_ipi(IPI_BOOT, 0, cpu_mask);
	cpu_clear(cpuid, cpu_online_map);

	/* Wait for CPU to come online */
	for (timeout = 0; timeout < 10000; timeout++) {
		if (cpu_online(cpuid)) {
			cpu_now_booting = 0;
			smp_init_current_idle_thread = NULL;
			return 0; /* CPU online */
		}
		udelay(100);
		barrier();
	}

	put_task_struct(idle);
	idle = NULL;

	printk(KERN_CRIT "SMP: CPU:%d is stuck.\n", cpuid);
	return -1;
}
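
/* The booting CPU reads cpu_now_booting and, presumably from the
 * low-level boot code, smp_init_current_idle_thread, then reports in
 * through smp_callin() below. */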

/* Secondary CPUs start using C here. Here we need to set up CPU
 * specific stuff such as the local timer and the MMU. */
void __init smp_callin(void)
{
	extern void cpu_idle(void);

	int cpu = cpu_now_booting;
	reg_intr_vect_rw_mask vect_mask = {0};

	/* Initialise the idle task for this CPU */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	/* Set up MMU */
	cris_mmu_init();
	__flush_tlb_all();

	/* Set up local timer. */
	cris_timer_init();

	/* Enable IRQ and idle */
	REG_WR(intr_vect, irq_regs[cpu], rw_mask, vect_mask);
	unmask_irq(IPI_INTR_VECT);
	unmask_irq(TIMER0_INTR_VECT);
	preempt_disable();
	notify_cpu_starting(cpu);
	local_irq_enable();

	cpu_set(cpu, cpu_online_map);
	cpu_idle();
}

/* Stop execution on this CPU. */
void stop_this_cpu(void *dummy)
{
	local_irq_disable();
	asm volatile("halt");
}

/* Other calls */
void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 0);
}

int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

/* cache_decay_ticks is used by the scheduler to decide if a process
 * is "hot" on one CPU. A higher value means a higher penalty to move
 * a process to another CPU. Our cache is rather small so we report
 * 1 tick.
 */
unsigned long cache_decay_ticks = 1;

int __cpuinit __cpu_up(unsigned int cpu)
{
	smp_boot_one_cpu(cpu);
	return cpu_online(cpu) ? 0 : -ENOSYS;
}

void smp_send_reschedule(int cpu)
{
	cpumask_t cpu_mask = CPU_MASK_NONE;
	cpu_set(cpu, cpu_mask);
	send_ipi(IPI_SCHEDULE, 0, cpu_mask);
}

/* TLB flushing
 *
 * Flush needs to be done on the local CPU and on any other CPU that
 * may have the same mapping. The mm->cpu_vm_mask is used to keep track
 * of which CPUs a specific process has been executing on.
 */
void flush_tlb_common(struct mm_struct *mm, struct vm_area_struct *vma,
		      unsigned long addr)
{
	unsigned long flags;
	cpumask_t cpu_mask;

	spin_lock_irqsave(&tlbstate_lock, flags);
	cpu_mask = (mm == FLUSH_ALL ? cpu_all_mask : *mm_cpumask(mm));
	cpu_clear(smp_processor_id(), cpu_mask);
	flush_mm = mm;
	flush_vma = vma;
	flush_addr = addr;
	send_ipi(IPI_FLUSH_TLB, 1, cpu_mask);
	spin_unlock_irqrestore(&tlbstate_lock, flags);
}

void flush_tlb_all(void)
{
	__flush_tlb_all();
	flush_tlb_common(FLUSH_ALL, FLUSH_ALL, 0);
}

void flush_tlb_mm(struct mm_struct *mm)
{
	__flush_tlb_mm(mm);
	flush_tlb_common(mm, FLUSH_ALL, 0);
	/* No more mappings in other CPUs */
	cpumask_clear(mm_cpumask(mm));
	cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
}

void flush_tlb_page(struct vm_area_struct *vma,
		    unsigned long addr)
{
	__flush_tlb_page(vma, addr);
	flush_tlb_common(vma->vm_mm, vma, addr);
}

/* Inter processor interrupts
 *
 * The IPIs are used for:
 * * Forcing a schedule on a CPU
 * * Flushing the TLB on other CPUs
 * * Calling a function on other CPUs
 */

int send_ipi(int vector, int wait, cpumask_t cpu_mask)
{
	int i = 0;
	reg_intr_vect_rw_ipi ipi;
	int ret = 0;

	/* Calculate CPUs to send to. */
	cpus_and(cpu_mask, cpu_mask, cpu_online_map);

	/* Send the IPI. Read back each target's currently pending
	 * vector first so pending requests are not overwritten. */
	for_each_cpu_mask(i, cpu_mask)
	{
		ipi = REG_RD(intr_vect, irq_regs[i], rw_ipi);
		ipi.vector |= vector;
		REG_WR(intr_vect, irq_regs[i], rw_ipi, ipi);
	}

	/* Wait for IPI to finish on other CPUs */
	if (wait) {
		for_each_cpu_mask(i, cpu_mask) {
			int j;
			for (j = 0; j < 1000; j++) {
				ipi = REG_RD(intr_vect, irq_regs[i], rw_ipi);
				if (!ipi.vector)
					break;
				udelay(100);
			}

			/* Timeout? */
			if (ipi.vector) {
				printk("SMP call timeout from %d to %d\n",
				       smp_processor_id(), i);
				ret = -ETIMEDOUT;
				dump_stack();
			}
		}
	}
	return ret;
}

/*
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function(void (*func)(void *info), void *info, int wait)
{
	cpumask_t cpu_mask = CPU_MASK_ALL;
	struct call_data_struct data;
	int ret;

	cpu_clear(smp_processor_id(), cpu_mask);

	WARN_ON(irqs_disabled());

	data.func = func;
	data.info = info;
	data.wait = wait;

	spin_lock(&call_lock);
	call_data = &data;
	ret = send_ipi(IPI_CALL, wait, cpu_mask);
	spin_unlock(&call_lock);

	return ret;
}
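
/*
 * Example (hypothetical, for illustration only): run a function on all
 * other CPUs and wait until each has acknowledged its IPI:
 *
 *	static void say_hello(void *unused)
 *	{
 *		printk("Hello from CPU %d\n", smp_processor_id());
 *	}
 *
 *	smp_call_function(say_hello, NULL, 1);
 */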

irqreturn_t crisv32_ipi_interrupt(int irq, void *dev_id)
{
	reg_intr_vect_rw_ipi ipi;

	ipi = REG_RD(intr_vect, irq_regs[smp_processor_id()], rw_ipi);

	if (ipi.vector & IPI_CALL) {
		/* Only dereference call_data when a call is actually
		 * pending; it is not set up for other IPI types. */
		void (*func)(void *info) = call_data->func;
		void *info = call_data->info;

		func(info);
	}
	if (ipi.vector & IPI_FLUSH_TLB) {
		if (flush_mm == FLUSH_ALL)
			__flush_tlb_all();
		else if (flush_vma == FLUSH_ALL)
			__flush_tlb_mm(flush_mm);
		else
			__flush_tlb_page(flush_vma, flush_addr);
	}

	/* Acknowledge by clearing our pending vector. */
	ipi.vector = 0;
	REG_WR(intr_vect, irq_regs[smp_processor_id()], rw_ipi, ipi);

	return IRQ_HANDLED;
}
