blob: 653be061b9ec6745c17cac05f6f5827ed2013b17 [file] [log] [blame]
/*
 * This program is free software; you can distribute it and/or modify it
 * under the terms of the GNU General Public License (Version 2) as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
 *
 * Copyright (C) 2007 MIPS Technologies, Inc.
 *    Chris Dearman (chris@mips.com)
 */
18
19#undef DEBUG
20
21#include <linux/kernel.h>
22#include <linux/sched.h>
Ralf Baechle631330f2009-06-19 14:05:26 +010023#include <linux/smp.h>
Ralf Baechle39b8d522008-04-28 17:14:26 +010024#include <linux/cpumask.h>
25#include <linux/interrupt.h>
26#include <linux/compiler.h>
27
28#include <asm/atomic.h>
29#include <asm/cacheflush.h>
30#include <asm/cpu.h>
31#include <asm/processor.h>
32#include <asm/system.h>
33#include <asm/hardirq.h>
34#include <asm/mmu_context.h>
35#include <asm/smp.h>
36#include <asm/time.h>
37#include <asm/mipsregs.h>
38#include <asm/mipsmtregs.h>
39#include <asm/mips_mt.h>
40
/*
 * Crude manipulation of the CPU masks to control which
 * CPUs are brought online during initialisation
 *
 * Beware... this needs to be called after CPU discovery
 * but before CPU bringup
 */
48static int __init allowcpus(char *str)
49{
50 cpumask_t cpu_allow_map;
51 char buf[256];
52 int len;
53
54 cpus_clear(cpu_allow_map);
Rusty Russell29c01772008-12-13 21:20:25 +103055 if (cpulist_parse(str, &cpu_allow_map) == 0) {
Ralf Baechle39b8d522008-04-28 17:14:26 +010056 cpu_set(0, cpu_allow_map);
57 cpus_and(cpu_possible_map, cpu_possible_map, cpu_allow_map);
Rusty Russell29c01772008-12-13 21:20:25 +103058 len = cpulist_scnprintf(buf, sizeof(buf)-1, &cpu_possible_map);
Ralf Baechle39b8d522008-04-28 17:14:26 +010059 buf[len] = '\0';
60 pr_debug("Allowable CPUs: %s\n", buf);
61 return 1;
62 } else
63 return 0;
64}
65__setup("allowcpus=", allowcpus);
66
67static void ipi_call_function(unsigned int cpu)
68{
69 unsigned int action = 0;
70
71 pr_debug("CPU%d: %s cpu %d status %08x\n",
72 smp_processor_id(), __func__, cpu, read_c0_status());
73
74 switch (cpu) {
75 case 0:
76 action = GIC_IPI_EXT_INTR_CALLFNC_VPE0;
77 break;
78 case 1:
79 action = GIC_IPI_EXT_INTR_CALLFNC_VPE1;
80 break;
81 case 2:
82 action = GIC_IPI_EXT_INTR_CALLFNC_VPE2;
83 break;
84 case 3:
85 action = GIC_IPI_EXT_INTR_CALLFNC_VPE3;
86 break;
87 }
88 gic_send_ipi(action);
89}
90
91
92static void ipi_resched(unsigned int cpu)
93{
94 unsigned int action = 0;
95
96 pr_debug("CPU%d: %s cpu %d status %08x\n",
97 smp_processor_id(), __func__, cpu, read_c0_status());
98
99 switch (cpu) {
100 case 0:
101 action = GIC_IPI_EXT_INTR_RESCHED_VPE0;
102 break;
103 case 1:
104 action = GIC_IPI_EXT_INTR_RESCHED_VPE1;
105 break;
106 case 2:
107 action = GIC_IPI_EXT_INTR_RESCHED_VPE2;
108 break;
109 case 3:
110 action = GIC_IPI_EXT_INTR_RESCHED_VPE3;
111 break;
112 }
113 gic_send_ipi(action);
114}
115
116/*
117 * FIXME: This isn't restricted to CMP
118 * The SMVP kernel could use GIC interrupts if available
119 */
120void cmp_send_ipi_single(int cpu, unsigned int action)
121{
122 unsigned long flags;
123
124 local_irq_save(flags);
125
126 switch (action) {
127 case SMP_CALL_FUNCTION:
128 ipi_call_function(cpu);
129 break;
130
131 case SMP_RESCHEDULE_YOURSELF:
132 ipi_resched(cpu);
133 break;
134 }
135
136 local_irq_restore(flags);
137}
138
139static void cmp_send_ipi_mask(cpumask_t mask, unsigned int action)
140{
141 unsigned int i;
142
143 for_each_cpu_mask(i, mask)
144 cmp_send_ipi_single(i, action);
145}
146
/*
 * Early per-CPU initialisation run on each secondary CPU: unmask
 * the interrupt lines used with the GIC and record this CPU's
 * core (and, on MT kernels, VPE/TC) identity in its cpu_data.
 */
static void cmp_init_secondary(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;

	/* Assume GIC is present */
	change_c0_status(ST0_IM, STATUSF_IP3 | STATUSF_IP4 | STATUSF_IP6 |
				 STATUSF_IP7);

	/* Enable per-cpu interrupts: platform specific */

	/*
	 * Core number taken from EBase.  NOTE(review): the >> 1 looks
	 * suspicious — EBase.CPUNum occupies the low-order bits; confirm
	 * against this core's EBase register layout.
	 */
	c->core = (read_c0_ebase() >> 1) & 0xff;
#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC)
	/* Current VPE number from TCBind */
	c->vpe_id = (read_c0_tcbind() >> TCBIND_CURVPE_SHIFT) & TCBIND_CURVPE;
#endif
#ifdef CONFIG_MIPS_MT_SMTC
	/* Current TC number from TCBind (SMTC only) */
	c->tc_id = (read_c0_tcbind() >> TCBIND_CURTC_SHIFT) & TCBIND_CURTC;
#endif
}
165
/*
 * Final per-CPU setup before a secondary enters the idle loop:
 * arm the count/compare timer interrupt, enroll in the FPU-affinity
 * mask when an FPU is present, then enable local interrupts.
 */
static void cmp_smp_finish(void)
{
	pr_debug("SMPCMP: CPU%d: %s\n", smp_processor_id(), __func__);

	/* CDFIXME: remove this? */
	/* Schedule the first timer interrupt roughly 8 ticks out */
	write_c0_compare(read_c0_count() + (8 * mips_hpt_frequency / HZ));

#ifdef CONFIG_MIPS_MT_FPAFF
	/* If we have an FPU, enroll ourselves in the FPU-full mask */
	if (cpu_has_fpu)
		cpu_set(smp_processor_id(), mt_fpu_cpumask);
#endif /* CONFIG_MIPS_MT_FPAFF */

	local_irq_enable();
}
181
/*
 * Hook invoked once all CPUs are up; nothing to do for CMP beyond
 * the debug trace.
 */
static void cmp_cpus_done(void)
{
	pr_debug("SMPCMP: CPU%d: %s\n", smp_processor_id(), __func__);
}
186
187/*
188 * Setup the PC, SP, and GP of a secondary processor and start it running
189 * smp_bootstrap is the place to resume from
190 * __KSTK_TOS(idle) is apparently the stack pointer
191 * (unsigned long)idle->thread_info the gp
192 */
193static void cmp_boot_secondary(int cpu, struct task_struct *idle)
194{
195 struct thread_info *gp = task_thread_info(idle);
196 unsigned long sp = __KSTK_TOS(idle);
197 unsigned long pc = (unsigned long)&smp_bootstrap;
198 unsigned long a0 = 0;
199
200 pr_debug("SMPCMP: CPU%d: %s cpu %d\n", smp_processor_id(),
201 __func__, cpu);
202
203#if 0
204 /* Needed? */
205 flush_icache_range((unsigned long)gp,
206 (unsigned long)(gp + sizeof(struct thread_info)));
207#endif
208
209 amon_cpu_start(cpu, pc, sp, gp, a0);
210}
211
/*
 * Common setup before any secondaries are started: probe amon for
 * available secondary CPUs and build cpu_possible_map plus the
 * logical<->physical CPU numbering tables.
 */
void __init cmp_smp_setup(void)
{
	int i;
	int ncpu = 0;	/* secondaries found; CPU 0 is not counted */

	pr_debug("SMPCMP: CPU%d: %s\n", smp_processor_id(), __func__);

#ifdef CONFIG_MIPS_MT_FPAFF
	/* If we have an FPU, enroll ourselves in the FPU-full mask */
	if (cpu_has_fpu)
		cpu_set(0, mt_fpu_cpumask);
#endif /* CONFIG_MIPS_MT_FPAFF */

	for (i = 1; i < NR_CPUS; i++) {
		if (amon_cpu_avail(i)) {
			cpu_set(i, cpu_possible_map);
			/* pre-increment: logical CPU 0 is the boot CPU */
			__cpu_number_map[i] = ++ncpu;
			__cpu_logical_map[ncpu] = i;
		}
	}

	if (cpu_has_mipsmt) {
		unsigned int nvpe, mvpconf0 = read_c0_mvpconf0();

		/*
		 * NOTE(review): MVPConf0.PTC holds the index of the last
		 * TC, so this yields a TC count; it is used here as the
		 * sibling count — confirm that is intended.
		 */
		nvpe = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
		smp_num_siblings = nvpe;
	}
	pr_info("Detected %i available secondary CPU(s)\n", ncpu);
}
244
/*
 * Prepare-cpus hook: configure MIPS MT CPU options before any
 * secondary is brought up.  max_cpus is only used for the trace.
 */
void __init cmp_prepare_cpus(unsigned int max_cpus)
{
	pr_debug("SMPCMP: CPU%d: %s max_cpus=%d\n",
		 smp_processor_id(), __func__, max_cpus);

	/*
	 * FIXME: some of these options are per-system, some per-core and
	 * some per-cpu
	 */
	mips_mt_set_cpuoptions();
}
256
/*
 * plat_smp_ops vector wiring the CMP-specific handlers defined in
 * this file into the generic MIPS SMP layer.
 */
struct plat_smp_ops cmp_smp_ops = {
	.send_ipi_single	= cmp_send_ipi_single,
	.send_ipi_mask		= cmp_send_ipi_mask,
	.init_secondary		= cmp_init_secondary,
	.smp_finish		= cmp_smp_finish,
	.cpus_done		= cmp_cpus_done,
	.boot_secondary		= cmp_boot_secondary,
	.smp_setup		= cmp_smp_setup,
	.prepare_cpus		= cmp_prepare_cpus,
};
266};