blob: aba085b2c0d5a9d69cc2d222258c54b51b0a6aa7 [file] [log] [blame]
David Daney5b3b1682009-01-08 16:46:40 -08001/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
David Daneya0c16582012-07-05 18:12:39 +02006 * Copyright (C) 2004-2012 Cavium, Inc.
David Daney5b3b1682009-01-08 16:46:40 -08007 */
David Daney0c326382011-03-25 12:38:51 -07008
David Daney5b3b1682009-01-08 16:46:40 -08009#include <linux/interrupt.h>
David Daneya0c16582012-07-05 18:12:39 +020010#include <linux/irqdomain.h>
David Daney0c326382011-03-25 12:38:51 -070011#include <linux/bitops.h>
12#include <linux/percpu.h>
David Daneya0c16582012-07-05 18:12:39 +020013#include <linux/slab.h>
David Daney0c326382011-03-25 12:38:51 -070014#include <linux/irq.h>
Ralf Baechle631330f2009-06-19 14:05:26 +010015#include <linux/smp.h>
David Daneya0c16582012-07-05 18:12:39 +020016#include <linux/of.h>
David Daney5b3b1682009-01-08 16:46:40 -080017
18#include <asm/octeon/octeon.h>
David Daney88fd8582012-04-04 15:34:41 -070019#include <asm/octeon/cvmx-ciu2-defs.h>
David Daney5b3b1682009-01-08 16:46:40 -080020
/* Locks serializing all updates to the per-CPU CIU enable-mask mirrors. */
static DEFINE_RAW_SPINLOCK(octeon_irq_ciu0_lock);
static DEFINE_RAW_SPINLOCK(octeon_irq_ciu1_lock);

/*
 * Software mirrors of the CIU_INTX_EN0/EN1 enable registers, one copy
 * per CPU, so the enable masks can be read-modify-written without
 * reading the hardware registers back.
 */
static DEFINE_PER_CPU(unsigned long, octeon_irq_ciu0_en_mirror);
static DEFINE_PER_CPU(unsigned long, octeon_irq_ciu1_en_mirror);

/* Reverse map: CIU (line, bit) -> Linux irq number, used at dispatch. */
static __read_mostly u8 octeon_irq_ciu_to_irq[8][64];

/*
 * Per-irq chip data, packed into the irq chip-data pointer value
 * itself (no allocation): the CIU line, the bit within that line,
 * and, for GPIO interrupts, the GPIO line number.
 */
union octeon_ciu_chip_data {
	void *p;
	unsigned long l;
	struct {
		unsigned long line:6;
		unsigned long bit:6;
		unsigned long gpio_line:6;
	} s;
};

/* State for one of the eight MIPS core interrupt lines (IP0..IP7). */
struct octeon_core_chip_data {
	struct mutex core_irq_mutex;
	bool current_en;	/* enable state currently programmed on the cores */
	bool desired_en;	/* enable state requested while the bus was locked */
	u8 bit;			/* CP0 Cause/Status IP bit for this line */
};

#define MIPS_CORE_IRQ_LINES 8

static struct octeon_core_chip_data octeon_irq_core_chip_data[MIPS_CORE_IRQ_LINES];
49
/*
 * Bind a Linux irq to a CIU (line, bit) source: install its chip and
 * flow handler, encode the CIU coordinates into the chip-data pointer,
 * and record the reverse mapping used by the low-level dispatch code.
 */
static void octeon_irq_set_ciu_mapping(int irq, int line, int bit, int gpio_line,
				       struct irq_chip *chip,
				       irq_flow_handler_t handler)
{
	union octeon_ciu_chip_data cd;

	irq_set_chip_and_handler(irq, chip, handler);

	cd.l = 0;
	cd.s.line = line;
	cd.s.bit = bit;
	cd.s.gpio_line = gpio_line;

	irq_set_chip_data(irq, cd.p);
	octeon_irq_ciu_to_irq[line][bit] = irq;
}
66
/*
 * Force a fixed irq <-> hwirq association in the domain.  The hwirq
 * number packs the CIU line in the upper bits and the bit within the
 * line in the low 6 bits.
 */
static void octeon_irq_force_ciu_mapping(struct irq_domain *domain,
					 int irq, int line, int bit)
{
	unsigned int hwirq = (line << 6) | bit;

	irq_domain_associate(domain, irq, hwirq);
}
72
/*
 * Translate a Linux CPU number to the Octeon hardware core id.  On
 * non-SMP builds only the boot core exists.
 */
static int octeon_coreid_for_cpu(int cpu)
{
#ifdef CONFIG_SMP
	return cpu_logical_map(cpu);
#else
	return cvmx_get_core_num();
#endif
}
81
/* Inverse of octeon_coreid_for_cpu(): hardware core id -> Linux CPU. */
static int octeon_cpu_for_coreid(int coreid)
{
#ifdef CONFIG_SMP
	return cpu_number_map(coreid);
#else
	return smp_processor_id();
#endif
}
90
/* Mask the line in CP0 Status; clear Cause for the software interrupts. */
static void octeon_irq_core_ack(struct irq_data *data)
{
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
	unsigned int bit = cd->bit;

	/*
	 * We don't need to disable IRQs to make these atomic since
	 * they are already disabled earlier in the low level
	 * interrupt code.
	 */
	clear_c0_status(0x100 << bit);
	/* The two user interrupts must be cleared manually. */
	if (bit < 2)
		clear_c0_cause(0x100 << bit);
}
106
/* Re-enable the line in CP0 Status after the handler has run. */
static void octeon_irq_core_eoi(struct irq_data *data)
{
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);

	/*
	 * We don't need to disable IRQs to make these atomic since
	 * they are already disabled earlier in the low level
	 * interrupt code.
	 */
	set_c0_status(0x100 << cd->bit);
}
118
/*
 * Cross-call target: apply the desired enable state to the local
 * core's CP0 Status register.  Runs on every CPU via on_each_cpu().
 */
static void octeon_irq_core_set_enable_local(void *arg)
{
	struct irq_data *data = arg;
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
	unsigned int mask = 0x100 << cd->bit;

	/*
	 * Interrupts are already disabled, so these are atomic.
	 */
	if (cd->desired_en)
		set_c0_status(mask);
	else
		clear_c0_status(mask);

}
134
/* Record the request; the hardware change is deferred to bus_sync_unlock. */
static void octeon_irq_core_disable(struct irq_data *data)
{
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
	cd->desired_en = false;
}
140
/* Record the request; the hardware change is deferred to bus_sync_unlock. */
static void octeon_irq_core_enable(struct irq_data *data)
{
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
	cd->desired_en = true;
}
146
/* Serialize enable/disable requests against the deferred cross-call. */
static void octeon_irq_core_bus_lock(struct irq_data *data)
{
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);

	mutex_lock(&cd->core_irq_mutex);
}
153
/*
 * Commit any pending enable-state change on all CPUs, then drop the
 * bus lock.  The cross-call is only issued when the state actually
 * changed.
 */
static void octeon_irq_core_bus_sync_unlock(struct irq_data *data)
{
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);

	if (cd->desired_en != cd->current_en) {
		on_each_cpu(octeon_irq_core_set_enable_local, data, 1);

		cd->current_en = cd->desired_en;
	}

	mutex_unlock(&cd->core_irq_mutex);
}
166
/* irq_chip for the eight MIPS core (CP0 Status/Cause IP) lines. */
static struct irq_chip octeon_irq_chip_core = {
	.name = "Core",
	.irq_enable = octeon_irq_core_enable,
	.irq_disable = octeon_irq_core_disable,
	.irq_ack = octeon_irq_core_ack,
	.irq_eoi = octeon_irq_core_eoi,
	.irq_bus_lock = octeon_irq_core_bus_lock,
	.irq_bus_sync_unlock = octeon_irq_core_bus_sync_unlock,

	.irq_cpu_online = octeon_irq_core_eoi,
	.irq_cpu_offline = octeon_irq_core_ack,
	.flags = IRQCHIP_ONOFFLINE_ENABLED,
};
180
/* Initialize per-line state and chips for the MIPS core interrupt lines. */
static void __init octeon_irq_init_core(void)
{
	int i;
	int irq;
	struct octeon_core_chip_data *cd;

	for (i = 0; i < MIPS_CORE_IRQ_LINES; i++) {
		cd = &octeon_irq_core_chip_data[i];
		cd->current_en = false;
		cd->desired_en = false;
		cd->bit = i;
		mutex_init(&cd->core_irq_mutex);

		irq = OCTEON_IRQ_SW0 + i;
		irq_set_chip_data(irq, cd);
		irq_set_chip_and_handler(irq, &octeon_irq_chip_core,
					 handle_percpu_irq);
	}
}
200
/*
 * Pick the CPU that should service the next occurrence of this irq,
 * round-robining across the online CPUs in the affinity mask.
 *
 * NOTE(review): if the affinity mask has more than one CPU but none of
 * them is online, the for(;;) loop never terminates -- presumably the
 * callers guarantee at least one online CPU in the mask; confirm.
 */
static int next_cpu_for_irq(struct irq_data *data)
{

#ifdef CONFIG_SMP
	int cpu;
	int weight = cpumask_weight(data->affinity);

	if (weight > 1) {
		/* Start from the current CPU and wrap around the mask. */
		cpu = smp_processor_id();
		for (;;) {
			cpu = cpumask_next(cpu, data->affinity);
			if (cpu >= nr_cpu_ids) {
				cpu = -1;	/* wrap around */
				continue;
			} else if (cpumask_test_cpu(cpu, cpu_online_mask)) {
				break;
			}
		}
	} else if (weight == 1) {
		cpu = cpumask_first(data->affinity);
	} else {
		cpu = smp_processor_id();
	}
	return cpu;
#else
	return smp_processor_id();
#endif
}
229
/*
 * Enable the irq on the CPU chosen by next_cpu_for_irq().  Updates the
 * per-CPU enable-mask mirror under the line's lock, then writes the
 * full mask to the CIU enable register for that core.
 */
static void octeon_irq_ciu_enable(struct irq_data *data)
{
	int cpu = next_cpu_for_irq(data);
	int coreid = octeon_coreid_for_cpu(cpu);
	unsigned long *pen;
	unsigned long flags;
	union octeon_ciu_chip_data cd;

	cd.p = irq_data_get_irq_chip_data(data);

	if (cd.s.line == 0) {
		raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
		pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
		set_bit(cd.s.bit, pen);
		cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
		raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);
	} else {
		raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
		pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
		set_bit(cd.s.bit, pen);
		cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
		raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);
	}
}
254
/* Enable the irq on the local core only (mirror + CIU register). */
static void octeon_irq_ciu_enable_local(struct irq_data *data)
{
	unsigned long *pen;
	unsigned long flags;
	union octeon_ciu_chip_data cd;

	cd.p = irq_data_get_irq_chip_data(data);

	if (cd.s.line == 0) {
		raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
		pen = &__get_cpu_var(octeon_irq_ciu0_en_mirror);
		set_bit(cd.s.bit, pen);
		cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen);
		raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);
	} else {
		raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
		pen = &__get_cpu_var(octeon_irq_ciu1_en_mirror);
		set_bit(cd.s.bit, pen);
		cvmx_write_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1), *pen);
		raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);
	}
}
277
/* Disable the irq on the local core only (mirror + CIU register). */
static void octeon_irq_ciu_disable_local(struct irq_data *data)
{
	unsigned long *pen;
	unsigned long flags;
	union octeon_ciu_chip_data cd;

	cd.p = irq_data_get_irq_chip_data(data);

	if (cd.s.line == 0) {
		raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
		pen = &__get_cpu_var(octeon_irq_ciu0_en_mirror);
		clear_bit(cd.s.bit, pen);
		cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen);
		raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);
	} else {
		raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
		pen = &__get_cpu_var(octeon_irq_ciu1_en_mirror);
		clear_bit(cd.s.bit, pen);
		cvmx_write_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1), *pen);
		raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);
	}
}
300
/* Disable the irq on every online core. */
static void octeon_irq_ciu_disable_all(struct irq_data *data)
{
	unsigned long flags;
	unsigned long *pen;
	int cpu;
	union octeon_ciu_chip_data cd;

	wmb(); /* Make sure flag changes arrive before register updates. */

	cd.p = irq_data_get_irq_chip_data(data);

	if (cd.s.line == 0) {
		raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
		for_each_online_cpu(cpu) {
			int coreid = octeon_coreid_for_cpu(cpu);
			pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
			clear_bit(cd.s.bit, pen);
			cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
		}
		raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);
	} else {
		raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
		for_each_online_cpu(cpu) {
			int coreid = octeon_coreid_for_cpu(cpu);
			pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
			clear_bit(cd.s.bit, pen);
			cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
		}
		raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);
	}
}
332
/* Enable the irq on every online core. */
static void octeon_irq_ciu_enable_all(struct irq_data *data)
{
	unsigned long flags;
	unsigned long *pen;
	int cpu;
	union octeon_ciu_chip_data cd;

	cd.p = irq_data_get_irq_chip_data(data);

	if (cd.s.line == 0) {
		raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
		for_each_online_cpu(cpu) {
			int coreid = octeon_coreid_for_cpu(cpu);
			pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
			set_bit(cd.s.bit, pen);
			cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
		}
		raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);
	} else {
		raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
		for_each_online_cpu(cpu) {
			int coreid = octeon_coreid_for_cpu(cpu);
			pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
			set_bit(cd.s.bit, pen);
			cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
		}
		raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);
	}
}
362
/*
 * Enable the irq on the next core in the affinity set for chips that
 * have the EN*_W1{S,C} registers.  The write-1-to-set register makes
 * the update lock-free.
 */
static void octeon_irq_ciu_enable_v2(struct irq_data *data)
{
	u64 mask;
	int cpu = next_cpu_for_irq(data);
	union octeon_ciu_chip_data cd;

	cd.p = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd.s.bit);

	/*
	 * Called under the desc lock, so these should never get out
	 * of sync.
	 */
	if (cd.s.line == 0) {
		int index = octeon_coreid_for_cpu(cpu) * 2;
		set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu));
		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
	} else {
		int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
		set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
	}
}
390
/*
 * Enable the irq on the current CPU for chips that
 * have the EN*_W1{S,C} registers.
 */
static void octeon_irq_ciu_enable_local_v2(struct irq_data *data)
{
	u64 mask;
	union octeon_ciu_chip_data cd;

	cd.p = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd.s.bit);

	if (cd.s.line == 0) {
		int index = cvmx_get_core_num() * 2;
		set_bit(cd.s.bit, &__get_cpu_var(octeon_irq_ciu0_en_mirror));
		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
	} else {
		int index = cvmx_get_core_num() * 2 + 1;
		set_bit(cd.s.bit, &__get_cpu_var(octeon_irq_ciu1_en_mirror));
		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
	}
}
413
/* Disable the irq on the current CPU via the W1C (write-1-to-clear) register. */
static void octeon_irq_ciu_disable_local_v2(struct irq_data *data)
{
	u64 mask;
	union octeon_ciu_chip_data cd;

	cd.p = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd.s.bit);

	if (cd.s.line == 0) {
		int index = cvmx_get_core_num() * 2;
		clear_bit(cd.s.bit, &__get_cpu_var(octeon_irq_ciu0_en_mirror));
		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
	} else {
		int index = cvmx_get_core_num() * 2 + 1;
		clear_bit(cd.s.bit, &__get_cpu_var(octeon_irq_ciu1_en_mirror));
		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
	}
}
432
/*
 * Write to the W1C bit in CVMX_CIU_INTX_SUM0 to clear the irq.
 */
static void octeon_irq_ciu_ack(struct irq_data *data)
{
	u64 mask;
	union octeon_ciu_chip_data cd;

	cd.p = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd.s.bit);

	if (cd.s.line == 0) {
		int index = cvmx_get_core_num() * 2;
		cvmx_write_csr(CVMX_CIU_INTX_SUM0(index), mask);
	} else {
		/* Line 1 sources are cleared in the shared SUM1 register. */
		cvmx_write_csr(CVMX_CIU_INT_SUM1, mask);
	}
}
451
/*
 * Disable the irq on the all cores for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu_disable_all_v2(struct irq_data *data)
{
	int cpu;
	u64 mask;
	union octeon_ciu_chip_data cd;

	wmb(); /* Make sure flag changes arrive before register updates. */

	cd.p = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd.s.bit);

	if (cd.s.line == 0) {
		for_each_online_cpu(cpu) {
			int index = octeon_coreid_for_cpu(cpu) * 2;
			clear_bit(cd.s.bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu));
			cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
		}
	} else {
		for_each_online_cpu(cpu) {
			int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
			clear_bit(cd.s.bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
			cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
		}
	}
}
481
/*
 * Enable the irq on the all cores for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu_enable_all_v2(struct irq_data *data)
{
	int cpu;
	u64 mask;
	union octeon_ciu_chip_data cd;

	cd.p = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd.s.bit);

	if (cd.s.line == 0) {
		for_each_online_cpu(cpu) {
			int index = octeon_coreid_for_cpu(cpu) * 2;
			set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu));
			cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
		}
	} else {
		for_each_online_cpu(cpu) {
			int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
			set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
			cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
		}
	}
}
509
/*
 * Program the GPIO pin's interrupt configuration register from the
 * irq's trigger type: edge vs. level, active-low inversion, and a
 * fixed glitch filter.
 */
static void octeon_irq_gpio_setup(struct irq_data *data)
{
	union cvmx_gpio_bit_cfgx cfg;
	union octeon_ciu_chip_data cd;
	u32 t = irqd_get_trigger_type(data);

	cd.p = irq_data_get_irq_chip_data(data);

	cfg.u64 = 0;
	cfg.s.int_en = 1;
	cfg.s.int_type = (t & IRQ_TYPE_EDGE_BOTH) != 0;
	cfg.s.rx_xor = (t & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_EDGE_FALLING)) != 0;

	/* 140 nS glitch filter*/
	cfg.s.fil_cnt = 7;
	cfg.s.fil_sel = 3;

	cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd.s.gpio_line), cfg.u64);
}
529
/* Configure the GPIO pin, then enable via the lockless v2 path. */
static void octeon_irq_ciu_enable_gpio_v2(struct irq_data *data)
{
	octeon_irq_gpio_setup(data);
	octeon_irq_ciu_enable_v2(data);
}
535
/* Configure the GPIO pin, then enable via the locked v1 path. */
static void octeon_irq_ciu_enable_gpio(struct irq_data *data)
{
	octeon_irq_gpio_setup(data);
	octeon_irq_ciu_enable(data);
}
541
/* Store the new trigger type and reprogram the pin accordingly. */
static int octeon_irq_ciu_gpio_set_type(struct irq_data *data, unsigned int t)
{
	irqd_set_trigger_type(data, t);
	octeon_irq_gpio_setup(data);

	return IRQ_SET_MASK_OK;
}
549
/* Deconfigure the GPIO pin and disable the irq on all cores (v2). */
static void octeon_irq_ciu_disable_gpio_v2(struct irq_data *data)
{
	union octeon_ciu_chip_data cd;

	cd.p = irq_data_get_irq_chip_data(data);
	cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd.s.gpio_line), 0);

	octeon_irq_ciu_disable_all_v2(data);
}
559
/* Deconfigure the GPIO pin and disable the irq on all cores (v1). */
static void octeon_irq_ciu_disable_gpio(struct irq_data *data)
{
	union octeon_ciu_chip_data cd;

	cd.p = irq_data_get_irq_chip_data(data);
	cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd.s.gpio_line), 0);

	octeon_irq_ciu_disable_all(data);
}
569
/* Clear the pending edge interrupt by writing the GPIO INT_CLR W1C bit. */
static void octeon_irq_ciu_gpio_ack(struct irq_data *data)
{
	union octeon_ciu_chip_data cd;
	u64 mask;

	cd.p = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd.s.gpio_line);

	cvmx_write_csr(CVMX_GPIO_INT_CLR, mask);
}
580
/* Dispatch GPIO irqs with the flow handler matching their trigger type. */
static void octeon_irq_handle_gpio(unsigned int irq, struct irq_desc *desc)
{
	if (irqd_get_trigger_type(irq_desc_get_irq_data(desc)) & IRQ_TYPE_EDGE_BOTH)
		handle_edge_irq(irq, desc);
	else
		handle_level_irq(irq, desc);
}
588
David Daney0c326382011-03-25 12:38:51 -0700589#ifdef CONFIG_SMP
590
/*
 * CPU-offline callback: migrate the irq away from the CPU going down.
 * If other CPUs remain in the affinity mask just drop this one,
 * otherwise fall back to the lowest numbered online CPU.
 */
static void octeon_irq_cpu_offline_ciu(struct irq_data *data)
{
	int cpu = smp_processor_id();
	cpumask_t new_affinity;

	if (!cpumask_test_cpu(cpu, data->affinity))
		return;

	if (cpumask_weight(data->affinity) > 1) {
		/*
		 * It has multi CPU affinity, just remove this CPU
		 * from the affinity set.
		 */
		cpumask_copy(&new_affinity, data->affinity);
		cpumask_clear_cpu(cpu, &new_affinity);
	} else {
		/* Otherwise, put it on lowest numbered online CPU. */
		cpumask_clear(&new_affinity);
		cpumask_set_cpu(cpumask_first(cpu_online_mask), &new_affinity);
	}
	__irq_set_affinity_locked(data, &new_affinity);
}
613
/*
 * Move the irq to a single destination CPU: set the enable bit in that
 * CPU's mirror, clear it everywhere else, and rewrite every core's
 * enable register under the line's lock.
 */
static int octeon_irq_ciu_set_affinity(struct irq_data *data,
				       const struct cpumask *dest, bool force)
{
	int cpu;
	bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
	unsigned long flags;
	union octeon_ciu_chip_data cd;

	cd.p = irq_data_get_irq_chip_data(data);

	/*
	 * For non-v2 CIU, we will allow only single CPU affinity.
	 * This removes the need to do locking in the .ack/.eoi
	 * functions.
	 */
	if (cpumask_weight(dest) != 1)
		return -EINVAL;

	if (!enable_one)
		return 0;

	if (cd.s.line == 0) {
		raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
		for_each_online_cpu(cpu) {
			int coreid = octeon_coreid_for_cpu(cpu);
			unsigned long *pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);

			if (cpumask_test_cpu(cpu, dest) && enable_one) {
				enable_one = false;
				set_bit(cd.s.bit, pen);
			} else {
				clear_bit(cd.s.bit, pen);
			}
			cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
		}
		raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);
	} else {
		raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
		for_each_online_cpu(cpu) {
			int coreid = octeon_coreid_for_cpu(cpu);
			unsigned long *pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);

			if (cpumask_test_cpu(cpu, dest) && enable_one) {
				enable_one = false;
				set_bit(cd.s.bit, pen);
			} else {
				clear_bit(cd.s.bit, pen);
			}
			cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
		}
		raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);
	}
	return 0;
}
David Daneycd847b72009-10-13 11:26:03 -0700668
/*
 * Set affinity for the irq for chips that have the EN*_W1{S,C}
 * registers.  Lockless: each core's bit is set or cleared with a
 * single W1S/W1C write.
 */
static int octeon_irq_ciu_set_affinity_v2(struct irq_data *data,
					  const struct cpumask *dest,
					  bool force)
{
	int cpu;
	bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
	u64 mask;
	union octeon_ciu_chip_data cd;

	if (!enable_one)
		return 0;

	cd.p = irq_data_get_irq_chip_data(data);
	mask = 1ull << cd.s.bit;

	if (cd.s.line == 0) {
		for_each_online_cpu(cpu) {
			unsigned long *pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
			int index = octeon_coreid_for_cpu(cpu) * 2;
			if (cpumask_test_cpu(cpu, dest) && enable_one) {
				enable_one = false;
				set_bit(cd.s.bit, pen);
				cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
			} else {
				clear_bit(cd.s.bit, pen);
				cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
			}
		}
	} else {
		for_each_online_cpu(cpu) {
			unsigned long *pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
			int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
			if (cpumask_test_cpu(cpu, dest) && enable_one) {
				enable_one = false;
				set_bit(cd.s.bit, pen);
				cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
			} else {
				clear_bit(cd.s.bit, pen);
				cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
			}
		}
	}
	return 0;
}
David Daney5b3b1682009-01-08 16:46:40 -0800717#endif
718
/*
 * The v1 CIU code already masks things, so supply a dummy version to
 * the core chip code.
 */
static void octeon_irq_dummy_mask(struct irq_data *data)
{
}
726
/*
 * Newer octeon chips have support for lockless CIU operation.
 */
static struct irq_chip octeon_irq_chip_ciu_v2 = {
	.name = "CIU",
	.irq_enable = octeon_irq_ciu_enable_v2,
	.irq_disable = octeon_irq_ciu_disable_all_v2,
	.irq_ack = octeon_irq_ciu_ack,
	.irq_mask = octeon_irq_ciu_disable_local_v2,
	.irq_unmask = octeon_irq_ciu_enable_v2,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity_v2,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};
742
/* Locked (v1) CIU chip; masking is handled in enable/disable. */
static struct irq_chip octeon_irq_chip_ciu = {
	.name = "CIU",
	.irq_enable = octeon_irq_ciu_enable,
	.irq_disable = octeon_irq_ciu_disable_all,
	.irq_ack = octeon_irq_ciu_ack,
	.irq_mask = octeon_irq_dummy_mask,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};
754
/* The mbox versions don't do any affinity or round-robin. */
static struct irq_chip octeon_irq_chip_ciu_mbox_v2 = {
	.name = "CIU-M",
	.irq_enable = octeon_irq_ciu_enable_all_v2,
	.irq_disable = octeon_irq_ciu_disable_all_v2,
	.irq_ack = octeon_irq_ciu_disable_local_v2,
	.irq_eoi = octeon_irq_ciu_enable_local_v2,

	.irq_cpu_online = octeon_irq_ciu_enable_local_v2,
	.irq_cpu_offline = octeon_irq_ciu_disable_local_v2,
	.flags = IRQCHIP_ONOFFLINE_ENABLED,
};
767
/* Locked (v1) mailbox chip; no affinity or round-robin. */
static struct irq_chip octeon_irq_chip_ciu_mbox = {
	.name = "CIU-M",
	.irq_enable = octeon_irq_ciu_enable_all,
	.irq_disable = octeon_irq_ciu_disable_all,

	.irq_cpu_online = octeon_irq_ciu_enable_local,
	.irq_cpu_offline = octeon_irq_ciu_disable_local,
	.flags = IRQCHIP_ONOFFLINE_ENABLED,
};
777
/* GPIO chip for lockless (v2) CIU; masks before changing trigger type. */
static struct irq_chip octeon_irq_chip_ciu_gpio_v2 = {
	.name = "CIU-GPIO",
	.irq_enable = octeon_irq_ciu_enable_gpio_v2,
	.irq_disable = octeon_irq_ciu_disable_gpio_v2,
	.irq_ack = octeon_irq_ciu_gpio_ack,
	.irq_mask = octeon_irq_ciu_disable_local_v2,
	.irq_unmask = octeon_irq_ciu_enable_v2,
	.irq_set_type = octeon_irq_ciu_gpio_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity_v2,
#endif
	.flags = IRQCHIP_SET_TYPE_MASKED,
};
791
/* GPIO chip for locked (v1) CIU. */
static struct irq_chip octeon_irq_chip_ciu_gpio = {
	.name = "CIU-GPIO",
	.irq_enable = octeon_irq_ciu_enable_gpio,
	.irq_disable = octeon_irq_ciu_disable_gpio,
	.irq_mask = octeon_irq_dummy_mask,
	.irq_ack = octeon_irq_ciu_gpio_ack,
	.irq_set_type = octeon_irq_ciu_gpio_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity,
#endif
	.flags = IRQCHIP_SET_TYPE_MASKED,
};
804
/*
 * Watchdog interrupts are special. They are associated with a single
 * core, so we hardwire the affinity to that core.
 */
static void octeon_irq_ciu_wd_enable(struct irq_data *data)
{
	unsigned long flags;
	unsigned long *pen;
	int coreid = data->irq - OCTEON_IRQ_WDOG0;	/* Bit 0-63 of EN1 */
	int cpu = octeon_cpu_for_coreid(coreid);

	raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
	pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
	set_bit(coreid, pen);
	cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
	raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);
}
822
823/*
824 * Watchdog interrupts are special. They are associated with a single
825 * core, so we hardwire the affinity to that core.
826 */
827static void octeon_irq_ciu1_wd_enable_v2(struct irq_data *data)
828{
829 int coreid = data->irq - OCTEON_IRQ_WDOG0;
830 int cpu = octeon_cpu_for_coreid(coreid);
831
832 set_bit(coreid, &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
833 cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(coreid * 2 + 1), 1ull << coreid);
834}
835
836
/* Watchdog irq_chip for v2 CIU parts (per-bit W1S/W1C). */
static struct irq_chip octeon_irq_chip_ciu_wd_v2 = {
	.name = "CIU-W",
	.irq_enable = octeon_irq_ciu1_wd_enable_v2,
	.irq_disable = octeon_irq_ciu_disable_all_v2,
	.irq_mask = octeon_irq_ciu_disable_local_v2,
	.irq_unmask = octeon_irq_ciu_enable_local_v2,
};

/* Watchdog irq_chip for v1 CIU parts. */
static struct irq_chip octeon_irq_chip_ciu_wd = {
	.name = "CIU-W",
	.irq_enable = octeon_irq_ciu_wd_enable,
	.irq_disable = octeon_irq_ciu_disable_all,
	.irq_mask = octeon_irq_dummy_mask,
};
851
David Daneya0c16582012-07-05 18:12:39 +0200852static bool octeon_irq_ciu_is_edge(unsigned int line, unsigned int bit)
853{
854 bool edge = false;
855
856 if (line == 0)
857 switch (bit) {
858 case 48 ... 49: /* GMX DRP */
859 case 50: /* IPD_DRP */
860 case 52 ... 55: /* Timers */
861 case 58: /* MPI */
862 edge = true;
863 break;
864 default:
865 break;
866 }
867 else /* line == 1 */
868 switch (bit) {
869 case 47: /* PTP */
870 edge = true;
871 break;
872 default:
873 break;
874 }
875 return edge;
876}
877
/*
 * Per-domain data for the GPIO irq domains: domain-local hwirqs are
 * offset by base_hwirq to get the global CIU (line << 6 | bit) number.
 */
struct octeon_irq_gpio_domain_data {
	unsigned int base_hwirq;
};
881
882static int octeon_irq_gpio_xlat(struct irq_domain *d,
883 struct device_node *node,
884 const u32 *intspec,
885 unsigned int intsize,
886 unsigned long *out_hwirq,
887 unsigned int *out_type)
888{
889 unsigned int type;
890 unsigned int pin;
891 unsigned int trigger;
David Daneya0c16582012-07-05 18:12:39 +0200892
893 if (d->of_node != node)
894 return -EINVAL;
895
896 if (intsize < 2)
897 return -EINVAL;
898
899 pin = intspec[0];
900 if (pin >= 16)
901 return -EINVAL;
902
903 trigger = intspec[1];
904
905 switch (trigger) {
906 case 1:
907 type = IRQ_TYPE_EDGE_RISING;
908 break;
909 case 2:
910 type = IRQ_TYPE_EDGE_FALLING;
911 break;
912 case 4:
913 type = IRQ_TYPE_LEVEL_HIGH;
914 break;
915 case 8:
916 type = IRQ_TYPE_LEVEL_LOW;
917 break;
918 default:
919 pr_err("Error: (%s) Invalid irq trigger specification: %x\n",
920 node->name,
921 trigger);
922 type = IRQ_TYPE_LEVEL_LOW;
923 break;
924 }
925 *out_type = type;
David Daney87161cc2012-08-10 16:00:31 -0700926 *out_hwirq = pin;
David Daneya0c16582012-07-05 18:12:39 +0200927
928 return 0;
929}
930
931static int octeon_irq_ciu_xlat(struct irq_domain *d,
932 struct device_node *node,
933 const u32 *intspec,
934 unsigned int intsize,
935 unsigned long *out_hwirq,
936 unsigned int *out_type)
937{
938 unsigned int ciu, bit;
939
940 ciu = intspec[0];
941 bit = intspec[1];
942
943 if (ciu > 1 || bit > 63)
944 return -EINVAL;
945
946 /* These are the GPIO lines */
947 if (ciu == 0 && bit >= 16 && bit < 32)
948 return -EINVAL;
949
950 *out_hwirq = (ciu << 6) | bit;
951 *out_type = 0;
952
953 return 0;
954}
955
/* irq_chips selected at init time based on the CIU version present. */
static struct irq_chip *octeon_irq_ciu_chip;
static struct irq_chip *octeon_irq_gpio_chip;
958
959static bool octeon_irq_virq_in_range(unsigned int virq)
960{
961 /* We cannot let it overflow the mapping array. */
962 if (virq < (1ul << 8 * sizeof(octeon_irq_ciu_to_irq[0][0])))
963 return true;
964
965 WARN_ONCE(true, "virq out of range %u.\n", virq);
966 return false;
967}
968
969static int octeon_irq_ciu_map(struct irq_domain *d,
970 unsigned int virq, irq_hw_number_t hw)
971{
972 unsigned int line = hw >> 6;
973 unsigned int bit = hw & 63;
974
975 if (!octeon_irq_virq_in_range(virq))
976 return -EINVAL;
977
978 if (line > 1 || octeon_irq_ciu_to_irq[line][bit] != 0)
979 return -EINVAL;
980
981 if (octeon_irq_ciu_is_edge(line, bit))
David Daney88fd8582012-04-04 15:34:41 -0700982 octeon_irq_set_ciu_mapping(virq, line, bit, 0,
David Daneya0c16582012-07-05 18:12:39 +0200983 octeon_irq_ciu_chip,
984 handle_edge_irq);
985 else
David Daney88fd8582012-04-04 15:34:41 -0700986 octeon_irq_set_ciu_mapping(virq, line, bit, 0,
David Daneya0c16582012-07-05 18:12:39 +0200987 octeon_irq_ciu_chip,
988 handle_level_irq);
989
990 return 0;
991}
992
David Daney88fd8582012-04-04 15:34:41 -0700993static int octeon_irq_gpio_map_common(struct irq_domain *d,
994 unsigned int virq, irq_hw_number_t hw,
995 int line_limit, struct irq_chip *chip)
David Daneya0c16582012-07-05 18:12:39 +0200996{
David Daney87161cc2012-08-10 16:00:31 -0700997 struct octeon_irq_gpio_domain_data *gpiod = d->host_data;
998 unsigned int line, bit;
David Daneya0c16582012-07-05 18:12:39 +0200999
1000 if (!octeon_irq_virq_in_range(virq))
1001 return -EINVAL;
1002
David Daney87161cc2012-08-10 16:00:31 -07001003 hw += gpiod->base_hwirq;
1004 line = hw >> 6;
1005 bit = hw & 63;
David Daney88fd8582012-04-04 15:34:41 -07001006 if (line > line_limit || octeon_irq_ciu_to_irq[line][bit] != 0)
David Daneya0c16582012-07-05 18:12:39 +02001007 return -EINVAL;
1008
David Daney88fd8582012-04-04 15:34:41 -07001009 octeon_irq_set_ciu_mapping(virq, line, bit, hw,
1010 chip, octeon_irq_handle_gpio);
David Daneya0c16582012-07-05 18:12:39 +02001011 return 0;
1012}
1013
David Daney88fd8582012-04-04 15:34:41 -07001014static int octeon_irq_gpio_map(struct irq_domain *d,
1015 unsigned int virq, irq_hw_number_t hw)
1016{
1017 return octeon_irq_gpio_map_common(d, virq, hw, 1, octeon_irq_gpio_chip);
1018}
1019
David Daneya0c16582012-07-05 18:12:39 +02001020static struct irq_domain_ops octeon_irq_domain_ciu_ops = {
1021 .map = octeon_irq_ciu_map,
1022 .xlate = octeon_irq_ciu_xlat,
1023};
1024
1025static struct irq_domain_ops octeon_irq_domain_gpio_ops = {
1026 .map = octeon_irq_gpio_map,
1027 .xlate = octeon_irq_gpio_xlat,
1028};
1029
David Daney0c326382011-03-25 12:38:51 -07001030static void octeon_irq_ip2_v1(void)
1031{
1032 const unsigned long core_id = cvmx_get_core_num();
1033 u64 ciu_sum = cvmx_read_csr(CVMX_CIU_INTX_SUM0(core_id * 2));
1034
1035 ciu_sum &= __get_cpu_var(octeon_irq_ciu0_en_mirror);
1036 clear_c0_status(STATUSF_IP2);
1037 if (likely(ciu_sum)) {
1038 int bit = fls64(ciu_sum) - 1;
1039 int irq = octeon_irq_ciu_to_irq[0][bit];
1040 if (likely(irq))
1041 do_IRQ(irq);
1042 else
1043 spurious_interrupt();
1044 } else {
1045 spurious_interrupt();
1046 }
1047 set_c0_status(STATUSF_IP2);
1048}
1049
/*
 * IP2 dispatch for v2 CIU parts.  Same as the v1 handler but without
 * the IP2 mask/unmask around the dispatch.
 */
static void octeon_irq_ip2_v2(void)
{
	const unsigned long core_id = cvmx_get_core_num();
	u64 ciu_sum = cvmx_read_csr(CVMX_CIU_INTX_SUM0(core_id * 2));

	ciu_sum &= __get_cpu_var(octeon_irq_ciu0_en_mirror);
	if (likely(ciu_sum)) {
		int bit = fls64(ciu_sum) - 1;
		int irq = octeon_irq_ciu_to_irq[0][bit];
		if (likely(irq))
			do_IRQ(irq);
		else
			spurious_interrupt();
	} else {
		spurious_interrupt();
	}
}
/*
 * IP3 dispatch for v1 CIU: read SUM1, mask with this cpu's EN1 mirror
 * and dispatch the highest pending bit.  IP3 is masked for the duration
 * of the dispatch and re-enabled after.
 */
static void octeon_irq_ip3_v1(void)
{
	u64 ciu_sum = cvmx_read_csr(CVMX_CIU_INT_SUM1);

	ciu_sum &= __get_cpu_var(octeon_irq_ciu1_en_mirror);
	clear_c0_status(STATUSF_IP3);
	if (likely(ciu_sum)) {
		int bit = fls64(ciu_sum) - 1;
		int irq = octeon_irq_ciu_to_irq[1][bit];
		if (likely(irq))
			do_IRQ(irq);
		else
			spurious_interrupt();
	} else {
		spurious_interrupt();
	}
	set_c0_status(STATUSF_IP3);
}
1085
/*
 * IP3 dispatch for v2 CIU parts.  Same as the v1 handler but without
 * the IP3 mask/unmask around the dispatch.
 */
static void octeon_irq_ip3_v2(void)
{
	u64 ciu_sum = cvmx_read_csr(CVMX_CIU_INT_SUM1);

	ciu_sum &= __get_cpu_var(octeon_irq_ciu1_en_mirror);
	if (likely(ciu_sum)) {
		int bit = fls64(ciu_sum) - 1;
		int irq = octeon_irq_ciu_to_irq[1][bit];
		if (likely(irq))
			do_IRQ(irq);
		else
			spurious_interrupt();
	} else {
		spurious_interrupt();
	}
}
1102
David Daney88fd8582012-04-04 15:34:41 -07001103static bool octeon_irq_use_ip4;
1104
1105static void __cpuinit octeon_irq_local_enable_ip4(void *arg)
1106{
1107 set_c0_status(STATUSF_IP4);
1108}
1109
David Daney0c326382011-03-25 12:38:51 -07001110static void octeon_irq_ip4_mask(void)
1111{
1112 clear_c0_status(STATUSF_IP4);
1113 spurious_interrupt();
1114}
1115
1116static void (*octeon_irq_ip2)(void);
1117static void (*octeon_irq_ip3)(void);
1118static void (*octeon_irq_ip4)(void);
1119
1120void __cpuinitdata (*octeon_irq_setup_secondary)(void);
1121
David Daney88fd8582012-04-04 15:34:41 -07001122void __cpuinit octeon_irq_set_ip4_handler(octeon_irq_ip4_handler_t h)
1123{
1124 octeon_irq_ip4 = h;
1125 octeon_irq_use_ip4 = true;
1126 on_each_cpu(octeon_irq_local_enable_ip4, NULL, 1);
1127}
1128
David Daney0c326382011-03-25 12:38:51 -07001129static void __cpuinit octeon_irq_percpu_enable(void)
1130{
1131 irq_cpu_online();
1132}
1133
/* Per-cpu v1 CIU init: mask every CIU interrupt for this core. */
static void __cpuinit octeon_irq_init_ciu_percpu(void)
{
	int coreid = cvmx_get_core_num();
	/*
	 * Disable All CIU Interrupts. The ones we need will be
	 * enabled later. Read the SUM register so we know the write
	 * completed.
	 */
	cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2)), 0);
	cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2 + 1)), 0);
	cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2)), 0);
	cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2 + 1)), 0);
	cvmx_read_csr(CVMX_CIU_INTX_SUM0((coreid * 2)));
}
1148
David Daney88fd8582012-04-04 15:34:41 -07001149static void octeon_irq_init_ciu2_percpu(void)
1150{
1151 u64 regx, ipx;
1152 int coreid = cvmx_get_core_num();
1153 u64 base = CVMX_CIU2_EN_PPX_IP2_WRKQ(coreid);
1154
1155 /*
1156 * Disable All CIU2 Interrupts. The ones we need will be
1157 * enabled later. Read the SUM register so we know the write
1158 * completed.
1159 *
1160 * There are 9 registers and 3 IPX levels with strides 0x1000
1161 * and 0x200 respectivly. Use loops to clear them.
1162 */
1163 for (regx = 0; regx <= 0x8000; regx += 0x1000) {
1164 for (ipx = 0; ipx <= 0x400; ipx += 0x200)
1165 cvmx_write_csr(base + regx + ipx, 0);
1166 }
1167
1168 cvmx_read_csr(CVMX_CIU2_SUM_PPX_IP2(coreid));
1169}
1170
David Daney0c326382011-03-25 12:38:51 -07001171static void __cpuinit octeon_irq_setup_secondary_ciu(void)
1172{
1173
1174 __get_cpu_var(octeon_irq_ciu0_en_mirror) = 0;
1175 __get_cpu_var(octeon_irq_ciu1_en_mirror) = 0;
1176
1177 octeon_irq_init_ciu_percpu();
1178 octeon_irq_percpu_enable();
1179
1180 /* Enable the CIU lines */
1181 set_c0_status(STATUSF_IP3 | STATUSF_IP2);
1182 clear_c0_status(STATUSF_IP4);
1183}
1184
David Daney88fd8582012-04-04 15:34:41 -07001185static void octeon_irq_setup_secondary_ciu2(void)
1186{
1187 octeon_irq_init_ciu2_percpu();
1188 octeon_irq_percpu_enable();
1189
1190 /* Enable the CIU lines */
1191 set_c0_status(STATUSF_IP3 | STATUSF_IP2);
1192 if (octeon_irq_use_ip4)
1193 set_c0_status(STATUSF_IP4);
1194 else
1195 clear_c0_status(STATUSF_IP4);
1196}
1197
David Daney0c326382011-03-25 12:38:51 -07001198static void __init octeon_irq_init_ciu(void)
1199{
1200 unsigned int i;
1201 struct irq_chip *chip;
David Daney0c326382011-03-25 12:38:51 -07001202 struct irq_chip *chip_mbox;
1203 struct irq_chip *chip_wd;
David Daneya0c16582012-07-05 18:12:39 +02001204 struct device_node *gpio_node;
1205 struct device_node *ciu_node;
David Daney87161cc2012-08-10 16:00:31 -07001206 struct irq_domain *ciu_domain = NULL;
David Daney0c326382011-03-25 12:38:51 -07001207
1208 octeon_irq_init_ciu_percpu();
1209 octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu;
1210
1211 if (OCTEON_IS_MODEL(OCTEON_CN58XX_PASS2_X) ||
1212 OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) ||
1213 OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X) ||
1214 OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
1215 octeon_irq_ip2 = octeon_irq_ip2_v2;
1216 octeon_irq_ip3 = octeon_irq_ip3_v2;
1217 chip = &octeon_irq_chip_ciu_v2;
David Daney0c326382011-03-25 12:38:51 -07001218 chip_mbox = &octeon_irq_chip_ciu_mbox_v2;
1219 chip_wd = &octeon_irq_chip_ciu_wd_v2;
David Daneya0c16582012-07-05 18:12:39 +02001220 octeon_irq_gpio_chip = &octeon_irq_chip_ciu_gpio_v2;
David Daney0c326382011-03-25 12:38:51 -07001221 } else {
1222 octeon_irq_ip2 = octeon_irq_ip2_v1;
1223 octeon_irq_ip3 = octeon_irq_ip3_v1;
1224 chip = &octeon_irq_chip_ciu;
David Daney0c326382011-03-25 12:38:51 -07001225 chip_mbox = &octeon_irq_chip_ciu_mbox;
1226 chip_wd = &octeon_irq_chip_ciu_wd;
David Daneya0c16582012-07-05 18:12:39 +02001227 octeon_irq_gpio_chip = &octeon_irq_chip_ciu_gpio;
David Daney0c326382011-03-25 12:38:51 -07001228 }
David Daneya0c16582012-07-05 18:12:39 +02001229 octeon_irq_ciu_chip = chip;
David Daney0c326382011-03-25 12:38:51 -07001230 octeon_irq_ip4 = octeon_irq_ip4_mask;
1231
1232 /* Mips internal */
1233 octeon_irq_init_core();
1234
David Daneya0c16582012-07-05 18:12:39 +02001235 gpio_node = of_find_compatible_node(NULL, NULL, "cavium,octeon-3860-gpio");
1236 if (gpio_node) {
1237 struct octeon_irq_gpio_domain_data *gpiod;
1238
1239 gpiod = kzalloc(sizeof(*gpiod), GFP_KERNEL);
1240 if (gpiod) {
1241 /* gpio domain host_data is the base hwirq number. */
1242 gpiod->base_hwirq = 16;
1243 irq_domain_add_linear(gpio_node, 16, &octeon_irq_domain_gpio_ops, gpiod);
1244 of_node_put(gpio_node);
1245 } else
1246 pr_warn("Cannot allocate memory for GPIO irq_domain.\n");
1247 } else
1248 pr_warn("Cannot find device node for cavium,octeon-3860-gpio.\n");
1249
1250 ciu_node = of_find_compatible_node(NULL, NULL, "cavium,octeon-3860-ciu");
1251 if (ciu_node) {
David Daney87161cc2012-08-10 16:00:31 -07001252 ciu_domain = irq_domain_add_tree(ciu_node, &octeon_irq_domain_ciu_ops, NULL);
David Daneya0c16582012-07-05 18:12:39 +02001253 of_node_put(ciu_node);
1254 } else
David Daney87161cc2012-08-10 16:00:31 -07001255 panic("Cannot find device node for cavium,octeon-3860-ciu.");
1256
1257 /* CIU_0 */
1258 for (i = 0; i < 16; i++)
1259 octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i + 0);
1260
David Daney88fd8582012-04-04 15:34:41 -07001261 octeon_irq_set_ciu_mapping(OCTEON_IRQ_MBOX0, 0, 32, 0, chip_mbox, handle_percpu_irq);
1262 octeon_irq_set_ciu_mapping(OCTEON_IRQ_MBOX1, 0, 33, 0, chip_mbox, handle_percpu_irq);
David Daney87161cc2012-08-10 16:00:31 -07001263
1264 for (i = 0; i < 4; i++)
1265 octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_PCI_INT0, 0, i + 36);
1266 for (i = 0; i < 4; i++)
1267 octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 0, i + 40);
1268
1269 octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_RML, 0, 46);
1270 for (i = 0; i < 4; i++)
1271 octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_TIMER0, 0, i + 52);
1272
1273 octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB0, 0, 56);
1274 octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_BOOTDMA, 0, 63);
1275
1276 /* CIU_1 */
1277 for (i = 0; i < 16; i++)
David Daney88fd8582012-04-04 15:34:41 -07001278 octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WDOG0, 1, i + 0, 0, chip_wd, handle_level_irq);
David Daney87161cc2012-08-10 16:00:31 -07001279
1280 octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB1, 1, 17);
David Daneya0c16582012-07-05 18:12:39 +02001281
David Daney0c326382011-03-25 12:38:51 -07001282 /* Enable the CIU lines */
1283 set_c0_status(STATUSF_IP3 | STATUSF_IP2);
1284 clear_c0_status(STATUSF_IP4);
1285}
David Daney5aae1fd2010-07-23 10:43:46 -07001286
David Daney88fd8582012-04-04 15:34:41 -07001287/*
1288 * Watchdog interrupts are special. They are associated with a single
1289 * core, so we hardwire the affinity to that core.
1290 */
1291static void octeon_irq_ciu2_wd_enable(struct irq_data *data)
1292{
1293 u64 mask;
1294 u64 en_addr;
1295 int coreid = data->irq - OCTEON_IRQ_WDOG0;
1296 union octeon_ciu_chip_data cd;
1297
1298 cd.p = irq_data_get_irq_chip_data(data);
1299 mask = 1ull << (cd.s.bit);
1300
1301 en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) + (0x1000ull * cd.s.line);
1302 cvmx_write_csr(en_addr, mask);
1303
1304}
1305
/* Enable the irq on the core chosen by next_cpu_for_irq(). */
static void octeon_irq_ciu2_enable(struct irq_data *data)
{
	u64 mask;
	u64 en_addr;
	int cpu = next_cpu_for_irq(data);
	int coreid = octeon_coreid_for_cpu(cpu);
	union octeon_ciu_chip_data cd;

	cd.p = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd.s.bit);

	/* Per-line stride is 0x1000; W1S sets just this bit. */
	en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) + (0x1000ull * cd.s.line);
	cvmx_write_csr(en_addr, mask);
}
1320
/* Enable (unmask) the irq on the local core only. */
static void octeon_irq_ciu2_enable_local(struct irq_data *data)
{
	u64 mask;
	u64 en_addr;
	int coreid = cvmx_get_core_num();
	union octeon_ciu_chip_data cd;

	cd.p = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd.s.bit);

	en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) + (0x1000ull * cd.s.line);
	cvmx_write_csr(en_addr, mask);

}
1335
/* Disable (mask) the irq on the local core only, via the W1C register. */
static void octeon_irq_ciu2_disable_local(struct irq_data *data)
{
	u64 mask;
	u64 en_addr;
	int coreid = cvmx_get_core_num();
	union octeon_ciu_chip_data cd;

	cd.p = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd.s.bit);

	en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(coreid) + (0x1000ull * cd.s.line);
	cvmx_write_csr(en_addr, mask);

}
1350
/*
 * Acknowledge the irq by writing its bit to the per-line RAW register
 * on the local core.
 */
static void octeon_irq_ciu2_ack(struct irq_data *data)
{
	u64 mask;
	u64 en_addr;
	int coreid = cvmx_get_core_num();
	union octeon_ciu_chip_data cd;

	cd.p = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd.s.bit);

	en_addr = CVMX_CIU2_RAW_PPX_IP2_WRKQ(coreid) + (0x1000ull * cd.s.line);
	cvmx_write_csr(en_addr, mask);

}
1365
/* Disable the irq on every online cpu via each core's W1C register. */
static void octeon_irq_ciu2_disable_all(struct irq_data *data)
{
	int cpu;
	u64 mask;
	union octeon_ciu_chip_data cd;

	cd.p = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd.s.bit);

	for_each_online_cpu(cpu) {
		u64 en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(octeon_coreid_for_cpu(cpu)) + (0x1000ull * cd.s.line);
		cvmx_write_csr(en_addr, mask);
	}
}
1380
/* Enable this mailbox irq on every online cpu (W1S). */
static void octeon_irq_ciu2_mbox_enable_all(struct irq_data *data)
{
	int cpu;
	u64 mask;

	mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0);

	for_each_online_cpu(cpu) {
		u64 en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1S(octeon_coreid_for_cpu(cpu));
		cvmx_write_csr(en_addr, mask);
	}
}
1393
/* Disable this mailbox irq on every online cpu (W1C). */
static void octeon_irq_ciu2_mbox_disable_all(struct irq_data *data)
{
	int cpu;
	u64 mask;

	mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0);

	for_each_online_cpu(cpu) {
		u64 en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1C(octeon_coreid_for_cpu(cpu));
		cvmx_write_csr(en_addr, mask);
	}
}
1406
/* Enable this mailbox irq on the local core only (W1S). */
static void octeon_irq_ciu2_mbox_enable_local(struct irq_data *data)
{
	u64 mask;
	u64 en_addr;
	int coreid = cvmx_get_core_num();

	mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0);
	en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1S(coreid);
	cvmx_write_csr(en_addr, mask);
}
1417
/* Disable this mailbox irq on the local core only (W1C). */
static void octeon_irq_ciu2_mbox_disable_local(struct irq_data *data)
{
	u64 mask;
	u64 en_addr;
	int coreid = cvmx_get_core_num();

	mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0);
	en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1C(coreid);
	cvmx_write_csr(en_addr, mask);
}
1428
#ifdef CONFIG_SMP
/*
 * Route the irq to exactly one cpu of @dest: the enable bit is set on
 * the first online cpu in the mask and cleared on every other core.
 * Disabled or masked irqs are left untouched.
 */
static int octeon_irq_ciu2_set_affinity(struct irq_data *data,
					const struct cpumask *dest, bool force)
{
	int cpu;
	bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
	u64 mask;
	union octeon_ciu_chip_data cd;

	if (!enable_one)
		return 0;

	cd.p = irq_data_get_irq_chip_data(data);
	mask = 1ull << cd.s.bit;

	for_each_online_cpu(cpu) {
		u64 en_addr;
		if (cpumask_test_cpu(cpu, dest) && enable_one) {
			/* First matching cpu gets the W1S (enable) write. */
			enable_one = false;
			en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(octeon_coreid_for_cpu(cpu)) + (0x1000ull * cd.s.line);
		} else {
			en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(octeon_coreid_for_cpu(cpu)) + (0x1000ull * cd.s.line);
		}
		cvmx_write_csr(en_addr, mask);
	}

	return 0;
}
#endif
1458
/* Set up the GPIO pin, then enable its CIU2 irq. */
static void octeon_irq_ciu2_enable_gpio(struct irq_data *data)
{
	octeon_irq_gpio_setup(data);
	octeon_irq_ciu2_enable(data);
}
1464
/* Reset the pin's GPIO_BIT_CFG register and disable the irq on all cpus. */
static void octeon_irq_ciu2_disable_gpio(struct irq_data *data)
{
	union octeon_ciu_chip_data cd;
	cd.p = irq_data_get_irq_chip_data(data);

	cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd.s.gpio_line), 0);

	octeon_irq_ciu2_disable_all(data);
}
1474
/* Generic CIU2 irq_chip for edge/level sources. */
static struct irq_chip octeon_irq_chip_ciu2 = {
	.name = "CIU2-E",
	.irq_enable = octeon_irq_ciu2_enable,
	.irq_disable = octeon_irq_ciu2_disable_all,
	.irq_ack = octeon_irq_ciu2_ack,
	.irq_mask = octeon_irq_ciu2_disable_local,
	.irq_unmask = octeon_irq_ciu2_enable,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu2_set_affinity,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};

/* CIU2 mailbox irq_chip: ack masks locally, eoi re-enables locally. */
static struct irq_chip octeon_irq_chip_ciu2_mbox = {
	.name = "CIU2-M",
	.irq_enable = octeon_irq_ciu2_mbox_enable_all,
	.irq_disable = octeon_irq_ciu2_mbox_disable_all,
	.irq_ack = octeon_irq_ciu2_mbox_disable_local,
	.irq_eoi = octeon_irq_ciu2_mbox_enable_local,

	.irq_cpu_online = octeon_irq_ciu2_mbox_enable_local,
	.irq_cpu_offline = octeon_irq_ciu2_mbox_disable_local,
	.flags = IRQCHIP_ONOFFLINE_ENABLED,
};

/* CIU2 watchdog irq_chip: enable is hardwired to the watchdog's core. */
static struct irq_chip octeon_irq_chip_ciu2_wd = {
	.name = "CIU2-W",
	.irq_enable = octeon_irq_ciu2_wd_enable,
	.irq_disable = octeon_irq_ciu2_disable_all,
	.irq_mask = octeon_irq_ciu2_disable_local,
	.irq_unmask = octeon_irq_ciu2_enable_local,
};

/* CIU2 GPIO irq_chip. */
static struct irq_chip octeon_irq_chip_ciu2_gpio = {
	.name = "CIU-GPIO",
	.irq_enable = octeon_irq_ciu2_enable_gpio,
	.irq_disable = octeon_irq_ciu2_disable_gpio,
	.irq_ack = octeon_irq_ciu_gpio_ack,
	.irq_mask = octeon_irq_ciu2_disable_local,
	.irq_unmask = octeon_irq_ciu2_enable,
	.irq_set_type = octeon_irq_ciu_gpio_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu2_set_affinity,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
	.flags = IRQCHIP_SET_TYPE_MASKED,
};
1522
1523static int octeon_irq_ciu2_xlat(struct irq_domain *d,
1524 struct device_node *node,
1525 const u32 *intspec,
1526 unsigned int intsize,
1527 unsigned long *out_hwirq,
1528 unsigned int *out_type)
1529{
1530 unsigned int ciu, bit;
1531
1532 ciu = intspec[0];
1533 bit = intspec[1];
1534
1535 /* Line 7 are the GPIO lines */
1536 if (ciu > 6 || bit > 63)
1537 return -EINVAL;
1538
1539 *out_hwirq = (ciu << 6) | bit;
1540 *out_type = 0;
1541
1542 return 0;
1543}
1544
/*
 * Return true if the CIU2 source at (line, bit) is edge triggered and
 * therefore needs handle_edge_irq rather than handle_level_irq.
 */
static bool octeon_irq_ciu2_is_edge(unsigned int line, unsigned int bit)
{
	if (line == 3)	/* MIO */
		return bit == 2 ||			/* IPD_DRP */
		       (bit >= 8 && bit <= 11) ||	/* Timers */
		       bit == 48;			/* PTP */

	if (line == 6)	/* PKT */
		return (bit >= 52 && bit <= 53) ||	/* ILK_DRP */
		       (bit >= 8 && bit <= 12);		/* GMX_DRP */

	return false;
}
1570
1571static int octeon_irq_ciu2_map(struct irq_domain *d,
1572 unsigned int virq, irq_hw_number_t hw)
1573{
1574 unsigned int line = hw >> 6;
1575 unsigned int bit = hw & 63;
1576
1577 if (!octeon_irq_virq_in_range(virq))
1578 return -EINVAL;
1579
1580 /* Line 7 are the GPIO lines */
1581 if (line > 6 || octeon_irq_ciu_to_irq[line][bit] != 0)
1582 return -EINVAL;
1583
1584 if (octeon_irq_ciu2_is_edge(line, bit))
1585 octeon_irq_set_ciu_mapping(virq, line, bit, 0,
1586 &octeon_irq_chip_ciu2,
1587 handle_edge_irq);
1588 else
1589 octeon_irq_set_ciu_mapping(virq, line, bit, 0,
1590 &octeon_irq_chip_ciu2,
1591 handle_level_irq);
1592
1593 return 0;
1594}
/* .map for the CIU2 GPIO domain (base_hwirq 7 << 6, lines up to 7). */
static int octeon_irq_ciu2_gpio_map(struct irq_domain *d,
				    unsigned int virq, irq_hw_number_t hw)
{
	return octeon_irq_gpio_map_common(d, virq, hw, 7, &octeon_irq_chip_ciu2_gpio);
}
1600
/* Domain ops for CIU2 interrupt sources. */
static struct irq_domain_ops octeon_irq_domain_ciu2_ops = {
	.map = octeon_irq_ciu2_map,
	.xlate = octeon_irq_ciu2_xlat,
};

/* Domain ops for the CIU2 GPIO lines. */
static struct irq_domain_ops octeon_irq_domain_ciu2_gpio_ops = {
	.map = octeon_irq_ciu2_gpio_map,
	.xlate = octeon_irq_gpio_xlat,
};
1610
/*
 * IP2 dispatch for CIU2: find the highest pending line in the SUM
 * register, then the highest pending source on that line, and dispatch
 * the mapped irq.  Anything unmapped counts as spurious.
 */
static void octeon_irq_ciu2(void)
{
	int line;
	int bit;
	int irq;
	u64 src_reg, src, sum;
	const unsigned long core_id = cvmx_get_core_num();

	sum = cvmx_read_csr(CVMX_CIU2_SUM_PPX_IP2(core_id)) & 0xfful;

	if (unlikely(!sum))
		goto spurious;

	line = fls64(sum) - 1;
	/* Per-line source registers are at 0x1000 strides. */
	src_reg = CVMX_CIU2_SRC_PPX_IP2_WRKQ(core_id) + (0x1000 * line);
	src = cvmx_read_csr(src_reg);

	if (unlikely(!src))
		goto spurious;

	bit = fls64(src) - 1;
	irq = octeon_irq_ciu_to_irq[line][bit];
	if (unlikely(!irq))
		goto spurious;

	do_IRQ(irq);
	goto out;

spurious:
	spurious_interrupt();
out:
	/* CN68XX pass 1.x has an errata that accessing the ACK registers
	   can stop interrupts from propagating */
	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
		cvmx_read_csr(CVMX_CIU2_INTR_CIU_READY);
	else
		cvmx_read_csr(CVMX_CIU2_ACK_PPX_IP2(core_id));
	return;
}
1650
/*
 * IP3 (mailbox) dispatch for CIU2: the mailbox bits live in the top
 * four bits of the IP3 SUM register; dispatch the highest one pending.
 */
static void octeon_irq_ciu2_mbox(void)
{
	int line;

	const unsigned long core_id = cvmx_get_core_num();
	u64 sum = cvmx_read_csr(CVMX_CIU2_SUM_PPX_IP3(core_id)) >> 60;

	if (unlikely(!sum))
		goto spurious;

	line = fls64(sum) - 1;

	do_IRQ(OCTEON_IRQ_MBOX0 + line);
	goto out;

spurious:
	spurious_interrupt();
out:
	/* CN68XX pass 1.x has an errata that accessing the ACK registers
	   can stop interrupts from propagating */
	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
		cvmx_read_csr(CVMX_CIU2_INTR_CIU_READY);
	else
		cvmx_read_csr(CVMX_CIU2_ACK_PPX_IP3(core_id));
	return;
}
1677
/*
 * Boot-time init for CIU2 (CN68XX) systems: install the CIU2 dispatch
 * handlers, create the GPIO and CIU2 irq domains from the device tree,
 * and pre-establish the fixed legacy mappings.
 */
static void __init octeon_irq_init_ciu2(void)
{
	unsigned int i;
	struct device_node *gpio_node;
	struct device_node *ciu_node;
	struct irq_domain *ciu_domain = NULL;

	octeon_irq_init_ciu2_percpu();
	octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu2;

	octeon_irq_ip2 = octeon_irq_ciu2;
	octeon_irq_ip3 = octeon_irq_ciu2_mbox;
	octeon_irq_ip4 = octeon_irq_ip4_mask;

	/* Mips internal */
	octeon_irq_init_core();

	gpio_node = of_find_compatible_node(NULL, NULL, "cavium,octeon-3860-gpio");
	if (gpio_node) {
		struct octeon_irq_gpio_domain_data *gpiod;

		gpiod = kzalloc(sizeof(*gpiod), GFP_KERNEL);
		if (gpiod) {
			/* gpio domain host_data is the base hwirq number. */
			gpiod->base_hwirq = 7 << 6;
			irq_domain_add_linear(gpio_node, 16, &octeon_irq_domain_ciu2_gpio_ops, gpiod);
			of_node_put(gpio_node);
		} else
			pr_warn("Cannot allocate memory for GPIO irq_domain.\n");
	} else
		pr_warn("Cannot find device node for cavium,octeon-3860-gpio.\n");

	ciu_node = of_find_compatible_node(NULL, NULL, "cavium,octeon-6880-ciu2");
	if (ciu_node) {
		ciu_domain = irq_domain_add_tree(ciu_node, &octeon_irq_domain_ciu2_ops, NULL);
		of_node_put(ciu_node);
	} else
		panic("Cannot find device node for cavium,octeon-6880-ciu2.");

	/* CIU2 */
	for (i = 0; i < 64; i++)
		octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i);

	for (i = 0; i < 32; i++)
		octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WDOG0, 1, i, 0,
			&octeon_irq_chip_ciu2_wd, handle_level_irq);

	for (i = 0; i < 4; i++)
		octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_TIMER0, 3, i + 8);

	octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB0, 3, 44);

	for (i = 0; i < 4; i++)
		octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_PCI_INT0, 4, i);

	for (i = 0; i < 4; i++)
		octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 4, i + 8);

	irq_set_chip_and_handler(OCTEON_IRQ_MBOX0, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq);
	irq_set_chip_and_handler(OCTEON_IRQ_MBOX1, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq);
	irq_set_chip_and_handler(OCTEON_IRQ_MBOX2, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq);
	irq_set_chip_and_handler(OCTEON_IRQ_MBOX3, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq);

	/* Enable the CIU lines */
	set_c0_status(STATUSF_IP3 | STATUSF_IP2);
	clear_c0_status(STATUSF_IP4);
}
1745
David Daney5b3b1682009-01-08 16:46:40 -08001746void __init arch_init_irq(void)
1747{
David Daney5b3b1682009-01-08 16:46:40 -08001748#ifdef CONFIG_SMP
1749 /* Set the default affinity to the boot cpu. */
1750 cpumask_clear(irq_default_affinity);
1751 cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
1752#endif
David Daney88fd8582012-04-04 15:34:41 -07001753 if (OCTEON_IS_MODEL(OCTEON_CN68XX))
1754 octeon_irq_init_ciu2();
1755 else
1756 octeon_irq_init_ciu();
David Daney5b3b1682009-01-08 16:46:40 -08001757}
1758
/*
 * Main interrupt dispatch loop: keep dispatching until no enabled cause
 * bits remain.  IP2/IP3/IP4 go through the handlers installed at init;
 * any other pending line maps directly to a MIPS core irq.
 */
asmlinkage void plat_irq_dispatch(void)
{
	unsigned long cop0_cause;
	unsigned long cop0_status;

	while (1) {
		cop0_cause = read_c0_cause();
		cop0_status = read_c0_status();
		/* Only consider lines that are both pending and unmasked. */
		cop0_cause &= cop0_status;
		cop0_cause &= ST0_IM;

		if (unlikely(cop0_cause & STATUSF_IP2))
			octeon_irq_ip2();
		else if (unlikely(cop0_cause & STATUSF_IP3))
			octeon_irq_ip3();
		else if (unlikely(cop0_cause & STATUSF_IP4))
			octeon_irq_ip4();
		else if (likely(cop0_cause))
			do_IRQ(fls(cop0_cause) - 9 + MIPS_CPU_IRQ_BASE);
		else
			break;
	}
}
Ralf Baechle773cb772009-06-23 10:36:38 +01001782
#ifdef CONFIG_HOTPLUG_CPU

/* Called when this cpu goes offline: run the irq_cpu_offline() hooks. */
void fixup_irqs(void)
{
	irq_cpu_offline();
}

#endif /* CONFIG_HOTPLUG_CPU */