/*
 * Marvell Armada 370 and Armada XP SoC IRQ handling
 *
 * Copyright (C) 2012 Marvell
 *
 * Lior Amsalem <alior@marvell.com>
 * Gregory CLEMENT <gregory.clement@free-electrons.com>
 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
 * Ben Dooks <ben.dooks@codethink.co.uk>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/irqdomain.h>
#include <asm/mach/arch.h>
#include <asm/exception.h>
#include <asm/smp_plat.h>
#include <asm/hardware/cache-l2x0.h>

/* Interrupt Controller Registers Map */
#define ARMADA_370_XP_INT_SET_MASK_OFFS		(0x48)
#define ARMADA_370_XP_INT_CLEAR_MASK_OFFS	(0x4C)

#define ARMADA_370_XP_INT_CONTROL		(0x00)
#define ARMADA_370_XP_INT_SET_ENABLE_OFFS	(0x30)
#define ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS	(0x34)
#define ARMADA_370_XP_INT_SOURCE_CTL(irq)	(0x100 + irq*4)

#define ARMADA_370_XP_CPU_INTACK_OFFS		(0x44)

#define ARMADA_370_XP_SW_TRIG_INT_OFFS		(0x4)
#define ARMADA_370_XP_IN_DRBEL_MSK_OFFS		(0xc)
#define ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS	(0x8)

#define ARMADA_370_XP_MAX_PER_CPU_IRQS		(28)

#define ARMADA_370_XP_TIMER0_PER_CPU_IRQ	(5)

#define ACTIVE_DOORBELLS			(8)

static DEFINE_RAW_SPINLOCK(irq_controller_lock);

static void __iomem *per_cpu_int_base;
static void __iomem *main_int_base;
static struct irq_domain *armada_370_xp_mpic_domain;

/*
 * In SMP mode:
 * For shared global interrupts, mask/unmask the global enable bit
 * For per-CPU interrupts, mask/unmask the calling CPU's bit
 */
static void armada_370_xp_irq_mask(struct irq_data *d)
{
#ifdef CONFIG_SMP
	irq_hw_number_t hwirq = irqd_to_hwirq(d);

	if (hwirq != ARMADA_370_XP_TIMER0_PER_CPU_IRQ)
		writel(hwirq, main_int_base +
				ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS);
	else
		writel(hwirq, per_cpu_int_base +
				ARMADA_370_XP_INT_SET_MASK_OFFS);
#else
	writel(irqd_to_hwirq(d),
	       per_cpu_int_base + ARMADA_370_XP_INT_SET_MASK_OFFS);
#endif
}

static void armada_370_xp_irq_unmask(struct irq_data *d)
{
#ifdef CONFIG_SMP
	irq_hw_number_t hwirq = irqd_to_hwirq(d);

	if (hwirq != ARMADA_370_XP_TIMER0_PER_CPU_IRQ)
		writel(hwirq, main_int_base +
				ARMADA_370_XP_INT_SET_ENABLE_OFFS);
	else
		writel(hwirq, per_cpu_int_base +
				ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
#else
	writel(irqd_to_hwirq(d),
	       per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
#endif
}

#ifdef CONFIG_SMP
static int armada_xp_set_affinity(struct irq_data *d,
				  const struct cpumask *mask_val, bool force)
{
	unsigned long reg;
	unsigned long new_mask = 0;
	unsigned long online_mask = 0;
	unsigned long count = 0;
	irq_hw_number_t hwirq = irqd_to_hwirq(d);
	int cpu;

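	/* Build the physical mask of the CPUs requested by the caller */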
	for_each_cpu(cpu, mask_val) {
		new_mask |= 1 << cpu_logical_map(cpu);
		count++;
	}

	/*
	 * Forbid multicore interrupt affinity.
	 * This is required since the MPIC HW doesn't limit
	 * several CPUs from acknowledging the same interrupt.
	 */
	if (count > 1)
		return -EINVAL;

	for_each_cpu(cpu, cpu_online_mask)
		online_mask |= 1 << cpu_logical_map(cpu);

	raw_spin_lock(&irq_controller_lock);

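	/*
	 * Clear the routing bits of every online CPU, then set the bit
	 * of the single CPU requested in new_mask.
	 */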
	reg = readl(main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq));
	reg = (reg & (~online_mask)) | new_mask;
	writel(reg, main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq));

	raw_spin_unlock(&irq_controller_lock);

	return 0;
}
#endif

static struct irq_chip armada_370_xp_irq_chip = {
	.name		= "armada_370_xp_irq",
	.irq_mask	= armada_370_xp_irq_mask,
	.irq_mask_ack	= armada_370_xp_irq_mask,
	.irq_unmask	= armada_370_xp_irq_unmask,
#ifdef CONFIG_SMP
	.irq_set_affinity = armada_xp_set_affinity,
#endif
};

static int armada_370_xp_mpic_irq_map(struct irq_domain *h,
				      unsigned int virq, irq_hw_number_t hw)
{
	armada_370_xp_irq_mask(irq_get_irq_data(virq));
	writel(hw, main_int_base + ARMADA_370_XP_INT_SET_ENABLE_OFFS);
	irq_set_status_flags(virq, IRQ_LEVEL);

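	/*
	 * Timer0 is a per-CPU interrupt: give it a percpu_devid handler.
	 * Every other source is handled as a plain level IRQ.
	 */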
	if (hw == ARMADA_370_XP_TIMER0_PER_CPU_IRQ) {
		irq_set_percpu_devid(virq);
		irq_set_chip_and_handler(virq, &armada_370_xp_irq_chip,
					handle_percpu_devid_irq);

	} else {
		irq_set_chip_and_handler(virq, &armada_370_xp_irq_chip,
					handle_level_irq);
	}
	set_irq_flags(virq, IRQF_VALID | IRQF_PROBE);

	return 0;
}

#ifdef CONFIG_SMP
void armada_mpic_send_doorbell(const struct cpumask *mask, unsigned int irq)
{
	int cpu;
	unsigned long map = 0;

	/* Convert our logical CPU mask into a physical one. */
	for_each_cpu(cpu, mask)
		map |= 1 << cpu_logical_map(cpu);

	/*
	 * Ensure that stores to Normal memory are visible to the
	 * other CPUs before issuing the IPI.
	 */
	dsb();

	/* Submit the software-triggered interrupt (IPI) */
	writel((map << 8) | irq, main_int_base +
		ARMADA_370_XP_SW_TRIG_INT_OFFS);
}

void armada_xp_mpic_smp_cpu_init(void)
{
	/* Clear pending IPIs */
	writel(0, per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);

	/* Enable first 8 IPIs */
	writel((1 << ACTIVE_DOORBELLS) - 1, per_cpu_int_base +
		ARMADA_370_XP_IN_DRBEL_MSK_OFFS);

	/* Unmask IPI interrupt */
	writel(0, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
}
#endif /* CONFIG_SMP */

static struct irq_domain_ops armada_370_xp_mpic_irq_ops = {
	.map = armada_370_xp_mpic_irq_map,
	.xlate = irq_domain_xlate_onecell,
};

static int __init armada_370_xp_mpic_of_init(struct device_node *node,
					     struct device_node *parent)
{
	u32 control;

	main_int_base = of_iomap(node, 0);
	per_cpu_int_base = of_iomap(node, 1);

	BUG_ON(!main_int_base);
	BUG_ON(!per_cpu_int_base);

	control = readl(main_int_base + ARMADA_370_XP_INT_CONTROL);

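	/*
	 * Bits [11:2] of the control register report the number of
	 * interrupt sources handled by the MPIC; size the linear
	 * irq domain accordingly.
	 */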
	armada_370_xp_mpic_domain =
		irq_domain_add_linear(node, (control >> 2) & 0x3ff,
				&armada_370_xp_mpic_irq_ops, NULL);

	if (!armada_370_xp_mpic_domain)
		panic("Unable to add Armada_370_Xp MPIC irq domain (DT)\n");

	irq_set_default_host(armada_370_xp_mpic_domain);

#ifdef CONFIG_SMP
	armada_xp_mpic_smp_cpu_init();

	/*
	 * Set the default affinity from all CPUs to the boot cpu.
	 * This is required since the MPIC doesn't limit
	 * several CPUs from acknowledging the same interrupt.
	 */
	cpumask_clear(irq_default_affinity);
	cpumask_set_cpu(smp_processor_id(), irq_default_affinity);

#endif

	return 0;
}

asmlinkage void __exception_irq_entry armada_370_xp_handle_irq(struct pt_regs
							       *regs)
{
	u32 irqstat, irqnr;

	do {
		irqstat = readl_relaxed(per_cpu_int_base +
					ARMADA_370_XP_CPU_INTACK_OFFS);
		irqnr = irqstat & 0x3FF;

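		/* 0x3FF (all ones) means no interrupt is pending */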
		if (irqnr > 1022)
			break;

		if (irqnr > 0) {
			irqnr = irq_find_mapping(armada_370_xp_mpic_domain,
					irqnr);
			handle_IRQ(irqnr, regs);
			continue;
		}
#ifdef CONFIG_SMP
		/* IPI Handling */
		if (irqnr == 0) {
			u32 ipimask, ipinr;

			ipimask = readl_relaxed(per_cpu_int_base +
						ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS)
				& 0xFF;

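			/* Clear the pending doorbells before handling them */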
			writel(0x0, per_cpu_int_base +
				ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);

			/* Handle all pending doorbells */
			for (ipinr = 0; ipinr < ACTIVE_DOORBELLS; ipinr++) {
				if (ipimask & (0x1 << ipinr))
					handle_IPI(ipinr, regs);
			}
			continue;
		}
#endif

	} while (1);
}

static const struct of_device_id mpic_of_match[] __initconst = {
	{.compatible = "marvell,mpic", .data = armada_370_xp_mpic_of_init},
	{},
};

void __init armada_370_xp_init_irq(void)
{
	of_irq_init(mpic_of_match);
#ifdef CONFIG_CACHE_L2X0
	l2x0_of_init(0, ~0UL);
#endif
}