/*
 * Shared interrupt handling code for IPR and INTC2 types of IRQs.
 *
 * Copyright (C) 2007, 2008 Magnus Damm
 * Copyright (C) 2009, 2010 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/spinlock.h>
#include "internals.h"

static unsigned long ack_handle[NR_IRQS];

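/*
 * Look up the group that contains @enum_id and return the group's own
 * enum ID, or 0 if the vector does not belong to any group.
 */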
static intc_enum __init intc_grp_id(struct intc_desc *desc,
				    intc_enum enum_id)
{
	struct intc_group *g = desc->hw.groups;
	unsigned int i, j;

	for (i = 0; g && enum_id && i < desc->hw.nr_groups; i++) {
		g = desc->hw.groups + i;

		for (j = 0; g->enum_ids[j]; j++) {
			if (g->enum_ids[j] != enum_id)
				continue;

			return g->enum_id;
		}
	}

	return 0;
}

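/*
 * Resumable lookup of the mask register field matching @enum_id, starting
 * at (*reg_idx, *fld_idx).  On a match the register function, mode and bit
 * position are encoded into a handle via _INTC_MK(); 0 is returned once all
 * mask registers have been scanned.  Callers may bump the indices and call
 * again to find further matches.
 */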
static unsigned int __init _intc_mask_data(struct intc_desc *desc,
					   struct intc_desc_int *d,
					   intc_enum enum_id,
					   unsigned int *reg_idx,
					   unsigned int *fld_idx)
{
	struct intc_mask_reg *mr = desc->hw.mask_regs;
	unsigned int fn, mode;
	unsigned long reg_e, reg_d;

	while (mr && enum_id && *reg_idx < desc->hw.nr_mask_regs) {
		mr = desc->hw.mask_regs + *reg_idx;

		for (; *fld_idx < ARRAY_SIZE(mr->enum_ids); (*fld_idx)++) {
			if (mr->enum_ids[*fld_idx] != enum_id)
				continue;

			if (mr->set_reg && mr->clr_reg) {
				fn = REG_FN_WRITE_BASE;
				mode = MODE_DUAL_REG;
				reg_e = mr->clr_reg;
				reg_d = mr->set_reg;
			} else {
				fn = REG_FN_MODIFY_BASE;
				if (mr->set_reg) {
					mode = MODE_ENABLE_REG;
					reg_e = mr->set_reg;
					reg_d = mr->set_reg;
				} else {
					mode = MODE_MASK_REG;
					reg_e = mr->clr_reg;
					reg_d = mr->clr_reg;
				}
			}

			fn += (mr->reg_width >> 3) - 1;
			return _INTC_MK(fn, mode,
					intc_get_reg(d, reg_e),
					intc_get_reg(d, reg_d),
					1,
					(mr->reg_width - 1) - *fld_idx);
		}

		*fld_idx = 0;
		(*reg_idx)++;
	}

	return 0;
}

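/*
 * Return the mask handle for @enum_id.  If no register field refers to the
 * vector directly and @do_grps is set, fall back to the group it belongs to.
 */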
unsigned int __init
intc_get_mask_handle(struct intc_desc *desc, struct intc_desc_int *d,
		     intc_enum enum_id, int do_grps)
{
	unsigned int i = 0;
	unsigned int j = 0;
	unsigned int ret;

	ret = _intc_mask_data(desc, d, enum_id, &i, &j);
	if (ret)
		return ret;

	if (do_grps)
		return intc_get_mask_handle(desc, d, intc_grp_id(desc, enum_id), 0);

	return 0;
}

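/*
 * Resumable lookup of the priority register field matching @enum_id,
 * analogous to _intc_mask_data() but encoding the priority field's width
 * and bit position rather than a single mask bit.
 */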
static unsigned int __init _intc_prio_data(struct intc_desc *desc,
					   struct intc_desc_int *d,
					   intc_enum enum_id,
					   unsigned int *reg_idx,
					   unsigned int *fld_idx)
{
	struct intc_prio_reg *pr = desc->hw.prio_regs;
	unsigned int fn, n, mode, bit;
	unsigned long reg_e, reg_d;

	while (pr && enum_id && *reg_idx < desc->hw.nr_prio_regs) {
		pr = desc->hw.prio_regs + *reg_idx;

		for (; *fld_idx < ARRAY_SIZE(pr->enum_ids); (*fld_idx)++) {
			if (pr->enum_ids[*fld_idx] != enum_id)
				continue;

			if (pr->set_reg && pr->clr_reg) {
				fn = REG_FN_WRITE_BASE;
				mode = MODE_PCLR_REG;
				reg_e = pr->set_reg;
				reg_d = pr->clr_reg;
			} else {
				fn = REG_FN_MODIFY_BASE;
				mode = MODE_PRIO_REG;
				if (!pr->set_reg)
					BUG();
				reg_e = pr->set_reg;
				reg_d = pr->set_reg;
			}

			fn += (pr->reg_width >> 3) - 1;
			n = *fld_idx + 1;

			BUG_ON(n * pr->field_width > pr->reg_width);

			bit = pr->reg_width - (n * pr->field_width);

			return _INTC_MK(fn, mode,
					intc_get_reg(d, reg_e),
					intc_get_reg(d, reg_d),
					pr->field_width, bit);
		}

		*fld_idx = 0;
		(*reg_idx)++;
	}

	return 0;
}

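/*
 * Return the priority handle for @enum_id, optionally falling back to the
 * enclosing group when @do_grps is set.
 */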
unsigned int __init
intc_get_prio_handle(struct intc_desc *desc, struct intc_desc_int *d,
		     intc_enum enum_id, int do_grps)
{
	unsigned int i = 0;
	unsigned int j = 0;
	unsigned int ret;

	ret = _intc_prio_data(desc, d, enum_id, &i, &j);
	if (ret)
		return ret;

	if (do_grps)
		return intc_get_prio_handle(desc, d, intc_grp_id(desc, enum_id), 0);

	return 0;
}

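/*
 * Build the acknowledge handle for @enum_id from the ack register layout.
 */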
static unsigned int __init intc_ack_data(struct intc_desc *desc,
					 struct intc_desc_int *d,
					 intc_enum enum_id)
{
	struct intc_mask_reg *mr = desc->hw.ack_regs;
	unsigned int i, j, fn, mode;
	unsigned long reg_e, reg_d;

	for (i = 0; mr && enum_id && i < desc->hw.nr_ack_regs; i++) {
		mr = desc->hw.ack_regs + i;

		for (j = 0; j < ARRAY_SIZE(mr->enum_ids); j++) {
			if (mr->enum_ids[j] != enum_id)
				continue;

			fn = REG_FN_MODIFY_BASE;
			mode = MODE_ENABLE_REG;
			reg_e = mr->set_reg;
			reg_d = mr->set_reg;

			fn += (mr->reg_width >> 3) - 1;
			return _INTC_MK(fn, mode,
					intc_get_reg(d, reg_e),
					intc_get_reg(d, reg_d),
					1,
					(mr->reg_width - 1) - j);
		}
	}

	return 0;
}

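/*
 * Apply the enable or disable operation encoded in @handle to each
 * per-CPU instance of the register it describes.
 */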
static void intc_enable_disable(struct intc_desc_int *d,
				unsigned long handle, int do_enable)
{
	unsigned long addr;
	unsigned int cpu;
	unsigned long (*fn)(unsigned long, unsigned long,
			    unsigned long (*)(unsigned long, unsigned long,
					      unsigned long),
			    unsigned int);

	if (do_enable) {
		for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_E(handle)); cpu++) {
			addr = INTC_REG(d, _INTC_ADDR_E(handle), cpu);
			fn = intc_enable_noprio_fns[_INTC_MODE(handle)];
			fn(addr, handle, intc_reg_fns[_INTC_FN(handle)], 0);
		}
	} else {
		for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_D(handle)); cpu++) {
			addr = INTC_REG(d, _INTC_ADDR_D(handle), cpu);
			fn = intc_disable_fns[_INTC_MODE(handle)];
			fn(addr, handle, intc_reg_fns[_INTC_FN(handle)], 0);
		}
	}
}

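/*
 * Enable or disable every mask bit and priority field tied to @enum_id by
 * walking the resumable lookups above.
 */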
void __init intc_enable_disable_enum(struct intc_desc *desc,
				     struct intc_desc_int *d,
				     intc_enum enum_id, int enable)
{
	unsigned int i, j, data;

	/* go through and enable/disable all mask bits */
	i = j = 0;
	do {
		data = _intc_mask_data(desc, d, enum_id, &i, &j);
		if (data)
			intc_enable_disable(d, data, enable);
		j++;
	} while (data);

	/* go through and enable/disable all priority fields */
	i = j = 0;
	do {
		data = _intc_prio_data(desc, d, enum_id, &i, &j);
		if (data)
			intc_enable_disable(d, data, enable);

		j++;
	} while (data);
}

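/*
 * Build the sense (trigger type) handle for @enum_id from the sense
 * register layout.
 */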
unsigned int __init
intc_get_sense_handle(struct intc_desc *desc, struct intc_desc_int *d,
		      intc_enum enum_id)
{
	struct intc_sense_reg *sr = desc->hw.sense_regs;
	unsigned int i, j, fn, bit;

	for (i = 0; sr && enum_id && i < desc->hw.nr_sense_regs; i++) {
		sr = desc->hw.sense_regs + i;

		for (j = 0; j < ARRAY_SIZE(sr->enum_ids); j++) {
			if (sr->enum_ids[j] != enum_id)
				continue;

			fn = REG_FN_MODIFY_BASE;
			fn += (sr->reg_width >> 3) - 1;

			BUG_ON((j + 1) * sr->field_width > sr->reg_width);

			bit = sr->reg_width - ((j + 1) * sr->field_width);

			return _INTC_MK(fn, 0, intc_get_reg(d, sr->reg),
					0, sr->field_width, bit);
		}
	}

	return 0;
}

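/*
 * Compute and cache the ack handle for @irq; intc_get_ack_handle() returns
 * the cached value later without rescanning the descriptor.
 */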
void intc_set_ack_handle(unsigned int irq, struct intc_desc *desc,
			 struct intc_desc_int *d, intc_enum id)
{
	unsigned long flags;

	/*
	 * Nothing to do for this IRQ.
	 */
	if (!desc->hw.ack_regs)
		return;

	raw_spin_lock_irqsave(&intc_big_lock, flags);
	ack_handle[irq] = intc_ack_data(desc, d, id);
	raw_spin_unlock_irqrestore(&intc_big_lock, flags);
}

unsigned long intc_get_ack_handle(unsigned int irq)
{
	return ack_handle[irq];
}