/*
 * Platform dependent support for SGI SN
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 2000-2008 Silicon Graphics, Inc.  All Rights Reserved.
 */

#include <linux/irq.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/rculist.h>
#include <asm/sn/addrs.h>
#include <asm/sn/arch.h>
#include <asm/sn/intr.h>
#include <asm/sn/pcibr_provider.h>
#include <asm/sn/pcibus_provider_defs.h>
#include <asm/sn/pcidev.h>
#include <asm/sn/shub_mmr.h>
#include <asm/sn/sn_sal.h>
#include <asm/sn/sn_feature_sets.h>

static void force_interrupt(int irq);
static void register_intr_pda(struct sn_irq_info *sn_irq_info);
static void unregister_intr_pda(struct sn_irq_info *sn_irq_info);

int sn_force_interrupt_flag = 1;
extern int sn_ioif_inited;
struct list_head **sn_irq_lh;
static DEFINE_SPINLOCK(sn_irq_info_lock); /* non-IRQ lock */

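/*
 * Ask the SN PROM, via SAL_CALL_NOLOCK, to allocate an interrupt on the
 * bridge identified by (local_nasid, local_widget).  The sn_irq_info struct
 * is passed to the PROM by physical address; req_irq, req_nasid and
 * req_slice describe the desired vector and target CPU.  Returns the SAL
 * status (0 on success).
 */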
u64 sn_intr_alloc(nasid_t local_nasid, int local_widget,
		  struct sn_irq_info *sn_irq_info,
		  int req_irq, nasid_t req_nasid,
		  int req_slice)
{
	struct ia64_sal_retval ret_stuff;
	ret_stuff.status = 0;
	ret_stuff.v0 = 0;

	SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_INTERRUPT,
			(u64) SAL_INTR_ALLOC, (u64) local_nasid,
			(u64) local_widget, __pa(sn_irq_info), (u64) req_irq,
			(u64) req_nasid, (u64) req_slice);

	return ret_stuff.status;
}

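/*
 * Tell the PROM to release an interrupt it previously allocated, identified
 * by its vector and PROM cookie.
 */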
void sn_intr_free(nasid_t local_nasid, int local_widget,
		  struct sn_irq_info *sn_irq_info)
{
	struct ia64_sal_retval ret_stuff;
	ret_stuff.status = 0;
	ret_stuff.v0 = 0;

	SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_INTERRUPT,
			(u64) SAL_INTR_FREE, (u64) local_nasid,
			(u64) local_widget, (u64) sn_irq_info->irq_irq,
			(u64) sn_irq_info->irq_cookie, 0, 0);
}

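/*
 * Ask the PROM to redirect an already-allocated interrupt to a new
 * nasid/slice without reallocating it.  A non-zero return means the PROM
 * either does not support SAL_INTR_REDIRECT or the call failed.
 */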
u64 sn_intr_redirect(nasid_t local_nasid, int local_widget,
		     struct sn_irq_info *sn_irq_info,
		     nasid_t req_nasid, int req_slice)
{
	struct ia64_sal_retval ret_stuff;
	ret_stuff.status = 0;
	ret_stuff.v0 = 0;

	SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_INTERRUPT,
			(u64) SAL_INTR_REDIRECT, (u64) local_nasid,
			(u64) local_widget, __pa(sn_irq_info),
			(u64) req_nasid, (u64) req_slice, 0);

	return ret_stuff.status;
}

static unsigned int sn_startup_irq(unsigned int irq)
{
	return 0;
}

static void sn_shutdown_irq(unsigned int irq)
{
}

extern void ia64_mca_register_cpev(int);

static void sn_disable_irq(unsigned int irq)
{
	if (irq == local_vector_to_irq(IA64_CPE_VECTOR))
		ia64_mca_register_cpev(0);
}

static void sn_enable_irq(unsigned int irq)
{
	if (irq == local_vector_to_irq(IA64_CPE_VECTOR))
		ia64_mca_register_cpev(irq);
}

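/*
 * Acknowledge an interrupt: clear the latched bits in the local SHub
 * event-occurred register (via its alias), mark the vector as in service in
 * the per-CPU PDA, and let any pending affinity change migrate the irq.
 */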
static void sn_ack_irq(unsigned int irq)
{
	u64 event_occurred, mask;

	irq = irq & 0xff;
	event_occurred = HUB_L((u64*)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED));
	mask = event_occurred & SH_ALL_INT_MASK;
	HUB_S((u64*)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED_ALIAS), mask);
	__set_bit(irq, (volatile void *)pda->sn_in_service_ivecs);

	move_native_irq(irq);
}

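/*
 * End of interrupt: clear the in-service bit and, if sn_force_interrupt_flag
 * is set, ask the bridge to resend any interrupt that may still be pending.
 * The UART vector is rechecked because the driver may have missed an
 * interrupt (see the inline comment below).
 */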
static void sn_end_irq(unsigned int irq)
{
	int ivec;
	u64 event_occurred;

	ivec = irq & 0xff;
	if (ivec == SGI_UART_VECTOR) {
		event_occurred = HUB_L((u64*)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED));
		/* If the UART bit is set here, we may have received an
		 * interrupt from the UART that the driver missed.  To
		 * make sure, we IPI ourselves to force us to look again.
		 */
		if (event_occurred & SH_EVENT_OCCURRED_UART_INT_MASK) {
			platform_send_ipi(smp_processor_id(), SGI_UART_VECTOR,
					  IA64_IPI_DM_INT, 0);
		}
	}
	__clear_bit(ivec, (volatile void *)pda->sn_in_service_ivecs);
	if (sn_force_interrupt_flag)
		force_interrupt(irq);
}

static void sn_irq_info_free(struct rcu_head *head);

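/*
 * Retarget a device interrupt to a new nasid/slice.  Try the
 * SAL_INTR_REDIRECT fast path first; if the PROM does not support it (or the
 * call fails), allocate a new PROM interrupt on the target, free the old
 * one, and swap the sn_irq_info entries under RCU.  Returns the sn_irq_info
 * that is now live, or NULL on failure (or if the irq is not a device
 * interrupt).
 */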
struct sn_irq_info *sn_retarget_vector(struct sn_irq_info *sn_irq_info,
				       nasid_t nasid, int slice)
{
	int vector;
	int cpuid;
#ifdef CONFIG_SMP
	int cpuphys;
#endif
	int64_t bridge;
	int local_widget, status;
	nasid_t local_nasid;
	struct sn_irq_info *new_irq_info;
	struct sn_pcibus_provider *pci_provider;

	bridge = (u64) sn_irq_info->irq_bridge;
	if (!bridge) {
		return NULL; /* irq is not a device interrupt */
	}

	local_nasid = NASID_GET(bridge);

	if (local_nasid & 1)
		local_widget = TIO_SWIN_WIDGETNUM(bridge);
	else
		local_widget = SWIN_WIDGETNUM(bridge);
	vector = sn_irq_info->irq_irq;

	/* Make use of SAL_INTR_REDIRECT if PROM supports it */
	status = sn_intr_redirect(local_nasid, local_widget, sn_irq_info,
				  nasid, slice);
	if (!status) {
		new_irq_info = sn_irq_info;
		goto finish_up;
	}

	/*
	 * PROM does not support SAL_INTR_REDIRECT, or it failed.
	 * Revert to the old method.
	 */
	new_irq_info = kmalloc(sizeof(struct sn_irq_info), GFP_ATOMIC);
	if (new_irq_info == NULL)
		return NULL;

	memcpy(new_irq_info, sn_irq_info, sizeof(struct sn_irq_info));

	/* Free the old PROM new_irq_info structure */
	sn_intr_free(local_nasid, local_widget, new_irq_info);
	unregister_intr_pda(new_irq_info);

	/* allocate a new PROM new_irq_info struct */
	status = sn_intr_alloc(local_nasid, local_widget,
			       new_irq_info, vector,
			       nasid, slice);

	/* SAL call failed */
	if (status) {
		kfree(new_irq_info);
		return NULL;
	}

	register_intr_pda(new_irq_info);
	spin_lock(&sn_irq_info_lock);
	list_replace_rcu(&sn_irq_info->list, &new_irq_info->list);
	spin_unlock(&sn_irq_info_lock);
	call_rcu(&sn_irq_info->rcu, sn_irq_info_free);

finish_up:
	/* Update the kernel's new_irq_info with the new target info */
	cpuid = nasid_slice_to_cpuid(new_irq_info->irq_nasid,
				     new_irq_info->irq_slice);
	new_irq_info->irq_cpuid = cpuid;

	pci_provider = sn_pci_provider[new_irq_info->irq_bridge_type];

	/*
	 * If this represents a line interrupt, target it.  If it's
	 * an msi (irq_int_bit < 0), it's already targeted.
	 */
	if (new_irq_info->irq_int_bit >= 0 &&
	    pci_provider && pci_provider->target_interrupt)
		(pci_provider->target_interrupt)(new_irq_info);

#ifdef CONFIG_SMP
	cpuphys = cpu_physical_id(cpuid);
	set_irq_affinity_info((vector & 0xff), cpuphys, 0);
#endif

	return new_irq_info;
}

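/*
 * irq_chip set_affinity hook: retarget every sn_irq_info hanging off this
 * irq to the first CPU in the requested mask.
 */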
static int sn_set_affinity_irq(unsigned int irq, const struct cpumask *mask)
{
	struct sn_irq_info *sn_irq_info, *sn_irq_info_safe;
	nasid_t nasid;
	int slice;

	nasid = cpuid_to_nasid(cpumask_first(mask));
	slice = cpuid_to_slice(cpumask_first(mask));

	list_for_each_entry_safe(sn_irq_info, sn_irq_info_safe,
				 sn_irq_lh[irq], list)
		(void)sn_retarget_vector(sn_irq_info, nasid, slice);

	return 0;
}

#ifdef CONFIG_SMP
void sn_set_err_irq_affinity(unsigned int irq)
{
	/*
	 * On systems which support CPU disabling (SHub2), all error interrupts
	 * are targeted at the boot CPU.
	 */
	if (is_shub2() && sn_prom_feature_available(PRF_CPU_DISABLE_SUPPORT))
		set_irq_affinity_info(irq, cpu_physical_id(0), 0);
}
#else
void sn_set_err_irq_affinity(unsigned int irq) { }
#endif

static void
sn_mask_irq(unsigned int irq)
{
}

static void
sn_unmask_irq(unsigned int irq)
{
}

struct irq_chip irq_type_sn = {
	.name		= "SN hub",
	.startup	= sn_startup_irq,
	.shutdown	= sn_shutdown_irq,
	.enable		= sn_enable_irq,
	.disable	= sn_disable_irq,
	.ack		= sn_ack_irq,
	.end		= sn_end_irq,
	.mask		= sn_mask_irq,
	.unmask		= sn_unmask_irq,
	.set_affinity	= sn_set_affinity_irq
};

ia64_vector sn_irq_to_vector(int irq)
{
	if (irq >= IA64_NUM_VECTORS)
		return 0;
	return (ia64_vector)irq;
}

unsigned int sn_local_vector_to_irq(u8 vector)
{
	return (CPU_VECTOR_TO_IRQ(smp_processor_id(), vector));
}

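/*
 * Set the ia64 device-vector range to the SN2 values and install the SN hub
 * irq_chip on every irq descriptor that is still using no_irq_chip.
 */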
void sn_irq_init(void)
{
	int i;
	struct irq_desc *base_desc = irq_desc;

	ia64_first_device_vector = IA64_SN2_FIRST_DEVICE_VECTOR;
	ia64_last_device_vector = IA64_SN2_LAST_DEVICE_VECTOR;

	for (i = 0; i < NR_IRQS; i++) {
		if (base_desc[i].chip == &no_irq_chip) {
			base_desc[i].chip = &irq_type_sn;
		}
	}
}

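/*
 * register_intr_pda()/unregister_intr_pda() track the lowest and highest
 * device irq registered on each CPU in its PDA, which bounds the scan done
 * by sn_lb_int_war_check() below.
 */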
static void register_intr_pda(struct sn_irq_info *sn_irq_info)
{
	int irq = sn_irq_info->irq_irq;
	int cpu = sn_irq_info->irq_cpuid;

	if (pdacpu(cpu)->sn_last_irq < irq) {
		pdacpu(cpu)->sn_last_irq = irq;
	}

	if (pdacpu(cpu)->sn_first_irq == 0 || pdacpu(cpu)->sn_first_irq > irq)
		pdacpu(cpu)->sn_first_irq = irq;
}

static void unregister_intr_pda(struct sn_irq_info *sn_irq_info)
{
	int irq = sn_irq_info->irq_irq;
	int cpu = sn_irq_info->irq_cpuid;
	struct sn_irq_info *tmp_irq_info;
	int i, foundmatch;

	rcu_read_lock();
	if (pdacpu(cpu)->sn_last_irq == irq) {
		foundmatch = 0;
		for (i = pdacpu(cpu)->sn_last_irq - 1;
		     i && !foundmatch; i--) {
			list_for_each_entry_rcu(tmp_irq_info,
						sn_irq_lh[i],
						list) {
				if (tmp_irq_info->irq_cpuid == cpu) {
					foundmatch = 1;
					break;
				}
			}
		}
		pdacpu(cpu)->sn_last_irq = i;
	}

	if (pdacpu(cpu)->sn_first_irq == irq) {
		foundmatch = 0;
		for (i = pdacpu(cpu)->sn_first_irq + 1;
		     i < NR_IRQS && !foundmatch; i++) {
			list_for_each_entry_rcu(tmp_irq_info,
						sn_irq_lh[i],
						list) {
				if (tmp_irq_info->irq_cpuid == cpu) {
					foundmatch = 1;
					break;
				}
			}
		}
		pdacpu(cpu)->sn_first_irq = ((i == NR_IRQS) ? 0 : i);
	}
	rcu_read_unlock();
}

static void sn_irq_info_free(struct rcu_head *head)
{
	struct sn_irq_info *sn_irq_info;

	sn_irq_info = container_of(head, struct sn_irq_info, rcu);
	kfree(sn_irq_info);
}

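/*
 * Hook up a PROM-configured device interrupt: record the target CPU, add the
 * sn_irq_info to the per-irq RCU list, reserve the vector, and mark the
 * descriptor so the PROM-chosen affinity is not reset by request_irq().
 */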
void sn_irq_fixup(struct pci_dev *pci_dev, struct sn_irq_info *sn_irq_info)
{
	nasid_t nasid = sn_irq_info->irq_nasid;
	int slice = sn_irq_info->irq_slice;
	int cpu = nasid_slice_to_cpuid(nasid, slice);
#ifdef CONFIG_SMP
	int cpuphys;
	struct irq_desc *desc;
#endif

	pci_dev_get(pci_dev);
	sn_irq_info->irq_cpuid = cpu;
	sn_irq_info->irq_pciioinfo = SN_PCIDEV_INFO(pci_dev);

	/* link it into the sn_irq[irq] list */
	spin_lock(&sn_irq_info_lock);
	list_add_rcu(&sn_irq_info->list, sn_irq_lh[sn_irq_info->irq_irq]);
	reserve_irq_vector(sn_irq_info->irq_irq);
	spin_unlock(&sn_irq_info_lock);

	register_intr_pda(sn_irq_info);
#ifdef CONFIG_SMP
	cpuphys = cpu_physical_id(cpu);
	set_irq_affinity_info(sn_irq_info->irq_irq, cpuphys, 0);
	desc = irq_to_desc(sn_irq_info->irq_irq);
	/*
	 * Affinity was set by the PROM, prevent it from
	 * being reset by the request_irq() path.
	 */
	desc->status |= IRQ_AFFINITY_SET;
#endif
}

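/*
 * Undo sn_irq_fixup() when a PCI device is removed: drop the sn_irq_info
 * from the per-irq list, free the vector if the list is now empty, and free
 * the structure after an RCU grace period.
 */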
void sn_irq_unfixup(struct pci_dev *pci_dev)
{
	struct sn_irq_info *sn_irq_info;

	/* Only clean up IRQ stuff if this device has a host bus context */
	if (!SN_PCIDEV_BUSSOFT(pci_dev))
		return;

	sn_irq_info = SN_PCIDEV_INFO(pci_dev)->pdi_sn_irq_info;
	if (!sn_irq_info)
		return;
	if (!sn_irq_info->irq_irq) {
		kfree(sn_irq_info);
		return;
	}

	unregister_intr_pda(sn_irq_info);
	spin_lock(&sn_irq_info_lock);
	list_del_rcu(&sn_irq_info->list);
	spin_unlock(&sn_irq_info_lock);
	if (list_empty(sn_irq_lh[sn_irq_info->irq_irq]))
		free_irq_vector(sn_irq_info->irq_irq);
	call_rcu(&sn_irq_info->rcu, sn_irq_info_free);
	pci_dev_put(pci_dev);
}

static inline void
sn_call_force_intr_provider(struct sn_irq_info *sn_irq_info)
{
	struct sn_pcibus_provider *pci_provider;

	pci_provider = sn_pci_provider[sn_irq_info->irq_bridge_type];

	/* Don't force an interrupt if the irq has been disabled */
	if (!(irq_desc[sn_irq_info->irq_irq].status & IRQ_DISABLED) &&
	    pci_provider && pci_provider->force_interrupt)
		(*pci_provider->force_interrupt)(sn_irq_info);
}

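/*
 * Walk every sn_irq_info registered on this irq (under RCU) and ask its
 * bridge provider to resend the interrupt.
 */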
static void force_interrupt(int irq)
{
	struct sn_irq_info *sn_irq_info;

	if (!sn_ioif_inited)
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(sn_irq_info, sn_irq_lh[irq], list)
		sn_call_force_intr_provider(sn_irq_info);

	rcu_read_unlock();
}

/*
 * Check for lost interrupts.  If the PIC int_status reg. says that
 * an interrupt has been sent, but not handled, and the interrupt
 * is not pending in either the cpu irr regs or in the soft irr regs,
 * and the interrupt is not in service, then the interrupt may have
 * been lost.  Force an interrupt on that pin.  It is possible that
 * the interrupt is in flight, so we may generate a spurious interrupt,
 * but we should never miss a real lost interrupt.
 */
static void sn_check_intr(int irq, struct sn_irq_info *sn_irq_info)
{
	u64 regval;
	struct pcidev_info *pcidev_info;
	struct pcibus_info *pcibus_info;

	/*
	 * Bridge types attached to TIO (anything but PIC) do not need this
	 * WAR since they do not target Shub II interrupt registers.  If that
	 * ever changes, this check needs to accommodate it.
	 */
	if (sn_irq_info->irq_bridge_type != PCIIO_ASIC_TYPE_PIC)
		return;

	pcidev_info = (struct pcidev_info *)sn_irq_info->irq_pciioinfo;
	if (!pcidev_info)
		return;

	pcibus_info =
	    (struct pcibus_info *)pcidev_info->pdi_host_pcidev_info->
	    pdi_pcibus_info;
	regval = pcireg_intr_status_get(pcibus_info);

	if (!ia64_get_irr(irq_to_vector(irq))) {
		if (!test_bit(irq, pda->sn_in_service_ivecs)) {
			regval &= 0xff;
			if (sn_irq_info->irq_int_bit & regval &
			    sn_irq_info->irq_last_intr) {
				regval &= ~(sn_irq_info->irq_int_bit & regval);
				sn_call_force_intr_provider(sn_irq_info);
			}
		}
	}
	sn_irq_info->irq_last_intr = regval;
}

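/*
 * Walk every irq in this CPU's registered range (sn_first_irq..sn_last_irq)
 * and run sn_check_intr() on each entry, forcing any interrupt that appears
 * to have been lost.
 */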
void sn_lb_int_war_check(void)
{
	struct sn_irq_info *sn_irq_info;
	int i;

	if (!sn_ioif_inited || pda->sn_first_irq == 0)
		return;

	rcu_read_lock();
	for (i = pda->sn_first_irq; i <= pda->sn_last_irq; i++) {
		list_for_each_entry_rcu(sn_irq_info, sn_irq_lh[i], list) {
			sn_check_intr(i, sn_irq_info);
		}
	}
	rcu_read_unlock();
}

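/*
 * Allocate and initialize the per-irq list heads used throughout this file;
 * called once at boot, so allocation failure is fatal.
 */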
void __init sn_irq_lh_init(void)
{
	int i;

	sn_irq_lh = kmalloc(sizeof(struct list_head *) * NR_IRQS, GFP_KERNEL);
	if (!sn_irq_lh)
		panic("SN PCI INIT: Failed to allocate memory for PCI init\n");

	for (i = 0; i < NR_IRQS; i++) {
		sn_irq_lh[i] = kmalloc(sizeof(struct list_head), GFP_KERNEL);
		if (!sn_irq_lh[i])
			panic("SN PCI INIT: Failed IRQ memory allocation\n");

		INIT_LIST_HEAD(sn_irq_lh[i]);
	}
}