/*
 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <bfa.h>
#include <bfi/bfi_ctreg.h>
#include <bfa_port_priv.h>
#include <bfa_intr_priv.h>
#include <cs/bfa_debug.h>

BFA_TRC_FILE(HAL, INTR);

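/**
 * Error interrupt handler - forwards the event to the IOC error ISR.
 */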
static void
bfa_msix_errint(struct bfa_s *bfa, u32 intr)
{
	bfa_ioc_error_isr(&bfa->ioc);
}

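/**
 * Mailbox (LPU) interrupt handler - forwards the event to the IOC
 * mailbox ISR.
 */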
static void
bfa_msix_lpu(struct bfa_s *bfa)
{
	bfa_ioc_mbox_isr(&bfa->ioc);
}

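/**
 * Handler used when all interrupts are routed to a single MSI-X vector;
 * it simply falls back to the INTx-style dispatcher.
 */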
void
bfa_msix_all(struct bfa_s *bfa, int vec)
{
	bfa_intx(bfa);
}

/**
 * hal_intr_api
 */
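/**
 * INTx interrupt dispatcher: reads the interrupt status register, services
 * any pending RME (response) and CPE (request) completion queues, and hands
 * the remaining mailbox/error bits to bfa_msix_lpu_err(). Returns BFA_FALSE
 * when no interrupt was pending, BFA_TRUE otherwise.
 */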
bfa_boolean_t
bfa_intx(struct bfa_s *bfa)
{
	u32 intr, qintr;
	int queue;

	intr = bfa_reg_read(bfa->iocfc.bfa_regs.intr_status);
	if (!intr)
		return BFA_FALSE;

	/**
	 * RME completion queue interrupt
	 */
	qintr = intr & __HFN_INT_RME_MASK;
	bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, qintr);

	for (queue = 0; queue < BFI_IOC_MAX_CQS_ASIC; queue++) {
		if (intr & (__HFN_INT_RME_Q0 << queue))
			bfa_msix_rspq(bfa, queue & (BFI_IOC_MAX_CQS - 1));
	}
	intr &= ~qintr;
	if (!intr)
		return BFA_TRUE;

	/**
	 * CPE completion queue interrupt
	 */
	qintr = intr & __HFN_INT_CPE_MASK;
	bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, qintr);

	for (queue = 0; queue < BFI_IOC_MAX_CQS_ASIC; queue++) {
		if (intr & (__HFN_INT_CPE_Q0 << queue))
			bfa_msix_reqq(bfa, queue & (BFI_IOC_MAX_CQS - 1));
	}
	intr &= ~qintr;
	if (!intr)
		return BFA_TRUE;

	bfa_msix_lpu_err(bfa, intr);

	return BFA_TRUE;
}

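/**
 * Enable interrupt delivery: install the MSI-X vector handlers, unmask the
 * error/halt bits plus the CPE/RME queue and mailbox bits owned by this PCI
 * function (function 0 owns queues 0-3 and LPU0, the other function owns
 * queues 4-7 and LPU1), and select INTx or MSI-X mode based on the number
 * of vectors allocated.
 */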
void
bfa_isr_enable(struct bfa_s *bfa)
{
	u32 intr_unmask;
	int pci_func = bfa_ioc_pcifn(&bfa->ioc);

	bfa_trc(bfa, pci_func);

	bfa_msix_install(bfa);
	intr_unmask = (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 |
		       __HFN_INT_ERR_LPU1 | __HFN_INT_ERR_PSS |
		       __HFN_INT_LL_HALT);

	if (pci_func == 0)
		intr_unmask |= (__HFN_INT_CPE_Q0 | __HFN_INT_CPE_Q1 |
				__HFN_INT_CPE_Q2 | __HFN_INT_CPE_Q3 |
				__HFN_INT_RME_Q0 | __HFN_INT_RME_Q1 |
				__HFN_INT_RME_Q2 | __HFN_INT_RME_Q3 |
				__HFN_INT_MBOX_LPU0);
	else
		intr_unmask |= (__HFN_INT_CPE_Q4 | __HFN_INT_CPE_Q5 |
				__HFN_INT_CPE_Q6 | __HFN_INT_CPE_Q7 |
				__HFN_INT_RME_Q4 | __HFN_INT_RME_Q5 |
				__HFN_INT_RME_Q6 | __HFN_INT_RME_Q7 |
				__HFN_INT_MBOX_LPU1);

	bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, intr_unmask);
	bfa_reg_write(bfa->iocfc.bfa_regs.intr_mask, ~intr_unmask);
	bfa_isr_mode_set(bfa, bfa->msix.nvecs != 0);
}

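/**
 * Disable interrupt delivery: drop back to INTx mode, mask all interrupt
 * sources, and uninstall the MSI-X vector handlers.
 */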
void
bfa_isr_disable(struct bfa_s *bfa)
{
	bfa_isr_mode_set(bfa, BFA_FALSE);
	bfa_reg_write(bfa->iocfc.bfa_regs.intr_mask, -1L);
	bfa_msix_uninstall(bfa);
}

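/**
 * Request queue (CPE) interrupt handler: space has been freed in the
 * request queue, so resume callers that were queued waiting for room.
 */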
void
bfa_msix_reqq(struct bfa_s *bfa, int qid)
{
	struct list_head *waitq, *qe, *qen;
	struct bfa_reqq_wait_s *wqe;

	qid &= (BFI_IOC_MAX_CQS - 1);

	waitq = bfa_reqq(bfa, qid);
	list_for_each_safe(qe, qen, waitq) {
		/**
		 * Invoke callbacks only as long as there is room in the
		 * request queue.
		 */
		if (bfa_reqq_full(bfa, qid))
			break;

		list_del(qe);
		wqe = (struct bfa_reqq_wait_s *) qe;
		wqe->qresume(wqe->cbarg);
	}
}

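/**
 * Catch-all handler for message classes with no registered ISR: trace the
 * offending message header, assert, and stop tracing.
 */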
void
bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	bfa_trc(bfa, m->mhdr.msg_class);
	bfa_trc(bfa, m->mhdr.msg_id);
	bfa_trc(bfa, m->mhdr.mtag.i2htok);
	bfa_assert(0);
	bfa_trc_stop(bfa->trcmod);
}

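/**
 * Response queue (RME) interrupt handler: acknowledge the queue interrupt,
 * dispatch every message between the consumer and producer indices to the
 * ISR registered for its message class, then publish the new consumer
 * index back to hardware.
 */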
void
bfa_msix_rspq(struct bfa_s *bfa, int rsp_qid)
{
	struct bfi_msg_s *m;
	u32 pi, ci;

	bfa_trc_fp(bfa, rsp_qid);

	rsp_qid &= (BFI_IOC_MAX_CQS - 1);

	bfa->iocfc.hwif.hw_rspq_ack(bfa, rsp_qid);

	ci = bfa_rspq_ci(bfa, rsp_qid);
	pi = bfa_rspq_pi(bfa, rsp_qid);

	bfa_trc_fp(bfa, ci);
	bfa_trc_fp(bfa, pi);

	if (bfa->rme_process) {
		while (ci != pi) {
			m = bfa_rspq_elem(bfa, rsp_qid, ci);
			bfa_assert_fp(m->mhdr.msg_class < BFI_MC_MAX);

			bfa_isrs[m->mhdr.msg_class] (bfa, m);

			CQ_INCR(ci, bfa->iocfc.cfg.drvcfg.num_rspq_elems);
		}
	}

	/**
	 * update CI
	 */
	bfa_rspq_ci(bfa, rsp_qid) = pi;
	bfa_reg_write(bfa->iocfc.bfa_regs.rme_q_ci[rsp_qid], pi);
	bfa_os_mmiowb();
}

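/**
 * Mailbox and error interrupt vector handler: service any pending mailbox
 * interrupt, then handle the error/halt bits, clearing the per-cause
 * registers that must be cleared before the interrupt status bits can be
 * acknowledged.
 */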
void
bfa_msix_lpu_err(struct bfa_s *bfa, int vec)
{
	u32 intr, curr_value;

	intr = bfa_reg_read(bfa->iocfc.bfa_regs.intr_status);

	if (intr & (__HFN_INT_MBOX_LPU0 | __HFN_INT_MBOX_LPU1))
		bfa_msix_lpu(bfa);

	intr &= (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 |
		 __HFN_INT_ERR_LPU1 | __HFN_INT_ERR_PSS | __HFN_INT_LL_HALT);

	if (intr) {
		if (intr & __HFN_INT_LL_HALT) {
			/**
			 * If the LL_HALT bit is set, the FW Init Halt LL Port
			 * register needs to be cleared as well so that the
			 * Interrupt Status register can be cleared.
			 */
			curr_value = bfa_reg_read(bfa->ioc.ioc_regs.ll_halt);
			curr_value &= ~__FW_INIT_HALT_P;
			bfa_reg_write(bfa->ioc.ioc_regs.ll_halt, curr_value);
		}

		if (intr & __HFN_INT_ERR_PSS) {
			/**
			 * The ERR_PSS bit needs to be cleared as well in case
			 * interrupts are shared, so the driver's interrupt
			 * handler is still called even though it is already
			 * masked out.
			 */
			curr_value = bfa_reg_read(
					bfa->ioc.ioc_regs.pss_err_status_reg);
			curr_value &= __PSS_ERR_STATUS_SET;
			bfa_reg_write(bfa->ioc.ioc_regs.pss_err_status_reg,
					curr_value);
		}

		bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, intr);
		bfa_msix_errint(bfa, intr);
	}
}

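/**
 * Register the ISR to be invoked for response messages of the given
 * message class.
 */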
void
bfa_isr_bind(enum bfi_mclass mc, bfa_isr_func_t isr_func)
{
	bfa_isrs[mc] = isr_func;
}