/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2010 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include <linux/delay.h>
#include <linux/pci.h>

#define MASK(n) ((1ULL<<(n))-1)
#define MN_WIN(addr) (((addr & 0x1fc0000) >> 1) | \
	((addr >> 25) & 0x3ff))
#define OCM_WIN(addr) (((addr & 0x1ff0000) >> 1) | \
	((addr >> 25) & 0x3ff))
#define MS_WIN(addr) (addr & 0x0ffc0000)
#define QLA82XX_PCI_MN_2M   (0)
#define QLA82XX_PCI_MS_2M   (0x80000)
#define QLA82XX_PCI_OCM0_2M (0xc0000)
#define VALID_OCM_ADDR(addr) (((addr) & 0x3f800) != 0x3f800)
#define GET_MEM_OFFS_2M(addr) (addr & MASK(18))
#define BLOCK_PROTECT_BITS 0x0F

/* CRB window related */
#define CRB_BLK(off)	((off >> 20) & 0x3f)
#define CRB_SUBBLK(off)	((off >> 16) & 0xf)
#define CRB_WINDOW_2M	(0x130060)
#define QLA82XX_PCI_CAMQM_2M_END	(0x04800800UL)
#define CRB_HI(off)	((qla82xx_crb_hub_agt[CRB_BLK(off)] << 20) | \
			((off) & 0xf0000))
#define QLA82XX_PCI_CAMQM_2M_BASE	(0x000ff800UL)
#define CRB_INDIRECT_2M	(0x1e0000UL)

#define MAX_CRB_XFORM 60
static unsigned long crb_addr_xform[MAX_CRB_XFORM];
int qla82xx_crb_table_initialized;

#define qla82xx_crb_addr_transform(name) \
	(crb_addr_xform[QLA82XX_HW_PX_MAP_CRB_##name] = \
	QLA82XX_HW_CRB_HUB_AGT_ADR_##name << 20)

static void qla82xx_crb_addr_transform_setup(void)
{
	qla82xx_crb_addr_transform(XDMA);
	qla82xx_crb_addr_transform(TIMR);
	qla82xx_crb_addr_transform(SRE);
	qla82xx_crb_addr_transform(SQN3);
	qla82xx_crb_addr_transform(SQN2);
	qla82xx_crb_addr_transform(SQN1);
	qla82xx_crb_addr_transform(SQN0);
	qla82xx_crb_addr_transform(SQS3);
	qla82xx_crb_addr_transform(SQS2);
	qla82xx_crb_addr_transform(SQS1);
	qla82xx_crb_addr_transform(SQS0);
	qla82xx_crb_addr_transform(RPMX7);
	qla82xx_crb_addr_transform(RPMX6);
	qla82xx_crb_addr_transform(RPMX5);
	qla82xx_crb_addr_transform(RPMX4);
	qla82xx_crb_addr_transform(RPMX3);
	qla82xx_crb_addr_transform(RPMX2);
	qla82xx_crb_addr_transform(RPMX1);
	qla82xx_crb_addr_transform(RPMX0);
	qla82xx_crb_addr_transform(ROMUSB);
	qla82xx_crb_addr_transform(SN);
	qla82xx_crb_addr_transform(QMN);
	qla82xx_crb_addr_transform(QMS);
	qla82xx_crb_addr_transform(PGNI);
	qla82xx_crb_addr_transform(PGND);
	qla82xx_crb_addr_transform(PGN3);
	qla82xx_crb_addr_transform(PGN2);
	qla82xx_crb_addr_transform(PGN1);
	qla82xx_crb_addr_transform(PGN0);
	qla82xx_crb_addr_transform(PGSI);
	qla82xx_crb_addr_transform(PGSD);
	qla82xx_crb_addr_transform(PGS3);
	qla82xx_crb_addr_transform(PGS2);
	qla82xx_crb_addr_transform(PGS1);
	qla82xx_crb_addr_transform(PGS0);
	qla82xx_crb_addr_transform(PS);
	qla82xx_crb_addr_transform(PH);
	qla82xx_crb_addr_transform(NIU);
	qla82xx_crb_addr_transform(I2Q);
	qla82xx_crb_addr_transform(EG);
	qla82xx_crb_addr_transform(MN);
	qla82xx_crb_addr_transform(MS);
	qla82xx_crb_addr_transform(CAS2);
	qla82xx_crb_addr_transform(CAS1);
	qla82xx_crb_addr_transform(CAS0);
	qla82xx_crb_addr_transform(CAM);
	qla82xx_crb_addr_transform(C2C1);
	qla82xx_crb_addr_transform(C2C0);
	qla82xx_crb_addr_transform(SMB);
	qla82xx_crb_addr_transform(OCM0);
	/*
	 * Used only in P3; just define it for P2 also.
	 */
	qla82xx_crb_addr_transform(I2C0);

	qla82xx_crb_table_initialized = 1;
}

struct crb_128M_2M_block_map crb_128M_2M_map[64] = {
	{{{0, 0, 0, 0} } },
	{{{1, 0x0100000, 0x0102000, 0x120000},
	{1, 0x0110000, 0x0120000, 0x130000},
	{1, 0x0120000, 0x0122000, 0x124000},
	{1, 0x0130000, 0x0132000, 0x126000},
	{1, 0x0140000, 0x0142000, 0x128000},
	{1, 0x0150000, 0x0152000, 0x12a000},
	{1, 0x0160000, 0x0170000, 0x110000},
	{1, 0x0170000, 0x0172000, 0x12e000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{1, 0x01e0000, 0x01e0800, 0x122000},
	{0, 0x0000000, 0x0000000, 0x000000} } },
	{{{1, 0x0200000, 0x0210000, 0x180000} } },
	{{{0, 0, 0, 0} } },
	{{{1, 0x0400000, 0x0401000, 0x169000} } },
	{{{1, 0x0500000, 0x0510000, 0x140000} } },
	{{{1, 0x0600000, 0x0610000, 0x1c0000} } },
	{{{1, 0x0700000, 0x0704000, 0x1b8000} } },
	{{{1, 0x0800000, 0x0802000, 0x170000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{1, 0x08f0000, 0x08f2000, 0x172000} } },
	{{{1, 0x0900000, 0x0902000, 0x174000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{1, 0x09f0000, 0x09f2000, 0x176000} } },
	{{{0, 0x0a00000, 0x0a02000, 0x178000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{1, 0x0af0000, 0x0af2000, 0x17a000} } },
	{{{0, 0x0b00000, 0x0b02000, 0x17c000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{1, 0x0bf0000, 0x0bf2000, 0x17e000} } },
	{{{1, 0x0c00000, 0x0c04000, 0x1d4000} } },
	{{{1, 0x0d00000, 0x0d04000, 0x1a4000} } },
	{{{1, 0x0e00000, 0x0e04000, 0x1a0000} } },
	{{{1, 0x0f00000, 0x0f01000, 0x164000} } },
	{{{0, 0x1000000, 0x1004000, 0x1a8000} } },
	{{{1, 0x1100000, 0x1101000, 0x160000} } },
	{{{1, 0x1200000, 0x1201000, 0x161000} } },
	{{{1, 0x1300000, 0x1301000, 0x162000} } },
	{{{1, 0x1400000, 0x1401000, 0x163000} } },
	{{{1, 0x1500000, 0x1501000, 0x165000} } },
	{{{1, 0x1600000, 0x1601000, 0x166000} } },
	{{{0, 0, 0, 0} } },
	{{{0, 0, 0, 0} } },
	{{{0, 0, 0, 0} } },
	{{{0, 0, 0, 0} } },
	{{{0, 0, 0, 0} } },
	{{{0, 0, 0, 0} } },
	{{{1, 0x1d00000, 0x1d10000, 0x190000} } },
	{{{1, 0x1e00000, 0x1e01000, 0x16a000} } },
	{{{1, 0x1f00000, 0x1f10000, 0x150000} } },
	{{{0} } },
	{{{1, 0x2100000, 0x2102000, 0x120000},
	{1, 0x2110000, 0x2120000, 0x130000},
	{1, 0x2120000, 0x2122000, 0x124000},
	{1, 0x2130000, 0x2132000, 0x126000},
	{1, 0x2140000, 0x2142000, 0x128000},
	{1, 0x2150000, 0x2152000, 0x12a000},
	{1, 0x2160000, 0x2170000, 0x110000},
	{1, 0x2170000, 0x2172000, 0x12e000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000},
	{0, 0x0000000, 0x0000000, 0x000000} } },
	{{{1, 0x2200000, 0x2204000, 0x1b0000} } },
	{{{0} } },
	{{{0} } },
	{{{0} } },
	{{{0} } },
	{{{0} } },
	{{{1, 0x2800000, 0x2804000, 0x1a4000} } },
	{{{1, 0x2900000, 0x2901000, 0x16b000} } },
	{{{1, 0x2a00000, 0x2a00400, 0x1ac400} } },
	{{{1, 0x2b00000, 0x2b00400, 0x1ac800} } },
	{{{1, 0x2c00000, 0x2c00400, 0x1acc00} } },
	{{{1, 0x2d00000, 0x2d00400, 0x1ad000} } },
	{{{1, 0x2e00000, 0x2e00400, 0x1ad400} } },
	{{{1, 0x2f00000, 0x2f00400, 0x1ad800} } },
	{{{1, 0x3000000, 0x3000400, 0x1adc00} } },
	{{{0, 0x3100000, 0x3104000, 0x1a8000} } },
	{{{1, 0x3200000, 0x3204000, 0x1d4000} } },
	{{{1, 0x3300000, 0x3304000, 0x1a0000} } },
	{{{0} } },
	{{{1, 0x3500000, 0x3500400, 0x1ac000} } },
	{{{1, 0x3600000, 0x3600400, 0x1ae000} } },
	{{{1, 0x3700000, 0x3700400, 0x1ae400} } },
	{{{1, 0x3800000, 0x3804000, 0x1d0000} } },
	{{{1, 0x3900000, 0x3904000, 0x1b4000} } },
	{{{1, 0x3a00000, 0x3a04000, 0x1d8000} } },
	{{{0} } },
	{{{0} } },
	{{{1, 0x3d00000, 0x3d04000, 0x1dc000} } },
	{{{1, 0x3e00000, 0x3e01000, 0x167000} } },
	{{{1, 0x3f00000, 0x3f01000, 0x168000} } }
};

/*
 * top 12 bits of crb internal address (hub, agent)
 */
unsigned qla82xx_crb_hub_agt[64] = {
	0,
	QLA82XX_HW_CRB_HUB_AGT_ADR_PS,
	QLA82XX_HW_CRB_HUB_AGT_ADR_MN,
	QLA82XX_HW_CRB_HUB_AGT_ADR_MS,
	0,
	QLA82XX_HW_CRB_HUB_AGT_ADR_SRE,
	QLA82XX_HW_CRB_HUB_AGT_ADR_NIU,
	QLA82XX_HW_CRB_HUB_AGT_ADR_QMN,
	QLA82XX_HW_CRB_HUB_AGT_ADR_SQN0,
	QLA82XX_HW_CRB_HUB_AGT_ADR_SQN1,
	QLA82XX_HW_CRB_HUB_AGT_ADR_SQN2,
	QLA82XX_HW_CRB_HUB_AGT_ADR_SQN3,
	QLA82XX_HW_CRB_HUB_AGT_ADR_I2Q,
	QLA82XX_HW_CRB_HUB_AGT_ADR_TIMR,
	QLA82XX_HW_CRB_HUB_AGT_ADR_ROMUSB,
	QLA82XX_HW_CRB_HUB_AGT_ADR_PGN4,
	QLA82XX_HW_CRB_HUB_AGT_ADR_XDMA,
	QLA82XX_HW_CRB_HUB_AGT_ADR_PGN0,
	QLA82XX_HW_CRB_HUB_AGT_ADR_PGN1,
	QLA82XX_HW_CRB_HUB_AGT_ADR_PGN2,
	QLA82XX_HW_CRB_HUB_AGT_ADR_PGN3,
	QLA82XX_HW_CRB_HUB_AGT_ADR_PGND,
	QLA82XX_HW_CRB_HUB_AGT_ADR_PGNI,
	QLA82XX_HW_CRB_HUB_AGT_ADR_PGS0,
	QLA82XX_HW_CRB_HUB_AGT_ADR_PGS1,
	QLA82XX_HW_CRB_HUB_AGT_ADR_PGS2,
	QLA82XX_HW_CRB_HUB_AGT_ADR_PGS3,
	0,
	QLA82XX_HW_CRB_HUB_AGT_ADR_PGSI,
	QLA82XX_HW_CRB_HUB_AGT_ADR_SN,
	0,
	QLA82XX_HW_CRB_HUB_AGT_ADR_EG,
	0,
	QLA82XX_HW_CRB_HUB_AGT_ADR_PS,
	QLA82XX_HW_CRB_HUB_AGT_ADR_CAM,
	0,
	0,
	0,
	0,
	0,
	QLA82XX_HW_CRB_HUB_AGT_ADR_TIMR,
	0,
	QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX1,
	QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX2,
	QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX3,
	QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX4,
	QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX5,
	QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX6,
	QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX7,
	QLA82XX_HW_CRB_HUB_AGT_ADR_XDMA,
	QLA82XX_HW_CRB_HUB_AGT_ADR_I2Q,
	QLA82XX_HW_CRB_HUB_AGT_ADR_ROMUSB,
	0,
	QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX0,
	QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX8,
	QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX9,
	QLA82XX_HW_CRB_HUB_AGT_ADR_OCM0,
	0,
	QLA82XX_HW_CRB_HUB_AGT_ADR_SMB,
	QLA82XX_HW_CRB_HUB_AGT_ADR_I2C0,
	QLA82XX_HW_CRB_HUB_AGT_ADR_I2C1,
	0,
	QLA82XX_HW_CRB_HUB_AGT_ADR_PGNC,
	0,
};

/* Device states */
char *qdev_state[] = {
	"Unknown",
	"Cold",
	"Initializing",
	"Ready",
	"Need Reset",
	"Need Quiescent",
	"Failed",
	"Quiescent",
};

/*
 * In: 'off' is offset from CRB space in 128M pci map
 * Out: 'off' is 2M pci map addr
 * side effect: lock crb window
 */
static void
qla82xx_pci_set_crbwindow_2M(struct qla_hw_data *ha, ulong *off)
{
	u32 win_read;

	ha->crb_win = CRB_HI(*off);
	writel(ha->crb_win,
		(void *)(CRB_WINDOW_2M + ha->nx_pcibase));

	/* Read back value to make sure write has gone through before trying
	 * to use it.
	 */
	win_read = RD_REG_DWORD((void *)(CRB_WINDOW_2M + ha->nx_pcibase));
	if (win_read != ha->crb_win) {
		DEBUG2(qla_printk(KERN_INFO, ha,
		    "%s: Written crbwin (0x%x) != Read crbwin (0x%x), "
		    "off=0x%lx\n", __func__, ha->crb_win, win_read, *off));
	}
	*off = (*off & MASK(16)) + CRB_INDIRECT_2M + ha->nx_pcibase;
}

static inline unsigned long
qla82xx_pci_set_crbwindow(struct qla_hw_data *ha, u64 off)
{
	/* See if we are currently pointing to the region we want to use next */
	if ((off >= QLA82XX_CRB_PCIX_HOST) && (off < QLA82XX_CRB_DDR_NET)) {
		/* No need to change window. PCIX and PCIE regs are
		 * in both windows.
		 */
		return off;
	}

	if ((off >= QLA82XX_CRB_PCIX_HOST) && (off < QLA82XX_CRB_PCIX_HOST2)) {
		/* We are in first CRB window */
		if (ha->curr_window != 0)
			WARN_ON(1);
		return off;
	}

	if ((off > QLA82XX_CRB_PCIX_HOST2) && (off < QLA82XX_CRB_MAX)) {
		/* We are in second CRB window */
		off = off - QLA82XX_CRB_PCIX_HOST2 + QLA82XX_CRB_PCIX_HOST;

		if (ha->curr_window != 1)
			return off;

		/* We are in the QM or direct access
		 * register region - do nothing
		 */
		if ((off >= QLA82XX_PCI_DIRECT_CRB) &&
			(off < QLA82XX_PCI_CAMQM_MAX))
			return off;
	}
	/* strange address given */
	qla_printk(KERN_WARNING, ha,
	    "%s: Warning: unm_nic_pci_set_crbwindow called with"
	    " an unknown address(%llx)\n", QLA2XXX_DRIVER_NAME, off);
	return off;
}

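/* Translate a 128M-map CRB offset to its 2M-map equivalent.
 * Returns 0 if the offset was remapped in place (CAMQM or direct map),
 * 1 if the caller must go through the CRB window, and -1 if the offset
 * lies outside the CRB space.
 */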
static int
qla82xx_pci_get_crb_addr_2M(struct qla_hw_data *ha, ulong *off)
{
	struct crb_128M_2M_sub_block_map *m;

	if (*off >= QLA82XX_CRB_MAX)
		return -1;

	if (*off >= QLA82XX_PCI_CAMQM && (*off < QLA82XX_PCI_CAMQM_2M_END)) {
		*off = (*off - QLA82XX_PCI_CAMQM) +
		    QLA82XX_PCI_CAMQM_2M_BASE + ha->nx_pcibase;
		return 0;
	}

	if (*off < QLA82XX_PCI_CRBSPACE)
		return -1;

	*off -= QLA82XX_PCI_CRBSPACE;

	/* Try direct map */
	m = &crb_128M_2M_map[CRB_BLK(*off)].sub_block[CRB_SUBBLK(*off)];

	if (m->valid && (m->start_128M <= *off) && (m->end_128M > *off)) {
		*off = *off + m->start_2M - m->start_128M + ha->nx_pcibase;
		return 0;
	}
	/* Not in direct map, use crb window */
	return 1;
}

#define CRB_WIN_LOCK_TIMEOUT 100000000
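/* Grab the PCI HW semaphore that serializes CRB window updates across
 * functions, then record the owning port in QLA82XX_CRB_WIN_LOCK_ID.
 */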
static int qla82xx_crb_win_lock(struct qla_hw_data *ha)
{
	int done = 0, timeout = 0;

	while (!done) {
		/* acquire semaphore7 from PCI HW block */
		done = qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_LOCK));
		if (done == 1)
			break;
		if (timeout >= CRB_WIN_LOCK_TIMEOUT)
			return -1;
		timeout++;
	}
	qla82xx_wr_32(ha, QLA82XX_CRB_WIN_LOCK_ID, ha->portnum);
	return 0;
}

int
qla82xx_wr_32(struct qla_hw_data *ha, ulong off, u32 data)
{
	unsigned long flags = 0;
	int rv;

	rv = qla82xx_pci_get_crb_addr_2M(ha, &off);

	BUG_ON(rv == -1);

	if (rv == 1) {
		write_lock_irqsave(&ha->hw_lock, flags);
		qla82xx_crb_win_lock(ha);
		qla82xx_pci_set_crbwindow_2M(ha, &off);
	}

	writel(data, (void __iomem *)off);

	if (rv == 1) {
		qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_UNLOCK));
		write_unlock_irqrestore(&ha->hw_lock, flags);
	}
	return 0;
}

int
qla82xx_rd_32(struct qla_hw_data *ha, ulong off)
{
	unsigned long flags = 0;
	int rv;
	u32 data;

	rv = qla82xx_pci_get_crb_addr_2M(ha, &off);

	BUG_ON(rv == -1);

	if (rv == 1) {
		write_lock_irqsave(&ha->hw_lock, flags);
		qla82xx_crb_win_lock(ha);
		qla82xx_pci_set_crbwindow_2M(ha, &off);
	}
	data = RD_REG_DWORD((void __iomem *)off);

	if (rv == 1) {
		qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_UNLOCK));
		write_unlock_irqrestore(&ha->hw_lock, flags);
	}
	return data;
}

#define IDC_LOCK_TIMEOUT 100000000
int qla82xx_idc_lock(struct qla_hw_data *ha)
{
	int i;
	int done = 0, timeout = 0;

	while (!done) {
		/* acquire semaphore5 from PCI HW block */
		done = qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM5_LOCK));
		if (done == 1)
			break;
		if (timeout >= IDC_LOCK_TIMEOUT)
			return -1;

		timeout++;

		/* Yield CPU */
		if (!in_interrupt())
			schedule();
		else {
			for (i = 0; i < 20; i++)
				cpu_relax();
		}
	}

	return 0;
}

void qla82xx_idc_unlock(struct qla_hw_data *ha)
{
	qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM5_UNLOCK));
}

/* PCI Windowing for DDR regions. */
#define QLA82XX_ADDR_IN_RANGE(addr, low, high) \
	(((addr) <= (high)) && ((addr) >= (low)))
/*
 * check memory access boundary.
 * used by test agent. support ddr access only for now
 */
static unsigned long
qla82xx_pci_mem_bound_check(struct qla_hw_data *ha,
	unsigned long long addr, int size)
{
	if (!QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_DDR_NET,
		QLA82XX_ADDR_DDR_NET_MAX) ||
		!QLA82XX_ADDR_IN_RANGE(addr + size - 1, QLA82XX_ADDR_DDR_NET,
		QLA82XX_ADDR_DDR_NET_MAX) ||
		((size != 1) && (size != 2) && (size != 4) && (size != 8)))
		return 0;
	else
		return 1;
}

int qla82xx_pci_set_window_warning_count;

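/* Program the DDR/OCM/QDR window registers so that the 64-bit 'addr'
 * becomes reachable through the 2M BAR, and return the translated
 * address (or -1UL for an unknown range).
 */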
static unsigned long
qla82xx_pci_set_window(struct qla_hw_data *ha, unsigned long long addr)
{
	int window;
	u32 win_read;

	if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_DDR_NET,
		QLA82XX_ADDR_DDR_NET_MAX)) {
		/* DDR network side */
		window = MN_WIN(addr);
		ha->ddr_mn_window = window;
		qla82xx_wr_32(ha,
			ha->mn_win_crb | QLA82XX_PCI_CRBSPACE, window);
		win_read = qla82xx_rd_32(ha,
			ha->mn_win_crb | QLA82XX_PCI_CRBSPACE);
		if ((win_read << 17) != window) {
			qla_printk(KERN_WARNING, ha,
			    "%s: Written MNwin (0x%x) != Read MNwin (0x%x)\n",
			    __func__, window, win_read);
		}
		addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_DDR_NET;
	} else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_OCM0,
		QLA82XX_ADDR_OCM0_MAX)) {
		unsigned int temp1;
		if ((addr & 0x00ff800) == 0xff800) {
			qla_printk(KERN_WARNING, ha,
			    "%s: QM access not handled.\n", __func__);
			addr = -1UL;
		}
		window = OCM_WIN(addr);
		ha->ddr_mn_window = window;
		qla82xx_wr_32(ha,
			ha->mn_win_crb | QLA82XX_PCI_CRBSPACE, window);
		win_read = qla82xx_rd_32(ha,
			ha->mn_win_crb | QLA82XX_PCI_CRBSPACE);
		temp1 = ((window & 0x1FF) << 7) |
		    ((window & 0x0FFFE0000) >> 17);
		if (win_read != temp1) {
			qla_printk(KERN_WARNING, ha,
			    "%s: Written OCMwin (0x%x) != Read OCMwin (0x%x)\n",
			    __func__, temp1, win_read);
		}
		addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_OCM0_2M;

	} else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_QDR_NET,
		QLA82XX_P3_ADDR_QDR_NET_MAX)) {
		/* QDR network side */
		window = MS_WIN(addr);
		ha->qdr_sn_window = window;
		qla82xx_wr_32(ha,
			ha->ms_win_crb | QLA82XX_PCI_CRBSPACE, window);
		win_read = qla82xx_rd_32(ha,
			ha->ms_win_crb | QLA82XX_PCI_CRBSPACE);
		if (win_read != window) {
			qla_printk(KERN_WARNING, ha,
			    "%s: Written MSwin (0x%x) != Read MSwin (0x%x)\n",
			    __func__, window, win_read);
		}
		addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_QDR_NET;
	} else {
		/*
		 * peg gdb frequently accesses memory that doesn't exist,
		 * this limits the chit chat so debugging isn't slowed down.
		 */
		if ((qla82xx_pci_set_window_warning_count++ < 8) ||
		    (qla82xx_pci_set_window_warning_count%64 == 0)) {
			qla_printk(KERN_WARNING, ha,
			    "%s: Warning:%s Unknown address range!\n", __func__,
			    QLA2XXX_DRIVER_NAME);
		}
		addr = -1UL;
	}
	return addr;
}

/* check if address is in the same windows as the previous access */
static int qla82xx_pci_is_same_window(struct qla_hw_data *ha,
	unsigned long long addr)
{
	int window;
	unsigned long long qdr_max;

	qdr_max = QLA82XX_P3_ADDR_QDR_NET_MAX;

	/* DDR network side */
	if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_DDR_NET,
		QLA82XX_ADDR_DDR_NET_MAX))
		BUG();
	else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_OCM0,
		QLA82XX_ADDR_OCM0_MAX))
		return 1;
	else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_OCM1,
		QLA82XX_ADDR_OCM1_MAX))
		return 1;
	else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_QDR_NET, qdr_max)) {
		/* QDR network side */
		window = ((addr - QLA82XX_ADDR_QDR_NET) >> 22) & 0x3f;
		if (ha->qdr_sn_window == window)
			return 1;
	}
	return 0;
}

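/* Direct BAR0 access helpers: the window is set up with
 * qla82xx_pci_set_window(), the backing page(s) are ioremap()ed, and the
 * data is transferred with readX()/writeX() of the requested size.
 */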
static int qla82xx_pci_mem_read_direct(struct qla_hw_data *ha,
	u64 off, void *data, int size)
{
	unsigned long flags;
	void *addr = NULL;
	int ret = 0;
	u64 start;
	uint8_t *mem_ptr = NULL;
	unsigned long mem_base;
	unsigned long mem_page;

	write_lock_irqsave(&ha->hw_lock, flags);

	/*
	 * If attempting to access unknown address or straddle hw windows,
	 * do not access.
	 */
	start = qla82xx_pci_set_window(ha, off);
	if ((start == -1UL) ||
		(qla82xx_pci_is_same_window(ha, off + size - 1) == 0)) {
		write_unlock_irqrestore(&ha->hw_lock, flags);
		qla_printk(KERN_ERR, ha,
		    "%s out of bound pci memory access. "
		    "offset is 0x%llx\n", QLA2XXX_DRIVER_NAME, off);
		return -1;
	}

	write_unlock_irqrestore(&ha->hw_lock, flags);
	mem_base = pci_resource_start(ha->pdev, 0);
	mem_page = start & PAGE_MASK;
	/* Map two pages whenever user tries to access addresses in two
	 * consecutive pages.
	 */
	if (mem_page != ((start + size - 1) & PAGE_MASK))
		mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE * 2);
	else
		mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE);
	if (mem_ptr == 0UL) {
		*(u8 *)data = 0;
		return -1;
	}
	addr = mem_ptr;
	addr += start & (PAGE_SIZE - 1);
	write_lock_irqsave(&ha->hw_lock, flags);

	switch (size) {
	case 1:
		*(u8 *)data = readb(addr);
		break;
	case 2:
		*(u16 *)data = readw(addr);
		break;
	case 4:
		*(u32 *)data = readl(addr);
		break;
	case 8:
		*(u64 *)data = readq(addr);
		break;
	default:
		ret = -1;
		break;
	}
	write_unlock_irqrestore(&ha->hw_lock, flags);

	if (mem_ptr)
		iounmap(mem_ptr);
	return ret;
}

static int
qla82xx_pci_mem_write_direct(struct qla_hw_data *ha,
	u64 off, void *data, int size)
{
	unsigned long flags;
	void *addr = NULL;
	int ret = 0;
	u64 start;
	uint8_t *mem_ptr = NULL;
	unsigned long mem_base;
	unsigned long mem_page;

	write_lock_irqsave(&ha->hw_lock, flags);

	/*
	 * If attempting to access unknown address or straddle hw windows,
	 * do not access.
	 */
	start = qla82xx_pci_set_window(ha, off);
	if ((start == -1UL) ||
		(qla82xx_pci_is_same_window(ha, off + size - 1) == 0)) {
		write_unlock_irqrestore(&ha->hw_lock, flags);
		qla_printk(KERN_ERR, ha,
		    "%s out of bound pci memory access. "
		    "offset is 0x%llx\n", QLA2XXX_DRIVER_NAME, off);
		return -1;
	}

	write_unlock_irqrestore(&ha->hw_lock, flags);
	mem_base = pci_resource_start(ha->pdev, 0);
	mem_page = start & PAGE_MASK;
	/* Map two pages whenever user tries to access addresses in two
	 * consecutive pages.
	 */
	if (mem_page != ((start + size - 1) & PAGE_MASK))
		mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE*2);
	else
		mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE);
	if (mem_ptr == 0UL)
		return -1;

	addr = mem_ptr;
	addr += start & (PAGE_SIZE - 1);
	write_lock_irqsave(&ha->hw_lock, flags);

	switch (size) {
	case 1:
		writeb(*(u8 *)data, addr);
		break;
	case 2:
		writew(*(u16 *)data, addr);
		break;
	case 4:
		writel(*(u32 *)data, addr);
		break;
	case 8:
		writeq(*(u64 *)data, addr);
		break;
	default:
		ret = -1;
		break;
	}
	write_unlock_irqrestore(&ha->hw_lock, flags);
	if (mem_ptr)
		iounmap(mem_ptr);
	return ret;
}

#define MTU_FUDGE_FACTOR 100
static unsigned long
qla82xx_decode_crb_addr(unsigned long addr)
{
	int i;
	unsigned long base_addr, offset, pci_base;

	if (!qla82xx_crb_table_initialized)
		qla82xx_crb_addr_transform_setup();

	pci_base = ADDR_ERROR;
	base_addr = addr & 0xfff00000;
	offset = addr & 0x000fffff;

	for (i = 0; i < MAX_CRB_XFORM; i++) {
		if (crb_addr_xform[i] == base_addr) {
			pci_base = i << 20;
			break;
		}
	}
	if (pci_base == ADDR_ERROR)
		return pci_base;
	return pci_base + offset;
}

static long rom_max_timeout = 100;
static long qla82xx_rom_lock_timeout = 100;

static int
qla82xx_rom_lock(struct qla_hw_data *ha)
{
	int done = 0, timeout = 0;

	while (!done) {
		/* acquire semaphore2 from PCI HW block */
		done = qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_LOCK));
		if (done == 1)
			break;
		if (timeout >= qla82xx_rom_lock_timeout)
			return -1;
		timeout++;
	}
	qla82xx_wr_32(ha, QLA82XX_ROM_LOCK_ID, ROM_LOCK_DRIVER);
	return 0;
}

static int
qla82xx_wait_rom_busy(struct qla_hw_data *ha)
{
	long timeout = 0;
	long done = 0 ;

	while (done == 0) {
		done = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_STATUS);
		done &= 4;
		timeout++;
		if (timeout >= rom_max_timeout) {
			DEBUG(qla_printk(KERN_INFO, ha,
			    "%s: Timeout reached waiting for rom busy",
			    QLA2XXX_DRIVER_NAME));
			return -1;
		}
	}
	return 0;
}

static int
qla82xx_wait_rom_done(struct qla_hw_data *ha)
{
	long timeout = 0;
	long done = 0 ;

	while (done == 0) {
		done = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_STATUS);
		done &= 2;
		timeout++;
		if (timeout >= rom_max_timeout) {
			DEBUG(qla_printk(KERN_INFO, ha,
			    "%s: Timeout reached waiting for rom done",
			    QLA2XXX_DRIVER_NAME));
			return -1;
		}
	}
	return 0;
}

static int
qla82xx_do_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp)
{
	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ADDRESS, addr);
	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 3);
	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, 0xb);
	qla82xx_wait_rom_busy(ha);
	if (qla82xx_wait_rom_done(ha)) {
		qla_printk(KERN_WARNING, ha,
		    "%s: Error waiting for rom done\n",
		    QLA2XXX_DRIVER_NAME);
		return -1;
	}
	/* Reset abyte_cnt and dummy_byte_cnt */
	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
	udelay(10);
	cond_resched();
	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 0);
	*valp = qla82xx_rd_32(ha, QLA82XX_ROMUSB_ROM_RDATA);
	return 0;
}

static int
qla82xx_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp)
{
	int ret, loops = 0;

	while ((qla82xx_rom_lock(ha) != 0) && (loops < 50000)) {
		udelay(100);
		schedule();
		loops++;
	}
	if (loops >= 50000) {
		qla_printk(KERN_INFO, ha,
		    "%s: qla82xx_rom_lock failed\n",
		    QLA2XXX_DRIVER_NAME);
		return -1;
	}
	ret = qla82xx_do_rom_fast_read(ha, addr, valp);
	qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK));
	return ret;
}

static int
qla82xx_read_status_reg(struct qla_hw_data *ha, uint32_t *val)
{
	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_RDSR);
	qla82xx_wait_rom_busy(ha);
	if (qla82xx_wait_rom_done(ha)) {
		qla_printk(KERN_WARNING, ha,
		    "Error waiting for rom done\n");
		return -1;
	}
	*val = qla82xx_rd_32(ha, QLA82XX_ROMUSB_ROM_RDATA);
	return 0;
}

static int
qla82xx_flash_wait_write_finish(struct qla_hw_data *ha)
{
	long timeout = 0;
	uint32_t done = 1 ;
	uint32_t val;
	int ret = 0;

	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 0);
	while ((done != 0) && (ret == 0)) {
		ret = qla82xx_read_status_reg(ha, &val);
		done = val & 1;
		timeout++;
		udelay(10);
		cond_resched();
		if (timeout >= 50000) {
			qla_printk(KERN_WARNING, ha,
			    "Timeout reached waiting for write finish");
			return -1;
		}
	}
	return ret;
}

static int
qla82xx_flash_set_write_enable(struct qla_hw_data *ha)
{
	uint32_t val;
	qla82xx_wait_rom_busy(ha);
	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 0);
	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_WREN);
	qla82xx_wait_rom_busy(ha);
	if (qla82xx_wait_rom_done(ha))
		return -1;
	if (qla82xx_read_status_reg(ha, &val) != 0)
		return -1;
	if ((val & 2) != 2)
		return -1;
	return 0;
}

static int
qla82xx_write_status_reg(struct qla_hw_data *ha, uint32_t val)
{
	if (qla82xx_flash_set_write_enable(ha))
		return -1;
	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_WDATA, val);
	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, 0x1);
	if (qla82xx_wait_rom_done(ha)) {
		qla_printk(KERN_WARNING, ha,
		    "Error waiting for rom done\n");
		return -1;
	}
	return qla82xx_flash_wait_write_finish(ha);
}

static int
qla82xx_write_disable_flash(struct qla_hw_data *ha)
{
	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_WRDI);
	if (qla82xx_wait_rom_done(ha)) {
		qla_printk(KERN_WARNING, ha,
		    "Error waiting for rom done\n");
		return -1;
	}
	return 0;
}

static int
ql82xx_rom_lock_d(struct qla_hw_data *ha)
{
	int loops = 0;
	while ((qla82xx_rom_lock(ha) != 0) && (loops < 50000)) {
		udelay(100);
		cond_resched();
		loops++;
	}
	if (loops >= 50000) {
		qla_printk(KERN_WARNING, ha, "ROM lock failed\n");
		return -1;
	}
	return 0;
}

static int
qla82xx_write_flash_dword(struct qla_hw_data *ha, uint32_t flashaddr,
	uint32_t data)
{
	int ret = 0;

	ret = ql82xx_rom_lock_d(ha);
	if (ret < 0) {
		qla_printk(KERN_WARNING, ha, "ROM Lock failed\n");
		return ret;
	}

	if (qla82xx_flash_set_write_enable(ha))
		goto done_write;

	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_WDATA, data);
	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ADDRESS, flashaddr);
	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 3);
	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_PP);
	qla82xx_wait_rom_busy(ha);
	if (qla82xx_wait_rom_done(ha)) {
		qla_printk(KERN_WARNING, ha,
		    "Error waiting for rom done\n");
		ret = -1;
		goto done_write;
	}

	ret = qla82xx_flash_wait_write_finish(ha);

done_write:
	qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK));
	return ret;
}

/* This routine does CRB initialize sequence
 * to put the ISP into operational state
 */
static int
qla82xx_pinit_from_rom(scsi_qla_host_t *vha)
{
	int addr, val;
	int i ;
	struct crb_addr_pair *buf;
	unsigned long off;
	unsigned offset, n;
	struct qla_hw_data *ha = vha->hw;

	struct crb_addr_pair {
		long addr;
		long data;
	};

	/* Halt all the individual PEGs and other blocks of the ISP */
	qla82xx_rom_lock(ha);

	/* mask all niu interrupts */
	qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x40, 0xff);
	/* disable xge rx/tx */
	qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x70000, 0x00);
	/* disable xg1 rx/tx */
	qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x80000, 0x00);

	/* halt sre */
	val = qla82xx_rd_32(ha, QLA82XX_CRB_SRE + 0x1000);
	qla82xx_wr_32(ha, QLA82XX_CRB_SRE + 0x1000, val & (~(0x1)));

	/* halt epg */
	qla82xx_wr_32(ha, QLA82XX_CRB_EPG + 0x1300, 0x1);

	/* halt timers */
	qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x0, 0x0);
	qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x8, 0x0);
	qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x10, 0x0);
	qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x18, 0x0);
	qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x100, 0x0);

	/* halt pegs */
	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x3c, 1);
	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_1 + 0x3c, 1);
	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_2 + 0x3c, 1);
	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_3 + 0x3c, 1);
	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_4 + 0x3c, 1);

	/* big hammer */
	msleep(1000);
	if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
		/* don't reset CAM block on reset */
		qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xfeffffff);
	else
		qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xffffffff);

	/* reset ms */
	val = qla82xx_rd_32(ha, QLA82XX_CRB_QDR_NET + 0xe4);
	val |= (1 << 1);
	qla82xx_wr_32(ha, QLA82XX_CRB_QDR_NET + 0xe4, val);
	msleep(20);

	/* unreset ms */
	val = qla82xx_rd_32(ha, QLA82XX_CRB_QDR_NET + 0xe4);
	val &= ~(1 << 1);
	qla82xx_wr_32(ha, QLA82XX_CRB_QDR_NET + 0xe4, val);
	msleep(20);

	qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK));

	/* Read the signature value from the flash.
	 * Offset 0: Contain signature (0xcafecafe)
	 * Offset 4: Offset and number of addr/value pairs
	 * that are present in the CRB initialize sequence
	 */
	if (qla82xx_rom_fast_read(ha, 0, &n) != 0 || n != 0xcafecafeUL ||
	    qla82xx_rom_fast_read(ha, 4, &n) != 0) {
		qla_printk(KERN_WARNING, ha,
		    "[ERROR] Reading crb_init area: n: %08x\n", n);
		return -1;
	}

	/* Offset in flash = lower 16 bits
	 * Number of entries = upper 16 bits
	 */
	offset = n & 0xffffU;
	n = (n >> 16) & 0xffffU;

	/* number of addr/value pairs should not exceed 1024 entries */
	if (n >= 1024) {
		qla_printk(KERN_WARNING, ha,
		    "%s: %s:n=0x%x [ERROR] Card flash not initialized.\n",
		    QLA2XXX_DRIVER_NAME, __func__, n);
		return -1;
	}

	qla_printk(KERN_INFO, ha,
	    "%s: %d CRB init values found in ROM.\n", QLA2XXX_DRIVER_NAME, n);

	buf = kmalloc(n * sizeof(struct crb_addr_pair), GFP_KERNEL);
	if (buf == NULL) {
		qla_printk(KERN_WARNING, ha,
		    "%s: [ERROR] Unable to malloc memory.\n",
		    QLA2XXX_DRIVER_NAME);
		return -1;
	}

	for (i = 0; i < n; i++) {
		if (qla82xx_rom_fast_read(ha, 8*i + 4*offset, &val) != 0 ||
		    qla82xx_rom_fast_read(ha, 8*i + 4*offset + 4, &addr) != 0) {
			kfree(buf);
			return -1;
		}

		buf[i].addr = addr;
		buf[i].data = val;
	}

	for (i = 0; i < n; i++) {
		/* Translate internal CRB initialization
		 * address to PCI bus address
		 */
		off = qla82xx_decode_crb_addr((unsigned long)buf[i].addr) +
		    QLA82XX_PCI_CRBSPACE;
		/* Not all CRB addr/value pairs need to be written,
		 * some of them are skipped
		 */

		/* skipping cold reboot MAGIC */
		if (off == QLA82XX_CAM_RAM(0x1fc))
			continue;

		/* do not reset PCI */
		if (off == (ROMUSB_GLB + 0xbc))
			continue;

		/* skip core clock, so that firmware can increase the clock */
		if (off == (ROMUSB_GLB + 0xc8))
			continue;

		/* skip the function enable register */
		if (off == QLA82XX_PCIE_REG(PCIE_SETUP_FUNCTION))
			continue;

		if (off == QLA82XX_PCIE_REG(PCIE_SETUP_FUNCTION2))
			continue;

		if ((off & 0x0ff00000) == QLA82XX_CRB_SMB)
			continue;

		if ((off & 0x0ff00000) == QLA82XX_CRB_DDR_NET)
			continue;

		if (off == ADDR_ERROR) {
			qla_printk(KERN_WARNING, ha,
			    "%s: [ERROR] Unknown addr: 0x%08lx\n",
			    QLA2XXX_DRIVER_NAME, buf[i].addr);
			continue;
		}

		qla82xx_wr_32(ha, off, buf[i].data);

		/* ISP requires much bigger delay to settle down,
		 * else crb_window returns 0xffffffff
		 */
		if (off == QLA82XX_ROMUSB_GLB_SW_RESET)
			msleep(1000);

		/* ISP requires millisec delay between
		 * successive CRB register updates
		 */
		msleep(1);
	}

	kfree(buf);

	/* Resetting the data and instruction cache */
	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_D+0xec, 0x1e);
	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_D+0x4c, 8);
	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_I+0x4c, 8);

	/* Clear all protocol processing engines */
	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0+0x8, 0);
	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0+0xc, 0);
	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_1+0x8, 0);
	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_1+0xc, 0);
	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_2+0x8, 0);
	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_2+0xc, 0);
	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_3+0x8, 0);
	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_3+0xc, 0);
	return 0;
}

1255
Giridhar Malavali77e334d2010-09-03 15:20:52 -07001256static int
1257qla82xx_check_for_bad_spd(struct qla_hw_data *ha)
Giridhar Malavalia9083012010-04-12 17:59:55 -07001258{
1259 u32 val = 0;
1260 val = qla82xx_rd_32(ha, BOOT_LOADER_DIMM_STATUS);
1261 val &= QLA82XX_BOOT_LOADER_MN_ISSUE;
1262 if (val & QLA82XX_PEG_TUNE_MN_SPD_ZEROED) {
1263 qla_printk(KERN_INFO, ha,
1264 "Memory DIMM SPD not programmed. "
1265 " Assumed valid.\n");
1266 return 1;
1267 } else if (val) {
1268 qla_printk(KERN_INFO, ha,
1269 "Memory DIMM type incorrect.Info:%08X.\n", val);
1270 return 2;
1271 }
1272 return 0;
1273}
1274
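/* Write 'size' bytes to ISP memory through the MIU test agent,
 * performing a read-modify-write of the containing 16-byte chunk(s).
 */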
static int
qla82xx_pci_mem_write_2M(struct qla_hw_data *ha,
	u64 off, void *data, int size)
{
	int i, j, ret = 0, loop, sz[2], off0;
	int scale, shift_amount, startword;
	uint32_t temp;
	uint64_t off8, mem_crb, tmpw, word[2] = {0, 0};

	/*
	 * If not MN, go check for MS or invalid.
	 */
	if (off >= QLA82XX_ADDR_QDR_NET && off <= QLA82XX_P3_ADDR_QDR_NET_MAX)
		mem_crb = QLA82XX_CRB_QDR_NET;
	else {
		mem_crb = QLA82XX_CRB_DDR_NET;
		if (qla82xx_pci_mem_bound_check(ha, off, size) == 0)
			return qla82xx_pci_mem_write_direct(ha,
			    off, data, size);
	}

	off0 = off & 0x7;
	sz[0] = (size < (8 - off0)) ? size : (8 - off0);
	sz[1] = size - sz[0];

	off8 = off & 0xfffffff0;
	loop = (((off & 0xf) + size - 1) >> 4) + 1;
	shift_amount = 4;
	scale = 2;
	startword = (off & 0xf)/8;

	for (i = 0; i < loop; i++) {
		if (qla82xx_pci_mem_read_2M(ha, off8 +
		    (i << shift_amount), &word[i * scale], 8))
			return -1;
	}

	switch (size) {
	case 1:
		tmpw = *((uint8_t *)data);
		break;
	case 2:
		tmpw = *((uint16_t *)data);
		break;
	case 4:
		tmpw = *((uint32_t *)data);
		break;
	case 8:
	default:
		tmpw = *((uint64_t *)data);
		break;
	}

	if (sz[0] == 8) {
		word[startword] = tmpw;
	} else {
		word[startword] &=
			~((~(~0ULL << (sz[0] * 8))) << (off0 * 8));
		word[startword] |= tmpw << (off0 * 8);
	}
	if (sz[1] != 0) {
		word[startword+1] &= ~(~0ULL << (sz[1] * 8));
		word[startword+1] |= tmpw >> (sz[0] * 8);
	}

	/*
	 * don't lock here - write_wx gets the lock if each time
	 * write_lock_irqsave(&adapter->adapter_lock, flags);
	 * netxen_nic_pci_change_crbwindow_128M(adapter, 0);
	 */
	for (i = 0; i < loop; i++) {
		temp = off8 + (i << shift_amount);
		qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_ADDR_LO, temp);
		temp = 0;
		qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_ADDR_HI, temp);
		temp = word[i * scale] & 0xffffffff;
		qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_WRDATA_LO, temp);
		temp = (word[i * scale] >> 32) & 0xffffffff;
		qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_WRDATA_HI, temp);
		temp = word[i*scale + 1] & 0xffffffff;
		qla82xx_wr_32(ha, mem_crb +
		    MIU_TEST_AGT_WRDATA_UPPER_LO, temp);
		temp = (word[i*scale + 1] >> 32) & 0xffffffff;
		qla82xx_wr_32(ha, mem_crb +
		    MIU_TEST_AGT_WRDATA_UPPER_HI, temp);

		temp = MIU_TA_CTL_ENABLE | MIU_TA_CTL_WRITE;
		qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
		temp = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE | MIU_TA_CTL_WRITE;
		qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);

		for (j = 0; j < MAX_CTL_CHECK; j++) {
			temp = qla82xx_rd_32(ha, mem_crb + MIU_TEST_AGT_CTRL);
			if ((temp & MIU_TA_CTL_BUSY) == 0)
				break;
		}

		if (j >= MAX_CTL_CHECK) {
			if (printk_ratelimit())
				dev_err(&ha->pdev->dev,
				    "failed to write through agent\n");
			ret = -1;
			break;
		}
	}

	return ret;
}

static int
qla82xx_fw_load_from_flash(struct qla_hw_data *ha)
{
	int i;
	long size = 0;
	long flashaddr = ha->flt_region_bootload << 2;
	long memaddr = BOOTLD_START;
	u64 data;
	u32 high, low;
	size = (IMAGE_START - BOOTLD_START) / 8;

	for (i = 0; i < size; i++) {
		if ((qla82xx_rom_fast_read(ha, flashaddr, (int *)&low)) ||
		    (qla82xx_rom_fast_read(ha, flashaddr + 4, (int *)&high))) {
			return -1;
		}
		data = ((u64)high << 32) | low ;
		qla82xx_pci_mem_write_2M(ha, memaddr, &data, 8);
		flashaddr += 8;
		memaddr += 8;

		if (i % 0x1000 == 0)
			msleep(1);
	}
	udelay(100);
	read_lock(&ha->hw_lock);
	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x18, 0x1020);
	qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0x80001e);
	read_unlock(&ha->hw_lock);
	return 0;
}

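/* Read 'size' bytes from ISP memory through the MIU test agent and
 * assemble the result from the 32-bit RDDATA registers.
 */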
int
qla82xx_pci_mem_read_2M(struct qla_hw_data *ha,
	u64 off, void *data, int size)
{
	int i, j = 0, k, start, end, loop, sz[2], off0[2];
	int shift_amount;
	uint32_t temp;
	uint64_t off8, val, mem_crb, word[2] = {0, 0};

	/*
	 * If not MN, go check for MS or invalid.
	 */

	if (off >= QLA82XX_ADDR_QDR_NET && off <= QLA82XX_P3_ADDR_QDR_NET_MAX)
		mem_crb = QLA82XX_CRB_QDR_NET;
	else {
		mem_crb = QLA82XX_CRB_DDR_NET;
		if (qla82xx_pci_mem_bound_check(ha, off, size) == 0)
			return qla82xx_pci_mem_read_direct(ha,
			    off, data, size);
	}

	off8 = off & 0xfffffff0;
	off0[0] = off & 0xf;
	sz[0] = (size < (16 - off0[0])) ? size : (16 - off0[0]);
	shift_amount = 4;
	loop = ((off0[0] + size - 1) >> shift_amount) + 1;
	off0[1] = 0;
	sz[1] = size - sz[0];

	/*
	 * don't lock here - write_wx gets the lock if each time
	 * write_lock_irqsave(&adapter->adapter_lock, flags);
	 * netxen_nic_pci_change_crbwindow_128M(adapter, 0);
	 */

	for (i = 0; i < loop; i++) {
		temp = off8 + (i << shift_amount);
		qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_ADDR_LO, temp);
		temp = 0;
		qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_ADDR_HI, temp);
		temp = MIU_TA_CTL_ENABLE;
		qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
		temp = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE;
		qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);

		for (j = 0; j < MAX_CTL_CHECK; j++) {
			temp = qla82xx_rd_32(ha, mem_crb + MIU_TEST_AGT_CTRL);
			if ((temp & MIU_TA_CTL_BUSY) == 0)
				break;
		}

		if (j >= MAX_CTL_CHECK) {
			if (printk_ratelimit())
				dev_err(&ha->pdev->dev,
				    "failed to read through agent\n");
			break;
		}

		start = off0[i] >> 2;
		end = (off0[i] + sz[i] - 1) >> 2;
		for (k = start; k <= end; k++) {
			temp = qla82xx_rd_32(ha,
			    mem_crb + MIU_TEST_AGT_RDDATA(k));
			word[i] |= ((uint64_t)temp << (32 * (k & 1)));
		}
	}

	/*
	 * netxen_nic_pci_change_crbwindow_128M(adapter, 1);
	 * write_unlock_irqrestore(&adapter->adapter_lock, flags);
	 */

	if (j >= MAX_CTL_CHECK)
		return -1;

	if ((off0[0] & 7) == 0) {
		val = word[0];
	} else {
		val = ((word[0] >> (off0[0] * 8)) & (~(~0ULL << (sz[0] * 8)))) |
		    ((word[1] & (~(~0ULL << (sz[1] * 8)))) << (sz[0] * 8));
	}

	switch (size) {
	case 1:
		*(uint8_t *)data = val;
		break;
	case 2:
		*(uint16_t *)data = val;
		break;
	case 4:
		*(uint32_t *)data = val;
		break;
	case 8:
		*(uint64_t *)data = val;
		break;
	}
	return 0;
}


static struct qla82xx_uri_table_desc *
qla82xx_get_table_desc(const u8 *unirom, int section)
{
	uint32_t i;
	struct qla82xx_uri_table_desc *directory =
		(struct qla82xx_uri_table_desc *)&unirom[0];
	__le32 offset;
	__le32 tab_type;
	__le32 entries = cpu_to_le32(directory->num_entries);

	for (i = 0; i < entries; i++) {
		offset = cpu_to_le32(directory->findex) +
		    (i * cpu_to_le32(directory->entry_size));
		tab_type = cpu_to_le32(*((u32 *)&unirom[offset] + 8));

		if (tab_type == section)
			return (struct qla82xx_uri_table_desc *)&unirom[offset];
	}

	return NULL;
}

static struct qla82xx_uri_data_desc *
qla82xx_get_data_desc(struct qla_hw_data *ha,
	u32 section, u32 idx_offset)
{
	const u8 *unirom = ha->hablob->fw->data;
	int idx = cpu_to_le32(*((int *)&unirom[ha->file_prd_off] + idx_offset));
	struct qla82xx_uri_table_desc *tab_desc = NULL;
	__le32 offset;

	tab_desc = qla82xx_get_table_desc(unirom, section);
	if (!tab_desc)
		return NULL;

	offset = cpu_to_le32(tab_desc->findex) +
	    (cpu_to_le32(tab_desc->entry_size) * idx);

	return (struct qla82xx_uri_data_desc *)&unirom[offset];
}

static u8 *
qla82xx_get_bootld_offset(struct qla_hw_data *ha)
{
	u32 offset = BOOTLD_START;
	struct qla82xx_uri_data_desc *uri_desc = NULL;

	if (ha->fw_type == QLA82XX_UNIFIED_ROMIMAGE) {
		uri_desc = qla82xx_get_data_desc(ha,
		    QLA82XX_URI_DIR_SECT_BOOTLD, QLA82XX_URI_BOOTLD_IDX_OFF);
		if (uri_desc)
			offset = cpu_to_le32(uri_desc->findex);
	}

	return (u8 *)&ha->hablob->fw->data[offset];
}

static __le32
qla82xx_get_fw_size(struct qla_hw_data *ha)
{
	struct qla82xx_uri_data_desc *uri_desc = NULL;

	if (ha->fw_type == QLA82XX_UNIFIED_ROMIMAGE) {
		uri_desc = qla82xx_get_data_desc(ha, QLA82XX_URI_DIR_SECT_FW,
		    QLA82XX_URI_FIRMWARE_IDX_OFF);
		if (uri_desc)
			return cpu_to_le32(uri_desc->size);
	}

	return cpu_to_le32(*(u32 *)&ha->hablob->fw->data[FW_SIZE_OFFSET]);
}

static u8 *
qla82xx_get_fw_offs(struct qla_hw_data *ha)
{
	u32 offset = IMAGE_START;
	struct qla82xx_uri_data_desc *uri_desc = NULL;

	if (ha->fw_type == QLA82XX_UNIFIED_ROMIMAGE) {
		uri_desc = qla82xx_get_data_desc(ha, QLA82XX_URI_DIR_SECT_FW,
		    QLA82XX_URI_FIRMWARE_IDX_OFF);
		if (uri_desc)
			offset = cpu_to_le32(uri_desc->findex);
	}

	return (u8 *)&ha->hablob->fw->data[offset];
}

/* PCI related functions */
char *
qla82xx_pci_info_str(struct scsi_qla_host *vha, char *str)
{
	int pcie_reg;
	struct qla_hw_data *ha = vha->hw;
	char lwstr[6];
	uint16_t lnk;

	pcie_reg = pci_find_capability(ha->pdev, PCI_CAP_ID_EXP);
	pci_read_config_word(ha->pdev, pcie_reg + PCI_EXP_LNKSTA, &lnk);
	ha->link_width = (lnk >> 4) & 0x3f;

	strcpy(str, "PCIe (");
	strcat(str, "2.5Gb/s ");
	snprintf(lwstr, sizeof(lwstr), "x%d)", ha->link_width);
	strcat(str, lwstr);
	return str;
}

int qla82xx_pci_region_offset(struct pci_dev *pdev, int region)
{
	unsigned long val = 0;
	u32 control;

	switch (region) {
	case 0:
		val = 0;
		break;
	case 1:
		pci_read_config_dword(pdev, QLA82XX_PCI_REG_MSIX_TBL, &control);
		val = control + QLA82XX_MSIX_TBL_SPACE;
		break;
	}
	return val;
}


int
qla82xx_iospace_config(struct qla_hw_data *ha)
{
	uint32_t len = 0;

	if (pci_request_regions(ha->pdev, QLA2XXX_DRIVER_NAME)) {
		qla_printk(KERN_WARNING, ha,
		    "Failed to reserve selected regions (%s)\n",
		    pci_name(ha->pdev));
		goto iospace_error_exit;
	}

	/* Use MMIO operations for all accesses. */
	if (!(pci_resource_flags(ha->pdev, 0) & IORESOURCE_MEM)) {
		qla_printk(KERN_ERR, ha,
		    "region #0 not an MMIO resource (%s), aborting\n",
		    pci_name(ha->pdev));
		goto iospace_error_exit;
	}

	len = pci_resource_len(ha->pdev, 0);
	ha->nx_pcibase =
	    (unsigned long)ioremap(pci_resource_start(ha->pdev, 0), len);
	if (!ha->nx_pcibase) {
		qla_printk(KERN_ERR, ha,
		    "cannot remap pcibase MMIO (%s), aborting\n",
		    pci_name(ha->pdev));
		pci_release_regions(ha->pdev);
		goto iospace_error_exit;
	}

	/* Mapping of IO base pointer */
	ha->iobase = (device_reg_t __iomem *)((uint8_t *)ha->nx_pcibase +
	    0xbc000 + (ha->pdev->devfn << 11));

	if (!ql2xdbwr) {
		ha->nxdb_wr_ptr =
		    (unsigned long)ioremap((pci_resource_start(ha->pdev, 4) +
		    (ha->pdev->devfn << 12)), 4);
		if (!ha->nxdb_wr_ptr) {
			qla_printk(KERN_ERR, ha,
			    "cannot remap MMIO (%s), aborting\n",
			    pci_name(ha->pdev));
			pci_release_regions(ha->pdev);
			goto iospace_error_exit;
		}

		/* Mapping of IO base pointer,
		 * door bell read and write pointer
		 */
		ha->nxdb_rd_ptr = (uint8_t *) ha->nx_pcibase + (512 * 1024) +
		    (ha->pdev->devfn * 8);
	} else {
		ha->nxdb_wr_ptr = (ha->pdev->devfn == 6 ?
		    QLA82XX_CAMRAM_DB1 :
		    QLA82XX_CAMRAM_DB2);
	}

	ha->max_req_queues = ha->max_rsp_queues = 1;
	ha->msix_count = ha->max_rsp_queues + 1;
	return 0;

iospace_error_exit:
	return -ENOMEM;
}

/* GS related functions */

/* Initialization related functions */

/**
 * qla82xx_pci_config() - Setup ISP82xx PCI configuration registers.
 * @vha: HA context
 *
 * Returns 0 on success.
 */
int
qla82xx_pci_config(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	int ret;

	pci_set_master(ha->pdev);
	ret = pci_set_mwi(ha->pdev);
	ha->chip_revision = ha->pdev->revision;
	return 0;
}

/**
 * qla82xx_reset_chip() - Disable host interrupts in preparation for an
 *                        ISP82xx chip reset.
 * @vha: HA context
 */
void
qla82xx_reset_chip(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	ha->isp_ops->disable_intrs(ha);
}

void qla82xx_config_rings(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
	struct init_cb_81xx *icb;
	struct req_que *req = ha->req_q_map[0];
	struct rsp_que *rsp = ha->rsp_q_map[0];

	/* Setup ring parameters in initialization control block. */
	icb = (struct init_cb_81xx *)ha->init_cb;
	icb->request_q_outpointer = __constant_cpu_to_le16(0);
	icb->response_q_inpointer = __constant_cpu_to_le16(0);
	icb->request_q_length = cpu_to_le16(req->length);
	icb->response_q_length = cpu_to_le16(rsp->length);
	icb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
	icb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
	icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
	icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));

	WRT_REG_DWORD((unsigned long __iomem *)&reg->req_q_out[0], 0);
	WRT_REG_DWORD((unsigned long __iomem *)&reg->rsp_q_in[0], 0);
	WRT_REG_DWORD((unsigned long __iomem *)&reg->rsp_q_out[0], 0);
}

void qla82xx_reset_adapter(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	vha->flags.online = 0;
	qla2x00_try_to_stop_firmware(vha);
	ha->isp_ops->disable_intrs(ha);
}

static int
qla82xx_fw_load_from_blob(struct qla_hw_data *ha)
{
	u64 *ptr64;
	u32 i, flashaddr, size;
	__le64 data;

	size = (IMAGE_START - BOOTLD_START) / 8;

	ptr64 = (u64 *)qla82xx_get_bootld_offset(ha);
	flashaddr = BOOTLD_START;

	for (i = 0; i < size; i++) {
		data = cpu_to_le64(ptr64[i]);
		if (qla82xx_pci_mem_write_2M(ha, flashaddr, &data, 8))
			return -EIO;
		flashaddr += 8;
	}

	flashaddr = FLASH_ADDR_START;
	size = (__force u32)qla82xx_get_fw_size(ha) / 8;
	ptr64 = (u64 *)qla82xx_get_fw_offs(ha);

	for (i = 0; i < size; i++) {
		data = cpu_to_le64(ptr64[i]);

		if (qla82xx_pci_mem_write_2M(ha, flashaddr, &data, 8))
			return -EIO;
		flashaddr += 8;
	}
	udelay(100);

	/* Write a magic value to CAMRAM register
	 * at a specified offset to indicate
	 * that all data is written and
	 * ready for firmware to initialize.
	 */
	qla82xx_wr_32(ha, QLA82XX_CAM_RAM(0x1fc), QLA82XX_BDINFO_MAGIC);

	read_lock(&ha->hw_lock);
	qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x18, 0x1020);
	qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0x80001e);
	read_unlock(&ha->hw_lock);
	return 0;
}

static int
qla82xx_set_product_offset(struct qla_hw_data *ha)
{
	struct qla82xx_uri_table_desc *ptab_desc = NULL;
	const uint8_t *unirom = ha->hablob->fw->data;
	uint32_t i;
	__le32 entries;
	__le32 flags, file_chiprev, offset;
	uint8_t chiprev = ha->chip_revision;
	/* Hardcoding mn_present flag for P3P */
	int mn_present = 0;
	uint32_t flagbit;

	ptab_desc = qla82xx_get_table_desc(unirom,
	    QLA82XX_URI_DIR_SECT_PRODUCT_TBL);
	if (!ptab_desc)
		return -1;

	entries = cpu_to_le32(ptab_desc->num_entries);

	for (i = 0; i < entries; i++) {
		offset = cpu_to_le32(ptab_desc->findex) +
		    (i * cpu_to_le32(ptab_desc->entry_size));
		flags = cpu_to_le32(*((int *)&unirom[offset] +
		    QLA82XX_URI_FLAGS_OFF));
		file_chiprev = cpu_to_le32(*((int *)&unirom[offset] +
		    QLA82XX_URI_CHIP_REV_OFF));

		flagbit = mn_present ? 1 : 2;

		if ((chiprev == file_chiprev) && ((1ULL << flagbit) & flags)) {
			ha->file_prd_off = offset;
			return 0;
		}
	}
	return -1;
}

int
qla82xx_validate_firmware_blob(scsi_qla_host_t *vha, uint8_t fw_type)
{
	__le32 val;
	uint32_t min_size;
	struct qla_hw_data *ha = vha->hw;
	const struct firmware *fw = ha->hablob->fw;

	ha->fw_type = fw_type;

	if (fw_type == QLA82XX_UNIFIED_ROMIMAGE) {
		if (qla82xx_set_product_offset(ha))
			return -EINVAL;

		min_size = QLA82XX_URI_FW_MIN_SIZE;
	} else {
		val = cpu_to_le32(*(u32 *)&fw->data[QLA82XX_FW_MAGIC_OFFSET]);
		if ((__force u32)val != QLA82XX_BDINFO_MAGIC)
			return -EINVAL;

		min_size = QLA82XX_FW_MIN_SIZE;
	}

	if (fw->size < min_size)
		return -EINVAL;
	return 0;
}

Giridhar Malavali77e334d2010-09-03 15:20:52 -07001888static int
1889qla82xx_check_cmdpeg_state(struct qla_hw_data *ha)
Giridhar Malavalia9083012010-04-12 17:59:55 -07001890{
1891 u32 val = 0;
1892 int retries = 60;
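	/* 60 retries x 500 ms sleep below gives the command peg roughly 30 seconds to initialize. */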
1893
1894 do {
1895 read_lock(&ha->hw_lock);
1896 val = qla82xx_rd_32(ha, CRB_CMDPEG_STATE);
1897 read_unlock(&ha->hw_lock);
1898
1899 switch (val) {
1900 case PHAN_INITIALIZE_COMPLETE:
1901 case PHAN_INITIALIZE_ACK:
1902 return QLA_SUCCESS;
1903 case PHAN_INITIALIZE_FAILED:
1904 break;
1905 default:
1906 break;
1907 }
1908 qla_printk(KERN_WARNING, ha,
1909 "CRB_CMDPEG_STATE: 0x%x and retries: 0x%x\n",
1910 val, retries);
1911
1912 msleep(500);
1913
1914 } while (--retries);
1915
1916 qla_printk(KERN_INFO, ha,
1917 "Cmd Peg initialization failed: 0x%x.\n", val);
1918
1919 qla82xx_check_for_bad_spd(ha);
1920 val = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_PEGTUNE_DONE);
1921 read_lock(&ha->hw_lock);
1922 qla82xx_wr_32(ha, CRB_CMDPEG_STATE, PHAN_INITIALIZE_FAILED);
1923 read_unlock(&ha->hw_lock);
1924 return QLA_FUNCTION_FAILED;
1925}
1926
Giridhar Malavali77e334d2010-09-03 15:20:52 -07001927static int
1928qla82xx_check_rcvpeg_state(struct qla_hw_data *ha)
Giridhar Malavalia9083012010-04-12 17:59:55 -07001929{
1930 u32 val = 0;
1931 int retries = 60;
1932
1933 do {
1934 read_lock(&ha->hw_lock);
1935 val = qla82xx_rd_32(ha, CRB_RCVPEG_STATE);
1936 read_unlock(&ha->hw_lock);
1937
1938 switch (val) {
1939 case PHAN_INITIALIZE_COMPLETE:
1940 case PHAN_INITIALIZE_ACK:
1941 return QLA_SUCCESS;
1942 case PHAN_INITIALIZE_FAILED:
1943 break;
1944 default:
1945 break;
1946 }
1947
1948 qla_printk(KERN_WARNING, ha,
1949 "CRB_RCVPEG_STATE: 0x%x and retries: 0x%x\n",
1950 val, retries);
1951
1952 msleep(500);
1953
1954 } while (--retries);
1955
1956 qla_printk(KERN_INFO, ha,
1957 "Rcv Peg initialization failed: 0x%x.\n", val);
1958 read_lock(&ha->hw_lock);
1959 qla82xx_wr_32(ha, CRB_RCVPEG_STATE, PHAN_INITIALIZE_FAILED);
1960 read_unlock(&ha->hw_lock);
1961 return QLA_FUNCTION_FAILED;
1962}
1963
1964/* ISR related functions */
1965uint32_t qla82xx_isr_int_target_mask_enable[8] = {
1966 ISR_INT_TARGET_MASK, ISR_INT_TARGET_MASK_F1,
1967 ISR_INT_TARGET_MASK_F2, ISR_INT_TARGET_MASK_F3,
1968 ISR_INT_TARGET_MASK_F4, ISR_INT_TARGET_MASK_F5,
1969 ISR_INT_TARGET_MASK_F7, ISR_INT_TARGET_MASK_F7
1970};
1971
1972uint32_t qla82xx_isr_int_target_status[8] = {
1973 ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1,
1974 ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3,
1975 ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5,
1976 ISR_INT_TARGET_STATUS_F7, ISR_INT_TARGET_STATUS_F7
1977};
1978
1979static struct qla82xx_legacy_intr_set legacy_intr[] = \
1980 QLA82XX_LEGACY_INTR_CONFIG;
1981
1982/*
1983 * qla82xx_mbx_completion() - Process mailbox command completions.
1984 * @vha: SCSI driver HA context
1985 * @mb0: Mailbox0 register
1986 */
Giridhar Malavali77e334d2010-09-03 15:20:52 -07001987static void
Giridhar Malavalia9083012010-04-12 17:59:55 -07001988qla82xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
1989{
1990 uint16_t cnt;
1991 uint16_t __iomem *wptr;
1992 struct qla_hw_data *ha = vha->hw;
1993 struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
1994 wptr = (uint16_t __iomem *)&reg->mailbox_out[1];
1995
1996 /* Load return mailbox registers. */
1997 ha->flags.mbox_int = 1;
1998 ha->mailbox_out[0] = mb0;
1999
2000 for (cnt = 1; cnt < ha->mbx_count; cnt++) {
2001 ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
2002 wptr++;
2003 }
2004
2005 if (ha->mcp) {
2006 DEBUG3_11(printk(KERN_INFO "%s(%ld): "
2007 "Got mailbox completion. cmd=%x.\n",
2008 __func__, vha->host_no, ha->mcp->mb[0]));
2009 } else {
2010 qla_printk(KERN_INFO, ha,
2011 "%s(%ld): MBX pointer ERROR!\n",
2012 __func__, vha->host_no);
2013 }
2014}
2015
2016/*
2017 * qla82xx_intr_handler() - Process interrupts for the ISP82xx.
2018 * @irq: interrupt number
2019 * @dev_id: SCSI driver HA context (response queue pointer)
2021 *
2022 * Called by system whenever the host adapter generates an interrupt.
2023 *
2024 * Returns handled flag.
2025 */
2026irqreturn_t
2027qla82xx_intr_handler(int irq, void *dev_id)
2028{
2029 scsi_qla_host_t *vha;
2030 struct qla_hw_data *ha;
2031 struct rsp_que *rsp;
2032 struct device_reg_82xx __iomem *reg;
2033 int status = 0, status1 = 0;
2034 unsigned long flags;
2035 unsigned long iter;
2036 uint32_t stat;
2037 uint16_t mb[4];
2038
2039 rsp = (struct rsp_que *) dev_id;
2040 if (!rsp) {
2041 printk(KERN_INFO
2042 "%s(): NULL response queue pointer\n", __func__);
2043 return IRQ_NONE;
2044 }
2045 ha = rsp->hw;
2046
2047 if (!ha->flags.msi_enabled) {
2048 status = qla82xx_rd_32(ha, ISR_INT_VECTOR);
2049 if (!(status & ha->nx_legacy_intr.int_vec_bit))
2050 return IRQ_NONE;
2051
2052 status1 = qla82xx_rd_32(ha, ISR_INT_STATE_REG);
2053 if (!ISR_IS_LEGACY_INTR_TRIGGERED(status1))
2054 return IRQ_NONE;
2055 }
2056
2057 /* clear the interrupt */
2058 qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_status_reg, 0xffffffff);
2059
2060 /* read twice to ensure write is flushed */
2061 qla82xx_rd_32(ha, ISR_INT_VECTOR);
2062 qla82xx_rd_32(ha, ISR_INT_VECTOR);
2063
2064 reg = &ha->iobase->isp82;
2065
2066 spin_lock_irqsave(&ha->hardware_lock, flags);
2067 vha = pci_get_drvdata(ha->pdev);
2068 for (iter = 1; iter--; ) {
2069
2070 if (RD_REG_DWORD(&reg->host_int)) {
2071 stat = RD_REG_DWORD(&reg->host_status);
Giridhar Malavalia9083012010-04-12 17:59:55 -07002072
2073 switch (stat & 0xff) {
2074 case 0x1:
2075 case 0x2:
2076 case 0x10:
2077 case 0x11:
2078 qla82xx_mbx_completion(vha, MSW(stat));
2079 status |= MBX_INTERRUPT;
2080 break;
2081 case 0x12:
2082 mb[0] = MSW(stat);
2083 mb[1] = RD_REG_WORD(&reg->mailbox_out[1]);
2084 mb[2] = RD_REG_WORD(&reg->mailbox_out[2]);
2085 mb[3] = RD_REG_WORD(&reg->mailbox_out[3]);
2086 qla2x00_async_event(vha, rsp, mb);
2087 break;
2088 case 0x13:
2089 qla24xx_process_response_queue(vha, rsp);
2090 break;
2091 default:
2092 DEBUG2(printk("scsi(%ld): "
2093 " Unrecognized interrupt type (%d).\n",
2094 vha->host_no, stat & 0xff));
2095 break;
2096 }
2097 }
2098 WRT_REG_DWORD(&reg->host_int, 0);
2099 }
2100 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2101 if (!ha->flags.msi_enabled)
2102 qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff);
2103
2104#ifdef QL_DEBUG_LEVEL_17
2105 if (!irq && ha->flags.eeh_busy)
2106 qla_printk(KERN_WARNING, ha,
2107 "isr: status %x, cmd_flags %lx, mbox_int %x, stat %x\n",
2108 status, ha->mbx_cmd_flags, ha->flags.mbox_int, stat);
2109#endif
2110
2111 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
2112 (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
2113 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
2114 complete(&ha->mbx_intr_comp);
2115 }
2116 return IRQ_HANDLED;
2117}
2118
2119irqreturn_t
2120qla82xx_msix_default(int irq, void *dev_id)
2121{
2122 scsi_qla_host_t *vha;
2123 struct qla_hw_data *ha;
2124 struct rsp_que *rsp;
2125 struct device_reg_82xx __iomem *reg;
2126 int status = 0;
2127 unsigned long flags;
2128 uint32_t stat;
2129 uint16_t mb[4];
2130
2131 rsp = (struct rsp_que *) dev_id;
2132 if (!rsp) {
2133 printk(KERN_INFO
2134 "%s(): NULL response queue pointer\n", __func__);
2135 return IRQ_NONE;
2136 }
2137 ha = rsp->hw;
2138
2139 reg = &ha->iobase->isp82;
2140
2141 spin_lock_irqsave(&ha->hardware_lock, flags);
2142 vha = pci_get_drvdata(ha->pdev);
2143 do {
2144 if (RD_REG_DWORD(&reg->host_int)) {
2145 stat = RD_REG_DWORD(&reg->host_status);
Giridhar Malavalia9083012010-04-12 17:59:55 -07002146
2147 switch (stat & 0xff) {
2148 case 0x1:
2149 case 0x2:
2150 case 0x10:
2151 case 0x11:
2152 qla82xx_mbx_completion(vha, MSW(stat));
2153 status |= MBX_INTERRUPT;
2154 break;
2155 case 0x12:
2156 mb[0] = MSW(stat);
2157 mb[1] = RD_REG_WORD(&reg->mailbox_out[1]);
2158 mb[2] = RD_REG_WORD(&reg->mailbox_out[2]);
2159 mb[3] = RD_REG_WORD(&reg->mailbox_out[3]);
2160 qla2x00_async_event(vha, rsp, mb);
2161 break;
2162 case 0x13:
2163 qla24xx_process_response_queue(vha, rsp);
2164 break;
2165 default:
2166 DEBUG2(printk("scsi(%ld): "
2167 " Unrecognized interrupt type (%d).\n",
2168 vha->host_no, stat & 0xff));
2169 break;
2170 }
2171 }
2172 WRT_REG_DWORD(&reg->host_int, 0);
2173 } while (0);
2174
2175 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2176
2177#ifdef QL_DEBUG_LEVEL_17
2178 if (!irq && ha->flags.eeh_busy)
2179 qla_printk(KERN_WARNING, ha,
2180 "isr: status %x, cmd_flags %lx, mbox_int %x, stat %x\n",
2181 status, ha->mbx_cmd_flags, ha->flags.mbox_int, stat);
2182#endif
2183
2184 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
2185 (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
2186 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
2187 complete(&ha->mbx_intr_comp);
2188 }
2189 return IRQ_HANDLED;
2190}
2191
2192irqreturn_t
2193qla82xx_msix_rsp_q(int irq, void *dev_id)
2194{
2195 scsi_qla_host_t *vha;
2196 struct qla_hw_data *ha;
2197 struct rsp_que *rsp;
2198 struct device_reg_82xx __iomem *reg;
2199
2200 rsp = (struct rsp_que *) dev_id;
2201 if (!rsp) {
2202 printk(KERN_INFO
2203 "%s(): NULL response queue pointer\n", __func__);
2204 return IRQ_NONE;
2205 }
2206
2207 ha = rsp->hw;
2208 reg = &ha->iobase->isp82;
2209 spin_lock_irq(&ha->hardware_lock);
2210 vha = pci_get_drvdata(ha->pdev);
2211 qla24xx_process_response_queue(vha, rsp);
2212 WRT_REG_DWORD(&reg->host_int, 0);
2213 spin_unlock_irq(&ha->hardware_lock);
2214 return IRQ_HANDLED;
2215}
2216
2217void
2218qla82xx_poll(int irq, void *dev_id)
2219{
2220 scsi_qla_host_t *vha;
2221 struct qla_hw_data *ha;
2222 struct rsp_que *rsp;
2223 struct device_reg_82xx __iomem *reg;
2224 int status = 0;
2225 uint32_t stat;
2226 uint16_t mb[4];
2227 unsigned long flags;
2228
2229 rsp = (struct rsp_que *) dev_id;
2230 if (!rsp) {
2231 printk(KERN_INFO
2232 "%s(): NULL response queue pointer\n", __func__);
2233 return;
2234 }
2235 ha = rsp->hw;
2236
2237 reg = &ha->iobase->isp82;
2238 spin_lock_irqsave(&ha->hardware_lock, flags);
2239 vha = pci_get_drvdata(ha->pdev);
2240
2241 if (RD_REG_DWORD(&reg->host_int)) {
2242 stat = RD_REG_DWORD(&reg->host_status);
2243 switch (stat & 0xff) {
2244 case 0x1:
2245 case 0x2:
2246 case 0x10:
2247 case 0x11:
2248 qla82xx_mbx_completion(vha, MSW(stat));
2249 status |= MBX_INTERRUPT;
2250 break;
2251 case 0x12:
2252 mb[0] = MSW(stat);
2253 mb[1] = RD_REG_WORD(&reg->mailbox_out[1]);
2254 mb[2] = RD_REG_WORD(&reg->mailbox_out[2]);
2255 mb[3] = RD_REG_WORD(&reg->mailbox_out[3]);
2256 qla2x00_async_event(vha, rsp, mb);
2257 break;
2258 case 0x13:
2259 qla24xx_process_response_queue(vha, rsp);
2260 break;
2261 default:
2262 DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
2263 "(%d).\n",
2264 vha->host_no, stat & 0xff));
2265 break;
2266 }
2267 }
2268 WRT_REG_DWORD(&reg->host_int, 0);
2269 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2270}
2271
2272void
2273qla82xx_enable_intrs(struct qla_hw_data *ha)
2274{
2275 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
2276 qla82xx_mbx_intr_enable(vha);
2277 spin_lock_irq(&ha->hardware_lock);
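	/*
	 * Writing 0xfbff leaves only bit 10 clear, unmasking this
	 * function's legacy interrupt; the disable path masks it again
	 * by writing 0x0400 (bit 10 set).
	 */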
2278 qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff);
2279 spin_unlock_irq(&ha->hardware_lock);
2280 ha->interrupts_on = 1;
2281}
2282
2283void
2284qla82xx_disable_intrs(struct qla_hw_data *ha)
2285{
2286 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
2287 qla82xx_mbx_intr_disable(vha);
2288 spin_lock_irq(&ha->hardware_lock);
2289 qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0x0400);
2290 spin_unlock_irq(&ha->hardware_lock);
2291 ha->interrupts_on = 0;
2292}
2293
2294void qla82xx_init_flags(struct qla_hw_data *ha)
2295{
2296 struct qla82xx_legacy_intr_set *nx_legacy_intr;
2297
2298 /* ISP 8021 initializations */
2299 rwlock_init(&ha->hw_lock);
2300 ha->qdr_sn_window = -1;
2301 ha->ddr_mn_window = -1;
2302 ha->curr_window = 255;
2303 ha->portnum = PCI_FUNC(ha->pdev->devfn);
2304 nx_legacy_intr = &legacy_intr[ha->portnum];
2305 ha->nx_legacy_intr.int_vec_bit = nx_legacy_intr->int_vec_bit;
2306 ha->nx_legacy_intr.tgt_status_reg = nx_legacy_intr->tgt_status_reg;
2307 ha->nx_legacy_intr.tgt_mask_reg = nx_legacy_intr->tgt_mask_reg;
2308 ha->nx_legacy_intr.pci_int_reg = nx_legacy_intr->pci_int_reg;
2309}
2310
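/*
 * The DRV_ACTIVE and DRV_STATE CRB registers are shared across PCI
 * functions: each function owns a nibble at bit position (portnum * 4)
 * that it uses to advertise active / reset-ready / quiescent-ready
 * state to the other functions participating in the IDC protocol.
 */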
Lalit Chandivadea5b36322010-09-03 15:20:50 -07002311inline void
Giridhar Malavalia9083012010-04-12 17:59:55 -07002312qla82xx_set_drv_active(scsi_qla_host_t *vha)
2313{
2314 uint32_t drv_active;
2315 struct qla_hw_data *ha = vha->hw;
2316
2317 drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
2318
2319 /* If reset value is all FF's, initialize DRV_ACTIVE */
2320 if (drv_active == 0xffffffff) {
Giridhar Malavali77e334d2010-09-03 15:20:52 -07002321 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE,
2322 QLA82XX_DRV_NOT_ACTIVE);
Giridhar Malavalia9083012010-04-12 17:59:55 -07002323 drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
2324 }
Giridhar Malavali77e334d2010-09-03 15:20:52 -07002325 drv_active |= (QLA82XX_DRV_ACTIVE << (ha->portnum * 4));
Giridhar Malavalia9083012010-04-12 17:59:55 -07002326 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, drv_active);
2327}
2328
2329inline void
2330qla82xx_clear_drv_active(struct qla_hw_data *ha)
2331{
2332 uint32_t drv_active;
2333
2334 drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
Giridhar Malavali77e334d2010-09-03 15:20:52 -07002335 drv_active &= ~(QLA82XX_DRV_ACTIVE << (ha->portnum * 4));
Giridhar Malavalia9083012010-04-12 17:59:55 -07002336 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, drv_active);
2337}
2338
2339static inline int
2340qla82xx_need_reset(struct qla_hw_data *ha)
2341{
2342 uint32_t drv_state;
2343 int rval;
2344
2345 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
Giridhar Malavali77e334d2010-09-03 15:20:52 -07002346 rval = drv_state & (QLA82XX_DRVST_RST_RDY << (ha->portnum * 4));
Giridhar Malavalia9083012010-04-12 17:59:55 -07002347 return rval;
2348}
2349
2350static inline void
2351qla82xx_set_rst_ready(struct qla_hw_data *ha)
2352{
2353 uint32_t drv_state;
2354 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
2355
2356 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
2357
2358 /* If reset value is all FF's, initialize DRV_STATE */
2359 if (drv_state == 0xffffffff) {
Giridhar Malavali77e334d2010-09-03 15:20:52 -07002360 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, QLA82XX_DRVST_NOT_RDY);
Giridhar Malavalia9083012010-04-12 17:59:55 -07002361 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
2362 }
2363 drv_state |= (QLA82XX_DRVST_RST_RDY << (ha->portnum * 4));
2364 qla_printk(KERN_INFO, ha,
2365 "%s(%ld):drv_state = 0x%x\n",
2366 __func__, vha->host_no, drv_state);
2367 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state);
2368}
2369
2370static inline void
2371qla82xx_clear_rst_ready(struct qla_hw_data *ha)
2372{
2373 uint32_t drv_state;
2374
2375 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
2376 drv_state &= ~(QLA82XX_DRVST_RST_RDY << (ha->portnum * 4));
2377 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state);
2378}
2379
2380static inline void
2381qla82xx_set_qsnt_ready(struct qla_hw_data *ha)
2382{
2383 uint32_t qsnt_state;
2384
2385 qsnt_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
2386 qsnt_state |= (QLA82XX_DRVST_QSNT_RDY << (ha->portnum * 4));
2387 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, qsnt_state);
2388}
2389
Saurav Kashyap579d12b2010-12-21 16:00:14 -08002390void
2391qla82xx_clear_qsnt_ready(scsi_qla_host_t *vha)
2392{
2393 struct qla_hw_data *ha = vha->hw;
2394 uint32_t qsnt_state;
2395
2396 qsnt_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
2397 qsnt_state &= ~(QLA82XX_DRVST_QSNT_RDY << (ha->portnum * 4));
2398 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, qsnt_state);
2399}
2400
Giridhar Malavali77e334d2010-09-03 15:20:52 -07002401static int
2402qla82xx_load_fw(scsi_qla_host_t *vha)
Giridhar Malavalia9083012010-04-12 17:59:55 -07002403{
2404 int rst;
2405 struct fw_blob *blob;
2406 struct qla_hw_data *ha = vha->hw;
2407
Giridhar Malavalia9083012010-04-12 17:59:55 -07002408 if (qla82xx_pinit_from_rom(vha) != QLA_SUCCESS) {
2409 qla_printk(KERN_ERR, ha,
2410 "%s: Error during CRB Initialization\n", __func__);
2411 return QLA_FUNCTION_FAILED;
2412 }
2413 udelay(500);
2414
2415 /* Bring QM and CAMRAM out of reset */
2416 rst = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET);
2417 rst &= ~((1 << 28) | (1 << 24));
2418 qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, rst);
2419
2420 /*
2421 * FW Load priority:
2422 * 1) Operational firmware residing in flash.
2423 * 2) Firmware via request-firmware interface (.bin file).
2424 */
2425 if (ql2xfwloadbin == 2)
2426 goto try_blob_fw;
2427
2428 qla_printk(KERN_INFO, ha,
2429 "Attempting to load firmware from flash\n");
2430
2431 if (qla82xx_fw_load_from_flash(ha) == QLA_SUCCESS) {
2432 qla_printk(KERN_INFO, ha,
2433 "Firmware loaded successfully from flash\n");
2434 return QLA_SUCCESS;
2435 }
2436try_blob_fw:
2437 qla_printk(KERN_INFO, ha,
2438 "Attempting to load firmware from blob\n");
2439
2440 /* Load firmware blob. */
2441 blob = ha->hablob = qla2x00_request_firmware(vha);
2442 if (!blob) {
2443 qla_printk(KERN_ERR, ha,
2444 "Firmware image not present.\n");
2445 goto fw_load_failed;
2446 }
2447
Harish Zunjarrao9c2b2972010-05-28 15:08:23 -07002448 /* Validating firmware blob */
2449 if (qla82xx_validate_firmware_blob(vha,
2450 QLA82XX_FLASH_ROMIMAGE)) {
2451 /* Fallback to URI format */
2452 if (qla82xx_validate_firmware_blob(vha,
2453 QLA82XX_UNIFIED_ROMIMAGE)) {
2454 qla_printk(KERN_ERR, ha,
2455 "No valid firmware image found!!!");
2456 return QLA_FUNCTION_FAILED;
2457 }
2458 }
2459
Giridhar Malavalia9083012010-04-12 17:59:55 -07002460 if (qla82xx_fw_load_from_blob(ha) == QLA_SUCCESS) {
2461 qla_printk(KERN_INFO, ha,
2462 "%s: Firmware loaded successfully "
2463 "from binary blob\n", __func__);
2464 return QLA_SUCCESS;
2465 } else {
2466 qla_printk(KERN_ERR, ha,
2467 "Firmware load failed from binary blob\n");
2468 blob->fw = NULL;
2469 blob = NULL;
2470 goto fw_load_failed;
2471 }
2472 return QLA_SUCCESS;
2473
2474fw_load_failed:
2475 return QLA_FUNCTION_FAILED;
2476}
2477
Lalit Chandivadea5b36322010-09-03 15:20:50 -07002478int
Giridhar Malavalia9083012010-04-12 17:59:55 -07002479qla82xx_start_firmware(scsi_qla_host_t *vha)
2480{
2481 int pcie_cap;
2482 uint16_t lnk;
2483 struct qla_hw_data *ha = vha->hw;
2484
2485 /* scrub dma mask expansion register */
Giridhar Malavali77e334d2010-09-03 15:20:52 -07002486 qla82xx_wr_32(ha, CRB_DMA_SHIFT, QLA82XX_DMA_SHIFT_VALUE);
Giridhar Malavalia9083012010-04-12 17:59:55 -07002487
Giridhar Malavali37113332010-07-23 15:28:34 +05002488 /* Put both the PEG CMD and RCV PEG to default state
2489 * of 0 before resetting the hardware
2490 */
2491 qla82xx_wr_32(ha, CRB_CMDPEG_STATE, 0);
2492 qla82xx_wr_32(ha, CRB_RCVPEG_STATE, 0);
2493
Giridhar Malavalia9083012010-04-12 17:59:55 -07002494 /* Overwrite stale initialization register values */
2495 qla82xx_wr_32(ha, QLA82XX_PEG_HALT_STATUS1, 0);
2496 qla82xx_wr_32(ha, QLA82XX_PEG_HALT_STATUS2, 0);
2497
2498 if (qla82xx_load_fw(vha) != QLA_SUCCESS) {
2499 qla_printk(KERN_INFO, ha,
2500 "%s: Error trying to start fw!\n", __func__);
2501 return QLA_FUNCTION_FAILED;
2502 }
2503
2504 /* Handshake with the card before we register the devices. */
2505 if (qla82xx_check_cmdpeg_state(ha) != QLA_SUCCESS) {
2506 qla_printk(KERN_INFO, ha,
2507 "%s: Error during card handshake!\n", __func__);
2508 return QLA_FUNCTION_FAILED;
2509 }
2510
2511 /* Negotiated Link width */
2512 pcie_cap = pci_find_capability(ha->pdev, PCI_CAP_ID_EXP);
2513 pci_read_config_word(ha->pdev, pcie_cap + PCI_EXP_LNKSTA, &lnk);
2514 ha->link_width = (lnk >> 4) & 0x3f;
2515
2516 /* Synchronize with Receive peg */
2517 return qla82xx_check_rcvpeg_state(ha);
2518}
2519
2520static inline int
2521qla2xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
2522 uint16_t tot_dsds)
2523{
2524 uint32_t *cur_dsd = NULL;
2525 scsi_qla_host_t *vha;
2526 struct qla_hw_data *ha;
2527 struct scsi_cmnd *cmd;
2528 struct scatterlist *cur_seg;
2529 uint32_t *dsd_seg;
2530 void *next_dsd;
2531 uint8_t avail_dsds;
2532 uint8_t first_iocb = 1;
2533 uint32_t dsd_list_len;
2534 struct dsd_dma *dsd_ptr;
2535 struct ct6_dsd *ctx;
2536
2537 cmd = sp->cmd;
2538
2539 /* Update entry type to indicate Command Type 6 IOCB */
2540 *((uint32_t *)(&cmd_pkt->entry_type)) =
2541 __constant_cpu_to_le32(COMMAND_TYPE_6);
2542
2543 /* No data transfer */
2544 if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
2545 cmd_pkt->byte_count = __constant_cpu_to_le32(0);
2546 return 0;
2547 }
2548
2549 vha = sp->fcport->vha;
2550 ha = vha->hw;
2551
2552 /* Set transfer direction */
2553 if (cmd->sc_data_direction == DMA_TO_DEVICE) {
2554 cmd_pkt->control_flags =
2555 __constant_cpu_to_le16(CF_WRITE_DATA);
2556 ha->qla_stats.output_bytes += scsi_bufflen(cmd);
2557 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
2558 cmd_pkt->control_flags =
2559 __constant_cpu_to_le16(CF_READ_DATA);
2560 ha->qla_stats.input_bytes += scsi_bufflen(cmd);
2561 }
2562
2563 cur_seg = scsi_sglist(cmd);
2564 ctx = sp->ctx;
2565
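	/*
	 * Carve the scatter/gather list into DSD lists drawn from the
	 * global pool: the first list is referenced directly from the
	 * command packet, and each subsequent list is chained through
	 * the trailing descriptor of the previous one.
	 */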
2566 while (tot_dsds) {
2567 avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
2568 QLA_DSDS_PER_IOCB : tot_dsds;
2569 tot_dsds -= avail_dsds;
2570 dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;
2571
2572 dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
2573 struct dsd_dma, list);
2574 next_dsd = dsd_ptr->dsd_addr;
2575 list_del(&dsd_ptr->list);
2576 ha->gbl_dsd_avail--;
2577 list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
2578 ctx->dsd_use_cnt++;
2579 ha->gbl_dsd_inuse++;
2580
2581 if (first_iocb) {
2582 first_iocb = 0;
2583 dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
2584 *dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
2585 *dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
2586 *dsd_seg++ = dsd_list_len;
2587 } else {
2588 *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
2589 *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
2590 *cur_dsd++ = dsd_list_len;
2591 }
2592 cur_dsd = (uint32_t *)next_dsd;
2593 while (avail_dsds) {
2594 dma_addr_t sle_dma;
2595
2596 sle_dma = sg_dma_address(cur_seg);
2597 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
2598 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
2599 *cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
2600 cur_seg++;
2601 avail_dsds--;
2602 }
2603 }
2604
2605 /* Null termination */
2606 *cur_dsd++ = 0;
2607 *cur_dsd++ = 0;
2608 *cur_dsd++ = 0;
2609 cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE;
2610 return 0;
2611}
2612
2613/*
2614 * qla82xx_calc_dsd_lists() - Determine number of DSD lists required
2615 * for Command Type 6.
2616 *
2617 * @dsds: number of data segment descriptors needed
2618 *
2619 * Returns the number of DSD lists needed to store @dsds descriptors.
2620 */
2621inline uint16_t
2622qla82xx_calc_dsd_lists(uint16_t dsds)
2623{
2624 uint16_t dsd_lists = 0;
2625
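	/* Effectively ceil(dsds / QLA_DSDS_PER_IOCB): one extra list covers any remainder. */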
2626 dsd_lists = (dsds/QLA_DSDS_PER_IOCB);
2627 if (dsds % QLA_DSDS_PER_IOCB)
2628 dsd_lists++;
2629 return dsd_lists;
2630}
2631
2632/*
2633 * qla82xx_start_scsi() - Send a SCSI command to the ISP
2634 * @sp: command to send to the ISP
2635 *
2636 * Returns non-zero if a failure occurred, else zero.
2637 */
2638int
2639qla82xx_start_scsi(srb_t *sp)
2640{
2641 int ret, nseg;
2642 unsigned long flags;
2643 struct scsi_cmnd *cmd;
2644 uint32_t *clr_ptr;
2645 uint32_t index;
2646 uint32_t handle;
2647 uint16_t cnt;
2648 uint16_t req_cnt;
2649 uint16_t tot_dsds;
2650 struct device_reg_82xx __iomem *reg;
2651 uint32_t dbval;
2652 uint32_t *fcp_dl;
2653 uint8_t additional_cdb_len;
2654 struct ct6_dsd *ctx;
2655 struct scsi_qla_host *vha = sp->fcport->vha;
2656 struct qla_hw_data *ha = vha->hw;
2657 struct req_que *req = NULL;
2658 struct rsp_que *rsp = NULL;
2659
2660 /* Setup device pointers. */
2661 ret = 0;
2662 reg = &ha->iobase->isp82;
2663 cmd = sp->cmd;
2664 req = vha->req;
2665 rsp = ha->rsp_q_map[0];
2666
2667 /* So we know we haven't pci_map'ed anything yet */
2668 tot_dsds = 0;
2669
2670 dbval = 0x04 | (ha->portnum << 5);
2671
2672 /* Send marker if required */
2673 if (vha->marker_needed != 0) {
2674 if (qla2x00_marker(vha, req,
2675 rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
2676 return QLA_FUNCTION_FAILED;
2677 vha->marker_needed = 0;
2678 }
2679
2680 /* Acquire ring specific lock */
2681 spin_lock_irqsave(&ha->hardware_lock, flags);
2682
2683 /* Check for room in outstanding command list. */
2684 handle = req->current_outstanding_cmd;
2685 for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
2686 handle++;
2687 if (handle == MAX_OUTSTANDING_COMMANDS)
2688 handle = 1;
2689 if (!req->outstanding_cmds[handle])
2690 break;
2691 }
2692 if (index == MAX_OUTSTANDING_COMMANDS)
2693 goto queuing_error;
2694
2695 /* Map the sg table so we have an accurate count of sg entries needed */
2696 if (scsi_sg_count(cmd)) {
2697 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
2698 scsi_sg_count(cmd), cmd->sc_data_direction);
2699 if (unlikely(!nseg))
2700 goto queuing_error;
2701 } else
2702 nseg = 0;
2703
2704 tot_dsds = nseg;
2705
2706 if (tot_dsds > ql2xshiftctondsd) {
2707 struct cmd_type_6 *cmd_pkt;
2708 uint16_t more_dsd_lists = 0;
2709 struct dsd_dma *dsd_ptr;
2710 uint16_t i;
2711
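		/*
		 * Command Type 6 path: make sure enough DSD lists are
		 * available in the global pool, growing it from the
		 * DMA pool if the current free count falls short.
		 */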
2712 more_dsd_lists = qla82xx_calc_dsd_lists(tot_dsds);
2713 if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN)
2714 goto queuing_error;
2715
2716 if (more_dsd_lists <= ha->gbl_dsd_avail)
2717 goto sufficient_dsds;
2718 else
2719 more_dsd_lists -= ha->gbl_dsd_avail;
2720
2721 for (i = 0; i < more_dsd_lists; i++) {
2722 dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
2723 if (!dsd_ptr)
2724 goto queuing_error;
2725
2726 dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
2727 GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
2728 if (!dsd_ptr->dsd_addr) {
2729 kfree(dsd_ptr);
2730 goto queuing_error;
2731 }
2732 list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
2733 ha->gbl_dsd_avail++;
2734 }
2735
2736sufficient_dsds:
2737 req_cnt = 1;
2738
Giridhar Malavali1bd58b82010-09-03 14:57:05 -07002739 if (req->cnt < (req_cnt + 2)) {
2740 cnt = (uint16_t)RD_REG_DWORD_RELAXED(
2741 &reg->req_q_out[0]);
2742 if (req->ring_index < cnt)
2743 req->cnt = cnt - req->ring_index;
2744 else
2745 req->cnt = req->length -
2746 (req->ring_index - cnt);
2747 }
2748
2749 if (req->cnt < (req_cnt + 2))
2750 goto queuing_error;
2751
Giridhar Malavalia9083012010-04-12 17:59:55 -07002752 ctx = sp->ctx = mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
2753 if (!sp->ctx) {
2754 DEBUG(printk(KERN_INFO
2755 "%s(%ld): failed to allocate"
2756 " ctx.\n", __func__, vha->host_no));
2757 goto queuing_error;
2758 }
2759 memset(ctx, 0, sizeof(struct ct6_dsd));
2760 ctx->fcp_cmnd = dma_pool_alloc(ha->fcp_cmnd_dma_pool,
2761 GFP_ATOMIC, &ctx->fcp_cmnd_dma);
2762 if (!ctx->fcp_cmnd) {
2763 DEBUG2_3(printk("%s(%ld): failed to allocate"
2764 " fcp_cmnd.\n", __func__, vha->host_no));
2765 goto queuing_error_fcp_cmnd;
2766 }
2767
2768 /* Initialize the DSD list and dma handle */
2769 INIT_LIST_HEAD(&ctx->dsd_list);
2770 ctx->dsd_use_cnt = 0;
2771
2772 if (cmd->cmd_len > 16) {
2773 additional_cdb_len = cmd->cmd_len - 16;
2774 if ((cmd->cmd_len % 4) != 0) {
2775 /* SCSI command bigger than 16 bytes must be
2776 * multiple of 4
2777 */
2778 goto queuing_error_fcp_cmnd;
2779 }
2780 ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
2781 } else {
2782 additional_cdb_len = 0;
2783 ctx->fcp_cmnd_len = 12 + 16 + 4;
2784 }
2785
2786 cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
2787 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2788
2789 /* Zero out remaining portion of packet. */
2790 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
2791 clr_ptr = (uint32_t *)cmd_pkt + 2;
2792 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2793 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2794
2795 /* Set NPORT-ID and LUN number*/
2796 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2797 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2798 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2799 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2800 cmd_pkt->vp_index = sp->fcport->vp_idx;
2801
2802 /* Build IOCB segments */
2803 if (qla2xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
2804 goto queuing_error_fcp_cmnd;
2805
2806 int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
Mike Hernandez85727e12010-11-23 16:52:46 -08002807 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
Giridhar Malavalia9083012010-04-12 17:59:55 -07002808
2809 /* build FCP_CMND IU */
2810 memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd));
2811 int_to_scsilun(sp->cmd->device->lun, &ctx->fcp_cmnd->lun);
2812 ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
2813
2814 if (cmd->sc_data_direction == DMA_TO_DEVICE)
2815 ctx->fcp_cmnd->additional_cdb_len |= 1;
2816 else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
2817 ctx->fcp_cmnd->additional_cdb_len |= 2;
2818
2819 memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
2820
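		/* FCP_DL (data length) immediately follows the CDB, plus any additional CDB bytes, in the FCP_CMND IU. */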
2821 fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
2822 additional_cdb_len);
2823 *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
2824
2825 cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
2826 cmd_pkt->fcp_cmnd_dseg_address[0] =
2827 cpu_to_le32(LSD(ctx->fcp_cmnd_dma));
2828 cmd_pkt->fcp_cmnd_dseg_address[1] =
2829 cpu_to_le32(MSD(ctx->fcp_cmnd_dma));
2830
2831 sp->flags |= SRB_FCP_CMND_DMA_VALID;
2832 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
2833 /* Set total data segment count. */
2834 cmd_pkt->entry_count = (uint8_t)req_cnt;
2835 /* Specify response queue number where
2836 * completion should happen
2837 */
2838 cmd_pkt->entry_status = (uint8_t) rsp->id;
2839 } else {
2840 struct cmd_type_7 *cmd_pkt;
2841 req_cnt = qla24xx_calc_iocbs(tot_dsds);
2842 if (req->cnt < (req_cnt + 2)) {
2843 cnt = (uint16_t)RD_REG_DWORD_RELAXED(
2844 &reg->req_q_out[0]);
2845 if (req->ring_index < cnt)
2846 req->cnt = cnt - req->ring_index;
2847 else
2848 req->cnt = req->length -
2849 (req->ring_index - cnt);
2850 }
2851 if (req->cnt < (req_cnt + 2))
2852 goto queuing_error;
2853
2854 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
2855 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2856
2857 /* Zero out remaining portion of packet. */
2858 /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
2859 clr_ptr = (uint32_t *)cmd_pkt + 2;
2860 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2861 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2862
2863 /* Set NPORT-ID and LUN number*/
2864 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2865 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2866 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2867 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2868 cmd_pkt->vp_index = sp->fcport->vp_idx;
2869
2870 int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
2871 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
2872 sizeof(cmd_pkt->lun));
2873
2874 /* Load SCSI command packet. */
2875 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
2876 host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
2877
2878 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
2879
2880 /* Build IOCB segments */
2881 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);
2882
2883 /* Set total data segment count. */
2884 cmd_pkt->entry_count = (uint8_t)req_cnt;
2885 /* Specify response queue number where
2886 * completion should happen.
2887 */
2888 cmd_pkt->entry_status = (uint8_t) rsp->id;
2889
2890 }
2891 /* Build command packet. */
2892 req->current_outstanding_cmd = handle;
2893 req->outstanding_cmds[handle] = sp;
2894 sp->handle = handle;
2895 sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
2896 req->cnt -= req_cnt;
2897 wmb();
2898
2899 /* Adjust ring index. */
2900 req->ring_index++;
2901 if (req->ring_index == req->length) {
2902 req->ring_index = 0;
2903 req->ring_ptr = req->ring;
2904 } else
2905 req->ring_ptr++;
2906
2907 sp->flags |= SRB_DMA_VALID;
2908
2909 /* Set chip new ring index. */
2910 /* write, read and verify logic */
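	/*
	 * Post the new request-queue index to the hardware doorbell; in
	 * the memory-mapped doorbell path the value is re-written until
	 * the adapter reflects it back, guaranteeing the update was seen.
	 */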
2911 dbval = dbval | (req->id << 8) | (req->ring_index << 16);
2912 if (ql2xdbwr)
2913 qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
2914 else {
2915 WRT_REG_DWORD(
2916 (unsigned long __iomem *)ha->nxdb_wr_ptr,
2917 dbval);
2918 wmb();
2919 while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
2920 WRT_REG_DWORD(
2921 (unsigned long __iomem *)ha->nxdb_wr_ptr,
2922 dbval);
2923 wmb();
2924 }
2925 }
2926
2927 /* Manage unprocessed RIO/ZIO commands in response queue. */
2928 if (vha->flags.process_response_queue &&
2929 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
2930 qla24xx_process_response_queue(vha, rsp);
2931
2932 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2933 return QLA_SUCCESS;
2934
2935queuing_error_fcp_cmnd:
2936 dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
2937queuing_error:
2938 if (tot_dsds)
2939 scsi_dma_unmap(cmd);
2940
2941 if (sp->ctx) {
2942 mempool_free(sp->ctx, ha->ctx_mempool);
2943 sp->ctx = NULL;
2944 }
2945 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2946
2947 return QLA_FUNCTION_FAILED;
2948}
2949
Giridhar Malavali77e334d2010-09-03 15:20:52 -07002950static uint32_t *
Giridhar Malavalia9083012010-04-12 17:59:55 -07002951qla82xx_read_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
2952 uint32_t length)
2953{
2954 uint32_t i;
2955 uint32_t val;
2956 struct qla_hw_data *ha = vha->hw;
2957
2958 /* Dword reads to flash. */
2959 for (i = 0; i < length/4; i++, faddr += 4) {
2960 if (qla82xx_rom_fast_read(ha, faddr, &val)) {
2961 qla_printk(KERN_WARNING, ha,
2962 "Do ROM fast read failed\n");
2963 goto done_read;
2964 }
2965 dwptr[i] = __constant_cpu_to_le32(val);
2966 }
2967done_read:
2968 return dwptr;
2969}
2970
Giridhar Malavali77e334d2010-09-03 15:20:52 -07002971static int
Giridhar Malavalia9083012010-04-12 17:59:55 -07002972qla82xx_unprotect_flash(struct qla_hw_data *ha)
2973{
2974 int ret;
2975 uint32_t val;
2976
2977 ret = ql82xx_rom_lock_d(ha);
2978 if (ret < 0) {
2979 qla_printk(KERN_WARNING, ha, "ROM Lock failed\n");
2980 return ret;
2981 }
2982
2983 ret = qla82xx_read_status_reg(ha, &val);
2984 if (ret < 0)
2985 goto done_unprotect;
2986
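	/* Clear the flash block-protect bits in the status register so all sectors become writable. */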
Lalit Chandivade0547fb32010-05-28 15:08:26 -07002987 val &= ~(BLOCK_PROTECT_BITS << 2);
Giridhar Malavalia9083012010-04-12 17:59:55 -07002988 ret = qla82xx_write_status_reg(ha, val);
2989 if (ret < 0) {
Lalit Chandivade0547fb32010-05-28 15:08:26 -07002990 val |= (BLOCK_PROTECT_BITS << 2);
Giridhar Malavalia9083012010-04-12 17:59:55 -07002991 qla82xx_write_status_reg(ha, val);
2992 }
2993
2994 if (qla82xx_write_disable_flash(ha) != 0)
2995 qla_printk(KERN_WARNING, ha, "Write disable failed\n");
2996
2997done_unprotect:
2998 qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK));
2999 return ret;
3000}
3001
Giridhar Malavali77e334d2010-09-03 15:20:52 -07003002static int
Giridhar Malavalia9083012010-04-12 17:59:55 -07003003qla82xx_protect_flash(struct qla_hw_data *ha)
3004{
3005 int ret;
3006 uint32_t val;
3007
3008 ret = ql82xx_rom_lock_d(ha);
3009 if (ret < 0) {
3010 qla_printk(KERN_WARNING, ha, "ROM Lock failed\n");
3011 return ret;
3012 }
3013
3014 ret = qla82xx_read_status_reg(ha, &val);
3015 if (ret < 0)
3016 goto done_protect;
3017
Lalit Chandivade0547fb32010-05-28 15:08:26 -07003018 val |= (BLOCK_PROTECT_BITS << 2);
Giridhar Malavalia9083012010-04-12 17:59:55 -07003019 /* LOCK all sectors */
3020 ret = qla82xx_write_status_reg(ha, val);
3021 if (ret < 0)
3022 qla_printk(KERN_WARNING, ha, "Write status register failed\n");
3023
3024 if (qla82xx_write_disable_flash(ha) != 0)
3025 qla_printk(KERN_WARNING, ha, "Write disable failed\n");
3026done_protect:
3027 qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK));
3028 return ret;
3029}
3030
Giridhar Malavali77e334d2010-09-03 15:20:52 -07003031static int
Giridhar Malavalia9083012010-04-12 17:59:55 -07003032qla82xx_erase_sector(struct qla_hw_data *ha, int addr)
3033{
3034 int ret = 0;
3035
3036 ret = ql82xx_rom_lock_d(ha);
3037 if (ret < 0) {
3038 qla_printk(KERN_WARNING, ha, "ROM Lock failed\n");
3039 return ret;
3040 }
3041
3042 qla82xx_flash_set_write_enable(ha);
3043 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ADDRESS, addr);
3044 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 3);
3045 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_SE);
3046
3047 if (qla82xx_wait_rom_done(ha)) {
3048 qla_printk(KERN_WARNING, ha,
3049 "Error waiting for rom done\n");
3050 ret = -1;
3051 goto done;
3052 }
3053 ret = qla82xx_flash_wait_write_finish(ha);
3054done:
3055 qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK));
3056 return ret;
3057}
3058
3059/*
3060 * Address and length are byte address
3061 */
3062uint8_t *
3063qla82xx_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
3064 uint32_t offset, uint32_t length)
3065{
3066 scsi_block_requests(vha->host);
3067 qla82xx_read_flash_data(vha, (uint32_t *)buf, offset, length);
3068 scsi_unblock_requests(vha->host);
3069 return buf;
3070}
3071
3072static int
3073qla82xx_write_flash_data(struct scsi_qla_host *vha, uint32_t *dwptr,
3074 uint32_t faddr, uint32_t dwords)
3075{
3076 int ret;
3077 uint32_t liter;
3078 uint32_t sec_mask, rest_addr;
3079 dma_addr_t optrom_dma;
3080 void *optrom = NULL;
3081 int page_mode = 0;
3082 struct qla_hw_data *ha = vha->hw;
3083
3084 ret = -1;
3085
3086 /* Prepare burst-capable write on supported ISPs. */
3087 if (page_mode && !(faddr & 0xfff) &&
3088 dwords > OPTROM_BURST_DWORDS) {
3089 optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE,
3090 &optrom_dma, GFP_KERNEL);
3091 if (!optrom) {
3092 qla_printk(KERN_DEBUG, ha,
3093 "Unable to allocate memory for optrom "
3094 "burst write (%x KB).\n",
3095 OPTROM_BURST_SIZE / 1024);
3096 }
3097 }
3098
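	/*
	 * rest_addr masks the offset within a flash sector; when
	 * (faddr & rest_addr) is zero below, faddr sits on a sector
	 * boundary and that sector must be erased before programming.
	 */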
3099 rest_addr = ha->fdt_block_size - 1;
3100 sec_mask = ~rest_addr;
3101
3102 ret = qla82xx_unprotect_flash(ha);
3103 if (ret) {
3104 qla_printk(KERN_WARNING, ha,
3105 "Unable to unprotect flash for update.\n");
3106 goto write_done;
3107 }
3108
3109 for (liter = 0; liter < dwords; liter++, faddr += 4, dwptr++) {
3110 /* Are we at the beginning of a sector? */
3111 if ((faddr & rest_addr) == 0) {
3112
3113 ret = qla82xx_erase_sector(ha, faddr);
3114 if (ret) {
3115 DEBUG9(qla_printk(KERN_ERR, ha,
3116 "Unable to erase sector: "
3117 "address=%x.\n", faddr));
3118 break;
3119 }
3120 }
3121
3122 /* Go with burst-write. */
3123 if (optrom && (liter + OPTROM_BURST_DWORDS) <= dwords) {
3124 /* Copy data to DMA'ble buffer. */
3125 memcpy(optrom, dwptr, OPTROM_BURST_SIZE);
3126
3127 ret = qla2x00_load_ram(vha, optrom_dma,
3128 (ha->flash_data_off | faddr),
3129 OPTROM_BURST_DWORDS);
3130 if (ret != QLA_SUCCESS) {
3131 qla_printk(KERN_WARNING, ha,
3132 "Unable to burst-write optrom segment "
3133 "(%x/%x/%llx).\n", ret,
3134 (ha->flash_data_off | faddr),
3135 (unsigned long long)optrom_dma);
3136 qla_printk(KERN_WARNING, ha,
3137 "Reverting to slow-write.\n");
3138
3139 dma_free_coherent(&ha->pdev->dev,
3140 OPTROM_BURST_SIZE, optrom, optrom_dma);
3141 optrom = NULL;
3142 } else {
3143 liter += OPTROM_BURST_DWORDS - 1;
3144 faddr += OPTROM_BURST_DWORDS - 1;
3145 dwptr += OPTROM_BURST_DWORDS - 1;
3146 continue;
3147 }
3148 }
3149
3150 ret = qla82xx_write_flash_dword(ha, faddr,
3151 cpu_to_le32(*dwptr));
3152 if (ret) {
3153 DEBUG9(printk(KERN_DEBUG "%s(%ld) Unable to program "
3154 "flash address=%x data=%x.\n", __func__,
3155 ha->host_no, faddr, *dwptr));
3156 break;
3157 }
3158 }
3159
3160 ret = qla82xx_protect_flash(ha);
3161 if (ret)
3162 qla_printk(KERN_WARNING, ha,
3163 "Unable to protect flash after update.\n");
3164write_done:
3165 if (optrom)
3166 dma_free_coherent(&ha->pdev->dev,
3167 OPTROM_BURST_SIZE, optrom, optrom_dma);
3168 return ret;
3169}
3170
3171int
3172qla82xx_write_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
3173 uint32_t offset, uint32_t length)
3174{
3175 int rval;
3176
3177 /* Suspend HBA. */
3178 scsi_block_requests(vha->host);
3179 rval = qla82xx_write_flash_data(vha, (uint32_t *)buf, offset,
3180 length >> 2);
3181 scsi_unblock_requests(vha->host);
3182
3183 /* Convert return ISP82xx to generic */
3184 if (rval)
3185 rval = QLA_FUNCTION_FAILED;
3186 else
3187 rval = QLA_SUCCESS;
3188 return rval;
3189}
3190
3191void
3192qla82xx_start_iocbs(srb_t *sp)
3193{
3194 struct qla_hw_data *ha = sp->fcport->vha->hw;
3195 struct req_que *req = ha->req_q_map[0];
3196 struct device_reg_82xx __iomem *reg;
3197 uint32_t dbval;
3198
3199 /* Adjust ring index. */
3200 req->ring_index++;
3201 if (req->ring_index == req->length) {
3202 req->ring_index = 0;
3203 req->ring_ptr = req->ring;
3204 } else
3205 req->ring_ptr++;
3206
3207 reg = &ha->iobase->isp82;
3208 dbval = 0x04 | (ha->portnum << 5);
3209
3210 dbval = dbval | (req->id << 8) | (req->ring_index << 16);
Giridhar Malavali69078692010-05-28 15:08:28 -07003211 if (ql2xdbwr)
3212 qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
3213 else {
3214 WRT_REG_DWORD((unsigned long __iomem *)ha->nxdb_wr_ptr, dbval);
Giridhar Malavalia9083012010-04-12 17:59:55 -07003215 wmb();
Giridhar Malavali69078692010-05-28 15:08:28 -07003216 while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
3217 WRT_REG_DWORD((unsigned long __iomem *)ha->nxdb_wr_ptr,
3218 dbval);
3219 wmb();
3220 }
Giridhar Malavalia9083012010-04-12 17:59:55 -07003221 }
3222}
3223
Shyam Sundare6a42022010-09-07 20:55:32 -07003224void qla82xx_rom_lock_recovery(struct qla_hw_data *ha)
3225{
3226 if (qla82xx_rom_lock(ha))
3227 /* Someone else is holding the lock. */
3228 qla_printk(KERN_INFO, ha, "Resetting rom_lock\n");
3229
3230 /*
3231 * Either we got the lock, or someone
3232 * else died while holding it.
3233 * In either case, unlock.
3234 */
3235 qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK));
3236}
3237
Giridhar Malavalia9083012010-04-12 17:59:55 -07003238/*
3239 * qla82xx_device_bootstrap
3240 * Initialize device, set DEV_READY, start fw
3241 *
3242 * Note:
3243 * IDC lock must be held upon entry
3244 *
3245 * Return:
3246 * Success : 0
3247 * Failed : 1
3248 */
3249static int
3250qla82xx_device_bootstrap(scsi_qla_host_t *vha)
3251{
Shyam Sundare6a42022010-09-07 20:55:32 -07003252 int rval = QLA_SUCCESS;
3253 int i, timeout;
Giridhar Malavalia9083012010-04-12 17:59:55 -07003254 uint32_t old_count, count;
3255 struct qla_hw_data *ha = vha->hw;
Shyam Sundare6a42022010-09-07 20:55:32 -07003256 int need_reset = 0, peg_stuck = 1;
Giridhar Malavalia9083012010-04-12 17:59:55 -07003257
Shyam Sundare6a42022010-09-07 20:55:32 -07003258 need_reset = qla82xx_need_reset(ha);
Giridhar Malavalia9083012010-04-12 17:59:55 -07003259
3260 old_count = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
3261
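	/*
	 * Sample the firmware heartbeat counter for about two seconds
	 * (10 iterations x 200 ms); if it never advances, the peg
	 * processors are assumed to be stuck.
	 */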
3262 for (i = 0; i < 10; i++) {
3263 timeout = msleep_interruptible(200);
3264 if (timeout) {
3265 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
3266 QLA82XX_DEV_FAILED);
3267 return QLA_FUNCTION_FAILED;
3268 }
3269
3270 count = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
3271 if (count != old_count)
Shyam Sundare6a42022010-09-07 20:55:32 -07003272 peg_stuck = 0;
3273 }
3274
3275 if (need_reset) {
3276 /* We are trying to perform a recovery here. */
3277 if (peg_stuck)
3278 qla82xx_rom_lock_recovery(ha);
3279 goto dev_initialize;
3280 } else {
3281 /* Start of day for this ha context. */
3282 if (peg_stuck) {
3283 /* Either we are the first or recovery in progress. */
3284 qla82xx_rom_lock_recovery(ha);
3285 goto dev_initialize;
3286 } else
3287 /* Firmware already running. */
Giridhar Malavalia9083012010-04-12 17:59:55 -07003288 goto dev_ready;
3289 }
3290
Shyam Sundare6a42022010-09-07 20:55:32 -07003291 return rval;
3292
Giridhar Malavalia9083012010-04-12 17:59:55 -07003293dev_initialize:
3294 /* set to DEV_INITIALIZING */
3295 qla_printk(KERN_INFO, ha, "HW State: INITIALIZING\n");
3296 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_INITIALIZING);
3297
3298 /* The driver that sets the device state to initializing also sets the IDC version */
3299 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_IDC_VERSION, QLA82XX_IDC_VERSION);
3300
3301 qla82xx_idc_unlock(ha);
3302 rval = qla82xx_start_firmware(vha);
3303 qla82xx_idc_lock(ha);
3304
3305 if (rval != QLA_SUCCESS) {
3306 qla_printk(KERN_INFO, ha, "HW State: FAILED\n");
3307 qla82xx_clear_drv_active(ha);
3308 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_FAILED);
3309 return rval;
3310 }
3311
3312dev_ready:
3313 qla_printk(KERN_INFO, ha, "HW State: READY\n");
3314 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_READY);
3315
3316 return QLA_SUCCESS;
3317}
3318
Saurav Kashyap579d12b2010-12-21 16:00:14 -08003319/*
3320* qla82xx_need_qsnt_handler
3321* Code to start quiescence sequence
3322*
3323* Note:
3324* IDC lock must be held upon entry
3325*
3326* Return: void
3327*/
3328
3329static void
3330qla82xx_need_qsnt_handler(scsi_qla_host_t *vha)
3331{
3332 struct qla_hw_data *ha = vha->hw;
3333 uint32_t dev_state, drv_state, drv_active;
3334 unsigned long reset_timeout;
3335
3336 if (vha->flags.online) {
3337 /* Block any further I/O and wait for pending commands to complete */
3338 qla82xx_quiescent_state_cleanup(vha);
3339 }
3340
3341 /* Set the quiescence ready bit */
3342 qla82xx_set_qsnt_ready(ha);
3343
3344 /* Wait 30 seconds for the other functions to ack */
3345 reset_timeout = jiffies + (30 * HZ);
3346
3347 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
3348 drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
3349 /* The quiescence-ack bit sits one position above the active bit in each nibble, so shift drv_active left by one before comparing */
3350 drv_active = drv_active << 0x01;
3351
3352 while (drv_state != drv_active) {
3353
3354 if (time_after_eq(jiffies, reset_timeout)) {
3355 /* Quiescence timeout: the other functions didn't ack,
3356 * so revert the device state to DEV_READY.
3357 */
3358 qla_printk(KERN_INFO, ha,
3359 "%s: QUIESCENT TIMEOUT\n", QLA2XXX_DRIVER_NAME);
3360 qla_printk(KERN_INFO, ha,
3361 "DRV_ACTIVE:%d DRV_STATE:%d\n", drv_active,
3362 drv_state);
3363 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
3364 QLA82XX_DEV_READY);
3365 qla_printk(KERN_INFO, ha,
3366 "HW State: DEV_READY\n");
3367 qla82xx_idc_unlock(ha);
3368 qla2x00_perform_loop_resync(vha);
3369 qla82xx_idc_lock(ha);
3370
3371 qla82xx_clear_qsnt_ready(vha);
3372 return;
3373 }
3374
3375 qla82xx_idc_unlock(ha);
3376 msleep(1000);
3377 qla82xx_idc_lock(ha);
3378
3379 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
3380 drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
3381 drv_active = drv_active << 0x01;
3382 }
3383 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3384 /* everyone acked so set the state to DEV_QUIESCENCE */
3385 if (dev_state == QLA82XX_DEV_NEED_QUIESCENT) {
3386 qla_printk(KERN_INFO, ha, "HW State: DEV_QUIESCENT\n");
3387 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_QUIESCENT);
3388 }
3389}
3390
3391/*
3392* qla82xx_wait_for_state_change
3393* Wait for device state to change from given current state
3394*
3395* Note:
3396* IDC lock must not be held upon entry
3397*
3398* Return:
3399* Changed device state.
3400*/
3401uint32_t
3402qla82xx_wait_for_state_change(scsi_qla_host_t *vha, uint32_t curr_state)
3403{
3404 struct qla_hw_data *ha = vha->hw;
3405 uint32_t dev_state;
3406
3407 do {
3408 msleep(1000);
3409 qla82xx_idc_lock(ha);
3410 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3411 qla82xx_idc_unlock(ha);
3412 } while (dev_state == curr_state);
3413
3414 return dev_state;
3415}
3416
Giridhar Malavalia9083012010-04-12 17:59:55 -07003417static void
3418qla82xx_dev_failed_handler(scsi_qla_host_t *vha)
3419{
3420 struct qla_hw_data *ha = vha->hw;
3421
3422 /* Disable the board */
3423 qla_printk(KERN_INFO, ha, "Disabling the board\n");
3424
Giridhar Malavalib9637522010-05-28 15:08:15 -07003425 qla82xx_idc_lock(ha);
3426 qla82xx_clear_drv_active(ha);
3427 qla82xx_idc_unlock(ha);
3428
Giridhar Malavalia9083012010-04-12 17:59:55 -07003429 /* Set DEV_FAILED flag to disable timer */
3430 vha->device_flags |= DFLG_DEV_FAILED;
3431 qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
3432 qla2x00_mark_all_devices_lost(vha, 0);
3433 vha->flags.online = 0;
3434 vha->flags.init_done = 0;
3435}
3436
3437/*
3438 * qla82xx_need_reset_handler
3439 * Code to start reset sequence
3440 *
3441 * Note:
3442 * IDC lock must be held upon entry
3443 *
3444 * Return: void
3447 */
3448static void
3449qla82xx_need_reset_handler(scsi_qla_host_t *vha)
3450{
3451 uint32_t dev_state, drv_state, drv_active;
3452 unsigned long reset_timeout;
3453 struct qla_hw_data *ha = vha->hw;
3454 struct req_que *req = ha->req_q_map[0];
3455
3456 if (vha->flags.online) {
3457 qla82xx_idc_unlock(ha);
3458 qla2x00_abort_isp_cleanup(vha);
3459 ha->isp_ops->get_flash_version(vha, req->ring);
3460 ha->isp_ops->nvram_config(vha);
3461 qla82xx_idc_lock(ha);
3462 }
3463
3464 qla82xx_set_rst_ready(ha);
3465
3466 /* wait for 10 seconds for reset ack from all functions */
3467 reset_timeout = jiffies + (ha->nx_reset_timeout * HZ);
3468
3469 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
3470 drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
3471
3472 while (drv_state != drv_active) {
3473 if (time_after_eq(jiffies, reset_timeout)) {
3474 qla_printk(KERN_INFO, ha,
3475 "%s: RESET TIMEOUT!\n", QLA2XXX_DRIVER_NAME);
3476 break;
3477 }
3478 qla82xx_idc_unlock(ha);
3479 msleep(1000);
3480 qla82xx_idc_lock(ha);
3481 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
3482 drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
3483 }
3484
3485 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
Giridhar Malavalif1af6202010-05-04 15:01:34 -07003486 qla_printk(KERN_INFO, ha, "3:Device state is 0x%x = %s\n", dev_state,
3487 dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown");
3488
Giridhar Malavalia9083012010-04-12 17:59:55 -07003489 /* Force to DEV_COLD unless someone else is starting a reset */
3490 if (dev_state != QLA82XX_DEV_INITIALIZING) {
3491 qla_printk(KERN_INFO, ha, "HW State: COLD/RE-INIT\n");
3492 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_COLD);
3493 }
3494}
3495
3496static void
3497qla82xx_check_fw_alive(scsi_qla_host_t *vha)
3498{
3499 uint32_t fw_heartbeat_counter, halt_status;
3500 struct qla_hw_data *ha = vha->hw;
3501
3502 fw_heartbeat_counter = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
Lalit Chandivadea5b36322010-09-03 15:20:50 -07003503 /* all 0xff, assume AER/EEH in progress, ignore */
3504 if (fw_heartbeat_counter == 0xffffffff)
3505 return;
Giridhar Malavalia9083012010-04-12 17:59:55 -07003506 if (vha->fw_heartbeat_counter == fw_heartbeat_counter) {
3507 vha->seconds_since_last_heartbeat++;
3508 /* FW not alive after 2 seconds */
3509 if (vha->seconds_since_last_heartbeat == 2) {
3510 vha->seconds_since_last_heartbeat = 0;
3511 halt_status = qla82xx_rd_32(ha,
3512 QLA82XX_PEG_HALT_STATUS1);
3513 if (halt_status & HALT_STATUS_UNRECOVERABLE) {
3514 set_bit(ISP_UNRECOVERABLE, &vha->dpc_flags);
3515 } else {
3516 qla_printk(KERN_INFO, ha,
3517 "scsi(%ld): %s - detect abort needed\n",
3518 vha->host_no, __func__);
3519 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3520 }
3521 qla2xxx_wake_dpc(vha);
Giridhar Malavali4142b192010-09-03 14:57:03 -07003522 ha->flags.fw_hung = 1;
Santosh Vernekarcdbb0a42010-05-28 15:08:25 -07003523 if (ha->flags.mbox_busy) {
Santosh Vernekarcdbb0a42010-05-28 15:08:25 -07003524 ha->flags.mbox_int = 1;
3525 DEBUG2(qla_printk(KERN_ERR, ha,
Giridhar Malavali4142b192010-09-03 14:57:03 -07003526 "Due to fw hung, doing premature "
3527 "completion of mbx command\n"));
3528 if (test_bit(MBX_INTR_WAIT,
3529 &ha->mbx_cmd_flags))
3530 complete(&ha->mbx_intr_comp);
Santosh Vernekarcdbb0a42010-05-28 15:08:25 -07003531 }
Giridhar Malavalia9083012010-04-12 17:59:55 -07003532 }
Lalit Chandivadeefa786c2010-09-03 14:57:02 -07003533 } else
3534 vha->seconds_since_last_heartbeat = 0;
Giridhar Malavalia9083012010-04-12 17:59:55 -07003535 vha->fw_heartbeat_counter = fw_heartbeat_counter;
3536}
3537
3538/*
3539 * qla82xx_device_state_handler
3540 * Main state handler
3541 *
3542 * Note:
3543 * IDC lock must be held upon entry
3544 *
3545 * Return:
3546 * Success : 0
3547 * Failed : 1
3548 */
3549int
3550qla82xx_device_state_handler(scsi_qla_host_t *vha)
3551{
3552 uint32_t dev_state;
Giridhar Malavalia9083012010-04-12 17:59:55 -07003553 int rval = QLA_SUCCESS;
3554 unsigned long dev_init_timeout;
3555 struct qla_hw_data *ha = vha->hw;
3556
3557 qla82xx_idc_lock(ha);
3558 if (!vha->flags.init_done)
3559 qla82xx_set_drv_active(vha);
3560
Giridhar Malavalif1af6202010-05-04 15:01:34 -07003561 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3562 qla_printk(KERN_INFO, ha, "1:Device state is 0x%x = %s\n", dev_state,
3563 dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown");
Giridhar Malavalia9083012010-04-12 17:59:55 -07003564
3565 /* wait for 30 seconds for device to go ready */
3566 dev_init_timeout = jiffies + (ha->nx_dev_init_timeout * HZ);
3567
3568 while (1) {
3569
3570 if (time_after_eq(jiffies, dev_init_timeout)) {
3571 DEBUG(qla_printk(KERN_INFO, ha,
3572 "%s: device init failed!\n",
3573 QLA2XXX_DRIVER_NAME));
3574 rval = QLA_FUNCTION_FAILED;
3575 break;
3576 }
3577 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
Giridhar Malavalif1af6202010-05-04 15:01:34 -07003578 qla_printk(KERN_INFO, ha,
3579 "2:Device state is 0x%x = %s\n", dev_state,
3580 dev_state < MAX_STATES ?
3581 qdev_state[dev_state] : "Unknown");
3582
Giridhar Malavalia9083012010-04-12 17:59:55 -07003583 switch (dev_state) {
3584 case QLA82XX_DEV_READY:
3585 goto exit;
3586 case QLA82XX_DEV_COLD:
3587 rval = qla82xx_device_bootstrap(vha);
3588 goto exit;
3589 case QLA82XX_DEV_INITIALIZING:
3590 qla82xx_idc_unlock(ha);
3591 msleep(1000);
3592 qla82xx_idc_lock(ha);
3593 break;
3594 case QLA82XX_DEV_NEED_RESET:
3595 if (!ql2xdontresethba)
3596 qla82xx_need_reset_handler(vha);
3597 break;
3598 case QLA82XX_DEV_NEED_QUIESCENT:
Saurav Kashyap579d12b2010-12-21 16:00:14 -08003599 qla82xx_need_qsnt_handler(vha);
3600 /* Reset timeout value after quiescence handler */
3601 dev_init_timeout = jiffies + (ha->nx_dev_init_timeout\
3602 * HZ);
3603 break;
Giridhar Malavalia9083012010-04-12 17:59:55 -07003604 case QLA82XX_DEV_QUIESCENT:
Saurav Kashyap579d12b2010-12-21 16:00:14 -08003605 /* Owner will exit and other will wait for the state
3606 * to get changed
3607 */
			if (ha->flags.quiesce_owner)
				goto exit;

			qla82xx_idc_unlock(ha);
			msleep(1000);
			qla82xx_idc_lock(ha);

			/* Reset timeout value after quiescence handler */
			dev_init_timeout = jiffies +
				(ha->nx_dev_init_timeout * HZ);
			break;
		case QLA82XX_DEV_FAILED:
			qla82xx_dev_failed_handler(vha);
			rval = QLA_FUNCTION_FAILED;
			goto exit;
		default:
			qla82xx_idc_unlock(ha);
			msleep(1000);
			qla82xx_idc_lock(ha);
		}
	}
exit:
	qla82xx_idc_unlock(ha);
	return rval;
}

void qla82xx_watchdog(scsi_qla_host_t *vha)
{
	uint32_t dev_state;
	struct qla_hw_data *ha = vha->hw;

	dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);

	/* don't poll if reset is going on */
	if (!(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
	    test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
	    test_bit(ISP_ABORT_RETRY, &vha->dpc_flags))) {
		if (dev_state == QLA82XX_DEV_NEED_RESET) {
			qla_printk(KERN_WARNING, ha,
			    "%s(): Adapter reset needed!\n", __func__);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			qla2xxx_wake_dpc(vha);
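			/*
			 * With the firmware marked hung, an in-flight mailbox
			 * command will never see its completion interrupt;
			 * complete it prematurely so the mailbox waiter can
			 * unwind.
			 */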
			ha->flags.fw_hung = 1;
			if (ha->flags.mbox_busy) {
				ha->flags.mbox_int = 1;
				DEBUG2(qla_printk(KERN_ERR, ha,
				    "Need reset, doing premature "
				    "completion of mbx command\n"));
				if (test_bit(MBX_INTR_WAIT,
				    &ha->mbx_cmd_flags))
					complete(&ha->mbx_intr_comp);
			}
		} else if (dev_state == QLA82XX_DEV_NEED_QUIESCENT &&
		    !test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags)) {
			DEBUG(qla_printk(KERN_INFO, ha,
			    "scsi(%ld) %s - detected quiescence needed\n",
			    vha->host_no, __func__));
			set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
			qla2xxx_wake_dpc(vha);
		} else {
			qla82xx_check_fw_alive(vha);
		}
	}
}

int qla82xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
{
	int rval;

	rval = qla82xx_device_state_handler(vha);
	return rval;
}

/*
 * qla82xx_abort_isp
 *	Resets the ISP and aborts all outstanding commands.
 *
 * Input:
 *	vha = adapter block pointer.
 *
 * Returns:
 *	0 = success
 */
int
qla82xx_abort_isp(scsi_qla_host_t *vha)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	uint32_t dev_state;

	if (vha->device_flags & DFLG_DEV_FAILED) {
		qla_printk(KERN_WARNING, ha,
		    "%s(%ld): Device in failed state, "
		    "Exiting.\n", __func__, vha->host_no);
		return QLA_SUCCESS;
	}

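	/*
	 * Under the IDC lock, a READY device is moved to NEED_RESET so every
	 * participating function sees the reset request; for any other state
	 * it is only logged and the state handler below drives recovery.
	 */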
	qla82xx_idc_lock(ha);
	dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
	if (dev_state == QLA82XX_DEV_READY) {
		qla_printk(KERN_INFO, ha, "HW State: NEED RESET\n");
		qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
		    QLA82XX_DEV_NEED_RESET);
	} else
		qla_printk(KERN_INFO, ha, "HW State: %s\n",
		    dev_state < MAX_STATES ?
		    qdev_state[dev_state] : "Unknown");
	qla82xx_idc_unlock(ha);

	rval = qla82xx_device_state_handler(vha);

	qla82xx_idc_lock(ha);
	qla82xx_clear_rst_ready(ha);
	qla82xx_idc_unlock(ha);

	if (rval == QLA_SUCCESS) {
		ha->flags.fw_hung = 0;
		qla82xx_restart_isp(vha);
	}

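	/*
	 * Recovery failed: keep the port online and retry the ISP abort up to
	 * MAX_RETRIES_OF_ISP_ABORT times; once the retry budget is exhausted,
	 * reset_adapter() disables the board completely.
	 */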
	if (rval) {
		vha->flags.online = 1;
		if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
			if (ha->isp_abort_cnt == 0) {
				qla_printk(KERN_WARNING, ha,
				    "ISP error recovery failed - "
				    "board disabled\n");
				/*
				 * The next call disables the board
				 * completely.
				 */
				ha->isp_ops->reset_adapter(vha);
				vha->flags.online = 0;
				clear_bit(ISP_ABORT_RETRY,
				    &vha->dpc_flags);
				rval = QLA_SUCCESS;
			} else { /* schedule another ISP abort */
				ha->isp_abort_cnt--;
				DEBUG(qla_printk(KERN_INFO, ha,
				    "qla%ld: ISP abort - retry remaining %d\n",
				    vha->host_no, ha->isp_abort_cnt));
				rval = QLA_FUNCTION_FAILED;
			}
		} else {
			ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT;
			DEBUG(qla_printk(KERN_INFO, ha,
			    "(%ld): ISP error recovery - retrying (%d) "
			    "more times\n", vha->host_no, ha->isp_abort_cnt));
			set_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
			rval = QLA_FUNCTION_FAILED;
		}
	}
	return rval;
}

/*
 * qla82xx_fcoe_ctx_reset
 *	Performs a quick reset and aborts all outstanding commands.
 *	This only resets the FCoE context and avoids a full-blown
 *	chip reset.
 *
 * Input:
 *	vha = adapter block pointer.
 *
 * Returns:
 *	0 = success
 */
int qla82xx_fcoe_ctx_reset(scsi_qla_host_t *vha)
{
	int rval = QLA_FUNCTION_FAILED;

	if (vha->flags.online) {
		/* Abort all outstanding commands, so as to be requeued later */
		qla2x00_abort_isp_cleanup(vha);
	}

	/* Stop currently executing firmware.
	 * This will destroy existing FCoE context at the F/W end.
	 */
	qla2x00_try_to_stop_firmware(vha);

	/* Restart. Creates a new FCoE context on INIT_FIRMWARE. */
	rval = qla82xx_restart_isp(vha);

	return rval;
}

/*
 * qla2x00_wait_for_fcoe_ctx_reset
 *	Waits until the FCoE context is reset.
 *
 * Note:
 *	Does context switching here.
 *	Release any spinlock before calling this routine.
 *
 * Return:
 *	Success (fcoe_ctx reset is done) : 0
 *	Failed (fcoe_ctx reset not completed within max loop timeout) : 1
 */
int qla2x00_wait_for_fcoe_ctx_reset(scsi_qla_host_t *vha)
{
	int status = QLA_FUNCTION_FAILED;
	unsigned long wait_reset;

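	/*
	 * Sleep in one-second (HZ) increments until both FCOE_CTX_RESET_NEEDED
	 * and ABORT_ISP_ACTIVE are clear, or MAX_LOOP_TIMEOUT seconds elapse.
	 */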
	wait_reset = jiffies + (MAX_LOOP_TIMEOUT * HZ);
	while ((test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) ||
	    test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
	    && time_before(jiffies, wait_reset)) {

		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(HZ);

		if (!test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) &&
		    !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
			status = QLA_SUCCESS;
			break;
		}
	}
	DEBUG2(printk(KERN_INFO
	    "%s status=%d\n", __func__, status));

	return status;
}