blob: 6887538d1736dba48f1bcbbfefd6d4ae28e43c9f [file] [log] [blame]
Giridhar Malavali6e980162010-03-19 17:03:58 -07001/*
2 * QLogic Fibre Channel HBA Driver
Andrew Vasquez07e264b2011-03-30 11:46:23 -07003 * Copyright (c) 2003-2011 QLogic Corporation
Giridhar Malavali6e980162010-03-19 17:03:58 -07004 *
5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */
7#include "qla_def.h"
8
9#include <linux/kthread.h>
10#include <linux/vmalloc.h>
11#include <linux/delay.h>
12
13/* BSG support for ELS/CT pass through */
14inline srb_t *
15qla2x00_get_ctx_bsg_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size)
16{
17 srb_t *sp;
18 struct qla_hw_data *ha = vha->hw;
Madhuranath Iyengar49163922010-05-04 15:01:28 -070019 struct srb_ctx *ctx;
Giridhar Malavali6e980162010-03-19 17:03:58 -070020
21 sp = mempool_alloc(ha->srb_mempool, GFP_KERNEL);
22 if (!sp)
23 goto done;
24 ctx = kzalloc(size, GFP_KERNEL);
25 if (!ctx) {
26 mempool_free(sp, ha->srb_mempool);
27 sp = NULL;
28 goto done;
29 }
30
31 memset(sp, 0, sizeof(*sp));
32 sp->fcport = fcport;
33 sp->ctx = ctx;
Andrew Vasquez57807902011-11-18 09:03:20 -080034 ctx->iocbs = 1;
Giridhar Malavali6e980162010-03-19 17:03:58 -070035done:
36 return sp;
37}
38
Sarang Radke09ff7012010-03-19 17:03:59 -070039int
Saurav Kashyap7c3df132011-07-14 12:00:13 -070040qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t *vha,
41 struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag)
Sarang Radke09ff7012010-03-19 17:03:59 -070042{
43 int i, ret, num_valid;
44 uint8_t *bcode;
45 struct qla_fcp_prio_entry *pri_entry;
Madhuranath Iyengar2f0f3f42010-07-23 15:28:24 +050046 uint32_t *bcode_val_ptr, bcode_val;
Sarang Radke09ff7012010-03-19 17:03:59 -070047
48 ret = 1;
49 num_valid = 0;
50 bcode = (uint8_t *)pri_cfg;
Madhuranath Iyengar2f0f3f42010-07-23 15:28:24 +050051 bcode_val_ptr = (uint32_t *)pri_cfg;
52 bcode_val = (uint32_t)(*bcode_val_ptr);
Sarang Radke09ff7012010-03-19 17:03:59 -070053
Madhuranath Iyengar2f0f3f42010-07-23 15:28:24 +050054 if (bcode_val == 0xFFFFFFFF) {
55 /* No FCP Priority config data in flash */
Saurav Kashyap7c3df132011-07-14 12:00:13 -070056 ql_dbg(ql_dbg_user, vha, 0x7051,
57 "No FCP Priority config data.\n");
Madhuranath Iyengar2f0f3f42010-07-23 15:28:24 +050058 return 0;
59 }
60
61 if (bcode[0] != 'H' || bcode[1] != 'Q' || bcode[2] != 'O' ||
62 bcode[3] != 'S') {
63 /* Invalid FCP priority data header*/
Saurav Kashyap7c3df132011-07-14 12:00:13 -070064 ql_dbg(ql_dbg_user, vha, 0x7052,
65 "Invalid FCP Priority data header. bcode=0x%x.\n",
66 bcode_val);
Sarang Radke09ff7012010-03-19 17:03:59 -070067 return 0;
68 }
69 if (flag != 1)
70 return ret;
71
72 pri_entry = &pri_cfg->entry[0];
73 for (i = 0; i < pri_cfg->num_entries; i++) {
74 if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID)
75 num_valid++;
76 pri_entry++;
77 }
78
Madhuranath Iyengar2f0f3f42010-07-23 15:28:24 +050079 if (num_valid == 0) {
80 /* No valid FCP priority data entries */
Saurav Kashyap7c3df132011-07-14 12:00:13 -070081 ql_dbg(ql_dbg_user, vha, 0x7053,
82 "No valid FCP Priority data entries.\n");
Sarang Radke09ff7012010-03-19 17:03:59 -070083 ret = 0;
Madhuranath Iyengar2f0f3f42010-07-23 15:28:24 +050084 } else {
85 /* FCP priority data is valid */
Saurav Kashyap7c3df132011-07-14 12:00:13 -070086 ql_dbg(ql_dbg_user, vha, 0x7054,
87 "Valid FCP priority data. num entries = %d.\n",
88 num_valid);
Madhuranath Iyengar2f0f3f42010-07-23 15:28:24 +050089 }
Sarang Radke09ff7012010-03-19 17:03:59 -070090
91 return ret;
92}
93
/*
 * qla24xx_proc_fcp_prio_cfg_cmd() - BSG vendor handler for the FCP
 * priority (QoS) configuration sub-commands.
 *
 * The sub-command comes from vendor_cmd[1]: disable, enable, get or
 * set the FCP priority configuration cached in ha->fcp_prio_cfg.
 * Completes @bsg_job via job_done() on every path; returns 0 on
 * success or a negative errno.
 */
static int
qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
{
	struct Scsi_Host *host = bsg_job->shost;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int ret = 0;
	uint32_t len;
	uint32_t oper;

	bsg_job->reply->reply_payload_rcv_len = 0;

	/* Feature is only reachable on 24xx-type, 25xx and 82xx parts. */
	if (!(IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_QLA82XX(ha))) {
		ret = -EINVAL;
		goto exit_fcp_prio_cfg;
	}

	/* Get the sub command */
	oper = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];

	/* Only set config is allowed if config memory is not allocated */
	if (!ha->fcp_prio_cfg && (oper != QLFC_FCP_PRIO_SET_CONFIG)) {
		ret = -EINVAL;
		goto exit_fcp_prio_cfg;
	}
	switch (oper) {
	case QLFC_FCP_PRIO_DISABLE:
		/* Clear the enable attribute and push the change to all
		 * ports; error if priority was not enabled to begin with.
		 */
		if (ha->flags.fcp_prio_enabled) {
			ha->flags.fcp_prio_enabled = 0;
			ha->fcp_prio_cfg->attributes &=
			    ~FCP_PRIO_ATTR_ENABLE;
			qla24xx_update_all_fcp_prio(vha);
			bsg_job->reply->result = DID_OK;
		} else {
			ret = -EINVAL;
			bsg_job->reply->result = (DID_ERROR << 16);
			goto exit_fcp_prio_cfg;
		}
		break;

	case QLFC_FCP_PRIO_ENABLE:
		/* NOTE(review): when priority is already enabled this case
		 * falls through without setting reply->result — confirm
		 * callers treat an untouched reply as success.
		 */
		if (!ha->flags.fcp_prio_enabled) {
			if (ha->fcp_prio_cfg) {
				ha->flags.fcp_prio_enabled = 1;
				ha->fcp_prio_cfg->attributes |=
				    FCP_PRIO_ATTR_ENABLE;
				qla24xx_update_all_fcp_prio(vha);
				bsg_job->reply->result = DID_OK;
			} else {
				/* Nothing to enable: no config cached. */
				ret = -EINVAL;
				bsg_job->reply->result = (DID_ERROR << 16);
				goto exit_fcp_prio_cfg;
			}
		}
		break;

	case QLFC_FCP_PRIO_GET_CONFIG:
		/* Copy the cached config into the caller's reply buffer. */
		len = bsg_job->reply_payload.payload_len;
		if (!len || len > FCP_PRIO_CFG_SIZE) {
			ret = -EINVAL;
			bsg_job->reply->result = (DID_ERROR << 16);
			goto exit_fcp_prio_cfg;
		}

		bsg_job->reply->result = DID_OK;
		bsg_job->reply->reply_payload_rcv_len =
		    sg_copy_from_buffer(
		    bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, ha->fcp_prio_cfg,
		    len);

		break;

	case QLFC_FCP_PRIO_SET_CONFIG:
		len = bsg_job->request_payload.payload_len;
		if (!len || len > FCP_PRIO_CFG_SIZE) {
			bsg_job->reply->result = (DID_ERROR << 16);
			ret = -EINVAL;
			goto exit_fcp_prio_cfg;
		}

		/* Lazily allocate the config cache on first SET. */
		if (!ha->fcp_prio_cfg) {
			ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE);
			if (!ha->fcp_prio_cfg) {
				ql_log(ql_log_warn, vha, 0x7050,
				    "Unable to allocate memory for fcp prio "
				    "config data (%x).\n", FCP_PRIO_CFG_SIZE);
				bsg_job->reply->result = (DID_ERROR << 16);
				ret = -ENOMEM;
				goto exit_fcp_prio_cfg;
			}
		}

		memset(ha->fcp_prio_cfg, 0, FCP_PRIO_CFG_SIZE);
		sg_copy_to_buffer(bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, ha->fcp_prio_cfg,
		    FCP_PRIO_CFG_SIZE);

		/* validate fcp priority data */

		if (!qla24xx_fcp_prio_cfg_valid(vha,
		    (struct qla_fcp_prio_cfg *) ha->fcp_prio_cfg, 1)) {
			bsg_job->reply->result = (DID_ERROR << 16);
			ret = -EINVAL;
			/* If the buffer was invalid, the cached
			 * fcp_prio_cfg is of no use
			 */
			vfree(ha->fcp_prio_cfg);
			ha->fcp_prio_cfg = NULL;
			goto exit_fcp_prio_cfg;
		}

		/* Mirror the blob's enable attribute into the flag and
		 * propagate the new priority data to all ports.
		 */
		ha->flags.fcp_prio_enabled = 0;
		if (ha->fcp_prio_cfg->attributes & FCP_PRIO_ATTR_ENABLE)
			ha->flags.fcp_prio_enabled = 1;
		qla24xx_update_all_fcp_prio(vha);
		bsg_job->reply->result = DID_OK;
		break;
	default:
		ret = -EINVAL;
		break;
	}
exit_fcp_prio_cfg:
	bsg_job->job_done(bsg_job);
	return ret;
}
Giridhar Malavali6e980162010-03-19 17:03:58 -0700220static int
221qla2x00_process_els(struct fc_bsg_job *bsg_job)
222{
223 struct fc_rport *rport;
Harish Zunjarrao08f71e02010-07-23 15:28:33 +0500224 fc_port_t *fcport = NULL;
Giridhar Malavali6e980162010-03-19 17:03:58 -0700225 struct Scsi_Host *host;
226 scsi_qla_host_t *vha;
227 struct qla_hw_data *ha;
228 srb_t *sp;
229 const char *type;
230 int req_sg_cnt, rsp_sg_cnt;
231 int rval = (DRIVER_ERROR << 16);
232 uint16_t nextlid = 0;
Madhuranath Iyengar49163922010-05-04 15:01:28 -0700233 struct srb_ctx *els;
Giridhar Malavali6e980162010-03-19 17:03:58 -0700234
Harish Zunjarrao08f71e02010-07-23 15:28:33 +0500235 if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
236 rport = bsg_job->rport;
237 fcport = *(fc_port_t **) rport->dd_data;
238 host = rport_to_shost(rport);
239 vha = shost_priv(host);
240 ha = vha->hw;
241 type = "FC_BSG_RPT_ELS";
242 } else {
243 host = bsg_job->shost;
244 vha = shost_priv(host);
245 ha = vha->hw;
246 type = "FC_BSG_HST_ELS_NOLOGIN";
247 }
248
249 /* pass through is supported only for ISP 4Gb or higher */
250 if (!IS_FWI2_CAPABLE(ha)) {
Saurav Kashyap7c3df132011-07-14 12:00:13 -0700251 ql_dbg(ql_dbg_user, vha, 0x7001,
252 "ELS passthru not supported for ISP23xx based adapters.\n");
Harish Zunjarrao08f71e02010-07-23 15:28:33 +0500253 rval = -EPERM;
254 goto done;
255 }
256
Giridhar Malavali6e980162010-03-19 17:03:58 -0700257 /* Multiple SG's are not supported for ELS requests */
258 if (bsg_job->request_payload.sg_cnt > 1 ||
259 bsg_job->reply_payload.sg_cnt > 1) {
Saurav Kashyap7c3df132011-07-14 12:00:13 -0700260 ql_dbg(ql_dbg_user, vha, 0x7002,
261 "Multiple SG's are not suppored for ELS requests, "
262 "request_sg_cnt=%x reply_sg_cnt=%x.\n",
263 bsg_job->request_payload.sg_cnt,
264 bsg_job->reply_payload.sg_cnt);
Giridhar Malavali6e980162010-03-19 17:03:58 -0700265 rval = -EPERM;
266 goto done;
267 }
268
269 /* ELS request for rport */
270 if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
Giridhar Malavali6e980162010-03-19 17:03:58 -0700271 /* make sure the rport is logged in,
272 * if not perform fabric login
273 */
274 if (qla2x00_fabric_login(vha, fcport, &nextlid)) {
Saurav Kashyap7c3df132011-07-14 12:00:13 -0700275 ql_dbg(ql_dbg_user, vha, 0x7003,
276 "Failed to login port %06X for ELS passthru.\n",
277 fcport->d_id.b24);
Giridhar Malavali6e980162010-03-19 17:03:58 -0700278 rval = -EIO;
279 goto done;
280 }
281 } else {
Giridhar Malavali6e980162010-03-19 17:03:58 -0700282 /* Allocate a dummy fcport structure, since functions
283 * preparing the IOCB and mailbox command retrieves port
284 * specific information from fcport structure. For Host based
285 * ELS commands there will be no fcport structure allocated
286 */
287 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
288 if (!fcport) {
289 rval = -ENOMEM;
290 goto done;
291 }
292
293 /* Initialize all required fields of fcport */
294 fcport->vha = vha;
295 fcport->vp_idx = vha->vp_idx;
296 fcport->d_id.b.al_pa =
297 bsg_job->request->rqst_data.h_els.port_id[0];
298 fcport->d_id.b.area =
299 bsg_job->request->rqst_data.h_els.port_id[1];
300 fcport->d_id.b.domain =
301 bsg_job->request->rqst_data.h_els.port_id[2];
302 fcport->loop_id =
303 (fcport->d_id.b.al_pa == 0xFD) ?
304 NPH_FABRIC_CONTROLLER : NPH_F_PORT;
305 }
306
307 if (!vha->flags.online) {
Saurav Kashyap7c3df132011-07-14 12:00:13 -0700308 ql_log(ql_log_warn, vha, 0x7005, "Host not online.\n");
Giridhar Malavali6e980162010-03-19 17:03:58 -0700309 rval = -EIO;
310 goto done;
311 }
312
313 req_sg_cnt =
314 dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
315 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
316 if (!req_sg_cnt) {
317 rval = -ENOMEM;
318 goto done_free_fcport;
319 }
Andrew Vasquez6c452a42010-03-19 17:04:02 -0700320
321 rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
322 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
Giridhar Malavali6e980162010-03-19 17:03:58 -0700323 if (!rsp_sg_cnt) {
324 rval = -ENOMEM;
325 goto done_free_fcport;
326 }
327
328 if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
Andrew Vasquez6c452a42010-03-19 17:04:02 -0700329 (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
Saurav Kashyap7c3df132011-07-14 12:00:13 -0700330 ql_log(ql_log_warn, vha, 0x7008,
331 "dma mapping resulted in different sg counts, "
332 "request_sg_cnt: %x dma_request_sg_cnt:%x reply_sg_cnt:%x "
333 "dma_reply_sg_cnt:%x.\n", bsg_job->request_payload.sg_cnt,
334 req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
Giridhar Malavali6e980162010-03-19 17:03:58 -0700335 rval = -EAGAIN;
336 goto done_unmap_sg;
337 }
338
339 /* Alloc SRB structure */
Madhuranath Iyengar49163922010-05-04 15:01:28 -0700340 sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_ctx));
Giridhar Malavali6e980162010-03-19 17:03:58 -0700341 if (!sp) {
342 rval = -ENOMEM;
Andrew Vasquez6c452a42010-03-19 17:04:02 -0700343 goto done_unmap_sg;
Giridhar Malavali6e980162010-03-19 17:03:58 -0700344 }
345
346 els = sp->ctx;
Madhuranath Iyengar49163922010-05-04 15:01:28 -0700347 els->type =
Giridhar Malavali6e980162010-03-19 17:03:58 -0700348 (bsg_job->request->msgcode == FC_BSG_RPT_ELS ?
349 SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST);
Madhuranath Iyengar38222632010-05-04 15:01:29 -0700350 els->name =
351 (bsg_job->request->msgcode == FC_BSG_RPT_ELS ?
352 "bsg_els_rpt" : "bsg_els_hst");
Madhuranath Iyengar49163922010-05-04 15:01:28 -0700353 els->u.bsg_job = bsg_job;
Giridhar Malavali6e980162010-03-19 17:03:58 -0700354
Saurav Kashyap7c3df132011-07-14 12:00:13 -0700355 ql_dbg(ql_dbg_user, vha, 0x700a,
356 "bsg rqst type: %s els type: %x - loop-id=%x "
357 "portid=%-2x%02x%02x.\n", type,
358 bsg_job->request->rqst_data.h_els.command_code, fcport->loop_id,
359 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);
Giridhar Malavali6e980162010-03-19 17:03:58 -0700360
361 rval = qla2x00_start_sp(sp);
362 if (rval != QLA_SUCCESS) {
Saurav Kashyap7c3df132011-07-14 12:00:13 -0700363 ql_log(ql_log_warn, vha, 0x700e,
364 "qla2x00_start_sp failed = %d\n", rval);
Giridhar Malavali6e980162010-03-19 17:03:58 -0700365 kfree(sp->ctx);
366 mempool_free(sp, ha->srb_mempool);
367 rval = -EIO;
368 goto done_unmap_sg;
369 }
370 return rval;
371
372done_unmap_sg:
373 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
374 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
375 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
376 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
377 goto done_free_fcport;
378
379done_free_fcport:
380 if (bsg_job->request->msgcode == FC_BSG_HST_ELS_NOLOGIN)
381 kfree(fcport);
382done:
383 return rval;
384}
385
Andrew Vasquez57807902011-11-18 09:03:20 -0800386inline uint16_t
387qla24xx_calc_ct_iocbs(uint16_t dsds)
388{
389 uint16_t iocbs;
390
391 iocbs = 1;
392 if (dsds > 2) {
393 iocbs += (dsds - 2) / 5;
394 if ((dsds - 2) % 5)
395 iocbs++;
396 }
397 return iocbs;
398}
399
Giridhar Malavali6e980162010-03-19 17:03:58 -0700400static int
401qla2x00_process_ct(struct fc_bsg_job *bsg_job)
402{
403 srb_t *sp;
404 struct Scsi_Host *host = bsg_job->shost;
405 scsi_qla_host_t *vha = shost_priv(host);
406 struct qla_hw_data *ha = vha->hw;
407 int rval = (DRIVER_ERROR << 16);
408 int req_sg_cnt, rsp_sg_cnt;
409 uint16_t loop_id;
410 struct fc_port *fcport;
411 char *type = "FC_BSG_HST_CT";
Madhuranath Iyengar49163922010-05-04 15:01:28 -0700412 struct srb_ctx *ct;
Giridhar Malavali6e980162010-03-19 17:03:58 -0700413
Giridhar Malavali6e980162010-03-19 17:03:58 -0700414 req_sg_cnt =
415 dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
416 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
Andrew Vasquez6c452a42010-03-19 17:04:02 -0700417 if (!req_sg_cnt) {
Saurav Kashyap7c3df132011-07-14 12:00:13 -0700418 ql_log(ql_log_warn, vha, 0x700f,
419 "dma_map_sg return %d for request\n", req_sg_cnt);
Giridhar Malavali6e980162010-03-19 17:03:58 -0700420 rval = -ENOMEM;
421 goto done;
422 }
423
424 rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
425 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
426 if (!rsp_sg_cnt) {
Saurav Kashyap7c3df132011-07-14 12:00:13 -0700427 ql_log(ql_log_warn, vha, 0x7010,
428 "dma_map_sg return %d for reply\n", rsp_sg_cnt);
Giridhar Malavali6e980162010-03-19 17:03:58 -0700429 rval = -ENOMEM;
430 goto done;
431 }
432
433 if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
Andrew Vasquez6c452a42010-03-19 17:04:02 -0700434 (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
Saurav Kashyap7c3df132011-07-14 12:00:13 -0700435 ql_log(ql_log_warn, vha, 0x7011,
436 "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
437 "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
438 req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
Giridhar Malavali6e980162010-03-19 17:03:58 -0700439 rval = -EAGAIN;
Andrew Vasquez6c452a42010-03-19 17:04:02 -0700440 goto done_unmap_sg;
Giridhar Malavali6e980162010-03-19 17:03:58 -0700441 }
442
443 if (!vha->flags.online) {
Saurav Kashyap7c3df132011-07-14 12:00:13 -0700444 ql_log(ql_log_warn, vha, 0x7012,
445 "Host is not online.\n");
Giridhar Malavali6e980162010-03-19 17:03:58 -0700446 rval = -EIO;
447 goto done_unmap_sg;
448 }
449
450 loop_id =
451 (bsg_job->request->rqst_data.h_ct.preamble_word1 & 0xFF000000)
452 >> 24;
453 switch (loop_id) {
Andrew Vasquez6c452a42010-03-19 17:04:02 -0700454 case 0xFC:
455 loop_id = cpu_to_le16(NPH_SNS);
456 break;
457 case 0xFA:
458 loop_id = vha->mgmt_svr_loop_id;
459 break;
460 default:
Saurav Kashyap7c3df132011-07-14 12:00:13 -0700461 ql_dbg(ql_dbg_user, vha, 0x7013,
462 "Unknown loop id: %x.\n", loop_id);
Andrew Vasquez6c452a42010-03-19 17:04:02 -0700463 rval = -EINVAL;
464 goto done_unmap_sg;
Giridhar Malavali6e980162010-03-19 17:03:58 -0700465 }
466
467 /* Allocate a dummy fcport structure, since functions preparing the
468 * IOCB and mailbox command retrieves port specific information
469 * from fcport structure. For Host based ELS commands there will be
470 * no fcport structure allocated
471 */
472 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
Andrew Vasquez6c452a42010-03-19 17:04:02 -0700473 if (!fcport) {
Saurav Kashyap7c3df132011-07-14 12:00:13 -0700474 ql_log(ql_log_warn, vha, 0x7014,
475 "Failed to allocate fcport.\n");
Giridhar Malavali6e980162010-03-19 17:03:58 -0700476 rval = -ENOMEM;
Andrew Vasquez6c452a42010-03-19 17:04:02 -0700477 goto done_unmap_sg;
Giridhar Malavali6e980162010-03-19 17:03:58 -0700478 }
479
480 /* Initialize all required fields of fcport */
481 fcport->vha = vha;
482 fcport->vp_idx = vha->vp_idx;
483 fcport->d_id.b.al_pa = bsg_job->request->rqst_data.h_ct.port_id[0];
484 fcport->d_id.b.area = bsg_job->request->rqst_data.h_ct.port_id[1];
485 fcport->d_id.b.domain = bsg_job->request->rqst_data.h_ct.port_id[2];
486 fcport->loop_id = loop_id;
487
488 /* Alloc SRB structure */
Madhuranath Iyengar49163922010-05-04 15:01:28 -0700489 sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_ctx));
Giridhar Malavali6e980162010-03-19 17:03:58 -0700490 if (!sp) {
Saurav Kashyap7c3df132011-07-14 12:00:13 -0700491 ql_log(ql_log_warn, vha, 0x7015,
492 "qla2x00_get_ctx_bsg_sp failed.\n");
Giridhar Malavali6e980162010-03-19 17:03:58 -0700493 rval = -ENOMEM;
494 goto done_free_fcport;
495 }
496
497 ct = sp->ctx;
Madhuranath Iyengar49163922010-05-04 15:01:28 -0700498 ct->type = SRB_CT_CMD;
Madhuranath Iyengar38222632010-05-04 15:01:29 -0700499 ct->name = "bsg_ct";
Andrew Vasquez57807902011-11-18 09:03:20 -0800500 ct->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
Madhuranath Iyengar49163922010-05-04 15:01:28 -0700501 ct->u.bsg_job = bsg_job;
Giridhar Malavali6e980162010-03-19 17:03:58 -0700502
Saurav Kashyap7c3df132011-07-14 12:00:13 -0700503 ql_dbg(ql_dbg_user, vha, 0x7016,
504 "bsg rqst type: %s else type: %x - "
505 "loop-id=%x portid=%02x%02x%02x.\n", type,
506 (bsg_job->request->rqst_data.h_ct.preamble_word2 >> 16),
507 fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
508 fcport->d_id.b.al_pa);
Giridhar Malavali6e980162010-03-19 17:03:58 -0700509
510 rval = qla2x00_start_sp(sp);
511 if (rval != QLA_SUCCESS) {
Saurav Kashyap7c3df132011-07-14 12:00:13 -0700512 ql_log(ql_log_warn, vha, 0x7017,
513 "qla2x00_start_sp failed=%d.\n", rval);
Giridhar Malavali6e980162010-03-19 17:03:58 -0700514 kfree(sp->ctx);
515 mempool_free(sp, ha->srb_mempool);
516 rval = -EIO;
517 goto done_free_fcport;
518 }
519 return rval;
520
521done_free_fcport:
522 kfree(fcport);
523done_unmap_sg:
524 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
525 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
526 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
527 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
528done:
529 return rval;
530}
531
Sarang Radke23f2ebd2010-05-28 15:08:21 -0700532/* Set the port configuration to enable the
533 * internal loopback on ISP81XX
534 */
535static inline int
536qla81xx_set_internal_loopback(scsi_qla_host_t *vha, uint16_t *config,
537 uint16_t *new_config)
538{
539 int ret = 0;
540 int rval = 0;
541 struct qla_hw_data *ha = vha->hw;
542
Giridhar Malavali6246b8a2012-02-09 11:15:34 -0800543 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha))
Sarang Radke23f2ebd2010-05-28 15:08:21 -0700544 goto done_set_internal;
545
546 new_config[0] = config[0] | (ENABLE_INTERNAL_LOOPBACK << 1);
547 memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3) ;
548
549 ha->notify_dcbx_comp = 1;
550 ret = qla81xx_set_port_config(vha, new_config);
551 if (ret != QLA_SUCCESS) {
Saurav Kashyap7c3df132011-07-14 12:00:13 -0700552 ql_log(ql_log_warn, vha, 0x7021,
553 "set port config failed.\n");
Sarang Radke23f2ebd2010-05-28 15:08:21 -0700554 ha->notify_dcbx_comp = 0;
555 rval = -EINVAL;
556 goto done_set_internal;
557 }
558
559 /* Wait for DCBX complete event */
560 if (!wait_for_completion_timeout(&ha->dcbx_comp, (20 * HZ))) {
Saurav Kashyap7c3df132011-07-14 12:00:13 -0700561 ql_dbg(ql_dbg_user, vha, 0x7022,
562 "State change notification not received.\n");
Sarang Radke23f2ebd2010-05-28 15:08:21 -0700563 } else
Saurav Kashyap7c3df132011-07-14 12:00:13 -0700564 ql_dbg(ql_dbg_user, vha, 0x7023,
565 "State change received.\n");
Sarang Radke23f2ebd2010-05-28 15:08:21 -0700566
567 ha->notify_dcbx_comp = 0;
568
569done_set_internal:
570 return rval;
571}
572
/* Set the port configuration to disable the
 * internal loopback on ISP81XX
 *
 * Only acts when the loopback-enable bit is currently set in
 * config[0].  When @wait is non-zero the function blocks (up to 20s)
 * for the DCBX-complete event after reprogramming the port, and a
 * missed notification is treated as failure.  Returns 0 on success or
 * when nothing needed doing, -EINVAL on failure.
 */
static inline int
qla81xx_reset_internal_loopback(scsi_qla_host_t *vha, uint16_t *config,
	int wait)
{
	int ret = 0;
	int rval = 0;
	uint16_t new_config[4];
	struct qla_hw_data *ha = vha->hw;

	/* Port-config loopback control exists only on 81xx/83xx. */
	if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha))
		goto done_reset_internal;

	memset(new_config, 0 , sizeof(new_config));
	if ((config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
	    ENABLE_INTERNAL_LOOPBACK) {
		/* Clear the loopback bits; copy the remaining words. */
		new_config[0] = config[0] & ~INTERNAL_LOOPBACK_MASK;
		memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3) ;

		ha->notify_dcbx_comp = wait;
		ret = qla81xx_set_port_config(vha, new_config);
		if (ret != QLA_SUCCESS) {
			ql_log(ql_log_warn, vha, 0x7025,
			    "Set port config failed.\n");
			ha->notify_dcbx_comp = 0;
			rval = -EINVAL;
			goto done_reset_internal;
		}

		/* Wait for DCBX complete event */
		if (wait && !wait_for_completion_timeout(&ha->dcbx_comp,
			(20 * HZ))) {
			ql_dbg(ql_dbg_user, vha, 0x7026,
			    "State change notification not received.\n");
			ha->notify_dcbx_comp = 0;
			rval = -EINVAL;
			goto done_reset_internal;
		} else
			ql_dbg(ql_dbg_user, vha, 0x7027,
			    "State change received.\n");

		ha->notify_dcbx_comp = 0;
	}
done_reset_internal:
	return rval;
}
621
Giridhar Malavali6e980162010-03-19 17:03:58 -0700622static int
623qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
624{
625 struct Scsi_Host *host = bsg_job->shost;
626 scsi_qla_host_t *vha = shost_priv(host);
627 struct qla_hw_data *ha = vha->hw;
628 int rval;
629 uint8_t command_sent;
630 char *type;
631 struct msg_echo_lb elreq;
632 uint16_t response[MAILBOX_REGISTER_COUNT];
Sarang Radke23f2ebd2010-05-28 15:08:21 -0700633 uint16_t config[4], new_config[4];
Andrew Vasquez6c452a42010-03-19 17:04:02 -0700634 uint8_t *fw_sts_ptr;
Giridhar Malavali6e980162010-03-19 17:03:58 -0700635 uint8_t *req_data = NULL;
636 dma_addr_t req_data_dma;
637 uint32_t req_data_len;
638 uint8_t *rsp_data = NULL;
639 dma_addr_t rsp_data_dma;
640 uint32_t rsp_data_len;
641
Giridhar Malavali6e980162010-03-19 17:03:58 -0700642 if (!vha->flags.online) {
Saurav Kashyap7c3df132011-07-14 12:00:13 -0700643 ql_log(ql_log_warn, vha, 0x7019, "Host is not online.\n");
Giridhar Malavali6e980162010-03-19 17:03:58 -0700644 return -EIO;
645 }
646
647 elreq.req_sg_cnt = dma_map_sg(&ha->pdev->dev,
648 bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt,
649 DMA_TO_DEVICE);
650
Saurav Kashyap7c3df132011-07-14 12:00:13 -0700651 if (!elreq.req_sg_cnt) {
652 ql_log(ql_log_warn, vha, 0x701a,
653 "dma_map_sg returned %d for request.\n", elreq.req_sg_cnt);
Giridhar Malavali6e980162010-03-19 17:03:58 -0700654 return -ENOMEM;
Saurav Kashyap7c3df132011-07-14 12:00:13 -0700655 }
Giridhar Malavali6e980162010-03-19 17:03:58 -0700656
657 elreq.rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
658 bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
659 DMA_FROM_DEVICE);
660
661 if (!elreq.rsp_sg_cnt) {
Saurav Kashyap7c3df132011-07-14 12:00:13 -0700662 ql_log(ql_log_warn, vha, 0x701b,
663 "dma_map_sg returned %d for reply.\n", elreq.rsp_sg_cnt);
Giridhar Malavali6e980162010-03-19 17:03:58 -0700664 rval = -ENOMEM;
665 goto done_unmap_req_sg;
Andrew Vasquez6c452a42010-03-19 17:04:02 -0700666 }
Giridhar Malavali6e980162010-03-19 17:03:58 -0700667
668 if ((elreq.req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
669 (elreq.rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
Saurav Kashyap7c3df132011-07-14 12:00:13 -0700670 ql_log(ql_log_warn, vha, 0x701c,
671 "dma mapping resulted in different sg counts, "
672 "request_sg_cnt: %x dma_request_sg_cnt: %x "
673 "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
674 bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt,
675 bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt);
Giridhar Malavali6e980162010-03-19 17:03:58 -0700676 rval = -EAGAIN;
677 goto done_unmap_sg;
678 }
679 req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
680 req_data = dma_alloc_coherent(&ha->pdev->dev, req_data_len,
681 &req_data_dma, GFP_KERNEL);
682 if (!req_data) {
Saurav Kashyap7c3df132011-07-14 12:00:13 -0700683 ql_log(ql_log_warn, vha, 0x701d,
684 "dma alloc failed for req_data.\n");
Giridhar Malavali6e980162010-03-19 17:03:58 -0700685 rval = -ENOMEM;
686 goto done_unmap_sg;
687 }
688
689 rsp_data = dma_alloc_coherent(&ha->pdev->dev, rsp_data_len,
690 &rsp_data_dma, GFP_KERNEL);
691 if (!rsp_data) {
Saurav Kashyap7c3df132011-07-14 12:00:13 -0700692 ql_log(ql_log_warn, vha, 0x7004,
693 "dma alloc failed for rsp_data.\n");
Giridhar Malavali6e980162010-03-19 17:03:58 -0700694 rval = -ENOMEM;
695 goto done_free_dma_req;
696 }
697
698 /* Copy the request buffer in req_data now */
699 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
700 bsg_job->request_payload.sg_cnt, req_data, req_data_len);
701
702 elreq.send_dma = req_data_dma;
703 elreq.rcv_dma = rsp_data_dma;
704 elreq.transfer_size = req_data_len;
705
706 elreq.options = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
707
Sarang Radke23f2ebd2010-05-28 15:08:21 -0700708 if ((ha->current_topology == ISP_CFG_F ||
Saurav Kashyap491118d2011-08-16 11:31:50 -0700709 (atomic_read(&vha->loop_state) == LOOP_DOWN) ||
Giridhar Malavali6246b8a2012-02-09 11:15:34 -0800710 ((IS_QLA81XX(ha) || IS_QLA83XX(ha)) &&
Sarang Radke23f2ebd2010-05-28 15:08:21 -0700711 le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE
712 && req_data_len == MAX_ELS_FRAME_PAYLOAD)) &&
713 elreq.options == EXTERNAL_LOOPBACK) {
Giridhar Malavali6e980162010-03-19 17:03:58 -0700714 type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
Saurav Kashyap7c3df132011-07-14 12:00:13 -0700715 ql_dbg(ql_dbg_user, vha, 0x701e,
716 "BSG request type: %s.\n", type);
Giridhar Malavali6e980162010-03-19 17:03:58 -0700717 command_sent = INT_DEF_LB_ECHO_CMD;
718 rval = qla2x00_echo_test(vha, &elreq, response);
Sarang Radke23f2ebd2010-05-28 15:08:21 -0700719 } else {
Giridhar Malavali6246b8a2012-02-09 11:15:34 -0800720 if (IS_QLA81XX(ha) || IS_QLA8031(ha)) {
Sarang Radke23f2ebd2010-05-28 15:08:21 -0700721 memset(config, 0, sizeof(config));
722 memset(new_config, 0, sizeof(new_config));
723 if (qla81xx_get_port_config(vha, config)) {
Saurav Kashyap7c3df132011-07-14 12:00:13 -0700724 ql_log(ql_log_warn, vha, 0x701f,
725 "Get port config failed.\n");
Sarang Radke23f2ebd2010-05-28 15:08:21 -0700726 bsg_job->reply->reply_payload_rcv_len = 0;
727 bsg_job->reply->result = (DID_ERROR << 16);
728 rval = -EPERM;
729 goto done_free_dma_req;
730 }
731
732 if (elreq.options != EXTERNAL_LOOPBACK) {
Saurav Kashyap7c3df132011-07-14 12:00:13 -0700733 ql_dbg(ql_dbg_user, vha, 0x7020,
734 "Internal: curent port config = %x\n",
735 config[0]);
Sarang Radke23f2ebd2010-05-28 15:08:21 -0700736 if (qla81xx_set_internal_loopback(vha, config,
737 new_config)) {
Saurav Kashyap7c3df132011-07-14 12:00:13 -0700738 ql_log(ql_log_warn, vha, 0x7024,
739 "Internal loopback failed.\n");
Sarang Radke23f2ebd2010-05-28 15:08:21 -0700740 bsg_job->reply->reply_payload_rcv_len =
741 0;
742 bsg_job->reply->result =
743 (DID_ERROR << 16);
744 rval = -EPERM;
745 goto done_free_dma_req;
746 }
747 } else {
748 /* For external loopback to work
749 * ensure internal loopback is disabled
750 */
751 if (qla81xx_reset_internal_loopback(vha,
752 config, 1)) {
753 bsg_job->reply->reply_payload_rcv_len =
754 0;
755 bsg_job->reply->result =
756 (DID_ERROR << 16);
757 rval = -EPERM;
758 goto done_free_dma_req;
759 }
760 }
761
762 type = "FC_BSG_HST_VENDOR_LOOPBACK";
Saurav Kashyap7c3df132011-07-14 12:00:13 -0700763 ql_dbg(ql_dbg_user, vha, 0x7028,
764 "BSG request type: %s.\n", type);
Sarang Radke23f2ebd2010-05-28 15:08:21 -0700765
766 command_sent = INT_DEF_LB_LOOPBACK_CMD;
767 rval = qla2x00_loopback_test(vha, &elreq, response);
768
Joe Carnuccio4052bd52010-12-21 16:00:17 -0800769 if (new_config[0]) {
Sarang Radke23f2ebd2010-05-28 15:08:21 -0700770 /* Revert back to original port config
771 * Also clear internal loopback
772 */
773 qla81xx_reset_internal_loopback(vha,
774 new_config, 0);
775 }
776
777 if (response[0] == MBS_COMMAND_ERROR &&
778 response[1] == MBS_LB_RESET) {
Saurav Kashyap7c3df132011-07-14 12:00:13 -0700779 ql_log(ql_log_warn, vha, 0x7029,
780 "MBX command error, Aborting ISP.\n");
Sarang Radke23f2ebd2010-05-28 15:08:21 -0700781 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
782 qla2xxx_wake_dpc(vha);
783 qla2x00_wait_for_chip_reset(vha);
784 /* Also reset the MPI */
785 if (qla81xx_restart_mpi_firmware(vha) !=
786 QLA_SUCCESS) {
Saurav Kashyap7c3df132011-07-14 12:00:13 -0700787 ql_log(ql_log_warn, vha, 0x702a,
788 "MPI reset failed.\n");
Sarang Radke23f2ebd2010-05-28 15:08:21 -0700789 }
790
791 bsg_job->reply->reply_payload_rcv_len = 0;
792 bsg_job->reply->result = (DID_ERROR << 16);
793 rval = -EIO;
794 goto done_free_dma_req;
795 }
796 } else {
797 type = "FC_BSG_HST_VENDOR_LOOPBACK";
Saurav Kashyap7c3df132011-07-14 12:00:13 -0700798 ql_dbg(ql_dbg_user, vha, 0x702b,
799 "BSG request type: %s.\n", type);
Sarang Radke23f2ebd2010-05-28 15:08:21 -0700800 command_sent = INT_DEF_LB_LOOPBACK_CMD;
801 rval = qla2x00_loopback_test(vha, &elreq, response);
802 }
Giridhar Malavali6e980162010-03-19 17:03:58 -0700803 }
804
805 if (rval) {
Saurav Kashyap7c3df132011-07-14 12:00:13 -0700806 ql_log(ql_log_warn, vha, 0x702c,
807 "Vendor request %s failed.\n", type);
Giridhar Malavali6e980162010-03-19 17:03:58 -0700808
809 fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
Andrew Vasquez6c452a42010-03-19 17:04:02 -0700810 sizeof(struct fc_bsg_reply);
Giridhar Malavali6e980162010-03-19 17:03:58 -0700811
812 memcpy(fw_sts_ptr, response, sizeof(response));
813 fw_sts_ptr += sizeof(response);
Andrew Vasquez6c452a42010-03-19 17:04:02 -0700814 *fw_sts_ptr = command_sent;
Giridhar Malavali6e980162010-03-19 17:03:58 -0700815 rval = 0;
816 bsg_job->reply->reply_payload_rcv_len = 0;
817 bsg_job->reply->result = (DID_ERROR << 16);
818 } else {
Saurav Kashyap7c3df132011-07-14 12:00:13 -0700819 ql_dbg(ql_dbg_user, vha, 0x702d,
820 "Vendor request %s completed.\n", type);
Giridhar Malavali6e980162010-03-19 17:03:58 -0700821
822 bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
823 sizeof(response) + sizeof(uint8_t);
824 bsg_job->reply->reply_payload_rcv_len =
825 bsg_job->reply_payload.payload_len;
826 fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
827 sizeof(struct fc_bsg_reply);
828 memcpy(fw_sts_ptr, response, sizeof(response));
829 fw_sts_ptr += sizeof(response);
830 *fw_sts_ptr = command_sent;
831 bsg_job->reply->result = DID_OK;
832 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
833 bsg_job->reply_payload.sg_cnt, rsp_data,
834 rsp_data_len);
835 }
836 bsg_job->job_done(bsg_job);
837
838 dma_free_coherent(&ha->pdev->dev, rsp_data_len,
839 rsp_data, rsp_data_dma);
840done_free_dma_req:
841 dma_free_coherent(&ha->pdev->dev, req_data_len,
842 req_data, req_data_dma);
843done_unmap_sg:
844 dma_unmap_sg(&ha->pdev->dev,
845 bsg_job->reply_payload.sg_list,
846 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
847done_unmap_req_sg:
848 dma_unmap_sg(&ha->pdev->dev,
849 bsg_job->request_payload.sg_list,
850 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
Andrew Vasquez6c452a42010-03-19 17:04:02 -0700851 return rval;
Giridhar Malavali6e980162010-03-19 17:03:58 -0700852}
853
854static int
855qla84xx_reset(struct fc_bsg_job *bsg_job)
856{
857 struct Scsi_Host *host = bsg_job->shost;
858 scsi_qla_host_t *vha = shost_priv(host);
859 struct qla_hw_data *ha = vha->hw;
860 int rval = 0;
861 uint32_t flag;
862
Giridhar Malavali6e980162010-03-19 17:03:58 -0700863 if (!IS_QLA84XX(ha)) {
Saurav Kashyap7c3df132011-07-14 12:00:13 -0700864 ql_dbg(ql_dbg_user, vha, 0x702f, "Not 84xx, exiting.\n");
Giridhar Malavali6e980162010-03-19 17:03:58 -0700865 return -EINVAL;
866 }
867
868 flag = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
869
870 rval = qla84xx_reset_chip(vha, flag == A84_ISSUE_RESET_DIAG_FW);
871
872 if (rval) {
Saurav Kashyap7c3df132011-07-14 12:00:13 -0700873 ql_log(ql_log_warn, vha, 0x7030,
874 "Vendor request 84xx reset failed.\n");
Giridhar Malavali6e980162010-03-19 17:03:58 -0700875 rval = bsg_job->reply->reply_payload_rcv_len = 0;
876 bsg_job->reply->result = (DID_ERROR << 16);
877
878 } else {
Saurav Kashyap7c3df132011-07-14 12:00:13 -0700879 ql_dbg(ql_dbg_user, vha, 0x7031,
880 "Vendor request 84xx reset completed.\n");
Giridhar Malavali6e980162010-03-19 17:03:58 -0700881 bsg_job->reply->result = DID_OK;
882 }
883
884 bsg_job->job_done(bsg_job);
885 return rval;
886}
887
888static int
889qla84xx_updatefw(struct fc_bsg_job *bsg_job)
890{
891 struct Scsi_Host *host = bsg_job->shost;
892 scsi_qla_host_t *vha = shost_priv(host);
893 struct qla_hw_data *ha = vha->hw;
894 struct verify_chip_entry_84xx *mn = NULL;
895 dma_addr_t mn_dma, fw_dma;
896 void *fw_buf = NULL;
897 int rval = 0;
898 uint32_t sg_cnt;
899 uint32_t data_len;
900 uint16_t options;
901 uint32_t flag;
902 uint32_t fw_ver;
903
Giridhar Malavali6e980162010-03-19 17:03:58 -0700904 if (!IS_QLA84XX(ha)) {
Saurav Kashyap7c3df132011-07-14 12:00:13 -0700905 ql_dbg(ql_dbg_user, vha, 0x7032,
906 "Not 84xx, exiting.\n");
Giridhar Malavali6e980162010-03-19 17:03:58 -0700907 return -EINVAL;
908 }
909
910 sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
911 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
Saurav Kashyap7c3df132011-07-14 12:00:13 -0700912 if (!sg_cnt) {
913 ql_log(ql_log_warn, vha, 0x7033,
914 "dma_map_sg returned %d for request.\n", sg_cnt);
Giridhar Malavali6e980162010-03-19 17:03:58 -0700915 return -ENOMEM;
Saurav Kashyap7c3df132011-07-14 12:00:13 -0700916 }
Giridhar Malavali6e980162010-03-19 17:03:58 -0700917
918 if (sg_cnt != bsg_job->request_payload.sg_cnt) {
Saurav Kashyap7c3df132011-07-14 12:00:13 -0700919 ql_log(ql_log_warn, vha, 0x7034,
920 "DMA mapping resulted in different sg counts, "
921 "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
922 bsg_job->request_payload.sg_cnt, sg_cnt);
Giridhar Malavali6e980162010-03-19 17:03:58 -0700923 rval = -EAGAIN;
924 goto done_unmap_sg;
925 }
926
927 data_len = bsg_job->request_payload.payload_len;
928 fw_buf = dma_alloc_coherent(&ha->pdev->dev, data_len,
929 &fw_dma, GFP_KERNEL);
930 if (!fw_buf) {
Saurav Kashyap7c3df132011-07-14 12:00:13 -0700931 ql_log(ql_log_warn, vha, 0x7035,
932 "DMA alloc failed for fw_buf.\n");
Giridhar Malavali6e980162010-03-19 17:03:58 -0700933 rval = -ENOMEM;
934 goto done_unmap_sg;
935 }
936
937 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
938 bsg_job->request_payload.sg_cnt, fw_buf, data_len);
939
940 mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
941 if (!mn) {
Saurav Kashyap7c3df132011-07-14 12:00:13 -0700942 ql_log(ql_log_warn, vha, 0x7036,
943 "DMA alloc failed for fw buffer.\n");
Giridhar Malavali6e980162010-03-19 17:03:58 -0700944 rval = -ENOMEM;
945 goto done_free_fw_buf;
946 }
947
948 flag = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
949 fw_ver = le32_to_cpu(*((uint32_t *)((uint32_t *)fw_buf + 2)));
950
951 memset(mn, 0, sizeof(struct access_chip_84xx));
952 mn->entry_type = VERIFY_CHIP_IOCB_TYPE;
953 mn->entry_count = 1;
954
955 options = VCO_FORCE_UPDATE | VCO_END_OF_DATA;
956 if (flag == A84_ISSUE_UPDATE_DIAGFW_CMD)
957 options |= VCO_DIAG_FW;
958
959 mn->options = cpu_to_le16(options);
960 mn->fw_ver = cpu_to_le32(fw_ver);
961 mn->fw_size = cpu_to_le32(data_len);
962 mn->fw_seq_size = cpu_to_le32(data_len);
963 mn->dseg_address[0] = cpu_to_le32(LSD(fw_dma));
964 mn->dseg_address[1] = cpu_to_le32(MSD(fw_dma));
965 mn->dseg_length = cpu_to_le32(data_len);
966 mn->data_seg_cnt = cpu_to_le16(1);
967
968 rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);
969
970 if (rval) {
Saurav Kashyap7c3df132011-07-14 12:00:13 -0700971 ql_log(ql_log_warn, vha, 0x7037,
972 "Vendor request 84xx updatefw failed.\n");
Giridhar Malavali6e980162010-03-19 17:03:58 -0700973
974 rval = bsg_job->reply->reply_payload_rcv_len = 0;
975 bsg_job->reply->result = (DID_ERROR << 16);
976
977 } else {
Saurav Kashyap7c3df132011-07-14 12:00:13 -0700978 ql_dbg(ql_dbg_user, vha, 0x7038,
979 "Vendor request 84xx updatefw completed.\n");
Giridhar Malavali6e980162010-03-19 17:03:58 -0700980
981 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
982 bsg_job->reply->result = DID_OK;
983 }
984
985 bsg_job->job_done(bsg_job);
986 dma_pool_free(ha->s_dma_pool, mn, mn_dma);
987
988done_free_fw_buf:
989 dma_free_coherent(&ha->pdev->dev, data_len, fw_buf, fw_dma);
990
991done_unmap_sg:
992 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
993 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
994
995 return rval;
996}
997
/*
 * BSG vendor command QL_VND_A84_MGMT_CMD: issue an ISP84xx management
 * request (memory read/write, info query, or config change) via an
 * ACCESS_CHIP IOCB.
 *
 * The qla_bsg_a84_mgmt header immediately follows the fc_bsg_request
 * in the bsg request buffer.  Data-phase commands bounce through a
 * coherent DMA buffer (mgmt_b); CHNG_CONFIG has no data phase.
 * Completes the bsg job inline; returns 0 or a negative errno.
 */
static int
qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
{
	struct Scsi_Host *host = bsg_job->shost;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	struct access_chip_84xx *mn = NULL;
	dma_addr_t mn_dma, mgmt_dma;
	void *mgmt_b = NULL;
	int rval = 0;
	struct qla_bsg_a84_mgmt *ql84_mgmt;
	uint32_t sg_cnt;
	uint32_t data_len = 0;		/* stays 0 for CHNG_CONFIG */
	uint32_t dma_direction = DMA_NONE;	/* drives the unmap in cleanup */

	/* Command only makes sense on ISP84xx hardware. */
	if (!IS_QLA84XX(ha)) {
		ql_log(ql_log_warn, vha, 0x703a,
		    "Not 84xx, exiting.\n");
		return -EINVAL;
	}

	/* Management header follows the generic bsg request header. */
	ql84_mgmt = (struct qla_bsg_a84_mgmt *)((char *)bsg_job->request +
		sizeof(struct fc_bsg_request));
	if (!ql84_mgmt) {
		ql_log(ql_log_warn, vha, 0x703b,
		    "MGMT header not provided, exiting.\n");
		return -EINVAL;
	}

	mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
	if (!mn) {
		ql_log(ql_log_warn, vha, 0x703c,
		    "DMA alloc failed for fw buffer.\n");
		return -ENOMEM;
	}

	memset(mn, 0, sizeof(struct access_chip_84xx));
	mn->entry_type = ACCESS_CHIP_IOCB_TYPE;
	mn->entry_count = 1;

	switch (ql84_mgmt->mgmt.cmd) {
	case QLA84_MGMT_READ_MEM:
	case QLA84_MGMT_GET_INFO:
		/* Data flows from the chip into the caller's reply payload. */
		sg_cnt = dma_map_sg(&ha->pdev->dev,
			bsg_job->reply_payload.sg_list,
			bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
		if (!sg_cnt) {
			ql_log(ql_log_warn, vha, 0x703d,
			    "dma_map_sg returned %d for reply.\n", sg_cnt);
			rval = -ENOMEM;
			goto exit_mgmt;
		}

		dma_direction = DMA_FROM_DEVICE;

		if (sg_cnt != bsg_job->reply_payload.sg_cnt) {
			ql_log(ql_log_warn, vha, 0x703e,
			    "DMA mapping resulted in different sg counts, "
			    "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
			    bsg_job->reply_payload.sg_cnt, sg_cnt);
			rval = -EAGAIN;
			goto done_unmap_sg;
		}

		data_len = bsg_job->reply_payload.payload_len;

		mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
		    &mgmt_dma, GFP_KERNEL);
		if (!mgmt_b) {
			ql_log(ql_log_warn, vha, 0x703f,
			    "DMA alloc failed for mgmt_b.\n");
			rval = -ENOMEM;
			goto done_unmap_sg;
		}

		if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) {
			mn->options = cpu_to_le16(ACO_DUMP_MEMORY);
			mn->parameter1 =
				cpu_to_le32(
				ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);

		} else if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO) {
			mn->options = cpu_to_le16(ACO_REQUEST_INFO);
			mn->parameter1 =
				cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.info.type);

			mn->parameter2 =
				cpu_to_le32(
				ql84_mgmt->mgmt.mgmtp.u.info.context);
		}
		break;

	case QLA84_MGMT_WRITE_MEM:
		/* Data flows from the caller's request payload to the chip. */
		sg_cnt = dma_map_sg(&ha->pdev->dev,
			bsg_job->request_payload.sg_list,
			bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

		if (!sg_cnt) {
			ql_log(ql_log_warn, vha, 0x7040,
			    "dma_map_sg returned %d.\n", sg_cnt);
			rval = -ENOMEM;
			goto exit_mgmt;
		}

		dma_direction = DMA_TO_DEVICE;

		if (sg_cnt != bsg_job->request_payload.sg_cnt) {
			ql_log(ql_log_warn, vha, 0x7041,
			    "DMA mapping resulted in different sg counts, "
			    "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
			    bsg_job->request_payload.sg_cnt, sg_cnt);
			rval = -EAGAIN;
			goto done_unmap_sg;
		}

		data_len = bsg_job->request_payload.payload_len;
		mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
		    &mgmt_dma, GFP_KERNEL);
		if (!mgmt_b) {
			ql_log(ql_log_warn, vha, 0x7042,
			    "DMA alloc failed for mgmt_b.\n");
			rval = -ENOMEM;
			goto done_unmap_sg;
		}

		/* Stage the write data in the DMA-able bounce buffer. */
		sg_copy_to_buffer(bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, mgmt_b, data_len);

		mn->options = cpu_to_le16(ACO_LOAD_MEMORY);
		mn->parameter1 =
			cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);
		break;

	case QLA84_MGMT_CHNG_CONFIG:
		/* Pure control operation -- no data buffer is needed. */
		mn->options = cpu_to_le16(ACO_CHANGE_CONFIG_PARAM);
		mn->parameter1 =
			cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.id);

		mn->parameter2 =
			cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param0);

		mn->parameter3 =
			cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param1);
		break;

	default:
		rval = -EIO;
		goto exit_mgmt;
	}

	/* All data-phase commands use a single data segment descriptor. */
	if (ql84_mgmt->mgmt.cmd != QLA84_MGMT_CHNG_CONFIG) {
		mn->total_byte_cnt = cpu_to_le32(ql84_mgmt->mgmt.len);
		mn->dseg_count = cpu_to_le16(1);
		mn->dseg_address[0] = cpu_to_le32(LSD(mgmt_dma));
		mn->dseg_address[1] = cpu_to_le32(MSD(mgmt_dma));
		mn->dseg_length = cpu_to_le32(ql84_mgmt->mgmt.len);
	}

	rval = qla2x00_issue_iocb(vha, mn, mn_dma, 0);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x7043,
		    "Vendor request 84xx mgmt failed.\n");

		/* Failure is reported via reply->result; rval becomes 0. */
		rval = bsg_job->reply->reply_payload_rcv_len = 0;
		bsg_job->reply->result = (DID_ERROR << 16);

	} else {
		ql_dbg(ql_dbg_user, vha, 0x7044,
		    "Vendor request 84xx mgmt completed.\n");

		bsg_job->reply_len = sizeof(struct fc_bsg_reply);
		bsg_job->reply->result = DID_OK;

		if ((ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) ||
			(ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO)) {
			bsg_job->reply->reply_payload_rcv_len =
				bsg_job->reply_payload.payload_len;

			/* Copy the chip's data out to the reply payload. */
			sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
			    bsg_job->reply_payload.sg_cnt, mgmt_b,
			    data_len);
		}
	}

	bsg_job->job_done(bsg_job);

done_unmap_sg:
	if (mgmt_b)
		dma_free_coherent(&ha->pdev->dev, data_len, mgmt_b, mgmt_dma);

	/* Undo whichever mapping was established above (if any). */
	if (dma_direction == DMA_TO_DEVICE)
		dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
			bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	else if (dma_direction == DMA_FROM_DEVICE)
		dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
			bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

exit_mgmt:
	dma_pool_free(ha->s_dma_pool, mn, mn_dma);

	return rval;
}
1201
1202static int
1203qla24xx_iidma(struct fc_bsg_job *bsg_job)
1204{
1205 struct Scsi_Host *host = bsg_job->shost;
1206 scsi_qla_host_t *vha = shost_priv(host);
Giridhar Malavali6e980162010-03-19 17:03:58 -07001207 int rval = 0;
1208 struct qla_port_param *port_param = NULL;
1209 fc_port_t *fcport = NULL;
1210 uint16_t mb[MAILBOX_REGISTER_COUNT];
1211 uint8_t *rsp_ptr = NULL;
1212
1213 bsg_job->reply->reply_payload_rcv_len = 0;
1214
Giridhar Malavali6e980162010-03-19 17:03:58 -07001215 if (!IS_IIDMA_CAPABLE(vha->hw)) {
Saurav Kashyap7c3df132011-07-14 12:00:13 -07001216 ql_log(ql_log_info, vha, 0x7046, "iiDMA not supported.\n");
Giridhar Malavali6e980162010-03-19 17:03:58 -07001217 return -EINVAL;
1218 }
1219
1220 port_param = (struct qla_port_param *)((char *)bsg_job->request +
1221 sizeof(struct fc_bsg_request));
1222 if (!port_param) {
Saurav Kashyap7c3df132011-07-14 12:00:13 -07001223 ql_log(ql_log_warn, vha, 0x7047,
1224 "port_param header not provided.\n");
Giridhar Malavali6e980162010-03-19 17:03:58 -07001225 return -EINVAL;
1226 }
1227
1228 if (port_param->fc_scsi_addr.dest_type != EXT_DEF_TYPE_WWPN) {
Saurav Kashyap7c3df132011-07-14 12:00:13 -07001229 ql_log(ql_log_warn, vha, 0x7048,
1230 "Invalid destination type.\n");
Giridhar Malavali6e980162010-03-19 17:03:58 -07001231 return -EINVAL;
1232 }
1233
1234 list_for_each_entry(fcport, &vha->vp_fcports, list) {
1235 if (fcport->port_type != FCT_TARGET)
1236 continue;
1237
1238 if (memcmp(port_param->fc_scsi_addr.dest_addr.wwpn,
1239 fcport->port_name, sizeof(fcport->port_name)))
1240 continue;
1241 break;
1242 }
1243
1244 if (!fcport) {
Saurav Kashyap7c3df132011-07-14 12:00:13 -07001245 ql_log(ql_log_warn, vha, 0x7049,
1246 "Failed to find port.\n");
Giridhar Malavali6e980162010-03-19 17:03:58 -07001247 return -EINVAL;
1248 }
1249
Giridhar Malavalic9afb9a2010-09-03 15:20:48 -07001250 if (atomic_read(&fcport->state) != FCS_ONLINE) {
Saurav Kashyap7c3df132011-07-14 12:00:13 -07001251 ql_log(ql_log_warn, vha, 0x704a,
1252 "Port is not online.\n");
Madhuranath Iyengar17cf2c52010-07-23 15:28:22 +05001253 return -EINVAL;
1254 }
1255
Madhuranath Iyengar9a15eb42010-07-23 15:28:31 +05001256 if (fcport->flags & FCF_LOGIN_NEEDED) {
Saurav Kashyap7c3df132011-07-14 12:00:13 -07001257 ql_log(ql_log_warn, vha, 0x704b,
1258 "Remote port not logged in flags = 0x%x.\n", fcport->flags);
Madhuranath Iyengar9a15eb42010-07-23 15:28:31 +05001259 return -EINVAL;
1260 }
1261
Giridhar Malavali6e980162010-03-19 17:03:58 -07001262 if (port_param->mode)
1263 rval = qla2x00_set_idma_speed(vha, fcport->loop_id,
1264 port_param->speed, mb);
1265 else
1266 rval = qla2x00_get_idma_speed(vha, fcport->loop_id,
1267 &port_param->speed, mb);
1268
1269 if (rval) {
Saurav Kashyap7c3df132011-07-14 12:00:13 -07001270 ql_log(ql_log_warn, vha, 0x704c,
1271 "iIDMA cmd failed for %02x%02x%02x%02x%02x%02x%02x%02x -- "
1272 "%04x %x %04x %04x.\n", fcport->port_name[0],
1273 fcport->port_name[1], fcport->port_name[2],
1274 fcport->port_name[3], fcport->port_name[4],
1275 fcport->port_name[5], fcport->port_name[6],
1276 fcport->port_name[7], rval, fcport->fp_speed, mb[0], mb[1]);
Giridhar Malavali6e980162010-03-19 17:03:58 -07001277 rval = 0;
1278 bsg_job->reply->result = (DID_ERROR << 16);
1279
1280 } else {
1281 if (!port_param->mode) {
1282 bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
1283 sizeof(struct qla_port_param);
1284
1285 rsp_ptr = ((uint8_t *)bsg_job->reply) +
1286 sizeof(struct fc_bsg_reply);
1287
1288 memcpy(rsp_ptr, port_param,
1289 sizeof(struct qla_port_param));
1290 }
1291
1292 bsg_job->reply->result = DID_OK;
1293 }
1294
1295 bsg_job->job_done(bsg_job);
1296 return rval;
1297}
1298
1299static int
Saurav Kashyap7c3df132011-07-14 12:00:13 -07001300qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, scsi_qla_host_t *vha,
Harish Zunjarraof19af162010-10-15 11:27:43 -07001301 uint8_t is_update)
1302{
1303 uint32_t start = 0;
1304 int valid = 0;
Saurav Kashyap7c3df132011-07-14 12:00:13 -07001305 struct qla_hw_data *ha = vha->hw;
Harish Zunjarraof19af162010-10-15 11:27:43 -07001306
1307 bsg_job->reply->reply_payload_rcv_len = 0;
1308
1309 if (unlikely(pci_channel_offline(ha->pdev)))
1310 return -EINVAL;
1311
1312 start = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
Saurav Kashyap7c3df132011-07-14 12:00:13 -07001313 if (start > ha->optrom_size) {
1314 ql_log(ql_log_warn, vha, 0x7055,
1315 "start %d > optrom_size %d.\n", start, ha->optrom_size);
Harish Zunjarraof19af162010-10-15 11:27:43 -07001316 return -EINVAL;
Saurav Kashyap7c3df132011-07-14 12:00:13 -07001317 }
Harish Zunjarraof19af162010-10-15 11:27:43 -07001318
Saurav Kashyap7c3df132011-07-14 12:00:13 -07001319 if (ha->optrom_state != QLA_SWAITING) {
1320 ql_log(ql_log_info, vha, 0x7056,
1321 "optrom_state %d.\n", ha->optrom_state);
Harish Zunjarraof19af162010-10-15 11:27:43 -07001322 return -EBUSY;
Saurav Kashyap7c3df132011-07-14 12:00:13 -07001323 }
Harish Zunjarraof19af162010-10-15 11:27:43 -07001324
1325 ha->optrom_region_start = start;
Saurav Kashyap7c3df132011-07-14 12:00:13 -07001326 ql_dbg(ql_dbg_user, vha, 0x7057, "is_update=%d.\n", is_update);
Harish Zunjarraof19af162010-10-15 11:27:43 -07001327 if (is_update) {
1328 if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
1329 valid = 1;
1330 else if (start == (ha->flt_region_boot * 4) ||
1331 start == (ha->flt_region_fw * 4))
1332 valid = 1;
1333 else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
Giridhar Malavali6246b8a2012-02-09 11:15:34 -08001334 IS_CNA_CAPABLE(ha) || IS_QLA2031(ha))
Harish Zunjarraof19af162010-10-15 11:27:43 -07001335 valid = 1;
1336 if (!valid) {
Saurav Kashyap7c3df132011-07-14 12:00:13 -07001337 ql_log(ql_log_warn, vha, 0x7058,
1338 "Invalid start region 0x%x/0x%x.\n", start,
1339 bsg_job->request_payload.payload_len);
Harish Zunjarraof19af162010-10-15 11:27:43 -07001340 return -EINVAL;
1341 }
1342
1343 ha->optrom_region_size = start +
1344 bsg_job->request_payload.payload_len > ha->optrom_size ?
1345 ha->optrom_size - start :
1346 bsg_job->request_payload.payload_len;
1347 ha->optrom_state = QLA_SWRITING;
1348 } else {
1349 ha->optrom_region_size = start +
1350 bsg_job->reply_payload.payload_len > ha->optrom_size ?
1351 ha->optrom_size - start :
1352 bsg_job->reply_payload.payload_len;
1353 ha->optrom_state = QLA_SREADING;
1354 }
1355
1356 ha->optrom_buffer = vmalloc(ha->optrom_region_size);
1357 if (!ha->optrom_buffer) {
Saurav Kashyap7c3df132011-07-14 12:00:13 -07001358 ql_log(ql_log_warn, vha, 0x7059,
Harish Zunjarraof19af162010-10-15 11:27:43 -07001359 "Read: Unable to allocate memory for optrom retrieval "
Saurav Kashyap7c3df132011-07-14 12:00:13 -07001360 "(%x)\n", ha->optrom_region_size);
Harish Zunjarraof19af162010-10-15 11:27:43 -07001361
1362 ha->optrom_state = QLA_SWAITING;
1363 return -ENOMEM;
1364 }
1365
1366 memset(ha->optrom_buffer, 0, ha->optrom_region_size);
1367 return 0;
1368}
1369
1370static int
1371qla2x00_read_optrom(struct fc_bsg_job *bsg_job)
1372{
1373 struct Scsi_Host *host = bsg_job->shost;
1374 scsi_qla_host_t *vha = shost_priv(host);
1375 struct qla_hw_data *ha = vha->hw;
1376 int rval = 0;
1377
Saurav Kashyap7c3df132011-07-14 12:00:13 -07001378 rval = qla2x00_optrom_setup(bsg_job, vha, 0);
Harish Zunjarraof19af162010-10-15 11:27:43 -07001379 if (rval)
1380 return rval;
1381
1382 ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
1383 ha->optrom_region_start, ha->optrom_region_size);
1384
1385 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
1386 bsg_job->reply_payload.sg_cnt, ha->optrom_buffer,
1387 ha->optrom_region_size);
1388
1389 bsg_job->reply->reply_payload_rcv_len = ha->optrom_region_size;
1390 bsg_job->reply->result = DID_OK;
1391 vfree(ha->optrom_buffer);
1392 ha->optrom_buffer = NULL;
1393 ha->optrom_state = QLA_SWAITING;
1394 bsg_job->job_done(bsg_job);
1395 return rval;
1396}
1397
1398static int
1399qla2x00_update_optrom(struct fc_bsg_job *bsg_job)
1400{
1401 struct Scsi_Host *host = bsg_job->shost;
1402 scsi_qla_host_t *vha = shost_priv(host);
1403 struct qla_hw_data *ha = vha->hw;
1404 int rval = 0;
1405
Saurav Kashyap7c3df132011-07-14 12:00:13 -07001406 rval = qla2x00_optrom_setup(bsg_job, vha, 1);
Harish Zunjarraof19af162010-10-15 11:27:43 -07001407 if (rval)
1408 return rval;
1409
1410 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1411 bsg_job->request_payload.sg_cnt, ha->optrom_buffer,
1412 ha->optrom_region_size);
1413
1414 ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
1415 ha->optrom_region_start, ha->optrom_region_size);
1416
1417 bsg_job->reply->result = DID_OK;
1418 vfree(ha->optrom_buffer);
1419 ha->optrom_buffer = NULL;
1420 ha->optrom_state = QLA_SWAITING;
1421 bsg_job->job_done(bsg_job);
1422 return rval;
1423}
1424
1425static int
Joe Carnuccio697a4bc2011-08-16 11:31:52 -07001426qla2x00_update_fru_versions(struct fc_bsg_job *bsg_job)
1427{
1428 struct Scsi_Host *host = bsg_job->shost;
1429 scsi_qla_host_t *vha = shost_priv(host);
1430 struct qla_hw_data *ha = vha->hw;
1431 int rval = 0;
1432 uint8_t bsg[DMA_POOL_SIZE];
1433 struct qla_image_version_list *list = (void *)bsg;
1434 struct qla_image_version *image;
1435 uint32_t count;
1436 dma_addr_t sfp_dma;
1437 void *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1438 if (!sfp) {
1439 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
1440 EXT_STATUS_NO_MEMORY;
1441 goto done;
1442 }
1443
1444 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1445 bsg_job->request_payload.sg_cnt, list, sizeof(bsg));
1446
1447 image = list->version;
1448 count = list->count;
1449 while (count--) {
1450 memcpy(sfp, &image->field_info, sizeof(image->field_info));
1451 rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
1452 image->field_address.device, image->field_address.offset,
1453 sizeof(image->field_info), image->field_address.option);
1454 if (rval) {
1455 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
1456 EXT_STATUS_MAILBOX;
1457 goto dealloc;
1458 }
1459 image++;
1460 }
1461
1462 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1463
1464dealloc:
1465 dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1466
1467done:
1468 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1469 bsg_job->reply->result = DID_OK << 16;
1470 bsg_job->job_done(bsg_job);
1471
1472 return 0;
1473}
1474
1475static int
1476qla2x00_read_fru_status(struct fc_bsg_job *bsg_job)
1477{
1478 struct Scsi_Host *host = bsg_job->shost;
1479 scsi_qla_host_t *vha = shost_priv(host);
1480 struct qla_hw_data *ha = vha->hw;
1481 int rval = 0;
1482 uint8_t bsg[DMA_POOL_SIZE];
1483 struct qla_status_reg *sr = (void *)bsg;
1484 dma_addr_t sfp_dma;
1485 uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1486 if (!sfp) {
1487 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
1488 EXT_STATUS_NO_MEMORY;
1489 goto done;
1490 }
1491
1492 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1493 bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));
1494
1495 rval = qla2x00_read_sfp(vha, sfp_dma, sfp,
1496 sr->field_address.device, sr->field_address.offset,
1497 sizeof(sr->status_reg), sr->field_address.option);
1498 sr->status_reg = *sfp;
1499
1500 if (rval) {
1501 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
1502 EXT_STATUS_MAILBOX;
1503 goto dealloc;
1504 }
1505
1506 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
1507 bsg_job->reply_payload.sg_cnt, sr, sizeof(*sr));
1508
1509 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1510
1511dealloc:
1512 dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1513
1514done:
1515 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1516 bsg_job->reply->reply_payload_rcv_len = sizeof(*sr);
1517 bsg_job->reply->result = DID_OK << 16;
1518 bsg_job->job_done(bsg_job);
1519
1520 return 0;
1521}
1522
1523static int
1524qla2x00_write_fru_status(struct fc_bsg_job *bsg_job)
1525{
1526 struct Scsi_Host *host = bsg_job->shost;
1527 scsi_qla_host_t *vha = shost_priv(host);
1528 struct qla_hw_data *ha = vha->hw;
1529 int rval = 0;
1530 uint8_t bsg[DMA_POOL_SIZE];
1531 struct qla_status_reg *sr = (void *)bsg;
1532 dma_addr_t sfp_dma;
1533 uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1534 if (!sfp) {
1535 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
1536 EXT_STATUS_NO_MEMORY;
1537 goto done;
1538 }
1539
1540 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1541 bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));
1542
1543 *sfp = sr->status_reg;
1544 rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
1545 sr->field_address.device, sr->field_address.offset,
1546 sizeof(sr->status_reg), sr->field_address.option);
1547
1548 if (rval) {
1549 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
1550 EXT_STATUS_MAILBOX;
1551 goto dealloc;
1552 }
1553
1554 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1555
1556dealloc:
1557 dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1558
1559done:
1560 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1561 bsg_job->reply->result = DID_OK << 16;
1562 bsg_job->job_done(bsg_job);
1563
1564 return 0;
1565}
1566
1567static int
Giridhar Malavali6e980162010-03-19 17:03:58 -07001568qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
1569{
1570 switch (bsg_job->request->rqst_data.h_vendor.vendor_cmd[0]) {
1571 case QL_VND_LOOPBACK:
1572 return qla2x00_process_loopback(bsg_job);
1573
1574 case QL_VND_A84_RESET:
1575 return qla84xx_reset(bsg_job);
1576
1577 case QL_VND_A84_UPDATE_FW:
1578 return qla84xx_updatefw(bsg_job);
1579
1580 case QL_VND_A84_MGMT_CMD:
1581 return qla84xx_mgmt_cmd(bsg_job);
1582
1583 case QL_VND_IIDMA:
1584 return qla24xx_iidma(bsg_job);
1585
Sarang Radke09ff7012010-03-19 17:03:59 -07001586 case QL_VND_FCP_PRIO_CFG_CMD:
1587 return qla24xx_proc_fcp_prio_cfg_cmd(bsg_job);
1588
Harish Zunjarraof19af162010-10-15 11:27:43 -07001589 case QL_VND_READ_FLASH:
1590 return qla2x00_read_optrom(bsg_job);
1591
1592 case QL_VND_UPDATE_FLASH:
1593 return qla2x00_update_optrom(bsg_job);
1594
Joe Carnuccio697a4bc2011-08-16 11:31:52 -07001595 case QL_VND_SET_FRU_VERSION:
1596 return qla2x00_update_fru_versions(bsg_job);
1597
1598 case QL_VND_READ_FRU_STATUS:
1599 return qla2x00_read_fru_status(bsg_job);
1600
1601 case QL_VND_WRITE_FRU_STATUS:
1602 return qla2x00_write_fru_status(bsg_job);
1603
Giridhar Malavali6e980162010-03-19 17:03:58 -07001604 default:
1605 bsg_job->reply->result = (DID_ERROR << 16);
1606 bsg_job->job_done(bsg_job);
1607 return -ENOSYS;
1608 }
1609}
1610
1611int
1612qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
1613{
1614 int ret = -EINVAL;
Saurav Kashyap7c3df132011-07-14 12:00:13 -07001615 struct fc_rport *rport;
1616 fc_port_t *fcport = NULL;
1617 struct Scsi_Host *host;
1618 scsi_qla_host_t *vha;
1619
1620 if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
1621 rport = bsg_job->rport;
1622 fcport = *(fc_port_t **) rport->dd_data;
1623 host = rport_to_shost(rport);
1624 vha = shost_priv(host);
1625 } else {
1626 host = bsg_job->shost;
1627 vha = shost_priv(host);
1628 }
1629
Andrew Vasquezd051a5aa2012-02-09 11:14:05 -08001630 if (qla2x00_reset_active(vha)) {
1631 ql_dbg(ql_dbg_user, vha, 0x709f,
1632 "BSG: ISP abort active/needed -- cmd=%d.\n",
1633 bsg_job->request->msgcode);
1634 bsg_job->reply->result = (DID_ERROR << 16);
1635 bsg_job->job_done(bsg_job);
1636 return -EBUSY;
1637 }
1638
Saurav Kashyap7c3df132011-07-14 12:00:13 -07001639 ql_dbg(ql_dbg_user, vha, 0x7000,
Chad Dupuiscfb09192011-11-18 09:03:07 -08001640 "Entered %s msgcode=0x%x.\n", __func__, bsg_job->request->msgcode);
Giridhar Malavali6e980162010-03-19 17:03:58 -07001641
1642 switch (bsg_job->request->msgcode) {
1643 case FC_BSG_RPT_ELS:
1644 case FC_BSG_HST_ELS_NOLOGIN:
1645 ret = qla2x00_process_els(bsg_job);
1646 break;
1647 case FC_BSG_HST_CT:
1648 ret = qla2x00_process_ct(bsg_job);
1649 break;
1650 case FC_BSG_HST_VENDOR:
1651 ret = qla2x00_process_vendor_specific(bsg_job);
1652 break;
1653 case FC_BSG_HST_ADD_RPORT:
1654 case FC_BSG_HST_DEL_RPORT:
1655 case FC_BSG_RPT_CT:
1656 default:
Saurav Kashyap7c3df132011-07-14 12:00:13 -07001657 ql_log(ql_log_warn, vha, 0x705a, "Unsupported BSG request.\n");
Giridhar Malavali6e980162010-03-19 17:03:58 -07001658 break;
Andrew Vasquez6c452a42010-03-19 17:04:02 -07001659 }
Giridhar Malavali6e980162010-03-19 17:03:58 -07001660 return ret;
1661}
1662
/*
 * bsg_timeout handler: locate the timed-out bsg job among the
 * outstanding commands of every request queue and attempt to abort it.
 *
 * Matching entries are CT or host-ELS SRBs whose srb_ctx points back
 * at this bsg job.  The hardware lock is dropped around the
 * abort_command call (a mailbox operation) and retaken afterwards.
 * When the SRB is found, its resources are freed here regardless of
 * whether the abort succeeded.  Always returns 0; the outcome is
 * conveyed via bsg_job->req->errors / bsg_job->reply->result.
 */
int
qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
{
	scsi_qla_host_t *vha = shost_priv(bsg_job->shost);
	struct qla_hw_data *ha = vha->hw;
	srb_t *sp;
	int cnt, que;
	unsigned long flags;
	struct req_que *req;
	struct srb_ctx *sp_bsg;

	/* find the bsg job from the active list of commands */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (que = 0; que < ha->max_req_queues; que++) {
		req = ha->req_q_map[que];
		if (!req)
			continue;

		/* Handle 0 is never used; outstanding handles start at 1. */
		for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
			sp = req->outstanding_cmds[cnt];
			if (sp) {
				sp_bsg = sp->ctx;

				if (((sp_bsg->type == SRB_CT_CMD) ||
					(sp_bsg->type == SRB_ELS_CMD_HST))
					&& (sp_bsg->u.bsg_job == bsg_job)) {
					/*
					 * abort_command issues a mailbox
					 * command and must not be called
					 * with the hardware lock held;
					 * drop it here and retake it below.
					 */
					spin_unlock_irqrestore(&ha->hardware_lock, flags);
					if (ha->isp_ops->abort_command(sp)) {
						ql_log(ql_log_warn, vha, 0x7089,
						    "mbx abort_command "
						    "failed.\n");
						bsg_job->req->errors =
						bsg_job->reply->result = -EIO;
					} else {
						ql_dbg(ql_dbg_user, vha, 0x708a,
						    "mbx abort_command "
						    "success.\n");
						bsg_job->req->errors =
						bsg_job->reply->result = 0;
					}
					spin_lock_irqsave(&ha->hardware_lock, flags);
					goto done;
				}
			}
		}
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	ql_log(ql_log_info, vha, 0x708b, "SRB not found to abort.\n");
	bsg_job->req->errors = bsg_job->reply->result = -ENXIO;
	return 0;

done:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	/*
	 * NOTE(review): sp->fcport is freed only for CT jobs --
	 * presumably ELS jobs reference the rport's real fcport which
	 * must not be freed here; confirm against the submit paths.
	 */
	if (bsg_job->request->msgcode == FC_BSG_HST_CT)
		kfree(sp->fcport);
	kfree(sp->ctx);
	mempool_free(sp, ha->srb_mempool);
	return 0;
}