/*
 * QLogic iSCSI HBA Driver
 * Copyright (c) 2011 QLogic Corporation
 *
 * See LICENSE.qla4xxx for copyright and licensing details.
 */

#include "ql4_def.h"
#include "ql4_glbl.h"
#include "ql4_bsg.h"
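
/*
 * qla4xxx_read_flash - QLISCSI_VND_READ_FLASH handler: read a region of
 * adapter flash into the BSG reply payload.  vendor_cmd[1] carries the
 * flash offset; the read length is taken from the reply payload length.
 */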
static int
qla4xxx_read_flash(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
	struct scsi_qla_host *ha = to_qla_host(host);
	struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
	struct iscsi_bsg_request *bsg_req = bsg_job->request;
	uint32_t offset = 0;
	uint32_t length = 0;
	dma_addr_t flash_dma;
	uint8_t *flash = NULL;
	int rval = -EINVAL;

	bsg_reply->reply_payload_rcv_len = 0;

	if (unlikely(pci_channel_offline(ha->pdev)))
		goto leave;

	if (ql4xxx_reset_active(ha)) {
		ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
		rval = -EBUSY;
		goto leave;
	}
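
	/* Only one flash read/write may be in progress at a time. */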
	if (ha->flash_state != QLFLASH_WAITING) {
		ql4_printk(KERN_ERR, ha, "%s: another flash operation "
			   "active\n", __func__);
		rval = -EBUSY;
		goto leave;
	}

	ha->flash_state = QLFLASH_READING;
	offset = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
	length = bsg_job->reply_payload.payload_len;

	flash = dma_alloc_coherent(&ha->pdev->dev, length, &flash_dma,
				   GFP_KERNEL);
	if (!flash) {
		ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for flash "
			   "data\n", __func__);
		rval = -ENOMEM;
		goto leave;
	}

	rval = qla4xxx_get_flash(ha, flash_dma, offset, length);
	if (rval) {
		ql4_printk(KERN_ERR, ha, "%s: get flash failed\n", __func__);
		bsg_reply->result = DID_ERROR << 16;
		rval = -EIO;
	} else {
		bsg_reply->reply_payload_rcv_len =
			sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
					    bsg_job->reply_payload.sg_cnt,
					    flash, length);
		bsg_reply->result = DID_OK << 16;
	}

	bsg_job_done(bsg_job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
	dma_free_coherent(&ha->pdev->dev, length, flash, flash_dma);
leave:
	ha->flash_state = QLFLASH_WAITING;
	return rval;
}
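
/*
 * qla4xxx_update_flash - QLISCSI_VND_UPDATE_FLASH handler: write the BSG
 * request payload to adapter flash.  vendor_cmd[1] carries the flash
 * offset and vendor_cmd[2] the write options.
 */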
static int
qla4xxx_update_flash(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
	struct scsi_qla_host *ha = to_qla_host(host);
	struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
	struct iscsi_bsg_request *bsg_req = bsg_job->request;
	uint32_t length = 0;
	uint32_t offset = 0;
	uint32_t options = 0;
	dma_addr_t flash_dma;
	uint8_t *flash = NULL;
	int rval = -EINVAL;

	bsg_reply->reply_payload_rcv_len = 0;

	if (unlikely(pci_channel_offline(ha->pdev)))
		goto leave;

	if (ql4xxx_reset_active(ha)) {
		ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
		rval = -EBUSY;
		goto leave;
	}

	if (ha->flash_state != QLFLASH_WAITING) {
		ql4_printk(KERN_ERR, ha, "%s: another flash operation "
			   "active\n", __func__);
		rval = -EBUSY;
		goto leave;
	}

	ha->flash_state = QLFLASH_WRITING;
	length = bsg_job->request_payload.payload_len;
	offset = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
	options = bsg_req->rqst_data.h_vendor.vendor_cmd[2];

	flash = dma_alloc_coherent(&ha->pdev->dev, length, &flash_dma,
				   GFP_KERNEL);
	if (!flash) {
		ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for flash "
			   "data\n", __func__);
		rval = -ENOMEM;
		goto leave;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
			  bsg_job->request_payload.sg_cnt, flash, length);

	rval = qla4xxx_set_flash(ha, flash_dma, offset, length, options);
	if (rval) {
		ql4_printk(KERN_ERR, ha, "%s: set flash failed\n", __func__);
		bsg_reply->result = DID_ERROR << 16;
		rval = -EIO;
	} else
		bsg_reply->result = DID_OK << 16;

	bsg_job_done(bsg_job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
	dma_free_coherent(&ha->pdev->dev, length, flash, flash_dma);
leave:
	ha->flash_state = QLFLASH_WAITING;
	return rval;
}
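
/*
 * qla4xxx_get_acb_state - QLISCSI_VND_GET_ACB_STATE handler: return the IP
 * state for the given ACB.  vendor_cmd[1] carries the ACB index and
 * vendor_cmd[2] the IP index; the reply payload receives the
 * MBOX_REG_COUNT status words from qla4xxx_get_ip_state().
 */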
static int
qla4xxx_get_acb_state(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
	struct scsi_qla_host *ha = to_qla_host(host);
	struct iscsi_bsg_request *bsg_req = bsg_job->request;
	struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
	uint32_t status[MBOX_REG_COUNT];
	uint32_t acb_idx;
	uint32_t ip_idx;
	int rval = -EINVAL;

	bsg_reply->reply_payload_rcv_len = 0;

	if (unlikely(pci_channel_offline(ha->pdev)))
		goto leave;

	/* Only 4022 and above adapters are supported */
	if (is_qla4010(ha))
		goto leave;

	if (ql4xxx_reset_active(ha)) {
		ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
		rval = -EBUSY;
		goto leave;
	}

	if (bsg_job->reply_payload.payload_len < sizeof(status)) {
		ql4_printk(KERN_ERR, ha, "%s: invalid payload len %d\n",
			   __func__, bsg_job->reply_payload.payload_len);
		rval = -EINVAL;
		goto leave;
	}

	acb_idx = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
	ip_idx = bsg_req->rqst_data.h_vendor.vendor_cmd[2];

	rval = qla4xxx_get_ip_state(ha, acb_idx, ip_idx, status);
	if (rval) {
		ql4_printk(KERN_ERR, ha, "%s: get ip state failed\n",
			   __func__);
		bsg_reply->result = DID_ERROR << 16;
		rval = -EIO;
	} else {
		bsg_reply->reply_payload_rcv_len =
			sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
					    bsg_job->reply_payload.sg_cnt,
					    status, sizeof(status));
		bsg_reply->result = DID_OK << 16;
	}

	bsg_job_done(bsg_job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
leave:
	return rval;
}
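
/*
 * qla4xxx_read_nvram - QLISCSI_VND_READ_NVRAM handler: read a region of
 * adapter NVRAM into the BSG reply payload (40xx adapters only).
 * vendor_cmd[1] carries the NVRAM offset; the read length is taken from
 * the reply payload length.
 */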
static int
qla4xxx_read_nvram(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
	struct scsi_qla_host *ha = to_qla_host(host);
	struct iscsi_bsg_request *bsg_req = bsg_job->request;
	struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
	uint32_t offset = 0;
	uint32_t len = 0;
	uint32_t total_len = 0;
	dma_addr_t nvram_dma;
	uint8_t *nvram = NULL;
	int rval = -EINVAL;

	bsg_reply->reply_payload_rcv_len = 0;

	if (unlikely(pci_channel_offline(ha->pdev)))
		goto leave;

	/* Only 40xx adapters are supported */
	if (!(is_qla4010(ha) || is_qla4022(ha) || is_qla4032(ha)))
		goto leave;

	if (ql4xxx_reset_active(ha)) {
		ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
		rval = -EBUSY;
		goto leave;
	}

	offset = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
	len = bsg_job->reply_payload.payload_len;
	total_len = offset + len;

	/* total len should not be greater than max NVRAM size */
	if ((is_qla4010(ha) && total_len > QL4010_NVRAM_SIZE) ||
	    ((is_qla4022(ha) || is_qla4032(ha)) &&
	     total_len > QL40X2_NVRAM_SIZE)) {
		ql4_printk(KERN_ERR, ha, "%s: offset+len greater than max"
			   " nvram size, offset=%d len=%d\n",
			   __func__, offset, len);
		goto leave;
	}

	nvram = dma_alloc_coherent(&ha->pdev->dev, len, &nvram_dma,
				   GFP_KERNEL);
	if (!nvram) {
		ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for nvram "
			   "data\n", __func__);
		rval = -ENOMEM;
		goto leave;
	}

	rval = qla4xxx_get_nvram(ha, nvram_dma, offset, len);
	if (rval) {
		ql4_printk(KERN_ERR, ha, "%s: get nvram failed\n", __func__);
		bsg_reply->result = DID_ERROR << 16;
		rval = -EIO;
	} else {
		bsg_reply->reply_payload_rcv_len =
			sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
					    bsg_job->reply_payload.sg_cnt,
					    nvram, len);
		bsg_reply->result = DID_OK << 16;
	}

	bsg_job_done(bsg_job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
	dma_free_coherent(&ha->pdev->dev, len, nvram, nvram_dma);
leave:
	return rval;
}
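
/*
 * qla4xxx_update_nvram - QLISCSI_VND_UPDATE_NVRAM handler: write the BSG
 * request payload to adapter NVRAM (40xx adapters only).  vendor_cmd[1]
 * carries the NVRAM offset; the write length is taken from the request
 * payload length.
 */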
static int
qla4xxx_update_nvram(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
	struct scsi_qla_host *ha = to_qla_host(host);
	struct iscsi_bsg_request *bsg_req = bsg_job->request;
	struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
	uint32_t offset = 0;
	uint32_t len = 0;
	uint32_t total_len = 0;
	dma_addr_t nvram_dma;
	uint8_t *nvram = NULL;
	int rval = -EINVAL;

	bsg_reply->reply_payload_rcv_len = 0;

	if (unlikely(pci_channel_offline(ha->pdev)))
		goto leave;

	/* Only 40xx adapters are supported */
	if (!(is_qla4010(ha) || is_qla4022(ha) || is_qla4032(ha)))
		goto leave;

	if (ql4xxx_reset_active(ha)) {
		ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
		rval = -EBUSY;
		goto leave;
	}

	offset = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
	len = bsg_job->request_payload.payload_len;
	total_len = offset + len;

	/* total len should not be greater than max NVRAM size */
	if ((is_qla4010(ha) && total_len > QL4010_NVRAM_SIZE) ||
	    ((is_qla4022(ha) || is_qla4032(ha)) &&
	     total_len > QL40X2_NVRAM_SIZE)) {
		ql4_printk(KERN_ERR, ha, "%s: offset+len greater than max"
			   " nvram size, offset=%d len=%d\n",
			   __func__, offset, len);
		goto leave;
	}

	nvram = dma_alloc_coherent(&ha->pdev->dev, len, &nvram_dma,
				   GFP_KERNEL);
	if (!nvram) {
		ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for nvram "
			   "data\n", __func__);
		rval = -ENOMEM;
		goto leave;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
			  bsg_job->request_payload.sg_cnt, nvram, len);

	rval = qla4xxx_set_nvram(ha, nvram_dma, offset, len);
	if (rval) {
		ql4_printk(KERN_ERR, ha, "%s: set nvram failed\n", __func__);
		bsg_reply->result = DID_ERROR << 16;
		rval = -EIO;
	} else
		bsg_reply->result = DID_OK << 16;

	bsg_job_done(bsg_job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
	dma_free_coherent(&ha->pdev->dev, len, nvram, nvram_dma);
leave:
	return rval;
}

/**
 * qla4xxx_process_vendor_specific - handle vendor specific bsg request
 * @bsg_job: iscsi_bsg_job to handle
 **/
int qla4xxx_process_vendor_specific(struct bsg_job *bsg_job)
{
	struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
	struct iscsi_bsg_request *bsg_req = bsg_job->request;
	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
	struct scsi_qla_host *ha = to_qla_host(host);

	switch (bsg_req->rqst_data.h_vendor.vendor_cmd[0]) {
	case QLISCSI_VND_READ_FLASH:
		return qla4xxx_read_flash(bsg_job);

	case QLISCSI_VND_UPDATE_FLASH:
		return qla4xxx_update_flash(bsg_job);

	case QLISCSI_VND_GET_ACB_STATE:
		return qla4xxx_get_acb_state(bsg_job);

	case QLISCSI_VND_READ_NVRAM:
		return qla4xxx_read_nvram(bsg_job);

	case QLISCSI_VND_UPDATE_NVRAM:
		return qla4xxx_update_nvram(bsg_job);

	default:
		ql4_printk(KERN_ERR, ha, "%s: invalid BSG vendor command: "
			   "0x%x\n", __func__,
			   bsg_req->rqst_data.h_vendor.vendor_cmd[0]);
		bsg_reply->result = (DID_ERROR << 16);
		bsg_reply->reply_payload_rcv_len = 0;
		bsg_job_done(bsg_job, bsg_reply->result,
			     bsg_reply->reply_payload_rcv_len);
		return -ENOSYS;
	}
}

/**
 * qla4xxx_bsg_request - handle bsg request from ISCSI transport
 * @bsg_job: iscsi_bsg_job to handle
 */
int qla4xxx_bsg_request(struct bsg_job *bsg_job)
{
	struct iscsi_bsg_request *bsg_req = bsg_job->request;
	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
	struct scsi_qla_host *ha = to_qla_host(host);

	switch (bsg_req->msgcode) {
	case ISCSI_BSG_HST_VENDOR:
		return qla4xxx_process_vendor_specific(bsg_job);

	default:
		ql4_printk(KERN_ERR, ha, "%s: invalid BSG command: 0x%x\n",
			   __func__, bsg_req->msgcode);
	}

	return -ENOSYS;
}