/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2011 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"

#include <linux/blkdev.h>
#include <linux/delay.h>

#include <scsi/scsi_tcq.h>

static void qla2x00_isp_cmd(struct scsi_qla_host *, struct req_que *);

static void qla25xx_set_que(srb_t *, struct rsp_que **);
/**
 * qla2x00_get_cmd_direction() - Determine control_flags data direction.
 * @sp: SRB command
 *
 * Returns the proper CF_* direction based on CDB.
 */
static inline uint16_t
qla2x00_get_cmd_direction(srb_t *sp)
{
        uint16_t cflags;

        cflags = 0;

        /* Set transfer direction */
        if (sp->cmd->sc_data_direction == DMA_TO_DEVICE) {
                cflags = CF_WRITE;
                sp->fcport->vha->hw->qla_stats.output_bytes +=
                    scsi_bufflen(sp->cmd);
        } else if (sp->cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cflags = CF_READ;
                sp->fcport->vha->hw->qla_stats.input_bytes +=
                    scsi_bufflen(sp->cmd);
        }
        return (cflags);
}

/**
 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 * Continuation Type 0 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_32(uint16_t dsds)
{
        uint16_t iocbs;

        iocbs = 1;
        if (dsds > 3) {
                iocbs += (dsds - 3) / 7;
                if ((dsds - 3) % 7)
                        iocbs++;
        }
        return (iocbs);
}
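
/*
 * Worked example for the helper above: the Command Type 2 IOCB itself
 * carries 3 DSDs and each Continuation Type 0 IOCB carries 7 more, so
 * a 17-segment transfer needs 1 + ceil((17 - 3) / 7) = 3 ring entries.
 */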

/**
 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_64(uint16_t dsds)
{
        uint16_t iocbs;

        iocbs = 1;
        if (dsds > 2) {
                iocbs += (dsds - 2) / 5;
                if ((dsds - 2) % 5)
                        iocbs++;
        }
        return (iocbs);
}
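
/*
 * Worked example: 64-bit DSDs are larger, so the Command Type 3 IOCB
 * carries only 2 and each Continuation Type 1 IOCB carries 5; the same
 * 17-segment transfer needs 1 + ceil((17 - 2) / 5) = 4 ring entries.
 */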

/**
 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
 * @vha: HA context
 *
 * Returns a pointer to the Continuation Type 0 IOCB packet.
 */
static inline cont_entry_t *
qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
{
        cont_entry_t *cont_pkt;
        struct req_que *req = vha->req;
        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else {
                req->ring_ptr++;
        }

        cont_pkt = (cont_entry_t *)req->ring_ptr;

        /* Load packet defaults. */
        *((uint32_t *)(&cont_pkt->entry_type)) =
            __constant_cpu_to_le32(CONTINUE_TYPE);

        return (cont_pkt);
}

/**
 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
 * @vha: HA context
 * @req: request queue
 *
 * Returns a pointer to the continuation type 1 IOCB packet.
 */
static inline cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
{
        cont_a64_entry_t *cont_pkt;

        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else {
                req->ring_ptr++;
        }

        cont_pkt = (cont_a64_entry_t *)req->ring_ptr;

        /* Load packet defaults. */
        *((uint32_t *)(&cont_pkt->entry_type)) =
            __constant_cpu_to_le32(CONTINUE_A64_TYPE);

        return (cont_pkt);
}
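
/*
 * Note on the two helpers above: the request ring is a circular
 * producer/consumer buffer, so claiming the next IOCB slot is always
 * the same two steps -- bump ring_index, and wrap both the index and
 * ring_ptr back to the base of the ring once the index reaches the
 * ring length.
 */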

static inline int
qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
{
        uint8_t guard = scsi_host_get_guard(sp->cmd->device->host);

        /* We only support T10 DIF right now */
        if (guard != SHOST_DIX_GUARD_CRC) {
                ql_dbg(ql_dbg_io, sp->fcport->vha, 0x3007,
                    "Unsupported guard: %d for cmd=%p.\n", guard, sp->cmd);
                return 0;
        }

        /* We always use DIF bundling for best performance */
        *fw_prot_opts = 0;

        /* Translate SCSI opcode to a protection opcode */
        switch (scsi_get_prot_op(sp->cmd)) {
        case SCSI_PROT_READ_STRIP:
                *fw_prot_opts |= PO_MODE_DIF_REMOVE;
                break;
        case SCSI_PROT_WRITE_INSERT:
                *fw_prot_opts |= PO_MODE_DIF_INSERT;
                break;
        case SCSI_PROT_READ_INSERT:
                *fw_prot_opts |= PO_MODE_DIF_INSERT;
                break;
        case SCSI_PROT_WRITE_STRIP:
                *fw_prot_opts |= PO_MODE_DIF_REMOVE;
                break;
        case SCSI_PROT_READ_PASS:
                *fw_prot_opts |= PO_MODE_DIF_PASS;
                break;
        case SCSI_PROT_WRITE_PASS:
                *fw_prot_opts |= PO_MODE_DIF_PASS;
                break;
        default:        /* Normal Request */
                *fw_prot_opts |= PO_MODE_DIF_PASS;
                break;
        }

        return scsi_prot_sg_count(sp->cmd);
}
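
/*
 * The translation above follows from who supplies the protection data:
 * STRIP operations have the firmware discard T10 PI on the way through
 * (PO_MODE_DIF_REMOVE), INSERT operations have it generate PI
 * (PO_MODE_DIF_INSERT), and PASS operations carry PI end-to-end
 * unchanged (PO_MODE_DIF_PASS).
 */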

/**
 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 2 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
        uint16_t avail_dsds;
        uint32_t *cur_dsd;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        struct scatterlist *sg;
        int i;

        cmd = sp->cmd;

        /* Update entry type to indicate Command Type 2 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) =
            __constant_cpu_to_le32(COMMAND_TYPE);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = __constant_cpu_to_le32(0);
                return;
        }

        vha = sp->fcport->vha;
        cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

        /* Three DSDs are available in the Command Type 2 IOCB */
        avail_dsds = 3;
        cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

        /* Load data segments */
        scsi_for_each_sg(cmd, sg, tot_dsds, i) {
                cont_entry_t *cont_pkt;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
                         * Seven DSDs are available in the Continuation
                         * Type 0 IOCB.
                         */
                        cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
                        cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
                        avail_dsds = 7;
                }

                *cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
                avail_dsds--;
        }
}

/**
 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
        uint16_t avail_dsds;
        uint32_t *cur_dsd;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        struct scatterlist *sg;
        int i;

        cmd = sp->cmd;

        /* Update entry type to indicate Command Type 3 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) =
            __constant_cpu_to_le32(COMMAND_A64_TYPE);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = __constant_cpu_to_le32(0);
                return;
        }

        vha = sp->fcport->vha;
        cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

        /* Two DSDs are available in the Command Type 3 IOCB */
        avail_dsds = 2;
        cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

        /* Load data segments */
        scsi_for_each_sg(cmd, sg, tot_dsds, i) {
                dma_addr_t sle_dma;
                cont_a64_entry_t *cont_pkt;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
                         * Five DSDs are available in the Continuation
                         * Type 1 IOCB.
                         */
                        cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
                        cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
                        avail_dsds = 5;
                }

                sle_dma = sg_dma_address(sg);
                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
                avail_dsds--;
        }
}

/**
 * qla2x00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2x00_start_scsi(srb_t *sp)
{
        int ret, nseg;
        unsigned long flags;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        uint32_t *clr_ptr;
        uint32_t index;
        uint32_t handle;
        cmd_entry_t *cmd_pkt;
        uint16_t cnt;
        uint16_t req_cnt;
        uint16_t tot_dsds;
        struct device_reg_2xxx __iomem *reg;
        struct qla_hw_data *ha;
        struct req_que *req;
        struct rsp_que *rsp;
        char tag[2];

        /* Setup device pointers. */
        ret = 0;
        vha = sp->fcport->vha;
        ha = vha->hw;
        reg = &ha->iobase->isp;
        cmd = sp->cmd;
        req = ha->req_q_map[0];
        rsp = ha->rsp_q_map[0];
        /* So we know we haven't pci_map'ed anything yet */
        tot_dsds = 0;

        /* Send marker if required */
        if (vha->marker_needed != 0) {
                if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
                    QLA_SUCCESS) {
                        return (QLA_FUNCTION_FAILED);
                }
                vha->marker_needed = 0;
        }

        /* Acquire ring specific lock */
        spin_lock_irqsave(&ha->hardware_lock, flags);

        /* Check for room in outstanding command list. */
        handle = req->current_outstanding_cmd;
        for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
                handle++;
                if (handle == MAX_OUTSTANDING_COMMANDS)
                        handle = 1;
                if (!req->outstanding_cmds[handle])
                        break;
        }
        if (index == MAX_OUTSTANDING_COMMANDS)
                goto queuing_error;

        /* Map the sg table so we have an accurate count of sg entries needed */
        if (scsi_sg_count(cmd)) {
                nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
                    scsi_sg_count(cmd), cmd->sc_data_direction);
                if (unlikely(!nseg))
                        goto queuing_error;
        } else
                nseg = 0;

        tot_dsds = nseg;

        /* Calculate the number of request entries needed. */
        req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
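        /*
         * Reader's note: req->cnt caches the number of free request-ring
         * entries; when it looks too small it is refreshed below from the
         * hardware out-pointer. If the consumer index (cnt) is ahead of
         * ring_index the free span is simply cnt - ring_index; otherwise
         * it wraps: with a 128-entry ring, ring_index == 120 and an
         * out-pointer of 8 leave 128 - (120 - 8) = 16 free slots. The
         * "+ 2" below keeps a small reserve so the ring never fills
         * completely.
         */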
        if (req->cnt < (req_cnt + 2)) {
                cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
                if (req->ring_index < cnt)
                        req->cnt = cnt - req->ring_index;
                else
                        req->cnt = req->length -
                            (req->ring_index - cnt);
        }
        if (req->cnt < (req_cnt + 2))
                goto queuing_error;

        /* Build command packet */
        req->current_outstanding_cmd = handle;
        req->outstanding_cmds[handle] = sp;
        sp->handle = handle;
        sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
        req->cnt -= req_cnt;

        cmd_pkt = (cmd_entry_t *)req->ring_ptr;
        cmd_pkt->handle = handle;
        /* Zero out remaining portion of packet. */
        clr_ptr = (uint32_t *)cmd_pkt + 2;
        memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
        cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

        /* Set target ID and LUN number */
        SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
        cmd_pkt->lun = cpu_to_le16(sp->cmd->device->lun);

        /* Update tagged queuing modifier */
        if (scsi_populate_tag_msg(cmd, tag)) {
                switch (tag[0]) {
                case HEAD_OF_QUEUE_TAG:
                        cmd_pkt->control_flags =
                            __constant_cpu_to_le16(CF_HEAD_TAG);
                        break;
                case ORDERED_QUEUE_TAG:
                        cmd_pkt->control_flags =
                            __constant_cpu_to_le16(CF_ORDERED_TAG);
                        break;
                default:
                        cmd_pkt->control_flags =
                            __constant_cpu_to_le16(CF_SIMPLE_TAG);
                        break;
                }
        }

        /* Load SCSI command packet. */
        memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
        cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

        /* Build IOCB segments */
        ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);

        /* Set total data segment count. */
        cmd_pkt->entry_count = (uint8_t)req_cnt;
        wmb();

        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else
                req->ring_ptr++;

        sp->flags |= SRB_DMA_VALID;

        /* Set chip new ring index. */
        WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
        RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));     /* PCI Posting. */

        /* Manage unprocessed RIO/ZIO commands in response queue. */
        if (vha->flags.process_response_queue &&
            rsp->ring_ptr->signature != RESPONSE_PROCESSED)
                qla2x00_process_response_queue(rsp);

        spin_unlock_irqrestore(&ha->hardware_lock, flags);
        return (QLA_SUCCESS);

queuing_error:
        if (tot_dsds)
                scsi_dma_unmap(cmd);

        spin_unlock_irqrestore(&ha->hardware_lock, flags);

        return (QLA_FUNCTION_FAILED);
}

/**
 * qla2x00_marker() - Send a marker IOCB to the firmware.
 * @vha: HA context
 * @req: request queue
 * @rsp: response queue
 * @loop_id: loop ID
 * @lun: LUN
 * @type: marker modifier
 *
 * Can be called from both normal and interrupt context.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
static int
__qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
    struct rsp_que *rsp, uint16_t loop_id,
    uint16_t lun, uint8_t type)
{
        mrk_entry_t *mrk;
        struct mrk_entry_24xx *mrk24;
        struct qla_hw_data *ha = vha->hw;
        scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

        mrk24 = NULL;
        mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, 0);
        if (mrk == NULL) {
                ql_log(ql_log_warn, base_vha, 0x3026,
                    "Failed to allocate Marker IOCB.\n");

                return (QLA_FUNCTION_FAILED);
        }

        mrk->entry_type = MARKER_TYPE;
        mrk->modifier = type;
        if (type != MK_SYNC_ALL) {
                if (IS_FWI2_CAPABLE(ha)) {
                        mrk24 = (struct mrk_entry_24xx *) mrk;
                        mrk24->nport_handle = cpu_to_le16(loop_id);
                        mrk24->lun[1] = LSB(lun);
                        mrk24->lun[2] = MSB(lun);
                        host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
                        mrk24->vp_index = vha->vp_idx;
                        mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
                } else {
                        SET_TARGET_ID(ha, mrk->target, loop_id);
                        mrk->lun = cpu_to_le16(lun);
                }
        }
        wmb();

        qla2x00_isp_cmd(vha, req);

        return (QLA_SUCCESS);
}

int
qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
    struct rsp_que *rsp, uint16_t loop_id, uint16_t lun,
    uint8_t type)
{
        int ret;
        unsigned long flags = 0;

        spin_lock_irqsave(&vha->hw->hardware_lock, flags);
        ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type);
        spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);

        return (ret);
}

/**
 * qla2x00_isp_cmd() - Modify the request ring pointer.
 * @vha: HA context
 * @req: request queue
 *
 * Note: The caller must hold the hardware lock before calling this routine.
 */
static void
qla2x00_isp_cmd(struct scsi_qla_host *vha, struct req_que *req)
{
        struct qla_hw_data *ha = vha->hw;
        device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
        struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;

        ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x302d,
            "IOCB data:\n");
        ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302e,
            (uint8_t *)req->ring_ptr, REQUEST_ENTRY_SIZE);

        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else
                req->ring_ptr++;

        /* Set chip new ring index. */
        if (IS_QLA82XX(ha)) {
                uint32_t dbval = 0x04 | (ha->portnum << 5);

                /* write, read and verify logic */
                dbval = dbval | (req->id << 8) | (req->ring_index << 16);
                if (ql2xdbwr)
                        qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
                else {
                        WRT_REG_DWORD(
                            (unsigned long __iomem *)ha->nxdb_wr_ptr,
                            dbval);
                        wmb();
                        while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
                                WRT_REG_DWORD((unsigned long __iomem *)
                                    ha->nxdb_wr_ptr, dbval);
                                wmb();
                        }
                }
        } else if (ha->mqenable) {
                /* Set chip new ring index. */
                WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index);
                RD_REG_DWORD(&ioreg->hccr);
        } else {
                if (IS_FWI2_CAPABLE(ha)) {
                        WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
                        RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
                } else {
                        WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
                            req->ring_index);
                        RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
                }
        }
}

/**
 * qla24xx_calc_iocbs() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @vha: HA context
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
inline uint16_t
qla24xx_calc_iocbs(scsi_qla_host_t *vha, uint16_t dsds)
{
        uint16_t iocbs;

        iocbs = 1;
        if (dsds > 1) {
                iocbs += (dsds - 1) / 5;
                if ((dsds - 1) % 5)
                        iocbs++;
        }
        return iocbs;
}
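
/*
 * Worked example: a Command Type 7 IOCB carries 1 DSD and each
 * Continuation Type 1 IOCB carries 5, so an 11-segment transfer needs
 * 1 + ceil((11 - 1) / 5) = 3 ring entries.
 */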

/**
 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
 * IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 7 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
inline void
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
    uint16_t tot_dsds)
{
        uint16_t avail_dsds;
        uint32_t *cur_dsd;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        struct scatterlist *sg;
        int i;
        struct req_que *req;

        cmd = sp->cmd;

        /* Update entry type to indicate Command Type 7 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) =
            __constant_cpu_to_le32(COMMAND_TYPE_7);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = __constant_cpu_to_le32(0);
                return;
        }

        vha = sp->fcport->vha;
        req = vha->req;

        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
                cmd_pkt->task_mgmt_flags =
                    __constant_cpu_to_le16(TMF_WRITE_DATA);
                sp->fcport->vha->hw->qla_stats.output_bytes +=
                    scsi_bufflen(sp->cmd);
        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cmd_pkt->task_mgmt_flags =
                    __constant_cpu_to_le16(TMF_READ_DATA);
                sp->fcport->vha->hw->qla_stats.input_bytes +=
                    scsi_bufflen(sp->cmd);
        }

        /* One DSD is available in the Command Type 7 IOCB */
        avail_dsds = 1;
        cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

        /* Load data segments */

        scsi_for_each_sg(cmd, sg, tot_dsds, i) {
                dma_addr_t sle_dma;
                cont_a64_entry_t *cont_pkt;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
                         * Five DSDs are available in the Continuation
                         * Type 1 IOCB.
                         */
                        cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
                        cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
                        avail_dsds = 5;
                }

                sle_dma = sg_dma_address(sg);
                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
                avail_dsds--;
        }
}

struct fw_dif_context {
        uint32_t ref_tag;
        uint16_t app_tag;
        uint8_t ref_tag_mask[4];        /* Validation/Replacement Mask */
        uint8_t app_tag_mask[2];        /* Validation/Replacement Mask */
};
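
/*
 * Background note: a T10 DIF protection interval is an 8-byte tuple
 * appended to each logical block -- a 2-byte guard (CRC16), a 2-byte
 * application tag, and a 4-byte reference tag. The mask bytes above
 * tell the firmware, byte by byte, which parts of the app/ref tags to
 * actually validate or replace (0xff = check that byte, 0x00 = ignore).
 */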

/*
 * qla24xx_set_t10dif_tags - Extract Ref and App tags from SCSI command
 *
 */
static inline void
qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
    unsigned int protcnt)
{
        struct scsi_cmnd *cmd = sp->cmd;
        scsi_qla_host_t *vha = shost_priv(cmd->device->host);

        switch (scsi_get_prot_type(cmd)) {
        case SCSI_PROT_DIF_TYPE0:
                /*
                 * No check for ql2xenablehba_err_chk, as it would be an
                 * I/O error if hba tag generation is not done.
                 */
                pkt->ref_tag = cpu_to_le32((uint32_t)
                    (0xffffffff & scsi_get_lba(cmd)));

                if (!qla2x00_hba_err_chk_enabled(sp))
                        break;

                pkt->ref_tag_mask[0] = 0xff;
                pkt->ref_tag_mask[1] = 0xff;
                pkt->ref_tag_mask[2] = 0xff;
                pkt->ref_tag_mask[3] = 0xff;
                break;

        /*
         * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
         * match LBA in CDB + N
         */
        case SCSI_PROT_DIF_TYPE2:
                pkt->app_tag = __constant_cpu_to_le16(0);
                pkt->app_tag_mask[0] = 0x0;
                pkt->app_tag_mask[1] = 0x0;

                pkt->ref_tag = cpu_to_le32((uint32_t)
                    (0xffffffff & scsi_get_lba(cmd)));

                if (!qla2x00_hba_err_chk_enabled(sp))
                        break;

                /* enable ALL bytes of the ref tag */
                pkt->ref_tag_mask[0] = 0xff;
                pkt->ref_tag_mask[1] = 0xff;
                pkt->ref_tag_mask[2] = 0xff;
                pkt->ref_tag_mask[3] = 0xff;
                break;

        /* For Type 3 protection: 16 bit GUARD only */
        case SCSI_PROT_DIF_TYPE3:
                pkt->ref_tag_mask[0] = pkt->ref_tag_mask[1] =
                        pkt->ref_tag_mask[2] = pkt->ref_tag_mask[3] =
                        0x00;
                break;

        /*
         * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
         * 16 bit app tag.
         */
        case SCSI_PROT_DIF_TYPE1:
                pkt->ref_tag = cpu_to_le32((uint32_t)
                    (0xffffffff & scsi_get_lba(cmd)));
                pkt->app_tag = __constant_cpu_to_le16(0);
                pkt->app_tag_mask[0] = 0x0;
                pkt->app_tag_mask[1] = 0x0;

                if (!qla2x00_hba_err_chk_enabled(sp))
                        break;

                /* enable ALL bytes of the ref tag */
                pkt->ref_tag_mask[0] = 0xff;
                pkt->ref_tag_mask[1] = 0xff;
                pkt->ref_tag_mask[2] = 0xff;
                pkt->ref_tag_mask[3] = 0xff;
                break;
        }

        ql_dbg(ql_dbg_io, vha, 0x3009,
            "Setting protection Tags: (BIG) ref tag = 0x%x, app tag = 0x%x, "
            "prot SG count %d, cmd lba 0x%x, prot_type=%u cmd=%p.\n",
            pkt->ref_tag, pkt->app_tag, protcnt, (int)scsi_get_lba(cmd),
            scsi_get_prot_type(cmd), cmd);
}

struct qla2_sgx {
        dma_addr_t              dma_addr;       /* OUT */
        uint32_t                dma_len;        /* OUT */

        uint32_t                tot_bytes;      /* IN */
        struct scatterlist      *cur_sg;        /* IN */

        /* for book keeping, bzero on initial invocation */
        uint32_t                bytes_consumed;
        uint32_t                num_bytes;
        uint32_t                tot_partial;

        /* for debugging */
        uint32_t                num_sg;
        srb_t                   *sp;
};

static int
qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
        uint32_t *partial)
{
        struct scatterlist *sg;
        uint32_t cumulative_partial, sg_len;
        dma_addr_t sg_dma_addr;

        if (sgx->num_bytes == sgx->tot_bytes)
                return 0;

        sg = sgx->cur_sg;
        cumulative_partial = sgx->tot_partial;

        sg_dma_addr = sg_dma_address(sg);
        sg_len = sg_dma_len(sg);

        sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed;

        if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) {
                sgx->dma_len = (blk_sz - cumulative_partial);
                sgx->tot_partial = 0;
                sgx->num_bytes += blk_sz;
                *partial = 0;
        } else {
                sgx->dma_len = sg_len - sgx->bytes_consumed;
                sgx->tot_partial += sgx->dma_len;
                *partial = 1;
        }

        sgx->bytes_consumed += sgx->dma_len;

        if (sg_len == sgx->bytes_consumed) {
                sg = sg_next(sg);
                sgx->num_sg++;
                sgx->cur_sg = sg;
                sgx->bytes_consumed = 0;
        }

        return 1;
}
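
/*
 * Worked example for the iterator above: with 512-byte protection
 * intervals and a DMA-mapped SG list of 300- and 724-byte elements,
 * successive calls yield chunks of 300 (partial=1), 212 (partial=0,
 * block complete) and 512 (partial=0) -- the data stream is re-carved
 * into block-sized pieces no matter where the SG boundaries fall.
 */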

static int
qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
        uint32_t *dsd, uint16_t tot_dsds)
{
        void *next_dsd;
        uint8_t avail_dsds = 0;
        uint32_t dsd_list_len;
        struct dsd_dma *dsd_ptr;
        struct scatterlist *sg_prot;
        uint32_t *cur_dsd = dsd;
        uint16_t used_dsds = tot_dsds;

        uint32_t prot_int;
        uint32_t partial;
        struct qla2_sgx sgx;
        dma_addr_t sle_dma;
        uint32_t sle_dma_len, tot_prot_dma_len = 0;
        struct scsi_cmnd *cmd = sp->cmd;

        prot_int = cmd->device->sector_size;

        memset(&sgx, 0, sizeof(struct qla2_sgx));
        sgx.tot_bytes = scsi_bufflen(sp->cmd);
        sgx.cur_sg = scsi_sglist(sp->cmd);
        sgx.sp = sp;

        sg_prot = scsi_prot_sglist(sp->cmd);

        while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {

                sle_dma = sgx.dma_addr;
                sle_dma_len = sgx.dma_len;
alloc_and_fill:
                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
                                        QLA_DSDS_PER_IOCB : used_dsds;
                        dsd_list_len = (avail_dsds + 1) * 12;
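                        /*
                         * Reader's note: each DSD is 12 bytes (32-bit
                         * address low/high plus a 32-bit length); the
                         * "+ 1" reserves one extra slot that is later
                         * used either to chain to the next DSD list or
                         * for the null terminator.
                         */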
                        used_dsds -= avail_dsds;

                        /* allocate tracking DS */
                        dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
                        if (!dsd_ptr)
                                return 1;

                        /* allocate new list */
                        dsd_ptr->dsd_addr = next_dsd =
                            dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
                                &dsd_ptr->dsd_list_dma);

                        if (!next_dsd) {
                                /*
                                 * Need to cleanup only this dsd_ptr, rest
                                 * will be done by sp_free_dma()
                                 */
                                kfree(dsd_ptr);
                                return 1;
                        }

                        list_add_tail(&dsd_ptr->list,
                            &((struct crc_context *)sp->ctx)->dsd_list);

                        sp->flags |= SRB_CRC_CTX_DSD_VALID;

                        /* add new list to cmd iocb or last list */
                        *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = dsd_list_len;
                        cur_dsd = (uint32_t *)next_dsd;
                }
                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sle_dma_len);
                avail_dsds--;

                if (partial == 0) {
                        /* Got a full protection interval */
                        sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len;
                        sle_dma_len = 8;

                        tot_prot_dma_len += sle_dma_len;
                        if (tot_prot_dma_len == sg_dma_len(sg_prot)) {
                                tot_prot_dma_len = 0;
                                sg_prot = sg_next(sg_prot);
                        }

                        partial = 1; /* So as to not re-enter this block */
                        goto alloc_and_fill;
                }
        }
        /* Null termination */
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        return 0;
}

static int
qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
        uint16_t tot_dsds)
{
        void *next_dsd;
        uint8_t avail_dsds = 0;
        uint32_t dsd_list_len;
        struct dsd_dma *dsd_ptr;
        struct scatterlist *sg;
        uint32_t *cur_dsd = dsd;
        int i;
        uint16_t used_dsds = tot_dsds;
        scsi_qla_host_t *vha = shost_priv(sp->cmd->device->host);

        uint8_t *cp;

        scsi_for_each_sg(sp->cmd, sg, tot_dsds, i) {
                dma_addr_t sle_dma;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
                                        QLA_DSDS_PER_IOCB : used_dsds;
                        dsd_list_len = (avail_dsds + 1) * 12;
                        used_dsds -= avail_dsds;

                        /* allocate tracking DS */
                        dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
                        if (!dsd_ptr)
                                return 1;

                        /* allocate new list */
                        dsd_ptr->dsd_addr = next_dsd =
                            dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
                                &dsd_ptr->dsd_list_dma);

                        if (!next_dsd) {
                                /*
                                 * Need to cleanup only this dsd_ptr, rest
                                 * will be done by sp_free_dma()
                                 */
                                kfree(dsd_ptr);
                                return 1;
                        }

                        list_add_tail(&dsd_ptr->list,
                            &((struct crc_context *)sp->ctx)->dsd_list);

                        sp->flags |= SRB_CRC_CTX_DSD_VALID;

                        /* add new list to cmd iocb or last list */
                        *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = dsd_list_len;
                        cur_dsd = (uint32_t *)next_dsd;
                }
                sle_dma = sg_dma_address(sg);
                ql_dbg(ql_dbg_io, vha, 0x300a,
                    "sg entry %d - addr=0x%x 0x%x, len=%d for cmd=%p.\n",
                    i, LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg),
                    sp->cmd);
                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
                avail_dsds--;

                if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) {
                        cp = page_address(sg_page(sg)) + sg->offset;
                        ql_dbg(ql_dbg_io, vha, 0x300b,
                            "User data buffer=%p for cmd=%p.\n", cp, sp->cmd);
                }
        }
        /* Null termination */
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        return 0;
}

static int
qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
        uint32_t *dsd,
        uint16_t tot_dsds)
{
        void *next_dsd;
        uint8_t avail_dsds = 0;
        uint32_t dsd_list_len;
        struct dsd_dma *dsd_ptr;
        struct scatterlist *sg;
        int i;
        struct scsi_cmnd *cmd;
        uint32_t *cur_dsd = dsd;
        uint16_t used_dsds = tot_dsds;
        scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
        uint8_t *cp;

        cmd = sp->cmd;
        scsi_for_each_prot_sg(cmd, sg, tot_dsds, i) {
                dma_addr_t sle_dma;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
                                        QLA_DSDS_PER_IOCB : used_dsds;
                        dsd_list_len = (avail_dsds + 1) * 12;
                        used_dsds -= avail_dsds;

                        /* allocate tracking DS */
                        dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
                        if (!dsd_ptr)
                                return 1;

                        /* allocate new list */
                        dsd_ptr->dsd_addr = next_dsd =
                            dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
                                &dsd_ptr->dsd_list_dma);

                        if (!next_dsd) {
                                /*
                                 * Need to cleanup only this dsd_ptr, rest
                                 * will be done by sp_free_dma()
                                 */
                                kfree(dsd_ptr);
                                return 1;
                        }

                        list_add_tail(&dsd_ptr->list,
                            &((struct crc_context *)sp->ctx)->dsd_list);

                        sp->flags |= SRB_CRC_CTX_DSD_VALID;

                        /* add new list to cmd iocb or last list */
                        *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = dsd_list_len;
                        cur_dsd = (uint32_t *)next_dsd;
                }
                sle_dma = sg_dma_address(sg);
                if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) {
                        ql_dbg(ql_dbg_io, vha, 0x3027,
                            "%s(): %p, sg_entry %d - addr=0x%x0x%x, len=%d.\n",
                            __func__, cur_dsd, i,
                            LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg));
                }
                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));

                if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) {
                        cp = page_address(sg_page(sg)) + sg->offset;
                        ql_dbg(ql_dbg_io, vha, 0x3028,
                            "%s(): Protection Data buffer = %p.\n", __func__,
                            cp);
                }
                avail_dsds--;
        }
        /* Null termination */
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        return 0;
}

/**
 * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
 * Type 6 IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type CRC_2 IOCB
 * @tot_dsds: Total number of segments to transfer
 * @tot_prot_dsds: Total number of protection segments to transfer
 * @fw_prot_opts: Protection options to pass to the firmware
 */
static inline int
qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
    uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
{
        uint32_t *cur_dsd, *fcp_dl;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        struct scatterlist *cur_seg;
        int sgc;
        uint32_t total_bytes = 0;
        uint32_t data_bytes;
        uint32_t dif_bytes;
        uint8_t bundling = 1;
        uint16_t blk_size;
        uint8_t *clr_ptr;
        struct crc_context *crc_ctx_pkt = NULL;
        struct qla_hw_data *ha;
        uint8_t additional_fcpcdb_len;
        uint16_t fcp_cmnd_len;
        struct fcp_cmnd *fcp_cmnd;
        dma_addr_t crc_ctx_dma;
        char tag[2];

        cmd = sp->cmd;

        sgc = 0;
        /* Update entry type to indicate Command Type CRC_2 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) =
            __constant_cpu_to_le32(COMMAND_TYPE_CRC_2);

        vha = sp->fcport->vha;
        ha = vha->hw;

        /* No data transfer */
        data_bytes = scsi_bufflen(cmd);
        if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = __constant_cpu_to_le32(0);
                return QLA_SUCCESS;
        }

        cmd_pkt->vp_index = sp->fcport->vp_idx;

        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
                cmd_pkt->control_flags =
                    __constant_cpu_to_le16(CF_WRITE_DATA);
        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cmd_pkt->control_flags =
                    __constant_cpu_to_le16(CF_READ_DATA);
        }

        if ((scsi_get_prot_op(sp->cmd) == SCSI_PROT_READ_INSERT) ||
            (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_STRIP) ||
            (scsi_get_prot_op(sp->cmd) == SCSI_PROT_READ_STRIP) ||
            (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_INSERT))
                bundling = 0;

        /* Allocate CRC context from global pool */
        crc_ctx_pkt = sp->ctx = dma_pool_alloc(ha->dl_dma_pool,
            GFP_ATOMIC, &crc_ctx_dma);

        if (!crc_ctx_pkt)
                goto crc_queuing_error;

        /* Zero out CTX area. */
        clr_ptr = (uint8_t *)crc_ctx_pkt;
        memset(clr_ptr, 0, sizeof(*crc_ctx_pkt));

        crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;

        sp->flags |= SRB_CRC_CTX_DMA_VALID;

        /* Set handle */
        crc_ctx_pkt->handle = cmd_pkt->handle;

        INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);

        qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *)
            &crc_ctx_pkt->ref_tag, tot_prot_dsds);

        cmd_pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
        cmd_pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
        cmd_pkt->crc_context_len = CRC_CONTEXT_LEN_FW;

        /* Determine SCSI command length -- align to 4 byte boundary */
        if (cmd->cmd_len > 16) {
                additional_fcpcdb_len = cmd->cmd_len - 16;
                if ((cmd->cmd_len % 4) != 0) {
                        /* SCSI cmd > 16 bytes must be multiple of 4 */
                        goto crc_queuing_error;
                }
                fcp_cmnd_len = 12 + cmd->cmd_len + 4;
        } else {
                additional_fcpcdb_len = 0;
                fcp_cmnd_len = 12 + 16 + 4;
        }

        fcp_cmnd = &crc_ctx_pkt->fcp_cmnd;

        fcp_cmnd->additional_cdb_len = additional_fcpcdb_len;
        if (cmd->sc_data_direction == DMA_TO_DEVICE)
                fcp_cmnd->additional_cdb_len |= 1;
        else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
                fcp_cmnd->additional_cdb_len |= 2;

        int_to_scsilun(sp->cmd->device->lun, &fcp_cmnd->lun);
        memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
        cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
        cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32(
            LSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
        cmd_pkt->fcp_cmnd_dseg_address[1] = cpu_to_le32(
            MSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
        fcp_cmnd->task_management = 0;

        /*
         * Update tagged queuing modifier if using command tag queuing
         */
        if (scsi_populate_tag_msg(cmd, tag)) {
                switch (tag[0]) {
                case HEAD_OF_QUEUE_TAG:
                        fcp_cmnd->task_attribute = TSK_HEAD_OF_QUEUE;
                        break;
                case ORDERED_QUEUE_TAG:
                        fcp_cmnd->task_attribute = TSK_ORDERED;
                        break;
                default:
                        fcp_cmnd->task_attribute = 0;
                        break;
                }
        } else {
                fcp_cmnd->task_attribute = 0;
        }

        cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */

        /* Compute dif len and adjust data len to include protection */
        dif_bytes = 0;
        blk_size = cmd->device->sector_size;
        dif_bytes = (data_bytes / blk_size) * 8;
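        /*
         * Worked example: a 4 KiB transfer to a 512-byte-sector device
         * spans 8 blocks, so dif_bytes = 8 * 8 = 64 bytes of protection
         * data accompany the 4096 data bytes.
         */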

        switch (scsi_get_prot_op(sp->cmd)) {
        case SCSI_PROT_READ_INSERT:
        case SCSI_PROT_WRITE_STRIP:
                total_bytes = data_bytes;
                data_bytes += dif_bytes;
                break;

        case SCSI_PROT_READ_STRIP:
        case SCSI_PROT_WRITE_INSERT:
        case SCSI_PROT_READ_PASS:
        case SCSI_PROT_WRITE_PASS:
                total_bytes = data_bytes + dif_bytes;
                break;
        default:
                BUG();
        }

        if (!qla2x00_hba_err_chk_enabled(sp))
                fw_prot_opts |= 0x10; /* Disable Guard tag checking */

        if (!bundling) {
                cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
        } else {
                /*
                 * Configure Bundling if we need to fetch interleaving
                 * protection PCI accesses
                 */
                fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
                crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
                crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
                        tot_prot_dsds);
                cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
        }

        /* Finish the common fields of CRC pkt */
        crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
        crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
        crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
        crc_ctx_pkt->guard_seed = __constant_cpu_to_le16(0);
        /* Fibre channel byte count */
        cmd_pkt->byte_count = cpu_to_le32(total_bytes);
        fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
            additional_fcpcdb_len);
        *fcp_dl = htonl(total_bytes);

        if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = __constant_cpu_to_le32(0);
                return QLA_SUCCESS;
        }
        /* Walks data segments */

        cmd_pkt->control_flags |=
            __constant_cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);

        if (!bundling && tot_prot_dsds) {
                if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
                    cur_dsd, tot_dsds))
                        goto crc_queuing_error;
        } else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
            (tot_dsds - tot_prot_dsds)))
                goto crc_queuing_error;

        if (bundling && tot_prot_dsds) {
                /* Walks dif segments */
                cur_seg = scsi_prot_sglist(cmd);
                cmd_pkt->control_flags |=
                        __constant_cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
                cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
                if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
                    tot_prot_dsds))
                        goto crc_queuing_error;
        }
        return QLA_SUCCESS;

crc_queuing_error:
        /* Cleanup will be performed by the caller */

        return QLA_FUNCTION_FAILED;
}
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001340
1341/**
1342 * qla24xx_start_scsi() - Send a SCSI command to the ISP
1343 * @sp: command to send to the ISP
1344 *
Bjorn Helgaascc3ef7b2008-09-11 21:22:51 -07001345 * Returns non-zero if a failure occurred, else zero.
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001346 */
1347int
1348qla24xx_start_scsi(srb_t *sp)
1349{
FUJITA Tomonori385d70b2007-05-26 01:55:38 +09001350 int ret, nseg;
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001351 unsigned long flags;
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001352 uint32_t *clr_ptr;
1353 uint32_t index;
1354 uint32_t handle;
1355 struct cmd_type_7 *cmd_pkt;
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001356 uint16_t cnt;
1357 uint16_t req_cnt;
1358 uint16_t tot_dsds;
Anirban Chakraborty73208df2008-12-09 16:45:39 -08001359 struct req_que *req = NULL;
1360 struct rsp_que *rsp = NULL;
1361 struct scsi_cmnd *cmd = sp->cmd;
Andrew Vasquez444786d2009-01-05 11:18:10 -08001362 struct scsi_qla_host *vha = sp->fcport->vha;
Anirban Chakraborty73208df2008-12-09 16:45:39 -08001363 struct qla_hw_data *ha = vha->hw;
Andrew Vasquezff2fc422011-02-23 15:27:15 -08001364 char tag[2];
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001365
1366 /* Setup device pointers. */
1367 ret = 0;
Anirban Chakraborty73208df2008-12-09 16:45:39 -08001368
Anirban Chakraborty59e0b8b2009-06-03 09:55:19 -07001369 qla25xx_set_que(sp, &rsp);
1370 req = vha->req;
Anirban Chakraborty73208df2008-12-09 16:45:39 -08001371
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001372 /* So we know we haven't pci_map'ed anything yet */
1373 tot_dsds = 0;
1374
1375 /* Send marker if required */
Anirban Chakrabortye315cd22008-11-06 10:40:51 -08001376 if (vha->marker_needed != 0) {
Saurav Kashyap7c3df132011-07-14 12:00:13 -07001377 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1378 QLA_SUCCESS)
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001379 return QLA_FUNCTION_FAILED;
Anirban Chakrabortye315cd22008-11-06 10:40:51 -08001380 vha->marker_needed = 0;
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001381 }
1382
1383 /* Acquire ring specific lock */
Anirban Chakrabortye315cd22008-11-06 10:40:51 -08001384 spin_lock_irqsave(&ha->hardware_lock, flags);
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001385
1386 /* Check for room in outstanding command list. */
Anirban Chakrabortye315cd22008-11-06 10:40:51 -08001387 handle = req->current_outstanding_cmd;
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001388 for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
1389 handle++;
1390 if (handle == MAX_OUTSTANDING_COMMANDS)
1391 handle = 1;
Anirban Chakrabortye315cd22008-11-06 10:40:51 -08001392 if (!req->outstanding_cmds[handle])
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001393 break;
1394 }
Saurav Kashyap7c3df132011-07-14 12:00:13 -07001395 if (index == MAX_OUTSTANDING_COMMANDS) {
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001396 goto queuing_error;
Saurav Kashyap7c3df132011-07-14 12:00:13 -07001397 }
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001398
1399 /* Map the sg table so we have an accurate count of sg entries needed */
Seokmann Ju2c3dfe32007-07-05 13:16:51 -07001400 if (scsi_sg_count(cmd)) {
1401 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1402 scsi_sg_count(cmd), cmd->sc_data_direction);
1403 if (unlikely(!nseg))
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001404 goto queuing_error;
Seokmann Ju2c3dfe32007-07-05 13:16:51 -07001405 } else
1406 nseg = 0;
1407
FUJITA Tomonori385d70b2007-05-26 01:55:38 +09001408 tot_dsds = nseg;
Saurav Kashyap7c3df132011-07-14 12:00:13 -07001409 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
Anirban Chakrabortye315cd22008-11-06 10:40:51 -08001410 if (req->cnt < (req_cnt + 2)) {
Andrew Vasquez08029992009-03-24 09:07:55 -07001411 cnt = RD_REG_DWORD_RELAXED(req->req_q_out);
Anirban Chakraborty73208df2008-12-09 16:45:39 -08001412
Anirban Chakrabortye315cd22008-11-06 10:40:51 -08001413 if (req->ring_index < cnt)
1414 req->cnt = cnt - req->ring_index;
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001415 else
Anirban Chakrabortye315cd22008-11-06 10:40:51 -08001416 req->cnt = req->length -
1417 (req->ring_index - cnt);
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001418 }
Anirban Chakrabortye315cd22008-11-06 10:40:51 -08001419 if (req->cnt < (req_cnt + 2))
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001420 goto queuing_error;
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001421
1422 /* Build command packet. */
Anirban Chakrabortye315cd22008-11-06 10:40:51 -08001423 req->current_outstanding_cmd = handle;
1424 req->outstanding_cmds[handle] = sp;
Andrew Vasquezcf53b062009-08-20 11:06:04 -07001425 sp->handle = handle;
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001426 sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
Anirban Chakrabortye315cd22008-11-06 10:40:51 -08001427 req->cnt -= req_cnt;
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001428
Anirban Chakrabortye315cd22008-11-06 10:40:51 -08001429 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
Anirban Chakraborty2afa19a2009-04-06 22:33:40 -07001430 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001431
1432 /* Zero out remaining portion of packet. */
James Bottomley72df8322005-10-28 14:41:19 -05001433 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001434 clr_ptr = (uint32_t *)cmd_pkt + 2;
1435 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1436 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1437
1438 /* Set NPORT-ID and LUN number*/
1439 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1440 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1441 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1442 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
Seokmann Ju2c3dfe32007-07-05 13:16:51 -07001443 cmd_pkt->vp_index = sp->fcport->vp_idx;
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001444
Andrew Vasquez661c3f62005-10-27 11:09:58 -07001445 int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
andrew.vasquez@qlogic.com0d4be122006-02-07 08:45:35 -08001446 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001447
Andrew Vasquezff2fc422011-02-23 15:27:15 -08001448 /* Update tagged queuing modifier -- default is TSK_SIMPLE (0). */
1449 if (scsi_populate_tag_msg(cmd, tag)) {
1450 switch (tag[0]) {
1451 case HEAD_OF_QUEUE_TAG:
1452 cmd_pkt->task = TSK_HEAD_OF_QUEUE;
1453 break;
1454 case ORDERED_QUEUE_TAG:
1455 cmd_pkt->task = TSK_ORDERED;
1456 break;
1457 }
1458 }
1459
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001460 /* Load SCSI command packet. */
1461 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
1462 host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
1463
FUJITA Tomonori385d70b2007-05-26 01:55:38 +09001464 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001465
1466 /* Build IOCB segments */
1467 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);
1468
1469 /* Set total data segment count. */
1470 cmd_pkt->entry_count = (uint8_t)req_cnt;
Anirban Chakraborty2afa19a2009-04-06 22:33:40 -07001471 /* Specify response queue number where completion should happen */
1472 cmd_pkt->entry_status = (uint8_t) rsp->id;
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001473 wmb();
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001474 /* Adjust ring index. */
Anirban Chakrabortye315cd22008-11-06 10:40:51 -08001475 req->ring_index++;
1476 if (req->ring_index == req->length) {
1477 req->ring_index = 0;
1478 req->ring_ptr = req->ring;
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001479 } else
Anirban Chakrabortye315cd22008-11-06 10:40:51 -08001480 req->ring_ptr++;
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001481
1482 sp->flags |= SRB_DMA_VALID;
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001483
1484 /* Set chip new ring index. */
Andrew Vasquez08029992009-03-24 09:07:55 -07001485 WRT_REG_DWORD(req->req_q_in, req->ring_index);
1486 RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001487
Andrew Vasquez4fdfefe2005-10-27 11:09:48 -07001488 /* Manage unprocessed RIO/ZIO commands in response queue. */
Anirban Chakrabortye315cd22008-11-06 10:40:51 -08001489 if (vha->flags.process_response_queue &&
Anirban Chakraborty73208df2008-12-09 16:45:39 -08001490 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
Anirban Chakraborty2afa19a2009-04-06 22:33:40 -07001491 qla24xx_process_response_queue(vha, rsp);
Andrew Vasquez4fdfefe2005-10-27 11:09:48 -07001492
Anirban Chakrabortye315cd22008-11-06 10:40:51 -08001493 spin_unlock_irqrestore(&ha->hardware_lock, flags);
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001494 return QLA_SUCCESS;
1495
1496queuing_error:
FUJITA Tomonori385d70b2007-05-26 01:55:38 +09001497 if (tot_dsds)
1498 scsi_dma_unmap(cmd);
1499
Anirban Chakrabortye315cd22008-11-06 10:40:51 -08001500 spin_unlock_irqrestore(&ha->hardware_lock, flags);
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001501
1502 return QLA_FUNCTION_FAILED;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001503}
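/*
 * Editor's sketch (annotation, not part of the driver): the free-space test
 * above treats the request ring as a circular buffer.  Given the chip's
 * consumer index ("cnt", read from req_q_out) and the driver's producer
 * index ("ring_index"), the free entry count is the gap between the two,
 * wrapping at the ring length.  A minimal standalone restatement with
 * hypothetical names:
 */
static inline uint16_t example_ring_space(uint16_t ring_index, uint16_t out,
    uint16_t length)
{
	/* Producer behind consumer: free space is the direct gap. */
	if (ring_index < out)
		return out - ring_index;
	/* Otherwise free space wraps past the end of the ring; the
	 * "req_cnt + 2" comparisons above keep slack so a full ring is
	 * never mistaken for an empty one. */
	return length - (ring_index - out);
}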
Anirban Chakraborty68ca9492009-04-06 22:33:41 -07001504
Arun Easibad75002010-05-04 15:01:30 -07001505
1506/**
1507 * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
1508 * @sp: command to send to the ISP
1509 *
1510 * Returns non-zero if a failure occurred, else zero.
1511 */
1512int
1513qla24xx_dif_start_scsi(srb_t *sp)
1514{
1515 int nseg;
1516 unsigned long flags;
1517 uint32_t *clr_ptr;
1518 uint32_t index;
1519 uint32_t handle;
1520 uint16_t cnt;
1521 uint16_t req_cnt = 0;
1522 uint16_t tot_dsds;
1523 uint16_t tot_prot_dsds;
1524 uint16_t fw_prot_opts = 0;
1525 struct req_que *req = NULL;
1526 struct rsp_que *rsp = NULL;
1527 struct scsi_cmnd *cmd = sp->cmd;
1528 struct scsi_qla_host *vha = sp->fcport->vha;
1529 struct qla_hw_data *ha = vha->hw;
1530 struct cmd_type_crc_2 *cmd_pkt;
1531 uint32_t status = 0;
1532
1533#define QDSS_GOT_Q_SPACE BIT_0
1534
Arun Easi0c470872010-07-23 15:28:38 +05001535	/* Only process protection ops or CDBs longer than 16 bytes in this routine */
1536 if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
1537 if (cmd->cmd_len <= 16)
1538 return qla24xx_start_scsi(sp);
1539 }
Arun Easibad75002010-05-04 15:01:30 -07001540
1541 /* Setup device pointers. */
1542
1543 qla25xx_set_que(sp, &rsp);
1544 req = vha->req;
1545
1546 /* So we know we haven't pci_map'ed anything yet */
1547 tot_dsds = 0;
1548
1549 /* Send marker if required */
1550 if (vha->marker_needed != 0) {
1551 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1552 QLA_SUCCESS)
1553 return QLA_FUNCTION_FAILED;
1554 vha->marker_needed = 0;
1555 }
1556
1557 /* Acquire ring specific lock */
1558 spin_lock_irqsave(&ha->hardware_lock, flags);
1559
1560 /* Check for room in outstanding command list. */
1561 handle = req->current_outstanding_cmd;
1562 for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
1563 handle++;
1564 if (handle == MAX_OUTSTANDING_COMMANDS)
1565 handle = 1;
1566 if (!req->outstanding_cmds[handle])
1567 break;
1568 }
1569
1570 if (index == MAX_OUTSTANDING_COMMANDS)
1571 goto queuing_error;
1572
1573 /* Compute number of required data segments */
1574 /* Map the sg table so we have an accurate count of sg entries needed */
1575 if (scsi_sg_count(cmd)) {
1576 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1577 scsi_sg_count(cmd), cmd->sc_data_direction);
1578 if (unlikely(!nseg))
1579 goto queuing_error;
1580 else
1581 sp->flags |= SRB_DMA_VALID;
Arun Easi8cb20492011-08-16 11:29:22 -07001582
1583 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1584 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1585 struct qla2_sgx sgx;
1586 uint32_t partial;
1587
1588 memset(&sgx, 0, sizeof(struct qla2_sgx));
1589 sgx.tot_bytes = scsi_bufflen(cmd);
1590 sgx.cur_sg = scsi_sglist(cmd);
1591 sgx.sp = sp;
1592
1593 nseg = 0;
1594 while (qla24xx_get_one_block_sg(
1595 cmd->device->sector_size, &sgx, &partial))
1596 nseg++;
1597 }
Arun Easibad75002010-05-04 15:01:30 -07001598 } else
1599 nseg = 0;
1600
1601 /* number of required data segments */
1602 tot_dsds = nseg;
1603
1604 /* Compute number of required protection segments */
1605 if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
1606 nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
1607 scsi_prot_sg_count(cmd), cmd->sc_data_direction);
1608 if (unlikely(!nseg))
1609 goto queuing_error;
1610 else
1611 sp->flags |= SRB_CRC_PROT_DMA_VALID;
Arun Easi8cb20492011-08-16 11:29:22 -07001612
1613 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1614 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1615 nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
1616 }
Arun Easibad75002010-05-04 15:01:30 -07001617 } else {
1618 nseg = 0;
1619 }
1620
1621 req_cnt = 1;
1622 /* Total Data and protection sg segment(s) */
1623 tot_prot_dsds = nseg;
1624 tot_dsds += nseg;
1625 if (req->cnt < (req_cnt + 2)) {
1626 cnt = RD_REG_DWORD_RELAXED(req->req_q_out);
1627
1628 if (req->ring_index < cnt)
1629 req->cnt = cnt - req->ring_index;
1630 else
1631 req->cnt = req->length -
1632 (req->ring_index - cnt);
1633 }
1634
1635 if (req->cnt < (req_cnt + 2))
1636 goto queuing_error;
1637
1638 status |= QDSS_GOT_Q_SPACE;
1639
1640 /* Build header part of command packet (excluding the OPCODE). */
1641 req->current_outstanding_cmd = handle;
1642 req->outstanding_cmds[handle] = sp;
Arun Easi8cb20492011-08-16 11:29:22 -07001643 sp->handle = handle;
Arun Easibad75002010-05-04 15:01:30 -07001644 sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1645 req->cnt -= req_cnt;
1646
1647 /* Fill-in common area */
1648 cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
1649 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1650
1651 clr_ptr = (uint32_t *)cmd_pkt + 2;
1652 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1653
1654 /* Set NPORT-ID and LUN number*/
1655 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1656 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1657 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1658 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1659
1660 int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
1661 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1662
1663 /* Total Data and protection segment(s) */
1664 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1665
1666 /* Build IOCB segments and adjust for data protection segments */
1667 if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
1668 req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
1669 QLA_SUCCESS)
1670 goto queuing_error;
1671
1672 cmd_pkt->entry_count = (uint8_t)req_cnt;
1673 /* Specify response queue number where completion should happen */
1674 cmd_pkt->entry_status = (uint8_t) rsp->id;
1675 cmd_pkt->timeout = __constant_cpu_to_le16(0);
1676 wmb();
1677
1678 /* Adjust ring index. */
1679 req->ring_index++;
1680 if (req->ring_index == req->length) {
1681 req->ring_index = 0;
1682 req->ring_ptr = req->ring;
1683 } else
1684 req->ring_ptr++;
1685
1686 /* Set chip new ring index. */
1687 WRT_REG_DWORD(req->req_q_in, req->ring_index);
1688 RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
1689
1690 /* Manage unprocessed RIO/ZIO commands in response queue. */
1691 if (vha->flags.process_response_queue &&
1692 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
1693 qla24xx_process_response_queue(vha, rsp);
1694
1695 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1696
1697 return QLA_SUCCESS;
1698
1699queuing_error:
1700 if (status & QDSS_GOT_Q_SPACE) {
1701 req->outstanding_cmds[handle] = NULL;
1702 req->cnt += req_cnt;
1703 }
1704 /* Cleanup will be performed by the caller (queuecommand) */
1705
1706 spin_unlock_irqrestore(&ha->hardware_lock, flags);
Arun Easibad75002010-05-04 15:01:30 -07001707 return QLA_FUNCTION_FAILED;
1708}
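/*
 * Editor's sketch (annotation, not part of the driver): for
 * SCSI_PROT_READ_INSERT and SCSI_PROT_WRITE_STRIP the HBA itself inserts or
 * strips the protection data, so qla24xx_dif_start_scsi() above recounts the
 * segments one logical block at a time -- each sector ends up in its own
 * DSD, regardless of the scatter-gather layout.  A hypothetical restatement
 * of the block-based count used for the protection side:
 */
static inline uint32_t example_dif_block_dsds(uint32_t bufflen,
    uint32_t sector_size)
{
	/* e.g. a 32 KB transfer on 512-byte sectors needs 64 DSDs */
	return bufflen / sector_size;
}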
1709
1710
Anirban Chakraborty59e0b8b2009-06-03 09:55:19 -07001711static void qla25xx_set_que(srb_t *sp, struct rsp_que **rsp)
Anirban Chakraborty68ca9492009-04-06 22:33:41 -07001712{
1713 struct scsi_cmnd *cmd = sp->cmd;
Anirban Chakraborty68ca9492009-04-06 22:33:41 -07001714 struct qla_hw_data *ha = sp->fcport->vha->hw;
1715 int affinity = cmd->request->cpu;
1716
Anirban Chakraborty7163ea82009-08-05 09:18:40 -07001717 if (ha->flags.cpu_affinity_enabled && affinity >= 0 &&
Anirban Chakraborty59e0b8b2009-06-03 09:55:19 -07001718 affinity < ha->max_rsp_queues - 1)
Anirban Chakraborty68ca9492009-04-06 22:33:41 -07001719 *rsp = ha->rsp_q_map[affinity + 1];
Anirban Chakraborty59e0b8b2009-06-03 09:55:19 -07001720 else
Anirban Chakraborty68ca9492009-04-06 22:33:41 -07001721 *rsp = ha->rsp_q_map[0];
Anirban Chakraborty68ca9492009-04-06 22:33:41 -07001722}
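/*
 * Editor's sketch (annotation, not part of the driver): qla25xx_set_que()
 * maps the request's completion CPU onto response queues
 * 1..(max_rsp_queues - 1) and keeps queue 0 as the fallback.  The same
 * selection, restated with hypothetical names:
 */
static inline int example_pick_rsp_que(int cpu, int max_rsp_queues,
    int affinity_enabled)
{
	if (affinity_enabled && cpu >= 0 && cpu < max_rsp_queues - 1)
		return cpu + 1;	/* e.g. CPU 2 -> response queue 3 */
	return 0;		/* default queue */
}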
Andrew Vasquezac280b62009-08-20 11:06:05 -07001723
1724/* Generic Control-SRB manipulation functions. */
Giridhar Malavalid94d10e2010-07-23 15:28:23 +05001725void *
1726qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
Andrew Vasquezac280b62009-08-20 11:06:05 -07001727{
Andrew Vasquezac280b62009-08-20 11:06:05 -07001728 struct qla_hw_data *ha = vha->hw;
1729 struct req_que *req = ha->req_q_map[0];
1730 device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
1731 uint32_t index, handle;
1732 request_t *pkt;
1733 uint16_t cnt, req_cnt;
1734
1735 pkt = NULL;
1736 req_cnt = 1;
Giridhar Malavalid94d10e2010-07-23 15:28:23 +05001737 handle = 0;
1738
1739 if (!sp)
1740 goto skip_cmd_array;
Andrew Vasquezac280b62009-08-20 11:06:05 -07001741
1742 /* Check for room in outstanding command list. */
1743 handle = req->current_outstanding_cmd;
1744 for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
1745 handle++;
1746 if (handle == MAX_OUTSTANDING_COMMANDS)
1747 handle = 1;
1748 if (!req->outstanding_cmds[handle])
1749 break;
1750 }
Saurav Kashyap7c3df132011-07-14 12:00:13 -07001751 if (index == MAX_OUTSTANDING_COMMANDS) {
1752 ql_log(ql_log_warn, vha, 0x700b,
1753 "No room on oustanding cmd array.\n");
Andrew Vasquezac280b62009-08-20 11:06:05 -07001754 goto queuing_error;
Saurav Kashyap7c3df132011-07-14 12:00:13 -07001755 }
Andrew Vasquezac280b62009-08-20 11:06:05 -07001756
Giridhar Malavalid94d10e2010-07-23 15:28:23 +05001757 /* Prep command array. */
1758 req->current_outstanding_cmd = handle;
1759 req->outstanding_cmds[handle] = sp;
1760 sp->handle = handle;
1761
1762skip_cmd_array:
Andrew Vasquezac280b62009-08-20 11:06:05 -07001763 /* Check for room on request queue. */
1764 if (req->cnt < req_cnt) {
1765 if (ha->mqenable)
1766 cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
Giridhar Malavalid94d10e2010-07-23 15:28:23 +05001767 else if (IS_QLA82XX(ha))
1768 cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
Andrew Vasquezac280b62009-08-20 11:06:05 -07001769 else if (IS_FWI2_CAPABLE(ha))
1770 cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
1771 else
1772 cnt = qla2x00_debounce_register(
1773 ISP_REQ_Q_OUT(ha, &reg->isp));
1774
1775 if (req->ring_index < cnt)
1776 req->cnt = cnt - req->ring_index;
1777 else
1778 req->cnt = req->length -
1779 (req->ring_index - cnt);
1780 }
1781 if (req->cnt < req_cnt)
1782 goto queuing_error;
1783
1784 /* Prep packet */
Andrew Vasquezac280b62009-08-20 11:06:05 -07001785 req->cnt -= req_cnt;
Andrew Vasquezac280b62009-08-20 11:06:05 -07001786 pkt = req->ring_ptr;
1787 memset(pkt, 0, REQUEST_ENTRY_SIZE);
1788 pkt->entry_count = req_cnt;
1789 pkt->handle = handle;
Andrew Vasquezac280b62009-08-20 11:06:05 -07001790
1791queuing_error:
1792 return pkt;
1793}
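/*
 * Editor's sketch (annotation, not part of the driver): the handle search in
 * qla2x00_alloc_iocbs() -- and in the start_scsi routines above -- is a
 * circular scan of the outstanding-command array starting just past the last
 * handle issued; slot 0 is reserved, so a wrap restarts at 1.  A hypothetical
 * standalone version:
 */
static inline uint32_t example_find_free_handle(srb_t **outstanding,
    uint32_t current)
{
	uint32_t handle = current, index;

	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		if (++handle == MAX_OUTSTANDING_COMMANDS)
			handle = 1;	/* skip reserved slot 0 */
		if (!outstanding[handle])
			return handle;	/* first free handle */
	}
	return 0;	/* array full -- caller must fail the request */
}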
1794
1795static void
1796qla2x00_start_iocbs(srb_t *sp)
1797{
1798 struct qla_hw_data *ha = sp->fcport->vha->hw;
1799 struct req_que *req = ha->req_q_map[0];
1800 device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
1801 struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
1802
Giridhar Malavalia9083012010-04-12 17:59:55 -07001803 if (IS_QLA82XX(ha)) {
1804 qla82xx_start_iocbs(sp);
Andrew Vasquezac280b62009-08-20 11:06:05 -07001805 } else {
Giridhar Malavalia9083012010-04-12 17:59:55 -07001806 /* Adjust ring index. */
1807 req->ring_index++;
1808 if (req->ring_index == req->length) {
1809 req->ring_index = 0;
1810 req->ring_ptr = req->ring;
1811 } else
1812 req->ring_ptr++;
1813
1814 /* Set chip new ring index. */
1815 if (ha->mqenable) {
1816 WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index);
1817 RD_REG_DWORD(&ioreg->hccr);
1818 } else if (IS_QLA82XX(ha)) {
1819 qla82xx_start_iocbs(sp);
1820 } else if (IS_FWI2_CAPABLE(ha)) {
1821 WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
1822 RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
1823 } else {
1824 WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
1825 req->ring_index);
1826 RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
1827 }
Andrew Vasquezac280b62009-08-20 11:06:05 -07001828 }
1829}
1830
1831static void
1832qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
1833{
Madhuranath Iyengar49163922010-05-04 15:01:28 -07001834 struct srb_ctx *ctx = sp->ctx;
1835 struct srb_iocb *lio = ctx->u.iocb_cmd;
Andrew Vasquezac280b62009-08-20 11:06:05 -07001836
1837 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1838 logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
Madhuranath Iyengar49163922010-05-04 15:01:28 -07001839 if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
Andrew Vasquezac280b62009-08-20 11:06:05 -07001840 logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
Madhuranath Iyengar49163922010-05-04 15:01:28 -07001841 if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
Andrew Vasquezac280b62009-08-20 11:06:05 -07001842 logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
1843 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1844 logio->port_id[0] = sp->fcport->d_id.b.al_pa;
1845 logio->port_id[1] = sp->fcport->d_id.b.area;
1846 logio->port_id[2] = sp->fcport->d_id.b.domain;
1847 logio->vp_index = sp->fcport->vp_idx;
1848}
1849
1850static void
1851qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
1852{
1853 struct qla_hw_data *ha = sp->fcport->vha->hw;
Madhuranath Iyengar49163922010-05-04 15:01:28 -07001854 struct srb_ctx *ctx = sp->ctx;
1855 struct srb_iocb *lio = ctx->u.iocb_cmd;
Andrew Vasquezac280b62009-08-20 11:06:05 -07001856 uint16_t opts;
1857
Giridhar Malavalib9637522010-05-28 15:08:15 -07001858 mbx->entry_type = MBX_IOCB_TYPE;
Andrew Vasquezac280b62009-08-20 11:06:05 -07001859 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
1860 mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
Madhuranath Iyengar49163922010-05-04 15:01:28 -07001861 opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0;
1862 opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0;
Andrew Vasquezac280b62009-08-20 11:06:05 -07001863 if (HAS_EXTENDED_IDS(ha)) {
1864 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
1865 mbx->mb10 = cpu_to_le16(opts);
1866 } else {
1867 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts);
1868 }
1869 mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
1870 mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
1871 sp->fcport->d_id.b.al_pa);
1872 mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
1873}
1874
1875static void
1876qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
1877{
1878 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1879 logio->control_flags =
1880 cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
1881 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1882 logio->port_id[0] = sp->fcport->d_id.b.al_pa;
1883 logio->port_id[1] = sp->fcport->d_id.b.area;
1884 logio->port_id[2] = sp->fcport->d_id.b.domain;
1885 logio->vp_index = sp->fcport->vp_idx;
1886}
1887
1888static void
1889qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
1890{
1891 struct qla_hw_data *ha = sp->fcport->vha->hw;
1892
Giridhar Malavalib9637522010-05-28 15:08:15 -07001893 mbx->entry_type = MBX_IOCB_TYPE;
Andrew Vasquezac280b62009-08-20 11:06:05 -07001894 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
1895 mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
1896 mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
1897 cpu_to_le16(sp->fcport->loop_id):
1898 cpu_to_le16(sp->fcport->loop_id << 8);
1899 mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
1900 mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
1901 sp->fcport->d_id.b.al_pa);
1902 mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
1903	/* Implicit: mbx->mb10 = 0. */
1904}
1905
Giridhar Malavali9a069e12010-01-12 13:02:47 -08001906static void
Andrew Vasquez5ff1d582010-05-04 15:01:26 -07001907qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
1908{
1909 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1910 logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
1911 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1912 logio->vp_index = sp->fcport->vp_idx;
1913}
1914
1915static void
1916qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
1917{
1918 struct qla_hw_data *ha = sp->fcport->vha->hw;
1919
1920 mbx->entry_type = MBX_IOCB_TYPE;
1921 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
1922 mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE);
1923 if (HAS_EXTENDED_IDS(ha)) {
1924 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
1925 mbx->mb10 = cpu_to_le16(BIT_0);
1926 } else {
1927 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0);
1928 }
1929 mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma));
1930 mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
1931 mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
1932 mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
1933 mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
1934}
1935
1936static void
Madhuranath Iyengar38222632010-05-04 15:01:29 -07001937qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
1938{
1939 uint32_t flags;
1940 unsigned int lun;
1941 struct fc_port *fcport = sp->fcport;
1942 scsi_qla_host_t *vha = fcport->vha;
1943 struct qla_hw_data *ha = vha->hw;
1944 struct srb_ctx *ctx = sp->ctx;
1945 struct srb_iocb *iocb = ctx->u.iocb_cmd;
1946 struct req_que *req = vha->req;
1947
1948 flags = iocb->u.tmf.flags;
1949 lun = iocb->u.tmf.lun;
1950
1951 tsk->entry_type = TSK_MGMT_IOCB_TYPE;
1952 tsk->entry_count = 1;
1953 tsk->handle = MAKE_HANDLE(req->id, tsk->handle);
1954 tsk->nport_handle = cpu_to_le16(fcport->loop_id);
1955 tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
1956 tsk->control_flags = cpu_to_le32(flags);
1957 tsk->port_id[0] = fcport->d_id.b.al_pa;
1958 tsk->port_id[1] = fcport->d_id.b.area;
1959 tsk->port_id[2] = fcport->d_id.b.domain;
1960 tsk->vp_index = fcport->vp_idx;
1961
1962 if (flags == TCF_LUN_RESET) {
1963 int_to_scsilun(lun, &tsk->lun);
1964 host_to_fcp_swap((uint8_t *)&tsk->lun,
1965 sizeof(tsk->lun));
1966 }
1967}
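/*
 * Editor's sketch (annotation, not part of the driver): the LUN handling in
 * qla24xx_tm_iocb() and the start_scsi paths is a two-step conversion --
 * encode the LUN into the 8-byte SAM-2 format, then byte-swap it into FCP
 * wire order.  A hypothetical helper capturing that pattern:
 */
static inline void example_set_fcp_lun(unsigned int lun, struct scsi_lun *out)
{
	int_to_scsilun(lun, out);			/* SAM-2 encoding */
	host_to_fcp_swap((uint8_t *)out, sizeof(*out));	/* FCP wire order */
}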
1968
1969static void
Giridhar Malavali9a069e12010-01-12 13:02:47 -08001970qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
1971{
Madhuranath Iyengar49163922010-05-04 15:01:28 -07001972 struct fc_bsg_job *bsg_job = ((struct srb_ctx *)sp->ctx)->u.bsg_job;
Giridhar Malavali9a069e12010-01-12 13:02:47 -08001973
1974 els_iocb->entry_type = ELS_IOCB_TYPE;
1975 els_iocb->entry_count = 1;
1976 els_iocb->sys_define = 0;
1977 els_iocb->entry_status = 0;
1978 els_iocb->handle = sp->handle;
1979 els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1980 els_iocb->tx_dsd_count = __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
1981 els_iocb->vp_index = sp->fcport->vp_idx;
1982 els_iocb->sof_type = EST_SOFI3;
1983 els_iocb->rx_dsd_count = __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);
1984
Madhuranath Iyengar49163922010-05-04 15:01:28 -07001985 els_iocb->opcode =
1986 (((struct srb_ctx *)sp->ctx)->type == SRB_ELS_CMD_RPT) ?
1987 bsg_job->request->rqst_data.r_els.els_code :
1988 bsg_job->request->rqst_data.h_els.command_code;
Giridhar Malavali9a069e12010-01-12 13:02:47 -08001989 els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
1990 els_iocb->port_id[1] = sp->fcport->d_id.b.area;
1991 els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
1992 els_iocb->control_flags = 0;
1993 els_iocb->rx_byte_count =
1994 cpu_to_le32(bsg_job->reply_payload.payload_len);
1995 els_iocb->tx_byte_count =
1996 cpu_to_le32(bsg_job->request_payload.payload_len);
1997
1998 els_iocb->tx_address[0] = cpu_to_le32(LSD(sg_dma_address
1999 (bsg_job->request_payload.sg_list)));
2000 els_iocb->tx_address[1] = cpu_to_le32(MSD(sg_dma_address
2001 (bsg_job->request_payload.sg_list)));
2002 els_iocb->tx_len = cpu_to_le32(sg_dma_len
2003 (bsg_job->request_payload.sg_list));
2004
2005 els_iocb->rx_address[0] = cpu_to_le32(LSD(sg_dma_address
2006 (bsg_job->reply_payload.sg_list)));
2007 els_iocb->rx_address[1] = cpu_to_le32(MSD(sg_dma_address
2008 (bsg_job->reply_payload.sg_list)));
2009 els_iocb->rx_len = cpu_to_le32(sg_dma_len
2010 (bsg_job->reply_payload.sg_list));
2011}
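/*
 * Editor's sketch (annotation, not part of the driver): qla24xx_els_iocb()
 * and the CT/SCSI builders all emit 64-bit data segment descriptors as two
 * little-endian 32-bit halves plus a length.  The recurring triple, restated
 * as a hypothetical helper:
 */
static inline void example_set_dsd64(uint32_t *dsd, dma_addr_t dma,
    uint32_t len)
{
	*dsd++ = cpu_to_le32(LSD(dma));	/* low 32 bits of the address */
	*dsd++ = cpu_to_le32(MSD(dma));	/* high 32 bits of the address */
	*dsd = cpu_to_le32(len);	/* segment length in bytes */
}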
2012
2013static void
Harish Zunjarrao9bc4f4f2010-07-23 15:28:32 +05002014qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
2015{
2016 uint16_t avail_dsds;
2017 uint32_t *cur_dsd;
2018 struct scatterlist *sg;
2019 int index;
2020 uint16_t tot_dsds;
2021 scsi_qla_host_t *vha = sp->fcport->vha;
2022 struct qla_hw_data *ha = vha->hw;
2023 struct fc_bsg_job *bsg_job = ((struct srb_ctx *)sp->ctx)->u.bsg_job;
2024	int loop_iteration = 0;
2025 int cont_iocb_prsnt = 0;
2026 int entry_count = 1;
2027
2028 memset(ct_iocb, 0, sizeof(ms_iocb_entry_t));
2029 ct_iocb->entry_type = CT_IOCB_TYPE;
2030 ct_iocb->entry_status = 0;
2031 ct_iocb->handle1 = sp->handle;
2032 SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id);
2033 ct_iocb->status = __constant_cpu_to_le16(0);
2034 ct_iocb->control_flags = __constant_cpu_to_le16(0);
2035 ct_iocb->timeout = 0;
2036 ct_iocb->cmd_dsd_count =
2037 __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
2038 ct_iocb->total_dsd_count =
2039 __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt + 1);
2040 ct_iocb->req_bytecount =
2041 cpu_to_le32(bsg_job->request_payload.payload_len);
2042 ct_iocb->rsp_bytecount =
2043 cpu_to_le32(bsg_job->reply_payload.payload_len);
2044
2045 ct_iocb->dseg_req_address[0] = cpu_to_le32(LSD(sg_dma_address
2046 (bsg_job->request_payload.sg_list)));
2047 ct_iocb->dseg_req_address[1] = cpu_to_le32(MSD(sg_dma_address
2048 (bsg_job->request_payload.sg_list)));
2049 ct_iocb->dseg_req_length = ct_iocb->req_bytecount;
2050
2051 ct_iocb->dseg_rsp_address[0] = cpu_to_le32(LSD(sg_dma_address
2052 (bsg_job->reply_payload.sg_list)));
2053 ct_iocb->dseg_rsp_address[1] = cpu_to_le32(MSD(sg_dma_address
2054 (bsg_job->reply_payload.sg_list)));
2055 ct_iocb->dseg_rsp_length = ct_iocb->rsp_bytecount;
2056
2057 avail_dsds = 1;
2058 cur_dsd = (uint32_t *)ct_iocb->dseg_rsp_address;
2059 index = 0;
2060 tot_dsds = bsg_job->reply_payload.sg_cnt;
2061
2062 for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
2063 dma_addr_t sle_dma;
2064 cont_a64_entry_t *cont_pkt;
2065
2066 /* Allocate additional continuation packets? */
2067 if (avail_dsds == 0) {
2068 /*
2069 * Five DSDs are available in the Cont.
2070 * Type 1 IOCB.
2071 */
Giridhar Malavali0d2aa382011-11-18 09:02:21 -08002072 cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
2073 vha->hw->req_q_map[0]);
Harish Zunjarrao9bc4f4f2010-07-23 15:28:32 +05002074 cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2075 avail_dsds = 5;
2076 cont_iocb_prsnt = 1;
2077 entry_count++;
2078 }
2079
2080 sle_dma = sg_dma_address(sg);
2081 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
2082 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
2083 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
2084		loop_iteration++;
2085 avail_dsds--;
2086 }
2087 ct_iocb->entry_count = entry_count;
2088}
2089
2090static void
Giridhar Malavali9a069e12010-01-12 13:02:47 -08002091qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
2092{
2093 uint16_t avail_dsds;
2094 uint32_t *cur_dsd;
2095 struct scatterlist *sg;
2096 int index;
2097 uint16_t tot_dsds;
2098 scsi_qla_host_t *vha = sp->fcport->vha;
Giridhar Malavali0d2aa382011-11-18 09:02:21 -08002099 struct qla_hw_data *ha = vha->hw;
Madhuranath Iyengar49163922010-05-04 15:01:28 -07002100 struct fc_bsg_job *bsg_job = ((struct srb_ctx *)sp->ctx)->u.bsg_job;
Giridhar Malavali9a069e12010-01-12 13:02:47 -08002101	int loop_iteration = 0;
2102 int cont_iocb_prsnt = 0;
2103 int entry_count = 1;
2104
2105 ct_iocb->entry_type = CT_IOCB_TYPE;
2106 ct_iocb->entry_status = 0;
2107 ct_iocb->sys_define = 0;
2108 ct_iocb->handle = sp->handle;
2109
2110 ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2111 ct_iocb->vp_index = sp->fcport->vp_idx;
2112 ct_iocb->comp_status = __constant_cpu_to_le16(0);
2113
2114 ct_iocb->cmd_dsd_count =
2115 __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
2116 ct_iocb->timeout = 0;
2117 ct_iocb->rsp_dsd_count =
2118 __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);
2119 ct_iocb->rsp_byte_count =
2120 cpu_to_le32(bsg_job->reply_payload.payload_len);
2121 ct_iocb->cmd_byte_count =
2122 cpu_to_le32(bsg_job->request_payload.payload_len);
2123 ct_iocb->dseg_0_address[0] = cpu_to_le32(LSD(sg_dma_address
2124 (bsg_job->request_payload.sg_list)));
2125 ct_iocb->dseg_0_address[1] = cpu_to_le32(MSD(sg_dma_address
2126 (bsg_job->request_payload.sg_list)));
2127 ct_iocb->dseg_0_len = cpu_to_le32(sg_dma_len
2128 (bsg_job->request_payload.sg_list));
2129
2130 avail_dsds = 1;
2131 cur_dsd = (uint32_t *)ct_iocb->dseg_1_address;
2132 index = 0;
2133 tot_dsds = bsg_job->reply_payload.sg_cnt;
2134
2135 for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
2136 dma_addr_t sle_dma;
2137 cont_a64_entry_t *cont_pkt;
2138
2139 /* Allocate additional continuation packets? */
2140 if (avail_dsds == 0) {
2141 /*
2142 * Five DSDs are available in the Cont.
2143 * Type 1 IOCB.
2144 */
Giridhar Malavali0d2aa382011-11-18 09:02:21 -08002145 cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
2146 ha->req_q_map[0]);
Giridhar Malavali9a069e12010-01-12 13:02:47 -08002147 cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2148 avail_dsds = 5;
2149 cont_iocb_prsnt = 1;
2150 entry_count++;
2151 }
2152
2153 sle_dma = sg_dma_address(sg);
2154 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
2155 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
2156 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
2157		loop_iteration++;
2158 avail_dsds--;
2159 }
2160 ct_iocb->entry_count = entry_count;
2161}
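/*
 * Editor's sketch (annotation, not part of the driver): both CT IOCB
 * builders above hold one DSD in the base packet and chain five more per
 * Continuation Type 1 IOCB, which is what drives entry_count.  A
 * hypothetical closed form of that count:
 */
static inline uint16_t example_ct_entry_count(uint16_t tot_dsds)
{
	if (tot_dsds <= 1)
		return 1;
	/* one base IOCB plus ceil((tot_dsds - 1) / 5) continuations */
	return 1 + (tot_dsds - 1 + 4) / 5;
}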
2162
Andrew Vasquezac280b62009-08-20 11:06:05 -07002163int
2164qla2x00_start_sp(srb_t *sp)
2165{
2166 int rval;
2167 struct qla_hw_data *ha = sp->fcport->vha->hw;
2168 void *pkt;
2169 struct srb_ctx *ctx = sp->ctx;
2170 unsigned long flags;
2171
2172 rval = QLA_FUNCTION_FAILED;
2173 spin_lock_irqsave(&ha->hardware_lock, flags);
Giridhar Malavalid94d10e2010-07-23 15:28:23 +05002174 pkt = qla2x00_alloc_iocbs(sp->fcport->vha, sp);
Saurav Kashyap7c3df132011-07-14 12:00:13 -07002175 if (!pkt) {
2176 ql_log(ql_log_warn, sp->fcport->vha, 0x700c,
2177 "qla2x00_alloc_iocbs failed.\n");
Andrew Vasquezac280b62009-08-20 11:06:05 -07002178 goto done;
Saurav Kashyap7c3df132011-07-14 12:00:13 -07002179 }
Andrew Vasquezac280b62009-08-20 11:06:05 -07002180
2181 rval = QLA_SUCCESS;
2182 switch (ctx->type) {
2183 case SRB_LOGIN_CMD:
2184 IS_FWI2_CAPABLE(ha) ?
Andrew Vasquez5ff1d582010-05-04 15:01:26 -07002185 qla24xx_login_iocb(sp, pkt) :
Andrew Vasquezac280b62009-08-20 11:06:05 -07002186 qla2x00_login_iocb(sp, pkt);
2187 break;
2188 case SRB_LOGOUT_CMD:
2189 IS_FWI2_CAPABLE(ha) ?
Andrew Vasquez5ff1d582010-05-04 15:01:26 -07002190 qla24xx_logout_iocb(sp, pkt) :
Andrew Vasquezac280b62009-08-20 11:06:05 -07002191 qla2x00_logout_iocb(sp, pkt);
2192 break;
Giridhar Malavali9a069e12010-01-12 13:02:47 -08002193 case SRB_ELS_CMD_RPT:
2194 case SRB_ELS_CMD_HST:
2195 qla24xx_els_iocb(sp, pkt);
2196 break;
2197 case SRB_CT_CMD:
Harish Zunjarrao9bc4f4f2010-07-23 15:28:32 +05002198 IS_FWI2_CAPABLE(ha) ?
2199 qla24xx_ct_iocb(sp, pkt) :
2200 qla2x00_ct_iocb(sp, pkt);
Giridhar Malavali9a069e12010-01-12 13:02:47 -08002201 break;
Andrew Vasquez5ff1d582010-05-04 15:01:26 -07002202 case SRB_ADISC_CMD:
2203 IS_FWI2_CAPABLE(ha) ?
2204 qla24xx_adisc_iocb(sp, pkt) :
2205 qla2x00_adisc_iocb(sp, pkt);
2206 break;
Madhuranath Iyengar38222632010-05-04 15:01:29 -07002207 case SRB_TM_CMD:
2208 qla24xx_tm_iocb(sp, pkt);
2209 break;
Andrew Vasquezac280b62009-08-20 11:06:05 -07002210 default:
2211 break;
2212 }
2213
2214 wmb();
2215 qla2x00_start_iocbs(sp);
2216done:
2217 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2218 return rval;
2219}