/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2011 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"

#include <linux/blkdev.h>
#include <linux/delay.h>

#include <scsi/scsi_tcq.h>

static void qla25xx_set_que(srb_t *, struct rsp_que **);
/**
 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
 * @sp: SCSI command SRB
 *
 * Returns the proper CF_* direction based on CDB.
 */
static inline uint16_t
qla2x00_get_cmd_direction(srb_t *sp)
{
	uint16_t cflags;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);

	cflags = 0;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cflags = CF_WRITE;
		sp->fcport->vha->hw->qla_stats.output_bytes +=
		    scsi_bufflen(cmd);
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cflags = CF_READ;
		sp->fcport->vha->hw->qla_stats.input_bytes +=
		    scsi_bufflen(cmd);
	}
	return (cflags);
}

/**
 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 * Continuation Type 0 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_32(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 3) {
		iocbs += (dsds - 3) / 7;
		if ((dsds - 3) % 7)
			iocbs++;
	}
	return (iocbs);
}
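
/*
 * Worked example (illustrative, not from the original source): for
 * dsds = 12, the Command Type 2 IOCB holds the first 3 DSDs, leaving 9
 * for Continuation Type 0 IOCBs at 7 DSDs each: 1 + 9/7 = 2 entries,
 * plus one more for the 9 % 7 remainder, so the function returns 3.
 */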

/**
 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_64(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 2) {
		iocbs += (dsds - 2) / 5;
		if ((dsds - 2) % 5)
			iocbs++;
	}
	return (iocbs);
}
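
/*
 * Worked example (illustrative): for dsds = 12, the Command Type 3 IOCB
 * holds the first 2 DSDs, leaving 10 for Continuation Type 1 IOCBs at
 * 5 DSDs each: 1 + 10/5 = 3 entries with no remainder, so the function
 * returns 3.
 */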

/**
 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
 * @vha: HA context
 *
 * Returns a pointer to the Continuation Type 0 IOCB packet.
 */
static inline cont_entry_t *
qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
{
	cont_entry_t *cont_pkt;
	struct req_que *req = vha->req;
	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	*((uint32_t *)(&cont_pkt->entry_type)) =
	    __constant_cpu_to_le32(CONTINUE_TYPE);

	return (cont_pkt);
}
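
/*
 * Illustrative note (not in the original source): the request ring is a
 * fixed array of req->length entries, so advancing past the last entry
 * wraps ring_index back to 0 and resets ring_ptr to the ring base;
 * otherwise both simply step forward one entry, as above.
 */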

/**
 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
 * @vha: HA context
 * @req: request queue to place the continuation IOCB on
 *
 * Returns a pointer to the continuation type 1 IOCB packet.
 */
static inline cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
{
	cont_a64_entry_t *cont_pkt;

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_a64_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	*((uint32_t *)(&cont_pkt->entry_type)) =
	    __constant_cpu_to_le32(CONTINUE_A64_TYPE);

	return (cont_pkt);
}

static inline int
qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
{
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	uint8_t guard = scsi_host_get_guard(cmd->device->host);

	/* We only support T10 DIF right now */
	if (guard != SHOST_DIX_GUARD_CRC) {
		ql_dbg(ql_dbg_io, sp->fcport->vha, 0x3007,
		    "Unsupported guard: %d for cmd=%p.\n", guard, cmd);
		return 0;
	}

	/* We always use DIF bundling for best performance */
	*fw_prot_opts = 0;

	/* Translate SCSI opcode to a protection opcode */
	switch (scsi_get_prot_op(cmd)) {
	case SCSI_PROT_READ_STRIP:
		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
		break;
	case SCSI_PROT_WRITE_INSERT:
		*fw_prot_opts |= PO_MODE_DIF_INSERT;
		break;
	case SCSI_PROT_READ_INSERT:
		*fw_prot_opts |= PO_MODE_DIF_INSERT;
		break;
	case SCSI_PROT_WRITE_STRIP:
		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
		break;
	case SCSI_PROT_READ_PASS:
		*fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	case SCSI_PROT_WRITE_PASS:
		*fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	default:	/* Normal Request */
		*fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	}

	return scsi_prot_sg_count(cmd);
}
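
/*
 * Illustrative summary (not in the original source): for a write with
 * SCSI_PROT_WRITE_STRIP the firmware option becomes PO_MODE_DIF_REMOVE
 * and the return value is scsi_prot_sg_count(cmd); a command with no
 * protection operation falls through to PO_MODE_DIF_PASS and returns 0
 * because it carries no protection scatter/gather entries.
 */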

/*
 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 2 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 2 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	vha = sp->fcport->vha;
	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Three DSDs are available in the Command Type 2 IOCB */
	avail_dsds = 3;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		cont_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Seven DSDs are available in the Continuation
			 * Type 0 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
			cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
			avail_dsds = 7;
		}

		*cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}

/**
 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 3 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_A64_TYPE);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	vha = sp->fcport->vha;
	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Two DSDs are available in the Command Type 3 IOCB */
	avail_dsds = 2;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}

/**
 * qla2x00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2x00_start_scsi(srb_t *sp)
{
	int ret, nseg;
	unsigned long flags;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	uint32_t *clr_ptr;
	uint32_t index;
	uint32_t handle;
	cmd_entry_t *cmd_pkt;
	uint16_t cnt;
	uint16_t req_cnt;
	uint16_t tot_dsds;
	struct device_reg_2xxx __iomem *reg;
	struct qla_hw_data *ha;
	struct req_que *req;
	struct rsp_que *rsp;
	char tag[2];

	/* Setup device pointers. */
	ret = 0;
	vha = sp->fcport->vha;
	ha = vha->hw;
	reg = &ha->iobase->isp;
	cmd = GET_CMD_SP(sp);
	req = ha->req_q_map[0];
	rsp = ha->rsp_q_map[0];
	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS) {
			return (QLA_FUNCTION_FAILED);
		}
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		handle++;
		if (handle == MAX_OUTSTANDING_COMMANDS)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == MAX_OUTSTANDING_COMMANDS)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;

	/* Calculate the number of request entries needed. */
	req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
		/* If still no head room then bail out */
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;
	}

	/* Build command packet */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (cmd_entry_t *)req->ring_ptr;
	cmd_pkt->handle = handle;
	/* Zero out remaining portion of packet. */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set target ID and LUN number */
	SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
	cmd_pkt->lun = cpu_to_le16(cmd->device->lun);

	/* Update tagged queuing modifier */
	if (scsi_populate_tag_msg(cmd, tag)) {
		switch (tag[0]) {
		case HEAD_OF_QUEUE_TAG:
			cmd_pkt->control_flags =
			    __constant_cpu_to_le16(CF_HEAD_TAG);
			break;
		case ORDERED_QUEUE_TAG:
			cmd_pkt->control_flags =
			    __constant_cpu_to_le16(CF_ORDERED_TAG);
			break;
		default:
			cmd_pkt->control_flags =
			    __constant_cpu_to_le16(CF_SIMPLE_TAG);
			break;
		}
	}

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
	RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));	/* PCI Posting. */

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla2x00_process_response_queue(rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return (QLA_SUCCESS);

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (QLA_FUNCTION_FAILED);
}

/**
 * qla2x00_start_iocbs() - Execute the IOCB command
 */
static void
qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
{
	struct qla_hw_data *ha = vha->hw;
	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);

	if (IS_QLA82XX(ha)) {
		qla82xx_start_iocbs(vha);
	} else {
		/* Adjust ring index. */
		req->ring_index++;
		if (req->ring_index == req->length) {
			req->ring_index = 0;
			req->ring_ptr = req->ring;
		} else
			req->ring_ptr++;

		/* Set chip new ring index. */
		if (ha->mqenable || IS_QLA83XX(ha)) {
			WRT_REG_DWORD(req->req_q_in, req->ring_index);
			RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
		} else if (IS_FWI2_CAPABLE(ha)) {
			WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
			RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
		} else {
			WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
			    req->ring_index);
			RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
		}
	}
}

/**
 * qla2x00_marker() - Send a marker IOCB to the firmware.
 * @vha: HA context
 * @loop_id: loop ID
 * @lun: LUN
 * @type: marker modifier
 *
 * Can be called from both normal and interrupt context.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
static int
__qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
	struct rsp_que *rsp, uint16_t loop_id,
	uint16_t lun, uint8_t type)
{
	mrk_entry_t *mrk;
	struct mrk_entry_24xx *mrk24;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	mrk24 = NULL;
	req = ha->req_q_map[0];
	mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, 0);
	if (mrk == NULL) {
		ql_log(ql_log_warn, base_vha, 0x3026,
		    "Failed to allocate Marker IOCB.\n");

		return (QLA_FUNCTION_FAILED);
	}

	mrk->entry_type = MARKER_TYPE;
	mrk->modifier = type;
	if (type != MK_SYNC_ALL) {
		if (IS_FWI2_CAPABLE(ha)) {
			mrk24 = (struct mrk_entry_24xx *) mrk;
			mrk24->nport_handle = cpu_to_le16(loop_id);
			mrk24->lun[1] = LSB(lun);
			mrk24->lun[2] = MSB(lun);
			host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
			mrk24->vp_index = vha->vp_idx;
			mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
		} else {
			SET_TARGET_ID(ha, mrk->target, loop_id);
			mrk->lun = cpu_to_le16(lun);
		}
	}
	wmb();

	qla2x00_start_iocbs(vha, req);

	return (QLA_SUCCESS);
}

int
qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
	struct rsp_que *rsp, uint16_t loop_id, uint16_t lun,
	uint8_t type)
{
	int ret;
	unsigned long flags = 0;

	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
	ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type);
	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);

	return (ret);
}

/**
 * qla24xx_calc_iocbs() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
inline uint16_t
qla24xx_calc_iocbs(scsi_qla_host_t *vha, uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 1) {
		iocbs += (dsds - 1) / 5;
		if ((dsds - 1) % 5)
			iocbs++;
	}
	return iocbs;
}
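
/*
 * Worked example (illustrative): for dsds = 11, the command IOCB holds
 * a single DSD, leaving 10 for Continuation Type 1 IOCBs at 5 DSDs
 * each: 1 + 10/5 = 3 entries with no remainder, so the function
 * returns 3.
 */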

static inline int
qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
	uint16_t tot_dsds)
{
	uint32_t *cur_dsd = NULL;
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	struct scsi_cmnd *cmd;
	struct scatterlist *cur_seg;
	uint32_t *dsd_seg;
	void *next_dsd;
	uint8_t avail_dsds;
	uint8_t first_iocb = 1;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct ct6_dsd *ctx;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 6 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE_6);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return 0;
	}

	vha = sp->fcport->vha;
	ha = vha->hw;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->control_flags =
		    __constant_cpu_to_le16(CF_WRITE_DATA);
		ha->qla_stats.output_bytes += scsi_bufflen(cmd);
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->control_flags =
		    __constant_cpu_to_le16(CF_READ_DATA);
		ha->qla_stats.input_bytes += scsi_bufflen(cmd);
	}

	cur_seg = scsi_sglist(cmd);
	ctx = GET_CMD_CTX_SP(sp);

	while (tot_dsds) {
		avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
		    QLA_DSDS_PER_IOCB : tot_dsds;
		tot_dsds -= avail_dsds;
		dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;

		dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
		    struct dsd_dma, list);
		next_dsd = dsd_ptr->dsd_addr;
		list_del(&dsd_ptr->list);
		ha->gbl_dsd_avail--;
		list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
		ctx->dsd_use_cnt++;
		ha->gbl_dsd_inuse++;

		if (first_iocb) {
			first_iocb = 0;
			dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
			*dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			cmd_pkt->fcp_data_dseg_len = cpu_to_le32(dsd_list_len);
		} else {
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(dsd_list_len);
		}
		cur_dsd = (uint32_t *)next_dsd;
		while (avail_dsds) {
			dma_addr_t sle_dma;

			sle_dma = sg_dma_address(cur_seg);
			*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
			*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
			*cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
			cur_seg = sg_next(cur_seg);
			avail_dsds--;
		}
	}

	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE;
	return 0;
}

/*
 * qla24xx_calc_dsd_lists() - Determine number of DSD lists required
 * for Command Type 6.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of DSD lists needed to store @dsds.
 */
inline uint16_t
qla24xx_calc_dsd_lists(uint16_t dsds)
{
	uint16_t dsd_lists = 0;

	dsd_lists = (dsds/QLA_DSDS_PER_IOCB);
	if (dsds % QLA_DSDS_PER_IOCB)
		dsd_lists++;
	return dsd_lists;
}
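
/*
 * Worked example (illustrative): for dsds = 2 * QLA_DSDS_PER_IOCB + 1,
 * the division yields two full DSD lists and the non-zero remainder
 * adds a third, so the function returns 3.
 */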


/**
 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
 * IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 7 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
inline void
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
	uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;
	struct req_que *req;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 7 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE_7);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	vha = sp->fcport->vha;
	req = vha->req;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->task_mgmt_flags =
		    __constant_cpu_to_le16(TMF_WRITE_DATA);
		sp->fcport->vha->hw->qla_stats.output_bytes +=
		    scsi_bufflen(cmd);
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->task_mgmt_flags =
		    __constant_cpu_to_le16(TMF_READ_DATA);
		sp->fcport->vha->hw->qla_stats.input_bytes +=
		    scsi_bufflen(cmd);
	}

	/* One DSD is available in the Command Type 7 IOCB */
	avail_dsds = 1;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */

	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}

struct fw_dif_context {
	uint32_t ref_tag;
	uint16_t app_tag;
	uint8_t ref_tag_mask[4];	/* Validation/Replacement Mask*/
	uint8_t app_tag_mask[2];	/* Validation/Replacement Mask*/
};

/*
 * qla24xx_set_t10dif_tags - Extract Ref and App tags from SCSI command
 *
 */
static inline void
qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
    unsigned int protcnt)
{
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	scsi_qla_host_t *vha = shost_priv(cmd->device->host);

	switch (scsi_get_prot_type(cmd)) {
	case SCSI_PROT_DIF_TYPE0:
		/*
		 * No check for ql2xenablehba_err_chk, as it would be an
		 * I/O error if hba tag generation is not done.
		 */
		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));

		if (!qla2x00_hba_err_chk_enabled(sp))
			break;

		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;

	/*
	 * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
	 * match LBA in CDB + N
	 */
	case SCSI_PROT_DIF_TYPE2:
		pkt->app_tag = __constant_cpu_to_le16(0);
		pkt->app_tag_mask[0] = 0x0;
		pkt->app_tag_mask[1] = 0x0;

		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));

		if (!qla2x00_hba_err_chk_enabled(sp))
			break;

		/* enable ALL bytes of the ref tag */
		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;

	/* For Type 3 protection: 16 bit GUARD only */
	case SCSI_PROT_DIF_TYPE3:
		pkt->ref_tag_mask[0] = pkt->ref_tag_mask[1] =
			pkt->ref_tag_mask[2] = pkt->ref_tag_mask[3] =
							0x00;
		break;

	/*
	 * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
	 * 16 bit app tag.
	 */
	case SCSI_PROT_DIF_TYPE1:
		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));
		pkt->app_tag = __constant_cpu_to_le16(0);
		pkt->app_tag_mask[0] = 0x0;
		pkt->app_tag_mask[1] = 0x0;

		if (!qla2x00_hba_err_chk_enabled(sp))
			break;

		/* enable ALL bytes of the ref tag */
		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;
	}

	ql_dbg(ql_dbg_io, vha, 0x3009,
	    "Setting protection Tags: (BIG) ref tag = 0x%x, app tag = 0x%x, "
	    "prot SG count %d, cmd lba 0x%x, prot_type=%u cmd=%p.\n",
	    pkt->ref_tag, pkt->app_tag, protcnt, (int)scsi_get_lba(cmd),
	    scsi_get_prot_type(cmd), cmd);
}
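
/*
 * Illustrative example (not in the original source): for a Type 1
 * protected command at LBA 0x12345678 with HBA error checking enabled,
 * ref_tag is loaded with the low 32 bits of the LBA (0x12345678),
 * app_tag is zeroed with its mask cleared, and all four ref_tag_mask
 * bytes are set to 0xff so the firmware validates every byte of the
 * reference tag.
 */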

struct qla2_sgx {
	dma_addr_t		dma_addr;	/* OUT */
	uint32_t		dma_len;	/* OUT */

	uint32_t		tot_bytes;	/* IN */
	struct scatterlist	*cur_sg;	/* IN */

	/* for book keeping, bzero on initial invocation */
	uint32_t		bytes_consumed;
	uint32_t		num_bytes;
	uint32_t		tot_partial;

	/* for debugging */
	uint32_t		num_sg;
	srb_t			*sp;
};

static int
qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
	uint32_t *partial)
{
	struct scatterlist *sg;
	uint32_t cumulative_partial, sg_len;
	dma_addr_t sg_dma_addr;

	if (sgx->num_bytes == sgx->tot_bytes)
		return 0;

	sg = sgx->cur_sg;
	cumulative_partial = sgx->tot_partial;

	sg_dma_addr = sg_dma_address(sg);
	sg_len = sg_dma_len(sg);

	sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed;

	if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) {
		sgx->dma_len = (blk_sz - cumulative_partial);
		sgx->tot_partial = 0;
		sgx->num_bytes += blk_sz;
		*partial = 0;
	} else {
		sgx->dma_len = sg_len - sgx->bytes_consumed;
		sgx->tot_partial += sgx->dma_len;
		*partial = 1;
	}

	sgx->bytes_consumed += sgx->dma_len;

	if (sg_len == sgx->bytes_consumed) {
		sg = sg_next(sg);
		sgx->num_sg++;
		sgx->cur_sg = sg;
		sgx->bytes_consumed = 0;
	}

	return 1;
}
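
/*
 * Worked example (illustrative): with blk_sz = 512 and two 256-byte DMA
 * segments, the first call returns dma_len = 256 with *partial = 1, the
 * second completes the block (256 + 256 >= 512) and returns dma_len =
 * 256 with *partial = 0, and a third call returns 0 because num_bytes
 * has reached tot_bytes.
 */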

static int
qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
	uint32_t *dsd, uint16_t tot_dsds)
{
	void *next_dsd;
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg_prot;
	uint32_t *cur_dsd = dsd;
	uint16_t used_dsds = tot_dsds;

	uint32_t prot_int;
	uint32_t partial;
	struct qla2_sgx sgx;
	dma_addr_t sle_dma;
	uint32_t sle_dma_len, tot_prot_dma_len = 0;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);

	prot_int = cmd->device->sector_size;

	memset(&sgx, 0, sizeof(struct qla2_sgx));
	sgx.tot_bytes = scsi_bufflen(cmd);
	sgx.cur_sg = scsi_sglist(cmd);
	sgx.sp = sp;

	sg_prot = scsi_prot_sglist(cmd);

	while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {

		sle_dma = sgx.dma_addr;
		sle_dma_len = sgx.dma_len;
alloc_and_fill:
		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
					QLA_DSDS_PER_IOCB : used_dsds;
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;

			/* allocate tracking DS */
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr)
				return 1;

			/* allocate new list */
			dsd_ptr->dsd_addr = next_dsd =
			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
				&dsd_ptr->dsd_list_dma);

			if (!next_dsd) {
				/*
				 * Need to cleanup only this dsd_ptr, rest
				 * will be done by sp_free_dma()
				 */
				kfree(dsd_ptr);
				return 1;
			}

			list_add_tail(&dsd_ptr->list,
			    &((struct crc_context *)sp->u.scmd.ctx)->dsd_list);

			sp->flags |= SRB_CRC_CTX_DSD_VALID;

			/* add new list to cmd iocb or last list */
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = dsd_list_len;
			cur_dsd = (uint32_t *)next_dsd;
		}
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sle_dma_len);
		avail_dsds--;

		if (partial == 0) {
			/* Got a full protection interval */
			sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len;
			sle_dma_len = 8;

			tot_prot_dma_len += sle_dma_len;
			if (tot_prot_dma_len == sg_dma_len(sg_prot)) {
				tot_prot_dma_len = 0;
				sg_prot = sg_next(sg_prot);
			}

			partial = 1; /* So as to not re-enter this block */
			goto alloc_and_fill;
		}
	}
	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	return 0;
}

static int
qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
	uint16_t tot_dsds)
{
	void *next_dsd;
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg;
	uint32_t *cur_dsd = dsd;
	int i;
	uint16_t used_dsds = tot_dsds;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	scsi_qla_host_t *vha = shost_priv(cmd->device->host);

	uint8_t *cp;

	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t sle_dma;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
					QLA_DSDS_PER_IOCB : used_dsds;
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;

			/* allocate tracking DS */
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr)
				return 1;

			/* allocate new list */
			dsd_ptr->dsd_addr = next_dsd =
			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
				&dsd_ptr->dsd_list_dma);

			if (!next_dsd) {
				/*
				 * Need to cleanup only this dsd_ptr, rest
				 * will be done by sp_free_dma()
				 */
				kfree(dsd_ptr);
				return 1;
			}

			list_add_tail(&dsd_ptr->list,
			    &((struct crc_context *)sp->u.scmd.ctx)->dsd_list);

			sp->flags |= SRB_CRC_CTX_DSD_VALID;

			/* add new list to cmd iocb or last list */
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = dsd_list_len;
			cur_dsd = (uint32_t *)next_dsd;
		}
		sle_dma = sg_dma_address(sg);
		ql_dbg(ql_dbg_io, vha, 0x300a,
		    "sg entry %d - addr=0x%x 0x%x, " "len=%d for cmd=%p.\n",
		    i, LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg), cmd);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;

		if (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_PASS) {
			cp = page_address(sg_page(sg)) + sg->offset;
			ql_dbg(ql_dbg_io, vha, 0x300b,
			    "User data buffer=%p for cmd=%p.\n", cp, cmd);
		}
	}
	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	return 0;
}

static int
qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
	uint32_t *dsd,
	uint16_t tot_dsds)
{
	void *next_dsd;
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg;
	int i;
	struct scsi_cmnd *cmd;
	uint32_t *cur_dsd = dsd;
	uint16_t used_dsds = tot_dsds;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
	uint8_t *cp;

	cmd = GET_CMD_SP(sp);
	scsi_for_each_prot_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t sle_dma;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
					QLA_DSDS_PER_IOCB : used_dsds;
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;

			/* allocate tracking DS */
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr)
				return 1;

			/* allocate new list */
			dsd_ptr->dsd_addr = next_dsd =
			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
				&dsd_ptr->dsd_list_dma);

			if (!next_dsd) {
				/*
				 * Need to cleanup only this dsd_ptr, rest
				 * will be done by sp_free_dma()
				 */
				kfree(dsd_ptr);
				return 1;
			}

			list_add_tail(&dsd_ptr->list,
			    &((struct crc_context *)sp->u.scmd.ctx)->dsd_list);

			sp->flags |= SRB_CRC_CTX_DSD_VALID;

			/* add new list to cmd iocb or last list */
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = dsd_list_len;
			cur_dsd = (uint32_t *)next_dsd;
		}
		sle_dma = sg_dma_address(sg);
		if (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_PASS) {
			ql_dbg(ql_dbg_io, vha, 0x3027,
			    "%s(): %p, sg_entry %d - "
			    "addr=0x%x0x%x, len=%d.\n",
			    __func__, cur_dsd, i,
			    LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg));
		}
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));

		if (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_PASS) {
			cp = page_address(sg_page(sg)) + sg->offset;
			ql_dbg(ql_dbg_io, vha, 0x3028,
			    "%s(): Protection Data buffer = %p.\n", __func__,
			    cp);
		}
		avail_dsds--;
	}
	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	return 0;
}

/**
 * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
 * Type 6 IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type CRC_2 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
static inline int
qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
    uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
{
	uint32_t		*cur_dsd, *fcp_dl;
	scsi_qla_host_t		*vha;
	struct scsi_cmnd	*cmd;
	struct scatterlist	*cur_seg;
	int			sgc;
	uint32_t		total_bytes = 0;
	uint32_t		data_bytes;
	uint32_t		dif_bytes;
	uint8_t			bundling = 1;
	uint16_t		blk_size;
	uint8_t			*clr_ptr;
	struct crc_context	*crc_ctx_pkt = NULL;
	struct qla_hw_data	*ha;
	uint8_t			additional_fcpcdb_len;
	uint16_t		fcp_cmnd_len;
	struct fcp_cmnd		*fcp_cmnd;
	dma_addr_t		crc_ctx_dma;
	char			tag[2];

	cmd = GET_CMD_SP(sp);

	sgc = 0;
	/* Update entry type to indicate Command Type CRC_2 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE_CRC_2);

	vha = sp->fcport->vha;
	ha = vha->hw;

	/* No data transfer */
	data_bytes = scsi_bufflen(cmd);
	if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return QLA_SUCCESS;
	}

	cmd_pkt->vp_index = sp->fcport->vp_idx;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->control_flags =
		    __constant_cpu_to_le16(CF_WRITE_DATA);
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->control_flags =
		    __constant_cpu_to_le16(CF_READ_DATA);
	}

	if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
	    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP) ||
	    (scsi_get_prot_op(cmd) == SCSI_PROT_READ_STRIP) ||
	    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_INSERT))
		bundling = 0;

	/* Allocate CRC context from global pool */
	crc_ctx_pkt = sp->u.scmd.ctx =
	    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);

	if (!crc_ctx_pkt)
		goto crc_queuing_error;

	/* Zero out CTX area. */
	clr_ptr = (uint8_t *)crc_ctx_pkt;
	memset(clr_ptr, 0, sizeof(*crc_ctx_pkt));

	crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;

	sp->flags |= SRB_CRC_CTX_DMA_VALID;

	/* Set handle */
	crc_ctx_pkt->handle = cmd_pkt->handle;

	INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);

	qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *)
	    &crc_ctx_pkt->ref_tag, tot_prot_dsds);

	cmd_pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
	cmd_pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
	cmd_pkt->crc_context_len = CRC_CONTEXT_LEN_FW;

	/* Determine SCSI command length -- align to 4 byte boundary */
	if (cmd->cmd_len > 16) {
		additional_fcpcdb_len = cmd->cmd_len - 16;
		if ((cmd->cmd_len % 4) != 0) {
			/* SCSI cmd > 16 bytes must be multiple of 4 */
			goto crc_queuing_error;
		}
		fcp_cmnd_len = 12 + cmd->cmd_len + 4;
	} else {
		additional_fcpcdb_len = 0;
		fcp_cmnd_len = 12 + 16 + 4;
	}

	fcp_cmnd = &crc_ctx_pkt->fcp_cmnd;

	fcp_cmnd->additional_cdb_len = additional_fcpcdb_len;
	if (cmd->sc_data_direction == DMA_TO_DEVICE)
		fcp_cmnd->additional_cdb_len |= 1;
	else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
		fcp_cmnd->additional_cdb_len |= 2;

	int_to_scsilun(cmd->device->lun, &fcp_cmnd->lun);
	memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
	cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
	cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32(
	    LSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
	cmd_pkt->fcp_cmnd_dseg_address[1] = cpu_to_le32(
	    MSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
	fcp_cmnd->task_management = 0;

	/*
	 * Update tagged queuing modifier if using command tag queuing
	 */
	if (scsi_populate_tag_msg(cmd, tag)) {
		switch (tag[0]) {
		case HEAD_OF_QUEUE_TAG:
			fcp_cmnd->task_attribute = TSK_HEAD_OF_QUEUE;
			break;
		case ORDERED_QUEUE_TAG:
			fcp_cmnd->task_attribute = TSK_ORDERED;
			break;
		default:
			fcp_cmnd->task_attribute = 0;
			break;
		}
	} else {
		fcp_cmnd->task_attribute = 0;
	}

	cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */

	/* Compute dif len and adjust data len to include protection */
	dif_bytes = 0;
	blk_size = cmd->device->sector_size;
	dif_bytes = (data_bytes / blk_size) * 8;

	switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
	case SCSI_PROT_READ_INSERT:
	case SCSI_PROT_WRITE_STRIP:
		total_bytes = data_bytes;
		data_bytes += dif_bytes;
		break;

	case SCSI_PROT_READ_STRIP:
	case SCSI_PROT_WRITE_INSERT:
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		total_bytes = data_bytes + dif_bytes;
		break;
	default:
		BUG();
	}
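
	/*
	 * Worked example (illustrative): a 4096-byte transfer with
	 * 512-byte sectors gives dif_bytes = (4096 / 512) * 8 = 64.
	 * A WRITE_PASS then sends total_bytes = 4096 + 64 = 4160 on the
	 * wire, while a WRITE_STRIP keeps total_bytes = 4096 and grows
	 * data_bytes to 4160 on the DMA side.
	 */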

	if (!qla2x00_hba_err_chk_enabled(sp))
		fw_prot_opts |= 0x10; /* Disable Guard tag checking */

	if (!bundling) {
		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
	} else {
		/*
		 * Configure Bundling if we need to fetch interleaving
		 * protection PCI accesses
		 */
		fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
		crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
		crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
			tot_prot_dsds);
		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
	}

	/* Finish the common fields of CRC pkt */
	crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
	crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
	crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
	crc_ctx_pkt->guard_seed = __constant_cpu_to_le16(0);
	/* Fibre channel byte count */
	cmd_pkt->byte_count = cpu_to_le32(total_bytes);
	fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
	    additional_fcpcdb_len);
	*fcp_dl = htonl(total_bytes);

	if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return QLA_SUCCESS;
	}
	/* Walks data segments */

	cmd_pkt->control_flags |=
	    __constant_cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);

	if (!bundling && tot_prot_dsds) {
		if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
		    cur_dsd, tot_dsds))
			goto crc_queuing_error;
	} else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
	    (tot_dsds - tot_prot_dsds)))
		goto crc_queuing_error;

	if (bundling && tot_prot_dsds) {
		/* Walks dif segments */
		cur_seg = scsi_prot_sglist(cmd);
		cmd_pkt->control_flags |=
		    __constant_cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
		if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
		    tot_prot_dsds))
			goto crc_queuing_error;
	}
	return QLA_SUCCESS;

crc_queuing_error:
	/* Cleanup will be performed by the caller */

	return QLA_FUNCTION_FAILED;
}
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001428
1429/**
1430 * qla24xx_start_scsi() - Send a SCSI command to the ISP
1431 * @sp: command to send to the ISP
1432 *
Bjorn Helgaascc3ef7b2008-09-11 21:22:51 -07001433 * Returns non-zero if a failure occurred, else zero.
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001434 */
1435int
1436qla24xx_start_scsi(srb_t *sp)
1437{
FUJITA Tomonori385d70b2007-05-26 01:55:38 +09001438 int ret, nseg;
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001439 unsigned long flags;
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001440 uint32_t *clr_ptr;
1441 uint32_t index;
1442 uint32_t handle;
1443 struct cmd_type_7 *cmd_pkt;
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001444 uint16_t cnt;
1445 uint16_t req_cnt;
1446 uint16_t tot_dsds;
Anirban Chakraborty73208df2008-12-09 16:45:39 -08001447 struct req_que *req = NULL;
1448 struct rsp_que *rsp = NULL;
Giridhar Malavali9ba56b92012-02-09 11:15:36 -08001449 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
Andrew Vasquez444786d2009-01-05 11:18:10 -08001450 struct scsi_qla_host *vha = sp->fcport->vha;
Anirban Chakraborty73208df2008-12-09 16:45:39 -08001451 struct qla_hw_data *ha = vha->hw;
Andrew Vasquezff2fc422011-02-23 15:27:15 -08001452 char tag[2];
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001453
1454 /* Setup device pointers. */
1455 ret = 0;
Anirban Chakraborty73208df2008-12-09 16:45:39 -08001456
Anirban Chakraborty59e0b8b2009-06-03 09:55:19 -07001457 qla25xx_set_que(sp, &rsp);
1458 req = vha->req;
Anirban Chakraborty73208df2008-12-09 16:45:39 -08001459
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001460 /* So we know we haven't pci_map'ed anything yet */
1461 tot_dsds = 0;
1462
1463 /* Send marker if required */
Anirban Chakrabortye315cd22008-11-06 10:40:51 -08001464 if (vha->marker_needed != 0) {
Saurav Kashyap7c3df132011-07-14 12:00:13 -07001465 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1466 QLA_SUCCESS)
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001467 return QLA_FUNCTION_FAILED;
Anirban Chakrabortye315cd22008-11-06 10:40:51 -08001468 vha->marker_needed = 0;
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001469 }
1470
1471 /* Acquire ring specific lock */
Anirban Chakrabortye315cd22008-11-06 10:40:51 -08001472 spin_lock_irqsave(&ha->hardware_lock, flags);
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001473
1474 /* Check for room in outstanding command list. */
Anirban Chakrabortye315cd22008-11-06 10:40:51 -08001475 handle = req->current_outstanding_cmd;
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001476 for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
1477 handle++;
1478 if (handle == MAX_OUTSTANDING_COMMANDS)
1479 handle = 1;
Anirban Chakrabortye315cd22008-11-06 10:40:51 -08001480 if (!req->outstanding_cmds[handle])
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001481 break;
1482 }
Saurav Kashyap7c3df132011-07-14 12:00:13 -07001483 if (index == MAX_OUTSTANDING_COMMANDS) {
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001484 goto queuing_error;
Saurav Kashyap7c3df132011-07-14 12:00:13 -07001485 }
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001486
1487 /* Map the sg table so we have an accurate count of sg entries needed */
Seokmann Ju2c3dfe32007-07-05 13:16:51 -07001488 if (scsi_sg_count(cmd)) {
1489 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1490 scsi_sg_count(cmd), cmd->sc_data_direction);
1491 if (unlikely(!nseg))
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001492 goto queuing_error;
Seokmann Ju2c3dfe32007-07-05 13:16:51 -07001493 } else
1494 nseg = 0;
1495
FUJITA Tomonori385d70b2007-05-26 01:55:38 +09001496 tot_dsds = nseg;
Saurav Kashyap7c3df132011-07-14 12:00:13 -07001497 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
Anirban Chakrabortye315cd22008-11-06 10:40:51 -08001498 if (req->cnt < (req_cnt + 2)) {
Andrew Vasquez08029992009-03-24 09:07:55 -07001499 cnt = RD_REG_DWORD_RELAXED(req->req_q_out);
Anirban Chakraborty73208df2008-12-09 16:45:39 -08001500
Anirban Chakrabortye315cd22008-11-06 10:40:51 -08001501 if (req->ring_index < cnt)
1502 req->cnt = cnt - req->ring_index;
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001503 else
Anirban Chakrabortye315cd22008-11-06 10:40:51 -08001504 req->cnt = req->length -
1505 (req->ring_index - cnt);
Chetan Lokea6eb3c92012-05-15 14:34:09 -04001506 if (req->cnt < (req_cnt + 2))
1507 goto queuing_error;
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001508 }
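	/*
	 * Worked example for the free-slot math above (hypothetical
	 * values): with req->length = 2048, req->ring_index = 10 and the
	 * hardware out-pointer cnt = 5, five entries are still unconsumed:
	 *
	 *     req->cnt = req->length - (req->ring_index - cnt) = 2043
	 *
	 * whereas with cnt = 100 the out-pointer leads ring_index after a
	 * wrap and
	 *
	 *     req->cnt = cnt - req->ring_index = 90
	 */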
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001509
1510 /* Build command packet. */
Anirban Chakrabortye315cd22008-11-06 10:40:51 -08001511 req->current_outstanding_cmd = handle;
1512 req->outstanding_cmds[handle] = sp;
Andrew Vasquezcf53b062009-08-20 11:06:04 -07001513 sp->handle = handle;
Giridhar Malavali9ba56b92012-02-09 11:15:36 -08001514 cmd->host_scribble = (unsigned char *)(unsigned long)handle;
Anirban Chakrabortye315cd22008-11-06 10:40:51 -08001515 req->cnt -= req_cnt;
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001516
Anirban Chakrabortye315cd22008-11-06 10:40:51 -08001517 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
Anirban Chakraborty2afa19a2009-04-06 22:33:40 -07001518 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001519
1520 /* Zero out remaining portion of packet. */
James Bottomley72df8322005-10-28 14:41:19 -05001521 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001522 clr_ptr = (uint32_t *)cmd_pkt + 2;
1523 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1524 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1525
1526 /* Set NPORT-ID and LUN number*/
1527 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1528 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1529 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1530 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
Seokmann Ju2c3dfe32007-07-05 13:16:51 -07001531 cmd_pkt->vp_index = sp->fcport->vp_idx;
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001532
Giridhar Malavali9ba56b92012-02-09 11:15:36 -08001533 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
andrew.vasquez@qlogic.com0d4be122006-02-07 08:45:35 -08001534 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001535
Andrew Vasquezff2fc422011-02-23 15:27:15 -08001536 /* Update tagged queuing modifier -- default is TSK_SIMPLE (0). */
1537 if (scsi_populate_tag_msg(cmd, tag)) {
1538 switch (tag[0]) {
1539 case HEAD_OF_QUEUE_TAG:
1540 cmd_pkt->task = TSK_HEAD_OF_QUEUE;
1541 break;
1542 case ORDERED_QUEUE_TAG:
1543 cmd_pkt->task = TSK_ORDERED;
1544 break;
1545 }
1546 }
1547
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001548 /* Load SCSI command packet. */
1549 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
1550 host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
1551
FUJITA Tomonori385d70b2007-05-26 01:55:38 +09001552 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001553
1554 /* Build IOCB segments */
1555 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);
1556
1557 /* Set total data segment count. */
1558 cmd_pkt->entry_count = (uint8_t)req_cnt;
Anirban Chakraborty2afa19a2009-04-06 22:33:40 -07001559 /* Specify response queue number where completion should happen */
1560 cmd_pkt->entry_status = (uint8_t) rsp->id;
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001561 wmb();
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001562 /* Adjust ring index. */
Anirban Chakrabortye315cd22008-11-06 10:40:51 -08001563 req->ring_index++;
1564 if (req->ring_index == req->length) {
1565 req->ring_index = 0;
1566 req->ring_ptr = req->ring;
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001567 } else
Anirban Chakrabortye315cd22008-11-06 10:40:51 -08001568 req->ring_ptr++;
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001569
1570 sp->flags |= SRB_DMA_VALID;
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001571
1572 /* Set chip new ring index. */
Andrew Vasquez08029992009-03-24 09:07:55 -07001573 WRT_REG_DWORD(req->req_q_in, req->ring_index);
1574 RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001575
Andrew Vasquez4fdfefe2005-10-27 11:09:48 -07001576 /* Manage unprocessed RIO/ZIO commands in response queue. */
Anirban Chakrabortye315cd22008-11-06 10:40:51 -08001577 if (vha->flags.process_response_queue &&
Anirban Chakraborty73208df2008-12-09 16:45:39 -08001578 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
Anirban Chakraborty2afa19a2009-04-06 22:33:40 -07001579 qla24xx_process_response_queue(vha, rsp);
Andrew Vasquez4fdfefe2005-10-27 11:09:48 -07001580
Anirban Chakrabortye315cd22008-11-06 10:40:51 -08001581 spin_unlock_irqrestore(&ha->hardware_lock, flags);
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001582 return QLA_SUCCESS;
1583
1584queuing_error:
FUJITA Tomonori385d70b2007-05-26 01:55:38 +09001585 if (tot_dsds)
1586 scsi_dma_unmap(cmd);
1587
Anirban Chakrabortye315cd22008-11-06 10:40:51 -08001588 spin_unlock_irqrestore(&ha->hardware_lock, flags);
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001589
1590 return QLA_FUNCTION_FAILED;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001591}
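
/*
 * Dispatch sketch (assumes the usual ISP24xx isp_ops wiring): the
 * midlayer's queuecommand path builds an srb_t and reaches this routine
 * via ha->isp_ops->start_scsi(sp); on QLA_SUCCESS the command completes
 * asynchronously through the response queue.
 */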
Anirban Chakraborty68ca9492009-04-06 22:33:41 -07001592
Arun Easibad75002010-05-04 15:01:30 -07001593
1594/**
1595 * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
1596 * @sp: command to send to the ISP
1597 *
1598 * Returns non-zero if a failure occurred, else zero.
1599 */
1600int
1601qla24xx_dif_start_scsi(srb_t *sp)
1602{
1603 int nseg;
1604 unsigned long flags;
1605 uint32_t *clr_ptr;
1606 uint32_t index;
1607 uint32_t handle;
1608 uint16_t cnt;
1609 uint16_t req_cnt = 0;
1610 uint16_t tot_dsds;
1611 uint16_t tot_prot_dsds;
1612 uint16_t fw_prot_opts = 0;
1613 struct req_que *req = NULL;
1614 struct rsp_que *rsp = NULL;
Giridhar Malavali9ba56b92012-02-09 11:15:36 -08001615 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
Arun Easibad75002010-05-04 15:01:30 -07001616 struct scsi_qla_host *vha = sp->fcport->vha;
1617 struct qla_hw_data *ha = vha->hw;
1618 struct cmd_type_crc_2 *cmd_pkt;
1619 uint32_t status = 0;
1620
1621#define QDSS_GOT_Q_SPACE BIT_0
1622
Arun Easi0c470872010-07-23 15:28:38 +05001623	/* Only handle protection operations or CDBs longer than 16 bytes here */
1624 if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
1625 if (cmd->cmd_len <= 16)
1626 return qla24xx_start_scsi(sp);
1627 }
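	/*
	 * Past this point the command needs a Command Type CRC_2 IOCB:
	 * either a DIF/DIX protection operation, or a CDB longer than 16
	 * bytes whose extended FCP command IU travels in the CRC context.
	 */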
Arun Easibad75002010-05-04 15:01:30 -07001628
1629 /* Setup device pointers. */
1630
1631 qla25xx_set_que(sp, &rsp);
1632 req = vha->req;
1633
1634 /* So we know we haven't pci_map'ed anything yet */
1635 tot_dsds = 0;
1636
1637 /* Send marker if required */
1638 if (vha->marker_needed != 0) {
1639 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1640 QLA_SUCCESS)
1641 return QLA_FUNCTION_FAILED;
1642 vha->marker_needed = 0;
1643 }
1644
1645 /* Acquire ring specific lock */
1646 spin_lock_irqsave(&ha->hardware_lock, flags);
1647
1648 /* Check for room in outstanding command list. */
1649 handle = req->current_outstanding_cmd;
1650 for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
1651 handle++;
1652 if (handle == MAX_OUTSTANDING_COMMANDS)
1653 handle = 1;
1654 if (!req->outstanding_cmds[handle])
1655 break;
1656 }
1657
1658 if (index == MAX_OUTSTANDING_COMMANDS)
1659 goto queuing_error;
1660
1661 /* Compute number of required data segments */
1662 /* Map the sg table so we have an accurate count of sg entries needed */
1663 if (scsi_sg_count(cmd)) {
1664 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1665 scsi_sg_count(cmd), cmd->sc_data_direction);
1666 if (unlikely(!nseg))
1667 goto queuing_error;
1668 else
1669 sp->flags |= SRB_DMA_VALID;
Arun Easi8cb20492011-08-16 11:29:22 -07001670
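		/*
		 * For HBA-side insert/strip operations the buffer must be
		 * carved into logical blocks, so nseg is recounted below
		 * as the number of sector_size chunks rather than raw
		 * scatter-gather entries.
		 */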
1671 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1672 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1673 struct qla2_sgx sgx;
1674 uint32_t partial;
1675
1676 memset(&sgx, 0, sizeof(struct qla2_sgx));
1677 sgx.tot_bytes = scsi_bufflen(cmd);
1678 sgx.cur_sg = scsi_sglist(cmd);
1679 sgx.sp = sp;
1680
1681 nseg = 0;
1682 while (qla24xx_get_one_block_sg(
1683 cmd->device->sector_size, &sgx, &partial))
1684 nseg++;
1685 }
Arun Easibad75002010-05-04 15:01:30 -07001686 } else
1687 nseg = 0;
1688
1689 /* number of required data segments */
1690 tot_dsds = nseg;
1691
1692 /* Compute number of required protection segments */
1693 if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
1694 nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
1695 scsi_prot_sg_count(cmd), cmd->sc_data_direction);
1696 if (unlikely(!nseg))
1697 goto queuing_error;
1698 else
1699 sp->flags |= SRB_CRC_PROT_DMA_VALID;
Arun Easi8cb20492011-08-16 11:29:22 -07001700
1701 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1702 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1703 nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
1704 }
Arun Easibad75002010-05-04 15:01:30 -07001705 } else {
1706 nseg = 0;
1707 }
1708
1709 req_cnt = 1;
1710 /* Total Data and protection sg segment(s) */
1711 tot_prot_dsds = nseg;
1712 tot_dsds += nseg;
1713 if (req->cnt < (req_cnt + 2)) {
1714 cnt = RD_REG_DWORD_RELAXED(req->req_q_out);
1715
1716 if (req->ring_index < cnt)
1717 req->cnt = cnt - req->ring_index;
1718 else
1719 req->cnt = req->length -
1720 (req->ring_index - cnt);
Chetan Lokea6eb3c92012-05-15 14:34:09 -04001721 if (req->cnt < (req_cnt + 2))
1722 goto queuing_error;
Arun Easibad75002010-05-04 15:01:30 -07001723 }
1724
Arun Easibad75002010-05-04 15:01:30 -07001725 status |= QDSS_GOT_Q_SPACE;
1726
1727 /* Build header part of command packet (excluding the OPCODE). */
1728 req->current_outstanding_cmd = handle;
1729 req->outstanding_cmds[handle] = sp;
Arun Easi8cb20492011-08-16 11:29:22 -07001730 sp->handle = handle;
Giridhar Malavali9ba56b92012-02-09 11:15:36 -08001731 cmd->host_scribble = (unsigned char *)(unsigned long)handle;
Arun Easibad75002010-05-04 15:01:30 -07001732 req->cnt -= req_cnt;
1733
1734 /* Fill-in common area */
1735 cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
1736 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1737
1738 clr_ptr = (uint32_t *)cmd_pkt + 2;
1739 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1740
1741 /* Set NPORT-ID and LUN number*/
1742 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1743 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1744 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1745 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1746
Giridhar Malavali9ba56b92012-02-09 11:15:36 -08001747 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
Arun Easibad75002010-05-04 15:01:30 -07001748 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1749
1750 /* Total Data and protection segment(s) */
1751 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1752
1753 /* Build IOCB segments and adjust for data protection segments */
1754 if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
1755 req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
1756 QLA_SUCCESS)
1757 goto queuing_error;
1758
1759 cmd_pkt->entry_count = (uint8_t)req_cnt;
1760 /* Specify response queue number where completion should happen */
1761 cmd_pkt->entry_status = (uint8_t) rsp->id;
1762 cmd_pkt->timeout = __constant_cpu_to_le16(0);
1763 wmb();
1764
1765 /* Adjust ring index. */
1766 req->ring_index++;
1767 if (req->ring_index == req->length) {
1768 req->ring_index = 0;
1769 req->ring_ptr = req->ring;
1770 } else
1771 req->ring_ptr++;
1772
1773 /* Set chip new ring index. */
1774 WRT_REG_DWORD(req->req_q_in, req->ring_index);
1775 RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
1776
1777 /* Manage unprocessed RIO/ZIO commands in response queue. */
1778 if (vha->flags.process_response_queue &&
1779 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
1780 qla24xx_process_response_queue(vha, rsp);
1781
1782 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1783
1784 return QLA_SUCCESS;
1785
1786queuing_error:
1787 if (status & QDSS_GOT_Q_SPACE) {
1788 req->outstanding_cmds[handle] = NULL;
1789 req->cnt += req_cnt;
1790 }
1791 /* Cleanup will be performed by the caller (queuecommand) */
1792
1793 spin_unlock_irqrestore(&ha->hardware_lock, flags);
Arun Easibad75002010-05-04 15:01:30 -07001794 return QLA_FUNCTION_FAILED;
1795}
1796
1797
Anirban Chakraborty59e0b8b2009-06-03 09:55:19 -07001798static void qla25xx_set_que(srb_t *sp, struct rsp_que **rsp)
Anirban Chakraborty68ca9492009-04-06 22:33:41 -07001799{
Giridhar Malavali9ba56b92012-02-09 11:15:36 -08001800 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
Anirban Chakraborty68ca9492009-04-06 22:33:41 -07001801 struct qla_hw_data *ha = sp->fcport->vha->hw;
1802 int affinity = cmd->request->cpu;
1803
Anirban Chakraborty7163ea82009-08-05 09:18:40 -07001804 if (ha->flags.cpu_affinity_enabled && affinity >= 0 &&
Anirban Chakraborty59e0b8b2009-06-03 09:55:19 -07001805 affinity < ha->max_rsp_queues - 1)
Anirban Chakraborty68ca9492009-04-06 22:33:41 -07001806 *rsp = ha->rsp_q_map[affinity + 1];
Anirban Chakraborty59e0b8b2009-06-03 09:55:19 -07001807 else
Anirban Chakraborty68ca9492009-04-06 22:33:41 -07001808 *rsp = ha->rsp_q_map[0];
Anirban Chakraborty68ca9492009-04-06 22:33:41 -07001809}
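
/*
 * Example for the mapping above (hypothetical values): with CPU
 * affinity enabled, a command issued on CPU 2 against an HBA with
 * max_rsp_queues = 8 completes on rsp_q_map[3]; anything outside the
 * valid range falls back to the default queue rsp_q_map[0].
 */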
Andrew Vasquezac280b62009-08-20 11:06:05 -07001810
1811/* Generic Control-SRB manipulation functions. */
Giridhar Malavalid94d10e2010-07-23 15:28:23 +05001812void *
1813qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
Andrew Vasquezac280b62009-08-20 11:06:05 -07001814{
Andrew Vasquezac280b62009-08-20 11:06:05 -07001815 struct qla_hw_data *ha = vha->hw;
1816 struct req_que *req = ha->req_q_map[0];
1817 device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
1818 uint32_t index, handle;
1819 request_t *pkt;
1820 uint16_t cnt, req_cnt;
1821
1822 pkt = NULL;
1823 req_cnt = 1;
Giridhar Malavalid94d10e2010-07-23 15:28:23 +05001824 handle = 0;
1825
1826 if (!sp)
1827 goto skip_cmd_array;
Andrew Vasquezac280b62009-08-20 11:06:05 -07001828
1829 /* Check for room in outstanding command list. */
1830 handle = req->current_outstanding_cmd;
1831 for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
1832 handle++;
1833 if (handle == MAX_OUTSTANDING_COMMANDS)
1834 handle = 1;
1835 if (!req->outstanding_cmds[handle])
1836 break;
1837 }
Saurav Kashyap7c3df132011-07-14 12:00:13 -07001838 if (index == MAX_OUTSTANDING_COMMANDS) {
1839 ql_log(ql_log_warn, vha, 0x700b,
1840 "No room on oustanding cmd array.\n");
Andrew Vasquezac280b62009-08-20 11:06:05 -07001841 goto queuing_error;
Saurav Kashyap7c3df132011-07-14 12:00:13 -07001842 }
Andrew Vasquezac280b62009-08-20 11:06:05 -07001843
Giridhar Malavalid94d10e2010-07-23 15:28:23 +05001844 /* Prep command array. */
1845 req->current_outstanding_cmd = handle;
1846 req->outstanding_cmds[handle] = sp;
1847 sp->handle = handle;
1848
Andrew Vasquez57807902011-11-18 09:03:20 -08001849 /* Adjust entry-counts as needed. */
Giridhar Malavali9ba56b92012-02-09 11:15:36 -08001850 if (sp->type != SRB_SCSI_CMD)
1851 req_cnt = sp->iocbs;
Andrew Vasquez57807902011-11-18 09:03:20 -08001852
Giridhar Malavalid94d10e2010-07-23 15:28:23 +05001853skip_cmd_array:
Andrew Vasquezac280b62009-08-20 11:06:05 -07001854 /* Check for room on request queue. */
1855 if (req->cnt < req_cnt) {
Giridhar Malavali6246b8a2012-02-09 11:15:34 -08001856 if (ha->mqenable || IS_QLA83XX(ha))
Andrew Vasquezac280b62009-08-20 11:06:05 -07001857 cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
Giridhar Malavalid94d10e2010-07-23 15:28:23 +05001858 else if (IS_QLA82XX(ha))
1859 cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
Andrew Vasquezac280b62009-08-20 11:06:05 -07001860 else if (IS_FWI2_CAPABLE(ha))
1861 cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
1862 else
1863 cnt = qla2x00_debounce_register(
1864 ISP_REQ_Q_OUT(ha, &reg->isp));
1865
1866 if (req->ring_index < cnt)
1867 req->cnt = cnt - req->ring_index;
1868 else
1869 req->cnt = req->length -
1870 (req->ring_index - cnt);
1871 }
1872 if (req->cnt < req_cnt)
1873 goto queuing_error;
1874
1875 /* Prep packet */
Andrew Vasquezac280b62009-08-20 11:06:05 -07001876 req->cnt -= req_cnt;
Andrew Vasquezac280b62009-08-20 11:06:05 -07001877 pkt = req->ring_ptr;
1878 memset(pkt, 0, REQUEST_ENTRY_SIZE);
1879 pkt->entry_count = req_cnt;
1880 pkt->handle = handle;
Andrew Vasquezac280b62009-08-20 11:06:05 -07001881
1882queuing_error:
1883 return pkt;
1884}
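
/*
 * Usage sketch: qla2x00_start_sp() below calls this with
 * ha->hardware_lock held; a NULL return means no free handle or no
 * ring space, and the caller must give up without touching the ring.
 */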
1885
1886static void
Andrew Vasquezac280b62009-08-20 11:06:05 -07001887qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
1888{
Giridhar Malavali9ba56b92012-02-09 11:15:36 -08001889 struct srb_iocb *lio = &sp->u.iocb_cmd;
Andrew Vasquezac280b62009-08-20 11:06:05 -07001890
1891 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1892 logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
Madhuranath Iyengar49163922010-05-04 15:01:28 -07001893 if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
Andrew Vasquezac280b62009-08-20 11:06:05 -07001894 logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
Madhuranath Iyengar49163922010-05-04 15:01:28 -07001895 if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
Andrew Vasquezac280b62009-08-20 11:06:05 -07001896 logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
1897 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1898 logio->port_id[0] = sp->fcport->d_id.b.al_pa;
1899 logio->port_id[1] = sp->fcport->d_id.b.area;
1900 logio->port_id[2] = sp->fcport->d_id.b.domain;
1901 logio->vp_index = sp->fcport->vp_idx;
1902}
1903
1904static void
1905qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
1906{
1907 struct qla_hw_data *ha = sp->fcport->vha->hw;
Giridhar Malavali9ba56b92012-02-09 11:15:36 -08001908 struct srb_iocb *lio = &sp->u.iocb_cmd;
Andrew Vasquezac280b62009-08-20 11:06:05 -07001909 uint16_t opts;
1910
Giridhar Malavalib9637522010-05-28 15:08:15 -07001911 mbx->entry_type = MBX_IOCB_TYPE;
Andrew Vasquezac280b62009-08-20 11:06:05 -07001912 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
1913 mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
Madhuranath Iyengar49163922010-05-04 15:01:28 -07001914 opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0;
1915 opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0;
Andrew Vasquezac280b62009-08-20 11:06:05 -07001916 if (HAS_EXTENDED_IDS(ha)) {
1917 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
1918 mbx->mb10 = cpu_to_le16(opts);
1919 } else {
1920 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts);
1921 }
1922 mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
1923 mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
1924 sp->fcport->d_id.b.al_pa);
1925 mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
1926}
1927
1928static void
1929qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
1930{
1931 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1932 logio->control_flags =
1933 cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
1934 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1935 logio->port_id[0] = sp->fcport->d_id.b.al_pa;
1936 logio->port_id[1] = sp->fcport->d_id.b.area;
1937 logio->port_id[2] = sp->fcport->d_id.b.domain;
1938 logio->vp_index = sp->fcport->vp_idx;
1939}
1940
1941static void
1942qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
1943{
1944 struct qla_hw_data *ha = sp->fcport->vha->hw;
1945
Giridhar Malavalib9637522010-05-28 15:08:15 -07001946 mbx->entry_type = MBX_IOCB_TYPE;
Andrew Vasquezac280b62009-08-20 11:06:05 -07001947 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
1948 mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
1949 mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
1950 cpu_to_le16(sp->fcport->loop_id):
1951 cpu_to_le16(sp->fcport->loop_id << 8);
1952 mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
1953 mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
1954 sp->fcport->d_id.b.al_pa);
1955 mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
1956 /* Implicit: mbx->mbx10 = 0. */
1957}
1958
Giridhar Malavali9a069e12010-01-12 13:02:47 -08001959static void
Andrew Vasquez5ff1d582010-05-04 15:01:26 -07001960qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
1961{
1962 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1963 logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
1964 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1965 logio->vp_index = sp->fcport->vp_idx;
1966}
1967
1968static void
1969qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
1970{
1971 struct qla_hw_data *ha = sp->fcport->vha->hw;
1972
1973 mbx->entry_type = MBX_IOCB_TYPE;
1974 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
1975 mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE);
1976 if (HAS_EXTENDED_IDS(ha)) {
1977 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
1978 mbx->mb10 = cpu_to_le16(BIT_0);
1979 } else {
1980 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0);
1981 }
1982 mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma));
1983 mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
1984 mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
1985 mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
1986 mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
1987}
1988
1989static void
Madhuranath Iyengar38222632010-05-04 15:01:29 -07001990qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
1991{
1992 uint32_t flags;
1993 unsigned int lun;
1994 struct fc_port *fcport = sp->fcport;
1995 scsi_qla_host_t *vha = fcport->vha;
1996 struct qla_hw_data *ha = vha->hw;
Giridhar Malavali9ba56b92012-02-09 11:15:36 -08001997 struct srb_iocb *iocb = &sp->u.iocb_cmd;
Madhuranath Iyengar38222632010-05-04 15:01:29 -07001998 struct req_que *req = vha->req;
1999
2000 flags = iocb->u.tmf.flags;
2001 lun = iocb->u.tmf.lun;
2002
2003 tsk->entry_type = TSK_MGMT_IOCB_TYPE;
2004 tsk->entry_count = 1;
2005 tsk->handle = MAKE_HANDLE(req->id, tsk->handle);
2006 tsk->nport_handle = cpu_to_le16(fcport->loop_id);
2007 tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
2008 tsk->control_flags = cpu_to_le32(flags);
2009 tsk->port_id[0] = fcport->d_id.b.al_pa;
2010 tsk->port_id[1] = fcport->d_id.b.area;
2011 tsk->port_id[2] = fcport->d_id.b.domain;
2012 tsk->vp_index = fcport->vp_idx;
2013
2014 if (flags == TCF_LUN_RESET) {
2015 int_to_scsilun(lun, &tsk->lun);
2016 host_to_fcp_swap((uint8_t *)&tsk->lun,
2017 sizeof(tsk->lun));
2018 }
2019}
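
/*
 * Timeout note for qla24xx_tm_iocb() above (sketch): r_a_tov is kept
 * in 100 ms units, so ha->r_a_tov / 10 * 2 appears to program the
 * firmware's seconds-based timeout field to twice R_A_TOV, the
 * customary FC recovery window.
 */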
2020
2021static void
Giridhar Malavali9a069e12010-01-12 13:02:47 -08002022qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
2023{
Giridhar Malavali9ba56b92012-02-09 11:15:36 -08002024 struct fc_bsg_job *bsg_job = sp->u.bsg_job;
Giridhar Malavali9a069e12010-01-12 13:02:47 -08002025
2026 els_iocb->entry_type = ELS_IOCB_TYPE;
2027 els_iocb->entry_count = 1;
2028 els_iocb->sys_define = 0;
2029 els_iocb->entry_status = 0;
2030 els_iocb->handle = sp->handle;
2031 els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2032 els_iocb->tx_dsd_count = __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
2033 els_iocb->vp_index = sp->fcport->vp_idx;
2034 els_iocb->sof_type = EST_SOFI3;
2035 els_iocb->rx_dsd_count = __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);
2036
Madhuranath Iyengar49163922010-05-04 15:01:28 -07002037 els_iocb->opcode =
Giridhar Malavali9ba56b92012-02-09 11:15:36 -08002038 sp->type == SRB_ELS_CMD_RPT ?
Madhuranath Iyengar49163922010-05-04 15:01:28 -07002039 bsg_job->request->rqst_data.r_els.els_code :
2040 bsg_job->request->rqst_data.h_els.command_code;
Giridhar Malavali9a069e12010-01-12 13:02:47 -08002041 els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
2042 els_iocb->port_id[1] = sp->fcport->d_id.b.area;
2043 els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
2044 els_iocb->control_flags = 0;
2045 els_iocb->rx_byte_count =
2046 cpu_to_le32(bsg_job->reply_payload.payload_len);
2047 els_iocb->tx_byte_count =
2048 cpu_to_le32(bsg_job->request_payload.payload_len);
2049
2050 els_iocb->tx_address[0] = cpu_to_le32(LSD(sg_dma_address
2051 (bsg_job->request_payload.sg_list)));
2052 els_iocb->tx_address[1] = cpu_to_le32(MSD(sg_dma_address
2053 (bsg_job->request_payload.sg_list)));
2054 els_iocb->tx_len = cpu_to_le32(sg_dma_len
2055 (bsg_job->request_payload.sg_list));
2056
2057 els_iocb->rx_address[0] = cpu_to_le32(LSD(sg_dma_address
2058 (bsg_job->reply_payload.sg_list)));
2059 els_iocb->rx_address[1] = cpu_to_le32(MSD(sg_dma_address
2060 (bsg_job->reply_payload.sg_list)));
2061 els_iocb->rx_len = cpu_to_le32(sg_dma_len
2062 (bsg_job->reply_payload.sg_list));
2063}
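
/*
 * DSD addressing note: the tx/rx addresses above are 64-bit DMA
 * addresses split into little-endian 32-bit halves with LSD()/MSD()
 * (low and high dwords), matching the two-word address slots of the
 * ELS IOCB.
 */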
2064
2065static void
Harish Zunjarrao9bc4f4f2010-07-23 15:28:32 +05002066qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
2067{
2068 uint16_t avail_dsds;
2069 uint32_t *cur_dsd;
2070 struct scatterlist *sg;
2071 int index;
2072 uint16_t tot_dsds;
2073 scsi_qla_host_t *vha = sp->fcport->vha;
2074 struct qla_hw_data *ha = vha->hw;
Giridhar Malavali9ba56b92012-02-09 11:15:36 -08002075 struct fc_bsg_job *bsg_job = sp->u.bsg_job;
Harish Zunjarrao9bc4f4f2010-07-23 15:28:32 +05002076 int loop_iterartion = 0;
2077 int cont_iocb_prsnt = 0;
2078 int entry_count = 1;
2079
2080 memset(ct_iocb, 0, sizeof(ms_iocb_entry_t));
2081 ct_iocb->entry_type = CT_IOCB_TYPE;
2082 ct_iocb->entry_status = 0;
2083 ct_iocb->handle1 = sp->handle;
2084 SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id);
2085 ct_iocb->status = __constant_cpu_to_le16(0);
2086 ct_iocb->control_flags = __constant_cpu_to_le16(0);
2087 ct_iocb->timeout = 0;
2088 ct_iocb->cmd_dsd_count =
2089 __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
2090 ct_iocb->total_dsd_count =
2091 __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt + 1);
2092 ct_iocb->req_bytecount =
2093 cpu_to_le32(bsg_job->request_payload.payload_len);
2094 ct_iocb->rsp_bytecount =
2095 cpu_to_le32(bsg_job->reply_payload.payload_len);
2096
2097 ct_iocb->dseg_req_address[0] = cpu_to_le32(LSD(sg_dma_address
2098 (bsg_job->request_payload.sg_list)));
2099 ct_iocb->dseg_req_address[1] = cpu_to_le32(MSD(sg_dma_address
2100 (bsg_job->request_payload.sg_list)));
2101 ct_iocb->dseg_req_length = ct_iocb->req_bytecount;
2102
2103 ct_iocb->dseg_rsp_address[0] = cpu_to_le32(LSD(sg_dma_address
2104 (bsg_job->reply_payload.sg_list)));
2105 ct_iocb->dseg_rsp_address[1] = cpu_to_le32(MSD(sg_dma_address
2106 (bsg_job->reply_payload.sg_list)));
2107 ct_iocb->dseg_rsp_length = ct_iocb->rsp_bytecount;
2108
2109 avail_dsds = 1;
2110 cur_dsd = (uint32_t *)ct_iocb->dseg_rsp_address;
2111 index = 0;
2112 tot_dsds = bsg_job->reply_payload.sg_cnt;
2113
2114 for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
2115 dma_addr_t sle_dma;
2116 cont_a64_entry_t *cont_pkt;
2117
2118 /* Allocate additional continuation packets? */
2119 if (avail_dsds == 0) {
2120 /*
2121 * Five DSDs are available in the Cont.
2122 * Type 1 IOCB.
2123 */
Giridhar Malavali0d2aa382011-11-18 09:02:21 -08002124 cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
2125 vha->hw->req_q_map[0]);
Harish Zunjarrao9bc4f4f2010-07-23 15:28:32 +05002126 cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2127 avail_dsds = 5;
2128 cont_iocb_prsnt = 1;
2129 entry_count++;
2130 }
2131
2132 sle_dma = sg_dma_address(sg);
2133 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
2134 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
2135 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
2136 loop_iterartion++;
2137 avail_dsds--;
2138 }
2139 ct_iocb->entry_count = entry_count;
2140}
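
/*
 * Continuation sketch for the CT IOCB above: the first response DSD
 * rides in the IOCB itself (avail_dsds = 1); every further group of up
 * to five DSDs spills into a Continuation Type 1 IOCB, with entry_count
 * bumped so the firmware consumes the whole chain as one logical entry.
 */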
2141
2142static void
Giridhar Malavali9a069e12010-01-12 13:02:47 -08002143qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
2144{
2145 uint16_t avail_dsds;
2146 uint32_t *cur_dsd;
2147 struct scatterlist *sg;
2148 int index;
2149 uint16_t tot_dsds;
2150 scsi_qla_host_t *vha = sp->fcport->vha;
Giridhar Malavali0d2aa382011-11-18 09:02:21 -08002151 struct qla_hw_data *ha = vha->hw;
Giridhar Malavali9ba56b92012-02-09 11:15:36 -08002152 struct fc_bsg_job *bsg_job = sp->u.bsg_job;
Giridhar Malavali9a069e12010-01-12 13:02:47 -08002153 int loop_iterartion = 0;
2154 int cont_iocb_prsnt = 0;
2155 int entry_count = 1;
2156
2157 ct_iocb->entry_type = CT_IOCB_TYPE;
2158 ct_iocb->entry_status = 0;
2159 ct_iocb->sys_define = 0;
2160 ct_iocb->handle = sp->handle;
2161
2162 ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2163 ct_iocb->vp_index = sp->fcport->vp_idx;
2164 ct_iocb->comp_status = __constant_cpu_to_le16(0);
2165
2166 ct_iocb->cmd_dsd_count =
2167 __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
2168 ct_iocb->timeout = 0;
2169 ct_iocb->rsp_dsd_count =
2170 __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);
2171 ct_iocb->rsp_byte_count =
2172 cpu_to_le32(bsg_job->reply_payload.payload_len);
2173 ct_iocb->cmd_byte_count =
2174 cpu_to_le32(bsg_job->request_payload.payload_len);
2175 ct_iocb->dseg_0_address[0] = cpu_to_le32(LSD(sg_dma_address
2176 (bsg_job->request_payload.sg_list)));
2177 ct_iocb->dseg_0_address[1] = cpu_to_le32(MSD(sg_dma_address
2178 (bsg_job->request_payload.sg_list)));
2179 ct_iocb->dseg_0_len = cpu_to_le32(sg_dma_len
2180 (bsg_job->request_payload.sg_list));
2181
2182 avail_dsds = 1;
2183 cur_dsd = (uint32_t *)ct_iocb->dseg_1_address;
2184 index = 0;
2185 tot_dsds = bsg_job->reply_payload.sg_cnt;
2186
2187 for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
2188 dma_addr_t sle_dma;
2189 cont_a64_entry_t *cont_pkt;
2190
2191 /* Allocate additional continuation packets? */
2192 if (avail_dsds == 0) {
2193 /*
2194 * Five DSDs are available in the Cont.
2195 * Type 1 IOCB.
2196 */
Giridhar Malavali0d2aa382011-11-18 09:02:21 -08002197 cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
2198 ha->req_q_map[0]);
Giridhar Malavali9a069e12010-01-12 13:02:47 -08002199 cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2200 avail_dsds = 5;
2201 cont_iocb_prsnt = 1;
2202 entry_count++;
2203 }
2204
2205 sle_dma = sg_dma_address(sg);
2206 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
2207 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
2208 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
2209 loop_iterartion++;
2210 avail_dsds--;
2211 }
2212 ct_iocb->entry_count = entry_count;
2213}
2214
Giridhar Malavali5162cf02011-11-18 09:03:18 -08002215/**
2216 * qla82xx_start_scsi() - Send a SCSI command to the ISP
2217 * @sp: command to send to the ISP
2218 *
2219 * Returns non-zero if a failure occurred, else zero.
2220 */
2221int
2222qla82xx_start_scsi(srb_t *sp)
2223{
2224 int ret, nseg;
2225 unsigned long flags;
2226 struct scsi_cmnd *cmd;
2227 uint32_t *clr_ptr;
2228 uint32_t index;
2229 uint32_t handle;
2230 uint16_t cnt;
2231 uint16_t req_cnt;
2232 uint16_t tot_dsds;
2233 struct device_reg_82xx __iomem *reg;
2234 uint32_t dbval;
2235 uint32_t *fcp_dl;
2236 uint8_t additional_cdb_len;
2237 struct ct6_dsd *ctx;
2238 struct scsi_qla_host *vha = sp->fcport->vha;
2239 struct qla_hw_data *ha = vha->hw;
2240 struct req_que *req = NULL;
2241 struct rsp_que *rsp = NULL;
Giridhar Malavali9ba56b92012-02-09 11:15:36 -08002242 char tag[2];
Giridhar Malavali5162cf02011-11-18 09:03:18 -08002243
2244 /* Setup device pointers. */
2245 ret = 0;
2246 reg = &ha->iobase->isp82;
Giridhar Malavali9ba56b92012-02-09 11:15:36 -08002247 cmd = GET_CMD_SP(sp);
Giridhar Malavali5162cf02011-11-18 09:03:18 -08002248 req = vha->req;
2249 rsp = ha->rsp_q_map[0];
2250
2251 /* So we know we haven't pci_map'ed anything yet */
2252 tot_dsds = 0;
2253
2254 dbval = 0x04 | (ha->portnum << 5);
2255
2256 /* Send marker if required */
2257 if (vha->marker_needed != 0) {
2258 if (qla2x00_marker(vha, req,
2259 rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
2260 ql_log(ql_log_warn, vha, 0x300c,
2261 "qla2x00_marker failed for cmd=%p.\n", cmd);
2262 return QLA_FUNCTION_FAILED;
2263 }
2264 vha->marker_needed = 0;
2265 }
2266
2267 /* Acquire ring specific lock */
2268 spin_lock_irqsave(&ha->hardware_lock, flags);
2269
2270 /* Check for room in outstanding command list. */
2271 handle = req->current_outstanding_cmd;
2272 for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
2273 handle++;
2274 if (handle == MAX_OUTSTANDING_COMMANDS)
2275 handle = 1;
2276 if (!req->outstanding_cmds[handle])
2277 break;
2278 }
2279 if (index == MAX_OUTSTANDING_COMMANDS)
2280 goto queuing_error;
2281
2282 /* Map the sg table so we have an accurate count of sg entries needed */
2283 if (scsi_sg_count(cmd)) {
2284 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
2285 scsi_sg_count(cmd), cmd->sc_data_direction);
2286 if (unlikely(!nseg))
2287 goto queuing_error;
2288 } else
2289 nseg = 0;
2290
2291 tot_dsds = nseg;
2292
2293 if (tot_dsds > ql2xshiftctondsd) {
2294 struct cmd_type_6 *cmd_pkt;
2295 uint16_t more_dsd_lists = 0;
2296 struct dsd_dma *dsd_ptr;
2297 uint16_t i;
2298
2299 more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
2300 if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
2301 ql_dbg(ql_dbg_io, vha, 0x300d,
2302 "Num of DSD list %d is than %d for cmd=%p.\n",
2303 more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
2304 cmd);
2305 goto queuing_error;
2306 }
2307
2308 if (more_dsd_lists <= ha->gbl_dsd_avail)
2309 goto sufficient_dsds;
2310 else
2311 more_dsd_lists -= ha->gbl_dsd_avail;
2312
2313 for (i = 0; i < more_dsd_lists; i++) {
2314 dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
2315 if (!dsd_ptr) {
2316 ql_log(ql_log_fatal, vha, 0x300e,
2317 "Failed to allocate memory for dsd_dma "
2318 "for cmd=%p.\n", cmd);
2319 goto queuing_error;
2320 }
2321
2322 dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
2323 GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
2324 if (!dsd_ptr->dsd_addr) {
2325 kfree(dsd_ptr);
2326 ql_log(ql_log_fatal, vha, 0x300f,
2327 "Failed to allocate memory for dsd_addr "
2328 "for cmd=%p.\n", cmd);
2329 goto queuing_error;
2330 }
2331 list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
2332 ha->gbl_dsd_avail++;
2333 }
2334
2335sufficient_dsds:
2336 req_cnt = 1;
2337
2338 if (req->cnt < (req_cnt + 2)) {
2339 cnt = (uint16_t)RD_REG_DWORD_RELAXED(
2340 &reg->req_q_out[0]);
2341 if (req->ring_index < cnt)
2342 req->cnt = cnt - req->ring_index;
2343 else
2344 req->cnt = req->length -
2345 (req->ring_index - cnt);
Chetan Lokea6eb3c92012-05-15 14:34:09 -04002346 if (req->cnt < (req_cnt + 2))
2347 goto queuing_error;
Giridhar Malavali5162cf02011-11-18 09:03:18 -08002348 }
2349
Giridhar Malavali9ba56b92012-02-09 11:15:36 -08002350 ctx = sp->u.scmd.ctx =
2351 mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
2352 if (!ctx) {
Giridhar Malavali5162cf02011-11-18 09:03:18 -08002353 ql_log(ql_log_fatal, vha, 0x3010,
2354 "Failed to allocate ctx for cmd=%p.\n", cmd);
2355 goto queuing_error;
2356 }
Giridhar Malavali9ba56b92012-02-09 11:15:36 -08002357
Giridhar Malavali5162cf02011-11-18 09:03:18 -08002358 memset(ctx, 0, sizeof(struct ct6_dsd));
2359 ctx->fcp_cmnd = dma_pool_alloc(ha->fcp_cmnd_dma_pool,
2360 GFP_ATOMIC, &ctx->fcp_cmnd_dma);
2361 if (!ctx->fcp_cmnd) {
2362 ql_log(ql_log_fatal, vha, 0x3011,
2363 "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
2364 goto queuing_error_fcp_cmnd;
2365 }
2366
2367 /* Initialize the DSD list and dma handle */
2368 INIT_LIST_HEAD(&ctx->dsd_list);
2369 ctx->dsd_use_cnt = 0;
2370
2371 if (cmd->cmd_len > 16) {
2372 additional_cdb_len = cmd->cmd_len - 16;
2373 if ((cmd->cmd_len % 4) != 0) {
 2374				/* SCSI commands bigger than 16 bytes must be
 2375				 * a multiple of 4 bytes
 2376				 */
2377 ql_log(ql_log_warn, vha, 0x3012,
2378 "scsi cmd len %d not multiple of 4 "
2379 "for cmd=%p.\n", cmd->cmd_len, cmd);
2380 goto queuing_error_fcp_cmnd;
2381 }
2382 ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
2383 } else {
2384 additional_cdb_len = 0;
2385 ctx->fcp_cmnd_len = 12 + 16 + 4;
2386 }
2387
2388 cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
2389 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2390
2391 /* Zero out remaining portion of packet. */
2392 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
2393 clr_ptr = (uint32_t *)cmd_pkt + 2;
2394 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2395 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2396
2397 /* Set NPORT-ID and LUN number*/
2398 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2399 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2400 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2401 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2402 cmd_pkt->vp_index = sp->fcport->vp_idx;
2403
2404 /* Build IOCB segments */
2405 if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
2406 goto queuing_error_fcp_cmnd;
2407
Giridhar Malavali9ba56b92012-02-09 11:15:36 -08002408 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
Giridhar Malavali5162cf02011-11-18 09:03:18 -08002409 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
2410
2411 /* build FCP_CMND IU */
2412 memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd));
Giridhar Malavali9ba56b92012-02-09 11:15:36 -08002413 int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
Giridhar Malavali5162cf02011-11-18 09:03:18 -08002414 ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
2415
2416 if (cmd->sc_data_direction == DMA_TO_DEVICE)
2417 ctx->fcp_cmnd->additional_cdb_len |= 1;
2418 else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
2419 ctx->fcp_cmnd->additional_cdb_len |= 2;
2420
2421 /*
2422 * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
2423 */
2424 if (scsi_populate_tag_msg(cmd, tag)) {
2425 switch (tag[0]) {
2426 case HEAD_OF_QUEUE_TAG:
2427 ctx->fcp_cmnd->task_attribute =
2428 TSK_HEAD_OF_QUEUE;
2429 break;
2430 case ORDERED_QUEUE_TAG:
2431 ctx->fcp_cmnd->task_attribute =
2432 TSK_ORDERED;
2433 break;
2434 }
2435 }
2436
Saurav Kashyapa00f6292011-11-18 09:03:19 -08002437 /* Populate the FCP_PRIO. */
2438 if (ha->flags.fcp_prio_enabled)
2439 ctx->fcp_cmnd->task_attribute |=
2440 sp->fcport->fcp_prio << 3;
2441
Giridhar Malavali5162cf02011-11-18 09:03:18 -08002442 memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
2443
2444 fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
2445 additional_cdb_len);
2446 *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
2447
2448 cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
2449 cmd_pkt->fcp_cmnd_dseg_address[0] =
2450 cpu_to_le32(LSD(ctx->fcp_cmnd_dma));
2451 cmd_pkt->fcp_cmnd_dseg_address[1] =
2452 cpu_to_le32(MSD(ctx->fcp_cmnd_dma));
2453
2454 sp->flags |= SRB_FCP_CMND_DMA_VALID;
2455 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
2456 /* Set total data segment count. */
2457 cmd_pkt->entry_count = (uint8_t)req_cnt;
2458 /* Specify response queue number where
2459 * completion should happen
2460 */
2461 cmd_pkt->entry_status = (uint8_t) rsp->id;
2462 } else {
2463 struct cmd_type_7 *cmd_pkt;
2464 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
2465 if (req->cnt < (req_cnt + 2)) {
2466 cnt = (uint16_t)RD_REG_DWORD_RELAXED(
2467 &reg->req_q_out[0]);
2468 if (req->ring_index < cnt)
2469 req->cnt = cnt - req->ring_index;
2470 else
2471 req->cnt = req->length -
2472 (req->ring_index - cnt);
2473 }
2474 if (req->cnt < (req_cnt + 2))
2475 goto queuing_error;
2476
2477 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
2478 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2479
2480 /* Zero out remaining portion of packet. */
2481 /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
2482 clr_ptr = (uint32_t *)cmd_pkt + 2;
2483 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2484 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2485
2486 /* Set NPORT-ID and LUN number*/
2487 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2488 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2489 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2490 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2491 cmd_pkt->vp_index = sp->fcport->vp_idx;
2492
Giridhar Malavali9ba56b92012-02-09 11:15:36 -08002493 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
Giridhar Malavali5162cf02011-11-18 09:03:18 -08002494 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
Giridhar Malavali9ba56b92012-02-09 11:15:36 -08002495 sizeof(cmd_pkt->lun));
Giridhar Malavali5162cf02011-11-18 09:03:18 -08002496
2497 /*
2498 * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
2499 */
2500 if (scsi_populate_tag_msg(cmd, tag)) {
2501 switch (tag[0]) {
2502 case HEAD_OF_QUEUE_TAG:
2503 cmd_pkt->task = TSK_HEAD_OF_QUEUE;
2504 break;
2505 case ORDERED_QUEUE_TAG:
2506 cmd_pkt->task = TSK_ORDERED;
2507 break;
2508 }
2509 }
2510
Saurav Kashyapa00f6292011-11-18 09:03:19 -08002511 /* Populate the FCP_PRIO. */
2512 if (ha->flags.fcp_prio_enabled)
2513 cmd_pkt->task |= sp->fcport->fcp_prio << 3;
2514
Giridhar Malavali5162cf02011-11-18 09:03:18 -08002515 /* Load SCSI command packet. */
2516 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
2517 host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
2518
2519 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
2520
2521 /* Build IOCB segments */
2522 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);
2523
2524 /* Set total data segment count. */
2525 cmd_pkt->entry_count = (uint8_t)req_cnt;
2526 /* Specify response queue number where
2527 * completion should happen.
2528 */
2529 cmd_pkt->entry_status = (uint8_t) rsp->id;
2530
2531 }
2532 /* Build command packet. */
2533 req->current_outstanding_cmd = handle;
2534 req->outstanding_cmds[handle] = sp;
2535 sp->handle = handle;
Giridhar Malavali9ba56b92012-02-09 11:15:36 -08002536 cmd->host_scribble = (unsigned char *)(unsigned long)handle;
Giridhar Malavali5162cf02011-11-18 09:03:18 -08002537 req->cnt -= req_cnt;
2538 wmb();
2539
2540 /* Adjust ring index. */
2541 req->ring_index++;
2542 if (req->ring_index == req->length) {
2543 req->ring_index = 0;
2544 req->ring_ptr = req->ring;
2545 } else
2546 req->ring_ptr++;
2547
2548 sp->flags |= SRB_DMA_VALID;
2549
2550 /* Set chip new ring index. */
2551 /* write, read and verify logic */
2552 dbval = dbval | (req->id << 8) | (req->ring_index << 16);
2553 if (ql2xdbwr)
2554 qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
2555 else {
2556 WRT_REG_DWORD(
2557 (unsigned long __iomem *)ha->nxdb_wr_ptr,
2558 dbval);
2559 wmb();
2560 while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
2561 WRT_REG_DWORD(
2562 (unsigned long __iomem *)ha->nxdb_wr_ptr,
2563 dbval);
2564 wmb();
2565 }
2566 }
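	/*
	 * Doorbell note: the 82xx request-queue in-pointer is a memory
	 * doorbell; the write/read-back loop above re-posts dbval until
	 * the chip reflects it, guarding against a dropped doorbell
	 * write.
	 */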
2567
2568 /* Manage unprocessed RIO/ZIO commands in response queue. */
2569 if (vha->flags.process_response_queue &&
2570 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
2571 qla24xx_process_response_queue(vha, rsp);
2572
2573 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2574 return QLA_SUCCESS;
2575
2576queuing_error_fcp_cmnd:
2577 dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
2578queuing_error:
2579 if (tot_dsds)
2580 scsi_dma_unmap(cmd);
2581
Giridhar Malavali9ba56b92012-02-09 11:15:36 -08002582 if (sp->u.scmd.ctx) {
2583 mempool_free(sp->u.scmd.ctx, ha->ctx_mempool);
2584 sp->u.scmd.ctx = NULL;
Giridhar Malavali5162cf02011-11-18 09:03:18 -08002585 }
2586 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2587
2588 return QLA_FUNCTION_FAILED;
2589}
2590
Andrew Vasquezac280b62009-08-20 11:06:05 -07002591int
2592qla2x00_start_sp(srb_t *sp)
2593{
2594 int rval;
2595 struct qla_hw_data *ha = sp->fcport->vha->hw;
2596 void *pkt;
Andrew Vasquezac280b62009-08-20 11:06:05 -07002597 unsigned long flags;
2598
2599 rval = QLA_FUNCTION_FAILED;
2600 spin_lock_irqsave(&ha->hardware_lock, flags);
Giridhar Malavalid94d10e2010-07-23 15:28:23 +05002601 pkt = qla2x00_alloc_iocbs(sp->fcport->vha, sp);
Saurav Kashyap7c3df132011-07-14 12:00:13 -07002602 if (!pkt) {
2603 ql_log(ql_log_warn, sp->fcport->vha, 0x700c,
2604 "qla2x00_alloc_iocbs failed.\n");
Andrew Vasquezac280b62009-08-20 11:06:05 -07002605 goto done;
Saurav Kashyap7c3df132011-07-14 12:00:13 -07002606 }
Andrew Vasquezac280b62009-08-20 11:06:05 -07002607
2608 rval = QLA_SUCCESS;
Giridhar Malavali9ba56b92012-02-09 11:15:36 -08002609 switch (sp->type) {
Andrew Vasquezac280b62009-08-20 11:06:05 -07002610 case SRB_LOGIN_CMD:
2611 IS_FWI2_CAPABLE(ha) ?
Andrew Vasquez5ff1d582010-05-04 15:01:26 -07002612 qla24xx_login_iocb(sp, pkt) :
Andrew Vasquezac280b62009-08-20 11:06:05 -07002613 qla2x00_login_iocb(sp, pkt);
2614 break;
2615 case SRB_LOGOUT_CMD:
2616 IS_FWI2_CAPABLE(ha) ?
Andrew Vasquez5ff1d582010-05-04 15:01:26 -07002617 qla24xx_logout_iocb(sp, pkt) :
Andrew Vasquezac280b62009-08-20 11:06:05 -07002618 qla2x00_logout_iocb(sp, pkt);
2619 break;
Giridhar Malavali9a069e12010-01-12 13:02:47 -08002620 case SRB_ELS_CMD_RPT:
2621 case SRB_ELS_CMD_HST:
2622 qla24xx_els_iocb(sp, pkt);
2623 break;
2624 case SRB_CT_CMD:
Harish Zunjarrao9bc4f4f2010-07-23 15:28:32 +05002625 IS_FWI2_CAPABLE(ha) ?
Andrew Vasquez57807902011-11-18 09:03:20 -08002626 qla24xx_ct_iocb(sp, pkt) :
2627 qla2x00_ct_iocb(sp, pkt);
Giridhar Malavali9a069e12010-01-12 13:02:47 -08002628 break;
Andrew Vasquez5ff1d582010-05-04 15:01:26 -07002629 case SRB_ADISC_CMD:
2630 IS_FWI2_CAPABLE(ha) ?
2631 qla24xx_adisc_iocb(sp, pkt) :
2632 qla2x00_adisc_iocb(sp, pkt);
2633 break;
Madhuranath Iyengar38222632010-05-04 15:01:29 -07002634 case SRB_TM_CMD:
2635 qla24xx_tm_iocb(sp, pkt);
2636 break;
Andrew Vasquezac280b62009-08-20 11:06:05 -07002637 default:
2638 break;
2639 }
2640
2641 wmb();
Giridhar Malavali5162cf02011-11-18 09:03:18 -08002642 qla2x00_start_iocbs(sp->fcport->vha, ha->req_q_map[0]);
Andrew Vasquezac280b62009-08-20 11:06:05 -07002643done:
2644 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2645 return rval;
2646}
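
/*
 * Usage sketch for qla2x00_start_sp() (assumed caller, e.g. the async
 * login helpers in qla_init.c): allocate an srb, set sp->type to one of
 * the SRB_* values handled in the switch above, fill the matching
 * sp->u.iocb_cmd fields, then call qla2x00_start_sp(sp); the IOCB
 * builder is selected by sp->type and queued with qla2x00_start_iocbs().
 */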