/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2011 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"

#include <linux/blkdev.h>
#include <linux/delay.h>

#include <scsi/scsi_tcq.h>

static void qla25xx_set_que(srb_t *, struct rsp_que **);
/**
 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
 * @sp: SCSI Request Block for the command
 *
 * Returns the proper CF_* direction based on CDB.
 */
static inline uint16_t
qla2x00_get_cmd_direction(srb_t *sp)
{
	uint16_t cflags;

	cflags = 0;

	/* Set transfer direction */
	if (sp->cmd->sc_data_direction == DMA_TO_DEVICE) {
		cflags = CF_WRITE;
		sp->fcport->vha->hw->qla_stats.output_bytes +=
		    scsi_bufflen(sp->cmd);
	} else if (sp->cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cflags = CF_READ;
		sp->fcport->vha->hw->qla_stats.input_bytes +=
		    scsi_bufflen(sp->cmd);
	}
	return (cflags);
}

/**
 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 * Continuation Type 0 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_32(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 3) {
		iocbs += (dsds - 3) / 7;
		if ((dsds - 3) % 7)
			iocbs++;
	}
	return (iocbs);
}
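
/*
 * Worked example (editorial sketch, not driver code): the first three
 * DSDs travel in the Command Type 2 IOCB itself and each further group
 * of seven occupies one Continuation Type 0 IOCB, i.e.
 * 1 + ceil((dsds - 3) / 7) entries when dsds > 3.  The 64-bit variant
 * below follows the same pattern with 2 inline DSDs and 5 per
 * continuation IOCB.
 */
#if 0	/* illustrative only, never compiled */
	WARN_ON(qla2x00_calc_iocbs_32(3)  != 1);	/* fits inline */
	WARN_ON(qla2x00_calc_iocbs_32(10) != 2);	/* 3 + 7 */
	WARN_ON(qla2x00_calc_iocbs_32(11) != 3);	/* 3 + 7 + partial group */
#endif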

/**
 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_64(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 2) {
		iocbs += (dsds - 2) / 5;
		if ((dsds - 2) % 5)
			iocbs++;
	}
	return (iocbs);
}

/**
 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
 * @vha: HA context
 *
 * Returns a pointer to the Continuation Type 0 IOCB packet.
 */
static inline cont_entry_t *
qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
{
	cont_entry_t *cont_pkt;
	struct req_que *req = vha->req;
	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	*((uint32_t *)(&cont_pkt->entry_type)) =
	    __constant_cpu_to_le32(CONTINUE_TYPE);

	return (cont_pkt);
}

/**
 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
 * @vha: HA context
 * @req: request queue to take the IOCB from
 *
 * Returns a pointer to the continuation type 1 IOCB packet.
 */
static inline cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
{
	cont_a64_entry_t *cont_pkt;

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_a64_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	*((uint32_t *)(&cont_pkt->entry_type)) =
	    __constant_cpu_to_le32(CONTINUE_A64_TYPE);

	return (cont_pkt);
}
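
/*
 * Sketch (editorial, illustrative only): both prep helpers treat the
 * request queue as a circular ring; advancing past the last entry wraps
 * ring_index and ring_ptr back to the start.  Assuming a hypothetical
 * four-entry ring:
 */
#if 0	/* illustrative only, never compiled */
	req->length = 4;
	req->ring_index = 3;		/* sitting on the last slot */
	cont_pkt = qla2x00_prep_cont_type1_iocb(vha, req);
	/* now req->ring_index == 0 and cont_pkt == (void *)req->ring */
#endif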

static inline int
qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
{
	uint8_t guard = scsi_host_get_guard(sp->cmd->device->host);

	/* We only support T10 DIF right now */
	if (guard != SHOST_DIX_GUARD_CRC) {
		ql_dbg(ql_dbg_io, sp->fcport->vha, 0x3007,
		    "Unsupported guard: %d for cmd=%p.\n", guard, sp->cmd);
		return 0;
	}

	/* We always use DIF bundling for best performance */
	*fw_prot_opts = 0;

	/* Translate SCSI opcode to a protection opcode */
	switch (scsi_get_prot_op(sp->cmd)) {
	case SCSI_PROT_READ_STRIP:
		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
		break;
	case SCSI_PROT_WRITE_INSERT:
		*fw_prot_opts |= PO_MODE_DIF_INSERT;
		break;
	case SCSI_PROT_READ_INSERT:
		*fw_prot_opts |= PO_MODE_DIF_INSERT;
		break;
	case SCSI_PROT_WRITE_STRIP:
		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
		break;
	case SCSI_PROT_READ_PASS:
		*fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	case SCSI_PROT_WRITE_PASS:
		*fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	default:	/* Normal Request */
		*fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	}

	return scsi_prot_sg_count(sp->cmd);
}
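
/*
 * Usage sketch (editorial, illustrative only): a caller preparing a
 * protected command fetches the firmware protection options and the
 * protection scatter/gather count in one step before sizing the request:
 */
#if 0	/* illustrative only, never compiled */
	uint16_t fw_prot_opts = 0;
	uint16_t nprot_sg = qla24xx_configure_prot_mode(sp, &fw_prot_opts);
	/* nprot_sg == 0 means no separate protection SG list accompanies
	 * the command; fw_prot_opts carries the PO_MODE_DIF_* mode. */
#endif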

/**
 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 2 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = sp->cmd;

	/* Update entry type to indicate Command Type 2 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	vha = sp->fcport->vha;
	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Three DSDs are available in the Command Type 2 IOCB */
	avail_dsds = 3;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		cont_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Seven DSDs are available in the Continuation
			 * Type 0 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
			cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
			avail_dsds = 7;
		}

		*cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}
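
/*
 * Layout note (editorial sketch): each 32-bit DSD emitted above is a
 * two-word (address, length) pair, while the 64-bit builder below emits
 * three words per segment (address low, address high, length).  A
 * hypothetical struct view of one 32-bit entry:
 */
#if 0	/* illustrative only, never compiled */
	struct dsd32 {
		uint32_t address;	/* cpu_to_le32(sg_dma_address(sg)) */
		uint32_t length;	/* cpu_to_le32(sg_dma_len(sg)) */
	};
#endif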

/**
 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = sp->cmd;

	/* Update entry type to indicate Command Type 3 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_A64_TYPE);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	vha = sp->fcport->vha;
	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Two DSDs are available in the Command Type 3 IOCB */
	avail_dsds = 2;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}

/**
 * qla2x00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2x00_start_scsi(srb_t *sp)
{
	int ret, nseg;
	unsigned long flags;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	uint32_t *clr_ptr;
	uint32_t index;
	uint32_t handle;
	cmd_entry_t *cmd_pkt;
	uint16_t cnt;
	uint16_t req_cnt;
	uint16_t tot_dsds;
	struct device_reg_2xxx __iomem *reg;
	struct qla_hw_data *ha;
	struct req_que *req;
	struct rsp_que *rsp;
	char tag[2];

	/* Setup device pointers. */
	ret = 0;
	vha = sp->fcport->vha;
	ha = vha->hw;
	reg = &ha->iobase->isp;
	cmd = sp->cmd;
	req = ha->req_q_map[0];
	rsp = ha->rsp_q_map[0];
	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS) {
			return (QLA_FUNCTION_FAILED);
		}
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		handle++;
		if (handle == MAX_OUTSTANDING_COMMANDS)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == MAX_OUTSTANDING_COMMANDS)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;

	/* Calculate the number of request entries needed. */
	req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
	}
	if (req->cnt < (req_cnt + 2))
		goto queuing_error;

	/* Build command packet */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (cmd_entry_t *)req->ring_ptr;
	cmd_pkt->handle = handle;
	/* Zero out remaining portion of packet. */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set target ID and LUN number */
	SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
	cmd_pkt->lun = cpu_to_le16(sp->cmd->device->lun);

	/* Update tagged queuing modifier */
	if (scsi_populate_tag_msg(cmd, tag)) {
		switch (tag[0]) {
		case HEAD_OF_QUEUE_TAG:
			cmd_pkt->control_flags =
			    __constant_cpu_to_le16(CF_HEAD_TAG);
			break;
		case ORDERED_QUEUE_TAG:
			cmd_pkt->control_flags =
			    __constant_cpu_to_le16(CF_ORDERED_TAG);
			break;
		default:
			cmd_pkt->control_flags =
			    __constant_cpu_to_le16(CF_SIMPLE_TAG);
			break;
		}
	}

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
	RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));	/* PCI Posting. */

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla2x00_process_response_queue(rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return (QLA_SUCCESS);

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (QLA_FUNCTION_FAILED);
}
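
/*
 * Usage sketch (editorial, illustrative only): queuecommand paths reach
 * this function through the per-ISP ops table rather than directly, so
 * qla2x00_start_scsi() and qla24xx_start_scsi() below are interchangeable
 * behind the same interface:
 */
#if 0	/* illustrative only, never compiled */
	rval = ha->isp_ops->start_scsi(sp);
	if (rval != QLA_SUCCESS)
		/* requeue or fail the command */;
#endif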

/**
 * qla2x00_start_iocbs() - Execute the IOCB command
 * @vha: HA context
 * @req: request queue whose ring index was advanced
 */
static void
qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
{
	struct qla_hw_data *ha = vha->hw;
	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);

	if (IS_QLA82XX(ha)) {
		qla82xx_start_iocbs(vha);
	} else {
		/* Adjust ring index. */
		req->ring_index++;
		if (req->ring_index == req->length) {
			req->ring_index = 0;
			req->ring_ptr = req->ring;
		} else
			req->ring_ptr++;

		/* Set chip new ring index. */
		if (ha->mqenable || IS_QLA83XX(ha)) {
			WRT_REG_DWORD(req->req_q_in, req->ring_index);
			RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
		} else if (IS_FWI2_CAPABLE(ha)) {
			WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
			RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
		} else {
			WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
				req->ring_index);
			RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
		}
	}
}

/**
 * qla2x00_marker() - Send a marker IOCB to the firmware.
 * @vha: HA context
 * @req: request queue
 * @rsp: response queue
 * @loop_id: loop ID
 * @lun: LUN
 * @type: marker modifier
 *
 * Can be called from both normal and interrupt context.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
static int
__qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
			struct rsp_que *rsp, uint16_t loop_id,
			uint16_t lun, uint8_t type)
{
	mrk_entry_t *mrk;
	struct mrk_entry_24xx *mrk24;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	mrk24 = NULL;
	req = ha->req_q_map[0];
	mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, 0);
	if (mrk == NULL) {
		ql_log(ql_log_warn, base_vha, 0x3026,
		    "Failed to allocate Marker IOCB.\n");

		return (QLA_FUNCTION_FAILED);
	}

	mrk->entry_type = MARKER_TYPE;
	mrk->modifier = type;
	if (type != MK_SYNC_ALL) {
		if (IS_FWI2_CAPABLE(ha)) {
			mrk24 = (struct mrk_entry_24xx *) mrk;
			mrk24->nport_handle = cpu_to_le16(loop_id);
			mrk24->lun[1] = LSB(lun);
			mrk24->lun[2] = MSB(lun);
			host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
			mrk24->vp_index = vha->vp_idx;
			mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
		} else {
			SET_TARGET_ID(ha, mrk->target, loop_id);
			mrk->lun = cpu_to_le16(lun);
		}
	}
	wmb();

	qla2x00_start_iocbs(vha, req);

	return (QLA_SUCCESS);
}

int
qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
		struct rsp_que *rsp, uint16_t loop_id, uint16_t lun,
		uint8_t type)
{
	int ret;
	unsigned long flags = 0;

	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
	ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type);
	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);

	return (ret);
}
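
/*
 * Locking sketch (editorial, illustrative only): __qla2x00_marker()
 * assumes hardware_lock is already held, so paths that are mid-ring
 * manipulation call it directly while everyone else uses the locking
 * wrapper above:
 */
#if 0	/* illustrative only, never compiled */
	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
	/* ... ring already locked for other work ... */
	ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, MK_SYNC_ID_LUN);
	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
#endif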

/**
 * qla24xx_calc_iocbs() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @vha: HA context
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
inline uint16_t
qla24xx_calc_iocbs(scsi_qla_host_t *vha, uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 1) {
		iocbs += (dsds - 1) / 5;
		if ((dsds - 1) % 5)
			iocbs++;
	}
	return iocbs;
}
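
/*
 * Worked example (editorial sketch, not driver code): one DSD rides in
 * the command IOCB itself and each further group of five occupies one
 * Continuation Type 1 IOCB:
 */
#if 0	/* illustrative only, never compiled */
	WARN_ON(qla24xx_calc_iocbs(vha, 1) != 1);	/* fits inline */
	WARN_ON(qla24xx_calc_iocbs(vha, 6) != 2);	/* 1 + 5 */
	WARN_ON(qla24xx_calc_iocbs(vha, 7) != 3);	/* 1 + 5 + partial group */
#endif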

static inline int
qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
	uint16_t tot_dsds)
{
	uint32_t *cur_dsd = NULL;
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	struct scsi_cmnd *cmd;
	struct scatterlist *cur_seg;
	uint32_t *dsd_seg;
	void *next_dsd;
	uint8_t avail_dsds;
	uint8_t first_iocb = 1;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct ct6_dsd *ctx;

	cmd = sp->cmd;

	/* Update entry type to indicate Command Type 6 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
		__constant_cpu_to_le32(COMMAND_TYPE_6);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return 0;
	}

	vha = sp->fcport->vha;
	ha = vha->hw;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->control_flags =
		    __constant_cpu_to_le16(CF_WRITE_DATA);
		ha->qla_stats.output_bytes += scsi_bufflen(cmd);
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->control_flags =
		    __constant_cpu_to_le16(CF_READ_DATA);
		ha->qla_stats.input_bytes += scsi_bufflen(cmd);
	}

	cur_seg = scsi_sglist(cmd);
	ctx = sp->ctx;

	while (tot_dsds) {
		avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
		    QLA_DSDS_PER_IOCB : tot_dsds;
		tot_dsds -= avail_dsds;
		dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;

		dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
		    struct dsd_dma, list);
		next_dsd = dsd_ptr->dsd_addr;
		list_del(&dsd_ptr->list);
		ha->gbl_dsd_avail--;
		list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
		ctx->dsd_use_cnt++;
		ha->gbl_dsd_inuse++;

		if (first_iocb) {
			first_iocb = 0;
			dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
			*dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			cmd_pkt->fcp_data_dseg_len = cpu_to_le32(dsd_list_len);
		} else {
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(dsd_list_len);
		}
		cur_dsd = (uint32_t *)next_dsd;
		while (avail_dsds) {
			dma_addr_t sle_dma;

			sle_dma = sg_dma_address(cur_seg);
			*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
			*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
			*cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
			cur_seg = sg_next(cur_seg);
			avail_dsds--;
		}
	}

	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE;
	return 0;
}
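
/*
 * Structure sketch (editorial, illustrative only): Command Type 6 does
 * not embed the data DSDs in the IOCB itself; instead the IOCB points at
 * a chain of DSD lists pulled from ha->gbl_dsd_list, each holding up to
 * QLA_DSDS_PER_IOCB (address-low, address-high, length) triples plus one
 * extra slot that either links to the next list or carries the all-zero
 * terminator written above.
 */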

/*
 * qla24xx_calc_dsd_lists() - Determine number of DSD lists required
 * for Command Type 6.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of DSD lists needed to store @dsds.
 */
inline uint16_t
qla24xx_calc_dsd_lists(uint16_t dsds)
{
	uint16_t dsd_lists = 0;

	dsd_lists = (dsds/QLA_DSDS_PER_IOCB);
	if (dsds % QLA_DSDS_PER_IOCB)
		dsd_lists++;
	return dsd_lists;
}
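
/*
 * Worked example (editorial sketch, not driver code): a transfer needing
 * two full lists plus one leftover descriptor requires three DSD lists:
 */
#if 0	/* illustrative only, never compiled */
	WARN_ON(qla24xx_calc_dsd_lists(2 * QLA_DSDS_PER_IOCB + 1) != 3);
#endif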


/**
 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
 * IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 7 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
inline void
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;
	struct req_que *req;

	cmd = sp->cmd;

	/* Update entry type to indicate Command Type 7 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE_7);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	vha = sp->fcport->vha;
	req = vha->req;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->task_mgmt_flags =
		    __constant_cpu_to_le16(TMF_WRITE_DATA);
		sp->fcport->vha->hw->qla_stats.output_bytes +=
		    scsi_bufflen(sp->cmd);
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->task_mgmt_flags =
		    __constant_cpu_to_le16(TMF_READ_DATA);
		sp->fcport->vha->hw->qla_stats.input_bytes +=
		    scsi_bufflen(sp->cmd);
	}

	/* One DSD is available in the Command Type 7 IOCB */
	avail_dsds = 1;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */

	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}

struct fw_dif_context {
	uint32_t ref_tag;
	uint16_t app_tag;
	uint8_t ref_tag_mask[4];	/* Validation/Replacement Mask */
	uint8_t app_tag_mask[2];	/* Validation/Replacement Mask */
};

/*
 * qla24xx_set_t10dif_tags() - Extract Ref and App tags from SCSI command
 *
 */
static inline void
qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
    unsigned int protcnt)
{
	struct scsi_cmnd *cmd = sp->cmd;
	scsi_qla_host_t *vha = shost_priv(cmd->device->host);

	switch (scsi_get_prot_type(cmd)) {
	case SCSI_PROT_DIF_TYPE0:
		/*
		 * No check for ql2xenablehba_err_chk, as it would be an
		 * I/O error if hba tag generation is not done.
		 */
		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));

		if (!qla2x00_hba_err_chk_enabled(sp))
			break;

		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;

	/*
	 * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
	 * match LBA in CDB + N
	 */
	case SCSI_PROT_DIF_TYPE2:
		pkt->app_tag = __constant_cpu_to_le16(0);
		pkt->app_tag_mask[0] = 0x0;
		pkt->app_tag_mask[1] = 0x0;

		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));

		if (!qla2x00_hba_err_chk_enabled(sp))
			break;

		/* enable ALL bytes of the ref tag */
		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;

	/* For Type 3 protection: 16 bit GUARD only */
	case SCSI_PROT_DIF_TYPE3:
		pkt->ref_tag_mask[0] = pkt->ref_tag_mask[1] =
			pkt->ref_tag_mask[2] = pkt->ref_tag_mask[3] =
			0x00;
		break;

	/*
	 * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
	 * 16 bit app tag.
	 */
	case SCSI_PROT_DIF_TYPE1:
		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));
		pkt->app_tag = __constant_cpu_to_le16(0);
		pkt->app_tag_mask[0] = 0x0;
		pkt->app_tag_mask[1] = 0x0;

		if (!qla2x00_hba_err_chk_enabled(sp))
			break;

		/* enable ALL bytes of the ref tag */
		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;
	}

	ql_dbg(ql_dbg_io, vha, 0x3009,
	    "Setting protection Tags: (BIG) ref tag = 0x%x, app tag = 0x%x, "
	    "prot SG count %d, cmd lba 0x%x, prot_type=%u cmd=%p.\n",
	    pkt->ref_tag, pkt->app_tag, protcnt, (int)scsi_get_lba(cmd),
	    scsi_get_prot_type(cmd), cmd);
}
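
/*
 * Editorial note (illustrative, not driver code): for DIF Type 1 and
 * Type 2 the initial reference tag is the low 32 bits of the command's
 * starting LBA, so a command at LBA 0x12345 is queued with ref_tag
 * 0x12345 and, when HBA error checking is enabled, an all-ones
 * ref_tag_mask telling the firmware to validate/replace every ref tag
 * byte.
 */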

struct qla2_sgx {
	dma_addr_t		dma_addr;	/* OUT */
	uint32_t		dma_len;	/* OUT */

	uint32_t		tot_bytes;	/* IN */
	struct scatterlist	*cur_sg;	/* IN */

	/* for book keeping, bzero on initial invocation */
	uint32_t		bytes_consumed;
	uint32_t		num_bytes;
	uint32_t		tot_partial;

	/* for debugging */
	uint32_t		num_sg;
	srb_t			*sp;
};

static int
qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
	uint32_t *partial)
{
	struct scatterlist *sg;
	uint32_t cumulative_partial, sg_len;
	dma_addr_t sg_dma_addr;

	if (sgx->num_bytes == sgx->tot_bytes)
		return 0;

	sg = sgx->cur_sg;
	cumulative_partial = sgx->tot_partial;

	sg_dma_addr = sg_dma_address(sg);
	sg_len = sg_dma_len(sg);

	sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed;

	if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) {
		sgx->dma_len = (blk_sz - cumulative_partial);
		sgx->tot_partial = 0;
		sgx->num_bytes += blk_sz;
		*partial = 0;
	} else {
		sgx->dma_len = sg_len - sgx->bytes_consumed;
		sgx->tot_partial += sgx->dma_len;
		*partial = 1;
	}

	sgx->bytes_consumed += sgx->dma_len;

	if (sg_len == sgx->bytes_consumed) {
		sg = sg_next(sg);
		sgx->num_sg++;
		sgx->cur_sg = sg;
		sgx->bytes_consumed = 0;
	}

	return 1;
}
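
/*
 * Usage sketch (editorial, illustrative only): the helper above slices
 * the data scatterlist into protection-interval-sized pieces; with a
 * 512-byte sector a 1024-byte SG element yields two full intervals,
 * while a 256-byte element yields a partial that is merged with the
 * next element.  A caller loops until the transfer is consumed:
 */
#if 0	/* illustrative only, never compiled */
	struct qla2_sgx sgx;
	uint32_t partial;

	memset(&sgx, 0, sizeof(sgx));
	sgx.tot_bytes = scsi_bufflen(cmd);
	sgx.cur_sg = scsi_sglist(cmd);
	while (qla24xx_get_one_block_sg(cmd->device->sector_size, &sgx,
	    &partial)) {
		/* emit a DSD for sgx.dma_addr / sgx.dma_len here */
	}
#endif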

static int
qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
	uint32_t *dsd, uint16_t tot_dsds)
{
	void *next_dsd;
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg_prot;
	uint32_t *cur_dsd = dsd;
	uint16_t used_dsds = tot_dsds;

	uint32_t prot_int;
	uint32_t partial;
	struct qla2_sgx sgx;
	dma_addr_t sle_dma;
	uint32_t sle_dma_len, tot_prot_dma_len = 0;
	struct scsi_cmnd *cmd = sp->cmd;

	prot_int = cmd->device->sector_size;

	memset(&sgx, 0, sizeof(struct qla2_sgx));
	sgx.tot_bytes = scsi_bufflen(sp->cmd);
	sgx.cur_sg = scsi_sglist(sp->cmd);
	sgx.sp = sp;

	sg_prot = scsi_prot_sglist(sp->cmd);

	while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {

		sle_dma = sgx.dma_addr;
		sle_dma_len = sgx.dma_len;
alloc_and_fill:
		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
					QLA_DSDS_PER_IOCB : used_dsds;
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;

			/* allocate tracking DS */
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr)
				return 1;

			/* allocate new list */
			dsd_ptr->dsd_addr = next_dsd =
			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
				&dsd_ptr->dsd_list_dma);

			if (!next_dsd) {
				/*
				 * Need to cleanup only this dsd_ptr, rest
				 * will be done by sp_free_dma()
				 */
				kfree(dsd_ptr);
				return 1;
			}

			list_add_tail(&dsd_ptr->list,
			    &((struct crc_context *)sp->ctx)->dsd_list);

			sp->flags |= SRB_CRC_CTX_DSD_VALID;

			/* add new list to cmd iocb or last list */
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = dsd_list_len;
			cur_dsd = (uint32_t *)next_dsd;
		}
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sle_dma_len);
		avail_dsds--;

		if (partial == 0) {
			/* Got a full protection interval */
			sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len;
			sle_dma_len = 8;

			tot_prot_dma_len += sle_dma_len;
			if (tot_prot_dma_len == sg_dma_len(sg_prot)) {
				tot_prot_dma_len = 0;
				sg_prot = sg_next(sg_prot);
			}

			partial = 1; /* So as to not re-enter this block */
			goto alloc_and_fill;
		}
	}
	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	return 0;
}

static int
qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
	uint16_t tot_dsds)
{
	void *next_dsd;
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg;
	uint32_t *cur_dsd = dsd;
	int i;
	uint16_t used_dsds = tot_dsds;
	scsi_qla_host_t *vha = shost_priv(sp->cmd->device->host);

	uint8_t *cp;

	scsi_for_each_sg(sp->cmd, sg, tot_dsds, i) {
		dma_addr_t sle_dma;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
					QLA_DSDS_PER_IOCB : used_dsds;
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;

			/* allocate tracking DS */
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr)
				return 1;

			/* allocate new list */
			dsd_ptr->dsd_addr = next_dsd =
			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
				&dsd_ptr->dsd_list_dma);

			if (!next_dsd) {
				/*
				 * Need to cleanup only this dsd_ptr, rest
				 * will be done by sp_free_dma()
				 */
				kfree(dsd_ptr);
				return 1;
			}

			list_add_tail(&dsd_ptr->list,
			    &((struct crc_context *)sp->ctx)->dsd_list);

			sp->flags |= SRB_CRC_CTX_DSD_VALID;

			/* add new list to cmd iocb or last list */
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = dsd_list_len;
			cur_dsd = (uint32_t *)next_dsd;
		}
		sle_dma = sg_dma_address(sg);
		ql_dbg(ql_dbg_io, vha, 0x300a,
		    "sg entry %d - addr=0x%x 0x%x, len=%d for cmd=%p.\n",
		    i, LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg),
		    sp->cmd);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;

		if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) {
			cp = page_address(sg_page(sg)) + sg->offset;
			ql_dbg(ql_dbg_io, vha, 0x300b,
			    "User data buffer=%p for cmd=%p.\n", cp, sp->cmd);
		}
	}
	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	return 0;
}

static int
qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
	uint32_t *dsd,
	uint16_t tot_dsds)
{
	void *next_dsd;
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg;
	int i;
	struct scsi_cmnd *cmd;
	uint32_t *cur_dsd = dsd;
	uint16_t used_dsds = tot_dsds;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
	uint8_t *cp;


	cmd = sp->cmd;
	scsi_for_each_prot_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t sle_dma;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
					QLA_DSDS_PER_IOCB : used_dsds;
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;

			/* allocate tracking DS */
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr)
				return 1;

			/* allocate new list */
			dsd_ptr->dsd_addr = next_dsd =
			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
				&dsd_ptr->dsd_list_dma);

			if (!next_dsd) {
				/*
				 * Need to cleanup only this dsd_ptr, rest
				 * will be done by sp_free_dma()
				 */
				kfree(dsd_ptr);
				return 1;
			}

			list_add_tail(&dsd_ptr->list,
			    &((struct crc_context *)sp->ctx)->dsd_list);

			sp->flags |= SRB_CRC_CTX_DSD_VALID;

			/* add new list to cmd iocb or last list */
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = dsd_list_len;
			cur_dsd = (uint32_t *)next_dsd;
		}
		sle_dma = sg_dma_address(sg);
		if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) {
			ql_dbg(ql_dbg_io, vha, 0x3027,
			    "%s(): %p, sg_entry %d - addr=0x%x0x%x, len=%d.\n",
			    __func__, cur_dsd, i,
			    LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg));
		}
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));

		if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) {
			cp = page_address(sg_page(sg)) + sg->offset;
			ql_dbg(ql_dbg_io, vha, 0x3028,
			    "%s(): Protection Data buffer = %p.\n", __func__,
			    cp);
		}
		avail_dsds--;
	}
	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	return 0;
}

/**
 * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
 * Type 6 IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type CRC_2 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
static inline int
qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
    uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
{
	uint32_t *cur_dsd, *fcp_dl;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *cur_seg;
	int sgc;
	uint32_t total_bytes = 0;
	uint32_t data_bytes;
	uint32_t dif_bytes;
	uint8_t bundling = 1;
	uint16_t blk_size;
	uint8_t *clr_ptr;
	struct crc_context *crc_ctx_pkt = NULL;
	struct qla_hw_data *ha;
	uint8_t additional_fcpcdb_len;
	uint16_t fcp_cmnd_len;
	struct fcp_cmnd *fcp_cmnd;
	dma_addr_t crc_ctx_dma;
	char tag[2];

	cmd = sp->cmd;

	sgc = 0;
	/* Update entry type to indicate Command Type CRC_2 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE_CRC_2);

	vha = sp->fcport->vha;
	ha = vha->hw;

	/* No data transfer */
	data_bytes = scsi_bufflen(cmd);
	if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return QLA_SUCCESS;
	}

	cmd_pkt->vp_index = sp->fcport->vp_idx;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->control_flags =
		    __constant_cpu_to_le16(CF_WRITE_DATA);
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->control_flags =
		    __constant_cpu_to_le16(CF_READ_DATA);
	}

	if ((scsi_get_prot_op(sp->cmd) == SCSI_PROT_READ_INSERT) ||
	    (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_STRIP) ||
	    (scsi_get_prot_op(sp->cmd) == SCSI_PROT_READ_STRIP) ||
	    (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_INSERT))
		bundling = 0;

	/* Allocate CRC context from global pool */
	crc_ctx_pkt = sp->ctx = dma_pool_alloc(ha->dl_dma_pool,
	    GFP_ATOMIC, &crc_ctx_dma);

	if (!crc_ctx_pkt)
		goto crc_queuing_error;

	/* Zero out CTX area. */
	clr_ptr = (uint8_t *)crc_ctx_pkt;
	memset(clr_ptr, 0, sizeof(*crc_ctx_pkt));

	crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;

	sp->flags |= SRB_CRC_CTX_DMA_VALID;

	/* Set handle */
	crc_ctx_pkt->handle = cmd_pkt->handle;

	INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);

	qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *)
	    &crc_ctx_pkt->ref_tag, tot_prot_dsds);

	cmd_pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
	cmd_pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
	cmd_pkt->crc_context_len = CRC_CONTEXT_LEN_FW;

	/* Determine SCSI command length -- align to 4 byte boundary */
	if (cmd->cmd_len > 16) {
		additional_fcpcdb_len = cmd->cmd_len - 16;
		if ((cmd->cmd_len % 4) != 0) {
			/* SCSI cmd > 16 bytes must be multiple of 4 */
			goto crc_queuing_error;
		}
		fcp_cmnd_len = 12 + cmd->cmd_len + 4;
	} else {
		additional_fcpcdb_len = 0;
		fcp_cmnd_len = 12 + 16 + 4;
	}

	fcp_cmnd = &crc_ctx_pkt->fcp_cmnd;

	fcp_cmnd->additional_cdb_len = additional_fcpcdb_len;
	if (cmd->sc_data_direction == DMA_TO_DEVICE)
		fcp_cmnd->additional_cdb_len |= 1;
	else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
		fcp_cmnd->additional_cdb_len |= 2;

	int_to_scsilun(sp->cmd->device->lun, &fcp_cmnd->lun);
	memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
	cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
	cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32(
	    LSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
	cmd_pkt->fcp_cmnd_dseg_address[1] = cpu_to_le32(
	    MSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
	fcp_cmnd->task_management = 0;

	/*
	 * Update tagged queuing modifier if using command tag queuing
	 */
	if (scsi_populate_tag_msg(cmd, tag)) {
		switch (tag[0]) {
		case HEAD_OF_QUEUE_TAG:
			fcp_cmnd->task_attribute = TSK_HEAD_OF_QUEUE;
			break;
		case ORDERED_QUEUE_TAG:
			fcp_cmnd->task_attribute = TSK_ORDERED;
			break;
		default:
			fcp_cmnd->task_attribute = 0;
			break;
		}
	} else {
		fcp_cmnd->task_attribute = 0;
	}

	cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */

	/* Compute dif len and adjust data len to include protection */
	dif_bytes = 0;
	blk_size = cmd->device->sector_size;
	dif_bytes = (data_bytes / blk_size) * 8;

	switch (scsi_get_prot_op(sp->cmd)) {
	case SCSI_PROT_READ_INSERT:
	case SCSI_PROT_WRITE_STRIP:
		total_bytes = data_bytes;
		data_bytes += dif_bytes;
		break;

	case SCSI_PROT_READ_STRIP:
	case SCSI_PROT_WRITE_INSERT:
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		total_bytes = data_bytes + dif_bytes;
		break;
	default:
		BUG();
	}

	if (!qla2x00_hba_err_chk_enabled(sp))
		fw_prot_opts |= 0x10; /* Disable Guard tag checking */

	if (!bundling) {
		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
	} else {
		/*
		 * Configure Bundling if we need to fetch interleaving
		 * protection PCI accesses
		 */
		fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
		crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
		crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
			tot_prot_dsds);
		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
	}

	/* Finish the common fields of CRC pkt */
	crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
	crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
	crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
	crc_ctx_pkt->guard_seed = __constant_cpu_to_le16(0);
	/* Fibre channel byte count */
	cmd_pkt->byte_count = cpu_to_le32(total_bytes);
	fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
	    additional_fcpcdb_len);
	*fcp_dl = htonl(total_bytes);

	if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return QLA_SUCCESS;
	}
	/* Walks data segments */

	cmd_pkt->control_flags |=
	    __constant_cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);

	if (!bundling && tot_prot_dsds) {
		if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
		    cur_dsd, tot_dsds))
			goto crc_queuing_error;
	} else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
	    (tot_dsds - tot_prot_dsds)))
		goto crc_queuing_error;

	if (bundling && tot_prot_dsds) {
		/* Walks dif segments */
		cur_seg = scsi_prot_sglist(cmd);
		cmd_pkt->control_flags |=
			__constant_cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
		if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
		    tot_prot_dsds))
			goto crc_queuing_error;
	}
	return QLA_SUCCESS;

crc_queuing_error:
	/* Cleanup will be performed by the caller */

	return QLA_FUNCTION_FAILED;
}
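
/*
 * Worked example (editorial sketch, not driver code): with 512-byte
 * sectors and 8 bytes of DIF per sector, a 4 KB SCSI_PROT_WRITE_PASS
 * gives dif_bytes = (4096 / 512) * 8 = 64, so total_bytes = 4096 + 64 =
 * 4160 -- the value carried in cmd_pkt->byte_count and *fcp_dl above --
 * while a SCSI_PROT_WRITE_STRIP keeps total_bytes at 4096 because the
 * HBA removes the protection data before it reaches the wire.
 */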
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001426
1427/**
1428 * qla24xx_start_scsi() - Send a SCSI command to the ISP
1429 * @sp: command to send to the ISP
1430 *
Bjorn Helgaascc3ef7b2008-09-11 21:22:51 -07001431 * Returns non-zero if a failure occurred, else zero.
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001432 */
1433int
1434qla24xx_start_scsi(srb_t *sp)
1435{
FUJITA Tomonori385d70b2007-05-26 01:55:38 +09001436 int ret, nseg;
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001437 unsigned long flags;
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001438 uint32_t *clr_ptr;
1439 uint32_t index;
1440 uint32_t handle;
1441 struct cmd_type_7 *cmd_pkt;
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001442 uint16_t cnt;
1443 uint16_t req_cnt;
1444 uint16_t tot_dsds;
Anirban Chakraborty73208df2008-12-09 16:45:39 -08001445 struct req_que *req = NULL;
1446 struct rsp_que *rsp = NULL;
1447 struct scsi_cmnd *cmd = sp->cmd;
Andrew Vasquez444786d2009-01-05 11:18:10 -08001448 struct scsi_qla_host *vha = sp->fcport->vha;
Anirban Chakraborty73208df2008-12-09 16:45:39 -08001449 struct qla_hw_data *ha = vha->hw;
Andrew Vasquezff2fc422011-02-23 15:27:15 -08001450 char tag[2];
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001451
1452 /* Setup device pointers. */
1453 ret = 0;
Anirban Chakraborty73208df2008-12-09 16:45:39 -08001454
Anirban Chakraborty59e0b8b2009-06-03 09:55:19 -07001455 qla25xx_set_que(sp, &rsp);
1456 req = vha->req;
Anirban Chakraborty73208df2008-12-09 16:45:39 -08001457
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001458 /* So we know we haven't pci_map'ed anything yet */
1459 tot_dsds = 0;
1460
1461 /* Send marker if required */
Anirban Chakrabortye315cd22008-11-06 10:40:51 -08001462 if (vha->marker_needed != 0) {
Saurav Kashyap7c3df132011-07-14 12:00:13 -07001463 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1464 QLA_SUCCESS)
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001465 return QLA_FUNCTION_FAILED;
Anirban Chakrabortye315cd22008-11-06 10:40:51 -08001466 vha->marker_needed = 0;
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001467 }
1468
1469 /* Acquire ring specific lock */
Anirban Chakrabortye315cd22008-11-06 10:40:51 -08001470 spin_lock_irqsave(&ha->hardware_lock, flags);
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001471
1472 /* Check for room in outstanding command list. */
Anirban Chakrabortye315cd22008-11-06 10:40:51 -08001473 handle = req->current_outstanding_cmd;
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001474 for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
1475 handle++;
1476 if (handle == MAX_OUTSTANDING_COMMANDS)
1477 handle = 1;
Anirban Chakrabortye315cd22008-11-06 10:40:51 -08001478 if (!req->outstanding_cmds[handle])
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001479 break;
1480 }
Saurav Kashyap7c3df132011-07-14 12:00:13 -07001481 if (index == MAX_OUTSTANDING_COMMANDS) {
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001482 goto queuing_error;
Saurav Kashyap7c3df132011-07-14 12:00:13 -07001483 }
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001484
1485 /* Map the sg table so we have an accurate count of sg entries needed */
Seokmann Ju2c3dfe32007-07-05 13:16:51 -07001486 if (scsi_sg_count(cmd)) {
1487 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1488 scsi_sg_count(cmd), cmd->sc_data_direction);
1489 if (unlikely(!nseg))
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001490 goto queuing_error;
Seokmann Ju2c3dfe32007-07-05 13:16:51 -07001491 } else
1492 nseg = 0;
1493
FUJITA Tomonori385d70b2007-05-26 01:55:38 +09001494 tot_dsds = nseg;
Saurav Kashyap7c3df132011-07-14 12:00:13 -07001495 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
Anirban Chakrabortye315cd22008-11-06 10:40:51 -08001496 if (req->cnt < (req_cnt + 2)) {
Andrew Vasquez08029992009-03-24 09:07:55 -07001497 cnt = RD_REG_DWORD_RELAXED(req->req_q_out);
Anirban Chakraborty73208df2008-12-09 16:45:39 -08001498
Anirban Chakrabortye315cd22008-11-06 10:40:51 -08001499 if (req->ring_index < cnt)
1500 req->cnt = cnt - req->ring_index;
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001501 else
Anirban Chakrabortye315cd22008-11-06 10:40:51 -08001502 req->cnt = req->length -
1503 (req->ring_index - cnt);
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001504 }
Anirban Chakrabortye315cd22008-11-06 10:40:51 -08001505 if (req->cnt < (req_cnt + 2))
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001506 goto queuing_error;
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001507
1508 /* Build command packet. */
Anirban Chakrabortye315cd22008-11-06 10:40:51 -08001509 req->current_outstanding_cmd = handle;
1510 req->outstanding_cmds[handle] = sp;
Andrew Vasquezcf53b062009-08-20 11:06:04 -07001511 sp->handle = handle;
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001512 sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
Anirban Chakrabortye315cd22008-11-06 10:40:51 -08001513 req->cnt -= req_cnt;
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001514
Anirban Chakrabortye315cd22008-11-06 10:40:51 -08001515 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
Anirban Chakraborty2afa19a2009-04-06 22:33:40 -07001516 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001517
1518 /* Zero out remaining portion of packet. */
James Bottomley72df8322005-10-28 14:41:19 -05001519 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001520 clr_ptr = (uint32_t *)cmd_pkt + 2;
1521 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1522 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1523
1524 /* Set NPORT-ID and LUN number*/
1525 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1526 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1527 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1528 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
Seokmann Ju2c3dfe32007-07-05 13:16:51 -07001529 cmd_pkt->vp_index = sp->fcport->vp_idx;
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001530
Andrew Vasquez661c3f62005-10-27 11:09:58 -07001531 int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
andrew.vasquez@qlogic.com0d4be122006-02-07 08:45:35 -08001532 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001533
Andrew Vasquezff2fc422011-02-23 15:27:15 -08001534 /* Update tagged queuing modifier -- default is TSK_SIMPLE (0). */
1535 if (scsi_populate_tag_msg(cmd, tag)) {
1536 switch (tag[0]) {
1537 case HEAD_OF_QUEUE_TAG:
1538 cmd_pkt->task = TSK_HEAD_OF_QUEUE;
1539 break;
1540 case ORDERED_QUEUE_TAG:
1541 cmd_pkt->task = TSK_ORDERED;
1542 break;
1543 }
1544 }
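	/*
	 * Illustrative note, not from the original source: the switch above
	 * maps the midlayer's block-tag message onto the FCP task attribute
	 * carried in the IOCB; any other tag falls through and keeps the
	 * memset default, TSK_SIMPLE (0):
	 *
	 *	HEAD_OF_QUEUE_TAG -> TSK_HEAD_OF_QUEUE
	 *	ORDERED_QUEUE_TAG -> TSK_ORDERED
	 *	anything else     -> TSK_SIMPLE (0)
	 */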
1545
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001546 /* Load SCSI command packet. */
1547 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
1548 host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
1549
FUJITA Tomonori385d70b2007-05-26 01:55:38 +09001550 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001551
1552 /* Build IOCB segments */
1553 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);
1554
 1555	/* Set total IOCB entry count. */
1556 cmd_pkt->entry_count = (uint8_t)req_cnt;
Anirban Chakraborty2afa19a2009-04-06 22:33:40 -07001557 /* Specify response queue number where completion should happen */
1558 cmd_pkt->entry_status = (uint8_t) rsp->id;
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001559 wmb();
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001560 /* Adjust ring index. */
Anirban Chakrabortye315cd22008-11-06 10:40:51 -08001561 req->ring_index++;
1562 if (req->ring_index == req->length) {
1563 req->ring_index = 0;
1564 req->ring_ptr = req->ring;
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001565 } else
Anirban Chakrabortye315cd22008-11-06 10:40:51 -08001566 req->ring_ptr++;
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001567
1568 sp->flags |= SRB_DMA_VALID;
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001569
1570 /* Set chip new ring index. */
Andrew Vasquez08029992009-03-24 09:07:55 -07001571 WRT_REG_DWORD(req->req_q_in, req->ring_index);
1572 RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
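	/*
	 * Illustrative note, not from the original source: the in-pointer
	 * update above is a posted PCI write, so the relaxed read of HCCR
	 * that follows forces it out of any intermediate buffers and the ISP
	 * sees the new ring index promptly.  The generic shape of the idiom:
	 *
	 *	writel(val, doorbell);	posted write, may linger in a buffer
	 *	readl(any_reg);		a read on the same path flushes it
	 */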
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001573
Andrew Vasquez4fdfefe2005-10-27 11:09:48 -07001574 /* Manage unprocessed RIO/ZIO commands in response queue. */
Anirban Chakrabortye315cd22008-11-06 10:40:51 -08001575 if (vha->flags.process_response_queue &&
Anirban Chakraborty73208df2008-12-09 16:45:39 -08001576 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
Anirban Chakraborty2afa19a2009-04-06 22:33:40 -07001577 qla24xx_process_response_queue(vha, rsp);
Andrew Vasquez4fdfefe2005-10-27 11:09:48 -07001578
Anirban Chakrabortye315cd22008-11-06 10:40:51 -08001579 spin_unlock_irqrestore(&ha->hardware_lock, flags);
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001580 return QLA_SUCCESS;
1581
1582queuing_error:
FUJITA Tomonori385d70b2007-05-26 01:55:38 +09001583 if (tot_dsds)
1584 scsi_dma_unmap(cmd);
1585
Anirban Chakrabortye315cd22008-11-06 10:40:51 -08001586 spin_unlock_irqrestore(&ha->hardware_lock, flags);
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -07001587
1588 return QLA_FUNCTION_FAILED;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001589}
Anirban Chakraborty68ca9492009-04-06 22:33:41 -07001590
Arun Easibad75002010-05-04 15:01:30 -07001591
1592/**
1593 * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
1594 * @sp: command to send to the ISP
1595 *
1596 * Returns non-zero if a failure occurred, else zero.
1597 */
1598int
1599qla24xx_dif_start_scsi(srb_t *sp)
1600{
1601 int nseg;
1602 unsigned long flags;
1603 uint32_t *clr_ptr;
1604 uint32_t index;
1605 uint32_t handle;
1606 uint16_t cnt;
1607 uint16_t req_cnt = 0;
1608 uint16_t tot_dsds;
1609 uint16_t tot_prot_dsds;
1610 uint16_t fw_prot_opts = 0;
1611 struct req_que *req = NULL;
1612 struct rsp_que *rsp = NULL;
1613 struct scsi_cmnd *cmd = sp->cmd;
1614 struct scsi_qla_host *vha = sp->fcport->vha;
1615 struct qla_hw_data *ha = vha->hw;
1616 struct cmd_type_crc_2 *cmd_pkt;
1617 uint32_t status = 0;
1618
1619#define QDSS_GOT_Q_SPACE BIT_0
1620
Arun Easi0c470872010-07-23 15:28:38 +05001621	/* Only process protection commands or CDBs longer than 16 bytes in this routine */
1622 if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
1623 if (cmd->cmd_len <= 16)
1624 return qla24xx_start_scsi(sp);
1625 }
Arun Easibad75002010-05-04 15:01:30 -07001626
1627 /* Setup device pointers. */
1628
1629 qla25xx_set_que(sp, &rsp);
1630 req = vha->req;
1631
1632 /* So we know we haven't pci_map'ed anything yet */
1633 tot_dsds = 0;
1634
1635 /* Send marker if required */
1636 if (vha->marker_needed != 0) {
1637 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1638 QLA_SUCCESS)
1639 return QLA_FUNCTION_FAILED;
1640 vha->marker_needed = 0;
1641 }
1642
1643 /* Acquire ring specific lock */
1644 spin_lock_irqsave(&ha->hardware_lock, flags);
1645
1646 /* Check for room in outstanding command list. */
1647 handle = req->current_outstanding_cmd;
1648 for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
1649 handle++;
1650 if (handle == MAX_OUTSTANDING_COMMANDS)
1651 handle = 1;
1652 if (!req->outstanding_cmds[handle])
1653 break;
1654 }
1655
1656 if (index == MAX_OUTSTANDING_COMMANDS)
1657 goto queuing_error;
1658
1659 /* Compute number of required data segments */
1660 /* Map the sg table so we have an accurate count of sg entries needed */
1661 if (scsi_sg_count(cmd)) {
1662 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1663 scsi_sg_count(cmd), cmd->sc_data_direction);
1664 if (unlikely(!nseg))
1665 goto queuing_error;
1666 else
1667 sp->flags |= SRB_DMA_VALID;
Arun Easi8cb20492011-08-16 11:29:22 -07001668
1669 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1670 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1671 struct qla2_sgx sgx;
1672 uint32_t partial;
1673
1674 memset(&sgx, 0, sizeof(struct qla2_sgx));
1675 sgx.tot_bytes = scsi_bufflen(cmd);
1676 sgx.cur_sg = scsi_sglist(cmd);
1677 sgx.sp = sp;
1678
1679 nseg = 0;
1680 while (qla24xx_get_one_block_sg(
1681 cmd->device->sector_size, &sgx, &partial))
1682 nseg++;
1683 }
Arun Easibad75002010-05-04 15:01:30 -07001684 } else
1685 nseg = 0;
1686
1687 /* number of required data segments */
1688 tot_dsds = nseg;
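	/*
	 * Illustrative note, not from the original source: for READ_INSERT
	 * and WRITE_STRIP the firmware adds or removes an 8-byte DIF tuple
	 * per logical block, so the transfer must be described in
	 * block-sized DSDs.  The qla24xx_get_one_block_sg() loop above
	 * therefore recounted nseg one block at a time: e.g. a 16 KB
	 * transfer on a 512-byte-sector device yields
	 * nseg = 16384 / 512 = 32, however few scatterlist entries the
	 * buffer originally occupied.
	 */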
1689
1690 /* Compute number of required protection segments */
1691 if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
1692 nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
1693 scsi_prot_sg_count(cmd), cmd->sc_data_direction);
1694 if (unlikely(!nseg))
1695 goto queuing_error;
1696 else
1697 sp->flags |= SRB_CRC_PROT_DMA_VALID;
Arun Easi8cb20492011-08-16 11:29:22 -07001698
1699 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1700 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1701 nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
1702 }
Arun Easibad75002010-05-04 15:01:30 -07001703 } else {
1704 nseg = 0;
1705 }
1706
1707 req_cnt = 1;
1708 /* Total Data and protection sg segment(s) */
1709 tot_prot_dsds = nseg;
1710 tot_dsds += nseg;
1711 if (req->cnt < (req_cnt + 2)) {
1712 cnt = RD_REG_DWORD_RELAXED(req->req_q_out);
1713
1714 if (req->ring_index < cnt)
1715 req->cnt = cnt - req->ring_index;
1716 else
1717 req->cnt = req->length -
1718 (req->ring_index - cnt);
1719 }
1720
1721 if (req->cnt < (req_cnt + 2))
1722 goto queuing_error;
1723
1724 status |= QDSS_GOT_Q_SPACE;
1725
1726 /* Build header part of command packet (excluding the OPCODE). */
1727 req->current_outstanding_cmd = handle;
1728 req->outstanding_cmds[handle] = sp;
Arun Easi8cb20492011-08-16 11:29:22 -07001729 sp->handle = handle;
Arun Easibad75002010-05-04 15:01:30 -07001730 sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1731 req->cnt -= req_cnt;
1732
1733 /* Fill-in common area */
1734 cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
1735 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1736
1737 clr_ptr = (uint32_t *)cmd_pkt + 2;
1738 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1739
1740 /* Set NPORT-ID and LUN number*/
1741 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1742 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1743 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1744 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1745
1746 int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
1747 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1748
1749 /* Total Data and protection segment(s) */
1750 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1751
1752 /* Build IOCB segments and adjust for data protection segments */
1753 if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
1754 req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
1755 QLA_SUCCESS)
1756 goto queuing_error;
1757
1758 cmd_pkt->entry_count = (uint8_t)req_cnt;
1759 /* Specify response queue number where completion should happen */
1760 cmd_pkt->entry_status = (uint8_t) rsp->id;
1761 cmd_pkt->timeout = __constant_cpu_to_le16(0);
1762 wmb();
1763
1764 /* Adjust ring index. */
1765 req->ring_index++;
1766 if (req->ring_index == req->length) {
1767 req->ring_index = 0;
1768 req->ring_ptr = req->ring;
1769 } else
1770 req->ring_ptr++;
1771
1772 /* Set chip new ring index. */
1773 WRT_REG_DWORD(req->req_q_in, req->ring_index);
1774 RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
1775
1776 /* Manage unprocessed RIO/ZIO commands in response queue. */
1777 if (vha->flags.process_response_queue &&
1778 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
1779 qla24xx_process_response_queue(vha, rsp);
1780
1781 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1782
1783 return QLA_SUCCESS;
1784
1785queuing_error:
1786 if (status & QDSS_GOT_Q_SPACE) {
1787 req->outstanding_cmds[handle] = NULL;
1788 req->cnt += req_cnt;
1789 }
1790 /* Cleanup will be performed by the caller (queuecommand) */
1791
1792 spin_unlock_irqrestore(&ha->hardware_lock, flags);
Arun Easibad75002010-05-04 15:01:30 -07001793 return QLA_FUNCTION_FAILED;
1794}
1795
1796
Anirban Chakraborty59e0b8b2009-06-03 09:55:19 -07001797static void qla25xx_set_que(srb_t *sp, struct rsp_que **rsp)
Anirban Chakraborty68ca9492009-04-06 22:33:41 -07001798{
1799 struct scsi_cmnd *cmd = sp->cmd;
Anirban Chakraborty68ca9492009-04-06 22:33:41 -07001800 struct qla_hw_data *ha = sp->fcport->vha->hw;
1801 int affinity = cmd->request->cpu;
1802
Anirban Chakraborty7163ea82009-08-05 09:18:40 -07001803 if (ha->flags.cpu_affinity_enabled && affinity >= 0 &&
Anirban Chakraborty59e0b8b2009-06-03 09:55:19 -07001804 affinity < ha->max_rsp_queues - 1)
Anirban Chakraborty68ca9492009-04-06 22:33:41 -07001805 *rsp = ha->rsp_q_map[affinity + 1];
Anirban Chakraborty59e0b8b2009-06-03 09:55:19 -07001806 else
Anirban Chakraborty68ca9492009-04-06 22:33:41 -07001807 *rsp = ha->rsp_q_map[0];
Anirban Chakraborty68ca9492009-04-06 22:33:41 -07001808}
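/*
 * Illustrative note, not from the original source: with CPU affinity
 * enabled, response queue 0 is kept for general use and queues
 * 1..max_rsp_queues-1 are selected by the submitting CPU.  E.g. with a
 * hypothetical max_rsp_queues of 5, I/O issued from CPUs 0..3 completes on
 * rsp_q_map[1..4]; any other CPU, or affinity disabled, falls back to
 * rsp_q_map[0].
 */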
Andrew Vasquezac280b62009-08-20 11:06:05 -07001809
1810/* Generic Control-SRB manipulation functions. */
Giridhar Malavalid94d10e2010-07-23 15:28:23 +05001811void *
1812qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
Andrew Vasquezac280b62009-08-20 11:06:05 -07001813{
Andrew Vasquezac280b62009-08-20 11:06:05 -07001814 struct qla_hw_data *ha = vha->hw;
1815 struct req_que *req = ha->req_q_map[0];
1816 device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
1817 uint32_t index, handle;
1818 request_t *pkt;
1819 uint16_t cnt, req_cnt;
Andrew Vasquez57807902011-11-18 09:03:20 -08001820 struct srb_ctx *ctx;
Andrew Vasquezac280b62009-08-20 11:06:05 -07001821
1822 pkt = NULL;
1823 req_cnt = 1;
Giridhar Malavalid94d10e2010-07-23 15:28:23 +05001824 handle = 0;
1825
1826 if (!sp)
1827 goto skip_cmd_array;
Andrew Vasquezac280b62009-08-20 11:06:05 -07001828
1829 /* Check for room in outstanding command list. */
1830 handle = req->current_outstanding_cmd;
1831 for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
1832 handle++;
1833 if (handle == MAX_OUTSTANDING_COMMANDS)
1834 handle = 1;
1835 if (!req->outstanding_cmds[handle])
1836 break;
1837 }
Saurav Kashyap7c3df132011-07-14 12:00:13 -07001838 if (index == MAX_OUTSTANDING_COMMANDS) {
1839 ql_log(ql_log_warn, vha, 0x700b,
1840 "No room on oustanding cmd array.\n");
Andrew Vasquezac280b62009-08-20 11:06:05 -07001841 goto queuing_error;
Saurav Kashyap7c3df132011-07-14 12:00:13 -07001842 }
Andrew Vasquezac280b62009-08-20 11:06:05 -07001843
Giridhar Malavalid94d10e2010-07-23 15:28:23 +05001844 /* Prep command array. */
1845 req->current_outstanding_cmd = handle;
1846 req->outstanding_cmds[handle] = sp;
1847 sp->handle = handle;
1848
Andrew Vasquez57807902011-11-18 09:03:20 -08001849 /* Adjust entry-counts as needed. */
1850 if (sp->ctx) {
1851 ctx = sp->ctx;
1852 req_cnt = ctx->iocbs;
1853 }
1854
Giridhar Malavalid94d10e2010-07-23 15:28:23 +05001855skip_cmd_array:
Andrew Vasquezac280b62009-08-20 11:06:05 -07001856 /* Check for room on request queue. */
1857 if (req->cnt < req_cnt) {
Giridhar Malavali6246b8a2012-02-09 11:15:34 -08001858 if (ha->mqenable || IS_QLA83XX(ha))
Andrew Vasquezac280b62009-08-20 11:06:05 -07001859 cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
Giridhar Malavalid94d10e2010-07-23 15:28:23 +05001860 else if (IS_QLA82XX(ha))
1861 cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
Andrew Vasquezac280b62009-08-20 11:06:05 -07001862 else if (IS_FWI2_CAPABLE(ha))
1863 cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
1864 else
1865 cnt = qla2x00_debounce_register(
1866 ISP_REQ_Q_OUT(ha, &reg->isp));
1867
1868 if (req->ring_index < cnt)
1869 req->cnt = cnt - req->ring_index;
1870 else
1871 req->cnt = req->length -
1872 (req->ring_index - cnt);
1873 }
1874 if (req->cnt < req_cnt)
1875 goto queuing_error;
1876
1877 /* Prep packet */
Andrew Vasquezac280b62009-08-20 11:06:05 -07001878 req->cnt -= req_cnt;
Andrew Vasquezac280b62009-08-20 11:06:05 -07001879 pkt = req->ring_ptr;
1880 memset(pkt, 0, REQUEST_ENTRY_SIZE);
1881 pkt->entry_count = req_cnt;
1882 pkt->handle = handle;
Andrew Vasquezac280b62009-08-20 11:06:05 -07001883
1884queuing_error:
1885 return pkt;
1886}
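/*
 * Illustrative note, not from the original source: a minimal caller sketch
 * for qla2x00_alloc_iocbs(), mirroring qla2x00_start_sp() below.  The
 * hardware lock must already be held, and a NULL return means the handle
 * array or the request ring is full:
 *
 *	spin_lock_irqsave(&ha->hardware_lock, flags);
 *	pkt = qla2x00_alloc_iocbs(vha, sp);
 *	if (!pkt)
 *		goto done;
 *	... fill in the type-specific fields of *pkt ...
 *	qla2x00_start_iocbs(vha, ha->req_q_map[0]);
 * done:
 *	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 */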
1887
1888static void
Andrew Vasquezac280b62009-08-20 11:06:05 -07001889qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
1890{
Madhuranath Iyengar49163922010-05-04 15:01:28 -07001891 struct srb_ctx *ctx = sp->ctx;
1892 struct srb_iocb *lio = ctx->u.iocb_cmd;
Andrew Vasquezac280b62009-08-20 11:06:05 -07001893
1894 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1895 logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
Madhuranath Iyengar49163922010-05-04 15:01:28 -07001896 if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
Andrew Vasquezac280b62009-08-20 11:06:05 -07001897 logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
Madhuranath Iyengar49163922010-05-04 15:01:28 -07001898 if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
Andrew Vasquezac280b62009-08-20 11:06:05 -07001899 logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
1900 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1901 logio->port_id[0] = sp->fcport->d_id.b.al_pa;
1902 logio->port_id[1] = sp->fcport->d_id.b.area;
1903 logio->port_id[2] = sp->fcport->d_id.b.domain;
1904 logio->vp_index = sp->fcport->vp_idx;
1905}
1906
1907static void
1908qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
1909{
1910 struct qla_hw_data *ha = sp->fcport->vha->hw;
Madhuranath Iyengar49163922010-05-04 15:01:28 -07001911 struct srb_ctx *ctx = sp->ctx;
1912 struct srb_iocb *lio = ctx->u.iocb_cmd;
Andrew Vasquezac280b62009-08-20 11:06:05 -07001913 uint16_t opts;
1914
Giridhar Malavalib9637522010-05-28 15:08:15 -07001915 mbx->entry_type = MBX_IOCB_TYPE;
Andrew Vasquezac280b62009-08-20 11:06:05 -07001916 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
1917 mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
Madhuranath Iyengar49163922010-05-04 15:01:28 -07001918 opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0;
1919 opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0;
Andrew Vasquezac280b62009-08-20 11:06:05 -07001920 if (HAS_EXTENDED_IDS(ha)) {
1921 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
1922 mbx->mb10 = cpu_to_le16(opts);
1923 } else {
1924 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts);
1925 }
1926 mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
1927 mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
1928 sp->fcport->d_id.b.al_pa);
1929 mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
1930}
1931
1932static void
1933qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
1934{
1935 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1936 logio->control_flags =
1937 cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
1938 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1939 logio->port_id[0] = sp->fcport->d_id.b.al_pa;
1940 logio->port_id[1] = sp->fcport->d_id.b.area;
1941 logio->port_id[2] = sp->fcport->d_id.b.domain;
1942 logio->vp_index = sp->fcport->vp_idx;
1943}
1944
1945static void
1946qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
1947{
1948 struct qla_hw_data *ha = sp->fcport->vha->hw;
1949
Giridhar Malavalib9637522010-05-28 15:08:15 -07001950 mbx->entry_type = MBX_IOCB_TYPE;
Andrew Vasquezac280b62009-08-20 11:06:05 -07001951 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
1952 mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
1953 mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
1954 cpu_to_le16(sp->fcport->loop_id):
1955 cpu_to_le16(sp->fcport->loop_id << 8);
1956 mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
1957 mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
1958 sp->fcport->d_id.b.al_pa);
1959 mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
1960 /* Implicit: mbx->mbx10 = 0. */
1961}
1962
Giridhar Malavali9a069e12010-01-12 13:02:47 -08001963static void
Andrew Vasquez5ff1d582010-05-04 15:01:26 -07001964qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
1965{
1966 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1967 logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
1968 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1969 logio->vp_index = sp->fcport->vp_idx;
1970}
1971
1972static void
1973qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
1974{
1975 struct qla_hw_data *ha = sp->fcport->vha->hw;
1976
1977 mbx->entry_type = MBX_IOCB_TYPE;
1978 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
1979 mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE);
1980 if (HAS_EXTENDED_IDS(ha)) {
1981 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
1982 mbx->mb10 = cpu_to_le16(BIT_0);
1983 } else {
1984 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0);
1985 }
1986 mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma));
1987 mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
1988 mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
1989 mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
1990 mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
1991}
1992
1993static void
Madhuranath Iyengar38222632010-05-04 15:01:29 -07001994qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
1995{
1996 uint32_t flags;
1997 unsigned int lun;
1998 struct fc_port *fcport = sp->fcport;
1999 scsi_qla_host_t *vha = fcport->vha;
2000 struct qla_hw_data *ha = vha->hw;
2001 struct srb_ctx *ctx = sp->ctx;
2002 struct srb_iocb *iocb = ctx->u.iocb_cmd;
2003 struct req_que *req = vha->req;
2004
2005 flags = iocb->u.tmf.flags;
2006 lun = iocb->u.tmf.lun;
2007
2008 tsk->entry_type = TSK_MGMT_IOCB_TYPE;
2009 tsk->entry_count = 1;
2010 tsk->handle = MAKE_HANDLE(req->id, tsk->handle);
2011 tsk->nport_handle = cpu_to_le16(fcport->loop_id);
2012 tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
2013 tsk->control_flags = cpu_to_le32(flags);
2014 tsk->port_id[0] = fcport->d_id.b.al_pa;
2015 tsk->port_id[1] = fcport->d_id.b.area;
2016 tsk->port_id[2] = fcport->d_id.b.domain;
2017 tsk->vp_index = fcport->vp_idx;
2018
2019 if (flags == TCF_LUN_RESET) {
2020 int_to_scsilun(lun, &tsk->lun);
2021 host_to_fcp_swap((uint8_t *)&tsk->lun,
2022 sizeof(tsk->lun));
2023 }
2024}
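/*
 * Illustrative note, not from the original source: only a LUN reset is
 * LUN-scoped, so tsk->lun is filled in (and byte-swapped to FCP wire order)
 * for TCF_LUN_RESET alone; a target-level reset addresses the whole nport
 * handle and leaves the zeroed LUN field as-is.  Assuming r_a_tov is kept in
 * tenths of a second, the conventional 100 (10 s) gives a task-management
 * timeout of 100 / 10 * 2 = 20 seconds, i.e. twice R_A_TOV.
 */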
2025
2026static void
Giridhar Malavali9a069e12010-01-12 13:02:47 -08002027qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
2028{
Madhuranath Iyengar49163922010-05-04 15:01:28 -07002029 struct fc_bsg_job *bsg_job = ((struct srb_ctx *)sp->ctx)->u.bsg_job;
Giridhar Malavali9a069e12010-01-12 13:02:47 -08002030
2031 els_iocb->entry_type = ELS_IOCB_TYPE;
2032 els_iocb->entry_count = 1;
2033 els_iocb->sys_define = 0;
2034 els_iocb->entry_status = 0;
2035 els_iocb->handle = sp->handle;
2036 els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2037 els_iocb->tx_dsd_count = __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
2038 els_iocb->vp_index = sp->fcport->vp_idx;
2039 els_iocb->sof_type = EST_SOFI3;
2040 els_iocb->rx_dsd_count = __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);
2041
Madhuranath Iyengar49163922010-05-04 15:01:28 -07002042 els_iocb->opcode =
2043 (((struct srb_ctx *)sp->ctx)->type == SRB_ELS_CMD_RPT) ?
2044 bsg_job->request->rqst_data.r_els.els_code :
2045 bsg_job->request->rqst_data.h_els.command_code;
Giridhar Malavali9a069e12010-01-12 13:02:47 -08002046 els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
2047 els_iocb->port_id[1] = sp->fcport->d_id.b.area;
2048 els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
2049 els_iocb->control_flags = 0;
2050 els_iocb->rx_byte_count =
2051 cpu_to_le32(bsg_job->reply_payload.payload_len);
2052 els_iocb->tx_byte_count =
2053 cpu_to_le32(bsg_job->request_payload.payload_len);
2054
2055 els_iocb->tx_address[0] = cpu_to_le32(LSD(sg_dma_address
2056 (bsg_job->request_payload.sg_list)));
2057 els_iocb->tx_address[1] = cpu_to_le32(MSD(sg_dma_address
2058 (bsg_job->request_payload.sg_list)));
2059 els_iocb->tx_len = cpu_to_le32(sg_dma_len
2060 (bsg_job->request_payload.sg_list));
2061
2062 els_iocb->rx_address[0] = cpu_to_le32(LSD(sg_dma_address
2063 (bsg_job->reply_payload.sg_list)));
2064 els_iocb->rx_address[1] = cpu_to_le32(MSD(sg_dma_address
2065 (bsg_job->reply_payload.sg_list)));
2066 els_iocb->rx_len = cpu_to_le32(sg_dma_len
2067 (bsg_job->reply_payload.sg_list));
2068}
2069
2070static void
Harish Zunjarrao9bc4f4f2010-07-23 15:28:32 +05002071qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
2072{
2073 uint16_t avail_dsds;
2074 uint32_t *cur_dsd;
2075 struct scatterlist *sg;
2076 int index;
2077 uint16_t tot_dsds;
2078 scsi_qla_host_t *vha = sp->fcport->vha;
2079 struct qla_hw_data *ha = vha->hw;
2080 struct fc_bsg_job *bsg_job = ((struct srb_ctx *)sp->ctx)->u.bsg_job;
2081 int loop_iterartion = 0;
2082 int cont_iocb_prsnt = 0;
2083 int entry_count = 1;
2084
2085 memset(ct_iocb, 0, sizeof(ms_iocb_entry_t));
2086 ct_iocb->entry_type = CT_IOCB_TYPE;
2087 ct_iocb->entry_status = 0;
2088 ct_iocb->handle1 = sp->handle;
2089 SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id);
2090 ct_iocb->status = __constant_cpu_to_le16(0);
2091 ct_iocb->control_flags = __constant_cpu_to_le16(0);
2092 ct_iocb->timeout = 0;
2093 ct_iocb->cmd_dsd_count =
2094 __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
2095 ct_iocb->total_dsd_count =
2096 __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt + 1);
2097 ct_iocb->req_bytecount =
2098 cpu_to_le32(bsg_job->request_payload.payload_len);
2099 ct_iocb->rsp_bytecount =
2100 cpu_to_le32(bsg_job->reply_payload.payload_len);
2101
2102 ct_iocb->dseg_req_address[0] = cpu_to_le32(LSD(sg_dma_address
2103 (bsg_job->request_payload.sg_list)));
2104 ct_iocb->dseg_req_address[1] = cpu_to_le32(MSD(sg_dma_address
2105 (bsg_job->request_payload.sg_list)));
2106 ct_iocb->dseg_req_length = ct_iocb->req_bytecount;
2107
2108 ct_iocb->dseg_rsp_address[0] = cpu_to_le32(LSD(sg_dma_address
2109 (bsg_job->reply_payload.sg_list)));
2110 ct_iocb->dseg_rsp_address[1] = cpu_to_le32(MSD(sg_dma_address
2111 (bsg_job->reply_payload.sg_list)));
2112 ct_iocb->dseg_rsp_length = ct_iocb->rsp_bytecount;
2113
2114 avail_dsds = 1;
2115 cur_dsd = (uint32_t *)ct_iocb->dseg_rsp_address;
2116 index = 0;
2117 tot_dsds = bsg_job->reply_payload.sg_cnt;
2118
2119 for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
2120 dma_addr_t sle_dma;
2121 cont_a64_entry_t *cont_pkt;
2122
2123 /* Allocate additional continuation packets? */
2124 if (avail_dsds == 0) {
2125 /*
2126 * Five DSDs are available in the Cont.
2127 * Type 1 IOCB.
2128 */
Giridhar Malavali0d2aa382011-11-18 09:02:21 -08002129 cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
2130 vha->hw->req_q_map[0]);
Harish Zunjarrao9bc4f4f2010-07-23 15:28:32 +05002131 cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2132 avail_dsds = 5;
2133 cont_iocb_prsnt = 1;
2134 entry_count++;
2135 }
2136
2137 sle_dma = sg_dma_address(sg);
2138 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
2139 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
2140 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
 2141		loop_iteration++;
2142 avail_dsds--;
2143 }
2144 ct_iocb->entry_count = entry_count;
2145}
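/*
 * Illustrative note, not from the original source: the base CT IOCB above
 * holds exactly one response DSD, and each Continuation Type 1 IOCB adds
 * five more, so a reply scatterlist of N entries needs
 *
 *	entry_count = 1 + DIV_ROUND_UP(N - 1, 5)	(N >= 1)
 *
 * e.g. N = 1 fits in the base entry, N = 6 takes one continuation
 * (entry_count 2), and N = 12 takes three (entry_count 4).
 */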
2146
2147static void
Giridhar Malavali9a069e12010-01-12 13:02:47 -08002148qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
2149{
2150 uint16_t avail_dsds;
2151 uint32_t *cur_dsd;
2152 struct scatterlist *sg;
2153 int index;
2154 uint16_t tot_dsds;
2155 scsi_qla_host_t *vha = sp->fcport->vha;
Giridhar Malavali0d2aa382011-11-18 09:02:21 -08002156 struct qla_hw_data *ha = vha->hw;
Madhuranath Iyengar49163922010-05-04 15:01:28 -07002157 struct fc_bsg_job *bsg_job = ((struct srb_ctx *)sp->ctx)->u.bsg_job;
Giridhar Malavali9a069e12010-01-12 13:02:47 -08002158	int loop_iteration = 0;
2159 int cont_iocb_prsnt = 0;
2160 int entry_count = 1;
2161
2162 ct_iocb->entry_type = CT_IOCB_TYPE;
2163 ct_iocb->entry_status = 0;
2164 ct_iocb->sys_define = 0;
2165 ct_iocb->handle = sp->handle;
2166
2167 ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2168 ct_iocb->vp_index = sp->fcport->vp_idx;
2169 ct_iocb->comp_status = __constant_cpu_to_le16(0);
2170
2171 ct_iocb->cmd_dsd_count =
2172 __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
2173 ct_iocb->timeout = 0;
2174 ct_iocb->rsp_dsd_count =
2175 __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);
2176 ct_iocb->rsp_byte_count =
2177 cpu_to_le32(bsg_job->reply_payload.payload_len);
2178 ct_iocb->cmd_byte_count =
2179 cpu_to_le32(bsg_job->request_payload.payload_len);
2180 ct_iocb->dseg_0_address[0] = cpu_to_le32(LSD(sg_dma_address
2181 (bsg_job->request_payload.sg_list)));
2182 ct_iocb->dseg_0_address[1] = cpu_to_le32(MSD(sg_dma_address
2183 (bsg_job->request_payload.sg_list)));
2184 ct_iocb->dseg_0_len = cpu_to_le32(sg_dma_len
2185 (bsg_job->request_payload.sg_list));
2186
2187 avail_dsds = 1;
2188 cur_dsd = (uint32_t *)ct_iocb->dseg_1_address;
2189 index = 0;
2190 tot_dsds = bsg_job->reply_payload.sg_cnt;
2191
2192 for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
2193 dma_addr_t sle_dma;
2194 cont_a64_entry_t *cont_pkt;
2195
2196 /* Allocate additional continuation packets? */
2197 if (avail_dsds == 0) {
2198 /*
2199 * Five DSDs are available in the Cont.
2200 * Type 1 IOCB.
2201 */
Giridhar Malavali0d2aa382011-11-18 09:02:21 -08002202 cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
2203 ha->req_q_map[0]);
Giridhar Malavali9a069e12010-01-12 13:02:47 -08002204 cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2205 avail_dsds = 5;
2206 cont_iocb_prsnt = 1;
2207 entry_count++;
2208 }
2209
2210 sle_dma = sg_dma_address(sg);
2211 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
2212 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
2213 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
 2214		loop_iteration++;
2215 avail_dsds--;
2216 }
2217 ct_iocb->entry_count = entry_count;
2218}
2219
Giridhar Malavali5162cf02011-11-18 09:03:18 -08002220/**
2221 * qla82xx_start_scsi() - Send a SCSI command to the ISP
2222 * @sp: command to send to the ISP
2223 *
2224 * Returns non-zero if a failure occurred, else zero.
2225 */
2226int
2227qla82xx_start_scsi(srb_t *sp)
2228{
2229 int ret, nseg;
2230 unsigned long flags;
2231 struct scsi_cmnd *cmd;
2232 uint32_t *clr_ptr;
2233 uint32_t index;
2234 uint32_t handle;
2235 uint16_t cnt;
2236 uint16_t req_cnt;
2237 uint16_t tot_dsds;
2238 struct device_reg_82xx __iomem *reg;
2239 uint32_t dbval;
2240 uint32_t *fcp_dl;
2241 uint8_t additional_cdb_len;
2242 struct ct6_dsd *ctx;
2243 struct scsi_qla_host *vha = sp->fcport->vha;
2244 struct qla_hw_data *ha = vha->hw;
2245 struct req_que *req = NULL;
2246 struct rsp_que *rsp = NULL;
2247 char tag[2];
2248
2249 /* Setup device pointers. */
2250 ret = 0;
2251 reg = &ha->iobase->isp82;
2252 cmd = sp->cmd;
2253 req = vha->req;
2254 rsp = ha->rsp_q_map[0];
2255
2256 /* So we know we haven't pci_map'ed anything yet */
2257 tot_dsds = 0;
2258
2259 dbval = 0x04 | (ha->portnum << 5);
2260
2261 /* Send marker if required */
2262 if (vha->marker_needed != 0) {
2263 if (qla2x00_marker(vha, req,
2264 rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
2265 ql_log(ql_log_warn, vha, 0x300c,
2266 "qla2x00_marker failed for cmd=%p.\n", cmd);
2267 return QLA_FUNCTION_FAILED;
2268 }
2269 vha->marker_needed = 0;
2270 }
2271
2272 /* Acquire ring specific lock */
2273 spin_lock_irqsave(&ha->hardware_lock, flags);
2274
2275 /* Check for room in outstanding command list. */
2276 handle = req->current_outstanding_cmd;
2277 for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
2278 handle++;
2279 if (handle == MAX_OUTSTANDING_COMMANDS)
2280 handle = 1;
2281 if (!req->outstanding_cmds[handle])
2282 break;
2283 }
2284 if (index == MAX_OUTSTANDING_COMMANDS)
2285 goto queuing_error;
2286
2287 /* Map the sg table so we have an accurate count of sg entries needed */
2288 if (scsi_sg_count(cmd)) {
2289 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
2290 scsi_sg_count(cmd), cmd->sc_data_direction);
2291 if (unlikely(!nseg))
2292 goto queuing_error;
2293 } else
2294 nseg = 0;
2295
2296 tot_dsds = nseg;
2297
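	/*
	 * Illustrative note, not from the original source: commands whose
	 * DSD count exceeds the ql2xshiftctondsd threshold go out as Command
	 * Type 6 IOCBs, which carry the FCP_CMND IU in a separate DMA buffer
	 * and chain their DSDs through lists from dl_dma_pool; smaller
	 * commands take the plain Command Type 7 path where every DSD rides
	 * in the IOCB and its continuation entries.
	 */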
2298 if (tot_dsds > ql2xshiftctondsd) {
2299 struct cmd_type_6 *cmd_pkt;
2300 uint16_t more_dsd_lists = 0;
2301 struct dsd_dma *dsd_ptr;
2302 uint16_t i;
2303
2304 more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
2305 if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
2306 ql_dbg(ql_dbg_io, vha, 0x300d,
2307 "Num of DSD list %d is than %d for cmd=%p.\n",
2308 more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
2309 cmd);
2310 goto queuing_error;
2311 }
2312
2313 if (more_dsd_lists <= ha->gbl_dsd_avail)
2314 goto sufficient_dsds;
2315 else
2316 more_dsd_lists -= ha->gbl_dsd_avail;
2317
2318 for (i = 0; i < more_dsd_lists; i++) {
2319 dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
2320 if (!dsd_ptr) {
2321 ql_log(ql_log_fatal, vha, 0x300e,
2322 "Failed to allocate memory for dsd_dma "
2323 "for cmd=%p.\n", cmd);
2324 goto queuing_error;
2325 }
2326
2327 dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
2328 GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
2329 if (!dsd_ptr->dsd_addr) {
2330 kfree(dsd_ptr);
2331 ql_log(ql_log_fatal, vha, 0x300f,
2332 "Failed to allocate memory for dsd_addr "
2333 "for cmd=%p.\n", cmd);
2334 goto queuing_error;
2335 }
2336 list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
2337 ha->gbl_dsd_avail++;
2338 }
2339
2340sufficient_dsds:
2341 req_cnt = 1;
2342
2343 if (req->cnt < (req_cnt + 2)) {
2344 cnt = (uint16_t)RD_REG_DWORD_RELAXED(
2345 &reg->req_q_out[0]);
2346 if (req->ring_index < cnt)
2347 req->cnt = cnt - req->ring_index;
2348 else
2349 req->cnt = req->length -
2350 (req->ring_index - cnt);
2351 }
2352
2353 if (req->cnt < (req_cnt + 2))
2354 goto queuing_error;
2355
2356 ctx = sp->ctx = mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
2357 if (!sp->ctx) {
2358 ql_log(ql_log_fatal, vha, 0x3010,
2359 "Failed to allocate ctx for cmd=%p.\n", cmd);
2360 goto queuing_error;
2361 }
2362 memset(ctx, 0, sizeof(struct ct6_dsd));
2363 ctx->fcp_cmnd = dma_pool_alloc(ha->fcp_cmnd_dma_pool,
2364 GFP_ATOMIC, &ctx->fcp_cmnd_dma);
2365 if (!ctx->fcp_cmnd) {
2366 ql_log(ql_log_fatal, vha, 0x3011,
2367 "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
2368 goto queuing_error_fcp_cmnd;
2369 }
2370
2371 /* Initialize the DSD list and dma handle */
2372 INIT_LIST_HEAD(&ctx->dsd_list);
2373 ctx->dsd_use_cnt = 0;
2374
2375 if (cmd->cmd_len > 16) {
2376 additional_cdb_len = cmd->cmd_len - 16;
2377 if ((cmd->cmd_len % 4) != 0) {
 2378				/* A SCSI command longer than 16 bytes must be
 2379				 * a multiple of 4 bytes in length.
2380 */
2381 ql_log(ql_log_warn, vha, 0x3012,
2382 "scsi cmd len %d not multiple of 4 "
2383 "for cmd=%p.\n", cmd->cmd_len, cmd);
2384 goto queuing_error_fcp_cmnd;
2385 }
2386 ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
2387 } else {
2388 additional_cdb_len = 0;
2389 ctx->fcp_cmnd_len = 12 + 16 + 4;
2390 }
2391
2392 cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
2393 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2394
2395 /* Zero out remaining portion of packet. */
2396 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
2397 clr_ptr = (uint32_t *)cmd_pkt + 2;
2398 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2399 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2400
2401 /* Set NPORT-ID and LUN number*/
2402 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2403 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2404 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2405 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2406 cmd_pkt->vp_index = sp->fcport->vp_idx;
2407
2408 /* Build IOCB segments */
2409 if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
2410 goto queuing_error_fcp_cmnd;
2411
2412 int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
2413 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
2414
2415 /* build FCP_CMND IU */
2416 memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd));
2417 int_to_scsilun(sp->cmd->device->lun, &ctx->fcp_cmnd->lun);
2418 ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
2419
2420 if (cmd->sc_data_direction == DMA_TO_DEVICE)
2421 ctx->fcp_cmnd->additional_cdb_len |= 1;
2422 else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
2423 ctx->fcp_cmnd->additional_cdb_len |= 2;
2424
2425 /*
2426 * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
2427 */
2428 if (scsi_populate_tag_msg(cmd, tag)) {
2429 switch (tag[0]) {
2430 case HEAD_OF_QUEUE_TAG:
2431 ctx->fcp_cmnd->task_attribute =
2432 TSK_HEAD_OF_QUEUE;
2433 break;
2434 case ORDERED_QUEUE_TAG:
2435 ctx->fcp_cmnd->task_attribute =
2436 TSK_ORDERED;
2437 break;
2438 }
2439 }
2440
Saurav Kashyapa00f6292011-11-18 09:03:19 -08002441 /* Populate the FCP_PRIO. */
2442 if (ha->flags.fcp_prio_enabled)
2443 ctx->fcp_cmnd->task_attribute |=
2444 sp->fcport->fcp_prio << 3;
2445
Giridhar Malavali5162cf02011-11-18 09:03:18 -08002446 memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
2447
2448 fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
2449 additional_cdb_len);
2450 *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
2451
2452 cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
2453 cmd_pkt->fcp_cmnd_dseg_address[0] =
2454 cpu_to_le32(LSD(ctx->fcp_cmnd_dma));
2455 cmd_pkt->fcp_cmnd_dseg_address[1] =
2456 cpu_to_le32(MSD(ctx->fcp_cmnd_dma));
2457
2458 sp->flags |= SRB_FCP_CMND_DMA_VALID;
2459 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
 2460		/* Set total IOCB entry count. */
2461 cmd_pkt->entry_count = (uint8_t)req_cnt;
2462 /* Specify response queue number where
2463 * completion should happen
2464 */
2465 cmd_pkt->entry_status = (uint8_t) rsp->id;
2466 } else {
2467 struct cmd_type_7 *cmd_pkt;
2468 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
2469 if (req->cnt < (req_cnt + 2)) {
2470 cnt = (uint16_t)RD_REG_DWORD_RELAXED(
2471 &reg->req_q_out[0]);
2472 if (req->ring_index < cnt)
2473 req->cnt = cnt - req->ring_index;
2474 else
2475 req->cnt = req->length -
2476 (req->ring_index - cnt);
2477 }
2478 if (req->cnt < (req_cnt + 2))
2479 goto queuing_error;
2480
2481 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
2482 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2483
2484 /* Zero out remaining portion of packet. */
2485 /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
2486 clr_ptr = (uint32_t *)cmd_pkt + 2;
2487 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2488 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2489
2490 /* Set NPORT-ID and LUN number*/
2491 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2492 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2493 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2494 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2495 cmd_pkt->vp_index = sp->fcport->vp_idx;
2496
2497 int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
2498 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
2499 sizeof(cmd_pkt->lun));
2500
2501 /*
2502 * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
2503 */
2504 if (scsi_populate_tag_msg(cmd, tag)) {
2505 switch (tag[0]) {
2506 case HEAD_OF_QUEUE_TAG:
2507 cmd_pkt->task = TSK_HEAD_OF_QUEUE;
2508 break;
2509 case ORDERED_QUEUE_TAG:
2510 cmd_pkt->task = TSK_ORDERED;
2511 break;
2512 }
2513 }
2514
Saurav Kashyapa00f6292011-11-18 09:03:19 -08002515 /* Populate the FCP_PRIO. */
2516 if (ha->flags.fcp_prio_enabled)
2517 cmd_pkt->task |= sp->fcport->fcp_prio << 3;
2518
Giridhar Malavali5162cf02011-11-18 09:03:18 -08002519 /* Load SCSI command packet. */
2520 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
2521 host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
2522
2523 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
2524
2525 /* Build IOCB segments */
2526 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);
2527
 2528		/* Set total IOCB entry count. */
2529 cmd_pkt->entry_count = (uint8_t)req_cnt;
2530 /* Specify response queue number where
2531 * completion should happen.
2532 */
2533 cmd_pkt->entry_status = (uint8_t) rsp->id;
2534
2535 }
2536 /* Build command packet. */
2537 req->current_outstanding_cmd = handle;
2538 req->outstanding_cmds[handle] = sp;
2539 sp->handle = handle;
2540 sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
2541 req->cnt -= req_cnt;
2542 wmb();
2543
2544 /* Adjust ring index. */
2545 req->ring_index++;
2546 if (req->ring_index == req->length) {
2547 req->ring_index = 0;
2548 req->ring_ptr = req->ring;
2549 } else
2550 req->ring_ptr++;
2551
2552 sp->flags |= SRB_DMA_VALID;
2553
2554 /* Set chip new ring index. */
2555 /* write, read and verify logic */
2556 dbval = dbval | (req->id << 8) | (req->ring_index << 16);
2557 if (ql2xdbwr)
2558 qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
2559 else {
2560 WRT_REG_DWORD(
2561 (unsigned long __iomem *)ha->nxdb_wr_ptr,
2562 dbval);
2563 wmb();
2564 while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
2565 WRT_REG_DWORD(
2566 (unsigned long __iomem *)ha->nxdb_wr_ptr,
2567 dbval);
2568 wmb();
2569 }
2570 }
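	/*
	 * Illustrative note, not from the original source: the ISP82xx
	 * doorbell is not a plain posted register.  When ql2xdbwr does not
	 * select the qla82xx_wr_32() path, the driver writes dbval, reads it
	 * back through nxdb_rd_ptr, and rewrites until the read-back matches.
	 * dbval itself packs, low bits first: the doorbell opcode (0x04),
	 * the PCI function number (bits 5+), the request-queue id (bits 8+),
	 * and the new ring index (bits 16+).
	 */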
2571
2572 /* Manage unprocessed RIO/ZIO commands in response queue. */
2573 if (vha->flags.process_response_queue &&
2574 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
2575 qla24xx_process_response_queue(vha, rsp);
2576
2577 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2578 return QLA_SUCCESS;
2579
2580queuing_error_fcp_cmnd:
2581 dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
2582queuing_error:
2583 if (tot_dsds)
2584 scsi_dma_unmap(cmd);
2585
2586 if (sp->ctx) {
2587 mempool_free(sp->ctx, ha->ctx_mempool);
2588 sp->ctx = NULL;
2589 }
2590 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2591
2592 return QLA_FUNCTION_FAILED;
2593}
2594
Andrew Vasquezac280b62009-08-20 11:06:05 -07002595int
2596qla2x00_start_sp(srb_t *sp)
2597{
2598 int rval;
2599 struct qla_hw_data *ha = sp->fcport->vha->hw;
2600 void *pkt;
2601 struct srb_ctx *ctx = sp->ctx;
2602 unsigned long flags;
2603
2604 rval = QLA_FUNCTION_FAILED;
2605 spin_lock_irqsave(&ha->hardware_lock, flags);
Giridhar Malavalid94d10e2010-07-23 15:28:23 +05002606 pkt = qla2x00_alloc_iocbs(sp->fcport->vha, sp);
Saurav Kashyap7c3df132011-07-14 12:00:13 -07002607 if (!pkt) {
2608 ql_log(ql_log_warn, sp->fcport->vha, 0x700c,
2609 "qla2x00_alloc_iocbs failed.\n");
Andrew Vasquezac280b62009-08-20 11:06:05 -07002610 goto done;
Saurav Kashyap7c3df132011-07-14 12:00:13 -07002611 }
Andrew Vasquezac280b62009-08-20 11:06:05 -07002612
2613 rval = QLA_SUCCESS;
2614 switch (ctx->type) {
2615 case SRB_LOGIN_CMD:
2616 IS_FWI2_CAPABLE(ha) ?
Andrew Vasquez5ff1d582010-05-04 15:01:26 -07002617 qla24xx_login_iocb(sp, pkt) :
Andrew Vasquezac280b62009-08-20 11:06:05 -07002618 qla2x00_login_iocb(sp, pkt);
2619 break;
2620 case SRB_LOGOUT_CMD:
2621 IS_FWI2_CAPABLE(ha) ?
Andrew Vasquez5ff1d582010-05-04 15:01:26 -07002622 qla24xx_logout_iocb(sp, pkt) :
Andrew Vasquezac280b62009-08-20 11:06:05 -07002623 qla2x00_logout_iocb(sp, pkt);
2624 break;
Giridhar Malavali9a069e12010-01-12 13:02:47 -08002625 case SRB_ELS_CMD_RPT:
2626 case SRB_ELS_CMD_HST:
2627 qla24xx_els_iocb(sp, pkt);
2628 break;
2629 case SRB_CT_CMD:
Harish Zunjarrao9bc4f4f2010-07-23 15:28:32 +05002630 IS_FWI2_CAPABLE(ha) ?
Andrew Vasquez57807902011-11-18 09:03:20 -08002631 qla24xx_ct_iocb(sp, pkt) :
2632 qla2x00_ct_iocb(sp, pkt);
Giridhar Malavali9a069e12010-01-12 13:02:47 -08002633 break;
Andrew Vasquez5ff1d582010-05-04 15:01:26 -07002634 case SRB_ADISC_CMD:
2635 IS_FWI2_CAPABLE(ha) ?
2636 qla24xx_adisc_iocb(sp, pkt) :
2637 qla2x00_adisc_iocb(sp, pkt);
2638 break;
Madhuranath Iyengar38222632010-05-04 15:01:29 -07002639 case SRB_TM_CMD:
2640 qla24xx_tm_iocb(sp, pkt);
2641 break;
Andrew Vasquezac280b62009-08-20 11:06:05 -07002642 default:
2643 break;
2644 }
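	/*
	 * Illustrative note, not from the original source: the switch above
	 * is the control-SRB dispatch table.  For most types the
	 * FWI2-capable (24xx-style) builder is chosen at runtime, with the
	 * mailbox-IOCB variant covering legacy ISPs:
	 *
	 *	SRB_LOGIN_CMD  -> qla24xx_login_iocb  / qla2x00_login_iocb
	 *	SRB_LOGOUT_CMD -> qla24xx_logout_iocb / qla2x00_logout_iocb
	 *	SRB_ADISC_CMD  -> qla24xx_adisc_iocb  / qla2x00_adisc_iocb
	 *	SRB_CT_CMD     -> qla24xx_ct_iocb     / qla2x00_ct_iocb
	 *
	 * ELS passthrough and SRB_TM_CMD have 24xx builders only.
	 */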
2645
2646 wmb();
Giridhar Malavali5162cf02011-11-18 09:03:18 -08002647 qla2x00_start_iocbs(sp->fcport->vha, ha->req_q_map[0]);
Andrew Vasquezac280b62009-08-20 11:06:05 -07002648done:
2649 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2650 return rval;
2651}