/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c)  2003-2008 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"

#include <linux/blkdev.h>
#include <linux/delay.h>

#include <scsi/scsi_tcq.h>

static request_t *qla2x00_req_pkt(struct scsi_qla_host *, struct req_que *,
							struct rsp_que *rsp);
static void qla2x00_isp_cmd(struct scsi_qla_host *, struct req_que *);

static void qla25xx_set_que(srb_t *, struct rsp_que **);
/**
 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
 * @sp: SRB command to process
 *
 * Returns the proper CF_* direction based on CDB.
 */
static inline uint16_t
qla2x00_get_cmd_direction(srb_t *sp)
{
	uint16_t cflags;

	cflags = 0;

	/* Set transfer direction */
	if (sp->cmd->sc_data_direction == DMA_TO_DEVICE) {
		cflags = CF_WRITE;
		sp->fcport->vha->hw->qla_stats.output_bytes +=
		    scsi_bufflen(sp->cmd);
	} else if (sp->cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cflags = CF_READ;
		sp->fcport->vha->hw->qla_stats.input_bytes +=
		    scsi_bufflen(sp->cmd);
	}
	return (cflags);
}

/**
 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 * Continuation Type 0 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_32(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 3) {
		iocbs += (dsds - 3) / 7;
		if ((dsds - 3) % 7)
			iocbs++;
	}
	return (iocbs);
}

/**
 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_64(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 2) {
		iocbs += (dsds - 2) / 5;
		if ((dsds - 2) % 5)
			iocbs++;
	}
	return (iocbs);
}

/**
 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
 * @vha: HA context
 *
 * Returns a pointer to the Continuation Type 0 IOCB packet.
 */
static inline cont_entry_t *
qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
{
	cont_entry_t *cont_pkt;
	struct req_que *req = vha->req;
	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	*((uint32_t *)(&cont_pkt->entry_type)) =
	    __constant_cpu_to_le32(CONTINUE_TYPE);

	return (cont_pkt);
}

/**
 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
 * @vha: HA context
 *
 * Returns a pointer to the continuation type 1 IOCB packet.
 */
static inline cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha)
{
	cont_a64_entry_t *cont_pkt;

	struct req_que *req = vha->req;
	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_a64_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	*((uint32_t *)(&cont_pkt->entry_type)) =
	    __constant_cpu_to_le32(CONTINUE_A64_TYPE);

	return (cont_pkt);
}

static inline int
qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
{
	uint8_t	guard = scsi_host_get_guard(sp->cmd->device->host);

	/* We only support T10 DIF right now */
	if (guard != SHOST_DIX_GUARD_CRC) {
		DEBUG2(printk(KERN_ERR "Unsupported guard: %d\n", guard));
		return 0;
	}

	/* We always use DIFF Bundling for best performance */
	*fw_prot_opts = 0;

	/* Translate SCSI opcode to a protection opcode */
	switch (scsi_get_prot_op(sp->cmd)) {
	case SCSI_PROT_READ_STRIP:
		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
		break;
	case SCSI_PROT_WRITE_INSERT:
		*fw_prot_opts |= PO_MODE_DIF_INSERT;
		break;
	case SCSI_PROT_READ_INSERT:
		*fw_prot_opts |= PO_MODE_DIF_INSERT;
		break;
	case SCSI_PROT_WRITE_STRIP:
		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
		break;
	case SCSI_PROT_READ_PASS:
		*fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	case SCSI_PROT_WRITE_PASS:
		*fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	default:	/* Normal Request */
		*fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	}

	return scsi_prot_sg_count(sp->cmd);
}

/**
 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 2 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t	avail_dsds;
	uint32_t	*cur_dsd;
	scsi_qla_host_t	*vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = sp->cmd;

	/* Update entry type to indicate Command Type 2 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	vha = sp->fcport->vha;
	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Three DSDs are available in the Command Type 2 IOCB */
	avail_dsds = 3;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		cont_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Seven DSDs are available in the Continuation
			 * Type 0 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
			cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
			avail_dsds = 7;
		}

		*cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}

/**
 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t	avail_dsds;
	uint32_t	*cur_dsd;
	scsi_qla_host_t	*vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = sp->cmd;

	/* Update entry type to indicate Command Type 3 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_A64_TYPE);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	vha = sp->fcport->vha;
	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Two DSDs are available in the Command Type 3 IOCB */
	avail_dsds = 2;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t	sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}

/**
 * qla2x00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2x00_start_scsi(srb_t *sp)
{
	int		ret, nseg;
	unsigned long   flags;
	scsi_qla_host_t	*vha;
	struct scsi_cmnd *cmd;
	uint32_t	*clr_ptr;
	uint32_t        index;
	uint32_t	handle;
	cmd_entry_t	*cmd_pkt;
	uint16_t	cnt;
	uint16_t	req_cnt;
	uint16_t	tot_dsds;
	struct device_reg_2xxx __iomem *reg;
	struct qla_hw_data *ha;
	struct req_que *req;
	struct rsp_que *rsp;

	/* Setup device pointers. */
	ret = 0;
	vha = sp->fcport->vha;
	ha = vha->hw;
	reg = &ha->iobase->isp;
	cmd = sp->cmd;
	req = ha->req_q_map[0];
	rsp = ha->rsp_q_map[0];
	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL)
							!= QLA_SUCCESS)
			return (QLA_FUNCTION_FAILED);
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		handle++;
		if (handle == MAX_OUTSTANDING_COMMANDS)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == MAX_OUTSTANDING_COMMANDS)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;

	/* Calculate the number of request entries needed. */
	req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
	}
	if (req->cnt < (req_cnt + 2))
		goto queuing_error;

	/* Build command packet */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (cmd_entry_t *)req->ring_ptr;
	cmd_pkt->handle = handle;
	/* Zero out remaining portion of packet. */
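	/* First 8 bytes (entry header and handle) are already initialized above. */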
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set target ID and LUN number */
	SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
	cmd_pkt->lun = cpu_to_le16(sp->cmd->device->lun);

	/* Update tagged queuing modifier */
	cmd_pkt->control_flags = __constant_cpu_to_le16(CF_SIMPLE_TAG);

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
	RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));	/* PCI Posting. */

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla2x00_process_response_queue(rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return (QLA_SUCCESS);

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (QLA_FUNCTION_FAILED);
}

/**
 * qla2x00_marker() - Send a marker IOCB to the firmware.
 * @vha: HA context
 * @req: request queue
 * @rsp: response queue
 * @loop_id: loop ID
 * @lun: LUN
 * @type: marker modifier
 *
 * Can be called from both normal and interrupt context.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
__qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
			struct rsp_que *rsp, uint16_t loop_id,
			uint16_t lun, uint8_t type)
{
	mrk_entry_t *mrk;
	struct mrk_entry_24xx *mrk24;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	mrk24 = NULL;
	mrk = (mrk_entry_t *)qla2x00_req_pkt(vha, req, rsp);
	if (mrk == NULL) {
		DEBUG2_3(printk("%s(%ld): failed to allocate Marker IOCB.\n",
		    __func__, base_vha->host_no));

		return (QLA_FUNCTION_FAILED);
	}

	mrk->entry_type = MARKER_TYPE;
	mrk->modifier = type;
	if (type != MK_SYNC_ALL) {
		if (IS_FWI2_CAPABLE(ha)) {
			mrk24 = (struct mrk_entry_24xx *) mrk;
			mrk24->nport_handle = cpu_to_le16(loop_id);
			mrk24->lun[1] = LSB(lun);
			mrk24->lun[2] = MSB(lun);
			host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
			mrk24->vp_index = vha->vp_idx;
			mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
		} else {
			SET_TARGET_ID(ha, mrk->target, loop_id);
			mrk->lun = cpu_to_le16(lun);
		}
	}
	wmb();

	qla2x00_isp_cmd(vha, req);

	return (QLA_SUCCESS);
}

int
qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
		struct rsp_que *rsp, uint16_t loop_id, uint16_t lun,
		uint8_t type)
{
	int ret;
	unsigned long flags = 0;

	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
	ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type);
	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);

	return (ret);
}

/**
 * qla2x00_req_pkt() - Retrieve a request packet from the request ring.
 * @vha: HA context
 * @req: request queue
 * @rsp: response queue
 *
 * Note: The caller must hold the hardware lock before calling this routine.
 *
 * Returns NULL if function failed, else, a pointer to the request packet.
 */
static request_t *
qla2x00_req_pkt(struct scsi_qla_host *vha, struct req_que *req,
		struct rsp_que *rsp)
{
	struct qla_hw_data *ha = vha->hw;
	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
	request_t	*pkt = NULL;
	uint16_t	cnt;
	uint32_t	*dword_ptr;
	uint32_t	timer;
	uint16_t	req_cnt = 1;

	/* Wait 1 second for slot. */
	for (timer = HZ; timer; timer--) {
		if ((req_cnt + 2) >= req->cnt) {
			/* Calculate number of free request entries. */
			if (ha->mqenable)
				cnt = (uint16_t)
					RD_REG_DWORD(&reg->isp25mq.req_q_out);
			else {
				if (IS_QLA82XX(ha))
					cnt = (uint16_t)RD_REG_DWORD(
					    &reg->isp82.req_q_out);
				else if (IS_FWI2_CAPABLE(ha))
					cnt = (uint16_t)RD_REG_DWORD(
						&reg->isp24.req_q_out);
				else
					cnt = qla2x00_debounce_register(
						ISP_REQ_Q_OUT(ha, &reg->isp));
			}
			if (req->ring_index < cnt)
				req->cnt = cnt - req->ring_index;
			else
				req->cnt = req->length -
				    (req->ring_index - cnt);
		}
		/* If room for request in request ring. */
		if ((req_cnt + 2) < req->cnt) {
			req->cnt--;
			pkt = req->ring_ptr;

			/* Zero out packet. */
			dword_ptr = (uint32_t *)pkt;
			for (cnt = 0; cnt < REQUEST_ENTRY_SIZE / 4; cnt++)
				*dword_ptr++ = 0;

			/* Set entry count. */
			pkt->entry_count = 1;

			break;
		}

		/* Release ring specific lock */
		spin_unlock_irq(&ha->hardware_lock);

		udelay(2);   /* 2 us */

		/* Check for pending interrupts. */
		/* During init we issue marker directly */
		if (!vha->marker_needed && !vha->flags.init_done)
			qla2x00_poll(rsp);
		spin_lock_irq(&ha->hardware_lock);
	}
	if (!pkt) {
		DEBUG2_3(printk("%s(): **** FAILED ****\n", __func__));
	}

	return (pkt);
}

/**
 * qla2x00_isp_cmd() - Modify the request ring pointer.
 * @vha: HA context
 * @req: request queue
 *
 * Note: The caller must hold the hardware lock before calling this routine.
 */
static void
qla2x00_isp_cmd(struct scsi_qla_host *vha, struct req_que *req)
{
	struct qla_hw_data *ha = vha->hw;
	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
	struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;

	DEBUG5(printk("%s(): IOCB data:\n", __func__));
	DEBUG5(qla2x00_dump_buffer(
	    (uint8_t *)req->ring_ptr, REQUEST_ENTRY_SIZE));

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	/* Set chip new ring index. */
	if (IS_QLA82XX(ha)) {
		uint32_t dbval = 0x04 | (ha->portnum << 5);

		/* write, read and verify logic */
		dbval = dbval | (req->id << 8) | (req->ring_index << 16);
		if (ql2xdbwr)
			qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
		else {
			WRT_REG_DWORD(
				(unsigned long __iomem *)ha->nxdb_wr_ptr,
				dbval);
			wmb();
			while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
				WRT_REG_DWORD((unsigned long __iomem *)
					ha->nxdb_wr_ptr, dbval);
				wmb();
			}
		}
	} else if (ha->mqenable) {
		/* Set chip new ring index. */
		WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index);
		RD_REG_DWORD(&ioreg->hccr);
	} else {
		if (IS_FWI2_CAPABLE(ha)) {
			WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
			RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
		} else {
			WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
				req->ring_index);
			RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
		}
	}

}

/**
 * qla24xx_calc_iocbs() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
inline uint16_t
qla24xx_calc_iocbs(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 1) {
		iocbs += (dsds - 1) / 5;
		if ((dsds - 1) % 5)
			iocbs++;
	}
	DEBUG3(printk(KERN_DEBUG "%s(): Required PKT(s) = %d\n",
	    __func__, iocbs));
	return iocbs;
}

/**
 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
 * IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
inline void
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t	avail_dsds;
	uint32_t	*cur_dsd;
	scsi_qla_host_t	*vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;
	struct req_que *req;

	cmd = sp->cmd;

	/* Update entry type to indicate Command Type 3 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE_7);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	vha = sp->fcport->vha;
	req = vha->req;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->task_mgmt_flags =
		    __constant_cpu_to_le16(TMF_WRITE_DATA);
		sp->fcport->vha->hw->qla_stats.output_bytes +=
		    scsi_bufflen(sp->cmd);
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->task_mgmt_flags =
		    __constant_cpu_to_le16(TMF_READ_DATA);
		sp->fcport->vha->hw->qla_stats.input_bytes +=
		    scsi_bufflen(sp->cmd);
	}

	/* One DSD is available in the Command Type 3 IOCB */
	avail_dsds = 1;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */

	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t	sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}

struct fw_dif_context {
	uint32_t ref_tag;
	uint16_t app_tag;
	uint8_t ref_tag_mask[4];	/* Validation/Replacement Mask */
	uint8_t app_tag_mask[2];	/* Validation/Replacement Mask */
};

/*
 * qla24xx_set_t10dif_tags - Extract Ref and App tags from SCSI command
 *
 */
static inline void
qla24xx_set_t10dif_tags(struct scsi_cmnd *cmd, struct fw_dif_context *pkt,
    unsigned int protcnt)
{
	struct sd_dif_tuple *spt;
	unsigned char op = scsi_get_prot_op(cmd);

	switch (scsi_get_prot_type(cmd)) {
	/* For TYPE 0 protection: no checking */
	case SCSI_PROT_DIF_TYPE0:
		pkt->ref_tag_mask[0] = 0x00;
		pkt->ref_tag_mask[1] = 0x00;
		pkt->ref_tag_mask[2] = 0x00;
		pkt->ref_tag_mask[3] = 0x00;
		break;

	/*
	 * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
	 * match LBA in CDB + N
	 */
	case SCSI_PROT_DIF_TYPE2:
		break;

	/* For Type 3 protection: 16 bit GUARD only */
	case SCSI_PROT_DIF_TYPE3:
		pkt->ref_tag_mask[0] = pkt->ref_tag_mask[1] =
			pkt->ref_tag_mask[2] = pkt->ref_tag_mask[3] =
								0x00;
		break;

	/*
	 * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
	 * 16 bit app tag.
	 */
	case SCSI_PROT_DIF_TYPE1:
		if (!ql2xenablehba_err_chk)
			break;

		if (protcnt && (op == SCSI_PROT_WRITE_STRIP ||
		    op == SCSI_PROT_WRITE_PASS)) {
			spt = page_address(sg_page(scsi_prot_sglist(cmd))) +
			    scsi_prot_sglist(cmd)[0].offset;
			DEBUG18(printk(KERN_DEBUG
			    "%s(): LBA from user %p, lba = 0x%x\n",
			    __func__, spt, (int)spt->ref_tag));
			pkt->ref_tag = swab32(spt->ref_tag);
			pkt->app_tag_mask[0] = 0x0;
			pkt->app_tag_mask[1] = 0x0;
		} else {
			pkt->ref_tag = cpu_to_le32((uint32_t)
			    (0xffffffff & scsi_get_lba(cmd)));
			pkt->app_tag = __constant_cpu_to_le16(0);
			pkt->app_tag_mask[0] = 0x0;
			pkt->app_tag_mask[1] = 0x0;
		}
		/* enable ALL bytes of the ref tag */
		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;
	}

	DEBUG18(printk(KERN_DEBUG
	    "%s(): Setting protection Tags: (BIG) ref tag = 0x%x,"
	    " app tag = 0x%x, prot SG count %d , cmd lba 0x%x,"
	    " prot_type=%u\n", __func__, pkt->ref_tag, pkt->app_tag, protcnt,
	    (int)scsi_get_lba(cmd), scsi_get_prot_type(cmd)));
}


static int
qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
	uint16_t tot_dsds)
{
	void *next_dsd;
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg;
	uint32_t *cur_dsd = dsd;
	int	i;
	uint16_t	used_dsds = tot_dsds;

	uint8_t		*cp;

	scsi_for_each_sg(sp->cmd, sg, tot_dsds, i) {
		dma_addr_t	sle_dma;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
					QLA_DSDS_PER_IOCB : used_dsds;
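			/* Each DSD is 12 bytes (addr low, addr high, length);
			 * one extra slot is reserved for the link to the next
			 * list or the null terminator. */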
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;

			/* allocate tracking DS */
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr)
				return 1;

			/* allocate new list */
			dsd_ptr->dsd_addr = next_dsd =
			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
				&dsd_ptr->dsd_list_dma);

			if (!next_dsd) {
				/*
				 * Need to cleanup only this dsd_ptr, rest
				 * will be done by sp_free_dma()
				 */
				kfree(dsd_ptr);
				return 1;
			}

			list_add_tail(&dsd_ptr->list,
			    &((struct crc_context *)sp->ctx)->dsd_list);

			sp->flags |= SRB_CRC_CTX_DSD_VALID;

			/* add new list to cmd iocb or last list */
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = dsd_list_len;
			cur_dsd = (uint32_t *)next_dsd;
		}
		sle_dma = sg_dma_address(sg);
		DEBUG18(printk("%s(): %p, sg entry %d - addr =0x%x 0x%x,"
		    " len =%d\n", __func__ , cur_dsd, i, LSD(sle_dma),
		    MSD(sle_dma), sg_dma_len(sg)));
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;

		if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) {
			cp = page_address(sg_page(sg)) + sg->offset;
			DEBUG18(printk("%s(): User Data buffer= %p:\n",
			    __func__ , cp));
		}
	}
	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	return 0;
}

static int
qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
							uint32_t *dsd,
	uint16_t tot_dsds)
{
	void *next_dsd;
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg;
	int	i;
	struct scsi_cmnd *cmd;
	uint32_t *cur_dsd = dsd;
	uint16_t	used_dsds = tot_dsds;

	uint8_t		*cp;


	cmd = sp->cmd;
	scsi_for_each_prot_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t	sle_dma;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
						QLA_DSDS_PER_IOCB : used_dsds;
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;

			/* allocate tracking DS */
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr)
				return 1;

			/* allocate new list */
			dsd_ptr->dsd_addr = next_dsd =
			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
				&dsd_ptr->dsd_list_dma);

			if (!next_dsd) {
				/*
				 * Need to cleanup only this dsd_ptr, rest
				 * will be done by sp_free_dma()
				 */
				kfree(dsd_ptr);
				return 1;
			}

			list_add_tail(&dsd_ptr->list,
			    &((struct crc_context *)sp->ctx)->dsd_list);

			sp->flags |= SRB_CRC_CTX_DSD_VALID;

			/* add new list to cmd iocb or last list */
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = dsd_list_len;
			cur_dsd = (uint32_t *)next_dsd;
		}
		sle_dma = sg_dma_address(sg);
		if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) {
			DEBUG18(printk(KERN_DEBUG
			    "%s(): %p, sg entry %d - addr =0x%x"
			    "0x%x, len =%d\n", __func__ , cur_dsd, i,
			    LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg)));
		}
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));

		if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) {
			cp = page_address(sg_page(sg)) + sg->offset;
			DEBUG18(printk("%s(): Protection Data buffer = %p:\n",
			    __func__ , cp));
		}
		avail_dsds--;
	}
	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	return 0;
}

/**
 * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
 *							Type 6 IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type CRC_2 IOCB
 * @tot_dsds: Total number of segments to transfer
 * @tot_prot_dsds: Total number of protection segments to transfer
 * @fw_prot_opts: Firmware protection options (PO_MODE_DIF_*)
 */
static inline int
qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
    uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
{
	uint32_t		*cur_dsd, *fcp_dl;
	scsi_qla_host_t		*vha;
	struct scsi_cmnd	*cmd;
	struct scatterlist	*cur_seg;
	int			sgc;
	uint32_t		total_bytes;
	uint32_t		data_bytes;
	uint32_t		dif_bytes;
	uint8_t			bundling = 1;
	uint16_t		blk_size;
	uint8_t			*clr_ptr;
	struct crc_context	*crc_ctx_pkt = NULL;
	struct qla_hw_data	*ha;
	uint8_t			additional_fcpcdb_len;
	uint16_t		fcp_cmnd_len;
	struct fcp_cmnd		*fcp_cmnd;
	dma_addr_t		crc_ctx_dma;

	cmd = sp->cmd;

	sgc = 0;
	/* Update entry type to indicate Command Type CRC_2 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE_CRC_2);

	/* No data transfer */
	data_bytes = scsi_bufflen(cmd);
	if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
		DEBUG18(printk(KERN_INFO "%s: Zero data bytes or DMA-NONE %d\n",
		    __func__, data_bytes));
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return QLA_SUCCESS;
	}

	vha = sp->fcport->vha;
	ha = vha->hw;

	DEBUG18(printk(KERN_DEBUG
	    "%s(%ld): Executing cmd sp %p, pid=%ld, prot_op=%u.\n", __func__,
	    vha->host_no, sp, cmd->serial_number, scsi_get_prot_op(sp->cmd)));

	cmd_pkt->vp_index = sp->fcport->vp_idx;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->control_flags =
		    __constant_cpu_to_le16(CF_WRITE_DATA);
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->control_flags =
		    __constant_cpu_to_le16(CF_READ_DATA);
	}

	tot_prot_dsds = scsi_prot_sg_count(cmd);
	if (!tot_prot_dsds)
		bundling = 0;

	/* Allocate CRC context from global pool */
	crc_ctx_pkt = sp->ctx = dma_pool_alloc(ha->dl_dma_pool,
	    GFP_ATOMIC, &crc_ctx_dma);

	if (!crc_ctx_pkt)
		goto crc_queuing_error;

	/* Zero out CTX area. */
	clr_ptr = (uint8_t *)crc_ctx_pkt;
	memset(clr_ptr, 0, sizeof(*crc_ctx_pkt));

	crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;

	sp->flags |= SRB_CRC_CTX_DMA_VALID;

	/* Set handle */
	crc_ctx_pkt->handle = cmd_pkt->handle;

	INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);

	qla24xx_set_t10dif_tags(cmd, (struct fw_dif_context *)
	    &crc_ctx_pkt->ref_tag, tot_prot_dsds);

	cmd_pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
	cmd_pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
	cmd_pkt->crc_context_len = CRC_CONTEXT_LEN_FW;

	/* Determine SCSI command length -- align to 4 byte boundary */
 | 1102 | 	if (cmd->cmd_len > 16) { | 
 | 1103 | 		DEBUG18(printk(KERN_INFO "%s(): **** SCSI CMD > 16\n", | 
 | 1104 | 		    __func__)); | 
 | 1105 | 		additional_fcpcdb_len = cmd->cmd_len - 16; | 
 | 1106 | 		if ((cmd->cmd_len % 4) != 0) { | 
 | 1107 | 			/* SCSI cmd > 16 bytes must be multiple of 4 */ | 
 | 1108 | 			goto crc_queuing_error; | 
 | 1109 | 		} | 
 | 1110 | 		fcp_cmnd_len = 12 + cmd->cmd_len + 4; | 
 | 1111 | 	} else { | 
 | 1112 | 		additional_fcpcdb_len = 0; | 
 | 1113 | 		fcp_cmnd_len = 12 + 16 + 4; | 
 | 1114 | 	} | 
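 |  | 	/* | 
 |  | 	 * Worked example (illustrative only): a standard 16-byte CDB takes | 
 |  | 	 * the else branch, so fcp_cmnd_len = 12 + 16 + 4 = 32, that is, the | 
 |  | 	 * 12 FCP_CMND bytes ahead of the CDB (LUN and the task-control bytes | 
 |  | 	 * filled in below), the CDB itself, and the 4-byte FCP_DL stored at | 
 |  | 	 * cdb + 16 + additional_fcpcdb_len further down. | 
 |  | 	 */ | 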
 | 1115 |  | 
 | 1116 | 	fcp_cmnd = &crc_ctx_pkt->fcp_cmnd; | 
 | 1117 |  | 
 | 1118 | 	fcp_cmnd->additional_cdb_len = additional_fcpcdb_len; | 
 | 1119 | 	if (cmd->sc_data_direction == DMA_TO_DEVICE) | 
 | 1120 | 		fcp_cmnd->additional_cdb_len |= 1; | 
 | 1121 | 	else if (cmd->sc_data_direction == DMA_FROM_DEVICE) | 
 | 1122 | 		fcp_cmnd->additional_cdb_len |= 2; | 
 | 1123 |  | 
 | 1124 | 	int_to_scsilun(sp->cmd->device->lun, &fcp_cmnd->lun); | 
 | 1125 | 	memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len); | 
 | 1126 | 	cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len); | 
 | 1127 | 	cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32( | 
 | 1128 | 	    LSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF)); | 
 | 1129 | 	cmd_pkt->fcp_cmnd_dseg_address[1] = cpu_to_le32( | 
 | 1130 | 	    MSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF)); | 
 | 1131 | 	fcp_cmnd->task_attribute = 0; | 
 | 1132 | 	fcp_cmnd->task_managment = 0; | 
 | 1133 |  | 
 | 1134 | 	cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */ | 
 | 1135 |  | 
 | 1136 | 	DEBUG18(printk(KERN_INFO "%s(%ld): Total SG(s) Entries %d, Data" | 
 | 1137 | 	    " entries %d, data bytes %d, Protection entries %d\n", | 
 | 1138 | 	    __func__, vha->host_no, tot_dsds, (tot_dsds-tot_prot_dsds), | 
 | 1139 | 	    data_bytes, tot_prot_dsds)); | 
 | 1140 |  | 
 | 1141 | 	/* Compute DIF length and adjust the data length to include protection */ | 
 | 1142 | 	total_bytes = data_bytes; | 
 | 1143 | 	dif_bytes = 0; | 
 | 1144 | 	blk_size = cmd->device->sector_size; | 
 | 1145 | 	if (scsi_get_prot_type(cmd) == SCSI_PROT_DIF_TYPE1) { | 
 | 1146 | 		dif_bytes = (data_bytes / blk_size) * 8; | 
 | 1147 | 		total_bytes += dif_bytes; | 
 | 1148 | 	} | 
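 |  | 	/* | 
 |  | 	 * Illustrative arithmetic: with 512-byte sectors and an 8 KB Type 1 | 
 |  | 	 * protected transfer, dif_bytes = (8192 / 512) * 8 = 128 and | 
 |  | 	 * total_bytes becomes 8320; that adjusted value, not scsi_bufflen(), | 
 |  | 	 * is what is loaded into the IOCB byte count and FCP_DL below. | 
 |  | 	 */ | 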
 | 1149 |  | 
 | 1150 | 	if (!ql2xenablehba_err_chk) | 
 | 1151 | 		fw_prot_opts |= 0x10; /* Disable Guard tag checking */ | 
 | 1152 |  | 
 | 1153 | 	if (!bundling) { | 
 | 1154 | 		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address; | 
 | 1155 | 	} else { | 
 | 1156 | 		/* | 
 | 1157 | 		 * Configure bundling if protection data must be fetched | 
 | 1158 | 		 * with interleaving PCI accesses. | 
 | 1159 | 		 */ | 
 | 1160 | 		fw_prot_opts |= PO_ENABLE_DIF_BUNDLING; | 
 | 1161 | 		crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes); | 
 | 1162 | 		crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds - | 
 | 1163 | 							tot_prot_dsds); | 
 | 1164 | 		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address; | 
 | 1165 | 	} | 
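 |  | 	/* | 
 |  | 	 * Reading note, inferred from the assignments above: in the bundling | 
 |  | 	 * case the CRC context keeps separate DSD chains for data and DIF; | 
 |  | 	 * dseg_count counts only the data segments (tot_dsds - tot_prot_dsds) | 
 |  | 	 * and the DIF chain is attached later from u.bundling.dif_address. | 
 |  | 	 */ | 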
 | 1166 |  | 
 | 1167 | 	/* Finish the common fields of CRC pkt */ | 
 | 1168 | 	crc_ctx_pkt->blk_size = cpu_to_le16(blk_size); | 
 | 1169 | 	crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts); | 
 | 1170 | 	crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes); | 
 | 1171 | 	crc_ctx_pkt->guard_seed = __constant_cpu_to_le16(0); | 
 | 1172 | 	/* Fibre channel byte count */ | 
 | 1173 | 	cmd_pkt->byte_count = cpu_to_le32(total_bytes); | 
 | 1174 | 	fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 + | 
 | 1175 | 	    additional_fcpcdb_len); | 
 | 1176 | 	*fcp_dl = htonl(total_bytes); | 
 | 1177 |  | 
 | 1178 | 	DEBUG18(printk(KERN_INFO "%s(%ld): dif bytes = 0x%x (%d), total bytes" | 
 | 1179 | 	    " = 0x%x (%d), data block size = 0x%x (%d)\n", __func__, | 
 | 1180 | 	    vha->host_no, dif_bytes, dif_bytes, total_bytes, total_bytes, | 
 | 1181 | 	    crc_ctx_pkt->blk_size, crc_ctx_pkt->blk_size)); | 
 | 1182 |  | 
 | 1183 | 	/* Walks data segments */ | 
 | 1184 |  | 
 | 1185 | 	cmd_pkt->control_flags |= | 
 | 1186 | 	    __constant_cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE); | 
 | 1187 | 	if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd, | 
 | 1188 | 	    (tot_dsds - tot_prot_dsds))) | 
 | 1189 | 		goto crc_queuing_error; | 
 | 1190 |  | 
 | 1191 | 	if (bundling && tot_prot_dsds) { | 
 | 1192 | 		/* Walks dif segments */ | 
 | 1193 | 		cur_seg = scsi_prot_sglist(cmd); | 
 | 1194 | 		cmd_pkt->control_flags |= | 
 | 1195 | 			__constant_cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE); | 
 | 1196 | 		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address; | 
 | 1197 | 		if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd, | 
 | 1198 | 		    tot_prot_dsds)) | 
 | 1199 | 			goto crc_queuing_error; | 
 | 1200 | 	} | 
 | 1201 | 	return QLA_SUCCESS; | 
 | 1202 |  | 
 | 1203 | crc_queuing_error: | 
 | 1204 | 	DEBUG18(qla_printk(KERN_INFO, ha, | 
 | 1205 | 	    "CMD sent FAILED crc_q error:sp = %p\n", sp)); | 
 | 1206 | 	/* Cleanup will be performed by the caller */ | 
 | 1207 |  | 
 | 1208 | 	return QLA_FUNCTION_FAILED; | 
 | 1209 | } | 
| Andrew Vasquez | 2b6c0ce | 2005-07-06 10:31:17 -0700 | [diff] [blame] | 1210 |  | 
 | 1211 | /** | 
 | 1212 |  * qla24xx_start_scsi() - Send a SCSI command to the ISP | 
 | 1213 |  * @sp: command to send to the ISP | 
 | 1214 |  * | 
| Bjorn Helgaas | cc3ef7b | 2008-09-11 21:22:51 -0700 | [diff] [blame] | 1215 |  * Returns non-zero if a failure occurred, else zero. | 
| Andrew Vasquez | 2b6c0ce | 2005-07-06 10:31:17 -0700 | [diff] [blame] | 1216 |  */ | 
 | 1217 | int | 
 | 1218 | qla24xx_start_scsi(srb_t *sp) | 
 | 1219 | { | 
| FUJITA Tomonori | 385d70b | 2007-05-26 01:55:38 +0900 | [diff] [blame] | 1220 | 	int		ret, nseg; | 
| Andrew Vasquez | 2b6c0ce | 2005-07-06 10:31:17 -0700 | [diff] [blame] | 1221 | 	unsigned long   flags; | 
| Andrew Vasquez | 2b6c0ce | 2005-07-06 10:31:17 -0700 | [diff] [blame] | 1222 | 	uint32_t	*clr_ptr; | 
 | 1223 | 	uint32_t        index; | 
 | 1224 | 	uint32_t	handle; | 
 | 1225 | 	struct cmd_type_7 *cmd_pkt; | 
| Andrew Vasquez | 2b6c0ce | 2005-07-06 10:31:17 -0700 | [diff] [blame] | 1226 | 	uint16_t	cnt; | 
 | 1227 | 	uint16_t	req_cnt; | 
 | 1228 | 	uint16_t	tot_dsds; | 
| Anirban Chakraborty | 73208df | 2008-12-09 16:45:39 -0800 | [diff] [blame] | 1229 | 	struct req_que *req = NULL; | 
 | 1230 | 	struct rsp_que *rsp = NULL; | 
 | 1231 | 	struct scsi_cmnd *cmd = sp->cmd; | 
| Andrew Vasquez | 444786d | 2009-01-05 11:18:10 -0800 | [diff] [blame] | 1232 | 	struct scsi_qla_host *vha = sp->fcport->vha; | 
| Anirban Chakraborty | 73208df | 2008-12-09 16:45:39 -0800 | [diff] [blame] | 1233 | 	struct qla_hw_data *ha = vha->hw; | 
| Andrew Vasquez | 2b6c0ce | 2005-07-06 10:31:17 -0700 | [diff] [blame] | 1234 |  | 
 | 1235 | 	/* Setup device pointers. */ | 
 | 1236 | 	ret = 0; | 
| Anirban Chakraborty | 73208df | 2008-12-09 16:45:39 -0800 | [diff] [blame] | 1237 |  | 
| Anirban Chakraborty | 59e0b8b | 2009-06-03 09:55:19 -0700 | [diff] [blame] | 1238 | 	qla25xx_set_que(sp, &rsp); | 
 | 1239 | 	req = vha->req; | 
| Anirban Chakraborty | 73208df | 2008-12-09 16:45:39 -0800 | [diff] [blame] | 1240 |  | 
| Andrew Vasquez | 2b6c0ce | 2005-07-06 10:31:17 -0700 | [diff] [blame] | 1241 | 	/* So we know we haven't pci_map'ed anything yet */ | 
 | 1242 | 	tot_dsds = 0; | 
 | 1243 |  | 
 | 1244 | 	/* Send marker if required */ | 
| Anirban Chakraborty | e315cd2 | 2008-11-06 10:40:51 -0800 | [diff] [blame] | 1245 | 	if (vha->marker_needed != 0) { | 
| Anirban Chakraborty | 73208df | 2008-12-09 16:45:39 -0800 | [diff] [blame] | 1246 | 		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) | 
 | 1247 | 							!= QLA_SUCCESS) | 
| Andrew Vasquez | 2b6c0ce | 2005-07-06 10:31:17 -0700 | [diff] [blame] | 1248 | 			return QLA_FUNCTION_FAILED; | 
| Anirban Chakraborty | e315cd2 | 2008-11-06 10:40:51 -0800 | [diff] [blame] | 1249 | 		vha->marker_needed = 0; | 
| Andrew Vasquez | 2b6c0ce | 2005-07-06 10:31:17 -0700 | [diff] [blame] | 1250 | 	} | 
 | 1251 |  | 
 | 1252 | 	/* Acquire ring specific lock */ | 
| Anirban Chakraborty | e315cd2 | 2008-11-06 10:40:51 -0800 | [diff] [blame] | 1253 | 	spin_lock_irqsave(&ha->hardware_lock, flags); | 
| Andrew Vasquez | 2b6c0ce | 2005-07-06 10:31:17 -0700 | [diff] [blame] | 1254 |  | 
 | 1255 | 	/* Check for room in outstanding command list. */ | 
| Anirban Chakraborty | e315cd2 | 2008-11-06 10:40:51 -0800 | [diff] [blame] | 1256 | 	handle = req->current_outstanding_cmd; | 
| Andrew Vasquez | 2b6c0ce | 2005-07-06 10:31:17 -0700 | [diff] [blame] | 1257 | 	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) { | 
 | 1258 | 		handle++; | 
 | 1259 | 		if (handle == MAX_OUTSTANDING_COMMANDS) | 
 | 1260 | 			handle = 1; | 
| Anirban Chakraborty | e315cd2 | 2008-11-06 10:40:51 -0800 | [diff] [blame] | 1261 | 		if (!req->outstanding_cmds[handle]) | 
| Andrew Vasquez | 2b6c0ce | 2005-07-06 10:31:17 -0700 | [diff] [blame] | 1262 | 			break; | 
 | 1263 | 	} | 
 | 1264 | 	if (index == MAX_OUTSTANDING_COMMANDS) | 
 | 1265 | 		goto queuing_error; | 
 | 1266 |  | 
 | 1267 | 	/* Map the sg table so we have an accurate count of sg entries needed */ | 
| Seokmann Ju | 2c3dfe3 | 2007-07-05 13:16:51 -0700 | [diff] [blame] | 1268 | 	if (scsi_sg_count(cmd)) { | 
 | 1269 | 		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd), | 
 | 1270 | 		    scsi_sg_count(cmd), cmd->sc_data_direction); | 
 | 1271 | 		if (unlikely(!nseg)) | 
| Andrew Vasquez | 2b6c0ce | 2005-07-06 10:31:17 -0700 | [diff] [blame] | 1272 | 			goto queuing_error; | 
| Seokmann Ju | 2c3dfe3 | 2007-07-05 13:16:51 -0700 | [diff] [blame] | 1273 | 	} else | 
 | 1274 | 		nseg = 0; | 
 | 1275 |  | 
| FUJITA Tomonori | 385d70b | 2007-05-26 01:55:38 +0900 | [diff] [blame] | 1276 | 	tot_dsds = nseg; | 
| Andrew Vasquez | 2b6c0ce | 2005-07-06 10:31:17 -0700 | [diff] [blame] | 1277 |  | 
 | 1278 | 	req_cnt = qla24xx_calc_iocbs(tot_dsds); | 
| Anirban Chakraborty | e315cd2 | 2008-11-06 10:40:51 -0800 | [diff] [blame] | 1279 | 	if (req->cnt < (req_cnt + 2)) { | 
| Andrew Vasquez | 0802999 | 2009-03-24 09:07:55 -0700 | [diff] [blame] | 1280 | 		cnt = RD_REG_DWORD_RELAXED(req->req_q_out); | 
| Anirban Chakraborty | 73208df | 2008-12-09 16:45:39 -0800 | [diff] [blame] | 1281 |  | 
| Anirban Chakraborty | e315cd2 | 2008-11-06 10:40:51 -0800 | [diff] [blame] | 1282 | 		if (req->ring_index < cnt) | 
 | 1283 | 			req->cnt = cnt - req->ring_index; | 
| Andrew Vasquez | 2b6c0ce | 2005-07-06 10:31:17 -0700 | [diff] [blame] | 1284 | 		else | 
| Anirban Chakraborty | e315cd2 | 2008-11-06 10:40:51 -0800 | [diff] [blame] | 1285 | 			req->cnt = req->length - | 
 | 1286 | 				(req->ring_index - cnt); | 
| Andrew Vasquez | 2b6c0ce | 2005-07-06 10:31:17 -0700 | [diff] [blame] | 1287 | 	} | 
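 |  | 	/* | 
 |  | 	 * Illustrative numbers: if req->length is 2048, req->ring_index is | 
 |  | 	 * 100 and the hardware out pointer reads 40, then 60 entries are | 
 |  | 	 * still unconsumed and req->cnt = 2048 - (100 - 40) = 1988 free | 
 |  | 	 * slots; the command is queued only if at least req_cnt + 2 remain, | 
 |  | 	 * as checked just below. | 
 |  | 	 */ | 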
| Anirban Chakraborty | e315cd2 | 2008-11-06 10:40:51 -0800 | [diff] [blame] | 1288 | 	if (req->cnt < (req_cnt + 2)) | 
| Andrew Vasquez | 2b6c0ce | 2005-07-06 10:31:17 -0700 | [diff] [blame] | 1289 | 		goto queuing_error; | 
| Andrew Vasquez | 2b6c0ce | 2005-07-06 10:31:17 -0700 | [diff] [blame] | 1290 |  | 
 | 1291 | 	/* Build command packet. */ | 
| Anirban Chakraborty | e315cd2 | 2008-11-06 10:40:51 -0800 | [diff] [blame] | 1292 | 	req->current_outstanding_cmd = handle; | 
 | 1293 | 	req->outstanding_cmds[handle] = sp; | 
| Andrew Vasquez | cf53b06 | 2009-08-20 11:06:04 -0700 | [diff] [blame] | 1294 | 	sp->handle = handle; | 
| Andrew Vasquez | 2b6c0ce | 2005-07-06 10:31:17 -0700 | [diff] [blame] | 1295 | 	sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle; | 
| Anirban Chakraborty | e315cd2 | 2008-11-06 10:40:51 -0800 | [diff] [blame] | 1296 | 	req->cnt -= req_cnt; | 
| Andrew Vasquez | 2b6c0ce | 2005-07-06 10:31:17 -0700 | [diff] [blame] | 1297 |  | 
| Anirban Chakraborty | e315cd2 | 2008-11-06 10:40:51 -0800 | [diff] [blame] | 1298 | 	cmd_pkt = (struct cmd_type_7 *)req->ring_ptr; | 
| Anirban Chakraborty | 2afa19a | 2009-04-06 22:33:40 -0700 | [diff] [blame] | 1299 | 	cmd_pkt->handle = MAKE_HANDLE(req->id, handle); | 
| Andrew Vasquez | 2b6c0ce | 2005-07-06 10:31:17 -0700 | [diff] [blame] | 1300 |  | 
 | 1301 | 	/* Zero out remaining portion of packet. */ | 
| James Bottomley | 72df832 | 2005-10-28 14:41:19 -0500 | [diff] [blame] | 1302 | 	/*    tagged queuing modifier -- default is TSK_SIMPLE (0). */ | 
| Andrew Vasquez | 2b6c0ce | 2005-07-06 10:31:17 -0700 | [diff] [blame] | 1303 | 	clr_ptr = (uint32_t *)cmd_pkt + 2; | 
 | 1304 | 	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8); | 
 | 1305 | 	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds); | 
 | 1306 |  | 
 | 1307 | 	/* Set NPORT-ID and LUN number*/ | 
 | 1308 | 	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id); | 
 | 1309 | 	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa; | 
 | 1310 | 	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area; | 
 | 1311 | 	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; | 
| Seokmann Ju | 2c3dfe3 | 2007-07-05 13:16:51 -0700 | [diff] [blame] | 1312 | 	cmd_pkt->vp_index = sp->fcport->vp_idx; | 
| Andrew Vasquez | 2b6c0ce | 2005-07-06 10:31:17 -0700 | [diff] [blame] | 1313 |  | 
| Andrew Vasquez | 661c3f6 | 2005-10-27 11:09:58 -0700 | [diff] [blame] | 1314 | 	int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun); | 
| andrew.vasquez@qlogic.com | 0d4be12 | 2006-02-07 08:45:35 -0800 | [diff] [blame] | 1315 | 	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun)); | 
| Andrew Vasquez | 2b6c0ce | 2005-07-06 10:31:17 -0700 | [diff] [blame] | 1316 |  | 
| Andrew Vasquez | 2b6c0ce | 2005-07-06 10:31:17 -0700 | [diff] [blame] | 1317 | 	/* Load SCSI command packet. */ | 
 | 1318 | 	memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len); | 
 | 1319 | 	host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb)); | 
 | 1320 |  | 
| FUJITA Tomonori | 385d70b | 2007-05-26 01:55:38 +0900 | [diff] [blame] | 1321 | 	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd)); | 
| Andrew Vasquez | 2b6c0ce | 2005-07-06 10:31:17 -0700 | [diff] [blame] | 1322 |  | 
 | 1323 | 	/* Build IOCB segments */ | 
 | 1324 | 	qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds); | 
 | 1325 |  | 
 | 1326 | 	/* Set total data segment count. */ | 
 | 1327 | 	cmd_pkt->entry_count = (uint8_t)req_cnt; | 
| Anirban Chakraborty | 2afa19a | 2009-04-06 22:33:40 -0700 | [diff] [blame] | 1328 | 	/* Specify response queue number where completion should happen */ | 
 | 1329 | 	cmd_pkt->entry_status = (uint8_t) rsp->id; | 
| Andrew Vasquez | 2b6c0ce | 2005-07-06 10:31:17 -0700 | [diff] [blame] | 1330 | 	wmb(); | 
 | 1331 |  | 
 | 1332 | 	/* Adjust ring index. */ | 
| Anirban Chakraborty | e315cd2 | 2008-11-06 10:40:51 -0800 | [diff] [blame] | 1333 | 	req->ring_index++; | 
 | 1334 | 	if (req->ring_index == req->length) { | 
 | 1335 | 		req->ring_index = 0; | 
 | 1336 | 		req->ring_ptr = req->ring; | 
| Andrew Vasquez | 2b6c0ce | 2005-07-06 10:31:17 -0700 | [diff] [blame] | 1337 | 	} else | 
| Anirban Chakraborty | e315cd2 | 2008-11-06 10:40:51 -0800 | [diff] [blame] | 1338 | 		req->ring_ptr++; | 
| Andrew Vasquez | 2b6c0ce | 2005-07-06 10:31:17 -0700 | [diff] [blame] | 1339 |  | 
 | 1340 | 	sp->flags |= SRB_DMA_VALID; | 
| Andrew Vasquez | 2b6c0ce | 2005-07-06 10:31:17 -0700 | [diff] [blame] | 1341 |  | 
 | 1342 | 	/* Set chip new ring index. */ | 
| Andrew Vasquez | 0802999 | 2009-03-24 09:07:55 -0700 | [diff] [blame] | 1343 | 	WRT_REG_DWORD(req->req_q_in, req->ring_index); | 
 | 1344 | 	RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr); | 
| Andrew Vasquez | 2b6c0ce | 2005-07-06 10:31:17 -0700 | [diff] [blame] | 1345 |  | 
| Andrew Vasquez | 4fdfefe | 2005-10-27 11:09:48 -0700 | [diff] [blame] | 1346 | 	/* Manage unprocessed RIO/ZIO commands in response queue. */ | 
| Anirban Chakraborty | e315cd2 | 2008-11-06 10:40:51 -0800 | [diff] [blame] | 1347 | 	if (vha->flags.process_response_queue && | 
| Anirban Chakraborty | 73208df | 2008-12-09 16:45:39 -0800 | [diff] [blame] | 1348 | 		rsp->ring_ptr->signature != RESPONSE_PROCESSED) | 
| Anirban Chakraborty | 2afa19a | 2009-04-06 22:33:40 -0700 | [diff] [blame] | 1349 | 		qla24xx_process_response_queue(vha, rsp); | 
| Andrew Vasquez | 4fdfefe | 2005-10-27 11:09:48 -0700 | [diff] [blame] | 1350 |  | 
| Anirban Chakraborty | e315cd2 | 2008-11-06 10:40:51 -0800 | [diff] [blame] | 1351 | 	spin_unlock_irqrestore(&ha->hardware_lock, flags); | 
| Andrew Vasquez | 2b6c0ce | 2005-07-06 10:31:17 -0700 | [diff] [blame] | 1352 | 	return QLA_SUCCESS; | 
 | 1353 |  | 
 | 1354 | queuing_error: | 
| FUJITA Tomonori | 385d70b | 2007-05-26 01:55:38 +0900 | [diff] [blame] | 1355 | 	if (tot_dsds) | 
 | 1356 | 		scsi_dma_unmap(cmd); | 
 | 1357 |  | 
| Anirban Chakraborty | e315cd2 | 2008-11-06 10:40:51 -0800 | [diff] [blame] | 1358 | 	spin_unlock_irqrestore(&ha->hardware_lock, flags); | 
| Andrew Vasquez | 2b6c0ce | 2005-07-06 10:31:17 -0700 | [diff] [blame] | 1359 |  | 
 | 1360 | 	return QLA_FUNCTION_FAILED; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1361 | } | 
| Anirban Chakraborty | 68ca949 | 2009-04-06 22:33:41 -0700 | [diff] [blame] | 1362 |  | 
| Arun Easi | bad7500 | 2010-05-04 15:01:30 -0700 | [diff] [blame] | 1363 |  | 
 | 1364 | /** | 
 | 1365 |  * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP | 
 | 1366 |  * @sp: command to send to the ISP | 
 | 1367 |  * | 
 | 1368 |  * Returns non-zero if a failure occurred, else zero. | 
 | 1369 |  */ | 
 | 1370 | int | 
 | 1371 | qla24xx_dif_start_scsi(srb_t *sp) | 
 | 1372 | { | 
 | 1373 | 	int			nseg; | 
 | 1374 | 	unsigned long		flags; | 
 | 1375 | 	uint32_t		*clr_ptr; | 
 | 1376 | 	uint32_t		index; | 
 | 1377 | 	uint32_t		handle; | 
 | 1378 | 	uint16_t		cnt; | 
 | 1379 | 	uint16_t		req_cnt = 0; | 
 | 1380 | 	uint16_t		tot_dsds; | 
 | 1381 | 	uint16_t		tot_prot_dsds; | 
 | 1382 | 	uint16_t		fw_prot_opts = 0; | 
 | 1383 | 	struct req_que		*req = NULL; | 
 | 1384 | 	struct rsp_que		*rsp = NULL; | 
 | 1385 | 	struct scsi_cmnd	*cmd = sp->cmd; | 
 | 1386 | 	struct scsi_qla_host	*vha = sp->fcport->vha; | 
 | 1387 | 	struct qla_hw_data	*ha = vha->hw; | 
 | 1388 | 	struct cmd_type_crc_2	*cmd_pkt; | 
 | 1389 | 	uint32_t		status = 0; | 
 | 1390 |  | 
 | 1391 | #define QDSS_GOT_Q_SPACE	BIT_0 | 
 | 1392 |  | 
 | 1393 | 	/* Only process protection in this routine */ | 
 | 1394 | 	if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) | 
 | 1395 | 		return qla24xx_start_scsi(sp); | 
 | 1396 |  | 
 | 1397 | 	/* Setup device pointers. */ | 
 | 1398 |  | 
 | 1399 | 	qla25xx_set_que(sp, &rsp); | 
 | 1400 | 	req = vha->req; | 
 | 1401 |  | 
 | 1402 | 	/* So we know we haven't pci_map'ed anything yet */ | 
 | 1403 | 	tot_dsds = 0; | 
 | 1404 |  | 
 | 1405 | 	/* Send marker if required */ | 
 | 1406 | 	if (vha->marker_needed != 0) { | 
 | 1407 | 		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) != | 
 | 1408 | 		    QLA_SUCCESS) | 
 | 1409 | 			return QLA_FUNCTION_FAILED; | 
 | 1410 | 		vha->marker_needed = 0; | 
 | 1411 | 	} | 
 | 1412 |  | 
 | 1413 | 	/* Acquire ring specific lock */ | 
 | 1414 | 	spin_lock_irqsave(&ha->hardware_lock, flags); | 
 | 1415 |  | 
 | 1416 | 	/* Check for room in outstanding command list. */ | 
 | 1417 | 	handle = req->current_outstanding_cmd; | 
 | 1418 | 	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) { | 
 | 1419 | 		handle++; | 
 | 1420 | 		if (handle == MAX_OUTSTANDING_COMMANDS) | 
 | 1421 | 			handle = 1; | 
 | 1422 | 		if (!req->outstanding_cmds[handle]) | 
 | 1423 | 			break; | 
 | 1424 | 	} | 
 | 1425 |  | 
 | 1426 | 	if (index == MAX_OUTSTANDING_COMMANDS) | 
 | 1427 | 		goto queuing_error; | 
 | 1428 |  | 
 | 1429 | 	/* Compute number of required data segments */ | 
 | 1430 | 	/* Map the sg table so we have an accurate count of sg entries needed */ | 
 | 1431 | 	if (scsi_sg_count(cmd)) { | 
 | 1432 | 		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd), | 
 | 1433 | 		    scsi_sg_count(cmd), cmd->sc_data_direction); | 
 | 1434 | 		if (unlikely(!nseg)) | 
 | 1435 | 			goto queuing_error; | 
 | 1436 | 		else | 
 | 1437 | 			sp->flags |= SRB_DMA_VALID; | 
 | 1438 | 	} else | 
 | 1439 | 		nseg = 0; | 
 | 1440 |  | 
 | 1441 | 	/* number of required data segments */ | 
 | 1442 | 	tot_dsds = nseg; | 
 | 1443 |  | 
 | 1444 | 	/* Compute number of required protection segments */ | 
 | 1445 | 	if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) { | 
 | 1446 | 		nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd), | 
 | 1447 | 		    scsi_prot_sg_count(cmd), cmd->sc_data_direction); | 
 | 1448 | 		if (unlikely(!nseg)) | 
 | 1449 | 			goto queuing_error; | 
 | 1450 | 		else | 
 | 1451 | 			sp->flags |= SRB_CRC_PROT_DMA_VALID; | 
 | 1452 | 	} else { | 
 | 1453 | 		nseg = 0; | 
 | 1454 | 	} | 
 | 1455 |  | 
 | 1456 | 	req_cnt = 1; | 
 | 1457 | 	/* Total Data and protection sg segment(s) */ | 
 | 1458 | 	tot_prot_dsds = nseg; | 
 | 1459 | 	tot_dsds += nseg; | 
 | 1460 | 	if (req->cnt < (req_cnt + 2)) { | 
 | 1461 | 		cnt = RD_REG_DWORD_RELAXED(req->req_q_out); | 
 | 1462 |  | 
 | 1463 | 		if (req->ring_index < cnt) | 
 | 1464 | 			req->cnt = cnt - req->ring_index; | 
 | 1465 | 		else | 
 | 1466 | 			req->cnt = req->length - | 
 | 1467 | 				(req->ring_index - cnt); | 
 | 1468 | 	} | 
 | 1469 |  | 
 | 1470 | 	if (req->cnt < (req_cnt + 2)) | 
 | 1471 | 		goto queuing_error; | 
 | 1472 |  | 
 | 1473 | 	status |= QDSS_GOT_Q_SPACE; | 
 | 1474 |  | 
 | 1475 | 	/* Build header part of command packet (excluding the OPCODE). */ | 
 | 1476 | 	req->current_outstanding_cmd = handle; | 
 | 1477 | 	req->outstanding_cmds[handle] = sp; | 
 | 1478 | 	sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle; | 
 | 1479 | 	req->cnt -= req_cnt; | 
 | 1480 |  | 
 | 1481 | 	/* Fill-in common area */ | 
 | 1482 | 	cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr; | 
 | 1483 | 	cmd_pkt->handle = MAKE_HANDLE(req->id, handle); | 
 | 1484 |  | 
 | 1485 | 	clr_ptr = (uint32_t *)cmd_pkt + 2; | 
 | 1486 | 	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8); | 
 | 1487 |  | 
 | 1488 | 	/* Set NPORT-ID and LUN number*/ | 
 | 1489 | 	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id); | 
 | 1490 | 	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa; | 
 | 1491 | 	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area; | 
 | 1492 | 	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; | 
 | 1493 |  | 
 | 1494 | 	int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun); | 
 | 1495 | 	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun)); | 
 | 1496 |  | 
 | 1497 | 	/* Total Data and protection segment(s) */ | 
 | 1498 | 	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds); | 
 | 1499 |  | 
 | 1500 | 	/* Build IOCB segments and adjust for data protection segments */ | 
 | 1501 | 	if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *) | 
 | 1502 | 	    req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) != | 
 | 1503 | 		QLA_SUCCESS) | 
 | 1504 | 		goto queuing_error; | 
 | 1505 |  | 
 | 1506 | 	cmd_pkt->entry_count = (uint8_t)req_cnt; | 
 | 1507 | 	/* Specify response queue number where completion should happen */ | 
 | 1508 | 	cmd_pkt->entry_status = (uint8_t) rsp->id; | 
 | 1509 | 	cmd_pkt->timeout = __constant_cpu_to_le16(0); | 
 | 1510 | 	wmb(); | 
 | 1511 |  | 
 | 1512 | 	/* Adjust ring index. */ | 
 | 1513 | 	req->ring_index++; | 
 | 1514 | 	if (req->ring_index == req->length) { | 
 | 1515 | 		req->ring_index = 0; | 
 | 1516 | 		req->ring_ptr = req->ring; | 
 | 1517 | 	} else | 
 | 1518 | 		req->ring_ptr++; | 
 | 1519 |  | 
 | 1520 | 	/* Set chip new ring index. */ | 
 | 1521 | 	WRT_REG_DWORD(req->req_q_in, req->ring_index); | 
 | 1522 | 	RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr); | 
 | 1523 |  | 
 | 1524 | 	/* Manage unprocessed RIO/ZIO commands in response queue. */ | 
 | 1525 | 	if (vha->flags.process_response_queue && | 
 | 1526 | 	    rsp->ring_ptr->signature != RESPONSE_PROCESSED) | 
 | 1527 | 		qla24xx_process_response_queue(vha, rsp); | 
 | 1528 |  | 
 | 1529 | 	spin_unlock_irqrestore(&ha->hardware_lock, flags); | 
 | 1530 |  | 
 | 1531 | 	return QLA_SUCCESS; | 
 | 1532 |  | 
 | 1533 | queuing_error: | 
 | 1534 | 	if (status & QDSS_GOT_Q_SPACE) { | 
 | 1535 | 		req->outstanding_cmds[handle] = NULL; | 
 | 1536 | 		req->cnt += req_cnt; | 
 | 1537 | 	} | 
 | 1538 | 	/* Cleanup will be performed by the caller (queuecommand) */ | 
 | 1539 |  | 
 | 1540 | 	spin_unlock_irqrestore(&ha->hardware_lock, flags); | 
 | 1541 |  | 
 | 1542 | 	DEBUG18(qla_printk(KERN_INFO, ha, | 
 | 1543 | 	    "CMD sent FAILED SCSI prot_op:%02x\n", scsi_get_prot_op(cmd))); | 
 | 1544 | 	return QLA_FUNCTION_FAILED; | 
 | 1545 | } | 
 | 1546 |  | 
 | 1547 |  | 
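 |  | /* | 
 |  |  * Behavioural sketch of the helper below (a hedged reading of the code, not | 
 |  |  * an authoritative description): the block layer records the submitting CPU | 
 |  |  * in cmd->request->cpu, and with CPU affinity enabled the command is steered | 
 |  |  * to response queue (cpu + 1). A request issued on CPU 2 with max_rsp_queues | 
 |  |  * of 5 therefore completes on rsp_q_map[3], while anything out of range | 
 |  |  * falls back to rsp_q_map[0]. | 
 |  |  */ | 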
| Anirban Chakraborty | 59e0b8b | 2009-06-03 09:55:19 -0700 | [diff] [blame] | 1548 | static void qla25xx_set_que(srb_t *sp, struct rsp_que **rsp) | 
| Anirban Chakraborty | 68ca949 | 2009-04-06 22:33:41 -0700 | [diff] [blame] | 1549 | { | 
 | 1550 | 	struct scsi_cmnd *cmd = sp->cmd; | 
| Anirban Chakraborty | 68ca949 | 2009-04-06 22:33:41 -0700 | [diff] [blame] | 1551 | 	struct qla_hw_data *ha = sp->fcport->vha->hw; | 
 | 1552 | 	int affinity = cmd->request->cpu; | 
 | 1553 |  | 
| Anirban Chakraborty | 7163ea8 | 2009-08-05 09:18:40 -0700 | [diff] [blame] | 1554 | 	if (ha->flags.cpu_affinity_enabled && affinity >= 0 && | 
| Anirban Chakraborty | 59e0b8b | 2009-06-03 09:55:19 -0700 | [diff] [blame] | 1555 | 		affinity < ha->max_rsp_queues - 1) | 
| Anirban Chakraborty | 68ca949 | 2009-04-06 22:33:41 -0700 | [diff] [blame] | 1556 | 		*rsp = ha->rsp_q_map[affinity + 1]; | 
| Anirban Chakraborty | 59e0b8b | 2009-06-03 09:55:19 -0700 | [diff] [blame] | 1557 | 	else | 
| Anirban Chakraborty | 68ca949 | 2009-04-06 22:33:41 -0700 | [diff] [blame] | 1558 | 		*rsp = ha->rsp_q_map[0]; | 
| Anirban Chakraborty | 68ca949 | 2009-04-06 22:33:41 -0700 | [diff] [blame] | 1559 | } | 
| Andrew Vasquez | ac280b6 | 2009-08-20 11:06:05 -0700 | [diff] [blame] | 1560 |  | 
 | 1561 | /* Generic Control-SRB manipulation functions. */ | 
 | 1562 |  | 
 | 1563 | static void * | 
 | 1564 | qla2x00_alloc_iocbs(srb_t *sp) | 
 | 1565 | { | 
 | 1566 | 	scsi_qla_host_t	*vha = sp->fcport->vha; | 
 | 1567 | 	struct qla_hw_data *ha = vha->hw; | 
 | 1568 | 	struct req_que *req = ha->req_q_map[0]; | 
 | 1569 | 	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id); | 
 | 1570 | 	uint32_t index, handle; | 
 | 1571 | 	request_t *pkt; | 
 | 1572 | 	uint16_t cnt, req_cnt; | 
 | 1573 |  | 
 | 1574 | 	pkt = NULL; | 
 | 1575 | 	req_cnt = 1; | 
 | 1576 |  | 
 | 1577 | 	/* Check for room in outstanding command list. */ | 
 | 1578 | 	handle = req->current_outstanding_cmd; | 
 | 1579 | 	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) { | 
 | 1580 | 		handle++; | 
 | 1581 | 		if (handle == MAX_OUTSTANDING_COMMANDS) | 
 | 1582 | 			handle = 1; | 
 | 1583 | 		if (!req->outstanding_cmds[handle]) | 
 | 1584 | 			break; | 
 | 1585 | 	} | 
 | 1586 | 	if (index == MAX_OUTSTANDING_COMMANDS) | 
 | 1587 | 		goto queuing_error; | 
 | 1588 |  | 
 | 1589 | 	/* Check for room on request queue. */ | 
 | 1590 | 	if (req->cnt < req_cnt) { | 
 | 1591 | 		if (ha->mqenable) | 
 | 1592 | 			cnt = RD_REG_DWORD(®->isp25mq.req_q_out); | 
 | 1593 | 		else if (IS_FWI2_CAPABLE(ha)) | 
 | 1594 | 			cnt = RD_REG_DWORD(®->isp24.req_q_out); | 
 | 1595 | 		else | 
 | 1596 | 			cnt = qla2x00_debounce_register( | 
 | 1597 | 			    ISP_REQ_Q_OUT(ha, ®->isp)); | 
 | 1598 |  | 
 | 1599 | 		if  (req->ring_index < cnt) | 
 | 1600 | 			req->cnt = cnt - req->ring_index; | 
 | 1601 | 		else | 
 | 1602 | 			req->cnt = req->length - | 
 | 1603 | 			    (req->ring_index - cnt); | 
 | 1604 | 	} | 
 | 1605 | 	if (req->cnt < req_cnt) | 
 | 1606 | 		goto queuing_error; | 
 | 1607 |  | 
 | 1608 | 	/* Prep packet */ | 
 | 1609 | 	req->current_outstanding_cmd = handle; | 
 | 1610 | 	req->outstanding_cmds[handle] = sp; | 
 | 1611 | 	req->cnt -= req_cnt; | 
 | 1612 |  | 
 | 1613 | 	pkt = req->ring_ptr; | 
 | 1614 | 	memset(pkt, 0, REQUEST_ENTRY_SIZE); | 
 | 1615 | 	pkt->entry_count = req_cnt; | 
 | 1616 | 	pkt->handle = handle; | 
 | 1617 | 	sp->handle = handle; | 
 | 1618 |  | 
 | 1619 | queuing_error: | 
 | 1620 | 	return pkt; | 
 | 1621 | } | 
 | 1622 |  | 
 | 1623 | static void | 
 | 1624 | qla2x00_start_iocbs(srb_t *sp) | 
 | 1625 | { | 
 | 1626 | 	struct qla_hw_data *ha = sp->fcport->vha->hw; | 
 | 1627 | 	struct req_que *req = ha->req_q_map[0]; | 
 | 1628 | 	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id); | 
 | 1629 | 	struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp; | 
 | 1630 |  | 
| Giridhar Malavali | a908301 | 2010-04-12 17:59:55 -0700 | [diff] [blame] | 1631 | 	if (IS_QLA82XX(ha)) { | 
 | 1632 | 		qla82xx_start_iocbs(sp); | 
| Andrew Vasquez | ac280b6 | 2009-08-20 11:06:05 -0700 | [diff] [blame] | 1633 | 	} else { | 
| Giridhar Malavali | a908301 | 2010-04-12 17:59:55 -0700 | [diff] [blame] | 1634 | 		/* Adjust ring index. */ | 
 | 1635 | 		req->ring_index++; | 
 | 1636 | 		if (req->ring_index == req->length) { | 
 | 1637 | 			req->ring_index = 0; | 
 | 1638 | 			req->ring_ptr = req->ring; | 
 | 1639 | 		} else | 
 | 1640 | 			req->ring_ptr++; | 
 | 1641 |  | 
 | 1642 | 		/* Set chip new ring index. */ | 
 | 1643 | 		if (ha->mqenable) { | 
 | 1644 | 			WRT_REG_DWORD(®->isp25mq.req_q_in, req->ring_index); | 
 | 1645 | 			RD_REG_DWORD(&ioreg->hccr); | 
 | 1646 | 		} else if (IS_QLA82XX(ha)) { | 
 | 1647 | 			qla82xx_start_iocbs(sp); | 
 | 1648 | 		} else if (IS_FWI2_CAPABLE(ha)) { | 
 | 1649 | 			WRT_REG_DWORD(®->isp24.req_q_in, req->ring_index); | 
 | 1650 | 			RD_REG_DWORD_RELAXED(®->isp24.req_q_in); | 
 | 1651 | 		} else { | 
 | 1652 | 			WRT_REG_WORD(ISP_REQ_Q_IN(ha, ®->isp), | 
 | 1653 | 				req->ring_index); | 
 | 1654 | 			RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, ®->isp)); | 
 | 1655 | 		} | 
| Andrew Vasquez | ac280b6 | 2009-08-20 11:06:05 -0700 | [diff] [blame] | 1656 | 	} | 
 | 1657 | } | 
 | 1658 |  | 
 | 1659 | static void | 
 | 1660 | qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio) | 
 | 1661 | { | 
| Madhuranath Iyengar | 4916392 | 2010-05-04 15:01:28 -0700 | [diff] [blame] | 1662 | 	struct srb_ctx *ctx = sp->ctx; | 
 | 1663 | 	struct srb_iocb *lio = ctx->u.iocb_cmd; | 
| Andrew Vasquez | ac280b6 | 2009-08-20 11:06:05 -0700 | [diff] [blame] | 1664 |  | 
 | 1665 | 	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE; | 
 | 1666 | 	logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI); | 
| Madhuranath Iyengar | 4916392 | 2010-05-04 15:01:28 -0700 | [diff] [blame] | 1667 | 	if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI) | 
| Andrew Vasquez | ac280b6 | 2009-08-20 11:06:05 -0700 | [diff] [blame] | 1668 | 		logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI); | 
| Madhuranath Iyengar | 4916392 | 2010-05-04 15:01:28 -0700 | [diff] [blame] | 1669 | 	if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI) | 
| Andrew Vasquez | ac280b6 | 2009-08-20 11:06:05 -0700 | [diff] [blame] | 1670 | 		logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI); | 
 | 1671 | 	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id); | 
 | 1672 | 	logio->port_id[0] = sp->fcport->d_id.b.al_pa; | 
 | 1673 | 	logio->port_id[1] = sp->fcport->d_id.b.area; | 
 | 1674 | 	logio->port_id[2] = sp->fcport->d_id.b.domain; | 
 | 1675 | 	logio->vp_index = sp->fcport->vp_idx; | 
 | 1676 | } | 
 | 1677 |  | 
 | 1678 | static void | 
 | 1679 | qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx) | 
 | 1680 | { | 
 | 1681 | 	struct qla_hw_data *ha = sp->fcport->vha->hw; | 
| Madhuranath Iyengar | 4916392 | 2010-05-04 15:01:28 -0700 | [diff] [blame] | 1682 | 	struct srb_ctx *ctx = sp->ctx; | 
 | 1683 | 	struct srb_iocb *lio = ctx->u.iocb_cmd; | 
| Andrew Vasquez | ac280b6 | 2009-08-20 11:06:05 -0700 | [diff] [blame] | 1684 | 	uint16_t opts; | 
 | 1685 |  | 
 | 1686 | 	mbx->entry_type = MBX_IOCB_TYPE; | 
 | 1687 | 	SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id); | 
 | 1688 | 	mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT); | 
| Madhuranath Iyengar | 4916392 | 2010-05-04 15:01:28 -0700 | [diff] [blame] | 1689 | 	opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0; | 
 | 1690 | 	opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0; | 
| Andrew Vasquez | ac280b6 | 2009-08-20 11:06:05 -0700 | [diff] [blame] | 1691 | 	if (HAS_EXTENDED_IDS(ha)) { | 
 | 1692 | 		mbx->mb1 = cpu_to_le16(sp->fcport->loop_id); | 
 | 1693 | 		mbx->mb10 = cpu_to_le16(opts); | 
 | 1694 | 	} else { | 
 | 1695 | 		mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts); | 
 | 1696 | 	} | 
 | 1697 | 	mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain); | 
 | 1698 | 	mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 | | 
 | 1699 | 	    sp->fcport->d_id.b.al_pa); | 
 | 1700 | 	mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx); | 
 | 1701 | } | 
 | 1702 |  | 
 | 1703 | static void | 
 | 1704 | qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio) | 
 | 1705 | { | 
 | 1706 | 	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE; | 
 | 1707 | 	logio->control_flags = | 
 | 1708 | 	    cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO); | 
 | 1709 | 	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id); | 
 | 1710 | 	logio->port_id[0] = sp->fcport->d_id.b.al_pa; | 
 | 1711 | 	logio->port_id[1] = sp->fcport->d_id.b.area; | 
 | 1712 | 	logio->port_id[2] = sp->fcport->d_id.b.domain; | 
 | 1713 | 	logio->vp_index = sp->fcport->vp_idx; | 
 | 1714 | } | 
 | 1715 |  | 
 | 1716 | static void | 
 | 1717 | qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx) | 
 | 1718 | { | 
 | 1719 | 	struct qla_hw_data *ha = sp->fcport->vha->hw; | 
 | 1720 |  | 
 | 1721 | 	mbx->entry_type = MBX_IOCB_TYPE; | 
 | 1722 | 	SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id); | 
 | 1723 | 	mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT); | 
 | 1724 | 	mbx->mb1 = HAS_EXTENDED_IDS(ha) ? | 
 | 1725 | 	    cpu_to_le16(sp->fcport->loop_id): | 
 | 1726 | 	    cpu_to_le16(sp->fcport->loop_id << 8); | 
 | 1727 | 	mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain); | 
 | 1728 | 	mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 | | 
 | 1729 | 	    sp->fcport->d_id.b.al_pa); | 
 | 1730 | 	mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx); | 
 | 1731 | 	/* Implicit: mbx->mbx10 = 0. */ | 
 | 1732 | } | 
 | 1733 |  | 
| Giridhar Malavali | 9a069e1 | 2010-01-12 13:02:47 -0800 | [diff] [blame] | 1734 | static void | 
| Andrew Vasquez | 5ff1d58 | 2010-05-04 15:01:26 -0700 | [diff] [blame] | 1735 | qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio) | 
 | 1736 | { | 
 | 1737 | 	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE; | 
 | 1738 | 	logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC); | 
 | 1739 | 	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id); | 
 | 1740 | 	logio->vp_index = sp->fcport->vp_idx; | 
 | 1741 | } | 
 | 1742 |  | 
 | 1743 | static void | 
 | 1744 | qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx) | 
 | 1745 | { | 
 | 1746 | 	struct qla_hw_data *ha = sp->fcport->vha->hw; | 
 | 1747 |  | 
 | 1748 | 	mbx->entry_type = MBX_IOCB_TYPE; | 
 | 1749 | 	SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id); | 
 | 1750 | 	mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE); | 
 | 1751 | 	if (HAS_EXTENDED_IDS(ha)) { | 
 | 1752 | 		mbx->mb1 = cpu_to_le16(sp->fcport->loop_id); | 
 | 1753 | 		mbx->mb10 = cpu_to_le16(BIT_0); | 
 | 1754 | 	} else { | 
 | 1755 | 		mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0); | 
 | 1756 | 	} | 
 | 1757 | 	mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma)); | 
 | 1758 | 	mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma)); | 
 | 1759 | 	mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma))); | 
 | 1760 | 	mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma))); | 
 | 1761 | 	mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx); | 
 | 1762 | } | 
 | 1763 |  | 
 | 1764 | static void | 
| Madhuranath Iyengar | 3822263 | 2010-05-04 15:01:29 -0700 | [diff] [blame] | 1765 | qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk) | 
 | 1766 | { | 
 | 1767 | 	uint32_t flags; | 
 | 1768 | 	unsigned int lun; | 
 | 1769 | 	struct fc_port *fcport = sp->fcport; | 
 | 1770 | 	scsi_qla_host_t *vha = fcport->vha; | 
 | 1771 | 	struct qla_hw_data *ha = vha->hw; | 
 | 1772 | 	struct srb_ctx *ctx = sp->ctx; | 
 | 1773 | 	struct srb_iocb *iocb = ctx->u.iocb_cmd; | 
 | 1774 | 	struct req_que *req = vha->req; | 
 | 1775 |  | 
 | 1776 | 	flags = iocb->u.tmf.flags; | 
 | 1777 | 	lun = iocb->u.tmf.lun; | 
 | 1778 |  | 
 | 1779 | 	tsk->entry_type = TSK_MGMT_IOCB_TYPE; | 
 | 1780 | 	tsk->entry_count = 1; | 
 | 1781 | 	tsk->handle = MAKE_HANDLE(req->id, tsk->handle); | 
 | 1782 | 	tsk->nport_handle = cpu_to_le16(fcport->loop_id); | 
 | 1783 | 	tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2); | 
 | 1784 | 	tsk->control_flags = cpu_to_le32(flags); | 
 | 1785 | 	tsk->port_id[0] = fcport->d_id.b.al_pa; | 
 | 1786 | 	tsk->port_id[1] = fcport->d_id.b.area; | 
 | 1787 | 	tsk->port_id[2] = fcport->d_id.b.domain; | 
 | 1788 | 	tsk->vp_index = fcport->vp_idx; | 
 | 1789 |  | 
 | 1790 | 	if (flags == TCF_LUN_RESET) { | 
 | 1791 | 		int_to_scsilun(lun, &tsk->lun); | 
 | 1792 | 		host_to_fcp_swap((uint8_t *)&tsk->lun, | 
 | 1793 | 			sizeof(tsk->lun)); | 
 | 1794 | 	} | 
 | 1795 | } | 
 | 1796 |  | 
 | 1797 | static void | 
 | 1798 | qla24xx_marker_iocb(srb_t *sp, struct mrk_entry_24xx *mrk) | 
 | 1799 | { | 
 | 1800 | 	uint16_t lun; | 
 | 1801 | 	uint8_t modif; | 
 | 1802 | 	struct fc_port *fcport = sp->fcport; | 
 | 1803 | 	scsi_qla_host_t *vha = fcport->vha; | 
 | 1804 | 	struct srb_ctx *ctx = sp->ctx; | 
 | 1805 | 	struct srb_iocb *iocb = ctx->u.iocb_cmd; | 
 | 1806 | 	struct req_que *req = vha->req; | 
 | 1807 |  | 
 | 1808 | 	lun = iocb->u.marker.lun; | 
 | 1809 | 	modif = iocb->u.marker.modif; | 
 | 1810 | 	mrk->entry_type = MARKER_TYPE; | 
 | 1811 | 	mrk->modifier = modif; | 
 | 1812 | 	if (modif !=  MK_SYNC_ALL) { | 
 | 1813 | 		mrk->nport_handle = cpu_to_le16(fcport->loop_id); | 
 | 1814 | 		mrk->lun[1] = LSB(lun); | 
 | 1815 | 		mrk->lun[2] = MSB(lun); | 
 | 1816 | 		host_to_fcp_swap(mrk->lun, sizeof(mrk->lun)); | 
 | 1817 | 		mrk->vp_index = vha->vp_idx; | 
 | 1818 | 		mrk->handle = MAKE_HANDLE(req->id, mrk->handle); | 
 | 1819 | 	} | 
 | 1820 | } | 
 | 1821 |  | 
 | 1822 | static void | 
| Giridhar Malavali | 9a069e1 | 2010-01-12 13:02:47 -0800 | [diff] [blame] | 1823 | qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb) | 
 | 1824 | { | 
| Madhuranath Iyengar | 4916392 | 2010-05-04 15:01:28 -0700 | [diff] [blame] | 1825 | 	struct fc_bsg_job *bsg_job = ((struct srb_ctx *)sp->ctx)->u.bsg_job; | 
| Giridhar Malavali | 9a069e1 | 2010-01-12 13:02:47 -0800 | [diff] [blame] | 1826 |  | 
 | 1827 |         els_iocb->entry_type = ELS_IOCB_TYPE; | 
 | 1828 |         els_iocb->entry_count = 1; | 
 | 1829 |         els_iocb->sys_define = 0; | 
 | 1830 |         els_iocb->entry_status = 0; | 
 | 1831 |         els_iocb->handle = sp->handle; | 
 | 1832 |         els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id); | 
 | 1833 |         els_iocb->tx_dsd_count = __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt); | 
 | 1834 |         els_iocb->vp_index = sp->fcport->vp_idx; | 
 | 1835 |         els_iocb->sof_type = EST_SOFI3; | 
 | 1836 |         els_iocb->rx_dsd_count = __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt); | 
 | 1837 |  | 
| Madhuranath Iyengar | 4916392 | 2010-05-04 15:01:28 -0700 | [diff] [blame] | 1838 | 	els_iocb->opcode = | 
 | 1839 | 	    (((struct srb_ctx *)sp->ctx)->type == SRB_ELS_CMD_RPT) ? | 
 | 1840 | 	    bsg_job->request->rqst_data.r_els.els_code : | 
 | 1841 | 	    bsg_job->request->rqst_data.h_els.command_code; | 
| Giridhar Malavali | 9a069e1 | 2010-01-12 13:02:47 -0800 | [diff] [blame] | 1842 |         els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa; | 
 | 1843 |         els_iocb->port_id[1] = sp->fcport->d_id.b.area; | 
 | 1844 |         els_iocb->port_id[2] = sp->fcport->d_id.b.domain; | 
 | 1845 |         els_iocb->control_flags = 0; | 
 | 1846 |         els_iocb->rx_byte_count = | 
 | 1847 |             cpu_to_le32(bsg_job->reply_payload.payload_len); | 
 | 1848 |         els_iocb->tx_byte_count = | 
 | 1849 |             cpu_to_le32(bsg_job->request_payload.payload_len); | 
 | 1850 |  | 
 | 1851 |         els_iocb->tx_address[0] = cpu_to_le32(LSD(sg_dma_address | 
 | 1852 |             (bsg_job->request_payload.sg_list))); | 
 | 1853 |         els_iocb->tx_address[1] = cpu_to_le32(MSD(sg_dma_address | 
 | 1854 |             (bsg_job->request_payload.sg_list))); | 
 | 1855 |         els_iocb->tx_len = cpu_to_le32(sg_dma_len | 
 | 1856 |             (bsg_job->request_payload.sg_list)); | 
 | 1857 |  | 
 | 1858 |         els_iocb->rx_address[0] = cpu_to_le32(LSD(sg_dma_address | 
 | 1859 |             (bsg_job->reply_payload.sg_list))); | 
 | 1860 |         els_iocb->rx_address[1] = cpu_to_le32(MSD(sg_dma_address | 
 | 1861 |             (bsg_job->reply_payload.sg_list))); | 
 | 1862 |         els_iocb->rx_len = cpu_to_le32(sg_dma_len | 
 | 1863 |             (bsg_job->reply_payload.sg_list)); | 
 | 1864 | } | 
 | 1865 |  | 
 | 1866 | static void | 
 | 1867 | qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb) | 
 | 1868 | { | 
 | 1869 | 	uint16_t        avail_dsds; | 
 | 1870 | 	uint32_t        *cur_dsd; | 
 | 1871 | 	struct scatterlist *sg; | 
 | 1872 | 	int index; | 
 | 1873 | 	uint16_t tot_dsds; | 
 | 1874 |         scsi_qla_host_t *vha = sp->fcport->vha; | 
| Madhuranath Iyengar | 4916392 | 2010-05-04 15:01:28 -0700 | [diff] [blame] | 1875 | 	struct fc_bsg_job *bsg_job = ((struct srb_ctx *)sp->ctx)->u.bsg_job; | 
| Giridhar Malavali | 9a069e1 | 2010-01-12 13:02:47 -0800 | [diff] [blame] | 1876 | 	int loop_iteration = 0; | 
 | 1877 | 	int cont_iocb_prsnt = 0; | 
 | 1878 | 	int entry_count = 1; | 
 | 1879 |  | 
 | 1880 | 	ct_iocb->entry_type = CT_IOCB_TYPE; | 
 | 1881 |         ct_iocb->entry_status = 0; | 
 | 1882 |         ct_iocb->sys_define = 0; | 
 | 1883 |         ct_iocb->handle = sp->handle; | 
 | 1884 |  | 
 | 1885 | 	ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id); | 
 | 1886 | 	ct_iocb->vp_index = sp->fcport->vp_idx; | 
 | 1887 |         ct_iocb->comp_status = __constant_cpu_to_le16(0); | 
 | 1888 |  | 
 | 1889 | 	ct_iocb->cmd_dsd_count = | 
 | 1890 |             __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt); | 
 | 1891 |         ct_iocb->timeout = 0; | 
 | 1892 |         ct_iocb->rsp_dsd_count = | 
 | 1893 |             __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt); | 
 | 1894 |         ct_iocb->rsp_byte_count = | 
 | 1895 |             cpu_to_le32(bsg_job->reply_payload.payload_len); | 
 | 1896 |         ct_iocb->cmd_byte_count = | 
 | 1897 |             cpu_to_le32(bsg_job->request_payload.payload_len); | 
 | 1898 |         ct_iocb->dseg_0_address[0] = cpu_to_le32(LSD(sg_dma_address | 
 | 1899 |             (bsg_job->request_payload.sg_list))); | 
 | 1900 |         ct_iocb->dseg_0_address[1] = cpu_to_le32(MSD(sg_dma_address | 
 | 1901 |            (bsg_job->request_payload.sg_list))); | 
 | 1902 |         ct_iocb->dseg_0_len = cpu_to_le32(sg_dma_len | 
 | 1903 |             (bsg_job->request_payload.sg_list)); | 
 | 1904 |  | 
 | 1905 | 	avail_dsds = 1; | 
 | 1906 | 	cur_dsd = (uint32_t *)ct_iocb->dseg_1_address; | 
 | 1907 | 	index = 0; | 
 | 1908 | 	tot_dsds = bsg_job->reply_payload.sg_cnt; | 
 | 1909 |  | 
 | 1910 | 	for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) { | 
 | 1911 | 		dma_addr_t       sle_dma; | 
 | 1912 | 		cont_a64_entry_t *cont_pkt; | 
 | 1913 |  | 
 | 1914 | 		/* Allocate additional continuation packets? */ | 
 | 1915 | 		if (avail_dsds == 0) { | 
 | 1916 | 			/* | 
 | 1917 | 			 * Five DSDs are available in the Cont. | 
 | 1918 | 			 * Type 1 IOCB. | 
 | 1919 | 			 */ | 
 | 1920 | 			cont_pkt = qla2x00_prep_cont_type1_iocb(vha); | 
 | 1921 | 			cur_dsd = (uint32_t *) cont_pkt->dseg_0_address; | 
 | 1922 | 			avail_dsds = 5; | 
 | 1923 | 			cont_iocb_prsnt = 1; | 
 | 1924 | 			entry_count++; | 
 | 1925 | 		} | 
 | 1926 |  | 
 | 1927 | 		sle_dma = sg_dma_address(sg); | 
 | 1928 | 		*cur_dsd++   = cpu_to_le32(LSD(sle_dma)); | 
 | 1929 | 		*cur_dsd++   = cpu_to_le32(MSD(sle_dma)); | 
 | 1930 | 		*cur_dsd++   = cpu_to_le32(sg_dma_len(sg)); | 
 | 1931 | 		loop_iteration++; | 
 | 1932 | 		avail_dsds--; | 
 | 1933 | 	} | 
 | 1934 |         ct_iocb->entry_count = entry_count; | 
 | 1935 | } | 
 | 1936 |  | 
| Andrew Vasquez | ac280b6 | 2009-08-20 11:06:05 -0700 | [diff] [blame] | 1937 | int | 
 | 1938 | qla2x00_start_sp(srb_t *sp) | 
 | 1939 | { | 
 | 1940 | 	int rval; | 
 | 1941 | 	struct qla_hw_data *ha = sp->fcport->vha->hw; | 
 | 1942 | 	void *pkt; | 
 | 1943 | 	struct srb_ctx *ctx = sp->ctx; | 
 | 1944 | 	unsigned long flags; | 
 | 1945 |  | 
 | 1946 | 	rval = QLA_FUNCTION_FAILED; | 
 | 1947 | 	spin_lock_irqsave(&ha->hardware_lock, flags); | 
 | 1948 | 	pkt = qla2x00_alloc_iocbs(sp); | 
 | 1949 | 	if (!pkt) | 
 | 1950 | 		goto done; | 
 | 1951 |  | 
 | 1952 | 	rval = QLA_SUCCESS; | 
 | 1953 | 	switch (ctx->type) { | 
 | 1954 | 	case SRB_LOGIN_CMD: | 
 | 1955 | 		IS_FWI2_CAPABLE(ha) ? | 
| Andrew Vasquez | 5ff1d58 | 2010-05-04 15:01:26 -0700 | [diff] [blame] | 1956 | 		    qla24xx_login_iocb(sp, pkt) : | 
| Andrew Vasquez | ac280b6 | 2009-08-20 11:06:05 -0700 | [diff] [blame] | 1957 | 		    qla2x00_login_iocb(sp, pkt); | 
 | 1958 | 		break; | 
 | 1959 | 	case SRB_LOGOUT_CMD: | 
 | 1960 | 		IS_FWI2_CAPABLE(ha) ? | 
| Andrew Vasquez | 5ff1d58 | 2010-05-04 15:01:26 -0700 | [diff] [blame] | 1961 | 		    qla24xx_logout_iocb(sp, pkt) : | 
| Andrew Vasquez | ac280b6 | 2009-08-20 11:06:05 -0700 | [diff] [blame] | 1962 | 		    qla2x00_logout_iocb(sp, pkt); | 
 | 1963 | 		break; | 
| Giridhar Malavali | 9a069e1 | 2010-01-12 13:02:47 -0800 | [diff] [blame] | 1964 | 	case SRB_ELS_CMD_RPT: | 
 | 1965 | 	case SRB_ELS_CMD_HST: | 
 | 1966 | 		qla24xx_els_iocb(sp, pkt); | 
 | 1967 | 		break; | 
 | 1968 | 	case SRB_CT_CMD: | 
 | 1969 | 		qla24xx_ct_iocb(sp, pkt); | 
 | 1970 | 		break; | 
| Andrew Vasquez | 5ff1d58 | 2010-05-04 15:01:26 -0700 | [diff] [blame] | 1971 | 	case SRB_ADISC_CMD: | 
 | 1972 | 		IS_FWI2_CAPABLE(ha) ? | 
 | 1973 | 		    qla24xx_adisc_iocb(sp, pkt) : | 
 | 1974 | 		    qla2x00_adisc_iocb(sp, pkt); | 
 | 1975 | 		break; | 
| Madhuranath Iyengar | 3822263 | 2010-05-04 15:01:29 -0700 | [diff] [blame] | 1976 | 	case SRB_TM_CMD: | 
 | 1977 | 		qla24xx_tm_iocb(sp, pkt); | 
 | 1978 | 		break; | 
 | 1979 | 	case SRB_MARKER_CMD: | 
 | 1980 | 		qla24xx_marker_iocb(sp, pkt); | 
 | 1981 | 		break; | 
| Andrew Vasquez | ac280b6 | 2009-08-20 11:06:05 -0700 | [diff] [blame] | 1982 | 	default: | 
 | 1983 | 		break; | 
 | 1984 | 	} | 
 | 1985 |  | 
 | 1986 | 	wmb(); | 
 | 1987 | 	qla2x00_start_iocbs(sp); | 
 | 1988 | done: | 
 | 1989 | 	spin_unlock_irqrestore(&ha->hardware_lock, flags); | 
 | 1990 | 	return rval; | 
 | 1991 | } |
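 |  |  | 
 |  | /* | 
 |  |  * Illustrative call flow (a sketch, not a literal call site in this file): | 
 |  |  * an asynchronous login path fills in an srb with ctx->type set to | 
 |  |  * SRB_LOGIN_CMD and an srb_iocb describing the PLOGI options, then calls | 
 |  |  * qla2x00_start_sp(sp). Under hardware_lock the routine claims a request | 
 |  |  * ring slot through qla2x00_alloc_iocbs(), formats it via | 
 |  |  * qla24xx_login_iocb() or qla2x00_login_iocb() depending on | 
 |  |  * IS_FWI2_CAPABLE(), and finally rings the request queue doorbell through | 
 |  |  * qla2x00_start_iocbs(). | 
 |  |  */ | 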