/* bnx2fc_io.c: Broadcom NetXtreme II Linux FCoE offload driver.
 * IO manager and SCSI IO processing.
 *
 * Copyright (c) 2008 - 2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
 */

#include "bnx2fc.h"

#define RESERVE_FREE_LIST_INDEX num_possible_cpus()

static int bnx2fc_split_bd(struct bnx2fc_cmd *io_req, u64 addr, int sg_len,
			   int bd_index);
static int bnx2fc_map_sg(struct bnx2fc_cmd *io_req);
static void bnx2fc_build_bd_list_from_sg(struct bnx2fc_cmd *io_req);
static int bnx2fc_post_io_req(struct bnx2fc_rport *tgt,
			      struct bnx2fc_cmd *io_req);
static void bnx2fc_unmap_sg_list(struct bnx2fc_cmd *io_req);
static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req);
static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
				 struct fcoe_fcp_rsp_payload *fcp_rsp,
				 u8 num_rq);
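/**
 * bnx2fc_cmd_timer_set - arm the timeout timer for an IO request
 *
 * @io_req:	IO request on which the timer is set
 * @timer_msec:	timeout value in milliseconds
 *
 * Takes a reference on io_req when the delayed work is successfully
 * queued, so the command stays valid until the timer fires or is
 * cancelled.
 */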
void bnx2fc_cmd_timer_set(struct bnx2fc_cmd *io_req,
			  unsigned int timer_msec)
{
	struct bnx2fc_hba *hba = io_req->port->priv;

	if (queue_delayed_work(hba->timer_work_queue, &io_req->timeout_work,
			       msecs_to_jiffies(timer_msec)))
		kref_get(&io_req->refcount);
}
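/**
 * bnx2fc_cmd_timeout - handle IO request timeouts
 *
 * @work:	work_struct embedded in the timed-out bnx2fc_cmd
 *
 * Runs under tgt_lock. Sends a pending RRQ or retires the OXID when
 * flagged; otherwise, for SCSI commands it completes a timed-out
 * eh_abort, escalates a timed-out ABTS to an explicit logo, or issues
 * an ABTS for the timed-out IO. For ELS requests it either escalates
 * a timed-out ABTS or invokes the ELS callback with the timeout flag
 * set. The timer hold on io_req is dropped at 'done'.
 */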
static void bnx2fc_cmd_timeout(struct work_struct *work)
{
	struct bnx2fc_cmd *io_req = container_of(work, struct bnx2fc_cmd,
						 timeout_work.work);
	struct fc_lport *lport;
	struct fc_rport_priv *rdata;
	u8 cmd_type = io_req->cmd_type;
	struct bnx2fc_rport *tgt = io_req->tgt;
	int logo_issued;
	int rc;

	BNX2FC_IO_DBG(io_req, "cmd_timeout, cmd_type = %d, "
		      "req_flags = %lx\n", cmd_type, io_req->req_flags);

	spin_lock_bh(&tgt->tgt_lock);
	if (test_and_clear_bit(BNX2FC_FLAG_ISSUE_RRQ, &io_req->req_flags)) {
		clear_bit(BNX2FC_FLAG_RETIRE_OXID, &io_req->req_flags);
		/*
		 * Ideally we should hold the io_req until RRQ completes,
		 * and release io_req from timeout hold.
		 */
		spin_unlock_bh(&tgt->tgt_lock);
		bnx2fc_send_rrq(io_req);
		return;
	}
	if (test_and_clear_bit(BNX2FC_FLAG_RETIRE_OXID, &io_req->req_flags)) {
		BNX2FC_IO_DBG(io_req, "IO ready for reuse now\n");
		goto done;
	}

	switch (cmd_type) {
	case BNX2FC_SCSI_CMD:
		if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
				       &io_req->req_flags)) {
			/* Handle eh_abort timeout */
			BNX2FC_IO_DBG(io_req, "eh_abort timed out\n");
			complete(&io_req->tm_done);
		} else if (test_bit(BNX2FC_FLAG_ISSUE_ABTS,
				    &io_req->req_flags)) {
			/* Handle internally generated ABTS timeout */
			BNX2FC_IO_DBG(io_req, "ABTS timed out refcnt = %d\n",
				      io_req->refcount.refcount.counter);
			if (!(test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
					       &io_req->req_flags))) {

				lport = io_req->port->lport;
				rdata = io_req->tgt->rdata;
				logo_issued = test_and_set_bit(
						BNX2FC_FLAG_EXPL_LOGO,
						&tgt->flags);
				kref_put(&io_req->refcount, bnx2fc_cmd_release);
				spin_unlock_bh(&tgt->tgt_lock);

				/* Explicitly logo the target */
				if (!logo_issued) {
					BNX2FC_IO_DBG(io_req, "Explicit "
						      "logo - tgt flags = 0x%lx\n",
						      tgt->flags);

					mutex_lock(&lport->disc.disc_mutex);
					lport->tt.rport_logoff(rdata);
					mutex_unlock(&lport->disc.disc_mutex);
				}
				return;
			}
		} else {
			/* Handle IO timeout */
			BNX2FC_IO_DBG(io_req, "IO timed out. issue ABTS\n");
			if (test_and_set_bit(BNX2FC_FLAG_IO_COMPL,
					     &io_req->req_flags)) {
				BNX2FC_IO_DBG(io_req, "IO completed before "
					      "timer expiry\n");
				goto done;
			}

			if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS,
					      &io_req->req_flags)) {
				rc = bnx2fc_initiate_abts(io_req);
				if (rc == SUCCESS)
					goto done;
				/*
				 * Explicitly logo the target if
				 * abts initiation fails
				 */
				lport = io_req->port->lport;
				rdata = io_req->tgt->rdata;
				logo_issued = test_and_set_bit(
						BNX2FC_FLAG_EXPL_LOGO,
						&tgt->flags);
				kref_put(&io_req->refcount, bnx2fc_cmd_release);
				spin_unlock_bh(&tgt->tgt_lock);

				if (!logo_issued) {
					BNX2FC_IO_DBG(io_req, "Explicit "
						      "logo - tgt flags = 0x%lx\n",
						      tgt->flags);

					mutex_lock(&lport->disc.disc_mutex);
					lport->tt.rport_logoff(rdata);
					mutex_unlock(&lport->disc.disc_mutex);
				}
				return;
			} else {
				BNX2FC_IO_DBG(io_req, "IO already in "
					      "ABTS processing\n");
			}
		}
		break;
	case BNX2FC_ELS:
		if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) {
			BNX2FC_IO_DBG(io_req, "ABTS for ELS timed out\n");

			if (!test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
					      &io_req->req_flags)) {
				lport = io_req->port->lport;
				rdata = io_req->tgt->rdata;
				logo_issued = test_and_set_bit(
						BNX2FC_FLAG_EXPL_LOGO,
						&tgt->flags);
				kref_put(&io_req->refcount, bnx2fc_cmd_release);
				spin_unlock_bh(&tgt->tgt_lock);

				/* Explicitly logo the target */
				if (!logo_issued) {
					BNX2FC_IO_DBG(io_req, "Explicit "
						      "logo (els)\n");
					mutex_lock(&lport->disc.disc_mutex);
					lport->tt.rport_logoff(rdata);
					mutex_unlock(&lport->disc.disc_mutex);
				}
				return;
			}
		} else {
			/*
			 * Handle ELS timeout.
			 * tgt_lock is used to sync compl path and timeout
			 * path. If els compl path is processing this IO, we
			 * have nothing to do here, just release the timer hold
			 */
			BNX2FC_IO_DBG(io_req, "ELS timed out\n");
			if (test_and_set_bit(BNX2FC_FLAG_ELS_DONE,
					     &io_req->req_flags))
				goto done;

			/* Indicate the cb_func that this ELS is timed out */
			set_bit(BNX2FC_FLAG_ELS_TIMEOUT, &io_req->req_flags);

			if ((io_req->cb_func) && (io_req->cb_arg)) {
				io_req->cb_func(io_req->cb_arg);
				io_req->cb_arg = NULL;
			}
		}
		break;
	default:
		printk(KERN_ERR PFX "cmd_timeout: invalid cmd_type %d\n",
		       cmd_type);
		break;
	}

done:
	/* release the cmd that was held when timer was set */
	kref_put(&io_req->refcount, bnx2fc_cmd_release);
	spin_unlock_bh(&tgt->tgt_lock);
}
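/**
 * bnx2fc_scsi_done - error back a SCSI command to scsi-ml
 *
 * @io_req:	IO request to complete
 * @err_code:	host byte to report (e.g. DID_ERROR, DID_ABORT)
 *
 * Only BNX2FC_SCSI_CMD requests are completed here; other command
 * types that may sit on the active queue during a flush are ignored.
 * The full buffer length is reported back as residual.
 */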
static void bnx2fc_scsi_done(struct bnx2fc_cmd *io_req, int err_code)
{
	/* Called with host lock held */
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;

	/*
	 * active_cmd_queue may have other command types as well,
	 * and during flush operation, we want to error back only
	 * scsi commands.
	 */
	if (io_req->cmd_type != BNX2FC_SCSI_CMD)
		return;

	BNX2FC_IO_DBG(io_req, "scsi_done. err_code = 0x%x\n", err_code);
	bnx2fc_unmap_sg_list(io_req);
	io_req->sc_cmd = NULL;
	if (!sc_cmd) {
		printk(KERN_ERR PFX "scsi_done - sc_cmd NULL. "
		       "IO(0x%x) already cleaned up\n",
		       io_req->xid);
		return;
	}
	sc_cmd->result = err_code << 16;

	BNX2FC_IO_DBG(io_req, "sc=%p, result=0x%x, retries=%d, allowed=%d\n",
		      sc_cmd, host_byte(sc_cmd->result), sc_cmd->retries,
		      sc_cmd->allowed);
	scsi_set_resid(sc_cmd, scsi_bufflen(sc_cmd));
	sc_cmd->SCp.ptr = NULL;
	sc_cmd->scsi_done(sc_cmd);
}
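/**
 * bnx2fc_cmd_mgr_alloc - allocate and initialize the command manager
 *
 * @hba:	adapter structure the command manager belongs to
 * @min_xid:	lowest exchange id managed by the pool
 * @max_xid:	highest exchange id managed by the pool
 *
 * Pre-allocates one bnx2fc_cmd per xid and distributes them across
 * per-CPU free lists, with the last list reserved for slow path
 * (ELS/TM) requests. Also allocates the io_bdt pool along with a
 * DMA-coherent BD table for each command.
 */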
struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba,
					    u16 min_xid, u16 max_xid)
{
	struct bnx2fc_cmd_mgr *cmgr;
	struct io_bdt *bdt_info;
	struct bnx2fc_cmd *io_req;
	size_t len;
	u32 mem_size;
	u16 xid;
	int i;
	int num_ios, num_pri_ios;
	size_t bd_tbl_sz;
	int arr_sz = num_possible_cpus() + 1;

	if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN) {
		printk(KERN_ERR PFX "cmd_mgr_alloc: Invalid min_xid 0x%x "
		       "and max_xid 0x%x\n", min_xid, max_xid);
		return NULL;
	}
	BNX2FC_MISC_DBG("min xid 0x%x, max xid 0x%x\n", min_xid, max_xid);

	num_ios = max_xid - min_xid + 1;
	len = (num_ios * (sizeof(struct bnx2fc_cmd *)));
	len += sizeof(struct bnx2fc_cmd_mgr);

	cmgr = kzalloc(len, GFP_KERNEL);
	if (!cmgr) {
		printk(KERN_ERR PFX "failed to alloc cmgr\n");
		return NULL;
	}

	cmgr->free_list = kzalloc(sizeof(*cmgr->free_list) *
				  arr_sz, GFP_KERNEL);
	if (!cmgr->free_list) {
		printk(KERN_ERR PFX "failed to alloc free_list\n");
		goto mem_err;
	}

	cmgr->free_list_lock = kzalloc(sizeof(*cmgr->free_list_lock) *
				       arr_sz, GFP_KERNEL);
	if (!cmgr->free_list_lock) {
		printk(KERN_ERR PFX "failed to alloc free_list_lock\n");
		goto mem_err;
	}

	cmgr->hba = hba;
	cmgr->cmds = (struct bnx2fc_cmd **)(cmgr + 1);

	for (i = 0; i < arr_sz; i++) {
		INIT_LIST_HEAD(&cmgr->free_list[i]);
		spin_lock_init(&cmgr->free_list_lock[i]);
	}

	/*
	 * Pre-allocated pool of bnx2fc_cmds.
	 * Last entry in the free list array is the free list
	 * of slow path requests.
	 */
	xid = BNX2FC_MIN_XID;
	num_pri_ios = num_ios - BNX2FC_ELSTM_XIDS;
	for (i = 0; i < num_ios; i++) {
		io_req = kzalloc(sizeof(*io_req), GFP_KERNEL);

		if (!io_req) {
			printk(KERN_ERR PFX "failed to alloc io_req\n");
			goto mem_err;
		}

		INIT_LIST_HEAD(&io_req->link);
		INIT_DELAYED_WORK(&io_req->timeout_work, bnx2fc_cmd_timeout);

		io_req->xid = xid++;
		if (i < num_pri_ios)
			list_add_tail(&io_req->link,
				      &cmgr->free_list[io_req->xid %
						       num_possible_cpus()]);
		else
			list_add_tail(&io_req->link,
				      &cmgr->free_list[num_possible_cpus()]);
		io_req++;
	}

	/* Allocate pool of io_bdts - one for each bnx2fc_cmd */
	mem_size = num_ios * sizeof(struct io_bdt *);
	cmgr->io_bdt_pool = kmalloc(mem_size, GFP_KERNEL);
	if (!cmgr->io_bdt_pool) {
		printk(KERN_ERR PFX "failed to alloc io_bdt_pool\n");
		goto mem_err;
	}

	mem_size = sizeof(struct io_bdt);
	for (i = 0; i < num_ios; i++) {
		cmgr->io_bdt_pool[i] = kmalloc(mem_size, GFP_KERNEL);
		if (!cmgr->io_bdt_pool[i]) {
			printk(KERN_ERR PFX "failed to alloc "
			       "io_bdt_pool[%d]\n", i);
			goto mem_err;
		}
	}

	/* Allocate and map fcoe_bdt_ctx structures */
	bd_tbl_sz = BNX2FC_MAX_BDS_PER_CMD * sizeof(struct fcoe_bd_ctx);
	for (i = 0; i < num_ios; i++) {
		bdt_info = cmgr->io_bdt_pool[i];
		bdt_info->bd_tbl = dma_alloc_coherent(&hba->pcidev->dev,
						      bd_tbl_sz,
						      &bdt_info->bd_tbl_dma,
						      GFP_KERNEL);
		if (!bdt_info->bd_tbl) {
			printk(KERN_ERR PFX "failed to alloc "
			       "bdt_tbl[%d]\n", i);
			goto mem_err;
		}
	}

	return cmgr;

mem_err:
	bnx2fc_cmd_mgr_free(cmgr);
	return NULL;
}
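/**
 * bnx2fc_cmd_mgr_free - free the command manager and all its resources
 *
 * @cmgr:	command manager to destroy
 *
 * Releases the BD tables, the io_bdt pool, the pre-allocated commands
 * on the free lists, and the manager itself. Safe to call on a
 * partially constructed manager from the allocation error path.
 */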
void bnx2fc_cmd_mgr_free(struct bnx2fc_cmd_mgr *cmgr)
{
	struct io_bdt *bdt_info;
	struct bnx2fc_hba *hba = cmgr->hba;
	size_t bd_tbl_sz;
	u16 min_xid = BNX2FC_MIN_XID;
	u16 max_xid = BNX2FC_MAX_XID;
	int num_ios;
	int i;

	num_ios = max_xid - min_xid + 1;

	/* Free fcoe_bdt_ctx structures */
	if (!cmgr->io_bdt_pool)
		goto free_cmd_pool;

	bd_tbl_sz = BNX2FC_MAX_BDS_PER_CMD * sizeof(struct fcoe_bd_ctx);
	for (i = 0; i < num_ios; i++) {
		bdt_info = cmgr->io_bdt_pool[i];
		if (bdt_info->bd_tbl) {
			dma_free_coherent(&hba->pcidev->dev, bd_tbl_sz,
					  bdt_info->bd_tbl,
					  bdt_info->bd_tbl_dma);
			bdt_info->bd_tbl = NULL;
		}
	}

	/* Destroy io_bdt pool */
	for (i = 0; i < num_ios; i++) {
		kfree(cmgr->io_bdt_pool[i]);
		cmgr->io_bdt_pool[i] = NULL;
	}

	kfree(cmgr->io_bdt_pool);
	cmgr->io_bdt_pool = NULL;

free_cmd_pool:
	kfree(cmgr->free_list_lock);

	/* Destroy cmd pool */
	if (!cmgr->free_list)
		goto free_cmgr;

	for (i = 0; i < num_possible_cpus() + 1; i++) {
		struct list_head *list;
		struct list_head *tmp;

		list_for_each_safe(list, tmp, &cmgr->free_list[i]) {
			struct bnx2fc_cmd *io_req = (struct bnx2fc_cmd *)list;
			list_del(&io_req->link);
			kfree(io_req);
		}
	}
	kfree(cmgr->free_list);
free_cmgr:
	/* Free command manager itself */
	kfree(cmgr);
}
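/**
 * bnx2fc_elstm_alloc - allocate a slow path (ELS/TM) command
 *
 * @tgt:	rport the command is directed to
 * @type:	command type (BNX2FC_ELS, BNX2FC_TASK_MGMT_CMD, etc.)
 *
 * Carves a command out of the reserved slow path free list while
 * enforcing the per-type SQ depth limit for the session.
 */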
struct bnx2fc_cmd *bnx2fc_elstm_alloc(struct bnx2fc_rport *tgt, int type)
{
	struct fcoe_port *port = tgt->port;
	struct bnx2fc_hba *hba = port->priv;
	struct bnx2fc_cmd_mgr *cmd_mgr = hba->cmd_mgr;
	struct bnx2fc_cmd *io_req;
	struct list_head *listp;
	struct io_bdt *bd_tbl;
	int index = RESERVE_FREE_LIST_INDEX;
	u32 max_sqes;
	u16 xid;

	max_sqes = tgt->max_sqes;
	switch (type) {
	case BNX2FC_TASK_MGMT_CMD:
		max_sqes = BNX2FC_TM_MAX_SQES;
		break;
	case BNX2FC_ELS:
		max_sqes = BNX2FC_ELS_MAX_SQES;
		break;
	default:
		break;
	}

	/*
	 * NOTE: Free list insertions and deletions are protected with
	 * cmgr lock
	 */
	spin_lock_bh(&cmd_mgr->free_list_lock[index]);
	if ((list_empty(&(cmd_mgr->free_list[index]))) ||
	    (tgt->num_active_ios.counter >= max_sqes)) {
		BNX2FC_TGT_DBG(tgt, "No free els_tm cmds available "
			       "ios(%d):sqes(%d)\n",
			       tgt->num_active_ios.counter, tgt->max_sqes);
		if (list_empty(&(cmd_mgr->free_list[index])))
			printk(KERN_ERR PFX "elstm_alloc: list_empty\n");
		spin_unlock_bh(&cmd_mgr->free_list_lock[index]);
		return NULL;
	}

	listp = (struct list_head *)
			cmd_mgr->free_list[index].next;
	list_del_init(listp);
	io_req = (struct bnx2fc_cmd *) listp;
	xid = io_req->xid;
	cmd_mgr->cmds[xid] = io_req;
	atomic_inc(&tgt->num_active_ios);
	spin_unlock_bh(&cmd_mgr->free_list_lock[index]);

	INIT_LIST_HEAD(&io_req->link);

	io_req->port = port;
	io_req->cmd_mgr = cmd_mgr;
	io_req->req_flags = 0;
	io_req->cmd_type = type;

	/* Bind io_bdt for this io_req */
	/* Have a static link between io_req and io_bdt_pool */
	bd_tbl = io_req->bd_tbl = cmd_mgr->io_bdt_pool[xid];
	bd_tbl->io_req = io_req;

	/* Hold the io_req against deletion */
	kref_init(&io_req->refcount);
	return io_req;
}
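/**
 * bnx2fc_cmd_alloc - allocate a fast path SCSI command
 *
 * @tgt:	rport the command is directed to
 *
 * Takes a command from the free list of the current CPU, keeping
 * fast path allocations off the shared slow path list.
 */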
static struct bnx2fc_cmd *bnx2fc_cmd_alloc(struct bnx2fc_rport *tgt)
{
	struct fcoe_port *port = tgt->port;
	struct bnx2fc_hba *hba = port->priv;
	struct bnx2fc_cmd_mgr *cmd_mgr = hba->cmd_mgr;
	struct bnx2fc_cmd *io_req;
	struct list_head *listp;
	struct io_bdt *bd_tbl;
	u32 max_sqes;
	u16 xid;
	int index = get_cpu();

	max_sqes = BNX2FC_SCSI_MAX_SQES;
	/*
	 * NOTE: Free list insertions and deletions are protected with
	 * cmgr lock
	 */
	spin_lock_bh(&cmd_mgr->free_list_lock[index]);
	if ((list_empty(&cmd_mgr->free_list[index])) ||
	    (tgt->num_active_ios.counter >= max_sqes)) {
		spin_unlock_bh(&cmd_mgr->free_list_lock[index]);
		put_cpu();
		return NULL;
	}

	listp = (struct list_head *)
			cmd_mgr->free_list[index].next;
	list_del_init(listp);
	io_req = (struct bnx2fc_cmd *) listp;
	xid = io_req->xid;
	cmd_mgr->cmds[xid] = io_req;
	atomic_inc(&tgt->num_active_ios);
	spin_unlock_bh(&cmd_mgr->free_list_lock[index]);
	put_cpu();

	INIT_LIST_HEAD(&io_req->link);

	io_req->port = port;
	io_req->cmd_mgr = cmd_mgr;
	io_req->req_flags = 0;

	/* Bind io_bdt for this io_req */
	/* Have a static link between io_req and io_bdt_pool */
	bd_tbl = io_req->bd_tbl = cmd_mgr->io_bdt_pool[xid];
	bd_tbl->io_req = io_req;

	/* Hold the io_req against deletion */
	kref_init(&io_req->refcount);
	return io_req;
}
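/**
 * bnx2fc_cmd_release - kref release handler for a bnx2fc_cmd
 *
 * @ref:	kref embedded in the command being released
 *
 * Frees middle path resources for non-SCSI commands and returns the
 * command to the free list it was allocated from.
 */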
void bnx2fc_cmd_release(struct kref *ref)
{
	struct bnx2fc_cmd *io_req = container_of(ref,
						 struct bnx2fc_cmd, refcount);
	struct bnx2fc_cmd_mgr *cmd_mgr = io_req->cmd_mgr;
	int index;

	if (io_req->cmd_type == BNX2FC_SCSI_CMD)
		index = io_req->xid % num_possible_cpus();
	else
		index = RESERVE_FREE_LIST_INDEX;

	spin_lock_bh(&cmd_mgr->free_list_lock[index]);
	if (io_req->cmd_type != BNX2FC_SCSI_CMD)
		bnx2fc_free_mp_resc(io_req);
	cmd_mgr->cmds[io_req->xid] = NULL;
	/* Delete IO from retire queue */
	list_del_init(&io_req->link);
	/* Add it to the free list */
	list_add(&io_req->link,
		 &cmd_mgr->free_list[index]);
	atomic_dec(&io_req->tgt->num_active_ios);
	spin_unlock_bh(&cmd_mgr->free_list_lock[index]);
}
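/**
 * bnx2fc_free_mp_resc - free middle path (TM/ELS) DMA resources
 *
 * @io_req:	request whose request/response buffers and BDs are freed
 */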
static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req)
{
	struct bnx2fc_mp_req *mp_req = &(io_req->mp_req);
	struct bnx2fc_hba *hba = io_req->port->priv;
	size_t sz = sizeof(struct fcoe_bd_ctx);

	/* clear tm flags */
	mp_req->tm_flags = 0;
	if (mp_req->mp_req_bd) {
		dma_free_coherent(&hba->pcidev->dev, sz,
				  mp_req->mp_req_bd,
				  mp_req->mp_req_bd_dma);
		mp_req->mp_req_bd = NULL;
	}
	if (mp_req->mp_resp_bd) {
		dma_free_coherent(&hba->pcidev->dev, sz,
				  mp_req->mp_resp_bd,
				  mp_req->mp_resp_bd_dma);
		mp_req->mp_resp_bd = NULL;
	}
	if (mp_req->req_buf) {
		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
				  mp_req->req_buf,
				  mp_req->req_buf_dma);
		mp_req->req_buf = NULL;
	}
	if (mp_req->resp_buf) {
		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
				  mp_req->resp_buf,
				  mp_req->resp_buf_dma);
		mp_req->resp_buf = NULL;
	}
}
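/**
 * bnx2fc_init_mp_req - set up DMA buffers for a middle path request
 *
 * @io_req:	TM or ELS request to initialize
 *
 * Allocates page-sized request/response buffers and a single BD for
 * each, and fills in the BD table entries.
 */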
int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req)
{
	struct bnx2fc_mp_req *mp_req;
	struct fcoe_bd_ctx *mp_req_bd;
	struct fcoe_bd_ctx *mp_resp_bd;
	struct bnx2fc_hba *hba = io_req->port->priv;
	dma_addr_t addr;
	size_t sz;

	mp_req = (struct bnx2fc_mp_req *)&(io_req->mp_req);
	memset(mp_req, 0, sizeof(struct bnx2fc_mp_req));

	mp_req->req_len = sizeof(struct fcp_cmnd);
	io_req->data_xfer_len = mp_req->req_len;
	mp_req->req_buf = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
					     &mp_req->req_buf_dma,
					     GFP_ATOMIC);
	if (!mp_req->req_buf) {
		printk(KERN_ERR PFX "unable to alloc MP req buffer\n");
		bnx2fc_free_mp_resc(io_req);
		return FAILED;
	}

	mp_req->resp_buf = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
					      &mp_req->resp_buf_dma,
					      GFP_ATOMIC);
	if (!mp_req->resp_buf) {
		printk(KERN_ERR PFX "unable to alloc TM resp buffer\n");
		bnx2fc_free_mp_resc(io_req);
		return FAILED;
	}
	memset(mp_req->req_buf, 0, PAGE_SIZE);
	memset(mp_req->resp_buf, 0, PAGE_SIZE);

	/* Allocate and map mp_req_bd and mp_resp_bd */
	sz = sizeof(struct fcoe_bd_ctx);
	mp_req->mp_req_bd = dma_alloc_coherent(&hba->pcidev->dev, sz,
					       &mp_req->mp_req_bd_dma,
					       GFP_ATOMIC);
	if (!mp_req->mp_req_bd) {
		printk(KERN_ERR PFX "unable to alloc MP req bd\n");
		bnx2fc_free_mp_resc(io_req);
		return FAILED;
	}
	mp_req->mp_resp_bd = dma_alloc_coherent(&hba->pcidev->dev, sz,
						&mp_req->mp_resp_bd_dma,
						GFP_ATOMIC);
	if (!mp_req->mp_resp_bd) {
		printk(KERN_ERR PFX "unable to alloc MP resp bd\n");
		bnx2fc_free_mp_resc(io_req);
		return FAILED;
	}
	/* Fill bd table */
	addr = mp_req->req_buf_dma;
	mp_req_bd = mp_req->mp_req_bd;
	mp_req_bd->buf_addr_lo = (u32)addr & 0xffffffff;
	mp_req_bd->buf_addr_hi = (u32)((u64)addr >> 32);
	mp_req_bd->buf_len = PAGE_SIZE;
	mp_req_bd->flags = 0;

	/*
	 * MP buffer is either a task mgmt command or an ELS.
	 * So the assumption is that it consumes a single bd
	 * entry in the bd table
	 */
	mp_resp_bd = mp_req->mp_resp_bd;
	addr = mp_req->resp_buf_dma;
	mp_resp_bd->buf_addr_lo = (u32)addr & 0xffffffff;
	mp_resp_bd->buf_addr_hi = (u32)((u64)addr >> 32);
	mp_resp_bd->buf_len = PAGE_SIZE;
	mp_resp_bd->flags = 0;

	return SUCCESS;
}
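/**
 * bnx2fc_initiate_tmf - send an FCP task management command
 *
 * @sc_cmd:	SCSI command that triggered the TMF
 * @tm_flags:	FCP TM flags (FCP_TMF_LUN_RESET or FCP_TMF_TGT_RESET)
 *
 * Builds the FCP_CMND and FC header for the TM request, posts it to
 * the SQ, and waits up to BNX2FC_TM_TIMEOUT seconds for completion.
 */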
static int bnx2fc_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
{
	struct fc_lport *lport;
	struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
	struct fc_rport_libfc_priv *rp = rport->dd_data;
	struct fcoe_port *port;
	struct bnx2fc_hba *hba;
	struct bnx2fc_rport *tgt;
	struct bnx2fc_cmd *io_req;
	struct bnx2fc_mp_req *tm_req;
	struct fcoe_task_ctx_entry *task;
	struct fcoe_task_ctx_entry *task_page;
	struct Scsi_Host *host = sc_cmd->device->host;
	struct fc_frame_header *fc_hdr;
	struct fcp_cmnd *fcp_cmnd;
	int task_idx, index;
	int rc = SUCCESS;
	u16 xid;
	u32 sid, did;
	unsigned long start = jiffies;

	lport = shost_priv(host);
	port = lport_priv(lport);
	hba = port->priv;

	if (rport == NULL) {
		printk(KERN_ALERT PFX "device_reset: rport is NULL\n");
		rc = FAILED;
		goto tmf_err;
	}

	rc = fc_block_scsi_eh(sc_cmd);
	if (rc)
		return rc;

	if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
		printk(KERN_ERR PFX "device_reset: link is not ready\n");
		rc = FAILED;
		goto tmf_err;
	}
	/* rport and tgt are allocated together, so tgt should be non-NULL */
	tgt = (struct bnx2fc_rport *)&rp[1];

	if (!(test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags))) {
		printk(KERN_ERR PFX "device_reset: tgt not offloaded\n");
		rc = FAILED;
		goto tmf_err;
	}
retry_tmf:
	io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_TASK_MGMT_CMD);
	if (!io_req) {
		if (time_after(jiffies, start + HZ)) {
			printk(KERN_ERR PFX "tmf: Failed TMF\n");
			rc = FAILED;
			goto tmf_err;
		}
		msleep(20);
		goto retry_tmf;
	}
	/* Initialize rest of io_req fields */
	io_req->sc_cmd = sc_cmd;
	io_req->port = port;
	io_req->tgt = tgt;

	tm_req = (struct bnx2fc_mp_req *)&(io_req->mp_req);

	rc = bnx2fc_init_mp_req(io_req);
	if (rc == FAILED) {
		printk(KERN_ERR PFX "Task mgmt MP request init failed\n");
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
		goto tmf_err;
	}

	/* Set TM flags */
	io_req->io_req_flags = 0;
	tm_req->tm_flags = tm_flags;

	/* Fill FCP_CMND */
	bnx2fc_build_fcp_cmnd(io_req, (struct fcp_cmnd *)tm_req->req_buf);
	fcp_cmnd = (struct fcp_cmnd *)tm_req->req_buf;
	memset(fcp_cmnd->fc_cdb, 0, sc_cmd->cmd_len);
	fcp_cmnd->fc_dl = 0;

	/* Fill FC header */
	fc_hdr = &(tm_req->req_fc_hdr);
	sid = tgt->sid;
	did = rport->port_id;
	__fc_fill_fc_hdr(fc_hdr, FC_RCTL_DD_UNSOL_CMD, did, sid,
			 FC_TYPE_FCP, FC_FC_FIRST_SEQ | FC_FC_END_SEQ |
			 FC_FC_SEQ_INIT, 0);
	/* Obtain exchange id */
	xid = io_req->xid;

	BNX2FC_TGT_DBG(tgt, "Initiate TMF - xid = 0x%x\n", xid);
	task_idx = xid/BNX2FC_TASKS_PER_PAGE;
	index = xid % BNX2FC_TASKS_PER_PAGE;

	/* Initialize task context for this IO request */
	task_page = (struct fcoe_task_ctx_entry *) hba->task_ctx[task_idx];
	task = &(task_page[index]);
	bnx2fc_init_mp_task(io_req, task);

	sc_cmd->SCp.ptr = (char *)io_req;

	/* Obtain free SQ entry */
	spin_lock_bh(&tgt->tgt_lock);
	bnx2fc_add_2_sq(tgt, xid);

	/* Enqueue the io_req to active_tm_queue */
	io_req->on_tmf_queue = 1;
	list_add_tail(&io_req->link, &tgt->active_tm_queue);

	init_completion(&io_req->tm_done);
	io_req->wait_for_comp = 1;

	/* Ring doorbell */
	bnx2fc_ring_doorbell(tgt);
	spin_unlock_bh(&tgt->tgt_lock);

	rc = wait_for_completion_timeout(&io_req->tm_done,
					 BNX2FC_TM_TIMEOUT * HZ);
	spin_lock_bh(&tgt->tgt_lock);

	io_req->wait_for_comp = 0;
	if (!(test_bit(BNX2FC_FLAG_TM_COMPL, &io_req->req_flags)))
		set_bit(BNX2FC_FLAG_TM_TIMEOUT, &io_req->req_flags);

	spin_unlock_bh(&tgt->tgt_lock);

	if (!rc) {
		printk(KERN_ERR PFX "task mgmt command failed...\n");
		rc = FAILED;
	} else {
		printk(KERN_ERR PFX "task mgmt command success...\n");
		rc = SUCCESS;
	}
tmf_err:
	return rc;
}
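/**
 * bnx2fc_initiate_abts - send an ABTS for an outstanding exchange
 *
 * @io_req:	IO request whose exchange is to be aborted
 *
 * Called with tgt_lock held. Allocates a temporary BLS request,
 * fills the ABTS frame with the OX_ID/RX_ID of the original exchange,
 * starts a 2 * R_A_TOV timer on the original request, and posts the
 * ABTS to the SQ.
 */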
int bnx2fc_initiate_abts(struct bnx2fc_cmd *io_req)
{
	struct fc_lport *lport;
	struct bnx2fc_rport *tgt = io_req->tgt;
	struct fc_rport *rport = tgt->rport;
	struct fc_rport_priv *rdata = tgt->rdata;
	struct bnx2fc_hba *hba;
	struct fcoe_port *port;
	struct bnx2fc_cmd *abts_io_req;
	struct fcoe_task_ctx_entry *task;
	struct fcoe_task_ctx_entry *task_page;
	struct fc_frame_header *fc_hdr;
	struct bnx2fc_mp_req *abts_req;
	int task_idx, index;
	u32 sid, did;
	u16 xid;
	int rc = SUCCESS;
	u32 r_a_tov = rdata->r_a_tov;

	/* called with tgt_lock held */
	BNX2FC_IO_DBG(io_req, "Entered bnx2fc_initiate_abts\n");

	port = io_req->port;
	hba = port->priv;
	lport = port->lport;

	if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
		printk(KERN_ERR PFX "initiate_abts: tgt not offloaded\n");
		rc = FAILED;
		goto abts_err;
	}

	if (rport == NULL) {
		printk(KERN_ALERT PFX "initiate_abts: rport is NULL\n");
		rc = FAILED;
		goto abts_err;
	}

	if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
		printk(KERN_ERR PFX "initiate_abts: link is not ready\n");
		rc = FAILED;
		goto abts_err;
	}

	abts_io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_ABTS);
	if (!abts_io_req) {
		printk(KERN_ERR PFX "abts: couldn't allocate cmd\n");
		rc = FAILED;
		goto abts_err;
	}

	/* Initialize rest of io_req fields */
	abts_io_req->sc_cmd = NULL;
	abts_io_req->port = port;
	abts_io_req->tgt = tgt;
	abts_io_req->data_xfer_len = 0; /* No data transfer for ABTS */

	abts_req = (struct bnx2fc_mp_req *)&(abts_io_req->mp_req);
	memset(abts_req, 0, sizeof(struct bnx2fc_mp_req));

	/* Fill FC header */
	fc_hdr = &(abts_req->req_fc_hdr);

	/* Obtain oxid and rxid for the original exchange to be aborted */
	fc_hdr->fh_ox_id = htons(io_req->xid);
	fc_hdr->fh_rx_id = htons(io_req->task->rx_wr_tx_rd.rx_id);

	sid = tgt->sid;
	did = rport->port_id;

	__fc_fill_fc_hdr(fc_hdr, FC_RCTL_BA_ABTS, did, sid,
			 FC_TYPE_BLS, FC_FC_FIRST_SEQ | FC_FC_END_SEQ |
			 FC_FC_SEQ_INIT, 0);

	xid = abts_io_req->xid;
	BNX2FC_IO_DBG(abts_io_req, "ABTS io_req\n");
	task_idx = xid/BNX2FC_TASKS_PER_PAGE;
	index = xid % BNX2FC_TASKS_PER_PAGE;

	/* Initialize task context for this IO request */
	task_page = (struct fcoe_task_ctx_entry *) hba->task_ctx[task_idx];
	task = &(task_page[index]);
	bnx2fc_init_mp_task(abts_io_req, task);

	/*
	 * ABTS task is a temporary task that will be cleaned up
	 * irrespective of ABTS response. We need to start the timer
	 * for the original exchange, as the CQE is posted for the original
	 * IO request.
	 *
	 * Timer for ABTS is started only when it is originated by a
	 * TM request. For the ABTS issued as part of ULP timeout,
	 * scsi-ml maintains the timers.
	 */

	/* if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags))*/
	bnx2fc_cmd_timer_set(io_req, 2 * r_a_tov);

	/* Obtain free SQ entry */
	bnx2fc_add_2_sq(tgt, xid);

	/* Ring doorbell */
	bnx2fc_ring_doorbell(tgt);

abts_err:
	return rc;
}
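/**
 * bnx2fc_initiate_cleanup - ask the firmware to clean up an exchange
 *
 * @io_req:	IO request whose task is to be cleaned up
 *
 * Called with tgt_lock held. Posts a cleanup task referencing the
 * original exchange id to the SQ.
 */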
int bnx2fc_initiate_cleanup(struct bnx2fc_cmd *io_req)
{
	struct fc_lport *lport;
	struct bnx2fc_rport *tgt = io_req->tgt;
	struct bnx2fc_hba *hba;
	struct fcoe_port *port;
	struct bnx2fc_cmd *cleanup_io_req;
	struct fcoe_task_ctx_entry *task;
	struct fcoe_task_ctx_entry *task_page;
	int task_idx, index;
	u16 xid, orig_xid;
	int rc = 0;

	/* ASSUMPTION: called with tgt_lock held */
	BNX2FC_IO_DBG(io_req, "Entered bnx2fc_initiate_cleanup\n");

	port = io_req->port;
	hba = port->priv;
	lport = port->lport;

	cleanup_io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_CLEANUP);
	if (!cleanup_io_req) {
		printk(KERN_ERR PFX "cleanup: couldn't allocate cmd\n");
		rc = -1;
		goto cleanup_err;
	}

	/* Initialize rest of io_req fields */
	cleanup_io_req->sc_cmd = NULL;
	cleanup_io_req->port = port;
	cleanup_io_req->tgt = tgt;
	cleanup_io_req->data_xfer_len = 0; /* No data transfer for cleanup */

	xid = cleanup_io_req->xid;

	task_idx = xid/BNX2FC_TASKS_PER_PAGE;
	index = xid % BNX2FC_TASKS_PER_PAGE;

	/* Initialize task context for this IO request */
	task_page = (struct fcoe_task_ctx_entry *) hba->task_ctx[task_idx];
	task = &(task_page[index]);
	orig_xid = io_req->xid;

	BNX2FC_IO_DBG(io_req, "CLEANUP io_req xid = 0x%x\n", xid);

	bnx2fc_init_cleanup_task(cleanup_io_req, task, orig_xid);

	/* Obtain free SQ entry */
	bnx2fc_add_2_sq(tgt, xid);

	/* Ring doorbell */
	bnx2fc_ring_doorbell(tgt);

cleanup_err:
	return rc;
}
/**
 * bnx2fc_eh_target_reset - Reset a target
 *
 * @sc_cmd:	SCSI command
 *
 * Set from SCSI host template to send task mgmt command to the target
 *	and wait for the response
 */
int bnx2fc_eh_target_reset(struct scsi_cmnd *sc_cmd)
{
	return bnx2fc_initiate_tmf(sc_cmd, FCP_TMF_TGT_RESET);
}

/**
 * bnx2fc_eh_device_reset - Reset a single LUN
 *
 * @sc_cmd:	SCSI command
 *
 * Set from SCSI host template to send task mgmt command to the target
 *	and wait for the response
 */
int bnx2fc_eh_device_reset(struct scsi_cmnd *sc_cmd)
{
	return bnx2fc_initiate_tmf(sc_cmd, FCP_TMF_LUN_RESET);
}

/**
 * bnx2fc_eh_abort - eh_abort_handler api to abort an outstanding
 *			SCSI command
 *
 * @sc_cmd:	SCSI_ML command pointer
 *
 * SCSI abort request handler
 */
int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
{
	struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
	struct fc_rport_libfc_priv *rp = rport->dd_data;
	struct bnx2fc_cmd *io_req;
	struct fc_lport *lport;
	struct bnx2fc_rport *tgt;
	int rc = FAILED;

	rc = fc_block_scsi_eh(sc_cmd);
	if (rc)
		return rc;

	lport = shost_priv(sc_cmd->device->host);
	if ((lport->state != LPORT_ST_READY) || !(lport->link_up)) {
		printk(KERN_ALERT PFX "eh_abort: link not ready\n");
		return rc;
	}

	tgt = (struct bnx2fc_rport *)&rp[1];

	BNX2FC_TGT_DBG(tgt, "Entered bnx2fc_eh_abort\n");

	spin_lock_bh(&tgt->tgt_lock);
	io_req = (struct bnx2fc_cmd *)sc_cmd->SCp.ptr;
	if (!io_req) {
		/* Command might have just completed */
		printk(KERN_ERR PFX "eh_abort: io_req is NULL\n");
		spin_unlock_bh(&tgt->tgt_lock);
		return SUCCESS;
	}
	BNX2FC_IO_DBG(io_req, "eh_abort - refcnt = %d\n",
		      io_req->refcount.refcount.counter);

	/* Hold IO request across abort processing */
	kref_get(&io_req->refcount);

	BUG_ON(tgt != io_req->tgt);

	/* Remove the io_req from the active_q. */
	/*
	 * Task Mgmt functions (LUN RESET & TGT RESET) will not
	 * issue an ABTS on this particular IO req, as the
	 * io_req is no longer in the active_q.
	 */
	if (tgt->flush_in_prog) {
		printk(KERN_ALERT PFX "eh_abort: io_req (xid = 0x%x) "
		       "flush in progress\n", io_req->xid);
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		return SUCCESS;
	}

	if (io_req->on_active_queue == 0) {
		printk(KERN_ALERT PFX "eh_abort: io_req (xid = 0x%x) "
		       "not on active_q\n", io_req->xid);
		/*
		 * This condition can happen only due to the FW bug,
		 * where we do not receive cleanup response from
		 * the FW. Handle this case gracefully by erroring
		 * back the IO request to SCSI-ml
		 */
		bnx2fc_scsi_done(io_req, DID_ABORT);

		kref_put(&io_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		return SUCCESS;
	}

	/*
	 * Only eh_abort processing will remove the IO from
	 * active_cmd_q before processing the request. This is
	 * done to avoid race conditions between IOs aborted
	 * as part of task management completion and eh_abort
	 * processing
	 */
	list_del_init(&io_req->link);
	io_req->on_active_queue = 0;
	/* Move IO req to retire queue */
	list_add_tail(&io_req->link, &tgt->io_retire_queue);

	init_completion(&io_req->tm_done);
	io_req->wait_for_comp = 1;

	if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) {
		/* Cancel the current timer running on this io_req */
		if (cancel_delayed_work(&io_req->timeout_work))
			kref_put(&io_req->refcount,
				 bnx2fc_cmd_release); /* drop timer hold */
		set_bit(BNX2FC_FLAG_EH_ABORT, &io_req->req_flags);
		rc = bnx2fc_initiate_abts(io_req);
	} else {
		printk(KERN_ALERT PFX "eh_abort: io_req (xid = 0x%x) "
		       "already in abts processing\n", io_req->xid);
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		return SUCCESS;
	}
	if (rc == FAILED) {
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		return rc;
	}
	spin_unlock_bh(&tgt->tgt_lock);

	wait_for_completion(&io_req->tm_done);

	spin_lock_bh(&tgt->tgt_lock);
	io_req->wait_for_comp = 0;
	if (!(test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
			       &io_req->req_flags))) {
		/* Let the scsi-ml try to recover this command */
		printk(KERN_ERR PFX "abort failed, xid = 0x%x\n",
		       io_req->xid);
		rc = FAILED;
	} else {
		/*
		 * We come here even when there was a race condition
		 * between timeout and abts completion, and abts
		 * completion happens just in time.
		 */
		BNX2FC_IO_DBG(io_req, "abort succeeded\n");
		rc = SUCCESS;
		bnx2fc_scsi_done(io_req, DID_ABORT);
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
	}

	/* release the reference taken in eh_abort */
	kref_put(&io_req->refcount, bnx2fc_cmd_release);
	spin_unlock_bh(&tgt->tgt_lock);
	return rc;
}
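/**
 * bnx2fc_process_cleanup_compl - handle a cleanup task completion
 *
 * @io_req:	IO request that was cleaned up
 * @task:	firmware task context for the completion
 * @num_rq:	number of RQ entries associated with the CQE
 *
 * Errors the IO back to scsi-ml and drops the cleanup hold.
 */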
void bnx2fc_process_cleanup_compl(struct bnx2fc_cmd *io_req,
				  struct fcoe_task_ctx_entry *task,
				  u8 num_rq)
{
	BNX2FC_IO_DBG(io_req, "Entered process_cleanup_compl "
		      "refcnt = %d, cmd_type = %d\n",
		      io_req->refcount.refcount.counter, io_req->cmd_type);
	bnx2fc_scsi_done(io_req, DID_ERROR);
	kref_put(&io_req->refcount, bnx2fc_cmd_release);
}
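/**
 * bnx2fc_process_abts_compl - handle an ABTS completion
 *
 * @io_req:	original IO request that was aborted
 * @task:	firmware task context carrying the BLS response
 * @num_rq:	number of RQ entries associated with the CQE
 *
 * Schedules an RRQ after R_A_TOV when the target accepted the ABTS
 * (BA_ACC), then either wakes up an eh_abort waiter or errors the IO
 * back to scsi-ml.
 */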
void bnx2fc_process_abts_compl(struct bnx2fc_cmd *io_req,
			       struct fcoe_task_ctx_entry *task,
			       u8 num_rq)
{
	u32 r_ctl;
	u32 r_a_tov = FC_DEF_R_A_TOV;
	u8 issue_rrq = 0;
	struct bnx2fc_rport *tgt = io_req->tgt;

	BNX2FC_IO_DBG(io_req, "Entered process_abts_compl xid = 0x%x "
		      "refcnt = %d, cmd_type = %d\n",
		      io_req->xid,
		      io_req->refcount.refcount.counter, io_req->cmd_type);

	if (test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
			     &io_req->req_flags)) {
		BNX2FC_IO_DBG(io_req, "Timer context finished processing"
			      " this io\n");
		return;
	}

	/* Do not issue RRQ as this IO is already cleaned up */
	if (test_and_set_bit(BNX2FC_FLAG_IO_CLEANUP,
			     &io_req->req_flags))
		goto io_compl;

	/*
	 * For ABTS issued due to SCSI eh_abort_handler, timeout
	 * values are maintained by scsi-ml itself. Cancel timeout
	 * in case ABTS issued as part of task management function
	 * or due to FW error.
	 */
	if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags))
		if (cancel_delayed_work(&io_req->timeout_work))
			kref_put(&io_req->refcount,
				 bnx2fc_cmd_release); /* drop timer hold */

	r_ctl = task->cmn.general.rsp_info.abts_rsp.r_ctl;

	switch (r_ctl) {
	case FC_RCTL_BA_ACC:
		/*
		 * Don't release this cmd yet. It will be released
		 * after we get RRQ response
		 */
		BNX2FC_IO_DBG(io_req, "ABTS response - ACC Send RRQ\n");
		issue_rrq = 1;
		break;

	case FC_RCTL_BA_RJT:
		BNX2FC_IO_DBG(io_req, "ABTS response - RJT\n");
		break;
	default:
		printk(KERN_ERR PFX "Unknown ABTS response\n");
		break;
	}

	if (issue_rrq) {
		BNX2FC_IO_DBG(io_req, "Issue RRQ after R_A_TOV\n");
		set_bit(BNX2FC_FLAG_ISSUE_RRQ, &io_req->req_flags);
	}
	set_bit(BNX2FC_FLAG_RETIRE_OXID, &io_req->req_flags);
	bnx2fc_cmd_timer_set(io_req, r_a_tov);

io_compl:
	if (io_req->wait_for_comp) {
		if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
				       &io_req->req_flags))
			complete(&io_req->tm_done);
	} else {
		/*
		 * We end up here when ABTS is issued as
		 * in asynchronous context, i.e., as part
		 * of task management completion, or
		 * when FW error is received or when the
		 * ABTS is issued when the IO is timed
		 * out.
		 */

		if (io_req->on_active_queue) {
			list_del_init(&io_req->link);
			io_req->on_active_queue = 0;
			/* Move IO req to retire queue */
			list_add_tail(&io_req->link, &tgt->io_retire_queue);
		}
		bnx2fc_scsi_done(io_req, DID_ERROR);
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
	}
}
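/**
 * bnx2fc_lun_reset_cmpl - abort outstanding IOs after a LUN reset
 *
 * @io_req:	TM request that carried the LUN reset
 *
 * Called with tgt_lock held on successful LUN reset completion.
 */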
static void bnx2fc_lun_reset_cmpl(struct bnx2fc_cmd *io_req)
{
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
	struct bnx2fc_rport *tgt = io_req->tgt;
	struct list_head *list;
	struct list_head *tmp;
	struct bnx2fc_cmd *cmd;
	int tm_lun = sc_cmd->device->lun;
	int rc = 0;
	int lun;

	/* called with tgt_lock held */
	BNX2FC_IO_DBG(io_req, "Entered bnx2fc_lun_reset_cmpl\n");
	/*
	 * Walk thru the active_ios queue and ABORT the IO
	 * that matches with the LUN that was reset
	 */
	list_for_each_safe(list, tmp, &tgt->active_cmd_queue) {
		BNX2FC_TGT_DBG(tgt, "LUN RST cmpl: scan for pending IOs\n");
		cmd = (struct bnx2fc_cmd *)list;
		lun = cmd->sc_cmd->device->lun;
		if (lun == tm_lun) {
			/* Initiate ABTS on this cmd */
			if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS,
					      &cmd->req_flags)) {
				/* cancel the IO timeout */
				if (cancel_delayed_work(&io_req->timeout_work))
					kref_put(&io_req->refcount,
						 bnx2fc_cmd_release);
							/* timer hold */
				rc = bnx2fc_initiate_abts(cmd);
				/* abts shouldn't fail in this context */
				WARN_ON(rc != SUCCESS);
			} else
				printk(KERN_ERR PFX "lun_rst: abts already in"
				       " progress for this IO 0x%x\n",
				       cmd->xid);
		}
	}
}
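/**
 * bnx2fc_tgt_reset_cmpl - abort outstanding IOs after a target reset
 *
 * @io_req:	TM request that carried the target reset
 *
 * Called with tgt_lock held on successful target reset completion.
 */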
static void bnx2fc_tgt_reset_cmpl(struct bnx2fc_cmd *io_req)
{
	struct bnx2fc_rport *tgt = io_req->tgt;
	struct list_head *list;
	struct list_head *tmp;
	struct bnx2fc_cmd *cmd;
	int rc = 0;

	/* called with tgt_lock held */
	BNX2FC_IO_DBG(io_req, "Entered bnx2fc_tgt_reset_cmpl\n");
	/*
	 * Walk thru the active_ios queue and ABORT all the IOs
	 * to the target that was reset
	 */
	list_for_each_safe(list, tmp, &tgt->active_cmd_queue) {
		BNX2FC_TGT_DBG(tgt, "TGT RST cmpl: scan for pending IOs\n");
		cmd = (struct bnx2fc_cmd *)list;
		/* Initiate ABTS */
		if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS,
				      &cmd->req_flags)) {
			/* cancel the IO timeout */
			if (cancel_delayed_work(&io_req->timeout_work))
				kref_put(&io_req->refcount,
					 bnx2fc_cmd_release); /* timer hold */
			rc = bnx2fc_initiate_abts(cmd);
			/* abts shouldn't fail in this context */
			WARN_ON(rc != SUCCESS);

		} else
			printk(KERN_ERR PFX "tgt_rst: abts already in progress"
			       " for this IO 0x%x\n", cmd->xid);
	}
}
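/**
 * bnx2fc_process_tm_compl - handle a task management completion
 *
 * @io_req:	TM request being completed
 * @task:	firmware task context carrying the response frame
 * @num_rq:	number of RQ entries associated with the CQE
 *
 * Parses the FCP_RSP, kicks off the post-reset aborts on success,
 * completes the originating SCSI command and wakes up the waiter.
 */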
|  | 1320 | void bnx2fc_process_tm_compl(struct bnx2fc_cmd *io_req, | 
|  | 1321 | struct fcoe_task_ctx_entry *task, u8 num_rq) | 
|  | 1322 | { | 
|  | 1323 | struct bnx2fc_mp_req *tm_req; | 
|  | 1324 | struct fc_frame_header *fc_hdr; | 
|  | 1325 | struct scsi_cmnd *sc_cmd = io_req->sc_cmd; | 
|  | 1326 | u64 *hdr; | 
|  | 1327 | u64 *temp_hdr; | 
|  | 1328 | void *rsp_buf; | 
|  | 1329 |  | 
|  | 1330 | /* Called with tgt_lock held */ | 
|  | 1331 | BNX2FC_IO_DBG(io_req, "Entered process_tm_compl\n"); | 
|  | 1332 |  | 
|  | 1333 | if (!test_bit(BNX2FC_FLAG_TM_TIMEOUT, &io_req->req_flags)) { | 
|  | 1334 | set_bit(BNX2FC_FLAG_TM_COMPL, &io_req->req_flags); | 
|  | 1335 | } else { | 
|  | 1336 | /* TM has already timed out and we got a | 
|  | 1337 | * delayed completion. Ignore completion | 
|  | 1338 | * processing. | 
|  | 1339 | */ | 
|  | 1340 | return; | 
|  | 1341 | } | 
|  | 1342 |  | 
|  | 1343 | tm_req = &(io_req->mp_req); | 
|  | 1344 | fc_hdr = &(tm_req->resp_fc_hdr); | 
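|  |  | /* | 
|  |  |  * Copy the 24-byte FC frame header out of the task context as | 
|  |  |  * three 64-bit words, converting each to big-endian wire order. | 
|  |  |  */ | 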
|  | 1345 | hdr = (u64 *)fc_hdr; | 
|  | 1346 | temp_hdr = (u64 *) | 
|  | 1347 | &task->cmn.general.cmd_info.mp_fc_frame.fc_hdr; | 
|  | 1348 | hdr[0] = cpu_to_be64(temp_hdr[0]); | 
|  | 1349 | hdr[1] = cpu_to_be64(temp_hdr[1]); | 
|  | 1350 | hdr[2] = cpu_to_be64(temp_hdr[2]); | 
|  | 1351 |  | 
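|  |  | /* the received response length is read back from the SGL | 
|  |  |  * context's cur_sge_off field | 
|  |  |  */ | 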
|  | 1352 | tm_req->resp_len = task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_off; | 
|  | 1353 |  | 
|  | 1354 | rsp_buf = tm_req->resp_buf; | 
|  | 1355 |  | 
|  | 1356 | if (fc_hdr->fh_r_ctl == FC_RCTL_DD_CMD_STATUS) { | 
|  | 1357 | bnx2fc_parse_fcp_rsp(io_req, | 
|  | 1358 | (struct fcoe_fcp_rsp_payload *) | 
|  | 1359 | rsp_buf, num_rq); | 
|  | 1360 | if (io_req->fcp_rsp_code == 0) { | 
|  | 1361 | /* TM successful */ | 
|  | 1362 | if (tm_req->tm_flags & FCP_TMF_LUN_RESET) | 
|  | 1363 | bnx2fc_lun_reset_cmpl(io_req); | 
|  | 1364 | else if (tm_req->tm_flags & FCP_TMF_TGT_RESET) | 
|  | 1365 | bnx2fc_tgt_reset_cmpl(io_req); | 
|  | 1366 | } | 
|  | 1367 | } else { | 
|  | 1368 | printk(KERN_ERR PFX "tmf's fc_hdr r_ctl = 0x%x\n", | 
|  | 1369 | fc_hdr->fh_r_ctl); | 
|  | 1370 | } | 
|  | 1371 | if (!sc_cmd->SCp.ptr) { | 
|  | 1372 | printk(KERN_ALERT PFX "tm_compl: SCp.ptr is NULL\n"); | 
|  | 1373 | return; | 
|  | 1374 | } | 
|  | 1375 | switch (io_req->fcp_status) { | 
|  | 1376 | case FC_GOOD: | 
|  | 1377 | if (io_req->cdb_status == 0) { | 
|  | 1378 | /* Good IO completion */ | 
|  | 1379 | sc_cmd->result = DID_OK << 16; | 
|  | 1380 | } else { | 
|  | 1381 | /* Transport status is good, SCSI status not good */ | 
|  | 1382 | sc_cmd->result = (DID_OK << 16) | io_req->cdb_status; | 
|  | 1383 | } | 
|  | 1384 | if (io_req->fcp_resid) | 
|  | 1385 | scsi_set_resid(sc_cmd, io_req->fcp_resid); | 
|  | 1386 | break; | 
|  | 1387 |  | 
|  | 1388 | default: | 
|  | 1389 | BNX2FC_IO_DBG(io_req, "process_tm_compl: fcp_status = %d\n", | 
|  | 1390 | io_req->fcp_status); | 
|  | 1391 | break; | 
|  | 1392 | } | 
|  | 1393 |  | 
|  | 1394 | /* sc_cmd was fetched from io_req above; detach it now */ | 
|  | 1395 | io_req->sc_cmd = NULL; | 
|  | 1396 |  | 
|  | 1397 | /* check if the io_req exists in tgt's tmf_q */ | 
|  | 1398 | if (io_req->on_tmf_queue) { | 
|  | 1399 |  | 
|  | 1400 | list_del_init(&io_req->link); | 
|  | 1401 | io_req->on_tmf_queue = 0; | 
|  | 1402 | } else { | 
|  | 1403 |  | 
|  | 1404 | printk(KERN_ALERT PFX "Command not on active_cmd_queue!\n"); | 
|  | 1405 | return; | 
|  | 1406 | } | 
|  | 1407 |  | 
|  | 1408 | sc_cmd->SCp.ptr = NULL; | 
|  | 1409 | sc_cmd->scsi_done(sc_cmd); | 
|  | 1410 |  | 
|  | 1411 | kref_put(&io_req->refcount, bnx2fc_cmd_release); | 
|  | 1412 | if (io_req->wait_for_comp) { | 
|  | 1413 | BNX2FC_IO_DBG(io_req, "tm_compl - wake up the waiter\n"); | 
|  | 1414 | complete(&io_req->tm_done); | 
|  | 1415 | } | 
|  | 1416 | } | 
|  | 1417 |  | 
|  | 1418 | static int bnx2fc_split_bd(struct bnx2fc_cmd *io_req, u64 addr, int sg_len, | 
|  | 1419 | int bd_index) | 
|  | 1420 | { | 
|  | 1421 | struct fcoe_bd_ctx *bd = io_req->bd_tbl->bd_tbl; | 
|  | 1422 | int frag_size, sg_frags; | 
|  | 1423 |  | 
|  | 1424 | sg_frags = 0; | 
|  | 1425 | while (sg_len) { | 
|  | 1426 | if (sg_len >= BNX2FC_BD_SPLIT_SZ) | 
|  | 1427 | frag_size = BNX2FC_BD_SPLIT_SZ; | 
|  | 1428 | else | 
|  | 1429 | frag_size = sg_len; | 
|  | 1430 | bd[bd_index + sg_frags].buf_addr_lo = addr & 0xffffffff; | 
|  | 1431 | bd[bd_index + sg_frags].buf_addr_hi = addr >> 32; | 
|  | 1432 | bd[bd_index + sg_frags].buf_len = (u16)frag_size; | 
|  | 1433 | bd[bd_index + sg_frags].flags = 0; | 
|  | 1434 |  | 
|  | 1435 | addr += (u64) frag_size; | 
|  | 1436 | sg_frags++; | 
|  | 1437 | sg_len -= frag_size; | 
|  | 1438 | } | 
|  | 1439 | return sg_frags; | 
|  | 1440 |  | 
|  | 1441 | } | 
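|  |  | /* | 
|  |  |  * Worked example (assuming BNX2FC_BD_SPLIT_SZ is 32 KiB): a 72 KiB | 
|  |  |  * DMA segment is carved into BDs of 32 KiB, 32 KiB and 8 KiB at | 
|  |  |  * consecutive addresses, and the function returns sg_frags = 3. | 
|  |  |  */ | 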
|  | 1442 |  | 
|  | 1443 | static int bnx2fc_map_sg(struct bnx2fc_cmd *io_req) | 
|  | 1444 | { | 
|  | 1445 | struct scsi_cmnd *sc = io_req->sc_cmd; | 
|  | 1446 | struct fcoe_bd_ctx *bd = io_req->bd_tbl->bd_tbl; | 
|  | 1447 | struct scatterlist *sg; | 
|  | 1448 | int byte_count = 0; | 
|  | 1449 | int sg_count = 0; | 
|  | 1450 | int bd_count = 0; | 
|  | 1451 | int sg_frags; | 
|  | 1452 | unsigned int sg_len; | 
|  | 1453 | u64 addr; | 
|  | 1454 | int i; | 
|  | 1455 |  | 
|  | 1456 | sg_count = scsi_dma_map(sc); | 
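|  |  | /* | 
|  |  |  * scsi_dma_map() returns the number of DMA-mapped SG entries (or a | 
|  |  |  * negative value on failure, in which case the loop below does not | 
|  |  |  * run and bd_count stays 0). | 
|  |  |  */ | 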
|  | 1457 | scsi_for_each_sg(sc, sg, sg_count, i) { | 
|  | 1458 | sg_len = sg_dma_len(sg); | 
|  | 1459 | addr = sg_dma_address(sg); | 
|  | 1460 | if (sg_len > BNX2FC_MAX_BD_LEN) { | 
|  | 1461 | sg_frags = bnx2fc_split_bd(io_req, addr, sg_len, | 
|  | 1462 | bd_count); | 
|  | 1463 | } else { | 
|  | 1464 |  | 
|  | 1465 | sg_frags = 1; | 
|  | 1466 | bd[bd_count].buf_addr_lo = addr & 0xffffffff; | 
|  | 1467 | bd[bd_count].buf_addr_hi = addr >> 32; | 
|  | 1468 | bd[bd_count].buf_len = (u16)sg_len; | 
|  | 1469 | bd[bd_count].flags = 0; | 
|  | 1470 | } | 
|  | 1471 | bd_count += sg_frags; | 
|  | 1472 | byte_count += sg_len; | 
|  | 1473 | } | 
|  | 1474 | if (byte_count != scsi_bufflen(sc)) | 
|  | 1475 | printk(KERN_ERR PFX "byte_count = %d != scsi_bufflen = %d, " | 
|  | 1476 | "task_id = 0x%x\n", byte_count, scsi_bufflen(sc), | 
|  | 1477 | io_req->xid); | 
|  | 1478 | return bd_count; | 
|  | 1479 | } | 
|  | 1480 |  | 
|  | 1481 | static void bnx2fc_build_bd_list_from_sg(struct bnx2fc_cmd *io_req) | 
|  | 1482 | { | 
|  | 1483 | struct scsi_cmnd *sc = io_req->sc_cmd; | 
|  | 1484 | struct fcoe_bd_ctx *bd = io_req->bd_tbl->bd_tbl; | 
|  | 1485 | int bd_count; | 
|  | 1486 |  | 
|  | 1487 | if (scsi_sg_count(sc)) { | 
|  | 1488 | bd_count = bnx2fc_map_sg(io_req); | 
|  | 1489 | } else { | 
|  | 1490 | bd_count = 0; | 
|  | 1491 | bd[0].buf_addr_lo = bd[0].buf_addr_hi = 0; | 
|  | 1492 | bd[0].buf_len = bd[0].flags = 0; | 
|  | 1493 | } | 
|  | 1494 | io_req->bd_tbl->bd_valid = bd_count; | 
|  | 1495 | } | 
|  | 1496 |  | 
|  | 1497 | static void bnx2fc_unmap_sg_list(struct bnx2fc_cmd *io_req) | 
|  | 1498 | { | 
|  | 1499 | struct scsi_cmnd *sc = io_req->sc_cmd; | 
|  | 1500 |  | 
|  | 1501 | if (io_req->bd_tbl->bd_valid && sc) { | 
|  | 1502 | scsi_dma_unmap(sc); | 
|  | 1503 | io_req->bd_tbl->bd_valid = 0; | 
|  | 1504 | } | 
|  | 1505 | } | 
|  | 1506 |  | 
|  | 1507 | void bnx2fc_build_fcp_cmnd(struct bnx2fc_cmd *io_req, | 
|  | 1508 | struct fcp_cmnd *fcp_cmnd) | 
|  | 1509 | { | 
|  | 1510 | struct scsi_cmnd *sc_cmd = io_req->sc_cmd; | 
|  | 1511 | char tag[2]; | 
|  | 1512 |  | 
|  | 1513 | memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd)); | 
|  | 1514 |  | 
|  | 1515 | int_to_scsilun(sc_cmd->device->lun, | 
|  | 1516 | (struct scsi_lun *) fcp_cmnd->fc_lun); | 
|  | 1517 |  | 
|  | 1518 |  | 
|  | 1519 | fcp_cmnd->fc_dl = htonl(io_req->data_xfer_len); | 
|  | 1520 | memcpy(fcp_cmnd->fc_cdb, sc_cmd->cmnd, sc_cmd->cmd_len); | 
|  | 1521 |  | 
|  | 1522 | fcp_cmnd->fc_cmdref = 0; | 
|  | 1523 | fcp_cmnd->fc_pri_ta = 0; | 
|  | 1524 | fcp_cmnd->fc_tm_flags = io_req->mp_req.tm_flags; | 
|  | 1525 | fcp_cmnd->fc_flags = io_req->io_req_flags; | 
|  | 1526 |  | 
|  | 1527 | if (scsi_populate_tag_msg(sc_cmd, tag)) { | 
|  | 1528 | switch (tag[0]) { | 
|  | 1529 | case HEAD_OF_QUEUE_TAG: | 
|  | 1530 | fcp_cmnd->fc_pri_ta = FCP_PTA_HEADQ; | 
|  | 1531 | break; | 
|  | 1532 | case ORDERED_QUEUE_TAG: | 
|  | 1533 | fcp_cmnd->fc_pri_ta = FCP_PTA_ORDERED; | 
|  | 1534 | break; | 
|  | 1535 | default: | 
|  | 1536 | fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE; | 
|  | 1537 | break; | 
|  | 1538 | } | 
|  | 1539 | } else { | 
|  | 1540 | fcp_cmnd->fc_pri_ta = 0; | 
|  | 1541 | } | 
|  | 1542 | } | 
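|  |  | /* | 
|  |  |  * Illustration: for a simple-queue-tagged READ(10) of 4 KiB, the | 
|  |  |  * payload built above carries fc_lun = the scsilun-encoded LUN, | 
|  |  |  * fc_dl = htonl(4096), fc_flags = BNX2FC_READ (read data expected), | 
|  |  |  * fc_pri_ta = FCP_PTA_SIMPLE and fc_cdb = the 10-byte CDB. | 
|  |  |  */ | 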
|  | 1543 |  | 
|  | 1544 | static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req, | 
|  | 1545 | struct fcoe_fcp_rsp_payload *fcp_rsp, | 
|  | 1546 | u8 num_rq) | 
|  | 1547 | { | 
|  | 1548 | struct scsi_cmnd *sc_cmd = io_req->sc_cmd; | 
|  | 1549 | struct bnx2fc_rport *tgt = io_req->tgt; | 
|  | 1550 | u8 rsp_flags = fcp_rsp->fcp_flags.flags; | 
|  | 1551 | u32 rq_buff_len = 0; | 
|  | 1552 | int i; | 
|  | 1553 | unsigned char *rq_data; | 
|  | 1554 | unsigned char *dummy; | 
|  | 1555 | int fcp_sns_len = 0; | 
|  | 1556 | int fcp_rsp_len = 0; | 
|  | 1557 |  | 
|  | 1558 | io_req->fcp_status = FC_GOOD; | 
|  | 1559 | io_req->fcp_resid = fcp_rsp->fcp_resid; | 
|  | 1560 |  | 
|  | 1561 | io_req->scsi_comp_flags = rsp_flags; | 
|  | 1562 | CMD_SCSI_STATUS(sc_cmd) = io_req->cdb_status = | 
|  | 1563 | fcp_rsp->scsi_status_code; | 
|  | 1564 |  | 
|  | 1565 | /* Fetch fcp_rsp_info and fcp_sns_info if available */ | 
|  | 1566 | if (num_rq) { | 
|  | 1567 |  | 
|  | 1568 | /* | 
|  | 1569 | * We do not anticipate num_rq > 1, as the Linux-defined | 
|  | 1570 | * SCSI_SENSE_BUFFERSIZE is 96 bytes; with 8 bytes of FCP_RSP_INFO, | 
|  | 1571 | * a single 256-byte RQ buffer is large enough to hold both. | 
|  | 1572 | */ | 
|  | 1573 |  | 
|  | 1574 | if (rsp_flags & | 
|  | 1575 | FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID) { | 
|  | 1576 | fcp_rsp_len = rq_buff_len | 
|  | 1577 | = fcp_rsp->fcp_rsp_len; | 
|  | 1578 | } | 
|  | 1579 |  | 
|  | 1580 | if (rsp_flags & | 
|  | 1581 | FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID) { | 
|  | 1582 | fcp_sns_len = fcp_rsp->fcp_sns_len; | 
|  | 1583 | rq_buff_len += fcp_rsp->fcp_sns_len; | 
|  | 1584 | } | 
|  | 1585 |  | 
|  | 1586 | io_req->fcp_rsp_len = fcp_rsp_len; | 
|  | 1587 | io_req->fcp_sns_len = fcp_sns_len; | 
|  | 1588 |  | 
|  | 1589 | if (rq_buff_len > num_rq * BNX2FC_RQ_BUF_SZ) { | 
|  | 1590 | /* Invalid sense length. */ | 
|  | 1591 | printk(KERN_ALERT PFX "invalid sns length %d\n", | 
|  | 1592 | rq_buff_len); | 
|  | 1593 | /* reset rq_buff_len */ | 
|  | 1594 | rq_buff_len = num_rq * BNX2FC_RQ_BUF_SZ; | 
|  | 1595 | } | 
|  | 1596 |  | 
|  | 1597 | rq_data = bnx2fc_get_next_rqe(tgt, 1); | 
|  | 1598 |  | 
|  | 1599 | if (num_rq > 1) { | 
|  | 1600 | /* drain the remaining RQEs; the extra sense data is not needed */ | 
|  | 1601 | for (i = 1; i < num_rq; i++) | 
|  | 1602 | dummy = bnx2fc_get_next_rqe(tgt, 1); | 
|  | 1603 | } | 
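|  |  | /* | 
|  |  |  * Layout of the RQ buffer: FCP_RSP_INFO (fcp_rsp_len bytes, with | 
|  |  |  * the rsp code at byte 3) followed immediately by the sense data. | 
|  |  |  */ | 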
|  | 1604 |  | 
|  | 1605 | /* fetch fcp_rsp_code */ | 
|  | 1606 | if ((fcp_rsp_len == 4) || (fcp_rsp_len == 8)) { | 
|  | 1607 | /* Only for task management function */ | 
|  | 1608 | io_req->fcp_rsp_code = rq_data[3]; | 
|  | 1609 | printk(KERN_ERR PFX "fcp_rsp_code = %d\n", | 
|  | 1610 | io_req->fcp_rsp_code); | 
|  | 1611 | } | 
|  | 1612 |  | 
|  | 1613 | /* fetch sense data */ | 
|  | 1614 | rq_data += fcp_rsp_len; | 
|  | 1615 |  | 
|  | 1616 | if (fcp_sns_len > SCSI_SENSE_BUFFERSIZE) { | 
|  | 1617 | printk(KERN_ERR PFX "Truncating sense buffer\n"); | 
|  | 1618 | fcp_sns_len = SCSI_SENSE_BUFFERSIZE; | 
|  | 1619 | } | 
|  | 1620 |  | 
|  | 1621 | memset(sc_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); | 
|  | 1622 | if (fcp_sns_len) | 
|  | 1623 | memcpy(sc_cmd->sense_buffer, rq_data, fcp_sns_len); | 
|  | 1624 |  | 
|  | 1625 | /* return RQ entries */ | 
|  | 1626 | for (i = 0; i < num_rq; i++) | 
|  | 1627 | bnx2fc_return_rqe(tgt, 1); | 
|  | 1628 | } | 
|  | 1629 | } | 
|  | 1630 |  | 
|  | 1631 | /** | 
|  | 1632 | * bnx2fc_queuecommand - Queuecommand function of the scsi template | 
|  | 1633 | * | 
|  | 1634 | * @host:	The Scsi_Host the command was issued to | 
|  | 1635 | * @sc_cmd:	struct scsi_cmnd to be executed | 
|  | 1636 | * | 
|  | 1637 | * This is the IO strategy routine, called by SCSI-ML | 
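|  |  | * | 
|  |  | * Returns: 0, or SCSI_MLQUEUE_HOST_BUSY / SCSI_MLQUEUE_TARGET_BUSY | 
|  |  | * when SCSI-ML should retry the command later | 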
|  | 1638 | **/ | 
|  | 1639 | int bnx2fc_queuecommand(struct Scsi_Host *host, | 
|  | 1640 | struct scsi_cmnd *sc_cmd) | 
|  | 1641 | { | 
|  | 1642 | struct fc_lport *lport = shost_priv(host); | 
|  | 1643 | struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device)); | 
|  | 1644 | struct fc_rport_libfc_priv *rp = rport->dd_data; | 
|  | 1645 | struct bnx2fc_rport *tgt; | 
|  | 1646 | struct bnx2fc_cmd *io_req; | 
|  | 1647 | int rc = 0; | 
|  | 1648 | int rval; | 
|  | 1649 |  | 
|  | 1650 | rval = fc_remote_port_chkready(rport); | 
|  | 1651 | if (rval) { | 
|  | 1652 | sc_cmd->result = rval; | 
|  | 1653 | sc_cmd->scsi_done(sc_cmd); | 
|  | 1654 | return 0; | 
|  | 1655 | } | 
|  | 1656 |  | 
|  | 1657 | if ((lport->state != LPORT_ST_READY) || !(lport->link_up)) { | 
|  | 1658 | rc = SCSI_MLQUEUE_HOST_BUSY; | 
|  | 1659 | goto exit_qcmd; | 
|  | 1660 | } | 
|  | 1661 |  | 
|  | 1662 | /* rport and tgt are allocated together, so tgt should be non-NULL */ | 
|  | 1663 | tgt = (struct bnx2fc_rport *)&rp[1]; | 
|  | 1664 |  | 
|  | 1665 | if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) { | 
| Nithin Nayak Sujir | 35dd71a | 2011-04-25 12:30:08 -0700 | [diff] [blame] | 1666 | if (test_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags))  { | 
|  | 1667 | sc_cmd->result = DID_NO_CONNECT << 16; | 
|  | 1668 | sc_cmd->scsi_done(sc_cmd); | 
|  | 1669 | return 0; | 
|  | 1670 |  | 
|  | 1671 | } | 
| Bhanu Gollapudi | 853e2bd | 2011-02-04 12:10:34 -0800 | [diff] [blame] | 1672 | /* | 
|  | 1673 | * Session is not offloaded yet. Let SCSI-ml retry | 
|  | 1674 | * the command. | 
|  | 1675 | */ | 
|  | 1676 | rc = SCSI_MLQUEUE_TARGET_BUSY; | 
|  | 1677 | goto exit_qcmd; | 
|  | 1678 | } | 
|  | 1679 |  | 
|  | 1680 | io_req = bnx2fc_cmd_alloc(tgt); | 
|  | 1681 | if (!io_req) { | 
|  | 1682 | rc = SCSI_MLQUEUE_HOST_BUSY; | 
|  | 1683 | goto exit_qcmd; | 
|  | 1684 | } | 
|  | 1685 | io_req->sc_cmd = sc_cmd; | 
|  | 1686 |  | 
|  | 1687 | if (bnx2fc_post_io_req(tgt, io_req)) { | 
|  | 1688 | printk(KERN_ERR PFX "Unable to post io_req\n"); | 
|  | 1689 | rc = SCSI_MLQUEUE_HOST_BUSY; | 
|  | 1690 | goto exit_qcmd; | 
|  | 1691 | } | 
|  | 1692 | exit_qcmd: | 
|  | 1693 | return rc; | 
|  | 1694 | } | 
|  | 1695 |  | 
|  | 1696 | void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req, | 
|  | 1697 | struct fcoe_task_ctx_entry *task, | 
|  | 1698 | u8 num_rq) | 
|  | 1699 | { | 
|  | 1700 | struct fcoe_fcp_rsp_payload *fcp_rsp; | 
|  | 1701 | struct bnx2fc_rport *tgt = io_req->tgt; | 
|  | 1702 | struct scsi_cmnd *sc_cmd; | 
|  | 1703 | struct Scsi_Host *host; | 
|  | 1704 |  | 
|  | 1705 |  | 
|  | 1706 | /* scsi_cmd_cmpl is called with tgt lock held */ | 
|  | 1707 |  | 
|  | 1708 | if (test_and_set_bit(BNX2FC_FLAG_IO_COMPL, &io_req->req_flags)) { | 
|  | 1709 | /* we will not receive ABTS response for this IO */ | 
|  | 1710 | BNX2FC_IO_DBG(io_req, "Timer context finished processing " | 
|  | 1711 | "this scsi cmd\n"); | 
|  | 1712 | } | 
|  | 1713 |  | 
|  | 1714 | /* Cancel the timeout_work, as we received IO completion */ | 
|  | 1715 | if (cancel_delayed_work(&io_req->timeout_work)) | 
|  | 1716 | kref_put(&io_req->refcount, | 
|  | 1717 | bnx2fc_cmd_release); /* drop timer hold */ | 
|  | 1718 |  | 
|  | 1719 | sc_cmd = io_req->sc_cmd; | 
|  | 1720 | if (sc_cmd == NULL) { | 
|  | 1721 | printk(KERN_ERR PFX "scsi_cmd_compl - sc_cmd is NULL\n"); | 
|  | 1722 | return; | 
|  | 1723 | } | 
|  | 1724 |  | 
|  | 1725 | /* Fetch fcp_rsp from task context and perform cmd completion */ | 
|  | 1726 | fcp_rsp = (struct fcoe_fcp_rsp_payload *) | 
|  | 1727 | &(task->cmn.general.rsp_info.fcp_rsp.payload); | 
|  | 1728 |  | 
|  | 1729 | /* parse fcp_rsp and obtain sense data from RQ if available */ | 
|  | 1730 | bnx2fc_parse_fcp_rsp(io_req, fcp_rsp, num_rq); | 
|  | 1731 |  | 
|  | 1732 | host = sc_cmd->device->host; | 
|  | 1733 | if (!sc_cmd->SCp.ptr) { | 
|  | 1734 | printk(KERN_ERR PFX "SCp.ptr is NULL\n"); | 
|  | 1735 | return; | 
|  | 1736 | } | 
|  | 1737 | io_req->sc_cmd = NULL; | 
|  | 1738 |  | 
|  | 1739 | if (io_req->on_active_queue) { | 
|  | 1740 | list_del_init(&io_req->link); | 
|  | 1741 | io_req->on_active_queue = 0; | 
|  | 1742 | /* Move IO req to retire queue */ | 
|  | 1743 | list_add_tail(&io_req->link, &tgt->io_retire_queue); | 
|  | 1744 | } else { | 
|  | 1745 | /* This should not happen, but could have been pulled | 
|  | 1746 | * by bnx2fc_flush_active_ios(), or during a race | 
|  | 1747 | * between command abort and (late) completion. | 
|  | 1748 | */ | 
|  | 1749 | BNX2FC_IO_DBG(io_req, "xid not on active_cmd_queue\n"); | 
|  | 1750 | if (io_req->wait_for_comp) | 
|  | 1751 | if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT, | 
|  | 1752 | &io_req->req_flags)) | 
|  | 1753 | complete(&io_req->tm_done); | 
|  | 1754 | } | 
|  | 1755 |  | 
|  | 1756 | bnx2fc_unmap_sg_list(io_req); | 
|  | 1757 |  | 
|  | 1758 | switch (io_req->fcp_status) { | 
|  | 1759 | case FC_GOOD: | 
|  | 1760 | if (io_req->cdb_status == 0) { | 
|  | 1761 | /* Good IO completion */ | 
|  | 1762 | sc_cmd->result = DID_OK << 16; | 
|  | 1763 | } else { | 
|  | 1764 | /* Transport status is good, SCSI status not good */ | 
|  | 1765 | BNX2FC_IO_DBG(io_req, "scsi_cmpl: cdb_status = %d" | 
|  | 1766 | " fcp_resid = 0x%x\n", | 
|  | 1767 | io_req->cdb_status, io_req->fcp_resid); | 
|  | 1768 | sc_cmd->result = (DID_OK << 16) | io_req->cdb_status; | 
|  | 1769 | } | 
|  | 1770 | if (io_req->fcp_resid) | 
|  | 1771 | scsi_set_resid(sc_cmd, io_req->fcp_resid); | 
|  | 1772 | break; | 
|  | 1773 | default: | 
|  | 1774 | printk(KERN_ALERT PFX "scsi_cmd_compl: fcp_status = %d\n", | 
|  | 1775 | io_req->fcp_status); | 
|  | 1776 | break; | 
|  | 1777 | } | 
|  | 1778 | sc_cmd->SCp.ptr = NULL; | 
|  | 1779 | sc_cmd->scsi_done(sc_cmd); | 
|  | 1780 | kref_put(&io_req->refcount, bnx2fc_cmd_release); | 
|  | 1781 | } | 
|  | 1782 |  | 
|  | 1783 | static int bnx2fc_post_io_req(struct bnx2fc_rport *tgt, | 
|  | 1784 | struct bnx2fc_cmd *io_req) | 
|  | 1785 | { | 
|  | 1786 | struct fcoe_task_ctx_entry *task; | 
|  | 1787 | struct fcoe_task_ctx_entry *task_page; | 
|  | 1788 | struct scsi_cmnd *sc_cmd = io_req->sc_cmd; | 
|  | 1789 | struct fcoe_port *port = tgt->port; | 
|  | 1790 | struct bnx2fc_hba *hba = port->priv; | 
|  | 1791 | struct fc_lport *lport = port->lport; | 
|  | 1792 | struct fcoe_dev_stats *stats; | 
|  | 1793 | int task_idx, index; | 
|  | 1794 | u16 xid; | 
|  | 1795 |  | 
|  | 1796 | /* Initialize rest of io_req fields */ | 
|  | 1797 | io_req->cmd_type = BNX2FC_SCSI_CMD; | 
|  | 1798 | io_req->port = port; | 
|  | 1799 | io_req->tgt = tgt; | 
|  | 1800 | io_req->data_xfer_len = scsi_bufflen(sc_cmd); | 
|  | 1801 | sc_cmd->SCp.ptr = (char *)io_req; | 
|  | 1802 |  | 
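|  |  | /* | 
|  |  |  * Account the request in the per-CPU libfc statistics; get_cpu() | 
|  |  |  * pins us to the current CPU while its counters are updated. | 
|  |  |  */ | 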
|  | 1803 | stats = per_cpu_ptr(lport->dev_stats, get_cpu()); | 
|  | 1804 | if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) { | 
|  | 1805 | io_req->io_req_flags = BNX2FC_READ; | 
|  | 1806 | stats->InputRequests++; | 
|  | 1807 | stats->InputBytes += io_req->data_xfer_len; | 
|  | 1808 | } else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) { | 
|  | 1809 | io_req->io_req_flags = BNX2FC_WRITE; | 
|  | 1810 | stats->OutputRequests++; | 
|  | 1811 | stats->OutputBytes += io_req->data_xfer_len; | 
|  | 1812 | } else { | 
|  | 1813 | io_req->io_req_flags = 0; | 
|  | 1814 | stats->ControlRequests++; | 
|  | 1815 | } | 
|  | 1816 | put_cpu(); | 
|  | 1817 |  | 
|  | 1818 | xid = io_req->xid; | 
|  | 1819 |  | 
|  | 1820 | /* Build buffer descriptor list for firmware from sg list */ | 
|  | 1821 | bnx2fc_build_bd_list_from_sg(io_req); | 
|  | 1822 |  | 
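|  |  | /* | 
|  |  |  * Locate the task context entry for this xid: each task page holds | 
|  |  |  * BNX2FC_TASKS_PER_PAGE entries, so split the xid into a page index | 
|  |  |  * and an offset within that page. | 
|  |  |  */ | 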
|  | 1823 | task_idx = xid / BNX2FC_TASKS_PER_PAGE; | 
|  | 1824 | index = xid % BNX2FC_TASKS_PER_PAGE; | 
|  | 1825 |  | 
|  | 1826 | /* Initialize task context for this IO request */ | 
|  | 1827 | task_page = (struct fcoe_task_ctx_entry *) hba->task_ctx[task_idx]; | 
|  | 1828 | task = &(task_page[index]); | 
|  | 1829 | bnx2fc_init_task(io_req, task); | 
|  | 1830 |  | 
|  | 1831 | spin_lock_bh(&tgt->tgt_lock); | 
|  | 1832 |  | 
|  | 1833 | if (tgt->flush_in_prog) { | 
|  | 1834 | printk(KERN_ERR PFX "Flush in progress..Host Busy\n"); | 
|  | 1835 | kref_put(&io_req->refcount, bnx2fc_cmd_release); | 
|  | 1836 | spin_unlock_bh(&tgt->tgt_lock); | 
|  | 1837 | return -EAGAIN; | 
|  | 1838 | } | 
|  | 1839 |  | 
|  | 1840 | if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) { | 
|  | 1841 | printk(KERN_ERR PFX "Session not ready...post_io\n"); | 
|  | 1842 | kref_put(&io_req->refcount, bnx2fc_cmd_release); | 
|  | 1843 | spin_unlock_bh(&tgt->tgt_lock); | 
|  | 1844 | return -EAGAIN; | 
|  | 1845 | } | 
|  | 1846 |  | 
|  | 1847 | /* Time IO req */ | 
|  | 1848 | bnx2fc_cmd_timer_set(io_req, BNX2FC_IO_TIMEOUT); | 
|  | 1849 | /* Obtain free SQ entry */ | 
|  | 1850 | bnx2fc_add_2_sq(tgt, xid); | 
|  | 1851 |  | 
|  | 1852 |  | 
|  | 1853 | io_req->on_active_queue = 1; | 
|  | 1854 |  | 
|  | 1855 | /* Enqueue the io_req at the tail of active_cmd_queue */ | 
|  | 1856 | list_add_tail(&io_req->link, &tgt->active_cmd_queue); | 
|  | 1857 |  | 
|  | 1858 | /* Ring doorbell */ | 
|  | 1859 | bnx2fc_ring_doorbell(tgt); | 
|  | 1860 | spin_unlock_bh(&tgt->tgt_lock); | 
|  | 1861 | return 0; | 
|  | 1862 | } |