/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2011 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/slab.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>
#include <linux/aer.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_crtn.h"
#include "lpfc_logmsg.h"
#include "lpfc_compat.h"
#include "lpfc_debugfs.h"
#include "lpfc_vport.h"

/* There are only four IOCB completion types. */
typedef enum _lpfc_iocb_type {
	LPFC_UNKNOWN_IOCB,
	LPFC_UNSOL_IOCB,
	LPFC_SOL_IOCB,
	LPFC_ABORT_IOCB
} lpfc_iocb_type;


/* Provide function prototypes local to this module. */
static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
				  uint32_t);
static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
			      uint8_t *, uint32_t *);
static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
							 struct lpfc_iocbq *);
static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
				      struct hbq_dmabuf *);
static int lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *, struct lpfc_queue *,
				    struct lpfc_cqe *);

static IOCB_t *
lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
{
	return &iocbq->iocb;
}

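/*
 * Note: the SLI4 queue get/put/release helpers below all treat a queue as a
 * circular ring of entry_count entries. host_index and hba_index are the two
 * cursors into that ring (which one produces and which one consumes depends
 * on the queue direction), and every index advance wraps modulo entry_count.
 */
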
/**
 * lpfc_sli4_wq_put - Put a Work Queue Entry on a Work Queue
 * @q: The Work Queue to operate on.
 * @wqe: The Work Queue Entry to put on the Work Queue.
 *
 * This routine will copy the contents of @wqe to the next available entry on
 * the @q. This function will then ring the Work Queue Doorbell to signal the
 * HBA to start processing the Work Queue Entry. This function returns 0 if
 * successful. If no entries are available on @q then this function will return
 * -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static uint32_t
lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
{
	union lpfc_wqe *temp_wqe = q->qe[q->host_index].wqe;
	struct lpfc_register doorbell;
	uint32_t host_index;

	/* If the host has not yet processed the next entry then we are done */
	if (((q->host_index + 1) % q->entry_count) == q->hba_index)
		return -ENOMEM;
	/* set consumption flag every once in a while */
	if (!((q->host_index + 1) % q->entry_repost))
		bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
	if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
		bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
	lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size);

	/* Update the host index before invoking device */
	host_index = q->host_index;
	q->host_index = ((q->host_index + 1) % q->entry_count);

	/* Ring Doorbell */
	doorbell.word0 = 0;
	bf_set(lpfc_wq_doorbell_num_posted, &doorbell, 1);
	bf_set(lpfc_wq_doorbell_index, &doorbell, host_index);
	bf_set(lpfc_wq_doorbell_id, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.WQDBregaddr);
	readl(q->phba->sli4_hba.WQDBregaddr); /* Flush */

	return 0;
}

/**
 * lpfc_sli4_wq_release - Updates internal hba index for WQ
 * @q: The Work Queue to operate on.
 * @index: The index to advance the hba index to.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * Work Queue Entries by the HBA. When the HBA indicates that it has consumed
 * an entry the host calls this function to update the queue's internal
 * pointers. This routine returns the number of entries that were consumed by
 * the HBA.
 **/
static uint32_t
lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
{
	uint32_t released = 0;

	if (q->hba_index == index)
		return 0;
	do {
		q->hba_index = ((q->hba_index + 1) % q->entry_count);
		released++;
	} while (q->hba_index != index);
	return released;
}

/**
 * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on a Mailbox Queue
 * @q: The Mailbox Queue to operate on.
 * @mqe: The Mailbox Queue Entry to put on the Mailbox Queue.
 *
 * This routine will copy the contents of @mqe to the next available entry on
 * the @q. This function will then ring the Mailbox Queue Doorbell to signal
 * the HBA to start processing the Mailbox Queue Entry. This function returns
 * 0 if successful. If no entries are available on @q then this function will
 * return -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static uint32_t
lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
{
	struct lpfc_mqe *temp_mqe = q->qe[q->host_index].mqe;
	struct lpfc_register doorbell;
	uint32_t host_index;

	/* If the host has not yet processed the next entry then we are done */
	if (((q->host_index + 1) % q->entry_count) == q->hba_index)
		return -ENOMEM;
	lpfc_sli_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
	/* Save off the mailbox pointer for completion */
	q->phba->mbox = (MAILBOX_t *)temp_mqe;

	/* Update the host index before invoking device */
	host_index = q->host_index;
	q->host_index = ((q->host_index + 1) % q->entry_count);

	/* Ring Doorbell */
	doorbell.word0 = 0;
	bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
	bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
	readl(q->phba->sli4_hba.MQDBregaddr); /* Flush */
	return 0;
}

/**
 * lpfc_sli4_mq_release - Updates internal hba index for MQ
 * @q: The Mailbox Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has consumed
 * an entry the host calls this function to update the queue's internal
 * pointers. This routine returns the number of entries that were consumed by
 * the HBA.
 **/
static uint32_t
lpfc_sli4_mq_release(struct lpfc_queue *q)
{
	/* Clear the mailbox pointer for completion */
	q->phba->mbox = NULL;
	q->hba_index = ((q->hba_index + 1) % q->entry_count);
	return 1;
}

/**
 * lpfc_sli4_eq_get - Gets the next valid EQE from an EQ
 * @q: The Event Queue to get the first valid EQE from
 *
 * This routine will get the first valid Event Queue Entry from @q, update
 * the queue's internal hba index, and return the EQE. If no valid EQEs are in
 * the Queue (no more work to do), or the Queue is full of EQEs that have been
 * processed, but not popped back to the HBA then this routine will return NULL.
 **/
static struct lpfc_eqe *
lpfc_sli4_eq_get(struct lpfc_queue *q)
{
	struct lpfc_eqe *eqe = q->qe[q->hba_index].eqe;

	/* If the next EQE is not valid then we are done */
	if (!bf_get_le32(lpfc_eqe_valid, eqe))
		return NULL;
	/* If the host has not yet processed the next entry then we are done */
	if (((q->hba_index + 1) % q->entry_count) == q->host_index)
		return NULL;

	q->hba_index = ((q->hba_index + 1) % q->entry_count);
	return eqe;
}

/**
 * lpfc_sli4_eq_release - Indicates the host has finished processing an EQ
 * @q: The Event Queue that the host has completed processing for.
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will mark all Event Queue Entries on @q, from the last
 * known completed entry to the last entry that was processed, as completed
 * by clearing the valid bit for each event queue entry. Then it will
 * notify the HBA, by ringing the doorbell, that the EQEs have been processed.
 * The internal host index in the @q will be updated by this routine to indicate
 * that the host has finished processing the entries. The @arm parameter
 * indicates that the queue should be rearmed when ringing the doorbell.
 *
 * This function will return the number of EQEs that were popped.
 **/
uint32_t
lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm)
{
	uint32_t released = 0;
	struct lpfc_eqe *temp_eqe;
	struct lpfc_register doorbell;

	/* while there are valid entries */
	while (q->hba_index != q->host_index) {
		temp_eqe = q->qe[q->host_index].eqe;
		bf_set_le32(lpfc_eqe_valid, temp_eqe, 0);
		released++;
		q->host_index = ((q->host_index + 1) % q->entry_count);
	}
	if (unlikely(released == 0 && !arm))
		return 0;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm) {
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
		bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
	}
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
	bf_set(lpfc_eqcq_doorbell_eqid, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
	/* PCI read to flush PCI pipeline on re-arming for INTx mode */
	if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
		readl(q->phba->sli4_hba.EQCQDBregaddr);
	return released;
}

/**
 * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ
 * @q: The Completion Queue to get the first valid CQE from
 *
 * This routine will get the first valid Completion Queue Entry from @q, update
 * the queue's internal hba index, and return the CQE. If no valid CQEs are in
 * the Queue (no more work to do), or the Queue is full of CQEs that have been
 * processed, but not popped back to the HBA then this routine will return NULL.
 **/
static struct lpfc_cqe *
lpfc_sli4_cq_get(struct lpfc_queue *q)
{
	struct lpfc_cqe *cqe;

	/* If the next CQE is not valid then we are done */
	if (!bf_get_le32(lpfc_cqe_valid, q->qe[q->hba_index].cqe))
		return NULL;
	/* If the host has not yet processed the next entry then we are done */
	if (((q->hba_index + 1) % q->entry_count) == q->host_index)
		return NULL;

	cqe = q->qe[q->hba_index].cqe;
	q->hba_index = ((q->hba_index + 1) % q->entry_count);
	return cqe;
}

/**
 * lpfc_sli4_cq_release - Indicates the host has finished processing a CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will mark all Completion queue entries on @q, from the last
 * known completed entry to the last entry that was processed, as completed
 * by clearing the valid bit for each completion queue entry. Then it will
 * notify the HBA, by ringing the doorbell, that the CQEs have been processed.
 * The internal host index in the @q will be updated by this routine to indicate
 * that the host has finished processing the entries. The @arm parameter
 * indicates that the queue should be rearmed when ringing the doorbell.
 *
 * This function will return the number of CQEs that were released.
 **/
uint32_t
lpfc_sli4_cq_release(struct lpfc_queue *q, bool arm)
{
	uint32_t released = 0;
	struct lpfc_cqe *temp_qe;
	struct lpfc_register doorbell;

	/* while there are valid entries */
	while (q->hba_index != q->host_index) {
		temp_qe = q->qe[q->host_index].cqe;
		bf_set_le32(lpfc_cqe_valid, temp_qe, 0);
		released++;
		q->host_index = ((q->host_index + 1) % q->entry_count);
	}
	if (unlikely(released == 0 && !arm))
		return 0;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
	bf_set(lpfc_eqcq_doorbell_cqid, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
	return released;
}

/**
 * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 * @hrqe: The Header Receive Queue Entry to put on the Header Receive Queue.
 * @drqe: The Data Receive Queue Entry to put on the Data Receive Queue.
 *
 * This routine will copy the contents of @hrqe and @drqe to the next available
 * entries on @hq and @dq. It will then ring the Receive Queue Doorbell to
 * signal the HBA to start processing the Receive Queue Entries. This function
 * returns the index that the rqe was copied to if successful. If no entries
 * are available on the queues then this function will return -EBUSY.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
		 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
{
	struct lpfc_rqe *temp_hrqe = hq->qe[hq->host_index].rqe;
	struct lpfc_rqe *temp_drqe = dq->qe[dq->host_index].rqe;
	struct lpfc_register doorbell;
	int put_index = hq->host_index;

	if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
		return -EINVAL;
	if (hq->host_index != dq->host_index)
		return -EINVAL;
	/* If the host has not yet processed the next entry then we are done */
	if (((hq->host_index + 1) % hq->entry_count) == hq->hba_index)
		return -EBUSY;
	lpfc_sli_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
	lpfc_sli_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);

	/* Update the host index to point to the next slot */
	hq->host_index = ((hq->host_index + 1) % hq->entry_count);
	dq->host_index = ((dq->host_index + 1) % dq->entry_count);

	/* Ring The Header Receive Queue Doorbell */
	if (!(hq->host_index % hq->entry_repost)) {
		doorbell.word0 = 0;
		bf_set(lpfc_rq_doorbell_num_posted, &doorbell,
		       hq->entry_repost);
		bf_set(lpfc_rq_doorbell_id, &doorbell, hq->queue_id);
		writel(doorbell.word0, hq->phba->sli4_hba.RQDBregaddr);
	}
	return put_index;
}

/**
 * lpfc_sli4_rq_release - Updates internal hba index for RQ
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 *
 * This routine will update the HBA index of both queues to reflect consumption
 * of one Receive Queue Entry by the HBA. When the HBA indicates that it has
 * consumed an entry the host calls this function to update the queues'
 * internal pointers. This routine returns the number of entries that were
 * consumed by the HBA.
 **/
static uint32_t
lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
{
	if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
		return 0;
	hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
	dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
	return 1;
}

/**
 * lpfc_cmd_iocb - Get next command iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns a pointer to the next command iocb entry
 * in the command ring. The caller must hold hbalock to prevent
 * other threads from consuming the next command iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->cmdringaddr) +
			   pring->cmdidx * phba->iocb_cmd_size);
}

/**
 * lpfc_resp_iocb - Get next response iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns a pointer to the next response iocb entry
 * in the response ring. The caller must hold hbalock to make sure
 * that no other thread consumes the next response iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->rspringaddr) +
			   pring->rspidx * phba->iocb_rsp_size);
}

/**
 * __lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with hbalock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns a pointer to the newly
 * allocated iocb object; otherwise it returns NULL.
 **/
static struct lpfc_iocbq *
__lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
	struct lpfc_iocbq *iocbq = NULL;

	list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
	if (iocbq)
		phba->iocb_cnt++;
	if (phba->iocb_cnt > phba->iocb_max)
		phba->iocb_max = phba->iocb_cnt;
	return iocbq;
}

/**
 * __lpfc_clear_active_sglq - Remove the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function clears the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
static struct lpfc_sglq *
__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
	struct lpfc_sglq *sglq;

	sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
	phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL;
	return sglq;
}

/**
 * __lpfc_get_active_sglq - Get the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function returns the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
struct lpfc_sglq *
__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
	struct lpfc_sglq *sglq;

	sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
	return sglq;
}

/**
 * __lpfc_set_rrq_active - set RRQ active bit in the ndlp's xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: nodelist pointer for this target.
 * @xritag: xri used in this exchange.
 * @rxid: Remote Exchange ID.
 * @send_rrq: Flag used to determine if we should send rrq els cmd.
 *
 * This function is called with hbalock held.
 * The active bit is set in the ndlp's active rrq xri_bitmap. It allocates an
 * rrq struct and adds it to the active_rrq_list.
 *
 * returns 0 if an rrq slot was set up for this xri
 * < 0 if rrq memory could not be allocated or a parameter was invalid.
 **/
static int
__lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		      uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
{
	struct lpfc_node_rrq *rrq;
	int empty;
	uint32_t did = 0;

	if (!ndlp)
		return -EINVAL;

	if (!phba->cfg_enable_rrq)
		return -EINVAL;

	if (phba->pport->load_flag & FC_UNLOADING) {
		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
		goto out;
	}
	did = ndlp->nlp_DID;

	/*
	 * set the active bit even if there is no mem available.
	 */
	if (NLP_CHK_FREE_REQ(ndlp))
		goto out;

	if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
		goto out;

	if (test_and_set_bit(xritag, ndlp->active_rrqs.xri_bitmap))
		goto out;

	rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL);
	if (rrq) {
		rrq->send_rrq = send_rrq;
		rrq->xritag = xritag;
		rrq->rrq_stop_time = jiffies + HZ * (phba->fc_ratov + 1);
		rrq->ndlp = ndlp;
		rrq->nlp_DID = ndlp->nlp_DID;
		rrq->vport = ndlp->vport;
		rrq->rxid = rxid;
		empty = list_empty(&phba->active_rrq_list);
		rrq->send_rrq = send_rrq;
		list_add_tail(&rrq->list, &phba->active_rrq_list);
		if (!(phba->hba_flag & HBA_RRQ_ACTIVE)) {
			phba->hba_flag |= HBA_RRQ_ACTIVE;
			if (empty)
				lpfc_worker_wake_up(phba);
		}
		return 0;
	}
out:
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2921 Can't set rrq active xri:0x%x rxid:0x%x"
			" DID:0x%x Send:%d\n",
			xritag, rxid, did, send_rrq);
	return -EINVAL;
}

/**
 * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @xritag: xri used in this exchange.
 * @rrq: The RRQ to be cleared.
 *
 * This function clears the RRQ active bit for this xri in the ndlp's
 * xri_bitmap and frees the rrq resource.
 **/
void
lpfc_clr_rrq_active(struct lpfc_hba *phba,
		    uint16_t xritag,
		    struct lpfc_node_rrq *rrq)
{
	struct lpfc_nodelist *ndlp = NULL;

	if ((rrq->vport) && NLP_CHK_NODE_ACT(rrq->ndlp))
		ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);

	/* The target DID could have been swapped (cable swap)
	 * we should use the ndlp from the findnode if it is
	 * available.
	 */
	if ((!ndlp) && rrq->ndlp)
		ndlp = rrq->ndlp;

	if (!ndlp)
		goto out;

	if (test_and_clear_bit(xritag, ndlp->active_rrqs.xri_bitmap)) {
		rrq->send_rrq = 0;
		rrq->xritag = 0;
		rrq->rrq_stop_time = 0;
	}
out:
	mempool_free(rrq, phba->rrq_pool);
}

/**
 * lpfc_handle_rrq_active - Checks if RRQ has waited RATOV.
 * @phba: Pointer to HBA context object.
 *
 * This function is called without the hbalock held. It checks whether
 * stop_time (ratov from setting the rrq active) has been reached for
 * each active rrq. If it has and the send_rrq flag is set, it calls
 * lpfc_send_rrq. If the send_rrq flag is not set, it just calls the
 * routine to clear the rrq and free the rrq resource.
 * The timer is set to the next rrq that is going to expire before
 * leaving the routine.
 *
 **/
void
lpfc_handle_rrq_active(struct lpfc_hba *phba)
{
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long next_time;
	unsigned long iflags;
	LIST_HEAD(send_rrq);

	spin_lock_irqsave(&phba->hbalock, iflags);
	phba->hba_flag &= ~HBA_RRQ_ACTIVE;
	next_time = jiffies + HZ * (phba->fc_ratov + 1);
	list_for_each_entry_safe(rrq, nextrrq,
				 &phba->active_rrq_list, list) {
		if (time_after(jiffies, rrq->rrq_stop_time))
			list_move(&rrq->list, &send_rrq);
		else if (time_before(rrq->rrq_stop_time, next_time))
			next_time = rrq->rrq_stop_time;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	if (!list_empty(&phba->active_rrq_list))
		mod_timer(&phba->rrq_tmr, next_time);
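	/* Expired rrqs were moved onto the local send_rrq list above so they
	 * can be processed here without holding the hbalock.
	 */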
	list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {
		list_del(&rrq->list);
		if (!rrq->send_rrq)
			/* this call will free the rrq */
			lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
		else if (lpfc_send_rrq(phba, rrq)) {
			/* if we send the rrq then the completion handler
			 * will clear the bit in the xribitmap.
			 */
			lpfc_clr_rrq_active(phba, rrq->xritag,
					    rrq);
		}
	}
}

/**
 * lpfc_get_active_rrq - Get the active RRQ for this exchange.
 * @vport: Pointer to vport context object.
 * @xri: The xri used in the exchange.
 * @did: The target's DID for this exchange.
 *
 * returns NULL = rrq not found in the phba->active_rrq_list.
 *         rrq = rrq for this xri and target.
 **/
struct lpfc_node_rrq *
lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long iflags;

	if (phba->sli_rev != LPFC_SLI_REV4)
		return NULL;
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
		if (rrq->vport == vport && rrq->xritag == xri &&
		    rrq->nlp_DID == did) {
			list_del(&rrq->list);
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			return rrq;
		}
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return NULL;
}

/**
 * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQs for this vport.
 * @vport: Pointer to vport context object.
 * @ndlp: Pointer to the lpfc_nodelist structure.
 *
 * If ndlp is NULL remove all active RRQs for this vport from the
 * phba->active_rrq_list and clear the rrq.
 * If ndlp is not NULL then only remove rrqs for this vport & this ndlp.
 **/
void
lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long iflags;
	LIST_HEAD(rrq_list);

	if (phba->sli_rev != LPFC_SLI_REV4)
		return;
	if (!ndlp) {
		lpfc_sli4_vport_delete_els_xri_aborted(vport);
		lpfc_sli4_vport_delete_fcp_xri_aborted(vport);
	}
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list)
		if ((rrq->vport == vport) && (!ndlp || rrq->ndlp == ndlp))
			list_move(&rrq->list, &rrq_list);
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
		list_del(&rrq->list);
		lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
	}
}

/**
 * lpfc_cleanup_wt_rrqs - Remove all rrqs from the active list.
 * @phba: Pointer to HBA context object.
 *
 * Remove all rrqs from the phba->active_rrq_list and free them by
 * calling lpfc_clr_rrq_active.
 *
 **/
void
lpfc_cleanup_wt_rrqs(struct lpfc_hba *phba)
{
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long next_time;
	unsigned long iflags;
	LIST_HEAD(rrq_list);

	if (phba->sli_rev != LPFC_SLI_REV4)
		return;
	spin_lock_irqsave(&phba->hbalock, iflags);
	phba->hba_flag &= ~HBA_RRQ_ACTIVE;
	next_time = jiffies + HZ * (phba->fc_ratov * 2);
	list_splice_init(&phba->active_rrq_list, &rrq_list);
	spin_unlock_irqrestore(&phba->hbalock, iflags);

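	/* The entire active list was spliced onto the local rrq_list above,
	 * so each rrq can be cleared and freed without holding the hbalock.
	 */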
	list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
		list_del(&rrq->list);
		lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
	}
	if (!list_empty(&phba->active_rrq_list))
		mod_timer(&phba->rrq_tmr, next_time);
}


/**
 * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: Target's nodelist pointer for this exchange.
 * @xritag: the xri in the bitmap to test.
 *
 * This function is called with hbalock held. This function
 * returns 0 = rrq not active for this xri
 *         1 = rrq is valid for this xri.
 **/
int
lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
			uint16_t xritag)
{
	if (!ndlp)
		return 0;
	if (test_bit(xritag, ndlp->active_rrqs.xri_bitmap))
		return 1;
	else
		return 0;
}

/**
 * lpfc_set_rrq_active - set RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: nodelist pointer for this target.
 * @xritag: xri used in this exchange.
 * @rxid: Remote Exchange ID.
 * @send_rrq: Flag used to determine if we should send rrq els cmd.
 *
 * This function takes the hbalock.
 * The active bit is always set in the active rrq xri_bitmap even
 * if there is no slot available for the other rrq information.
 *
 * returns 0 if the rrq was activated for this xri
 * < 0 No memory or invalid ndlp.
 **/
int
lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		    uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
{
	int ret;
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	ret = __lpfc_set_rrq_active(phba, ndlp, xritag, rxid, send_rrq);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return ret;
}

/**
 * __lpfc_sli_get_sglq - Allocates a sglq object from the sgl pool
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the iocbq.
 *
 * This function is called with hbalock held. It gets a new driver
 * sglq object from the sglq list. If the list is not empty it returns
 * a pointer to the newly allocated sglq object, else it returns NULL.
 **/
static struct lpfc_sglq *
__lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
	struct list_head *lpfc_sgl_list = &phba->sli4_hba.lpfc_sgl_list;
	struct lpfc_sglq *sglq = NULL;
	struct lpfc_sglq *start_sglq = NULL;
	struct lpfc_scsi_buf *lpfc_cmd;
	struct lpfc_nodelist *ndlp;
	int found = 0;

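	/* Find the ndlp for this I/O so that any RRQ still active on one of
	 * its XRIs can be honored when selecting a sglq below.
	 */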
	if (piocbq->iocb_flag & LPFC_IO_FCP) {
		lpfc_cmd = (struct lpfc_scsi_buf *) piocbq->context1;
		ndlp = lpfc_cmd->rdata->pnode;
	} else  if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) &&
			!(piocbq->iocb_flag & LPFC_IO_LIBDFC))
		ndlp = piocbq->context_un.ndlp;
	else
		ndlp = piocbq->context1;

	list_remove_head(lpfc_sgl_list, sglq, struct lpfc_sglq, list);
	start_sglq = sglq;
	while (!found) {
		if (!sglq)
			return NULL;
		if (lpfc_test_rrq_active(phba, ndlp, sglq->sli4_xritag)) {
			/* This xri has an rrq outstanding for this DID.
			 * put it back in the list and get another xri.
			 */
			list_add_tail(&sglq->list, lpfc_sgl_list);
			sglq = NULL;
			list_remove_head(lpfc_sgl_list, sglq,
					 struct lpfc_sglq, list);
			if (sglq == start_sglq) {
				sglq = NULL;
				break;
			} else
				continue;
		}
		sglq->ndlp = ndlp;
		found = 1;
		phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
		sglq->state = SGL_ALLOCATED;
	}
	return sglq;
}

/**
 * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns a pointer to the newly
 * allocated iocb object; otherwise it returns NULL.
 **/
struct lpfc_iocbq *
lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct lpfc_iocbq *iocbq = NULL;
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	iocbq = __lpfc_sli_get_iocbq(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return iocbq;
}

/**
 * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 * The sglq structure that holds the xritag and phys and virtual
 * mappings for the scatter gather list is retrieved from the
 * active array of sglq. The get of the sglq pointer also clears
 * the entry in the array. If the status of the IO indicates that
 * this IO was aborted then the sglq entry is put on the
 * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
 * IO has good status or fails for any other reason then the sglq
 * entry is added to the free list (lpfc_sgl_list).
 **/
static void
__lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	struct lpfc_sglq *sglq;
	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
	unsigned long iflag = 0;
	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];

	if (iocbq->sli4_xritag == NO_XRI)
		sglq = NULL;
	else
		sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag);

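	/* If the exchange is still marked busy the HBA has not yet released
	 * the XRI, so park the sglq on the aborted list until the abort
	 * completion arrives; otherwise return it to the free sgl list.
	 */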
	if (sglq) {
		if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
			(sglq->state != SGL_XRI_ABORTED)) {
			spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock,
					iflag);
			list_add(&sglq->list,
				&phba->sli4_hba.lpfc_abts_els_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.abts_sgl_list_lock, iflag);
		} else {
			sglq->state = SGL_FREED;
			sglq->ndlp = NULL;
			list_add_tail(&sglq->list,
				&phba->sli4_hba.lpfc_sgl_list);

			/* Check if TXQ queue needs to be serviced */
			if (pring->txq_cnt)
				lpfc_worker_wake_up(phba);
		}
	}


	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
	iocbq->sli4_lxritag = NO_XRI;
	iocbq->sli4_xritag = NO_XRI;
	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}


/**
 * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 **/
static void
__lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);

	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
	iocbq->sli4_xritag = NO_XRI;
	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}

/**
 * __lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 **/
static void
__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	phba->__lpfc_sli_release_iocbq(phba, iocbq);
	phba->iocb_cnt--;
}

/**
 * lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with no lock held to release the iocb to
 * iocb pool.
 **/
void
lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	unsigned long iflags;

	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	spin_lock_irqsave(&phba->hbalock, iflags);
	__lpfc_sli_release_iocbq(phba, iocbq);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
}

James Smarte59058c2008-08-24 21:49:00 -04001027/**
James Smarta257bf92009-04-06 18:48:10 -04001028 * lpfc_sli_cancel_iocbs - Cancel all iocbs from a list.
1029 * @phba: Pointer to HBA context object.
1030 * @iocblist: List of IOCBs.
1031 * @ulpstatus: ULP status in IOCB command field.
1032 * @ulpWord4: ULP word-4 in IOCB command field.
1033 *
1034 * This function is called with a list of IOCBs to cancel. It cancels the IOCB
1035 * on the list by invoking the complete callback function associated with the
1036 * IOCB with the provided @ulpstatus and @ulpword4 set to the IOCB commond
1037 * fields.
1038 **/
void
lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
		      uint32_t ulpstatus, uint32_t ulpWord4)
{
	struct lpfc_iocbq *piocb;

	while (!list_empty(iocblist)) {
		list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);

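		/* IOCBs with no completion handler are simply returned to
		 * the pool; the rest are completed back to their owners
		 * with the caller-supplied status and word-4 values.
		 */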
		if (!piocb->iocb_cmpl)
			lpfc_sli_release_iocbq(phba, piocb);
		else {
			piocb->iocb.ulpStatus = ulpstatus;
			piocb->iocb.un.ulpWord[4] = ulpWord4;
			(piocb->iocb_cmpl) (phba, piocb, piocb);
		}
	}
	return;
}

/**
 * lpfc_sli_iocb_cmd_type - Get the iocb type
 * @iocb_cmnd: iocb command code.
 *
 * This function is called by the ring event handler function to get the iocb
 * type. This function translates the iocb command to an iocb command type
 * used to decide the final disposition of each completed IOCB.
 * The function returns
 * LPFC_UNKNOWN_IOCB if it is an unsupported iocb
 * LPFC_SOL_IOCB     if it is a solicited iocb completion
 * LPFC_ABORT_IOCB   if it is an abort iocb
 * LPFC_UNSOL_IOCB   if it is an unsolicited iocb
 *
 * The caller is not required to hold any lock.
 **/
static lpfc_iocb_type
lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
{
	lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;

	if (iocb_cmnd > CMD_MAX_IOCB_CMD)
		return 0;

	switch (iocb_cmnd) {
	case CMD_XMIT_SEQUENCE_CR:
	case CMD_XMIT_SEQUENCE_CX:
	case CMD_XMIT_BCAST_CN:
	case CMD_XMIT_BCAST_CX:
	case CMD_ELS_REQUEST_CR:
	case CMD_ELS_REQUEST_CX:
	case CMD_CREATE_XRI_CR:
	case CMD_CREATE_XRI_CX:
	case CMD_GET_RPI_CN:
	case CMD_XMIT_ELS_RSP_CX:
	case CMD_GET_RPI_CR:
	case CMD_FCP_IWRITE_CR:
	case CMD_FCP_IWRITE_CX:
	case CMD_FCP_IREAD_CR:
	case CMD_FCP_IREAD_CX:
	case CMD_FCP_ICMND_CR:
	case CMD_FCP_ICMND_CX:
	case CMD_FCP_TSEND_CX:
	case CMD_FCP_TRSP_CX:
	case CMD_FCP_TRECEIVE_CX:
	case CMD_FCP_AUTO_TRSP_CX:
	case CMD_ADAPTER_MSG:
	case CMD_ADAPTER_DUMP:
	case CMD_XMIT_SEQUENCE64_CR:
	case CMD_XMIT_SEQUENCE64_CX:
	case CMD_XMIT_BCAST64_CN:
	case CMD_XMIT_BCAST64_CX:
	case CMD_ELS_REQUEST64_CR:
	case CMD_ELS_REQUEST64_CX:
	case CMD_FCP_IWRITE64_CR:
	case CMD_FCP_IWRITE64_CX:
	case CMD_FCP_IREAD64_CR:
	case CMD_FCP_IREAD64_CX:
	case CMD_FCP_ICMND64_CR:
	case CMD_FCP_ICMND64_CX:
	case CMD_FCP_TSEND64_CX:
	case CMD_FCP_TRSP64_CX:
	case CMD_FCP_TRECEIVE64_CX:
	case CMD_GEN_REQUEST64_CR:
	case CMD_GEN_REQUEST64_CX:
	case CMD_XMIT_ELS_RSP64_CX:
	case DSSCMD_IWRITE64_CR:
	case DSSCMD_IWRITE64_CX:
	case DSSCMD_IREAD64_CR:
	case DSSCMD_IREAD64_CX:
		type = LPFC_SOL_IOCB;
		break;
	case CMD_ABORT_XRI_CN:
	case CMD_ABORT_XRI_CX:
	case CMD_CLOSE_XRI_CN:
	case CMD_CLOSE_XRI_CX:
	case CMD_XRI_ABORTED_CX:
	case CMD_ABORT_MXRI64_CN:
	case CMD_XMIT_BLS_RSP64_CX:
		type = LPFC_ABORT_IOCB;
		break;
	case CMD_RCV_SEQUENCE_CX:
	case CMD_RCV_ELS_REQ_CX:
	case CMD_RCV_SEQUENCE64_CX:
	case CMD_RCV_ELS_REQ64_CX:
	case CMD_ASYNC_STATUS:
	case CMD_IOCB_RCV_SEQ64_CX:
	case CMD_IOCB_RCV_ELS64_CX:
	case CMD_IOCB_RCV_CONT64_CX:
	case CMD_IOCB_RET_XRI64_CX:
		type = LPFC_UNSOL_IOCB;
		break;
	case CMD_IOCB_XMIT_MSEQ64_CR:
	case CMD_IOCB_XMIT_MSEQ64_CX:
	case CMD_IOCB_RCV_SEQ_LIST64_CX:
	case CMD_IOCB_RCV_ELS_LIST64_CX:
	case CMD_IOCB_CLOSE_EXTENDED_CN:
	case CMD_IOCB_ABORT_EXTENDED_CN:
	case CMD_IOCB_RET_HBQE64_CN:
	case CMD_IOCB_FCP_IBIDIR64_CR:
	case CMD_IOCB_FCP_IBIDIR64_CX:
	case CMD_IOCB_FCP_ITASKMGT64_CX:
	case CMD_IOCB_LOGENTRY_CN:
	case CMD_IOCB_LOGENTRY_ASYNC_CN:
		printk("%s - Unhandled SLI-3 Command x%x\n",
				__func__, iocb_cmnd);
		type = LPFC_UNKNOWN_IOCB;
		break;
	default:
		type = LPFC_UNKNOWN_IOCB;
		break;
	}

	return type;
}

/**
 * lpfc_sli_ring_map - Issue config_ring mbox for all rings
 * @phba: Pointer to HBA context object.
 *
 * This function is called from SLI initialization code
 * to configure every ring of the HBA's SLI interface. The
 * caller is not required to hold any lock. This function issues
 * a config_ring mailbox command for each ring.
 * This function returns zero if successful else returns a negative
 * error code.
 **/
static int
lpfc_sli_ring_map(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *pmbox;
	int i, rc, ret = 0;

	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb)
		return -ENOMEM;
	pmbox = &pmb->u.mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;
	for (i = 0; i < psli->num_rings; i++) {
		lpfc_config_ring(phba, i, pmb);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0446 Adapter failed to init (%d), "
				"mbxCmd x%x CFG_RING, mbxStatus x%x, "
				"ring %d\n",
				rc, pmbox->mbxCommand,
				pmbox->mbxStatus, i);
			phba->link_state = LPFC_HBA_ERROR;
			ret = -ENXIO;
			break;
		}
	}
	mempool_free(pmb, phba->mbox_mem_pool);
	return ret;
}

/**
 * lpfc_sli_ringtxcmpl_put - Adds new iocb to the txcmplq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @piocb: Pointer to the driver iocb object.
 *
 * This function is called with hbalock held. The function adds the
 * new iocb to txcmplq of the given ring. This function always returns
 * 0. If this function is called for the ELS ring, this function checks if
 * there is a vport associated with the ELS command. This function also
 * starts els_tmofunc timer if this is an ELS command.
 **/
static int
lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			struct lpfc_iocbq *piocb)
{
	list_add_tail(&piocb->list, &pring->txcmplq);
	piocb->iocb_flag |= LPFC_IO_ON_Q;
	pring->txcmplq_cnt++;
	if (pring->txcmplq_cnt > pring->txcmplq_max)
		pring->txcmplq_max = pring->txcmplq_cnt;

	if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
	   (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
	   (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
		if (!piocb->vport)
			BUG();
		else
			mod_timer(&piocb->vport->els_tmofunc,
				  jiffies + HZ * (phba->fc_ratov << 1));
	}


	return 0;
}

/**
 * lpfc_sli_ringtx_get - Get first element of the txq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held to get next
 * iocb in txq of the given ring. If there is any iocb in
 * the txq, the function returns first iocb in the list after
 * removing the iocb from the list, else it returns NULL.
 **/
struct lpfc_iocbq *
lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct lpfc_iocbq *cmd_iocb;

	list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
	if (cmd_iocb != NULL)
		pring->txq_cnt--;
	return cmd_iocb;
}

/**
 * lpfc_sli_next_iocb_slot - Get next iocb slot in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held and the caller must post the
 * iocb without releasing the lock. If the caller releases the lock,
 * iocb slot returned by the function is not guaranteed to be available.
 * The function returns a pointer to the next available iocb slot if there
 * is an available slot in the ring, else it returns NULL.
 * If the get index of the ring is ahead of the put index, the function
 * will post an error attention event to the worker thread to take the
 * HBA to offline state.
 **/
dea31012005-04-17 16:05:31 -05001288static IOCB_t *
1289lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1290{
James Smart34b02dc2008-08-24 21:49:55 -04001291 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
dea31012005-04-17 16:05:31 -05001292 uint32_t max_cmd_idx = pring->numCiocb;
dea31012005-04-17 16:05:31 -05001293 if ((pring->next_cmdidx == pring->cmdidx) &&
1294 (++pring->next_cmdidx >= max_cmd_idx))
1295 pring->next_cmdidx = 0;
1296
1297 if (unlikely(pring->local_getidx == pring->next_cmdidx)) {
1298
1299 pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
1300
1301 if (unlikely(pring->local_getidx >= max_cmd_idx)) {
1302 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
James Smarte8b62012007-08-02 11:10:09 -04001303 "0315 Ring %d issue: portCmdGet %d "
Frederik Schwarzer025dfda2008-10-16 19:02:37 +02001304 "is bigger than cmd ring %d\n",
James Smarte8b62012007-08-02 11:10:09 -04001305 pring->ringno,
dea31012005-04-17 16:05:31 -05001306 pring->local_getidx, max_cmd_idx);
1307
James Smart2e0fef82007-06-17 19:56:36 -05001308 phba->link_state = LPFC_HBA_ERROR;
dea31012005-04-17 16:05:31 -05001309 /*
1310 * All error attention handlers are posted to
1311 * worker thread
1312 */
1313 phba->work_ha |= HA_ERATT;
1314 phba->work_hs = HS_FFER3;
James Smart92d7f7b2007-06-17 19:56:38 -05001315
James Smart5e9d9b82008-06-14 22:52:53 -04001316 lpfc_worker_wake_up(phba);
dea31012005-04-17 16:05:31 -05001317
1318 return NULL;
1319 }
1320
1321 if (pring->local_getidx == pring->next_cmdidx)
1322 return NULL;
1323 }
1324
James Smarted957682007-06-17 19:56:37 -05001325 return lpfc_cmd_iocb(phba, pring);
dea31012005-04-17 16:05:31 -05001326}
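
/*
 * Illustrative sketch only, not part of the driver: the ring-full test in
 * lpfc_sli_next_iocb_slot() above is plain circular-index arithmetic, but
 * it is obscured by the lazy bump of next_cmdidx.  The two helpers below
 * restate the same logic on bare integers; their names (example_next_idx,
 * example_ring_is_full) are hypothetical and exist only for illustration.
 */
#if 0
static inline uint32_t example_next_idx(uint32_t idx, uint32_t max)
{
	/* advance one slot, wrapping back to slot 0 at the end of the ring */
	return (idx + 1 >= max) ? 0 : idx + 1;
}

static inline int example_ring_is_full(uint32_t put_idx, uint32_t get_idx,
				       uint32_t max)
{
	/* full when advancing the put index would collide with the get index */
	return example_next_idx(put_idx, max) == get_idx;
}
#endif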
1327
James Smarte59058c2008-08-24 21:49:00 -04001328/**
James Smart3621a712009-04-06 18:47:14 -04001329 * lpfc_sli_next_iotag - Get an iotag for the iocb
James Smarte59058c2008-08-24 21:49:00 -04001330 * @phba: Pointer to HBA context object.
1331 * @iocbq: Pointer to driver iocb object.
1332 *
1333 * This function gets an iotag for the iocb. If there is no unused iotag and
1334 * the iocbq_lookup_len < 0xffff, this function allocates a bigger iocbq_lookup
1335 * array and assigns a new iotag.
1336 * The function returns the allocated iotag if successful, else returns zero.
1337 * Zero is not a valid iotag.
1338 * The caller is not required to hold any lock.
1339 **/
James Bottomley604a3e32005-10-29 10:28:33 -05001340uint16_t
James Smart2e0fef82007-06-17 19:56:36 -05001341lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
dea31012005-04-17 16:05:31 -05001342{
James Smart2e0fef82007-06-17 19:56:36 -05001343 struct lpfc_iocbq **new_arr;
1344 struct lpfc_iocbq **old_arr;
James Bottomley604a3e32005-10-29 10:28:33 -05001345 size_t new_len;
1346 struct lpfc_sli *psli = &phba->sli;
1347 uint16_t iotag;
dea31012005-04-17 16:05:31 -05001348
James Smart2e0fef82007-06-17 19:56:36 -05001349 spin_lock_irq(&phba->hbalock);
James Bottomley604a3e32005-10-29 10:28:33 -05001350 iotag = psli->last_iotag;
1351 if(++iotag < psli->iocbq_lookup_len) {
1352 psli->last_iotag = iotag;
1353 psli->iocbq_lookup[iotag] = iocbq;
James Smart2e0fef82007-06-17 19:56:36 -05001354 spin_unlock_irq(&phba->hbalock);
James Bottomley604a3e32005-10-29 10:28:33 -05001355 iocbq->iotag = iotag;
1356 return iotag;
James Smart2e0fef82007-06-17 19:56:36 -05001357 } else if (psli->iocbq_lookup_len < (0xffff
James Bottomley604a3e32005-10-29 10:28:33 -05001358 - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
1359 new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
James Smart2e0fef82007-06-17 19:56:36 -05001360 spin_unlock_irq(&phba->hbalock);
1361 new_arr = kzalloc(new_len * sizeof (struct lpfc_iocbq *),
James Bottomley604a3e32005-10-29 10:28:33 -05001362 GFP_KERNEL);
1363 if (new_arr) {
James Smart2e0fef82007-06-17 19:56:36 -05001364 spin_lock_irq(&phba->hbalock);
James Bottomley604a3e32005-10-29 10:28:33 -05001365 old_arr = psli->iocbq_lookup;
1366 if (new_len <= psli->iocbq_lookup_len) {
1367 /* highly improbable case */
1368 kfree(new_arr);
1369 iotag = psli->last_iotag;
1370 if(++iotag < psli->iocbq_lookup_len) {
1371 psli->last_iotag = iotag;
1372 psli->iocbq_lookup[iotag] = iocbq;
James Smart2e0fef82007-06-17 19:56:36 -05001373 spin_unlock_irq(&phba->hbalock);
James Bottomley604a3e32005-10-29 10:28:33 -05001374 iocbq->iotag = iotag;
1375 return iotag;
1376 }
James Smart2e0fef82007-06-17 19:56:36 -05001377 spin_unlock_irq(&phba->hbalock);
James Bottomley604a3e32005-10-29 10:28:33 -05001378 return 0;
1379 }
1380 if (psli->iocbq_lookup)
1381 memcpy(new_arr, old_arr,
1382 ((psli->last_iotag + 1) *
James Smart311464e2007-08-02 11:10:37 -04001383 sizeof (struct lpfc_iocbq *)));
James Bottomley604a3e32005-10-29 10:28:33 -05001384 psli->iocbq_lookup = new_arr;
1385 psli->iocbq_lookup_len = new_len;
1386 psli->last_iotag = iotag;
1387 psli->iocbq_lookup[iotag] = iocbq;
James Smart2e0fef82007-06-17 19:56:36 -05001388 spin_unlock_irq(&phba->hbalock);
James Bottomley604a3e32005-10-29 10:28:33 -05001389 iocbq->iotag = iotag;
1390 kfree(old_arr);
1391 return iotag;
1392 }
James Smart8f6d98d2006-08-01 07:34:00 -04001393 } else
James Smart2e0fef82007-06-17 19:56:36 -05001394 spin_unlock_irq(&phba->hbalock);
dea31012005-04-17 16:05:31 -05001395
James Smartbc739052010-08-04 16:11:18 -04001396 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
James Smarte8b62012007-08-02 11:10:09 -04001397 "0318 Failed to allocate IOTAG.last IOTAG is %d\n",
1398 psli->last_iotag);
dea31012005-04-17 16:05:31 -05001399
James Bottomley604a3e32005-10-29 10:28:33 -05001400 return 0;
dea31012005-04-17 16:05:31 -05001401}
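
/*
 * Illustrative sketch only, not part of the driver: a typical caller (for
 * example the iocb-queue setup code) allocates an iocbq, asks this routine
 * for an iotag, and frees the object again if no tag could be assigned.
 * The helper name example_setup_one_iocbq is hypothetical and error paths
 * are reduced to the bare minimum.
 */
#if 0
static int example_setup_one_iocbq(struct lpfc_hba *phba)
{
	struct lpfc_iocbq *iocbq;

	iocbq = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
	if (!iocbq)
		return -ENOMEM;

	/* zero means no free iotag and the lookup array could not be grown */
	if (!lpfc_sli_next_iotag(phba, iocbq)) {
		kfree(iocbq);
		return -ENOMEM;
	}

	/* iocbq->iotag now indexes phba->sli.iocbq_lookup[] */
	return 0;
}
#endif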
1402
James Smarte59058c2008-08-24 21:49:00 -04001403/**
James Smart3621a712009-04-06 18:47:14 -04001404 * lpfc_sli_submit_iocb - Submit an iocb to the firmware
James Smarte59058c2008-08-24 21:49:00 -04001405 * @phba: Pointer to HBA context object.
1406 * @pring: Pointer to driver SLI ring object.
1407 * @iocb: Pointer to iocb slot in the ring.
1408 * @nextiocb: Pointer to driver iocb object which need to be
1409 * posted to firmware.
1410 *
1411 * This function is called with hbalock held to post a new iocb to
1412 * the firmware. This function copies the new iocb to the ring iocb slot and
1413 * updates the ring pointers. It adds the new iocb to the txcmplq if there is
1414 * a completion callback for this iocb, otherwise the function will free the
1415 * iocb object.
1416 **/
dea31012005-04-17 16:05:31 -05001417static void
1418lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1419 IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
1420{
1421 /*
James Bottomley604a3e32005-10-29 10:28:33 -05001422 * Set up an iotag
dea31012005-04-17 16:05:31 -05001423 */
James Bottomley604a3e32005-10-29 10:28:33 -05001424 nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0;
dea31012005-04-17 16:05:31 -05001425
James Smarte2a0a9d2008-12-04 22:40:02 -05001426
James Smarta58cbd52007-08-02 11:09:43 -04001427 if (pring->ringno == LPFC_ELS_RING) {
1428 lpfc_debugfs_slow_ring_trc(phba,
1429 "IOCB cmd ring: wd4:x%08x wd6:x%08x wd7:x%08x",
1430 *(((uint32_t *) &nextiocb->iocb) + 4),
1431 *(((uint32_t *) &nextiocb->iocb) + 6),
1432 *(((uint32_t *) &nextiocb->iocb) + 7));
1433 }
1434
dea31012005-04-17 16:05:31 -05001435 /*
1436 * Issue iocb command to adapter
1437 */
James Smart92d7f7b2007-06-17 19:56:38 -05001438 lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
dea31012005-04-17 16:05:31 -05001439 wmb();
1440 pring->stats.iocb_cmd++;
1441
1442 /*
1443 * If there is no completion routine to call, we can release the
1444 * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
1445 * that have no rsp ring completion, iocb_cmpl MUST be NULL.
1446 */
1447 if (nextiocb->iocb_cmpl)
1448 lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
James Bottomley604a3e32005-10-29 10:28:33 -05001449 else
James Smart2e0fef82007-06-17 19:56:36 -05001450 __lpfc_sli_release_iocbq(phba, nextiocb);
dea31012005-04-17 16:05:31 -05001451
1452 /*
1453 * Let the HBA know what IOCB slot will be the next one the
1454 * driver will put a command into.
1455 */
1456 pring->cmdidx = pring->next_cmdidx;
James Smarted957682007-06-17 19:56:37 -05001457 writel(pring->cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
dea31012005-04-17 16:05:31 -05001458}
1459
James Smarte59058c2008-08-24 21:49:00 -04001460/**
James Smart3621a712009-04-06 18:47:14 -04001461 * lpfc_sli_update_full_ring - Update the chip attention register
James Smarte59058c2008-08-24 21:49:00 -04001462 * @phba: Pointer to HBA context object.
1463 * @pring: Pointer to driver SLI ring object.
1464 *
1465 * The caller is not required to hold any lock for calling this function.
1466 * This function updates the chip attention bits for the ring to inform firmware
1467 * that there is pending work to be done for this ring and requests an
1468 * interrupt when there is space available in the ring. This function is
1469 * called when the driver is unable to post more iocbs to the ring due
1470 * to unavailability of space in the ring.
1471 **/
dea31012005-04-17 16:05:31 -05001472static void
James Smart2e0fef82007-06-17 19:56:36 -05001473lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
dea31012005-04-17 16:05:31 -05001474{
1475 int ringno = pring->ringno;
1476
1477 pring->flag |= LPFC_CALL_RING_AVAILABLE;
1478
1479 wmb();
1480
1481 /*
1482 * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
1483 * The HBA will tell us when an IOCB entry is available.
1484 */
1485 writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
1486 readl(phba->CAregaddr); /* flush */
1487
1488 pring->stats.iocb_cmd_full++;
1489}
1490
James Smarte59058c2008-08-24 21:49:00 -04001491/**
James Smart3621a712009-04-06 18:47:14 -04001492 * lpfc_sli_update_ring - Update chip attention register
James Smarte59058c2008-08-24 21:49:00 -04001493 * @phba: Pointer to HBA context object.
1494 * @pring: Pointer to driver SLI ring object.
1495 *
1496 * This function updates the chip attention register bit for the
1497 * given ring to inform the HBA that there is more work to be done
1498 * in this ring. The caller is not required to hold any lock.
1499 **/
dea31012005-04-17 16:05:31 -05001500static void
James Smart2e0fef82007-06-17 19:56:36 -05001501lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
dea31012005-04-17 16:05:31 -05001502{
1503 int ringno = pring->ringno;
1504
1505 /*
1506 * Tell the HBA that there is work to do in this ring.
1507 */
James Smart34b02dc2008-08-24 21:49:55 -04001508 if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) {
1509 wmb();
1510 writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
1511 readl(phba->CAregaddr); /* flush */
1512 }
dea31012005-04-17 16:05:31 -05001513}
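
/*
 * Illustrative sketch only, not part of the driver: both doorbell writes
 * above shift the ring-0 attention bits left by (ringno * 4), i.e. each
 * ring owns a 4-bit nibble of the Chip Attention register.  The helper
 * name example_ca_ring_bits is hypothetical.
 */
#if 0
static inline uint32_t example_ca_ring_bits(uint32_t ring0_bits, int ringno)
{
	/* move the ring-0 bit pattern into the nibble owned by this ring */
	return ring0_bits << (ringno * 4);
}
#endif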
1514
James Smarte59058c2008-08-24 21:49:00 -04001515/**
James Smart3621a712009-04-06 18:47:14 -04001516 * lpfc_sli_resume_iocb - Process iocbs in the txq
James Smarte59058c2008-08-24 21:49:00 -04001517 * @phba: Pointer to HBA context object.
1518 * @pring: Pointer to driver SLI ring object.
1519 *
1520 * This function is called with hbalock held to post pending iocbs
1521 * in the txq to the firmware. This function is called when driver
1522 * detects space available in the ring.
1523 **/
dea31012005-04-17 16:05:31 -05001524static void
James Smart2e0fef82007-06-17 19:56:36 -05001525lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
dea31012005-04-17 16:05:31 -05001526{
1527 IOCB_t *iocb;
1528 struct lpfc_iocbq *nextiocb;
1529
1530 /*
1531 * Check to see if:
1532 * (a) there is anything on the txq to send
1533 * (b) link is up
1534 * (c) link attention events can be processed (fcp ring only)
1535 * (d) IOCB processing is not blocked by the outstanding mbox command.
1536 */
1537 if (pring->txq_cnt &&
James Smart2e0fef82007-06-17 19:56:36 -05001538 lpfc_is_link_up(phba) &&
dea31012005-04-17 16:05:31 -05001539 (pring->ringno != phba->sli.fcp_ring ||
James Smart0b727fe2007-10-27 13:37:25 -04001540 phba->sli.sli_flag & LPFC_PROCESS_LA)) {
dea31012005-04-17 16:05:31 -05001541
1542 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
1543 (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
1544 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
1545
1546 if (iocb)
1547 lpfc_sli_update_ring(phba, pring);
1548 else
1549 lpfc_sli_update_full_ring(phba, pring);
1550 }
1551
1552 return;
1553}
1554
James Smarte59058c2008-08-24 21:49:00 -04001555/**
James Smart3621a712009-04-06 18:47:14 -04001556 * lpfc_sli_next_hbq_slot - Get next hbq entry for the HBQ
James Smarte59058c2008-08-24 21:49:00 -04001557 * @phba: Pointer to HBA context object.
1558 * @hbqno: HBQ number.
1559 *
1560 * This function is called with hbalock held to get the next
1561 * available slot for the given HBQ. If there is a free slot
1562 * available for the HBQ it will return a pointer to the next available
1563 * HBQ entry, otherwise it will return NULL.
1564 **/
Adrian Bunka6ababd2007-11-05 18:07:33 +01001565static struct lpfc_hbq_entry *
James Smarted957682007-06-17 19:56:37 -05001566lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
1567{
1568 struct hbq_s *hbqp = &phba->hbqs[hbqno];
1569
1570 if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx &&
1571 ++hbqp->next_hbqPutIdx >= hbqp->entry_count)
1572 hbqp->next_hbqPutIdx = 0;
1573
1574 if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) {
James Smart92d7f7b2007-06-17 19:56:38 -05001575 uint32_t raw_index = phba->hbq_get[hbqno];
James Smarted957682007-06-17 19:56:37 -05001576 uint32_t getidx = le32_to_cpu(raw_index);
1577
1578 hbqp->local_hbqGetIdx = getidx;
1579
1580 if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
1581 lpfc_printf_log(phba, KERN_ERR,
James Smart92d7f7b2007-06-17 19:56:38 -05001582 LOG_SLI | LOG_VPORT,
James Smarte8b62012007-08-02 11:10:09 -04001583 "1802 HBQ %d: local_hbqGetIdx "
James Smarted957682007-06-17 19:56:37 -05001584 "%u is > than hbqp->entry_count %u\n",
James Smarte8b62012007-08-02 11:10:09 -04001585 hbqno, hbqp->local_hbqGetIdx,
James Smarted957682007-06-17 19:56:37 -05001586 hbqp->entry_count);
1587
1588 phba->link_state = LPFC_HBA_ERROR;
1589 return NULL;
1590 }
1591
1592 if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)
1593 return NULL;
1594 }
1595
James Smart51ef4c22007-08-02 11:10:31 -04001596 return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt +
1597 hbqp->hbqPutIdx;
James Smarted957682007-06-17 19:56:37 -05001598}
1599
James Smarte59058c2008-08-24 21:49:00 -04001600/**
James Smart3621a712009-04-06 18:47:14 -04001601 * lpfc_sli_hbqbuf_free_all - Free all the hbq buffers
James Smarte59058c2008-08-24 21:49:00 -04001602 * @phba: Pointer to HBA context object.
1603 *
1604 * This function is called with no lock held to free all the
1605 * hbq buffers while uninitializing the SLI interface. It also
1606 * frees the HBQ buffers returned by the firmware but not yet
1607 * processed by the upper layers.
1608 **/
James Smarted957682007-06-17 19:56:37 -05001609void
1610lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
1611{
James Smart92d7f7b2007-06-17 19:56:38 -05001612 struct lpfc_dmabuf *dmabuf, *next_dmabuf;
1613 struct hbq_dmabuf *hbq_buf;
James Smart3163f722008-02-08 18:50:25 -05001614 unsigned long flags;
James Smart51ef4c22007-08-02 11:10:31 -04001615 int i, hbq_count;
James Smart3163f722008-02-08 18:50:25 -05001616 uint32_t hbqno;
James Smarted957682007-06-17 19:56:37 -05001617
James Smart51ef4c22007-08-02 11:10:31 -04001618 hbq_count = lpfc_sli_hbq_count();
James Smarted957682007-06-17 19:56:37 -05001619 /* Return all memory used by all HBQs */
James Smart3163f722008-02-08 18:50:25 -05001620 spin_lock_irqsave(&phba->hbalock, flags);
James Smart51ef4c22007-08-02 11:10:31 -04001621 for (i = 0; i < hbq_count; ++i) {
1622 list_for_each_entry_safe(dmabuf, next_dmabuf,
1623 &phba->hbqs[i].hbq_buffer_list, list) {
1624 hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
1625 list_del(&hbq_buf->dbuf.list);
1626 (phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf);
1627 }
James Smarta8adb832007-10-27 13:37:53 -04001628 phba->hbqs[i].buffer_count = 0;
James Smarted957682007-06-17 19:56:37 -05001629 }
James Smart3163f722008-02-08 18:50:25 -05001630 /* Return all HBQ buffers that are in-flight */
James Smart3772a992009-05-22 14:50:54 -04001631 list_for_each_entry_safe(dmabuf, next_dmabuf, &phba->rb_pend_list,
1632 list) {
James Smart3163f722008-02-08 18:50:25 -05001633 hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
1634 list_del(&hbq_buf->dbuf.list);
1635 if (hbq_buf->tag == -1) {
1636 (phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer)
1637 (phba, hbq_buf);
1638 } else {
1639 hbqno = hbq_buf->tag >> 16;
1640 if (hbqno >= LPFC_MAX_HBQS)
1641 (phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer)
1642 (phba, hbq_buf);
1643 else
1644 (phba->hbqs[hbqno].hbq_free_buffer)(phba,
1645 hbq_buf);
1646 }
1647 }
1648
1649 /* Mark the HBQs not in use */
1650 phba->hbq_in_use = 0;
1651 spin_unlock_irqrestore(&phba->hbalock, flags);
James Smarted957682007-06-17 19:56:37 -05001652}
1653
James Smarte59058c2008-08-24 21:49:00 -04001654/**
James Smart3621a712009-04-06 18:47:14 -04001655 * lpfc_sli_hbq_to_firmware - Post the hbq buffer to firmware
James Smarte59058c2008-08-24 21:49:00 -04001656 * @phba: Pointer to HBA context object.
1657 * @hbqno: HBQ number.
1658 * @hbq_buf: Pointer to HBQ buffer.
1659 *
1660 * This function is called with the hbalock held to post a
1661 * hbq buffer to the firmware. If the function finds an empty
1662 * slot in the HBQ, it will post the buffer. The function will return
1663 * zero if it successfully posts the buffer, otherwise it will return
1664 * an error code.
1665 **/
James Smart3772a992009-05-22 14:50:54 -04001666static int
James Smarted957682007-06-17 19:56:37 -05001667lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
James Smart92d7f7b2007-06-17 19:56:38 -05001668 struct hbq_dmabuf *hbq_buf)
James Smarted957682007-06-17 19:56:37 -05001669{
James Smart3772a992009-05-22 14:50:54 -04001670 return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
1671}
1672
1673/**
1674 * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware
1675 * @phba: Pointer to HBA context object.
1676 * @hbqno: HBQ number.
1677 * @hbq_buf: Pointer to HBQ buffer.
1678 *
1679 * This function is called with the hbalock held to post a hbq buffer to the
1680 * firmware. If the function finds an empty slot in the HBQ, it will post the
1681 * buffer and place it on the hbq_buffer_list. The function will return zero if
1682 * it successfully post the buffer else it will return an error.
1683 **/
1684static int
1685lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
1686 struct hbq_dmabuf *hbq_buf)
1687{
James Smarted957682007-06-17 19:56:37 -05001688 struct lpfc_hbq_entry *hbqe;
James Smart92d7f7b2007-06-17 19:56:38 -05001689 dma_addr_t physaddr = hbq_buf->dbuf.phys;
James Smarted957682007-06-17 19:56:37 -05001690
1691 /* Get next HBQ entry slot to use */
1692 hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
1693 if (hbqe) {
1694 struct hbq_s *hbqp = &phba->hbqs[hbqno];
1695
James Smart92d7f7b2007-06-17 19:56:38 -05001696 hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
1697 hbqe->bde.addrLow = le32_to_cpu(putPaddrLow(physaddr));
James Smart51ef4c22007-08-02 11:10:31 -04001698 hbqe->bde.tus.f.bdeSize = hbq_buf->size;
James Smarted957682007-06-17 19:56:37 -05001699 hbqe->bde.tus.f.bdeFlags = 0;
James Smart92d7f7b2007-06-17 19:56:38 -05001700 hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
1701 hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
1702 /* Sync SLIM */
James Smarted957682007-06-17 19:56:37 -05001703 hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
1704 writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
James Smart92d7f7b2007-06-17 19:56:38 -05001705 /* flush */
James Smarted957682007-06-17 19:56:37 -05001706 readl(phba->hbq_put + hbqno);
James Smart51ef4c22007-08-02 11:10:31 -04001707 list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
James Smart3772a992009-05-22 14:50:54 -04001708 return 0;
1709 } else
1710 return -ENOMEM;
James Smarted957682007-06-17 19:56:37 -05001711}
1712
James Smart4f774512009-05-22 14:52:35 -04001713/**
1714 * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware
1715 * @phba: Pointer to HBA context object.
1716 * @hbqno: HBQ number.
1717 * @hbq_buf: Pointer to HBQ buffer.
1718 *
1719 * This function is called with the hbalock held to post an RQE to the SLI4
1720 * firmware. If able to post the RQE to the RQ it will queue the hbq entry to
1721 * the hbq_buffer_list and return zero, otherwise it will return an error.
1722 **/
1723static int
1724lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
1725 struct hbq_dmabuf *hbq_buf)
1726{
1727 int rc;
1728 struct lpfc_rqe hrqe;
1729 struct lpfc_rqe drqe;
1730
1731 hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
1732 hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
1733 drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
1734 drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys);
1735 rc = lpfc_sli4_rq_put(phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
1736 &hrqe, &drqe);
1737 if (rc < 0)
1738 return rc;
1739 hbq_buf->tag = rc;
1740 list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list);
1741 return 0;
1742}
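
/*
 * Illustrative sketch only, not part of the driver: lpfc_sli_hbq_to_firmware()
 * above dispatches through the per-SLI-revision function pointer
 * phba->lpfc_sli_hbq_to_firmware.  The setup path wires it up roughly as in
 * the hypothetical fragment below; the exact location of that assignment is
 * an assumption here.
 */
#if 0
	if (phba->sli_rev == LPFC_SLI_REV4)
		phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4;
	else
		phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3;
#endif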
1743
James Smarte59058c2008-08-24 21:49:00 -04001744/* HBQ for ELS and CT traffic. */
James Smart92d7f7b2007-06-17 19:56:38 -05001745static struct lpfc_hbq_init lpfc_els_hbq = {
1746 .rn = 1,
James Smartdef9c7a2009-12-21 17:02:28 -05001747 .entry_count = 256,
James Smart92d7f7b2007-06-17 19:56:38 -05001748 .mask_count = 0,
1749 .profile = 0,
James Smart51ef4c22007-08-02 11:10:31 -04001750 .ring_mask = (1 << LPFC_ELS_RING),
James Smart92d7f7b2007-06-17 19:56:38 -05001751 .buffer_count = 0,
James Smarta257bf92009-04-06 18:48:10 -04001752 .init_count = 40,
1753 .add_count = 40,
James Smart92d7f7b2007-06-17 19:56:38 -05001754};
James Smarted957682007-06-17 19:56:37 -05001755
James Smarte59058c2008-08-24 21:49:00 -04001756/* HBQ for the extra ring if needed */
James Smart51ef4c22007-08-02 11:10:31 -04001757static struct lpfc_hbq_init lpfc_extra_hbq = {
1758 .rn = 1,
1759 .entry_count = 200,
1760 .mask_count = 0,
1761 .profile = 0,
1762 .ring_mask = (1 << LPFC_EXTRA_RING),
1763 .buffer_count = 0,
1764 .init_count = 0,
1765 .add_count = 5,
1766};
1767
James Smarte59058c2008-08-24 21:49:00 -04001768/* Array of HBQs */
James Smart78b2d852007-08-02 11:10:21 -04001769struct lpfc_hbq_init *lpfc_hbq_defs[] = {
James Smart92d7f7b2007-06-17 19:56:38 -05001770 &lpfc_els_hbq,
James Smart51ef4c22007-08-02 11:10:31 -04001771 &lpfc_extra_hbq,
James Smart92d7f7b2007-06-17 19:56:38 -05001772};
1773
James Smarte59058c2008-08-24 21:49:00 -04001774/**
James Smart3621a712009-04-06 18:47:14 -04001775 * lpfc_sli_hbqbuf_fill_hbqs - Post more hbq buffers to HBQ
James Smarte59058c2008-08-24 21:49:00 -04001776 * @phba: Pointer to HBA context object.
1777 * @hbqno: HBQ number.
1778 * @count: Number of HBQ buffers to be posted.
1779 *
James Smartd7c255b2008-08-24 21:50:00 -04001780 * This function is called with no lock held to post more hbq buffers to the
1781 * given HBQ. The function returns the number of HBQ buffers successfully
1782 * posted.
James Smarte59058c2008-08-24 21:49:00 -04001783 **/
James Smart311464e2007-08-02 11:10:37 -04001784static int
James Smart92d7f7b2007-06-17 19:56:38 -05001785lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
1786{
James Smartd7c255b2008-08-24 21:50:00 -04001787 uint32_t i, posted = 0;
James Smart3163f722008-02-08 18:50:25 -05001788 unsigned long flags;
James Smart92d7f7b2007-06-17 19:56:38 -05001789 struct hbq_dmabuf *hbq_buffer;
James Smartd7c255b2008-08-24 21:50:00 -04001790 LIST_HEAD(hbq_buf_list);
Matthew Wilcoxeafe1df2008-02-21 05:44:33 -07001791 if (!phba->hbqs[hbqno].hbq_alloc_buffer)
James Smart51ef4c22007-08-02 11:10:31 -04001792 return 0;
James Smart51ef4c22007-08-02 11:10:31 -04001793
James Smartd7c255b2008-08-24 21:50:00 -04001794 if ((phba->hbqs[hbqno].buffer_count + count) >
1795 lpfc_hbq_defs[hbqno]->entry_count)
1796 count = lpfc_hbq_defs[hbqno]->entry_count -
1797 phba->hbqs[hbqno].buffer_count;
1798 if (!count)
1799 return 0;
1800 /* Allocate HBQ entries */
1801 for (i = 0; i < count; i++) {
1802 hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
1803 if (!hbq_buffer)
1804 break;
1805 list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list);
1806 }
James Smart3163f722008-02-08 18:50:25 -05001807 /* Check whether HBQ is still in use */
1808 spin_lock_irqsave(&phba->hbalock, flags);
Matthew Wilcoxeafe1df2008-02-21 05:44:33 -07001809 if (!phba->hbq_in_use)
James Smartd7c255b2008-08-24 21:50:00 -04001810 goto err;
1811 while (!list_empty(&hbq_buf_list)) {
1812 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
1813 dbuf.list);
1814 hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count |
1815 (hbqno << 16));
James Smart3772a992009-05-22 14:50:54 -04001816 if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
James Smarta8adb832007-10-27 13:37:53 -04001817 phba->hbqs[hbqno].buffer_count++;
James Smartd7c255b2008-08-24 21:50:00 -04001818 posted++;
1819 } else
James Smart51ef4c22007-08-02 11:10:31 -04001820 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
James Smart92d7f7b2007-06-17 19:56:38 -05001821 }
James Smart3163f722008-02-08 18:50:25 -05001822 spin_unlock_irqrestore(&phba->hbalock, flags);
James Smartd7c255b2008-08-24 21:50:00 -04001823 return posted;
1824err:
1825 spin_unlock_irqrestore(&phba->hbalock, flags);
1826 while (!list_empty(&hbq_buf_list)) {
1827 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
1828 dbuf.list);
1829 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
1830 }
James Smart92d7f7b2007-06-17 19:56:38 -05001831 return 0;
James Smarted957682007-06-17 19:56:37 -05001832}
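
/*
 * Illustrative sketch only, not part of the driver: the buffer tag built in
 * lpfc_sli_hbqbuf_fill_hbqs() above packs the HBQ number into the upper 16
 * bits and a per-HBQ buffer index into the lower 16 bits, which is why the
 * find/free paths later recover the HBQ number with "tag >> 16".  Helper
 * names are hypothetical.
 */
#if 0
static inline uint32_t example_hbq_tag(uint32_t hbqno, uint32_t index)
{
	return (hbqno << 16) | (index & 0xffff);
}

static inline uint32_t example_hbq_tag_to_hbqno(uint32_t tag)
{
	return tag >> 16;
}
#endif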
1833
James Smarte59058c2008-08-24 21:49:00 -04001834/**
James Smart3621a712009-04-06 18:47:14 -04001835 * lpfc_sli_hbqbuf_add_hbqs - Post more HBQ buffers to firmware
James Smarte59058c2008-08-24 21:49:00 -04001836 * @phba: Pointer to HBA context object.
1837 * @qno: HBQ number.
1838 *
1839 * This function posts more buffers to the HBQ. This function
James Smartd7c255b2008-08-24 21:50:00 -04001840 * is called with no lock held. The function returns the number of HBQ entries
1841 * successfully allocated.
James Smarte59058c2008-08-24 21:49:00 -04001842 **/
James Smarted957682007-06-17 19:56:37 -05001843int
James Smart92d7f7b2007-06-17 19:56:38 -05001844lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
James Smarted957682007-06-17 19:56:37 -05001845{
James Smartdef9c7a2009-12-21 17:02:28 -05001846 if (phba->sli_rev == LPFC_SLI_REV4)
1847 return 0;
1848 else
1849 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
1850 lpfc_hbq_defs[qno]->add_count);
James Smarted957682007-06-17 19:56:37 -05001851}
1852
James Smarte59058c2008-08-24 21:49:00 -04001853/**
James Smart3621a712009-04-06 18:47:14 -04001854 * lpfc_sli_hbqbuf_init_hbqs - Post initial buffers to the HBQ
James Smarte59058c2008-08-24 21:49:00 -04001855 * @phba: Pointer to HBA context object.
1856 * @qno: HBQ queue number.
1857 *
1858 * This function is called from SLI initialization code path with
1859 * no lock held to post initial HBQ buffers to firmware. The
James Smartd7c255b2008-08-24 21:50:00 -04001860 * function returns the number of HBQ entries successfully allocated.
James Smarte59058c2008-08-24 21:49:00 -04001861 **/
Adrian Bunka6ababd2007-11-05 18:07:33 +01001862static int
James Smart92d7f7b2007-06-17 19:56:38 -05001863lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
James Smarted957682007-06-17 19:56:37 -05001864{
James Smartdef9c7a2009-12-21 17:02:28 -05001865 if (phba->sli_rev == LPFC_SLI_REV4)
1866 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
James Smart73d91e52011-10-10 21:32:10 -04001867 lpfc_hbq_defs[qno]->entry_count);
James Smartdef9c7a2009-12-21 17:02:28 -05001868 else
1869 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
1870 lpfc_hbq_defs[qno]->init_count);
James Smarted957682007-06-17 19:56:37 -05001871}
1872
James Smarte59058c2008-08-24 21:49:00 -04001873/**
James Smart3772a992009-05-22 14:50:54 -04001874 * lpfc_sli_hbqbuf_get - Remove the first hbq off of an hbq list
1875 * @rb_list: Pointer to the hbq buffer list to remove the first buffer from.
1877 *
1878 * This function removes the first hbq buffer on an hbq list and returns a
1879 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
1880 **/
1881static struct hbq_dmabuf *
1882lpfc_sli_hbqbuf_get(struct list_head *rb_list)
1883{
1884 struct lpfc_dmabuf *d_buf;
1885
1886 list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list);
1887 if (!d_buf)
1888 return NULL;
1889 return container_of(d_buf, struct hbq_dmabuf, dbuf);
1890}
1891
1892/**
James Smart3621a712009-04-06 18:47:14 -04001893 * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag
James Smarte59058c2008-08-24 21:49:00 -04001894 * @phba: Pointer to HBA context object.
1895 * @tag: Tag of the hbq buffer.
1896 *
1897 * This function is called with hbalock held. This function searches
1898 * for the hbq buffer associated with the given tag in the hbq buffer
1899 * list. If it finds the hbq buffer, it returns the hbq_buffer, otherwise
1900 * it returns NULL.
1901 **/
Adrian Bunka6ababd2007-11-05 18:07:33 +01001902static struct hbq_dmabuf *
James Smarted957682007-06-17 19:56:37 -05001903lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
1904{
James Smart92d7f7b2007-06-17 19:56:38 -05001905 struct lpfc_dmabuf *d_buf;
1906 struct hbq_dmabuf *hbq_buf;
James Smart51ef4c22007-08-02 11:10:31 -04001907 uint32_t hbqno;
James Smarted957682007-06-17 19:56:37 -05001908
James Smart51ef4c22007-08-02 11:10:31 -04001909 hbqno = tag >> 16;
Jesper Juhla0a74e452007-08-09 20:47:15 +02001910 if (hbqno >= LPFC_MAX_HBQS)
James Smart51ef4c22007-08-02 11:10:31 -04001911 return NULL;
1912
James Smart3772a992009-05-22 14:50:54 -04001913 spin_lock_irq(&phba->hbalock);
James Smart51ef4c22007-08-02 11:10:31 -04001914 list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
James Smart92d7f7b2007-06-17 19:56:38 -05001915 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
James Smart51ef4c22007-08-02 11:10:31 -04001916 if (hbq_buf->tag == tag) {
James Smart3772a992009-05-22 14:50:54 -04001917 spin_unlock_irq(&phba->hbalock);
James Smart92d7f7b2007-06-17 19:56:38 -05001918 return hbq_buf;
James Smarted957682007-06-17 19:56:37 -05001919 }
1920 }
James Smart3772a992009-05-22 14:50:54 -04001921 spin_unlock_irq(&phba->hbalock);
James Smart92d7f7b2007-06-17 19:56:38 -05001922 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT,
James Smarte8b62012007-08-02 11:10:09 -04001923 "1803 Bad hbq tag. Data: x%x x%x\n",
James Smarta8adb832007-10-27 13:37:53 -04001924 tag, phba->hbqs[tag >> 16].buffer_count);
James Smart92d7f7b2007-06-17 19:56:38 -05001925 return NULL;
James Smarted957682007-06-17 19:56:37 -05001926}
1927
James Smarte59058c2008-08-24 21:49:00 -04001928/**
James Smart3621a712009-04-06 18:47:14 -04001929 * lpfc_sli_free_hbq - Give back the hbq buffer to firmware
James Smarte59058c2008-08-24 21:49:00 -04001930 * @phba: Pointer to HBA context object.
1931 * @hbq_buffer: Pointer to HBQ buffer.
1932 *
1933 * This function is called with the hbalock held. This function gives back
1934 * the hbq buffer to firmware. If the HBQ does not have space to
1935 * post the buffer, it will free the buffer.
1936 **/
James Smarted957682007-06-17 19:56:37 -05001937void
James Smart51ef4c22007-08-02 11:10:31 -04001938lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
James Smarted957682007-06-17 19:56:37 -05001939{
1940 uint32_t hbqno;
1941
James Smart51ef4c22007-08-02 11:10:31 -04001942 if (hbq_buffer) {
1943 hbqno = hbq_buffer->tag >> 16;
James Smart3772a992009-05-22 14:50:54 -04001944 if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
James Smart51ef4c22007-08-02 11:10:31 -04001945 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
James Smarted957682007-06-17 19:56:37 -05001946 }
1947}
1948
James Smarte59058c2008-08-24 21:49:00 -04001949/**
James Smart3621a712009-04-06 18:47:14 -04001950 * lpfc_sli_chk_mbx_command - Check if the mailbox is a legitimate mailbox
James Smarte59058c2008-08-24 21:49:00 -04001951 * @mbxCommand: mailbox command code.
1952 *
1953 * This function is called by the mailbox event handler function to verify
1954 * that the completed mailbox command is a legitimate mailbox command. If the
1955 * completed mailbox is not known to the function, it will return MBX_SHUTDOWN
1956 * and the mailbox event handler will take the HBA offline.
1957 **/
dea31012005-04-17 16:05:31 -05001958static int
1959lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
1960{
1961 uint8_t ret;
1962
1963 switch (mbxCommand) {
1964 case MBX_LOAD_SM:
1965 case MBX_READ_NV:
1966 case MBX_WRITE_NV:
James Smarta8adb832007-10-27 13:37:53 -04001967 case MBX_WRITE_VPARMS:
dea31012005-04-17 16:05:31 -05001968 case MBX_RUN_BIU_DIAG:
1969 case MBX_INIT_LINK:
1970 case MBX_DOWN_LINK:
1971 case MBX_CONFIG_LINK:
1972 case MBX_CONFIG_RING:
1973 case MBX_RESET_RING:
1974 case MBX_READ_CONFIG:
1975 case MBX_READ_RCONFIG:
1976 case MBX_READ_SPARM:
1977 case MBX_READ_STATUS:
1978 case MBX_READ_RPI:
1979 case MBX_READ_XRI:
1980 case MBX_READ_REV:
1981 case MBX_READ_LNK_STAT:
1982 case MBX_REG_LOGIN:
1983 case MBX_UNREG_LOGIN:
dea31012005-04-17 16:05:31 -05001984 case MBX_CLEAR_LA:
1985 case MBX_DUMP_MEMORY:
1986 case MBX_DUMP_CONTEXT:
1987 case MBX_RUN_DIAGS:
1988 case MBX_RESTART:
1989 case MBX_UPDATE_CFG:
1990 case MBX_DOWN_LOAD:
1991 case MBX_DEL_LD_ENTRY:
1992 case MBX_RUN_PROGRAM:
1993 case MBX_SET_MASK:
James Smart09372822008-01-11 01:52:54 -05001994 case MBX_SET_VARIABLE:
dea31012005-04-17 16:05:31 -05001995 case MBX_UNREG_D_ID:
Jamie Wellnitz41415862006-02-28 19:25:27 -05001996 case MBX_KILL_BOARD:
dea31012005-04-17 16:05:31 -05001997 case MBX_CONFIG_FARP:
Jamie Wellnitz41415862006-02-28 19:25:27 -05001998 case MBX_BEACON:
dea31012005-04-17 16:05:31 -05001999 case MBX_LOAD_AREA:
2000 case MBX_RUN_BIU_DIAG64:
2001 case MBX_CONFIG_PORT:
2002 case MBX_READ_SPARM64:
2003 case MBX_READ_RPI64:
2004 case MBX_REG_LOGIN64:
James Smart76a95d72010-11-20 23:11:48 -05002005 case MBX_READ_TOPOLOGY:
James Smart09372822008-01-11 01:52:54 -05002006 case MBX_WRITE_WWN:
dea31012005-04-17 16:05:31 -05002007 case MBX_SET_DEBUG:
2008 case MBX_LOAD_EXP_ROM:
James Smart57127f12007-10-27 13:37:05 -04002009 case MBX_ASYNCEVT_ENABLE:
James Smart92d7f7b2007-06-17 19:56:38 -05002010 case MBX_REG_VPI:
2011 case MBX_UNREG_VPI:
James Smart858c9f62007-06-17 19:56:39 -05002012 case MBX_HEARTBEAT:
James Smart84774a42008-08-24 21:50:06 -04002013 case MBX_PORT_CAPABILITIES:
2014 case MBX_PORT_IOV_CONTROL:
James Smart04c68492009-05-22 14:52:52 -04002015 case MBX_SLI4_CONFIG:
2016 case MBX_SLI4_REQ_FTRS:
2017 case MBX_REG_FCFI:
2018 case MBX_UNREG_FCFI:
2019 case MBX_REG_VFI:
2020 case MBX_UNREG_VFI:
2021 case MBX_INIT_VPI:
2022 case MBX_INIT_VFI:
2023 case MBX_RESUME_RPI:
James Smartc7495932010-04-06 15:05:28 -04002024 case MBX_READ_EVENT_LOG_STATUS:
2025 case MBX_READ_EVENT_LOG:
James Smartdcf2a4e2010-09-29 11:18:53 -04002026 case MBX_SECURITY_MGMT:
2027 case MBX_AUTH_PORT:
dea31012005-04-17 16:05:31 -05002028 ret = mbxCommand;
2029 break;
2030 default:
2031 ret = MBX_SHUTDOWN;
2032 break;
2033 }
James Smart2e0fef82007-06-17 19:56:36 -05002034 return ret;
dea31012005-04-17 16:05:31 -05002035}
James Smarte59058c2008-08-24 21:49:00 -04002036
2037/**
James Smart3621a712009-04-06 18:47:14 -04002038 * lpfc_sli_wake_mbox_wait - lpfc_sli_issue_mbox_wait mbox completion handler
James Smarte59058c2008-08-24 21:49:00 -04002039 * @phba: Pointer to HBA context object.
2040 * @pmboxq: Pointer to mailbox command.
2041 *
2042 * This is completion handler function for mailbox commands issued from
2043 * lpfc_sli_issue_mbox_wait function. This function is called by the
2044 * mailbox event handler function with no lock held. This function
2045 * will wake up the thread waiting on the wait queue pointed to by context1
2046 * of the mailbox.
2047 **/
James Smart04c68492009-05-22 14:52:52 -04002048void
James Smart2e0fef82007-06-17 19:56:36 -05002049lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
dea31012005-04-17 16:05:31 -05002050{
2051 wait_queue_head_t *pdone_q;
James Smart858c9f62007-06-17 19:56:39 -05002052 unsigned long drvr_flag;
dea31012005-04-17 16:05:31 -05002053
2054 /*
2055 * If pdone_q is empty, the driver thread gave up waiting and
2056 * continued running.
2057 */
James Smart7054a602007-04-25 09:52:34 -04002058 pmboxq->mbox_flag |= LPFC_MBX_WAKE;
James Smart858c9f62007-06-17 19:56:39 -05002059 spin_lock_irqsave(&phba->hbalock, drvr_flag);
dea31012005-04-17 16:05:31 -05002060 pdone_q = (wait_queue_head_t *) pmboxq->context1;
2061 if (pdone_q)
2062 wake_up_interruptible(pdone_q);
James Smart858c9f62007-06-17 19:56:39 -05002063 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
dea31012005-04-17 16:05:31 -05002064 return;
2065}
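
/*
 * Illustrative sketch only, not part of the driver: the waiter side that
 * pairs with lpfc_sli_wake_mbox_wait() above.  The synchronous mailbox path
 * stores a wait queue head in pmboxq->context1, issues the command with this
 * completion handler, and sleeps until LPFC_MBX_WAKE is set.  The helper
 * name example_issue_mbox_and_wait and the fixed timeout handling are
 * hypothetical simplifications.
 */
#if 0
static int example_issue_mbox_and_wait(struct lpfc_hba *phba,
				       LPFC_MBOXQ_t *pmboxq,
				       unsigned long timeout_s)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);

	pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;
	pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
	pmboxq->context1 = &done_q;

	if (lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT) == MBX_NOT_FINISHED)
		return -EIO;

	wait_event_interruptible_timeout(done_q,
					 pmboxq->mbox_flag & LPFC_MBX_WAKE,
					 timeout_s * HZ);

	return (pmboxq->mbox_flag & LPFC_MBX_WAKE) ? 0 : -ETIMEDOUT;
}
#endif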
2066
James Smarte59058c2008-08-24 21:49:00 -04002067
2068/**
James Smart3621a712009-04-06 18:47:14 -04002069 * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler
James Smarte59058c2008-08-24 21:49:00 -04002070 * @phba: Pointer to HBA context object.
2071 * @pmb: Pointer to mailbox object.
2072 *
2073 * This function is the default mailbox completion handler. It
2074 * frees the memory resources associated with the completed mailbox
2075 * command. If the completed command is a REG_LOGIN mailbox command,
2076 * this function will issue a UREG_LOGIN to re-claim the RPI.
2077 **/
dea31012005-04-17 16:05:31 -05002078void
James Smart2e0fef82007-06-17 19:56:36 -05002079lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
dea31012005-04-17 16:05:31 -05002080{
James Smartd439d282010-09-29 11:18:45 -04002081 struct lpfc_vport *vport = pmb->vport;
dea31012005-04-17 16:05:31 -05002082 struct lpfc_dmabuf *mp;
James Smartd439d282010-09-29 11:18:45 -04002083 struct lpfc_nodelist *ndlp;
James Smart5af5eee2010-10-22 11:06:38 -04002084 struct Scsi_Host *shost;
James Smart04c68492009-05-22 14:52:52 -04002085 uint16_t rpi, vpi;
James Smart7054a602007-04-25 09:52:34 -04002086 int rc;
2087
dea31012005-04-17 16:05:31 -05002088 mp = (struct lpfc_dmabuf *) (pmb->context1);
James Smart7054a602007-04-25 09:52:34 -04002089
dea31012005-04-17 16:05:31 -05002090 if (mp) {
2091 lpfc_mbuf_free(phba, mp->virt, mp->phys);
2092 kfree(mp);
2093 }
James Smart7054a602007-04-25 09:52:34 -04002094
2095 /*
2096 * If a REG_LOGIN succeeded after the node was destroyed or the node
2097 * is in re-discovery, the driver needs to clean up the RPI.
2098 */
James Smart2e0fef82007-06-17 19:56:36 -05002099 if (!(phba->pport->load_flag & FC_UNLOADING) &&
James Smart04c68492009-05-22 14:52:52 -04002100 pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
2101 !pmb->u.mb.mbxStatus) {
2102 rpi = pmb->u.mb.un.varWords[0];
James Smart6d368e52011-05-24 11:44:12 -04002103 vpi = pmb->u.mb.un.varRegLogin.vpi;
James Smart04c68492009-05-22 14:52:52 -04002104 lpfc_unreg_login(phba, vpi, rpi, pmb);
James Smart92d7f7b2007-06-17 19:56:38 -05002105 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
James Smart7054a602007-04-25 09:52:34 -04002106 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
2107 if (rc != MBX_NOT_FINISHED)
2108 return;
2109 }
2110
James Smart695a8142010-01-26 23:08:03 -05002111 if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) &&
2112 !(phba->pport->load_flag & FC_UNLOADING) &&
2113 !pmb->u.mb.mbxStatus) {
James Smart5af5eee2010-10-22 11:06:38 -04002114 shost = lpfc_shost_from_vport(vport);
2115 spin_lock_irq(shost->host_lock);
2116 vport->vpi_state |= LPFC_VPI_REGISTERED;
2117 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
2118 spin_unlock_irq(shost->host_lock);
James Smart695a8142010-01-26 23:08:03 -05002119 }
2120
James Smartd439d282010-09-29 11:18:45 -04002121 if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
2122 ndlp = (struct lpfc_nodelist *)pmb->context2;
2123 lpfc_nlp_put(ndlp);
2124 pmb->context2 = NULL;
2125 }
2126
James Smartdcf2a4e2010-09-29 11:18:53 -04002127 /* Check security permission status on INIT_LINK mailbox command */
2128 if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) &&
2129 (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
2130 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
2131 "2860 SLI authentication is required "
2132 "for INIT_LINK but has not done yet\n");
2133
James Smart04c68492009-05-22 14:52:52 -04002134 if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
2135 lpfc_sli4_mbox_cmd_free(phba, pmb);
2136 else
2137 mempool_free(pmb, phba->mbox_mem_pool);
dea31012005-04-17 16:05:31 -05002138}
2139
James Smarte59058c2008-08-24 21:49:00 -04002140/**
James Smart3621a712009-04-06 18:47:14 -04002141 * lpfc_sli_handle_mb_event - Handle mailbox completions from firmware
James Smarte59058c2008-08-24 21:49:00 -04002142 * @phba: Pointer to HBA context object.
2143 *
2144 * This function is called with no lock held. This function processes all
2145 * the completed mailbox commands and gives it to upper layers. The interrupt
2146 * service routine processes mailbox completion interrupt and adds completed
2147 * mailbox commands to the mboxq_cmpl queue and signals the worker thread.
2148 * Worker thread call lpfc_sli_handle_mb_event, which will return the
2149 * completed mailbox commands in mboxq_cmpl queue to the upper layers. This
2150 * function returns the mailbox commands to the upper layer by calling the
2151 * completion handler function of each mailbox.
2152 **/
dea31012005-04-17 16:05:31 -05002153int
James Smart2e0fef82007-06-17 19:56:36 -05002154lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
dea31012005-04-17 16:05:31 -05002155{
James Smart92d7f7b2007-06-17 19:56:38 -05002156 MAILBOX_t *pmbox;
dea31012005-04-17 16:05:31 -05002157 LPFC_MBOXQ_t *pmb;
James Smart92d7f7b2007-06-17 19:56:38 -05002158 int rc;
2159 LIST_HEAD(cmplq);
dea31012005-04-17 16:05:31 -05002160
2161 phba->sli.slistat.mbox_event++;
2162
James Smart92d7f7b2007-06-17 19:56:38 -05002163 /* Get all completed mailbox buffers into the cmplq */
2164 spin_lock_irq(&phba->hbalock);
2165 list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
2166 spin_unlock_irq(&phba->hbalock);
2167
dea31012005-04-17 16:05:31 -05002168 /* Get a Mailbox buffer to setup mailbox commands for callback */
James Smart92d7f7b2007-06-17 19:56:38 -05002169 do {
2170 list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
2171 if (pmb == NULL)
2172 break;
2173
James Smart04c68492009-05-22 14:52:52 -04002174 pmbox = &pmb->u.mb;
dea31012005-04-17 16:05:31 -05002175
James Smart858c9f62007-06-17 19:56:39 -05002176 if (pmbox->mbxCommand != MBX_HEARTBEAT) {
2177 if (pmb->vport) {
2178 lpfc_debugfs_disc_trc(pmb->vport,
2179 LPFC_DISC_TRC_MBOX_VPORT,
2180 "MBOX cmpl vport: cmd:x%x mb:x%x x%x",
2181 (uint32_t)pmbox->mbxCommand,
2182 pmbox->un.varWords[0],
2183 pmbox->un.varWords[1]);
2184 }
2185 else {
2186 lpfc_debugfs_disc_trc(phba->pport,
2187 LPFC_DISC_TRC_MBOX,
2188 "MBOX cmpl: cmd:x%x mb:x%x x%x",
2189 (uint32_t)pmbox->mbxCommand,
2190 pmbox->un.varWords[0],
2191 pmbox->un.varWords[1]);
2192 }
2193 }
2194
dea31012005-04-17 16:05:31 -05002195 /*
2196 * It is a fatal error if unknown mbox command completion.
2197 */
2198 if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
2199 MBX_SHUTDOWN) {
André Goddard Rosaaf901ca2009-11-14 13:09:05 -02002200 /* Unknown mailbox command compl */
James Smart92d7f7b2007-06-17 19:56:38 -05002201 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
James Smarte8b62012007-08-02 11:10:09 -04002202 "(%d):0323 Unknown Mailbox command "
James Smarta183a152011-10-10 21:32:43 -04002203 "x%x (x%x/x%x) Cmpl\n",
James Smart92d7f7b2007-06-17 19:56:38 -05002204 pmb->vport ? pmb->vport->vpi : 0,
James Smart04c68492009-05-22 14:52:52 -04002205 pmbox->mbxCommand,
James Smarta183a152011-10-10 21:32:43 -04002206 lpfc_sli_config_mbox_subsys_get(phba,
2207 pmb),
2208 lpfc_sli_config_mbox_opcode_get(phba,
2209 pmb));
James Smart2e0fef82007-06-17 19:56:36 -05002210 phba->link_state = LPFC_HBA_ERROR;
dea31012005-04-17 16:05:31 -05002211 phba->work_hs = HS_FFER3;
2212 lpfc_handle_eratt(phba);
James Smart92d7f7b2007-06-17 19:56:38 -05002213 continue;
dea31012005-04-17 16:05:31 -05002214 }
2215
dea31012005-04-17 16:05:31 -05002216 if (pmbox->mbxStatus) {
2217 phba->sli.slistat.mbox_stat_err++;
2218 if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
2219 /* Mbox cmd cmpl error - RETRYing */
James Smart92d7f7b2007-06-17 19:56:38 -05002220 lpfc_printf_log(phba, KERN_INFO,
James Smarta183a152011-10-10 21:32:43 -04002221 LOG_MBOX | LOG_SLI,
2222 "(%d):0305 Mbox cmd cmpl "
2223 "error - RETRYing Data: x%x "
2224 "(x%x/x%x) x%x x%x x%x\n",
2225 pmb->vport ? pmb->vport->vpi : 0,
2226 pmbox->mbxCommand,
2227 lpfc_sli_config_mbox_subsys_get(phba,
2228 pmb),
2229 lpfc_sli_config_mbox_opcode_get(phba,
2230 pmb),
2231 pmbox->mbxStatus,
2232 pmbox->un.varWords[0],
2233 pmb->vport->port_state);
dea31012005-04-17 16:05:31 -05002234 pmbox->mbxStatus = 0;
2235 pmbox->mbxOwner = OWN_HOST;
dea31012005-04-17 16:05:31 -05002236 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
James Smart04c68492009-05-22 14:52:52 -04002237 if (rc != MBX_NOT_FINISHED)
James Smart92d7f7b2007-06-17 19:56:38 -05002238 continue;
dea31012005-04-17 16:05:31 -05002239 }
2240 }
2241
2242 /* Mailbox cmd <cmd> Cmpl <cmpl> */
James Smart92d7f7b2007-06-17 19:56:38 -05002243 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
James Smarta183a152011-10-10 21:32:43 -04002244 "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl x%p "
dea31012005-04-17 16:05:31 -05002245 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n",
James Smart92d7f7b2007-06-17 19:56:38 -05002246 pmb->vport ? pmb->vport->vpi : 0,
dea31012005-04-17 16:05:31 -05002247 pmbox->mbxCommand,
James Smarta183a152011-10-10 21:32:43 -04002248 lpfc_sli_config_mbox_subsys_get(phba, pmb),
2249 lpfc_sli_config_mbox_opcode_get(phba, pmb),
dea31012005-04-17 16:05:31 -05002250 pmb->mbox_cmpl,
2251 *((uint32_t *) pmbox),
2252 pmbox->un.varWords[0],
2253 pmbox->un.varWords[1],
2254 pmbox->un.varWords[2],
2255 pmbox->un.varWords[3],
2256 pmbox->un.varWords[4],
2257 pmbox->un.varWords[5],
2258 pmbox->un.varWords[6],
2259 pmbox->un.varWords[7]);
2260
James Smart92d7f7b2007-06-17 19:56:38 -05002261 if (pmb->mbox_cmpl)
dea31012005-04-17 16:05:31 -05002262 pmb->mbox_cmpl(phba,pmb);
James Smart92d7f7b2007-06-17 19:56:38 -05002263 } while (1);
James Smart2e0fef82007-06-17 19:56:36 -05002264 return 0;
dea31012005-04-17 16:05:31 -05002265}
James Smart92d7f7b2007-06-17 19:56:38 -05002266
James Smarte59058c2008-08-24 21:49:00 -04002267/**
James Smart3621a712009-04-06 18:47:14 -04002268 * lpfc_sli_get_buff - Get the buffer associated with the buffer tag
James Smarte59058c2008-08-24 21:49:00 -04002269 * @phba: Pointer to HBA context object.
2270 * @pring: Pointer to driver SLI ring object.
2271 * @tag: buffer tag.
2272 *
2273 * This function is called with no lock held. When the QUE_BUFTAG_BIT
2274 * is set in the tag, the buffer was posted for a particular exchange and
2275 * the function will return the buffer without replacing it.
2276 * If the buffer is for unsolicited ELS or CT traffic, this function
2277 * returns the buffer and also posts another buffer to the firmware.
2278 **/
James Smart76bb24e2007-10-27 13:38:00 -04002279static struct lpfc_dmabuf *
2280lpfc_sli_get_buff(struct lpfc_hba *phba,
James Smart9f1e1b52008-12-04 22:39:40 -05002281 struct lpfc_sli_ring *pring,
2282 uint32_t tag)
James Smart76bb24e2007-10-27 13:38:00 -04002283{
James Smart9f1e1b52008-12-04 22:39:40 -05002284 struct hbq_dmabuf *hbq_entry;
2285
James Smart76bb24e2007-10-27 13:38:00 -04002286 if (tag & QUE_BUFTAG_BIT)
2287 return lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
James Smart9f1e1b52008-12-04 22:39:40 -05002288 hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
2289 if (!hbq_entry)
2290 return NULL;
2291 return &hbq_entry->dbuf;
James Smart76bb24e2007-10-27 13:38:00 -04002292}
James Smart57127f12007-10-27 13:37:05 -04002293
James Smart3772a992009-05-22 14:50:54 -04002294/**
2295 * lpfc_complete_unsol_iocb - Complete an unsolicited sequence
2296 * @phba: Pointer to HBA context object.
2297 * @pring: Pointer to driver SLI ring object.
2298 * @saveq: Pointer to the iocbq struct representing the sequence starting frame.
2299 * @fch_r_ctl: the r_ctl for the first frame of the sequence.
2300 * @fch_type: the type for the first frame of the sequence.
2301 *
2302 * This function is called with no lock held. This function uses the r_ctl and
2303 * type of the received sequence to find the correct callback function to call
2304 * to process the sequence.
2305 **/
2306static int
2307lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2308 struct lpfc_iocbq *saveq, uint32_t fch_r_ctl,
2309 uint32_t fch_type)
2310{
2311 int i;
2312
2313 /* unSolicited Responses */
2314 if (pring->prt[0].profile) {
2315 if (pring->prt[0].lpfc_sli_rcv_unsol_event)
2316 (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
2317 saveq);
2318 return 1;
2319 }
2320 /* We must search, based on rctl / type, for the right routine */
2322 for (i = 0; i < pring->num_mask; i++) {
2323 if ((pring->prt[i].rctl == fch_r_ctl) &&
2324 (pring->prt[i].type == fch_type)) {
2325 if (pring->prt[i].lpfc_sli_rcv_unsol_event)
2326 (pring->prt[i].lpfc_sli_rcv_unsol_event)
2327 (phba, pring, saveq);
2328 return 1;
2329 }
2330 }
2331 return 0;
2332}
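
/*
 * Illustrative sketch only, not part of the driver: the prt[] table walked
 * above is populated during SLI ring setup with one entry per (rctl, type)
 * pair and the unsolicited-event handler for it, roughly as below.  The
 * handler name example_rcv_unsol_event is hypothetical.
 */
#if 0
	pring->num_mask = 1;
	pring->prt[0].profile = 0;
	pring->prt[0].rctl = FC_RCTL_ELS_REQ;
	pring->prt[0].type = FC_TYPE_ELS;
	pring->prt[0].lpfc_sli_rcv_unsol_event = example_rcv_unsol_event;
#endif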
James Smarte59058c2008-08-24 21:49:00 -04002333
2334/**
James Smart3621a712009-04-06 18:47:14 -04002335 * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler
James Smarte59058c2008-08-24 21:49:00 -04002336 * @phba: Pointer to HBA context object.
2337 * @pring: Pointer to driver SLI ring object.
2338 * @saveq: Pointer to the unsolicited iocb.
2339 *
2340 * This function is called with no lock held by the ring event handler
2341 * when there is an unsolicited iocb posted to the response ring by the
2342 * firmware. This function gets the buffer associated with the iocbs
2343 * and calls the event handler for the ring. This function handles both
2344 * qring buffers and hbq buffers.
2345 * When the function returns 1, the caller can free the iocb object, otherwise
2346 * upper layer functions will free the iocb objects.
2347 **/
dea31012005-04-17 16:05:31 -05002348static int
2349lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2350 struct lpfc_iocbq *saveq)
2351{
2352 IOCB_t * irsp;
2353 WORD5 * w5p;
2354 uint32_t Rctl, Type;
James Smart3772a992009-05-22 14:50:54 -04002355 uint32_t match;
James Smart76bb24e2007-10-27 13:38:00 -04002356 struct lpfc_iocbq *iocbq;
James Smart3163f722008-02-08 18:50:25 -05002357 struct lpfc_dmabuf *dmzbuf;
dea31012005-04-17 16:05:31 -05002358
2359 match = 0;
2360 irsp = &(saveq->iocb);
James Smart57127f12007-10-27 13:37:05 -04002361
2362 if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
2363 if (pring->lpfc_sli_rcv_async_status)
2364 pring->lpfc_sli_rcv_async_status(phba, pring, saveq);
2365 else
2366 lpfc_printf_log(phba,
2367 KERN_WARNING,
2368 LOG_SLI,
2369 "0316 Ring %d handler: unexpected "
2370 "ASYNC_STATUS iocb received evt_code "
2371 "0x%x\n",
2372 pring->ringno,
2373 irsp->un.asyncstat.evt_code);
2374 return 1;
2375 }
2376
James Smart3163f722008-02-08 18:50:25 -05002377 if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) &&
2378 (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) {
2379 if (irsp->ulpBdeCount > 0) {
2380 dmzbuf = lpfc_sli_get_buff(phba, pring,
2381 irsp->un.ulpWord[3]);
2382 lpfc_in_buf_free(phba, dmzbuf);
2383 }
2384
2385 if (irsp->ulpBdeCount > 1) {
2386 dmzbuf = lpfc_sli_get_buff(phba, pring,
2387 irsp->unsli3.sli3Words[3]);
2388 lpfc_in_buf_free(phba, dmzbuf);
2389 }
2390
2391 if (irsp->ulpBdeCount > 2) {
2392 dmzbuf = lpfc_sli_get_buff(phba, pring,
2393 irsp->unsli3.sli3Words[7]);
2394 lpfc_in_buf_free(phba, dmzbuf);
2395 }
2396
2397 return 1;
2398 }
2399
James Smart92d7f7b2007-06-17 19:56:38 -05002400 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
James Smart76bb24e2007-10-27 13:38:00 -04002401 if (irsp->ulpBdeCount != 0) {
2402 saveq->context2 = lpfc_sli_get_buff(phba, pring,
James Smart92d7f7b2007-06-17 19:56:38 -05002403 irsp->un.ulpWord[3]);
James Smart76bb24e2007-10-27 13:38:00 -04002404 if (!saveq->context2)
2405 lpfc_printf_log(phba,
2406 KERN_ERR,
2407 LOG_SLI,
2408 "0341 Ring %d Cannot find buffer for "
2409 "an unsolicited iocb. tag 0x%x\n",
2410 pring->ringno,
2411 irsp->un.ulpWord[3]);
James Smart76bb24e2007-10-27 13:38:00 -04002412 }
2413 if (irsp->ulpBdeCount == 2) {
2414 saveq->context3 = lpfc_sli_get_buff(phba, pring,
James Smart51ef4c22007-08-02 11:10:31 -04002415 irsp->unsli3.sli3Words[7]);
James Smart76bb24e2007-10-27 13:38:00 -04002416 if (!saveq->context3)
2417 lpfc_printf_log(phba,
2418 KERN_ERR,
2419 LOG_SLI,
2420 "0342 Ring %d Cannot find buffer for an"
2421 " unsolicited iocb. tag 0x%x\n",
2422 pring->ringno,
2423 irsp->unsli3.sli3Words[7]);
2424 }
2425 list_for_each_entry(iocbq, &saveq->list, list) {
James Smart76bb24e2007-10-27 13:38:00 -04002426 irsp = &(iocbq->iocb);
James Smart76bb24e2007-10-27 13:38:00 -04002427 if (irsp->ulpBdeCount != 0) {
2428 iocbq->context2 = lpfc_sli_get_buff(phba, pring,
2429 irsp->un.ulpWord[3]);
James Smart9c2face2008-01-11 01:53:18 -05002430 if (!iocbq->context2)
James Smart76bb24e2007-10-27 13:38:00 -04002431 lpfc_printf_log(phba,
2432 KERN_ERR,
2433 LOG_SLI,
2434 "0343 Ring %d Cannot find "
2435 "buffer for an unsolicited iocb"
2436 ". tag 0x%x\n", pring->ringno,
2437 irsp->un.ulpWord[3]);
2438 }
2439 if (irsp->ulpBdeCount == 2) {
2440 iocbq->context3 = lpfc_sli_get_buff(phba, pring,
2441 irsp->unsli3.sli3Words[7]);
James Smart9c2face2008-01-11 01:53:18 -05002442 if (!iocbq->context3)
James Smart76bb24e2007-10-27 13:38:00 -04002443 lpfc_printf_log(phba,
2444 KERN_ERR,
2445 LOG_SLI,
2446 "0344 Ring %d Cannot find "
2447 "buffer for an unsolicited "
2448 "iocb. tag 0x%x\n",
2449 pring->ringno,
2450 irsp->unsli3.sli3Words[7]);
2451 }
2452 }
James Smart92d7f7b2007-06-17 19:56:38 -05002453 }
James Smart9c2face2008-01-11 01:53:18 -05002454 if (irsp->ulpBdeCount != 0 &&
2455 (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX ||
2456 irsp->ulpStatus == IOSTAT_INTERMED_RSP)) {
2457 int found = 0;
2458
2459 /* search continue save q for same XRI */
2460 list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) {
James Smart7851fe22011-07-22 18:36:52 -04002461 if (iocbq->iocb.unsli3.rcvsli3.ox_id ==
2462 saveq->iocb.unsli3.rcvsli3.ox_id) {
James Smart9c2face2008-01-11 01:53:18 -05002463 list_add_tail(&saveq->list, &iocbq->list);
2464 found = 1;
2465 break;
2466 }
2467 }
2468 if (!found)
2469 list_add_tail(&saveq->clist,
2470 &pring->iocb_continue_saveq);
2471 if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) {
2472 list_del_init(&iocbq->clist);
2473 saveq = iocbq;
2474 irsp = &(saveq->iocb);
2475 } else
2476 return 0;
2477 }
2478 if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) ||
2479 (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) ||
2480 (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) {
James Smart6a9c52c2009-10-02 15:16:51 -04002481 Rctl = FC_RCTL_ELS_REQ;
2482 Type = FC_TYPE_ELS;
James Smart9c2face2008-01-11 01:53:18 -05002483 } else {
2484 w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]);
2485 Rctl = w5p->hcsw.Rctl;
2486 Type = w5p->hcsw.Type;
2487
2488 /* Firmware Workaround */
2489 if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
2490 (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
2491 irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
James Smart6a9c52c2009-10-02 15:16:51 -04002492 Rctl = FC_RCTL_ELS_REQ;
2493 Type = FC_TYPE_ELS;
James Smart9c2face2008-01-11 01:53:18 -05002494 w5p->hcsw.Rctl = Rctl;
2495 w5p->hcsw.Type = Type;
2496 }
2497 }
James Smart92d7f7b2007-06-17 19:56:38 -05002498
James Smart3772a992009-05-22 14:50:54 -04002499 if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type))
James Smart92d7f7b2007-06-17 19:56:38 -05002500 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
James Smarte8b62012007-08-02 11:10:09 -04002501 "0313 Ring %d handler: unexpected Rctl x%x "
James Smart92d7f7b2007-06-17 19:56:38 -05002502 "Type x%x received\n",
James Smarte8b62012007-08-02 11:10:09 -04002503 pring->ringno, Rctl, Type);
James Smart3772a992009-05-22 14:50:54 -04002504
James Smart92d7f7b2007-06-17 19:56:38 -05002505 return 1;
dea31012005-04-17 16:05:31 -05002506}
2507
James Smarte59058c2008-08-24 21:49:00 -04002508/**
James Smart3621a712009-04-06 18:47:14 -04002509 * lpfc_sli_iocbq_lookup - Find command iocb for the given response iocb
James Smarte59058c2008-08-24 21:49:00 -04002510 * @phba: Pointer to HBA context object.
2511 * @pring: Pointer to driver SLI ring object.
2512 * @prspiocb: Pointer to response iocb object.
2513 *
2514 * This function looks up the iocbq_lookup table to get the command iocb
2515 * corresponding to the given response iocb using the iotag of the
2516 * response iocb. This function is called with the hbalock held.
2517 * This function returns the command iocb object if it finds the command
2518 * iocb else returns NULL.
2519 **/
dea31012005-04-17 16:05:31 -05002520static struct lpfc_iocbq *
James Smart2e0fef82007-06-17 19:56:36 -05002521lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
2522 struct lpfc_sli_ring *pring,
2523 struct lpfc_iocbq *prspiocb)
dea31012005-04-17 16:05:31 -05002524{
dea31012005-04-17 16:05:31 -05002525 struct lpfc_iocbq *cmd_iocb = NULL;
2526 uint16_t iotag;
2527
James Bottomley604a3e32005-10-29 10:28:33 -05002528 iotag = prspiocb->iocb.ulpIoTag;
dea31012005-04-17 16:05:31 -05002529
James Bottomley604a3e32005-10-29 10:28:33 -05002530 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
2531 cmd_iocb = phba->sli.iocbq_lookup[iotag];
James Smart92d7f7b2007-06-17 19:56:38 -05002532 list_del_init(&cmd_iocb->list);
James Smart2a9bf3d2010-06-07 15:24:45 -04002533 if (cmd_iocb->iocb_flag & LPFC_IO_ON_Q) {
2534 pring->txcmplq_cnt--;
2535 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_Q;
2536 }
James Bottomley604a3e32005-10-29 10:28:33 -05002537 return cmd_iocb;
dea31012005-04-17 16:05:31 -05002538 }
2539
dea31012005-04-17 16:05:31 -05002540 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
James Smarte8b62012007-08-02 11:10:09 -04002541 "0317 iotag x%x is out of "
James Bottomley604a3e32005-10-29 10:28:33 -05002542 "range: max iotag x%x wd0 x%x\n",
James Smarte8b62012007-08-02 11:10:09 -04002543 iotag, phba->sli.last_iotag,
James Bottomley604a3e32005-10-29 10:28:33 -05002544 *(((uint32_t *) &prspiocb->iocb) + 7));
dea31012005-04-17 16:05:31 -05002545 return NULL;
2546}
2547
James Smarte59058c2008-08-24 21:49:00 -04002548/**
James Smart3772a992009-05-22 14:50:54 -04002549 * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag
2550 * @phba: Pointer to HBA context object.
2551 * @pring: Pointer to driver SLI ring object.
2552 * @iotag: IOCB tag.
2553 *
2554 * This function looks up the iocb_lookup table to get the command iocb
2555 * corresponding to the given iotag. This function is called with the
2556 * hbalock held.
2557 * This function returns the command iocb object if it finds the command
 2558 * iocb, else it returns NULL.
2559 **/
2560static struct lpfc_iocbq *
2561lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
2562 struct lpfc_sli_ring *pring, uint16_t iotag)
2563{
2564 struct lpfc_iocbq *cmd_iocb;
2565
2566 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
2567 cmd_iocb = phba->sli.iocbq_lookup[iotag];
2568 list_del_init(&cmd_iocb->list);
James Smart2a9bf3d2010-06-07 15:24:45 -04002569 if (cmd_iocb->iocb_flag & LPFC_IO_ON_Q) {
2570 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_Q;
2571 pring->txcmplq_cnt--;
2572 }
James Smart3772a992009-05-22 14:50:54 -04002573 return cmd_iocb;
2574 }
2575
2576 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
 2577			"0372 iotag x%x is out of range: max iotag (x%x)\n",
2578 iotag, phba->sli.last_iotag);
2579 return NULL;
2580}
2581
2582/**
James Smart3621a712009-04-06 18:47:14 -04002583 * lpfc_sli_process_sol_iocb - process solicited iocb completion
James Smarte59058c2008-08-24 21:49:00 -04002584 * @phba: Pointer to HBA context object.
2585 * @pring: Pointer to driver SLI ring object.
2586 * @saveq: Pointer to the response iocb to be processed.
2587 *
2588 * This function is called by the ring event handler for non-fcp
2589 * rings when there is a new response iocb in the response ring.
2590 * The caller is not required to hold any locks. This function
2591 * gets the command iocb associated with the response iocb and
2592 * calls the completion handler for the command iocb. If there
2593 * is no completion handler, the function will free the resources
2594 * associated with command iocb. If the response iocb is for
2595 * an already aborted command iocb, the status of the completion
2596 * is changed to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED.
2597 * This function always returns 1.
2598 **/
dea31012005-04-17 16:05:31 -05002599static int
James Smart2e0fef82007-06-17 19:56:36 -05002600lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
dea31012005-04-17 16:05:31 -05002601 struct lpfc_iocbq *saveq)
2602{
James Smart2e0fef82007-06-17 19:56:36 -05002603 struct lpfc_iocbq *cmdiocbp;
dea31012005-04-17 16:05:31 -05002604 int rc = 1;
2605 unsigned long iflag;
2606
2607 /* Based on the iotag field, get the cmd IOCB from the txcmplq */
James Smart2e0fef82007-06-17 19:56:36 -05002608 spin_lock_irqsave(&phba->hbalock, iflag);
James Bottomley604a3e32005-10-29 10:28:33 -05002609 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
James Smart2e0fef82007-06-17 19:56:36 -05002610 spin_unlock_irqrestore(&phba->hbalock, iflag);
2611
dea31012005-04-17 16:05:31 -05002612 if (cmdiocbp) {
2613 if (cmdiocbp->iocb_cmpl) {
2614 /*
James Smartea2151b2008-09-07 11:52:10 -04002615			 * If an ELS command failed, send an event to the
 2616			 * mgmt application.
2617 */
2618 if (saveq->iocb.ulpStatus &&
2619 (pring->ringno == LPFC_ELS_RING) &&
2620 (cmdiocbp->iocb.ulpCommand ==
2621 CMD_ELS_REQUEST64_CR))
2622 lpfc_send_els_failure_event(phba,
2623 cmdiocbp, saveq);
2624
2625 /*
dea31012005-04-17 16:05:31 -05002626 * Post all ELS completions to the worker thread.
 2627			 * All others are passed to the completion callback.
2628 */
2629 if (pring->ringno == LPFC_ELS_RING) {
James Smart341af102010-01-26 23:07:37 -05002630 if ((phba->sli_rev < LPFC_SLI_REV4) &&
2631 (cmdiocbp->iocb_flag &
2632 LPFC_DRIVER_ABORTED)) {
2633 spin_lock_irqsave(&phba->hbalock,
2634 iflag);
James Smart07951072007-04-25 09:51:38 -04002635 cmdiocbp->iocb_flag &=
2636 ~LPFC_DRIVER_ABORTED;
James Smart341af102010-01-26 23:07:37 -05002637 spin_unlock_irqrestore(&phba->hbalock,
2638 iflag);
James Smart07951072007-04-25 09:51:38 -04002639 saveq->iocb.ulpStatus =
2640 IOSTAT_LOCAL_REJECT;
2641 saveq->iocb.un.ulpWord[4] =
2642 IOERR_SLI_ABORTED;
James Smart0ff10d42008-01-11 01:52:36 -05002643
2644 /* Firmware could still be in progress
2645 * of DMAing payload, so don't free data
2646 * buffer till after a hbeat.
2647 */
James Smart341af102010-01-26 23:07:37 -05002648 spin_lock_irqsave(&phba->hbalock,
2649 iflag);
James Smart0ff10d42008-01-11 01:52:36 -05002650 saveq->iocb_flag |= LPFC_DELAY_MEM_FREE;
James Smart341af102010-01-26 23:07:37 -05002651 spin_unlock_irqrestore(&phba->hbalock,
2652 iflag);
2653 }
James Smart0f65ff62010-02-26 14:14:23 -05002654 if (phba->sli_rev == LPFC_SLI_REV4) {
2655 if (saveq->iocb_flag &
2656 LPFC_EXCHANGE_BUSY) {
2657 /* Set cmdiocb flag for the
2658 * exchange busy so sgl (xri)
2659 * will not be released until
2660 * the abort xri is received
2661 * from hba.
2662 */
2663 spin_lock_irqsave(
2664 &phba->hbalock, iflag);
2665 cmdiocbp->iocb_flag |=
2666 LPFC_EXCHANGE_BUSY;
2667 spin_unlock_irqrestore(
2668 &phba->hbalock, iflag);
2669 }
2670 if (cmdiocbp->iocb_flag &
2671 LPFC_DRIVER_ABORTED) {
2672 /*
2673 * Clear LPFC_DRIVER_ABORTED
2674 * bit in case it was driver
2675 * initiated abort.
2676 */
2677 spin_lock_irqsave(
2678 &phba->hbalock, iflag);
2679 cmdiocbp->iocb_flag &=
2680 ~LPFC_DRIVER_ABORTED;
2681 spin_unlock_irqrestore(
2682 &phba->hbalock, iflag);
2683 cmdiocbp->iocb.ulpStatus =
2684 IOSTAT_LOCAL_REJECT;
2685 cmdiocbp->iocb.un.ulpWord[4] =
2686 IOERR_ABORT_REQUESTED;
2687 /*
 2688					 * For SLI4, irspiocb contains
 2689					 * NO_XRI in sli_xritag, so it
 2690					 * does not affect the sgl
 2691					 * (xri) release process.
2692 */
2693 saveq->iocb.ulpStatus =
2694 IOSTAT_LOCAL_REJECT;
2695 saveq->iocb.un.ulpWord[4] =
2696 IOERR_SLI_ABORTED;
2697 spin_lock_irqsave(
2698 &phba->hbalock, iflag);
2699 saveq->iocb_flag |=
2700 LPFC_DELAY_MEM_FREE;
2701 spin_unlock_irqrestore(
2702 &phba->hbalock, iflag);
2703 }
James Smart07951072007-04-25 09:51:38 -04002704 }
dea31012005-04-17 16:05:31 -05002705 }
James Smart2e0fef82007-06-17 19:56:36 -05002706 (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
James Bottomley604a3e32005-10-29 10:28:33 -05002707 } else
2708 lpfc_sli_release_iocbq(phba, cmdiocbp);
dea31012005-04-17 16:05:31 -05002709 } else {
2710 /*
2711 * Unknown initiating command based on the response iotag.
2712 * This could be the case on the ELS ring because of
2713 * lpfc_els_abort().
2714 */
2715 if (pring->ringno != LPFC_ELS_RING) {
2716 /*
2717 * Ring <ringno> handler: unexpected completion IoTag
2718 * <IoTag>
2719 */
James Smarta257bf92009-04-06 18:48:10 -04002720 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
James Smarte8b62012007-08-02 11:10:09 -04002721 "0322 Ring %d handler: "
2722 "unexpected completion IoTag x%x "
2723 "Data: x%x x%x x%x x%x\n",
2724 pring->ringno,
2725 saveq->iocb.ulpIoTag,
2726 saveq->iocb.ulpStatus,
2727 saveq->iocb.un.ulpWord[4],
2728 saveq->iocb.ulpCommand,
2729 saveq->iocb.ulpContext);
dea31012005-04-17 16:05:31 -05002730 }
2731 }
James.Smart@Emulex.Com68876922005-10-28 20:29:47 -04002732
dea31012005-04-17 16:05:31 -05002733 return rc;
2734}
2735
James Smarte59058c2008-08-24 21:49:00 -04002736/**
James Smart3621a712009-04-06 18:47:14 -04002737 * lpfc_sli_rsp_pointers_error - Response ring pointer error handler
James Smarte59058c2008-08-24 21:49:00 -04002738 * @phba: Pointer to HBA context object.
2739 * @pring: Pointer to driver SLI ring object.
2740 *
 2741 * This function is called from the iocb ring event handlers when the
 2742 * put pointer is ahead of the get pointer for a ring. This function signals
 2743 * an error attention condition to the worker thread, and the worker
 2744 * thread will transition the HBA to the offline state.
2745 **/
James Smart2e0fef82007-06-17 19:56:36 -05002746static void
2747lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
James.Smart@Emulex.Com875fbdf2005-11-29 16:32:13 -05002748{
James Smart34b02dc2008-08-24 21:49:55 -04002749 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
James.Smart@Emulex.Com875fbdf2005-11-29 16:32:13 -05002750 /*
Frederik Schwarzer025dfda2008-10-16 19:02:37 +02002751 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
James.Smart@Emulex.Com875fbdf2005-11-29 16:32:13 -05002752 * rsp ring <portRspMax>
2753 */
2754 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
James Smarte8b62012007-08-02 11:10:09 -04002755 "0312 Ring %d handler: portRspPut %d "
Frederik Schwarzer025dfda2008-10-16 19:02:37 +02002756 "is bigger than rsp ring %d\n",
James Smarte8b62012007-08-02 11:10:09 -04002757 pring->ringno, le32_to_cpu(pgp->rspPutInx),
James.Smart@Emulex.Com875fbdf2005-11-29 16:32:13 -05002758 pring->numRiocb);
2759
James Smart2e0fef82007-06-17 19:56:36 -05002760 phba->link_state = LPFC_HBA_ERROR;
James.Smart@Emulex.Com875fbdf2005-11-29 16:32:13 -05002761
2762 /*
2763 * All error attention handlers are posted to
2764 * worker thread
2765 */
2766 phba->work_ha |= HA_ERATT;
2767 phba->work_hs = HS_FFER3;
James Smart92d7f7b2007-06-17 19:56:38 -05002768
James Smart5e9d9b82008-06-14 22:52:53 -04002769 lpfc_worker_wake_up(phba);
James.Smart@Emulex.Com875fbdf2005-11-29 16:32:13 -05002770
2771 return;
2772}
2773
James Smarte59058c2008-08-24 21:49:00 -04002774/**
James Smart3621a712009-04-06 18:47:14 -04002775 * lpfc_poll_eratt - Error attention polling timer timeout handler
James Smart93996272008-08-24 21:50:30 -04002776 * @ptr: Pointer to address of HBA context object.
2777 *
2778 * This function is invoked by the Error Attention polling timer when the
2779 * timer times out. It will check the SLI Error Attention register for
2780 * possible attention events. If so, it will post an Error Attention event
2781 * and wake up worker thread to process it. Otherwise, it will set up the
2782 * Error Attention polling timer for the next poll.
2783 **/
2784void lpfc_poll_eratt(unsigned long ptr)
2785{
2786 struct lpfc_hba *phba;
2787 uint32_t eratt = 0;
2788
2789 phba = (struct lpfc_hba *)ptr;
2790
2791 /* Check chip HA register for error event */
2792 eratt = lpfc_sli_check_eratt(phba);
2793
2794 if (eratt)
2795 /* Tell the worker thread there is work to do */
2796 lpfc_worker_wake_up(phba);
2797 else
2798 /* Restart the timer for next eratt poll */
2799 mod_timer(&phba->eratt_poll, jiffies +
2800 HZ * LPFC_ERATT_POLL_INTERVAL);
2801 return;
2802}
2803
James.Smart@Emulex.Com875fbdf2005-11-29 16:32:13 -05002804
James Smarte59058c2008-08-24 21:49:00 -04002805/**
James Smart3621a712009-04-06 18:47:14 -04002806 * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring
James Smarte59058c2008-08-24 21:49:00 -04002807 * @phba: Pointer to HBA context object.
2808 * @pring: Pointer to driver SLI ring object.
2809 * @mask: Host attention register mask for this ring.
2810 *
2811 * This function is called from the interrupt context when there is a ring
2812 * event for the fcp ring. The caller does not hold any lock.
2813 * The function processes each response iocb in the response ring until it
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002814 * finds an iocb with LE bit set and chains all the iocbs up to the iocb with
James Smarte59058c2008-08-24 21:49:00 -04002815 * LE bit set. The function will call the completion handler of the command iocb
2816 * if the response iocb indicates a completion for a command iocb or it is
 2817 * an abort completion. The function will call the lpfc_sli_process_unsol_iocb
2818 * function if this is an unsolicited iocb.
dea31012005-04-17 16:05:31 -05002819 * This routine presumes LPFC_FCP_RING handling and doesn't bother
James Smart45ed1192009-10-02 15:17:02 -04002820 * to check it explicitly.
2821 */
2822int
James Smart2e0fef82007-06-17 19:56:36 -05002823lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
2824 struct lpfc_sli_ring *pring, uint32_t mask)
dea31012005-04-17 16:05:31 -05002825{
James Smart34b02dc2008-08-24 21:49:55 -04002826 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
dea31012005-04-17 16:05:31 -05002827 IOCB_t *irsp = NULL;
James.Smart@Emulex.Com87f6eaf2005-06-25 10:34:13 -04002828 IOCB_t *entry = NULL;
dea31012005-04-17 16:05:31 -05002829 struct lpfc_iocbq *cmdiocbq = NULL;
2830 struct lpfc_iocbq rspiocbq;
dea31012005-04-17 16:05:31 -05002831 uint32_t status;
2832 uint32_t portRspPut, portRspMax;
2833 int rc = 1;
2834 lpfc_iocb_type type;
2835 unsigned long iflag;
2836 uint32_t rsp_cmpl = 0;
dea31012005-04-17 16:05:31 -05002837
James Smart2e0fef82007-06-17 19:56:36 -05002838 spin_lock_irqsave(&phba->hbalock, iflag);
dea31012005-04-17 16:05:31 -05002839 pring->stats.iocb_event++;
2840
dea31012005-04-17 16:05:31 -05002841 /*
2842 * The next available response entry should never exceed the maximum
2843 * entries. If it does, treat it as an adapter hardware error.
2844 */
2845 portRspMax = pring->numRiocb;
2846 portRspPut = le32_to_cpu(pgp->rspPutInx);
2847 if (unlikely(portRspPut >= portRspMax)) {
James.Smart@Emulex.Com875fbdf2005-11-29 16:32:13 -05002848 lpfc_sli_rsp_pointers_error(phba, pring);
James Smart2e0fef82007-06-17 19:56:36 -05002849 spin_unlock_irqrestore(&phba->hbalock, iflag);
dea31012005-04-17 16:05:31 -05002850 return 1;
2851 }
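	/*
	 * Only one context services the FCP ring at a time; if another
	 * caller already owns it, return and let that context drain the
	 * ring.
	 */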
James Smart45ed1192009-10-02 15:17:02 -04002852 if (phba->fcp_ring_in_use) {
2853 spin_unlock_irqrestore(&phba->hbalock, iflag);
2854 return 1;
2855 } else
2856 phba->fcp_ring_in_use = 1;
dea31012005-04-17 16:05:31 -05002857
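	/*
	 * Order the read of the port's rspPutInx above against the reads
	 * of the response ring entries that follow.
	 */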
2858 rmb();
2859 while (pring->rspidx != portRspPut) {
James.Smart@Emulex.Com87f6eaf2005-06-25 10:34:13 -04002860 /*
2861 * Fetch an entry off the ring and copy it into a local data
2862 * structure. The copy involves a byte-swap since the
2863 * network byte order and pci byte orders are different.
2864 */
James Smarted957682007-06-17 19:56:37 -05002865 entry = lpfc_resp_iocb(phba, pring);
James Smart858c9f62007-06-17 19:56:39 -05002866 phba->last_completion_time = jiffies;
James.Smart@Emulex.Com875fbdf2005-11-29 16:32:13 -05002867
2868 if (++pring->rspidx >= portRspMax)
2869 pring->rspidx = 0;
2870
James.Smart@Emulex.Com87f6eaf2005-06-25 10:34:13 -04002871 lpfc_sli_pcimem_bcopy((uint32_t *) entry,
2872 (uint32_t *) &rspiocbq.iocb,
James Smarted957682007-06-17 19:56:37 -05002873 phba->iocb_rsp_size);
James Smarta4bc3372006-12-02 13:34:16 -05002874 INIT_LIST_HEAD(&(rspiocbq.list));
James.Smart@Emulex.Com87f6eaf2005-06-25 10:34:13 -04002875 irsp = &rspiocbq.iocb;
2876
dea31012005-04-17 16:05:31 -05002877 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
2878 pring->stats.iocb_rsp++;
2879 rsp_cmpl++;
2880
2881 if (unlikely(irsp->ulpStatus)) {
James Smart92d7f7b2007-06-17 19:56:38 -05002882 /*
2883 * If resource errors reported from HBA, reduce
2884 * queuedepths of the SCSI device.
2885 */
2886 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
2887 (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
2888 spin_unlock_irqrestore(&phba->hbalock, iflag);
James Smart3772a992009-05-22 14:50:54 -04002889 phba->lpfc_rampdown_queue_depth(phba);
James Smart92d7f7b2007-06-17 19:56:38 -05002890 spin_lock_irqsave(&phba->hbalock, iflag);
2891 }
2892
dea31012005-04-17 16:05:31 -05002893 /* Rsp ring <ringno> error: IOCB */
2894 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
James Smarte8b62012007-08-02 11:10:09 -04002895 "0336 Rsp Ring %d error: IOCB Data: "
James Smart92d7f7b2007-06-17 19:56:38 -05002896 "x%x x%x x%x x%x x%x x%x x%x x%x\n",
James Smarte8b62012007-08-02 11:10:09 -04002897 pring->ringno,
James Smart92d7f7b2007-06-17 19:56:38 -05002898 irsp->un.ulpWord[0],
2899 irsp->un.ulpWord[1],
2900 irsp->un.ulpWord[2],
2901 irsp->un.ulpWord[3],
2902 irsp->un.ulpWord[4],
2903 irsp->un.ulpWord[5],
James Smartd7c255b2008-08-24 21:50:00 -04002904 *(uint32_t *)&irsp->un1,
2905 *((uint32_t *)&irsp->un1 + 1));
dea31012005-04-17 16:05:31 -05002906 }
2907
2908 switch (type) {
2909 case LPFC_ABORT_IOCB:
2910 case LPFC_SOL_IOCB:
2911 /*
2912 * Idle exchange closed via ABTS from port. No iocb
2913 * resources need to be recovered.
2914 */
2915 if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
James Smartdca94792006-08-01 07:34:08 -04002916 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
James Smarte8b62012007-08-02 11:10:09 -04002917 "0333 IOCB cmd 0x%x"
James Smartdca94792006-08-01 07:34:08 -04002918 " processed. Skipping"
James Smart92d7f7b2007-06-17 19:56:38 -05002919 " completion\n",
James Smartdca94792006-08-01 07:34:08 -04002920 irsp->ulpCommand);
dea31012005-04-17 16:05:31 -05002921 break;
2922 }
2923
James Bottomley604a3e32005-10-29 10:28:33 -05002924 cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
2925 &rspiocbq);
James Smart0f65ff62010-02-26 14:14:23 -05002926 if (unlikely(!cmdiocbq))
2927 break;
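			/*
			 * Clear any driver-initiated abort marking before
			 * handing the response to the completion handler.
			 */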
2928 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED)
2929 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
2930 if (cmdiocbq->iocb_cmpl) {
2931 spin_unlock_irqrestore(&phba->hbalock, iflag);
2932 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
2933 &rspiocbq);
2934 spin_lock_irqsave(&phba->hbalock, iflag);
2935 }
dea31012005-04-17 16:05:31 -05002936 break;
James Smarta4bc3372006-12-02 13:34:16 -05002937 case LPFC_UNSOL_IOCB:
James Smart2e0fef82007-06-17 19:56:36 -05002938 spin_unlock_irqrestore(&phba->hbalock, iflag);
James Smarta4bc3372006-12-02 13:34:16 -05002939 lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
James Smart2e0fef82007-06-17 19:56:36 -05002940 spin_lock_irqsave(&phba->hbalock, iflag);
James Smarta4bc3372006-12-02 13:34:16 -05002941 break;
dea31012005-04-17 16:05:31 -05002942 default:
2943 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
2944 char adaptermsg[LPFC_MAX_ADPTMSG];
2945 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
2946 memcpy(&adaptermsg[0], (uint8_t *) irsp,
2947 MAX_MSG_DATA);
Joe Perches898eb712007-10-18 03:06:30 -07002948 dev_warn(&((phba->pcidev)->dev),
2949 "lpfc%d: %s\n",
dea31012005-04-17 16:05:31 -05002950 phba->brd_no, adaptermsg);
2951 } else {
2952 /* Unknown IOCB command */
2953 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
James Smarte8b62012007-08-02 11:10:09 -04002954 "0334 Unknown IOCB command "
James Smart92d7f7b2007-06-17 19:56:38 -05002955 "Data: x%x, x%x x%x x%x x%x\n",
James Smarte8b62012007-08-02 11:10:09 -04002956 type, irsp->ulpCommand,
James Smart92d7f7b2007-06-17 19:56:38 -05002957 irsp->ulpStatus,
2958 irsp->ulpIoTag,
2959 irsp->ulpContext);
dea31012005-04-17 16:05:31 -05002960 }
2961 break;
2962 }
2963
2964 /*
2965 * The response IOCB has been processed. Update the ring
2966 * pointer in SLIM. If the port response put pointer has not
2967 * been updated, sync the pgp->rspPutInx and fetch the new port
2968 * response put pointer.
2969 */
James Smarted957682007-06-17 19:56:37 -05002970 writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx);
dea31012005-04-17 16:05:31 -05002971
2972 if (pring->rspidx == portRspPut)
2973 portRspPut = le32_to_cpu(pgp->rspPutInx);
2974 }
2975
2976 if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
2977 pring->stats.iocb_rsp_full++;
2978 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
2979 writel(status, phba->CAregaddr);
2980 readl(phba->CAregaddr);
2981 }
2982 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
2983 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
2984 pring->stats.iocb_cmd_empty++;
2985
2986 /* Force update of the local copy of cmdGetInx */
2987 pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
2988 lpfc_sli_resume_iocb(phba, pring);
2989
2990 if ((pring->lpfc_sli_cmd_available))
2991 (pring->lpfc_sli_cmd_available) (phba, pring);
2992
2993 }
2994
James Smart45ed1192009-10-02 15:17:02 -04002995 phba->fcp_ring_in_use = 0;
James Smart2e0fef82007-06-17 19:56:36 -05002996 spin_unlock_irqrestore(&phba->hbalock, iflag);
dea31012005-04-17 16:05:31 -05002997 return rc;
2998}
2999
James Smarte59058c2008-08-24 21:49:00 -04003000/**
James Smart3772a992009-05-22 14:50:54 -04003001 * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb
3002 * @phba: Pointer to HBA context object.
3003 * @pring: Pointer to driver SLI ring object.
3004 * @rspiocbp: Pointer to driver response IOCB object.
3005 *
3006 * This function is called from the worker thread when there is a slow-path
3007 * response IOCB to process. This function chains all the response iocbs until
 3008 * seeing the iocb with the LE bit set. The function will call the
3009 * lpfc_sli_process_sol_iocb function if the response iocb indicates a
3010 * completion of a command iocb. The function will call the
3011 * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb.
3012 * The function frees the resources or calls the completion handler if this
3013 * iocb is an abort completion. The function returns NULL when the response
3014 * iocb has the LE bit set and all the chained iocbs are processed, otherwise
3015 * this function shall chain the iocb on to the iocb_continueq and return the
3016 * response iocb passed in.
3017 **/
3018static struct lpfc_iocbq *
3019lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3020 struct lpfc_iocbq *rspiocbp)
3021{
3022 struct lpfc_iocbq *saveq;
3023 struct lpfc_iocbq *cmdiocbp;
3024 struct lpfc_iocbq *next_iocb;
3025 IOCB_t *irsp = NULL;
3026 uint32_t free_saveq;
3027 uint8_t iocb_cmd_type;
3028 lpfc_iocb_type type;
3029 unsigned long iflag;
3030 int rc;
3031
3032 spin_lock_irqsave(&phba->hbalock, iflag);
 3033	/* First add the response iocb to the continueq list */
3034 list_add_tail(&rspiocbp->list, &(pring->iocb_continueq));
3035 pring->iocb_continueq_cnt++;
3036
Justin P. Mattock70f23fd2011-05-10 10:16:21 +02003037 /* Now, determine whether the list is completed for processing */
James Smart3772a992009-05-22 14:50:54 -04003038 irsp = &rspiocbp->iocb;
3039 if (irsp->ulpLe) {
3040 /*
3041 * By default, the driver expects to free all resources
3042 * associated with this iocb completion.
3043 */
3044 free_saveq = 1;
3045 saveq = list_get_first(&pring->iocb_continueq,
3046 struct lpfc_iocbq, list);
3047 irsp = &(saveq->iocb);
3048 list_del_init(&pring->iocb_continueq);
3049 pring->iocb_continueq_cnt = 0;
3050
3051 pring->stats.iocb_rsp++;
3052
3053 /*
3054 * If resource errors reported from HBA, reduce
3055 * queuedepths of the SCSI device.
3056 */
3057 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
3058 (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
3059 spin_unlock_irqrestore(&phba->hbalock, iflag);
3060 phba->lpfc_rampdown_queue_depth(phba);
3061 spin_lock_irqsave(&phba->hbalock, iflag);
3062 }
3063
3064 if (irsp->ulpStatus) {
3065 /* Rsp ring <ringno> error: IOCB */
3066 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3067 "0328 Rsp Ring %d error: "
3068 "IOCB Data: "
3069 "x%x x%x x%x x%x "
3070 "x%x x%x x%x x%x "
3071 "x%x x%x x%x x%x "
3072 "x%x x%x x%x x%x\n",
3073 pring->ringno,
3074 irsp->un.ulpWord[0],
3075 irsp->un.ulpWord[1],
3076 irsp->un.ulpWord[2],
3077 irsp->un.ulpWord[3],
3078 irsp->un.ulpWord[4],
3079 irsp->un.ulpWord[5],
3080 *(((uint32_t *) irsp) + 6),
3081 *(((uint32_t *) irsp) + 7),
3082 *(((uint32_t *) irsp) + 8),
3083 *(((uint32_t *) irsp) + 9),
3084 *(((uint32_t *) irsp) + 10),
3085 *(((uint32_t *) irsp) + 11),
3086 *(((uint32_t *) irsp) + 12),
3087 *(((uint32_t *) irsp) + 13),
3088 *(((uint32_t *) irsp) + 14),
3089 *(((uint32_t *) irsp) + 15));
3090 }
3091
3092 /*
3093 * Fetch the IOCB command type and call the correct completion
3094 * routine. Solicited and Unsolicited IOCBs on the ELS ring
3095 * get freed back to the lpfc_iocb_list by the discovery
3096 * kernel thread.
3097 */
3098 iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
3099 type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
3100 switch (type) {
3101 case LPFC_SOL_IOCB:
3102 spin_unlock_irqrestore(&phba->hbalock, iflag);
3103 rc = lpfc_sli_process_sol_iocb(phba, pring, saveq);
3104 spin_lock_irqsave(&phba->hbalock, iflag);
3105 break;
3106
3107 case LPFC_UNSOL_IOCB:
3108 spin_unlock_irqrestore(&phba->hbalock, iflag);
3109 rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq);
3110 spin_lock_irqsave(&phba->hbalock, iflag);
3111 if (!rc)
3112 free_saveq = 0;
3113 break;
3114
3115 case LPFC_ABORT_IOCB:
3116 cmdiocbp = NULL;
3117 if (irsp->ulpCommand != CMD_XRI_ABORTED_CX)
3118 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring,
3119 saveq);
3120 if (cmdiocbp) {
3121 /* Call the specified completion routine */
3122 if (cmdiocbp->iocb_cmpl) {
3123 spin_unlock_irqrestore(&phba->hbalock,
3124 iflag);
3125 (cmdiocbp->iocb_cmpl)(phba, cmdiocbp,
3126 saveq);
3127 spin_lock_irqsave(&phba->hbalock,
3128 iflag);
3129 } else
3130 __lpfc_sli_release_iocbq(phba,
3131 cmdiocbp);
3132 }
3133 break;
3134
3135 case LPFC_UNKNOWN_IOCB:
3136 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
3137 char adaptermsg[LPFC_MAX_ADPTMSG];
3138 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
3139 memcpy(&adaptermsg[0], (uint8_t *)irsp,
3140 MAX_MSG_DATA);
3141 dev_warn(&((phba->pcidev)->dev),
3142 "lpfc%d: %s\n",
3143 phba->brd_no, adaptermsg);
3144 } else {
3145 /* Unknown IOCB command */
3146 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3147 "0335 Unknown IOCB "
3148 "command Data: x%x "
3149 "x%x x%x x%x\n",
3150 irsp->ulpCommand,
3151 irsp->ulpStatus,
3152 irsp->ulpIoTag,
3153 irsp->ulpContext);
3154 }
3155 break;
3156 }
3157
3158 if (free_saveq) {
3159 list_for_each_entry_safe(rspiocbp, next_iocb,
3160 &saveq->list, list) {
3161 list_del(&rspiocbp->list);
3162 __lpfc_sli_release_iocbq(phba, rspiocbp);
3163 }
3164 __lpfc_sli_release_iocbq(phba, saveq);
3165 }
3166 rspiocbp = NULL;
3167 }
3168 spin_unlock_irqrestore(&phba->hbalock, iflag);
3169 return rspiocbp;
3170}
3171
3172/**
3173 * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs
James Smarte59058c2008-08-24 21:49:00 -04003174 * @phba: Pointer to HBA context object.
3175 * @pring: Pointer to driver SLI ring object.
3176 * @mask: Host attention register mask for this ring.
3177 *
James Smart3772a992009-05-22 14:50:54 -04003178 * This routine wraps the actual slow_ring event process routine from the
3179 * API jump table function pointer from the lpfc_hba struct.
James Smarte59058c2008-08-24 21:49:00 -04003180 **/
James Smart3772a992009-05-22 14:50:54 -04003181void
James Smart2e0fef82007-06-17 19:56:36 -05003182lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
3183 struct lpfc_sli_ring *pring, uint32_t mask)
dea31012005-04-17 16:05:31 -05003184{
James Smart3772a992009-05-22 14:50:54 -04003185 phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask);
3186}
3187
3188/**
3189 * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings
3190 * @phba: Pointer to HBA context object.
3191 * @pring: Pointer to driver SLI ring object.
3192 * @mask: Host attention register mask for this ring.
3193 *
3194 * This function is called from the worker thread when there is a ring event
3195 * for non-fcp rings. The caller does not hold any lock. The function will
3196 * remove each response iocb in the response ring and calls the handle
3197 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
3198 **/
3199static void
3200lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
3201 struct lpfc_sli_ring *pring, uint32_t mask)
3202{
James Smart34b02dc2008-08-24 21:49:55 -04003203 struct lpfc_pgp *pgp;
dea31012005-04-17 16:05:31 -05003204 IOCB_t *entry;
3205 IOCB_t *irsp = NULL;
3206 struct lpfc_iocbq *rspiocbp = NULL;
dea31012005-04-17 16:05:31 -05003207 uint32_t portRspPut, portRspMax;
dea31012005-04-17 16:05:31 -05003208 unsigned long iflag;
James Smart3772a992009-05-22 14:50:54 -04003209 uint32_t status;
dea31012005-04-17 16:05:31 -05003210
James Smart34b02dc2008-08-24 21:49:55 -04003211 pgp = &phba->port_gp[pring->ringno];
James Smart2e0fef82007-06-17 19:56:36 -05003212 spin_lock_irqsave(&phba->hbalock, iflag);
dea31012005-04-17 16:05:31 -05003213 pring->stats.iocb_event++;
3214
dea31012005-04-17 16:05:31 -05003215 /*
3216 * The next available response entry should never exceed the maximum
3217 * entries. If it does, treat it as an adapter hardware error.
3218 */
3219 portRspMax = pring->numRiocb;
3220 portRspPut = le32_to_cpu(pgp->rspPutInx);
3221 if (portRspPut >= portRspMax) {
3222 /*
Frederik Schwarzer025dfda2008-10-16 19:02:37 +02003223 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
dea31012005-04-17 16:05:31 -05003224 * rsp ring <portRspMax>
3225 */
James Smarted957682007-06-17 19:56:37 -05003226 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
James Smarte8b62012007-08-02 11:10:09 -04003227 "0303 Ring %d handler: portRspPut %d "
Frederik Schwarzer025dfda2008-10-16 19:02:37 +02003228 "is bigger than rsp ring %d\n",
James Smarte8b62012007-08-02 11:10:09 -04003229 pring->ringno, portRspPut, portRspMax);
dea31012005-04-17 16:05:31 -05003230
James Smart2e0fef82007-06-17 19:56:36 -05003231 phba->link_state = LPFC_HBA_ERROR;
3232 spin_unlock_irqrestore(&phba->hbalock, iflag);
dea31012005-04-17 16:05:31 -05003233
3234 phba->work_hs = HS_FFER3;
3235 lpfc_handle_eratt(phba);
3236
James Smart3772a992009-05-22 14:50:54 -04003237 return;
dea31012005-04-17 16:05:31 -05003238 }
3239
3240 rmb();
dea31012005-04-17 16:05:31 -05003241 while (pring->rspidx != portRspPut) {
3242 /*
3243 * Build a completion list and call the appropriate handler.
3244 * The process is to get the next available response iocb, get
3245 * a free iocb from the list, copy the response data into the
3246 * free iocb, insert to the continuation list, and update the
3247 * next response index to slim. This process makes response
 3248	 * iocbs in the ring available to DMA as fast as possible but
3249 * pays a penalty for a copy operation. Since the iocb is
3250 * only 32 bytes, this penalty is considered small relative to
3251 * the PCI reads for register values and a slim write. When
3252 * the ulpLe field is set, the entire Command has been
3253 * received.
3254 */
James Smarted957682007-06-17 19:56:37 -05003255 entry = lpfc_resp_iocb(phba, pring);
3256
James Smart858c9f62007-06-17 19:56:39 -05003257 phba->last_completion_time = jiffies;
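		/* Grab a free iocbq to hold a copy of this response entry */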
James Smart2e0fef82007-06-17 19:56:36 -05003258 rspiocbp = __lpfc_sli_get_iocbq(phba);
dea31012005-04-17 16:05:31 -05003259 if (rspiocbp == NULL) {
3260 printk(KERN_ERR "%s: out of buffers! Failing "
Harvey Harrisoncadbd4a2008-07-03 23:47:27 -07003261 "completion.\n", __func__);
dea31012005-04-17 16:05:31 -05003262 break;
3263 }
3264
James Smarted957682007-06-17 19:56:37 -05003265 lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb,
3266 phba->iocb_rsp_size);
dea31012005-04-17 16:05:31 -05003267 irsp = &rspiocbp->iocb;
3268
3269 if (++pring->rspidx >= portRspMax)
3270 pring->rspidx = 0;
3271
James Smarta58cbd52007-08-02 11:09:43 -04003272 if (pring->ringno == LPFC_ELS_RING) {
3273 lpfc_debugfs_slow_ring_trc(phba,
3274 "IOCB rsp ring: wd4:x%08x wd6:x%08x wd7:x%08x",
3275 *(((uint32_t *) irsp) + 4),
3276 *(((uint32_t *) irsp) + 6),
3277 *(((uint32_t *) irsp) + 7));
3278 }
3279
James Smarted957682007-06-17 19:56:37 -05003280 writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx);
dea31012005-04-17 16:05:31 -05003281
James Smart3772a992009-05-22 14:50:54 -04003282 spin_unlock_irqrestore(&phba->hbalock, iflag);
3283 /* Handle the response IOCB */
3284 rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp);
3285 spin_lock_irqsave(&phba->hbalock, iflag);
dea31012005-04-17 16:05:31 -05003286
3287 /*
3288 * If the port response put pointer has not been updated, sync
 3289	 * the pgp->rspPutInx in the MAILBOX_t and fetch the new port
3290 * response put pointer.
3291 */
3292 if (pring->rspidx == portRspPut) {
3293 portRspPut = le32_to_cpu(pgp->rspPutInx);
3294 }
3295 } /* while (pring->rspidx != portRspPut) */
3296
James Smart92d7f7b2007-06-17 19:56:38 -05003297 if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
dea31012005-04-17 16:05:31 -05003298 /* At least one response entry has been freed */
3299 pring->stats.iocb_rsp_full++;
3300 /* SET RxRE_RSP in Chip Att register */
3301 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
3302 writel(status, phba->CAregaddr);
3303 readl(phba->CAregaddr); /* flush */
3304 }
3305 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
3306 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
3307 pring->stats.iocb_cmd_empty++;
3308
3309 /* Force update of the local copy of cmdGetInx */
3310 pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
3311 lpfc_sli_resume_iocb(phba, pring);
3312
3313 if ((pring->lpfc_sli_cmd_available))
3314 (pring->lpfc_sli_cmd_available) (phba, pring);
3315
3316 }
3317
James Smart2e0fef82007-06-17 19:56:36 -05003318 spin_unlock_irqrestore(&phba->hbalock, iflag);
James Smart3772a992009-05-22 14:50:54 -04003319 return;
dea31012005-04-17 16:05:31 -05003320}
3321
James Smarte59058c2008-08-24 21:49:00 -04003322/**
James Smart4f774512009-05-22 14:52:35 -04003323 * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events
3324 * @phba: Pointer to HBA context object.
3325 * @pring: Pointer to driver SLI ring object.
3326 * @mask: Host attention register mask for this ring.
3327 *
3328 * This function is called from the worker thread when there is a pending
3329 * ELS response iocb on the driver internal slow-path response iocb worker
3330 * queue. The caller does not hold any lock. The function will remove each
3331 * response iocb from the response worker queue and calls the handle
3332 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
3333 **/
3334static void
3335lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
3336 struct lpfc_sli_ring *pring, uint32_t mask)
3337{
3338 struct lpfc_iocbq *irspiocbq;
James Smart4d9ab992009-10-02 15:16:39 -04003339 struct hbq_dmabuf *dmabuf;
3340 struct lpfc_cq_event *cq_event;
James Smart4f774512009-05-22 14:52:35 -04003341 unsigned long iflag;
3342
James Smart45ed1192009-10-02 15:17:02 -04003343 spin_lock_irqsave(&phba->hbalock, iflag);
3344 phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
3345 spin_unlock_irqrestore(&phba->hbalock, iflag);
3346 while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
James Smart4f774512009-05-22 14:52:35 -04003347 /* Get the response iocb from the head of work queue */
3348 spin_lock_irqsave(&phba->hbalock, iflag);
James Smart45ed1192009-10-02 15:17:02 -04003349 list_remove_head(&phba->sli4_hba.sp_queue_event,
James Smart4d9ab992009-10-02 15:16:39 -04003350 cq_event, struct lpfc_cq_event, list);
James Smart4f774512009-05-22 14:52:35 -04003351 spin_unlock_irqrestore(&phba->hbalock, iflag);
James Smart4d9ab992009-10-02 15:16:39 -04003352
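		/*
		 * Dispatch on the CQE code: WQE completions are translated
		 * into response iocbs, receive CQEs carry unsolicited
		 * buffers.
		 */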
3353 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
3354 case CQE_CODE_COMPL_WQE:
3355 irspiocbq = container_of(cq_event, struct lpfc_iocbq,
3356 cq_event);
James Smart45ed1192009-10-02 15:17:02 -04003357 /* Translate ELS WCQE to response IOCBQ */
3358 irspiocbq = lpfc_sli4_els_wcqe_to_rspiocbq(phba,
3359 irspiocbq);
3360 if (irspiocbq)
3361 lpfc_sli_sp_handle_rspiocb(phba, pring,
3362 irspiocbq);
James Smart4d9ab992009-10-02 15:16:39 -04003363 break;
3364 case CQE_CODE_RECEIVE:
James Smart7851fe22011-07-22 18:36:52 -04003365 case CQE_CODE_RECEIVE_V1:
James Smart4d9ab992009-10-02 15:16:39 -04003366 dmabuf = container_of(cq_event, struct hbq_dmabuf,
3367 cq_event);
3368 lpfc_sli4_handle_received_buffer(phba, dmabuf);
3369 break;
3370 default:
3371 break;
3372 }
James Smart4f774512009-05-22 14:52:35 -04003373 }
3374}
3375
3376/**
James Smart3621a712009-04-06 18:47:14 -04003377 * lpfc_sli_abort_iocb_ring - Abort all iocbs in the ring
James Smarte59058c2008-08-24 21:49:00 -04003378 * @phba: Pointer to HBA context object.
3379 * @pring: Pointer to driver SLI ring object.
3380 *
3381 * This function aborts all iocbs in the given ring and frees all the iocb
3382 * objects in txq. This function issues an abort iocb for all the iocb commands
 3383 * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
3384 * the return of this function. The caller is not required to hold any locks.
3385 **/
James Smart2e0fef82007-06-17 19:56:36 -05003386void
dea31012005-04-17 16:05:31 -05003387lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
3388{
James Smart2534ba72007-04-25 09:52:20 -04003389 LIST_HEAD(completions);
dea31012005-04-17 16:05:31 -05003390 struct lpfc_iocbq *iocb, *next_iocb;
dea31012005-04-17 16:05:31 -05003391
James Smart92d7f7b2007-06-17 19:56:38 -05003392 if (pring->ringno == LPFC_ELS_RING) {
3393 lpfc_fabric_abort_hba(phba);
3394 }
3395
dea31012005-04-17 16:05:31 -05003396 /* Error everything on txq and txcmplq
3397 * First do the txq.
3398 */
James Smart2e0fef82007-06-17 19:56:36 -05003399 spin_lock_irq(&phba->hbalock);
James Smart2534ba72007-04-25 09:52:20 -04003400 list_splice_init(&pring->txq, &completions);
dea31012005-04-17 16:05:31 -05003401 pring->txq_cnt = 0;
dea31012005-04-17 16:05:31 -05003402
3403 /* Next issue ABTS for everything on the txcmplq */
James Smart2534ba72007-04-25 09:52:20 -04003404 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
3405 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
3406
James Smart2e0fef82007-06-17 19:56:36 -05003407 spin_unlock_irq(&phba->hbalock);
James Smart2534ba72007-04-25 09:52:20 -04003408
James Smarta257bf92009-04-06 18:48:10 -04003409 /* Cancel all the IOCBs from the completions list */
3410 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
3411 IOERR_SLI_ABORTED);
dea31012005-04-17 16:05:31 -05003412}
3413
James Smarte59058c2008-08-24 21:49:00 -04003414/**
James Smart3621a712009-04-06 18:47:14 -04003415 * lpfc_sli_flush_fcp_rings - flush all iocbs in the fcp ring
James Smarta8e497d2008-08-24 21:50:11 -04003416 * @phba: Pointer to HBA context object.
3417 *
3418 * This function flushes all iocbs in the fcp ring and frees all the iocb
3419 * objects in txq and txcmplq. This function will not issue abort iocbs
 3420 * for the iocb commands in txcmplq; they will just be returned with
 3421 * IOERR_SLI_DOWN. This function is invoked by EEH when the device's PCI
 3422 * slot has been permanently disabled.
3423 **/
3424void
3425lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
3426{
3427 LIST_HEAD(txq);
3428 LIST_HEAD(txcmplq);
James Smarta8e497d2008-08-24 21:50:11 -04003429 struct lpfc_sli *psli = &phba->sli;
3430 struct lpfc_sli_ring *pring;
3431
3432 /* Currently, only one fcp ring */
3433 pring = &psli->ring[psli->fcp_ring];
3434
3435 spin_lock_irq(&phba->hbalock);
3436 /* Retrieve everything on txq */
3437 list_splice_init(&pring->txq, &txq);
3438 pring->txq_cnt = 0;
3439
3440 /* Retrieve everything on the txcmplq */
3441 list_splice_init(&pring->txcmplq, &txcmplq);
3442 pring->txcmplq_cnt = 0;
3443 spin_unlock_irq(&phba->hbalock);
3444
3445 /* Flush the txq */
James Smarta257bf92009-04-06 18:48:10 -04003446 lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT,
3447 IOERR_SLI_DOWN);
James Smarta8e497d2008-08-24 21:50:11 -04003448
3449 /* Flush the txcmpq */
James Smarta257bf92009-04-06 18:48:10 -04003450 lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT,
3451 IOERR_SLI_DOWN);
James Smarta8e497d2008-08-24 21:50:11 -04003452}
3453
3454/**
James Smart3772a992009-05-22 14:50:54 -04003455 * lpfc_sli_brdready_s3 - Check for sli3 host ready status
James Smarte59058c2008-08-24 21:49:00 -04003456 * @phba: Pointer to HBA context object.
3457 * @mask: Bit mask to be checked.
3458 *
3459 * This function reads the host status register and compares
 3460 * with the provided bit mask to check if the HBA has completed
 3461 * the restart. This function will wait in a loop for the
 3462 * HBA to complete the restart. If the HBA does not restart within
 3463 * 15 iterations, the function will reset the HBA again. The
 3464 * function returns 1 when the HBA fails to restart, otherwise it
 3465 * returns zero.
3466 **/
James Smart3772a992009-05-22 14:50:54 -04003467static int
3468lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
dea31012005-04-17 16:05:31 -05003469{
Jamie Wellnitz41415862006-02-28 19:25:27 -05003470 uint32_t status;
3471 int i = 0;
3472 int retval = 0;
dea31012005-04-17 16:05:31 -05003473
Jamie Wellnitz41415862006-02-28 19:25:27 -05003474 /* Read the HBA Host Status Register */
James Smart9940b972011-03-11 16:06:12 -05003475 if (lpfc_readl(phba->HSregaddr, &status))
3476 return 1;
dea31012005-04-17 16:05:31 -05003477
Jamie Wellnitz41415862006-02-28 19:25:27 -05003478 /*
3479 * Check status register every 100ms for 5 retries, then every
3480 * 500ms for 5, then every 2.5 sec for 5, then reset board and
3481 * every 2.5 sec for 4.
3482 * Break our of the loop if errors occurred during init.
 3483	 * Break out of the loop if errors occurred during init.
3484 while (((status & mask) != mask) &&
3485 !(status & HS_FFERM) &&
3486 i++ < 20) {
dea31012005-04-17 16:05:31 -05003487
Jamie Wellnitz41415862006-02-28 19:25:27 -05003488 if (i <= 5)
3489 msleep(10);
3490 else if (i <= 10)
3491 msleep(500);
3492 else
3493 msleep(2500);
dea31012005-04-17 16:05:31 -05003494
Jamie Wellnitz41415862006-02-28 19:25:27 -05003495 if (i == 15) {
James Smart2e0fef82007-06-17 19:56:36 -05003496 /* Do post */
James Smart92d7f7b2007-06-17 19:56:38 -05003497 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
Jamie Wellnitz41415862006-02-28 19:25:27 -05003498 lpfc_sli_brdrestart(phba);
3499 }
3500 /* Read the HBA Host Status Register */
James Smart9940b972011-03-11 16:06:12 -05003501 if (lpfc_readl(phba->HSregaddr, &status)) {
3502 retval = 1;
3503 break;
3504 }
dea31012005-04-17 16:05:31 -05003505 }
dea31012005-04-17 16:05:31 -05003506
Jamie Wellnitz41415862006-02-28 19:25:27 -05003507 /* Check to see if any errors occurred during init */
3508 if ((status & HS_FFERM) || (i >= 20)) {
James Smarte40a02c2010-02-26 14:13:54 -05003509 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3510 "2751 Adapter failed to restart, "
3511 "status reg x%x, FW Data: A8 x%x AC x%x\n",
3512 status,
3513 readl(phba->MBslimaddr + 0xa8),
3514 readl(phba->MBslimaddr + 0xac));
James Smart2e0fef82007-06-17 19:56:36 -05003515 phba->link_state = LPFC_HBA_ERROR;
Jamie Wellnitz41415862006-02-28 19:25:27 -05003516 retval = 1;
3517 }
dea31012005-04-17 16:05:31 -05003518
Jamie Wellnitz41415862006-02-28 19:25:27 -05003519 return retval;
dea31012005-04-17 16:05:31 -05003520}
3521
James Smartda0436e2009-05-22 14:51:39 -04003522/**
3523 * lpfc_sli_brdready_s4 - Check for sli4 host ready status
3524 * @phba: Pointer to HBA context object.
3525 * @mask: Bit mask to be checked.
3526 *
 3527 * This function checks the host status register to see if the HBA is
 3528 * ready. This function will wait in a loop for the HBA to become ready.
 3529 * If the HBA is not ready, the function will reset the HBA PCI
 3530 * function again. The function returns 1 when the HBA fails to become
 3531 * ready, otherwise it returns zero.
3532 **/
3533static int
3534lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
3535{
3536 uint32_t status;
3537 int retval = 0;
3538
3539 /* Read the HBA Host Status Register */
3540 status = lpfc_sli4_post_status_check(phba);
3541
3542 if (status) {
3543 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
3544 lpfc_sli_brdrestart(phba);
3545 status = lpfc_sli4_post_status_check(phba);
3546 }
3547
3548 /* Check to see if any errors occurred during init */
3549 if (status) {
3550 phba->link_state = LPFC_HBA_ERROR;
3551 retval = 1;
3552 } else
3553 phba->sli4_hba.intr_enable = 0;
3554
3555 return retval;
3556}
3557
3558/**
 3559 * lpfc_sli_brdready - Wrapper func for checking the hba readiness
3560 * @phba: Pointer to HBA context object.
3561 * @mask: Bit mask to be checked.
3562 *
 3563 * This routine wraps the actual SLI3 or SLI4 hba readiness check routine
3564 * from the API jump table function pointer from the lpfc_hba struct.
3565 **/
3566int
3567lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
3568{
3569 return phba->lpfc_sli_brdready(phba, mask);
3570}
3571
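/*
 * Illustrative usage only (assumed caller pattern, not taken from this
 * file): init and resume paths typically wait for chip readiness after a
 * restart with something like
 *
 *	if (lpfc_sli_brdready(phba, HS_FFRDY | HS_MBRDY))
 *		return -EIO;
 *
 * where the HS_FFRDY | HS_MBRDY mask is an example of the host-status
 * bits a caller might poll for.
 */
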
James Smart92908312006-03-07 15:04:13 -05003572#define BARRIER_TEST_PATTERN (0xdeadbeef)
3573
James Smarte59058c2008-08-24 21:49:00 -04003574/**
James Smart3621a712009-04-06 18:47:14 -04003575 * lpfc_reset_barrier - Make HBA ready for HBA reset
James Smarte59058c2008-08-24 21:49:00 -04003576 * @phba: Pointer to HBA context object.
3577 *
3578 * This function is called before resetting an HBA. This
 3579 * function requests the HBA to quiesce DMAs before a reset.
3580 **/
James Smart2e0fef82007-06-17 19:56:36 -05003581void lpfc_reset_barrier(struct lpfc_hba *phba)
James Smart92908312006-03-07 15:04:13 -05003582{
James Smart65a29c12006-07-06 15:50:50 -04003583 uint32_t __iomem *resp_buf;
3584 uint32_t __iomem *mbox_buf;
James Smart92908312006-03-07 15:04:13 -05003585 volatile uint32_t mbox;
James Smart9940b972011-03-11 16:06:12 -05003586 uint32_t hc_copy, ha_copy, resp_data;
James Smart92908312006-03-07 15:04:13 -05003587 int i;
3588 uint8_t hdrtype;
3589
3590 pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
3591 if (hdrtype != 0x80 ||
3592 (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
3593 FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID))
3594 return;
3595
3596 /*
3597 * Tell the other part of the chip to suspend temporarily all
3598 * its DMA activity.
3599 */
James Smart65a29c12006-07-06 15:50:50 -04003600 resp_buf = phba->MBslimaddr;
James Smart92908312006-03-07 15:04:13 -05003601
3602 /* Disable the error attention */
James Smart9940b972011-03-11 16:06:12 -05003603 if (lpfc_readl(phba->HCregaddr, &hc_copy))
3604 return;
James Smart92908312006-03-07 15:04:13 -05003605 writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr);
3606 readl(phba->HCregaddr); /* flush */
James Smart2e0fef82007-06-17 19:56:36 -05003607 phba->link_flag |= LS_IGNORE_ERATT;
James Smart92908312006-03-07 15:04:13 -05003608
James Smart9940b972011-03-11 16:06:12 -05003609 if (lpfc_readl(phba->HAregaddr, &ha_copy))
3610 return;
3611 if (ha_copy & HA_ERATT) {
James Smart92908312006-03-07 15:04:13 -05003612 /* Clear Chip error bit */
3613 writel(HA_ERATT, phba->HAregaddr);
James Smart2e0fef82007-06-17 19:56:36 -05003614 phba->pport->stopped = 1;
James Smart92908312006-03-07 15:04:13 -05003615 }
3616
3617 mbox = 0;
3618 ((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD;
3619 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP;
3620
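	/*
	 * Write a known test pattern into the second SLIM word; the port
	 * is expected to acknowledge the kill/suspend request by writing
	 * back the complement of this pattern (see the poll loop below).
	 */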
3621 writel(BARRIER_TEST_PATTERN, (resp_buf + 1));
James Smart65a29c12006-07-06 15:50:50 -04003622 mbox_buf = phba->MBslimaddr;
James Smart92908312006-03-07 15:04:13 -05003623 writel(mbox, mbox_buf);
3624
James Smart9940b972011-03-11 16:06:12 -05003625 for (i = 0; i < 50; i++) {
3626 if (lpfc_readl((resp_buf + 1), &resp_data))
3627 return;
3628 if (resp_data != ~(BARRIER_TEST_PATTERN))
3629 mdelay(1);
3630 else
3631 break;
3632 }
3633 resp_data = 0;
3634 if (lpfc_readl((resp_buf + 1), &resp_data))
3635 return;
3636 if (resp_data != ~(BARRIER_TEST_PATTERN)) {
James Smartf4b4c682009-05-22 14:53:12 -04003637 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE ||
James Smart2e0fef82007-06-17 19:56:36 -05003638 phba->pport->stopped)
James Smart92908312006-03-07 15:04:13 -05003639 goto restore_hc;
3640 else
3641 goto clear_errat;
3642 }
3643
3644 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST;
James Smart9940b972011-03-11 16:06:12 -05003645 resp_data = 0;
3646 for (i = 0; i < 500; i++) {
3647 if (lpfc_readl(resp_buf, &resp_data))
3648 return;
3649 if (resp_data != mbox)
3650 mdelay(1);
3651 else
3652 break;
3653 }
James Smart92908312006-03-07 15:04:13 -05003654
3655clear_errat:
3656
James Smart9940b972011-03-11 16:06:12 -05003657 while (++i < 500) {
3658 if (lpfc_readl(phba->HAregaddr, &ha_copy))
3659 return;
3660 if (!(ha_copy & HA_ERATT))
3661 mdelay(1);
3662 else
3663 break;
3664 }
James Smart92908312006-03-07 15:04:13 -05003665
3666 if (readl(phba->HAregaddr) & HA_ERATT) {
3667 writel(HA_ERATT, phba->HAregaddr);
James Smart2e0fef82007-06-17 19:56:36 -05003668 phba->pport->stopped = 1;
James Smart92908312006-03-07 15:04:13 -05003669 }
3670
3671restore_hc:
James Smart2e0fef82007-06-17 19:56:36 -05003672 phba->link_flag &= ~LS_IGNORE_ERATT;
James Smart92908312006-03-07 15:04:13 -05003673 writel(hc_copy, phba->HCregaddr);
3674 readl(phba->HCregaddr); /* flush */
3675}
3676
James Smarte59058c2008-08-24 21:49:00 -04003677/**
James Smart3621a712009-04-06 18:47:14 -04003678 * lpfc_sli_brdkill - Issue a kill_board mailbox command
James Smarte59058c2008-08-24 21:49:00 -04003679 * @phba: Pointer to HBA context object.
3680 *
3681 * This function issues a kill_board mailbox command and waits for
3682 * the error attention interrupt. This function is called for stopping
3683 * the firmware processing. The caller is not required to hold any
3684 * locks. This function calls lpfc_hba_down_post function to free
3685 * any pending commands after the kill. The function will return 1 when it
 3686 * fails to kill the board, otherwise it will return 0.
3687 **/
Jamie Wellnitz41415862006-02-28 19:25:27 -05003688int
James Smart2e0fef82007-06-17 19:56:36 -05003689lpfc_sli_brdkill(struct lpfc_hba *phba)
dea31012005-04-17 16:05:31 -05003690{
Jamie Wellnitz41415862006-02-28 19:25:27 -05003691 struct lpfc_sli *psli;
3692 LPFC_MBOXQ_t *pmb;
3693 uint32_t status;
3694 uint32_t ha_copy;
3695 int retval;
3696 int i = 0;
3697
3698 psli = &phba->sli;
3699
3700 /* Kill HBA */
James Smarted957682007-06-17 19:56:37 -05003701 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
James Smarte8b62012007-08-02 11:10:09 -04003702 "0329 Kill HBA Data: x%x x%x\n",
3703 phba->pport->port_state, psli->sli_flag);
Jamie Wellnitz41415862006-02-28 19:25:27 -05003704
James Smart98c9ea52007-10-27 13:37:33 -04003705 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3706 if (!pmb)
Jamie Wellnitz41415862006-02-28 19:25:27 -05003707 return 1;
Jamie Wellnitz41415862006-02-28 19:25:27 -05003708
3709 /* Disable the error attention */
James Smart2e0fef82007-06-17 19:56:36 -05003710 spin_lock_irq(&phba->hbalock);
James Smart9940b972011-03-11 16:06:12 -05003711 if (lpfc_readl(phba->HCregaddr, &status)) {
3712 spin_unlock_irq(&phba->hbalock);
3713 mempool_free(pmb, phba->mbox_mem_pool);
3714 return 1;
3715 }
Jamie Wellnitz41415862006-02-28 19:25:27 -05003716 status &= ~HC_ERINT_ENA;
3717 writel(status, phba->HCregaddr);
3718 readl(phba->HCregaddr); /* flush */
James Smart2e0fef82007-06-17 19:56:36 -05003719 phba->link_flag |= LS_IGNORE_ERATT;
3720 spin_unlock_irq(&phba->hbalock);
Jamie Wellnitz41415862006-02-28 19:25:27 -05003721
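	/*
	 * Build and issue the KILL_BOARD mailbox without waiting for a
	 * completion; the board signals the kill via an error attention
	 * instead (see the polling loop below).
	 */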
3722 lpfc_kill_board(phba, pmb);
3723 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
3724 retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
3725
3726 if (retval != MBX_SUCCESS) {
3727 if (retval != MBX_BUSY)
3728 mempool_free(pmb, phba->mbox_mem_pool);
James Smarte40a02c2010-02-26 14:13:54 -05003729 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3730 "2752 KILL_BOARD command failed retval %d\n",
3731 retval);
James Smart2e0fef82007-06-17 19:56:36 -05003732 spin_lock_irq(&phba->hbalock);
3733 phba->link_flag &= ~LS_IGNORE_ERATT;
3734 spin_unlock_irq(&phba->hbalock);
Jamie Wellnitz41415862006-02-28 19:25:27 -05003735 return 1;
3736 }
3737
James Smartf4b4c682009-05-22 14:53:12 -04003738 spin_lock_irq(&phba->hbalock);
3739 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
3740 spin_unlock_irq(&phba->hbalock);
James Smart92908312006-03-07 15:04:13 -05003741
Jamie Wellnitz41415862006-02-28 19:25:27 -05003742 mempool_free(pmb, phba->mbox_mem_pool);
3743
3744 /* There is no completion for a KILL_BOARD mbox cmd. Check for an error
3745 * attention every 100ms for 3 seconds. If we don't get ERATT after
3746 * 3 seconds we still set HBA_ERROR state because the status of the
3747 * board is now undefined.
3748 */
James Smart9940b972011-03-11 16:06:12 -05003749 if (lpfc_readl(phba->HAregaddr, &ha_copy))
3750 return 1;
Jamie Wellnitz41415862006-02-28 19:25:27 -05003751 while ((i++ < 30) && !(ha_copy & HA_ERATT)) {
3752 mdelay(100);
James Smart9940b972011-03-11 16:06:12 -05003753 if (lpfc_readl(phba->HAregaddr, &ha_copy))
3754 return 1;
Jamie Wellnitz41415862006-02-28 19:25:27 -05003755 }
3756
3757 del_timer_sync(&psli->mbox_tmo);
James Smart92908312006-03-07 15:04:13 -05003758 if (ha_copy & HA_ERATT) {
3759 writel(HA_ERATT, phba->HAregaddr);
James Smart2e0fef82007-06-17 19:56:36 -05003760 phba->pport->stopped = 1;
James Smart92908312006-03-07 15:04:13 -05003761 }
James Smart2e0fef82007-06-17 19:56:36 -05003762 spin_lock_irq(&phba->hbalock);
Jamie Wellnitz41415862006-02-28 19:25:27 -05003763 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
James Smart04c68492009-05-22 14:52:52 -04003764 psli->mbox_active = NULL;
James Smart2e0fef82007-06-17 19:56:36 -05003765 phba->link_flag &= ~LS_IGNORE_ERATT;
3766 spin_unlock_irq(&phba->hbalock);
Jamie Wellnitz41415862006-02-28 19:25:27 -05003767
Jamie Wellnitz41415862006-02-28 19:25:27 -05003768 lpfc_hba_down_post(phba);
James Smart2e0fef82007-06-17 19:56:36 -05003769 phba->link_state = LPFC_HBA_ERROR;
Jamie Wellnitz41415862006-02-28 19:25:27 -05003770
James Smart2e0fef82007-06-17 19:56:36 -05003771 return ha_copy & HA_ERATT ? 0 : 1;
Jamie Wellnitz41415862006-02-28 19:25:27 -05003772}
3773
James Smarte59058c2008-08-24 21:49:00 -04003774/**
James Smart3772a992009-05-22 14:50:54 -04003775 * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA
James Smarte59058c2008-08-24 21:49:00 -04003776 * @phba: Pointer to HBA context object.
3777 *
3778 * This function resets the HBA by writing HC_INITFF to the control
3779 * register. After the HBA resets, this function resets all the iocb ring
3780 * indices. This function disables PCI layer parity checking during
3781 * the reset.
3782 * This function returns 0 always.
3783 * The caller is not required to hold any locks.
3784 **/
Jamie Wellnitz41415862006-02-28 19:25:27 -05003785int
James Smart2e0fef82007-06-17 19:56:36 -05003786lpfc_sli_brdreset(struct lpfc_hba *phba)
Jamie Wellnitz41415862006-02-28 19:25:27 -05003787{
3788 struct lpfc_sli *psli;
dea31012005-04-17 16:05:31 -05003789 struct lpfc_sli_ring *pring;
Jamie Wellnitz41415862006-02-28 19:25:27 -05003790 uint16_t cfg_value;
dea31012005-04-17 16:05:31 -05003791 int i;
dea31012005-04-17 16:05:31 -05003792
Jamie Wellnitz41415862006-02-28 19:25:27 -05003793 psli = &phba->sli;
dea31012005-04-17 16:05:31 -05003794
Jamie Wellnitz41415862006-02-28 19:25:27 -05003795 /* Reset HBA */
3796 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
James Smarte8b62012007-08-02 11:10:09 -04003797 "0325 Reset HBA Data: x%x x%x\n",
James Smart2e0fef82007-06-17 19:56:36 -05003798 phba->pport->port_state, psli->sli_flag);
dea31012005-04-17 16:05:31 -05003799
3800 /* perform board reset */
3801 phba->fc_eventTag = 0;
James Smart4d9ab992009-10-02 15:16:39 -04003802 phba->link_events = 0;
James Smart2e0fef82007-06-17 19:56:36 -05003803 phba->pport->fc_myDID = 0;
3804 phba->pport->fc_prevDID = 0;
dea31012005-04-17 16:05:31 -05003805
Jamie Wellnitz41415862006-02-28 19:25:27 -05003806 /* Turn off parity checking and serr during the physical reset */
3807 pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
3808 pci_write_config_word(phba->pcidev, PCI_COMMAND,
3809 (cfg_value &
3810 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
3811
James Smart3772a992009-05-22 14:50:54 -04003812 psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA);
3813
Jamie Wellnitz41415862006-02-28 19:25:27 -05003814 /* Now toggle INITFF bit in the Host Control Register */
3815 writel(HC_INITFF, phba->HCregaddr);
3816 mdelay(1);
3817 readl(phba->HCregaddr); /* flush */
3818 writel(0, phba->HCregaddr);
3819 readl(phba->HCregaddr); /* flush */
3820
3821 /* Restore PCI cmd register */
3822 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
dea31012005-04-17 16:05:31 -05003823
3824 /* Initialize relevant SLI info */
Jamie Wellnitz41415862006-02-28 19:25:27 -05003825 for (i = 0; i < psli->num_rings; i++) {
3826 pring = &psli->ring[i];
dea31012005-04-17 16:05:31 -05003827 pring->flag = 0;
3828 pring->rspidx = 0;
3829 pring->next_cmdidx = 0;
3830 pring->local_getidx = 0;
3831 pring->cmdidx = 0;
3832 pring->missbufcnt = 0;
3833 }
dea31012005-04-17 16:05:31 -05003834
James Smart2e0fef82007-06-17 19:56:36 -05003835 phba->link_state = LPFC_WARM_START;
Jamie Wellnitz41415862006-02-28 19:25:27 -05003836 return 0;
3837}
3838
James Smarte59058c2008-08-24 21:49:00 -04003839/**
James Smartda0436e2009-05-22 14:51:39 -04003840 * lpfc_sli4_brdreset - Reset a sli-4 HBA
3841 * @phba: Pointer to HBA context object.
3842 *
3843 * This function resets a SLI4 HBA. This function disables PCI layer parity
 3844 * checking while it resets the device. The caller is not required to hold
3845 * any locks.
3846 *
3847 * This function returns 0 always.
3848 **/
3849int
3850lpfc_sli4_brdreset(struct lpfc_hba *phba)
3851{
3852 struct lpfc_sli *psli = &phba->sli;
3853 uint16_t cfg_value;
3854 uint8_t qindx;
3855
3856 /* Reset HBA */
3857 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3858 "0295 Reset HBA Data: x%x x%x\n",
3859 phba->pport->port_state, psli->sli_flag);
3860
3861 /* perform board reset */
3862 phba->fc_eventTag = 0;
James Smart4d9ab992009-10-02 15:16:39 -04003863 phba->link_events = 0;
James Smartda0436e2009-05-22 14:51:39 -04003864 phba->pport->fc_myDID = 0;
3865 phba->pport->fc_prevDID = 0;
3866
James Smartda0436e2009-05-22 14:51:39 -04003867 spin_lock_irq(&phba->hbalock);
3868 psli->sli_flag &= ~(LPFC_PROCESS_LA);
3869 phba->fcf.fcf_flag = 0;
3870 /* Clean up the child queue list for the CQs */
3871 list_del_init(&phba->sli4_hba.mbx_wq->list);
3872 list_del_init(&phba->sli4_hba.els_wq->list);
3873 list_del_init(&phba->sli4_hba.hdr_rq->list);
3874 list_del_init(&phba->sli4_hba.dat_rq->list);
3875 list_del_init(&phba->sli4_hba.mbx_cq->list);
3876 list_del_init(&phba->sli4_hba.els_cq->list);
James Smartda0436e2009-05-22 14:51:39 -04003877 for (qindx = 0; qindx < phba->cfg_fcp_wq_count; qindx++)
3878 list_del_init(&phba->sli4_hba.fcp_wq[qindx]->list);
James Smart05580562011-05-24 11:40:48 -04003879 qindx = 0;
3880 do
James Smartda0436e2009-05-22 14:51:39 -04003881 list_del_init(&phba->sli4_hba.fcp_cq[qindx]->list);
James Smart05580562011-05-24 11:40:48 -04003882 while (++qindx < phba->cfg_fcp_eq_count);
James Smartda0436e2009-05-22 14:51:39 -04003883 spin_unlock_irq(&phba->hbalock);
3884
3885 /* Now physically reset the device */
3886 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3887 "0389 Performing PCI function reset!\n");
James Smartbe858b62010-12-15 17:57:20 -05003888
3889 /* Turn off parity checking and serr during the physical reset */
3890 pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
3891 pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value &
3892 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
3893
James Smartda0436e2009-05-22 14:51:39 -04003894 /* Perform FCoE PCI function reset */
3895 lpfc_pci_function_reset(phba);
3896
James Smartbe858b62010-12-15 17:57:20 -05003897 /* Restore PCI cmd register */
3898 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
3899
James Smartda0436e2009-05-22 14:51:39 -04003900 return 0;
3901}
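
/*
 * Illustrative sketch, not part of the driver: list_del_init(), as used
 * above to clean up the child queue lists, unlinks an entry from its
 * parent list and re-initializes it as an empty list head, so the entry
 * can later be re-added (or deleted again) without corrupting the list.
 * The structure and helper names below are invented for this example.
 */
#if 0	/* example only - not compiled */
struct example_queue {
	struct list_head list;
};

static void example_detach_queue(struct example_queue *q)
{
	/* After this call list_empty(&q->list) is true */
	list_del_init(&q->list);
}
#endif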
3902
3903/**
3904 * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba
James Smarte59058c2008-08-24 21:49:00 -04003905 * @phba: Pointer to HBA context object.
3906 *
3907 * This function is called in the SLI initialization code path to
3908 * restart the HBA. The caller is not required to hold any lock.
 3909 * This function writes the MBX_RESTART mailbox command to the SLIM and
 3910 * resets the HBA. At the end of the function, it calls the
 3911 * lpfc_hba_down_post function to free any pending commands. POST is
 3912 * enabled only for the first initialization. The function returns zero
 3913 * but does not guarantee that the MBX_RESTART mailbox command has
 3914 * completed before it returns.
3915 **/
James Smartda0436e2009-05-22 14:51:39 -04003916static int
3917lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
Jamie Wellnitz41415862006-02-28 19:25:27 -05003918{
3919 MAILBOX_t *mb;
3920 struct lpfc_sli *psli;
Jamie Wellnitz41415862006-02-28 19:25:27 -05003921 volatile uint32_t word0;
3922 void __iomem *to_slim;
James Smart0d878412009-10-02 15:16:56 -04003923 uint32_t hba_aer_enabled;
Jamie Wellnitz41415862006-02-28 19:25:27 -05003924
James Smart2e0fef82007-06-17 19:56:36 -05003925 spin_lock_irq(&phba->hbalock);
Jamie Wellnitz41415862006-02-28 19:25:27 -05003926
James Smart0d878412009-10-02 15:16:56 -04003927 /* Take PCIe device Advanced Error Reporting (AER) state */
3928 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
3929
Jamie Wellnitz41415862006-02-28 19:25:27 -05003930 psli = &phba->sli;
3931
3932 /* Restart HBA */
3933 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
James Smarte8b62012007-08-02 11:10:09 -04003934 "0337 Restart HBA Data: x%x x%x\n",
James Smart2e0fef82007-06-17 19:56:36 -05003935 phba->pport->port_state, psli->sli_flag);
Jamie Wellnitz41415862006-02-28 19:25:27 -05003936
3937 word0 = 0;
3938 mb = (MAILBOX_t *) &word0;
3939 mb->mbxCommand = MBX_RESTART;
3940 mb->mbxHc = 1;
3941
James Smart92908312006-03-07 15:04:13 -05003942 lpfc_reset_barrier(phba);
3943
Jamie Wellnitz41415862006-02-28 19:25:27 -05003944 to_slim = phba->MBslimaddr;
3945 writel(*(uint32_t *) mb, to_slim);
3946 readl(to_slim); /* flush */
3947
3948 /* Only skip post after fc_ffinit is completed */
James Smarteaf15d52008-12-04 22:39:29 -05003949 if (phba->pport->port_state)
Jamie Wellnitz41415862006-02-28 19:25:27 -05003950 word0 = 1; /* This is really setting up word1 */
James Smarteaf15d52008-12-04 22:39:29 -05003951 else
Jamie Wellnitz41415862006-02-28 19:25:27 -05003952 word0 = 0; /* This is really setting up word1 */
James Smart65a29c12006-07-06 15:50:50 -04003953 to_slim = phba->MBslimaddr + sizeof (uint32_t);
Jamie Wellnitz41415862006-02-28 19:25:27 -05003954 writel(*(uint32_t *) mb, to_slim);
3955 readl(to_slim); /* flush */
3956
3957 lpfc_sli_brdreset(phba);
James Smart2e0fef82007-06-17 19:56:36 -05003958 phba->pport->stopped = 0;
3959 phba->link_state = LPFC_INIT_START;
James Smartda0436e2009-05-22 14:51:39 -04003960 phba->hba_flag = 0;
James Smart2e0fef82007-06-17 19:56:36 -05003961 spin_unlock_irq(&phba->hbalock);
Jamie Wellnitz41415862006-02-28 19:25:27 -05003962
James Smart64ba8812006-08-02 15:24:34 -04003963 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
3964 psli->stats_start = get_seconds();
3965
James Smarteaf15d52008-12-04 22:39:29 -05003966 /* Give the INITFF and Post time to settle. */
3967 mdelay(100);
dea31012005-04-17 16:05:31 -05003968
James Smart0d878412009-10-02 15:16:56 -04003969 /* Reset HBA AER if it was enabled, note hba_flag was reset above */
3970 if (hba_aer_enabled)
3971 pci_disable_pcie_error_reporting(phba->pcidev);
3972
Jamie Wellnitz41415862006-02-28 19:25:27 -05003973 lpfc_hba_down_post(phba);
dea31012005-04-17 16:05:31 -05003974
3975 return 0;
3976}
3977
James Smarte59058c2008-08-24 21:49:00 -04003978/**
James Smartda0436e2009-05-22 14:51:39 -04003979 * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba
3980 * @phba: Pointer to HBA context object.
3981 *
3982 * This function is called in the SLI initialization code path to restart
3983 * a SLI4 HBA. The caller is not required to hold any lock.
3984 * At the end of the function, it calls lpfc_hba_down_post function to
3985 * free any pending commands.
3986 **/
3987static int
3988lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
3989{
3990 struct lpfc_sli *psli = &phba->sli;
James Smart75baf692010-06-08 18:31:21 -04003991 uint32_t hba_aer_enabled;
James Smartda0436e2009-05-22 14:51:39 -04003992
3993 /* Restart HBA */
3994 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3995 "0296 Restart HBA Data: x%x x%x\n",
3996 phba->pport->port_state, psli->sli_flag);
3997
James Smart75baf692010-06-08 18:31:21 -04003998 /* Take PCIe device Advanced Error Reporting (AER) state */
3999 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
4000
James Smartda0436e2009-05-22 14:51:39 -04004001 lpfc_sli4_brdreset(phba);
4002
4003 spin_lock_irq(&phba->hbalock);
4004 phba->pport->stopped = 0;
4005 phba->link_state = LPFC_INIT_START;
4006 phba->hba_flag = 0;
4007 spin_unlock_irq(&phba->hbalock);
4008
4009 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
4010 psli->stats_start = get_seconds();
4011
James Smart75baf692010-06-08 18:31:21 -04004012 /* Reset HBA AER if it was enabled, note hba_flag was reset above */
4013 if (hba_aer_enabled)
4014 pci_disable_pcie_error_reporting(phba->pcidev);
4015
James Smartda0436e2009-05-22 14:51:39 -04004016 lpfc_hba_down_post(phba);
4017
4018 return 0;
4019}
4020
4021/**
4022 * lpfc_sli_brdrestart - Wrapper func for restarting hba
4023 * @phba: Pointer to HBA context object.
4024 *
 4025 * This routine dispatches to the actual SLI-3 or SLI-4 HBA restart routine
 4026 * through the API jump table function pointer in the lpfc_hba struct.
4027**/
4028int
4029lpfc_sli_brdrestart(struct lpfc_hba *phba)
4030{
4031 return phba->lpfc_sli_brdrestart(phba);
4032}
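
/*
 * Illustrative sketch, not part of the driver: the SLI-3/SLI-4 split is
 * handled with a jump table - the revision-specific routine is stored in
 * a function pointer at setup time and the generic wrapper above simply
 * dispatches through it. All names below are invented for this example.
 */
#if 0	/* example only - not compiled */
struct example_hba {
	int (*brdrestart)(struct example_hba *hba);
};

static int example_brdrestart_s3(struct example_hba *hba) { return 0; }
static int example_brdrestart_s4(struct example_hba *hba) { return 0; }

static void example_setup_ops(struct example_hba *hba, int sli_rev)
{
	/* Choose the per-revision implementation once, at init time */
	hba->brdrestart = (sli_rev == 4) ? example_brdrestart_s4
					 : example_brdrestart_s3;
}
#endif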
4033
4034/**
James Smart3621a712009-04-06 18:47:14 -04004035 * lpfc_sli_chipset_init - Wait for the restart of the HBA after a restart
James Smarte59058c2008-08-24 21:49:00 -04004036 * @phba: Pointer to HBA context object.
4037 *
 4038 * This function is called after an HBA restart to wait for successful
 4039 * restart of the HBA. Successful restart of the HBA is indicated by
 4040 * the HS_FFRDY and HS_MBRDY bits. If the HBA is not ready by the 150th
 4041 * poll iteration, the function restarts the HBA once more. The function
 4042 * returns zero if the HBA restarted successfully, else a negative error code.
4043 **/
dea31012005-04-17 16:05:31 -05004044static int
4045lpfc_sli_chipset_init(struct lpfc_hba *phba)
4046{
4047 uint32_t status, i = 0;
4048
4049 /* Read the HBA Host Status Register */
James Smart9940b972011-03-11 16:06:12 -05004050 if (lpfc_readl(phba->HSregaddr, &status))
4051 return -EIO;
dea31012005-04-17 16:05:31 -05004052
4053 /* Check status register to see what current state is */
4054 i = 0;
4055 while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {
4056
James Smartdcf2a4e2010-09-29 11:18:53 -04004057		/* Check every 10ms for 10 retries, then every 100ms for 90
 4058		 * retries, then every 1 sec for 50 retries, for a total of
 4059		 * ~60 seconds before resetting the board again and checking
 4060		 * every 1 sec for another 50 retries. Waiting up to 60 seconds
 4061		 * for board ready is required for the Falcon FIPS zeroization
 4062		 * to complete; any board reset in between restarts the
 4063		 * zeroization and further delays board readiness.
dea31012005-04-17 16:05:31 -05004064 */
James Smartdcf2a4e2010-09-29 11:18:53 -04004065 if (i++ >= 200) {
dea31012005-04-17 16:05:31 -05004066 /* Adapter failed to init, timeout, status reg
4067 <status> */
James Smarted957682007-06-17 19:56:37 -05004068 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
James Smarte8b62012007-08-02 11:10:09 -04004069 "0436 Adapter failed to init, "
James Smart09372822008-01-11 01:52:54 -05004070 "timeout, status reg x%x, "
4071 "FW Data: A8 x%x AC x%x\n", status,
4072 readl(phba->MBslimaddr + 0xa8),
4073 readl(phba->MBslimaddr + 0xac));
James Smart2e0fef82007-06-17 19:56:36 -05004074 phba->link_state = LPFC_HBA_ERROR;
dea31012005-04-17 16:05:31 -05004075 return -ETIMEDOUT;
4076 }
4077
4078 /* Check to see if any errors occurred during init */
4079 if (status & HS_FFERM) {
4080 /* ERROR: During chipset initialization */
4081 /* Adapter failed to init, chipset, status reg
4082 <status> */
James Smarted957682007-06-17 19:56:37 -05004083 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
James Smarte8b62012007-08-02 11:10:09 -04004084 "0437 Adapter failed to init, "
James Smart09372822008-01-11 01:52:54 -05004085 "chipset, status reg x%x, "
4086 "FW Data: A8 x%x AC x%x\n", status,
4087 readl(phba->MBslimaddr + 0xa8),
4088 readl(phba->MBslimaddr + 0xac));
James Smart2e0fef82007-06-17 19:56:36 -05004089 phba->link_state = LPFC_HBA_ERROR;
dea31012005-04-17 16:05:31 -05004090 return -EIO;
4091 }
4092
James Smartdcf2a4e2010-09-29 11:18:53 -04004093 if (i <= 10)
dea31012005-04-17 16:05:31 -05004094 msleep(10);
James Smartdcf2a4e2010-09-29 11:18:53 -04004095 else if (i <= 100)
4096 msleep(100);
4097 else
4098 msleep(1000);
dea31012005-04-17 16:05:31 -05004099
James Smartdcf2a4e2010-09-29 11:18:53 -04004100 if (i == 150) {
4101 /* Do post */
James Smart92d7f7b2007-06-17 19:56:38 -05004102 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
Jamie Wellnitz41415862006-02-28 19:25:27 -05004103 lpfc_sli_brdrestart(phba);
dea31012005-04-17 16:05:31 -05004104 }
4105 /* Read the HBA Host Status Register */
James Smart9940b972011-03-11 16:06:12 -05004106 if (lpfc_readl(phba->HSregaddr, &status))
4107 return -EIO;
dea31012005-04-17 16:05:31 -05004108 }
4109
4110 /* Check to see if any errors occurred during init */
4111 if (status & HS_FFERM) {
4112 /* ERROR: During chipset initialization */
4113 /* Adapter failed to init, chipset, status reg <status> */
James Smarted957682007-06-17 19:56:37 -05004114 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
James Smarte8b62012007-08-02 11:10:09 -04004115 "0438 Adapter failed to init, chipset, "
James Smart09372822008-01-11 01:52:54 -05004116 "status reg x%x, "
4117 "FW Data: A8 x%x AC x%x\n", status,
4118 readl(phba->MBslimaddr + 0xa8),
4119 readl(phba->MBslimaddr + 0xac));
James Smart2e0fef82007-06-17 19:56:36 -05004120 phba->link_state = LPFC_HBA_ERROR;
dea31012005-04-17 16:05:31 -05004121 return -EIO;
4122 }
4123
4124 /* Clear all interrupt enable conditions */
4125 writel(0, phba->HCregaddr);
4126 readl(phba->HCregaddr); /* flush */
4127
4128 /* setup host attn register */
4129 writel(0xffffffff, phba->HAregaddr);
4130 readl(phba->HAregaddr); /* flush */
4131 return 0;
4132}
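
/*
 * Illustrative sketch, not part of the driver: the tiered polling used in
 * lpfc_sli_chipset_init() above - poll quickly at first, then back off -
 * expressed as a small standalone loop. The 10ms/100ms/1s tiers mirror
 * the code above (boundaries approximate); the ready() callback is an
 * assumption made only for this example.
 */
#if 0	/* example only - not compiled */
static int example_wait_ready(bool (*ready)(void))
{
	int i;

	for (i = 0; i < 200; i++) {
		if (ready())
			return 0;
		if (i < 10)
			msleep(10);	/* first tier: every 10ms */
		else if (i < 100)
			msleep(100);	/* second tier: every 100ms */
		else
			msleep(1000);	/* final tier: every second */
	}
	return -ETIMEDOUT;
}
#endif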
4133
James Smarte59058c2008-08-24 21:49:00 -04004134/**
James Smart3621a712009-04-06 18:47:14 -04004135 * lpfc_sli_hbq_count - Get the number of HBQs to be configured
James Smarte59058c2008-08-24 21:49:00 -04004136 *
4137 * This function calculates and returns the number of HBQs required to be
4138 * configured.
4139 **/
James Smart78b2d852007-08-02 11:10:21 -04004140int
James Smarted957682007-06-17 19:56:37 -05004141lpfc_sli_hbq_count(void)
4142{
James Smart92d7f7b2007-06-17 19:56:38 -05004143 return ARRAY_SIZE(lpfc_hbq_defs);
James Smarted957682007-06-17 19:56:37 -05004144}
4145
James Smarte59058c2008-08-24 21:49:00 -04004146/**
James Smart3621a712009-04-06 18:47:14 -04004147 * lpfc_sli_hbq_entry_count - Calculate total number of hbq entries
James Smarte59058c2008-08-24 21:49:00 -04004148 *
4149 * This function adds the number of hbq entries in every HBQ to get
4150 * the total number of hbq entries required for the HBA and returns
4151 * the total count.
4152 **/
James Smarted957682007-06-17 19:56:37 -05004153static int
4154lpfc_sli_hbq_entry_count(void)
4155{
4156 int hbq_count = lpfc_sli_hbq_count();
4157 int count = 0;
4158 int i;
4159
4160 for (i = 0; i < hbq_count; ++i)
James Smart92d7f7b2007-06-17 19:56:38 -05004161 count += lpfc_hbq_defs[i]->entry_count;
James Smarted957682007-06-17 19:56:37 -05004162 return count;
4163}
4164
James Smarte59058c2008-08-24 21:49:00 -04004165/**
James Smart3621a712009-04-06 18:47:14 -04004166 * lpfc_sli_hbq_size - Calculate memory required for all hbq entries
James Smarte59058c2008-08-24 21:49:00 -04004167 *
 4168 * This function calculates the amount of memory required for all hbq entries
4169 * to be configured and returns the total memory required.
4170 **/
dea31012005-04-17 16:05:31 -05004171int
James Smarted957682007-06-17 19:56:37 -05004172lpfc_sli_hbq_size(void)
4173{
4174 return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry);
4175}
4176
James Smarte59058c2008-08-24 21:49:00 -04004177/**
James Smart3621a712009-04-06 18:47:14 -04004178 * lpfc_sli_hbq_setup - configure and initialize HBQs
James Smarte59058c2008-08-24 21:49:00 -04004179 * @phba: Pointer to HBA context object.
4180 *
4181 * This function is called during the SLI initialization to configure
4182 * all the HBQs and post buffers to the HBQ. The caller is not
4183 * required to hold any locks. This function will return zero if successful
4184 * else it will return negative error code.
4185 **/
James Smarted957682007-06-17 19:56:37 -05004186static int
4187lpfc_sli_hbq_setup(struct lpfc_hba *phba)
4188{
4189 int hbq_count = lpfc_sli_hbq_count();
4190 LPFC_MBOXQ_t *pmb;
4191 MAILBOX_t *pmbox;
4192 uint32_t hbqno;
4193 uint32_t hbq_entry_index;
James Smarted957682007-06-17 19:56:37 -05004194
James Smart92d7f7b2007-06-17 19:56:38 -05004195 /* Get a Mailbox buffer to setup mailbox
4196 * commands for HBA initialization
4197 */
James Smarted957682007-06-17 19:56:37 -05004198 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4199
4200 if (!pmb)
4201 return -ENOMEM;
4202
James Smart04c68492009-05-22 14:52:52 -04004203 pmbox = &pmb->u.mb;
James Smarted957682007-06-17 19:56:37 -05004204
4205 /* Initialize the struct lpfc_sli_hbq structure for each hbq */
4206 phba->link_state = LPFC_INIT_MBX_CMDS;
James Smart3163f722008-02-08 18:50:25 -05004207 phba->hbq_in_use = 1;
James Smarted957682007-06-17 19:56:37 -05004208
4209 hbq_entry_index = 0;
4210 for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
4211 phba->hbqs[hbqno].next_hbqPutIdx = 0;
4212 phba->hbqs[hbqno].hbqPutIdx = 0;
4213 phba->hbqs[hbqno].local_hbqGetIdx = 0;
4214 phba->hbqs[hbqno].entry_count =
James Smart92d7f7b2007-06-17 19:56:38 -05004215 lpfc_hbq_defs[hbqno]->entry_count;
James Smart51ef4c22007-08-02 11:10:31 -04004216 lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno],
4217 hbq_entry_index, pmb);
James Smarted957682007-06-17 19:56:37 -05004218 hbq_entry_index += phba->hbqs[hbqno].entry_count;
4219
4220 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
4221 /* Adapter failed to init, mbxCmd <cmd> CFG_RING,
4222 mbxStatus <status>, ring <num> */
4223
4224 lpfc_printf_log(phba, KERN_ERR,
James Smart92d7f7b2007-06-17 19:56:38 -05004225 LOG_SLI | LOG_VPORT,
James Smarte8b62012007-08-02 11:10:09 -04004226 "1805 Adapter failed to init. "
James Smarted957682007-06-17 19:56:37 -05004227 "Data: x%x x%x x%x\n",
James Smarte8b62012007-08-02 11:10:09 -04004228 pmbox->mbxCommand,
James Smarted957682007-06-17 19:56:37 -05004229 pmbox->mbxStatus, hbqno);
4230
4231 phba->link_state = LPFC_HBA_ERROR;
4232 mempool_free(pmb, phba->mbox_mem_pool);
James Smart6e7288d2010-06-07 15:23:35 -04004233 return -ENXIO;
James Smarted957682007-06-17 19:56:37 -05004234 }
4235 }
4236 phba->hbq_count = hbq_count;
4237
James Smarted957682007-06-17 19:56:37 -05004238 mempool_free(pmb, phba->mbox_mem_pool);
4239
James Smart92d7f7b2007-06-17 19:56:38 -05004240 /* Initially populate or replenish the HBQs */
James Smartd7c255b2008-08-24 21:50:00 -04004241 for (hbqno = 0; hbqno < hbq_count; ++hbqno)
4242 lpfc_sli_hbqbuf_init_hbqs(phba, hbqno);
James Smarted957682007-06-17 19:56:37 -05004243 return 0;
4244}
4245
James Smarte59058c2008-08-24 21:49:00 -04004246/**
James Smart4f774512009-05-22 14:52:35 -04004247 * lpfc_sli4_rb_setup - Initialize and post RBs to HBA
4248 * @phba: Pointer to HBA context object.
4249 *
 4250 * This function is called during SLI4 initialization to configure
 4251 * the receive buffer queue (HBQ 0) and post buffers to it. The caller is not
4252 * required to hold any locks. This function will return zero if successful
4253 * else it will return negative error code.
4254 **/
4255static int
4256lpfc_sli4_rb_setup(struct lpfc_hba *phba)
4257{
4258 phba->hbq_in_use = 1;
4259 phba->hbqs[0].entry_count = lpfc_hbq_defs[0]->entry_count;
4260 phba->hbq_count = 1;
4261 /* Initially populate or replenish the HBQs */
4262 lpfc_sli_hbqbuf_init_hbqs(phba, 0);
4263 return 0;
4264}
4265
4266/**
James Smart3621a712009-04-06 18:47:14 -04004267 * lpfc_sli_config_port - Issue config port mailbox command
James Smarte59058c2008-08-24 21:49:00 -04004268 * @phba: Pointer to HBA context object.
4269 * @sli_mode: sli mode - 2/3
4270 *
 4271 * This function is called by the SLI initialization code path
 4272 * to issue the config_port mailbox command. It restarts the
 4273 * HBA firmware and issues the config_port mailbox command to configure
 4274 * the SLI interface in the SLI mode specified by the sli_mode
 4275 * parameter. The caller is not required to hold any locks.
4276 * The function returns 0 if successful, else returns negative error
4277 * code.
4278 **/
James Smart93996272008-08-24 21:50:30 -04004279int
4280lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
dea31012005-04-17 16:05:31 -05004281{
4282 LPFC_MBOXQ_t *pmb;
4283 uint32_t resetcount = 0, rc = 0, done = 0;
4284
4285 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4286 if (!pmb) {
James Smart2e0fef82007-06-17 19:56:36 -05004287 phba->link_state = LPFC_HBA_ERROR;
dea31012005-04-17 16:05:31 -05004288 return -ENOMEM;
4289 }
4290
James Smarted957682007-06-17 19:56:37 -05004291 phba->sli_rev = sli_mode;
dea31012005-04-17 16:05:31 -05004292 while (resetcount < 2 && !done) {
James Smart2e0fef82007-06-17 19:56:36 -05004293 spin_lock_irq(&phba->hbalock);
James Smart1c067a42006-08-01 07:33:52 -04004294 phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
James Smart2e0fef82007-06-17 19:56:36 -05004295 spin_unlock_irq(&phba->hbalock);
James Smart92d7f7b2007-06-17 19:56:38 -05004296 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
Jamie Wellnitz41415862006-02-28 19:25:27 -05004297 lpfc_sli_brdrestart(phba);
dea31012005-04-17 16:05:31 -05004298 rc = lpfc_sli_chipset_init(phba);
4299 if (rc)
4300 break;
4301
James Smart2e0fef82007-06-17 19:56:36 -05004302 spin_lock_irq(&phba->hbalock);
James Smart1c067a42006-08-01 07:33:52 -04004303 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
James Smart2e0fef82007-06-17 19:56:36 -05004304 spin_unlock_irq(&phba->hbalock);
dea31012005-04-17 16:05:31 -05004305 resetcount++;
4306
James Smarted957682007-06-17 19:56:37 -05004307 /* Call pre CONFIG_PORT mailbox command initialization. A
4308 * value of 0 means the call was successful. Any other
4309 * nonzero value is a failure, but if ERESTART is returned,
4310 * the driver may reset the HBA and try again.
4311 */
dea31012005-04-17 16:05:31 -05004312 rc = lpfc_config_port_prep(phba);
4313 if (rc == -ERESTART) {
James Smarted957682007-06-17 19:56:37 -05004314 phba->link_state = LPFC_LINK_UNKNOWN;
dea31012005-04-17 16:05:31 -05004315 continue;
James Smart34b02dc2008-08-24 21:49:55 -04004316 } else if (rc)
dea31012005-04-17 16:05:31 -05004317 break;
James Smart6d368e52011-05-24 11:44:12 -04004318
James Smart2e0fef82007-06-17 19:56:36 -05004319 phba->link_state = LPFC_INIT_MBX_CMDS;
dea31012005-04-17 16:05:31 -05004320 lpfc_config_port(phba, pmb);
4321 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
James Smart34b02dc2008-08-24 21:49:55 -04004322 phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED |
4323 LPFC_SLI3_HBQ_ENABLED |
4324 LPFC_SLI3_CRP_ENABLED |
James Smartbc739052010-08-04 16:11:18 -04004325 LPFC_SLI3_BG_ENABLED |
4326 LPFC_SLI3_DSS_ENABLED);
James Smarted957682007-06-17 19:56:37 -05004327 if (rc != MBX_SUCCESS) {
dea31012005-04-17 16:05:31 -05004328 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
James Smarte8b62012007-08-02 11:10:09 -04004329 "0442 Adapter failed to init, mbxCmd x%x "
James Smart92d7f7b2007-06-17 19:56:38 -05004330 "CONFIG_PORT, mbxStatus x%x Data: x%x\n",
James Smart04c68492009-05-22 14:52:52 -04004331 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0);
James Smart2e0fef82007-06-17 19:56:36 -05004332 spin_lock_irq(&phba->hbalock);
James Smart04c68492009-05-22 14:52:52 -04004333 phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
James Smart2e0fef82007-06-17 19:56:36 -05004334 spin_unlock_irq(&phba->hbalock);
4335 rc = -ENXIO;
James Smart04c68492009-05-22 14:52:52 -04004336 } else {
4337 /* Allow asynchronous mailbox command to go through */
4338 spin_lock_irq(&phba->hbalock);
4339 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
4340 spin_unlock_irq(&phba->hbalock);
James Smarted957682007-06-17 19:56:37 -05004341 done = 1;
James Smart04c68492009-05-22 14:52:52 -04004342 }
dea31012005-04-17 16:05:31 -05004343 }
James Smarted957682007-06-17 19:56:37 -05004344 if (!done) {
4345 rc = -EINVAL;
4346 goto do_prep_failed;
4347 }
James Smart04c68492009-05-22 14:52:52 -04004348 if (pmb->u.mb.un.varCfgPort.sli_mode == 3) {
4349 if (!pmb->u.mb.un.varCfgPort.cMA) {
James Smart34b02dc2008-08-24 21:49:55 -04004350 rc = -ENXIO;
4351 goto do_prep_failed;
4352 }
James Smart04c68492009-05-22 14:52:52 -04004353 if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) {
James Smart34b02dc2008-08-24 21:49:55 -04004354 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
James Smart04c68492009-05-22 14:52:52 -04004355 phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi;
4356 phba->max_vports = (phba->max_vpi > phba->max_vports) ?
4357 phba->max_vpi : phba->max_vports;
4358
James Smart34b02dc2008-08-24 21:49:55 -04004359 } else
4360 phba->max_vpi = 0;
James Smartbc739052010-08-04 16:11:18 -04004361 phba->fips_level = 0;
4362 phba->fips_spec_rev = 0;
4363 if (pmb->u.mb.un.varCfgPort.gdss) {
James Smart04c68492009-05-22 14:52:52 -04004364 phba->sli3_options |= LPFC_SLI3_DSS_ENABLED;
James Smartbc739052010-08-04 16:11:18 -04004365 phba->fips_level = pmb->u.mb.un.varCfgPort.fips_level;
4366 phba->fips_spec_rev = pmb->u.mb.un.varCfgPort.fips_rev;
4367 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4368 "2850 Security Crypto Active. FIPS x%d "
4369 "(Spec Rev: x%d)",
4370 phba->fips_level, phba->fips_spec_rev);
4371 }
4372 if (pmb->u.mb.un.varCfgPort.sec_err) {
4373 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4374 "2856 Config Port Security Crypto "
4375 "Error: x%x ",
4376 pmb->u.mb.un.varCfgPort.sec_err);
4377 }
James Smart04c68492009-05-22 14:52:52 -04004378 if (pmb->u.mb.un.varCfgPort.gerbm)
James Smart34b02dc2008-08-24 21:49:55 -04004379 phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
James Smart04c68492009-05-22 14:52:52 -04004380 if (pmb->u.mb.un.varCfgPort.gcrp)
James Smart34b02dc2008-08-24 21:49:55 -04004381 phba->sli3_options |= LPFC_SLI3_CRP_ENABLED;
James Smart6e7288d2010-06-07 15:23:35 -04004382
4383 phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get;
4384 phba->port_gp = phba->mbox->us.s3_pgp.port;
James Smarte2a0a9d2008-12-04 22:40:02 -05004385
4386 if (phba->cfg_enable_bg) {
James Smart04c68492009-05-22 14:52:52 -04004387 if (pmb->u.mb.un.varCfgPort.gbg)
James Smarte2a0a9d2008-12-04 22:40:02 -05004388 phba->sli3_options |= LPFC_SLI3_BG_ENABLED;
4389 else
4390 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4391 "0443 Adapter did not grant "
4392 "BlockGuard\n");
4393 }
James Smart34b02dc2008-08-24 21:49:55 -04004394 } else {
James Smart8f34f4c2008-12-04 22:39:23 -05004395 phba->hbq_get = NULL;
James Smart34b02dc2008-08-24 21:49:55 -04004396 phba->port_gp = phba->mbox->us.s2.port;
James Smartd7c255b2008-08-24 21:50:00 -04004397 phba->max_vpi = 0;
James Smarted957682007-06-17 19:56:37 -05004398 }
James Smart92d7f7b2007-06-17 19:56:38 -05004399do_prep_failed:
James Smarted957682007-06-17 19:56:37 -05004400 mempool_free(pmb, phba->mbox_mem_pool);
4401 return rc;
4402}
4403
James Smarte59058c2008-08-24 21:49:00 -04004404
4405/**
James Smart3621a712009-04-06 18:47:14 -04004406 * lpfc_sli_hba_setup - SLI initialization function
James Smarte59058c2008-08-24 21:49:00 -04004407 * @phba: Pointer to HBA context object.
4408 *
 4409 * This function is the main SLI initialization function. This function
 4410 * is called by the HBA initialization code, the HBA reset code and the HBA
 4411 * error attention handler code. The caller is not required to hold any
 4412 * locks. This function issues the config_port mailbox command to configure
 4413 * the SLI, and sets up the iocb rings and HBQ rings. In the end the function
 4414 * calls the config_port_post function to issue the init_link mailbox
 4415 * command and to start discovery. The function will return zero
 4416 * if successful, else it will return a negative error code.
4417 **/
James Smarted957682007-06-17 19:56:37 -05004418int
4419lpfc_sli_hba_setup(struct lpfc_hba *phba)
4420{
4421 uint32_t rc;
James Smart6d368e52011-05-24 11:44:12 -04004422 int mode = 3, i;
4423 int longs;
James Smarted957682007-06-17 19:56:37 -05004424
4425 switch (lpfc_sli_mode) {
4426 case 2:
James Smart78b2d852007-08-02 11:10:21 -04004427 if (phba->cfg_enable_npiv) {
James Smart92d7f7b2007-06-17 19:56:38 -05004428 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
James Smarte8b62012007-08-02 11:10:09 -04004429 "1824 NPIV enabled: Override lpfc_sli_mode "
James Smart92d7f7b2007-06-17 19:56:38 -05004430 "parameter (%d) to auto (0).\n",
James Smarte8b62012007-08-02 11:10:09 -04004431 lpfc_sli_mode);
James Smart92d7f7b2007-06-17 19:56:38 -05004432 break;
4433 }
James Smarted957682007-06-17 19:56:37 -05004434 mode = 2;
4435 break;
4436 case 0:
4437 case 3:
4438 break;
4439 default:
James Smart92d7f7b2007-06-17 19:56:38 -05004440 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
James Smarte8b62012007-08-02 11:10:09 -04004441 "1819 Unrecognized lpfc_sli_mode "
4442 "parameter: %d.\n", lpfc_sli_mode);
James Smarted957682007-06-17 19:56:37 -05004443
4444 break;
4445 }
4446
James Smart93996272008-08-24 21:50:30 -04004447 rc = lpfc_sli_config_port(phba, mode);
4448
James Smarted957682007-06-17 19:56:37 -05004449 if (rc && lpfc_sli_mode == 3)
James Smart92d7f7b2007-06-17 19:56:38 -05004450 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
James Smarte8b62012007-08-02 11:10:09 -04004451 "1820 Unable to select SLI-3. "
4452 "Not supported by adapter.\n");
James Smarted957682007-06-17 19:56:37 -05004453 if (rc && mode != 2)
James Smart93996272008-08-24 21:50:30 -04004454 rc = lpfc_sli_config_port(phba, 2);
James Smarted957682007-06-17 19:56:37 -05004455 if (rc)
dea31012005-04-17 16:05:31 -05004456 goto lpfc_sli_hba_setup_error;
4457
James Smart0d878412009-10-02 15:16:56 -04004458 /* Enable PCIe device Advanced Error Reporting (AER) if configured */
4459 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
4460 rc = pci_enable_pcie_error_reporting(phba->pcidev);
4461 if (!rc) {
4462 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4463 "2709 This device supports "
4464 "Advanced Error Reporting (AER)\n");
4465 spin_lock_irq(&phba->hbalock);
4466 phba->hba_flag |= HBA_AER_ENABLED;
4467 spin_unlock_irq(&phba->hbalock);
4468 } else {
4469 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4470 "2708 This device does not support "
4471 "Advanced Error Reporting (AER)\n");
4472 phba->cfg_aer_support = 0;
4473 }
4474 }
4475
James Smarted957682007-06-17 19:56:37 -05004476 if (phba->sli_rev == 3) {
4477 phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
4478 phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
James Smarted957682007-06-17 19:56:37 -05004479 } else {
4480 phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
4481 phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
James Smart92d7f7b2007-06-17 19:56:38 -05004482 phba->sli3_options = 0;
James Smarted957682007-06-17 19:56:37 -05004483 }
4484
4485 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
James Smarte8b62012007-08-02 11:10:09 -04004486 "0444 Firmware in SLI %x mode. Max_vpi %d\n",
4487 phba->sli_rev, phba->max_vpi);
James Smarted957682007-06-17 19:56:37 -05004488 rc = lpfc_sli_ring_map(phba);
dea31012005-04-17 16:05:31 -05004489
4490 if (rc)
4491 goto lpfc_sli_hba_setup_error;
4492
James Smart6d368e52011-05-24 11:44:12 -04004493 /* Initialize VPIs. */
4494 if (phba->sli_rev == LPFC_SLI_REV3) {
4495 /*
4496 * The VPI bitmask and physical ID array are allocated
4497 * and initialized once only - at driver load. A port
4498 * reset doesn't need to reinitialize this memory.
4499 */
4500 if ((phba->vpi_bmask == NULL) && (phba->vpi_ids == NULL)) {
4501 longs = (phba->max_vpi + BITS_PER_LONG) / BITS_PER_LONG;
4502 phba->vpi_bmask = kzalloc(longs * sizeof(unsigned long),
4503 GFP_KERNEL);
4504 if (!phba->vpi_bmask) {
4505 rc = -ENOMEM;
4506 goto lpfc_sli_hba_setup_error;
4507 }
4508
4509 phba->vpi_ids = kzalloc(
4510 (phba->max_vpi+1) * sizeof(uint16_t),
4511 GFP_KERNEL);
4512 if (!phba->vpi_ids) {
4513 kfree(phba->vpi_bmask);
4514 rc = -ENOMEM;
4515 goto lpfc_sli_hba_setup_error;
4516 }
4517 for (i = 0; i < phba->max_vpi; i++)
4518 phba->vpi_ids[i] = i;
4519 }
4520 }
4521
James Smart93996272008-08-24 21:50:30 -04004522 /* Init HBQs */
James Smarted957682007-06-17 19:56:37 -05004523 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
4524 rc = lpfc_sli_hbq_setup(phba);
4525 if (rc)
4526 goto lpfc_sli_hba_setup_error;
4527 }
James Smart04c68492009-05-22 14:52:52 -04004528 spin_lock_irq(&phba->hbalock);
dea31012005-04-17 16:05:31 -05004529 phba->sli.sli_flag |= LPFC_PROCESS_LA;
James Smart04c68492009-05-22 14:52:52 -04004530 spin_unlock_irq(&phba->hbalock);
dea31012005-04-17 16:05:31 -05004531
4532 rc = lpfc_config_port_post(phba);
4533 if (rc)
4534 goto lpfc_sli_hba_setup_error;
4535
James Smarted957682007-06-17 19:56:37 -05004536 return rc;
4537
James Smart92d7f7b2007-06-17 19:56:38 -05004538lpfc_sli_hba_setup_error:
James Smart2e0fef82007-06-17 19:56:36 -05004539 phba->link_state = LPFC_HBA_ERROR;
James Smarte40a02c2010-02-26 14:13:54 -05004540 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
James Smarte8b62012007-08-02 11:10:09 -04004541 "0445 Firmware initialization failed\n");
dea31012005-04-17 16:05:31 -05004542 return rc;
4543}
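
/*
 * Illustrative sketch, not part of the driver: sizing a bitmask for ids
 * 0..max_id, as done above for the VPI pool. (max_id + BITS_PER_LONG) /
 * BITS_PER_LONG is the ceiling of (max_id + 1) / BITS_PER_LONG, i.e. the
 * number of unsigned longs needed to hold one bit per id. The helper
 * name is an assumption made only for this example.
 */
#if 0	/* example only - not compiled */
static unsigned long *example_alloc_id_bitmap(unsigned int max_id)
{
	unsigned int longs = (max_id + BITS_PER_LONG) / BITS_PER_LONG;

	/* One bit per id in 0..max_id, initially all clear (free) */
	return kzalloc(longs * sizeof(unsigned long), GFP_KERNEL);
}
#endif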
4544
James Smartda0436e2009-05-22 14:51:39 -04004545/**
4546 * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region
4547 * @phba: Pointer to HBA context object.
 4548 *
 4549 * This function issues a dump mailbox command to read config region
 4550 * 23, parses the records in the region and populates the driver
 4551 * data structures.
4552 **/
4553static int
James Smartff78d8f2011-12-13 13:21:35 -05004554lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba)
James Smartda0436e2009-05-22 14:51:39 -04004555{
James Smartff78d8f2011-12-13 13:21:35 -05004556 LPFC_MBOXQ_t *mboxq;
James Smartda0436e2009-05-22 14:51:39 -04004557 struct lpfc_dmabuf *mp;
4558 struct lpfc_mqe *mqe;
4559 uint32_t data_length;
4560 int rc;
4561
4562 /* Program the default value of vlan_id and fc_map */
4563 phba->valid_vlan = 0;
4564 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
4565 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
4566 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
4567
James Smartff78d8f2011-12-13 13:21:35 -05004568 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4569 if (!mboxq)
James Smartda0436e2009-05-22 14:51:39 -04004570 return -ENOMEM;
4571
James Smartff78d8f2011-12-13 13:21:35 -05004572 mqe = &mboxq->u.mqe;
4573 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) {
4574 rc = -ENOMEM;
4575 goto out_free_mboxq;
4576 }
4577
James Smartda0436e2009-05-22 14:51:39 -04004578 mp = (struct lpfc_dmabuf *) mboxq->context1;
4579 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4580
4581 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
4582 "(%d):2571 Mailbox cmd x%x Status x%x "
4583 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
4584 "x%x x%x x%x x%x x%x x%x x%x x%x x%x "
4585 "CQ: x%x x%x x%x x%x\n",
4586 mboxq->vport ? mboxq->vport->vpi : 0,
4587 bf_get(lpfc_mqe_command, mqe),
4588 bf_get(lpfc_mqe_status, mqe),
4589 mqe->un.mb_words[0], mqe->un.mb_words[1],
4590 mqe->un.mb_words[2], mqe->un.mb_words[3],
4591 mqe->un.mb_words[4], mqe->un.mb_words[5],
4592 mqe->un.mb_words[6], mqe->un.mb_words[7],
4593 mqe->un.mb_words[8], mqe->un.mb_words[9],
4594 mqe->un.mb_words[10], mqe->un.mb_words[11],
4595 mqe->un.mb_words[12], mqe->un.mb_words[13],
4596 mqe->un.mb_words[14], mqe->un.mb_words[15],
4597 mqe->un.mb_words[16], mqe->un.mb_words[50],
4598 mboxq->mcqe.word0,
4599 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
4600 mboxq->mcqe.trailer);
4601
4602 if (rc) {
4603 lpfc_mbuf_free(phba, mp->virt, mp->phys);
4604 kfree(mp);
James Smartff78d8f2011-12-13 13:21:35 -05004605 rc = -EIO;
4606 goto out_free_mboxq;
James Smartda0436e2009-05-22 14:51:39 -04004607 }
4608 data_length = mqe->un.mb_words[5];
James Smarta0c87cb2009-07-19 10:01:10 -04004609 if (data_length > DMP_RGN23_SIZE) {
James Smartd11e31d2009-06-10 17:23:06 -04004610 lpfc_mbuf_free(phba, mp->virt, mp->phys);
4611 kfree(mp);
James Smartff78d8f2011-12-13 13:21:35 -05004612 rc = -EIO;
4613 goto out_free_mboxq;
James Smartd11e31d2009-06-10 17:23:06 -04004614 }
James Smartda0436e2009-05-22 14:51:39 -04004615
4616 lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
4617 lpfc_mbuf_free(phba, mp->virt, mp->phys);
4618 kfree(mp);
James Smartff78d8f2011-12-13 13:21:35 -05004619 rc = 0;
4620
4621out_free_mboxq:
4622 mempool_free(mboxq, phba->mbox_mem_pool);
4623 return rc;
James Smartda0436e2009-05-22 14:51:39 -04004624}
4625
4626/**
4627 * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data
4628 * @phba: pointer to lpfc hba data structure.
4629 * @mboxq: pointer to the LPFC_MBOXQ_t structure.
4630 * @vpd: pointer to the memory to hold resulting port vpd data.
4631 * @vpd_size: On input, the number of bytes allocated to @vpd.
4632 * On output, the number of data bytes in @vpd.
4633 *
4634 * This routine executes a READ_REV SLI4 mailbox command. In
4635 * addition, this routine gets the port vpd data.
4636 *
4637 * Return codes
André Goddard Rosaaf901ca2009-11-14 13:09:05 -02004638 * 0 - successful
James Smartd439d282010-09-29 11:18:45 -04004639 * 	-ENOMEM - could not allocate memory.
James Smartda0436e2009-05-22 14:51:39 -04004640 **/
4641static int
4642lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
4643 uint8_t *vpd, uint32_t *vpd_size)
4644{
4645 int rc = 0;
4646 uint32_t dma_size;
4647 struct lpfc_dmabuf *dmabuf;
4648 struct lpfc_mqe *mqe;
4649
4650 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
4651 if (!dmabuf)
4652 return -ENOMEM;
4653
4654 /*
4655 * Get a DMA buffer for the vpd data resulting from the READ_REV
4656 * mailbox command.
4657 */
4658 dma_size = *vpd_size;
4659 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
4660 dma_size,
4661 &dmabuf->phys,
4662 GFP_KERNEL);
4663 if (!dmabuf->virt) {
4664 kfree(dmabuf);
4665 return -ENOMEM;
4666 }
4667 memset(dmabuf->virt, 0, dma_size);
4668
4669 /*
4670 * The SLI4 implementation of READ_REV conflicts at word1,
4671 * bits 31:16 and SLI4 adds vpd functionality not present
4672 * in SLI3. This code corrects the conflicts.
4673 */
4674 lpfc_read_rev(phba, mboxq);
4675 mqe = &mboxq->u.mqe;
4676 mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys);
4677 mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys);
4678 mqe->un.read_rev.word1 &= 0x0000FFFF;
4679 bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1);
4680 bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size);
4681
4682 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4683 if (rc) {
4684 dma_free_coherent(&phba->pcidev->dev, dma_size,
4685 dmabuf->virt, dmabuf->phys);
James Smartdef9c7a2009-12-21 17:02:28 -05004686 kfree(dmabuf);
James Smartda0436e2009-05-22 14:51:39 -04004687 return -EIO;
4688 }
4689
James Smartda0436e2009-05-22 14:51:39 -04004690 /*
4691 * The available vpd length cannot be bigger than the
4692 * DMA buffer passed to the port. Catch the less than
4693 * case and update the caller's size.
4694 */
4695 if (mqe->un.read_rev.avail_vpd_len < *vpd_size)
4696 *vpd_size = mqe->un.read_rev.avail_vpd_len;
4697
James Smartd7c47992010-06-08 18:31:54 -04004698 memcpy(vpd, dmabuf->virt, *vpd_size);
4699
James Smartda0436e2009-05-22 14:51:39 -04004700 dma_free_coherent(&phba->pcidev->dev, dma_size,
4701 dmabuf->virt, dmabuf->phys);
4702 kfree(dmabuf);
4703 return 0;
4704}
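
/*
 * Illustrative sketch, not part of the driver: acquiring and releasing a
 * DMA-coherent buffer for a mailbox payload, as done above for the VPD
 * data. The helper names and the device pointer are assumptions made
 * only for this example; the explicit memset mirrors the code above.
 */
#if 0	/* example only - not compiled */
static void *example_get_dma_buf(struct device *dev, size_t size,
				 dma_addr_t *phys)
{
	void *virt = dma_alloc_coherent(dev, size, phys, GFP_KERNEL);

	if (virt)
		memset(virt, 0, size);	/* start from a clean payload */
	return virt;
}

static void example_put_dma_buf(struct device *dev, size_t size,
				void *virt, dma_addr_t phys)
{
	dma_free_coherent(dev, size, virt, phys);
}
#endif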
4705
4706/**
James Smartcd1c8302011-10-10 21:33:25 -04004707 * lpfc_sli4_retrieve_pport_name - Retrieve SLI4 device physical port name
4708 * @phba: pointer to lpfc hba data structure.
4709 *
 4710 * This routine retrieves the SLI4 device physical port name that this PCI
 4711 * function is attached to.
4712 *
4713 * Return codes
 4714 *      0 - successful
4715 * otherwise - failed to retrieve physical port name
4716 **/
4717static int
4718lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
4719{
4720 LPFC_MBOXQ_t *mboxq;
James Smartcd1c8302011-10-10 21:33:25 -04004721 struct lpfc_mbx_get_cntl_attributes *mbx_cntl_attr;
4722 struct lpfc_controller_attribute *cntl_attr;
4723 struct lpfc_mbx_get_port_name *get_port_name;
4724 void *virtaddr = NULL;
4725 uint32_t alloclen, reqlen;
4726 uint32_t shdr_status, shdr_add_status;
4727 union lpfc_sli4_cfg_shdr *shdr;
4728 char cport_name = 0;
4729 int rc;
4730
4731 /* We assume nothing at this point */
4732 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
4733 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_NON;
4734
4735 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4736 if (!mboxq)
4737 return -ENOMEM;
James Smartcd1c8302011-10-10 21:33:25 -04004738 /* obtain link type and link number via READ_CONFIG */
James Smartff78d8f2011-12-13 13:21:35 -05004739 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
4740 lpfc_sli4_read_config(phba);
4741 if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL)
4742 goto retrieve_ppname;
James Smartcd1c8302011-10-10 21:33:25 -04004743
4744 /* obtain link type and link number via COMMON_GET_CNTL_ATTRIBUTES */
4745 reqlen = sizeof(struct lpfc_mbx_get_cntl_attributes);
4746 alloclen = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
4747 LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES, reqlen,
4748 LPFC_SLI4_MBX_NEMBED);
4749 if (alloclen < reqlen) {
4750 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4751 "3084 Allocated DMA memory size (%d) is "
4752 "less than the requested DMA memory size "
4753 "(%d)\n", alloclen, reqlen);
4754 rc = -ENOMEM;
4755 goto out_free_mboxq;
4756 }
4757 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4758 virtaddr = mboxq->sge_array->addr[0];
4759 mbx_cntl_attr = (struct lpfc_mbx_get_cntl_attributes *)virtaddr;
4760 shdr = &mbx_cntl_attr->cfg_shdr;
4761 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
4762 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
4763 if (shdr_status || shdr_add_status || rc) {
4764 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
4765 "3085 Mailbox x%x (x%x/x%x) failed, "
4766 "rc:x%x, status:x%x, add_status:x%x\n",
4767 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
4768 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
4769 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
4770 rc, shdr_status, shdr_add_status);
4771 rc = -ENXIO;
4772 goto out_free_mboxq;
4773 }
4774 cntl_attr = &mbx_cntl_attr->cntl_attr;
4775 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
4776 phba->sli4_hba.lnk_info.lnk_tp =
4777 bf_get(lpfc_cntl_attr_lnk_type, cntl_attr);
4778 phba->sli4_hba.lnk_info.lnk_no =
4779 bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr);
4780 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4781 "3086 lnk_type:%d, lnk_numb:%d\n",
4782 phba->sli4_hba.lnk_info.lnk_tp,
4783 phba->sli4_hba.lnk_info.lnk_no);
4784
4785retrieve_ppname:
4786 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
4787 LPFC_MBOX_OPCODE_GET_PORT_NAME,
4788 sizeof(struct lpfc_mbx_get_port_name) -
4789 sizeof(struct lpfc_sli4_cfg_mhdr),
4790 LPFC_SLI4_MBX_EMBED);
4791 get_port_name = &mboxq->u.mqe.un.get_port_name;
4792 shdr = (union lpfc_sli4_cfg_shdr *)&get_port_name->header.cfg_shdr;
4793 bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_OPCODE_VERSION_1);
4794 bf_set(lpfc_mbx_get_port_name_lnk_type, &get_port_name->u.request,
4795 phba->sli4_hba.lnk_info.lnk_tp);
4796 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4797 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
4798 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
4799 if (shdr_status || shdr_add_status || rc) {
4800 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
4801 "3087 Mailbox x%x (x%x/x%x) failed: "
4802 "rc:x%x, status:x%x, add_status:x%x\n",
4803 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
4804 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
4805 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
4806 rc, shdr_status, shdr_add_status);
4807 rc = -ENXIO;
4808 goto out_free_mboxq;
4809 }
4810 switch (phba->sli4_hba.lnk_info.lnk_no) {
4811 case LPFC_LINK_NUMBER_0:
4812 cport_name = bf_get(lpfc_mbx_get_port_name_name0,
4813 &get_port_name->u.response);
4814 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
4815 break;
4816 case LPFC_LINK_NUMBER_1:
4817 cport_name = bf_get(lpfc_mbx_get_port_name_name1,
4818 &get_port_name->u.response);
4819 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
4820 break;
4821 case LPFC_LINK_NUMBER_2:
4822 cport_name = bf_get(lpfc_mbx_get_port_name_name2,
4823 &get_port_name->u.response);
4824 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
4825 break;
4826 case LPFC_LINK_NUMBER_3:
4827 cport_name = bf_get(lpfc_mbx_get_port_name_name3,
4828 &get_port_name->u.response);
4829 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
4830 break;
4831 default:
4832 break;
4833 }
4834
4835 if (phba->sli4_hba.pport_name_sta == LPFC_SLI4_PPNAME_GET) {
4836 phba->Port[0] = cport_name;
4837 phba->Port[1] = '\0';
4838 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4839 "3091 SLI get port name: %s\n", phba->Port);
4840 }
4841
4842out_free_mboxq:
4843 if (rc != MBX_TIMEOUT) {
4844 if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
4845 lpfc_sli4_mbox_cmd_free(phba, mboxq);
4846 else
4847 mempool_free(mboxq, phba->mbox_mem_pool);
4848 }
4849 return rc;
4850}
4851
4852/**
James Smartda0436e2009-05-22 14:51:39 -04004853 * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues
4854 * @phba: pointer to lpfc hba data structure.
4855 *
4856 * This routine is called to explicitly arm the SLI4 device's completion and
 4857 * event queues.
4858 **/
4859static void
4860lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
4861{
4862 uint8_t fcp_eqidx;
4863
4864 lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM);
4865 lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM);
James Smart05580562011-05-24 11:40:48 -04004866 fcp_eqidx = 0;
4867 do
James Smartda0436e2009-05-22 14:51:39 -04004868 lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx],
4869 LPFC_QUEUE_REARM);
James Smart05580562011-05-24 11:40:48 -04004870 while (++fcp_eqidx < phba->cfg_fcp_eq_count);
James Smartda0436e2009-05-22 14:51:39 -04004871 lpfc_sli4_eq_release(phba->sli4_hba.sp_eq, LPFC_QUEUE_REARM);
4872 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++)
4873 lpfc_sli4_eq_release(phba->sli4_hba.fp_eq[fcp_eqidx],
4874 LPFC_QUEUE_REARM);
4875}
4876
4877/**
James Smart6d368e52011-05-24 11:44:12 -04004878 * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count.
4879 * @phba: Pointer to HBA context object.
4880 * @type: The resource extent type.
James Smartb76f2dc2011-07-22 18:37:42 -04004881 * @extnt_count: buffer to hold port available extent count.
4882 * @extnt_size: buffer to hold element count per extent.
James Smart6d368e52011-05-24 11:44:12 -04004883 *
James Smartb76f2dc2011-07-22 18:37:42 -04004884 * This function calls the port and retrieves the number of available
4885 * extents and their size for a particular extent type.
4886 *
4887 * Returns: 0 if successful. Nonzero otherwise.
James Smart6d368e52011-05-24 11:44:12 -04004888 **/
James Smartb76f2dc2011-07-22 18:37:42 -04004889int
James Smart6d368e52011-05-24 11:44:12 -04004890lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type,
4891 uint16_t *extnt_count, uint16_t *extnt_size)
4892{
4893 int rc = 0;
4894 uint32_t length;
4895 uint32_t mbox_tmo;
4896 struct lpfc_mbx_get_rsrc_extent_info *rsrc_info;
4897 LPFC_MBOXQ_t *mbox;
4898
4899 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4900 if (!mbox)
4901 return -ENOMEM;
4902
4903 /* Find out how many extents are available for this resource type */
4904 length = (sizeof(struct lpfc_mbx_get_rsrc_extent_info) -
4905 sizeof(struct lpfc_sli4_cfg_mhdr));
4906 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
4907 LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO,
4908 length, LPFC_SLI4_MBX_EMBED);
4909
4910 /* Send an extents count of 0 - the GET doesn't use it. */
4911 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
4912 LPFC_SLI4_MBX_EMBED);
4913 if (unlikely(rc)) {
4914 rc = -EIO;
4915 goto err_exit;
4916 }
4917
4918 if (!phba->sli4_hba.intr_enable)
4919 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
4920 else {
James Smarta183a152011-10-10 21:32:43 -04004921 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
James Smart6d368e52011-05-24 11:44:12 -04004922 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
4923 }
4924 if (unlikely(rc)) {
4925 rc = -EIO;
4926 goto err_exit;
4927 }
4928
4929 rsrc_info = &mbox->u.mqe.un.rsrc_extent_info;
4930 if (bf_get(lpfc_mbox_hdr_status,
4931 &rsrc_info->header.cfg_shdr.response)) {
4932 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
4933 "2930 Failed to get resource extents "
4934 "Status 0x%x Add'l Status 0x%x\n",
4935 bf_get(lpfc_mbox_hdr_status,
4936 &rsrc_info->header.cfg_shdr.response),
4937 bf_get(lpfc_mbox_hdr_add_status,
4938 &rsrc_info->header.cfg_shdr.response));
4939 rc = -EIO;
4940 goto err_exit;
4941 }
4942
4943 *extnt_count = bf_get(lpfc_mbx_get_rsrc_extent_info_cnt,
4944 &rsrc_info->u.rsp);
4945 *extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size,
4946 &rsrc_info->u.rsp);
4947 err_exit:
4948 mempool_free(mbox, phba->mbox_mem_pool);
4949 return rc;
4950}
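
/*
 * Illustrative sketch, not part of the driver: the poll-vs-wait mailbox
 * submission pattern used by the extent routines above. When interrupts
 * are not yet enabled the command is polled to completion; otherwise the
 * caller sleeps on the completion with the per-command timeout. The
 * wrapper name is an assumption made only for this example.
 */
#if 0	/* example only - not compiled */
static int example_issue_extent_mbox(struct lpfc_hba *phba,
				     LPFC_MBOXQ_t *mbox)
{
	uint32_t mbox_tmo;

	if (!phba->sli4_hba.intr_enable)
		return lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);

	mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
	return lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
}
#endif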
4951
4952/**
4953 * lpfc_sli4_chk_avail_extnt_rsrc - Check for available SLI4 resource extents.
4954 * @phba: Pointer to HBA context object.
4955 * @type: The extent type to check.
4956 *
4957 * This function reads the current available extents from the port and checks
4958 * if the extent count or extent size has changed since the last access.
4959 * Callers use this routine post port reset to understand if there is a
4960 * extent reprovisioning requirement.
4961 *
4962 * Returns:
4963 * -Error: error indicates problem.
4964 * 1: Extent count or size has changed.
4965 * 0: No changes.
4966 **/
4967static int
4968lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type)
4969{
4970 uint16_t curr_ext_cnt, rsrc_ext_cnt;
4971 uint16_t size_diff, rsrc_ext_size;
4972 int rc = 0;
4973 struct lpfc_rsrc_blks *rsrc_entry;
4974 struct list_head *rsrc_blk_list = NULL;
4975
4976 size_diff = 0;
4977 curr_ext_cnt = 0;
4978 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
4979 &rsrc_ext_cnt,
4980 &rsrc_ext_size);
4981 if (unlikely(rc))
4982 return -EIO;
4983
4984 switch (type) {
4985 case LPFC_RSC_TYPE_FCOE_RPI:
4986 rsrc_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
4987 break;
4988 case LPFC_RSC_TYPE_FCOE_VPI:
4989 rsrc_blk_list = &phba->lpfc_vpi_blk_list;
4990 break;
4991 case LPFC_RSC_TYPE_FCOE_XRI:
4992 rsrc_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
4993 break;
4994 case LPFC_RSC_TYPE_FCOE_VFI:
4995 rsrc_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
4996 break;
4997 default:
4998 break;
4999 }
5000
5001 list_for_each_entry(rsrc_entry, rsrc_blk_list, list) {
5002 curr_ext_cnt++;
5003 if (rsrc_entry->rsrc_size != rsrc_ext_size)
5004 size_diff++;
5005 }
5006
5007 if (curr_ext_cnt != rsrc_ext_cnt || size_diff != 0)
5008 rc = 1;
5009
5010 return rc;
5011}
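
/*
 * Illustrative sketch, not part of the driver: a hypothetical post-reset
 * caller of lpfc_sli4_chk_avail_extnt_rsrc(). A return of 1 means the
 * port's extent count or size changed and the resources should be
 * reprovisioned; a negative return means the query itself failed. The
 * helper name and the reprovision flag are assumptions made only for
 * this example.
 */
#if 0	/* example only - not compiled */
static int example_check_xri_extents(struct lpfc_hba *phba,
				     bool *reprovision)
{
	int rc = lpfc_sli4_chk_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_XRI);

	if (rc < 0)
		return rc;		/* could not read extent info */
	*reprovision = (rc == 1);	/* layout changed since last query */
	return 0;
}
#endif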
5012
5013/**
 5014 * lpfc_sli4_cfg_post_extnts - Post a resource extent allocation request
 5015 * @phba: Pointer to HBA context object.
 5016 * @extnt_cnt: number of available extents.
 5017 * @type: the extent type (rpi, xri, vfi, vpi).
 5018 * @emb: buffer to hold either MBX_EMBED or MBX_NEMBED operation.
 5019 * @mbox: pointer to the caller's allocated mailbox structure.
5020 *
 5021 * This function executes the extents allocation request. It also
 5022 * works out the amount of mailbox memory needed to send the request and
 5023 * receive the allocated extents. It is the caller's responsibility to evaluate
 5024 * the response.
5025 *
5026 * Returns:
5027 * -Error: Error value describes the condition found.
5028 * 0: if successful
5029 **/
5030static int
5031lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t *extnt_cnt,
5032 uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox)
5033{
5034 int rc = 0;
5035 uint32_t req_len;
5036 uint32_t emb_len;
5037 uint32_t alloc_len, mbox_tmo;
5038
5039 /* Calculate the total requested length of the dma memory */
5040 req_len = *extnt_cnt * sizeof(uint16_t);
5041
5042 /*
5043 * Calculate the size of an embedded mailbox. The uint32_t
5044 * accounts for extents-specific word.
 5045	 * accounts for the extents-specific word.
5046 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
5047 sizeof(uint32_t);
5048
5049 /*
5050 * Presume the allocation and response will fit into an embedded
5051 * mailbox. If not true, reconfigure to a non-embedded mailbox.
5052 */
5053 *emb = LPFC_SLI4_MBX_EMBED;
5054 if (req_len > emb_len) {
5055 req_len = *extnt_cnt * sizeof(uint16_t) +
5056 sizeof(union lpfc_sli4_cfg_shdr) +
5057 sizeof(uint32_t);
5058 *emb = LPFC_SLI4_MBX_NEMBED;
5059 }
5060
5061 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5062 LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT,
5063 req_len, *emb);
5064 if (alloc_len < req_len) {
5065 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
James Smartb76f2dc2011-07-22 18:37:42 -04005066 "2982 Allocated DMA memory size (x%x) is "
James Smart6d368e52011-05-24 11:44:12 -04005067 "less than the requested DMA memory "
5068 "size (x%x)\n", alloc_len, req_len);
5069 return -ENOMEM;
5070 }
5071 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, *extnt_cnt, type, *emb);
5072 if (unlikely(rc))
5073 return -EIO;
5074
5075 if (!phba->sli4_hba.intr_enable)
5076 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5077 else {
James Smarta183a152011-10-10 21:32:43 -04005078 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
James Smart6d368e52011-05-24 11:44:12 -04005079 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5080 }
5081
5082 if (unlikely(rc))
5083 rc = -EIO;
5084 return rc;
5085}
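
/*
 * Illustrative sketch, not part of the driver: the embedded/non-embedded
 * decision made in lpfc_sli4_cfg_post_extnts() above. The request fits
 * in the embedded mailbox only if the extent-id payload is no larger
 * than the space left in MAILBOX_t after the header and the extent
 * specific word. The helper name is an assumption made only for this
 * example.
 */
#if 0	/* example only - not compiled */
static bool example_fits_embedded(uint16_t extnt_cnt)
{
	uint32_t req_len = extnt_cnt * sizeof(uint16_t);
	uint32_t emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
			   sizeof(uint32_t);

	return req_len <= emb_len;
}
#endif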
5086
5087/**
5088 * lpfc_sli4_alloc_extent - Allocate an SLI4 resource extent.
5089 * @phba: Pointer to HBA context object.
5090 * @type: The resource extent type to allocate.
5091 *
5092 * This function allocates the number of elements for the specified
5093 * resource type.
5094 **/
5095static int
5096lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type)
5097{
5098 bool emb = false;
5099 uint16_t rsrc_id_cnt, rsrc_cnt, rsrc_size;
5100 uint16_t rsrc_id, rsrc_start, j, k;
5101 uint16_t *ids;
5102 int i, rc;
5103 unsigned long longs;
5104 unsigned long *bmask;
5105 struct lpfc_rsrc_blks *rsrc_blks;
5106 LPFC_MBOXQ_t *mbox;
5107 uint32_t length;
5108 struct lpfc_id_range *id_array = NULL;
5109 void *virtaddr = NULL;
5110 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
5111 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
5112 struct list_head *ext_blk_list;
5113
5114 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
5115 &rsrc_cnt,
5116 &rsrc_size);
5117 if (unlikely(rc))
5118 return -EIO;
5119
5120 if ((rsrc_cnt == 0) || (rsrc_size == 0)) {
5121 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
5122 "3009 No available Resource Extents "
5123 "for resource type 0x%x: Count: 0x%x, "
5124 "Size 0x%x\n", type, rsrc_cnt,
5125 rsrc_size);
5126 return -ENOMEM;
5127 }
5128
5129 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT,
5130 "2903 Available Resource Extents "
5131 "for resource type 0x%x: Count: 0x%x, "
5132 "Size 0x%x\n", type, rsrc_cnt,
5133 rsrc_size);
5134
5135 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5136 if (!mbox)
5137 return -ENOMEM;
5138
5139 rc = lpfc_sli4_cfg_post_extnts(phba, &rsrc_cnt, type, &emb, mbox);
5140 if (unlikely(rc)) {
5141 rc = -EIO;
5142 goto err_exit;
5143 }
5144
5145 /*
5146 * Figure out where the response is located. Then get local pointers
 5147	 * to the response data. The port does not guarantee to honor the
 5148	 * requested extent count, so update the local variable with the
 5149	 * allocated count from the port.
5150 */
5151 if (emb == LPFC_SLI4_MBX_EMBED) {
5152 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
5153 id_array = &rsrc_ext->u.rsp.id[0];
5154 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
5155 } else {
5156 virtaddr = mbox->sge_array->addr[0];
5157 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
5158 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
5159 id_array = &n_rsrc->id;
5160 }
5161
5162 longs = ((rsrc_cnt * rsrc_size) + BITS_PER_LONG - 1) / BITS_PER_LONG;
5163 rsrc_id_cnt = rsrc_cnt * rsrc_size;
5164
5165 /*
5166 * Based on the resource size and count, correct the base and max
5167 * resource values.
5168 */
5169 length = sizeof(struct lpfc_rsrc_blks);
5170 switch (type) {
5171 case LPFC_RSC_TYPE_FCOE_RPI:
5172 phba->sli4_hba.rpi_bmask = kzalloc(longs *
5173 sizeof(unsigned long),
5174 GFP_KERNEL);
5175 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
5176 rc = -ENOMEM;
5177 goto err_exit;
5178 }
5179 phba->sli4_hba.rpi_ids = kzalloc(rsrc_id_cnt *
5180 sizeof(uint16_t),
5181 GFP_KERNEL);
5182 if (unlikely(!phba->sli4_hba.rpi_ids)) {
5183 kfree(phba->sli4_hba.rpi_bmask);
5184 rc = -ENOMEM;
5185 goto err_exit;
5186 }
5187
5188 /*
5189 * The next_rpi was initialized with the maximum available
5190 * count but the port may allocate a smaller number. Catch
5191 * that case and update the next_rpi.
5192 */
5193 phba->sli4_hba.next_rpi = rsrc_id_cnt;
5194
5195 /* Initialize local ptrs for common extent processing later. */
5196 bmask = phba->sli4_hba.rpi_bmask;
5197 ids = phba->sli4_hba.rpi_ids;
5198 ext_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
5199 break;
5200 case LPFC_RSC_TYPE_FCOE_VPI:
5201 phba->vpi_bmask = kzalloc(longs *
5202 sizeof(unsigned long),
5203 GFP_KERNEL);
5204 if (unlikely(!phba->vpi_bmask)) {
5205 rc = -ENOMEM;
5206 goto err_exit;
5207 }
5208 phba->vpi_ids = kzalloc(rsrc_id_cnt *
5209 sizeof(uint16_t),
5210 GFP_KERNEL);
5211 if (unlikely(!phba->vpi_ids)) {
5212 kfree(phba->vpi_bmask);
5213 rc = -ENOMEM;
5214 goto err_exit;
5215 }
5216
5217 /* Initialize local ptrs for common extent processing later. */
5218 bmask = phba->vpi_bmask;
5219 ids = phba->vpi_ids;
5220 ext_blk_list = &phba->lpfc_vpi_blk_list;
5221 break;
5222 case LPFC_RSC_TYPE_FCOE_XRI:
5223 phba->sli4_hba.xri_bmask = kzalloc(longs *
5224 sizeof(unsigned long),
5225 GFP_KERNEL);
5226 if (unlikely(!phba->sli4_hba.xri_bmask)) {
5227 rc = -ENOMEM;
5228 goto err_exit;
5229 }
5230 phba->sli4_hba.xri_ids = kzalloc(rsrc_id_cnt *
5231 sizeof(uint16_t),
5232 GFP_KERNEL);
5233 if (unlikely(!phba->sli4_hba.xri_ids)) {
5234 kfree(phba->sli4_hba.xri_bmask);
5235 rc = -ENOMEM;
5236 goto err_exit;
5237 }
5238
5239 /* Initialize local ptrs for common extent processing later. */
5240 bmask = phba->sli4_hba.xri_bmask;
5241 ids = phba->sli4_hba.xri_ids;
5242 ext_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
5243 break;
5244 case LPFC_RSC_TYPE_FCOE_VFI:
5245 phba->sli4_hba.vfi_bmask = kzalloc(longs *
5246 sizeof(unsigned long),
5247 GFP_KERNEL);
5248 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
5249 rc = -ENOMEM;
5250 goto err_exit;
5251 }
5252 phba->sli4_hba.vfi_ids = kzalloc(rsrc_id_cnt *
5253 sizeof(uint16_t),
5254 GFP_KERNEL);
5255 if (unlikely(!phba->sli4_hba.vfi_ids)) {
5256 kfree(phba->sli4_hba.vfi_bmask);
5257 rc = -ENOMEM;
5258 goto err_exit;
5259 }
5260
5261 /* Initialize local ptrs for common extent processing later. */
5262 bmask = phba->sli4_hba.vfi_bmask;
5263 ids = phba->sli4_hba.vfi_ids;
5264 ext_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
5265 break;
5266 default:
5267		/* Unsupported resource type. Fail the call. */
5268 id_array = NULL;
5269 bmask = NULL;
5270 ids = NULL;
5271 ext_blk_list = NULL;
5272 goto err_exit;
5273 }
5274
5275 /*
5276 * Complete initializing the extent configuration with the
5277 * allocated ids assigned to this function. The bitmask serves
5278 * as an index into the array and manages the available ids. The
5279 * array just stores the ids communicated to the port via the wqes.
5280 */
5281 for (i = 0, j = 0, k = 0; i < rsrc_cnt; i++) {
5282 if ((i % 2) == 0)
5283 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_0,
5284 &id_array[k]);
5285 else
5286 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_1,
5287 &id_array[k]);
5288
5289 rsrc_blks = kzalloc(length, GFP_KERNEL);
5290 if (unlikely(!rsrc_blks)) {
5291 rc = -ENOMEM;
5292 kfree(bmask);
5293 kfree(ids);
5294 goto err_exit;
5295 }
5296 rsrc_blks->rsrc_start = rsrc_id;
5297 rsrc_blks->rsrc_size = rsrc_size;
5298 list_add_tail(&rsrc_blks->list, ext_blk_list);
5299 rsrc_start = rsrc_id;
5300 if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0))
5301 phba->sli4_hba.scsi_xri_start = rsrc_start +
5302 lpfc_sli4_get_els_iocb_cnt(phba);
5303
5304 while (rsrc_id < (rsrc_start + rsrc_size)) {
5305 ids[j] = rsrc_id;
5306 rsrc_id++;
5307 j++;
5308 }
5309 /* Entire word processed. Get next word.*/
5310 if ((i % 2) == 1)
5311 k++;
5312 }
5313 err_exit:
5314 lpfc_sli4_mbox_cmd_free(phba, mbox);
5315 return rc;
5316}
5317
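/*
 * Illustrative sketch, not part of the driver: a standalone rendering of
 * the id-unpacking scheme the allocation loop above uses. Each 32-bit
 * word of the mailbox response carries two 16-bit extent base ids (this
 * sketch assumes the lpfc_mbx_rsrc_id_word4_0 field is the low half and
 * word4_1 the high half), and every extent base is expanded into
 * rsrc_size consecutive ids. The demo_* name is made up.
 */
static int demo_unpack_extent_ids(const uint32_t *words, uint16_t ext_cnt,
				  uint16_t rsrc_size, uint16_t *ids)
{
	uint16_t base, n;
	int i, j = 0, k = 0;

	for (i = 0; i < ext_cnt; i++) {
		if ((i % 2) == 0)
			base = words[k] & 0xffff;		/* low half  */
		else
			base = (words[k] >> 16) & 0xffff;	/* high half */

		/* Each extent contributes rsrc_size consecutive ids. */
		for (n = 0; n < rsrc_size; n++)
			ids[j++] = base + n;

		/* Both halves of this word consumed - move to the next. */
		if ((i % 2) == 1)
			k++;
	}
	return j;	/* equals ext_cnt * rsrc_size */
}
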
5318/**
5319 * lpfc_sli4_dealloc_extent - Deallocate an SLI4 resource extent.
5320 * @phba: Pointer to HBA context object.
5321 * @type: the extent's type.
5322 *
5323 * This function deallocates all extents of a particular resource type.
5324 * SLI4 does not allow for deallocating a particular extent range. It
5325 * is the caller's responsibility to release all kernel memory resources.
5326 **/
5327static int
5328lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type)
5329{
5330 int rc;
5331 uint32_t length, mbox_tmo = 0;
5332 LPFC_MBOXQ_t *mbox;
5333 struct lpfc_mbx_dealloc_rsrc_extents *dealloc_rsrc;
5334 struct lpfc_rsrc_blks *rsrc_blk, *rsrc_blk_next;
5335
5336 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5337 if (!mbox)
5338 return -ENOMEM;
5339
5340 /*
5341 * This function sends an embedded mailbox because it only sends the
5342	 * resource type. All extents of this type are released by the
5343 * port.
5344 */
5345 length = (sizeof(struct lpfc_mbx_dealloc_rsrc_extents) -
5346 sizeof(struct lpfc_sli4_cfg_mhdr));
5347 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5348 LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT,
5349 length, LPFC_SLI4_MBX_EMBED);
5350
5351 /* Send an extents count of 0 - the dealloc doesn't use it. */
5352 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
5353 LPFC_SLI4_MBX_EMBED);
5354 if (unlikely(rc)) {
5355 rc = -EIO;
5356 goto out_free_mbox;
5357 }
5358 if (!phba->sli4_hba.intr_enable)
5359 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5360 else {
James Smarta183a152011-10-10 21:32:43 -04005361 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
James Smart6d368e52011-05-24 11:44:12 -04005362 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5363 }
5364 if (unlikely(rc)) {
5365 rc = -EIO;
5366 goto out_free_mbox;
5367 }
5368
5369 dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents;
5370 if (bf_get(lpfc_mbox_hdr_status,
5371 &dealloc_rsrc->header.cfg_shdr.response)) {
5372 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
5373 "2919 Failed to release resource extents "
5374 "for type %d - Status 0x%x Add'l Status 0x%x. "
5375 "Resource memory not released.\n",
5376 type,
5377 bf_get(lpfc_mbox_hdr_status,
5378 &dealloc_rsrc->header.cfg_shdr.response),
5379 bf_get(lpfc_mbox_hdr_add_status,
5380 &dealloc_rsrc->header.cfg_shdr.response));
5381 rc = -EIO;
5382 goto out_free_mbox;
5383 }
5384
5385 /* Release kernel memory resources for the specific type. */
5386 switch (type) {
5387 case LPFC_RSC_TYPE_FCOE_VPI:
5388 kfree(phba->vpi_bmask);
5389 kfree(phba->vpi_ids);
5390 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5391 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
5392 &phba->lpfc_vpi_blk_list, list) {
5393 list_del_init(&rsrc_blk->list);
5394 kfree(rsrc_blk);
5395 }
5396 break;
5397 case LPFC_RSC_TYPE_FCOE_XRI:
5398 kfree(phba->sli4_hba.xri_bmask);
5399 kfree(phba->sli4_hba.xri_ids);
5400 bf_set(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5401 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
5402 &phba->sli4_hba.lpfc_xri_blk_list, list) {
5403 list_del_init(&rsrc_blk->list);
5404 kfree(rsrc_blk);
5405 }
5406 break;
5407 case LPFC_RSC_TYPE_FCOE_VFI:
5408 kfree(phba->sli4_hba.vfi_bmask);
5409 kfree(phba->sli4_hba.vfi_ids);
5410 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5411 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
5412 &phba->sli4_hba.lpfc_vfi_blk_list, list) {
5413 list_del_init(&rsrc_blk->list);
5414 kfree(rsrc_blk);
5415 }
5416 break;
5417 case LPFC_RSC_TYPE_FCOE_RPI:
5418 /* RPI bitmask and physical id array are cleaned up earlier. */
5419 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
5420 &phba->sli4_hba.lpfc_rpi_blk_list, list) {
5421 list_del_init(&rsrc_blk->list);
5422 kfree(rsrc_blk);
5423 }
5424 break;
5425 default:
5426 break;
5427 }
5428
5429 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5430
5431 out_free_mbox:
5432 mempool_free(mbox, phba->mbox_mem_pool);
5433 return rc;
5434}
5435
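/*
 * Illustrative sketch, not part of the driver: the submit-and-wait
 * pattern used by lpfc_sli4_dealloc_extent() above and by several other
 * mailbox users in this file. demo_issue_mbox_sync() is a made-up
 * wrapper; the routines it calls are the real driver interfaces.
 */
static int demo_issue_mbox_sync(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
{
	uint32_t mbox_tmo;
	int rc;

	if (!phba->sli4_hba.intr_enable) {
		/* Interrupts are not up yet (early init) - poll for completion. */
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	} else {
		/* Sleep until completion or the per-command timeout expires. */
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
	return rc ? -EIO : 0;
}
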
5436/**
5437 * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource identifiers.
5438 * @phba: Pointer to HBA context object.
5439 *
5440 * This function allocates all SLI4 resource identifiers.
5441 **/
5442int
5443lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
5444{
5445 int i, rc, error = 0;
5446 uint16_t count, base;
5447 unsigned long longs;
5448
James Smartff78d8f2011-12-13 13:21:35 -05005449 if (!phba->sli4_hba.rpi_hdrs_in_use)
5450 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
James Smart6d368e52011-05-24 11:44:12 -04005451 if (phba->sli4_hba.extents_in_use) {
5452 /*
5453 * The port supports resource extents. The XRI, VPI, VFI, RPI
5454 * resource extent count must be read and allocated before
5455 * provisioning the resource id arrays.
5456 */
5457 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
5458 LPFC_IDX_RSRC_RDY) {
5459 /*
5460 * Extent-based resources are set - the driver could
5461 * be in a port reset. Figure out if any corrective
5462 * actions need to be taken.
5463 */
5464 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
5465 LPFC_RSC_TYPE_FCOE_VFI);
5466 if (rc != 0)
5467 error++;
5468 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
5469 LPFC_RSC_TYPE_FCOE_VPI);
5470 if (rc != 0)
5471 error++;
5472 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
5473 LPFC_RSC_TYPE_FCOE_XRI);
5474 if (rc != 0)
5475 error++;
5476 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
5477 LPFC_RSC_TYPE_FCOE_RPI);
5478 if (rc != 0)
5479 error++;
5480
5481 /*
5482 * It's possible that the number of resources
5483 * provided to this port instance changed between
5484 * resets. Detect this condition and reallocate
5485 * resources. Otherwise, there is no action.
5486 */
5487 if (error) {
5488 lpfc_printf_log(phba, KERN_INFO,
5489 LOG_MBOX | LOG_INIT,
5490 "2931 Detected extent resource "
5491 "change. Reallocating all "
5492 "extents.\n");
5493 rc = lpfc_sli4_dealloc_extent(phba,
5494 LPFC_RSC_TYPE_FCOE_VFI);
5495 rc = lpfc_sli4_dealloc_extent(phba,
5496 LPFC_RSC_TYPE_FCOE_VPI);
5497 rc = lpfc_sli4_dealloc_extent(phba,
5498 LPFC_RSC_TYPE_FCOE_XRI);
5499 rc = lpfc_sli4_dealloc_extent(phba,
5500 LPFC_RSC_TYPE_FCOE_RPI);
5501 } else
5502 return 0;
5503 }
5504
5505 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
5506 if (unlikely(rc))
5507 goto err_exit;
5508
5509 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
5510 if (unlikely(rc))
5511 goto err_exit;
5512
5513 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
5514 if (unlikely(rc))
5515 goto err_exit;
5516
5517 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
5518 if (unlikely(rc))
5519 goto err_exit;
5520 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
5521 LPFC_IDX_RSRC_RDY);
5522 return rc;
5523 } else {
5524 /*
5525 * The port does not support resource extents. The XRI, VPI,
5526 * VFI, RPI resource ids were determined from READ_CONFIG.
5527		 * Just allocate the bitmasks and provision the resource id
5528		 * arrays. If the ids were already provisioned (for example
5529		 * across a port reset), release them first and reprovision.
5530 */
5531 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
James Smartff78d8f2011-12-13 13:21:35 -05005532 LPFC_IDX_RSRC_RDY) {
5533 lpfc_sli4_dealloc_resource_identifiers(phba);
5534 lpfc_sli4_remove_rpis(phba);
5535 }
James Smart6d368e52011-05-24 11:44:12 -04005536 /* RPIs. */
5537 count = phba->sli4_hba.max_cfg_param.max_rpi;
5538 base = phba->sli4_hba.max_cfg_param.rpi_base;
5539 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
5540 phba->sli4_hba.rpi_bmask = kzalloc(longs *
5541 sizeof(unsigned long),
5542 GFP_KERNEL);
5543 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
5544 rc = -ENOMEM;
5545 goto err_exit;
5546 }
5547 phba->sli4_hba.rpi_ids = kzalloc(count *
5548 sizeof(uint16_t),
5549 GFP_KERNEL);
5550 if (unlikely(!phba->sli4_hba.rpi_ids)) {
5551 rc = -ENOMEM;
5552 goto free_rpi_bmask;
5553 }
5554
5555 for (i = 0; i < count; i++)
5556 phba->sli4_hba.rpi_ids[i] = base + i;
5557
5558 /* VPIs. */
5559 count = phba->sli4_hba.max_cfg_param.max_vpi;
5560 base = phba->sli4_hba.max_cfg_param.vpi_base;
5561 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
5562 phba->vpi_bmask = kzalloc(longs *
5563 sizeof(unsigned long),
5564 GFP_KERNEL);
5565 if (unlikely(!phba->vpi_bmask)) {
5566 rc = -ENOMEM;
5567 goto free_rpi_ids;
5568 }
5569 phba->vpi_ids = kzalloc(count *
5570 sizeof(uint16_t),
5571 GFP_KERNEL);
5572 if (unlikely(!phba->vpi_ids)) {
5573 rc = -ENOMEM;
5574 goto free_vpi_bmask;
5575 }
5576
5577 for (i = 0; i < count; i++)
5578 phba->vpi_ids[i] = base + i;
5579
5580 /* XRIs. */
5581 count = phba->sli4_hba.max_cfg_param.max_xri;
5582 base = phba->sli4_hba.max_cfg_param.xri_base;
5583 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
5584 phba->sli4_hba.xri_bmask = kzalloc(longs *
5585 sizeof(unsigned long),
5586 GFP_KERNEL);
5587 if (unlikely(!phba->sli4_hba.xri_bmask)) {
5588 rc = -ENOMEM;
5589 goto free_vpi_ids;
5590 }
5591 phba->sli4_hba.xri_ids = kzalloc(count *
5592 sizeof(uint16_t),
5593 GFP_KERNEL);
5594 if (unlikely(!phba->sli4_hba.xri_ids)) {
5595 rc = -ENOMEM;
5596 goto free_xri_bmask;
5597 }
5598
5599 for (i = 0; i < count; i++)
5600 phba->sli4_hba.xri_ids[i] = base + i;
5601
5602 /* VFIs. */
5603 count = phba->sli4_hba.max_cfg_param.max_vfi;
5604 base = phba->sli4_hba.max_cfg_param.vfi_base;
5605 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
5606 phba->sli4_hba.vfi_bmask = kzalloc(longs *
5607 sizeof(unsigned long),
5608 GFP_KERNEL);
5609 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
5610 rc = -ENOMEM;
5611 goto free_xri_ids;
5612 }
5613 phba->sli4_hba.vfi_ids = kzalloc(count *
5614 sizeof(uint16_t),
5615 GFP_KERNEL);
5616 if (unlikely(!phba->sli4_hba.vfi_ids)) {
5617 rc = -ENOMEM;
5618 goto free_vfi_bmask;
5619 }
5620
5621 for (i = 0; i < count; i++)
5622 phba->sli4_hba.vfi_ids[i] = base + i;
5623
5624 /*
5625 * Mark all resources ready. An HBA reset doesn't need
5626 * to reset the initialization.
5627 */
5628 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
5629 LPFC_IDX_RSRC_RDY);
5630 return 0;
5631 }
5632
5633 free_vfi_bmask:
5634 kfree(phba->sli4_hba.vfi_bmask);
5635 free_xri_ids:
5636 kfree(phba->sli4_hba.xri_ids);
5637 free_xri_bmask:
5638 kfree(phba->sli4_hba.xri_bmask);
5639 free_vpi_ids:
5640 kfree(phba->vpi_ids);
5641 free_vpi_bmask:
5642 kfree(phba->vpi_bmask);
5643 free_rpi_ids:
5644 kfree(phba->sli4_hba.rpi_ids);
5645 free_rpi_bmask:
5646 kfree(phba->sli4_hba.rpi_bmask);
5647 err_exit:
5648 return rc;
5649}
5650
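/*
 * Illustrative sketch, not part of the driver: the bitmask sizing and
 * contiguous id provisioning used by the non-extent path above. For
 * example, a count of 64 on a 64-bit host needs one unsigned long of
 * bitmask and ids base..base+63. The demo_* name and the *_out
 * parameters are made up for this example.
 */
static int demo_provision_ids(uint16_t count, uint16_t base,
			      unsigned long **bmask_out, uint16_t **ids_out)
{
	unsigned long longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
	unsigned long *bmask;
	uint16_t *ids;
	int i;

	bmask = kzalloc(longs * sizeof(unsigned long), GFP_KERNEL);
	if (!bmask)
		return -ENOMEM;
	ids = kzalloc(count * sizeof(uint16_t), GFP_KERNEL);
	if (!ids) {
		kfree(bmask);
		return -ENOMEM;
	}

	/* Physical id = base + logical index; the bitmask tracks what is in use. */
	for (i = 0; i < count; i++)
		ids[i] = base + i;

	*bmask_out = bmask;
	*ids_out = ids;
	return 0;
}
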
5651/**
5652 * lpfc_sli4_dealloc_resource_identifiers - Release all SLI4 resource identifiers.
5653 * @phba: Pointer to HBA context object.
5654 *
5655 * This function releases all allocated SLI4 resource identifiers
5656 * (RPI, VPI, XRI and VFI), whether extent-based or not.
5657 **/
5658int
5659lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
5660{
5661 if (phba->sli4_hba.extents_in_use) {
5662 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
5663 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
5664 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
5665 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
5666 } else {
5667 kfree(phba->vpi_bmask);
5668 kfree(phba->vpi_ids);
5669 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5670 kfree(phba->sli4_hba.xri_bmask);
5671 kfree(phba->sli4_hba.xri_ids);
5672 bf_set(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5673 kfree(phba->sli4_hba.vfi_bmask);
5674 kfree(phba->sli4_hba.vfi_ids);
5675 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5676 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5677 }
5678
5679 return 0;
5680}
5681
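/*
 * Illustrative sketch, not part of the driver: how the two routines
 * above pair up around a port re-initialization. The demo_reprovision()
 * wrapper is made up; the calls inside it are the real entry points.
 */
static int demo_reprovision(struct lpfc_hba *phba)
{
	int rc;

	/* Drop whatever identifiers a previous initialization left behind. */
	rc = lpfc_sli4_dealloc_resource_identifiers(phba);
	if (rc)
		return rc;

	/* Re-provision the RPI, VPI, XRI and VFI ids from the port. */
	return lpfc_sli4_alloc_resource_identifiers(phba);
}
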
5682/**
James Smartb76f2dc2011-07-22 18:37:42 -04005683 * lpfc_sli4_get_allocated_extnts - Get the port's allocated extents.
5684 * @phba: Pointer to HBA context object.
5685 * @type: The resource extent type.
5686 * @extnt_cnt: buffer to hold port extent count response
5687 * @extnt_size: buffer to hold port extent size response.
5688 *
5689 * This function calls the port to read the host allocated extents
5690 * for a particular type.
5691 **/
5692int
5693lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type,
5694 uint16_t *extnt_cnt, uint16_t *extnt_size)
5695{
5696 bool emb;
5697 int rc = 0;
5698 uint16_t curr_blks = 0;
5699 uint32_t req_len, emb_len;
5700 uint32_t alloc_len, mbox_tmo;
5701 struct list_head *blk_list_head;
5702 struct lpfc_rsrc_blks *rsrc_blk;
5703 LPFC_MBOXQ_t *mbox;
5704 void *virtaddr = NULL;
5705 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
5706 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
5707 union lpfc_sli4_cfg_shdr *shdr;
5708
5709 switch (type) {
5710 case LPFC_RSC_TYPE_FCOE_VPI:
5711 blk_list_head = &phba->lpfc_vpi_blk_list;
5712 break;
5713 case LPFC_RSC_TYPE_FCOE_XRI:
5714 blk_list_head = &phba->sli4_hba.lpfc_xri_blk_list;
5715 break;
5716 case LPFC_RSC_TYPE_FCOE_VFI:
5717 blk_list_head = &phba->sli4_hba.lpfc_vfi_blk_list;
5718 break;
5719 case LPFC_RSC_TYPE_FCOE_RPI:
5720 blk_list_head = &phba->sli4_hba.lpfc_rpi_blk_list;
5721 break;
5722 default:
5723 return -EIO;
5724 }
5725
5726	/* Count the number of extents currently allocated for this type. */
5727 list_for_each_entry(rsrc_blk, blk_list_head, list) {
5728 if (curr_blks == 0) {
5729 /*
5730 * The GET_ALLOCATED mailbox does not return the size,
5731			 * just the count. All extents of a given type have
5732			 * the same size, so take the size stored in the
5733			 * first allocated block and set the return
5734			 * value now.
5735 */
5736 *extnt_size = rsrc_blk->rsrc_size;
5737 }
5738 curr_blks++;
5739 }
5740
5741 /* Calculate the total requested length of the dma memory. */
5742 req_len = curr_blks * sizeof(uint16_t);
5743
5744 /*
5745 * Calculate the size of an embedded mailbox. The uint32_t
5746	 * accounts for the extents-specific word.
5747 */
5748 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
5749 sizeof(uint32_t);
5750
5751 /*
5752 * Presume the allocation and response will fit into an embedded
5753 * mailbox. If not true, reconfigure to a non-embedded mailbox.
5754 */
5755 emb = LPFC_SLI4_MBX_EMBED;
5757 if (req_len > emb_len) {
5758 req_len = curr_blks * sizeof(uint16_t) +
5759 sizeof(union lpfc_sli4_cfg_shdr) +
5760 sizeof(uint32_t);
5761 emb = LPFC_SLI4_MBX_NEMBED;
5762 }
5763
5764 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5765 if (!mbox)
5766 return -ENOMEM;
5767 memset(mbox, 0, sizeof(LPFC_MBOXQ_t));
5768
5769 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5770 LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT,
5771 req_len, emb);
5772 if (alloc_len < req_len) {
5773 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5774 "2983 Allocated DMA memory size (x%x) is "
5775 "less than the requested DMA memory "
5776 "size (x%x)\n", alloc_len, req_len);
5777 rc = -ENOMEM;
5778 goto err_exit;
5779 }
5780 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, curr_blks, type, emb);
5781 if (unlikely(rc)) {
5782 rc = -EIO;
5783 goto err_exit;
5784 }
5785
5786 if (!phba->sli4_hba.intr_enable)
5787 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5788 else {
James Smarta183a152011-10-10 21:32:43 -04005789 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
James Smartb76f2dc2011-07-22 18:37:42 -04005790 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5791 }
5792
5793 if (unlikely(rc)) {
5794 rc = -EIO;
5795 goto err_exit;
5796 }
5797
5798 /*
5799 * Figure out where the response is located. Then get local pointers
5800 * to the response data. The port does not guarantee to respond to
5801	 * all extent count requests, so update the local variable with the
5802 * allocated count from the port.
5803 */
5804 if (emb == LPFC_SLI4_MBX_EMBED) {
5805 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
5806 shdr = &rsrc_ext->header.cfg_shdr;
5807 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
5808 } else {
5809 virtaddr = mbox->sge_array->addr[0];
5810 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
5811 shdr = &n_rsrc->cfg_shdr;
5812 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
5813 }
5814
5815 if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) {
5816 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
5817 "2984 Failed to read allocated resources "
5818 "for type %d - Status 0x%x Add'l Status 0x%x.\n",
5819 type,
5820 bf_get(lpfc_mbox_hdr_status, &shdr->response),
5821 bf_get(lpfc_mbox_hdr_add_status, &shdr->response));
5822 rc = -EIO;
5823 goto err_exit;
5824 }
5825 err_exit:
5826 lpfc_sli4_mbox_cmd_free(phba, mbox);
5827 return rc;
5828}
5829
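/*
 * Illustrative sketch, not part of the driver: the embedded vs.
 * non-embedded sizing decision used when building the resource extent
 * mailbox requests in this file. The response carries one 16-bit id per
 * extent; if that does not fit in the embedded payload, a non-embedded
 * (SGE-based) mailbox is used and the header overhead is added to the
 * request length. demo_pick_mbox_mode() is a made-up name.
 */
static bool demo_pick_mbox_mode(uint16_t curr_blks, uint32_t *req_len_out)
{
	/* Embedded payload: MAILBOX_t minus the header and the extents word. */
	uint32_t emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
			   sizeof(uint32_t);
	uint32_t req_len = curr_blks * sizeof(uint16_t);

	if (req_len > emb_len) {
		req_len = curr_blks * sizeof(uint16_t) +
			  sizeof(union lpfc_sli4_cfg_shdr) +
			  sizeof(uint32_t);
		*req_len_out = req_len;
		return false;		/* use LPFC_SLI4_MBX_NEMBED */
	}
	*req_len_out = req_len;
	return true;			/* fits: use LPFC_SLI4_MBX_EMBED */
}
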
5830/**
James Smartda0436e2009-05-22 14:51:39 -04005831 * lpfc_sli4_hba_setup - SLI4 device initialization PCI function
5832 * @phba: Pointer to HBA context object.
5833 *
5834 * This function is the main SLI4 device initialization PCI function. This
5835 * function is called by the HBA initialization code, HBA reset code and
5836 * HBA error attention handler code. Caller is not required to hold any
5837 * locks.
5838 **/
5839int
5840lpfc_sli4_hba_setup(struct lpfc_hba *phba)
5841{
5842 int rc;
5843 LPFC_MBOXQ_t *mboxq;
5844 struct lpfc_mqe *mqe;
5845 uint8_t *vpd;
5846 uint32_t vpd_size;
5847 uint32_t ftr_rsp = 0;
5848 struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
5849 struct lpfc_vport *vport = phba->pport;
5850 struct lpfc_dmabuf *mp;
5851
5852 /* Perform a PCI function reset to start from clean */
5853 rc = lpfc_pci_function_reset(phba);
5854 if (unlikely(rc))
5855 return -ENODEV;
5856
5857	/* Check the HBA Host Status Register for readiness */
5858 rc = lpfc_sli4_post_status_check(phba);
5859 if (unlikely(rc))
5860 return -ENODEV;
5861 else {
5862 spin_lock_irq(&phba->hbalock);
5863 phba->sli.sli_flag |= LPFC_SLI_ACTIVE;
5864 spin_unlock_irq(&phba->hbalock);
5865 }
5866
5867 /*
5868 * Allocate a single mailbox container for initializing the
5869 * port.
5870 */
5871 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5872 if (!mboxq)
5873 return -ENOMEM;
5874
James Smartda0436e2009-05-22 14:51:39 -04005875 /* Issue READ_REV to collect vpd and FW information. */
James Smart49198b32010-04-06 15:04:33 -04005876 vpd_size = SLI4_PAGE_SIZE;
James Smartda0436e2009-05-22 14:51:39 -04005877 vpd = kzalloc(vpd_size, GFP_KERNEL);
5878 if (!vpd) {
5879 rc = -ENOMEM;
5880 goto out_free_mbox;
5881 }
5882
5883 rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
James Smart76a95d72010-11-20 23:11:48 -05005884 if (unlikely(rc)) {
5885 kfree(vpd);
5886 goto out_free_mbox;
5887 }
James Smartda0436e2009-05-22 14:51:39 -04005888 mqe = &mboxq->u.mqe;
James Smartf1126682009-06-10 17:22:44 -04005889 phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
5890 if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev))
James Smart76a95d72010-11-20 23:11:48 -05005891 phba->hba_flag |= HBA_FCOE_MODE;
5892 else
5893 phba->hba_flag &= ~HBA_FCOE_MODE;
James Smart45ed1192009-10-02 15:17:02 -04005894
5895 if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) ==
5896 LPFC_DCBX_CEE_MODE)
5897 phba->hba_flag |= HBA_FIP_SUPPORT;
5898 else
5899 phba->hba_flag &= ~HBA_FIP_SUPPORT;
5900
James Smartc31098c2011-04-16 11:03:33 -04005901 if (phba->sli_rev != LPFC_SLI_REV4) {
James Smartda0436e2009-05-22 14:51:39 -04005902 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5903 "0376 READ_REV Error. SLI Level %d "
5904 "FCoE enabled %d\n",
James Smart76a95d72010-11-20 23:11:48 -05005905 phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE);
James Smartda0436e2009-05-22 14:51:39 -04005906 rc = -EIO;
James Smart76a95d72010-11-20 23:11:48 -05005907 kfree(vpd);
5908 goto out_free_mbox;
James Smartda0436e2009-05-22 14:51:39 -04005909 }
James Smartcd1c8302011-10-10 21:33:25 -04005910
5911 /*
James Smartff78d8f2011-12-13 13:21:35 -05005912	 * Continue initialization with default values even if the driver failed
5913	 * to read the FCoE parameter config regions; only attempt the read if
5914	 * the board is running in FCoE mode.
5915 */
5916 if (phba->hba_flag & HBA_FCOE_MODE &&
5917 lpfc_sli4_read_fcoe_params(phba))
5918 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT,
5919 "2570 Failed to read FCoE parameters\n");
5920
5921 /*
James Smartcd1c8302011-10-10 21:33:25 -04005922	 * Retrieve the SLI4 device physical port name; a failure to do so
5923	 * is considered non-fatal.
5924 */
5925 rc = lpfc_sli4_retrieve_pport_name(phba);
5926 if (!rc)
5927 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
5928 "3080 Successful retrieving SLI4 device "
5929 "physical port name: %s.\n", phba->Port);
5930
James Smartda0436e2009-05-22 14:51:39 -04005931 /*
5932 * Evaluate the read rev and vpd data. Populate the driver
5933 * state with the results. If this routine fails, the failure
5934 * is not fatal as the driver will use generic values.
5935 */
5936 rc = lpfc_parse_vpd(phba, vpd, vpd_size);
5937 if (unlikely(!rc)) {
5938 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5939 "0377 Error %d parsing vpd. "
5940 "Using defaults.\n", rc);
5941 rc = 0;
5942 }
James Smart76a95d72010-11-20 23:11:48 -05005943 kfree(vpd);
James Smartda0436e2009-05-22 14:51:39 -04005944
James Smartf1126682009-06-10 17:22:44 -04005945 /* Save information as VPD data */
5946 phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev;
5947 phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev;
5948 phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev;
5949 phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high,
5950 &mqe->un.read_rev);
5951 phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low,
5952 &mqe->un.read_rev);
5953 phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high,
5954 &mqe->un.read_rev);
5955 phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low,
5956 &mqe->un.read_rev);
5957 phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev;
5958 memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16);
5959 phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev;
5960 memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16);
5961 phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev;
5962 memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16);
5963 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
5964 "(%d):0380 READ_REV Status x%x "
5965 "fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n",
5966 mboxq->vport ? mboxq->vport->vpi : 0,
5967 bf_get(lpfc_mqe_status, mqe),
5968 phba->vpd.rev.opFwName,
5969 phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow,
5970 phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow);
James Smartda0436e2009-05-22 14:51:39 -04005971
5972 /*
5973 * Discover the port's supported feature set and match it against the
5974	 * host's requests.
5975 */
5976 lpfc_request_features(phba, mboxq);
5977 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5978 if (unlikely(rc)) {
5979 rc = -EIO;
James Smart76a95d72010-11-20 23:11:48 -05005980 goto out_free_mbox;
James Smartda0436e2009-05-22 14:51:39 -04005981 }
5982
5983 /*
5984 * The port must support FCP initiator mode as this is the
5985 * only mode running in the host.
5986 */
5987 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) {
5988 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
5989 "0378 No support for fcpi mode.\n");
5990 ftr_rsp++;
5991 }
James Smartfedd3b72011-02-16 12:39:24 -05005992 if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs))
5993 phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED;
5994 else
5995 phba->sli3_options &= ~LPFC_SLI4_PERFH_ENABLED;
James Smartda0436e2009-05-22 14:51:39 -04005996 /*
5997 * If the port cannot support the host's requested features
5998 * then turn off the global config parameters to disable the
5999 * feature in the driver. This is not a fatal error.
6000 */
James Smartbf086112011-08-21 21:48:13 -04006001 phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
6002 if (phba->cfg_enable_bg) {
6003 if (bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))
6004 phba->sli3_options |= LPFC_SLI3_BG_ENABLED;
6005 else
6006 ftr_rsp++;
6007 }
James Smartda0436e2009-05-22 14:51:39 -04006008
6009 if (phba->max_vpi && phba->cfg_enable_npiv &&
6010 !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
6011 ftr_rsp++;
6012
6013 if (ftr_rsp) {
6014 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
6015 "0379 Feature Mismatch Data: x%08x %08x "
6016 "x%x x%x x%x\n", mqe->un.req_ftrs.word2,
6017 mqe->un.req_ftrs.word3, phba->cfg_enable_bg,
6018 phba->cfg_enable_npiv, phba->max_vpi);
6019 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
6020 phba->cfg_enable_bg = 0;
6021 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
6022 phba->cfg_enable_npiv = 0;
6023 }
6024
6025 /* These SLI3 features are assumed in SLI4 */
6026 spin_lock_irq(&phba->hbalock);
6027 phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
6028 spin_unlock_irq(&phba->hbalock);
6029
James Smart6d368e52011-05-24 11:44:12 -04006030 /*
6031 * Allocate all resources (xri,rpi,vpi,vfi) now. Subsequent
6032	 * calls depend on these resources to complete port setup.
6033 */
6034 rc = lpfc_sli4_alloc_resource_identifiers(phba);
6035 if (rc) {
6036 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6037 "2920 Failed to alloc Resource IDs "
6038 "rc = x%x\n", rc);
6039 goto out_free_mbox;
6040 }
James Smartff78d8f2011-12-13 13:21:35 -05006041 /* update physical xri mappings in the scsi buffers */
6042 lpfc_scsi_buf_update(phba);
James Smart6d368e52011-05-24 11:44:12 -04006043
James Smartda0436e2009-05-22 14:51:39 -04006044 /* Read the port's service parameters. */
James Smart9f1177a2010-02-26 14:12:57 -05006045 rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
6046 if (rc) {
6047 phba->link_state = LPFC_HBA_ERROR;
6048 rc = -ENOMEM;
James Smart76a95d72010-11-20 23:11:48 -05006049 goto out_free_mbox;
James Smart9f1177a2010-02-26 14:12:57 -05006050 }
6051
James Smartda0436e2009-05-22 14:51:39 -04006052 mboxq->vport = vport;
6053 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6054 mp = (struct lpfc_dmabuf *) mboxq->context1;
6055 if (rc == MBX_SUCCESS) {
6056 memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
6057 rc = 0;
6058 }
6059
6060 /*
6061 * This memory was allocated by the lpfc_read_sparam routine. Release
6062 * it to the mbuf pool.
6063 */
6064 lpfc_mbuf_free(phba, mp->virt, mp->phys);
6065 kfree(mp);
6066 mboxq->context1 = NULL;
6067 if (unlikely(rc)) {
6068 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6069 "0382 READ_SPARAM command failed "
6070 "status %d, mbxStatus x%x\n",
6071 rc, bf_get(lpfc_mqe_status, mqe));
6072 phba->link_state = LPFC_HBA_ERROR;
6073 rc = -EIO;
James Smart76a95d72010-11-20 23:11:48 -05006074 goto out_free_mbox;
James Smartda0436e2009-05-22 14:51:39 -04006075 }
6076
James Smart05580562011-05-24 11:40:48 -04006077 lpfc_update_vport_wwn(vport);
James Smartda0436e2009-05-22 14:51:39 -04006078
6079 /* Update the fc_host data structures with new wwn. */
6080 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
6081 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
6082
6083 /* Register SGL pool to the device using non-embedded mailbox command */
James Smart6d368e52011-05-24 11:44:12 -04006084 if (!phba->sli4_hba.extents_in_use) {
6085 rc = lpfc_sli4_post_els_sgl_list(phba);
6086 if (unlikely(rc)) {
6087 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6088 "0582 Error %d during els sgl post "
6089 "operation\n", rc);
6090 rc = -ENODEV;
6091 goto out_free_mbox;
6092 }
6093 } else {
6094 rc = lpfc_sli4_post_els_sgl_list_ext(phba);
6095 if (unlikely(rc)) {
6096 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6097 "2560 Error %d during els sgl post "
6098 "operation\n", rc);
6099 rc = -ENODEV;
6100 goto out_free_mbox;
6101 }
James Smartda0436e2009-05-22 14:51:39 -04006102 }
6103
6104 /* Register SCSI SGL pool to the device */
6105 rc = lpfc_sli4_repost_scsi_sgl_list(phba);
6106 if (unlikely(rc)) {
James Smart6d368e52011-05-24 11:44:12 -04006107 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
James Smart6a9c52c2009-10-02 15:16:51 -04006108 "0383 Error %d during scsi sgl post "
6109 "operation\n", rc);
James Smartda0436e2009-05-22 14:51:39 -04006110 /* Some Scsi buffers were moved to the abort scsi list */
6111 /* A pci function reset will repost them */
6112 rc = -ENODEV;
James Smart76a95d72010-11-20 23:11:48 -05006113 goto out_free_mbox;
James Smartda0436e2009-05-22 14:51:39 -04006114 }
6115
6116 /* Post the rpi header region to the device. */
6117 rc = lpfc_sli4_post_all_rpi_hdrs(phba);
6118 if (unlikely(rc)) {
6119 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6120 "0393 Error %d during rpi post operation\n",
6121 rc);
6122 rc = -ENODEV;
James Smart76a95d72010-11-20 23:11:48 -05006123 goto out_free_mbox;
James Smartda0436e2009-05-22 14:51:39 -04006124 }
James Smartda0436e2009-05-22 14:51:39 -04006125
James Smart5350d872011-10-10 21:33:49 -04006126 /* Create all the SLI4 queues */
6127 rc = lpfc_sli4_queue_create(phba);
6128 if (rc) {
6129 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6130 "3089 Failed to allocate queues\n");
6131 rc = -ENODEV;
6132 goto out_stop_timers;
6133 }
James Smartda0436e2009-05-22 14:51:39 -04006134 /* Set up all the queues to the device */
6135 rc = lpfc_sli4_queue_setup(phba);
6136 if (unlikely(rc)) {
6137 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6138 "0381 Error %d during queue setup.\n ", rc);
James Smart5350d872011-10-10 21:33:49 -04006139 goto out_destroy_queue;
James Smartda0436e2009-05-22 14:51:39 -04006140 }
6141
6142 /* Arm the CQs and then EQs on device */
6143 lpfc_sli4_arm_cqeq_intr(phba);
6144
6145 /* Indicate device interrupt mode */
6146 phba->sli4_hba.intr_enable = 1;
6147
6148 /* Allow asynchronous mailbox command to go through */
6149 spin_lock_irq(&phba->hbalock);
6150 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
6151 spin_unlock_irq(&phba->hbalock);
6152
6153 /* Post receive buffers to the device */
6154 lpfc_sli4_rb_setup(phba);
6155
James Smartfc2b9892010-02-26 14:15:29 -05006156 /* Reset HBA FCF states after HBA reset */
6157 phba->fcf.fcf_flag = 0;
6158 phba->fcf.current_rec.flag = 0;
6159
James Smartda0436e2009-05-22 14:51:39 -04006160 /* Start the ELS watchdog timer */
James Smart8fa38512009-07-19 10:01:03 -04006161 mod_timer(&vport->els_tmofunc,
6162 jiffies + HZ * (phba->fc_ratov * 2));
James Smartda0436e2009-05-22 14:51:39 -04006163
6164 /* Start heart beat timer */
6165 mod_timer(&phba->hb_tmofunc,
6166 jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
6167 phba->hb_outstanding = 0;
6168 phba->last_completion_time = jiffies;
6169
6170 /* Start error attention (ERATT) polling timer */
6171 mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);
6172
James Smart75baf692010-06-08 18:31:21 -04006173 /* Enable PCIe device Advanced Error Reporting (AER) if configured */
6174 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
6175 rc = pci_enable_pcie_error_reporting(phba->pcidev);
6176 if (!rc) {
6177 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6178 "2829 This device supports "
6179 "Advanced Error Reporting (AER)\n");
6180 spin_lock_irq(&phba->hbalock);
6181 phba->hba_flag |= HBA_AER_ENABLED;
6182 spin_unlock_irq(&phba->hbalock);
6183 } else {
6184 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6185 "2830 This device does not support "
6186 "Advanced Error Reporting (AER)\n");
6187 phba->cfg_aer_support = 0;
6188 }
James Smart0a96e972011-07-22 18:37:28 -04006189 rc = 0;
James Smart75baf692010-06-08 18:31:21 -04006190 }
6191
James Smart76a95d72010-11-20 23:11:48 -05006192 if (!(phba->hba_flag & HBA_FCOE_MODE)) {
6193 /*
6194 * The FC Port needs to register FCFI (index 0)
6195 */
6196 lpfc_reg_fcfi(phba, mboxq);
6197 mboxq->vport = phba->pport;
6198 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
James Smart9589b062011-04-16 11:03:17 -04006199 if (rc != MBX_SUCCESS)
James Smart76a95d72010-11-20 23:11:48 -05006200 goto out_unset_queue;
James Smart9589b062011-04-16 11:03:17 -04006201 rc = 0;
6202 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi,
6203 &mboxq->u.mqe.un.reg_fcfi);
James Smart026abb82011-12-13 13:20:45 -05006204
6205 /* Check if the port is configured to be disabled */
6206 lpfc_sli_read_link_ste(phba);
James Smart76a95d72010-11-20 23:11:48 -05006207 }
James Smart026abb82011-12-13 13:20:45 -05006208
James Smartda0436e2009-05-22 14:51:39 -04006209 /*
6210 * The port is ready, set the host's link state to LINK_DOWN
6211 * in preparation for link interrupts.
6212 */
James Smartda0436e2009-05-22 14:51:39 -04006213 spin_lock_irq(&phba->hbalock);
6214 phba->link_state = LPFC_LINK_DOWN;
6215 spin_unlock_irq(&phba->hbalock);
James Smart026abb82011-12-13 13:20:45 -05006216 if (!(phba->hba_flag & HBA_FCOE_MODE) &&
6217 (phba->hba_flag & LINK_DISABLED)) {
6218 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
6219 "3103 Adapter Link is disabled.\n");
6220 lpfc_down_link(phba, mboxq);
6221 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6222 if (rc != MBX_SUCCESS) {
6223 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
6224 "3104 Adapter failed to issue "
6225 "DOWN_LINK mbox cmd, rc:x%x\n", rc);
6226 goto out_unset_queue;
6227 }
6228 } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
James Smartfedd3b72011-02-16 12:39:24 -05006229 rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
James Smart5350d872011-10-10 21:33:49 -04006230 if (rc)
6231 goto out_unset_queue;
6232 }
6233 mempool_free(mboxq, phba->mbox_mem_pool);
6234 return rc;
James Smart76a95d72010-11-20 23:11:48 -05006235out_unset_queue:
James Smartda0436e2009-05-22 14:51:39 -04006236 /* Unset all the queues set up in this routine when error out */
James Smart5350d872011-10-10 21:33:49 -04006237 lpfc_sli4_queue_unset(phba);
6238out_destroy_queue:
6239 lpfc_sli4_queue_destroy(phba);
James Smartda0436e2009-05-22 14:51:39 -04006240out_stop_timers:
James Smart5350d872011-10-10 21:33:49 -04006241 lpfc_stop_hba_timers(phba);
James Smartda0436e2009-05-22 14:51:39 -04006242out_free_mbox:
6243 mempool_free(mboxq, phba->mbox_mem_pool);
6244 return rc;
6245}
James Smarte59058c2008-08-24 21:49:00 -04006246
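/*
 * Illustrative sketch, not part of the driver: the "seconds to jiffies
 * deadline" arming used above for the ELS, heartbeat and ERATT poll
 * timers. demo_arm_timer() is a made-up wrapper; mod_timer(), jiffies
 * and HZ are the real kernel interfaces.
 */
static void demo_arm_timer(struct timer_list *tmo, unsigned int seconds)
{
	/* HZ is ticks per second, so the timer fires 'seconds' from now. */
	mod_timer(tmo, jiffies + HZ * seconds);
}
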
6247/**
James Smart3621a712009-04-06 18:47:14 -04006248 * lpfc_mbox_timeout - Timeout call back function for mbox timer
James Smarte59058c2008-08-24 21:49:00 -04006249 * @ptr: context object - pointer to hba structure.
dea31012005-04-17 16:05:31 -05006250 *
James Smarte59058c2008-08-24 21:49:00 -04006251 * This is the callback function for the mailbox timer. The mailbox
6252 * timer is armed when a new mailbox command is issued and the timer
6253 * is deleted when the mailbox completes. The function is called by
6254 * the kernel timer code when a mailbox does not complete within the
6255 * expected time. This function wakes up the worker thread to
6256 * process the mailbox timeout and returns. All the processing is
6257 * done by the worker thread function lpfc_mbox_timeout_handler.
6258 **/
dea31012005-04-17 16:05:31 -05006259void
6260lpfc_mbox_timeout(unsigned long ptr)
6261{
James Smart92d7f7b2007-06-17 19:56:38 -05006262 struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
dea31012005-04-17 16:05:31 -05006263 unsigned long iflag;
James Smart2e0fef82007-06-17 19:56:36 -05006264 uint32_t tmo_posted;
dea31012005-04-17 16:05:31 -05006265
James Smart2e0fef82007-06-17 19:56:36 -05006266 spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
James Smart92d7f7b2007-06-17 19:56:38 -05006267 tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO;
James Smart2e0fef82007-06-17 19:56:36 -05006268 if (!tmo_posted)
6269 phba->pport->work_port_events |= WORKER_MBOX_TMO;
6270 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
6271
James Smart5e9d9b82008-06-14 22:52:53 -04006272 if (!tmo_posted)
6273 lpfc_worker_wake_up(phba);
6274 return;
dea31012005-04-17 16:05:31 -05006275}
6276
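/*
 * Illustrative sketch, not part of the driver: the "post an event bit,
 * then wake the worker" pattern lpfc_mbox_timeout() above follows so
 * that the heavy recovery work runs in the worker thread rather than in
 * timer context. demo_post_worker_event() is a made-up name; the
 * locking and wake-up calls are the real driver interfaces.
 */
static void demo_post_worker_event(struct lpfc_hba *phba, uint32_t evt)
{
	unsigned long iflag;
	uint32_t already_posted;

	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	already_posted = phba->pport->work_port_events & evt;
	if (!already_posted)
		phba->pport->work_port_events |= evt;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	/* Only the first poster of this event needs to wake the worker. */
	if (!already_posted)
		lpfc_worker_wake_up(phba);
}
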
James Smarte59058c2008-08-24 21:49:00 -04006277
6278/**
James Smart3621a712009-04-06 18:47:14 -04006279 * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout
James Smarte59058c2008-08-24 21:49:00 -04006280 * @phba: Pointer to HBA context object.
6281 *
6282 * This function is called from worker thread when a mailbox command times out.
6283 * The caller is not required to hold any locks. This function will reset the
6284 * HBA and recover all the pending commands.
6285 **/
dea31012005-04-17 16:05:31 -05006286void
6287lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
6288{
James Smart2e0fef82007-06-17 19:56:36 -05006289 LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
James Smart04c68492009-05-22 14:52:52 -04006290 MAILBOX_t *mb = &pmbox->u.mb;
James Smart1dcb58e2007-04-25 09:51:30 -04006291 struct lpfc_sli *psli = &phba->sli;
6292 struct lpfc_sli_ring *pring;
dea31012005-04-17 16:05:31 -05006293
James Smarta257bf92009-04-06 18:48:10 -04006294 /* Check the pmbox pointer first. There is a race condition
6295 * between the mbox timeout handler getting executed in the
6296 * worklist and the mailbox actually completing. When this
6297 * race condition occurs, the mbox_active will be NULL.
6298 */
6299 spin_lock_irq(&phba->hbalock);
6300 if (pmbox == NULL) {
6301 lpfc_printf_log(phba, KERN_WARNING,
6302 LOG_MBOX | LOG_SLI,
6303 "0353 Active Mailbox cleared - mailbox timeout "
6304 "exiting\n");
6305 spin_unlock_irq(&phba->hbalock);
6306 return;
6307 }
6308
dea31012005-04-17 16:05:31 -05006309 /* Mbox cmd <mbxCommand> timeout */
James Smarted957682007-06-17 19:56:37 -05006310 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
James Smarte8b62012007-08-02 11:10:09 -04006311 "0310 Mailbox command x%x timeout Data: x%x x%x x%p\n",
James Smart92d7f7b2007-06-17 19:56:38 -05006312 mb->mbxCommand,
6313 phba->pport->port_state,
6314 phba->sli.sli_flag,
6315 phba->sli.mbox_active);
James Smarta257bf92009-04-06 18:48:10 -04006316 spin_unlock_irq(&phba->hbalock);
dea31012005-04-17 16:05:31 -05006317
James Smart1dcb58e2007-04-25 09:51:30 -04006318 /* Setting state unknown so lpfc_sli_abort_iocb_ring
6319 * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing
Lucas De Marchi25985ed2011-03-30 22:57:33 -03006320 * it to fail all outstanding SCSI IO.
James Smart1dcb58e2007-04-25 09:51:30 -04006321 */
James Smart2e0fef82007-06-17 19:56:36 -05006322 spin_lock_irq(&phba->pport->work_port_lock);
6323 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
6324 spin_unlock_irq(&phba->pport->work_port_lock);
6325 spin_lock_irq(&phba->hbalock);
6326 phba->link_state = LPFC_LINK_UNKNOWN;
James Smartf4b4c682009-05-22 14:53:12 -04006327 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
James Smart2e0fef82007-06-17 19:56:36 -05006328 spin_unlock_irq(&phba->hbalock);
James Smart1dcb58e2007-04-25 09:51:30 -04006329
6330 pring = &psli->ring[psli->fcp_ring];
6331 lpfc_sli_abort_iocb_ring(phba, pring);
6332
6333 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
James Smart76bb24e2007-10-27 13:38:00 -04006334 "0345 Resetting board due to mailbox timeout\n");
James Smart3772a992009-05-22 14:50:54 -04006335
6336 /* Reset the HBA device */
6337 lpfc_reset_hba(phba);
dea31012005-04-17 16:05:31 -05006338}
6339
James Smarte59058c2008-08-24 21:49:00 -04006340/**
James Smart3772a992009-05-22 14:50:54 -04006341 * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware
James Smarte59058c2008-08-24 21:49:00 -04006342 * @phba: Pointer to HBA context object.
6343 * @pmbox: Pointer to mailbox object.
6344 * @flag: Flag indicating how the mailbox need to be processed.
6345 *
6346 * This function is called by discovery code and HBA management code
James Smart3772a992009-05-22 14:50:54 -04006347 * to submit a mailbox command to firmware with SLI-3 interface spec. This
6348 * function gets the hbalock to protect the data structures.
James Smarte59058c2008-08-24 21:49:00 -04006349 * The mailbox command can be submitted in polling mode, in which case
6350 * this function will wait in a polling loop for the completion of the
6351 * mailbox.
6352 * If the mailbox is submitted in no_wait mode (not polling) the
6353 * function will submit the command and returns immediately without waiting
6354 * for the mailbox completion. The no_wait mode is supported only when the
6355 * HBA is in SLI2/SLI3 mode with interrupts enabled.
6356 * The SLI interface allows only one mailbox pending at a time. If the
6357 * mailbox is issued in polling mode and there is already a mailbox
6358 * pending, then the function will return an error. If the mailbox is issued
6359 * in NO_WAIT mode and there is a mailbox pending already, the function
6360 * will return MBX_BUSY after queuing the mailbox into mailbox queue.
6361 * The sli layer owns the mailbox object until the completion of mailbox
6362 * command if this function return MBX_BUSY or MBX_SUCCESS. For all other
6363 * return codes the caller owns the mailbox command after the return of
6364 * the function.
6365 **/
James Smart3772a992009-05-22 14:50:54 -04006366static int
6367lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
6368 uint32_t flag)
dea31012005-04-17 16:05:31 -05006369{
dea31012005-04-17 16:05:31 -05006370 MAILBOX_t *mb;
James Smart2e0fef82007-06-17 19:56:36 -05006371 struct lpfc_sli *psli = &phba->sli;
dea31012005-04-17 16:05:31 -05006372 uint32_t status, evtctr;
James Smart9940b972011-03-11 16:06:12 -05006373 uint32_t ha_copy, hc_copy;
dea31012005-04-17 16:05:31 -05006374 int i;
James Smart09372822008-01-11 01:52:54 -05006375 unsigned long timeout;
dea31012005-04-17 16:05:31 -05006376 unsigned long drvr_flag = 0;
James Smart34b02dc2008-08-24 21:49:55 -04006377 uint32_t word0, ldata;
dea31012005-04-17 16:05:31 -05006378 void __iomem *to_slim;
James Smart58da1ff2008-04-07 10:15:56 -04006379 int processing_queue = 0;
6380
6381 spin_lock_irqsave(&phba->hbalock, drvr_flag);
6382 if (!pmbox) {
James Smart8568a4d2009-07-19 10:01:16 -04006383 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
James Smart58da1ff2008-04-07 10:15:56 -04006384 /* processing mbox queue from intr_handler */
James Smart3772a992009-05-22 14:50:54 -04006385 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
6386 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
6387 return MBX_SUCCESS;
6388 }
James Smart58da1ff2008-04-07 10:15:56 -04006389 processing_queue = 1;
James Smart58da1ff2008-04-07 10:15:56 -04006390 pmbox = lpfc_mbox_get(phba);
6391 if (!pmbox) {
6392 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
6393 return MBX_SUCCESS;
6394 }
6395 }
dea31012005-04-17 16:05:31 -05006396
James Smarted957682007-06-17 19:56:37 -05006397 if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
James Smart92d7f7b2007-06-17 19:56:38 -05006398 pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
James Smarted957682007-06-17 19:56:37 -05006399 if(!pmbox->vport) {
James Smart58da1ff2008-04-07 10:15:56 -04006400 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
James Smarted957682007-06-17 19:56:37 -05006401 lpfc_printf_log(phba, KERN_ERR,
James Smart92d7f7b2007-06-17 19:56:38 -05006402 LOG_MBOX | LOG_VPORT,
James Smarte8b62012007-08-02 11:10:09 -04006403 "1806 Mbox x%x failed. No vport\n",
James Smart3772a992009-05-22 14:50:54 -04006404 pmbox->u.mb.mbxCommand);
James Smarted957682007-06-17 19:56:37 -05006405 dump_stack();
James Smart58da1ff2008-04-07 10:15:56 -04006406 goto out_not_finished;
James Smarted957682007-06-17 19:56:37 -05006407 }
6408 }
6409
Linas Vepstas8d63f372007-02-14 14:28:36 -06006410 /* If the PCI channel is in offline state, do not post mbox. */
James Smart58da1ff2008-04-07 10:15:56 -04006411 if (unlikely(pci_channel_offline(phba->pcidev))) {
6412 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
6413 goto out_not_finished;
6414 }
Linas Vepstas8d63f372007-02-14 14:28:36 -06006415
James Smarta257bf92009-04-06 18:48:10 -04006416	/* If the HBA has a deferred error attention, fail the mailbox command. */
6417 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
6418 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
6419 goto out_not_finished;
6420 }
6421
dea31012005-04-17 16:05:31 -05006422 psli = &phba->sli;
James Smart92d7f7b2007-06-17 19:56:38 -05006423
James Smart3772a992009-05-22 14:50:54 -04006424 mb = &pmbox->u.mb;
dea31012005-04-17 16:05:31 -05006425 status = MBX_SUCCESS;
6426
James Smart2e0fef82007-06-17 19:56:36 -05006427 if (phba->link_state == LPFC_HBA_ERROR) {
6428 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
Jamie Wellnitz41415862006-02-28 19:25:27 -05006429
6430 /* Mbox command <mbxCommand> cannot issue */
James Smart3772a992009-05-22 14:50:54 -04006431 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6432 "(%d):0311 Mailbox command x%x cannot "
6433 "issue Data: x%x x%x\n",
6434 pmbox->vport ? pmbox->vport->vpi : 0,
6435 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
James Smart58da1ff2008-04-07 10:15:56 -04006436 goto out_not_finished;
Jamie Wellnitz41415862006-02-28 19:25:27 -05006437 }
6438
James Smart9940b972011-03-11 16:06:12 -05006439 if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) {
6440 if (lpfc_readl(phba->HCregaddr, &hc_copy) ||
6441 !(hc_copy & HC_MBINT_ENA)) {
6442 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
6443 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
James Smart3772a992009-05-22 14:50:54 -04006444 "(%d):2528 Mailbox command x%x cannot "
6445 "issue Data: x%x x%x\n",
6446 pmbox->vport ? pmbox->vport->vpi : 0,
6447 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
James Smart9940b972011-03-11 16:06:12 -05006448 goto out_not_finished;
6449 }
James Smart92908312006-03-07 15:04:13 -05006450 }
6451
dea31012005-04-17 16:05:31 -05006452 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
6453 /* Polling for a mbox command when another one is already active
6454 * is not allowed in SLI. Also, the driver must have established
6455 * SLI2 mode to queue and process multiple mbox commands.
6456 */
6457
6458 if (flag & MBX_POLL) {
James Smart2e0fef82007-06-17 19:56:36 -05006459 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
dea31012005-04-17 16:05:31 -05006460
6461 /* Mbox command <mbxCommand> cannot issue */
James Smart3772a992009-05-22 14:50:54 -04006462 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6463 "(%d):2529 Mailbox command x%x "
6464 "cannot issue Data: x%x x%x\n",
6465 pmbox->vport ? pmbox->vport->vpi : 0,
6466 pmbox->u.mb.mbxCommand,
6467 psli->sli_flag, flag);
James Smart58da1ff2008-04-07 10:15:56 -04006468 goto out_not_finished;
dea31012005-04-17 16:05:31 -05006469 }
6470
James Smart3772a992009-05-22 14:50:54 -04006471 if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) {
James Smart2e0fef82007-06-17 19:56:36 -05006472 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
dea31012005-04-17 16:05:31 -05006473 /* Mbox command <mbxCommand> cannot issue */
James Smart3772a992009-05-22 14:50:54 -04006474 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6475 "(%d):2530 Mailbox command x%x "
6476 "cannot issue Data: x%x x%x\n",
6477 pmbox->vport ? pmbox->vport->vpi : 0,
6478 pmbox->u.mb.mbxCommand,
6479 psli->sli_flag, flag);
James Smart58da1ff2008-04-07 10:15:56 -04006480 goto out_not_finished;
dea31012005-04-17 16:05:31 -05006481 }
6482
dea31012005-04-17 16:05:31 -05006483 /* Another mailbox command is still being processed, queue this
6484 * command to be processed later.
6485 */
6486 lpfc_mbox_put(phba, pmbox);
6487
6488 /* Mbox cmd issue - BUSY */
James Smarted957682007-06-17 19:56:37 -05006489 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
James Smarte8b62012007-08-02 11:10:09 -04006490 "(%d):0308 Mbox cmd issue - BUSY Data: "
James Smart92d7f7b2007-06-17 19:56:38 -05006491 "x%x x%x x%x x%x\n",
James Smart92d7f7b2007-06-17 19:56:38 -05006492 pmbox->vport ? pmbox->vport->vpi : 0xffffff,
6493 mb->mbxCommand, phba->pport->port_state,
6494 psli->sli_flag, flag);
dea31012005-04-17 16:05:31 -05006495
6496 psli->slistat.mbox_busy++;
James Smart2e0fef82007-06-17 19:56:36 -05006497 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
dea31012005-04-17 16:05:31 -05006498
James Smart858c9f62007-06-17 19:56:39 -05006499 if (pmbox->vport) {
6500 lpfc_debugfs_disc_trc(pmbox->vport,
6501 LPFC_DISC_TRC_MBOX_VPORT,
6502 "MBOX Bsy vport: cmd:x%x mb:x%x x%x",
6503 (uint32_t)mb->mbxCommand,
6504 mb->un.varWords[0], mb->un.varWords[1]);
6505 }
6506 else {
6507 lpfc_debugfs_disc_trc(phba->pport,
6508 LPFC_DISC_TRC_MBOX,
6509 "MBOX Bsy: cmd:x%x mb:x%x x%x",
6510 (uint32_t)mb->mbxCommand,
6511 mb->un.varWords[0], mb->un.varWords[1]);
6512 }
6513
James Smart2e0fef82007-06-17 19:56:36 -05006514 return MBX_BUSY;
dea31012005-04-17 16:05:31 -05006515 }
6516
dea31012005-04-17 16:05:31 -05006517 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
6518
6519 /* If we are not polling, we MUST be in SLI2 mode */
6520 if (flag != MBX_POLL) {
James Smart3772a992009-05-22 14:50:54 -04006521 if (!(psli->sli_flag & LPFC_SLI_ACTIVE) &&
Jamie Wellnitz41415862006-02-28 19:25:27 -05006522 (mb->mbxCommand != MBX_KILL_BOARD)) {
dea31012005-04-17 16:05:31 -05006523 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
James Smart2e0fef82007-06-17 19:56:36 -05006524 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
dea31012005-04-17 16:05:31 -05006525 /* Mbox command <mbxCommand> cannot issue */
James Smart3772a992009-05-22 14:50:54 -04006526 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6527 "(%d):2531 Mailbox command x%x "
6528 "cannot issue Data: x%x x%x\n",
6529 pmbox->vport ? pmbox->vport->vpi : 0,
6530 pmbox->u.mb.mbxCommand,
6531 psli->sli_flag, flag);
James Smart58da1ff2008-04-07 10:15:56 -04006532 goto out_not_finished;
dea31012005-04-17 16:05:31 -05006533 }
6534 /* timeout active mbox command */
James Smarta309a6b2006-08-01 07:33:43 -04006535 mod_timer(&psli->mbox_tmo, (jiffies +
James Smarta183a152011-10-10 21:32:43 -04006536 (HZ * lpfc_mbox_tmo_val(phba, pmbox))));
dea31012005-04-17 16:05:31 -05006537 }
6538
6539 /* Mailbox cmd <cmd> issue */
James Smarted957682007-06-17 19:56:37 -05006540 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
James Smarte8b62012007-08-02 11:10:09 -04006541 "(%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
James Smart92d7f7b2007-06-17 19:56:38 -05006542 "x%x\n",
James Smarte8b62012007-08-02 11:10:09 -04006543 pmbox->vport ? pmbox->vport->vpi : 0,
James Smart92d7f7b2007-06-17 19:56:38 -05006544 mb->mbxCommand, phba->pport->port_state,
6545 psli->sli_flag, flag);
dea31012005-04-17 16:05:31 -05006546
James Smart858c9f62007-06-17 19:56:39 -05006547 if (mb->mbxCommand != MBX_HEARTBEAT) {
6548 if (pmbox->vport) {
6549 lpfc_debugfs_disc_trc(pmbox->vport,
6550 LPFC_DISC_TRC_MBOX_VPORT,
6551 "MBOX Send vport: cmd:x%x mb:x%x x%x",
6552 (uint32_t)mb->mbxCommand,
6553 mb->un.varWords[0], mb->un.varWords[1]);
6554 }
6555 else {
6556 lpfc_debugfs_disc_trc(phba->pport,
6557 LPFC_DISC_TRC_MBOX,
6558 "MBOX Send: cmd:x%x mb:x%x x%x",
6559 (uint32_t)mb->mbxCommand,
6560 mb->un.varWords[0], mb->un.varWords[1]);
6561 }
6562 }
6563
dea31012005-04-17 16:05:31 -05006564 psli->slistat.mbox_cmd++;
6565 evtctr = psli->slistat.mbox_event;
6566
6567 /* next set own bit for the adapter and copy over command word */
6568 mb->mbxOwner = OWN_CHIP;
6569
James Smart3772a992009-05-22 14:50:54 -04006570 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
James Smart7a470272010-03-15 11:25:20 -04006571 /* Populate mbox extension offset word. */
6572 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) {
6573 *(((uint32_t *)mb) + pmbox->mbox_offset_word)
6574 = (uint8_t *)phba->mbox_ext
6575 - (uint8_t *)phba->mbox;
6576 }
6577
6578 /* Copy the mailbox extension data */
6579 if (pmbox->in_ext_byte_len && pmbox->context2) {
6580 lpfc_sli_pcimem_bcopy(pmbox->context2,
6581 (uint8_t *)phba->mbox_ext,
6582 pmbox->in_ext_byte_len);
6583 }
6584 /* Copy command data to host SLIM area */
James Smart34b02dc2008-08-24 21:49:55 -04006585 lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE);
dea31012005-04-17 16:05:31 -05006586 } else {
James Smart7a470272010-03-15 11:25:20 -04006587 /* Populate mbox extension offset word. */
6588 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len)
6589 *(((uint32_t *)mb) + pmbox->mbox_offset_word)
6590 = MAILBOX_HBA_EXT_OFFSET;
6591
6592 /* Copy the mailbox extension data */
6593 if (pmbox->in_ext_byte_len && pmbox->context2) {
6594 lpfc_memcpy_to_slim(phba->MBslimaddr +
6595 MAILBOX_HBA_EXT_OFFSET,
6596 pmbox->context2, pmbox->in_ext_byte_len);
6597
6598 }
James Smart92908312006-03-07 15:04:13 -05006599 if (mb->mbxCommand == MBX_CONFIG_PORT) {
dea31012005-04-17 16:05:31 -05006600 /* copy command data into host mbox for cmpl */
James Smart34b02dc2008-08-24 21:49:55 -04006601 lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE);
dea31012005-04-17 16:05:31 -05006602 }
6603
6604 /* First copy mbox command data to HBA SLIM, skip past first
6605 word */
6606 to_slim = phba->MBslimaddr + sizeof (uint32_t);
6607 lpfc_memcpy_to_slim(to_slim, &mb->un.varWords[0],
6608 MAILBOX_CMD_SIZE - sizeof (uint32_t));
6609
6610 /* Next copy over first word, with mbxOwner set */
James Smart34b02dc2008-08-24 21:49:55 -04006611 ldata = *((uint32_t *)mb);
dea31012005-04-17 16:05:31 -05006612 to_slim = phba->MBslimaddr;
6613 writel(ldata, to_slim);
6614 readl(to_slim); /* flush */
6615
6616 if (mb->mbxCommand == MBX_CONFIG_PORT) {
6617 /* switch over to host mailbox */
James Smart3772a992009-05-22 14:50:54 -04006618 psli->sli_flag |= LPFC_SLI_ACTIVE;
dea31012005-04-17 16:05:31 -05006619 }
6620 }
6621
6622 wmb();
dea31012005-04-17 16:05:31 -05006623
6624 switch (flag) {
6625 case MBX_NOWAIT:
James Smart09372822008-01-11 01:52:54 -05006626 /* Set up reference to mailbox command */
dea31012005-04-17 16:05:31 -05006627 psli->mbox_active = pmbox;
James Smart09372822008-01-11 01:52:54 -05006628 /* Interrupt board to do it */
6629 writel(CA_MBATT, phba->CAregaddr);
6630 readl(phba->CAregaddr); /* flush */
6631 /* Don't wait for it to finish, just return */
dea31012005-04-17 16:05:31 -05006632 break;
6633
6634 case MBX_POLL:
James Smart09372822008-01-11 01:52:54 -05006635 /* Set up null reference to mailbox command */
dea31012005-04-17 16:05:31 -05006636 psli->mbox_active = NULL;
James Smart09372822008-01-11 01:52:54 -05006637 /* Interrupt board to do it */
6638 writel(CA_MBATT, phba->CAregaddr);
6639 readl(phba->CAregaddr); /* flush */
6640
James Smart3772a992009-05-22 14:50:54 -04006641 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
dea31012005-04-17 16:05:31 -05006642 /* First read mbox status word */
James Smart34b02dc2008-08-24 21:49:55 -04006643 word0 = *((uint32_t *)phba->mbox);
dea31012005-04-17 16:05:31 -05006644 word0 = le32_to_cpu(word0);
6645 } else {
6646 /* First read mbox status word */
James Smart9940b972011-03-11 16:06:12 -05006647 if (lpfc_readl(phba->MBslimaddr, &word0)) {
6648 spin_unlock_irqrestore(&phba->hbalock,
6649 drvr_flag);
6650 goto out_not_finished;
6651 }
dea31012005-04-17 16:05:31 -05006652 }
6653
6654 /* Read the HBA Host Attention Register */
James Smart9940b972011-03-11 16:06:12 -05006655 if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
6656 spin_unlock_irqrestore(&phba->hbalock,
6657 drvr_flag);
6658 goto out_not_finished;
6659 }
James Smarta183a152011-10-10 21:32:43 -04006660 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
6661 1000) + jiffies;
James Smart09372822008-01-11 01:52:54 -05006662 i = 0;
dea31012005-04-17 16:05:31 -05006663 /* Wait for command to complete */
Jamie Wellnitz41415862006-02-28 19:25:27 -05006664 while (((word0 & OWN_CHIP) == OWN_CHIP) ||
6665 (!(ha_copy & HA_MBATT) &&
James Smart2e0fef82007-06-17 19:56:36 -05006666 (phba->link_state > LPFC_WARM_START))) {
James Smart09372822008-01-11 01:52:54 -05006667 if (time_after(jiffies, timeout)) {
dea31012005-04-17 16:05:31 -05006668 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
James Smart2e0fef82007-06-17 19:56:36 -05006669 spin_unlock_irqrestore(&phba->hbalock,
dea31012005-04-17 16:05:31 -05006670 drvr_flag);
James Smart58da1ff2008-04-07 10:15:56 -04006671 goto out_not_finished;
dea31012005-04-17 16:05:31 -05006672 }
6673
6674 /* Check if we took a mbox interrupt while we were
6675 polling */
6676 if (((word0 & OWN_CHIP) != OWN_CHIP)
6677 && (evtctr != psli->slistat.mbox_event))
6678 break;
6679
James Smart09372822008-01-11 01:52:54 -05006680 if (i++ > 10) {
6681 spin_unlock_irqrestore(&phba->hbalock,
6682 drvr_flag);
6683 msleep(1);
6684 spin_lock_irqsave(&phba->hbalock, drvr_flag);
6685 }
dea31012005-04-17 16:05:31 -05006686
James Smart3772a992009-05-22 14:50:54 -04006687 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
dea31012005-04-17 16:05:31 -05006688 /* First copy command data */
James Smart34b02dc2008-08-24 21:49:55 -04006689 word0 = *((uint32_t *)phba->mbox);
dea31012005-04-17 16:05:31 -05006690 word0 = le32_to_cpu(word0);
6691 if (mb->mbxCommand == MBX_CONFIG_PORT) {
6692 MAILBOX_t *slimmb;
James Smart34b02dc2008-08-24 21:49:55 -04006693 uint32_t slimword0;
dea31012005-04-17 16:05:31 -05006694 /* Check real SLIM for any errors */
6695 slimword0 = readl(phba->MBslimaddr);
6696 slimmb = (MAILBOX_t *) & slimword0;
6697 if (((slimword0 & OWN_CHIP) != OWN_CHIP)
6698 && slimmb->mbxStatus) {
6699 psli->sli_flag &=
James Smart3772a992009-05-22 14:50:54 -04006700 ~LPFC_SLI_ACTIVE;
dea31012005-04-17 16:05:31 -05006701 word0 = slimword0;
6702 }
6703 }
6704 } else {
6705 /* First copy command data */
6706 word0 = readl(phba->MBslimaddr);
6707 }
6708 /* Read the HBA Host Attention Register */
James Smart9940b972011-03-11 16:06:12 -05006709 if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
6710 spin_unlock_irqrestore(&phba->hbalock,
6711 drvr_flag);
6712 goto out_not_finished;
6713 }
dea31012005-04-17 16:05:31 -05006714 }
6715
James Smart3772a992009-05-22 14:50:54 -04006716 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
dea31012005-04-17 16:05:31 -05006717 /* copy results back to user */
James Smart34b02dc2008-08-24 21:49:55 -04006718 lpfc_sli_pcimem_bcopy(phba->mbox, mb, MAILBOX_CMD_SIZE);
James Smart7a470272010-03-15 11:25:20 -04006719 /* Copy the mailbox extension data */
6720 if (pmbox->out_ext_byte_len && pmbox->context2) {
6721 lpfc_sli_pcimem_bcopy(phba->mbox_ext,
6722 pmbox->context2,
6723 pmbox->out_ext_byte_len);
6724 }
dea31012005-04-17 16:05:31 -05006725 } else {
6726 /* First copy command data */
6727 lpfc_memcpy_from_slim(mb, phba->MBslimaddr,
6728 MAILBOX_CMD_SIZE);
James Smart7a470272010-03-15 11:25:20 -04006729 /* Copy the mailbox extension data */
6730 if (pmbox->out_ext_byte_len && pmbox->context2) {
6731 lpfc_memcpy_from_slim(pmbox->context2,
6732 phba->MBslimaddr +
6733 MAILBOX_HBA_EXT_OFFSET,
6734 pmbox->out_ext_byte_len);
dea31012005-04-17 16:05:31 -05006735 }
6736 }
6737
6738 writel(HA_MBATT, phba->HAregaddr);
6739 readl(phba->HAregaddr); /* flush */
6740
6741 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
6742 status = mb->mbxStatus;
6743 }
6744
James Smart2e0fef82007-06-17 19:56:36 -05006745 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
6746 return status;
James Smart58da1ff2008-04-07 10:15:56 -04006747
6748out_not_finished:
6749 if (processing_queue) {
James Smartda0436e2009-05-22 14:51:39 -04006750 pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED;
James Smart58da1ff2008-04-07 10:15:56 -04006751 lpfc_mbox_cmpl_put(phba, pmbox);
6752 }
6753 return MBX_NOT_FINISHED;
dea31012005-04-17 16:05:31 -05006754}
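
/*
 * Editor's usage sketch (illustrative only, not part of the driver build):
 * issuing a mailbox command through the polling path handled above. The
 * choice of command (READ_REV built by lpfc_read_rev()) and the mempool
 * allocation pattern are assumptions borrowed from other parts of the
 * driver; the key point is that the caller owns the mailbox once the
 * routine returns and must free it itself.
 *
 *	LPFC_MBOXQ_t *pmb;
 *	int rc;
 *
 *	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *	if (!pmb)
 *		return -ENOMEM;
 *	lpfc_read_rev(phba, pmb);
 *	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
 *	if (rc != MBX_SUCCESS) {
 *		mempool_free(pmb, phba->mbox_mem_pool);
 *		return -EIO;
 *	}
 *	mempool_free(pmb, phba->mbox_mem_pool);
 */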
6755
James Smarte59058c2008-08-24 21:49:00 -04006756/**
James Smartf1126682009-06-10 17:22:44 -04006757 * lpfc_sli4_async_mbox_block - Block posting SLI4 asynchronous mailbox command
6758 * @phba: Pointer to HBA context object.
6759 *
6760 * The function blocks the posting of SLI4 asynchronous mailbox commands from
6761 * the driver internal pending mailbox queue. It will then try to wait out the
 6762 * possible outstanding mailbox command before returning.
6763 *
6764 * Returns:
6765 * 0 - the outstanding mailbox command completed; otherwise, the wait for
6766 * the outstanding mailbox command timed out.
6767 **/
6768static int
6769lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
6770{
6771 struct lpfc_sli *psli = &phba->sli;
James Smartf1126682009-06-10 17:22:44 -04006772 int rc = 0;
James Smarta183a152011-10-10 21:32:43 -04006773 unsigned long timeout = 0;
James Smartf1126682009-06-10 17:22:44 -04006774
6775 /* Mark the asynchronous mailbox command posting as blocked */
6776 spin_lock_irq(&phba->hbalock);
6777 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
James Smartf1126682009-06-10 17:22:44 -04006778 /* Determine how long we might wait for the active mailbox
6779 * command to be gracefully completed by firmware.
6780 */
James Smarta183a152011-10-10 21:32:43 -04006781 if (phba->sli.mbox_active)
6782 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
6783 phba->sli.mbox_active) *
6784 1000) + jiffies;
6785 spin_unlock_irq(&phba->hbalock);
6786
James Smartf1126682009-06-10 17:22:44 -04006787	/* Wait for the outstanding mailbox command to complete */
6788 while (phba->sli.mbox_active) {
6789 /* Check active mailbox complete status every 2ms */
6790 msleep(2);
6791 if (time_after(jiffies, timeout)) {
 6792			/* Timeout, mark the outstanding cmd as not completed */
6793 rc = 1;
6794 break;
6795 }
6796 }
6797
 6798	/* Cannot cleanly block async mailbox command posting, so fail it */
6799 if (rc) {
6800 spin_lock_irq(&phba->hbalock);
6801 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
6802 spin_unlock_irq(&phba->hbalock);
6803 }
6804 return rc;
6805}
6806
6807/**
 6808 * lpfc_sli4_async_mbox_unblock - Unblock posting SLI4 async mailbox command
6809 * @phba: Pointer to HBA context object.
6810 *
 6811 * The function unblocks and resumes posting of SLI4 asynchronous mailbox
 6812 * commands from the driver internal pending mailbox queue. It makes sure
 6813 * that there is no outstanding mailbox command before resuming posting
 6814 * asynchronous mailbox commands. If, for any reason, there is an outstanding
 6815 * mailbox command, it will try to wait it out before resuming asynchronous
6816 * mailbox command posting.
6817 **/
6818static void
6819lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba)
6820{
6821 struct lpfc_sli *psli = &phba->sli;
6822
6823 spin_lock_irq(&phba->hbalock);
6824 if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
6825 /* Asynchronous mailbox posting is not blocked, do nothing */
6826 spin_unlock_irq(&phba->hbalock);
6827 return;
6828 }
6829
 6830	/* The outstanding synchronous mailbox command is guaranteed to be done,
 6831	 * whether it succeeded or timed out; after a timeout the outstanding
 6832	 * mailbox command is always removed, so just unblock posting of async
 6833	 * mailbox commands and resume
6834 */
6835 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
6836 spin_unlock_irq(&phba->hbalock);
6837
 6838	/* wake up worker thread to post asynchronous mailbox command */
6839 lpfc_worker_wake_up(phba);
6840}
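
/*
 * Editor's note with a minimal sketch (not driver code): the block/unblock
 * pair above is meant to bracket a synchronous bootstrap-mailbox post so no
 * asynchronous command races with it, which is how lpfc_sli_issue_mbox_s4()
 * below uses it:
 *
 *	if (!lpfc_sli4_async_mbox_block(phba)) {
 *		rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
 *		lpfc_sli4_async_mbox_unblock(phba);
 *	}
 */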
6841
6842/**
James Smartda0436e2009-05-22 14:51:39 -04006843 * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox
6844 * @phba: Pointer to HBA context object.
6845 * @mboxq: Pointer to mailbox object.
6846 *
6847 * The function posts a mailbox to the port. The mailbox is expected
 6848 * to be completely filled in and ready for the port to operate on it.
6849 * This routine executes a synchronous completion operation on the
6850 * mailbox by polling for its completion.
6851 *
6852 * The caller must not be holding any locks when calling this routine.
6853 *
6854 * Returns:
6855 * MBX_SUCCESS - mailbox posted successfully
6856 * Any of the MBX error values.
6857 **/
6858static int
6859lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
6860{
6861 int rc = MBX_SUCCESS;
6862 unsigned long iflag;
6863 uint32_t db_ready;
6864 uint32_t mcqe_status;
6865 uint32_t mbx_cmnd;
6866 unsigned long timeout;
6867 struct lpfc_sli *psli = &phba->sli;
6868 struct lpfc_mqe *mb = &mboxq->u.mqe;
6869 struct lpfc_bmbx_create *mbox_rgn;
6870 struct dma_address *dma_address;
6871 struct lpfc_register bmbx_reg;
6872
6873 /*
6874 * Only one mailbox can be active to the bootstrap mailbox region
6875 * at a time and there is no queueing provided.
6876 */
6877 spin_lock_irqsave(&phba->hbalock, iflag);
6878 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
6879 spin_unlock_irqrestore(&phba->hbalock, iflag);
6880 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
James Smarta183a152011-10-10 21:32:43 -04006881 "(%d):2532 Mailbox command x%x (x%x/x%x) "
James Smartda0436e2009-05-22 14:51:39 -04006882 "cannot issue Data: x%x x%x\n",
6883 mboxq->vport ? mboxq->vport->vpi : 0,
6884 mboxq->u.mb.mbxCommand,
James Smarta183a152011-10-10 21:32:43 -04006885 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
6886 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
James Smartda0436e2009-05-22 14:51:39 -04006887 psli->sli_flag, MBX_POLL);
6888 return MBXERR_ERROR;
6889 }
6890 /* The server grabs the token and owns it until release */
6891 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
6892 phba->sli.mbox_active = mboxq;
6893 spin_unlock_irqrestore(&phba->hbalock, iflag);
6894
6895 /*
6896 * Initialize the bootstrap memory region to avoid stale data areas
6897 * in the mailbox post. Then copy the caller's mailbox contents to
6898 * the bmbx mailbox region.
6899 */
6900 mbx_cmnd = bf_get(lpfc_mqe_command, mb);
6901 memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create));
6902 lpfc_sli_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt,
6903 sizeof(struct lpfc_mqe));
6904
6905 /* Post the high mailbox dma address to the port and wait for ready. */
6906 dma_address = &phba->sli4_hba.bmbx.dma_address;
6907 writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);
6908
James Smarta183a152011-10-10 21:32:43 -04006909 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)
James Smartda0436e2009-05-22 14:51:39 -04006910 * 1000) + jiffies;
6911 do {
6912 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
6913 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
6914 if (!db_ready)
6915 msleep(2);
6916
6917 if (time_after(jiffies, timeout)) {
6918 rc = MBXERR_ERROR;
6919 goto exit;
6920 }
6921 } while (!db_ready);
6922
6923 /* Post the low mailbox dma address to the port. */
6924 writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr);
James Smarta183a152011-10-10 21:32:43 -04006925 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)
James Smartda0436e2009-05-22 14:51:39 -04006926 * 1000) + jiffies;
6927 do {
6928 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
6929 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
6930 if (!db_ready)
6931 msleep(2);
6932
6933 if (time_after(jiffies, timeout)) {
6934 rc = MBXERR_ERROR;
6935 goto exit;
6936 }
6937 } while (!db_ready);
6938
6939 /*
6940 * Read the CQ to ensure the mailbox has completed.
6941 * If so, update the mailbox status so that the upper layers
6942 * can complete the request normally.
6943 */
6944 lpfc_sli_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb,
6945 sizeof(struct lpfc_mqe));
6946 mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt;
6947 lpfc_sli_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
6948 sizeof(struct lpfc_mcqe));
6949 mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);
James Smart05580562011-05-24 11:40:48 -04006950 /*
6951 * When the CQE status indicates a failure and the mailbox status
6952 * indicates success then copy the CQE status into the mailbox status
6953 * (and prefix it with x4000).
6954 */
James Smartda0436e2009-05-22 14:51:39 -04006955 if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
James Smart05580562011-05-24 11:40:48 -04006956 if (bf_get(lpfc_mqe_status, mb) == MBX_SUCCESS)
6957 bf_set(lpfc_mqe_status, mb,
6958 (LPFC_MBX_ERROR_RANGE | mcqe_status));
James Smartda0436e2009-05-22 14:51:39 -04006959 rc = MBXERR_ERROR;
James Smartd7c47992010-06-08 18:31:54 -04006960 } else
6961 lpfc_sli4_swap_str(phba, mboxq);
James Smartda0436e2009-05-22 14:51:39 -04006962
6963 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
James Smarta183a152011-10-10 21:32:43 -04006964 "(%d):0356 Mailbox cmd x%x (x%x/x%x) Status x%x "
James Smartda0436e2009-05-22 14:51:39 -04006965 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x"
6966 " x%x x%x CQ: x%x x%x x%x x%x\n",
James Smarta183a152011-10-10 21:32:43 -04006967 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
6968 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
6969 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
James Smartda0436e2009-05-22 14:51:39 -04006970 bf_get(lpfc_mqe_status, mb),
6971 mb->un.mb_words[0], mb->un.mb_words[1],
6972 mb->un.mb_words[2], mb->un.mb_words[3],
6973 mb->un.mb_words[4], mb->un.mb_words[5],
6974 mb->un.mb_words[6], mb->un.mb_words[7],
6975 mb->un.mb_words[8], mb->un.mb_words[9],
6976 mb->un.mb_words[10], mb->un.mb_words[11],
6977 mb->un.mb_words[12], mboxq->mcqe.word0,
6978 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
6979 mboxq->mcqe.trailer);
6980exit:
 6981	/* We are holding the token, no lock needed when releasing it */
6982 spin_lock_irqsave(&phba->hbalock, iflag);
6983 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
6984 phba->sli.mbox_active = NULL;
6985 spin_unlock_irqrestore(&phba->hbalock, iflag);
6986 return rc;
6987}
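
/*
 * Editor's sketch (illustrative assumption, not driver code): after a
 * synchronous bootstrap post the MCQE status may have been folded into the
 * MQE status word with the LPFC_MBX_ERROR_RANGE prefix, so an in-file
 * caller typically checks both the return code and the MQE status:
 *
 *	uint32_t mqe_status;
 *	int rc;
 *
 *	rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
 *	mqe_status = bf_get(lpfc_mqe_status, &mboxq->u.mqe);
 *	if (rc != MBX_SUCCESS || mqe_status != MBX_SUCCESS)
 *		return -EIO;
 */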
6988
6989/**
6990 * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware
6991 * @phba: Pointer to HBA context object.
6992 * @pmbox: Pointer to mailbox object.
6993 * @flag: Flag indicating how the mailbox need to be processed.
6994 *
6995 * This function is called by discovery code and HBA management code to submit
6996 * a mailbox command to firmware with SLI-4 interface spec.
6997 *
 6998 * Return codes: the caller owns the mailbox command after the return of the
 6999 * function.
7000 **/
7001static int
7002lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
7003 uint32_t flag)
7004{
7005 struct lpfc_sli *psli = &phba->sli;
7006 unsigned long iflags;
7007 int rc;
7008
James Smartb76f2dc2011-07-22 18:37:42 -04007009	/* dump the mailbox command from the issue path if idiag dump is set up */
7010 lpfc_idiag_mbxacc_dump_issue_mbox(phba, &mboxq->u.mb);
7011
James Smart8fa38512009-07-19 10:01:03 -04007012 rc = lpfc_mbox_dev_check(phba);
7013 if (unlikely(rc)) {
7014 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
James Smarta183a152011-10-10 21:32:43 -04007015 "(%d):2544 Mailbox command x%x (x%x/x%x) "
James Smart8fa38512009-07-19 10:01:03 -04007016 "cannot issue Data: x%x x%x\n",
7017 mboxq->vport ? mboxq->vport->vpi : 0,
7018 mboxq->u.mb.mbxCommand,
James Smarta183a152011-10-10 21:32:43 -04007019 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7020 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
James Smart8fa38512009-07-19 10:01:03 -04007021 psli->sli_flag, flag);
7022 goto out_not_finished;
7023 }
7024
James Smartda0436e2009-05-22 14:51:39 -04007025 /* Detect polling mode and jump to a handler */
7026 if (!phba->sli4_hba.intr_enable) {
7027 if (flag == MBX_POLL)
7028 rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
7029 else
7030 rc = -EIO;
7031 if (rc != MBX_SUCCESS)
James Smart05580562011-05-24 11:40:48 -04007032 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
James Smartda0436e2009-05-22 14:51:39 -04007033 "(%d):2541 Mailbox command x%x "
James Smarta183a152011-10-10 21:32:43 -04007034 "(x%x/x%x) cannot issue Data: "
7035 "x%x x%x\n",
James Smartda0436e2009-05-22 14:51:39 -04007036 mboxq->vport ? mboxq->vport->vpi : 0,
7037 mboxq->u.mb.mbxCommand,
James Smarta183a152011-10-10 21:32:43 -04007038 lpfc_sli_config_mbox_subsys_get(phba,
7039 mboxq),
7040 lpfc_sli_config_mbox_opcode_get(phba,
7041 mboxq),
James Smartda0436e2009-05-22 14:51:39 -04007042 psli->sli_flag, flag);
7043 return rc;
7044 } else if (flag == MBX_POLL) {
James Smartf1126682009-06-10 17:22:44 -04007045 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
7046 "(%d):2542 Try to issue mailbox command "
James Smarta183a152011-10-10 21:32:43 -04007047				"x%x (x%x/x%x) synchronously ahead of async "
James Smartf1126682009-06-10 17:22:44 -04007048 "mailbox command queue: x%x x%x\n",
James Smartda0436e2009-05-22 14:51:39 -04007049 mboxq->vport ? mboxq->vport->vpi : 0,
7050 mboxq->u.mb.mbxCommand,
James Smarta183a152011-10-10 21:32:43 -04007051 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7052 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
James Smartda0436e2009-05-22 14:51:39 -04007053 psli->sli_flag, flag);
James Smartf1126682009-06-10 17:22:44 -04007054 /* Try to block the asynchronous mailbox posting */
7055 rc = lpfc_sli4_async_mbox_block(phba);
7056 if (!rc) {
7057 /* Successfully blocked, now issue sync mbox cmd */
7058 rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
7059 if (rc != MBX_SUCCESS)
7060 lpfc_printf_log(phba, KERN_ERR,
James Smarta183a152011-10-10 21:32:43 -04007061 LOG_MBOX | LOG_SLI,
7062 "(%d):2597 Mailbox command "
7063 "x%x (x%x/x%x) cannot issue "
7064 "Data: x%x x%x\n",
7065 mboxq->vport ?
7066 mboxq->vport->vpi : 0,
7067 mboxq->u.mb.mbxCommand,
7068 lpfc_sli_config_mbox_subsys_get(phba,
7069 mboxq),
7070 lpfc_sli_config_mbox_opcode_get(phba,
7071 mboxq),
7072 psli->sli_flag, flag);
James Smartf1126682009-06-10 17:22:44 -04007073 /* Unblock the async mailbox posting afterward */
7074 lpfc_sli4_async_mbox_unblock(phba);
7075 }
7076 return rc;
James Smartda0436e2009-05-22 14:51:39 -04007077 }
7078
 7079	/* Now, interrupt mode asynchronous mailbox command */
7080 rc = lpfc_mbox_cmd_check(phba, mboxq);
7081 if (rc) {
7082 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
James Smarta183a152011-10-10 21:32:43 -04007083 "(%d):2543 Mailbox command x%x (x%x/x%x) "
James Smartda0436e2009-05-22 14:51:39 -04007084 "cannot issue Data: x%x x%x\n",
7085 mboxq->vport ? mboxq->vport->vpi : 0,
7086 mboxq->u.mb.mbxCommand,
James Smarta183a152011-10-10 21:32:43 -04007087 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7088 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
James Smartda0436e2009-05-22 14:51:39 -04007089 psli->sli_flag, flag);
7090 goto out_not_finished;
7091 }
James Smartda0436e2009-05-22 14:51:39 -04007092
7093 /* Put the mailbox command to the driver internal FIFO */
7094 psli->slistat.mbox_busy++;
7095 spin_lock_irqsave(&phba->hbalock, iflags);
7096 lpfc_mbox_put(phba, mboxq);
7097 spin_unlock_irqrestore(&phba->hbalock, iflags);
7098 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7099 "(%d):0354 Mbox cmd issue - Enqueue Data: "
James Smarta183a152011-10-10 21:32:43 -04007100 "x%x (x%x/x%x) x%x x%x x%x\n",
James Smartda0436e2009-05-22 14:51:39 -04007101 mboxq->vport ? mboxq->vport->vpi : 0xffffff,
7102 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
James Smarta183a152011-10-10 21:32:43 -04007103 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7104 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
James Smartda0436e2009-05-22 14:51:39 -04007105 phba->pport->port_state,
7106 psli->sli_flag, MBX_NOWAIT);
7107 /* Wake up worker thread to transport mailbox command from head */
7108 lpfc_worker_wake_up(phba);
7109
7110 return MBX_BUSY;
7111
7112out_not_finished:
7113 return MBX_NOT_FINISHED;
7114}
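
/*
 * Editor's usage sketch (illustrative only): queueing an asynchronous SLI-4
 * mailbox command through the routine above via the lpfc_sli_issue_mbox()
 * wrapper. Here pmb is assumed to be an LPFC_MBOXQ_t the caller has already
 * allocated from phba->mbox_mem_pool and built; lpfc_sli_def_mbox_cmpl is
 * the driver's default completion handler, assumed from elsewhere in this
 * file. MBX_BUSY only means the command was enqueued for the worker thread.
 *
 *	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
 *	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
 *	if (rc == MBX_NOT_FINISHED)
 *		mempool_free(pmb, phba->mbox_mem_pool);
 */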
7115
7116/**
7117 * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device
7118 * @phba: Pointer to HBA context object.
7119 *
7120 * This function is called by worker thread to send a mailbox command to
7121 * SLI4 HBA firmware.
7122 *
7123 **/
7124int
7125lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
7126{
7127 struct lpfc_sli *psli = &phba->sli;
7128 LPFC_MBOXQ_t *mboxq;
7129 int rc = MBX_SUCCESS;
7130 unsigned long iflags;
7131 struct lpfc_mqe *mqe;
7132 uint32_t mbx_cmnd;
7133
 7134	/* Check interrupt mode before posting the async mailbox command */
7135 if (unlikely(!phba->sli4_hba.intr_enable))
7136 return MBX_NOT_FINISHED;
7137
7138 /* Check for mailbox command service token */
7139 spin_lock_irqsave(&phba->hbalock, iflags);
7140 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
7141 spin_unlock_irqrestore(&phba->hbalock, iflags);
7142 return MBX_NOT_FINISHED;
7143 }
7144 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
7145 spin_unlock_irqrestore(&phba->hbalock, iflags);
7146 return MBX_NOT_FINISHED;
7147 }
7148 if (unlikely(phba->sli.mbox_active)) {
7149 spin_unlock_irqrestore(&phba->hbalock, iflags);
7150 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7151 "0384 There is pending active mailbox cmd\n");
7152 return MBX_NOT_FINISHED;
7153 }
7154 /* Take the mailbox command service token */
7155 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
7156
7157 /* Get the next mailbox command from head of queue */
7158 mboxq = lpfc_mbox_get(phba);
7159
7160 /* If no more mailbox command waiting for post, we're done */
7161 if (!mboxq) {
7162 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
7163 spin_unlock_irqrestore(&phba->hbalock, iflags);
7164 return MBX_SUCCESS;
7165 }
7166 phba->sli.mbox_active = mboxq;
7167 spin_unlock_irqrestore(&phba->hbalock, iflags);
7168
7169 /* Check device readiness for posting mailbox command */
7170 rc = lpfc_mbox_dev_check(phba);
7171 if (unlikely(rc))
7172 /* Driver clean routine will clean up pending mailbox */
7173 goto out_not_finished;
7174
7175 /* Prepare the mbox command to be posted */
7176 mqe = &mboxq->u.mqe;
7177 mbx_cmnd = bf_get(lpfc_mqe_command, mqe);
7178
7179 /* Start timer for the mbox_tmo and log some mailbox post messages */
7180 mod_timer(&psli->mbox_tmo, (jiffies +
James Smarta183a152011-10-10 21:32:43 -04007181 (HZ * lpfc_mbox_tmo_val(phba, mboxq))));
James Smartda0436e2009-05-22 14:51:39 -04007182
7183 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
James Smarta183a152011-10-10 21:32:43 -04007184 "(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: "
James Smartda0436e2009-05-22 14:51:39 -04007185 "x%x x%x\n",
7186 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
James Smarta183a152011-10-10 21:32:43 -04007187 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7188 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
James Smartda0436e2009-05-22 14:51:39 -04007189 phba->pport->port_state, psli->sli_flag);
7190
7191 if (mbx_cmnd != MBX_HEARTBEAT) {
7192 if (mboxq->vport) {
7193 lpfc_debugfs_disc_trc(mboxq->vport,
7194 LPFC_DISC_TRC_MBOX_VPORT,
7195 "MBOX Send vport: cmd:x%x mb:x%x x%x",
7196 mbx_cmnd, mqe->un.mb_words[0],
7197 mqe->un.mb_words[1]);
7198 } else {
7199 lpfc_debugfs_disc_trc(phba->pport,
7200 LPFC_DISC_TRC_MBOX,
7201 "MBOX Send: cmd:x%x mb:x%x x%x",
7202 mbx_cmnd, mqe->un.mb_words[0],
7203 mqe->un.mb_words[1]);
7204 }
7205 }
7206 psli->slistat.mbox_cmd++;
7207
7208 /* Post the mailbox command to the port */
7209 rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe);
7210 if (rc != MBX_SUCCESS) {
7211 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
James Smarta183a152011-10-10 21:32:43 -04007212 "(%d):2533 Mailbox command x%x (x%x/x%x) "
James Smartda0436e2009-05-22 14:51:39 -04007213 "cannot issue Data: x%x x%x\n",
7214 mboxq->vport ? mboxq->vport->vpi : 0,
7215 mboxq->u.mb.mbxCommand,
James Smarta183a152011-10-10 21:32:43 -04007216 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7217 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
James Smartda0436e2009-05-22 14:51:39 -04007218 psli->sli_flag, MBX_NOWAIT);
7219 goto out_not_finished;
7220 }
7221
7222 return rc;
7223
7224out_not_finished:
7225 spin_lock_irqsave(&phba->hbalock, iflags);
7226 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
7227 __lpfc_mbox_cmpl_put(phba, mboxq);
7228 /* Release the token */
7229 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
7230 phba->sli.mbox_active = NULL;
7231 spin_unlock_irqrestore(&phba->hbalock, iflags);
7232
7233 return MBX_NOT_FINISHED;
7234}
7235
7236/**
7237 * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command
7238 * @phba: Pointer to HBA context object.
7239 * @pmbox: Pointer to mailbox object.
7240 * @flag: Flag indicating how the mailbox need to be processed.
7241 *
 7242 * This routine wraps the actual SLI3 or SLI4 mailbox issuing routine through
 7243 * the API jump table function pointer in the lpfc_hba struct.
7244 *
 7245 * Return codes: the caller owns the mailbox command after the return of the
 7246 * function.
7247 **/
7248int
7249lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
7250{
7251 return phba->lpfc_sli_issue_mbox(phba, pmbox, flag);
7252}
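
/*
 * Editor's note (no new code): this wrapper is the entry point used in the
 * sketches above; it simply dispatches to the SLI-3 or SLI-4 routine that
 * lpfc_mbox_api_table_setup() below installs for the device group.
 */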
7253
7254/**
Lucas De Marchi25985ed2011-03-30 22:57:33 -03007255 * lpfc_mbox_api_table_setup - Set up mbox api function jump table
James Smartda0436e2009-05-22 14:51:39 -04007256 * @phba: The hba struct for which this call is being executed.
7257 * @dev_grp: The HBA PCI-Device group number.
7258 *
7259 * This routine sets up the mbox interface API function jump table in @phba
7260 * struct.
7261 * Returns: 0 - success, -ENODEV - failure.
7262 **/
7263int
7264lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
7265{
7266
7267 switch (dev_grp) {
7268 case LPFC_PCI_DEV_LP:
7269 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3;
7270 phba->lpfc_sli_handle_slow_ring_event =
7271 lpfc_sli_handle_slow_ring_event_s3;
7272 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3;
7273 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
7274 phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
7275 break;
7276 case LPFC_PCI_DEV_OC:
7277 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4;
7278 phba->lpfc_sli_handle_slow_ring_event =
7279 lpfc_sli_handle_slow_ring_event_s4;
7280 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4;
7281 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
7282 phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
7283 break;
7284 default:
7285 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7286 "1420 Invalid HBA PCI-device group: 0x%x\n",
7287 dev_grp);
7288 return -ENODEV;
7289 break;
7290 }
7291 return 0;
7292}
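
/*
 * Editor's sketch (assumed call site, not driver code): the jump table is
 * expected to be populated once during adapter setup, keyed by the PCI
 * device group, before any mailbox command is issued:
 *
 *	if (lpfc_mbox_api_table_setup(phba, LPFC_PCI_DEV_OC))
 *		return -ENODEV;
 */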
7293
7294/**
James Smart3621a712009-04-06 18:47:14 -04007295 * __lpfc_sli_ringtx_put - Add an iocb to the txq
James Smarte59058c2008-08-24 21:49:00 -04007296 * @phba: Pointer to HBA context object.
7297 * @pring: Pointer to driver SLI ring object.
7298 * @piocb: Pointer to address of newly added command iocb.
7299 *
7300 * This function is called with hbalock held to add a command
7301 * iocb to the txq when SLI layer cannot submit the command iocb
7302 * to the ring.
7303 **/
James Smart2a9bf3d2010-06-07 15:24:45 -04007304void
James Smart92d7f7b2007-06-17 19:56:38 -05007305__lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
James Smart2e0fef82007-06-17 19:56:36 -05007306 struct lpfc_iocbq *piocb)
dea31012005-04-17 16:05:31 -05007307{
7308 /* Insert the caller's iocb in the txq tail for later processing. */
7309 list_add_tail(&piocb->list, &pring->txq);
7310 pring->txq_cnt++;
dea31012005-04-17 16:05:31 -05007311}
7312
James Smarte59058c2008-08-24 21:49:00 -04007313/**
James Smart3621a712009-04-06 18:47:14 -04007314 * lpfc_sli_next_iocb - Get the next iocb in the txq
James Smarte59058c2008-08-24 21:49:00 -04007315 * @phba: Pointer to HBA context object.
7316 * @pring: Pointer to driver SLI ring object.
7317 * @piocb: Pointer to address of newly added command iocb.
7318 *
7319 * This function is called with hbalock held before a new
7320 * iocb is submitted to the firmware. This function checks
 7321 * the txq so that any iocbs queued there are flushed to the firmware before
 7322 * new iocbs are submitted to the firmware.
7323 * If there are iocbs in the txq which need to be submitted
7324 * to firmware, lpfc_sli_next_iocb returns the first element
7325 * of the txq after dequeuing it from txq.
7326 * If there is no iocb in the txq then the function will return
7327 * *piocb and *piocb is set to NULL. Caller needs to check
7328 * *piocb to find if there are more commands in the txq.
7329 **/
dea31012005-04-17 16:05:31 -05007330static struct lpfc_iocbq *
7331lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
James Smart2e0fef82007-06-17 19:56:36 -05007332 struct lpfc_iocbq **piocb)
dea31012005-04-17 16:05:31 -05007333{
7334 struct lpfc_iocbq * nextiocb;
7335
7336 nextiocb = lpfc_sli_ringtx_get(phba, pring);
7337 if (!nextiocb) {
7338 nextiocb = *piocb;
7339 *piocb = NULL;
7340 }
7341
7342 return nextiocb;
7343}
7344
James Smarte59058c2008-08-24 21:49:00 -04007345/**
James Smart3772a992009-05-22 14:50:54 -04007346 * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb
James Smarte59058c2008-08-24 21:49:00 -04007347 * @phba: Pointer to HBA context object.
James Smart3772a992009-05-22 14:50:54 -04007348 * @ring_number: SLI ring number to issue iocb on.
James Smarte59058c2008-08-24 21:49:00 -04007349 * @piocb: Pointer to command iocb.
7350 * @flag: Flag indicating if this command can be put into txq.
7351 *
James Smart3772a992009-05-22 14:50:54 -04007352 * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue
7353 * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is
7354 * recovering from error state, if HBA is resetting or if LPFC_STOP_IOCB_EVENT
7355 * flag is turned on, the function returns IOCB_ERROR. When the link is down,
7356 * this function allows only iocbs for posting buffers. This function finds
7357 * next available slot in the command ring and posts the command to the
7358 * available slot and writes the port attention register to request HBA start
7359 * processing new iocb. If there is no slot available in the ring and
7360 * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the txq, otherwise
7361 * the function returns IOCB_BUSY.
James Smarte59058c2008-08-24 21:49:00 -04007362 *
James Smart3772a992009-05-22 14:50:54 -04007363 * This function is called with hbalock held. The function will return success
 7364 * after it successfully submits the iocb to firmware or after adding it to the
7365 * txq.
James Smarte59058c2008-08-24 21:49:00 -04007366 **/
James Smart98c9ea52007-10-27 13:37:33 -04007367static int
James Smart3772a992009-05-22 14:50:54 -04007368__lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
dea31012005-04-17 16:05:31 -05007369 struct lpfc_iocbq *piocb, uint32_t flag)
7370{
7371 struct lpfc_iocbq *nextiocb;
7372 IOCB_t *iocb;
James Smart3772a992009-05-22 14:50:54 -04007373 struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number];
dea31012005-04-17 16:05:31 -05007374
James Smart92d7f7b2007-06-17 19:56:38 -05007375 if (piocb->iocb_cmpl && (!piocb->vport) &&
7376 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
7377 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
7378 lpfc_printf_log(phba, KERN_ERR,
7379 LOG_SLI | LOG_VPORT,
James Smarte8b62012007-08-02 11:10:09 -04007380 "1807 IOCB x%x failed. No vport\n",
James Smart92d7f7b2007-06-17 19:56:38 -05007381 piocb->iocb.ulpCommand);
7382 dump_stack();
7383 return IOCB_ERROR;
7384 }
7385
7386
Linas Vepstas8d63f372007-02-14 14:28:36 -06007387 /* If the PCI channel is in offline state, do not post iocbs. */
7388 if (unlikely(pci_channel_offline(phba->pcidev)))
7389 return IOCB_ERROR;
7390
James Smarta257bf92009-04-06 18:48:10 -04007391 /* If HBA has a deferred error attention, fail the iocb. */
7392 if (unlikely(phba->hba_flag & DEFER_ERATT))
7393 return IOCB_ERROR;
7394
dea31012005-04-17 16:05:31 -05007395 /*
7396 * We should never get an IOCB if we are in a < LINK_DOWN state
7397 */
James Smart2e0fef82007-06-17 19:56:36 -05007398 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
dea31012005-04-17 16:05:31 -05007399 return IOCB_ERROR;
7400
7401 /*
 7402	 * Check to see if we are blocking IOCB processing because of an
James Smart0b727fe2007-10-27 13:37:25 -04007403 * outstanding event.
dea31012005-04-17 16:05:31 -05007404 */
James Smart0b727fe2007-10-27 13:37:25 -04007405 if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT))
dea31012005-04-17 16:05:31 -05007406 goto iocb_busy;
7407
James Smart2e0fef82007-06-17 19:56:36 -05007408 if (unlikely(phba->link_state == LPFC_LINK_DOWN)) {
dea31012005-04-17 16:05:31 -05007409 /*
James Smart2680eea2007-04-25 09:52:55 -04007410 * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF
dea31012005-04-17 16:05:31 -05007411 * can be issued if the link is not up.
7412 */
7413 switch (piocb->iocb.ulpCommand) {
James Smart84774a42008-08-24 21:50:06 -04007414 case CMD_GEN_REQUEST64_CR:
7415 case CMD_GEN_REQUEST64_CX:
7416 if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) ||
7417 (piocb->iocb.un.genreq64.w5.hcsw.Rctl !=
James Smart6a9c52c2009-10-02 15:16:51 -04007418 FC_RCTL_DD_UNSOL_CMD) ||
James Smart84774a42008-08-24 21:50:06 -04007419 (piocb->iocb.un.genreq64.w5.hcsw.Type !=
7420 MENLO_TRANSPORT_TYPE))
7421
7422 goto iocb_busy;
7423 break;
dea31012005-04-17 16:05:31 -05007424 case CMD_QUE_RING_BUF_CN:
7425 case CMD_QUE_RING_BUF64_CN:
dea31012005-04-17 16:05:31 -05007426 /*
7427 * For IOCBs, like QUE_RING_BUF, that have no rsp ring
7428 * completion, iocb_cmpl MUST be 0.
7429 */
7430 if (piocb->iocb_cmpl)
7431 piocb->iocb_cmpl = NULL;
7432 /*FALLTHROUGH*/
7433 case CMD_CREATE_XRI_CR:
James Smart2680eea2007-04-25 09:52:55 -04007434 case CMD_CLOSE_XRI_CN:
7435 case CMD_CLOSE_XRI_CX:
dea31012005-04-17 16:05:31 -05007436 break;
7437 default:
7438 goto iocb_busy;
7439 }
7440
7441 /*
7442 * For FCP commands, we must be in a state where we can process link
7443 * attention events.
7444 */
7445 } else if (unlikely(pring->ringno == phba->sli.fcp_ring &&
James Smart92d7f7b2007-06-17 19:56:38 -05007446 !(phba->sli.sli_flag & LPFC_PROCESS_LA))) {
dea31012005-04-17 16:05:31 -05007447 goto iocb_busy;
James Smart92d7f7b2007-06-17 19:56:38 -05007448 }
dea31012005-04-17 16:05:31 -05007449
dea31012005-04-17 16:05:31 -05007450 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
7451 (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
7452 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
7453
7454 if (iocb)
7455 lpfc_sli_update_ring(phba, pring);
7456 else
7457 lpfc_sli_update_full_ring(phba, pring);
7458
7459 if (!piocb)
7460 return IOCB_SUCCESS;
7461
7462 goto out_busy;
7463
7464 iocb_busy:
7465 pring->stats.iocb_cmd_delay++;
7466
7467 out_busy:
7468
7469 if (!(flag & SLI_IOCB_RET_IOCB)) {
James Smart92d7f7b2007-06-17 19:56:38 -05007470 __lpfc_sli_ringtx_put(phba, pring, piocb);
dea31012005-04-17 16:05:31 -05007471 return IOCB_SUCCESS;
7472 }
7473
7474 return IOCB_BUSY;
7475}
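
/*
 * Editor's usage sketch (illustrative only): callers normally reach the
 * routine above through the lpfc_sli_issue_iocb() wrapper, which takes the
 * hbalock and dispatches to the SLI-3 or SLI-4 variant. The ELS ring, the
 * prepared elsiocb and the lpfc_els_free_iocb() cleanup are assumptions
 * for this example.
 *
 *	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
 *	if (rc == IOCB_ERROR)
 *		lpfc_els_free_iocb(phba, elsiocb);
 */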
7476
James Smart3772a992009-05-22 14:50:54 -04007477/**
James Smart4f774512009-05-22 14:52:35 -04007478 * lpfc_sli4_bpl2sgl - Convert the bpl/bde to a sgl.
7479 * @phba: Pointer to HBA context object.
 7480 * @piocbq: Pointer to command iocb.
7481 * @sglq: Pointer to the scatter gather queue object.
7482 *
7483 * This routine converts the bpl or bde that is in the IOCB
7484 * to a sgl list for the sli4 hardware. The physical address
7485 * of the bpl/bde is converted back to a virtual address.
7486 * If the IOCB contains a BPL then the list of BDE's is
7487 * converted to sli4_sge's. If the IOCB contains a single
7488 * BDE then it is converted to a single sli_sge.
 7489 * The IOCB is still in cpu endianness so the contents of
7490 * the bpl can be used without byte swapping.
7491 *
7492 * Returns valid XRI = Success, NO_XRI = Failure.
7493**/
7494static uint16_t
7495lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
7496 struct lpfc_sglq *sglq)
7497{
7498 uint16_t xritag = NO_XRI;
7499 struct ulp_bde64 *bpl = NULL;
7500 struct ulp_bde64 bde;
7501 struct sli4_sge *sgl = NULL;
7502 IOCB_t *icmd;
7503 int numBdes = 0;
7504 int i = 0;
James Smart63e801c2010-11-20 23:14:19 -05007505 uint32_t offset = 0; /* accumulated offset in the sg request list */
7506 int inbound = 0; /* number of sg reply entries inbound from firmware */
James Smart4f774512009-05-22 14:52:35 -04007507
7508 if (!piocbq || !sglq)
7509 return xritag;
7510
7511 sgl = (struct sli4_sge *)sglq->sgl;
7512 icmd = &piocbq->iocb;
7513 if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
7514 numBdes = icmd->un.genreq64.bdl.bdeSize /
7515 sizeof(struct ulp_bde64);
7516 /* The addrHigh and addrLow fields within the IOCB
7517 * have not been byteswapped yet so there is no
7518 * need to swap them back.
7519 */
7520 bpl = (struct ulp_bde64 *)
7521 ((struct lpfc_dmabuf *)piocbq->context3)->virt;
7522
7523 if (!bpl)
7524 return xritag;
7525
7526 for (i = 0; i < numBdes; i++) {
7527 /* Should already be byte swapped. */
James Smart28baac72010-02-12 14:42:03 -05007528 sgl->addr_hi = bpl->addrHigh;
7529 sgl->addr_lo = bpl->addrLow;
7530
James Smart05580562011-05-24 11:40:48 -04007531 sgl->word2 = le32_to_cpu(sgl->word2);
James Smart4f774512009-05-22 14:52:35 -04007532 if ((i+1) == numBdes)
7533 bf_set(lpfc_sli4_sge_last, sgl, 1);
7534 else
7535 bf_set(lpfc_sli4_sge_last, sgl, 0);
James Smart28baac72010-02-12 14:42:03 -05007536 /* swap the size field back to the cpu so we
7537 * can assign it to the sgl.
7538 */
7539 bde.tus.w = le32_to_cpu(bpl->tus.w);
7540 sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
James Smart63e801c2010-11-20 23:14:19 -05007541 /* The offsets in the sgl need to be accumulated
7542 * separately for the request and reply lists.
7543 * The request is always first, the reply follows.
7544 */
7545 if (piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) {
7546 /* add up the reply sg entries */
7547 if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
7548 inbound++;
7549 /* first inbound? reset the offset */
7550 if (inbound == 1)
7551 offset = 0;
7552 bf_set(lpfc_sli4_sge_offset, sgl, offset);
James Smartf9bb2da2011-10-10 21:34:11 -04007553 bf_set(lpfc_sli4_sge_type, sgl,
7554 LPFC_SGE_TYPE_DATA);
James Smart63e801c2010-11-20 23:14:19 -05007555 offset += bde.tus.f.bdeSize;
7556 }
James Smart546fc852011-03-11 16:06:29 -05007557 sgl->word2 = cpu_to_le32(sgl->word2);
James Smart4f774512009-05-22 14:52:35 -04007558 bpl++;
7559 sgl++;
7560 }
7561 } else if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BDE_64) {
7562 /* The addrHigh and addrLow fields of the BDE have not
7563 * been byteswapped yet so they need to be swapped
7564 * before putting them in the sgl.
7565 */
7566 sgl->addr_hi =
7567 cpu_to_le32(icmd->un.genreq64.bdl.addrHigh);
7568 sgl->addr_lo =
7569 cpu_to_le32(icmd->un.genreq64.bdl.addrLow);
James Smart05580562011-05-24 11:40:48 -04007570 sgl->word2 = le32_to_cpu(sgl->word2);
James Smart4f774512009-05-22 14:52:35 -04007571 bf_set(lpfc_sli4_sge_last, sgl, 1);
7572 sgl->word2 = cpu_to_le32(sgl->word2);
James Smart28baac72010-02-12 14:42:03 -05007573 sgl->sge_len =
7574 cpu_to_le32(icmd->un.genreq64.bdl.bdeSize);
James Smart4f774512009-05-22 14:52:35 -04007575 }
7576 return sglq->sli4_xritag;
7577}
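
/*
 * Editor's sketch (illustrative only): the SLI-4 issue path pairs this
 * conversion with an sglq it has already assigned to the iocb, failing the
 * submission when no XRI can be produced. piocbq and sglq are assumed to
 * have been set up by that caller.
 *
 *	if (lpfc_sli4_bpl2sgl(phba, piocbq, sglq) == NO_XRI)
 *		return IOCB_ERROR;
 */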
7578
7579/**
7580 * lpfc_sli4_scmd_to_wqidx_distr - scsi command to SLI4 WQ index distribution
7581 * @phba: Pointer to HBA context object.
James Smart4f774512009-05-22 14:52:35 -04007582 *
James Smarta93ff372010-10-22 11:06:08 -04007583 * This routine performs round-robin distribution of SCSI commands over the
James Smart8fa38512009-07-19 10:01:03 -04007584 * SLI4 FCP WQ indexes. This is called by __lpfc_sli_issue_iocb_s4() with the
 7585 * hbalock held.
James Smart4f774512009-05-22 14:52:35 -04007586 *
7587 * Return: index into SLI4 fast-path FCP queue index.
7588 **/
7589static uint32_t
James Smart8fa38512009-07-19 10:01:03 -04007590lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba)
James Smart4f774512009-05-22 14:52:35 -04007591{
James Smart8fa38512009-07-19 10:01:03 -04007592 ++phba->fcp_qidx;
7593 if (phba->fcp_qidx >= phba->cfg_fcp_wq_count)
7594 phba->fcp_qidx = 0;
James Smart4f774512009-05-22 14:52:35 -04007595
James Smart8fa38512009-07-19 10:01:03 -04007596 return phba->fcp_qidx;
James Smart4f774512009-05-22 14:52:35 -04007597}
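
/*
 * Editor's worked example: with cfg_fcp_wq_count == 4 and fcp_qidx
 * currently 2, successive calls return 3, 0, 1, 2, 3, ... so FCP commands
 * are spread across the four work queues in round-robin order.
 */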
7598
7599/**
 7600 * lpfc_sli4_iocb2wqe - Convert the IOCB to a work queue entry.
7601 * @phba: Pointer to HBA context object.
 7602 * @iocbq: Pointer to command iocb.
7603 * @wqe: Pointer to the work queue entry.
7604 *
7605 * This routine converts the iocb command to its Work Queue Entry
7606 * equivalent. The wqe pointer should not have any fields set when
7607 * this routine is called because it will memcpy over them.
7608 * This routine does not set the CQ_ID or the WQEC bits in the
7609 * wqe.
7610 *
7611 * Returns: 0 = Success, IOCB_ERROR = Failure.
7612 **/
7613static int
7614lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
7615 union lpfc_wqe *wqe)
7616{
James Smart5ffc2662009-11-18 15:39:44 -05007617 uint32_t xmit_len = 0, total_len = 0;
James Smart4f774512009-05-22 14:52:35 -04007618 uint8_t ct = 0;
7619 uint32_t fip;
7620 uint32_t abort_tag;
7621 uint8_t command_type = ELS_COMMAND_NON_FIP;
7622 uint8_t cmnd;
7623 uint16_t xritag;
James Smartdcf2a4e2010-09-29 11:18:53 -04007624 uint16_t abrt_iotag;
7625 struct lpfc_iocbq *abrtiocbq;
James Smart4f774512009-05-22 14:52:35 -04007626 struct ulp_bde64 *bpl = NULL;
James Smartf0d9bcc2010-10-22 11:07:09 -04007627 uint32_t els_id = LPFC_ELS_ID_DEFAULT;
James Smart5ffc2662009-11-18 15:39:44 -05007628 int numBdes, i;
7629 struct ulp_bde64 bde;
James Smartc31098c2011-04-16 11:03:33 -04007630 struct lpfc_nodelist *ndlp;
James Smartff78d8f2011-12-13 13:21:35 -05007631 uint32_t *pcmd;
James Smart4f774512009-05-22 14:52:35 -04007632
James Smart45ed1192009-10-02 15:17:02 -04007633 fip = phba->hba_flag & HBA_FIP_SUPPORT;
James Smart4f774512009-05-22 14:52:35 -04007634 /* The fcp commands will set command type */
James Smart0c287582009-06-10 17:22:56 -04007635 if (iocbq->iocb_flag & LPFC_IO_FCP)
James Smart4f774512009-05-22 14:52:35 -04007636 command_type = FCP_COMMAND;
James Smartc8685952009-11-18 15:39:16 -05007637 else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK))
James Smart0c287582009-06-10 17:22:56 -04007638 command_type = ELS_COMMAND_FIP;
7639 else
7640 command_type = ELS_COMMAND_NON_FIP;
7641
James Smart4f774512009-05-22 14:52:35 -04007642 /* Some of the fields are in the right position already */
7643 memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe));
7644 abort_tag = (uint32_t) iocbq->iotag;
7645 xritag = iocbq->sli4_xritag;
James Smartf0d9bcc2010-10-22 11:07:09 -04007646 wqe->generic.wqe_com.word7 = 0; /* The ct field has moved so reset */
James Smart4f774512009-05-22 14:52:35 -04007647 /* words0-2 bpl convert bde */
7648 if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
James Smart5ffc2662009-11-18 15:39:44 -05007649 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
7650 sizeof(struct ulp_bde64);
James Smart4f774512009-05-22 14:52:35 -04007651 bpl = (struct ulp_bde64 *)
7652 ((struct lpfc_dmabuf *)iocbq->context3)->virt;
7653 if (!bpl)
7654 return IOCB_ERROR;
7655
7656 /* Should already be byte swapped. */
7657 wqe->generic.bde.addrHigh = le32_to_cpu(bpl->addrHigh);
7658 wqe->generic.bde.addrLow = le32_to_cpu(bpl->addrLow);
7659 /* swap the size field back to the cpu so we
7660 * can assign it to the sgl.
7661 */
7662 wqe->generic.bde.tus.w = le32_to_cpu(bpl->tus.w);
James Smart5ffc2662009-11-18 15:39:44 -05007663 xmit_len = wqe->generic.bde.tus.f.bdeSize;
7664 total_len = 0;
7665 for (i = 0; i < numBdes; i++) {
7666 bde.tus.w = le32_to_cpu(bpl[i].tus.w);
7667 total_len += bde.tus.f.bdeSize;
7668 }
James Smart4f774512009-05-22 14:52:35 -04007669 } else
James Smart5ffc2662009-11-18 15:39:44 -05007670 xmit_len = iocbq->iocb.un.fcpi64.bdl.bdeSize;
James Smart4f774512009-05-22 14:52:35 -04007671
7672 iocbq->iocb.ulpIoTag = iocbq->iotag;
7673 cmnd = iocbq->iocb.ulpCommand;
7674
7675 switch (iocbq->iocb.ulpCommand) {
7676 case CMD_ELS_REQUEST64_CR:
James Smartc31098c2011-04-16 11:03:33 -04007677 ndlp = (struct lpfc_nodelist *)iocbq->context1;
James Smart4f774512009-05-22 14:52:35 -04007678 if (!iocbq->iocb.ulpLe) {
7679 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
7680 "2007 Only Limited Edition cmd Format"
7681 " supported 0x%x\n",
7682 iocbq->iocb.ulpCommand);
7683 return IOCB_ERROR;
7684 }
James Smartff78d8f2011-12-13 13:21:35 -05007685
James Smart5ffc2662009-11-18 15:39:44 -05007686 wqe->els_req.payload_len = xmit_len;
James Smart4f774512009-05-22 14:52:35 -04007687		/* Els_request64 has a TMO */
7688 bf_set(wqe_tmo, &wqe->els_req.wqe_com,
7689 iocbq->iocb.ulpTimeout);
7690 /* Need a VF for word 4 set the vf bit*/
7691 bf_set(els_req64_vf, &wqe->els_req, 0);
7692 /* And a VFID for word 12 */
7693 bf_set(els_req64_vfid, &wqe->els_req, 0);
James Smart4f774512009-05-22 14:52:35 -04007694 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
James Smartf0d9bcc2010-10-22 11:07:09 -04007695 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
7696 iocbq->iocb.ulpContext);
7697 bf_set(wqe_ct, &wqe->els_req.wqe_com, ct);
7698 bf_set(wqe_pu, &wqe->els_req.wqe_com, 0);
James Smart4f774512009-05-22 14:52:35 -04007699 /* CCP CCPE PV PRI in word10 were set in the memcpy */
James Smartff78d8f2011-12-13 13:21:35 -05007700 if (command_type == ELS_COMMAND_FIP)
James Smartc8685952009-11-18 15:39:16 -05007701 els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)
7702 >> LPFC_FIP_ELS_ID_SHIFT);
James Smartff78d8f2011-12-13 13:21:35 -05007703 pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
7704 iocbq->context2)->virt);
7705 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
7706 if (pcmd && (*pcmd == ELS_CMD_FLOGI ||
7707 *pcmd == ELS_CMD_PLOGI)) {
7708 bf_set(els_req64_sp, &wqe->els_req, 1);
7709 bf_set(els_req64_sid, &wqe->els_req,
7710 iocbq->vport->fc_myDID);
7711 bf_set(wqe_ct, &wqe->els_req.wqe_com, 1);
7712 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
7713 phba->vpi_ids[phba->pport->vpi]);
7714 } else if (iocbq->context1) {
7715 bf_set(wqe_ct, &wqe->els_req.wqe_com, 0);
7716 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
7717 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
7718 }
James Smartc8685952009-11-18 15:39:16 -05007719 }
James Smart6d368e52011-05-24 11:44:12 -04007720 bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com,
7721 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
James Smartf0d9bcc2010-10-22 11:07:09 -04007722 bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id);
7723 bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1);
7724 bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ);
7725 bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
7726 bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE);
7727 bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
James Smart7851fe22011-07-22 18:36:52 -04007728 break;
James Smart5ffc2662009-11-18 15:39:44 -05007729 case CMD_XMIT_SEQUENCE64_CX:
James Smartf0d9bcc2010-10-22 11:07:09 -04007730 bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
7731 iocbq->iocb.un.ulpWord[3]);
7732 bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com,
James Smart7851fe22011-07-22 18:36:52 -04007733 iocbq->iocb.unsli3.rcvsli3.ox_id);
James Smart5ffc2662009-11-18 15:39:44 -05007734 /* The entire sequence is transmitted for this IOCB */
7735 xmit_len = total_len;
7736 cmnd = CMD_XMIT_SEQUENCE64_CR;
James Smart4f774512009-05-22 14:52:35 -04007737 case CMD_XMIT_SEQUENCE64_CR:
James Smartf0d9bcc2010-10-22 11:07:09 -04007738 /* word3 iocb=io_tag32 wqe=reserved */
7739 wqe->xmit_sequence.rsvd3 = 0;
James Smart4f774512009-05-22 14:52:35 -04007740 /* word4 relative_offset memcpy */
7741 /* word5 r_ctl/df_ctl memcpy */
James Smartf0d9bcc2010-10-22 11:07:09 -04007742 bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
7743 bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
7744 bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com,
7745 LPFC_WQE_IOD_WRITE);
7746 bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
7747 LPFC_WQE_LENLOC_WORD12);
7748 bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
James Smart5ffc2662009-11-18 15:39:44 -05007749 wqe->xmit_sequence.xmit_len = xmit_len;
7750 command_type = OTHER_COMMAND;
James Smart7851fe22011-07-22 18:36:52 -04007751 break;
James Smart4f774512009-05-22 14:52:35 -04007752 case CMD_XMIT_BCAST64_CN:
James Smartf0d9bcc2010-10-22 11:07:09 -04007753 /* word3 iocb=iotag32 wqe=seq_payload_len */
7754 wqe->xmit_bcast64.seq_payload_len = xmit_len;
James Smart4f774512009-05-22 14:52:35 -04007755 /* word4 iocb=rsvd wqe=rsvd */
7756 /* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */
7757 /* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */
James Smartf0d9bcc2010-10-22 11:07:09 -04007758 bf_set(wqe_ct, &wqe->xmit_bcast64.wqe_com,
James Smart4f774512009-05-22 14:52:35 -04007759 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
James Smartf0d9bcc2010-10-22 11:07:09 -04007760 bf_set(wqe_dbde, &wqe->xmit_bcast64.wqe_com, 1);
7761 bf_set(wqe_iod, &wqe->xmit_bcast64.wqe_com, LPFC_WQE_IOD_WRITE);
7762 bf_set(wqe_lenloc, &wqe->xmit_bcast64.wqe_com,
7763 LPFC_WQE_LENLOC_WORD3);
7764 bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0);
James Smart7851fe22011-07-22 18:36:52 -04007765 break;
James Smart4f774512009-05-22 14:52:35 -04007766 case CMD_FCP_IWRITE64_CR:
7767 command_type = FCP_COMMAND_DATA_OUT;
James Smartf0d9bcc2010-10-22 11:07:09 -04007768 /* word3 iocb=iotag wqe=payload_offset_len */
7769 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
7770 wqe->fcp_iwrite.payload_offset_len =
James Smart5ffc2662009-11-18 15:39:44 -05007771 xmit_len + sizeof(struct fcp_rsp);
James Smartf0d9bcc2010-10-22 11:07:09 -04007772 /* word4 iocb=parameter wqe=total_xfer_length memcpy */
7773 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
7774 bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com,
7775 iocbq->iocb.ulpFCP2Rcvy);
7776 bf_set(wqe_lnk, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpXS);
7777 /* Always open the exchange */
7778 bf_set(wqe_xc, &wqe->fcp_iwrite.wqe_com, 0);
7779 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1);
7780 bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
7781 bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com,
7782 LPFC_WQE_LENLOC_WORD4);
7783 bf_set(wqe_ebde_cnt, &wqe->fcp_iwrite.wqe_com, 0);
7784 bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU);
James Smart7851fe22011-07-22 18:36:52 -04007785 break;
James Smartf0d9bcc2010-10-22 11:07:09 -04007786 case CMD_FCP_IREAD64_CR:
7787 /* word3 iocb=iotag wqe=payload_offset_len */
7788 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
7789 wqe->fcp_iread.payload_offset_len =
7790 xmit_len + sizeof(struct fcp_rsp);
7791 /* word4 iocb=parameter wqe=total_xfer_length memcpy */
7792 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
7793 bf_set(wqe_erp, &wqe->fcp_iread.wqe_com,
7794 iocbq->iocb.ulpFCP2Rcvy);
7795 bf_set(wqe_lnk, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpXS);
James Smart4f774512009-05-22 14:52:35 -04007796 /* Always open the exchange */
7797 bf_set(wqe_xc, &wqe->fcp_iread.wqe_com, 0);
James Smartf0d9bcc2010-10-22 11:07:09 -04007798 bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1);
7799 bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
7800 bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com,
7801 LPFC_WQE_LENLOC_WORD4);
7802 bf_set(wqe_ebde_cnt, &wqe->fcp_iread.wqe_com, 0);
7803 bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU);
James Smart7851fe22011-07-22 18:36:52 -04007804 break;
James Smartf1126682009-06-10 17:22:44 -04007805 case CMD_FCP_ICMND64_CR:
James Smartf0d9bcc2010-10-22 11:07:09 -04007806 /* word3 iocb=IO_TAG wqe=reserved */
7807 wqe->fcp_icmd.rsrvd3 = 0;
7808 bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
James Smartf1126682009-06-10 17:22:44 -04007809 /* Always open the exchange */
James Smartf0d9bcc2010-10-22 11:07:09 -04007810 bf_set(wqe_xc, &wqe->fcp_icmd.wqe_com, 0);
7811 bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 1);
7812 bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE);
7813 bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
7814 bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com,
7815 LPFC_WQE_LENLOC_NONE);
7816 bf_set(wqe_ebde_cnt, &wqe->fcp_icmd.wqe_com, 0);
James Smart7851fe22011-07-22 18:36:52 -04007817 break;
James Smart4f774512009-05-22 14:52:35 -04007818 case CMD_GEN_REQUEST64_CR:
James Smart63e801c2010-11-20 23:14:19 -05007819 /* For this command calculate the xmit length of the
7820 * request bde.
7821 */
7822 xmit_len = 0;
7823 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
7824 sizeof(struct ulp_bde64);
7825 for (i = 0; i < numBdes; i++) {
James Smart63e801c2010-11-20 23:14:19 -05007826 bde.tus.w = le32_to_cpu(bpl[i].tus.w);
James Smart546fc852011-03-11 16:06:29 -05007827 if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
7828 break;
James Smart63e801c2010-11-20 23:14:19 -05007829 xmit_len += bde.tus.f.bdeSize;
7830 }
James Smartf0d9bcc2010-10-22 11:07:09 -04007831 /* word3 iocb=IO_TAG wqe=request_payload_len */
7832 wqe->gen_req.request_payload_len = xmit_len;
7833 /* word4 iocb=parameter wqe=relative_offset memcpy */
7834 /* word5 [rctl, type, df_ctl, la] copied in memcpy */
James Smart4f774512009-05-22 14:52:35 -04007835 /* word6 context tag copied in memcpy */
7836 if (iocbq->iocb.ulpCt_h || iocbq->iocb.ulpCt_l) {
7837 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
7838 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
7839 "2015 Invalid CT %x command 0x%x\n",
7840 ct, iocbq->iocb.ulpCommand);
7841 return IOCB_ERROR;
7842 }
James Smartf0d9bcc2010-10-22 11:07:09 -04007843 bf_set(wqe_ct, &wqe->gen_req.wqe_com, 0);
7844 bf_set(wqe_tmo, &wqe->gen_req.wqe_com, iocbq->iocb.ulpTimeout);
7845 bf_set(wqe_pu, &wqe->gen_req.wqe_com, iocbq->iocb.ulpPU);
7846 bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
7847 bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
7848 bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
7849 bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
7850 bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
James Smart4f774512009-05-22 14:52:35 -04007851 command_type = OTHER_COMMAND;
James Smart7851fe22011-07-22 18:36:52 -04007852 break;
James Smart4f774512009-05-22 14:52:35 -04007853 case CMD_XMIT_ELS_RSP64_CX:
James Smartc31098c2011-04-16 11:03:33 -04007854 ndlp = (struct lpfc_nodelist *)iocbq->context1;
James Smart4f774512009-05-22 14:52:35 -04007855 /* words0-2 BDE memcpy */
James Smartf0d9bcc2010-10-22 11:07:09 -04007856 /* word3 iocb=iotag32 wqe=response_payload_len */
7857 wqe->xmit_els_rsp.response_payload_len = xmit_len;
James Smart4f774512009-05-22 14:52:35 -04007858		/* word4 iocb=did wqe=rsvd. */
James Smartf0d9bcc2010-10-22 11:07:09 -04007859 wqe->xmit_els_rsp.rsvd4 = 0;
James Smart4f774512009-05-22 14:52:35 -04007860		/* word5 iocb=rsvd wqe=did */
7861 bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
7862 iocbq->iocb.un.elsreq64.remoteID);
James Smartf0d9bcc2010-10-22 11:07:09 -04007863 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com,
7864 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
7865 bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU);
7866 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
James Smart7851fe22011-07-22 18:36:52 -04007867 iocbq->iocb.unsli3.rcvsli3.ox_id);
James Smart4f774512009-05-22 14:52:35 -04007868 if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l)
James Smartf0d9bcc2010-10-22 11:07:09 -04007869 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
James Smart6d368e52011-05-24 11:44:12 -04007870 phba->vpi_ids[iocbq->vport->vpi]);
James Smartf0d9bcc2010-10-22 11:07:09 -04007871 bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1);
7872 bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE);
7873 bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1);
7874 bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com,
7875 LPFC_WQE_LENLOC_WORD3);
7876 bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);
James Smart6d368e52011-05-24 11:44:12 -04007877 bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
7878 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
James Smartff78d8f2011-12-13 13:21:35 -05007879 pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
7880 iocbq->context2)->virt);
7881 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
7882 bf_set(els_req64_sp, &wqe->els_req, 1);
7883 bf_set(els_req64_sid, &wqe->els_req,
7884 iocbq->vport->fc_myDID);
7885 bf_set(wqe_ct, &wqe->els_req.wqe_com, 1);
7886 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
7887 phba->vpi_ids[phba->pport->vpi]);
7888 }
James Smart4f774512009-05-22 14:52:35 -04007889 command_type = OTHER_COMMAND;
James Smart7851fe22011-07-22 18:36:52 -04007890 break;
James Smart4f774512009-05-22 14:52:35 -04007891 case CMD_CLOSE_XRI_CN:
7892 case CMD_ABORT_XRI_CN:
7893 case CMD_ABORT_XRI_CX:
 7894		/* words 0-2 memcpy should be 0 (reserved) */
7895 /* port will send abts */
James Smartdcf2a4e2010-09-29 11:18:53 -04007896 abrt_iotag = iocbq->iocb.un.acxri.abortContextTag;
7897 if (abrt_iotag != 0 && abrt_iotag <= phba->sli.last_iotag) {
7898 abrtiocbq = phba->sli.iocbq_lookup[abrt_iotag];
7899 fip = abrtiocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK;
7900 } else
7901 fip = 0;
7902
7903 if ((iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN) || fip)
James Smart4f774512009-05-22 14:52:35 -04007904 /*
James Smartdcf2a4e2010-09-29 11:18:53 -04007905 * The link is down, or the command was ELS_FIP
7906 * so the fw does not need to send abts
James Smart4f774512009-05-22 14:52:35 -04007907 * on the wire.
7908 */
7909 bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
7910 else
7911 bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
7912 bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
James Smartf0d9bcc2010-10-22 11:07:09 -04007913 /* word5 iocb=CONTEXT_TAG|IO_TAG wqe=reserved */
7914 wqe->abort_cmd.rsrvd5 = 0;
7915 bf_set(wqe_ct, &wqe->abort_cmd.wqe_com,
James Smart4f774512009-05-22 14:52:35 -04007916 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
7917 abort_tag = iocbq->iocb.un.acxri.abortIoTag;
James Smart4f774512009-05-22 14:52:35 -04007918 /*
7919 * The abort handler will send us CMD_ABORT_XRI_CN or
7920 * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX
7921 */
James Smartf0d9bcc2010-10-22 11:07:09 -04007922 bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
7923 bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
7924 bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com,
7925 LPFC_WQE_LENLOC_NONE);
James Smart4f774512009-05-22 14:52:35 -04007926 cmnd = CMD_ABORT_XRI_CX;
7927 command_type = OTHER_COMMAND;
7928 xritag = 0;
James Smart7851fe22011-07-22 18:36:52 -04007929 break;
James Smart6669f9b2009-10-02 15:16:45 -04007930 case CMD_XMIT_BLS_RSP64_CX:
James Smart546fc852011-03-11 16:06:29 -05007931 /* As BLS ABTS RSP WQE is very different from other WQEs,
James Smart6669f9b2009-10-02 15:16:45 -04007932 * we re-construct this WQE here based on information in
7933 * iocbq from scratch.
7934 */
7935 memset(wqe, 0, sizeof(union lpfc_wqe));
James Smart5ffc2662009-11-18 15:39:44 -05007936 /* OX_ID is the same regardless of which side sent the ABTS to the CT exchange */
James Smart6669f9b2009-10-02 15:16:45 -04007937 bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp,
James Smart546fc852011-03-11 16:06:29 -05007938 bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_rsp));
7939 if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_rsp) ==
James Smart5ffc2662009-11-18 15:39:44 -05007940 LPFC_ABTS_UNSOL_INT) {
7941 /* ABTS sent by initiator to CT exchange, the
7942 * RX_ID field will be filled with the newly
7943 * allocated responder XRI.
7944 */
7945 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
7946 iocbq->sli4_xritag);
7947 } else {
7948 /* ABTS sent by responder to CT exchange, the
7949 * RX_ID field will be filled with the responder
7950 * RX_ID from ABTS.
7951 */
7952 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
James Smart546fc852011-03-11 16:06:29 -05007953 bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_rsp));
James Smart5ffc2662009-11-18 15:39:44 -05007954 }
James Smart6669f9b2009-10-02 15:16:45 -04007955 bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff);
7956 bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);
7957 bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com,
7958 iocbq->iocb.ulpContext);
James Smartf0d9bcc2010-10-22 11:07:09 -04007959 bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1);
7960 bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com,
7961 LPFC_WQE_LENLOC_NONE);
James Smart6669f9b2009-10-02 15:16:45 -04007962 /* Overwrite the pre-set command type with OTHER_COMMAND */
7963 command_type = OTHER_COMMAND;
James Smart546fc852011-03-11 16:06:29 -05007964 if (iocbq->iocb.un.xseq64.w5.hcsw.Rctl == FC_RCTL_BA_RJT) {
7965 bf_set(xmit_bls_rsp64_rjt_vspec, &wqe->xmit_bls_rsp,
7966 bf_get(lpfc_vndr_code, &iocbq->iocb.un.bls_rsp));
7967 bf_set(xmit_bls_rsp64_rjt_expc, &wqe->xmit_bls_rsp,
7968 bf_get(lpfc_rsn_expln, &iocbq->iocb.un.bls_rsp));
7969 bf_set(xmit_bls_rsp64_rjt_rsnc, &wqe->xmit_bls_rsp,
7970 bf_get(lpfc_rsn_code, &iocbq->iocb.un.bls_rsp));
7971 }
7972
James Smart7851fe22011-07-22 18:36:52 -04007973 break;
James Smart4f774512009-05-22 14:52:35 -04007974 case CMD_XRI_ABORTED_CX:
7975 case CMD_CREATE_XRI_CR: /* Do we expect to use this? */
James Smart4f774512009-05-22 14:52:35 -04007976 case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */
7977 case CMD_FCP_TSEND64_CX: /* Target mode send xfer-ready */
7978 case CMD_FCP_TRSP64_CX: /* Target mode rcv */
7979 case CMD_FCP_AUTO_TRSP_CX: /* Auto target rsp */
7980 default:
7981 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
7982 "2014 Invalid command 0x%x\n",
7983 iocbq->iocb.ulpCommand);
7984 return IOCB_ERROR;
James Smart7851fe22011-07-22 18:36:52 -04007985 break;
James Smart4f774512009-05-22 14:52:35 -04007986 }
James Smart6d368e52011-05-24 11:44:12 -04007987
James Smartf0d9bcc2010-10-22 11:07:09 -04007988 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
7989 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
7990 wqe->generic.wqe_com.abort_tag = abort_tag;
7991 bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type);
7992 bf_set(wqe_cmnd, &wqe->generic.wqe_com, cmnd);
7993 bf_set(wqe_class, &wqe->generic.wqe_com, iocbq->iocb.ulpClass);
7994 bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
James Smart4f774512009-05-22 14:52:35 -04007995 return 0;
7996}
7997
7998/**
7999 * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb
8000 * @phba: Pointer to HBA context object.
8001 * @ring_number: SLI ring number to issue iocb on.
8002 * @piocb: Pointer to command iocb.
8003 * @flag: Flag indicating if this command can be put into txq.
8004 *
8005 * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue
8006 * an iocb command to an HBA with SLI-4 interface spec.
8007 *
8008 * This function is called with hbalock held. The function will return success
8009 * after it successfully submits the iocb to the firmware or after adding it to the
8010 * txq.
8011 **/
8012static int
8013__lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
8014 struct lpfc_iocbq *piocb, uint32_t flag)
8015{
8016 struct lpfc_sglq *sglq;
James Smart4f774512009-05-22 14:52:35 -04008017 union lpfc_wqe wqe;
8018 struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number];
James Smart4f774512009-05-22 14:52:35 -04008019
8020 if (piocb->sli4_xritag == NO_XRI) {
8021 if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
James Smart546fc852011-03-11 16:06:29 -05008022 piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN ||
8023 piocb->iocb.ulpCommand == CMD_XMIT_BLS_RSP64_CX)
James Smart4f774512009-05-22 14:52:35 -04008024 sglq = NULL;
8025 else {
James Smart2a9bf3d2010-06-07 15:24:45 -04008026 if (pring->txq_cnt) {
8027 if (!(flag & SLI_IOCB_RET_IOCB)) {
8028 __lpfc_sli_ringtx_put(phba,
8029 pring, piocb);
8030 return IOCB_SUCCESS;
8031 } else {
8032 return IOCB_BUSY;
8033 }
8034 } else {
James Smart6d368e52011-05-24 11:44:12 -04008035 sglq = __lpfc_sli_get_sglq(phba, piocb);
James Smart2a9bf3d2010-06-07 15:24:45 -04008036 if (!sglq) {
8037 if (!(flag & SLI_IOCB_RET_IOCB)) {
8038 __lpfc_sli_ringtx_put(phba,
8039 pring,
8040 piocb);
8041 return IOCB_SUCCESS;
8042 } else
8043 return IOCB_BUSY;
8044 }
8045 }
James Smart4f774512009-05-22 14:52:35 -04008046 }
8047 } else if (piocb->iocb_flag & LPFC_IO_FCP) {
James Smart6d368e52011-05-24 11:44:12 -04008048 /* These IOs already have an XRI and a mapped sgl. */
8049 sglq = NULL;
James Smart4f774512009-05-22 14:52:35 -04008050 } else {
James Smart6d368e52011-05-24 11:44:12 -04008051 /*
8052 * This is a continuation of a command (CX), so this
James Smart4f774512009-05-22 14:52:35 -04008053 * sglq is on the active list
8054 */
8055 sglq = __lpfc_get_active_sglq(phba, piocb->sli4_xritag);
8056 if (!sglq)
8057 return IOCB_ERROR;
8058 }
8059
8060 if (sglq) {
James Smart6d368e52011-05-24 11:44:12 -04008061 piocb->sli4_lxritag = sglq->sli4_lxritag;
James Smart2a9bf3d2010-06-07 15:24:45 -04008062 piocb->sli4_xritag = sglq->sli4_xritag;
James Smart2a9bf3d2010-06-07 15:24:45 -04008063 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocb, sglq))
James Smart4f774512009-05-22 14:52:35 -04008064 return IOCB_ERROR;
8065 }
8066
8067 if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe))
8068 return IOCB_ERROR;
8069
James Smart341af102010-01-26 23:07:37 -05008070 if ((piocb->iocb_flag & LPFC_IO_FCP) ||
8071 (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
James Smart5ffc2662009-11-18 15:39:44 -05008072 /*
8073 * For FCP command IOCB, get a new WQ index to distribute
8074 * WQEs across the WQs. On the other hand, an abort IOCB
8075 * carries the same WQ index as the original command
8076 * IOCB.
8077 */
James Smart341af102010-01-26 23:07:37 -05008078 if (piocb->iocb_flag & LPFC_IO_FCP)
James Smart5ffc2662009-11-18 15:39:44 -05008079 piocb->fcp_wqidx = lpfc_sli4_scmd_to_wqidx_distr(phba);
8080 if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[piocb->fcp_wqidx],
8081 &wqe))
James Smart4f774512009-05-22 14:52:35 -04008082 return IOCB_ERROR;
8083 } else {
8084 if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe))
8085 return IOCB_ERROR;
8086 }
8087 lpfc_sli_ringtxcmpl_put(phba, pring, piocb);
8088
8089 return 0;
8090}
8091
8092/**
James Smart3772a992009-05-22 14:50:54 -04008093 * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb
8094 *
8095 * This routine invokes the actual lockless IOCB issue routine through the function
8096 * pointer from the lpfc_hba struct.
8097 *
8098 * Return codes:
8099 * IOCB_ERROR - Error
8100 * IOCB_SUCCESS - Success
8101 * IOCB_BUSY - Busy
8102 **/
James Smart2a9bf3d2010-06-07 15:24:45 -04008103int
James Smart3772a992009-05-22 14:50:54 -04008104__lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
8105 struct lpfc_iocbq *piocb, uint32_t flag)
8106{
8107 return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
8108}
8109
8110/**
Lucas De Marchi25985ed2011-03-30 22:57:33 -03008111 * lpfc_sli_api_table_setup - Set up sli api function jump table
James Smart3772a992009-05-22 14:50:54 -04008112 * @phba: The hba struct for which this call is being executed.
8113 * @dev_grp: The HBA PCI-Device group number.
8114 *
8115 * This routine sets up the SLI interface API function jump table in @phba
8116 * struct.
8117 * Returns: 0 - success, -ENODEV - failure.
8118 **/
8119int
8120lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
8121{
8122
8123 switch (dev_grp) {
8124 case LPFC_PCI_DEV_LP:
8125 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
8126 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
8127 break;
James Smart4f774512009-05-22 14:52:35 -04008128 case LPFC_PCI_DEV_OC:
8129 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4;
8130 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
8131 break;
James Smart3772a992009-05-22 14:50:54 -04008132 default:
8133 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8134 "1419 Invalid HBA PCI-device group: 0x%x\n",
8135 dev_grp);
8136 return -ENODEV;
8137 break;
8138 }
8139 phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq;
8140 return 0;
8141}
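/*
 * Illustrative note (not part of the upstream file): once
 * lpfc_sli_api_table_setup() has filled in the jump table, SLI-rev
 * specific behaviour is reached only through the function pointers
 * set above, e.g.
 *
 *	lpfc_sli_api_table_setup(phba, LPFC_PCI_DEV_OC);
 *	...
 *	rc = __lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, piocb, 0);
 *		-> dispatches to __lpfc_sli_issue_iocb_s4 on SLI-4 ports
 *		   (the caller holds hbalock for the lockless variant)
 *
 * 'piocb' and 'rc' are placeholder locals used only in this sketch.
 */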
James Smart92d7f7b2007-06-17 19:56:38 -05008142
James Smarte59058c2008-08-24 21:49:00 -04008143/**
James Smart3621a712009-04-06 18:47:14 -04008144 * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb
James Smarte59058c2008-08-24 21:49:00 -04008145 * @phba: Pointer to HBA context object.
8146 * @pring: Pointer to driver SLI ring object.
8147 * @piocb: Pointer to command iocb.
8148 * @flag: Flag indicating if this command can be put into txq.
8149 *
8150 * lpfc_sli_issue_iocb is a wrapper around __lpfc_sli_issue_iocb
8151 * function. This function gets the hbalock and calls
8152 * __lpfc_sli_issue_iocb function and will return the error returned
8153 * by __lpfc_sli_issue_iocb function. This wrapper is used by
8154 * functions which do not hold hbalock.
8155 **/
James Smart92d7f7b2007-06-17 19:56:38 -05008156int
James Smart3772a992009-05-22 14:50:54 -04008157lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
James Smart92d7f7b2007-06-17 19:56:38 -05008158 struct lpfc_iocbq *piocb, uint32_t flag)
8159{
8160 unsigned long iflags;
8161 int rc;
8162
8163 spin_lock_irqsave(&phba->hbalock, iflags);
James Smart3772a992009-05-22 14:50:54 -04008164 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
James Smart92d7f7b2007-06-17 19:56:38 -05008165 spin_unlock_irqrestore(&phba->hbalock, iflags);
8166
8167 return rc;
8168}
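/*
 * Hedged usage sketch only -- not part of the driver source.  It shows
 * how a caller that does not hold hbalock would normally post an iocb
 * through the locked wrapper above and react to the return code.  The
 * SLI_IOCB_RET_IOCB flag asks the issue path to return IOCB_BUSY
 * instead of parking the iocb on the txq.
 */
#if 0
static int lpfc_example_post_iocb(struct lpfc_hba *phba,
				  struct lpfc_iocbq *piocb)
{
	int rc;

	/* lpfc_sli_issue_iocb() takes and releases hbalock internally */
	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, piocb,
				 SLI_IOCB_RET_IOCB);
	if (rc == IOCB_BUSY)
		return -EBUSY;	/* ring/WQ busy, caller may retry later */
	if (rc != IOCB_SUCCESS)
		return -EIO;	/* covers IOCB_ERROR */
	return 0;
}
#endif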
8169
James Smarte59058c2008-08-24 21:49:00 -04008170/**
James Smart3621a712009-04-06 18:47:14 -04008171 * lpfc_extra_ring_setup - Extra ring setup function
James Smarte59058c2008-08-24 21:49:00 -04008172 * @phba: Pointer to HBA context object.
8173 *
8174 * This function is called while the driver attaches to the
8175 * HBA to set up the extra ring. The extra ring is used
8176 * only when the driver needs to support target mode or
8177 * IP over FC functionality.
8178 *
8179 * This function is called with no lock held.
8180 **/
Jamie Wellnitzcf5bf972006-02-28 22:33:08 -05008181static int
8182lpfc_extra_ring_setup( struct lpfc_hba *phba)
8183{
8184 struct lpfc_sli *psli;
8185 struct lpfc_sli_ring *pring;
8186
8187 psli = &phba->sli;
8188
8189 /* Adjust cmd/rsp ring iocb entries more evenly */
James Smarta4bc3372006-12-02 13:34:16 -05008190
8191 /* Take some away from the FCP ring */
Jamie Wellnitzcf5bf972006-02-28 22:33:08 -05008192 pring = &psli->ring[psli->fcp_ring];
8193 pring->numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
8194 pring->numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
8195 pring->numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
8196 pring->numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;
8197
James Smarta4bc3372006-12-02 13:34:16 -05008198 /* and give them to the extra ring */
8199 pring = &psli->ring[psli->extra_ring];
8200
Jamie Wellnitzcf5bf972006-02-28 22:33:08 -05008201 pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
8202 pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
8203 pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
8204 pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
8205
8206 /* Setup default profile for this ring */
8207 pring->iotag_max = 4096;
8208 pring->num_mask = 1;
8209 pring->prt[0].profile = 0; /* Mask 0 */
James Smarta4bc3372006-12-02 13:34:16 -05008210 pring->prt[0].rctl = phba->cfg_multi_ring_rctl;
8211 pring->prt[0].type = phba->cfg_multi_ring_type;
Jamie Wellnitzcf5bf972006-02-28 22:33:08 -05008212 pring->prt[0].lpfc_sli_rcv_unsol_event = NULL;
8213 return 0;
8214}
8215
James Smarte59058c2008-08-24 21:49:00 -04008216/**
James Smart3621a712009-04-06 18:47:14 -04008217 * lpfc_sli_async_event_handler - ASYNC iocb handler function
James Smarte59058c2008-08-24 21:49:00 -04008218 * @phba: Pointer to HBA context object.
8219 * @pring: Pointer to driver SLI ring object.
8220 * @iocbq: Pointer to iocb object.
8221 *
8222 * This function is called by the slow ring event handler
8223 * function when there is an ASYNC event iocb in the ring.
8224 * This function is called with no lock held.
8225 * Currently this function handles only temperature related
8226 * ASYNC events. The function decodes the temperature sensor
8227 * event message and posts events for the management applications.
8228 **/
James Smart98c9ea52007-10-27 13:37:33 -04008229static void
James Smart57127f12007-10-27 13:37:05 -04008230lpfc_sli_async_event_handler(struct lpfc_hba * phba,
8231 struct lpfc_sli_ring * pring, struct lpfc_iocbq * iocbq)
8232{
8233 IOCB_t *icmd;
8234 uint16_t evt_code;
8235 uint16_t temp;
8236 struct temp_event temp_event_data;
8237 struct Scsi_Host *shost;
James Smarta257bf92009-04-06 18:48:10 -04008238 uint32_t *iocb_w;
James Smart57127f12007-10-27 13:37:05 -04008239
8240 icmd = &iocbq->iocb;
8241 evt_code = icmd->un.asyncstat.evt_code;
8242 temp = icmd->ulpContext;
8243
8244 if ((evt_code != ASYNC_TEMP_WARN) &&
8245 (evt_code != ASYNC_TEMP_SAFE)) {
James Smarta257bf92009-04-06 18:48:10 -04008246 iocb_w = (uint32_t *) icmd;
James Smart57127f12007-10-27 13:37:05 -04008247 lpfc_printf_log(phba,
8248 KERN_ERR,
8249 LOG_SLI,
James Smart76bb24e2007-10-27 13:38:00 -04008250 "0346 Ring %d handler: unexpected ASYNC_STATUS"
James Smarte4e74272009-07-19 10:01:38 -04008251 " evt_code 0x%x\n"
James Smarta257bf92009-04-06 18:48:10 -04008252 "W0 0x%08x W1 0x%08x W2 0x%08x W3 0x%08x\n"
8253 "W4 0x%08x W5 0x%08x W6 0x%08x W7 0x%08x\n"
8254 "W8 0x%08x W9 0x%08x W10 0x%08x W11 0x%08x\n"
8255 "W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n",
James Smart57127f12007-10-27 13:37:05 -04008256 pring->ringno,
James Smarta257bf92009-04-06 18:48:10 -04008257 icmd->un.asyncstat.evt_code,
8258 iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3],
8259 iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7],
8260 iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11],
8261 iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]);
8262
James Smart57127f12007-10-27 13:37:05 -04008263 return;
8264 }
8265 temp_event_data.data = (uint32_t)temp;
8266 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
8267 if (evt_code == ASYNC_TEMP_WARN) {
8268 temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
8269 lpfc_printf_log(phba,
James Smart09372822008-01-11 01:52:54 -05008270 KERN_ERR,
James Smart57127f12007-10-27 13:37:05 -04008271 LOG_TEMP,
James Smart76bb24e2007-10-27 13:38:00 -04008272 "0347 Adapter is very hot, please take "
James Smart57127f12007-10-27 13:37:05 -04008273 "corrective action. temperature : %d Celsius\n",
8274 temp);
8275 }
8276 if (evt_code == ASYNC_TEMP_SAFE) {
8277 temp_event_data.event_code = LPFC_NORMAL_TEMP;
8278 lpfc_printf_log(phba,
James Smart09372822008-01-11 01:52:54 -05008279 KERN_ERR,
James Smart57127f12007-10-27 13:37:05 -04008280 LOG_TEMP,
8281 "0340 Adapter temperature is OK now. "
8282 "temperature : %d Celsius\n",
8283 temp);
8284 }
8285
8286 /* Send temperature change event to applications */
8287 shost = lpfc_shost_from_vport(phba->pport);
8288 fc_host_post_vendor_event(shost, fc_get_event_number(),
8289 sizeof(temp_event_data), (char *) &temp_event_data,
James Smartddcc50f2008-12-04 22:38:46 -05008290 LPFC_NL_VENDOR_ID);
James Smart57127f12007-10-27 13:37:05 -04008291
8292}
8293
8294
James Smarte59058c2008-08-24 21:49:00 -04008295/**
James Smart3621a712009-04-06 18:47:14 -04008296 * lpfc_sli_setup - SLI ring setup function
James Smarte59058c2008-08-24 21:49:00 -04008297 * @phba: Pointer to HBA context object.
8298 *
8299 * lpfc_sli_setup sets up rings of the SLI interface with
8300 * number of iocbs per ring and iotags. This function is
8301 * called while the driver attaches to the HBA and before the
8302 * interrupts are enabled. So there is no need for locking.
8303 *
8304 * This function always returns 0.
8305 **/
dea31012005-04-17 16:05:31 -05008306int
8307lpfc_sli_setup(struct lpfc_hba *phba)
8308{
James Smarted957682007-06-17 19:56:37 -05008309 int i, totiocbsize = 0;
dea31012005-04-17 16:05:31 -05008310 struct lpfc_sli *psli = &phba->sli;
8311 struct lpfc_sli_ring *pring;
8312
8313 psli->num_rings = MAX_CONFIGURED_RINGS;
8314 psli->sli_flag = 0;
8315 psli->fcp_ring = LPFC_FCP_RING;
8316 psli->next_ring = LPFC_FCP_NEXT_RING;
James Smarta4bc3372006-12-02 13:34:16 -05008317 psli->extra_ring = LPFC_EXTRA_RING;
dea31012005-04-17 16:05:31 -05008318
James Bottomley604a3e32005-10-29 10:28:33 -05008319 psli->iocbq_lookup = NULL;
8320 psli->iocbq_lookup_len = 0;
8321 psli->last_iotag = 0;
8322
dea31012005-04-17 16:05:31 -05008323 for (i = 0; i < psli->num_rings; i++) {
8324 pring = &psli->ring[i];
8325 switch (i) {
8326 case LPFC_FCP_RING: /* ring 0 - FCP */
8327 /* numCiocb and numRiocb are used in config_port */
8328 pring->numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
8329 pring->numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
8330 pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
8331 pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
8332 pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
8333 pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
James Smarted957682007-06-17 19:56:37 -05008334 pring->sizeCiocb = (phba->sli_rev == 3) ?
James Smart92d7f7b2007-06-17 19:56:38 -05008335 SLI3_IOCB_CMD_SIZE :
8336 SLI2_IOCB_CMD_SIZE;
James Smarted957682007-06-17 19:56:37 -05008337 pring->sizeRiocb = (phba->sli_rev == 3) ?
James Smart92d7f7b2007-06-17 19:56:38 -05008338 SLI3_IOCB_RSP_SIZE :
8339 SLI2_IOCB_RSP_SIZE;
dea31012005-04-17 16:05:31 -05008340 pring->iotag_ctr = 0;
8341 pring->iotag_max =
James Smart92d7f7b2007-06-17 19:56:38 -05008342 (phba->cfg_hba_queue_depth * 2);
dea31012005-04-17 16:05:31 -05008343 pring->fast_iotag = pring->iotag_max;
8344 pring->num_mask = 0;
8345 break;
James Smarta4bc3372006-12-02 13:34:16 -05008346 case LPFC_EXTRA_RING: /* ring 1 - EXTRA */
dea31012005-04-17 16:05:31 -05008347 /* numCiocb and numRiocb are used in config_port */
8348 pring->numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
8349 pring->numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
James Smarted957682007-06-17 19:56:37 -05008350 pring->sizeCiocb = (phba->sli_rev == 3) ?
James Smart92d7f7b2007-06-17 19:56:38 -05008351 SLI3_IOCB_CMD_SIZE :
8352 SLI2_IOCB_CMD_SIZE;
James Smarted957682007-06-17 19:56:37 -05008353 pring->sizeRiocb = (phba->sli_rev == 3) ?
James Smart92d7f7b2007-06-17 19:56:38 -05008354 SLI3_IOCB_RSP_SIZE :
8355 SLI2_IOCB_RSP_SIZE;
James Smart2e0fef82007-06-17 19:56:36 -05008356 pring->iotag_max = phba->cfg_hba_queue_depth;
dea31012005-04-17 16:05:31 -05008357 pring->num_mask = 0;
8358 break;
8359 case LPFC_ELS_RING: /* ring 2 - ELS / CT */
8360 /* numCiocb and numRiocb are used in config_port */
8361 pring->numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
8362 pring->numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
James Smarted957682007-06-17 19:56:37 -05008363 pring->sizeCiocb = (phba->sli_rev == 3) ?
James Smart92d7f7b2007-06-17 19:56:38 -05008364 SLI3_IOCB_CMD_SIZE :
8365 SLI2_IOCB_CMD_SIZE;
James Smarted957682007-06-17 19:56:37 -05008366 pring->sizeRiocb = (phba->sli_rev == 3) ?
James Smart92d7f7b2007-06-17 19:56:38 -05008367 SLI3_IOCB_RSP_SIZE :
8368 SLI2_IOCB_RSP_SIZE;
dea31012005-04-17 16:05:31 -05008369 pring->fast_iotag = 0;
8370 pring->iotag_ctr = 0;
8371 pring->iotag_max = 4096;
James Smart57127f12007-10-27 13:37:05 -04008372 pring->lpfc_sli_rcv_async_status =
8373 lpfc_sli_async_event_handler;
James Smart6669f9b2009-10-02 15:16:45 -04008374 pring->num_mask = LPFC_MAX_RING_MASK;
dea31012005-04-17 16:05:31 -05008375 pring->prt[0].profile = 0; /* Mask 0 */
James Smart6a9c52c2009-10-02 15:16:51 -04008376 pring->prt[0].rctl = FC_RCTL_ELS_REQ;
8377 pring->prt[0].type = FC_TYPE_ELS;
dea31012005-04-17 16:05:31 -05008378 pring->prt[0].lpfc_sli_rcv_unsol_event =
James Smart92d7f7b2007-06-17 19:56:38 -05008379 lpfc_els_unsol_event;
dea31012005-04-17 16:05:31 -05008380 pring->prt[1].profile = 0; /* Mask 1 */
James Smart6a9c52c2009-10-02 15:16:51 -04008381 pring->prt[1].rctl = FC_RCTL_ELS_REP;
8382 pring->prt[1].type = FC_TYPE_ELS;
dea31012005-04-17 16:05:31 -05008383 pring->prt[1].lpfc_sli_rcv_unsol_event =
James Smart92d7f7b2007-06-17 19:56:38 -05008384 lpfc_els_unsol_event;
dea31012005-04-17 16:05:31 -05008385 pring->prt[2].profile = 0; /* Mask 2 */
8386 /* NameServer Inquiry */
James Smart6a9c52c2009-10-02 15:16:51 -04008387 pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
dea31012005-04-17 16:05:31 -05008388 /* NameServer */
James Smart6a9c52c2009-10-02 15:16:51 -04008389 pring->prt[2].type = FC_TYPE_CT;
dea31012005-04-17 16:05:31 -05008390 pring->prt[2].lpfc_sli_rcv_unsol_event =
James Smart92d7f7b2007-06-17 19:56:38 -05008391 lpfc_ct_unsol_event;
dea31012005-04-17 16:05:31 -05008392 pring->prt[3].profile = 0; /* Mask 3 */
8393 /* NameServer response */
James Smart6a9c52c2009-10-02 15:16:51 -04008394 pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
dea31012005-04-17 16:05:31 -05008395 /* NameServer */
James Smart6a9c52c2009-10-02 15:16:51 -04008396 pring->prt[3].type = FC_TYPE_CT;
dea31012005-04-17 16:05:31 -05008397 pring->prt[3].lpfc_sli_rcv_unsol_event =
James Smart92d7f7b2007-06-17 19:56:38 -05008398 lpfc_ct_unsol_event;
James Smart6669f9b2009-10-02 15:16:45 -04008399 /* abort unsolicited sequence */
8400 pring->prt[4].profile = 0; /* Mask 4 */
8401 pring->prt[4].rctl = FC_RCTL_BA_ABTS;
8402 pring->prt[4].type = FC_TYPE_BLS;
8403 pring->prt[4].lpfc_sli_rcv_unsol_event =
8404 lpfc_sli4_ct_abort_unsol_event;
dea31012005-04-17 16:05:31 -05008405 break;
8406 }
James Smarted957682007-06-17 19:56:37 -05008407 totiocbsize += (pring->numCiocb * pring->sizeCiocb) +
James Smart92d7f7b2007-06-17 19:56:38 -05008408 (pring->numRiocb * pring->sizeRiocb);
dea31012005-04-17 16:05:31 -05008409 }
James Smarted957682007-06-17 19:56:37 -05008410 if (totiocbsize > MAX_SLIM_IOCB_SIZE) {
dea31012005-04-17 16:05:31 -05008411 /* Too many cmd / rsp ring entries in SLI2 SLIM */
James Smarte8b62012007-08-02 11:10:09 -04008412 printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in "
8413 "SLI2 SLIM Data: x%x x%lx\n",
8414 phba->brd_no, totiocbsize,
8415 (unsigned long) MAX_SLIM_IOCB_SIZE);
dea31012005-04-17 16:05:31 -05008416 }
Jamie Wellnitzcf5bf972006-02-28 22:33:08 -05008417 if (phba->cfg_multi_ring_support == 2)
8418 lpfc_extra_ring_setup(phba);
dea31012005-04-17 16:05:31 -05008419
8420 return 0;
8421}
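/*
 * Sizing note (illustration only, not from the original file): the
 * check above sums, for every ring,
 *
 *	numCiocb * sizeCiocb  +  numRiocb * sizeRiocb
 *
 * and logs an error when the total exceeds the SLI-2 SLIM area
 * (MAX_SLIM_IOCB_SIZE).  The per-entry sizes depend on phba->sli_rev
 * (SLI3_IOCB_CMD/RSP_SIZE vs. SLI2_IOCB_CMD/RSP_SIZE).
 */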
8422
James Smarte59058c2008-08-24 21:49:00 -04008423/**
James Smart3621a712009-04-06 18:47:14 -04008424 * lpfc_sli_queue_setup - Queue initialization function
James Smarte59058c2008-08-24 21:49:00 -04008425 * @phba: Pointer to HBA context object.
8426 *
8427 * lpfc_sli_queue_setup sets up mailbox queues and iocb queues for each
8428 * ring. This function also initializes ring indices of each ring.
8429 * This function is called during the initialization of the SLI
8430 * interface of an HBA.
8431 * This function is called with no lock held and always returns
8432 * 1.
8433 **/
dea31012005-04-17 16:05:31 -05008434int
James Smart2e0fef82007-06-17 19:56:36 -05008435lpfc_sli_queue_setup(struct lpfc_hba *phba)
dea31012005-04-17 16:05:31 -05008436{
8437 struct lpfc_sli *psli;
8438 struct lpfc_sli_ring *pring;
James Bottomley604a3e32005-10-29 10:28:33 -05008439 int i;
dea31012005-04-17 16:05:31 -05008440
8441 psli = &phba->sli;
James Smart2e0fef82007-06-17 19:56:36 -05008442 spin_lock_irq(&phba->hbalock);
dea31012005-04-17 16:05:31 -05008443 INIT_LIST_HEAD(&psli->mboxq);
James Smart92d7f7b2007-06-17 19:56:38 -05008444 INIT_LIST_HEAD(&psli->mboxq_cmpl);
dea31012005-04-17 16:05:31 -05008445 /* Initialize list headers for txq and txcmplq as doubly linked lists */
8446 for (i = 0; i < psli->num_rings; i++) {
8447 pring = &psli->ring[i];
8448 pring->ringno = i;
8449 pring->next_cmdidx = 0;
8450 pring->local_getidx = 0;
8451 pring->cmdidx = 0;
8452 INIT_LIST_HEAD(&pring->txq);
8453 INIT_LIST_HEAD(&pring->txcmplq);
8454 INIT_LIST_HEAD(&pring->iocb_continueq);
James Smart9c2face2008-01-11 01:53:18 -05008455 INIT_LIST_HEAD(&pring->iocb_continue_saveq);
dea31012005-04-17 16:05:31 -05008456 INIT_LIST_HEAD(&pring->postbufq);
dea31012005-04-17 16:05:31 -05008457 }
James Smart2e0fef82007-06-17 19:56:36 -05008458 spin_unlock_irq(&phba->hbalock);
8459 return 1;
dea31012005-04-17 16:05:31 -05008460}
8461
James Smarte59058c2008-08-24 21:49:00 -04008462/**
James Smart04c68492009-05-22 14:52:52 -04008463 * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system
8464 * @phba: Pointer to HBA context object.
8465 *
8466 * This routine flushes the mailbox command subsystem. It will unconditionally
8467 * flush all the mailbox commands in the three possible stages in the mailbox
8468 * command sub-system: pending mailbox command queue; the outstanding mailbox
8469 * command; and the completed mailbox command queue. It is the caller's responsibility
8470 * to make sure that the driver is in the proper state to flush the mailbox
8471 * command sub-system. Namely, the posting of mailbox commands into the
8472 * pending mailbox command queue from the various clients must be stopped;
8473 * either the HBA is in a state in which it will never work on the outstanding
8474 * mailbox command (such as in EEH or ERATT conditions) or the outstanding
8475 * mailbox command has been completed.
8476 **/
8477static void
8478lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba)
8479{
8480 LIST_HEAD(completions);
8481 struct lpfc_sli *psli = &phba->sli;
8482 LPFC_MBOXQ_t *pmb;
8483 unsigned long iflag;
8484
8485 /* Flush all the mailbox commands in the mbox system */
8486 spin_lock_irqsave(&phba->hbalock, iflag);
8487 /* The pending mailbox command queue */
8488 list_splice_init(&phba->sli.mboxq, &completions);
8489 /* The outstanding active mailbox command */
8490 if (psli->mbox_active) {
8491 list_add_tail(&psli->mbox_active->list, &completions);
8492 psli->mbox_active = NULL;
8493 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8494 }
8495 /* The completed mailbox command queue */
8496 list_splice_init(&phba->sli.mboxq_cmpl, &completions);
8497 spin_unlock_irqrestore(&phba->hbalock, iflag);
8498
8499 /* Return all flushed mailbox commands with MBX_NOT_FINISHED status */
8500 while (!list_empty(&completions)) {
8501 list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
8502 pmb->u.mb.mbxStatus = MBX_NOT_FINISHED;
8503 if (pmb->mbox_cmpl)
8504 pmb->mbox_cmpl(phba, pmb);
8505 }
8506}
8507
8508/**
James Smart3621a712009-04-06 18:47:14 -04008509 * lpfc_sli_host_down - Vport cleanup function
James Smarte59058c2008-08-24 21:49:00 -04008510 * @vport: Pointer to virtual port object.
8511 *
8512 * lpfc_sli_host_down is called to clean up the resources
8513 * associated with a vport before destroying virtual
8514 * port data structures.
8515 * This function does the following operations:
8516 * - Free discovery resources associated with this virtual
8517 * port.
8518 * - Free iocbs associated with this virtual port in
8519 * the txq.
8520 * - Send abort for all iocb commands associated with this
8521 * vport in txcmplq.
8522 *
8523 * This function is called with no lock held and always returns 1.
8524 **/
dea31012005-04-17 16:05:31 -05008525int
James Smart92d7f7b2007-06-17 19:56:38 -05008526lpfc_sli_host_down(struct lpfc_vport *vport)
8527{
James Smart858c9f62007-06-17 19:56:39 -05008528 LIST_HEAD(completions);
James Smart92d7f7b2007-06-17 19:56:38 -05008529 struct lpfc_hba *phba = vport->phba;
8530 struct lpfc_sli *psli = &phba->sli;
8531 struct lpfc_sli_ring *pring;
8532 struct lpfc_iocbq *iocb, *next_iocb;
James Smart92d7f7b2007-06-17 19:56:38 -05008533 int i;
8534 unsigned long flags = 0;
8535 uint16_t prev_pring_flag;
8536
8537 lpfc_cleanup_discovery_resources(vport);
8538
8539 spin_lock_irqsave(&phba->hbalock, flags);
James Smart92d7f7b2007-06-17 19:56:38 -05008540 for (i = 0; i < psli->num_rings; i++) {
8541 pring = &psli->ring[i];
8542 prev_pring_flag = pring->flag;
James Smart5e9d9b82008-06-14 22:52:53 -04008543 /* Only slow rings */
8544 if (pring->ringno == LPFC_ELS_RING) {
James Smart858c9f62007-06-17 19:56:39 -05008545 pring->flag |= LPFC_DEFERRED_RING_EVENT;
James Smart5e9d9b82008-06-14 22:52:53 -04008546 /* Set the lpfc data pending flag */
8547 set_bit(LPFC_DATA_READY, &phba->data_flags);
8548 }
James Smart92d7f7b2007-06-17 19:56:38 -05008549 /*
8550 * Error everything on the txq since these iocbs have not been
8551 * given to the FW yet.
8552 */
James Smart92d7f7b2007-06-17 19:56:38 -05008553 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
8554 if (iocb->vport != vport)
8555 continue;
James Smart858c9f62007-06-17 19:56:39 -05008556 list_move_tail(&iocb->list, &completions);
James Smart92d7f7b2007-06-17 19:56:38 -05008557 pring->txq_cnt--;
James Smart92d7f7b2007-06-17 19:56:38 -05008558 }
8559
8560 /* Next issue ABTS for everything on the txcmplq */
8561 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq,
8562 list) {
8563 if (iocb->vport != vport)
8564 continue;
8565 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
8566 }
8567
8568 pring->flag = prev_pring_flag;
8569 }
8570
8571 spin_unlock_irqrestore(&phba->hbalock, flags);
8572
James Smarta257bf92009-04-06 18:48:10 -04008573 /* Cancel all the IOCBs from the completions list */
8574 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
8575 IOERR_SLI_DOWN);
James Smart92d7f7b2007-06-17 19:56:38 -05008576 return 1;
8577}
8578
James Smarte59058c2008-08-24 21:49:00 -04008579/**
James Smart3621a712009-04-06 18:47:14 -04008580 * lpfc_sli_hba_down - Resource cleanup function for the HBA
James Smarte59058c2008-08-24 21:49:00 -04008581 * @phba: Pointer to HBA context object.
8582 *
8583 * This function cleans up all iocbs, buffers, and mailbox commands
8584 * while shutting down the HBA. This function is called with no
8585 * lock held and always returns 1.
8586 * This function does the following to clean up driver resources:
8587 * - Free discovery resources for each virtual port
8588 * - Cleanup any pending fabric iocbs
8589 * - Iterate through the iocb txq and free each entry
8590 * in the list.
8591 * - Free up any buffer posted to the HBA
8592 * - Free mailbox commands in the mailbox queue.
8593 **/
James Smart92d7f7b2007-06-17 19:56:38 -05008594int
James Smart2e0fef82007-06-17 19:56:36 -05008595lpfc_sli_hba_down(struct lpfc_hba *phba)
dea31012005-04-17 16:05:31 -05008596{
James Smart2534ba72007-04-25 09:52:20 -04008597 LIST_HEAD(completions);
James Smart2e0fef82007-06-17 19:56:36 -05008598 struct lpfc_sli *psli = &phba->sli;
dea31012005-04-17 16:05:31 -05008599 struct lpfc_sli_ring *pring;
James Smart0ff10d42008-01-11 01:52:36 -05008600 struct lpfc_dmabuf *buf_ptr;
dea31012005-04-17 16:05:31 -05008601 unsigned long flags = 0;
James Smart04c68492009-05-22 14:52:52 -04008602 int i;
8603
8604 /* Shutdown the mailbox command sub-system */
8605 lpfc_sli_mbox_sys_shutdown(phba);
dea31012005-04-17 16:05:31 -05008606
dea31012005-04-17 16:05:31 -05008607 lpfc_hba_down_prep(phba);
8608
James Smart92d7f7b2007-06-17 19:56:38 -05008609 lpfc_fabric_abort_hba(phba);
8610
James Smart2e0fef82007-06-17 19:56:36 -05008611 spin_lock_irqsave(&phba->hbalock, flags);
dea31012005-04-17 16:05:31 -05008612 for (i = 0; i < psli->num_rings; i++) {
8613 pring = &psli->ring[i];
James Smart5e9d9b82008-06-14 22:52:53 -04008614 /* Only slow rings */
8615 if (pring->ringno == LPFC_ELS_RING) {
James Smart858c9f62007-06-17 19:56:39 -05008616 pring->flag |= LPFC_DEFERRED_RING_EVENT;
James Smart5e9d9b82008-06-14 22:52:53 -04008617 /* Set the lpfc data pending flag */
8618 set_bit(LPFC_DATA_READY, &phba->data_flags);
8619 }
dea31012005-04-17 16:05:31 -05008620
8621 /*
8622 * Error everything on the txq since these iocbs have not been
8623 * given to the FW yet.
8624 */
James Smart2534ba72007-04-25 09:52:20 -04008625 list_splice_init(&pring->txq, &completions);
dea31012005-04-17 16:05:31 -05008626 pring->txq_cnt = 0;
8627
dea31012005-04-17 16:05:31 -05008628 }
James Smart2e0fef82007-06-17 19:56:36 -05008629 spin_unlock_irqrestore(&phba->hbalock, flags);
dea31012005-04-17 16:05:31 -05008630
James Smarta257bf92009-04-06 18:48:10 -04008631 /* Cancel all the IOCBs from the completions list */
8632 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
8633 IOERR_SLI_DOWN);
James Smart2534ba72007-04-25 09:52:20 -04008634
James Smart0ff10d42008-01-11 01:52:36 -05008635 spin_lock_irqsave(&phba->hbalock, flags);
8636 list_splice_init(&phba->elsbuf, &completions);
8637 phba->elsbuf_cnt = 0;
8638 phba->elsbuf_prev_cnt = 0;
8639 spin_unlock_irqrestore(&phba->hbalock, flags);
8640
8641 while (!list_empty(&completions)) {
8642 list_remove_head(&completions, buf_ptr,
8643 struct lpfc_dmabuf, list);
8644 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
8645 kfree(buf_ptr);
8646 }
8647
dea31012005-04-17 16:05:31 -05008648 /* Return any active mbox cmds */
8649 del_timer_sync(&psli->mbox_tmo);
James Smart92d7f7b2007-06-17 19:56:38 -05008650
James Smartda0436e2009-05-22 14:51:39 -04008651 spin_lock_irqsave(&phba->pport->work_port_lock, flags);
James Smart92d7f7b2007-06-17 19:56:38 -05008652 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
James Smartda0436e2009-05-22 14:51:39 -04008653 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
James Smart92d7f7b2007-06-17 19:56:38 -05008654
James Smartda0436e2009-05-22 14:51:39 -04008655 return 1;
8656}
James Smart92d7f7b2007-06-17 19:56:38 -05008657
James Smartda0436e2009-05-22 14:51:39 -04008658/**
James Smart3621a712009-04-06 18:47:14 -04008659 * lpfc_sli_pcimem_bcopy - SLI memory copy function
James Smarte59058c2008-08-24 21:49:00 -04008660 * @srcp: Source memory pointer.
8661 * @destp: Destination memory pointer.
8662 * @cnt: Number of bytes to be copied, one 32-bit word at a time.
8663 *
8664 * This function is used for copying data between driver memory
8665 * and the SLI memory. This function also changes the endianness
8666 * of each word if native endianness is different from SLI
8667 * endianness. This function can be called with or without
8668 * lock.
8669 **/
dea31012005-04-17 16:05:31 -05008670void
8671lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
8672{
8673 uint32_t *src = srcp;
8674 uint32_t *dest = destp;
8675 uint32_t ldata;
8676 int i;
8677
8678 for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) {
8679 ldata = *src;
8680 ldata = le32_to_cpu(ldata);
8681 *dest = ldata;
8682 src++;
8683 dest++;
8684 }
8685}
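/*
 * Usage illustration (assumption-flagged, not from the original file):
 * the @cnt argument above is a byte count and the copy advances one
 * 32-bit word per pass, converting each word from SLI (little-endian)
 * layout to host endianness, e.g.
 *
 *	lpfc_sli_pcimem_bcopy(slim_mb, &local_mb, sizeof(MAILBOX_t));
 *
 * 'slim_mb' and 'local_mb' are placeholder names for a mapped SLIM
 * mailbox image and a host-side MAILBOX_t used only in this sketch.
 */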
8686
James Smarte59058c2008-08-24 21:49:00 -04008687
8688/**
James Smarta0c87cb2009-07-19 10:01:10 -04008689 * lpfc_sli_bemem_bcopy - SLI memory copy function
8690 * @srcp: Source memory pointer.
8691 * @destp: Destination memory pointer.
8692 * @cnt: Number of bytes to be copied, one 32-bit word at a time.
8693 *
8694 * This function is used for copying data from a data structure
8695 * with big endian representation to local endianness.
8696 * This function can be called with or without lock.
8697 **/
8698void
8699lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt)
8700{
8701 uint32_t *src = srcp;
8702 uint32_t *dest = destp;
8703 uint32_t ldata;
8704 int i;
8705
8706 for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
8707 ldata = *src;
8708 ldata = be32_to_cpu(ldata);
8709 *dest = ldata;
8710 src++;
8711 dest++;
8712 }
8713}
8714
8715/**
James Smart3621a712009-04-06 18:47:14 -04008716 * lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq
James Smarte59058c2008-08-24 21:49:00 -04008717 * @phba: Pointer to HBA context object.
8718 * @pring: Pointer to driver SLI ring object.
8719 * @mp: Pointer to driver buffer object.
8720 *
8721 * This function is called with no lock held.
8722 * It always return zero after adding the buffer to the postbufq
8723 * buffer list.
8724 **/
dea31012005-04-17 16:05:31 -05008725int
James Smart2e0fef82007-06-17 19:56:36 -05008726lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
8727 struct lpfc_dmabuf *mp)
dea31012005-04-17 16:05:31 -05008728{
8729 /* Stick struct lpfc_dmabuf at end of postbufq so driver can look it up
8730 later */
James Smart2e0fef82007-06-17 19:56:36 -05008731 spin_lock_irq(&phba->hbalock);
dea31012005-04-17 16:05:31 -05008732 list_add_tail(&mp->list, &pring->postbufq);
dea31012005-04-17 16:05:31 -05008733 pring->postbufq_cnt++;
James Smart2e0fef82007-06-17 19:56:36 -05008734 spin_unlock_irq(&phba->hbalock);
dea31012005-04-17 16:05:31 -05008735 return 0;
8736}
8737
James Smarte59058c2008-08-24 21:49:00 -04008738/**
James Smart3621a712009-04-06 18:47:14 -04008739 * lpfc_sli_get_buffer_tag - allocates a tag for a CMD_QUE_XRI64_CX buffer
James Smarte59058c2008-08-24 21:49:00 -04008740 * @phba: Pointer to HBA context object.
8741 *
8742 * When HBQ is enabled, buffers are searched based on tags. This function
8743 * allocates a tag for a buffer posted using the CMD_QUE_XRI64_CX iocb. The
8744 * tag is bitwise OR-ed with QUE_BUFTAG_BIT to make sure that the tag
8745 * does not conflict with tags of buffers posted for unsolicited events.
8746 * The function returns the allocated tag. The function is called with
8747 * no locks held.
8748 **/
James Smart76bb24e2007-10-27 13:38:00 -04008749uint32_t
8750lpfc_sli_get_buffer_tag(struct lpfc_hba *phba)
8751{
8752 spin_lock_irq(&phba->hbalock);
8753 phba->buffer_tag_count++;
8754 /*
8755 * Always set the QUE_BUFTAG_BIT to distinguish this tag
8756 * from a tag assigned by the HBQ.
8757 */
8758 phba->buffer_tag_count |= QUE_BUFTAG_BIT;
8759 spin_unlock_irq(&phba->hbalock);
8760 return phba->buffer_tag_count;
8761}
8762
James Smarte59058c2008-08-24 21:49:00 -04008763/**
James Smart3621a712009-04-06 18:47:14 -04008764 * lpfc_sli_ring_taggedbuf_get - find HBQ buffer associated with given tag
James Smarte59058c2008-08-24 21:49:00 -04008765 * @phba: Pointer to HBA context object.
8766 * @pring: Pointer to driver SLI ring object.
8767 * @tag: Buffer tag.
8768 *
8769 * Buffers posted using CMD_QUE_XRI64_CX iocb are in pring->postbufq
8770 * list. After the HBA DMAs data to these buffers, a CMD_IOCB_RET_XRI64_CX
8771 * iocb is posted to the response ring with the tag of the buffer.
8772 * This function searches the pring->postbufq list using the tag
8773 * to find buffer associated with CMD_IOCB_RET_XRI64_CX
8774 * iocb. If the buffer is found then lpfc_dmabuf object of the
8775 * buffer is returned to the caller else NULL is returned.
8776 * This function is called with no lock held.
8777 **/
James Smart76bb24e2007-10-27 13:38:00 -04008778struct lpfc_dmabuf *
8779lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
8780 uint32_t tag)
8781{
8782 struct lpfc_dmabuf *mp, *next_mp;
8783 struct list_head *slp = &pring->postbufq;
8784
Lucas De Marchi25985ed2011-03-30 22:57:33 -03008785 /* Search postbufq, from the beginning, looking for a match on tag */
James Smart76bb24e2007-10-27 13:38:00 -04008786 spin_lock_irq(&phba->hbalock);
8787 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
8788 if (mp->buffer_tag == tag) {
8789 list_del_init(&mp->list);
8790 pring->postbufq_cnt--;
8791 spin_unlock_irq(&phba->hbalock);
8792 return mp;
8793 }
8794 }
8795
8796 spin_unlock_irq(&phba->hbalock);
8797 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
James Smartd7c255b2008-08-24 21:50:00 -04008798 "0402 Cannot find virtual addr for buffer tag on "
James Smart76bb24e2007-10-27 13:38:00 -04008799 "ring %d Data x%lx x%p x%p x%x\n",
8800 pring->ringno, (unsigned long) tag,
8801 slp->next, slp->prev, pring->postbufq_cnt);
8802
8803 return NULL;
8804}
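/*
 * Illustration only (not part of the upstream source): posting and
 * reclaiming a tagged buffer normally pairs the two helpers above --
 * allocate the tag when building the CMD_QUE_XRI64_CX iocb, then use
 * the tag reported by the CMD_IOCB_RET_XRI64_CX completion to find
 * the buffer again:
 *
 *	mp->buffer_tag = lpfc_sli_get_buffer_tag(phba);
 *	...
 *	mp = lpfc_sli_ring_taggedbuf_get(phba, pring, rsp_tag);
 *
 * 'mp' and 'rsp_tag' are placeholder locals for this sketch.
 */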
dea31012005-04-17 16:05:31 -05008805
James Smarte59058c2008-08-24 21:49:00 -04008806/**
James Smart3621a712009-04-06 18:47:14 -04008807 * lpfc_sli_ringpostbuf_get - search buffers for unsolicited CT and ELS events
James Smarte59058c2008-08-24 21:49:00 -04008808 * @phba: Pointer to HBA context object.
8809 * @pring: Pointer to driver SLI ring object.
8810 * @phys: DMA address of the buffer.
8811 *
8812 * This function searches the buffer list using the dma_address
8813 * of unsolicited event to find the driver's lpfc_dmabuf object
8814 * corresponding to the dma_address. The function returns the
8815 * lpfc_dmabuf object if a buffer is found else it returns NULL.
8816 * This function is called by the ct and els unsolicited event
8817 * handlers to get the buffer associated with the unsolicited
8818 * event.
8819 *
8820 * This function is called with no lock held.
8821 **/
dea31012005-04-17 16:05:31 -05008822struct lpfc_dmabuf *
8823lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
8824 dma_addr_t phys)
8825{
8826 struct lpfc_dmabuf *mp, *next_mp;
8827 struct list_head *slp = &pring->postbufq;
8828
Lucas De Marchi25985ed2011-03-30 22:57:33 -03008829 /* Search postbufq, from the beginning, looking for a match on phys */
James Smart2e0fef82007-06-17 19:56:36 -05008830 spin_lock_irq(&phba->hbalock);
dea31012005-04-17 16:05:31 -05008831 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
8832 if (mp->phys == phys) {
8833 list_del_init(&mp->list);
8834 pring->postbufq_cnt--;
James Smart2e0fef82007-06-17 19:56:36 -05008835 spin_unlock_irq(&phba->hbalock);
dea31012005-04-17 16:05:31 -05008836 return mp;
8837 }
8838 }
8839
James Smart2e0fef82007-06-17 19:56:36 -05008840 spin_unlock_irq(&phba->hbalock);
dea31012005-04-17 16:05:31 -05008841 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
James Smarte8b62012007-08-02 11:10:09 -04008842 "0410 Cannot find virtual addr for mapped buf on "
dea31012005-04-17 16:05:31 -05008843 "ring %d Data x%llx x%p x%p x%x\n",
James Smarte8b62012007-08-02 11:10:09 -04008844 pring->ringno, (unsigned long long)phys,
dea31012005-04-17 16:05:31 -05008845 slp->next, slp->prev, pring->postbufq_cnt);
8846 return NULL;
8847}
8848
James Smarte59058c2008-08-24 21:49:00 -04008849/**
James Smart3621a712009-04-06 18:47:14 -04008850 * lpfc_sli_abort_els_cmpl - Completion handler for the els abort iocbs
James Smarte59058c2008-08-24 21:49:00 -04008851 * @phba: Pointer to HBA context object.
8852 * @cmdiocb: Pointer to driver command iocb object.
8853 * @rspiocb: Pointer to driver response iocb object.
8854 *
8855 * This function is the completion handler for the abort iocbs for
8856 * ELS commands. This function is called from the ELS ring event
8857 * handler with no lock held. This function frees memory resources
8858 * associated with the abort iocb.
8859 **/
dea31012005-04-17 16:05:31 -05008860static void
James Smart2e0fef82007-06-17 19:56:36 -05008861lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
8862 struct lpfc_iocbq *rspiocb)
dea31012005-04-17 16:05:31 -05008863{
James Smart2e0fef82007-06-17 19:56:36 -05008864 IOCB_t *irsp = &rspiocb->iocb;
James Smart2680eea2007-04-25 09:52:55 -04008865 uint16_t abort_iotag, abort_context;
James Smartff78d8f2011-12-13 13:21:35 -05008866 struct lpfc_iocbq *abort_iocb = NULL;
James Smart2680eea2007-04-25 09:52:55 -04008867
8868 if (irsp->ulpStatus) {
James Smartff78d8f2011-12-13 13:21:35 -05008869
8870 /*
8871 * Assume that the port already completed and returned, or
8872 * will return the iocb. Just log the message.
8873 */
James Smart2680eea2007-04-25 09:52:55 -04008874 abort_context = cmdiocb->iocb.un.acxri.abortContextTag;
8875 abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag;
8876
James Smart2e0fef82007-06-17 19:56:36 -05008877 spin_lock_irq(&phba->hbalock);
James Smart45ed1192009-10-02 15:17:02 -04008878 if (phba->sli_rev < LPFC_SLI_REV4) {
8879 if (abort_iotag != 0 &&
8880 abort_iotag <= phba->sli.last_iotag)
8881 abort_iocb =
8882 phba->sli.iocbq_lookup[abort_iotag];
8883 } else
8884 /* For sli4 the abort_tag is the XRI,
8885 * so the abort routine puts the iotag of the iocb
8886 * being aborted in the context field of the abort
8887 * IOCB.
8888 */
8889 abort_iocb = phba->sli.iocbq_lookup[abort_context];
James Smart2680eea2007-04-25 09:52:55 -04008890
James Smart2a9bf3d2010-06-07 15:24:45 -04008891 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI,
8892 "0327 Cannot abort els iocb %p "
8893 "with tag %x context %x, abort status %x, "
8894 "abort code %x\n",
8895 abort_iocb, abort_iotag, abort_context,
8896 irsp->ulpStatus, irsp->un.ulpWord[4]);
James Smart2680eea2007-04-25 09:52:55 -04008897
James Smartff78d8f2011-12-13 13:21:35 -05008898 spin_unlock_irq(&phba->hbalock);
James Smart2680eea2007-04-25 09:52:55 -04008899 }
James Bottomley604a3e32005-10-29 10:28:33 -05008900 lpfc_sli_release_iocbq(phba, cmdiocb);
dea31012005-04-17 16:05:31 -05008901 return;
8902}
8903
James Smarte59058c2008-08-24 21:49:00 -04008904/**
James Smart3621a712009-04-06 18:47:14 -04008905 * lpfc_ignore_els_cmpl - Completion handler for aborted ELS command
James Smarte59058c2008-08-24 21:49:00 -04008906 * @phba: Pointer to HBA context object.
8907 * @cmdiocb: Pointer to driver command iocb object.
8908 * @rspiocb: Pointer to driver response iocb object.
8909 *
8910 * The function is called from SLI ring event handler with no
8911 * lock held. This function is the completion handler for ELS commands
8912 * which are aborted. The function frees memory resources used for
8913 * the aborted ELS commands.
8914 **/
James Smart92d7f7b2007-06-17 19:56:38 -05008915static void
8916lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
8917 struct lpfc_iocbq *rspiocb)
8918{
8919 IOCB_t *irsp = &rspiocb->iocb;
8920
8921 /* ELS cmd tag <ulpIoTag> completes */
8922 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
James Smartd7c255b2008-08-24 21:50:00 -04008923 "0139 Ignoring ELS cmd tag x%x completion Data: "
James Smart92d7f7b2007-06-17 19:56:38 -05008924 "x%x x%x x%x\n",
James Smarte8b62012007-08-02 11:10:09 -04008925 irsp->ulpIoTag, irsp->ulpStatus,
James Smart92d7f7b2007-06-17 19:56:38 -05008926 irsp->un.ulpWord[4], irsp->ulpTimeout);
James Smart858c9f62007-06-17 19:56:39 -05008927 if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR)
8928 lpfc_ct_free_iocb(phba, cmdiocb);
8929 else
8930 lpfc_els_free_iocb(phba, cmdiocb);
James Smart92d7f7b2007-06-17 19:56:38 -05008931 return;
8932}
8933
James Smarte59058c2008-08-24 21:49:00 -04008934/**
James Smart5af5eee2010-10-22 11:06:38 -04008935 * lpfc_sli_abort_iotag_issue - Issue abort for a command iocb
James Smarte59058c2008-08-24 21:49:00 -04008936 * @phba: Pointer to HBA context object.
8937 * @pring: Pointer to driver SLI ring object.
8938 * @cmdiocb: Pointer to driver command iocb object.
8939 *
James Smart5af5eee2010-10-22 11:06:38 -04008940 * This function issues an abort iocb for the provided command iocb down to
8941 * the port. Other than the case the outstanding command iocb is an abort
8942 * request, this function issues abort out unconditionally. This function is
8943 * called with hbalock held. The function returns 0 when it fails due to
8944 * memory allocation failure or when the command iocb is an abort request.
James Smarte59058c2008-08-24 21:49:00 -04008945 **/
James Smart5af5eee2010-10-22 11:06:38 -04008946static int
8947lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
James Smart2e0fef82007-06-17 19:56:36 -05008948 struct lpfc_iocbq *cmdiocb)
dea31012005-04-17 16:05:31 -05008949{
James Smart2e0fef82007-06-17 19:56:36 -05008950 struct lpfc_vport *vport = cmdiocb->vport;
James.Smart@Emulex.Com0bd4ca22005-10-28 20:30:02 -04008951 struct lpfc_iocbq *abtsiocbp;
dea31012005-04-17 16:05:31 -05008952 IOCB_t *icmd = NULL;
8953 IOCB_t *iabt = NULL;
James Smart5af5eee2010-10-22 11:06:38 -04008954 int retval;
James Smart07951072007-04-25 09:51:38 -04008955
James Smart92d7f7b2007-06-17 19:56:38 -05008956 /*
8957 * There are certain command types we don't want to abort. And we
8958 * don't want to abort commands that are already in the process of
8959 * being aborted.
James Smart07951072007-04-25 09:51:38 -04008960 */
8961 icmd = &cmdiocb->iocb;
James Smart2e0fef82007-06-17 19:56:36 -05008962 if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
James Smart92d7f7b2007-06-17 19:56:38 -05008963 icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
8964 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
James Smart07951072007-04-25 09:51:38 -04008965 return 0;
8966
dea31012005-04-17 16:05:31 -05008967 /* issue ABTS for this IOCB based on iotag */
James Smart92d7f7b2007-06-17 19:56:38 -05008968 abtsiocbp = __lpfc_sli_get_iocbq(phba);
dea31012005-04-17 16:05:31 -05008969 if (abtsiocbp == NULL)
8970 return 0;
dea31012005-04-17 16:05:31 -05008971
James Smart07951072007-04-25 09:51:38 -04008972 /* This signals the response to set the correct status
James Smart341af102010-01-26 23:07:37 -05008973 * before calling the completion handler
James Smart07951072007-04-25 09:51:38 -04008974 */
8975 cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
8976
dea31012005-04-17 16:05:31 -05008977 iabt = &abtsiocbp->iocb;
James Smart07951072007-04-25 09:51:38 -04008978 iabt->un.acxri.abortType = ABORT_TYPE_ABTS;
8979 iabt->un.acxri.abortContextTag = icmd->ulpContext;
James Smart45ed1192009-10-02 15:17:02 -04008980 if (phba->sli_rev == LPFC_SLI_REV4) {
James Smartda0436e2009-05-22 14:51:39 -04008981 iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag;
James Smart45ed1192009-10-02 15:17:02 -04008982 iabt->un.acxri.abortContextTag = cmdiocb->iotag;
8983 }
James Smartda0436e2009-05-22 14:51:39 -04008984 else
8985 iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
dea31012005-04-17 16:05:31 -05008986 iabt->ulpLe = 1;
James Smart07951072007-04-25 09:51:38 -04008987 iabt->ulpClass = icmd->ulpClass;
dea31012005-04-17 16:05:31 -05008988
James Smart5ffc2662009-11-18 15:39:44 -05008989 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
8990 abtsiocbp->fcp_wqidx = cmdiocb->fcp_wqidx;
James Smart341af102010-01-26 23:07:37 -05008991 if (cmdiocb->iocb_flag & LPFC_IO_FCP)
8992 abtsiocbp->iocb_flag |= LPFC_USE_FCPWQIDX;
James Smart5ffc2662009-11-18 15:39:44 -05008993
James Smart2e0fef82007-06-17 19:56:36 -05008994 if (phba->link_state >= LPFC_LINK_UP)
James Smart07951072007-04-25 09:51:38 -04008995 iabt->ulpCommand = CMD_ABORT_XRI_CN;
8996 else
8997 iabt->ulpCommand = CMD_CLOSE_XRI_CN;
8998
8999 abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl;
James Smart5b8bd0c2007-04-25 09:52:49 -04009000
James Smarte8b62012007-08-02 11:10:09 -04009001 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
9002 "0339 Abort xri x%x, original iotag x%x, "
9003 "abort cmd iotag x%x\n",
James Smart2a9bf3d2010-06-07 15:24:45 -04009004 iabt->un.acxri.abortIoTag,
James Smarte8b62012007-08-02 11:10:09 -04009005 iabt->un.acxri.abortContextTag,
James Smart2a9bf3d2010-06-07 15:24:45 -04009006 abtsiocbp->iotag);
James Smartda0436e2009-05-22 14:51:39 -04009007 retval = __lpfc_sli_issue_iocb(phba, pring->ringno, abtsiocbp, 0);
James Smart07951072007-04-25 09:51:38 -04009008
James Smartd7c255b2008-08-24 21:50:00 -04009009 if (retval)
9010 __lpfc_sli_release_iocbq(phba, abtsiocbp);
James Smart5af5eee2010-10-22 11:06:38 -04009011
9012 /*
9013 * Caller to this routine should check for IOCB_ERROR
9014 * and handle it properly. This routine no longer removes
9015 * the iocb from the txcmplq or calls compl in case of IOCB_ERROR.
9016 */
9017 return retval;
9018}
9019
9020/**
9021 * lpfc_sli_issue_abort_iotag - Abort function for a command iocb
9022 * @phba: Pointer to HBA context object.
9023 * @pring: Pointer to driver SLI ring object.
9024 * @cmdiocb: Pointer to driver command iocb object.
9025 *
9026 * This function issues an abort iocb for the provided command iocb. In case
9027 * of unloading, the abort iocb will not be issued to commands on the ELS
9028 * ring. Instead, the callback function is changed for those commands
9029 * so that nothing happens when they finish. This function is called with
9030 * hbalock held. The function returns 0 when the command iocb is an abort
9031 * request.
9032 **/
9033int
9034lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
9035 struct lpfc_iocbq *cmdiocb)
9036{
9037 struct lpfc_vport *vport = cmdiocb->vport;
9038 int retval = IOCB_ERROR;
9039 IOCB_t *icmd = NULL;
9040
9041 /*
9042 * There are certain command types we don't want to abort. And we
9043 * don't want to abort commands that are already in the process of
9044 * being aborted.
9045 */
9046 icmd = &cmdiocb->iocb;
9047 if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
9048 icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
9049 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
9050 return 0;
9051
9052 /*
9053 * If we're unloading, don't abort iocb on the ELS ring, but change
9054 * the callback so that nothing happens when it finishes.
9055 */
9056 if ((vport->load_flag & FC_UNLOADING) &&
9057 (pring->ringno == LPFC_ELS_RING)) {
9058 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
9059 cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
9060 else
9061 cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
9062 goto abort_iotag_exit;
9063 }
9064
9065 /* Now, we try to issue the abort to the cmdiocb out */
9066 retval = lpfc_sli_abort_iotag_issue(phba, pring, cmdiocb);
9067
James Smart07951072007-04-25 09:51:38 -04009068abort_iotag_exit:
James Smart2e0fef82007-06-17 19:56:36 -05009069 /*
9070 * Caller to this routine should check for IOCB_ERROR
9071 * and handle it properly. This routine no longer removes
9072 * the iocb from the txcmplq or calls compl in case of IOCB_ERROR.
James Smart07951072007-04-25 09:51:38 -04009073 */
James Smart2e0fef82007-06-17 19:56:36 -05009074 return retval;
dea31012005-04-17 16:05:31 -05009075}
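/*
 * Hedged example only -- not from the original file.  Callers of
 * lpfc_sli_issue_abort_iotag() hold hbalock and must inspect the
 * return value themselves, since the routine neither removes the
 * iocb from the txcmplq nor runs its completion handler on error:
 *
 *	spin_lock_irq(&phba->hbalock);
 *	rc = lpfc_sli_issue_abort_iotag(phba, pring, iocb);
 *	spin_unlock_irq(&phba->hbalock);
 *	if (rc == IOCB_ERROR)
 *		(leave the iocb to its normal completion path)
 *
 * 'rc' and 'iocb' are placeholder locals for this sketch.
 */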
9076
James Smarte59058c2008-08-24 21:49:00 -04009077/**
James Smart5af5eee2010-10-22 11:06:38 -04009078 * lpfc_sli_iocb_ring_abort - Unconditionally abort all iocbs on an iocb ring
9079 * @phba: Pointer to HBA context object.
9080 * @pring: Pointer to driver SLI ring object.
9081 *
9082 * This function aborts all iocbs in the given ring and frees all the iocb
9083 * objects in txq. This function issues abort iocbs unconditionally for all
9084 * the iocb commands in txcmplq. The iocbs in the txcmplq is not guaranteed
9085 * to complete before the return of this function. The caller is not required
9086 * to hold any locks.
9087 **/
9088static void
9089lpfc_sli_iocb_ring_abort(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
9090{
9091 LIST_HEAD(completions);
9092 struct lpfc_iocbq *iocb, *next_iocb;
9093
9094 if (pring->ringno == LPFC_ELS_RING)
9095 lpfc_fabric_abort_hba(phba);
9096
9097 spin_lock_irq(&phba->hbalock);
9098
9099 /* Take off all the iocbs on txq for cancelling */
9100 list_splice_init(&pring->txq, &completions);
9101 pring->txq_cnt = 0;
9102
9103 /* Next issue ABTS for everything on the txcmplq */
9104 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
9105 lpfc_sli_abort_iotag_issue(phba, pring, iocb);
9106
9107 spin_unlock_irq(&phba->hbalock);
9108
9109 /* Cancel all the IOCBs from the completions list */
9110 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
9111 IOERR_SLI_ABORTED);
9112}
9113
9114/**
9115 * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba.
9116 * @phba: pointer to lpfc HBA data structure.
9117 *
9118 * This routine will abort all pending and outstanding iocbs to an HBA.
9119 **/
9120void
9121lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba)
9122{
9123 struct lpfc_sli *psli = &phba->sli;
9124 struct lpfc_sli_ring *pring;
9125 int i;
9126
9127 for (i = 0; i < psli->num_rings; i++) {
9128 pring = &psli->ring[i];
9129 lpfc_sli_iocb_ring_abort(phba, pring);
9130 }
9131}
9132
9133/**
James Smart3621a712009-04-06 18:47:14 -04009134 * lpfc_sli_validate_fcp_iocb - find commands associated with a vport or LUN
James Smarte59058c2008-08-24 21:49:00 -04009135 * @iocbq: Pointer to driver iocb object.
9136 * @vport: Pointer to driver virtual port object.
9137 * @tgt_id: SCSI ID of the target.
9138 * @lun_id: LUN ID of the scsi device.
9139 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST
9140 *
James Smart3621a712009-04-06 18:47:14 -04009141 * This function acts as an iocb filter for functions which abort or count
James Smarte59058c2008-08-24 21:49:00 -04009142 * all FCP iocbs pending on a lun/SCSI target/SCSI host. It will return
9143 * 0 if the filtering criteria are met for the given iocb and will return
9144 * 1 if the filtering criteria are not met.
9145 * If ctx_cmd == LPFC_CTX_LUN, the function returns 0 only if the
9146 * given iocb is for the SCSI device specified by vport, tgt_id and
9147 * lun_id parameter.
9148 * If ctx_cmd == LPFC_CTX_TGT, the function returns 0 only if the
9149 * given iocb is for the SCSI target specified by vport and tgt_id
9150 * parameters.
9151 * If ctx_cmd == LPFC_CTX_HOST, the function returns 0 only if the
9152 * given iocb is for the SCSI host associated with the given vport.
9153 * This function is called with no locks held.
9154 **/
dea31012005-04-17 16:05:31 -05009155static int
James Smart51ef4c22007-08-02 11:10:31 -04009156lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
9157 uint16_t tgt_id, uint64_t lun_id,
James.Smart@Emulex.Com0bd4ca22005-10-28 20:30:02 -04009158 lpfc_ctx_cmd ctx_cmd)
dea31012005-04-17 16:05:31 -05009159{
James.Smart@Emulex.Com0bd4ca22005-10-28 20:30:02 -04009160 struct lpfc_scsi_buf *lpfc_cmd;
dea31012005-04-17 16:05:31 -05009161 int rc = 1;
9162
James.Smart@Emulex.Com0bd4ca22005-10-28 20:30:02 -04009163 if (!(iocbq->iocb_flag & LPFC_IO_FCP))
9164 return rc;
9165
James Smart51ef4c22007-08-02 11:10:31 -04009166 if (iocbq->vport != vport)
9167 return rc;
9168
James.Smart@Emulex.Com0bd4ca22005-10-28 20:30:02 -04009169 lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
James.Smart@Emulex.Com0bd4ca22005-10-28 20:30:02 -04009170
James Smart495a7142008-06-14 22:52:59 -04009171 if (lpfc_cmd->pCmd == NULL)
dea31012005-04-17 16:05:31 -05009172 return rc;
9173
9174 switch (ctx_cmd) {
9175 case LPFC_CTX_LUN:
James Smart495a7142008-06-14 22:52:59 -04009176 if ((lpfc_cmd->rdata->pnode) &&
9177 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) &&
9178 (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id))
dea31012005-04-17 16:05:31 -05009179 rc = 0;
9180 break;
9181 case LPFC_CTX_TGT:
James Smart495a7142008-06-14 22:52:59 -04009182 if ((lpfc_cmd->rdata->pnode) &&
9183 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id))
dea31012005-04-17 16:05:31 -05009184 rc = 0;
9185 break;
dea31012005-04-17 16:05:31 -05009186 case LPFC_CTX_HOST:
9187 rc = 0;
9188 break;
9189 default:
9190 printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
Harvey Harrisoncadbd4a2008-07-03 23:47:27 -07009191 __func__, ctx_cmd);
dea31012005-04-17 16:05:31 -05009192 break;
9193 }
9194
9195 return rc;
9196}
9197
James Smarte59058c2008-08-24 21:49:00 -04009198/**
James Smart3621a712009-04-06 18:47:14 -04009199 * lpfc_sli_sum_iocb - Function to count the number of FCP iocbs pending
James Smarte59058c2008-08-24 21:49:00 -04009200 * @vport: Pointer to virtual port.
9201 * @tgt_id: SCSI ID of the target.
9202 * @lun_id: LUN ID of the scsi device.
9203 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
9204 *
9205 * This function returns number of FCP commands pending for the vport.
9206 * When ctx_cmd == LPFC_CTX_LUN, the function returns number of FCP
9207 * commands pending on the vport associated with SCSI device specified
9208 * by tgt_id and lun_id parameters.
9209 * When ctx_cmd == LPFC_CTX_TGT, the function returns number of FCP
9210 * commands pending on the vport associated with SCSI target specified
9211 * by tgt_id parameter.
9212 * When ctx_cmd == LPFC_CTX_HOST, the function returns number of FCP
9213 * commands pending on the vport.
9214 * This function returns the number of iocbs which satisfy the filter.
9215 * This function is called without any lock held.
9216 **/
dea31012005-04-17 16:05:31 -05009217int
James Smart51ef4c22007-08-02 11:10:31 -04009218lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id,
9219 lpfc_ctx_cmd ctx_cmd)
dea31012005-04-17 16:05:31 -05009220{
James Smart51ef4c22007-08-02 11:10:31 -04009221 struct lpfc_hba *phba = vport->phba;
James.Smart@Emulex.Com0bd4ca22005-10-28 20:30:02 -04009222 struct lpfc_iocbq *iocbq;
9223 int sum, i;
dea31012005-04-17 16:05:31 -05009224
James.Smart@Emulex.Com0bd4ca22005-10-28 20:30:02 -04009225 for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) {
9226 iocbq = phba->sli.iocbq_lookup[i];
dea31012005-04-17 16:05:31 -05009227
James Smart51ef4c22007-08-02 11:10:31 -04009228 if (lpfc_sli_validate_fcp_iocb (iocbq, vport, tgt_id, lun_id,
9229 ctx_cmd) == 0)
James.Smart@Emulex.Com0bd4ca22005-10-28 20:30:02 -04009230 sum++;
dea31012005-04-17 16:05:31 -05009231 }
James.Smart@Emulex.Com0bd4ca22005-10-28 20:30:02 -04009232
dea31012005-04-17 16:05:31 -05009233 return sum;
9234}
9235
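/*
 * Illustrative sketch only (assumed caller, not in the source, disabled
 * with #if 0): count how many FCP commands are still pending for one LUN,
 * for example while waiting for a LUN reset to drain.  tgt_id and lun_id
 * would normally come from the SCSI midlayer command being recovered.
 */
#if 0
static int example_lun_pending_count(struct lpfc_vport *vport,
				     uint16_t tgt_id, uint64_t lun_id)
{
	/* No lock is needed; the routine walks the iotag lookup array */
	return lpfc_sli_sum_iocb(vport, tgt_id, lun_id, LPFC_CTX_LUN);
}
#endif
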
James Smarte59058c2008-08-24 21:49:00 -04009236/**
James Smart3621a712009-04-06 18:47:14 -04009237 * lpfc_sli_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs
James Smarte59058c2008-08-24 21:49:00 -04009238 * @phba: Pointer to HBA context object
9239 * @cmdiocb: Pointer to command iocb object.
9240 * @rspiocb: Pointer to response iocb object.
9241 *
9242 * This function is called when an aborted FCP iocb completes. This
9243 * function is called by the ring event handler with no lock held.
9244 * This function frees the iocb.
9245 **/
James.Smart@Emulex.Com5eb95af2005-06-25 10:34:30 -04009246void
James Smart2e0fef82007-06-17 19:56:36 -05009247lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
9248 struct lpfc_iocbq *rspiocb)
James.Smart@Emulex.Com5eb95af2005-06-25 10:34:30 -04009249{
James Bottomley604a3e32005-10-29 10:28:33 -05009250 lpfc_sli_release_iocbq(phba, cmdiocb);
James.Smart@Emulex.Com5eb95af2005-06-25 10:34:30 -04009251 return;
9252}
9253
James Smarte59058c2008-08-24 21:49:00 -04009254/**
James Smart3621a712009-04-06 18:47:14 -04009255 * lpfc_sli_abort_iocb - issue abort for all commands on a host/target/LUN
James Smarte59058c2008-08-24 21:49:00 -04009256 * @vport: Pointer to virtual port.
9257 * @pring: Pointer to driver SLI ring object.
9258 * @tgt_id: SCSI ID of the target.
9259 * @lun_id: LUN ID of the scsi device.
9260 * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
9261 *
9262 * This function sends an abort command for every SCSI command
9263 * associated with the given virtual port pending on the ring
9264 * filtered by lpfc_sli_validate_fcp_iocb function.
9265 * When abort_cmd == LPFC_CTX_LUN, the function sends abort only to the
9266 * FCP iocbs associated with lun specified by tgt_id and lun_id
9267 * parameters
9268 * When abort_cmd == LPFC_CTX_TGT, the function sends abort only to the
9269 * FCP iocbs associated with SCSI target specified by tgt_id parameter.
9270 * When abort_cmd == LPFC_CTX_HOST, the function sends abort to all
9271 * FCP iocbs associated with virtual port.
9272 * This function returns number of iocbs it failed to abort.
9273 * This function is called with no locks held.
9274 **/
dea31012005-04-17 16:05:31 -05009275int
James Smart51ef4c22007-08-02 11:10:31 -04009276lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
9277 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd abort_cmd)
dea31012005-04-17 16:05:31 -05009278{
James Smart51ef4c22007-08-02 11:10:31 -04009279 struct lpfc_hba *phba = vport->phba;
James.Smart@Emulex.Com0bd4ca22005-10-28 20:30:02 -04009280 struct lpfc_iocbq *iocbq;
9281 struct lpfc_iocbq *abtsiocb;
dea31012005-04-17 16:05:31 -05009282 IOCB_t *cmd = NULL;
dea31012005-04-17 16:05:31 -05009283 int errcnt = 0, ret_val = 0;
James.Smart@Emulex.Com0bd4ca22005-10-28 20:30:02 -04009284 int i;
dea31012005-04-17 16:05:31 -05009285
James.Smart@Emulex.Com0bd4ca22005-10-28 20:30:02 -04009286 for (i = 1; i <= phba->sli.last_iotag; i++) {
9287 iocbq = phba->sli.iocbq_lookup[i];
dea31012005-04-17 16:05:31 -05009288
James Smart51ef4c22007-08-02 11:10:31 -04009289 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
James Smart2e0fef82007-06-17 19:56:36 -05009290 abort_cmd) != 0)
dea31012005-04-17 16:05:31 -05009291 continue;
9292
9293 /* issue ABTS for this IOCB based on iotag */
James.Smart@Emulex.Com0bd4ca22005-10-28 20:30:02 -04009294 abtsiocb = lpfc_sli_get_iocbq(phba);
dea31012005-04-17 16:05:31 -05009295 if (abtsiocb == NULL) {
9296 errcnt++;
9297 continue;
9298 }
dea31012005-04-17 16:05:31 -05009299
James.Smart@Emulex.Com0bd4ca22005-10-28 20:30:02 -04009300 cmd = &iocbq->iocb;
dea31012005-04-17 16:05:31 -05009301 abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
9302 abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext;
James Smartda0436e2009-05-22 14:51:39 -04009303 if (phba->sli_rev == LPFC_SLI_REV4)
9304 abtsiocb->iocb.un.acxri.abortIoTag = iocbq->sli4_xritag;
9305 else
9306 abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
dea31012005-04-17 16:05:31 -05009307 abtsiocb->iocb.ulpLe = 1;
9308 abtsiocb->iocb.ulpClass = cmd->ulpClass;
James Smart2e0fef82007-06-17 19:56:36 -05009309 abtsiocb->vport = phba->pport;
dea31012005-04-17 16:05:31 -05009310
James Smart5ffc2662009-11-18 15:39:44 -05009311 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
9312 abtsiocb->fcp_wqidx = iocbq->fcp_wqidx;
James Smart341af102010-01-26 23:07:37 -05009313 if (iocbq->iocb_flag & LPFC_IO_FCP)
9314 abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
James Smart5ffc2662009-11-18 15:39:44 -05009315
James Smart2e0fef82007-06-17 19:56:36 -05009316 if (lpfc_is_link_up(phba))
dea31012005-04-17 16:05:31 -05009317 abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN;
9318 else
9319 abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
9320
James.Smart@Emulex.Com5eb95af2005-06-25 10:34:30 -04009321 /* Setup callback routine and issue the command. */
9322 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
James Smartda0436e2009-05-22 14:51:39 -04009323 ret_val = lpfc_sli_issue_iocb(phba, pring->ringno,
9324 abtsiocb, 0);
dea31012005-04-17 16:05:31 -05009325 if (ret_val == IOCB_ERROR) {
James Bottomley604a3e32005-10-29 10:28:33 -05009326 lpfc_sli_release_iocbq(phba, abtsiocb);
dea31012005-04-17 16:05:31 -05009327 errcnt++;
9328 continue;
9329 }
9330 }
9331
9332 return errcnt;
9333}
9334
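/*
 * Hedged usage sketch (hypothetical caller, disabled with #if 0): send
 * ABTS for every FCP iocb outstanding to one SCSI target and report how
 * many aborts could not be issued.  The FCP ring index follows the
 * psli->ring[] layout used elsewhere in this file.
 */
#if 0
static int example_abort_target_ios(struct lpfc_vport *vport, uint16_t tgt_id)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_FCP_RING];

	/* Returns the number of iocbs the routine failed to abort */
	return lpfc_sli_abort_iocb(vport, pring, tgt_id, 0, LPFC_CTX_TGT);
}
#endif
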
James Smarte59058c2008-08-24 21:49:00 -04009335/**
James Smart3621a712009-04-06 18:47:14 -04009336 * lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler
James Smarte59058c2008-08-24 21:49:00 -04009337 * @phba: Pointer to HBA context object.
9338 * @cmdiocbq: Pointer to command iocb.
9339 * @rspiocbq: Pointer to response iocb.
9340 *
9341 * This function is the completion handler for iocbs issued using
9342 * lpfc_sli_issue_iocb_wait function. This function is called by the
9343 * ring event handler function without any lock held. This function
9344 * can be called from both worker thread context and interrupt
9345 * context. This function can also be called from other threads which
9346 * cleans up the SLI layer objects.
9347 * This function copies the contents of the response iocb to the
9348 * response iocb memory object provided by the caller of
9349 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
9350 * sleeps for the iocb completion.
9351 **/
James.Smart@Emulex.Com68876922005-10-28 20:29:47 -04009352static void
9353lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
9354 struct lpfc_iocbq *cmdiocbq,
9355 struct lpfc_iocbq *rspiocbq)
dea31012005-04-17 16:05:31 -05009356{
James.Smart@Emulex.Com68876922005-10-28 20:29:47 -04009357 wait_queue_head_t *pdone_q;
9358 unsigned long iflags;
James Smart0f65ff62010-02-26 14:14:23 -05009359 struct lpfc_scsi_buf *lpfc_cmd;
dea31012005-04-17 16:05:31 -05009360
James Smart2e0fef82007-06-17 19:56:36 -05009361 spin_lock_irqsave(&phba->hbalock, iflags);
James.Smart@Emulex.Com68876922005-10-28 20:29:47 -04009362 cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
9363 if (cmdiocbq->context2 && rspiocbq)
9364 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
9365 &rspiocbq->iocb, sizeof(IOCB_t));
9366
James Smart0f65ff62010-02-26 14:14:23 -05009367 /* Set the exchange busy flag for task management commands */
9368 if ((cmdiocbq->iocb_flag & LPFC_IO_FCP) &&
9369 !(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) {
9370 lpfc_cmd = container_of(cmdiocbq, struct lpfc_scsi_buf,
9371 cur_iocbq);
9372 lpfc_cmd->exch_busy = rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY;
9373 }
9374
James.Smart@Emulex.Com68876922005-10-28 20:29:47 -04009375 pdone_q = cmdiocbq->context_un.wait_queue;
James.Smart@Emulex.Com68876922005-10-28 20:29:47 -04009376 if (pdone_q)
9377 wake_up(pdone_q);
James Smart858c9f62007-06-17 19:56:39 -05009378 spin_unlock_irqrestore(&phba->hbalock, iflags);
dea31012005-04-17 16:05:31 -05009379 return;
9380}
9381
James Smarte59058c2008-08-24 21:49:00 -04009382/**
James Smartd11e31d2009-06-10 17:23:06 -04009383 * lpfc_chk_iocb_flg - Test IOCB flag with lock held.
9384 * @phba: Pointer to HBA context object.
9385 * @piocbq: Pointer to command iocb.
9386 * @flag: Flag to test.
9387 *
9388 * This routine grabs the hbalock and then tests the iocb_flag to
9389 * see if the passed in flag is set.
9390 * Returns:
9391 * 1 if flag is set.
9392 * 0 if flag is not set.
9393 **/
9394static int
9395lpfc_chk_iocb_flg(struct lpfc_hba *phba,
9396 struct lpfc_iocbq *piocbq, uint32_t flag)
9397{
9398 unsigned long iflags;
9399 int ret;
9400
9401 spin_lock_irqsave(&phba->hbalock, iflags);
9402 ret = piocbq->iocb_flag & flag;
9403 spin_unlock_irqrestore(&phba->hbalock, iflags);
9404 return ret;
9405
9406}
9407
9408/**
James Smart3621a712009-04-06 18:47:14 -04009409 * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands
James Smarte59058c2008-08-24 21:49:00 -04009410 * @phba: Pointer to HBA context object.
9411 * @pring: Pointer to sli ring.
9412 * @piocb: Pointer to command iocb.
9413 * @prspiocbq: Pointer to response iocb.
9414 * @timeout: Timeout in number of seconds.
9415 *
9416 * This function issues the iocb to firmware and waits for the
9417 * iocb to complete. If the iocb command is not
9418 * completed within timeout seconds, it returns IOCB_TIMEDOUT.
9419 * Caller should not free the iocb resources if this function
9420 * returns IOCB_TIMEDOUT.
9421 * The function waits for the iocb completion using a
9422 * non-interruptible wait.
9423 * This function will sleep while waiting for iocb completion.
9424 * So, this function should not be called from any context which
9425 * does not allow sleeping. Due to the same reason, this function
9426 * cannot be called with interrupt disabled.
9427 * This function assumes that the iocb completions occur while
9428 * this function sleeps. So, this function cannot be called from
9429 * the thread which processes iocb completion for this ring.
9430 * This function clears the iocb_flag of the iocb object before
9431 * issuing the iocb and the iocb completion handler sets this
9432 * flag and wakes this thread when the iocb completes.
9433 * The contents of the response iocb will be copied to prspiocbq
9434 * by the completion handler when the command completes.
9435 * This function returns IOCB_SUCCESS when success.
9436 * This function is called with no lock held.
9437 **/
dea31012005-04-17 16:05:31 -05009438int
James Smart2e0fef82007-06-17 19:56:36 -05009439lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
James Smartda0436e2009-05-22 14:51:39 -04009440 uint32_t ring_number,
James Smart2e0fef82007-06-17 19:56:36 -05009441 struct lpfc_iocbq *piocb,
9442 struct lpfc_iocbq *prspiocbq,
James.Smart@Emulex.Com68876922005-10-28 20:29:47 -04009443 uint32_t timeout)
dea31012005-04-17 16:05:31 -05009444{
Peter Zijlstra7259f0d2006-10-29 22:46:36 -08009445 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
James.Smart@Emulex.Com68876922005-10-28 20:29:47 -04009446 long timeleft, timeout_req = 0;
9447 int retval = IOCB_SUCCESS;
James.Smart@Emulex.Com875fbdf2005-11-29 16:32:13 -05009448 uint32_t creg_val;
James Smart2a9bf3d2010-06-07 15:24:45 -04009449 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
dea31012005-04-17 16:05:31 -05009450 /*
James.Smart@Emulex.Com68876922005-10-28 20:29:47 -04009451 * If the caller has provided a response iocbq buffer, then context2
9452 * must be NULL or it is an error.
dea31012005-04-17 16:05:31 -05009453 */
James.Smart@Emulex.Com68876922005-10-28 20:29:47 -04009454 if (prspiocbq) {
9455 if (piocb->context2)
9456 return IOCB_ERROR;
9457 piocb->context2 = prspiocbq;
dea31012005-04-17 16:05:31 -05009458 }
9459
James.Smart@Emulex.Com68876922005-10-28 20:29:47 -04009460 piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait;
9461 piocb->context_un.wait_queue = &done_q;
9462 piocb->iocb_flag &= ~LPFC_IO_WAKE;
dea31012005-04-17 16:05:31 -05009463
James.Smart@Emulex.Com875fbdf2005-11-29 16:32:13 -05009464 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
James Smart9940b972011-03-11 16:06:12 -05009465 if (lpfc_readl(phba->HCregaddr, &creg_val))
9466 return IOCB_ERROR;
James.Smart@Emulex.Com875fbdf2005-11-29 16:32:13 -05009467 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
9468 writel(creg_val, phba->HCregaddr);
9469 readl(phba->HCregaddr); /* flush */
9470 }
9471
James Smart2a9bf3d2010-06-07 15:24:45 -04009472 retval = lpfc_sli_issue_iocb(phba, ring_number, piocb,
9473 SLI_IOCB_RET_IOCB);
James.Smart@Emulex.Com68876922005-10-28 20:29:47 -04009474 if (retval == IOCB_SUCCESS) {
9475 timeout_req = timeout * HZ;
James.Smart@Emulex.Com68876922005-10-28 20:29:47 -04009476 timeleft = wait_event_timeout(done_q,
James Smartd11e31d2009-06-10 17:23:06 -04009477 lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
James.Smart@Emulex.Com68876922005-10-28 20:29:47 -04009478 timeout_req);
dea31012005-04-17 16:05:31 -05009479
James Smart7054a602007-04-25 09:52:34 -04009480 if (piocb->iocb_flag & LPFC_IO_WAKE) {
9481 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
James Smarte8b62012007-08-02 11:10:09 -04009482 "0331 IOCB wake signaled\n");
James Smart7054a602007-04-25 09:52:34 -04009483 } else if (timeleft == 0) {
James.Smart@Emulex.Com68876922005-10-28 20:29:47 -04009484 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
James Smarte8b62012007-08-02 11:10:09 -04009485 "0338 IOCB wait timeout error - no "
9486 "wake response Data x%x\n", timeout);
James.Smart@Emulex.Com68876922005-10-28 20:29:47 -04009487 retval = IOCB_TIMEDOUT;
James Smart7054a602007-04-25 09:52:34 -04009488 } else {
James.Smart@Emulex.Com68876922005-10-28 20:29:47 -04009489 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
James Smarte8b62012007-08-02 11:10:09 -04009490 "0330 IOCB wake NOT set, "
9491 "Data x%x x%lx\n",
James.Smart@Emulex.Com68876922005-10-28 20:29:47 -04009492 timeout, (timeleft / jiffies));
9493 retval = IOCB_TIMEDOUT;
dea31012005-04-17 16:05:31 -05009494 }
James Smart2a9bf3d2010-06-07 15:24:45 -04009495 } else if (retval == IOCB_BUSY) {
9496 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
9497 "2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n",
9498 phba->iocb_cnt, pring->txq_cnt, pring->txcmplq_cnt);
9499 return retval;
James.Smart@Emulex.Com68876922005-10-28 20:29:47 -04009500 } else {
9501 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
James Smartd7c255b2008-08-24 21:50:00 -04009502 "0332 IOCB wait issue failed, Data x%x\n",
James Smarte8b62012007-08-02 11:10:09 -04009503 retval);
James.Smart@Emulex.Com68876922005-10-28 20:29:47 -04009504 retval = IOCB_ERROR;
dea31012005-04-17 16:05:31 -05009505 }
9506
James.Smart@Emulex.Com875fbdf2005-11-29 16:32:13 -05009507 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
James Smart9940b972011-03-11 16:06:12 -05009508 if (lpfc_readl(phba->HCregaddr, &creg_val))
9509 return IOCB_ERROR;
James.Smart@Emulex.Com875fbdf2005-11-29 16:32:13 -05009510 creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
9511 writel(creg_val, phba->HCregaddr);
9512 readl(phba->HCregaddr); /* flush */
9513 }
9514
James.Smart@Emulex.Com68876922005-10-28 20:29:47 -04009515 if (prspiocbq)
9516 piocb->context2 = NULL;
9517
9518 piocb->context_un.wait_queue = NULL;
9519 piocb->iocb_cmpl = NULL;
dea31012005-04-17 16:05:31 -05009520 return retval;
9521}
James.Smart@Emulex.Com68876922005-10-28 20:29:47 -04009522
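/*
 * Hypothetical caller sketch (disabled with #if 0): issue an iocb
 * synchronously on the ELS ring and wait up to ten seconds for completion.
 * cmdiocbq and rspiocbq are assumed to have been allocated with
 * lpfc_sli_get_iocbq(); per the comment block above, the iocb resources
 * must not be freed here when IOCB_TIMEDOUT is returned.
 */
#if 0
static int example_issue_iocb_sync(struct lpfc_hba *phba,
				   struct lpfc_iocbq *cmdiocbq,
				   struct lpfc_iocbq *rspiocbq)
{
	int rc;

	rc = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
				      rspiocbq, 10);
	if (rc == IOCB_TIMEDOUT)
		return -ETIMEDOUT;	/* completion handler still owns the iocb */
	return (rc == IOCB_SUCCESS) ? 0 : -EIO;
}
#endif
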
James Smarte59058c2008-08-24 21:49:00 -04009523/**
James Smart3621a712009-04-06 18:47:14 -04009524 * lpfc_sli_issue_mbox_wait - Synchronous function to issue mailbox
James Smarte59058c2008-08-24 21:49:00 -04009525 * @phba: Pointer to HBA context object.
9526 * @pmboxq: Pointer to driver mailbox object.
9527 * @timeout: Timeout in number of seconds.
9528 *
9529 * This function issues the mailbox to firmware and waits for the
9530 * mailbox command to complete. If the mailbox command is not
9531 * completed within timeout seconds, it returns MBX_TIMEOUT.
9532 * The function waits for the mailbox completion using an
9533 * interruptible wait. If the thread is woken up due to a
9534 * signal, MBX_TIMEOUT error is returned to the caller. Caller
9535 * should not free the mailbox resources, if this function returns
9536 * MBX_TIMEOUT.
9537 * This function will sleep while waiting for mailbox completion.
9538 * So, this function should not be called from any context which
9539 * does not allow sleeping. Due to the same reason, this function
9540 * cannot be called with interrupt disabled.
9541 * This function assumes that the mailbox completion occurs while
9542 * this function sleep. So, this function cannot be called from
9543 * the worker thread which processes mailbox completion.
9544 * This function is called in the context of HBA management
9545 * applications.
9546 * This function returns MBX_SUCCESS when successful.
9547 * This function is called with no lock held.
9548 **/
dea31012005-04-17 16:05:31 -05009549int
James Smart2e0fef82007-06-17 19:56:36 -05009550lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
dea31012005-04-17 16:05:31 -05009551 uint32_t timeout)
9552{
Peter Zijlstra7259f0d2006-10-29 22:46:36 -08009553 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
dea31012005-04-17 16:05:31 -05009554 int retval;
James Smart858c9f62007-06-17 19:56:39 -05009555 unsigned long flag;
dea31012005-04-17 16:05:31 -05009556
9557 /* The caller must leave context1 empty. */
James Smart98c9ea52007-10-27 13:37:33 -04009558 if (pmboxq->context1)
James Smart2e0fef82007-06-17 19:56:36 -05009559 return MBX_NOT_FINISHED;
dea31012005-04-17 16:05:31 -05009560
James Smart495a7142008-06-14 22:52:59 -04009561 pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;
dea31012005-04-17 16:05:31 -05009562 /* setup wake call as IOCB callback */
9563 pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
9564 /* setup context field to pass wait_queue pointer to wake function */
9565 pmboxq->context1 = &done_q;
9566
dea31012005-04-17 16:05:31 -05009567 /* now issue the command */
9568 retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
dea31012005-04-17 16:05:31 -05009569 if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
James Smart7054a602007-04-25 09:52:34 -04009570 wait_event_interruptible_timeout(done_q,
9571 pmboxq->mbox_flag & LPFC_MBX_WAKE,
9572 timeout * HZ);
9573
James Smart858c9f62007-06-17 19:56:39 -05009574 spin_lock_irqsave(&phba->hbalock, flag);
dea31012005-04-17 16:05:31 -05009575 pmboxq->context1 = NULL;
James Smart7054a602007-04-25 09:52:34 -04009576 /*
9577 * if LPFC_MBX_WAKE flag is set the mailbox is completed
9578 * else do not free the resources.
9579 */
James Smartd7c47992010-06-08 18:31:54 -04009580 if (pmboxq->mbox_flag & LPFC_MBX_WAKE) {
dea31012005-04-17 16:05:31 -05009581 retval = MBX_SUCCESS;
James Smartd7c47992010-06-08 18:31:54 -04009582 lpfc_sli4_swap_str(phba, pmboxq);
9583 } else {
James Smart7054a602007-04-25 09:52:34 -04009584 retval = MBX_TIMEOUT;
James Smart858c9f62007-06-17 19:56:39 -05009585 pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
9586 }
9587 spin_unlock_irqrestore(&phba->hbalock, flag);
dea31012005-04-17 16:05:31 -05009588 }
9589
dea31012005-04-17 16:05:31 -05009590 return retval;
9591}
9592
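/*
 * Hypothetical sketch of a synchronous mailbox caller (disabled with
 * #if 0): issue a prepared mailbox command and honour the MBX_TIMEOUT
 * contract described above (on timeout the mailbox is left to the default
 * completion handler and must not be freed here).  LPFC_MBOX_TMO is the
 * mailbox timeout, in seconds, already used elsewhere in this file.
 */
#if 0
static int example_issue_mbox_sync(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	int rc;

	rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
	if (rc == MBX_TIMEOUT)
		return -ETIMEDOUT;	/* do not free pmboxq here */
	return (rc == MBX_SUCCESS) ? 0 : -EIO;
}
#endif
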
James Smarte59058c2008-08-24 21:49:00 -04009593/**
James Smart3772a992009-05-22 14:50:54 -04009594 * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system
James Smarte59058c2008-08-24 21:49:00 -04009595 * @phba: Pointer to HBA context.
9596 *
James Smart3772a992009-05-22 14:50:54 -04009597 * This function is called to shutdown the driver's mailbox sub-system.
9598 * It first marks the mailbox sub-system as blocked to prevent any
9599 * asynchronous mailbox command from being issued off the pending mailbox
9600 * command queue. If the mailbox command sub-system shutdown is due to
9601 * HBA error conditions such as EEH or ERATT, this routine shall invoke
9602 * the mailbox sub-system flush routine to forcefully bring down the
9603 * mailbox sub-system. Otherwise, if it is due to normal condition (such
9604 * as with offline or HBA function reset), this routine will wait for the
9605 * outstanding mailbox command to complete before invoking the mailbox
9606 * sub-system flush routine to gracefully bring down mailbox sub-system.
James Smarte59058c2008-08-24 21:49:00 -04009607 **/
James Smart3772a992009-05-22 14:50:54 -04009608void
9609lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba)
James Smartb4c02652006-07-06 15:50:43 -04009610{
James Smart3772a992009-05-22 14:50:54 -04009611 struct lpfc_sli *psli = &phba->sli;
James Smart3772a992009-05-22 14:50:54 -04009612 unsigned long timeout;
9613
James Smarta183a152011-10-10 21:32:43 -04009614 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
James Smart3772a992009-05-22 14:50:54 -04009615 spin_lock_irq(&phba->hbalock);
9616 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
9617 spin_unlock_irq(&phba->hbalock);
9618
9619 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
9620 spin_lock_irq(&phba->hbalock);
James Smart3772a992009-05-22 14:50:54 -04009621 /* Determine how long we might wait for the active mailbox
9622 * command to be gracefully completed by firmware.
9623 */
James Smarta183a152011-10-10 21:32:43 -04009624 if (phba->sli.mbox_active)
9625 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
9626 phba->sli.mbox_active) *
9627 1000) + jiffies;
9628 spin_unlock_irq(&phba->hbalock);
9629
James Smart3772a992009-05-22 14:50:54 -04009630 while (phba->sli.mbox_active) {
9631 /* Check active mailbox complete status every 2ms */
9632 msleep(2);
9633 if (time_after(jiffies, timeout))
9634 /* Timeout, let the mailbox flush routine
9635 * forcefully release active mailbox command
9636 */
9637 break;
9638 }
9639 }
9640 lpfc_sli_mbox_sys_flush(phba);
9641}
9642
9643/**
9644 * lpfc_sli_eratt_read - read sli-3 error attention events
9645 * @phba: Pointer to HBA context.
9646 *
9647 * This function is called to read the SLI3 device error attention registers
9648 * for possible error attention events. The caller must hold the hbalock
9649 * with spin_lock_irq().
9650 *
Lucas De Marchi25985ed2011-03-30 22:57:33 -03009651 * This function returns 1 when there is Error Attention in the Host Attention
James Smart3772a992009-05-22 14:50:54 -04009652 * Register and returns 0 otherwise.
9653 **/
9654static int
9655lpfc_sli_eratt_read(struct lpfc_hba *phba)
9656{
James Smarted957682007-06-17 19:56:37 -05009657 uint32_t ha_copy;
James Smartb4c02652006-07-06 15:50:43 -04009658
James Smart3772a992009-05-22 14:50:54 -04009659 /* Read chip Host Attention (HA) register */
James Smart9940b972011-03-11 16:06:12 -05009660 if (lpfc_readl(phba->HAregaddr, &ha_copy))
9661 goto unplug_err;
9662
James Smart3772a992009-05-22 14:50:54 -04009663 if (ha_copy & HA_ERATT) {
9664 /* Read host status register to retrieve error event */
James Smart9940b972011-03-11 16:06:12 -05009665 if (lpfc_sli_read_hs(phba))
9666 goto unplug_err;
James Smartb4c02652006-07-06 15:50:43 -04009667
James Smart3772a992009-05-22 14:50:54 -04009668 /* Check if a deferred error condition is active */
9669 if ((HS_FFER1 & phba->work_hs) &&
9670 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
James Smartdcf2a4e2010-09-29 11:18:53 -04009671 HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) {
James Smart3772a992009-05-22 14:50:54 -04009672 phba->hba_flag |= DEFER_ERATT;
James Smart3772a992009-05-22 14:50:54 -04009673 /* Clear all interrupt enable conditions */
9674 writel(0, phba->HCregaddr);
9675 readl(phba->HCregaddr);
9676 }
9677
9678 /* Set the driver HA work bitmap */
James Smart3772a992009-05-22 14:50:54 -04009679 phba->work_ha |= HA_ERATT;
9680 /* Indicate polling handles this ERATT */
9681 phba->hba_flag |= HBA_ERATT_HANDLED;
James Smart3772a992009-05-22 14:50:54 -04009682 return 1;
James Smartb4c02652006-07-06 15:50:43 -04009683 }
James Smart3772a992009-05-22 14:50:54 -04009684 return 0;
James Smart9940b972011-03-11 16:06:12 -05009685
9686unplug_err:
9687 /* Set the driver HS work bitmap */
9688 phba->work_hs |= UNPLUG_ERR;
9689 /* Set the driver HA work bitmap */
9690 phba->work_ha |= HA_ERATT;
9691 /* Indicate polling handles this ERATT */
9692 phba->hba_flag |= HBA_ERATT_HANDLED;
9693 return 1;
James Smartb4c02652006-07-06 15:50:43 -04009694}
9695
James Smarte59058c2008-08-24 21:49:00 -04009696/**
James Smartda0436e2009-05-22 14:51:39 -04009697 * lpfc_sli4_eratt_read - read sli-4 error attention events
9698 * @phba: Pointer to HBA context.
9699 *
9700 * This function is called to read the SLI4 device error attention registers
9701 * for possible error attention events. The caller must hold the hbalock
9702 * with spin_lock_irq().
9703 *
Lucas De Marchi25985ed2011-03-30 22:57:33 -03009704 * This function returns 1 when there is Error Attention in the Host Attention
James Smartda0436e2009-05-22 14:51:39 -04009705 * Register and returns 0 otherwise.
9706 **/
9707static int
9708lpfc_sli4_eratt_read(struct lpfc_hba *phba)
9709{
9710 uint32_t uerr_sta_hi, uerr_sta_lo;
James Smart2fcee4b2010-12-15 17:57:46 -05009711 uint32_t if_type, portsmphr;
9712 struct lpfc_register portstat_reg;
James Smartda0436e2009-05-22 14:51:39 -04009713
James Smart2fcee4b2010-12-15 17:57:46 -05009714 /*
9715 * For now, use the SLI4 device internal unrecoverable error
James Smartda0436e2009-05-22 14:51:39 -04009716 * registers for error attention. This can be changed later.
9717 */
James Smart2fcee4b2010-12-15 17:57:46 -05009718 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
9719 switch (if_type) {
9720 case LPFC_SLI_INTF_IF_TYPE_0:
James Smart9940b972011-03-11 16:06:12 -05009721 if (lpfc_readl(phba->sli4_hba.u.if_type0.UERRLOregaddr,
9722 &uerr_sta_lo) ||
9723 lpfc_readl(phba->sli4_hba.u.if_type0.UERRHIregaddr,
9724 &uerr_sta_hi)) {
9725 phba->work_hs |= UNPLUG_ERR;
9726 phba->work_ha |= HA_ERATT;
9727 phba->hba_flag |= HBA_ERATT_HANDLED;
9728 return 1;
9729 }
James Smart2fcee4b2010-12-15 17:57:46 -05009730 if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) ||
9731 (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) {
9732 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9733 "1423 HBA Unrecoverable error: "
9734 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
9735 "ue_mask_lo_reg=0x%x, "
9736 "ue_mask_hi_reg=0x%x\n",
9737 uerr_sta_lo, uerr_sta_hi,
9738 phba->sli4_hba.ue_mask_lo,
9739 phba->sli4_hba.ue_mask_hi);
9740 phba->work_status[0] = uerr_sta_lo;
9741 phba->work_status[1] = uerr_sta_hi;
9742 phba->work_ha |= HA_ERATT;
9743 phba->hba_flag |= HBA_ERATT_HANDLED;
9744 return 1;
9745 }
9746 break;
9747 case LPFC_SLI_INTF_IF_TYPE_2:
James Smart9940b972011-03-11 16:06:12 -05009748 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
9749 &portstat_reg.word0) ||
9750 lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
9751 &portsmphr)){
9752 phba->work_hs |= UNPLUG_ERR;
9753 phba->work_ha |= HA_ERATT;
9754 phba->hba_flag |= HBA_ERATT_HANDLED;
9755 return 1;
9756 }
James Smart2fcee4b2010-12-15 17:57:46 -05009757 if (bf_get(lpfc_sliport_status_err, &portstat_reg)) {
9758 phba->work_status[0] =
9759 readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
9760 phba->work_status[1] =
9761 readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
9762 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9763 "2885 Port Error Detected: "
9764 "port status reg 0x%x, "
9765 "port smphr reg 0x%x, "
9766 "error 1=0x%x, error 2=0x%x\n",
9767 portstat_reg.word0,
9768 portsmphr,
9769 phba->work_status[0],
9770 phba->work_status[1]);
9771 phba->work_ha |= HA_ERATT;
9772 phba->hba_flag |= HBA_ERATT_HANDLED;
9773 return 1;
9774 }
9775 break;
9776 case LPFC_SLI_INTF_IF_TYPE_1:
9777 default:
James Smarta747c9c2009-11-18 15:41:10 -05009778 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
James Smart2fcee4b2010-12-15 17:57:46 -05009779 "2886 HBA Error Attention on unsupported "
9780 "if type %d.", if_type);
James Smarta747c9c2009-11-18 15:41:10 -05009781 return 1;
James Smartda0436e2009-05-22 14:51:39 -04009782 }
James Smart2fcee4b2010-12-15 17:57:46 -05009783
James Smartda0436e2009-05-22 14:51:39 -04009784 return 0;
9785}
9786
9787/**
James Smart3621a712009-04-06 18:47:14 -04009788 * lpfc_sli_check_eratt - check error attention events
James Smart93996272008-08-24 21:50:30 -04009789 * @phba: Pointer to HBA context.
9790 *
James Smart3772a992009-05-22 14:50:54 -04009791 * This function is called from timer soft interrupt context to check HBA's
James Smart93996272008-08-24 21:50:30 -04009792 * error attention register bit for error attention events.
9793 *
Lucas De Marchi25985ed2011-03-30 22:57:33 -03009794 * This function returns 1 when there is Error Attention in the Host Attention
James Smart93996272008-08-24 21:50:30 -04009795 * Register and returns 0 otherwise.
9796 **/
9797int
9798lpfc_sli_check_eratt(struct lpfc_hba *phba)
9799{
9800 uint32_t ha_copy;
9801
9802 /* If somebody is waiting to handle an eratt, don't process it
9803 * here. The brdkill function will do this.
9804 */
9805 if (phba->link_flag & LS_IGNORE_ERATT)
9806 return 0;
9807
9808 /* Check if interrupt handler handles this ERATT */
9809 spin_lock_irq(&phba->hbalock);
9810 if (phba->hba_flag & HBA_ERATT_HANDLED) {
9811 /* Interrupt handler has handled ERATT */
9812 spin_unlock_irq(&phba->hbalock);
9813 return 0;
9814 }
9815
James Smarta257bf92009-04-06 18:48:10 -04009816 /*
9817 * If there is deferred error attention, do not check for error
9818 * attention
9819 */
9820 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
9821 spin_unlock_irq(&phba->hbalock);
9822 return 0;
9823 }
9824
James Smart3772a992009-05-22 14:50:54 -04009825 /* If PCI channel is offline, don't process it */
9826 if (unlikely(pci_channel_offline(phba->pcidev))) {
James Smart93996272008-08-24 21:50:30 -04009827 spin_unlock_irq(&phba->hbalock);
James Smart3772a992009-05-22 14:50:54 -04009828 return 0;
9829 }
9830
9831 switch (phba->sli_rev) {
9832 case LPFC_SLI_REV2:
9833 case LPFC_SLI_REV3:
9834 /* Read chip Host Attention (HA) register */
9835 ha_copy = lpfc_sli_eratt_read(phba);
9836 break;
James Smartda0436e2009-05-22 14:51:39 -04009837 case LPFC_SLI_REV4:
James Smart2fcee4b2010-12-15 17:57:46 -05009838 /* Read device Unrecoverable Error (UERR) registers */
James Smartda0436e2009-05-22 14:51:39 -04009839 ha_copy = lpfc_sli4_eratt_read(phba);
9840 break;
James Smart3772a992009-05-22 14:50:54 -04009841 default:
9842 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9843 "0299 Invalid SLI revision (%d)\n",
9844 phba->sli_rev);
9845 ha_copy = 0;
9846 break;
James Smart93996272008-08-24 21:50:30 -04009847 }
9848 spin_unlock_irq(&phba->hbalock);
James Smart3772a992009-05-22 14:50:54 -04009849
9850 return ha_copy;
9851}
9852
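/*
 * Illustrative sketch (assumed poll-timer context, not a real driver entry
 * point, disabled with #if 0): a periodic error-attention poll can use the
 * return value of lpfc_sli_check_eratt() to decide whether to wake the
 * worker thread, which then services the HA_ERATT event.
 */
#if 0
static void example_eratt_poll(struct lpfc_hba *phba)
{
	if (lpfc_sli_check_eratt(phba))
		lpfc_worker_wake_up(phba);
}
#endif
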
9853/**
9854 * lpfc_intr_state_check - Check device state for interrupt handling
9855 * @phba: Pointer to HBA context.
9856 *
9857 * This inline routine checks whether a device or its PCI slot is in a state
9858 * in which the interrupt should be handled.
9859 *
9860 * This function returns 0 if the device or the PCI slot is in a state in
9861 * which the interrupt should be handled, otherwise -EIO.
9862 */
9863static inline int
9864lpfc_intr_state_check(struct lpfc_hba *phba)
9865{
9866 /* If the pci channel is offline, ignore all the interrupts */
9867 if (unlikely(pci_channel_offline(phba->pcidev)))
9868 return -EIO;
9869
9870 /* Update device level interrupt statistics */
9871 phba->sli.slistat.sli_intr++;
9872
9873 /* Ignore all interrupts during initialization. */
9874 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
9875 return -EIO;
9876
James Smart93996272008-08-24 21:50:30 -04009877 return 0;
9878}
9879
9880/**
James Smart3772a992009-05-22 14:50:54 -04009881 * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device
James Smarte59058c2008-08-24 21:49:00 -04009882 * @irq: Interrupt number.
9883 * @dev_id: The device context pointer.
9884 *
James Smart93996272008-08-24 21:50:30 -04009885 * This function is directly called from the PCI layer as an interrupt
James Smart3772a992009-05-22 14:50:54 -04009886 * service routine when device with SLI-3 interface spec is enabled with
9887 * MSI-X multi-message interrupt mode and there are slow-path events in
9888 * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
9889 * interrupt mode, this function is called as part of the device-level
9890 * interrupt handler. When the PCI slot is in error recovery or the HBA
9891 * is undergoing initialization, the interrupt handler will not process
9892 * the interrupt. The link attention and ELS ring attention events are
9893 * handled by the worker thread. The interrupt handler signals the worker
9894 * thread and returns for these events. This function is called without
9895 * any lock held. It gets the hbalock to access and update SLI data
James Smart93996272008-08-24 21:50:30 -04009896 * structures.
9897 *
9898 * This function returns IRQ_HANDLED when interrupt is handled else it
9899 * returns IRQ_NONE.
James Smarte59058c2008-08-24 21:49:00 -04009900 **/
dea31012005-04-17 16:05:31 -05009901irqreturn_t
James Smart3772a992009-05-22 14:50:54 -04009902lpfc_sli_sp_intr_handler(int irq, void *dev_id)
dea31012005-04-17 16:05:31 -05009903{
James Smart2e0fef82007-06-17 19:56:36 -05009904 struct lpfc_hba *phba;
James Smarta747c9c2009-11-18 15:41:10 -05009905 uint32_t ha_copy, hc_copy;
dea31012005-04-17 16:05:31 -05009906 uint32_t work_ha_copy;
9907 unsigned long status;
James Smart5b75da22008-12-04 22:39:35 -05009908 unsigned long iflag;
dea31012005-04-17 16:05:31 -05009909 uint32_t control;
9910
James Smart92d7f7b2007-06-17 19:56:38 -05009911 MAILBOX_t *mbox, *pmbox;
James Smart858c9f62007-06-17 19:56:39 -05009912 struct lpfc_vport *vport;
9913 struct lpfc_nodelist *ndlp;
9914 struct lpfc_dmabuf *mp;
James Smart92d7f7b2007-06-17 19:56:38 -05009915 LPFC_MBOXQ_t *pmb;
9916 int rc;
9917
dea31012005-04-17 16:05:31 -05009918 /*
9919 * Get the driver's phba structure from the dev_id and
9920 * assume the HBA is not interrupting.
9921 */
James Smart93996272008-08-24 21:50:30 -04009922 phba = (struct lpfc_hba *)dev_id;
dea31012005-04-17 16:05:31 -05009923
9924 if (unlikely(!phba))
9925 return IRQ_NONE;
9926
dea31012005-04-17 16:05:31 -05009927 /*
James Smart93996272008-08-24 21:50:30 -04009928 * Stuff needs to be attended to when this function is invoked as an
9929 * individual interrupt handler in MSI-X multi-message interrupt mode
dea31012005-04-17 16:05:31 -05009930 */
James Smart93996272008-08-24 21:50:30 -04009931 if (phba->intr_type == MSIX) {
James Smart3772a992009-05-22 14:50:54 -04009932 /* Check device state for handling interrupt */
9933 if (lpfc_intr_state_check(phba))
James Smart93996272008-08-24 21:50:30 -04009934 return IRQ_NONE;
9935 /* Need to read HA REG for slow-path events */
James Smart5b75da22008-12-04 22:39:35 -05009936 spin_lock_irqsave(&phba->hbalock, iflag);
James Smart9940b972011-03-11 16:06:12 -05009937 if (lpfc_readl(phba->HAregaddr, &ha_copy))
9938 goto unplug_error;
James Smart93996272008-08-24 21:50:30 -04009939 /* If somebody is waiting to handle an eratt don't process it
9940 * here. The brdkill function will do this.
9941 */
9942 if (phba->link_flag & LS_IGNORE_ERATT)
9943 ha_copy &= ~HA_ERATT;
9944 /* Check the need for handling ERATT in interrupt handler */
9945 if (ha_copy & HA_ERATT) {
9946 if (phba->hba_flag & HBA_ERATT_HANDLED)
9947 /* ERATT polling has handled ERATT */
9948 ha_copy &= ~HA_ERATT;
9949 else
9950 /* Indicate interrupt handler handles ERATT */
9951 phba->hba_flag |= HBA_ERATT_HANDLED;
9952 }
James Smarta257bf92009-04-06 18:48:10 -04009953
9954 /*
9955 * If there is deferred error attention, do not check for any
9956 * interrupt.
9957 */
9958 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
James Smart3772a992009-05-22 14:50:54 -04009959 spin_unlock_irqrestore(&phba->hbalock, iflag);
James Smarta257bf92009-04-06 18:48:10 -04009960 return IRQ_NONE;
9961 }
9962
James Smart93996272008-08-24 21:50:30 -04009963 /* Clear up only attention source related to slow-path */
James Smart9940b972011-03-11 16:06:12 -05009964 if (lpfc_readl(phba->HCregaddr, &hc_copy))
9965 goto unplug_error;
9966
James Smarta747c9c2009-11-18 15:41:10 -05009967 writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA |
9968 HC_LAINT_ENA | HC_ERINT_ENA),
9969 phba->HCregaddr);
James Smart93996272008-08-24 21:50:30 -04009970 writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)),
9971 phba->HAregaddr);
James Smarta747c9c2009-11-18 15:41:10 -05009972 writel(hc_copy, phba->HCregaddr);
James Smart93996272008-08-24 21:50:30 -04009973 readl(phba->HAregaddr); /* flush */
James Smart5b75da22008-12-04 22:39:35 -05009974 spin_unlock_irqrestore(&phba->hbalock, iflag);
James Smart93996272008-08-24 21:50:30 -04009975 } else
9976 ha_copy = phba->ha_copy;
dea31012005-04-17 16:05:31 -05009977
dea31012005-04-17 16:05:31 -05009978 work_ha_copy = ha_copy & phba->work_ha_mask;
9979
James Smart93996272008-08-24 21:50:30 -04009980 if (work_ha_copy) {
dea31012005-04-17 16:05:31 -05009981 if (work_ha_copy & HA_LATT) {
9982 if (phba->sli.sli_flag & LPFC_PROCESS_LA) {
9983 /*
9984 * Turn off Link Attention interrupts
9985 * until CLEAR_LA done
9986 */
James Smart5b75da22008-12-04 22:39:35 -05009987 spin_lock_irqsave(&phba->hbalock, iflag);
dea31012005-04-17 16:05:31 -05009988 phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
James Smart9940b972011-03-11 16:06:12 -05009989 if (lpfc_readl(phba->HCregaddr, &control))
9990 goto unplug_error;
dea31012005-04-17 16:05:31 -05009991 control &= ~HC_LAINT_ENA;
9992 writel(control, phba->HCregaddr);
9993 readl(phba->HCregaddr); /* flush */
James Smart5b75da22008-12-04 22:39:35 -05009994 spin_unlock_irqrestore(&phba->hbalock, iflag);
dea31012005-04-17 16:05:31 -05009995 }
9996 else
9997 work_ha_copy &= ~HA_LATT;
9998 }
9999
James Smart93996272008-08-24 21:50:30 -040010000 if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) {
James Smart858c9f62007-06-17 19:56:39 -050010001 /*
10002 * Turn off Slow Rings interrupts, LPFC_ELS_RING is
10003 * the only slow ring.
10004 */
10005 status = (work_ha_copy &
10006 (HA_RXMASK << (4*LPFC_ELS_RING)));
10007 status >>= (4*LPFC_ELS_RING);
10008 if (status & HA_RXMASK) {
James Smart5b75da22008-12-04 22:39:35 -050010009 spin_lock_irqsave(&phba->hbalock, iflag);
James Smart9940b972011-03-11 16:06:12 -050010010 if (lpfc_readl(phba->HCregaddr, &control))
10011 goto unplug_error;
James Smarta58cbd52007-08-02 11:09:43 -040010012
10013 lpfc_debugfs_slow_ring_trc(phba,
10014 "ISR slow ring: ctl:x%x stat:x%x isrcnt:x%x",
10015 control, status,
10016 (uint32_t)phba->sli.slistat.sli_intr);
10017
James Smart858c9f62007-06-17 19:56:39 -050010018 if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) {
James Smarta58cbd52007-08-02 11:09:43 -040010019 lpfc_debugfs_slow_ring_trc(phba,
10020 "ISR Disable ring:"
10021 "pwork:x%x hawork:x%x wait:x%x",
10022 phba->work_ha, work_ha_copy,
10023 (uint32_t)((unsigned long)
James Smart5e9d9b82008-06-14 22:52:53 -040010024 &phba->work_waitq));
James Smarta58cbd52007-08-02 11:09:43 -040010025
James Smart858c9f62007-06-17 19:56:39 -050010026 control &=
10027 ~(HC_R0INT_ENA << LPFC_ELS_RING);
dea31012005-04-17 16:05:31 -050010028 writel(control, phba->HCregaddr);
10029 readl(phba->HCregaddr); /* flush */
dea31012005-04-17 16:05:31 -050010030 }
James Smarta58cbd52007-08-02 11:09:43 -040010031 else {
10032 lpfc_debugfs_slow_ring_trc(phba,
10033 "ISR slow ring: pwork:"
10034 "x%x hawork:x%x wait:x%x",
10035 phba->work_ha, work_ha_copy,
10036 (uint32_t)((unsigned long)
James Smart5e9d9b82008-06-14 22:52:53 -040010037 &phba->work_waitq));
James Smarta58cbd52007-08-02 11:09:43 -040010038 }
James Smart5b75da22008-12-04 22:39:35 -050010039 spin_unlock_irqrestore(&phba->hbalock, iflag);
dea31012005-04-17 16:05:31 -050010040 }
10041 }
James Smart5b75da22008-12-04 22:39:35 -050010042 spin_lock_irqsave(&phba->hbalock, iflag);
James Smarta257bf92009-04-06 18:48:10 -040010043 if (work_ha_copy & HA_ERATT) {
James Smart9940b972011-03-11 16:06:12 -050010044 if (lpfc_sli_read_hs(phba))
10045 goto unplug_error;
James Smarta257bf92009-04-06 18:48:10 -040010046 /*
10047 * Check if a deferred error condition
10048 * is active
10049 */
10050 if ((HS_FFER1 & phba->work_hs) &&
10051 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
James Smartdcf2a4e2010-09-29 11:18:53 -040010052 HS_FFER6 | HS_FFER7 | HS_FFER8) &
10053 phba->work_hs)) {
James Smarta257bf92009-04-06 18:48:10 -040010054 phba->hba_flag |= DEFER_ERATT;
10055 /* Clear all interrupt enable conditions */
10056 writel(0, phba->HCregaddr);
10057 readl(phba->HCregaddr);
10058 }
10059 }
10060
James Smart93996272008-08-24 21:50:30 -040010061 if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) {
James Smart92d7f7b2007-06-17 19:56:38 -050010062 pmb = phba->sli.mbox_active;
James Smart04c68492009-05-22 14:52:52 -040010063 pmbox = &pmb->u.mb;
James Smart34b02dc2008-08-24 21:49:55 -040010064 mbox = phba->mbox;
James Smart858c9f62007-06-17 19:56:39 -050010065 vport = pmb->vport;
James Smart92d7f7b2007-06-17 19:56:38 -050010066
10067 /* First check out the status word */
10068 lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t));
10069 if (pmbox->mbxOwner != OWN_HOST) {
James Smart5b75da22008-12-04 22:39:35 -050010070 spin_unlock_irqrestore(&phba->hbalock, iflag);
James Smart92d7f7b2007-06-17 19:56:38 -050010071 /*
10072 * Stray Mailbox Interrupt, mbxCommand <cmd>
10073 * mbxStatus <status>
10074 */
James Smart09372822008-01-11 01:52:54 -050010075 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
James Smart92d7f7b2007-06-17 19:56:38 -050010076 LOG_SLI,
James Smarte8b62012007-08-02 11:10:09 -040010077 "(%d):0304 Stray Mailbox "
James Smart92d7f7b2007-06-17 19:56:38 -050010078 "Interrupt mbxCommand x%x "
10079 "mbxStatus x%x\n",
James Smarte8b62012007-08-02 11:10:09 -040010080 (vport ? vport->vpi : 0),
James Smart92d7f7b2007-06-17 19:56:38 -050010081 pmbox->mbxCommand,
10082 pmbox->mbxStatus);
James Smart09372822008-01-11 01:52:54 -050010083 /* clear mailbox attention bit */
10084 work_ha_copy &= ~HA_MBATT;
10085 } else {
James Smart97eab632008-04-07 10:16:05 -040010086 phba->sli.mbox_active = NULL;
James Smart5b75da22008-12-04 22:39:35 -050010087 spin_unlock_irqrestore(&phba->hbalock, iflag);
James Smart09372822008-01-11 01:52:54 -050010088 phba->last_completion_time = jiffies;
10089 del_timer(&phba->sli.mbox_tmo);
James Smart09372822008-01-11 01:52:54 -050010090 if (pmb->mbox_cmpl) {
10091 lpfc_sli_pcimem_bcopy(mbox, pmbox,
10092 MAILBOX_CMD_SIZE);
James Smart7a470272010-03-15 11:25:20 -040010093 if (pmb->out_ext_byte_len &&
10094 pmb->context2)
10095 lpfc_sli_pcimem_bcopy(
10096 phba->mbox_ext,
10097 pmb->context2,
10098 pmb->out_ext_byte_len);
James Smart858c9f62007-06-17 19:56:39 -050010099 }
James Smart09372822008-01-11 01:52:54 -050010100 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
10101 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
10102
10103 lpfc_debugfs_disc_trc(vport,
10104 LPFC_DISC_TRC_MBOX_VPORT,
10105 "MBOX dflt rpi: : "
10106 "status:x%x rpi:x%x",
10107 (uint32_t)pmbox->mbxStatus,
10108 pmbox->un.varWords[0], 0);
10109
10110 if (!pmbox->mbxStatus) {
10111 mp = (struct lpfc_dmabuf *)
10112 (pmb->context1);
10113 ndlp = (struct lpfc_nodelist *)
10114 pmb->context2;
10115
10116 /* Reg_LOGIN of dflt RPI was
10117 * successful. Now let's get
10118 * rid of the RPI using the
10119 * same mbox buffer.
10120 */
10121 lpfc_unreg_login(phba,
10122 vport->vpi,
10123 pmbox->un.varWords[0],
10124 pmb);
10125 pmb->mbox_cmpl =
10126 lpfc_mbx_cmpl_dflt_rpi;
10127 pmb->context1 = mp;
10128 pmb->context2 = ndlp;
10129 pmb->vport = vport;
James Smart58da1ff2008-04-07 10:15:56 -040010130 rc = lpfc_sli_issue_mbox(phba,
10131 pmb,
10132 MBX_NOWAIT);
10133 if (rc != MBX_BUSY)
10134 lpfc_printf_log(phba,
10135 KERN_ERR,
10136 LOG_MBOX | LOG_SLI,
James Smartd7c255b2008-08-24 21:50:00 -040010137 "0350 rc should have "
James Smart6a9c52c2009-10-02 15:16:51 -040010138 "been MBX_BUSY\n");
James Smart3772a992009-05-22 14:50:54 -040010139 if (rc != MBX_NOT_FINISHED)
10140 goto send_current_mbox;
James Smart09372822008-01-11 01:52:54 -050010141 }
10142 }
James Smart5b75da22008-12-04 22:39:35 -050010143 spin_lock_irqsave(
10144 &phba->pport->work_port_lock,
10145 iflag);
James Smart09372822008-01-11 01:52:54 -050010146 phba->pport->work_port_events &=
10147 ~WORKER_MBOX_TMO;
James Smart5b75da22008-12-04 22:39:35 -050010148 spin_unlock_irqrestore(
10149 &phba->pport->work_port_lock,
10150 iflag);
James Smart09372822008-01-11 01:52:54 -050010151 lpfc_mbox_cmpl_put(phba, pmb);
James Smart858c9f62007-06-17 19:56:39 -050010152 }
James Smart97eab632008-04-07 10:16:05 -040010153 } else
James Smart5b75da22008-12-04 22:39:35 -050010154 spin_unlock_irqrestore(&phba->hbalock, iflag);
James Smart93996272008-08-24 21:50:30 -040010155
James Smart92d7f7b2007-06-17 19:56:38 -050010156 if ((work_ha_copy & HA_MBATT) &&
10157 (phba->sli.mbox_active == NULL)) {
James Smart858c9f62007-06-17 19:56:39 -050010158send_current_mbox:
James Smart92d7f7b2007-06-17 19:56:38 -050010159 /* Process next mailbox command if there is one */
James Smart58da1ff2008-04-07 10:15:56 -040010160 do {
10161 rc = lpfc_sli_issue_mbox(phba, NULL,
10162 MBX_NOWAIT);
10163 } while (rc == MBX_NOT_FINISHED);
10164 if (rc != MBX_SUCCESS)
10165 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
10166 LOG_SLI, "0349 rc should be "
James Smart6a9c52c2009-10-02 15:16:51 -040010167 "MBX_SUCCESS\n");
James Smart92d7f7b2007-06-17 19:56:38 -050010168 }
10169
James Smart5b75da22008-12-04 22:39:35 -050010170 spin_lock_irqsave(&phba->hbalock, iflag);
dea31012005-04-17 16:05:31 -050010171 phba->work_ha |= work_ha_copy;
James Smart5b75da22008-12-04 22:39:35 -050010172 spin_unlock_irqrestore(&phba->hbalock, iflag);
James Smart5e9d9b82008-06-14 22:52:53 -040010173 lpfc_worker_wake_up(phba);
dea31012005-04-17 16:05:31 -050010174 }
James Smart93996272008-08-24 21:50:30 -040010175 return IRQ_HANDLED;
James Smart9940b972011-03-11 16:06:12 -050010176unplug_error:
10177 spin_unlock_irqrestore(&phba->hbalock, iflag);
10178 return IRQ_HANDLED;
dea31012005-04-17 16:05:31 -050010179
James Smart3772a992009-05-22 14:50:54 -040010180} /* lpfc_sli_sp_intr_handler */
James Smart93996272008-08-24 21:50:30 -040010181
10182/**
James Smart3772a992009-05-22 14:50:54 -040010183 * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device.
James Smart93996272008-08-24 21:50:30 -040010184 * @irq: Interrupt number.
10185 * @dev_id: The device context pointer.
10186 *
10187 * This function is directly called from the PCI layer as an interrupt
James Smart3772a992009-05-22 14:50:54 -040010188 * service routine when device with SLI-3 interface spec is enabled with
10189 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
10190 * ring event in the HBA. However, when the device is enabled with either
10191 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
10192 * device-level interrupt handler. When the PCI slot is in error recovery
10193 * or the HBA is undergoing initialization, the interrupt handler will not
10194 * process the interrupt. The SCSI FCP fast-path ring event are handled in
10195 * the intrrupt context. This function is called without any lock held.
10196 * It gets the hbalock to access and update SLI data structures.
James Smart93996272008-08-24 21:50:30 -040010197 *
10198 * This function returns IRQ_HANDLED when interrupt is handled else it
10199 * returns IRQ_NONE.
10200 **/
10201irqreturn_t
James Smart3772a992009-05-22 14:50:54 -040010202lpfc_sli_fp_intr_handler(int irq, void *dev_id)
James Smart93996272008-08-24 21:50:30 -040010203{
10204 struct lpfc_hba *phba;
10205 uint32_t ha_copy;
10206 unsigned long status;
James Smart5b75da22008-12-04 22:39:35 -050010207 unsigned long iflag;
James Smart93996272008-08-24 21:50:30 -040010208
10209 /* Get the driver's phba structure from the dev_id and
10210 * assume the HBA is not interrupting.
10211 */
10212 phba = (struct lpfc_hba *) dev_id;
10213
10214 if (unlikely(!phba))
10215 return IRQ_NONE;
dea31012005-04-17 16:05:31 -050010216
10217 /*
James Smart93996272008-08-24 21:50:30 -040010218 * Stuff needs to be attended to when this function is invoked as an
10219 * individual interrupt handler in MSI-X multi-message interrupt mode
dea31012005-04-17 16:05:31 -050010220 */
James Smart93996272008-08-24 21:50:30 -040010221 if (phba->intr_type == MSIX) {
James Smart3772a992009-05-22 14:50:54 -040010222 /* Check device state for handling interrupt */
10223 if (lpfc_intr_state_check(phba))
James Smart93996272008-08-24 21:50:30 -040010224 return IRQ_NONE;
10225 /* Need to read HA REG for FCP ring and other ring events */
James Smart9940b972011-03-11 16:06:12 -050010226 if (lpfc_readl(phba->HAregaddr, &ha_copy))
10227 return IRQ_HANDLED;
James Smart93996272008-08-24 21:50:30 -040010228 /* Clear up only attention source related to fast-path */
James Smart5b75da22008-12-04 22:39:35 -050010229 spin_lock_irqsave(&phba->hbalock, iflag);
James Smarta257bf92009-04-06 18:48:10 -040010230 /*
10231 * If there is deferred error attention, do not check for
10232 * any interrupt.
10233 */
10234 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
James Smart3772a992009-05-22 14:50:54 -040010235 spin_unlock_irqrestore(&phba->hbalock, iflag);
James Smarta257bf92009-04-06 18:48:10 -040010236 return IRQ_NONE;
10237 }
James Smart93996272008-08-24 21:50:30 -040010238 writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)),
10239 phba->HAregaddr);
10240 readl(phba->HAregaddr); /* flush */
James Smart5b75da22008-12-04 22:39:35 -050010241 spin_unlock_irqrestore(&phba->hbalock, iflag);
James Smart93996272008-08-24 21:50:30 -040010242 } else
10243 ha_copy = phba->ha_copy;
10244
10245 /*
10246 * Process all events on FCP ring. Take the optimized path for FCP IO.
10247 */
10248 ha_copy &= ~(phba->work_ha_mask);
10249
10250 status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
dea31012005-04-17 16:05:31 -050010251 status >>= (4*LPFC_FCP_RING);
James Smart858c9f62007-06-17 19:56:39 -050010252 if (status & HA_RXMASK)
dea31012005-04-17 16:05:31 -050010253 lpfc_sli_handle_fast_ring_event(phba,
10254 &phba->sli.ring[LPFC_FCP_RING],
10255 status);
James Smarta4bc3372006-12-02 13:34:16 -050010256
10257 if (phba->cfg_multi_ring_support == 2) {
10258 /*
James Smart93996272008-08-24 21:50:30 -040010259 * Process all events on extra ring. Take the optimized path
10260 * for extra ring IO.
James Smarta4bc3372006-12-02 13:34:16 -050010261 */
James Smart93996272008-08-24 21:50:30 -040010262 status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
James Smarta4bc3372006-12-02 13:34:16 -050010263 status >>= (4*LPFC_EXTRA_RING);
James Smart858c9f62007-06-17 19:56:39 -050010264 if (status & HA_RXMASK) {
James Smarta4bc3372006-12-02 13:34:16 -050010265 lpfc_sli_handle_fast_ring_event(phba,
10266 &phba->sli.ring[LPFC_EXTRA_RING],
10267 status);
10268 }
10269 }
dea31012005-04-17 16:05:31 -050010270 return IRQ_HANDLED;
James Smart3772a992009-05-22 14:50:54 -040010271} /* lpfc_sli_fp_intr_handler */
dea31012005-04-17 16:05:31 -050010272
James Smart93996272008-08-24 21:50:30 -040010273/**
James Smart3772a992009-05-22 14:50:54 -040010274 * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device
James Smart93996272008-08-24 21:50:30 -040010275 * @irq: Interrupt number.
10276 * @dev_id: The device context pointer.
10277 *
James Smart3772a992009-05-22 14:50:54 -040010278 * This function is the HBA device-level interrupt handler to device with
10279 * SLI-3 interface spec, called from the PCI layer when either MSI or
10280 * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which
10281 * requires driver attention. This function invokes the slow-path interrupt
10282 * attention handling function and fast-path interrupt attention handling
10283 * function in turn to process the relevant HBA attention events. This
10284 * function is called without any lock held. It gets the hbalock to access
10285 * and update SLI data structures.
James Smart93996272008-08-24 21:50:30 -040010286 *
10287 * This function returns IRQ_HANDLED when interrupt is handled, else it
10288 * returns IRQ_NONE.
10289 **/
10290irqreturn_t
James Smart3772a992009-05-22 14:50:54 -040010291lpfc_sli_intr_handler(int irq, void *dev_id)
James Smart93996272008-08-24 21:50:30 -040010292{
10293 struct lpfc_hba *phba;
10294 irqreturn_t sp_irq_rc, fp_irq_rc;
10295 unsigned long status1, status2;
James Smarta747c9c2009-11-18 15:41:10 -050010296 uint32_t hc_copy;
James Smart93996272008-08-24 21:50:30 -040010297
10298 /*
10299 * Get the driver's phba structure from the dev_id and
10300 * assume the HBA is not interrupting.
10301 */
10302 phba = (struct lpfc_hba *) dev_id;
10303
10304 if (unlikely(!phba))
10305 return IRQ_NONE;
10306
James Smart3772a992009-05-22 14:50:54 -040010307 /* Check device state for handling interrupt */
10308 if (lpfc_intr_state_check(phba))
James Smart93996272008-08-24 21:50:30 -040010309 return IRQ_NONE;
10310
10311 spin_lock(&phba->hbalock);
James Smart9940b972011-03-11 16:06:12 -050010312 if (lpfc_readl(phba->HAregaddr, &phba->ha_copy)) {
10313 spin_unlock(&phba->hbalock);
10314 return IRQ_HANDLED;
10315 }
10316
James Smart93996272008-08-24 21:50:30 -040010317 if (unlikely(!phba->ha_copy)) {
10318 spin_unlock(&phba->hbalock);
10319 return IRQ_NONE;
10320 } else if (phba->ha_copy & HA_ERATT) {
10321 if (phba->hba_flag & HBA_ERATT_HANDLED)
10322 /* ERATT polling has handled ERATT */
10323 phba->ha_copy &= ~HA_ERATT;
10324 else
10325 /* Indicate interrupt handler handles ERATT */
10326 phba->hba_flag |= HBA_ERATT_HANDLED;
10327 }
10328
James Smarta257bf92009-04-06 18:48:10 -040010329 /*
10330 * If there is deferred error attention, do not check for any interrupt.
10331 */
10332 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
Dan Carpenterec21b3b2010-08-08 00:15:17 +020010333 spin_unlock(&phba->hbalock);
James Smarta257bf92009-04-06 18:48:10 -040010334 return IRQ_NONE;
10335 }
10336
James Smart93996272008-08-24 21:50:30 -040010337 /* Clear attention sources except link and error attentions */
James Smart9940b972011-03-11 16:06:12 -050010338 if (lpfc_readl(phba->HCregaddr, &hc_copy)) {
10339 spin_unlock(&phba->hbalock);
10340 return IRQ_HANDLED;
10341 }
James Smarta747c9c2009-11-18 15:41:10 -050010342 writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA
10343 | HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA),
10344 phba->HCregaddr);
James Smart93996272008-08-24 21:50:30 -040010345 writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
James Smarta747c9c2009-11-18 15:41:10 -050010346 writel(hc_copy, phba->HCregaddr);
James Smart93996272008-08-24 21:50:30 -040010347 readl(phba->HAregaddr); /* flush */
10348 spin_unlock(&phba->hbalock);
10349
10350 /*
10351 * Invokes slow-path host attention interrupt handling as appropriate.
10352 */
10353
10354 /* status of events with mailbox and link attention */
10355 status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT);
10356
10357 /* status of events with ELS ring */
10358 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
10359 status2 >>= (4*LPFC_ELS_RING);
10360
10361 if (status1 || (status2 & HA_RXMASK))
James Smart3772a992009-05-22 14:50:54 -040010362 sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id);
James Smart93996272008-08-24 21:50:30 -040010363 else
10364 sp_irq_rc = IRQ_NONE;
10365
10366 /*
10367 * Invoke fast-path host attention interrupt handling as appropriate.
10368 */
10369
10370 /* status of events with FCP ring */
10371 status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
10372 status1 >>= (4*LPFC_FCP_RING);
10373
10374 /* status of events with extra ring */
10375 if (phba->cfg_multi_ring_support == 2) {
10376 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
10377 status2 >>= (4*LPFC_EXTRA_RING);
10378 } else
10379 status2 = 0;
10380
10381 if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK))
James Smart3772a992009-05-22 14:50:54 -040010382 fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id);
James Smart93996272008-08-24 21:50:30 -040010383 else
10384 fp_irq_rc = IRQ_NONE;
10385
10386 /* Return device-level interrupt handling status */
10387 return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc;
James Smart3772a992009-05-22 14:50:54 -040010388} /* lpfc_sli_intr_handler */
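/*
 * Illustrative note, not part of the original file: in MSI or INTx mode the
 * initialization path is assumed to register this device-level handler
 * directly with the kernel, roughly
 *
 *	rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
 *			 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
 *
 * so that a single vector funnels both slow-path and fast-path attention
 * events through this routine. LPFC_DRIVER_NAME is assumed to be the
 * driver's registered name.
 */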
James Smart4f774512009-05-22 14:52:35 -040010389
10390/**
10391 * lpfc_sli4_fcp_xri_abort_event_proc - Process fcp xri abort event
10392 * @phba: pointer to lpfc hba data structure.
10393 *
10394 * This routine is invoked by the worker thread to process all the pending
10395 * SLI4 FCP abort XRI events.
10396 **/
10397void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *phba)
10398{
10399 struct lpfc_cq_event *cq_event;
10400
10401 /* First, declare the fcp xri abort event has been handled */
10402 spin_lock_irq(&phba->hbalock);
10403 phba->hba_flag &= ~FCP_XRI_ABORT_EVENT;
10404 spin_unlock_irq(&phba->hbalock);
10405 /* Now, handle all the fcp xri abort events */
10406 while (!list_empty(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue)) {
10407 /* Get the first event from the head of the event queue */
10408 spin_lock_irq(&phba->hbalock);
10409 list_remove_head(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
10410 cq_event, struct lpfc_cq_event, list);
10411 spin_unlock_irq(&phba->hbalock);
10412 /* Notify aborted XRI for FCP work queue */
10413 lpfc_sli4_fcp_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
10414 /* Free the event processed back to the free pool */
10415 lpfc_sli4_cq_event_release(phba, cq_event);
10416 }
10417}
10418
10419/**
10420 * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event
10421 * @phba: pointer to lpfc hba data structure.
10422 *
10423 * This routine is invoked by the worker thread to process all the pending
10424 * SLI4 els abort xri events.
10425 **/
10426void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
10427{
10428 struct lpfc_cq_event *cq_event;
10429
10430 /* First, declare the els xri abort event has been handled */
10431 spin_lock_irq(&phba->hbalock);
10432 phba->hba_flag &= ~ELS_XRI_ABORT_EVENT;
10433 spin_unlock_irq(&phba->hbalock);
10434 /* Now, handle all the els xri abort events */
10435 while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) {
10436 /* Get the first event from the head of the event queue */
10437 spin_lock_irq(&phba->hbalock);
10438 list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
10439 cq_event, struct lpfc_cq_event, list);
10440 spin_unlock_irq(&phba->hbalock);
10441 /* Notify aborted XRI for ELS work queue */
10442 lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
10443 /* Free the event processed back to the free pool */
10444 lpfc_sli4_cq_event_release(phba, cq_event);
10445 }
10446}
10447
James Smart341af102010-01-26 23:07:37 -050010448/**
10449 * lpfc_sli4_iocb_param_transfer - Transfer pIocbOut and cmpl status to pIocbIn
10450 * @phba: pointer to lpfc hba data structure
10451 * @pIocbIn: pointer to the rspiocbq
10452 * @pIocbOut: pointer to the cmdiocbq
10453 * @wcqe: pointer to the complete wcqe
10454 *
10455 * This routine transfers the fields of a command iocbq to a response iocbq
10456 * by copying all the IOCB fields from command iocbq and transferring the
10457 * completion status information from the complete wcqe.
10458 **/
James Smart4f774512009-05-22 14:52:35 -040010459static void
James Smart341af102010-01-26 23:07:37 -050010460lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba,
10461 struct lpfc_iocbq *pIocbIn,
James Smart4f774512009-05-22 14:52:35 -040010462 struct lpfc_iocbq *pIocbOut,
10463 struct lpfc_wcqe_complete *wcqe)
10464{
James Smart341af102010-01-26 23:07:37 -050010465 unsigned long iflags;
James Smart4f774512009-05-22 14:52:35 -040010466 size_t offset = offsetof(struct lpfc_iocbq, iocb);
10467
10468 memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
10469 sizeof(struct lpfc_iocbq) - offset);
James Smart4f774512009-05-22 14:52:35 -040010470 /* Map WCQE parameters into irspiocb parameters */
10471 pIocbIn->iocb.ulpStatus = bf_get(lpfc_wcqe_c_status, wcqe);
10472 if (pIocbOut->iocb_flag & LPFC_IO_FCP)
10473 if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
10474 pIocbIn->iocb.un.fcpi.fcpi_parm =
10475 pIocbOut->iocb.un.fcpi.fcpi_parm -
10476 wcqe->total_data_placed;
10477 else
10478 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
James Smart695a8142010-01-26 23:08:03 -050010479 else {
James Smart4f774512009-05-22 14:52:35 -040010480 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
James Smart695a8142010-01-26 23:08:03 -050010481 pIocbIn->iocb.un.genreq64.bdl.bdeSize = wcqe->total_data_placed;
10482 }
James Smart341af102010-01-26 23:07:37 -050010483
10484 /* Pick up HBA exchange busy condition */
10485 if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
10486 spin_lock_irqsave(&phba->hbalock, iflags);
10487 pIocbIn->iocb_flag |= LPFC_EXCHANGE_BUSY;
10488 spin_unlock_irqrestore(&phba->hbalock, iflags);
10489 }
James Smart4f774512009-05-22 14:52:35 -040010490}
10491
10492/**
James Smart45ed1192009-10-02 15:17:02 -040010493 * lpfc_sli4_els_wcqe_to_rspiocbq - Get response iocbq from els wcqe
10494 * @phba: Pointer to HBA context object.
10495 * @irspiocbq: Pointer to the response IOCBQ that carries the received WCQE.
10496 *
10497 * This routine handles an ELS work-queue completion event and constructs
10498 * a pseudo response ELS IOCBQ from the SLI4 ELS WCQE for the common
10499 * discovery engine to handle.
10500 *
10501 * Return: Pointer to the receive IOCBQ, NULL otherwise.
10502 **/
10503static struct lpfc_iocbq *
10504lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
10505 struct lpfc_iocbq *irspiocbq)
10506{
10507 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
10508 struct lpfc_iocbq *cmdiocbq;
10509 struct lpfc_wcqe_complete *wcqe;
10510 unsigned long iflags;
10511
10512 wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
10513 spin_lock_irqsave(&phba->hbalock, iflags);
10514 pring->stats.iocb_event++;
10515 /* Look up the ELS command IOCB and create pseudo response IOCB */
10516 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
10517 bf_get(lpfc_wcqe_c_request_tag, wcqe));
10518 spin_unlock_irqrestore(&phba->hbalock, iflags);
10519
10520 if (unlikely(!cmdiocbq)) {
10521 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
10522 "0386 ELS complete with no corresponding "
10523 "cmdiocb: iotag (%d)\n",
10524 bf_get(lpfc_wcqe_c_request_tag, wcqe));
10525 lpfc_sli_release_iocbq(phba, irspiocbq);
10526 return NULL;
10527 }
10528
10529 /* Fake the irspiocbq and copy necessary response information */
James Smart341af102010-01-26 23:07:37 -050010530 lpfc_sli4_iocb_param_transfer(phba, irspiocbq, cmdiocbq, wcqe);
James Smart45ed1192009-10-02 15:17:02 -040010531
10532 return irspiocbq;
10533}
10534
10535/**
James Smart04c68492009-05-22 14:52:52 -040010536 * lpfc_sli4_sp_handle_async_event - Handle an asynchronous event
10537 * @phba: Pointer to HBA context object.
10538 * @mcqe: Pointer to mailbox completion queue entry.
10539 *
10540 * This routine processes a mailbox completion queue entry with an
10541 * asynchronous event.
10542 *
10543 * Return: true if work posted to worker thread, otherwise false.
10544 **/
10545static bool
10546lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
10547{
10548 struct lpfc_cq_event *cq_event;
10549 unsigned long iflags;
10550
10551 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
10552 "0392 Async Event: word0:x%x, word1:x%x, "
10553 "word2:x%x, word3:x%x\n", mcqe->word0,
10554 mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer);
10555
10556 /* Allocate a new internal CQ_EVENT entry */
10557 cq_event = lpfc_sli4_cq_event_alloc(phba);
10558 if (!cq_event) {
10559 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
10560 "0394 Failed to allocate CQ_EVENT entry\n");
10561 return false;
10562 }
10563
10564 /* Move the CQE into an asynchronous event entry */
10565 memcpy(&cq_event->cqe, mcqe, sizeof(struct lpfc_mcqe));
10566 spin_lock_irqsave(&phba->hbalock, iflags);
10567 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
10568 /* Set the async event flag */
10569 phba->hba_flag |= ASYNC_EVENT;
10570 spin_unlock_irqrestore(&phba->hbalock, iflags);
10571
10572 return true;
10573}
10574
10575/**
10576 * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event
10577 * @phba: Pointer to HBA context object.
10578 * @mcqe: Pointer to mailbox completion queue entry.
10579 *
10580 * This routine processes a mailbox completion queue entry with a mailbox
10581 * completion event.
10582 *
10583 * Return: true if work posted to worker thread, otherwise false.
10584 **/
10585static bool
10586lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
10587{
10588 uint32_t mcqe_status;
10589 MAILBOX_t *mbox, *pmbox;
10590 struct lpfc_mqe *mqe;
10591 struct lpfc_vport *vport;
10592 struct lpfc_nodelist *ndlp;
10593 struct lpfc_dmabuf *mp;
10594 unsigned long iflags;
10595 LPFC_MBOXQ_t *pmb;
10596 bool workposted = false;
10597 int rc;
10598
10599	/* If not a mailbox complete MCQE, bail out after checking mailbox consume */
10600 if (!bf_get(lpfc_trailer_completed, mcqe))
10601 goto out_no_mqe_complete;
10602
10603 /* Get the reference to the active mbox command */
10604 spin_lock_irqsave(&phba->hbalock, iflags);
10605 pmb = phba->sli.mbox_active;
10606 if (unlikely(!pmb)) {
10607 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
10608 "1832 No pending MBOX command to handle\n");
10609 spin_unlock_irqrestore(&phba->hbalock, iflags);
10610 goto out_no_mqe_complete;
10611 }
10612 spin_unlock_irqrestore(&phba->hbalock, iflags);
10613 mqe = &pmb->u.mqe;
10614 pmbox = (MAILBOX_t *)&pmb->u.mqe;
10615 mbox = phba->mbox;
10616 vport = pmb->vport;
10617
10618 /* Reset heartbeat timer */
10619 phba->last_completion_time = jiffies;
10620 del_timer(&phba->sli.mbox_tmo);
10621
10622 /* Move mbox data to caller's mailbox region, do endian swapping */
10623 if (pmb->mbox_cmpl && mbox)
10624 lpfc_sli_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe));
James Smart04c68492009-05-22 14:52:52 -040010625
James Smart73d91e52011-10-10 21:32:10 -040010626 /*
10627 * For mcqe errors, conditionally move a modified error code to
10628 * the mbox so that the error will not be missed.
10629 */
10630 mcqe_status = bf_get(lpfc_mcqe_status, mcqe);
10631 if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
10632 if (bf_get(lpfc_mqe_status, mqe) == MBX_SUCCESS)
10633 bf_set(lpfc_mqe_status, mqe,
10634 (LPFC_MBX_ERROR_RANGE | mcqe_status));
10635 }
James Smart04c68492009-05-22 14:52:52 -040010636 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
10637 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
10638 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT,
10639 "MBOX dflt rpi: status:x%x rpi:x%x",
10640 mcqe_status,
10641 pmbox->un.varWords[0], 0);
10642 if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
10643 mp = (struct lpfc_dmabuf *)(pmb->context1);
10644 ndlp = (struct lpfc_nodelist *)pmb->context2;
10645			/* Reg_LOGIN of dflt RPI was successful. Now let's get
10646			 * rid of the RPI using the same mbox buffer.
10647 */
10648 lpfc_unreg_login(phba, vport->vpi,
10649 pmbox->un.varWords[0], pmb);
10650 pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
10651 pmb->context1 = mp;
10652 pmb->context2 = ndlp;
10653 pmb->vport = vport;
10654 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
10655 if (rc != MBX_BUSY)
10656 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
10657 LOG_SLI, "0385 rc should "
10658 "have been MBX_BUSY\n");
10659 if (rc != MBX_NOT_FINISHED)
10660 goto send_current_mbox;
10661 }
10662 }
10663 spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
10664 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
10665 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
10666
10667 /* There is mailbox completion work to do */
10668 spin_lock_irqsave(&phba->hbalock, iflags);
10669 __lpfc_mbox_cmpl_put(phba, pmb);
10670 phba->work_ha |= HA_MBATT;
10671 spin_unlock_irqrestore(&phba->hbalock, iflags);
10672 workposted = true;
10673
10674send_current_mbox:
10675 spin_lock_irqsave(&phba->hbalock, iflags);
10676 /* Release the mailbox command posting token */
10677 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
10678	/* Setting the active mailbox pointer needs to be in sync with clearing the flag */
10679 phba->sli.mbox_active = NULL;
10680 spin_unlock_irqrestore(&phba->hbalock, iflags);
10681 /* Wake up worker thread to post the next pending mailbox command */
10682 lpfc_worker_wake_up(phba);
10683out_no_mqe_complete:
10684 if (bf_get(lpfc_trailer_consumed, mcqe))
10685 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
10686 return workposted;
10687}
10688
10689/**
10690 * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry
10691 * @phba: Pointer to HBA context object.
10692 * @cqe: Pointer to mailbox completion queue entry.
10693 *
10694 * This routine processes a mailbox completion queue entry; it invokes the
10695 * proper mailbox completion handling or asynchronous event handling routine
10696 * according to the MCQE's async bit.
10697 *
10698 * Return: true if work posted to worker thread, otherwise false.
10699 **/
10700static bool
10701lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe)
10702{
10703 struct lpfc_mcqe mcqe;
10704 bool workposted;
10705
10706 /* Copy the mailbox MCQE and convert endian order as needed */
10707 lpfc_sli_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe));
10708
10709 /* Invoke the proper event handling routine */
10710 if (!bf_get(lpfc_trailer_async, &mcqe))
10711 workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe);
10712 else
10713 workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe);
10714 return workposted;
10715}
10716
10717/**
James Smart4f774512009-05-22 14:52:35 -040010718 * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event
10719 * @phba: Pointer to HBA context object.
10720 * @wcqe: Pointer to work-queue completion queue entry.
10721 *
10722 * This routine handles an ELS work-queue completion event.
10723 *
10724 * Return: true if work posted to worker thread, otherwise false.
10725 **/
10726static bool
10727lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba,
10728 struct lpfc_wcqe_complete *wcqe)
10729{
James Smart4f774512009-05-22 14:52:35 -040010730 struct lpfc_iocbq *irspiocbq;
10731 unsigned long iflags;
James Smart2a9bf3d2010-06-07 15:24:45 -040010732 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_FCP_RING];
James Smart4f774512009-05-22 14:52:35 -040010733
James Smart45ed1192009-10-02 15:17:02 -040010734 /* Get an irspiocbq for later ELS response processing use */
James Smart4f774512009-05-22 14:52:35 -040010735 irspiocbq = lpfc_sli_get_iocbq(phba);
10736 if (!irspiocbq) {
10737 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
James Smart2a9bf3d2010-06-07 15:24:45 -040010738 "0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d "
10739 "fcp_txcmplq_cnt=%d, els_txcmplq_cnt=%d\n",
10740 pring->txq_cnt, phba->iocb_cnt,
10741 phba->sli.ring[LPFC_FCP_RING].txcmplq_cnt,
10742 phba->sli.ring[LPFC_ELS_RING].txcmplq_cnt);
James Smart45ed1192009-10-02 15:17:02 -040010743 return false;
James Smart4f774512009-05-22 14:52:35 -040010744 }
James Smart4f774512009-05-22 14:52:35 -040010745
James Smart45ed1192009-10-02 15:17:02 -040010746 /* Save off the slow-path queue event for work thread to process */
10747 memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe));
James Smart4f774512009-05-22 14:52:35 -040010748 spin_lock_irqsave(&phba->hbalock, iflags);
James Smart4d9ab992009-10-02 15:16:39 -040010749 list_add_tail(&irspiocbq->cq_event.list,
James Smart45ed1192009-10-02 15:17:02 -040010750 &phba->sli4_hba.sp_queue_event);
10751 phba->hba_flag |= HBA_SP_QUEUE_EVT;
James Smart4f774512009-05-22 14:52:35 -040010752 spin_unlock_irqrestore(&phba->hbalock, iflags);
James Smart4f774512009-05-22 14:52:35 -040010753
James Smart45ed1192009-10-02 15:17:02 -040010754 return true;
James Smart4f774512009-05-22 14:52:35 -040010755}
10756
10757/**
10758 * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event
10759 * @phba: Pointer to HBA context object.
10760 * @wcqe: Pointer to work-queue completion queue entry.
10761 *
10762 * This routine handles a slow-path WQ entry consumed event by invoking the
10763 * proper WQ release routine to the slow-path WQ.
10764 **/
10765static void
10766lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba,
10767 struct lpfc_wcqe_release *wcqe)
10768{
10769 /* Check for the slow-path ELS work queue */
10770 if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id)
10771 lpfc_sli4_wq_release(phba->sli4_hba.els_wq,
10772 bf_get(lpfc_wcqe_r_wqe_index, wcqe));
10773 else
10774 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
10775 "2579 Slow-path wqe consume event carries "
10776 "miss-matched qid: wcqe-qid=x%x, sp-qid=x%x\n",
10777 bf_get(lpfc_wcqe_r_wqe_index, wcqe),
10778 phba->sli4_hba.els_wq->queue_id);
10779}
10780
10781/**
10782 * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle a xri abort event
10783 * @phba: Pointer to HBA context object.
10784 * @cq: Pointer to a WQ completion queue.
10785 * @wcqe: Pointer to work-queue completion queue entry.
10786 *
10787 * This routine handles an XRI abort event.
10788 *
10789 * Return: true if work posted to worker thread, otherwise false.
10790 **/
10791static bool
10792lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
10793 struct lpfc_queue *cq,
10794 struct sli4_wcqe_xri_aborted *wcqe)
10795{
10796 bool workposted = false;
10797 struct lpfc_cq_event *cq_event;
10798 unsigned long iflags;
10799
10800 /* Allocate a new internal CQ_EVENT entry */
10801 cq_event = lpfc_sli4_cq_event_alloc(phba);
10802 if (!cq_event) {
10803 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
10804 "0602 Failed to allocate CQ_EVENT entry\n");
10805 return false;
10806 }
10807
10808 /* Move the CQE into the proper xri abort event list */
10809 memcpy(&cq_event->cqe, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
10810 switch (cq->subtype) {
10811 case LPFC_FCP:
10812 spin_lock_irqsave(&phba->hbalock, iflags);
10813 list_add_tail(&cq_event->list,
10814 &phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
10815 /* Set the fcp xri abort event flag */
10816 phba->hba_flag |= FCP_XRI_ABORT_EVENT;
10817 spin_unlock_irqrestore(&phba->hbalock, iflags);
10818 workposted = true;
10819 break;
10820 case LPFC_ELS:
10821 spin_lock_irqsave(&phba->hbalock, iflags);
10822 list_add_tail(&cq_event->list,
10823 &phba->sli4_hba.sp_els_xri_aborted_work_queue);
10824 /* Set the els xri abort event flag */
10825 phba->hba_flag |= ELS_XRI_ABORT_EVENT;
10826 spin_unlock_irqrestore(&phba->hbalock, iflags);
10827 workposted = true;
10828 break;
10829 default:
10830 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
10831 "0603 Invalid work queue CQE subtype (x%x)\n",
10832 cq->subtype);
10833 workposted = false;
10834 break;
10835 }
10836 return workposted;
10837}
10838
10839/**
James Smart4d9ab992009-10-02 15:16:39 -040010840 * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry
James Smart4f774512009-05-22 14:52:35 -040010841 * @phba: Pointer to HBA context object.
James Smart4d9ab992009-10-02 15:16:39 -040010842 * @rcqe: Pointer to receive-queue completion queue entry.
James Smart4f774512009-05-22 14:52:35 -040010843 *
James Smart4d9ab992009-10-02 15:16:39 -040010844 * This routine processes a receive-queue completion queue entry.
James Smart4f774512009-05-22 14:52:35 -040010845 *
10846 * Return: true if work posted to worker thread, otherwise false.
10847 **/
10848static bool
James Smart4d9ab992009-10-02 15:16:39 -040010849lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
10850{
10851 bool workposted = false;
10852 struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
10853 struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
10854 struct hbq_dmabuf *dma_buf;
James Smart7851fe22011-07-22 18:36:52 -040010855 uint32_t status, rq_id;
James Smart4d9ab992009-10-02 15:16:39 -040010856 unsigned long iflags;
10857
James Smart7851fe22011-07-22 18:36:52 -040010858 if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
10859 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
10860 else
10861 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
10862 if (rq_id != hrq->queue_id)
James Smart4d9ab992009-10-02 15:16:39 -040010863 goto out;
10864
10865 status = bf_get(lpfc_rcqe_status, rcqe);
10866 switch (status) {
10867 case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
10868 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
10869 "2537 Receive Frame Truncated!!\n");
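		/* no break: a truncated frame is still handed to the
		 * FC_STATUS_RQ_SUCCESS path below and posted to the
		 * worker thread
		 */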
10870 case FC_STATUS_RQ_SUCCESS:
James Smart5ffc2662009-11-18 15:39:44 -050010871 lpfc_sli4_rq_release(hrq, drq);
James Smart4d9ab992009-10-02 15:16:39 -040010872 spin_lock_irqsave(&phba->hbalock, iflags);
10873 dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
10874 if (!dma_buf) {
10875 spin_unlock_irqrestore(&phba->hbalock, iflags);
10876 goto out;
10877 }
10878 memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));
10879		/* save off the frame for the worker thread to process */
10880 list_add_tail(&dma_buf->cq_event.list,
James Smart45ed1192009-10-02 15:17:02 -040010881 &phba->sli4_hba.sp_queue_event);
James Smart4d9ab992009-10-02 15:16:39 -040010882 /* Frame received */
James Smart45ed1192009-10-02 15:17:02 -040010883 phba->hba_flag |= HBA_SP_QUEUE_EVT;
James Smart4d9ab992009-10-02 15:16:39 -040010884 spin_unlock_irqrestore(&phba->hbalock, iflags);
10885 workposted = true;
10886 break;
10887 case FC_STATUS_INSUFF_BUF_NEED_BUF:
10888 case FC_STATUS_INSUFF_BUF_FRM_DISC:
10889 /* Post more buffers if possible */
10890 spin_lock_irqsave(&phba->hbalock, iflags);
10891 phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
10892 spin_unlock_irqrestore(&phba->hbalock, iflags);
10893 workposted = true;
10894 break;
10895 }
10896out:
10897 return workposted;
James Smart4d9ab992009-10-02 15:16:39 -040010898}
10899
10900/**
10901 * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry
10902 * @phba: Pointer to HBA context object.
10903 * @cq: Pointer to the completion queue.
10904 * @wcqe: Pointer to a completion queue entry.
10905 *
Lucas De Marchi25985ed2011-03-30 22:57:33 -030010906 * This routine processes a slow-path work-queue or receive-queue completion queue
James Smart4d9ab992009-10-02 15:16:39 -040010907 * entry.
10908 *
10909 * Return: true if work posted to worker thread, otherwise false.
10910 **/
10911static bool
10912lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
James Smart4f774512009-05-22 14:52:35 -040010913 struct lpfc_cqe *cqe)
10914{
James Smart45ed1192009-10-02 15:17:02 -040010915 struct lpfc_cqe cqevt;
James Smart4f774512009-05-22 14:52:35 -040010916 bool workposted = false;
10917
10918 /* Copy the work queue CQE and convert endian order if needed */
James Smart45ed1192009-10-02 15:17:02 -040010919 lpfc_sli_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe));
James Smart4f774512009-05-22 14:52:35 -040010920
10921 /* Check and process for different type of WCQE and dispatch */
James Smart45ed1192009-10-02 15:17:02 -040010922 switch (bf_get(lpfc_cqe_code, &cqevt)) {
James Smart4f774512009-05-22 14:52:35 -040010923 case CQE_CODE_COMPL_WQE:
James Smart45ed1192009-10-02 15:17:02 -040010924 /* Process the WQ/RQ complete event */
James Smartbc739052010-08-04 16:11:18 -040010925 phba->last_completion_time = jiffies;
James Smart4f774512009-05-22 14:52:35 -040010926 workposted = lpfc_sli4_sp_handle_els_wcqe(phba,
James Smart45ed1192009-10-02 15:17:02 -040010927 (struct lpfc_wcqe_complete *)&cqevt);
James Smart4f774512009-05-22 14:52:35 -040010928 break;
10929 case CQE_CODE_RELEASE_WQE:
10930 /* Process the WQ release event */
10931 lpfc_sli4_sp_handle_rel_wcqe(phba,
James Smart45ed1192009-10-02 15:17:02 -040010932 (struct lpfc_wcqe_release *)&cqevt);
James Smart4f774512009-05-22 14:52:35 -040010933 break;
10934 case CQE_CODE_XRI_ABORTED:
10935 /* Process the WQ XRI abort event */
James Smartbc739052010-08-04 16:11:18 -040010936 phba->last_completion_time = jiffies;
James Smart4f774512009-05-22 14:52:35 -040010937 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
James Smart45ed1192009-10-02 15:17:02 -040010938 (struct sli4_wcqe_xri_aborted *)&cqevt);
James Smart4f774512009-05-22 14:52:35 -040010939 break;
James Smart4d9ab992009-10-02 15:16:39 -040010940 case CQE_CODE_RECEIVE:
James Smart7851fe22011-07-22 18:36:52 -040010941 case CQE_CODE_RECEIVE_V1:
James Smart4d9ab992009-10-02 15:16:39 -040010942 /* Process the RQ event */
James Smartbc739052010-08-04 16:11:18 -040010943 phba->last_completion_time = jiffies;
James Smart4d9ab992009-10-02 15:16:39 -040010944 workposted = lpfc_sli4_sp_handle_rcqe(phba,
James Smart45ed1192009-10-02 15:17:02 -040010945 (struct lpfc_rcqe *)&cqevt);
James Smart4d9ab992009-10-02 15:16:39 -040010946 break;
James Smart4f774512009-05-22 14:52:35 -040010947 default:
10948 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
10949 "0388 Not a valid WCQE code: x%x\n",
James Smart45ed1192009-10-02 15:17:02 -040010950 bf_get(lpfc_cqe_code, &cqevt));
James Smart4f774512009-05-22 14:52:35 -040010951 break;
10952 }
10953 return workposted;
10954}
10955
10956/**
James Smart4f774512009-05-22 14:52:35 -040010957 * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry
10958 * @phba: Pointer to HBA context object.
10959 * @eqe: Pointer to fast-path event queue entry.
10960 *
10961 * This routine processes an event queue entry from the slow-path event queue.
10962 * It checks the MajorCode and MinorCode to determine whether this is a
10963 * completion event on a completion queue; if not, an error is logged and the
10964 * routine simply returns. Otherwise, it finds the corresponding completion
10965 * queue, processes all the entries on that completion queue, rearms the
10966 * completion queue, and then returns.
10967 *
10968 **/
10969static void
10970lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
10971{
10972 struct lpfc_queue *cq = NULL, *childq, *speq;
10973 struct lpfc_cqe *cqe;
10974 bool workposted = false;
10975 int ecount = 0;
10976 uint16_t cqid;
10977
James Smartcb5172e2010-03-15 11:25:07 -040010978 if (bf_get_le32(lpfc_eqe_major_code, eqe) != 0) {
James Smart4f774512009-05-22 14:52:35 -040010979 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
10980 "0359 Not a valid slow-path completion "
10981 "event: majorcode=x%x, minorcode=x%x\n",
James Smartcb5172e2010-03-15 11:25:07 -040010982 bf_get_le32(lpfc_eqe_major_code, eqe),
10983 bf_get_le32(lpfc_eqe_minor_code, eqe));
James Smart4f774512009-05-22 14:52:35 -040010984 return;
10985 }
10986
10987 /* Get the reference to the corresponding CQ */
James Smartcb5172e2010-03-15 11:25:07 -040010988 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
James Smart4f774512009-05-22 14:52:35 -040010989
10990 /* Search for completion queue pointer matching this cqid */
10991 speq = phba->sli4_hba.sp_eq;
10992 list_for_each_entry(childq, &speq->child_list, list) {
10993 if (childq->queue_id == cqid) {
10994 cq = childq;
10995 break;
10996 }
10997 }
10998 if (unlikely(!cq)) {
James Smart75baf692010-06-08 18:31:21 -040010999 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
11000 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11001 "0365 Slow-path CQ identifier "
11002 "(%d) does not exist\n", cqid);
James Smart4f774512009-05-22 14:52:35 -040011003 return;
11004 }
11005
11006 /* Process all the entries to the CQ */
11007 switch (cq->type) {
11008 case LPFC_MCQ:
11009 while ((cqe = lpfc_sli4_cq_get(cq))) {
11010 workposted |= lpfc_sli4_sp_handle_mcqe(phba, cqe);
James Smart73d91e52011-10-10 21:32:10 -040011011 if (!(++ecount % cq->entry_repost))
James Smart4f774512009-05-22 14:52:35 -040011012 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
11013 }
11014 break;
11015 case LPFC_WCQ:
11016 while ((cqe = lpfc_sli4_cq_get(cq))) {
James Smart05580562011-05-24 11:40:48 -040011017 if (cq->subtype == LPFC_FCP)
11018 workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq,
11019 cqe);
11020 else
11021 workposted |= lpfc_sli4_sp_handle_cqe(phba, cq,
11022 cqe);
James Smart73d91e52011-10-10 21:32:10 -040011023 if (!(++ecount % cq->entry_repost))
James Smart4f774512009-05-22 14:52:35 -040011024 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
11025 }
11026 break;
11027 default:
11028 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11029 "0370 Invalid completion queue type (%d)\n",
11030 cq->type);
11031 return;
11032 }
11033
11034 /* Catch the no cq entry condition, log an error */
11035 if (unlikely(ecount == 0))
11036 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11037 "0371 No entry from the CQ: identifier "
11038 "(x%x), type (%d)\n", cq->queue_id, cq->type);
11039
11040	/* In any case, flush and re-arm the CQ */
11041 lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
11042
11043 /* wake up worker thread if there are works to be done */
11044 if (workposted)
11045 lpfc_worker_wake_up(phba);
11046}
11047
11048/**
11049 * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry
11050 * @phba: Pointer to HBA context object.
 * @wcqe: Pointer to work-queue completion queue entry.
11051 *
11052 * This routine processes a fast-path work queue completion entry from the
11053 * fast-path event queue for FCP command response completion.
11054 **/
11055static void
11056lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba,
11057 struct lpfc_wcqe_complete *wcqe)
11058{
11059 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_FCP_RING];
11060 struct lpfc_iocbq *cmdiocbq;
11061 struct lpfc_iocbq irspiocbq;
11062 unsigned long iflags;
11063
11064 spin_lock_irqsave(&phba->hbalock, iflags);
11065 pring->stats.iocb_event++;
11066 spin_unlock_irqrestore(&phba->hbalock, iflags);
11067
11068 /* Check for response status */
11069 if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
11070 /* If resource errors reported from HBA, reduce queue
11071 * depth of the SCSI device.
11072 */
11073 if ((bf_get(lpfc_wcqe_c_status, wcqe) ==
11074 IOSTAT_LOCAL_REJECT) &&
11075 (wcqe->parameter == IOERR_NO_RESOURCES)) {
11076 phba->lpfc_rampdown_queue_depth(phba);
11077 }
11078 /* Log the error status */
11079 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11080 "0373 FCP complete error: status=x%x, "
11081 "hw_status=x%x, total_data_specified=%d, "
11082 "parameter=x%x, word3=x%x\n",
11083 bf_get(lpfc_wcqe_c_status, wcqe),
11084 bf_get(lpfc_wcqe_c_hw_status, wcqe),
11085 wcqe->total_data_placed, wcqe->parameter,
11086 wcqe->word3);
11087 }
11088
11089 /* Look up the FCP command IOCB and create pseudo response IOCB */
11090 spin_lock_irqsave(&phba->hbalock, iflags);
11091 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
11092 bf_get(lpfc_wcqe_c_request_tag, wcqe));
11093 spin_unlock_irqrestore(&phba->hbalock, iflags);
11094 if (unlikely(!cmdiocbq)) {
11095 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11096 "0374 FCP complete with no corresponding "
11097 "cmdiocb: iotag (%d)\n",
11098 bf_get(lpfc_wcqe_c_request_tag, wcqe));
11099 return;
11100 }
11101 if (unlikely(!cmdiocbq->iocb_cmpl)) {
11102 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11103 "0375 FCP cmdiocb not callback function "
11104 "iotag: (%d)\n",
11105 bf_get(lpfc_wcqe_c_request_tag, wcqe));
11106 return;
11107 }
11108
11109 /* Fake the irspiocb and copy necessary response information */
James Smart341af102010-01-26 23:07:37 -050011110 lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe);
James Smart4f774512009-05-22 14:52:35 -040011111
James Smart0f65ff62010-02-26 14:14:23 -050011112 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
11113 spin_lock_irqsave(&phba->hbalock, iflags);
11114 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
11115 spin_unlock_irqrestore(&phba->hbalock, iflags);
11116 }
11117
James Smart4f774512009-05-22 14:52:35 -040011118 /* Pass the cmd_iocb and the rsp state to the upper layer */
11119 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq);
11120}
11121
11122/**
11123 * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event
11124 * @phba: Pointer to HBA context object.
11125 * @cq: Pointer to completion queue.
11126 * @wcqe: Pointer to work-queue completion queue entry.
11127 *
11128 * This routine handles a fast-path WQ entry consumed event by invoking the
11129 * proper WQ release routine for the matching fast-path FCP WQ.
11130 **/
11131static void
11132lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
11133 struct lpfc_wcqe_release *wcqe)
11134{
11135 struct lpfc_queue *childwq;
11136 bool wqid_matched = false;
11137 uint16_t fcp_wqid;
11138
11139 /* Check for fast-path FCP work queue release */
11140 fcp_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe);
11141 list_for_each_entry(childwq, &cq->child_list, list) {
11142 if (childwq->queue_id == fcp_wqid) {
11143 lpfc_sli4_wq_release(childwq,
11144 bf_get(lpfc_wcqe_r_wqe_index, wcqe));
11145 wqid_matched = true;
11146 break;
11147 }
11148 }
11149 /* Report warning log message if no match found */
11150 if (wqid_matched != true)
11151 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11152 "2580 Fast-path wqe consume event carries "
11153 "miss-matched qid: wcqe-qid=x%x\n", fcp_wqid);
11154}
11155
11156/**
11157 * lpfc_sli4_fp_handle_wcqe - Process fast-path work queue completion entry
11158 * @phba: Pointer to HBA context object.
 * @cq: Pointer to the completion queue.
11159 * @cqe: Pointer to fast-path completion queue entry.
11160 *
11161 * This routine processes a fast-path work queue completion entry from the
11162 * fast-path event queue for FCP command response completion.
11163 **/
11164static int
11165lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
11166 struct lpfc_cqe *cqe)
11167{
11168 struct lpfc_wcqe_release wcqe;
11169 bool workposted = false;
11170
11171 /* Copy the work queue CQE and convert endian order if needed */
11172 lpfc_sli_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
11173
11174 /* Check and process for different type of WCQE and dispatch */
11175 switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
11176 case CQE_CODE_COMPL_WQE:
11177 /* Process the WQ complete event */
James Smart98fc5dd2010-06-07 15:24:29 -040011178 phba->last_completion_time = jiffies;
James Smart4f774512009-05-22 14:52:35 -040011179 lpfc_sli4_fp_handle_fcp_wcqe(phba,
11180 (struct lpfc_wcqe_complete *)&wcqe);
11181 break;
11182 case CQE_CODE_RELEASE_WQE:
11183 /* Process the WQ release event */
11184 lpfc_sli4_fp_handle_rel_wcqe(phba, cq,
11185 (struct lpfc_wcqe_release *)&wcqe);
11186 break;
11187 case CQE_CODE_XRI_ABORTED:
11188 /* Process the WQ XRI abort event */
James Smartbc739052010-08-04 16:11:18 -040011189 phba->last_completion_time = jiffies;
James Smart4f774512009-05-22 14:52:35 -040011190 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
11191 (struct sli4_wcqe_xri_aborted *)&wcqe);
11192 break;
11193 default:
11194 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11195 "0144 Not a valid WCQE code: x%x\n",
11196 bf_get(lpfc_wcqe_c_code, &wcqe));
11197 break;
11198 }
11199 return workposted;
11200}
11201
11202/**
11203 * lpfc_sli4_fp_handle_eqe - Process a fast-path event queue entry
11204 * @phba: Pointer to HBA context object.
11205 * @eqe: Pointer to fast-path event queue entry.
11206 *
11207 * This routine processes an event queue entry from the fast-path event queue.
11208 * It checks the MajorCode and MinorCode to determine whether this is a
11209 * completion event on a completion queue; if not, an error is logged and the
11210 * routine simply returns. Otherwise, it finds the corresponding completion
11211 * queue, processes all the entries on that completion queue, rearms the
11212 * completion queue, and then returns.
11213 **/
11214static void
11215lpfc_sli4_fp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
11216 uint32_t fcp_cqidx)
11217{
11218 struct lpfc_queue *cq;
11219 struct lpfc_cqe *cqe;
11220 bool workposted = false;
11221 uint16_t cqid;
11222 int ecount = 0;
11223
James Smartcb5172e2010-03-15 11:25:07 -040011224 if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
James Smart4f774512009-05-22 14:52:35 -040011225 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11226 "0366 Not a valid fast-path completion "
11227 "event: majorcode=x%x, minorcode=x%x\n",
James Smartcb5172e2010-03-15 11:25:07 -040011228 bf_get_le32(lpfc_eqe_major_code, eqe),
11229 bf_get_le32(lpfc_eqe_minor_code, eqe));
James Smart4f774512009-05-22 14:52:35 -040011230 return;
11231 }
11232
11233 cq = phba->sli4_hba.fcp_cq[fcp_cqidx];
11234 if (unlikely(!cq)) {
James Smart75baf692010-06-08 18:31:21 -040011235 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
11236 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11237 "0367 Fast-path completion queue "
11238 "does not exist\n");
James Smart4f774512009-05-22 14:52:35 -040011239 return;
11240 }
11241
11242 /* Get the reference to the corresponding CQ */
James Smartcb5172e2010-03-15 11:25:07 -040011243 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
James Smart4f774512009-05-22 14:52:35 -040011244 if (unlikely(cqid != cq->queue_id)) {
11245 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11246 "0368 Miss-matched fast-path completion "
11247 "queue identifier: eqcqid=%d, fcpcqid=%d\n",
11248 cqid, cq->queue_id);
11249 return;
11250 }
11251
11252 /* Process all the entries to the CQ */
11253 while ((cqe = lpfc_sli4_cq_get(cq))) {
11254 workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq, cqe);
James Smart73d91e52011-10-10 21:32:10 -040011255 if (!(++ecount % cq->entry_repost))
James Smart4f774512009-05-22 14:52:35 -040011256 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
11257 }
11258
11259 /* Catch the no cq entry condition */
11260 if (unlikely(ecount == 0))
11261 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11262 "0369 No entry from fast-path completion "
11263 "queue fcpcqid=%d\n", cq->queue_id);
11264
11265	/* In any case, flush and re-arm the CQ */
11266 lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
11267
11268 /* wake up worker thread if there are works to be done */
11269 if (workposted)
11270 lpfc_worker_wake_up(phba);
11271}
11272
11273static void
11274lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
11275{
11276 struct lpfc_eqe *eqe;
11277
11278 /* walk all the EQ entries and drop on the floor */
11279 while ((eqe = lpfc_sli4_eq_get(eq)))
11280 ;
11281
11282 /* Clear and re-arm the EQ */
11283 lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);
11284}
11285
11286/**
11287 * lpfc_sli4_sp_intr_handler - Slow-path interrupt handler to SLI-4 device
11288 * @irq: Interrupt number.
11289 * @dev_id: The device context pointer.
11290 *
11291 * This function is directly called from the PCI layer as an interrupt
11292 * service routine when device with SLI-4 interface spec is enabled with
11293 * MSI-X multi-message interrupt mode and there are slow-path events in
11294 * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
11295 * interrupt mode, this function is called as part of the device-level
11296 * interrupt handler. When the PCI slot is in error recovery or the HBA is
11297 * undergoing initialization, the interrupt handler will not process the
11298 * interrupt. The link attention and ELS ring attention events are handled
11299 * by the worker thread. The interrupt handler signals the worker thread
11300 * and returns for these events. This function is called without any lock
11301 * held. It gets the hbalock to access and update SLI data structures.
11302 *
11303 * This function returns IRQ_HANDLED when interrupt is handled else it
11304 * returns IRQ_NONE.
11305 **/
11306irqreturn_t
11307lpfc_sli4_sp_intr_handler(int irq, void *dev_id)
11308{
11309 struct lpfc_hba *phba;
11310 struct lpfc_queue *speq;
11311 struct lpfc_eqe *eqe;
11312 unsigned long iflag;
11313 int ecount = 0;
11314
11315 /*
11316 * Get the driver's phba structure from the dev_id
11317 */
11318 phba = (struct lpfc_hba *)dev_id;
11319
11320 if (unlikely(!phba))
11321 return IRQ_NONE;
11322
11323 /* Get to the EQ struct associated with this vector */
11324 speq = phba->sli4_hba.sp_eq;
James Smart5350d872011-10-10 21:33:49 -040011325 if (unlikely(!speq))
11326 return IRQ_NONE;
James Smart4f774512009-05-22 14:52:35 -040011327
11328 /* Check device state for handling interrupt */
11329 if (unlikely(lpfc_intr_state_check(phba))) {
11330 /* Check again for link_state with lock held */
11331 spin_lock_irqsave(&phba->hbalock, iflag);
11332 if (phba->link_state < LPFC_LINK_DOWN)
11333 /* Flush, clear interrupt, and rearm the EQ */
11334 lpfc_sli4_eq_flush(phba, speq);
11335 spin_unlock_irqrestore(&phba->hbalock, iflag);
11336 return IRQ_NONE;
11337 }
11338
11339 /*
11340	 * Process all the events on the slow-path EQ
11341 */
11342 while ((eqe = lpfc_sli4_eq_get(speq))) {
11343 lpfc_sli4_sp_handle_eqe(phba, eqe);
James Smart73d91e52011-10-10 21:32:10 -040011344 if (!(++ecount % speq->entry_repost))
James Smart4f774512009-05-22 14:52:35 -040011345 lpfc_sli4_eq_release(speq, LPFC_QUEUE_NOARM);
11346 }
11347
11348 /* Always clear and re-arm the slow-path EQ */
11349 lpfc_sli4_eq_release(speq, LPFC_QUEUE_REARM);
11350
11351 /* Catch the no cq entry condition */
11352 if (unlikely(ecount == 0)) {
11353 if (phba->intr_type == MSIX)
11354 /* MSI-X treated interrupt served as no EQ share INT */
11355 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11356 "0357 MSI-X interrupt with no EQE\n");
11357 else
11358 /* Non MSI-X treated on interrupt as EQ share INT */
11359 return IRQ_NONE;
11360 }
11361
11362 return IRQ_HANDLED;
11363} /* lpfc_sli4_sp_intr_handler */
11364
11365/**
11366 * lpfc_sli4_fp_intr_handler - Fast-path interrupt handler to SLI-4 device
11367 * @irq: Interrupt number.
11368 * @dev_id: The device context pointer.
11369 *
11370 * This function is directly called from the PCI layer as an interrupt
11371 * service routine when device with SLI-4 interface spec is enabled with
11372 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
11373 * ring event in the HBA. However, when the device is enabled with either
11374 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
11375 * device-level interrupt handler. When the PCI slot is in error recovery
11376 * or the HBA is undergoing initialization, the interrupt handler will not
11377 * process the interrupt. The SCSI FCP fast-path ring events are handled in
11378 * the interrupt context. This function is called without any lock held.
11379 * It gets the hbalock to access and update SLI data structures. Note that
11380 * the FCP EQs and FCP CQs are mapped one-to-one, so the FCP EQ index is
11381 * equal to the FCP CQ index.
11382 *
11383 * This function returns IRQ_HANDLED when interrupt is handled else it
11384 * returns IRQ_NONE.
11385 **/
11386irqreturn_t
11387lpfc_sli4_fp_intr_handler(int irq, void *dev_id)
11388{
11389 struct lpfc_hba *phba;
11390 struct lpfc_fcp_eq_hdl *fcp_eq_hdl;
11391 struct lpfc_queue *fpeq;
11392 struct lpfc_eqe *eqe;
11393 unsigned long iflag;
11394 int ecount = 0;
11395 uint32_t fcp_eqidx;
11396
11397 /* Get the driver's phba structure from the dev_id */
11398 fcp_eq_hdl = (struct lpfc_fcp_eq_hdl *)dev_id;
11399 phba = fcp_eq_hdl->phba;
11400 fcp_eqidx = fcp_eq_hdl->idx;
11401
11402 if (unlikely(!phba))
11403 return IRQ_NONE;
James Smart5350d872011-10-10 21:33:49 -040011404 if (unlikely(!phba->sli4_hba.fp_eq))
11405 return IRQ_NONE;
James Smart4f774512009-05-22 14:52:35 -040011406
11407 /* Get to the EQ struct associated with this vector */
11408 fpeq = phba->sli4_hba.fp_eq[fcp_eqidx];
11409
11410 /* Check device state for handling interrupt */
11411 if (unlikely(lpfc_intr_state_check(phba))) {
11412 /* Check again for link_state with lock held */
11413 spin_lock_irqsave(&phba->hbalock, iflag);
11414 if (phba->link_state < LPFC_LINK_DOWN)
11415 /* Flush, clear interrupt, and rearm the EQ */
11416 lpfc_sli4_eq_flush(phba, fpeq);
11417 spin_unlock_irqrestore(&phba->hbalock, iflag);
11418 return IRQ_NONE;
11419 }
11420
11421 /*
11422	 * Process all the events on the FCP fast-path EQ
11423 */
11424 while ((eqe = lpfc_sli4_eq_get(fpeq))) {
11425 lpfc_sli4_fp_handle_eqe(phba, eqe, fcp_eqidx);
James Smart73d91e52011-10-10 21:32:10 -040011426 if (!(++ecount % fpeq->entry_repost))
James Smart4f774512009-05-22 14:52:35 -040011427 lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_NOARM);
11428 }
11429
11430 /* Always clear and re-arm the fast-path EQ */
11431 lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_REARM);
11432
11433 if (unlikely(ecount == 0)) {
11434 if (phba->intr_type == MSIX)
11435 /* MSI-X treated interrupt served as no EQ share INT */
11436 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11437 "0358 MSI-X interrupt with no EQE\n");
11438 else
11439 /* Non MSI-X treated on interrupt as EQ share INT */
11440 return IRQ_NONE;
11441 }
11442
11443 return IRQ_HANDLED;
11444} /* lpfc_sli4_fp_intr_handler */
11445
11446/**
11447 * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device
11448 * @irq: Interrupt number.
11449 * @dev_id: The device context pointer.
11450 *
11451 * This function is the device-level interrupt handler for devices with the SLI-4
11452 * interface spec, called from the PCI layer when either MSI or Pin-IRQ
11453 * interrupt mode is enabled and there is an event in the HBA which requires
11454 * driver attention. This function invokes the slow-path interrupt attention
11455 * handling function and fast-path interrupt attention handling function in
11456 * turn to process the relevant HBA attention events. This function is called
11457 * without any lock held. It gets the hbalock to access and update SLI data
11458 * structures.
11459 *
11460 * This function returns IRQ_HANDLED when interrupt is handled, else it
11461 * returns IRQ_NONE.
11462 **/
11463irqreturn_t
11464lpfc_sli4_intr_handler(int irq, void *dev_id)
11465{
11466 struct lpfc_hba *phba;
11467 irqreturn_t sp_irq_rc, fp_irq_rc;
11468 bool fp_handled = false;
11469 uint32_t fcp_eqidx;
11470
11471 /* Get the driver's phba structure from the dev_id */
11472 phba = (struct lpfc_hba *)dev_id;
11473
11474 if (unlikely(!phba))
11475 return IRQ_NONE;
11476
11477 /*
11478 * Invokes slow-path host attention interrupt handling as appropriate.
11479 */
11480 sp_irq_rc = lpfc_sli4_sp_intr_handler(irq, dev_id);
11481
11482 /*
11483 * Invoke fast-path host attention interrupt handling as appropriate.
11484 */
11485 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
11486 fp_irq_rc = lpfc_sli4_fp_intr_handler(irq,
11487 &phba->sli4_hba.fcp_eq_hdl[fcp_eqidx]);
11488 if (fp_irq_rc == IRQ_HANDLED)
11489 fp_handled |= true;
11490 }
11491
11492 return (fp_handled == true) ? IRQ_HANDLED : sp_irq_rc;
11493} /* lpfc_sli4_intr_handler */
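/*
 * Illustrative note, not part of the original file: in MSI-X mode each
 * fast-path vector is assumed to be registered with its own per-vector
 * context so that lpfc_sli4_fp_intr_handler() can recover the phba and the
 * EQ index from dev_id, roughly
 *
 *	rc = request_irq(vector, lpfc_sli4_fp_intr_handler, 0, name,
 *			 &phba->sli4_hba.fcp_eq_hdl[index]);
 *
 * where vector, name and index are placeholders for the values chosen
 * during MSI-X setup.
 */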
11494
11495/**
11496 * lpfc_sli4_queue_free - free a queue structure and associated memory
11497 * @queue: The queue structure to free.
11498 *
Uwe Kleine-Königb5950762010-11-01 15:38:34 -040011499 * This function frees a queue structure and the DMAable memory used for
James Smart4f774512009-05-22 14:52:35 -040011500 * the host resident queue. This function must be called after destroying the
11501 * queue on the HBA.
11502 **/
11503void
11504lpfc_sli4_queue_free(struct lpfc_queue *queue)
11505{
11506 struct lpfc_dmabuf *dmabuf;
11507
11508 if (!queue)
11509 return;
11510
11511 while (!list_empty(&queue->page_list)) {
11512 list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
11513 list);
James Smart49198b32010-04-06 15:04:33 -040011514 dma_free_coherent(&queue->phba->pcidev->dev, SLI4_PAGE_SIZE,
James Smart4f774512009-05-22 14:52:35 -040011515 dmabuf->virt, dmabuf->phys);
11516 kfree(dmabuf);
11517 }
11518 kfree(queue);
11519 return;
11520}
11521
11522/**
11523 * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure
11524 * @phba: The HBA that this queue is being created on.
11525 * @entry_size: The size of each queue entry for this queue.
11526 * @entry_count: The number of entries that this queue will handle.
11527 *
11528 * This function allocates a queue structure and the DMAable memory used for
11529 * the host resident queue. This function must be called before creating the
11530 * queue on the HBA.
11531 **/
11532struct lpfc_queue *
11533lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
11534 uint32_t entry_count)
11535{
11536 struct lpfc_queue *queue;
11537 struct lpfc_dmabuf *dmabuf;
11538 int x, total_qe_count;
11539 void *dma_pointer;
James Smartcb5172e2010-03-15 11:25:07 -040011540 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
James Smart4f774512009-05-22 14:52:35 -040011541
James Smartcb5172e2010-03-15 11:25:07 -040011542 if (!phba->sli4_hba.pc_sli4_params.supported)
11543 hw_page_size = SLI4_PAGE_SIZE;
11544
James Smart4f774512009-05-22 14:52:35 -040011545 queue = kzalloc(sizeof(struct lpfc_queue) +
11546 (sizeof(union sli4_qe) * entry_count), GFP_KERNEL);
11547 if (!queue)
11548 return NULL;
James Smartcb5172e2010-03-15 11:25:07 -040011549 queue->page_count = (ALIGN(entry_size * entry_count,
11550 hw_page_size))/hw_page_size;
James Smart4f774512009-05-22 14:52:35 -040011551 INIT_LIST_HEAD(&queue->list);
11552 INIT_LIST_HEAD(&queue->page_list);
11553 INIT_LIST_HEAD(&queue->child_list);
11554 for (x = 0, total_qe_count = 0; x < queue->page_count; x++) {
11555 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
11556 if (!dmabuf)
11557 goto out_fail;
11558 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
James Smartcb5172e2010-03-15 11:25:07 -040011559 hw_page_size, &dmabuf->phys,
James Smart4f774512009-05-22 14:52:35 -040011560 GFP_KERNEL);
11561 if (!dmabuf->virt) {
11562 kfree(dmabuf);
11563 goto out_fail;
11564 }
James Smartcb5172e2010-03-15 11:25:07 -040011565 memset(dmabuf->virt, 0, hw_page_size);
James Smart4f774512009-05-22 14:52:35 -040011566 dmabuf->buffer_tag = x;
11567 list_add_tail(&dmabuf->list, &queue->page_list);
11568 /* initialize queue's entry array */
11569 dma_pointer = dmabuf->virt;
11570 for (; total_qe_count < entry_count &&
James Smartcb5172e2010-03-15 11:25:07 -040011571 dma_pointer < (hw_page_size + dmabuf->virt);
James Smart4f774512009-05-22 14:52:35 -040011572 total_qe_count++, dma_pointer += entry_size) {
11573 queue->qe[total_qe_count].address = dma_pointer;
11574 }
11575 }
11576 queue->entry_size = entry_size;
11577 queue->entry_count = entry_count;
James Smart73d91e52011-10-10 21:32:10 -040011578
11579 /*
11580 * entry_repost is calculated based on the number of entries in the
11581 * queue. This works out except for RQs. If buffers are NOT initially
11582 * posted for every RQE, entry_repost should be adjusted accordingly.
11583 */
11584 queue->entry_repost = (entry_count >> 3);
11585 if (queue->entry_repost < LPFC_QUEUE_MIN_REPOST)
11586 queue->entry_repost = LPFC_QUEUE_MIN_REPOST;
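	/* For example, a 1024-entry queue has its consumed entries released
	 * back to the port after every 128 entries are processed.
	 */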
James Smart4f774512009-05-22 14:52:35 -040011587 queue->phba = phba;
11588
11589 return queue;
11590out_fail:
11591 lpfc_sli4_queue_free(queue);
11592 return NULL;
11593}
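/*
 * Illustrative sketch only, with placeholder values: callers pair this
 * allocator with one of the queue create routines that follow and free the
 * structure if creation fails, along the lines of
 *
 *	struct lpfc_queue *eq;
 *
 *	eq = lpfc_sli4_queue_alloc(phba, eq_entry_size, eq_entry_count);
 *	if (!eq)
 *		return -ENOMEM;
 *	if (lpfc_eq_create(phba, eq, imax)) {
 *		lpfc_sli4_queue_free(eq);
 *		return -ENXIO;
 *	}
 *
 * eq_entry_size, eq_entry_count and imax stand in for the values chosen
 * during SLI-4 queue setup.
 */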
11594
11595/**
11596 * lpfc_eq_create - Create an Event Queue on the HBA
11597 * @phba: HBA structure that indicates port to create a queue on.
11598 * @eq: The queue structure to use to create the event queue.
11599 * @imax: The maximum interrupt per second limit.
11600 *
11601 * This function creates an event queue, as detailed in @eq, on a port,
11602 * described by @phba by sending an EQ_CREATE mailbox command to the HBA.
11603 *
11604 * The @phba struct is used to send mailbox command to HBA. The @eq struct
11605 * is used to get the entry count and entry size that are necessary to
11606 * determine the number of pages to allocate and use for this queue. This
11607 * function will send the EQ_CREATE mailbox command to the HBA to setup the
11608 * event queue. This function is synchronous and will wait for the mailbox
11609 * command to finish before continuing.
11610 *
11611 * On success this function will return zero. If unable to allocate enough
James Smartd439d282010-09-29 11:18:45 -040011612 * memory this function will return -ENOMEM. If the queue create mailbox command
11613 * fails this function will return -ENXIO.
James Smart4f774512009-05-22 14:52:35 -040011614 **/
11615uint32_t
11616lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint16_t imax)
11617{
11618 struct lpfc_mbx_eq_create *eq_create;
11619 LPFC_MBOXQ_t *mbox;
11620 int rc, length, status = 0;
11621 struct lpfc_dmabuf *dmabuf;
11622 uint32_t shdr_status, shdr_add_status;
11623 union lpfc_sli4_cfg_shdr *shdr;
11624 uint16_t dmult;
James Smart49198b32010-04-06 15:04:33 -040011625 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
11626
11627 if (!phba->sli4_hba.pc_sli4_params.supported)
11628 hw_page_size = SLI4_PAGE_SIZE;
James Smart4f774512009-05-22 14:52:35 -040011629
11630 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
11631 if (!mbox)
11632 return -ENOMEM;
11633 length = (sizeof(struct lpfc_mbx_eq_create) -
11634 sizeof(struct lpfc_sli4_cfg_mhdr));
11635 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
11636 LPFC_MBOX_OPCODE_EQ_CREATE,
11637 length, LPFC_SLI4_MBX_EMBED);
11638 eq_create = &mbox->u.mqe.un.eq_create;
11639 bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request,
11640 eq->page_count);
11641 bf_set(lpfc_eq_context_size, &eq_create->u.request.context,
11642 LPFC_EQE_SIZE);
11643 bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1);
11644	/* Calculate delay multiplier from maximum interrupt per second */
11645 dmult = LPFC_DMULT_CONST/imax - 1;
11646 bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context,
11647 dmult);
11648 switch (eq->entry_count) {
11649 default:
11650 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11651 "0360 Unsupported EQ count. (%d)\n",
11652 eq->entry_count);
11653 if (eq->entry_count < 256)
11654 return -EINVAL;
11655 /* otherwise default to smallest count (drop through) */
11656 case 256:
11657 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
11658 LPFC_EQ_CNT_256);
11659 break;
11660 case 512:
11661 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
11662 LPFC_EQ_CNT_512);
11663 break;
11664 case 1024:
11665 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
11666 LPFC_EQ_CNT_1024);
11667 break;
11668 case 2048:
11669 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
11670 LPFC_EQ_CNT_2048);
11671 break;
11672 case 4096:
11673 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
11674 LPFC_EQ_CNT_4096);
11675 break;
11676 }
11677 list_for_each_entry(dmabuf, &eq->page_list, list) {
James Smart49198b32010-04-06 15:04:33 -040011678 memset(dmabuf->virt, 0, hw_page_size);
James Smart4f774512009-05-22 14:52:35 -040011679 eq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
11680 putPaddrLow(dmabuf->phys);
11681 eq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
11682 putPaddrHigh(dmabuf->phys);
11683 }
11684 mbox->vport = phba->pport;
11685 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
11686 mbox->context1 = NULL;
11687 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
11688 shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr;
11689 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
11690 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
11691 if (shdr_status || shdr_add_status || rc) {
11692 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11693 "2500 EQ_CREATE mailbox failed with "
11694 "status x%x add_status x%x, mbx status x%x\n",
11695 shdr_status, shdr_add_status, rc);
11696 status = -ENXIO;
11697 }
11698 eq->type = LPFC_EQ;
11699 eq->subtype = LPFC_NONE;
11700 eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response);
11701 if (eq->queue_id == 0xFFFF)
11702 status = -ENXIO;
11703 eq->host_index = 0;
11704 eq->hba_index = 0;
11705
James Smart8fa38512009-07-19 10:01:03 -040011706 mempool_free(mbox, phba->mbox_mem_pool);
James Smart4f774512009-05-22 14:52:35 -040011707 return status;
11708}
11709
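/*
 * Illustrative sketch only, not part of the driver: the typical pairing of
 * queue allocation and EQ creation on the SLI-4 init path. The
 * lpfc_sli4_queue_alloc(phba, entry_size, entry_count) signature and the
 * 4-byte EQE size are assumptions for this example, and error handling is
 * kept minimal. Note that lpfc_eq_create() divides LPFC_DMULT_CONST by
 * @imax, so @imax must be non-zero.
 */
static int lpfc_example_setup_eq(struct lpfc_hba *phba,
				 struct lpfc_queue **eqp, uint16_t imax)
{
	struct lpfc_queue *eq;

	eq = lpfc_sli4_queue_alloc(phba, 4, 4096);  /* 4096 EQEs, 4 bytes each */
	if (!eq)
		return -ENOMEM;
	if (lpfc_eq_create(phba, eq, imax)) {
		lpfc_sli4_queue_free(eq);
		return -ENXIO;
	}
	*eqp = eq;
	return 0;
}
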
11710/**
11711 * lpfc_cq_create - Create a Completion Queue on the HBA
11712 * @phba: HBA structure that indicates port to create a queue on.
11713 * @cq: The queue structure to use to create the completion queue.
11714 * @eq: The event queue to bind this completion queue to.
11715 *
11716 * This function creates a completion queue, as detailed in @cq, on a port,
11717 * described by @phba by sending a CQ_CREATE mailbox command to the HBA.
11718 *
11719 * The @phba struct is used to send mailbox command to HBA. The @cq struct
11720 * is used to get the entry count and entry size that are necessary to
11721 * determine the number of pages to allocate and use for this queue. The @eq
11722 * is used to indicate which event queue to bind this completion queue to. This
11723 * function will send the CQ_CREATE mailbox command to the HBA to setup the
11724 * completion queue. This function is synchronous and will wait for the mailbox
11725 * command to complete before returning.
11726 *
11727 * On success this function will return a zero. If unable to allocate enough
James Smartd439d282010-09-29 11:18:45 -040011728 * memory this function will return -ENOMEM. If the queue create mailbox command
11729 * fails this function will return -ENXIO.
James Smart4f774512009-05-22 14:52:35 -040011730 **/
11731uint32_t
11732lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
11733 struct lpfc_queue *eq, uint32_t type, uint32_t subtype)
11734{
11735 struct lpfc_mbx_cq_create *cq_create;
11736 struct lpfc_dmabuf *dmabuf;
11737 LPFC_MBOXQ_t *mbox;
11738 int rc, length, status = 0;
11739 uint32_t shdr_status, shdr_add_status;
11740 union lpfc_sli4_cfg_shdr *shdr;
James Smart49198b32010-04-06 15:04:33 -040011741 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
11742
11743 if (!phba->sli4_hba.pc_sli4_params.supported)
11744 hw_page_size = SLI4_PAGE_SIZE;
11745
James Smart4f774512009-05-22 14:52:35 -040011746 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
11747 if (!mbox)
11748 return -ENOMEM;
11749 length = (sizeof(struct lpfc_mbx_cq_create) -
11750 sizeof(struct lpfc_sli4_cfg_mhdr));
11751 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
11752 LPFC_MBOX_OPCODE_CQ_CREATE,
11753 length, LPFC_SLI4_MBX_EMBED);
11754 cq_create = &mbox->u.mqe.un.cq_create;
James Smart5a6f1332011-03-11 16:05:35 -050011755 shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr;
James Smart4f774512009-05-22 14:52:35 -040011756 bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request,
11757 cq->page_count);
11758 bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1);
11759 bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1);
James Smart5a6f1332011-03-11 16:05:35 -050011760 bf_set(lpfc_mbox_hdr_version, &shdr->request,
11761 phba->sli4_hba.pc_sli4_params.cqv);
11762 if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) {
James Smartc31098c2011-04-16 11:03:33 -040011763 /* FW only supports 1. Should be PAGE_SIZE/SLI4_PAGE_SIZE */
11764 bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request, 1);
James Smart5a6f1332011-03-11 16:05:35 -050011765 bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context,
11766 eq->queue_id);
11767 } else {
11768 bf_set(lpfc_cq_eq_id, &cq_create->u.request.context,
11769 eq->queue_id);
11770 }
James Smart4f774512009-05-22 14:52:35 -040011771 switch (cq->entry_count) {
11772 default:
11773 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11774 "0361 Unsupported CQ count. (%d)\n",
11775 cq->entry_count);
11776 if (cq->entry_count < 256)
11777 return -EINVAL;
11778 /* otherwise default to smallest count (drop through) */
11779 case 256:
11780 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
11781 LPFC_CQ_CNT_256);
11782 break;
11783 case 512:
11784 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
11785 LPFC_CQ_CNT_512);
11786 break;
11787 case 1024:
11788 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
11789 LPFC_CQ_CNT_1024);
11790 break;
11791 }
11792 list_for_each_entry(dmabuf, &cq->page_list, list) {
James Smart49198b32010-04-06 15:04:33 -040011793 memset(dmabuf->virt, 0, hw_page_size);
James Smart4f774512009-05-22 14:52:35 -040011794 cq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
11795 putPaddrLow(dmabuf->phys);
11796 cq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
11797 putPaddrHigh(dmabuf->phys);
11798 }
11799 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
11800
11801 /* The IOCTL status is embedded in the mailbox subheader. */
James Smart4f774512009-05-22 14:52:35 -040011802 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
11803 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
11804 if (shdr_status || shdr_add_status || rc) {
11805 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11806 "2501 CQ_CREATE mailbox failed with "
11807 "status x%x add_status x%x, mbx status x%x\n",
11808 shdr_status, shdr_add_status, rc);
11809 status = -ENXIO;
11810 goto out;
11811 }
11812 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
11813 if (cq->queue_id == 0xFFFF) {
11814 status = -ENXIO;
11815 goto out;
11816 }
11817 /* link the cq onto the parent eq child list */
11818 list_add_tail(&cq->list, &eq->child_list);
11819 /* Set up completion queue's type and subtype */
11820 cq->type = type;
11821 cq->subtype = subtype;
11822 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
James Smart2a622bf2011-02-16 12:40:06 -050011823 cq->assoc_qid = eq->queue_id;
James Smart4f774512009-05-22 14:52:35 -040011824 cq->host_index = 0;
11825 cq->hba_index = 0;
James Smart4f774512009-05-22 14:52:35 -040011826
James Smart8fa38512009-07-19 10:01:03 -040011827out:
11828 mempool_free(mbox, phba->mbox_mem_pool);
James Smart4f774512009-05-22 14:52:35 -040011829 return status;
11830}
11831
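/*
 * Illustrative sketch only, not part of the driver: binding a freshly
 * allocated completion queue to an existing event queue. LPFC_WCQ and
 * LPFC_FCP are used as plausible type/subtype values; the values a real
 * caller passes depend on what traffic the CQ will service.
 */
static int lpfc_example_setup_cq(struct lpfc_hba *phba, struct lpfc_queue *cq,
				 struct lpfc_queue *eq)
{
	if (lpfc_cq_create(phba, cq, eq, LPFC_WCQ, LPFC_FCP))
		return -ENXIO;
	/* On success the CQ sits on eq->child_list with a valid queue_id */
	return 0;
}
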
11832/**
James Smartb19a0612010-04-06 14:48:51 -040011833 * lpfc_mq_create_fb_init - Send MCC_CREATE without async events registration
James Smart04c68492009-05-22 14:52:52 -040011834 * @phba: HBA structure that indicates port to create a queue on.
11835 * @mq: The queue structure to use to create the mailbox queue.
James Smartb19a0612010-04-06 14:48:51 -040011836 * @mbox: An allocated pointer to type LPFC_MBOXQ_t
11837 * @cq: The completion queue to associate with this mq.
James Smart04c68492009-05-22 14:52:52 -040011838 *
James Smartb19a0612010-04-06 14:48:51 -040011839 * This function provides fallback (fb) functionality when the
11840 * mq_create_ext fails on older FW generations. Its purpose is identical
11841 * to mq_create_ext otherwise.
James Smart04c68492009-05-22 14:52:52 -040011842 *
James Smartb19a0612010-04-06 14:48:51 -040011843 * This routine cannot fail as all attributes were previously accessed and
11844 * initialized in mq_create_ext.
James Smart04c68492009-05-22 14:52:52 -040011845 **/
James Smartb19a0612010-04-06 14:48:51 -040011846static void
11847lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq,
11848 LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq)
James Smart04c68492009-05-22 14:52:52 -040011849{
11850 struct lpfc_mbx_mq_create *mq_create;
11851 struct lpfc_dmabuf *dmabuf;
James Smartb19a0612010-04-06 14:48:51 -040011852 int length;
James Smart04c68492009-05-22 14:52:52 -040011853
James Smart04c68492009-05-22 14:52:52 -040011854 length = (sizeof(struct lpfc_mbx_mq_create) -
11855 sizeof(struct lpfc_sli4_cfg_mhdr));
11856 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
11857 LPFC_MBOX_OPCODE_MQ_CREATE,
11858 length, LPFC_SLI4_MBX_EMBED);
11859 mq_create = &mbox->u.mqe.un.mq_create;
11860 bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
James Smartb19a0612010-04-06 14:48:51 -040011861 mq->page_count);
James Smart04c68492009-05-22 14:52:52 -040011862 bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
James Smartb19a0612010-04-06 14:48:51 -040011863 cq->queue_id);
James Smart04c68492009-05-22 14:52:52 -040011864 bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
11865 switch (mq->entry_count) {
James Smart04c68492009-05-22 14:52:52 -040011866 case 16:
James Smart5a6f1332011-03-11 16:05:35 -050011867 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
11868 LPFC_MQ_RING_SIZE_16);
James Smart04c68492009-05-22 14:52:52 -040011869 break;
11870 case 32:
James Smart5a6f1332011-03-11 16:05:35 -050011871 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
11872 LPFC_MQ_RING_SIZE_32);
James Smart04c68492009-05-22 14:52:52 -040011873 break;
11874 case 64:
James Smart5a6f1332011-03-11 16:05:35 -050011875 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
11876 LPFC_MQ_RING_SIZE_64);
James Smart04c68492009-05-22 14:52:52 -040011877 break;
11878 case 128:
James Smart5a6f1332011-03-11 16:05:35 -050011879 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
11880 LPFC_MQ_RING_SIZE_128);
James Smart04c68492009-05-22 14:52:52 -040011881 break;
11882 }
11883 list_for_each_entry(dmabuf, &mq->page_list, list) {
11884 mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
James Smartb19a0612010-04-06 14:48:51 -040011885 putPaddrLow(dmabuf->phys);
James Smart04c68492009-05-22 14:52:52 -040011886 mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
James Smartb19a0612010-04-06 14:48:51 -040011887 putPaddrHigh(dmabuf->phys);
11888 }
11889}
11890
11891/**
11892 * lpfc_mq_create - Create a mailbox Queue on the HBA
11893 * @phba: HBA structure that indicates port to create a queue on.
11894 * @mq: The queue structure to use to create the mailbox queue.
11895 * @cq: The completion queue to associate with this mq.
11896 * @subtype: The queue's subtype.
11897 *
11898 * This function creates a mailbox queue, as detailed in @mq, on a port,
11899 * described by @phba by sending a MQ_CREATE mailbox command to the HBA.
11900 *
11901 * The @phba struct is used to send mailbox command to HBA. The @cq struct
11902 * is used to get the entry count and entry size that are necessary to
11903 * determine the number of pages to allocate and use for this queue. This
11904 * function will send the MQ_CREATE mailbox command to the HBA to setup the
11905 * mailbox queue. This function is synchronous and will wait for the mailbox
11906 * command to complete before returning.
11907 *
11908 * On success this function will return a zero. If unable to allocate enough
James Smartd439d282010-09-29 11:18:45 -040011909 * memory this function will return -ENOMEM. If the queue create mailbox command
11910 * fails this function will return -ENXIO.
James Smartb19a0612010-04-06 14:48:51 -040011911 **/
11912int32_t
11913lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
11914 struct lpfc_queue *cq, uint32_t subtype)
11915{
11916 struct lpfc_mbx_mq_create *mq_create;
11917 struct lpfc_mbx_mq_create_ext *mq_create_ext;
11918 struct lpfc_dmabuf *dmabuf;
11919 LPFC_MBOXQ_t *mbox;
11920 int rc, length, status = 0;
11921 uint32_t shdr_status, shdr_add_status;
11922 union lpfc_sli4_cfg_shdr *shdr;
James Smart49198b32010-04-06 15:04:33 -040011923 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
James Smartb19a0612010-04-06 14:48:51 -040011924
James Smart49198b32010-04-06 15:04:33 -040011925 if (!phba->sli4_hba.pc_sli4_params.supported)
11926 hw_page_size = SLI4_PAGE_SIZE;
James Smartb19a0612010-04-06 14:48:51 -040011927
11928 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
11929 if (!mbox)
11930 return -ENOMEM;
11931 length = (sizeof(struct lpfc_mbx_mq_create_ext) -
11932 sizeof(struct lpfc_sli4_cfg_mhdr));
11933 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
11934 LPFC_MBOX_OPCODE_MQ_CREATE_EXT,
11935 length, LPFC_SLI4_MBX_EMBED);
11936
11937 mq_create_ext = &mbox->u.mqe.un.mq_create_ext;
James Smart5a6f1332011-03-11 16:05:35 -050011938 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr;
James Smart70f3c072010-12-15 17:57:33 -050011939 bf_set(lpfc_mbx_mq_create_ext_num_pages,
11940 &mq_create_ext->u.request, mq->page_count);
11941 bf_set(lpfc_mbx_mq_create_ext_async_evt_link,
11942 &mq_create_ext->u.request, 1);
11943 bf_set(lpfc_mbx_mq_create_ext_async_evt_fip,
James Smartb19a0612010-04-06 14:48:51 -040011944 &mq_create_ext->u.request, 1);
11945 bf_set(lpfc_mbx_mq_create_ext_async_evt_group5,
11946 &mq_create_ext->u.request, 1);
James Smart70f3c072010-12-15 17:57:33 -050011947 bf_set(lpfc_mbx_mq_create_ext_async_evt_fc,
11948 &mq_create_ext->u.request, 1);
11949 bf_set(lpfc_mbx_mq_create_ext_async_evt_sli,
11950 &mq_create_ext->u.request, 1);
James Smartb19a0612010-04-06 14:48:51 -040011951 bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1);
James Smart5a6f1332011-03-11 16:05:35 -050011952 bf_set(lpfc_mbox_hdr_version, &shdr->request,
11953 phba->sli4_hba.pc_sli4_params.mqv);
11954 if (phba->sli4_hba.pc_sli4_params.mqv == LPFC_Q_CREATE_VERSION_1)
11955 bf_set(lpfc_mbx_mq_create_ext_cq_id, &mq_create_ext->u.request,
11956 cq->queue_id);
11957 else
11958 bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context,
11959 cq->queue_id);
James Smartb19a0612010-04-06 14:48:51 -040011960 switch (mq->entry_count) {
11961 default:
11962 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11963 "0362 Unsupported MQ count. (%d)\n",
11964 mq->entry_count);
11965 if (mq->entry_count < 16)
11966 return -EINVAL;
11967 /* otherwise default to smallest count (drop through) */
11968 case 16:
James Smart5a6f1332011-03-11 16:05:35 -050011969 bf_set(lpfc_mq_context_ring_size,
11970 &mq_create_ext->u.request.context,
11971 LPFC_MQ_RING_SIZE_16);
James Smartb19a0612010-04-06 14:48:51 -040011972 break;
11973 case 32:
James Smart5a6f1332011-03-11 16:05:35 -050011974 bf_set(lpfc_mq_context_ring_size,
11975 &mq_create_ext->u.request.context,
11976 LPFC_MQ_RING_SIZE_32);
James Smartb19a0612010-04-06 14:48:51 -040011977 break;
11978 case 64:
James Smart5a6f1332011-03-11 16:05:35 -050011979 bf_set(lpfc_mq_context_ring_size,
11980 &mq_create_ext->u.request.context,
11981 LPFC_MQ_RING_SIZE_64);
James Smartb19a0612010-04-06 14:48:51 -040011982 break;
11983 case 128:
James Smart5a6f1332011-03-11 16:05:35 -050011984 bf_set(lpfc_mq_context_ring_size,
11985 &mq_create_ext->u.request.context,
11986 LPFC_MQ_RING_SIZE_128);
James Smartb19a0612010-04-06 14:48:51 -040011987 break;
11988 }
11989 list_for_each_entry(dmabuf, &mq->page_list, list) {
James Smart49198b32010-04-06 15:04:33 -040011990 memset(dmabuf->virt, 0, hw_page_size);
James Smartb19a0612010-04-06 14:48:51 -040011991 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo =
11992 putPaddrLow(dmabuf->phys);
11993 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi =
James Smart04c68492009-05-22 14:52:52 -040011994 putPaddrHigh(dmabuf->phys);
11995 }
11996 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
James Smartb19a0612010-04-06 14:48:51 -040011997 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
11998 &mq_create_ext->u.response);
11999 if (rc != MBX_SUCCESS) {
12000 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12001 "2795 MQ_CREATE_EXT failed with "
12002 "status x%x. Failback to MQ_CREATE.\n",
12003 rc);
12004 lpfc_mq_create_fb_init(phba, mq, mbox, cq);
12005 mq_create = &mbox->u.mqe.un.mq_create;
12006 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
12007 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr;
12008 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
12009 &mq_create->u.response);
12010 }
12011
James Smart04c68492009-05-22 14:52:52 -040012012 /* The IOCTL status is embedded in the mailbox subheader. */
James Smart04c68492009-05-22 14:52:52 -040012013 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
12014 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
12015 if (shdr_status || shdr_add_status || rc) {
12016 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12017 "2502 MQ_CREATE mailbox failed with "
12018 "status x%x add_status x%x, mbx status x%x\n",
12019 shdr_status, shdr_add_status, rc);
12020 status = -ENXIO;
12021 goto out;
12022 }
James Smart04c68492009-05-22 14:52:52 -040012023 if (mq->queue_id == 0xFFFF) {
12024 status = -ENXIO;
12025 goto out;
12026 }
12027 mq->type = LPFC_MQ;
James Smart2a622bf2011-02-16 12:40:06 -050012028 mq->assoc_qid = cq->queue_id;
James Smart04c68492009-05-22 14:52:52 -040012029 mq->subtype = subtype;
12030 mq->host_index = 0;
12031 mq->hba_index = 0;
12032
12033 /* link the mq onto the parent cq child list */
12034 list_add_tail(&mq->list, &cq->child_list);
12035out:
James Smart8fa38512009-07-19 10:01:03 -040012036 mempool_free(mbox, phba->mbox_mem_pool);
James Smart04c68492009-05-22 14:52:52 -040012037 return status;
12038}
12039
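/*
 * Illustrative sketch only, not part of the driver: creating the mailbox
 * queue against the mailbox completion queue. LPFC_MBOX is assumed to be
 * the subtype the init path uses for the MQ; the fallback to the plain
 * MQ_CREATE command on older firmware is handled inside lpfc_mq_create().
 */
static int lpfc_example_setup_mq(struct lpfc_hba *phba, struct lpfc_queue *mq,
				 struct lpfc_queue *mbx_cq)
{
	if (lpfc_mq_create(phba, mq, mbx_cq, LPFC_MBOX))
		return -ENXIO;
	return 0;
}
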
12040/**
James Smart4f774512009-05-22 14:52:35 -040012041 * lpfc_wq_create - Create a Work Queue on the HBA
12042 * @phba: HBA structure that indicates port to create a queue on.
12043 * @wq: The queue structure to use to create the work queue.
12044 * @cq: The completion queue to bind this work queue to.
12045 * @subtype: The subtype of the work queue indicating its functionality.
12046 *
12047 * This function creates a work queue, as detailed in @wq, on a port, described
12048 * by @phba by sending a WQ_CREATE mailbox command to the HBA.
12049 *
12050 * The @phba struct is used to send mailbox command to HBA. The @wq struct
12051 * is used to get the entry count and entry size that are necessary to
12052 * determine the number of pages to allocate and use for this queue. The @cq
12053 * is used to indicate which completion queue to bind this work queue to. This
12054 * function will send the WQ_CREATE mailbox command to the HBA to setup the
12055 * work queue. This function is synchronous and will wait for the mailbox
12056 * command to complete before returning.
12057 *
12058 * On success this function will return a zero. If unable to allocate enough
James Smartd439d282010-09-29 11:18:45 -040012059 * memory this function will return -ENOMEM. If the queue create mailbox command
12060 * fails this function will return -ENXIO.
James Smart4f774512009-05-22 14:52:35 -040012061 **/
12062uint32_t
12063lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
12064 struct lpfc_queue *cq, uint32_t subtype)
12065{
12066 struct lpfc_mbx_wq_create *wq_create;
12067 struct lpfc_dmabuf *dmabuf;
12068 LPFC_MBOXQ_t *mbox;
12069 int rc, length, status = 0;
12070 uint32_t shdr_status, shdr_add_status;
12071 union lpfc_sli4_cfg_shdr *shdr;
James Smart49198b32010-04-06 15:04:33 -040012072 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
James Smart5a6f1332011-03-11 16:05:35 -050012073 struct dma_address *page;
James Smart49198b32010-04-06 15:04:33 -040012074
12075 if (!phba->sli4_hba.pc_sli4_params.supported)
12076 hw_page_size = SLI4_PAGE_SIZE;
James Smart4f774512009-05-22 14:52:35 -040012077
12078 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
12079 if (!mbox)
12080 return -ENOMEM;
12081 length = (sizeof(struct lpfc_mbx_wq_create) -
12082 sizeof(struct lpfc_sli4_cfg_mhdr));
12083 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
12084 LPFC_MBOX_OPCODE_FCOE_WQ_CREATE,
12085 length, LPFC_SLI4_MBX_EMBED);
12086 wq_create = &mbox->u.mqe.un.wq_create;
James Smart5a6f1332011-03-11 16:05:35 -050012087 shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr;
James Smart4f774512009-05-22 14:52:35 -040012088 bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request,
12089 wq->page_count);
12090 bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
12091 cq->queue_id);
James Smart5a6f1332011-03-11 16:05:35 -050012092 bf_set(lpfc_mbox_hdr_version, &shdr->request,
12093 phba->sli4_hba.pc_sli4_params.wqv);
12094 if (phba->sli4_hba.pc_sli4_params.wqv == LPFC_Q_CREATE_VERSION_1) {
12095 bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1,
12096 wq->entry_count);
12097 switch (wq->entry_size) {
12098 default:
12099 case 64:
12100 bf_set(lpfc_mbx_wq_create_wqe_size,
12101 &wq_create->u.request_1,
12102 LPFC_WQ_WQE_SIZE_64);
12103 break;
12104 case 128:
12105 bf_set(lpfc_mbx_wq_create_wqe_size,
12106 &wq_create->u.request_1,
12107 LPFC_WQ_WQE_SIZE_128);
12108 break;
12109 }
12110 bf_set(lpfc_mbx_wq_create_page_size, &wq_create->u.request_1,
12111 (PAGE_SIZE/SLI4_PAGE_SIZE));
12112 page = wq_create->u.request_1.page;
12113 } else {
12114 page = wq_create->u.request.page;
12115 }
James Smart4f774512009-05-22 14:52:35 -040012116 list_for_each_entry(dmabuf, &wq->page_list, list) {
James Smart49198b32010-04-06 15:04:33 -040012117 memset(dmabuf->virt, 0, hw_page_size);
James Smart5a6f1332011-03-11 16:05:35 -050012118 page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys);
12119 page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys);
James Smart4f774512009-05-22 14:52:35 -040012120 }
12121 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
12122 /* The IOCTL status is embedded in the mailbox subheader. */
James Smart4f774512009-05-22 14:52:35 -040012123 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
12124 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
12125 if (shdr_status || shdr_add_status || rc) {
12126 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12127 "2503 WQ_CREATE mailbox failed with "
12128 "status x%x add_status x%x, mbx status x%x\n",
12129 shdr_status, shdr_add_status, rc);
12130 status = -ENXIO;
12131 goto out;
12132 }
12133 wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id, &wq_create->u.response);
12134 if (wq->queue_id == 0xFFFF) {
12135 status = -ENXIO;
12136 goto out;
12137 }
12138 wq->type = LPFC_WQ;
James Smart2a622bf2011-02-16 12:40:06 -050012139 wq->assoc_qid = cq->queue_id;
James Smart4f774512009-05-22 14:52:35 -040012140 wq->subtype = subtype;
12141 wq->host_index = 0;
12142 wq->hba_index = 0;
James Smartff78d8f2011-12-13 13:21:35 -050012143 wq->entry_repost = LPFC_RELEASE_NOTIFICATION_INTERVAL;
James Smart4f774512009-05-22 14:52:35 -040012144
12145 /* link the wq onto the parent cq child list */
12146 list_add_tail(&wq->list, &cq->child_list);
12147out:
James Smart8fa38512009-07-19 10:01:03 -040012148 mempool_free(mbox, phba->mbox_mem_pool);
James Smart4f774512009-05-22 14:52:35 -040012149 return status;
12150}
12151
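/*
 * Illustrative sketch only, not part of the driver: creating a work queue
 * and attaching it to its completion queue. LPFC_FCP is assumed as the
 * subtype for an FCP work queue; an ELS work queue would use LPFC_ELS.
 */
static int lpfc_example_setup_wq(struct lpfc_hba *phba, struct lpfc_queue *wq,
				 struct lpfc_queue *cq)
{
	if (lpfc_wq_create(phba, wq, cq, LPFC_FCP))
		return -ENXIO;
	/* WQEs may be posted once the WQ has a valid queue_id */
	return 0;
}
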
12152/**
James Smart73d91e52011-10-10 21:32:10 -040012153 * lpfc_rq_adjust_repost - Adjust entry_repost for an RQ
12154 * @phba: HBA structure that indicates port to create a queue on.
12155 * @rq: The queue structure to use for the receive queue.
12156 * @qno: The associated HBQ number
12157 *
12158 *
12159 * For SLI4 we need to adjust the RQ repost value based on
12160 * the number of buffers that are initially posted to the RQ.
12161 */
12162void
12163lpfc_rq_adjust_repost(struct lpfc_hba *phba, struct lpfc_queue *rq, int qno)
12164{
12165 uint32_t cnt;
12166
12167 cnt = lpfc_hbq_defs[qno]->entry_count;
12168
12169 /* Recalc repost for RQs based on buffers initially posted */
12170 cnt = (cnt >> 3);
12171 if (cnt < LPFC_QUEUE_MIN_REPOST)
12172 cnt = LPFC_QUEUE_MIN_REPOST;
12173
12174 rq->entry_repost = cnt;
12175}
12176
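/*
 * Illustrative sketch only, not part of the driver: after a receive queue
 * pair is created, its repost threshold is realigned with the number of
 * buffers the HBQ layer will actually post. LPFC_ELS_HBQ is assumed here
 * to be the HBQ number backing the ELS receive queues.
 */
static void lpfc_example_fixup_rq_repost(struct lpfc_hba *phba,
					 struct lpfc_queue *hrq,
					 struct lpfc_queue *drq)
{
	lpfc_rq_adjust_repost(phba, hrq, LPFC_ELS_HBQ);
	lpfc_rq_adjust_repost(phba, drq, LPFC_ELS_HBQ);
}
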
12177/**
James Smart4f774512009-05-22 14:52:35 -040012178 * lpfc_rq_create - Create a Receive Queue on the HBA
12179 * @phba: HBA structure that indicates port to create a queue on.
12180 * @hrq: The queue structure to use to create the header receive queue.
12181 * @drq: The queue structure to use to create the data receive queue.
12182 * @cq: The completion queue to bind this receive queue pair to.
12183 *
12184 * This function creates a receive buffer queue pair, as detailed in @hrq and
12185 * @drq, on a port, described by @phba by sending a RQ_CREATE mailbox command
12186 * to the HBA.
12187 *
12188 * The @phba struct is used to send mailbox command to HBA. The @drq and @hrq
12189 * struct is used to get the entry count that is necessary to determine the
12190 * number of pages to use for this queue. The @cq is used to indicate which
12191 * completion queue to bind received buffers that are posted to these queues to.
12192 * This function will send the RQ_CREATE mailbox command to the HBA to setup the
12193 * receive queue pair. This function is synchronous and will wait for the
12194 * mailbox command to complete before returning.
12195 *
12196 * On success this function will return a zero. If unable to allocate enough
James Smartd439d282010-09-29 11:18:45 -040012197 * memory this function will return -ENOMEM. If the queue create mailbox command
12198 * fails this function will return -ENXIO.
James Smart4f774512009-05-22 14:52:35 -040012199 **/
12200uint32_t
12201lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
12202 struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype)
12203{
12204 struct lpfc_mbx_rq_create *rq_create;
12205 struct lpfc_dmabuf *dmabuf;
12206 LPFC_MBOXQ_t *mbox;
12207 int rc, length, status = 0;
12208 uint32_t shdr_status, shdr_add_status;
12209 union lpfc_sli4_cfg_shdr *shdr;
James Smart49198b32010-04-06 15:04:33 -040012210 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
12211
12212 if (!phba->sli4_hba.pc_sli4_params.supported)
12213 hw_page_size = SLI4_PAGE_SIZE;
James Smart4f774512009-05-22 14:52:35 -040012214
12215 if (hrq->entry_count != drq->entry_count)
12216 return -EINVAL;
12217 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
12218 if (!mbox)
12219 return -ENOMEM;
12220 length = (sizeof(struct lpfc_mbx_rq_create) -
12221 sizeof(struct lpfc_sli4_cfg_mhdr));
12222 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
12223 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
12224 length, LPFC_SLI4_MBX_EMBED);
12225 rq_create = &mbox->u.mqe.un.rq_create;
James Smart5a6f1332011-03-11 16:05:35 -050012226 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
12227 bf_set(lpfc_mbox_hdr_version, &shdr->request,
12228 phba->sli4_hba.pc_sli4_params.rqv);
12229 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
12230 bf_set(lpfc_rq_context_rqe_count_1,
12231 &rq_create->u.request.context,
12232 hrq->entry_count);
12233 rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE;
James Smartc31098c2011-04-16 11:03:33 -040012234 bf_set(lpfc_rq_context_rqe_size,
12235 &rq_create->u.request.context,
12236 LPFC_RQE_SIZE_8);
12237 bf_set(lpfc_rq_context_page_size,
12238 &rq_create->u.request.context,
12239 (PAGE_SIZE/SLI4_PAGE_SIZE));
James Smart5a6f1332011-03-11 16:05:35 -050012240 } else {
12241 switch (hrq->entry_count) {
12242 default:
12243 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12244 "2535 Unsupported RQ count. (%d)\n",
12245 hrq->entry_count);
12246 if (hrq->entry_count < 512)
12247 return -EINVAL;
12248 /* otherwise default to smallest count (drop through) */
12249 case 512:
12250 bf_set(lpfc_rq_context_rqe_count,
12251 &rq_create->u.request.context,
12252 LPFC_RQ_RING_SIZE_512);
12253 break;
12254 case 1024:
12255 bf_set(lpfc_rq_context_rqe_count,
12256 &rq_create->u.request.context,
12257 LPFC_RQ_RING_SIZE_1024);
12258 break;
12259 case 2048:
12260 bf_set(lpfc_rq_context_rqe_count,
12261 &rq_create->u.request.context,
12262 LPFC_RQ_RING_SIZE_2048);
12263 break;
12264 case 4096:
12265 bf_set(lpfc_rq_context_rqe_count,
12266 &rq_create->u.request.context,
12267 LPFC_RQ_RING_SIZE_4096);
12268 break;
12269 }
12270 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
12271 LPFC_HDR_BUF_SIZE);
James Smart4f774512009-05-22 14:52:35 -040012272 }
12273 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
12274 cq->queue_id);
12275 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
12276 hrq->page_count);
James Smart4f774512009-05-22 14:52:35 -040012277 list_for_each_entry(dmabuf, &hrq->page_list, list) {
James Smart49198b32010-04-06 15:04:33 -040012278 memset(dmabuf->virt, 0, hw_page_size);
James Smart4f774512009-05-22 14:52:35 -040012279 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
12280 putPaddrLow(dmabuf->phys);
12281 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
12282 putPaddrHigh(dmabuf->phys);
12283 }
12284 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
12285 /* The IOCTL status is embedded in the mailbox subheader. */
James Smart4f774512009-05-22 14:52:35 -040012286 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
12287 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
12288 if (shdr_status || shdr_add_status || rc) {
12289 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12290 "2504 RQ_CREATE mailbox failed with "
12291 "status x%x add_status x%x, mbx status x%x\n",
12292 shdr_status, shdr_add_status, rc);
12293 status = -ENXIO;
12294 goto out;
12295 }
12296 hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
12297 if (hrq->queue_id == 0xFFFF) {
12298 status = -ENXIO;
12299 goto out;
12300 }
12301 hrq->type = LPFC_HRQ;
James Smart2a622bf2011-02-16 12:40:06 -050012302 hrq->assoc_qid = cq->queue_id;
James Smart4f774512009-05-22 14:52:35 -040012303 hrq->subtype = subtype;
12304 hrq->host_index = 0;
12305 hrq->hba_index = 0;
12306
12307 /* now create the data queue */
12308 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
12309 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
12310 length, LPFC_SLI4_MBX_EMBED);
James Smart5a6f1332011-03-11 16:05:35 -050012311 bf_set(lpfc_mbox_hdr_version, &shdr->request,
12312 phba->sli4_hba.pc_sli4_params.rqv);
12313 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
12314 bf_set(lpfc_rq_context_rqe_count_1,
James Smartc31098c2011-04-16 11:03:33 -040012315 &rq_create->u.request.context, hrq->entry_count);
James Smart5a6f1332011-03-11 16:05:35 -050012316 rq_create->u.request.context.buffer_size = LPFC_DATA_BUF_SIZE;
James Smartc31098c2011-04-16 11:03:33 -040012317 bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context,
12318 LPFC_RQE_SIZE_8);
12319 bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context,
12320 (PAGE_SIZE/SLI4_PAGE_SIZE));
James Smart5a6f1332011-03-11 16:05:35 -050012321 } else {
12322 switch (drq->entry_count) {
12323 default:
12324 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12325 "2536 Unsupported RQ count. (%d)\n",
12326 drq->entry_count);
12327 if (drq->entry_count < 512)
12328 return -EINVAL;
12329 /* otherwise default to smallest count (drop through) */
12330 case 512:
12331 bf_set(lpfc_rq_context_rqe_count,
12332 &rq_create->u.request.context,
12333 LPFC_RQ_RING_SIZE_512);
12334 break;
12335 case 1024:
12336 bf_set(lpfc_rq_context_rqe_count,
12337 &rq_create->u.request.context,
12338 LPFC_RQ_RING_SIZE_1024);
12339 break;
12340 case 2048:
12341 bf_set(lpfc_rq_context_rqe_count,
12342 &rq_create->u.request.context,
12343 LPFC_RQ_RING_SIZE_2048);
12344 break;
12345 case 4096:
12346 bf_set(lpfc_rq_context_rqe_count,
12347 &rq_create->u.request.context,
12348 LPFC_RQ_RING_SIZE_4096);
12349 break;
12350 }
12351 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
12352 LPFC_DATA_BUF_SIZE);
James Smart4f774512009-05-22 14:52:35 -040012353 }
12354 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
12355 cq->queue_id);
12356 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
12357 drq->page_count);
James Smart4f774512009-05-22 14:52:35 -040012358 list_for_each_entry(dmabuf, &drq->page_list, list) {
12359 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
12360 putPaddrLow(dmabuf->phys);
12361 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
12362 putPaddrHigh(dmabuf->phys);
12363 }
12364 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
12365 /* The IOCTL status is embedded in the mailbox subheader. */
12366 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
12367 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
12368 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
12369 if (shdr_status || shdr_add_status || rc) {
12370 status = -ENXIO;
12371 goto out;
12372 }
12373 drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
12374 if (drq->queue_id == 0xFFFF) {
12375 status = -ENXIO;
12376 goto out;
12377 }
12378 drq->type = LPFC_DRQ;
James Smart2a622bf2011-02-16 12:40:06 -050012379 drq->assoc_qid = cq->queue_id;
James Smart4f774512009-05-22 14:52:35 -040012380 drq->subtype = subtype;
12381 drq->host_index = 0;
12382 drq->hba_index = 0;
12383
12384 /* link the header and data RQs onto the parent cq child list */
12385 list_add_tail(&hrq->list, &cq->child_list);
12386 list_add_tail(&drq->list, &cq->child_list);
12387
12388out:
James Smart8fa38512009-07-19 10:01:03 -040012389 mempool_free(mbox, phba->mbox_mem_pool);
James Smart4f774512009-05-22 14:52:35 -040012390 return status;
12391}
12392
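/*
 * Illustrative sketch only, not part of the driver: the header and data
 * receive queues are always created as a pair against one completion
 * queue, and both must have been allocated with the same entry count or
 * lpfc_rq_create() returns -EINVAL. LPFC_USOL is assumed as the subtype
 * used for unsolicited receive traffic.
 */
static int lpfc_example_setup_rq_pair(struct lpfc_hba *phba,
				      struct lpfc_queue *hrq,
				      struct lpfc_queue *drq,
				      struct lpfc_queue *cq)
{
	if (lpfc_rq_create(phba, hrq, drq, cq, LPFC_USOL))
		return -ENXIO;
	return 0;
}
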
12393/**
12394 * lpfc_eq_destroy - Destroy an event Queue on the HBA
12395 * @eq: The queue structure associated with the queue to destroy.
12396 *
12397 * This function destroys a queue, as detailed in @eq, by sending a mailbox
12398 * command, specific to the type of queue, to the HBA.
12399 *
12400 * The @eq struct is used to get the queue ID of the queue to destroy.
12401 *
12402 * On success this function will return a zero. If the queue destroy mailbox
James Smartd439d282010-09-29 11:18:45 -040012403 * command fails this function will return -ENXIO.
James Smart4f774512009-05-22 14:52:35 -040012404 **/
12405uint32_t
12406lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
12407{
12408 LPFC_MBOXQ_t *mbox;
12409 int rc, length, status = 0;
12410 uint32_t shdr_status, shdr_add_status;
12411 union lpfc_sli4_cfg_shdr *shdr;
12412
12413 if (!eq)
12414 return -ENODEV;
12415 mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL);
12416 if (!mbox)
12417 return -ENOMEM;
12418 length = (sizeof(struct lpfc_mbx_eq_destroy) -
12419 sizeof(struct lpfc_sli4_cfg_mhdr));
12420 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
12421 LPFC_MBOX_OPCODE_EQ_DESTROY,
12422 length, LPFC_SLI4_MBX_EMBED);
12423 bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request,
12424 eq->queue_id);
12425 mbox->vport = eq->phba->pport;
12426 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
12427
12428 rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL);
12429 /* The IOCTL status is embedded in the mailbox subheader. */
12430 shdr = (union lpfc_sli4_cfg_shdr *)
12431 &mbox->u.mqe.un.eq_destroy.header.cfg_shdr;
12432 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
12433 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
12434 if (shdr_status || shdr_add_status || rc) {
12435 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12436 "2505 EQ_DESTROY mailbox failed with "
12437 "status x%x add_status x%x, mbx status x%x\n",
12438 shdr_status, shdr_add_status, rc);
12439 status = -ENXIO;
12440 }
12441
12442 /* Remove eq from any list */
12443 list_del_init(&eq->list);
James Smart8fa38512009-07-19 10:01:03 -040012444 mempool_free(mbox, eq->phba->mbox_mem_pool);
James Smart4f774512009-05-22 14:52:35 -040012445 return status;
12446}
12447
12448/**
12449 * lpfc_cq_destroy - Destroy a Completion Queue on the HBA
12450 * @cq: The queue structure associated with the queue to destroy.
12451 *
12452 * This function destroys a queue, as detailed in @cq, by sending a mailbox
12453 * command, specific to the type of queue, to the HBA.
12454 *
12455 * The @cq struct is used to get the queue ID of the queue to destroy.
12456 *
12457 * On success this function will return a zero. If the queue destroy mailbox
James Smartd439d282010-09-29 11:18:45 -040012458 * command fails this function will return -ENXIO.
James Smart4f774512009-05-22 14:52:35 -040012459 **/
12460uint32_t
12461lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
12462{
12463 LPFC_MBOXQ_t *mbox;
12464 int rc, length, status = 0;
12465 uint32_t shdr_status, shdr_add_status;
12466 union lpfc_sli4_cfg_shdr *shdr;
12467
12468 if (!cq)
12469 return -ENODEV;
12470 mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL);
12471 if (!mbox)
12472 return -ENOMEM;
12473 length = (sizeof(struct lpfc_mbx_cq_destroy) -
12474 sizeof(struct lpfc_sli4_cfg_mhdr));
12475 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
12476 LPFC_MBOX_OPCODE_CQ_DESTROY,
12477 length, LPFC_SLI4_MBX_EMBED);
12478 bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request,
12479 cq->queue_id);
12480 mbox->vport = cq->phba->pport;
12481 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
12482 rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL);
12483 /* The IOCTL status is embedded in the mailbox subheader. */
12484 shdr = (union lpfc_sli4_cfg_shdr *)
12485 &mbox->u.mqe.un.wq_create.header.cfg_shdr;
12486 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
12487 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
12488 if (shdr_status || shdr_add_status || rc) {
12489 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12490 "2506 CQ_DESTROY mailbox failed with "
12491 "status x%x add_status x%x, mbx status x%x\n",
12492 shdr_status, shdr_add_status, rc);
12493 status = -ENXIO;
12494 }
12495 /* Remove cq from any list */
12496 list_del_init(&cq->list);
James Smart8fa38512009-07-19 10:01:03 -040012497 mempool_free(mbox, cq->phba->mbox_mem_pool);
James Smart4f774512009-05-22 14:52:35 -040012498 return status;
12499}
12500
12501/**
James Smart04c68492009-05-22 14:52:52 -040012502 * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA
12503 * @mq: The queue structure associated with the queue to destroy.
12504 *
12505 * This function destroys a queue, as detailed in @mq, by sending a mailbox
12506 * command, specific to the type of queue, to the HBA.
12507 *
12508 * The @mq struct is used to get the queue ID of the queue to destroy.
12509 *
12510 * On success this function will return a zero. If the queue destroy mailbox
James Smartd439d282010-09-29 11:18:45 -040012511 * command fails this function will return -ENXIO.
James Smart04c68492009-05-22 14:52:52 -040012512 **/
12513uint32_t
12514lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
12515{
12516 LPFC_MBOXQ_t *mbox;
12517 int rc, length, status = 0;
12518 uint32_t shdr_status, shdr_add_status;
12519 union lpfc_sli4_cfg_shdr *shdr;
12520
12521 if (!mq)
12522 return -ENODEV;
12523 mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL);
12524 if (!mbox)
12525 return -ENOMEM;
12526 length = (sizeof(struct lpfc_mbx_mq_destroy) -
12527 sizeof(struct lpfc_sli4_cfg_mhdr));
12528 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
12529 LPFC_MBOX_OPCODE_MQ_DESTROY,
12530 length, LPFC_SLI4_MBX_EMBED);
12531 bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request,
12532 mq->queue_id);
12533 mbox->vport = mq->phba->pport;
12534 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
12535 rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL);
12536 /* The IOCTL status is embedded in the mailbox subheader. */
12537 shdr = (union lpfc_sli4_cfg_shdr *)
12538 &mbox->u.mqe.un.mq_destroy.header.cfg_shdr;
12539 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
12540 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
12541 if (shdr_status || shdr_add_status || rc) {
12542 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12543 "2507 MQ_DESTROY mailbox failed with "
12544 "status x%x add_status x%x, mbx status x%x\n",
12545 shdr_status, shdr_add_status, rc);
12546 status = -ENXIO;
12547 }
12548 /* Remove mq from any list */
12549 list_del_init(&mq->list);
James Smart8fa38512009-07-19 10:01:03 -040012550 mempool_free(mbox, mq->phba->mbox_mem_pool);
James Smart04c68492009-05-22 14:52:52 -040012551 return status;
12552}
12553
12554/**
James Smart4f774512009-05-22 14:52:35 -040012555 * lpfc_wq_destroy - Destroy a Work Queue on the HBA
12556 * @wq: The queue structure associated with the queue to destroy.
12557 *
12558 * This function destroys a queue, as detailed in @wq, by sending a mailbox
12559 * command, specific to the type of queue, to the HBA.
12560 *
12561 * The @wq struct is used to get the queue ID of the queue to destroy.
12562 *
12563 * On success this function will return a zero. If the queue destroy mailbox
James Smartd439d282010-09-29 11:18:45 -040012564 * command fails this function will return -ENXIO.
James Smart4f774512009-05-22 14:52:35 -040012565 **/
12566uint32_t
12567lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
12568{
12569 LPFC_MBOXQ_t *mbox;
12570 int rc, length, status = 0;
12571 uint32_t shdr_status, shdr_add_status;
12572 union lpfc_sli4_cfg_shdr *shdr;
12573
12574 if (!wq)
12575 return -ENODEV;
12576 mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL);
12577 if (!mbox)
12578 return -ENOMEM;
12579 length = (sizeof(struct lpfc_mbx_wq_destroy) -
12580 sizeof(struct lpfc_sli4_cfg_mhdr));
12581 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
12582 LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY,
12583 length, LPFC_SLI4_MBX_EMBED);
12584 bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request,
12585 wq->queue_id);
12586 mbox->vport = wq->phba->pport;
12587 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
12588 rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL);
12589 shdr = (union lpfc_sli4_cfg_shdr *)
12590 &mbox->u.mqe.un.wq_destroy.header.cfg_shdr;
12591 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
12592 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
12593 if (shdr_status || shdr_add_status || rc) {
12594 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12595 "2508 WQ_DESTROY mailbox failed with "
12596 "status x%x add_status x%x, mbx status x%x\n",
12597 shdr_status, shdr_add_status, rc);
12598 status = -ENXIO;
12599 }
12600 /* Remove wq from any list */
12601 list_del_init(&wq->list);
James Smart8fa38512009-07-19 10:01:03 -040012602 mempool_free(mbox, wq->phba->mbox_mem_pool);
James Smart4f774512009-05-22 14:52:35 -040012603 return status;
12604}
12605
12606/**
12607 * lpfc_rq_destroy - Destroy a Receive Queue on the HBA
12608 * @hrq: The header receive queue structure to destroy.
12609 * @drq: The data receive queue structure to destroy.
12610 *
12611 * This function destroys the receive queue pair, as detailed in @hrq and @drq,
12612 * by sending a mailbox command, specific to the type of queue, to the HBA.
12613 * The @hrq and @drq structs are used to get the queue IDs to destroy.
12614 *
12615 * On success this function will return a zero. If the queue destroy mailbox
James Smartd439d282010-09-29 11:18:45 -040012616 * command fails this function will return -ENXIO.
James Smart4f774512009-05-22 14:52:35 -040012617 **/
12618uint32_t
12619lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
12620 struct lpfc_queue *drq)
12621{
12622 LPFC_MBOXQ_t *mbox;
12623 int rc, length, status = 0;
12624 uint32_t shdr_status, shdr_add_status;
12625 union lpfc_sli4_cfg_shdr *shdr;
12626
12627 if (!hrq || !drq)
12628 return -ENODEV;
12629 mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL);
12630 if (!mbox)
12631 return -ENOMEM;
12632 length = (sizeof(struct lpfc_mbx_rq_destroy) -
James Smartfedd3b72011-02-16 12:39:24 -050012633 sizeof(struct lpfc_sli4_cfg_mhdr));
James Smart4f774512009-05-22 14:52:35 -040012634 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
12635 LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY,
12636 length, LPFC_SLI4_MBX_EMBED);
12637 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
12638 hrq->queue_id);
12639 mbox->vport = hrq->phba->pport;
12640 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
12641 rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL);
12642 /* The IOCTL status is embedded in the mailbox subheader. */
12643 shdr = (union lpfc_sli4_cfg_shdr *)
12644 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
12645 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
12646 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
12647 if (shdr_status || shdr_add_status || rc) {
12648 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12649 "2509 RQ_DESTROY mailbox failed with "
12650 "status x%x add_status x%x, mbx status x%x\n",
12651 shdr_status, shdr_add_status, rc);
12652 if (rc != MBX_TIMEOUT)
12653 mempool_free(mbox, hrq->phba->mbox_mem_pool);
12654 return -ENXIO;
12655 }
12656 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
12657 drq->queue_id);
12658 rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL);
12659 shdr = (union lpfc_sli4_cfg_shdr *)
12660 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
12661 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
12662 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
12663 if (shdr_status || shdr_add_status || rc) {
12664 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12665 "2510 RQ_DESTROY mailbox failed with "
12666 "status x%x add_status x%x, mbx status x%x\n",
12667 shdr_status, shdr_add_status, rc);
12668 status = -ENXIO;
12669 }
12670 list_del_init(&hrq->list);
12671 list_del_init(&drq->list);
James Smart8fa38512009-07-19 10:01:03 -040012672 mempool_free(mbox, hrq->phba->mbox_mem_pool);
James Smart4f774512009-05-22 14:52:35 -040012673 return status;
12674}
12675
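/*
 * Illustrative sketch only, not part of the driver: queues are torn down
 * leaf-first with the destroy routines above - a work queue before the
 * completion queue it feeds, and the completion queue before its event
 * queue. The pointers are assumed to reference successfully created queues.
 */
static void lpfc_example_teardown(struct lpfc_hba *phba,
				  struct lpfc_queue *wq, struct lpfc_queue *cq,
				  struct lpfc_queue *eq)
{
	lpfc_wq_destroy(phba, wq);	/* child of the CQ */
	lpfc_cq_destroy(phba, cq);	/* child of the EQ */
	lpfc_eq_destroy(phba, eq);	/* parent last */
}
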
12676/**
12677 * lpfc_sli4_post_sgl - Post scatter gather list for an XRI to HBA
12678 * @phba: pointer to lpfc hba data structure.
12679 * @pdma_phys_addr0: Physical address of the 1st SGL page.
12680 * @pdma_phys_addr1: Physical address of the 2nd SGL page.
12681 * @xritag: the xritag that ties this io to the SGL pages.
12682 *
12683 * This routine will post the sgl pages for the IO that has the xritag
12684 * that is in the iocbq structure. The xritag is assigned during iocbq
12685 * creation and persists for as long as the driver is loaded.
12686 * If the caller has fewer than 256 scatter gather segments to map then
12687 * pdma_phys_addr1 should be 0.
12688 * If the caller needs to map more than 256 scatter gather segments then
12689 * pdma_phys_addr1 should be a valid physical address.
12690 * The physical address for SGLs must be 64 byte aligned.
12691 * If two SGLs are to be mapped, the first one must have 256 entries and
12692 * the second SGL can have between 1 and 256 entries.
12693 *
12694 * Return codes:
12695 * 0 - Success
12696 * -ENXIO, -ENOMEM - Failure
12697 **/
12698int
12699lpfc_sli4_post_sgl(struct lpfc_hba *phba,
12700 dma_addr_t pdma_phys_addr0,
12701 dma_addr_t pdma_phys_addr1,
12702 uint16_t xritag)
12703{
12704 struct lpfc_mbx_post_sgl_pages *post_sgl_pages;
12705 LPFC_MBOXQ_t *mbox;
12706 int rc;
12707 uint32_t shdr_status, shdr_add_status;
James Smart6d368e52011-05-24 11:44:12 -040012708 uint32_t mbox_tmo;
James Smart4f774512009-05-22 14:52:35 -040012709 union lpfc_sli4_cfg_shdr *shdr;
12710
12711 if (xritag == NO_XRI) {
12712 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12713 "0364 Invalid param:\n");
12714 return -EINVAL;
12715 }
12716
12717 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
12718 if (!mbox)
12719 return -ENOMEM;
12720
12721 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
12722 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
12723 sizeof(struct lpfc_mbx_post_sgl_pages) -
James Smartfedd3b72011-02-16 12:39:24 -050012724 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
James Smart4f774512009-05-22 14:52:35 -040012725
12726 post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *)
12727 &mbox->u.mqe.un.post_sgl_pages;
12728 bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag);
12729 bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1);
12730
12731 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo =
12732 cpu_to_le32(putPaddrLow(pdma_phys_addr0));
12733 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi =
12734 cpu_to_le32(putPaddrHigh(pdma_phys_addr0));
12735
12736 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo =
12737 cpu_to_le32(putPaddrLow(pdma_phys_addr1));
12738 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi =
12739 cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
12740 if (!phba->sli4_hba.intr_enable)
12741 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
James Smart6d368e52011-05-24 11:44:12 -040012742 else {
James Smarta183a152011-10-10 21:32:43 -040012743 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
James Smart6d368e52011-05-24 11:44:12 -040012744 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
12745 }
James Smart4f774512009-05-22 14:52:35 -040012746 /* The IOCTL status is embedded in the mailbox subheader. */
12747 shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
12748 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
12749 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
12750 if (rc != MBX_TIMEOUT)
12751 mempool_free(mbox, phba->mbox_mem_pool);
12752 if (shdr_status || shdr_add_status || rc) {
12753 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12754 "2511 POST_SGL mailbox failed with "
12755 "status x%x add_status x%x, mbx status x%x\n",
12756 shdr_status, shdr_add_status, rc);
12757 rc = -ENXIO;
12758 }
12759	return rc;
12760}
James Smart4f774512009-05-22 14:52:35 -040012761
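/*
 * Illustrative sketch only, not part of the driver: registering a single
 * SGL page for an xri. With fewer than 256 scatter gather entries the
 * second page address is simply 0, as described above. @sglq is assumed
 * to be a populated struct lpfc_sglq with a valid physical address and
 * sli4_xritag.
 */
static int lpfc_example_register_sgl(struct lpfc_hba *phba,
				     struct lpfc_sglq *sglq)
{
	return lpfc_sli4_post_sgl(phba, sglq->phys, 0, sglq->sli4_xritag);
}
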
12762/**
James Smart88a2cfb2011-07-22 18:36:33 -040012763 * lpfc_sli4_alloc_xri - Get an available xri in the device's range
James Smart6d368e52011-05-24 11:44:12 -040012764 * @phba: pointer to lpfc hba data structure.
12765 *
12766 * This routine is invoked to allocate the next available logical xri
James Smart88a2cfb2011-07-22 18:36:33 -040012767 * from the driver's xri bitmask, consistent with the SLI-4 interface
12768 * spec. The xri_used and xri_count counters are updated under the
12769 * hbalock while the bit is claimed.
James Smart6d368e52011-05-24 11:44:12 -040012770 *
James Smart88a2cfb2011-07-22 18:36:33 -040012771 * Returns
12772 * A logical xri in the range 0 <= xri < max_xri if successful
12773 * NO_XRI if no xris are available.
12774 **/
James Smart6d368e52011-05-24 11:44:12 -040012775uint16_t
12776lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
12777{
12778 unsigned long xri;
12779
12780 /*
12781 * Fetch the next logical xri. Because this index is logical,
12782 * the driver starts at 0 each time.
12783 */
12784 spin_lock_irq(&phba->hbalock);
12785 xri = find_next_zero_bit(phba->sli4_hba.xri_bmask,
12786 phba->sli4_hba.max_cfg_param.max_xri, 0);
12787 if (xri >= phba->sli4_hba.max_cfg_param.max_xri) {
12788 spin_unlock_irq(&phba->hbalock);
12789 return NO_XRI;
12790 } else {
12791 set_bit(xri, phba->sli4_hba.xri_bmask);
12792 phba->sli4_hba.max_cfg_param.xri_used++;
12793 phba->sli4_hba.xri_count++;
12794 }
12795
12796 spin_unlock_irq(&phba->hbalock);
12797 return xri;
12798}
12799
12800/**
12801 * __lpfc_sli4_free_xri - Release an xri for reuse.
12802 * @phba: pointer to lpfc hba data structure.
12803 *
12804 * This routine is invoked to release an xri to the pool of available
12805 * xris maintained by the driver. The caller is expected to hold the hbalock.
12806 **/
12807void
12808__lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
12809{
12810 if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) {
12811 phba->sli4_hba.xri_count--;
12812 phba->sli4_hba.max_cfg_param.xri_used--;
12813 }
12814}
12815
12816/**
12817 * lpfc_sli4_free_xri - Release an xri for reuse.
12818 * @phba: pointer to lpfc hba data structure.
12819 *
12820 * This routine is invoked to release an xri to the pool of
12821 * available xris maintained by the driver.
12822 **/
12823void
12824lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
12825{
12826 spin_lock_irq(&phba->hbalock);
12827 __lpfc_sli4_free_xri(phba, xri);
12828 spin_unlock_irq(&phba->hbalock);
12829}
12830
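/*
 * Illustrative sketch only, not part of the driver: the logical xri
 * allocator is a plain bitmap. A typical user grabs an index, maps it to
 * a physical xri through phba->sli4_hba.xri_ids[] (as the SGL post code
 * below does), and returns the index with lpfc_sli4_free_xri() when the
 * resource is released.
 */
static uint16_t lpfc_example_try_xri(struct lpfc_hba *phba)
{
	uint16_t lxri = lpfc_sli4_alloc_xri(phba);

	if (lxri == NO_XRI)
		return NO_XRI;	/* pool exhausted */
	/* ... use phba->sli4_hba.xri_ids[lxri] here ... */
	lpfc_sli4_free_xri(phba, lxri);
	return lxri;
}
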
12831/**
James Smart4f774512009-05-22 14:52:35 -040012832 * lpfc_sli4_next_xritag - Get an xritag for the io
12833 * @phba: Pointer to HBA context object.
12834 *
12835 * This function gets an xritag for the iocb. The function returns the
12836 * allocated xritag if successful, else it returns NO_XRI (0xffff).
12837 * NO_XRI is not a valid xritag.
12839 * The caller is not required to hold any lock.
12840 **/
12841uint16_t
12842lpfc_sli4_next_xritag(struct lpfc_hba *phba)
12843{
James Smart6d368e52011-05-24 11:44:12 -040012844 uint16_t xri_index;
James Smart4f774512009-05-22 14:52:35 -040012845
James Smart6d368e52011-05-24 11:44:12 -040012846 xri_index = lpfc_sli4_alloc_xri(phba);
12847 if (xri_index != NO_XRI)
12848 return xri_index;
12849
12850 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
James Smart4f774512009-05-22 14:52:35 -040012851 "2004 Failed to allocate XRI.last XRITAG is %d"
12852 " Max XRI is %d, Used XRI is %d\n",
James Smart6d368e52011-05-24 11:44:12 -040012853 xri_index,
James Smart4f774512009-05-22 14:52:35 -040012854 phba->sli4_hba.max_cfg_param.max_xri,
12855 phba->sli4_hba.max_cfg_param.xri_used);
James Smart6d368e52011-05-24 11:44:12 -040012856 return NO_XRI;
James Smart4f774512009-05-22 14:52:35 -040012857}
12858
12859/**
James Smart6d368e52011-05-24 11:44:12 -040012860 * lpfc_sli4_post_els_sgl_list - post a block of ELS sgls to the port.
James Smart4f774512009-05-22 14:52:35 -040012861 * @phba: pointer to lpfc hba data structure.
12862 *
12863 * This routine is invoked to post a block of driver's sgl pages to the
12864 * HBA using non-embedded mailbox command. No Lock is held. This routine
12865 * is only called when the driver is loading and after all IO has been
12866 * stopped.
12867 **/
12868int
James Smart6d368e52011-05-24 11:44:12 -040012869lpfc_sli4_post_els_sgl_list(struct lpfc_hba *phba)
James Smart4f774512009-05-22 14:52:35 -040012870{
12871 struct lpfc_sglq *sglq_entry;
12872 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
12873 struct sgl_page_pairs *sgl_pg_pairs;
12874 void *viraddr;
12875 LPFC_MBOXQ_t *mbox;
12876 uint32_t reqlen, alloclen, pg_pairs;
12877 uint32_t mbox_tmo;
James Smart6d368e52011-05-24 11:44:12 -040012878 uint16_t xritag_start = 0, lxri = 0;
James Smart4f774512009-05-22 14:52:35 -040012879 int els_xri_cnt, rc = 0;
12880 uint32_t shdr_status, shdr_add_status;
12881 union lpfc_sli4_cfg_shdr *shdr;
12882
12883 /* The number of sgls to be posted */
12884 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
12885
12886 reqlen = els_xri_cnt * sizeof(struct sgl_page_pairs) +
12887 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
James Smart49198b32010-04-06 15:04:33 -040012888 if (reqlen > SLI4_PAGE_SIZE) {
James Smart4f774512009-05-22 14:52:35 -040012889 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
12890 "2559 Block sgl registration required DMA "
12891 "size (%d) greater than a page\n", reqlen);
12892 return -ENOMEM;
12893 }
12894 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
James Smart6d368e52011-05-24 11:44:12 -040012895 if (!mbox)
James Smart4f774512009-05-22 14:52:35 -040012896 return -ENOMEM;
James Smart4f774512009-05-22 14:52:35 -040012897
12898 /* Allocate DMA memory and set up the non-embedded mailbox command */
12899 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
12900 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
12901 LPFC_SLI4_MBX_NEMBED);
12902
12903 if (alloclen < reqlen) {
12904 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12905 "0285 Allocated DMA memory size (%d) is "
12906 "less than the requested DMA memory "
12907 "size (%d)\n", alloclen, reqlen);
12908 lpfc_sli4_mbox_cmd_free(phba, mbox);
12909 return -ENOMEM;
12910 }
James Smart4f774512009-05-22 14:52:35 -040012911 /* Set up the SGL pages in the non-embedded DMA pages */
James Smart6d368e52011-05-24 11:44:12 -040012912 viraddr = mbox->sge_array->addr[0];
James Smart4f774512009-05-22 14:52:35 -040012913 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
12914 sgl_pg_pairs = &sgl->sgl_pg_pairs;
12915
12916 for (pg_pairs = 0; pg_pairs < els_xri_cnt; pg_pairs++) {
12917 sglq_entry = phba->sli4_hba.lpfc_els_sgl_array[pg_pairs];
James Smart6d368e52011-05-24 11:44:12 -040012918
12919 /*
12920 * Assign the sglq a physical xri only if the driver has not
12921 * initialized those resources. A port reset only needs
12922 * the sglq's posted.
12923 */
12924 if (bf_get(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags) !=
12925 LPFC_XRI_RSRC_RDY) {
12926 lxri = lpfc_sli4_next_xritag(phba);
12927 if (lxri == NO_XRI) {
12928 lpfc_sli4_mbox_cmd_free(phba, mbox);
12929 return -ENOMEM;
12930 }
12931 sglq_entry->sli4_lxritag = lxri;
12932 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
12933 }
12934
James Smart4f774512009-05-22 14:52:35 -040012935 /* Set up the sge entry */
12936 sgl_pg_pairs->sgl_pg0_addr_lo =
12937 cpu_to_le32(putPaddrLow(sglq_entry->phys));
12938 sgl_pg_pairs->sgl_pg0_addr_hi =
12939 cpu_to_le32(putPaddrHigh(sglq_entry->phys));
12940 sgl_pg_pairs->sgl_pg1_addr_lo =
12941 cpu_to_le32(putPaddrLow(0));
12942 sgl_pg_pairs->sgl_pg1_addr_hi =
12943 cpu_to_le32(putPaddrHigh(0));
James Smart6d368e52011-05-24 11:44:12 -040012944
James Smart4f774512009-05-22 14:52:35 -040012945 /* Keep the first xritag on the list */
12946 if (pg_pairs == 0)
12947 xritag_start = sglq_entry->sli4_xritag;
12948 sgl_pg_pairs++;
12949 }
James Smart6d368e52011-05-24 11:44:12 -040012950
12951 /* Complete initialization and perform endian conversion. */
James Smart4f774512009-05-22 14:52:35 -040012952 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
James Smart6a9c52c2009-10-02 15:16:51 -040012953 bf_set(lpfc_post_sgl_pages_xricnt, sgl, els_xri_cnt);
James Smart4f774512009-05-22 14:52:35 -040012954 sgl->word0 = cpu_to_le32(sgl->word0);
James Smart4f774512009-05-22 14:52:35 -040012955 if (!phba->sli4_hba.intr_enable)
12956 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
12957 else {
James Smarta183a152011-10-10 21:32:43 -040012958 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
James Smart4f774512009-05-22 14:52:35 -040012959 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
12960 }
12961 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
12962 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
12963 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
12964 if (rc != MBX_TIMEOUT)
12965 lpfc_sli4_mbox_cmd_free(phba, mbox);
12966 if (shdr_status || shdr_add_status || rc) {
12967 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12968 "2513 POST_SGL_BLOCK mailbox command failed "
12969 "status x%x add_status x%x mbx status x%x\n",
12970 shdr_status, shdr_add_status, rc);
12971 rc = -ENXIO;
12972 }
James Smart6d368e52011-05-24 11:44:12 -040012973
12974 if (rc == 0)
12975 bf_set(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags,
12976 LPFC_XRI_RSRC_RDY);
12977 return rc;
12978}
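
/*
 * Editorial sketch, not part of the driver build: the mailbox sizing used
 * above is one sgl_page_pairs entry per XRI plus the SLI4 config header and
 * one trailing word, and the whole request must fit in a single
 * SLI4_PAGE_SIZE page. The helper below only restates that arithmetic; its
 * name is hypothetical.
 */
#if 0
static bool lpfc_example_sgl_block_fits(uint32_t n_xri)
{
	uint32_t reqlen = n_xri * sizeof(struct sgl_page_pairs) +
			  sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);

	return reqlen <= SLI4_PAGE_SIZE;
}
#endif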
12979
12980/**
12981 * lpfc_sli4_post_els_sgl_list_ext - post a block of ELS sgls to the port.
12982 * @phba: pointer to lpfc hba data structure.
12983 *
12984 * This routine is invoked to post a block of the driver's sgl pages to the
12985 * HBA using a non-embedded mailbox command. No Lock is held. This routine
12986 * is only called when the driver is loading and after all IO has been
12987 * stopped.
12988 **/
12989int
12990lpfc_sli4_post_els_sgl_list_ext(struct lpfc_hba *phba)
12991{
12992 struct lpfc_sglq *sglq_entry;
12993 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
12994 struct sgl_page_pairs *sgl_pg_pairs;
12995 void *viraddr;
12996 LPFC_MBOXQ_t *mbox;
12997 uint32_t reqlen, alloclen, index;
12998 uint32_t mbox_tmo;
12999 uint16_t rsrc_start, rsrc_size, els_xri_cnt;
13000 uint16_t xritag_start = 0, lxri = 0;
13001 struct lpfc_rsrc_blks *rsrc_blk;
13002 int cnt, ttl_cnt, rc = 0;
13003 int loop_cnt;
13004 uint32_t shdr_status, shdr_add_status;
13005 union lpfc_sli4_cfg_shdr *shdr;
13006
13007 /* The number of sgls to be posted */
13008 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
13009
13010 reqlen = els_xri_cnt * sizeof(struct sgl_page_pairs) +
13011 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
13012 if (reqlen > SLI4_PAGE_SIZE) {
13013 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
13014 "2989 Block sgl registration required DMA "
13015 "size (%d) greater than a page\n", reqlen);
13016 return -ENOMEM;
13017 }
13018
13019 cnt = 0;
13020 ttl_cnt = 0;
13021 list_for_each_entry(rsrc_blk, &phba->sli4_hba.lpfc_xri_blk_list,
13022 list) {
13023 rsrc_start = rsrc_blk->rsrc_start;
13024 rsrc_size = rsrc_blk->rsrc_size;
13025
13026 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
13027 "3014 Working ELS Extent start %d, cnt %d\n",
13028 rsrc_start, rsrc_size);
13029
13030 loop_cnt = min(els_xri_cnt, rsrc_size);
13031 if (ttl_cnt + loop_cnt >= els_xri_cnt) {
13032 loop_cnt = els_xri_cnt - ttl_cnt;
13033 ttl_cnt = els_xri_cnt;
13034 }
13035
13036 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
13037 if (!mbox)
13038 return -ENOMEM;
13039 /*
13040 * Allocate DMA memory and set up the non-embedded mailbox
13041 * command.
13042 */
13043 alloclen = lpfc_sli4_config(phba, mbox,
13044 LPFC_MBOX_SUBSYSTEM_FCOE,
13045 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
13046 reqlen, LPFC_SLI4_MBX_NEMBED);
13047 if (alloclen < reqlen) {
13048 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13049 "2987 Allocated DMA memory size (%d) "
13050 "is less than the requested DMA memory "
13051 "size (%d)\n", alloclen, reqlen);
13052 lpfc_sli4_mbox_cmd_free(phba, mbox);
13053 return -ENOMEM;
13054 }
13055
13056 /* Set up the SGL pages in the non-embedded DMA pages */
13057 viraddr = mbox->sge_array->addr[0];
13058 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
13059 sgl_pg_pairs = &sgl->sgl_pg_pairs;
13060
13061 /*
13062 * The starting resource may not begin at zero. Control
13063 * the loop variables via the block resource parameters,
13064 * but handle the sge pointers with a zero-based index
13065 * that doesn't get reset per loop pass.
13066 */
13067 for (index = rsrc_start;
13068 index < rsrc_start + loop_cnt;
13069 index++) {
13070 sglq_entry = phba->sli4_hba.lpfc_els_sgl_array[cnt];
13071
13072 /*
13073 * Assign the sglq a physical xri only if the driver
13074 * has not initialized those resources. A port reset
13075 * only needs the sglq's posted.
13076 */
13077 if (bf_get(lpfc_xri_rsrc_rdy,
13078 &phba->sli4_hba.sli4_flags) !=
13079 LPFC_XRI_RSRC_RDY) {
13080 lxri = lpfc_sli4_next_xritag(phba);
13081 if (lxri == NO_XRI) {
13082 lpfc_sli4_mbox_cmd_free(phba, mbox);
13083 rc = -ENOMEM;
13084 goto err_exit;
13085 }
13086 sglq_entry->sli4_lxritag = lxri;
13087 sglq_entry->sli4_xritag =
13088 phba->sli4_hba.xri_ids[lxri];
13089 }
13090
13091 /* Set up the sge entry */
13092 sgl_pg_pairs->sgl_pg0_addr_lo =
13093 cpu_to_le32(putPaddrLow(sglq_entry->phys));
13094 sgl_pg_pairs->sgl_pg0_addr_hi =
13095 cpu_to_le32(putPaddrHigh(sglq_entry->phys));
13096 sgl_pg_pairs->sgl_pg1_addr_lo =
13097 cpu_to_le32(putPaddrLow(0));
13098 sgl_pg_pairs->sgl_pg1_addr_hi =
13099 cpu_to_le32(putPaddrHigh(0));
13100
13101 /* Track the starting physical XRI for the mailbox. */
13102 if (index == rsrc_start)
13103 xritag_start = sglq_entry->sli4_xritag;
13104 sgl_pg_pairs++;
13105 cnt++;
13106 }
13107
13108 /* Complete initialization and perform endian conversion. */
13109 rsrc_blk->rsrc_used += loop_cnt;
13110 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
13111 bf_set(lpfc_post_sgl_pages_xricnt, sgl, loop_cnt);
13112 sgl->word0 = cpu_to_le32(sgl->word0);
13113
13114 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
13115 "3015 Post ELS Extent SGL, start %d, "
13116 "cnt %d, used %d\n",
13117 xritag_start, loop_cnt, rsrc_blk->rsrc_used);
13118 if (!phba->sli4_hba.intr_enable)
13119 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
13120 else {
James Smarta183a152011-10-10 21:32:43 -040013121 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
James Smart6d368e52011-05-24 11:44:12 -040013122 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
13123 }
13124 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
13125 shdr_status = bf_get(lpfc_mbox_hdr_status,
13126 &shdr->response);
13127 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
13128 &shdr->response);
13129 if (rc != MBX_TIMEOUT)
13130 lpfc_sli4_mbox_cmd_free(phba, mbox);
13131 if (shdr_status || shdr_add_status || rc) {
13132 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13133 "2988 POST_SGL_BLOCK mailbox "
13134 "command failed status x%x "
13135 "add_status x%x mbx status x%x\n",
13136 shdr_status, shdr_add_status, rc);
13137 rc = -ENXIO;
13138 goto err_exit;
13139 }
13140 if (ttl_cnt >= els_xri_cnt)
13141 break;
13142 }
13143
13144 err_exit:
13145 if (rc == 0)
13146 bf_set(lpfc_xri_rsrc_rdy, &phba->sli4_hba.sli4_flags,
13147 LPFC_XRI_RSRC_RDY);
James Smart4f774512009-05-22 14:52:35 -040013148 return rc;
13149}
13150
13151/**
13152 * lpfc_sli4_post_scsi_sgl_block - post a block of scsi sgl list to firmware
13153 * @phba: pointer to lpfc hba data structure.
13154 * @sblist: pointer to scsi buffer list.
13155 * @cnt: number of scsi buffers on the list.
13156 *
13157 * This routine is invoked to post a block of @cnt scsi sgl pages from a
13158 * SCSI buffer list @sblist to the HBA using a non-embedded mailbox command.
13159 * No Lock is held.
13160 *
13161 **/
13162int
13163lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba, struct list_head *sblist,
13164 int cnt)
13165{
13166 struct lpfc_scsi_buf *psb;
13167 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
13168 struct sgl_page_pairs *sgl_pg_pairs;
13169 void *viraddr;
13170 LPFC_MBOXQ_t *mbox;
13171 uint32_t reqlen, alloclen, pg_pairs;
13172 uint32_t mbox_tmo;
13173 uint16_t xritag_start = 0;
13174 int rc = 0;
13175 uint32_t shdr_status, shdr_add_status;
13176 dma_addr_t pdma_phys_bpl1;
13177 union lpfc_sli4_cfg_shdr *shdr;
13178
13179 /* Calculate the requested length of the dma memory */
13180 reqlen = cnt * sizeof(struct sgl_page_pairs) +
13181 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
James Smart49198b32010-04-06 15:04:33 -040013182 if (reqlen > SLI4_PAGE_SIZE) {
James Smart4f774512009-05-22 14:52:35 -040013183 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
13184 "0217 Block sgl registration required DMA "
13185 "size (%d) greater than a page\n", reqlen);
13186 return -ENOMEM;
13187 }
13188 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
13189 if (!mbox) {
13190 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13191 "0283 Failed to allocate mbox cmd memory\n");
13192 return -ENOMEM;
13193 }
13194
13195 /* Allocate DMA memory and set up the non-embedded mailbox command */
13196 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
13197 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
13198 LPFC_SLI4_MBX_NEMBED);
13199
13200 if (alloclen < reqlen) {
13201 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13202 "2561 Allocated DMA memory size (%d) is "
13203 "less than the requested DMA memory "
13204 "size (%d)\n", alloclen, reqlen);
13205 lpfc_sli4_mbox_cmd_free(phba, mbox);
13206 return -ENOMEM;
13207 }
James Smart6d368e52011-05-24 11:44:12 -040013208
James Smart4f774512009-05-22 14:52:35 -040013209 /* Get the first SGE entry from the non-embedded DMA memory */
James Smart4f774512009-05-22 14:52:35 -040013210 viraddr = mbox->sge_array->addr[0];
13211
13212 /* Set up the SGL pages in the non-embedded DMA pages */
13213 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
13214 sgl_pg_pairs = &sgl->sgl_pg_pairs;
13215
13216 pg_pairs = 0;
13217 list_for_each_entry(psb, sblist, list) {
13218 /* Set up the sge entry */
13219 sgl_pg_pairs->sgl_pg0_addr_lo =
13220 cpu_to_le32(putPaddrLow(psb->dma_phys_bpl));
13221 sgl_pg_pairs->sgl_pg0_addr_hi =
13222 cpu_to_le32(putPaddrHigh(psb->dma_phys_bpl));
13223 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
13224 pdma_phys_bpl1 = psb->dma_phys_bpl + SGL_PAGE_SIZE;
13225 else
13226 pdma_phys_bpl1 = 0;
13227 sgl_pg_pairs->sgl_pg1_addr_lo =
13228 cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
13229 sgl_pg_pairs->sgl_pg1_addr_hi =
13230 cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
13231 /* Keep the first xritag on the list */
13232 if (pg_pairs == 0)
13233 xritag_start = psb->cur_iocbq.sli4_xritag;
13234 sgl_pg_pairs++;
13235 pg_pairs++;
13236 }
13237 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
13238 bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
13239 /* Perform endian conversion if necessary */
13240 sgl->word0 = cpu_to_le32(sgl->word0);
13241
13242 if (!phba->sli4_hba.intr_enable)
13243 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
13244 else {
James Smarta183a152011-10-10 21:32:43 -040013245 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
James Smart4f774512009-05-22 14:52:35 -040013246 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
13247 }
13248 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
13249 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
13250 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
13251 if (rc != MBX_TIMEOUT)
13252 lpfc_sli4_mbox_cmd_free(phba, mbox);
13253 if (shdr_status || shdr_add_status || rc) {
13254 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13255 "2564 POST_SGL_BLOCK mailbox command failed "
13256 "status x%x add_status x%x mbx status x%x\n",
13257 shdr_status, shdr_add_status, rc);
13258 rc = -ENXIO;
13259 }
13260 return rc;
13261}
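
/*
 * Editorial sketch, not part of the driver build: each sgl_page_pairs entry
 * built above can describe two pages. The second page address is only filled
 * in when the per-command SGL spans more than one SGL_PAGE_SIZE page
 * (cfg_sg_dma_buf_size > SGL_PAGE_SIZE); otherwise it is posted as zero. The
 * helper name is hypothetical.
 */
#if 0
static dma_addr_t lpfc_example_scsi_sgl_pg1(struct lpfc_hba *phba,
					    struct lpfc_scsi_buf *psb)
{
	if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
		return psb->dma_phys_bpl + SGL_PAGE_SIZE;
	return 0;
}
#endif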
13262
13263/**
James Smart6d368e52011-05-24 11:44:12 -040013264 * lpfc_sli4_post_scsi_sgl_blk_ext - post a block of scsi sgls to the port.
13265 * @phba: pointer to lpfc hba data structure.
13266 * @sblist: pointer to scsi buffer list.
13267 * @cnt: number of scsi buffers on the list.
13268 *
13269 * This routine is invoked to post a block of @cnt scsi sgl pages from a
13270 * SCSI buffer list @sblist to the HBA using a non-embedded mailbox command.
13271 * No Lock is held.
13272 *
13273 **/
13274int
13275lpfc_sli4_post_scsi_sgl_blk_ext(struct lpfc_hba *phba, struct list_head *sblist,
13276 int cnt)
13277{
13278 struct lpfc_scsi_buf *psb = NULL;
13279 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
13280 struct sgl_page_pairs *sgl_pg_pairs;
13281 void *viraddr;
13282 LPFC_MBOXQ_t *mbox;
13283 uint32_t reqlen, alloclen, pg_pairs;
13284 uint32_t mbox_tmo;
13285 uint16_t xri_start = 0, scsi_xri_start;
13286 uint16_t rsrc_range;
13287 int rc = 0, avail_cnt;
13288 uint32_t shdr_status, shdr_add_status;
13289 dma_addr_t pdma_phys_bpl1;
13290 union lpfc_sli4_cfg_shdr *shdr;
13291 struct lpfc_rsrc_blks *rsrc_blk;
13292 uint32_t xri_cnt = 0;
13293
13294 /* Calculate the total requested length of the dma memory */
13295 reqlen = cnt * sizeof(struct sgl_page_pairs) +
13296 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
13297 if (reqlen > SLI4_PAGE_SIZE) {
13298 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
13299 "2932 Block sgl registration required DMA "
13300 "size (%d) greater than a page\n", reqlen);
13301 return -ENOMEM;
13302 }
13303
13304 /*
13305 * The use of extents requires the driver to post the sgl headers
13306 * in multiple postings to meet the contiguous resource assignment.
13307 */
13308 psb = list_prepare_entry(psb, sblist, list);
13309 scsi_xri_start = phba->sli4_hba.scsi_xri_start;
13310 list_for_each_entry(rsrc_blk, &phba->sli4_hba.lpfc_xri_blk_list,
13311 list) {
13312 rsrc_range = rsrc_blk->rsrc_start + rsrc_blk->rsrc_size;
13313 if (rsrc_range < scsi_xri_start)
13314 continue;
13315 else if (rsrc_blk->rsrc_used >= rsrc_blk->rsrc_size)
13316 continue;
13317 else
13318 avail_cnt = rsrc_blk->rsrc_size - rsrc_blk->rsrc_used;
13319
13320 reqlen = (avail_cnt * sizeof(struct sgl_page_pairs)) +
13321 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
13322 /*
13323 * Allocate DMA memory and set up the non-embedded mailbox
13324 * command. The mbox is used to post an SGL page per loop
13325 * but the DMA memory has a use-once semantic so the mailbox
13326 * is used and freed per loop pass.
13327 */
13328 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
13329 if (!mbox) {
13330 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13331 "2933 Failed to allocate mbox cmd "
13332 "memory\n");
13333 return -ENOMEM;
13334 }
13335 alloclen = lpfc_sli4_config(phba, mbox,
13336 LPFC_MBOX_SUBSYSTEM_FCOE,
13337 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
13338 reqlen,
13339 LPFC_SLI4_MBX_NEMBED);
13340 if (alloclen < reqlen) {
13341 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13342 "2934 Allocated DMA memory size (%d) "
13343 "is less than the requested DMA memory "
13344 "size (%d)\n", alloclen, reqlen);
13345 lpfc_sli4_mbox_cmd_free(phba, mbox);
13346 return -ENOMEM;
13347 }
13348
13349 /* Get the first SGE entry from the non-embedded DMA memory */
13350 viraddr = mbox->sge_array->addr[0];
13351
13352 /* Set up the SGL pages in the non-embedded DMA pages */
13353 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
13354 sgl_pg_pairs = &sgl->sgl_pg_pairs;
13355
13356 /* pg_pairs tracks posted SGEs per loop iteration. */
13357 pg_pairs = 0;
13358 list_for_each_entry_continue(psb, sblist, list) {
13359 /* Set up the sge entry */
13360 sgl_pg_pairs->sgl_pg0_addr_lo =
13361 cpu_to_le32(putPaddrLow(psb->dma_phys_bpl));
13362 sgl_pg_pairs->sgl_pg0_addr_hi =
13363 cpu_to_le32(putPaddrHigh(psb->dma_phys_bpl));
13364 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
13365 pdma_phys_bpl1 = psb->dma_phys_bpl +
13366 SGL_PAGE_SIZE;
13367 else
13368 pdma_phys_bpl1 = 0;
13369 sgl_pg_pairs->sgl_pg1_addr_lo =
13370 cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
13371 sgl_pg_pairs->sgl_pg1_addr_hi =
13372 cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
13373 /* Keep the first xri for this extent. */
13374 if (pg_pairs == 0)
13375 xri_start = psb->cur_iocbq.sli4_xritag;
13376 sgl_pg_pairs++;
13377 pg_pairs++;
13378 xri_cnt++;
13379
13380 /*
13381 * Track two exit conditions - the loop has constructed
13382 * all of the caller's SGE pairs or all available
13383 * resource IDs in this extent are consumed.
13384 */
13385 if ((xri_cnt == cnt) || (pg_pairs >= avail_cnt))
13386 break;
13387 }
13388 rsrc_blk->rsrc_used += pg_pairs;
13389 bf_set(lpfc_post_sgl_pages_xri, sgl, xri_start);
13390 bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
13391
13392 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
13393 "3016 Post SCSI Extent SGL, start %d, cnt %d "
13394 "blk use %d\n",
13395 xri_start, pg_pairs, rsrc_blk->rsrc_used);
13396 /* Perform endian conversion if necessary */
13397 sgl->word0 = cpu_to_le32(sgl->word0);
13398 if (!phba->sli4_hba.intr_enable)
13399 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
13400 else {
James Smarta183a152011-10-10 21:32:43 -040013401 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
James Smart6d368e52011-05-24 11:44:12 -040013402 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
13403 }
13404 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
13405 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
13406 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
13407 &shdr->response);
13408 if (rc != MBX_TIMEOUT)
13409 lpfc_sli4_mbox_cmd_free(phba, mbox);
13410 if (shdr_status || shdr_add_status || rc) {
13411 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13412 "2935 POST_SGL_BLOCK mailbox command "
13413 "failed status x%x add_status x%x "
13414 "mbx status x%x\n",
13415 shdr_status, shdr_add_status, rc);
13416 return -ENXIO;
13417 }
13418
13419 /* Post only what is requested. */
13420 if (xri_cnt >= cnt)
13421 break;
13422 }
13423 return rc;
13424}
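
/*
 * Editorial sketch, not part of the driver build: the extent walk above skips
 * XRI blocks that end before the SCSI XRI range or that are already fully
 * consumed, and otherwise posts at most the block's remaining
 * (rsrc_size - rsrc_used) entries. The helper below restates that selection
 * rule; its name is hypothetical and a return of 0 means "skip this extent".
 */
#if 0
static int lpfc_example_scsi_extent_avail(struct lpfc_hba *phba,
					  struct lpfc_rsrc_blks *rsrc_blk)
{
	uint16_t rsrc_range = rsrc_blk->rsrc_start + rsrc_blk->rsrc_size;

	if (rsrc_range < phba->sli4_hba.scsi_xri_start)
		return 0;
	if (rsrc_blk->rsrc_used >= rsrc_blk->rsrc_size)
		return 0;
	return rsrc_blk->rsrc_size - rsrc_blk->rsrc_used;
}
#endif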
13425
13426/**
James Smart4f774512009-05-22 14:52:35 -040013427 * lpfc_fc_frame_check - Check that this frame is a valid frame to handle
13428 * @phba: pointer to lpfc_hba struct that the frame was received on
13429 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
13430 *
13431 * This function checks the fields in the @fc_hdr to see if the FC frame is a
13432 * valid type of frame that the LPFC driver will handle. This function will
13433 * return zero if the frame is a valid frame or a non-zero value when the
13434 * frame does not pass the check.
13435 **/
13436static int
13437lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
13438{
Tomas Henzl474ffb72010-12-22 16:52:40 +010013439 /* make rctl_names static to save stack space */
13440 static char *rctl_names[] = FC_RCTL_NAMES_INIT;
James Smart4f774512009-05-22 14:52:35 -040013441 char *type_names[] = FC_TYPE_NAMES_INIT;
13442 struct fc_vft_header *fc_vft_hdr;
James Smart546fc852011-03-11 16:06:29 -050013443 uint32_t *header = (uint32_t *) fc_hdr;
James Smart4f774512009-05-22 14:52:35 -040013444
13445 switch (fc_hdr->fh_r_ctl) {
13446 case FC_RCTL_DD_UNCAT: /* uncategorized information */
13447 case FC_RCTL_DD_SOL_DATA: /* solicited data */
13448 case FC_RCTL_DD_UNSOL_CTL: /* unsolicited control */
13449 case FC_RCTL_DD_SOL_CTL: /* solicited control or reply */
13450 case FC_RCTL_DD_UNSOL_DATA: /* unsolicited data */
13451 case FC_RCTL_DD_DATA_DESC: /* data descriptor */
13452 case FC_RCTL_DD_UNSOL_CMD: /* unsolicited command */
13453 case FC_RCTL_DD_CMD_STATUS: /* command status */
13454 case FC_RCTL_ELS_REQ: /* extended link services request */
13455 case FC_RCTL_ELS_REP: /* extended link services reply */
13456 case FC_RCTL_ELS4_REQ: /* FC-4 ELS request */
13457 case FC_RCTL_ELS4_REP: /* FC-4 ELS reply */
13458 case FC_RCTL_BA_NOP: /* basic link service NOP */
13459 case FC_RCTL_BA_ABTS: /* basic link service abort */
13460 case FC_RCTL_BA_RMC: /* remove connection */
13461 case FC_RCTL_BA_ACC: /* basic accept */
13462 case FC_RCTL_BA_RJT: /* basic reject */
13463 case FC_RCTL_BA_PRMT:
13464 case FC_RCTL_ACK_1: /* acknowledge_1 */
13465 case FC_RCTL_ACK_0: /* acknowledge_0 */
13466 case FC_RCTL_P_RJT: /* port reject */
13467 case FC_RCTL_F_RJT: /* fabric reject */
13468 case FC_RCTL_P_BSY: /* port busy */
13469 case FC_RCTL_F_BSY: /* fabric busy to data frame */
13470 case FC_RCTL_F_BSYL: /* fabric busy to link control frame */
13471 case FC_RCTL_LCR: /* link credit reset */
13472 case FC_RCTL_END: /* end */
13473 break;
13474 case FC_RCTL_VFTH: /* Virtual Fabric tagging Header */
13475 fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
13476 fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1];
13477 return lpfc_fc_frame_check(phba, fc_hdr);
13478 default:
13479 goto drop;
13480 }
13481 switch (fc_hdr->fh_type) {
13482 case FC_TYPE_BLS:
13483 case FC_TYPE_ELS:
13484 case FC_TYPE_FCP:
13485 case FC_TYPE_CT:
13486 break;
13487 case FC_TYPE_IP:
13488 case FC_TYPE_ILS:
13489 default:
13490 goto drop;
13491 }
James Smart546fc852011-03-11 16:06:29 -050013492
James Smart4f774512009-05-22 14:52:35 -040013493 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
James Smart546fc852011-03-11 16:06:29 -050013494 "2538 Received frame rctl:%s type:%s "
13495 "Frame Data:%08x %08x %08x %08x %08x %08x\n",
James Smart4f774512009-05-22 14:52:35 -040013496 rctl_names[fc_hdr->fh_r_ctl],
James Smart546fc852011-03-11 16:06:29 -050013497 type_names[fc_hdr->fh_type],
13498 be32_to_cpu(header[0]), be32_to_cpu(header[1]),
13499 be32_to_cpu(header[2]), be32_to_cpu(header[3]),
13500 be32_to_cpu(header[4]), be32_to_cpu(header[5]));
James Smart4f774512009-05-22 14:52:35 -040013501 return 0;
13502drop:
13503 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
13504 "2539 Dropped frame rctl:%s type:%s\n",
13505 rctl_names[fc_hdr->fh_r_ctl],
13506 type_names[fc_hdr->fh_type]);
13507 return 1;
13508}
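
/*
 * Editorial sketch, not part of the driver build: lpfc_fc_frame_check()
 * returns 0 for a frame the driver will handle and non-zero for one it
 * drops, so callers typically treat it as a boolean gate before linking the
 * frame into a sequence. The helper name below is hypothetical.
 */
#if 0
static bool lpfc_example_frame_ok(struct lpfc_hba *phba,
				  struct fc_frame_header *fc_hdr)
{
	/* 0 == handled R_CTL/TYPE combination, non-zero == dropped */
	return lpfc_fc_frame_check(phba, fc_hdr) == 0;
}
#endif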
13509
13510/**
13511 * lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame
13512 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
13513 *
13514 * This function processes the FC header to retrieve the VFI from the VF
13515 * header, if one exists. This function will return the VFI if one exists
13516 * or 0 if no VF tagging header exists.
13517 **/
13518static uint32_t
13519lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr)
13520{
13521 struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
13522
13523 if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH)
13524 return 0;
13525 return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr);
13526}
13527
13528/**
13529 * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to
13530 * @phba: Pointer to the HBA structure to search for the vport on
13531 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
13532 * @fcfi: The FC Fabric ID that the frame came from
13533 *
13534 * This function searches the @phba for a vport that matches the content of the
13535 * @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the
13536 * VFI, if the Virtual Fabric Tagging Header exists, and the DID. This function
13537 * returns the matching vport pointer or NULL if unable to match frame to a
13538 * vport.
13539 **/
13540static struct lpfc_vport *
13541lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr,
13542 uint16_t fcfi)
13543{
13544 struct lpfc_vport **vports;
13545 struct lpfc_vport *vport = NULL;
13546 int i;
13547 uint32_t did = (fc_hdr->fh_d_id[0] << 16 |
13548 fc_hdr->fh_d_id[1] << 8 |
13549 fc_hdr->fh_d_id[2]);
James Smartbf086112011-08-21 21:48:13 -040013550 if (did == Fabric_DID)
13551 return phba->pport;
James Smart4f774512009-05-22 14:52:35 -040013552 vports = lpfc_create_vport_work_array(phba);
13553 if (vports != NULL)
13554 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
13555 if (phba->fcf.fcfi == fcfi &&
13556 vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) &&
13557 vports[i]->fc_myDID == did) {
13558 vport = vports[i];
13559 break;
13560 }
13561 }
13562 lpfc_destroy_vport_work_array(phba, vports);
13563 return vport;
13564}
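
/*
 * Editorial sketch, not part of the driver build: the vport lookup above
 * first rebuilds the 24-bit destination ID from the three fh_d_id bytes of
 * the big-endian FC header and short-circuits Fabric_DID to the physical
 * port. The helper name below is hypothetical.
 */
#if 0
static uint32_t lpfc_example_fc_hdr_did(struct fc_frame_header *fc_hdr)
{
	return (fc_hdr->fh_d_id[0] << 16 |
		fc_hdr->fh_d_id[1] << 8 |
		fc_hdr->fh_d_id[2]);
}
#endif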
13565
13566/**
James Smart45ed1192009-10-02 15:17:02 -040013567 * lpfc_update_rcv_time_stamp - Update vport's rcv seq time stamp
13568 * @vport: The vport to work on.
13569 *
13570 * This function updates the receive sequence time stamp for this vport. The
13571 * receive sequence time stamp indicates the time that the last frame of
13572 * the sequence that has been idle for the longest amount of time was received.
13573 * The driver uses this time stamp to determine if any received sequences have
13574 * timed out.
13575 **/
13576void
13577lpfc_update_rcv_time_stamp(struct lpfc_vport *vport)
13578{
13579 struct lpfc_dmabuf *h_buf;
13580 struct hbq_dmabuf *dmabuf = NULL;
13581
13582 /* get the oldest sequence on the rcv list */
13583 h_buf = list_get_first(&vport->rcv_buffer_list,
13584 struct lpfc_dmabuf, list);
13585 if (!h_buf)
13586 return;
13587 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
13588 vport->rcv_buffer_time_stamp = dmabuf->time_stamp;
13589}
13590
13591/**
13592 * lpfc_cleanup_rcv_buffers - Cleans up all outstanding receive sequences.
13593 * @vport: The vport that the received sequences were sent to.
13594 *
13595 * This function cleans up all outstanding received sequences. This is called
13596 * by the driver when a link event or user action invalidates all the received
13597 * sequences.
13598 **/
13599void
13600lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport)
13601{
13602 struct lpfc_dmabuf *h_buf, *hnext;
13603 struct lpfc_dmabuf *d_buf, *dnext;
13604 struct hbq_dmabuf *dmabuf = NULL;
13605
13606 /* start with the oldest sequence on the rcv list */
13607 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
13608 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
13609 list_del_init(&dmabuf->hbuf.list);
13610 list_for_each_entry_safe(d_buf, dnext,
13611 &dmabuf->dbuf.list, list) {
13612 list_del_init(&d_buf->list);
13613 lpfc_in_buf_free(vport->phba, d_buf);
13614 }
13615 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
13616 }
13617}
13618
13619/**
13620 * lpfc_rcv_seq_check_edtov - Cleans up timed out receive sequences.
13621 * @vport: The vport that the received sequences were sent to.
13622 *
13623 * This function determines whether any received sequences have timed out by
13624 * first checking the vport's rcv_buffer_time_stamp. If this time_stamp
13625 * indicates that there is at least one timed out sequence this routine will
13626 * go through the received sequences one at a time from most inactive to most
13627 * active to determine which ones need to be cleaned up. Once it has determined
13628 * that a sequence needs to be cleaned up it will simply free up the resources
13629 * without sending an abort.
13630 **/
13631void
13632lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport)
13633{
13634 struct lpfc_dmabuf *h_buf, *hnext;
13635 struct lpfc_dmabuf *d_buf, *dnext;
13636 struct hbq_dmabuf *dmabuf = NULL;
13637 unsigned long timeout;
13638 int abort_count = 0;
13639
13640 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
13641 vport->rcv_buffer_time_stamp);
13642 if (list_empty(&vport->rcv_buffer_list) ||
13643 time_before(jiffies, timeout))
13644 return;
13645 /* start with the oldest sequence on the rcv list */
13646 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
13647 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
13648 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
13649 dmabuf->time_stamp);
13650 if (time_before(jiffies, timeout))
13651 break;
13652 abort_count++;
13653 list_del_init(&dmabuf->hbuf.list);
13654 list_for_each_entry_safe(d_buf, dnext,
13655 &dmabuf->dbuf.list, list) {
13656 list_del_init(&d_buf->list);
13657 lpfc_in_buf_free(vport->phba, d_buf);
13658 }
13659 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
13660 }
13661 if (abort_count)
13662 lpfc_update_rcv_time_stamp(vport);
13663}
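
/*
 * Editorial sketch, not part of the driver build: both timeout tests above
 * reduce to "has time_stamp + E_D_TOV (converted to jiffies) already
 * passed?". The helper below restates that predicate for a single sequence;
 * its name is hypothetical.
 */
#if 0
static bool lpfc_example_seq_timed_out(struct lpfc_vport *vport,
				       struct hbq_dmabuf *dmabuf)
{
	unsigned long timeout = msecs_to_jiffies(vport->phba->fc_edtov) +
				dmabuf->time_stamp;

	return time_after_eq(jiffies, timeout);
}
#endif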
13664
13665/**
James Smart4f774512009-05-22 14:52:35 -040013666 * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences
13667 * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame
13668 *
13669 * This function searches through the existing incomplete sequences that have
13670 * been sent to this @vport. If the frame matches one of the incomplete
13671 * sequences then the dbuf in the @dmabuf is added to the list of frames that
13672 * make up that sequence. If no sequence is found that matches this frame then
13673 * the function will add the hbuf in the @dmabuf to the @vport's rcv_buffer_list.
13674 * This function returns a pointer to the first dmabuf in the sequence list that
13675 * the frame was linked to.
13676 **/
13677static struct hbq_dmabuf *
13678lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
13679{
13680 struct fc_frame_header *new_hdr;
13681 struct fc_frame_header *temp_hdr;
13682 struct lpfc_dmabuf *d_buf;
13683 struct lpfc_dmabuf *h_buf;
13684 struct hbq_dmabuf *seq_dmabuf = NULL;
13685 struct hbq_dmabuf *temp_dmabuf = NULL;
13686
James Smart4d9ab992009-10-02 15:16:39 -040013687 INIT_LIST_HEAD(&dmabuf->dbuf.list);
James Smart45ed1192009-10-02 15:17:02 -040013688 dmabuf->time_stamp = jiffies;
James Smart4f774512009-05-22 14:52:35 -040013689 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
13690 /* Use the hdr_buf to find the sequence that this frame belongs to */
13691 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
13692 temp_hdr = (struct fc_frame_header *)h_buf->virt;
13693 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
13694 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
13695 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
13696 continue;
13697 /* found a pending sequence that matches this frame */
13698 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
13699 break;
13700 }
13701 if (!seq_dmabuf) {
13702 /*
13703 * This indicates first frame received for this sequence.
13704 * Queue the buffer on the vport's rcv_buffer_list.
13705 */
13706 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
James Smart45ed1192009-10-02 15:17:02 -040013707 lpfc_update_rcv_time_stamp(vport);
James Smart4f774512009-05-22 14:52:35 -040013708 return dmabuf;
13709 }
13710 temp_hdr = seq_dmabuf->hbuf.virt;
James Smarteeead812009-12-21 17:01:23 -050013711 if (be16_to_cpu(new_hdr->fh_seq_cnt) <
13712 be16_to_cpu(temp_hdr->fh_seq_cnt)) {
James Smart4d9ab992009-10-02 15:16:39 -040013713 list_del_init(&seq_dmabuf->hbuf.list);
13714 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
13715 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
James Smart45ed1192009-10-02 15:17:02 -040013716 lpfc_update_rcv_time_stamp(vport);
James Smart4f774512009-05-22 14:52:35 -040013717 return dmabuf;
13718 }
James Smart45ed1192009-10-02 15:17:02 -040013719 /* move this sequence to the tail to indicate a young sequence */
13720 list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list);
13721 seq_dmabuf->time_stamp = jiffies;
13722 lpfc_update_rcv_time_stamp(vport);
James Smarteeead812009-12-21 17:01:23 -050013723 if (list_empty(&seq_dmabuf->dbuf.list)) {
13724 temp_hdr = dmabuf->hbuf.virt;
13725 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
13726 return seq_dmabuf;
13727 }
James Smart4f774512009-05-22 14:52:35 -040013728 /* find the correct place in the sequence to insert this frame */
13729 list_for_each_entry_reverse(d_buf, &seq_dmabuf->dbuf.list, list) {
13730 temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
13731 temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt;
13732 /*
13733 * If the frame's sequence count is greater than the frame on
13734 * the list then insert the frame right after this frame
13735 */
James Smarteeead812009-12-21 17:01:23 -050013736 if (be16_to_cpu(new_hdr->fh_seq_cnt) >
13737 be16_to_cpu(temp_hdr->fh_seq_cnt)) {
James Smart4f774512009-05-22 14:52:35 -040013738 list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
13739 return seq_dmabuf;
13740 }
13741 }
13742 return NULL;
13743}
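
/*
 * Editorial sketch, not part of the driver build: frames are grouped into a
 * sequence by the (SEQ_ID, OX_ID, S_ID) triple, which is the match that
 * lpfc_fc_frame_add() above and the abort path below apply to each pending
 * header buffer. The helper name below is hypothetical.
 */
#if 0
static bool lpfc_example_same_sequence(struct fc_frame_header *a,
				       struct fc_frame_header *b)
{
	return (a->fh_seq_id == b->fh_seq_id) &&
	       (a->fh_ox_id == b->fh_ox_id) &&
	       (memcmp(&a->fh_s_id, &b->fh_s_id, 3) == 0);
}
#endif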
13744
13745/**
James Smart6669f9b2009-10-02 15:16:45 -040013746 * lpfc_sli4_abort_partial_seq - Abort partially assembled unsol sequence
13747 * @vport: pointer to a virtual port
13748 * @dmabuf: pointer to a dmabuf that describes the FC sequence
13749 *
13750 * This function tries to abort the partially assembled sequence described
13751 * by the information in the basic abort @dmabuf. It checks whether such a
13752 * partially assembled sequence is held by the driver. If so, it frees all
13753 * the frames from the partially assembled sequence.
13754 *
13755 * Return
13756 * true -- if there is matching partially assembled sequence present and all
13757 * the frames freed with the sequence;
13758 * false -- if there is no matching partially assembled sequence present so
13759 * nothing got aborted in the lower layer driver
13760 **/
13761static bool
13762lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport,
13763 struct hbq_dmabuf *dmabuf)
13764{
13765 struct fc_frame_header *new_hdr;
13766 struct fc_frame_header *temp_hdr;
13767 struct lpfc_dmabuf *d_buf, *n_buf, *h_buf;
13768 struct hbq_dmabuf *seq_dmabuf = NULL;
13769
13770 /* Use the hdr_buf to find the sequence that matches this frame */
13771 INIT_LIST_HEAD(&dmabuf->dbuf.list);
13772 INIT_LIST_HEAD(&dmabuf->hbuf.list);
13773 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
13774 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
13775 temp_hdr = (struct fc_frame_header *)h_buf->virt;
13776 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
13777 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
13778 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
13779 continue;
13780 /* found a pending sequence that matches this frame */
13781 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
13782 break;
13783 }
13784
13785 /* Free up all the frames from the partially assembled sequence */
13786 if (seq_dmabuf) {
13787 list_for_each_entry_safe(d_buf, n_buf,
13788 &seq_dmabuf->dbuf.list, list) {
13789 list_del_init(&d_buf->list);
13790 lpfc_in_buf_free(vport->phba, d_buf);
13791 }
13792 return true;
13793 }
13794 return false;
13795}
13796
13797/**
James Smart546fc852011-03-11 16:06:29 -050013798 * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler
James Smart6669f9b2009-10-02 15:16:45 -040013799 * @phba: Pointer to HBA context object.
13800 * @cmd_iocbq: pointer to the command iocbq structure.
13801 * @rsp_iocbq: pointer to the response iocbq structure.
13802 *
James Smart546fc852011-03-11 16:06:29 -050013803 * This function handles the sequence abort response iocb command complete
James Smart6669f9b2009-10-02 15:16:45 -040013804 * event. It properly releases the memory allocated to the sequence abort
13805 * response iocb.
13806 **/
13807static void
James Smart546fc852011-03-11 16:06:29 -050013808lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
James Smart6669f9b2009-10-02 15:16:45 -040013809 struct lpfc_iocbq *cmd_iocbq,
13810 struct lpfc_iocbq *rsp_iocbq)
13811{
13812 if (cmd_iocbq)
13813 lpfc_sli_release_iocbq(phba, cmd_iocbq);
13814}
13815
13816/**
James Smart6d368e52011-05-24 11:44:12 -040013817 * lpfc_sli4_xri_inrange - check xri is in range of xris owned by driver.
13818 * @phba: Pointer to HBA context object.
13819 * @xri: xri id in transaction.
13820 *
13821 * This function validates that the xri maps to the known range of XRIs
13822 * allocated and used by the driver.
13823 **/
James Smart7851fe22011-07-22 18:36:52 -040013824uint16_t
James Smart6d368e52011-05-24 11:44:12 -040013825lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
13826 uint16_t xri)
13827{
13828 int i;
13829
13830 for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) {
13831 if (xri == phba->sli4_hba.xri_ids[i])
13832 return i;
13833 }
13834 return NO_XRI;
13835}
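
/*
 * Editorial sketch, not part of the driver build: lpfc_sli4_xri_inrange()
 * maps a physical XRI back to the driver's logical index, or NO_XRI when the
 * port reports an XRI the driver never allocated. The abort-response path
 * below uses it to validate the RX_ID from an ABTS before marking an RRQ
 * active. The helper name below is hypothetical.
 */
#if 0
static bool lpfc_example_xri_is_ours(struct lpfc_hba *phba, uint16_t xri)
{
	return lpfc_sli4_xri_inrange(phba, xri) != NO_XRI;
}
#endif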
13836
13837
13838/**
James Smart546fc852011-03-11 16:06:29 -050013839 * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort
James Smart6669f9b2009-10-02 15:16:45 -040013840 * @phba: Pointer to HBA context object.
13841 * @fc_hdr: pointer to a FC frame header.
13842 *
James Smart546fc852011-03-11 16:06:29 -050013843 * This function sends a basic response to a previous unsol sequence abort
James Smart6669f9b2009-10-02 15:16:45 -040013844 * event after aborting the sequence handling.
13845 **/
13846static void
James Smart546fc852011-03-11 16:06:29 -050013847lpfc_sli4_seq_abort_rsp(struct lpfc_hba *phba,
James Smart6669f9b2009-10-02 15:16:45 -040013848 struct fc_frame_header *fc_hdr)
13849{
13850 struct lpfc_iocbq *ctiocb = NULL;
13851 struct lpfc_nodelist *ndlp;
James Smart5ffc2662009-11-18 15:39:44 -050013852 uint16_t oxid, rxid;
13853 uint32_t sid, fctl;
James Smart6669f9b2009-10-02 15:16:45 -040013854 IOCB_t *icmd;
James Smart546fc852011-03-11 16:06:29 -050013855 int rc;
James Smart6669f9b2009-10-02 15:16:45 -040013856
13857 if (!lpfc_is_link_up(phba))
13858 return;
13859
13860 sid = sli4_sid_from_fc_hdr(fc_hdr);
13861 oxid = be16_to_cpu(fc_hdr->fh_ox_id);
James Smart5ffc2662009-11-18 15:39:44 -050013862 rxid = be16_to_cpu(fc_hdr->fh_rx_id);
James Smart6669f9b2009-10-02 15:16:45 -040013863
13864 ndlp = lpfc_findnode_did(phba->pport, sid);
13865 if (!ndlp) {
13866 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
13867 "1268 Find ndlp returned NULL for oxid:x%x "
13868 "SID:x%x\n", oxid, sid);
13869 return;
13870 }
James Smart6d368e52011-05-24 11:44:12 -040013871 if (lpfc_sli4_xri_inrange(phba, rxid))
James Smart19ca7602010-11-20 23:11:55 -050013872 lpfc_set_rrq_active(phba, ndlp, rxid, oxid, 0);
James Smart6669f9b2009-10-02 15:16:45 -040013873
James Smart546fc852011-03-11 16:06:29 -050013874 /* Allocate buffer for rsp iocb */
James Smart6669f9b2009-10-02 15:16:45 -040013875 ctiocb = lpfc_sli_get_iocbq(phba);
13876 if (!ctiocb)
13877 return;
13878
James Smart5ffc2662009-11-18 15:39:44 -050013879 /* Extract the F_CTL field from FC_HDR */
13880 fctl = sli4_fctl_from_fc_hdr(fc_hdr);
13881
James Smart6669f9b2009-10-02 15:16:45 -040013882 icmd = &ctiocb->iocb;
James Smart6669f9b2009-10-02 15:16:45 -040013883 icmd->un.xseq64.bdl.bdeSize = 0;
James Smart5ffc2662009-11-18 15:39:44 -050013884 icmd->un.xseq64.bdl.ulpIoTag32 = 0;
James Smart6669f9b2009-10-02 15:16:45 -040013885 icmd->un.xseq64.w5.hcsw.Dfctl = 0;
13886 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_ACC;
13887 icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_BLS;
13888
13889 /* Fill in the rest of iocb fields */
13890 icmd->ulpCommand = CMD_XMIT_BLS_RSP64_CX;
13891 icmd->ulpBdeCount = 0;
13892 icmd->ulpLe = 1;
13893 icmd->ulpClass = CLASS3;
James Smart6d368e52011-05-24 11:44:12 -040013894 icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
James Smartbe858b62010-12-15 17:57:20 -050013895 ctiocb->context1 = ndlp;
James Smart6669f9b2009-10-02 15:16:45 -040013896
James Smart6669f9b2009-10-02 15:16:45 -040013897 ctiocb->iocb_cmpl = NULL;
13898 ctiocb->vport = phba->pport;
James Smart546fc852011-03-11 16:06:29 -050013899 ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
James Smart6d368e52011-05-24 11:44:12 -040013900 ctiocb->sli4_lxritag = NO_XRI;
James Smart546fc852011-03-11 16:06:29 -050013901 ctiocb->sli4_xritag = NO_XRI;
13902
13903 /* If the oxid maps to the FCP XRI range or if it is out of range,
13904 * send a BLS_RJT. The driver no longer has that exchange.
13905 * Override the IOCB for a BA_RJT.
13906 */
13907 if (oxid > (phba->sli4_hba.max_cfg_param.max_xri +
13908 phba->sli4_hba.max_cfg_param.xri_base) ||
13909 oxid > (lpfc_sli4_get_els_iocb_cnt(phba) +
13910 phba->sli4_hba.max_cfg_param.xri_base)) {
13911 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
13912 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
13913 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
13914 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
13915 }
James Smart6669f9b2009-10-02 15:16:45 -040013916
James Smart5ffc2662009-11-18 15:39:44 -050013917 if (fctl & FC_FC_EX_CTX) {
13918 /* ABTS sent by responder to CT exchange, construction
13919 * of BA_ACC will use OX_ID from ABTS for the XRI_TAG
13920 * field and RX_ID from ABTS for RX_ID field.
13921 */
James Smart546fc852011-03-11 16:06:29 -050013922 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_RSP);
13923 bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid);
James Smart5ffc2662009-11-18 15:39:44 -050013924 } else {
13925 /* ABTS sent by initiator to CT exchange, construction
13926 * of BA_ACC will need to allocate a new XRI as for the
13927 * XRI_TAG and RX_ID fields.
13928 */
James Smart546fc852011-03-11 16:06:29 -050013929 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_INT);
13930 bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, NO_XRI);
James Smart5ffc2662009-11-18 15:39:44 -050013931 }
James Smart546fc852011-03-11 16:06:29 -050013932 bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid);
James Smart5ffc2662009-11-18 15:39:44 -050013933
James Smart546fc852011-03-11 16:06:29 -050013934 /* Xmit CT abts response on exchange <xid> */
James Smart6669f9b2009-10-02 15:16:45 -040013935 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
James Smart546fc852011-03-11 16:06:29 -050013936 "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n",
13937 icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state);
13938
13939 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
13940 if (rc == IOCB_ERROR) {
13941 lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
13942 "2925 Failed to issue CT ABTS RSP x%x on "
13943 "xri x%x, Data x%x\n",
13944 icmd->un.xseq64.w5.hcsw.Rctl, oxid,
13945 phba->link_state);
13946 lpfc_sli_release_iocbq(phba, ctiocb);
13947 }
James Smart6669f9b2009-10-02 15:16:45 -040013948}
13949
13950/**
13951 * lpfc_sli4_handle_unsol_abort - Handle sli-4 unsolicited abort event
13952 * @vport: Pointer to the vport on which this sequence was received
13953 * @dmabuf: pointer to a dmabuf that describes the FC sequence
13954 *
13955 * This function handles an SLI-4 unsolicited abort event. If the unsolicited
13956 * receive sequence is only partially assembled by the driver, it shall abort
13957 * the partially assembled frames for the sequence. Otherwise, if the
13958 * unsolicited receive sequence has been completely assembled and passed to
13959 * the Upper Layer Protocol (ULP), it then marks the per-oxid status to show
13960 * that the unsolicited sequence has been aborted. After that, it will issue
13961 * a basic accept (BA_ACC) to acknowledge the abort.
13962 **/
13963void
13964lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
13965 struct hbq_dmabuf *dmabuf)
13966{
13967 struct lpfc_hba *phba = vport->phba;
13968 struct fc_frame_header fc_hdr;
James Smart5ffc2662009-11-18 15:39:44 -050013969 uint32_t fctl;
James Smart6669f9b2009-10-02 15:16:45 -040013970 bool abts_par;
13971
James Smart6669f9b2009-10-02 15:16:45 -040013972 /* Make a copy of fc_hdr before the dmabuf being released */
13973 memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
James Smart5ffc2662009-11-18 15:39:44 -050013974 fctl = sli4_fctl_from_fc_hdr(&fc_hdr);
James Smart6669f9b2009-10-02 15:16:45 -040013975
James Smart5ffc2662009-11-18 15:39:44 -050013976 if (fctl & FC_FC_EX_CTX) {
13977 /*
13978 * ABTS sent by responder to exchange, just free the buffer
13979 */
James Smart6669f9b2009-10-02 15:16:45 -040013980 lpfc_in_buf_free(phba, &dmabuf->dbuf);
James Smart5ffc2662009-11-18 15:39:44 -050013981 } else {
13982 /*
13983 * ABTS sent by initiator to exchange, need to do cleanup
13984 */
13985 /* Try to abort partially assembled seq */
13986 abts_par = lpfc_sli4_abort_partial_seq(vport, dmabuf);
13987
13988 /* Send abort to ULP if partially seq abort failed */
13989 if (abts_par == false)
13990 lpfc_sli4_send_seq_to_ulp(vport, dmabuf);
13991 else
13992 lpfc_in_buf_free(phba, &dmabuf->dbuf);
13993 }
James Smart6669f9b2009-10-02 15:16:45 -040013994 /* Send basic accept (BA_ACC) to the abort requester */
James Smart546fc852011-03-11 16:06:29 -050013995 lpfc_sli4_seq_abort_rsp(phba, &fc_hdr);
James Smart6669f9b2009-10-02 15:16:45 -040013996}
13997
13998/**
James Smart4f774512009-05-22 14:52:35 -040013999 * lpfc_seq_complete - Indicates if a sequence is complete
14000 * @dmabuf: pointer to a dmabuf that describes the FC sequence
14001 *
14002 * This function checks the sequence, starting with the frame described by
14003 * @dmabuf, to see if all the frames associated with this sequence are present.
14004 * The frames associated with this sequence are linked to the @dmabuf using the
14005 * dbuf list. This function checks three things: 1) that the first frame has a
14006 * sequence count of zero, 2) that there is a frame with the last frame of
14007 * sequence bit set, and 3) that there are no holes in the sequence count. The
14008 * function returns 1 when the sequence is complete, otherwise it returns 0.
14009 **/
14010static int
14011lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
14012{
14013 struct fc_frame_header *hdr;
14014 struct lpfc_dmabuf *d_buf;
14015 struct hbq_dmabuf *seq_dmabuf;
14016 uint32_t fctl;
14017 int seq_count = 0;
14018
14019 hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
14020 /* make sure the first frame of the sequence has a sequence count of zero */
14021 if (hdr->fh_seq_cnt != seq_count)
14022 return 0;
14023 fctl = (hdr->fh_f_ctl[0] << 16 |
14024 hdr->fh_f_ctl[1] << 8 |
14025 hdr->fh_f_ctl[2]);
14026 /* If last frame of sequence we can return success. */
14027 if (fctl & FC_FC_END_SEQ)
14028 return 1;
14029 list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) {
14030 seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
14031 hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
14032 /* If there is a hole in the sequence count then fail. */
James Smarteeead812009-12-21 17:01:23 -050014033 if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt))
James Smart4f774512009-05-22 14:52:35 -040014034 return 0;
14035 fctl = (hdr->fh_f_ctl[0] << 16 |
14036 hdr->fh_f_ctl[1] << 8 |
14037 hdr->fh_f_ctl[2]);
14038 /* If last frame of sequence we can return success. */
14039 if (fctl & FC_FC_END_SEQ)
14040 return 1;
14041 }
14042 return 0;
14043}
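
/*
 * Editorial sketch, not part of the driver build: lpfc_seq_complete() folds
 * the three F_CTL bytes into one word and tests FC_FC_END_SEQ to recognize
 * the last frame of a sequence. The helper below restates that test; its
 * name is hypothetical.
 */
#if 0
static bool lpfc_example_last_frame(struct fc_frame_header *hdr)
{
	uint32_t fctl = (hdr->fh_f_ctl[0] << 16 |
			 hdr->fh_f_ctl[1] << 8 |
			 hdr->fh_f_ctl[2]);

	return (fctl & FC_FC_END_SEQ) != 0;
}
#endif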
14044
14045/**
14046 * lpfc_prep_seq - Prep sequence for ULP processing
14047 * @vport: Pointer to the vport on which this sequence was received
14048 * @dmabuf: pointer to a dmabuf that describes the FC sequence
14049 *
14050 * This function takes a sequence, described by a list of frames, and creates
14051 * a list of iocbq structures to describe the sequence. This iocbq list will be
14052 * used to issue to the generic unsolicited sequence handler. This routine
14053 * returns a pointer to the first iocbq in the list. If the function is unable
14054 * to allocate an iocbq then it throws out the received frames that could not
14055 * be described and returns a pointer to the first iocbq. If unable to
14056 * allocate any iocbqs (including the first) this function will return NULL.
14057 **/
14058static struct lpfc_iocbq *
14059lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
14060{
James Smart7851fe22011-07-22 18:36:52 -040014061 struct hbq_dmabuf *hbq_buf;
James Smart4f774512009-05-22 14:52:35 -040014062 struct lpfc_dmabuf *d_buf, *n_buf;
14063 struct lpfc_iocbq *first_iocbq, *iocbq;
14064 struct fc_frame_header *fc_hdr;
14065 uint32_t sid;
James Smart7851fe22011-07-22 18:36:52 -040014066 uint32_t len, tot_len;
James Smarteeead812009-12-21 17:01:23 -050014067 struct ulp_bde64 *pbde;
James Smart4f774512009-05-22 14:52:35 -040014068
14069 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
14070 /* remove from receive buffer list */
14071 list_del_init(&seq_dmabuf->hbuf.list);
James Smart45ed1192009-10-02 15:17:02 -040014072 lpfc_update_rcv_time_stamp(vport);
James Smart4f774512009-05-22 14:52:35 -040014073 /* get the Remote Port's SID */
James Smart6669f9b2009-10-02 15:16:45 -040014074 sid = sli4_sid_from_fc_hdr(fc_hdr);
James Smart7851fe22011-07-22 18:36:52 -040014075 tot_len = 0;
James Smart4f774512009-05-22 14:52:35 -040014076 /* Get an iocbq struct to fill in. */
14077 first_iocbq = lpfc_sli_get_iocbq(vport->phba);
14078 if (first_iocbq) {
14079 /* Initialize the first IOCB. */
James Smart8fa38512009-07-19 10:01:03 -040014080 first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0;
James Smart4f774512009-05-22 14:52:35 -040014081 first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;
14082 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
James Smart7851fe22011-07-22 18:36:52 -040014083 first_iocbq->iocb.ulpContext = NO_XRI;
14084 first_iocbq->iocb.unsli3.rcvsli3.ox_id =
14085 be16_to_cpu(fc_hdr->fh_ox_id);
14086 /* iocbq is prepped for internal consumption. Physical vpi. */
14087 first_iocbq->iocb.unsli3.rcvsli3.vpi =
14088 vport->phba->vpi_ids[vport->vpi];
James Smart4f774512009-05-22 14:52:35 -040014089 /* put the first buffer into the first IOCBq */
14090 first_iocbq->context2 = &seq_dmabuf->dbuf;
14091 first_iocbq->context3 = NULL;
14092 first_iocbq->iocb.ulpBdeCount = 1;
14093 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize =
14094 LPFC_DATA_BUF_SIZE;
14095 first_iocbq->iocb.un.rcvels.remoteID = sid;
James Smart7851fe22011-07-22 18:36:52 -040014096 tot_len = bf_get(lpfc_rcqe_length,
James Smart4d9ab992009-10-02 15:16:39 -040014097 &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
James Smart7851fe22011-07-22 18:36:52 -040014098 first_iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
James Smart4f774512009-05-22 14:52:35 -040014099 }
14100 iocbq = first_iocbq;
14101 /*
14102 * Each IOCBq can have two Buffers assigned, so go through the list
14103 * of buffers for this sequence and save two buffers in each IOCBq
14104 */
14105 list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) {
14106 if (!iocbq) {
14107 lpfc_in_buf_free(vport->phba, d_buf);
14108 continue;
14109 }
14110 if (!iocbq->context3) {
14111 iocbq->context3 = d_buf;
14112 iocbq->iocb.ulpBdeCount++;
James Smarteeead812009-12-21 17:01:23 -050014113 pbde = (struct ulp_bde64 *)
14114 &iocbq->iocb.unsli3.sli3Words[4];
14115 pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE;
James Smart7851fe22011-07-22 18:36:52 -040014116
14117 /* We need to get the size out of the right CQE */
14118 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
14119 len = bf_get(lpfc_rcqe_length,
14120 &hbq_buf->cq_event.cqe.rcqe_cmpl);
14121 iocbq->iocb.unsli3.rcvsli3.acc_len += len;
14122 tot_len += len;
James Smart4f774512009-05-22 14:52:35 -040014123 } else {
14124 iocbq = lpfc_sli_get_iocbq(vport->phba);
14125 if (!iocbq) {
14126 if (first_iocbq) {
14127 first_iocbq->iocb.ulpStatus =
14128 IOSTAT_FCP_RSP_ERROR;
14129 first_iocbq->iocb.un.ulpWord[4] =
14130 IOERR_NO_RESOURCES;
14131 }
14132 lpfc_in_buf_free(vport->phba, d_buf);
14133 continue;
14134 }
14135 iocbq->context2 = d_buf;
14136 iocbq->context3 = NULL;
14137 iocbq->iocb.ulpBdeCount = 1;
14138 iocbq->iocb.un.cont64[0].tus.f.bdeSize =
14139 LPFC_DATA_BUF_SIZE;
James Smart7851fe22011-07-22 18:36:52 -040014140
14141 /* We need to get the size out of the right CQE */
14142 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
14143 len = bf_get(lpfc_rcqe_length,
14144 &hbq_buf->cq_event.cqe.rcqe_cmpl);
14145 tot_len += len;
14146 iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
14147
James Smart4f774512009-05-22 14:52:35 -040014148 iocbq->iocb.un.rcvels.remoteID = sid;
14149 list_add_tail(&iocbq->list, &first_iocbq->list);
14150 }
14151 }
14152 return first_iocbq;
14153}
14154
James Smart6669f9b2009-10-02 15:16:45 -040014155static void
14156lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
14157 struct hbq_dmabuf *seq_dmabuf)
14158{
14159 struct fc_frame_header *fc_hdr;
14160 struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb;
14161 struct lpfc_hba *phba = vport->phba;
14162
14163 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
14164 iocbq = lpfc_prep_seq(vport, seq_dmabuf);
14165 if (!iocbq) {
14166 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14167 "2707 Ring %d handler: Failed to allocate "
14168 "iocb Rctl x%x Type x%x received\n",
14169 LPFC_ELS_RING,
14170 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
14171 return;
14172 }
14173 if (!lpfc_complete_unsol_iocb(phba,
14174 &phba->sli.ring[LPFC_ELS_RING],
14175 iocbq, fc_hdr->fh_r_ctl,
14176 fc_hdr->fh_type))
James Smart6d368e52011-05-24 11:44:12 -040014177 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
James Smart6669f9b2009-10-02 15:16:45 -040014178 "2540 Ring %d handler: unexpected Rctl "
14179 "x%x Type x%x received\n",
14180 LPFC_ELS_RING,
14181 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
14182
14183 /* Free iocb created in lpfc_prep_seq */
14184 list_for_each_entry_safe(curr_iocb, next_iocb,
14185 &iocbq->list, list) {
14186 list_del_init(&curr_iocb->list);
14187 lpfc_sli_release_iocbq(phba, curr_iocb);
14188 }
14189 lpfc_sli_release_iocbq(phba, iocbq);
14190}
14191
James Smart4f774512009-05-22 14:52:35 -040014192/**
14193 * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware
14194 * @phba: Pointer to HBA context object.
14195 *
14196 * This function is called with no lock held. This function processes all
14197 * the received buffers and gives them to the upper layers when a received buffer
14198 * indicates that it is the final frame in the sequence. The interrupt
14199 * service routine processes received buffers in interrupt context and adds
14200 * received dma buffers to the rb_pend_list queue and signals the worker thread.
14201 * The worker thread calls lpfc_sli4_handle_received_buffer, which will call the
14202 * appropriate receive function when the final frame in a sequence is received.
14203 **/
James Smart4d9ab992009-10-02 15:16:39 -040014204void
14205lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
14206 struct hbq_dmabuf *dmabuf)
James Smart4f774512009-05-22 14:52:35 -040014207{
James Smart4d9ab992009-10-02 15:16:39 -040014208 struct hbq_dmabuf *seq_dmabuf;
James Smart4f774512009-05-22 14:52:35 -040014209 struct fc_frame_header *fc_hdr;
14210 struct lpfc_vport *vport;
14211 uint32_t fcfi;
James Smart4f774512009-05-22 14:52:35 -040014212
James Smart4f774512009-05-22 14:52:35 -040014213 /* Process each received buffer */
James Smart4d9ab992009-10-02 15:16:39 -040014214 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
14215 /* check to see if this a valid type of frame */
14216 if (lpfc_fc_frame_check(phba, fc_hdr)) {
14217 lpfc_in_buf_free(phba, &dmabuf->dbuf);
14218 return;
14219 }
James Smart7851fe22011-07-22 18:36:52 -040014220 if ((bf_get(lpfc_cqe_code,
14221 &dmabuf->cq_event.cqe.rcqe_cmpl) == CQE_CODE_RECEIVE_V1))
14222 fcfi = bf_get(lpfc_rcqe_fcf_id_v1,
14223 &dmabuf->cq_event.cqe.rcqe_cmpl);
14224 else
14225 fcfi = bf_get(lpfc_rcqe_fcf_id,
14226 &dmabuf->cq_event.cqe.rcqe_cmpl);
James Smart4d9ab992009-10-02 15:16:39 -040014227 vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi);
James Smartc8685952009-11-18 15:39:16 -050014228 if (!vport || !(vport->vpi_state & LPFC_VPI_REGISTERED)) {
James Smart4d9ab992009-10-02 15:16:39 -040014229 /* throw out the frame */
14230 lpfc_in_buf_free(phba, &dmabuf->dbuf);
14231 return;
14232 }
James Smart6669f9b2009-10-02 15:16:45 -040014233 /* Handle the basic abort sequence (BA_ABTS) event */
14234 if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) {
14235 lpfc_sli4_handle_unsol_abort(vport, dmabuf);
14236 return;
14237 }
14238
James Smart4d9ab992009-10-02 15:16:39 -040014239 /* Link this frame */
14240 seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
14241 if (!seq_dmabuf) {
14242 /* unable to add frame to vport - throw it out */
14243 lpfc_in_buf_free(phba, &dmabuf->dbuf);
14244 return;
14245 }
14246 /* If not last frame in sequence continue processing frames. */
James Smartdef9c7a2009-12-21 17:02:28 -050014247 if (!lpfc_seq_complete(seq_dmabuf))
James Smart4d9ab992009-10-02 15:16:39 -040014248 return;
James Smartdef9c7a2009-12-21 17:02:28 -050014249
James Smart6669f9b2009-10-02 15:16:45 -040014250 /* Send the complete sequence to the upper layer protocol */
14251 lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf);
James Smart4f774512009-05-22 14:52:35 -040014252}
James Smart6fb120a2009-05-22 14:52:59 -040014253
14254/**
14255 * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port
14256 * @phba: pointer to lpfc hba data structure.
14257 *
14258 * This routine is invoked to post rpi header templates to the
14259 * HBA consistent with the SLI-4 interface spec. This routine
James Smart49198b32010-04-06 15:04:33 -040014260 * posts SLI4_PAGE_SIZE memory regions to the port, each holding up to
14261 * 64 rpi context headers.
James Smart6fb120a2009-05-22 14:52:59 -040014262 *
14263 * This routine does not require any locks. It's usage is expected
14264 * to be driver load or reset recovery when the driver is
14265 * sequential.
14266 *
14267 * Return codes
André Goddard Rosaaf901ca2009-11-14 13:09:05 -020014268 * 0 - successful
James Smartd439d282010-09-29 11:18:45 -040014269 * -EIO - The mailbox failed to complete successfully.
James Smart6fb120a2009-05-22 14:52:59 -040014270 * When this error occurs, the driver is not guaranteed
14271 * to have any rpi regions posted to the device and
14272 * must either attempt to repost the regions or take a
14273 * fatal error.
14274 **/
14275int
14276lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
14277{
14278 struct lpfc_rpi_hdr *rpi_page;
14279 uint32_t rc = 0;
James Smart6d368e52011-05-24 11:44:12 -040014280 uint16_t lrpi = 0;
James Smart6fb120a2009-05-22 14:52:59 -040014281
James Smart6d368e52011-05-24 11:44:12 -040014282 /* SLI4 ports that support extents do not require RPI headers. */
14283 if (!phba->sli4_hba.rpi_hdrs_in_use)
14284 goto exit;
14285 if (phba->sli4_hba.extents_in_use)
14286 return -EIO;
14287
James Smart6fb120a2009-05-22 14:52:59 -040014288 list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
James Smart6d368e52011-05-24 11:44:12 -040014289 /*
14290 * Assign the rpi headers a physical rpi only if the driver
14291 * has not initialized those resources. A port reset only
14292 * needs the headers posted.
14293 */
14294 if (bf_get(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags) !=
14295 LPFC_RPI_RSRC_RDY)
14296 rpi_page->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
14297
James Smart6fb120a2009-05-22 14:52:59 -040014298 rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page);
14299 if (rc != MBX_SUCCESS) {
14300 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14301 "2008 Error %d posting all rpi "
14302 "headers\n", rc);
14303 rc = -EIO;
14304 break;
14305 }
14306 }
14307
James Smart6d368e52011-05-24 11:44:12 -040014308 exit:
14309 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags,
14310 LPFC_RPI_RSRC_RDY);
James Smart6fb120a2009-05-22 14:52:59 -040014311 return rc;
14312}
14313
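/*
 * Illustrative sketch only (hypothetical helper, not part of the driver):
 * a reset-recovery caller would simply repost every rpi header page and
 * treat -EIO as meaning no rpi regions are guaranteed to be posted. The
 * function name lpfc_example_repost_rpi_hdrs is made up.
 */
static int
lpfc_example_repost_rpi_hdrs(struct lpfc_hba *phba)
{
	int rc;

	/* Repost every rpi header page currently tracked by the driver. */
	rc = lpfc_sli4_post_all_rpi_hdrs(phba);
	if (rc == -EIO)
		/* Caller must either repost the regions or take a fatal error. */
		return rc;
	return 0;
}
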
14314/**
14315 * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port
14316 * @phba: pointer to lpfc hba data structure.
14317 * @rpi_page: pointer to the rpi memory region.
14318 *
14319 * This routine is invoked to post a single rpi header to the
14320 * HBA consistent with the SLI-4 interface spec. This memory region
14321 * maps up to 64 rpi context regions.
14322 *
14323 * Return codes
André Goddard Rosaaf901ca2009-11-14 13:09:05 -020014324 * 0 - successful
James Smartd439d282010-09-29 11:18:45 -040014325 * -ENOMEM - No available memory
14326 * -EIO - The mailbox failed to complete successfully.
James Smart6fb120a2009-05-22 14:52:59 -040014327 **/
14328int
14329lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
14330{
14331 LPFC_MBOXQ_t *mboxq;
14332 struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl;
14333 uint32_t rc = 0;
James Smart6fb120a2009-05-22 14:52:59 -040014334 uint32_t shdr_status, shdr_add_status;
14335 union lpfc_sli4_cfg_shdr *shdr;
14336
James Smart6d368e52011-05-24 11:44:12 -040014337 /* SLI4 ports that support extents do not require RPI headers. */
14338 if (!phba->sli4_hba.rpi_hdrs_in_use)
14339 return rc;
14340 if (phba->sli4_hba.extents_in_use)
14341 return -EIO;
14342
James Smart6fb120a2009-05-22 14:52:59 -040014343 /* The port is notified of the header region via a mailbox command. */
14344 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14345 if (!mboxq) {
14346 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14347 "2001 Unable to allocate memory for issuing "
14348 "SLI_CONFIG_SPECIAL mailbox command\n");
14349 return -ENOMEM;
14350 }
14351
14352 /* Post all rpi memory regions to the port. */
14353 hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl;
James Smart6fb120a2009-05-22 14:52:59 -040014354 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
14355 LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
14356 sizeof(struct lpfc_mbx_post_hdr_tmpl) -
James Smartfedd3b72011-02-16 12:39:24 -050014357 sizeof(struct lpfc_sli4_cfg_mhdr),
14358 LPFC_SLI4_MBX_EMBED);
James Smart6d368e52011-05-24 11:44:12 -040014359
14361 /* Post the physical rpi to the port for this rpi header. */
James Smart6fb120a2009-05-22 14:52:59 -040014362 bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
14363 rpi_page->start_rpi);
James Smart6d368e52011-05-24 11:44:12 -040014364 bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
14365 hdr_tmpl, rpi_page->page_count);
14366
James Smart6fb120a2009-05-22 14:52:59 -040014367 hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
14368 hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
James Smartf1126682009-06-10 17:22:44 -040014369 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
James Smart6fb120a2009-05-22 14:52:59 -040014370 shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
14371 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14372 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14373 if (rc != MBX_TIMEOUT)
14374 mempool_free(mboxq, phba->mbox_mem_pool);
14375 if (shdr_status || shdr_add_status || rc) {
14376 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14377 "2514 POST_RPI_HDR mailbox failed with "
14378 "status x%x add_status x%x, mbx status x%x\n",
14379 shdr_status, shdr_add_status, rc);
14380 rc = -ENXIO;
14381 }
14382 return rc;
14383}
14384
14385/**
14386 * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range
14387 * @phba: pointer to lpfc hba data structure.
14388 *
14389 * This routine is invoked to allocate the next available rpi from the
14390 * driver's rpi bitmask, consistent with the SLI-4 interface spec. If the
James Smart49198b32010-04-06 15:04:33 -040014391 * driver is running low on rpi resources, it also allocates and posts
14392 * another rpi header page to the port.
James Smart6fb120a2009-05-22 14:52:59 -040014393 *
14394 * Returns
André Goddard Rosaaf901ca2009-11-14 13:09:05 -020014395 * A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful
James Smart6fb120a2009-05-22 14:52:59 -040014396 * LPFC_RPI_ALLOC_ERROR if no rpis are available.
14397 **/
14398int
14399lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
14400{
James Smart6d368e52011-05-24 11:44:12 -040014401 unsigned long rpi;
14402 uint16_t max_rpi, rpi_limit;
14403 uint16_t rpi_remaining, lrpi = 0;
James Smart6fb120a2009-05-22 14:52:59 -040014404 struct lpfc_rpi_hdr *rpi_hdr;
14405
14406 max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
James Smart6fb120a2009-05-22 14:52:59 -040014407 rpi_limit = phba->sli4_hba.next_rpi;
14408
14409 /*
James Smart6d368e52011-05-24 11:44:12 -040014410 * Fetch the next logical rpi. Because this index is logical,
14411 * the driver starts at 0 each time.
James Smart6fb120a2009-05-22 14:52:59 -040014412 */
14413 spin_lock_irq(&phba->hbalock);
James Smart6d368e52011-05-24 11:44:12 -040014414 rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0);
14415 if (rpi >= rpi_limit)
James Smart6fb120a2009-05-22 14:52:59 -040014416 rpi = LPFC_RPI_ALLOC_ERROR;
14417 else {
14418 set_bit(rpi, phba->sli4_hba.rpi_bmask);
14419 phba->sli4_hba.max_cfg_param.rpi_used++;
14420 phba->sli4_hba.rpi_count++;
14421 }
14422
14423 /*
14424 * Don't try to allocate more rpi header regions if the device limit
James Smart6d368e52011-05-24 11:44:12 -040014425 * has been exhausted.
James Smart6fb120a2009-05-22 14:52:59 -040014426 */
14427 if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
14428 (phba->sli4_hba.rpi_count >= max_rpi)) {
14429 spin_unlock_irq(&phba->hbalock);
14430 return rpi;
14431 }
14432
14433 /*
James Smart6d368e52011-05-24 11:44:12 -040014434 * RPI header postings are not required for SLI4 ports capable of
14435 * extents.
14436 */
14437 if (!phba->sli4_hba.rpi_hdrs_in_use) {
14438 spin_unlock_irq(&phba->hbalock);
14439 return rpi;
14440 }
14441
14442 /*
James Smart6fb120a2009-05-22 14:52:59 -040014443 * If the driver is running low on rpi resources, allocate another
14444 * page now. Note that the next_rpi value is used because
14445 * it represents how many are actually in use whereas max_rpi notes
14446	 * the maximum number supported by the device.
14447 */
James Smart6d368e52011-05-24 11:44:12 -040014448 rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count;
James Smart6fb120a2009-05-22 14:52:59 -040014449 spin_unlock_irq(&phba->hbalock);
14450 if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
14451 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
14452 if (!rpi_hdr) {
14453 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14454 "2002 Error Could not grow rpi "
14455 "count\n");
14456 } else {
James Smart6d368e52011-05-24 11:44:12 -040014457 lrpi = rpi_hdr->start_rpi;
14458 rpi_hdr->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
James Smart6fb120a2009-05-22 14:52:59 -040014459 lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
14460 }
14461 }
14462
14463 return rpi;
14464}
14465
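/*
 * Illustrative sketch only (hypothetical helper, not part of the driver):
 * the usual pairing of lpfc_sli4_alloc_rpi() with lpfc_sli4_free_rpi().
 * The function name lpfc_example_with_rpi and the -ENOSPC mapping are
 * assumptions for the sketch.
 */
static int
lpfc_example_with_rpi(struct lpfc_hba *phba)
{
	int rpi;

	rpi = lpfc_sli4_alloc_rpi(phba);
	if (rpi == LPFC_RPI_ALLOC_ERROR)
		return -ENOSPC;

	/* ... use the rpi, e.g. when registering a remote port login ... */

	lpfc_sli4_free_rpi(phba, rpi);
	return 0;
}
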
14466/**
14467 * __lpfc_sli4_free_rpi - Release an rpi for reuse.
14468 * @phba: pointer to lpfc hba data structure.
14469 *
14470 * This routine is invoked to release an rpi to the pool of available
14471 * rpis maintained by the driver. The caller must hold the hbalock.
14472 **/
14473void
James Smartd7c47992010-06-08 18:31:54 -040014474__lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
14475{
14476 if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
14477 phba->sli4_hba.rpi_count--;
14478 phba->sli4_hba.max_cfg_param.rpi_used--;
14479 }
14480}
14481
14482/**
14483 * lpfc_sli4_free_rpi - Release an rpi for reuse.
14484 * @phba: pointer to lpfc hba data structure.
14485 *
14486 * This routine is invoked to release an rpi to the pool of
14487 * available rpis maintained by the driver.
14488 **/
14489void
James Smart6fb120a2009-05-22 14:52:59 -040014490lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
14491{
14492 spin_lock_irq(&phba->hbalock);
James Smartd7c47992010-06-08 18:31:54 -040014493 __lpfc_sli4_free_rpi(phba, rpi);
James Smart6fb120a2009-05-22 14:52:59 -040014494 spin_unlock_irq(&phba->hbalock);
14495}
14496
14497/**
14498 * lpfc_sli4_remove_rpis - Remove the rpi bitmask region
14499 * @phba: pointer to lpfc hba data structure.
14500 *
14501 * This routine is invoked to free the rpi bitmask and rpi id arrays
14502 * that the driver uses to track rpi usage.
14503 **/
14504void
14505lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
14506{
14507 kfree(phba->sli4_hba.rpi_bmask);
James Smart6d368e52011-05-24 11:44:12 -040014508 kfree(phba->sli4_hba.rpi_ids);
14509 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
James Smart6fb120a2009-05-22 14:52:59 -040014510}
14511
14512/**
14513 * lpfc_sli4_resume_rpi - Resume an rpi on the port
14514 * @ndlp: pointer to the remote node whose rpi is to be resumed.
14515 *
14516 * This routine is invoked to issue a RESUME_RPI mailbox command to the
14517 * port for the rpi associated with @ndlp.
14518 **/
14519int
14520lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp)
14521{
14522 LPFC_MBOXQ_t *mboxq;
14523 struct lpfc_hba *phba = ndlp->phba;
14524 int rc;
14525
14526	/* The rpi is resumed via a mailbox command sent to the port. */
14527 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14528 if (!mboxq)
14529 return -ENOMEM;
14530
14531	/* Construct and issue the RESUME_RPI mailbox command. */
14532 lpfc_resume_rpi(mboxq, ndlp);
14533 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
14534 if (rc == MBX_NOT_FINISHED) {
14535 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14536 "2010 Resume RPI Mailbox failed "
14537 "status %d, mbxStatus x%x\n", rc,
14538 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
14539 mempool_free(mboxq, phba->mbox_mem_pool);
14540 return -EIO;
14541 }
14542 return 0;
14543}
14544
14545/**
14546 * lpfc_sli4_init_vpi - Initialize a vpi with the port
James Smart76a95d72010-11-20 23:11:48 -050014547 * @vport: Pointer to the vport for which the vpi is being initialized
James Smart6fb120a2009-05-22 14:52:59 -040014548 *
James Smart76a95d72010-11-20 23:11:48 -050014549 * This routine is invoked to activate a vpi with the port.
James Smart6fb120a2009-05-22 14:52:59 -040014550 *
14551 * Returns:
14552 * 0 success
14553 * -Evalue otherwise
14554 **/
14555int
James Smart76a95d72010-11-20 23:11:48 -050014556lpfc_sli4_init_vpi(struct lpfc_vport *vport)
James Smart6fb120a2009-05-22 14:52:59 -040014557{
14558 LPFC_MBOXQ_t *mboxq;
14559 int rc = 0;
James Smart6a9c52c2009-10-02 15:16:51 -040014560 int retval = MBX_SUCCESS;
James Smart6fb120a2009-05-22 14:52:59 -040014561 uint32_t mbox_tmo;
James Smart76a95d72010-11-20 23:11:48 -050014562 struct lpfc_hba *phba = vport->phba;
James Smart6fb120a2009-05-22 14:52:59 -040014563 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14564 if (!mboxq)
14565 return -ENOMEM;
James Smart76a95d72010-11-20 23:11:48 -050014566 lpfc_init_vpi(phba, mboxq, vport->vpi);
James Smarta183a152011-10-10 21:32:43 -040014567 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
James Smart6fb120a2009-05-22 14:52:59 -040014568 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
James Smart6fb120a2009-05-22 14:52:59 -040014569 if (rc != MBX_SUCCESS) {
James Smart76a95d72010-11-20 23:11:48 -050014570 lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
James Smart6fb120a2009-05-22 14:52:59 -040014571 "2022 INIT VPI Mailbox failed "
14572 "status %d, mbxStatus x%x\n", rc,
14573 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
James Smart6a9c52c2009-10-02 15:16:51 -040014574 retval = -EIO;
James Smart6fb120a2009-05-22 14:52:59 -040014575 }
James Smart6a9c52c2009-10-02 15:16:51 -040014576 if (rc != MBX_TIMEOUT)
James Smart76a95d72010-11-20 23:11:48 -050014577 mempool_free(mboxq, vport->phba->mbox_mem_pool);
James Smart6a9c52c2009-10-02 15:16:51 -040014578
14579 return retval;
James Smart6fb120a2009-05-22 14:52:59 -040014580}
14581
14582/**
14583 * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler.
14584 * @phba: pointer to lpfc hba data structure.
14585 * @mboxq: Pointer to mailbox object.
14586 *
14587 * This routine is invoked to manually add a single FCF record. The caller
14588 * must pass a completely initialized FCF_Record. This routine takes
14589 * care of the nonembedded mailbox operations.
14590 **/
14591static void
14592lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
14593{
14594 void *virt_addr;
14595 union lpfc_sli4_cfg_shdr *shdr;
14596 uint32_t shdr_status, shdr_add_status;
14597
14598 virt_addr = mboxq->sge_array->addr[0];
14599 /* The IOCTL status is embedded in the mailbox subheader. */
14600 shdr = (union lpfc_sli4_cfg_shdr *) virt_addr;
14601 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14602 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14603
14604 if ((shdr_status || shdr_add_status) &&
14605 (shdr_status != STATUS_FCF_IN_USE))
14606 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14607 "2558 ADD_FCF_RECORD mailbox failed with "
14608 "status x%x add_status x%x\n",
14609 shdr_status, shdr_add_status);
14610
14611 lpfc_sli4_mbox_cmd_free(phba, mboxq);
14612}
14613
14614/**
14615 * lpfc_sli4_add_fcf_record - Manually add an FCF Record.
14616 * @phba: pointer to lpfc hba data structure.
14617 * @fcf_record: pointer to the initialized fcf record to add.
14618 *
14619 * This routine is invoked to manually add a single FCF record. The caller
14620 * must pass a completely initialized FCF_Record. This routine takes
14621 * care of the nonembedded mailbox operations.
14622 **/
14623int
14624lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record)
14625{
14626 int rc = 0;
14627 LPFC_MBOXQ_t *mboxq;
14628 uint8_t *bytep;
14629 void *virt_addr;
14630 dma_addr_t phys_addr;
14631 struct lpfc_mbx_sge sge;
14632 uint32_t alloc_len, req_len;
14633 uint32_t fcfindex;
14634
14635 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14636 if (!mboxq) {
14637 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14638 "2009 Failed to allocate mbox for ADD_FCF cmd\n");
14639 return -ENOMEM;
14640 }
14641
14642 req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) +
14643 sizeof(uint32_t);
14644
14645 /* Allocate DMA memory and set up the non-embedded mailbox command */
14646 alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
14647 LPFC_MBOX_OPCODE_FCOE_ADD_FCF,
14648 req_len, LPFC_SLI4_MBX_NEMBED);
14649 if (alloc_len < req_len) {
14650 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14651 "2523 Allocated DMA memory size (x%x) is "
14652 "less than the requested DMA memory "
14653 "size (x%x)\n", alloc_len, req_len);
14654 lpfc_sli4_mbox_cmd_free(phba, mboxq);
14655 return -ENOMEM;
14656 }
14657
14658 /*
14659 * Get the first SGE entry from the non-embedded DMA memory. This
14660 * routine only uses a single SGE.
14661 */
14662 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
14663 phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
James Smart6fb120a2009-05-22 14:52:59 -040014664 virt_addr = mboxq->sge_array->addr[0];
14665 /*
14666 * Configure the FCF record for FCFI 0. This is the driver's
14667	 * hardcoded default and gets used in non-FIP mode.
14668 */
14669 fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record);
14670 bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
14671 lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t));
14672
14673 /*
14674 * Copy the fcf_index and the FCF Record Data. The data starts after
14675 * the FCoE header plus word10. The data copy needs to be endian
14676 * correct.
14677 */
14678 bytep += sizeof(uint32_t);
14679 lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record));
14680 mboxq->vport = phba->pport;
14681 mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record;
14682 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
14683 if (rc == MBX_NOT_FINISHED) {
14684 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14685 "2515 ADD_FCF_RECORD mailbox failed with "
14686 "status 0x%x\n", rc);
14687 lpfc_sli4_mbox_cmd_free(phba, mboxq);
14688 rc = -EIO;
14689 } else
14690 rc = 0;
14691
14692 return rc;
14693}
14694
14695/**
14696 * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record.
14697 * @phba: pointer to lpfc hba data structure.
14698 * @fcf_record: pointer to the fcf record to write the default data.
14699 * @fcf_index: FCF table entry index.
14700 *
14701 * This routine is invoked to build the driver's default FCF record. The
14702 * values used are hardcoded. This routine handles memory initialization.
14703 *
14704 **/
14705void
14706lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
14707 struct fcf_record *fcf_record,
14708 uint16_t fcf_index)
14709{
14710 memset(fcf_record, 0, sizeof(struct fcf_record));
14711 fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE;
14712 fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER;
14713 fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY;
14714 bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]);
14715 bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]);
14716 bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]);
14717 bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3);
14718 bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4);
14719 bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5);
14720 bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]);
14721 bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]);
14722 bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]);
14723 bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1);
James Smart0c287582009-06-10 17:22:56 -040014724 bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1);
James Smart6fb120a2009-05-22 14:52:59 -040014725 bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index);
14726 bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record,
14727 LPFC_FCF_FPMA | LPFC_FCF_SPMA);
14728 /* Set the VLAN bit map */
14729 if (phba->valid_vlan) {
14730 fcf_record->vlan_bitmap[phba->vlan_id / 8]
14731 = 1 << (phba->vlan_id % 8);
14732 }
14733}
14734
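/*
 * Illustrative sketch only (hypothetical helper, not part of the driver):
 * building the driver's default FCF record and handing it to
 * lpfc_sli4_add_fcf_record(), which requires a completely initialized
 * record. The helper name and the use of FCF index 0 are assumptions.
 */
static int
lpfc_example_add_dflt_fcf(struct lpfc_hba *phba)
{
	struct fcf_record fcf_record;

	/* Fill in the hardcoded defaults for FCF table entry 0. */
	lpfc_sli4_build_dflt_fcf_record(phba, &fcf_record, 0);

	/* Issue the nonembedded ADD_FCF mailbox carrying the record. */
	return lpfc_sli4_add_fcf_record(phba, &fcf_record);
}
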
14735/**
James Smart0c9ab6f2010-02-26 14:15:57 -050014736 * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan.
James Smart6fb120a2009-05-22 14:52:59 -040014737 * @phba: pointer to lpfc hba data structure.
14738 * @fcf_index: FCF table entry offset.
14739 *
James Smart0c9ab6f2010-02-26 14:15:57 -050014740 * This routine is invoked to scan the entire FCF table by reading FCF
14741 * record and processing it one at a time starting from the @fcf_index
14742 * for initial FCF discovery or fast FCF failover rediscovery.
14743 *
Lucas De Marchi25985ed2011-03-30 22:57:33 -030014744 * Return 0 if the mailbox command is submitted successfully, non-zero
James Smart0c9ab6f2010-02-26 14:15:57 -050014745 * otherwise.
James Smart6fb120a2009-05-22 14:52:59 -040014746 **/
14747int
James Smart0c9ab6f2010-02-26 14:15:57 -050014748lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
James Smart6fb120a2009-05-22 14:52:59 -040014749{
14750 int rc = 0, error;
14751 LPFC_MBOXQ_t *mboxq;
James Smart6fb120a2009-05-22 14:52:59 -040014752
James Smart32b97932009-07-19 10:01:21 -040014753 phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag;
James Smart6fb120a2009-05-22 14:52:59 -040014754 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14755 if (!mboxq) {
14756 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14757 "2000 Failed to allocate mbox for "
14758 "READ_FCF cmd\n");
James Smart4d9ab992009-10-02 15:16:39 -040014759 error = -ENOMEM;
James Smart0c9ab6f2010-02-26 14:15:57 -050014760 goto fail_fcf_scan;
James Smart6fb120a2009-05-22 14:52:59 -040014761 }
James Smartecfd03c2010-02-12 14:41:27 -050014762 /* Construct the read FCF record mailbox command */
James Smart0c9ab6f2010-02-26 14:15:57 -050014763 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
James Smartecfd03c2010-02-12 14:41:27 -050014764 if (rc) {
14765 error = -EINVAL;
James Smart0c9ab6f2010-02-26 14:15:57 -050014766 goto fail_fcf_scan;
James Smart6fb120a2009-05-22 14:52:59 -040014767 }
James Smartecfd03c2010-02-12 14:41:27 -050014768 /* Issue the mailbox command asynchronously */
James Smart6fb120a2009-05-22 14:52:59 -040014769 mboxq->vport = phba->pport;
James Smart0c9ab6f2010-02-26 14:15:57 -050014770 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec;
James Smarta93ff372010-10-22 11:06:08 -040014771
14772 spin_lock_irq(&phba->hbalock);
14773 phba->hba_flag |= FCF_TS_INPROG;
14774 spin_unlock_irq(&phba->hbalock);
14775
James Smart6fb120a2009-05-22 14:52:59 -040014776 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
James Smartecfd03c2010-02-12 14:41:27 -050014777 if (rc == MBX_NOT_FINISHED)
James Smart6fb120a2009-05-22 14:52:59 -040014778 error = -EIO;
James Smartecfd03c2010-02-12 14:41:27 -050014779 else {
James Smart38b92ef2010-08-04 16:11:39 -040014780 /* Reset eligible FCF count for new scan */
14781 if (fcf_index == LPFC_FCOE_FCF_GET_FIRST)
James Smart999d8132010-03-15 11:24:56 -040014782 phba->fcf.eligible_fcf_cnt = 0;
James Smart6fb120a2009-05-22 14:52:59 -040014783 error = 0;
James Smart32b97932009-07-19 10:01:21 -040014784 }
James Smart0c9ab6f2010-02-26 14:15:57 -050014785fail_fcf_scan:
James Smart4d9ab992009-10-02 15:16:39 -040014786 if (error) {
14787 if (mboxq)
14788 lpfc_sli4_mbox_cmd_free(phba, mboxq);
James Smarta93ff372010-10-22 11:06:08 -040014789 /* FCF scan failed, clear FCF_TS_INPROG flag */
James Smart4d9ab992009-10-02 15:16:39 -040014790 spin_lock_irq(&phba->hbalock);
James Smarta93ff372010-10-22 11:06:08 -040014791 phba->hba_flag &= ~FCF_TS_INPROG;
James Smart4d9ab992009-10-02 15:16:39 -040014792 spin_unlock_irq(&phba->hbalock);
14793 }
James Smart6fb120a2009-05-22 14:52:59 -040014794 return error;
14795}
James Smarta0c87cb2009-07-19 10:01:10 -040014796
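/*
 * Illustrative sketch only (hypothetical helper, not part of the driver):
 * kicking off an FCF table scan from the start of the table. The helper
 * name is made up; completion is handled asynchronously by
 * lpfc_mbx_cmpl_fcf_scan_read_fcf_rec once the mailbox finishes.
 */
static int
lpfc_example_start_fcf_scan(struct lpfc_hba *phba)
{
	/* Scan from the first FCF table entry. */
	return lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
}
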
14797/**
James Smarta93ff372010-10-22 11:06:08 -040014798 * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for roundrobin fcf.
James Smart0c9ab6f2010-02-26 14:15:57 -050014799 * @phba: pointer to lpfc hba data structure.
14800 * @fcf_index: FCF table entry offset.
14801 *
14802 * This routine is invoked to read an FCF record indicated by @fcf_index
James Smarta93ff372010-10-22 11:06:08 -040014803 * and to use it for FLOGI roundrobin FCF failover.
James Smart0c9ab6f2010-02-26 14:15:57 -050014804 *
Lucas De Marchi25985ed2011-03-30 22:57:33 -030014805 * Return 0 if the mailbox command is submitted successfully, non-zero
James Smart0c9ab6f2010-02-26 14:15:57 -050014806 * otherwise.
14807 **/
14808int
14809lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
14810{
14811 int rc = 0, error;
14812 LPFC_MBOXQ_t *mboxq;
14813
14814 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14815 if (!mboxq) {
14816 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
14817 "2763 Failed to allocate mbox for "
14818 "READ_FCF cmd\n");
14819 error = -ENOMEM;
14820 goto fail_fcf_read;
14821 }
14822 /* Construct the read FCF record mailbox command */
14823 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
14824 if (rc) {
14825 error = -EINVAL;
14826 goto fail_fcf_read;
14827 }
14828 /* Issue the mailbox command asynchronously */
14829 mboxq->vport = phba->pport;
14830 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec;
14831 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
14832 if (rc == MBX_NOT_FINISHED)
14833 error = -EIO;
14834 else
14835 error = 0;
14836
14837fail_fcf_read:
14838 if (error && mboxq)
14839 lpfc_sli4_mbox_cmd_free(phba, mboxq);
14840 return error;
14841}
14842
14843/**
14844 * lpfc_sli4_read_fcf_rec - Read hba fcf record for update eligible fcf bmask.
14845 * @phba: pointer to lpfc hba data structure.
14846 * @fcf_index: FCF table entry offset.
14847 *
14848 * This routine is invoked to read an FCF record indicated by @fcf_index to
James Smarta93ff372010-10-22 11:06:08 -040014849 * determine whether it's eligible for FLOGI roundrobin failover list.
James Smart0c9ab6f2010-02-26 14:15:57 -050014850 *
Lucas De Marchi25985ed2011-03-30 22:57:33 -030014851 * Return 0 if the mailbox command is submitted successfully, non-zero
James Smart0c9ab6f2010-02-26 14:15:57 -050014852 * otherwise.
14853 **/
14854int
14855lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
14856{
14857 int rc = 0, error;
14858 LPFC_MBOXQ_t *mboxq;
14859
14860 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14861 if (!mboxq) {
14862 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
14863 "2758 Failed to allocate mbox for "
14864 "READ_FCF cmd\n");
14865 error = -ENOMEM;
14866 goto fail_fcf_read;
14867 }
14868 /* Construct the read FCF record mailbox command */
14869 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
14870 if (rc) {
14871 error = -EINVAL;
14872 goto fail_fcf_read;
14873 }
14874 /* Issue the mailbox command asynchronously */
14875 mboxq->vport = phba->pport;
14876 mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec;
14877 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
14878 if (rc == MBX_NOT_FINISHED)
14879 error = -EIO;
14880 else
14881 error = 0;
14882
14883fail_fcf_read:
14884 if (error && mboxq)
14885 lpfc_sli4_mbox_cmd_free(phba, mboxq);
14886 return error;
14887}
14888
14889/**
James Smart7d791df2011-07-22 18:37:52 -040014890 * lpfc_check_next_fcf_pri_level - Repopulate rr_bmask for the next priority level
14891 * @phba: pointer to the lpfc_hba struct for this port.
14892 * This routine is called from the lpfc_sli4_fcf_rr_next_index_get
14893 * routine when the rr_bmask is empty. The FCF indices are put into the
14894 * rr_bmask based on their priority level, starting from the highest priority
14895 * down to the lowest. The most likely FCF candidate will be in the highest
14896 * priority group. When this routine is called it searches the fcf_pri list for
14897 * the next lowest priority group and repopulates the rr_bmask with only those
14898 * fcf_indexes.
14899 * Returns:
14900 * 1 = success, 0 = failure
14901 **/
14902int
14903lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba)
14904{
14905 uint16_t next_fcf_pri;
14906 uint16_t last_index;
14907 struct lpfc_fcf_pri *fcf_pri;
14908 int rc;
14909 int ret = 0;
14910
14911 last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
14912 LPFC_SLI4_FCF_TBL_INDX_MAX);
14913 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
14914 "3060 Last IDX %d\n", last_index);
14915 if (list_empty(&phba->fcf.fcf_pri_list)) {
14916 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
14917 "3061 Last IDX %d\n", last_index);
14918 return 0; /* Empty rr list */
14919 }
14920 next_fcf_pri = 0;
14921 /*
14922 * Clear the rr_bmask and set all of the bits that are at this
14923 * priority.
14924 */
14925 memset(phba->fcf.fcf_rr_bmask, 0,
14926 sizeof(*phba->fcf.fcf_rr_bmask));
14927 spin_lock_irq(&phba->hbalock);
14928 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
14929 if (fcf_pri->fcf_rec.flag & LPFC_FCF_FLOGI_FAILED)
14930 continue;
14931 /*
14932		 * The first priority level that has not had a FLOGI failure
14933		 * will be the highest.
14934 */
14935 if (!next_fcf_pri)
14936 next_fcf_pri = fcf_pri->fcf_rec.priority;
14937 spin_unlock_irq(&phba->hbalock);
14938 if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
14939 rc = lpfc_sli4_fcf_rr_index_set(phba,
14940 fcf_pri->fcf_rec.fcf_index);
14941 if (rc)
14942 return 0;
14943 }
14944 spin_lock_irq(&phba->hbalock);
14945 }
14946 /*
14947	 * If next_fcf_pri was not set above and the list is not empty, then
14948	 * FLOGI has failed on all of them. So clear the FLOGI failed flag
14949	 * and start at the beginning.
14950 */
14951 if (!next_fcf_pri && !list_empty(&phba->fcf.fcf_pri_list)) {
14952 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
14953 fcf_pri->fcf_rec.flag &= ~LPFC_FCF_FLOGI_FAILED;
14954 /*
14955			 * The first priority level that has not had a FLOGI failure
14956			 * will be the highest.
14957 */
14958 if (!next_fcf_pri)
14959 next_fcf_pri = fcf_pri->fcf_rec.priority;
14960 spin_unlock_irq(&phba->hbalock);
14961 if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
14962 rc = lpfc_sli4_fcf_rr_index_set(phba,
14963 fcf_pri->fcf_rec.fcf_index);
14964 if (rc)
14965 return 0;
14966 }
14967 spin_lock_irq(&phba->hbalock);
14968 }
14969 } else
14970 ret = 1;
14971 spin_unlock_irq(&phba->hbalock);
14972
14973 return ret;
14974}
14975/**
James Smart0c9ab6f2010-02-26 14:15:57 -050014976 * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index
14977 * @phba: pointer to lpfc hba data structure.
14978 *
14979 * This routine is to get the next eligible FCF record index in a round
14980 * robin fashion. If the next eligible FCF record index equals the
James Smarta93ff372010-10-22 11:06:08 -040014981 * initial roundrobin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF)
James Smart0c9ab6f2010-02-26 14:15:57 -050014982 * shall be returned, otherwise, the next eligible FCF record's index
14983 * shall be returned.
14984 **/
14985uint16_t
14986lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
14987{
14988 uint16_t next_fcf_index;
14989
James Smart3804dc82010-07-14 15:31:37 -040014990 /* Search start from next bit of currently registered FCF index */
James Smart7d791df2011-07-22 18:37:52 -040014991next_priority:
James Smart3804dc82010-07-14 15:31:37 -040014992 next_fcf_index = (phba->fcf.current_rec.fcf_indx + 1) %
14993 LPFC_SLI4_FCF_TBL_INDX_MAX;
James Smart0c9ab6f2010-02-26 14:15:57 -050014994 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
14995 LPFC_SLI4_FCF_TBL_INDX_MAX,
James Smart3804dc82010-07-14 15:31:37 -040014996 next_fcf_index);
14997
James Smart0c9ab6f2010-02-26 14:15:57 -050014998 /* Wrap around condition on phba->fcf.fcf_rr_bmask */
James Smart7d791df2011-07-22 18:37:52 -040014999 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
15000 /*
15001 * If we have wrapped then we need to clear the bits that
15002 * have been tested so that we can detect when we should
15003 * change the priority level.
15004 */
James Smart0c9ab6f2010-02-26 14:15:57 -050015005 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
15006 LPFC_SLI4_FCF_TBL_INDX_MAX, 0);
James Smart7d791df2011-07-22 18:37:52 -040015007 }
15008
James Smart0c9ab6f2010-02-26 14:15:57 -050015009
James Smart3804dc82010-07-14 15:31:37 -040015010 /* Check roundrobin failover list empty condition */
James Smart7d791df2011-07-22 18:37:52 -040015011 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX ||
15012 next_fcf_index == phba->fcf.current_rec.fcf_indx) {
15013 /*
15014		 * If the next fcf index is not found, check if there are lower
15015		 * priority level fcfs in the fcf_priority list.
15016		 * Set up the rr_bmask with all of the available fcf bits
15017 * at that level and continue the selection process.
15018 */
15019 if (lpfc_check_next_fcf_pri_level(phba))
15020 goto next_priority;
James Smart3804dc82010-07-14 15:31:37 -040015021 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
15022 "2844 No roundrobin failover FCF available\n");
James Smart7d791df2011-07-22 18:37:52 -040015023 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX)
15024 return LPFC_FCOE_FCF_NEXT_NONE;
15025 else {
15026 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
15027 "3063 Only FCF available idx %d, flag %x\n",
15028 next_fcf_index,
15029 phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag);
15030 return next_fcf_index;
15031 }
James Smart3804dc82010-07-14 15:31:37 -040015032 }
15033
James Smart7d791df2011-07-22 18:37:52 -040015034 if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX &&
15035 phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag &
15036 LPFC_FCF_FLOGI_FAILED)
15037 goto next_priority;
15038
James Smart3804dc82010-07-14 15:31:37 -040015039 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
James Smarta93ff372010-10-22 11:06:08 -040015040 "2845 Get next roundrobin failover FCF (x%x)\n",
15041 next_fcf_index);
15042
James Smart0c9ab6f2010-02-26 14:15:57 -050015043 return next_fcf_index;
15044}
15045
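/*
 * Illustrative sketch only (hypothetical helper, not part of the driver):
 * one roundrobin FCF failover step asks for the next eligible index and,
 * if one exists, reads that FCF record; LPFC_FCOE_FCF_NEXT_NONE means the
 * roundrobin list is exhausted. The helper name and -ENOENT are assumptions.
 */
static int
lpfc_example_fcf_failover_step(struct lpfc_hba *phba)
{
	uint16_t fcf_index;

	fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
	if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE)
		/* No eligible FCF left to try. */
		return -ENOENT;

	/* Read the candidate FCF record for the failover attempt. */
	return lpfc_sli4_fcf_rr_read_fcf_rec(phba, fcf_index);
}
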
15046/**
15047 * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index
15048 * @phba: pointer to lpfc hba data structure.
15049 *
15050 * This routine sets the FCF record index in to the eligible bmask for
James Smarta93ff372010-10-22 11:06:08 -040015051 * roundrobin failover search. It checks to make sure that the index
James Smart0c9ab6f2010-02-26 14:15:57 -050015052 * does not go beyond the range of the driver allocated bmask dimension
15053 * before setting the bit.
15054 *
15055 * Returns 0 if the index bit successfully set, otherwise, it returns
15056 * -EINVAL.
15057 **/
15058int
15059lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
15060{
15061 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
15062 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
James Smarta93ff372010-10-22 11:06:08 -040015063 "2610 FCF (x%x) reached driver's book "
15064 "keeping dimension:x%x\n",
James Smart0c9ab6f2010-02-26 14:15:57 -050015065 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
15066 return -EINVAL;
15067 }
15068 /* Set the eligible FCF record index bmask */
15069 set_bit(fcf_index, phba->fcf.fcf_rr_bmask);
15070
James Smart3804dc82010-07-14 15:31:37 -040015071 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
James Smarta93ff372010-10-22 11:06:08 -040015072 "2790 Set FCF (x%x) to roundrobin FCF failover "
James Smart3804dc82010-07-14 15:31:37 -040015073 "bmask\n", fcf_index);
15074
James Smart0c9ab6f2010-02-26 14:15:57 -050015075 return 0;
15076}
15077
15078/**
James Smart3804dc82010-07-14 15:31:37 -040015079 * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index
James Smart0c9ab6f2010-02-26 14:15:57 -050015080 * @phba: pointer to lpfc hba data structure.
15081 *
15082 * This routine clears the FCF record index from the eligible bmask for
James Smarta93ff372010-10-22 11:06:08 -040015083 * roundrobin failover search. It checks to make sure that the index
James Smart0c9ab6f2010-02-26 14:15:57 -050015084 * does not go beyond the range of the driver allocated bmask dimension
15085 * before clearing the bit.
15086 **/
15087void
15088lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
15089{
James Smart7d791df2011-07-22 18:37:52 -040015090 struct lpfc_fcf_pri *fcf_pri;
James Smart0c9ab6f2010-02-26 14:15:57 -050015091 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
15092 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
James Smarta93ff372010-10-22 11:06:08 -040015093 "2762 FCF (x%x) reached driver's book "
15094 "keeping dimension:x%x\n",
James Smart0c9ab6f2010-02-26 14:15:57 -050015095 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
15096 return;
15097 }
15098 /* Clear the eligible FCF record index bmask */
James Smart7d791df2011-07-22 18:37:52 -040015099 spin_lock_irq(&phba->hbalock);
15100 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
15101 if (fcf_pri->fcf_rec.fcf_index == fcf_index) {
15102 list_del_init(&fcf_pri->list);
15103 break;
15104 }
15105 }
15106 spin_unlock_irq(&phba->hbalock);
James Smart0c9ab6f2010-02-26 14:15:57 -050015107 clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);
James Smart3804dc82010-07-14 15:31:37 -040015108
15109 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
James Smarta93ff372010-10-22 11:06:08 -040015110 "2791 Clear FCF (x%x) from roundrobin failover "
James Smart3804dc82010-07-14 15:31:37 -040015111 "bmask\n", fcf_index);
James Smart0c9ab6f2010-02-26 14:15:57 -050015112}
15113
15114/**
James Smartecfd03c2010-02-12 14:41:27 -050015115 * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table
15116 * @phba: pointer to lpfc hba data structure.
15117 *
15118 * This routine is the completion routine for the rediscover FCF table mailbox
15119 * command. On failure it falls back to retrying the current FCF or to a
15120 * link-down failthrough; on success it starts the FCF rediscovery wait timer.
15121 **/
15122void
15123lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
15124{
15125 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
15126 uint32_t shdr_status, shdr_add_status;
15127
15128 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
15129
15130 shdr_status = bf_get(lpfc_mbox_hdr_status,
15131 &redisc_fcf->header.cfg_shdr.response);
15132 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
15133 &redisc_fcf->header.cfg_shdr.response);
15134 if (shdr_status || shdr_add_status) {
James Smart0c9ab6f2010-02-26 14:15:57 -050015135 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
James Smartecfd03c2010-02-12 14:41:27 -050015136 "2746 Requesting for FCF rediscovery failed "
15137 "status x%x add_status x%x\n",
15138 shdr_status, shdr_add_status);
James Smart0c9ab6f2010-02-26 14:15:57 -050015139 if (phba->fcf.fcf_flag & FCF_ACVL_DISC) {
James Smartfc2b9892010-02-26 14:15:29 -050015140 spin_lock_irq(&phba->hbalock);
James Smart0c9ab6f2010-02-26 14:15:57 -050015141 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
James Smartfc2b9892010-02-26 14:15:29 -050015142 spin_unlock_irq(&phba->hbalock);
15143 /*
15144 * CVL event triggered FCF rediscover request failed,
15145 * last resort to re-try current registered FCF entry.
15146 */
15147 lpfc_retry_pport_discovery(phba);
15148 } else {
15149 spin_lock_irq(&phba->hbalock);
James Smart0c9ab6f2010-02-26 14:15:57 -050015150 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
James Smartfc2b9892010-02-26 14:15:29 -050015151 spin_unlock_irq(&phba->hbalock);
15152 /*
15153 * DEAD FCF event triggered FCF rediscover request
15154 * failed, last resort to fail over as a link down
15155 * to FCF registration.
15156 */
15157 lpfc_sli4_fcf_dead_failthrough(phba);
15158 }
James Smart0c9ab6f2010-02-26 14:15:57 -050015159 } else {
15160 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
James Smarta93ff372010-10-22 11:06:08 -040015161 "2775 Start FCF rediscover quiescent timer\n");
James Smartecfd03c2010-02-12 14:41:27 -050015162 /*
15163 * Start FCF rediscovery wait timer for pending FCF
15164 * before rescan FCF record table.
15165 */
15166 lpfc_fcf_redisc_wait_start_timer(phba);
James Smart0c9ab6f2010-02-26 14:15:57 -050015167 }
James Smartecfd03c2010-02-12 14:41:27 -050015168
15169 mempool_free(mbox, phba->mbox_mem_pool);
15170}
15171
15172/**
James Smart3804dc82010-07-14 15:31:37 -040015173 * lpfc_sli4_redisc_fcf_table - Request to rediscover entire FCF table by port.
James Smartecfd03c2010-02-12 14:41:27 -050015174 * @phba: pointer to lpfc hba data structure.
15175 *
15176 * This routine is invoked to request for rediscovery of the entire FCF table
15177 * by the port.
15178 **/
15179int
15180lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba)
15181{
15182 LPFC_MBOXQ_t *mbox;
15183 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
15184 int rc, length;
15185
James Smart0c9ab6f2010-02-26 14:15:57 -050015186 /* Cancel retry delay timers to all vports before FCF rediscover */
15187 lpfc_cancel_all_vport_retry_delay_timer(phba);
15188
James Smartecfd03c2010-02-12 14:41:27 -050015189 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15190 if (!mbox) {
15191 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15192 "2745 Failed to allocate mbox for "
15193 "requesting FCF rediscover.\n");
15194 return -ENOMEM;
15195 }
15196
15197 length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) -
15198 sizeof(struct lpfc_sli4_cfg_mhdr));
15199 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15200 LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF,
15201 length, LPFC_SLI4_MBX_EMBED);
15202
15203 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
15204 /* Set count to 0 for invalidating the entire FCF database */
15205 bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0);
15206
15207 /* Issue the mailbox command asynchronously */
15208 mbox->vport = phba->pport;
15209 mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table;
15210 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
15211
15212 if (rc == MBX_NOT_FINISHED) {
15213 mempool_free(mbox, phba->mbox_mem_pool);
15214 return -EIO;
15215 }
15216 return 0;
15217}
15218
15219/**
James Smartfc2b9892010-02-26 14:15:29 -050015220 * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event
15221 * @phba: pointer to lpfc hba data structure.
15222 *
15223 * This function is the failover routine as a last resort to the FCF DEAD
15224 * event when driver failed to perform fast FCF failover.
15225 **/
15226void
15227lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba)
15228{
15229 uint32_t link_state;
15230
15231 /*
15232 * Last resort as FCF DEAD event failover will treat this as
15233 * a link down, but save the link state because we don't want
15234 * it to be changed to Link Down unless it is already down.
15235 */
15236 link_state = phba->link_state;
15237 lpfc_linkdown(phba);
15238 phba->link_state = link_state;
15239
15240 /* Unregister FCF if no devices connected to it */
15241 lpfc_unregister_unused_fcf(phba);
15242}
15243
15244/**
James Smart026abb82011-12-13 13:20:45 -050015245 * lpfc_sli_get_config_region23 - Get sli3 port region 23 data.
James Smarta0c87cb2009-07-19 10:01:10 -040015246 * @phba: pointer to lpfc hba data structure.
James Smart026abb82011-12-13 13:20:45 -050015247 * @rgn23_data: pointer to configure region 23 data.
James Smarta0c87cb2009-07-19 10:01:10 -040015248 *
James Smart026abb82011-12-13 13:20:45 -050015249 * This function gets the SLI3 port config region 23 data through the memory dump
15250 * mailbox command. When it successfully retrieves data, the size of the data
15251 * will be returned, otherwise, 0 will be returned.
James Smarta0c87cb2009-07-19 10:01:10 -040015252 **/
James Smart026abb82011-12-13 13:20:45 -050015253static uint32_t
15254lpfc_sli_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
James Smarta0c87cb2009-07-19 10:01:10 -040015255{
15256 LPFC_MBOXQ_t *pmb = NULL;
15257 MAILBOX_t *mb;
James Smart026abb82011-12-13 13:20:45 -050015258 uint32_t offset = 0;
James Smarta0c87cb2009-07-19 10:01:10 -040015259 int rc;
15260
James Smart026abb82011-12-13 13:20:45 -050015261 if (!rgn23_data)
15262 return 0;
15263
James Smarta0c87cb2009-07-19 10:01:10 -040015264 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15265 if (!pmb) {
15266 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
James Smart026abb82011-12-13 13:20:45 -050015267 "2600 failed to allocate mailbox memory\n");
15268 return 0;
James Smarta0c87cb2009-07-19 10:01:10 -040015269 }
15270 mb = &pmb->u.mb;
15271
James Smarta0c87cb2009-07-19 10:01:10 -040015272 do {
15273 lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23);
15274 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
15275
15276 if (rc != MBX_SUCCESS) {
15277 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
James Smart026abb82011-12-13 13:20:45 -050015278 "2601 failed to read config "
15279 "region 23, rc 0x%x Status 0x%x\n",
15280 rc, mb->mbxStatus);
James Smarta0c87cb2009-07-19 10:01:10 -040015281 mb->un.varDmp.word_cnt = 0;
15282 }
15283 /*
15284 * dump mem may return a zero when finished or we got a
15285 * mailbox error, either way we are done.
15286 */
15287 if (mb->un.varDmp.word_cnt == 0)
15288 break;
15289 if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset)
15290 mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset;
15291
15292 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
James Smart026abb82011-12-13 13:20:45 -050015293 rgn23_data + offset,
15294 mb->un.varDmp.word_cnt);
James Smarta0c87cb2009-07-19 10:01:10 -040015295 offset += mb->un.varDmp.word_cnt;
15296 } while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE);
15297
James Smart026abb82011-12-13 13:20:45 -050015298 mempool_free(pmb, phba->mbox_mem_pool);
15299 return offset;
15300}
15301
15302/**
15303 * lpfc_sli4_get_config_region23 - Get sli4 port region 23 data.
15304 * @phba: pointer to lpfc hba data structure.
15305 * @rgn23_data: pointer to configure region 23 data.
15306 *
15307 * This function gets the SLI4 port config region 23 data through the memory dump
15308 * mailbox command. When it successfully retrieves data, the size of the data
15309 * will be returned, otherwise, 0 will be returned.
15310 **/
15311static uint32_t
15312lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
15313{
15314 LPFC_MBOXQ_t *mboxq = NULL;
15315 struct lpfc_dmabuf *mp = NULL;
15316 struct lpfc_mqe *mqe;
15317 uint32_t data_length = 0;
15318 int rc;
15319
15320 if (!rgn23_data)
15321 return 0;
15322
15323 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15324 if (!mboxq) {
15325 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15326 "3105 failed to allocate mailbox memory\n");
15327 return 0;
15328 }
15329
15330 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq))
15331 goto out;
15332 mqe = &mboxq->u.mqe;
15333 mp = (struct lpfc_dmabuf *) mboxq->context1;
15334 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
15335 if (rc)
15336 goto out;
15337 data_length = mqe->un.mb_words[5];
15338 if (data_length == 0)
15339 goto out;
15340 if (data_length > DMP_RGN23_SIZE) {
15341 data_length = 0;
15342 goto out;
15343 }
15344 lpfc_sli_pcimem_bcopy((char *)mp->virt, rgn23_data, data_length);
15345out:
15346 mempool_free(mboxq, phba->mbox_mem_pool);
15347 if (mp) {
15348 lpfc_mbuf_free(phba, mp->virt, mp->phys);
15349 kfree(mp);
15350 }
15351 return data_length;
15352}
15353
15354/**
15355 * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled.
15356 * @phba: pointer to lpfc hba data structure.
15357 *
15358 * This function reads region 23 and parses the TLVs for port status to
15359 * decide if the user disabled the port. If the TLV indicates the
15360 * port is disabled, the hba_flag is set accordingly.
15361 **/
15362void
15363lpfc_sli_read_link_ste(struct lpfc_hba *phba)
15364{
15365 uint8_t *rgn23_data = NULL;
15366 uint32_t if_type, data_size, sub_tlv_len, tlv_offset;
15367 uint32_t offset = 0;
15368
15369 /* Get adapter Region 23 data */
15370 rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
15371 if (!rgn23_data)
15372 goto out;
15373
15374 if (phba->sli_rev < LPFC_SLI_REV4)
15375 data_size = lpfc_sli_get_config_region23(phba, rgn23_data);
15376 else {
15377 if_type = bf_get(lpfc_sli_intf_if_type,
15378 &phba->sli4_hba.sli_intf);
15379 if (if_type == LPFC_SLI_INTF_IF_TYPE_0)
15380 goto out;
15381 data_size = lpfc_sli4_get_config_region23(phba, rgn23_data);
15382 }
James Smarta0c87cb2009-07-19 10:01:10 -040015383
15384 if (!data_size)
15385 goto out;
15386
15387 /* Check the region signature first */
15388 if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) {
15389 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15390 "2619 Config region 23 has bad signature\n");
15391 goto out;
15392 }
15393 offset += 4;
15394
15395 /* Check the data structure version */
15396 if (rgn23_data[offset] != LPFC_REGION23_VERSION) {
15397 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15398 "2620 Config region 23 has bad version\n");
15399 goto out;
15400 }
15401 offset += 4;
15402
15403 /* Parse TLV entries in the region */
15404 while (offset < data_size) {
15405 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC)
15406 break;
15407 /*
15408 * If the TLV is not driver specific TLV or driver id is
15409 * not linux driver id, skip the record.
15410 */
15411 if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) ||
15412 (rgn23_data[offset + 2] != LINUX_DRIVER_ID) ||
15413 (rgn23_data[offset + 3] != 0)) {
15414 offset += rgn23_data[offset + 1] * 4 + 4;
15415 continue;
15416 }
15417
15418 /* Driver found a driver specific TLV in the config region */
15419 sub_tlv_len = rgn23_data[offset + 1] * 4;
15420 offset += 4;
15421 tlv_offset = 0;
15422
15423 /*
15424 * Search for configured port state sub-TLV.
15425 */
15426 while ((offset < data_size) &&
15427 (tlv_offset < sub_tlv_len)) {
15428 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) {
15429 offset += 4;
15430 tlv_offset += 4;
15431 break;
15432 }
15433 if (rgn23_data[offset] != PORT_STE_TYPE) {
15434 offset += rgn23_data[offset + 1] * 4 + 4;
15435 tlv_offset += rgn23_data[offset + 1] * 4 + 4;
15436 continue;
15437 }
15438
15439 /* This HBA contains PORT_STE configured */
15440 if (!rgn23_data[offset + 2])
15441 phba->hba_flag |= LINK_DISABLED;
15442
15443 goto out;
15444 }
15445 }
James Smart026abb82011-12-13 13:20:45 -050015446
James Smarta0c87cb2009-07-19 10:01:10 -040015447out:
James Smarta0c87cb2009-07-19 10:01:10 -040015448 kfree(rgn23_data);
15449 return;
15450}
James Smart695a8142010-01-26 23:08:03 -050015451
15452/**
James Smart52d52442011-05-24 11:42:45 -040015453 * lpfc_wr_object - write an object to the firmware
15454 * @phba: HBA structure that indicates port to create a queue on.
15455 * @dmabuf_list: list of dmabufs to write to the port.
15456 * @size: the total byte value of the objects to write to the port.
15457 * @offset: the current offset to be used to start the transfer.
15458 *
15459 * This routine will create a wr_object mailbox command to send to the port.
15460 * The mailbox command will be constructed using the dma buffers described in
15461 * @dmabuf_list to create a list of BDEs. This routine will fill in as many
15462 * BDEs as the embedded mailbox can support. The @offset variable will be
15463 * used to indicate the starting offset of the transfer and will also return
15464 * the offset after the write object mailbox has completed. @size is used to
15465 * determine the end of the object and whether the eof bit should be set.
15466 *
15467 * Return 0 if successful and offset will contain the new offset to use
15468 * for the next write.
15469 * Return negative value for error cases.
15470 **/
15471int
15472lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
15473 uint32_t size, uint32_t *offset)
15474{
15475 struct lpfc_mbx_wr_object *wr_object;
15476 LPFC_MBOXQ_t *mbox;
15477 int rc = 0, i = 0;
15478 uint32_t shdr_status, shdr_add_status;
15479 uint32_t mbox_tmo;
15480 union lpfc_sli4_cfg_shdr *shdr;
15481 struct lpfc_dmabuf *dmabuf;
15482 uint32_t written = 0;
15483
15484 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15485 if (!mbox)
15486 return -ENOMEM;
15487
15488 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15489 LPFC_MBOX_OPCODE_WRITE_OBJECT,
15490 sizeof(struct lpfc_mbx_wr_object) -
15491 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
15492
15493 wr_object = (struct lpfc_mbx_wr_object *)&mbox->u.mqe.un.wr_object;
15494 wr_object->u.request.write_offset = *offset;
15495 sprintf((uint8_t *)wr_object->u.request.object_name, "/");
15496 wr_object->u.request.object_name[0] =
15497 cpu_to_le32(wr_object->u.request.object_name[0]);
15498 bf_set(lpfc_wr_object_eof, &wr_object->u.request, 0);
15499 list_for_each_entry(dmabuf, dmabuf_list, list) {
15500 if (i >= LPFC_MBX_WR_CONFIG_MAX_BDE || written >= size)
15501 break;
15502 wr_object->u.request.bde[i].addrLow = putPaddrLow(dmabuf->phys);
15503 wr_object->u.request.bde[i].addrHigh =
15504 putPaddrHigh(dmabuf->phys);
15505 if (written + SLI4_PAGE_SIZE >= size) {
15506 wr_object->u.request.bde[i].tus.f.bdeSize =
15507 (size - written);
15508 written += (size - written);
15509 bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1);
15510 } else {
15511 wr_object->u.request.bde[i].tus.f.bdeSize =
15512 SLI4_PAGE_SIZE;
15513 written += SLI4_PAGE_SIZE;
15514 }
15515 i++;
15516 }
15517 wr_object->u.request.bde_count = i;
15518 bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written);
15519 if (!phba->sli4_hba.intr_enable)
15520 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15521 else {
James Smarta183a152011-10-10 21:32:43 -040015522 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
James Smart52d52442011-05-24 11:42:45 -040015523 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
15524 }
15525 /* The IOCTL status is embedded in the mailbox subheader. */
15526 shdr = (union lpfc_sli4_cfg_shdr *) &wr_object->header.cfg_shdr;
15527 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15528 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15529 if (rc != MBX_TIMEOUT)
15530 mempool_free(mbox, phba->mbox_mem_pool);
15531 if (shdr_status || shdr_add_status || rc) {
15532 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15533 "3025 Write Object mailbox failed with "
15534 "status x%x add_status x%x, mbx status x%x\n",
15535 shdr_status, shdr_add_status, rc);
15536 rc = -ENXIO;
15537 } else
15538 *offset += wr_object->u.response.actual_write_length;
15539 return rc;
15540}
15541
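/*
 * Illustrative sketch only (hypothetical helper, not part of the driver):
 * writing one portion of an object with lpfc_wr_object(). The caller is
 * assumed to have built @chunk_list with the dmabufs for this portion; on
 * success *offset has been advanced past the bytes just written, so a
 * firmware download repeats this per portion until *offset reaches the
 * image size.
 */
static int
lpfc_example_write_portion(struct lpfc_hba *phba, struct list_head *chunk_list,
			   uint32_t image_size, uint32_t *offset)
{
	int rc;

	rc = lpfc_wr_object(phba, chunk_list, image_size, offset);
	if (rc)
		/* Mailbox failure; *offset was not advanced. */
		return rc;
	return 0;
}
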
15542/**
James Smart695a8142010-01-26 23:08:03 -050015543 * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands.
15544 * @vport: pointer to vport data structure.
15545 *
15546 * This function iterates through the mailboxq and cleans up all REG_LOGIN
15547 * and REG_VPI mailbox commands associated with the vport. This function
15548 * is called when the driver wants to restart discovery of the vport due to
15549 * a Clear Virtual Link event.
15550 **/
15551void
15552lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
15553{
15554 struct lpfc_hba *phba = vport->phba;
15555 LPFC_MBOXQ_t *mb, *nextmb;
15556 struct lpfc_dmabuf *mp;
James Smart78730cf2010-04-06 15:06:30 -040015557 struct lpfc_nodelist *ndlp;
James Smartd439d282010-09-29 11:18:45 -040015558 struct lpfc_nodelist *act_mbx_ndlp = NULL;
James Smart589a52d2010-07-14 15:30:54 -040015559 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
James Smartd439d282010-09-29 11:18:45 -040015560 LIST_HEAD(mbox_cmd_list);
James Smart63e801c2010-11-20 23:14:19 -050015561 uint8_t restart_loop;
James Smart695a8142010-01-26 23:08:03 -050015562
James Smartd439d282010-09-29 11:18:45 -040015563 /* Clean up internally queued mailbox commands with the vport */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
		if (mb->vport != vport)
			continue;

		if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
			(mb->u.mb.mbxCommand != MBX_REG_VPI))
			continue;

		list_del(&mb->list);
		list_add_tail(&mb->list, &mbox_cmd_list);
	}
	/* Clean up active mailbox command with the vport */
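	/*
	 * The active command cannot be pulled back from the hardware, so
	 * redirect its completion to the default handler and, for
	 * REG_LOGIN64, flag the RPI to be unregistered when it completes.
	 */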
	mb = phba->sli.mbox_active;
	if (mb && (mb->vport == vport)) {
		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) ||
			(mb->u.mb.mbxCommand == MBX_REG_VPI))
			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
			act_mbx_ndlp = (struct lpfc_nodelist *)mb->context2;
			/* Take a reference count for delayed processing */
			act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp);
			/* Unregister the RPI when the mailbox completes */
			mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
		}
	}
	/* Clean up any mailbox completions which are not yet processed */
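	/*
	 * Dropping the hbalock to take the host_lock invalidates the list
	 * walk, so restart the scan whenever a node flag is cleared.
	 */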
	do {
		restart_loop = 0;
		list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
			/*
			 * If this mailbox is already processed or it is
			 * for another vport, ignore it.
			 */
			if ((mb->vport != vport) ||
				(mb->mbox_flag & LPFC_MBX_IMED_UNREG))
				continue;

			if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
				(mb->u.mb.mbxCommand != MBX_REG_VPI))
				continue;

			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
			if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
				ndlp = (struct lpfc_nodelist *)mb->context2;
				/* Unregister the RPI when the mailbox completes */
				mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
				restart_loop = 1;
				spin_unlock_irq(&phba->hbalock);
				spin_lock(shost->host_lock);
				ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
				spin_unlock(shost->host_lock);
				spin_lock_irq(&phba->hbalock);
				break;
			}
		}
	} while (restart_loop);

	spin_unlock_irq(&phba->hbalock);

	/* Release the cleaned-up mailbox commands */
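	/*
	 * For REG_LOGIN64 commands also free the mailbox buffer and drop
	 * the node reference held in context2.
	 */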
	while (!list_empty(&mbox_cmd_list)) {
		list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list);
		if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
			mp = (struct lpfc_dmabuf *) (mb->context1);
			if (mp) {
				__lpfc_mbuf_free(phba, mp->virt, mp->phys);
				kfree(mp);
			}
			ndlp = (struct lpfc_nodelist *) mb->context2;
			mb->context2 = NULL;
			if (ndlp) {
				spin_lock(shost->host_lock);
				ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
				spin_unlock(shost->host_lock);
				lpfc_nlp_put(ndlp);
			}
		}
		mempool_free(mb, phba->mbox_mem_pool);
	}

	/* Release the ndlp with the cleaned-up active mailbox command */
	if (act_mbx_ndlp) {
		spin_lock(shost->host_lock);
		act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
		spin_unlock(shost->host_lock);
		lpfc_nlp_put(act_mbx_ndlp);
	}
}

/**
 * lpfc_drain_txq - Drain the txq
 * @phba: Pointer to HBA context object.
 *
 * This function attempts to submit IOCBs from the txq
 * to the adapter. For SLI4 adapters, the txq contains
 * ELS IOCBs that have been deferred because there
 * are no available SGLs. This congestion can occur with
 * large vport counts during node discovery.
 **/

uint32_t
lpfc_drain_txq(struct lpfc_hba *phba)
{
	LIST_HEAD(completions);
	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
	struct lpfc_iocbq *piocbq = NULL;
	unsigned long iflags = 0;
	char *fail_msg = NULL;
	struct lpfc_sglq *sglq;
	union lpfc_wqe wqe;

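	/* Update the txq high-water mark. */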
	spin_lock_irqsave(&phba->hbalock, iflags);
	if (pring->txq_cnt > pring->txq_max)
		pring->txq_max = pring->txq_cnt;

	spin_unlock_irqrestore(&phba->hbalock, iflags);

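	/*
	 * Pull deferred IOCBs off the txq while SGLs are available and
	 * issue them to the ELS work queue.
	 */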
	while (pring->txq_cnt) {
		spin_lock_irqsave(&phba->hbalock, iflags);

		piocbq = lpfc_sli_ringtx_get(phba, pring);
		sglq = __lpfc_sli_get_sglq(phba, piocbq);
		if (!sglq) {
			__lpfc_sli_ringtx_put(phba, pring, piocbq);
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			break;
		} else {
			if (!piocbq) {
				/* The txq_cnt is out of sync with the txq.
				 * This should never happen.
				 */
				sglq = __lpfc_clear_active_sglq(phba,
						 sglq->sli4_lxritag);
				spin_unlock_irqrestore(&phba->hbalock, iflags);
				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2823 txq empty and txq_cnt is %d\n",
					pring->txq_cnt);
				break;
			}
		}

		/* The XRI and IOCB resources are secured,
		 * attempt to issue the request.
		 */
		piocbq->sli4_lxritag = sglq->sli4_lxritag;
		piocbq->sli4_xritag = sglq->sli4_xritag;
		if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocbq, sglq))
			fail_msg = "to convert bpl to sgl";
		else if (lpfc_sli4_iocb2wqe(phba, piocbq, &wqe))
			fail_msg = "to convert iocb to wqe";
		else if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe))
			fail_msg = " - Wq is full";
		else
			lpfc_sli_ringtxcmpl_put(phba, pring, piocbq);

		if (fail_msg) {
			/* Failure means we cannot issue and need to cancel */
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2822 IOCB failed %s iotag 0x%x "
					"xri 0x%x\n",
					fail_msg,
					piocbq->iotag, piocbq->sli4_xritag);
			list_add_tail(&piocbq->list, &completions);
		}
		spin_unlock_irqrestore(&phba->hbalock, iflags);
	}

	/* Cancel all the IOCBs that could not be issued */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_ABORTED);

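	/* Return the number of IOCBs still deferred on the txq. */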
	return pring->txq_cnt;
}