1/*
2 * Copyright(c) 2007 Intel Corporation. All rights reserved.
3 * Copyright(c) 2008 Red Hat, Inc. All rights reserved.
4 * Copyright(c) 2008 Mike Christie
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Maintained at www.Open-FCoE.org
20 */
21
22/*
23 * Fibre Channel exchange and sequence handling.
24 */
25
26#include <linux/timer.h>
27#include <linux/gfp.h>
28#include <linux/err.h>
29
30#include <scsi/fc/fc_fc2.h>
31
32#include <scsi/libfc.h>
33#include <scsi/fc_encode.h>
34
35static struct kmem_cache *fc_em_cachep;	/* cache for exchanges */
36
37/*
38 * Structure and function definitions for managing Fibre Channel Exchanges
39 * and Sequences.
40 *
41 * The three primary structures used here are fc_exch_mgr, fc_exch, and fc_seq.
42 *
43 * fc_exch_mgr holds the exchange state for an N port
44 *
45 * fc_exch holds state for one exchange and links to its active sequence.
46 *
47 * fc_seq holds the state for an individual sequence.
48 */
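/*
 * For orientation, a typical originator flow through these structures, as
 * implemented by the routines below (a sketch, not additional API):
 *
 *	sp = lp->tt.exch_seq_send(lp, fp, resp, NULL, arg, timer_msec);
 *	...				(resp() is called with reply frames)
 *	lp->tt.exch_done(sp);		(from the ULP when the tuple is done)
 *
 * Received frames are matched back to their exchange in fc_exch_recv().
 */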
49
50/*
51 * Exchange manager.
52 *
53 * This structure is the center for creating exchanges and sequences.
54 * It manages the allocation of exchange IDs.
55 */
56struct fc_exch_mgr {
57 enum fc_class class; /* default class for sequences */
58	struct kref kref;	/* exchange mgr reference count */
59	spinlock_t em_lock;	/* exchange manager lock,
60 must be taken before ex_lock */
61 u16 last_xid; /* last allocated exchange ID */
62 u16 min_xid; /* min exchange ID */
63 u16 max_xid; /* max exchange ID */
64 u16 max_read; /* max exchange ID for read */
65 u16 last_read; /* last xid allocated for read */
66 u32 total_exches; /* total allocated exchanges */
67 struct list_head ex_list; /* allocated exchanges list */
68 struct fc_lport *lp; /* fc device instance */
69 mempool_t *ep_pool; /* reserve ep's */
70
71 /*
72	 * Currently exchange mgr stats are updated but not used.
73	 * Either expose the stats via sysfs or remove them
74	 * altogether if they remain unused. XXX
75 */
76 struct {
77 atomic_t no_free_exch;
78 atomic_t no_free_exch_xid;
79 atomic_t xid_not_found;
80 atomic_t xid_busy;
81 atomic_t seq_not_found;
82 atomic_t non_bls_resp;
83 } stats;
84 struct fc_exch **exches; /* for exch pointers indexed by xid */
85};
86#define fc_seq_exch(sp) container_of(sp, struct fc_exch, seq)
87
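/*
 * An exchange manager anchor ties a (possibly shared) exchange manager to
 * a local port; see fc_exch_mgr_add() and fc_exch_mgr_del() below.
 */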
88struct fc_exch_mgr_anchor {
89 struct list_head ema_list;
90 struct fc_exch_mgr *mp;
91 bool (*match)(struct fc_frame *);
92};
93
94static void fc_exch_rrq(struct fc_exch *);
95static void fc_seq_ls_acc(struct fc_seq *);
96static void fc_seq_ls_rjt(struct fc_seq *, enum fc_els_rjt_reason,
97 enum fc_els_rjt_explan);
98static void fc_exch_els_rec(struct fc_seq *, struct fc_frame *);
99static void fc_exch_els_rrq(struct fc_seq *, struct fc_frame *);
100static struct fc_seq *fc_seq_start_next_locked(struct fc_seq *sp);
101
102/*
103 * Internal implementation notes.
104 *
105 * By default libfc has a single exchange manager, but an LLD may choose
106 * to have one per CPU. There is one sequence manager per exchange manager
107 * and the two are currently never separated.
108 *
109 * Section 9.8 in FC-FS-2 specifies: "The SEQ_ID is a one-byte field
110 * assigned by the Sequence Initiator that shall be unique for a specific
111 * D_ID and S_ID pair while the Sequence is open." Note that it isn't
112 * qualified by exchange ID, which one might think it would be.
113 * In practice this limits the number of open sequences and exchanges to 256
114 * per session. For most targets we could treat this limit as per exchange.
115 *
116 * The exchange and its sequence are freed when the last sequence is received.
117 * It's possible for the remote port to leave an exchange open without
118 * sending any sequences.
119 *
120 * Notes on reference counts:
121 *
122 * Exchanges are reference counted and exchange gets freed when the reference
123 * count becomes zero.
124 *
125 * Timeouts:
126 * Sequences are timed out for E_D_TOV and R_A_TOV.
127 *
128 * Sequence event handling:
129 *
130 * The following events may occur on initiator sequences:
131 *
132 * Send.
133 * For now, the whole thing is sent.
134 * Receive ACK
135 * This applies only to class F.
136 * The sequence is marked complete.
137 * ULP completion.
138 * The upper layer calls fc_exch_done() when done
139 * with exchange and sequence tuple.
140 * RX-inferred completion.
141 * When we receive the next sequence on the same exchange, we can
142 * retire the previous sequence ID. (XXX not implemented).
143 * Timeout.
144 * R_A_TOV frees the sequence ID. If we're waiting for ACK,
145 * E_D_TOV causes abort and calls upper layer response handler
146 * with FC_EX_TIMEOUT error.
147 * Receive RJT
148 * XXX defer.
149 * Send ABTS
150 * On timeout.
151 *
152 * The following events may occur on recipient sequences:
153 *
154 * Receive
155 * Allocate sequence for first frame received.
156 * Hold during receive handler.
157 * Release when final frame received.
158 * Keep status of last N of these for the ELS RES command. XXX TBD.
159 * Receive ABTS
160 * Deallocate sequence
161 * Send RJT
162 * Deallocate
163 *
164 * For now, we neglect conditions where only part of a sequence was
165 * received or transmitted, or where out-of-order receipt is detected.
166 */
167
168/*
169 * Locking notes:
170 *
171 * The EM code runs in a per-CPU worker thread.
172 *
173 * To protect against concurrency between worker thread code and timers,
174 * sequence allocation and deallocation must be locked.
175 * - exchange refcnt can be done atomically without locks.
176 * - sequence allocation must be locked by exch lock.
177 * - If the em_lock and ex_lock must be taken at the same time, then the
178 * em_lock must be taken before the ex_lock.
179 */
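/*
 * Illustrative sketch (not additional API): any path that needs both locks,
 * e.g. fc_exch_alloc() below, takes them in the documented order:
 *
 *	spin_lock_bh(&mp->em_lock);	(exchange manager lock first)
 *	spin_lock_bh(&ep->ex_lock);	(then the exchange lock)
 */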
180
181/*
182 * opcode names for debugging.
183 */
184static char *fc_exch_rctl_names[] = FC_RCTL_NAMES_INIT;
185
186#define FC_TABLE_SIZE(x) (sizeof(x) / sizeof(x[0]))
187
188static inline const char *fc_exch_name_lookup(unsigned int op, char **table,
189 unsigned int max_index)
190{
191 const char *name = NULL;
192
193 if (op < max_index)
194 name = table[op];
195 if (!name)
196 name = "unknown";
197 return name;
198}
199
200static const char *fc_exch_rctl_name(unsigned int op)
201{
202 return fc_exch_name_lookup(op, fc_exch_rctl_names,
203 FC_TABLE_SIZE(fc_exch_rctl_names));
204}
205
206/*
207 * Hold an exchange - keep it from being freed.
208 */
209static void fc_exch_hold(struct fc_exch *ep)
210{
211 atomic_inc(&ep->ex_refcnt);
212}
213
214/*
215 * Set up the FC header by initializing a few more header fields and sof/eof.
216 * Fields initialized by this function:
217 * - fh_ox_id, fh_rx_id, fh_seq_id, fh_seq_cnt
218 * - sof and eof
219 */
220static void fc_exch_setup_hdr(struct fc_exch *ep, struct fc_frame *fp,
221 u32 f_ctl)
222{
223 struct fc_frame_header *fh = fc_frame_header_get(fp);
224 u16 fill;
225
226 fr_sof(fp) = ep->class;
227 if (ep->seq.cnt)
228 fr_sof(fp) = fc_sof_normal(ep->class);
229
230 if (f_ctl & FC_FC_END_SEQ) {
231 fr_eof(fp) = FC_EOF_T;
232 if (fc_sof_needs_ack(ep->class))
233 fr_eof(fp) = FC_EOF_N;
234 /*
235 * Form f_ctl.
236 * The number of fill bytes to make the length a 4-byte
237 * multiple is the low order 2-bits of the f_ctl.
238 * The fill itself will have been cleared by the frame
239 * allocation.
240 * After this, the length will be even, as expected by
241 * the transport.
242 */
243 fill = fr_len(fp) & 3;
244 if (fill) {
245 fill = 4 - fill;
246 /* TODO, this may be a problem with fragmented skb */
247 skb_put(fp_skb(fp), fill);
248 hton24(fh->fh_f_ctl, f_ctl | fill);
249 }
250 } else {
251		WARN_ON(fr_len(fp) % 4 != 0);	/* no pad on a non-last frame */
252 fr_eof(fp) = FC_EOF_N;
253 }
254
255 /*
256	 * Initialize remaining fh fields
257 * from fc_fill_fc_hdr
258 */
259 fh->fh_ox_id = htons(ep->oxid);
260 fh->fh_rx_id = htons(ep->rxid);
261 fh->fh_seq_id = ep->seq.id;
262 fh->fh_seq_cnt = htons(ep->seq.cnt);
263}
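/*
 * Worked example of the fill handling above: a frame with fr_len(fp) == 13
 * has 13 & 3 == 1, so fill = 4 - 1 = 3 pad bytes are appended and the low
 * two f_ctl bits are set to 3; the frame goes out 16 bytes long, a 4-byte
 * multiple as the transport expects.
 */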
264
265
266/*
267 * Release a reference to an exchange.
268 * If the refcnt goes to zero and the exchange is complete, it is freed.
269 */
270static void fc_exch_release(struct fc_exch *ep)
271{
272 struct fc_exch_mgr *mp;
273
274 if (atomic_dec_and_test(&ep->ex_refcnt)) {
275 mp = ep->em;
276 if (ep->destructor)
277 ep->destructor(&ep->seq, ep->arg);
278 if (ep->lp->tt.exch_put)
279 ep->lp->tt.exch_put(ep->lp, mp, ep->xid);
280		WARN_ON(!(ep->esb_stat & ESB_ST_COMPLETE));
281		mempool_free(ep, mp->ep_pool);
282 }
283}
284
285static int fc_exch_done_locked(struct fc_exch *ep)
286{
287 int rc = 1;
288
289 /*
290 * We must check for completion in case there are two threads
291	 * trying to complete this. But the rrq code will reuse the
292 * ep, and in that case we only clear the resp and set it as
293 * complete, so it can be reused by the timer to send the rrq.
294 */
295 ep->resp = NULL;
296 if (ep->state & FC_EX_DONE)
297 return rc;
298 ep->esb_stat |= ESB_ST_COMPLETE;
299
300 if (!(ep->esb_stat & ESB_ST_REC_QUAL)) {
301 ep->state |= FC_EX_DONE;
302 if (cancel_delayed_work(&ep->timeout_work))
303 atomic_dec(&ep->ex_refcnt); /* drop hold for timer */
304 rc = 0;
305 }
306 return rc;
307}
308
309static void fc_exch_mgr_delete_ep(struct fc_exch *ep)
310{
311 struct fc_exch_mgr *mp;
312
313 mp = ep->em;
314 spin_lock_bh(&mp->em_lock);
315 WARN_ON(mp->total_exches <= 0);
316 mp->total_exches--;
317 mp->exches[ep->xid - mp->min_xid] = NULL;
318 list_del(&ep->ex_list);
319 spin_unlock_bh(&mp->em_lock);
320 fc_exch_release(ep); /* drop hold for exch in mp */
321}
322
323/*
324 * Internal version of fc_exch_timer_set - used with lock held.
325 */
326static inline void fc_exch_timer_set_locked(struct fc_exch *ep,
327 unsigned int timer_msec)
328{
329 if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE))
330 return;
331
332	FC_EXCH_DBG(ep, "Exchange timer armed\n");
333
334	if (schedule_delayed_work(&ep->timeout_work,
335 msecs_to_jiffies(timer_msec)))
336 fc_exch_hold(ep); /* hold for timer */
337}
338
339/*
340 * Set timer for an exchange.
341 * The time is a minimum delay in milliseconds until the timer fires.
342 * Used for upper level protocols to time out the exchange.
343 * The timer is cancelled when it fires or when the exchange completes.
344 * A reference is held on the exchange while the timer is pending.
345 */
346static void fc_exch_timer_set(struct fc_exch *ep, unsigned int timer_msec)
347{
348 spin_lock_bh(&ep->ex_lock);
349 fc_exch_timer_set_locked(ep, timer_msec);
350 spin_unlock_bh(&ep->ex_lock);
351}
352
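/*
 * fc_seq_exch_abort() - abort the exchange associated with a sequence.
 * Starts a new sequence and sends an ABTS on it when the port is logged in,
 * optionally arming the exchange timer first.
 * Returns zero on success or a negative error code.
 */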
353int fc_seq_exch_abort(const struct fc_seq *req_sp, unsigned int timer_msec)
354{
355 struct fc_seq *sp;
356 struct fc_exch *ep;
357 struct fc_frame *fp;
358 int error;
359
360 ep = fc_seq_exch(req_sp);
361
362 spin_lock_bh(&ep->ex_lock);
363 if (ep->esb_stat & (ESB_ST_COMPLETE | ESB_ST_ABNORMAL) ||
364 ep->state & (FC_EX_DONE | FC_EX_RST_CLEANUP)) {
365 spin_unlock_bh(&ep->ex_lock);
366 return -ENXIO;
367 }
368
369 /*
370 * Send the abort on a new sequence if possible.
371 */
372 sp = fc_seq_start_next_locked(&ep->seq);
373 if (!sp) {
374 spin_unlock_bh(&ep->ex_lock);
375 return -ENOMEM;
376 }
377
378 ep->esb_stat |= ESB_ST_SEQ_INIT | ESB_ST_ABNORMAL;
379 if (timer_msec)
380 fc_exch_timer_set_locked(ep, timer_msec);
381 spin_unlock_bh(&ep->ex_lock);
382
383 /*
384 * If not logged into the fabric, don't send ABTS but leave
385 * sequence active until next timeout.
386 */
387 if (!ep->sid)
388 return 0;
389
390 /*
391 * Send an abort for the sequence that timed out.
392 */
393 fp = fc_frame_alloc(ep->lp, 0);
394 if (fp) {
395 fc_fill_fc_hdr(fp, FC_RCTL_BA_ABTS, ep->did, ep->sid,
396 FC_TYPE_BLS, FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
397 error = fc_seq_send(ep->lp, sp, fp);
398 } else
399 error = -ENOBUFS;
400 return error;
401}
402EXPORT_SYMBOL(fc_seq_exch_abort);
403
404/*
405 * Exchange timeout - handle exchange timer expiration.
406 * The timer will have been cancelled before this is called.
407 */
408static void fc_exch_timeout(struct work_struct *work)
409{
410 struct fc_exch *ep = container_of(work, struct fc_exch,
411 timeout_work.work);
412 struct fc_seq *sp = &ep->seq;
413 void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg);
414 void *arg;
415 u32 e_stat;
416 int rc = 1;
417
418 spin_lock_bh(&ep->ex_lock);
419 if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE))
420 goto unlock;
421
422 e_stat = ep->esb_stat;
423 if (e_stat & ESB_ST_COMPLETE) {
424 ep->esb_stat = e_stat & ~ESB_ST_REC_QUAL;
425		spin_unlock_bh(&ep->ex_lock);
426		if (e_stat & ESB_ST_REC_QUAL)
427			fc_exch_rrq(ep);
428		goto done;
429 } else {
430 resp = ep->resp;
431 arg = ep->arg;
432 ep->resp = NULL;
433 if (e_stat & ESB_ST_ABNORMAL)
434 rc = fc_exch_done_locked(ep);
435 spin_unlock_bh(&ep->ex_lock);
436 if (!rc)
437 fc_exch_mgr_delete_ep(ep);
438 if (resp)
439 resp(sp, ERR_PTR(-FC_EX_TIMEOUT), arg);
440 fc_seq_exch_abort(sp, 2 * ep->r_a_tov);
441 goto done;
442 }
443unlock:
444 spin_unlock_bh(&ep->ex_lock);
445done:
446 /*
447 * This release matches the hold taken when the timer was set.
448 */
449 fc_exch_release(ep);
450}
451
452/*
453 * Allocate a sequence.
454 *
455 * We don't support multiple originated sequences on the same exchange.
456 * By implication, any previously originated sequence on this exchange
457 * is complete, and we reallocate the same sequence.
458 */
459static struct fc_seq *fc_seq_alloc(struct fc_exch *ep, u8 seq_id)
460{
461 struct fc_seq *sp;
462
463 sp = &ep->seq;
464 sp->ssb_stat = 0;
465 sp->cnt = 0;
466 sp->id = seq_id;
467 return sp;
468}
469
470/*
471 * fc_em_alloc_xid - returns an xid based on request type
472 * @mp : ptr to the exchange manager
473 * @fp : ptr to the associated frame
474 *
475 * Check the associated fc_fsp_pkt to get the SCSI command type and
476 * direction, which decide the range from which this exchange ID
477 * will be allocated.
478 *
479 * Returns : 0 or a valid xid
480 */
481static u16 fc_em_alloc_xid(struct fc_exch_mgr *mp, const struct fc_frame *fp)
482{
483 u16 xid, min, max;
484 u16 *plast;
485 struct fc_exch *ep = NULL;
486
487 if (mp->max_read) {
488		if (fc_fcp_is_read(fr_fsp(fp))) {
489			min = mp->min_xid;
490 max = mp->max_read;
491 plast = &mp->last_read;
492 } else {
493 min = mp->max_read + 1;
494 max = mp->max_xid;
495 plast = &mp->last_xid;
496 }
497 } else {
498 min = mp->min_xid;
499 max = mp->max_xid;
500 plast = &mp->last_xid;
501 }
502 xid = *plast;
503 do {
504 xid = (xid == max) ? min : xid + 1;
505 ep = mp->exches[xid - mp->min_xid];
506 } while ((ep != NULL) && (xid != *plast));
507
508 if (unlikely(ep))
509 xid = 0;
510 else
511 *plast = xid;
512
513 return xid;
514}
515
516/*
517 * fc_exch_alloc - allocate an exchange.
518 * @mp : ptr to the exchange manager
519 * @xid: input xid
520 *
521 * If the supplied xid is zero, assign the next free exchange ID
522 * from the exchange manager; otherwise use the supplied xid.
523 * Returns with exch lock held.
524 */
525struct fc_exch *fc_exch_alloc(struct fc_exch_mgr *mp,
526 struct fc_frame *fp, u16 xid)
527{
528 struct fc_exch *ep;
529
530 /* allocate memory for exchange */
531 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
532 if (!ep) {
533 atomic_inc(&mp->stats.no_free_exch);
534 goto out;
535 }
536 memset(ep, 0, sizeof(*ep));
537
538 spin_lock_bh(&mp->em_lock);
539 /* alloc xid if input xid 0 */
540 if (!xid) {
541 /* alloc a new xid */
542 xid = fc_em_alloc_xid(mp, fp);
543 if (!xid) {
544			printk(KERN_WARNING "libfc: Failed to allocate an exchange\n");
545			goto err;
546 }
547 }
548
549 fc_exch_hold(ep); /* hold for exch in mp */
550 spin_lock_init(&ep->ex_lock);
551 /*
552 * Hold exch lock for caller to prevent fc_exch_reset()
553 * from releasing exch while fc_exch_alloc() caller is
554 * still working on exch.
555 */
556 spin_lock_bh(&ep->ex_lock);
557
558 mp->exches[xid - mp->min_xid] = ep;
559 list_add_tail(&ep->ex_list, &mp->ex_list);
560 fc_seq_alloc(ep, ep->seq_id++);
561 mp->total_exches++;
562 spin_unlock_bh(&mp->em_lock);
563
564 /*
565 * update exchange
566 */
567 ep->oxid = ep->xid = xid;
568 ep->em = mp;
569 ep->lp = mp->lp;
570 ep->f_ctl = FC_FC_FIRST_SEQ; /* next seq is first seq */
571 ep->rxid = FC_XID_UNKNOWN;
572 ep->class = mp->class;
573 INIT_DELAYED_WORK(&ep->timeout_work, fc_exch_timeout);
574out:
575 return ep;
576err:
577 spin_unlock_bh(&mp->em_lock);
578 atomic_inc(&mp->stats.no_free_exch_xid);
579 mempool_free(ep, mp->ep_pool);
580 return NULL;
581}
582EXPORT_SYMBOL(fc_exch_alloc);
583
584/*
585 * Lookup and hold an exchange.
586 */
587static struct fc_exch *fc_exch_find(struct fc_exch_mgr *mp, u16 xid)
588{
589 struct fc_exch *ep = NULL;
590
591 if ((xid >= mp->min_xid) && (xid <= mp->max_xid)) {
592 spin_lock_bh(&mp->em_lock);
593 ep = mp->exches[xid - mp->min_xid];
594 if (ep) {
595 fc_exch_hold(ep);
596 WARN_ON(ep->xid != xid);
597 }
598 spin_unlock_bh(&mp->em_lock);
599 }
600 return ep;
601}
602
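/*
 * fc_exch_done() - ULP indication that the exchange/sequence tuple is done.
 * Marks the exchange complete and removes it from the exchange manager
 * unless a recovery qualifier is still outstanding.
 */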
603void fc_exch_done(struct fc_seq *sp)
604{
605 struct fc_exch *ep = fc_seq_exch(sp);
606 int rc;
607
608 spin_lock_bh(&ep->ex_lock);
609 rc = fc_exch_done_locked(ep);
610 spin_unlock_bh(&ep->ex_lock);
611 if (!rc)
612 fc_exch_mgr_delete_ep(ep);
613}
614EXPORT_SYMBOL(fc_exch_done);
615
616/*
617 * Allocate a new exchange as responder.
618 * Sets the responder ID in the frame header.
619 */
620static struct fc_exch *fc_exch_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
621{
622 struct fc_exch *ep;
623 struct fc_frame_header *fh;
624
625 ep = mp->lp->tt.exch_get(mp->lp, fp);
626 if (ep) {
627 ep->class = fc_frame_class(fp);
628
629 /*
630 * Set EX_CTX indicating we're responding on this exchange.
631 */
632 ep->f_ctl |= FC_FC_EX_CTX; /* we're responding */
633 ep->f_ctl &= ~FC_FC_FIRST_SEQ; /* not new */
634 fh = fc_frame_header_get(fp);
635 ep->sid = ntoh24(fh->fh_d_id);
636 ep->did = ntoh24(fh->fh_s_id);
637 ep->oid = ep->did;
638
639 /*
640 * Allocated exchange has placed the XID in the
641 * originator field. Move it to the responder field,
642 * and set the originator XID from the frame.
643 */
644 ep->rxid = ep->xid;
645 ep->oxid = ntohs(fh->fh_ox_id);
646 ep->esb_stat |= ESB_ST_RESP | ESB_ST_SEQ_INIT;
647 if ((ntoh24(fh->fh_f_ctl) & FC_FC_SEQ_INIT) == 0)
648 ep->esb_stat &= ~ESB_ST_SEQ_INIT;
649
650		fc_exch_hold(ep);	/* hold for caller */
651 spin_unlock_bh(&ep->ex_lock); /* lock from exch_get */
652 }
653 return ep;
654}
655
656/*
657 * Find a sequence for receive where the other end is originating the sequence.
658 * If fc_pf_rjt_reason is FC_RJT_NONE then this function will have a hold
659 * on the ep that should be released by the caller.
660 */
661static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_exch_mgr *mp,
662						 struct fc_frame *fp)
663{
664 struct fc_frame_header *fh = fc_frame_header_get(fp);
665 struct fc_exch *ep = NULL;
666 struct fc_seq *sp = NULL;
667 enum fc_pf_rjt_reason reject = FC_RJT_NONE;
668 u32 f_ctl;
669 u16 xid;
670
671 f_ctl = ntoh24(fh->fh_f_ctl);
672 WARN_ON((f_ctl & FC_FC_SEQ_CTX) != 0);
673
674 /*
675 * Lookup or create the exchange if we will be creating the sequence.
676 */
677 if (f_ctl & FC_FC_EX_CTX) {
678 xid = ntohs(fh->fh_ox_id); /* we originated exch */
679 ep = fc_exch_find(mp, xid);
680 if (!ep) {
681 atomic_inc(&mp->stats.xid_not_found);
682 reject = FC_RJT_OX_ID;
683 goto out;
684 }
685 if (ep->rxid == FC_XID_UNKNOWN)
686 ep->rxid = ntohs(fh->fh_rx_id);
687 else if (ep->rxid != ntohs(fh->fh_rx_id)) {
688 reject = FC_RJT_OX_ID;
689 goto rel;
690 }
691 } else {
692 xid = ntohs(fh->fh_rx_id); /* we are the responder */
693
694 /*
695 * Special case for MDS issuing an ELS TEST with a
696 * bad rxid of 0.
697 * XXX take this out once we do the proper reject.
698 */
699 if (xid == 0 && fh->fh_r_ctl == FC_RCTL_ELS_REQ &&
700 fc_frame_payload_op(fp) == ELS_TEST) {
701 fh->fh_rx_id = htons(FC_XID_UNKNOWN);
702 xid = FC_XID_UNKNOWN;
703 }
704
705 /*
706 * new sequence - find the exchange
707 */
708 ep = fc_exch_find(mp, xid);
709 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
710 if (ep) {
711 atomic_inc(&mp->stats.xid_busy);
712 reject = FC_RJT_RX_ID;
713 goto rel;
714 }
715 ep = fc_exch_resp(mp, fp);
716 if (!ep) {
717 reject = FC_RJT_EXCH_EST; /* XXX */
718 goto out;
719 }
720 xid = ep->xid; /* get our XID */
721 } else if (!ep) {
722 atomic_inc(&mp->stats.xid_not_found);
723 reject = FC_RJT_RX_ID; /* XID not found */
724 goto out;
725 }
726 }
727
728 /*
729 * At this point, we have the exchange held.
730 * Find or create the sequence.
731 */
732 if (fc_sof_is_init(fr_sof(fp))) {
733 sp = fc_seq_start_next(&ep->seq);
734 if (!sp) {
735 reject = FC_RJT_SEQ_XS; /* exchange shortage */
736 goto rel;
737 }
738 sp->id = fh->fh_seq_id;
739 sp->ssb_stat |= SSB_ST_RESP;
740 } else {
741 sp = &ep->seq;
742 if (sp->id != fh->fh_seq_id) {
743 atomic_inc(&mp->stats.seq_not_found);
744 reject = FC_RJT_SEQ_ID; /* sequence/exch should exist */
745 goto rel;
746 }
747 }
748 WARN_ON(ep != fc_seq_exch(sp));
749
750 if (f_ctl & FC_FC_SEQ_INIT)
751 ep->esb_stat |= ESB_ST_SEQ_INIT;
752
753 fr_seq(fp) = sp;
754out:
755 return reject;
756rel:
757 fc_exch_done(&ep->seq);
758 fc_exch_release(ep); /* hold from fc_exch_find/fc_exch_resp */
759 return reject;
760}
761
762/*
763 * Find the sequence for a frame being received.
764 * We originated the sequence, so it should be found.
765 * We may or may not have originated the exchange.
766 * Does not hold the sequence for the caller.
767 */
768static struct fc_seq *fc_seq_lookup_orig(struct fc_exch_mgr *mp,
769 struct fc_frame *fp)
770{
771 struct fc_frame_header *fh = fc_frame_header_get(fp);
772 struct fc_exch *ep;
773 struct fc_seq *sp = NULL;
774 u32 f_ctl;
775 u16 xid;
776
777 f_ctl = ntoh24(fh->fh_f_ctl);
778 WARN_ON((f_ctl & FC_FC_SEQ_CTX) != FC_FC_SEQ_CTX);
779 xid = ntohs((f_ctl & FC_FC_EX_CTX) ? fh->fh_ox_id : fh->fh_rx_id);
780 ep = fc_exch_find(mp, xid);
781 if (!ep)
782 return NULL;
783 if (ep->seq.id == fh->fh_seq_id) {
784 /*
785 * Save the RX_ID if we didn't previously know it.
786 */
787 sp = &ep->seq;
788 if ((f_ctl & FC_FC_EX_CTX) != 0 &&
789 ep->rxid == FC_XID_UNKNOWN) {
790 ep->rxid = ntohs(fh->fh_rx_id);
791 }
792 }
793 fc_exch_release(ep);
794 return sp;
795}
796
797/*
798 * Set addresses for an exchange.
799 * Note this must be done before the first sequence of the exchange is sent.
800 */
801static void fc_exch_set_addr(struct fc_exch *ep,
802 u32 orig_id, u32 resp_id)
803{
804 ep->oid = orig_id;
805 if (ep->esb_stat & ESB_ST_RESP) {
806 ep->sid = resp_id;
807 ep->did = orig_id;
808 } else {
809 ep->sid = orig_id;
810 ep->did = resp_id;
811 }
812}
813
814static struct fc_seq *fc_seq_start_next_locked(struct fc_seq *sp)
815{
816 struct fc_exch *ep = fc_seq_exch(sp);
817
818 sp = fc_seq_alloc(ep, ep->seq_id++);
819	FC_EXCH_DBG(ep, "f_ctl %6x seq %2x\n",
820		    ep->f_ctl, sp->id);
821	return sp;
822}
823/*
824 * Allocate a new sequence on the same exchange as the supplied sequence.
825 * This will never return NULL.
826 */
827struct fc_seq *fc_seq_start_next(struct fc_seq *sp)
828{
829 struct fc_exch *ep = fc_seq_exch(sp);
830
831 spin_lock_bh(&ep->ex_lock);
832	sp = fc_seq_start_next_locked(sp);
833 spin_unlock_bh(&ep->ex_lock);
834
835 return sp;
836}
837EXPORT_SYMBOL(fc_seq_start_next);
838
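/*
 * fc_seq_send() - send a frame on a sequence.
 * Fills in the exchange-related header fields, accounts for LLD sequence
 * offload in the sequence count, and hands the frame to the LLD.
 */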
839int fc_seq_send(struct fc_lport *lp, struct fc_seq *sp, struct fc_frame *fp)
840{
841 struct fc_exch *ep;
842 struct fc_frame_header *fh = fc_frame_header_get(fp);
843 int error;
844 u32 f_ctl;
845
846 ep = fc_seq_exch(sp);
847 WARN_ON((ep->esb_stat & ESB_ST_SEQ_INIT) != ESB_ST_SEQ_INIT);
848
849 f_ctl = ntoh24(fh->fh_f_ctl);
850 fc_exch_setup_hdr(ep, fp, f_ctl);
851
852 /*
853	 * Update the sequence count if this frame carries
854	 * multiple FC frames (when sequence offload is enabled
855	 * by the LLD).
856 */
857 if (fr_max_payload(fp))
858 sp->cnt += DIV_ROUND_UP((fr_len(fp) - sizeof(*fh)),
859 fr_max_payload(fp));
860 else
861 sp->cnt++;
862
863 /*
864 * Send the frame.
865 */
866 error = lp->tt.frame_send(lp, fp);
867
868 /*
869 * Update the exchange and sequence flags,
870 * assuming all frames for the sequence have been sent.
871 * We can only be called to send once for each sequence.
872 */
873 spin_lock_bh(&ep->ex_lock);
874 ep->f_ctl = f_ctl & ~FC_FC_FIRST_SEQ; /* not first seq */
875 if (f_ctl & (FC_FC_END_SEQ | FC_FC_SEQ_INIT))
876 ep->esb_stat &= ~ESB_ST_SEQ_INIT;
877 spin_unlock_bh(&ep->ex_lock);
878 return error;
879}
880EXPORT_SYMBOL(fc_seq_send);
881
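/*
 * fc_seq_els_rsp_send() - send an ELS response on the supplied sequence,
 * dispatching on the ELS command (LS_ACC, LS_RJT, RRQ, REC).
 */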
882void fc_seq_els_rsp_send(struct fc_seq *sp, enum fc_els_cmd els_cmd,
883 struct fc_seq_els_data *els_data)
884{
885 switch (els_cmd) {
886 case ELS_LS_RJT:
887 fc_seq_ls_rjt(sp, els_data->reason, els_data->explan);
888 break;
889 case ELS_LS_ACC:
890 fc_seq_ls_acc(sp);
891 break;
892 case ELS_RRQ:
893 fc_exch_els_rrq(sp, els_data->fp);
894 break;
895 case ELS_REC:
896 fc_exch_els_rec(sp, els_data->fp);
897 break;
898 default:
899		FC_EXCH_DBG(fc_seq_exch(sp), "Invalid ELS CMD:%x\n", els_cmd);
900	}
901}
902EXPORT_SYMBOL(fc_seq_els_rsp_send);
903
904/*
905 * Send a sequence, which is also the last sequence in the exchange.
906 */
907static void fc_seq_send_last(struct fc_seq *sp, struct fc_frame *fp,
908 enum fc_rctl rctl, enum fc_fh_type fh_type)
909{
910 u32 f_ctl;
911 struct fc_exch *ep = fc_seq_exch(sp);
912
913 f_ctl = FC_FC_LAST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT;
914 f_ctl |= ep->f_ctl;
915 fc_fill_fc_hdr(fp, rctl, ep->did, ep->sid, fh_type, f_ctl, 0);
916 fc_seq_send(ep->lp, sp, fp);
917}
918
919/*
920 * Send ACK_1 (or equiv.) indicating we received something.
921 * The frame we're acking is supplied.
922 */
923static void fc_seq_send_ack(struct fc_seq *sp, const struct fc_frame *rx_fp)
924{
925 struct fc_frame *fp;
926 struct fc_frame_header *rx_fh;
927 struct fc_frame_header *fh;
928 struct fc_exch *ep = fc_seq_exch(sp);
929 struct fc_lport *lp = ep->lp;
930 unsigned int f_ctl;
931
932 /*
933 * Don't send ACKs for class 3.
934 */
935 if (fc_sof_needs_ack(fr_sof(rx_fp))) {
936 fp = fc_frame_alloc(lp, 0);
937 if (!fp)
938 return;
939
940 fh = fc_frame_header_get(fp);
941 fh->fh_r_ctl = FC_RCTL_ACK_1;
942 fh->fh_type = FC_TYPE_BLS;
943
944 /*
945 * Form f_ctl by inverting EX_CTX and SEQ_CTX (bits 23, 22).
946 * Echo FIRST_SEQ, LAST_SEQ, END_SEQ, END_CONN, SEQ_INIT.
947 * Bits 9-8 are meaningful (retransmitted or unidirectional).
948 * Last ACK uses bits 7-6 (continue sequence),
949 * bits 5-4 are meaningful (what kind of ACK to use).
950 */
951 rx_fh = fc_frame_header_get(rx_fp);
952 f_ctl = ntoh24(rx_fh->fh_f_ctl);
953 f_ctl &= FC_FC_EX_CTX | FC_FC_SEQ_CTX |
954 FC_FC_FIRST_SEQ | FC_FC_LAST_SEQ |
955 FC_FC_END_SEQ | FC_FC_END_CONN | FC_FC_SEQ_INIT |
956 FC_FC_RETX_SEQ | FC_FC_UNI_TX;
957 f_ctl ^= FC_FC_EX_CTX | FC_FC_SEQ_CTX;
958 hton24(fh->fh_f_ctl, f_ctl);
959
960 fc_exch_setup_hdr(ep, fp, f_ctl);
961 fh->fh_seq_id = rx_fh->fh_seq_id;
962 fh->fh_seq_cnt = rx_fh->fh_seq_cnt;
963 fh->fh_parm_offset = htonl(1); /* ack single frame */
964
965 fr_sof(fp) = fr_sof(rx_fp);
966 if (f_ctl & FC_FC_END_SEQ)
967 fr_eof(fp) = FC_EOF_T;
968 else
969 fr_eof(fp) = FC_EOF_N;
970
971 (void) lp->tt.frame_send(lp, fp);
972 }
973}
974
975/*
976 * Send BLS Reject.
977 * This is for rejecting BA_ABTS only.
978 */
979static void fc_exch_send_ba_rjt(struct fc_frame *rx_fp,
980				enum fc_ba_rjt_reason reason,
981				enum fc_ba_rjt_explan explan)
982{
983 struct fc_frame *fp;
984 struct fc_frame_header *rx_fh;
985 struct fc_frame_header *fh;
986 struct fc_ba_rjt *rp;
987 struct fc_lport *lp;
988 unsigned int f_ctl;
989
990 lp = fr_dev(rx_fp);
991 fp = fc_frame_alloc(lp, sizeof(*rp));
992 if (!fp)
993 return;
994 fh = fc_frame_header_get(fp);
995 rx_fh = fc_frame_header_get(rx_fp);
996
997 memset(fh, 0, sizeof(*fh) + sizeof(*rp));
998
999 rp = fc_frame_payload_get(fp, sizeof(*rp));
1000 rp->br_reason = reason;
1001 rp->br_explan = explan;
1002
1003 /*
1004 * seq_id, cs_ctl, df_ctl and param/offset are zero.
1005 */
1006 memcpy(fh->fh_s_id, rx_fh->fh_d_id, 3);
1007 memcpy(fh->fh_d_id, rx_fh->fh_s_id, 3);
1008 fh->fh_ox_id = rx_fh->fh_rx_id;
1009 fh->fh_rx_id = rx_fh->fh_ox_id;
1010 fh->fh_seq_cnt = rx_fh->fh_seq_cnt;
1011 fh->fh_r_ctl = FC_RCTL_BA_RJT;
1012 fh->fh_type = FC_TYPE_BLS;
1013
1014 /*
1015 * Form f_ctl by inverting EX_CTX and SEQ_CTX (bits 23, 22).
1016 * Echo FIRST_SEQ, LAST_SEQ, END_SEQ, END_CONN, SEQ_INIT.
1017 * Bits 9-8 are meaningful (retransmitted or unidirectional).
1018 * Last ACK uses bits 7-6 (continue sequence),
1019 * bits 5-4 are meaningful (what kind of ACK to use).
1020 * Always set LAST_SEQ, END_SEQ.
1021 */
1022 f_ctl = ntoh24(rx_fh->fh_f_ctl);
1023 f_ctl &= FC_FC_EX_CTX | FC_FC_SEQ_CTX |
1024 FC_FC_END_CONN | FC_FC_SEQ_INIT |
1025 FC_FC_RETX_SEQ | FC_FC_UNI_TX;
1026 f_ctl ^= FC_FC_EX_CTX | FC_FC_SEQ_CTX;
1027 f_ctl |= FC_FC_LAST_SEQ | FC_FC_END_SEQ;
1028 f_ctl &= ~FC_FC_FIRST_SEQ;
1029 hton24(fh->fh_f_ctl, f_ctl);
1030
1031 fr_sof(fp) = fc_sof_class(fr_sof(rx_fp));
1032 fr_eof(fp) = FC_EOF_T;
1033 if (fc_sof_needs_ack(fr_sof(fp)))
1034 fr_eof(fp) = FC_EOF_N;
1035
1036 (void) lp->tt.frame_send(lp, fp);
1037}
1038
1039/*
1040 * Handle an incoming ABTS. This would be for target mode usually,
1041 * but could be due to lost FCP transfer ready, confirm or RRQ.
1042 * We always handle this as an exchange abort, ignoring the parameter.
1043 */
1044static void fc_exch_recv_abts(struct fc_exch *ep, struct fc_frame *rx_fp)
1045{
1046 struct fc_frame *fp;
1047 struct fc_ba_acc *ap;
1048 struct fc_frame_header *fh;
1049 struct fc_seq *sp;
1050
1051 if (!ep)
1052 goto reject;
1053 spin_lock_bh(&ep->ex_lock);
1054 if (ep->esb_stat & ESB_ST_COMPLETE) {
1055 spin_unlock_bh(&ep->ex_lock);
1056 goto reject;
1057 }
1058 if (!(ep->esb_stat & ESB_ST_REC_QUAL))
1059 fc_exch_hold(ep); /* hold for REC_QUAL */
1060 ep->esb_stat |= ESB_ST_ABNORMAL | ESB_ST_REC_QUAL;
1061 fc_exch_timer_set_locked(ep, ep->r_a_tov);
1062
1063 fp = fc_frame_alloc(ep->lp, sizeof(*ap));
1064 if (!fp) {
1065 spin_unlock_bh(&ep->ex_lock);
1066 goto free;
1067 }
1068 fh = fc_frame_header_get(fp);
1069 ap = fc_frame_payload_get(fp, sizeof(*ap));
1070 memset(ap, 0, sizeof(*ap));
1071 sp = &ep->seq;
1072 ap->ba_high_seq_cnt = htons(0xffff);
1073 if (sp->ssb_stat & SSB_ST_RESP) {
1074 ap->ba_seq_id = sp->id;
1075 ap->ba_seq_id_val = FC_BA_SEQ_ID_VAL;
1076 ap->ba_high_seq_cnt = fh->fh_seq_cnt;
1077 ap->ba_low_seq_cnt = htons(sp->cnt);
1078 }
1079	sp = fc_seq_start_next_locked(sp);
1080	spin_unlock_bh(&ep->ex_lock);
1081 fc_seq_send_last(sp, fp, FC_RCTL_BA_ACC, FC_TYPE_BLS);
1082 fc_frame_free(rx_fp);
1083 return;
1084
1085reject:
1086 fc_exch_send_ba_rjt(rx_fp, FC_BA_RJT_UNABLE, FC_BA_RJT_INV_XID);
1087free:
1088 fc_frame_free(rx_fp);
1089}
1090
1091/*
1092 * Handle receive where the other end is originating the sequence.
1093 */
1094static void fc_exch_recv_req(struct fc_lport *lp, struct fc_exch_mgr *mp,
1095 struct fc_frame *fp)
1096{
1097 struct fc_frame_header *fh = fc_frame_header_get(fp);
1098 struct fc_seq *sp = NULL;
1099 struct fc_exch *ep = NULL;
1100 enum fc_sof sof;
1101 enum fc_eof eof;
1102 u32 f_ctl;
1103 enum fc_pf_rjt_reason reject;
1104
1105 fr_seq(fp) = NULL;
1106 reject = fc_seq_lookup_recip(mp, fp);
1107 if (reject == FC_RJT_NONE) {
1108 sp = fr_seq(fp); /* sequence will be held */
1109 ep = fc_seq_exch(sp);
1110 sof = fr_sof(fp);
1111 eof = fr_eof(fp);
1112 f_ctl = ntoh24(fh->fh_f_ctl);
1113 fc_seq_send_ack(sp, fp);
1114
1115 /*
1116 * Call the receive function.
1117 *
1118 * The receive function may allocate a new sequence
1119 * over the old one, so we shouldn't change the
1120 * sequence after this.
1121 *
1122 * The frame will be freed by the receive function.
1123 * If new exch resp handler is valid then call that
1124 * first.
1125 */
1126 if (ep->resp)
1127 ep->resp(sp, fp, ep->arg);
1128 else
1129 lp->tt.lport_recv(lp, sp, fp);
1130 fc_exch_release(ep); /* release from lookup */
1131 } else {
1132		FC_EM_DBG(mp, "exch/seq lookup failed: reject %x\n", reject);
1133		fc_frame_free(fp);
1134 }
1135}
1136
1137/*
1138 * Handle receive where the other end is originating the sequence in
1139 * response to our exchange.
1140 */
1141static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
1142{
1143 struct fc_frame_header *fh = fc_frame_header_get(fp);
1144 struct fc_seq *sp;
1145 struct fc_exch *ep;
1146 enum fc_sof sof;
1147 u32 f_ctl;
1148 void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg);
1149 void *ex_resp_arg;
1150 int rc;
1151
1152 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
1153 if (!ep) {
1154 atomic_inc(&mp->stats.xid_not_found);
1155 goto out;
1156 }
1157	if (ep->esb_stat & ESB_ST_COMPLETE) {
1158 atomic_inc(&mp->stats.xid_not_found);
1159 goto out;
1160 }
1161	if (ep->rxid == FC_XID_UNKNOWN)
1162 ep->rxid = ntohs(fh->fh_rx_id);
1163 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
1164 atomic_inc(&mp->stats.xid_not_found);
1165 goto rel;
1166 }
1167 if (ep->did != ntoh24(fh->fh_s_id) &&
1168 ep->did != FC_FID_FLOGI) {
1169 atomic_inc(&mp->stats.xid_not_found);
1170 goto rel;
1171 }
1172 sof = fr_sof(fp);
1173 if (fc_sof_is_init(sof)) {
1174 sp = fc_seq_start_next(&ep->seq);
1175 sp->id = fh->fh_seq_id;
1176 sp->ssb_stat |= SSB_ST_RESP;
1177 } else {
1178 sp = &ep->seq;
1179 if (sp->id != fh->fh_seq_id) {
1180 atomic_inc(&mp->stats.seq_not_found);
1181 goto rel;
1182 }
1183 }
1184 f_ctl = ntoh24(fh->fh_f_ctl);
1185 fr_seq(fp) = sp;
1186 if (f_ctl & FC_FC_SEQ_INIT)
1187 ep->esb_stat |= ESB_ST_SEQ_INIT;
1188
1189 if (fc_sof_needs_ack(sof))
1190 fc_seq_send_ack(sp, fp);
1191 resp = ep->resp;
1192 ex_resp_arg = ep->arg;
1193
1194 if (fh->fh_type != FC_TYPE_FCP && fr_eof(fp) == FC_EOF_T &&
1195 (f_ctl & (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) ==
1196 (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) {
1197 spin_lock_bh(&ep->ex_lock);
1198 rc = fc_exch_done_locked(ep);
1199 WARN_ON(fc_seq_exch(sp) != ep);
1200 spin_unlock_bh(&ep->ex_lock);
1201 if (!rc)
1202 fc_exch_mgr_delete_ep(ep);
1203 }
1204
1205 /*
1206 * Call the receive function.
1207 * The sequence is held (has a refcnt) for us,
1208 * but not for the receive function.
1209 *
1210 * The receive function may allocate a new sequence
1211 * over the old one, so we shouldn't change the
1212 * sequence after this.
1213 *
1214 * The frame will be freed by the receive function.
1215 * If new exch resp handler is valid then call that
1216 * first.
1217 */
1218 if (resp)
1219 resp(sp, fp, ex_resp_arg);
1220 else
1221 fc_frame_free(fp);
1222 fc_exch_release(ep);
1223 return;
1224rel:
1225 fc_exch_release(ep);
1226out:
1227 fc_frame_free(fp);
1228}
1229
1230/*
1231 * Handle receive for a sequence where other end is responding to our sequence.
1232 */
1233static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
1234{
1235 struct fc_seq *sp;
1236
1237 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
1238 if (!sp) {
1239 atomic_inc(&mp->stats.xid_not_found);
1240		FC_EM_DBG(mp, "seq lookup failed\n");
1241	} else {
1242		atomic_inc(&mp->stats.non_bls_resp);
1243		FC_EM_DBG(mp, "non-BLS response to sequence");
1244	}
1245 fc_frame_free(fp);
1246}
1247
1248/*
1249 * Handle the response to an ABTS for exchange or sequence.
1250 * This can be BA_ACC or BA_RJT.
1251 */
1252static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp)
1253{
1254 void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg);
1255 void *ex_resp_arg;
1256 struct fc_frame_header *fh;
1257 struct fc_ba_acc *ap;
1258 struct fc_seq *sp;
1259 u16 low;
1260 u16 high;
1261 int rc = 1, has_rec = 0;
1262
1263 fh = fc_frame_header_get(fp);
1264	FC_EXCH_DBG(ep, "exch: BLS rctl %x - %s\n", fh->fh_r_ctl,
1265		    fc_exch_rctl_name(fh->fh_r_ctl));
1266
1267 if (cancel_delayed_work_sync(&ep->timeout_work))
1268 fc_exch_release(ep); /* release from pending timer hold */
1269
1270 spin_lock_bh(&ep->ex_lock);
1271 switch (fh->fh_r_ctl) {
1272 case FC_RCTL_BA_ACC:
1273 ap = fc_frame_payload_get(fp, sizeof(*ap));
1274 if (!ap)
1275 break;
1276
1277 /*
1278 * Decide whether to establish a Recovery Qualifier.
1279 * We do this if there is a non-empty SEQ_CNT range and
1280 * SEQ_ID is the same as the one we aborted.
1281 */
1282 low = ntohs(ap->ba_low_seq_cnt);
1283 high = ntohs(ap->ba_high_seq_cnt);
1284 if ((ep->esb_stat & ESB_ST_REC_QUAL) == 0 &&
1285 (ap->ba_seq_id_val != FC_BA_SEQ_ID_VAL ||
1286 ap->ba_seq_id == ep->seq_id) && low != high) {
1287 ep->esb_stat |= ESB_ST_REC_QUAL;
1288 fc_exch_hold(ep); /* hold for recovery qualifier */
1289 has_rec = 1;
1290 }
1291 break;
1292 case FC_RCTL_BA_RJT:
1293 break;
1294 default:
1295 break;
1296 }
1297
1298 resp = ep->resp;
1299 ex_resp_arg = ep->arg;
1300
1301	/* Do we need to do some other checks here? Can we reuse more of
1302	 * fc_exch_recv_seq_resp?
1303 */
1304 sp = &ep->seq;
1305 /*
1306 * do we want to check END_SEQ as well as LAST_SEQ here?
1307 */
1308 if (ep->fh_type != FC_TYPE_FCP &&
1309 ntoh24(fh->fh_f_ctl) & FC_FC_LAST_SEQ)
1310 rc = fc_exch_done_locked(ep);
1311 spin_unlock_bh(&ep->ex_lock);
1312 if (!rc)
1313 fc_exch_mgr_delete_ep(ep);
1314
1315 if (resp)
1316 resp(sp, fp, ex_resp_arg);
1317 else
1318 fc_frame_free(fp);
1319
1320 if (has_rec)
1321 fc_exch_timer_set(ep, ep->r_a_tov);
1322
1323}
1324
1325/*
1326 * Receive BLS sequence.
1327 * This is always a sequence initiated by the remote side.
1328 * We may be either the originator or recipient of the exchange.
1329 */
1330static void fc_exch_recv_bls(struct fc_exch_mgr *mp, struct fc_frame *fp)
1331{
1332 struct fc_frame_header *fh;
1333 struct fc_exch *ep;
1334 u32 f_ctl;
1335
1336 fh = fc_frame_header_get(fp);
1337 f_ctl = ntoh24(fh->fh_f_ctl);
1338 fr_seq(fp) = NULL;
1339
1340 ep = fc_exch_find(mp, (f_ctl & FC_FC_EX_CTX) ?
1341 ntohs(fh->fh_ox_id) : ntohs(fh->fh_rx_id));
1342 if (ep && (f_ctl & FC_FC_SEQ_INIT)) {
1343 spin_lock_bh(&ep->ex_lock);
1344 ep->esb_stat |= ESB_ST_SEQ_INIT;
1345 spin_unlock_bh(&ep->ex_lock);
1346 }
1347 if (f_ctl & FC_FC_SEQ_CTX) {
1348 /*
1349 * A response to a sequence we initiated.
1350 * This should only be ACKs for class 2 or F.
1351 */
1352 switch (fh->fh_r_ctl) {
1353 case FC_RCTL_ACK_1:
1354 case FC_RCTL_ACK_0:
1355 break;
1356 default:
1357			FC_EXCH_DBG(ep, "BLS rctl %x - %s received",
1358				    fh->fh_r_ctl,
1359				    fc_exch_rctl_name(fh->fh_r_ctl));
1360			break;
1361 }
1362 fc_frame_free(fp);
1363 } else {
1364 switch (fh->fh_r_ctl) {
1365 case FC_RCTL_BA_RJT:
1366 case FC_RCTL_BA_ACC:
1367 if (ep)
1368 fc_exch_abts_resp(ep, fp);
1369 else
1370 fc_frame_free(fp);
1371 break;
1372 case FC_RCTL_BA_ABTS:
1373 fc_exch_recv_abts(ep, fp);
1374 break;
1375 default: /* ignore junk */
1376 fc_frame_free(fp);
1377 break;
1378 }
1379 }
1380 if (ep)
1381 fc_exch_release(ep); /* release hold taken by fc_exch_find */
1382}
1383
1384/*
1385 * Accept sequence with LS_ACC.
1386 * If this fails due to allocation or transmit congestion, assume the
1387 * originator will repeat the sequence.
1388 */
1389static void fc_seq_ls_acc(struct fc_seq *req_sp)
1390{
1391 struct fc_seq *sp;
1392 struct fc_els_ls_acc *acc;
1393 struct fc_frame *fp;
1394
1395 sp = fc_seq_start_next(req_sp);
1396 fp = fc_frame_alloc(fc_seq_exch(sp)->lp, sizeof(*acc));
1397 if (fp) {
1398 acc = fc_frame_payload_get(fp, sizeof(*acc));
1399 memset(acc, 0, sizeof(*acc));
1400 acc->la_cmd = ELS_LS_ACC;
1401 fc_seq_send_last(sp, fp, FC_RCTL_ELS_REP, FC_TYPE_ELS);
1402 }
1403}
1404
1405/*
1406 * Reject sequence with ELS LS_RJT.
1407 * If this fails due to allocation or transmit congestion, assume the
1408 * originator will repeat the sequence.
1409 */
1410static void fc_seq_ls_rjt(struct fc_seq *req_sp, enum fc_els_rjt_reason reason,
1411 enum fc_els_rjt_explan explan)
1412{
1413 struct fc_seq *sp;
1414 struct fc_els_ls_rjt *rjt;
1415 struct fc_frame *fp;
1416
1417 sp = fc_seq_start_next(req_sp);
1418 fp = fc_frame_alloc(fc_seq_exch(sp)->lp, sizeof(*rjt));
1419 if (fp) {
1420 rjt = fc_frame_payload_get(fp, sizeof(*rjt));
1421 memset(rjt, 0, sizeof(*rjt));
1422 rjt->er_cmd = ELS_LS_RJT;
1423 rjt->er_reason = reason;
1424 rjt->er_explan = explan;
1425 fc_seq_send_last(sp, fp, FC_RCTL_ELS_REP, FC_TYPE_ELS);
1426 }
1427}
1428
1429static void fc_exch_reset(struct fc_exch *ep)
1430{
1431 struct fc_seq *sp;
1432 void (*resp)(struct fc_seq *, struct fc_frame *, void *);
1433 void *arg;
1434 int rc = 1;
1435
1436 spin_lock_bh(&ep->ex_lock);
1437 ep->state |= FC_EX_RST_CLEANUP;
1438 /*
1439 * we really want to call del_timer_sync, but cannot due
1440 * to the lport calling with the lport lock held (some resp
1441 * functions can also grab the lport lock which could cause
1442 * a deadlock).
1443 */
1444 if (cancel_delayed_work(&ep->timeout_work))
1445 atomic_dec(&ep->ex_refcnt); /* drop hold for timer */
1446 resp = ep->resp;
1447 ep->resp = NULL;
1448 if (ep->esb_stat & ESB_ST_REC_QUAL)
1449 atomic_dec(&ep->ex_refcnt); /* drop hold for rec_qual */
1450 ep->esb_stat &= ~ESB_ST_REC_QUAL;
1451 arg = ep->arg;
1452 sp = &ep->seq;
1453 rc = fc_exch_done_locked(ep);
1454 spin_unlock_bh(&ep->ex_lock);
1455 if (!rc)
1456 fc_exch_mgr_delete_ep(ep);
1457
1458 if (resp)
1459 resp(sp, ERR_PTR(-FC_EX_CLOSED), arg);
1460}
1461
1462/*
1463 * Reset an exchange manager, releasing all sequences and exchanges.
1464 * If sid is non-zero, reset only exchanges we source from that FID.
1465 * If did is non-zero, reset only exchanges destined to that FID.
1466 */
1467void fc_exch_mgr_reset(struct fc_lport *lp, u32 sid, u32 did)
1468{
1469 struct fc_exch *ep;
1470 struct fc_exch *next;
1471	struct fc_exch_mgr *mp = lp->emp;
1472
1473 spin_lock_bh(&mp->em_lock);
1474restart:
1475 list_for_each_entry_safe(ep, next, &mp->ex_list, ex_list) {
1476 if ((sid == 0 || sid == ep->sid) &&
1477 (did == 0 || did == ep->did)) {
1478 fc_exch_hold(ep);
1479 spin_unlock_bh(&mp->em_lock);
1480
1481 fc_exch_reset(ep);
1482
1483 fc_exch_release(ep);
1484 spin_lock_bh(&mp->em_lock);
1485
1486 /*
1487			 * must restart the loop in case multiple eps were
1488			 * released while the lock was down.
1489 */
1490 goto restart;
1491 }
1492 }
1493 spin_unlock_bh(&mp->em_lock);
1494}
1495EXPORT_SYMBOL(fc_exch_mgr_reset);
1496
1497/*
1498 * Handle incoming ELS REC - Read Exchange Concise.
1499 * Note that the requesting port may be different than the S_ID in the request.
1500 */
1501static void fc_exch_els_rec(struct fc_seq *sp, struct fc_frame *rfp)
1502{
1503 struct fc_frame *fp;
1504 struct fc_exch *ep;
1505 struct fc_exch_mgr *em;
1506 struct fc_els_rec *rp;
1507 struct fc_els_rec_acc *acc;
1508 enum fc_els_rjt_reason reason = ELS_RJT_LOGIC;
1509 enum fc_els_rjt_explan explan;
1510 u32 sid;
1511 u16 rxid;
1512 u16 oxid;
1513
1514 rp = fc_frame_payload_get(rfp, sizeof(*rp));
1515 explan = ELS_EXPL_INV_LEN;
1516 if (!rp)
1517 goto reject;
1518 sid = ntoh24(rp->rec_s_id);
1519 rxid = ntohs(rp->rec_rx_id);
1520 oxid = ntohs(rp->rec_ox_id);
1521
1522 /*
1523 * Currently it's hard to find the local S_ID from the exchange
1524 * manager. This will eventually be fixed, but for now it's easier
1525 * to look up the subject exchange twice, once as if we were
1526 * the initiator, and then again if we weren't.
1527 */
1528 em = fc_seq_exch(sp)->em;
1529 ep = fc_exch_find(em, oxid);
1530 explan = ELS_EXPL_OXID_RXID;
1531 if (ep && ep->oid == sid) {
1532 if (ep->rxid != FC_XID_UNKNOWN &&
1533 rxid != FC_XID_UNKNOWN &&
1534 ep->rxid != rxid)
1535 goto rel;
1536 } else {
1537 if (ep)
1538 fc_exch_release(ep);
1539 ep = NULL;
1540 if (rxid != FC_XID_UNKNOWN)
1541 ep = fc_exch_find(em, rxid);
1542 if (!ep)
1543 goto reject;
1544 }
1545
1546 fp = fc_frame_alloc(fc_seq_exch(sp)->lp, sizeof(*acc));
1547 if (!fp) {
1548 fc_exch_done(sp);
1549 goto out;
1550 }
1551 sp = fc_seq_start_next(sp);
1552 acc = fc_frame_payload_get(fp, sizeof(*acc));
1553 memset(acc, 0, sizeof(*acc));
1554 acc->reca_cmd = ELS_LS_ACC;
1555 acc->reca_ox_id = rp->rec_ox_id;
1556 memcpy(acc->reca_ofid, rp->rec_s_id, 3);
1557 acc->reca_rx_id = htons(ep->rxid);
1558 if (ep->sid == ep->oid)
1559 hton24(acc->reca_rfid, ep->did);
1560 else
1561 hton24(acc->reca_rfid, ep->sid);
1562 acc->reca_fc4value = htonl(ep->seq.rec_data);
1563 acc->reca_e_stat = htonl(ep->esb_stat & (ESB_ST_RESP |
1564 ESB_ST_SEQ_INIT |
1565 ESB_ST_COMPLETE));
1566 sp = fc_seq_start_next(sp);
1567 fc_seq_send_last(sp, fp, FC_RCTL_ELS_REP, FC_TYPE_ELS);
1568out:
1569 fc_exch_release(ep);
1570 fc_frame_free(rfp);
1571 return;
1572
1573rel:
1574 fc_exch_release(ep);
1575reject:
1576 fc_seq_ls_rjt(sp, reason, explan);
1577 fc_frame_free(rfp);
1578}
1579
1580/*
1581 * Handle response from RRQ.
1582 * Not much to do here, really.
1583 * Should report errors.
1584 *
1585 * TODO: fix error handler.
1586 */
1587static void fc_exch_rrq_resp(struct fc_seq *sp, struct fc_frame *fp, void *arg)
1588{
1589 struct fc_exch *aborted_ep = arg;
1590 unsigned int op;
1591
1592 if (IS_ERR(fp)) {
1593 int err = PTR_ERR(fp);
1594
1595		if (err == -FC_EX_CLOSED || err == -FC_EX_TIMEOUT)
1596			goto cleanup;
1597		FC_EXCH_DBG(aborted_ep, "Cannot process RRQ, "
1598			    "frame error %d\n", err);
1599		return;
1600 }
1601
1602 op = fc_frame_payload_op(fp);
1603 fc_frame_free(fp);
1604
1605 switch (op) {
1606 case ELS_LS_RJT:
1607		FC_EXCH_DBG(aborted_ep, "LS_RJT for RRQ");
1608		/* fall through */
1609 case ELS_LS_ACC:
1610 goto cleanup;
1611 default:
1612		FC_EXCH_DBG(aborted_ep, "unexpected response op %x "
1613			    "for RRQ", op);
1614		return;
1615 }
1616
1617cleanup:
1618 fc_exch_done(&aborted_ep->seq);
1619 /* drop hold for rec qual */
1620 fc_exch_release(aborted_ep);
1621}
1622
1623/*
1624 * Send ELS RRQ - Reinstate Recovery Qualifier.
1625 * This tells the remote port to stop blocking the use of
1626 * the exchange and the seq_cnt range.
1627 */
1628static void fc_exch_rrq(struct fc_exch *ep)
1629{
1630 struct fc_lport *lp;
1631 struct fc_els_rrq *rrq;
1632 struct fc_frame *fp;
1633	u32 did;
1634
1635 lp = ep->lp;
1636
1637 fp = fc_frame_alloc(lp, sizeof(*rrq));
1638 if (!fp)
1639		goto retry;
1640
1641	rrq = fc_frame_payload_get(fp, sizeof(*rrq));
1642 memset(rrq, 0, sizeof(*rrq));
1643 rrq->rrq_cmd = ELS_RRQ;
1644 hton24(rrq->rrq_s_id, ep->sid);
1645 rrq->rrq_ox_id = htons(ep->oxid);
1646 rrq->rrq_rx_id = htons(ep->rxid);
1647
1648 did = ep->did;
1649 if (ep->esb_stat & ESB_ST_RESP)
1650 did = ep->sid;
1651
1652 fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, did,
1653 fc_host_port_id(lp->host), FC_TYPE_ELS,
1654 FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
1655
1656	if (fc_exch_seq_send(lp, fp, fc_exch_rrq_resp, NULL, ep, lp->e_d_tov))
1657 return;
1658
1659retry:
1660 spin_lock_bh(&ep->ex_lock);
1661 if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE)) {
1662 spin_unlock_bh(&ep->ex_lock);
1663 /* drop hold for rec qual */
1664 fc_exch_release(ep);
1665		return;
1666	}
1667	ep->esb_stat |= ESB_ST_REC_QUAL;
1668 fc_exch_timer_set_locked(ep, ep->r_a_tov);
1669 spin_unlock_bh(&ep->ex_lock);
1670}
1671
1672
1673/*
1674 * Handle incoming ELS RRQ - Reset Recovery Qualifier.
1675 * Handle incoming ELS RRQ - Reinstate Recovery Qualifier.
1676static void fc_exch_els_rrq(struct fc_seq *sp, struct fc_frame *fp)
1677{
1678 struct fc_exch *ep; /* request or subject exchange */
1679 struct fc_els_rrq *rp;
1680 u32 sid;
1681 u16 xid;
1682 enum fc_els_rjt_explan explan;
1683
1684 rp = fc_frame_payload_get(fp, sizeof(*rp));
1685 explan = ELS_EXPL_INV_LEN;
1686 if (!rp)
1687 goto reject;
1688
1689 /*
1690 * lookup subject exchange.
1691 */
1692 ep = fc_seq_exch(sp);
1693 sid = ntoh24(rp->rrq_s_id); /* subject source */
1694 xid = ep->did == sid ? ntohs(rp->rrq_ox_id) : ntohs(rp->rrq_rx_id);
1695 ep = fc_exch_find(ep->em, xid);
1696
1697 explan = ELS_EXPL_OXID_RXID;
1698 if (!ep)
1699 goto reject;
1700 spin_lock_bh(&ep->ex_lock);
1701 if (ep->oxid != ntohs(rp->rrq_ox_id))
1702 goto unlock_reject;
1703 if (ep->rxid != ntohs(rp->rrq_rx_id) &&
1704 ep->rxid != FC_XID_UNKNOWN)
1705 goto unlock_reject;
1706 explan = ELS_EXPL_SID;
1707 if (ep->sid != sid)
1708 goto unlock_reject;
1709
1710 /*
1711 * Clear Recovery Qualifier state, and cancel timer if complete.
1712 */
1713 if (ep->esb_stat & ESB_ST_REC_QUAL) {
1714 ep->esb_stat &= ~ESB_ST_REC_QUAL;
1715 atomic_dec(&ep->ex_refcnt); /* drop hold for rec qual */
1716 }
1717 if (ep->esb_stat & ESB_ST_COMPLETE) {
1718 if (cancel_delayed_work(&ep->timeout_work))
1719 atomic_dec(&ep->ex_refcnt); /* drop timer hold */
1720 }
1721
1722 spin_unlock_bh(&ep->ex_lock);
1723
1724 /*
1725 * Send LS_ACC.
1726 */
1727 fc_seq_ls_acc(sp);
1728 fc_frame_free(fp);
1729 return;
1730
1731unlock_reject:
1732 spin_unlock_bh(&ep->ex_lock);
1733 fc_exch_release(ep); /* drop hold from fc_exch_find */
1734reject:
1735 fc_seq_ls_rjt(sp, ELS_RJT_LOGIC, explan);
1736 fc_frame_free(fp);
1737}
1738
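/*
 * fc_exch_mgr_add() - add an exchange manager to a local port's list of
 * EM anchors, taking a reference on the EM and recording the optional
 * match routine supplied by the caller.
 */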
1739struct fc_exch_mgr_anchor *fc_exch_mgr_add(struct fc_lport *lport,
1740 struct fc_exch_mgr *mp,
1741 bool (*match)(struct fc_frame *))
1742{
1743 struct fc_exch_mgr_anchor *ema;
1744
1745 ema = kmalloc(sizeof(*ema), GFP_ATOMIC);
1746 if (!ema)
1747 return ema;
1748
1749 ema->mp = mp;
1750 ema->match = match;
1751 /* add EM anchor to EM anchors list */
1752 list_add_tail(&ema->ema_list, &lport->ema_list);
1753 kref_get(&mp->kref);
1754 return ema;
1755}
1756EXPORT_SYMBOL(fc_exch_mgr_add);
1757
1758static void fc_exch_mgr_destroy(struct kref *kref)
1759{
1760 struct fc_exch_mgr *mp = container_of(kref, struct fc_exch_mgr, kref);
1761
1762 /*
1763 * The total exch count must be zero
1764 * before freeing exchange manager.
1765 */
1766 WARN_ON(mp->total_exches != 0);
1767 mempool_destroy(mp->ep_pool);
1768 kfree(mp);
1769}
1770
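/*
 * fc_exch_mgr_del() - remove an EM anchor from its local port's list and
 * drop the anchor's reference; the EM itself is freed when the last
 * reference goes away.
 */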
1771void fc_exch_mgr_del(struct fc_exch_mgr_anchor *ema)
1772{
1773 /* remove EM anchor from EM anchors list */
1774 list_del(&ema->ema_list);
1775 kref_put(&ema->mp->kref, fc_exch_mgr_destroy);
1776 kfree(ema);
1777}
1778EXPORT_SYMBOL(fc_exch_mgr_del);
1779
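/*
 * fc_exch_mgr_alloc() - allocate an exchange manager covering exchange IDs
 * min_xid through max_xid, splitting the range for read requests when the
 * lport has LRO/DDP offload enabled.
 */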
1780struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lp,
1781 enum fc_class class,
1782 u16 min_xid, u16 max_xid)
1783{
1784 struct fc_exch_mgr *mp;
1785 size_t len;
1786
1787 if (max_xid <= min_xid || min_xid == 0 || max_xid == FC_XID_UNKNOWN) {
1788		FC_LPORT_DBG(lp, "Invalid min_xid 0x%x and max_xid 0x%x\n",
1789			     min_xid, max_xid);
1790		return NULL;
1791 }
1792
1793 /*
1794 * Memory need for EM
1795 */
1796#define xid_ok(i, m1, m2) (((i) >= (m1)) && ((i) <= (m2)))
1797 len = (max_xid - min_xid + 1) * (sizeof(struct fc_exch *));
1798 len += sizeof(struct fc_exch_mgr);
1799
1800 mp = kzalloc(len, GFP_ATOMIC);
1801 if (!mp)
1802 return NULL;
1803
1804 mp->class = class;
1805 mp->total_exches = 0;
1806 mp->exches = (struct fc_exch **)(mp + 1);
1807 mp->lp = lp;
1808 /* adjust em exch xid range for offload */
1809 mp->min_xid = min_xid;
1810 mp->max_xid = max_xid;
1811 mp->last_xid = min_xid - 1;
1812 mp->max_read = 0;
1813 mp->last_read = 0;
1814 if (lp->lro_enabled && xid_ok(lp->lro_xid, min_xid, max_xid)) {
1815 mp->max_read = lp->lro_xid;
1816 mp->last_read = min_xid - 1;
1817 mp->last_xid = mp->max_read;
1818 } else {
1819 /* disable lro if no xid control over read */
1820 lp->lro_enabled = 0;
1821 }
1822
1823 INIT_LIST_HEAD(&mp->ex_list);
1824 spin_lock_init(&mp->em_lock);
1825
1826 mp->ep_pool = mempool_create_slab_pool(2, fc_em_cachep);
1827 if (!mp->ep_pool)
1828 goto free_mp;
1829
1830 return mp;
1831
1832free_mp:
1833 kfree(mp);
1834 return NULL;
1835}
1836EXPORT_SYMBOL(fc_exch_mgr_alloc);
1837
1838void fc_exch_mgr_free(struct fc_exch_mgr *mp)
1839{
1840 WARN_ON(!mp);
1841 /*
1842 * The total exch count must be zero
1843 * before freeing exchange manager.
1844 */
1845 WARN_ON(mp->total_exches != 0);
1846 mempool_destroy(mp->ep_pool);
1847 kfree(mp);
1848}
1849EXPORT_SYMBOL(fc_exch_mgr_free);
1850
1851struct fc_exch *fc_exch_get(struct fc_lport *lp, struct fc_frame *fp)
1852{
1853 if (!lp || !lp->emp)
1854 return NULL;
1855
1856 return fc_exch_alloc(lp->emp, fp, 0);
1857}
1858EXPORT_SYMBOL(fc_exch_get);
1859
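/*
 * fc_exch_seq_send() - allocate a new exchange and sequence, register the
 * response and destructor handlers, and send the first frame.
 * Returns the new sequence, or NULL on failure.
 */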
1860struct fc_seq *fc_exch_seq_send(struct fc_lport *lp,
1861 struct fc_frame *fp,
1862 void (*resp)(struct fc_seq *,
1863 struct fc_frame *fp,
1864 void *arg),
1865 void (*destructor)(struct fc_seq *, void *),
1866 void *arg, u32 timer_msec)
1867{
1868 struct fc_exch *ep;
1869 struct fc_seq *sp = NULL;
1870 struct fc_frame_header *fh;
1871 int rc = 1;
1872
1873 ep = lp->tt.exch_get(lp, fp);
1874 if (!ep) {
1875 fc_frame_free(fp);
1876 return NULL;
1877 }
1878 ep->esb_stat |= ESB_ST_SEQ_INIT;
1879 fh = fc_frame_header_get(fp);
1880 fc_exch_set_addr(ep, ntoh24(fh->fh_s_id), ntoh24(fh->fh_d_id));
1881 ep->resp = resp;
1882 ep->destructor = destructor;
1883 ep->arg = arg;
1884 ep->r_a_tov = FC_DEF_R_A_TOV;
1885 ep->lp = lp;
1886 sp = &ep->seq;
1887
1888	ep->fh_type = fh->fh_type; /* save for possible timeout handling */
1889 ep->f_ctl = ntoh24(fh->fh_f_ctl);
1890 fc_exch_setup_hdr(ep, fp, ep->f_ctl);
1891 sp->cnt++;
1892
1893	fc_fcp_ddp_setup(fr_fsp(fp), ep->xid);
1894
1895	if (unlikely(lp->tt.frame_send(lp, fp)))
1896 goto err;
1897
1898 if (timer_msec)
1899 fc_exch_timer_set_locked(ep, timer_msec);
1900 ep->f_ctl &= ~FC_FC_FIRST_SEQ; /* not first seq */
1901
1902 if (ep->f_ctl & FC_FC_SEQ_INIT)
1903 ep->esb_stat &= ~ESB_ST_SEQ_INIT;
1904 spin_unlock_bh(&ep->ex_lock);
1905 return sp;
1906err:
1907 rc = fc_exch_done_locked(ep);
1908 spin_unlock_bh(&ep->ex_lock);
1909 if (!rc)
1910 fc_exch_mgr_delete_ep(ep);
1911 return NULL;
1912}
1913EXPORT_SYMBOL(fc_exch_seq_send);
1914
1915/*
1916 * Receive a frame
1917 */
1918void fc_exch_recv(struct fc_lport *lp, struct fc_exch_mgr *mp,
1919 struct fc_frame *fp)
1920{
1921 struct fc_frame_header *fh = fc_frame_header_get(fp);
1922 u32 f_ctl;
1923
1924 /* lport lock ? */
1925	if (!lp || !mp || lp->state == LPORT_ST_DISABLED) {
1926		FC_LPORT_DBG(lp, "Receiving frames for an lport that "
1927			     "has not been initialized correctly\n");
1928		fc_frame_free(fp);
1929 return;
1930 }
1931
1932 /*
1933 * If frame is marked invalid, just drop it.
1934 */
1935 f_ctl = ntoh24(fh->fh_f_ctl);
1936 switch (fr_eof(fp)) {
1937 case FC_EOF_T:
1938 if (f_ctl & FC_FC_END_SEQ)
1939 skb_trim(fp_skb(fp), fr_len(fp) - FC_FC_FILL(f_ctl));
1940 /* fall through */
1941 case FC_EOF_N:
1942 if (fh->fh_type == FC_TYPE_BLS)
1943 fc_exch_recv_bls(mp, fp);
1944 else if ((f_ctl & (FC_FC_EX_CTX | FC_FC_SEQ_CTX)) ==
1945 FC_FC_EX_CTX)
1946 fc_exch_recv_seq_resp(mp, fp);
1947 else if (f_ctl & FC_FC_SEQ_CTX)
1948 fc_exch_recv_resp(mp, fp);
1949 else
1950 fc_exch_recv_req(lp, mp, fp);
1951 break;
1952 default:
1953		FC_EM_DBG(mp, "dropping invalid frame (eof %x)", fr_eof(fp));
1954		fc_frame_free(fp);
1955 break;
1956 }
1957}
1958EXPORT_SYMBOL(fc_exch_recv);
1959
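/*
 * fc_exch_init() - fill in any exchange-layer entry points that the LLD
 * has not already overridden in the lport's transport template.
 */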
1960int fc_exch_init(struct fc_lport *lp)
1961{
1962 if (!lp->tt.exch_get) {
1963 /*
1964 * exch_put() should be NULL if
1965 * exch_get() is NULL
1966 */
1967 WARN_ON(lp->tt.exch_put);
1968 lp->tt.exch_get = fc_exch_get;
1969 }
1970
1971 if (!lp->tt.seq_start_next)
1972 lp->tt.seq_start_next = fc_seq_start_next;
1973
1974 if (!lp->tt.exch_seq_send)
1975 lp->tt.exch_seq_send = fc_exch_seq_send;
1976
1977 if (!lp->tt.seq_send)
1978 lp->tt.seq_send = fc_seq_send;
1979
1980 if (!lp->tt.seq_els_rsp_send)
1981 lp->tt.seq_els_rsp_send = fc_seq_els_rsp_send;
1982
1983 if (!lp->tt.exch_done)
1984 lp->tt.exch_done = fc_exch_done;
1985
1986 if (!lp->tt.exch_mgr_reset)
1987 lp->tt.exch_mgr_reset = fc_exch_mgr_reset;
1988
1989 if (!lp->tt.seq_exch_abort)
1990 lp->tt.seq_exch_abort = fc_seq_exch_abort;
1991
1992 return 0;
1993}
1994EXPORT_SYMBOL(fc_exch_init);
1995
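/*
 * fc_setup_exch_mgr() and fc_destroy_exch_mgr() create and destroy the slab
 * cache used for exchange allocation at module load and unload time.
 */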
1996int fc_setup_exch_mgr(void)
1997{
1998 fc_em_cachep = kmem_cache_create("libfc_em", sizeof(struct fc_exch),
1999 0, SLAB_HWCACHE_ALIGN, NULL);
2000 if (!fc_em_cachep)
2001 return -ENOMEM;
2002 return 0;
2003}
2004
2005void fc_destroy_exch_mgr(void)
2006{
2007 kmem_cache_destroy(fc_em_cachep);
2008}