/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/timer.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>

#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/route.h>
#include <net/tcp.h>

#include "iw_cxgb4.h"

static char *states[] = {
        "idle",
        "listen",
        "connecting",
        "mpa_wait_req",
        "mpa_req_sent",
        "mpa_req_rcvd",
        "mpa_rep_sent",
        "fpdu_mode",
        "aborting",
        "closing",
        "moribund",
        "dead",
        NULL,
};

static int nocong;
module_param(nocong, int, 0644);
MODULE_PARM_DESC(nocong, "Turn off congestion control (default=0)");

static int enable_ecn;
module_param(enable_ecn, int, 0644);
MODULE_PARM_DESC(enable_ecn, "Enable ECN (default=0/disabled)");

static int dack_mode = 1;
module_param(dack_mode, int, 0644);
MODULE_PARM_DESC(dack_mode, "Delayed ack mode (default=1)");

int c4iw_max_read_depth = 8;
module_param(c4iw_max_read_depth, int, 0644);
MODULE_PARM_DESC(c4iw_max_read_depth, "Per-connection max ORD/IRD (default=8)");

static int enable_tcp_timestamps;
module_param(enable_tcp_timestamps, int, 0644);
MODULE_PARM_DESC(enable_tcp_timestamps, "Enable tcp timestamps (default=0)");

static int enable_tcp_sack;
module_param(enable_tcp_sack, int, 0644);
MODULE_PARM_DESC(enable_tcp_sack, "Enable tcp SACK (default=0)");

static int enable_tcp_window_scaling = 1;
module_param(enable_tcp_window_scaling, int, 0644);
MODULE_PARM_DESC(enable_tcp_window_scaling,
                 "Enable tcp window scaling (default=1)");

int c4iw_debug;
module_param(c4iw_debug, int, 0644);
MODULE_PARM_DESC(c4iw_debug, "Enable debug logging (default=0)");

static int peer2peer;
module_param(peer2peer, int, 0644);
MODULE_PARM_DESC(peer2peer, "Support peer2peer ULPs (default=0)");

static int p2p_type = FW_RI_INIT_P2PTYPE_READ_REQ;
module_param(p2p_type, int, 0644);
MODULE_PARM_DESC(p2p_type, "RDMAP opcode to use for the RTR message: "
                           "1=RDMA_READ 0=RDMA_WRITE (default 1)");

static int ep_timeout_secs = 60;
module_param(ep_timeout_secs, int, 0644);
MODULE_PARM_DESC(ep_timeout_secs, "CM Endpoint operation timeout "
                                  "in seconds (default=60)");

static int mpa_rev = 1;
module_param(mpa_rev, int, 0644);
MODULE_PARM_DESC(mpa_rev, "MPA Revision, 0 supports amso1100, "
                 "1 is RFC 5044 spec compliant, 2 is IETF MPA Peer Connect Draft"
                 " compliant (default=1)");

static int markers_enabled;
module_param(markers_enabled, int, 0644);
MODULE_PARM_DESC(markers_enabled, "Enable MPA MARKERS (default(0)=disabled)");

static int crc_enabled = 1;
module_param(crc_enabled, int, 0644);
MODULE_PARM_DESC(crc_enabled, "Enable MPA CRC (default(1)=enabled)");

static int rcv_win = 256 * 1024;
module_param(rcv_win, int, 0644);
MODULE_PARM_DESC(rcv_win, "TCP receive window in bytes (default=256KB)");

static int snd_win = 128 * 1024;
module_param(snd_win, int, 0644);
MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=128KB)");

static struct workqueue_struct *workq;

static struct sk_buff_head rxq;

static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
static void ep_timeout(unsigned long arg);
static void connect_reply_upcall(struct c4iw_ep *ep, int status);

static LIST_HEAD(timeout_list);
static spinlock_t timeout_lock;

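/*
 * The endpoint pins its QP while the connection state machine may still
 * touch it; the QP_REFERENCED flag seems intended to make the teardown
 * path (_c4iw_free_ep below) drop that reference exactly once.
 */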
static void deref_qp(struct c4iw_ep *ep)
{
        c4iw_qp_rem_ref(&ep->com.qp->ibqp);
        clear_bit(QP_REFERENCED, &ep->com.flags);
}

static void ref_qp(struct c4iw_ep *ep)
{
        set_bit(QP_REFERENCED, &ep->com.flags);
        c4iw_qp_add_ref(&ep->com.qp->ibqp);
}

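/*
 * Endpoint timer handling: the running timer holds its own reference on
 * the ep (taken in start_ep_timer), and the TIMEOUT bit arbitrates
 * whether stop_ep_timer() or the expiry handler puts it, so the
 * reference appears to be dropped exactly once even if the timer has
 * already fired.
 */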
static void start_ep_timer(struct c4iw_ep *ep)
{
        PDBG("%s ep %p\n", __func__, ep);
        if (timer_pending(&ep->timer)) {
                pr_err("%s timer already started! ep %p\n",
                       __func__, ep);
                return;
        }
        clear_bit(TIMEOUT, &ep->com.flags);
        c4iw_get_ep(&ep->com);
        ep->timer.expires = jiffies + ep_timeout_secs * HZ;
        ep->timer.data = (unsigned long)ep;
        ep->timer.function = ep_timeout;
        add_timer(&ep->timer);
}

static void stop_ep_timer(struct c4iw_ep *ep)
{
        PDBG("%s ep %p stopping\n", __func__, ep);
        del_timer_sync(&ep->timer);
        if (!test_and_set_bit(TIMEOUT, &ep->com.flags))
                c4iw_put_ep(&ep->com);
}

static int c4iw_l2t_send(struct c4iw_rdev *rdev, struct sk_buff *skb,
                         struct l2t_entry *l2e)
{
        int error = 0;

        if (c4iw_fatal_error(rdev)) {
                kfree_skb(skb);
                PDBG("%s - device in error state - dropping\n", __func__);
                return -EIO;
        }
        error = cxgb4_l2t_send(rdev->lldi.ports[0], skb, l2e);
        if (error < 0)
                kfree_skb(skb);
        return error < 0 ? error : 0;
}

int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb)
{
        int error = 0;

        if (c4iw_fatal_error(rdev)) {
                kfree_skb(skb);
                PDBG("%s - device in error state - dropping\n", __func__);
                return -EIO;
        }
        error = cxgb4_ofld_send(rdev->lldi.ports[0], skb);
        if (error < 0)
                kfree_skb(skb);
        return error < 0 ? error : 0;
}

static void release_tid(struct c4iw_rdev *rdev, u32 hwtid, struct sk_buff *skb)
{
        struct cpl_tid_release *req;

        skb = get_skb(skb, sizeof(*req), GFP_KERNEL);
        if (!skb)
                return;
        req = (struct cpl_tid_release *) skb_put(skb, sizeof(*req));
        INIT_TP_WR(req, hwtid);
        OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_TID_RELEASE, hwtid));
        set_wr_txq(skb, CPL_PRIORITY_SETUP, 0);
        c4iw_ofld_send(rdev, skb);
        return;
}

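/*
 * Derive the effective MSS from the TCP options echoed in the CPL:
 * the firmware MTU table entry minus 40 bytes of IPv4+TCP headers,
 * minus another 12 if the peer negotiated TCP timestamps, floored at
 * 128. For example, a 1500-byte MTU gives emss = 1460, or 1448 with
 * timestamps (illustrative arithmetic, not from the original source).
 */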
static void set_emss(struct c4iw_ep *ep, u16 opt)
{
        ep->emss = ep->com.dev->rdev.lldi.mtus[GET_TCPOPT_MSS(opt)] - 40;
        ep->mss = ep->emss;
        if (GET_TCPOPT_TSTAMP(opt))
                ep->emss -= 12;
        if (ep->emss < 128)
                ep->emss = 128;
        PDBG("%s mss_idx %u mss %u emss=%u\n", __func__, GET_TCPOPT_MSS(opt),
             ep->mss, ep->emss);
}

static enum c4iw_ep_state state_read(struct c4iw_ep_common *epc)
{
        enum c4iw_ep_state state;

        mutex_lock(&epc->mutex);
        state = epc->state;
        mutex_unlock(&epc->mutex);
        return state;
}

static void __state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
{
        epc->state = new;
}

static void state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
{
        mutex_lock(&epc->mutex);
        PDBG("%s - %s -> %s\n", __func__, states[epc->state], states[new]);
        __state_set(epc, new);
        mutex_unlock(&epc->mutex);
        return;
}

static void *alloc_ep(int size, gfp_t gfp)
{
        struct c4iw_ep_common *epc;

        epc = kzalloc(size, gfp);
        if (epc) {
                kref_init(&epc->kref);
                mutex_init(&epc->mutex);
                c4iw_init_wr_wait(&epc->wr_wait);
        }
        PDBG("%s alloc ep %p\n", __func__, epc);
        return epc;
}

void _c4iw_free_ep(struct kref *kref)
{
        struct c4iw_ep *ep;

        ep = container_of(kref, struct c4iw_ep, com.kref);
        PDBG("%s ep %p state %s\n", __func__, ep, states[state_read(&ep->com)]);
        if (test_bit(QP_REFERENCED, &ep->com.flags))
                deref_qp(ep);
        if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) {
                remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid);
                cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid);
                dst_release(ep->dst);
                cxgb4_l2t_release(ep->l2t);
        }
        kfree(ep);
}

static void release_ep_resources(struct c4iw_ep *ep)
{
        set_bit(RELEASE_RESOURCES, &ep->com.flags);
        c4iw_put_ep(&ep->com);
}

static int status2errno(int status)
{
        switch (status) {
        case CPL_ERR_NONE:
                return 0;
        case CPL_ERR_CONN_RESET:
                return -ECONNRESET;
        case CPL_ERR_ARP_MISS:
                return -EHOSTUNREACH;
        case CPL_ERR_CONN_TIMEDOUT:
                return -ETIMEDOUT;
        case CPL_ERR_TCAM_FULL:
                return -ENOMEM;
        case CPL_ERR_CONN_EXIST:
                return -EADDRINUSE;
        default:
                return -EIO;
        }
}

/*
 * Try and reuse skbs already allocated...
 */
static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp)
{
        if (skb && !skb_is_nonlinear(skb) && !skb_cloned(skb)) {
                skb_trim(skb, 0);
                skb_get(skb);
                skb_reset_transport_header(skb);
        } else {
                skb = alloc_skb(len, gfp);
        }
        return skb;
}

static struct rtable *find_route(struct c4iw_dev *dev, __be32 local_ip,
                                 __be32 peer_ip, __be16 local_port,
                                 __be16 peer_port, u8 tos)
{
        struct rtable *rt;
        struct flowi4 fl4;

        rt = ip_route_output_ports(&init_net, &fl4, NULL, peer_ip, local_ip,
                                   peer_port, local_port, IPPROTO_TCP,
                                   tos, 0);
        if (IS_ERR(rt))
                return NULL;
        return rt;
}

static void arp_failure_discard(void *handle, struct sk_buff *skb)
{
        PDBG("%s c4iw_dev %p\n", __func__, handle);
        kfree_skb(skb);
}

/*
 * Handle an ARP failure for an active open.
 */
static void act_open_req_arp_failure(void *handle, struct sk_buff *skb)
{
        printk(KERN_ERR MOD "ARP failure during connect\n");
        kfree_skb(skb);
}

/*
 * Handle an ARP failure for a CPL_ABORT_REQ. Change it into a no RST variant
 * and send it along.
 */
static void abort_arp_failure(void *handle, struct sk_buff *skb)
{
        struct c4iw_rdev *rdev = handle;
        struct cpl_abort_req *req = cplhdr(skb);

        PDBG("%s rdev %p\n", __func__, rdev);
        req->cmd = CPL_ABORT_NO_RST;
        c4iw_ofld_send(rdev, skb);
}

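/*
 * Seed the firmware's per-flow state with a FLOWC work request before
 * any MPA traffic: PCI function, tx channel/port, ingress queue, the
 * initial send/receive sequence numbers, send buffer size and MSS.
 * Eight parameters are advertised in NPARAMS; the ninth mnemval slot
 * looks like pure padding to keep the WR a multiple of 16 bytes. Note
 * that get_skb() can return NULL and is not checked in this version.
 */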
static void send_flowc(struct c4iw_ep *ep, struct sk_buff *skb)
{
        unsigned int flowclen = 80;
        struct fw_flowc_wr *flowc;
        int i;

        skb = get_skb(skb, flowclen, GFP_KERNEL);
        flowc = (struct fw_flowc_wr *)__skb_put(skb, flowclen);

        flowc->op_to_nparams = cpu_to_be32(FW_WR_OP(FW_FLOWC_WR) |
                                           FW_FLOWC_WR_NPARAMS(8));
        flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16(DIV_ROUND_UP(flowclen,
                                          16)) | FW_WR_FLOWID(ep->hwtid));

        flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
        flowc->mnemval[0].val = cpu_to_be32(PCI_FUNC(ep->com.dev->rdev.lldi.pdev->devfn) << 8);
        flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
        flowc->mnemval[1].val = cpu_to_be32(ep->tx_chan);
        flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
        flowc->mnemval[2].val = cpu_to_be32(ep->tx_chan);
        flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
        flowc->mnemval[3].val = cpu_to_be32(ep->rss_qid);
        flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
        flowc->mnemval[4].val = cpu_to_be32(ep->snd_seq);
        flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
        flowc->mnemval[5].val = cpu_to_be32(ep->rcv_seq);
        flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
        flowc->mnemval[6].val = cpu_to_be32(snd_win);
        flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
        flowc->mnemval[7].val = cpu_to_be32(ep->emss);
        /* Pad WR to 16 byte boundary */
        flowc->mnemval[8].mnemonic = 0;
        flowc->mnemval[8].val = 0;
        for (i = 0; i < 9; i++) {
                flowc->mnemval[i].r4[0] = 0;
                flowc->mnemval[i].r4[1] = 0;
                flowc->mnemval[i].r4[2] = 0;
        }

        set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
        c4iw_ofld_send(&ep->com.dev->rdev, skb);
}

static int send_halfclose(struct c4iw_ep *ep, gfp_t gfp)
{
        struct cpl_close_con_req *req;
        struct sk_buff *skb;
        int wrlen = roundup(sizeof(*req), 16);

        PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
        skb = get_skb(NULL, wrlen, gfp);
        if (!skb) {
                printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__);
                return -ENOMEM;
        }
        set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
        t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
        req = (struct cpl_close_con_req *) skb_put(skb, wrlen);
        memset(req, 0, wrlen);
        INIT_TP_WR(req, ep->hwtid);
        OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ,
                                                    ep->hwtid));
        return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}

static int send_abort(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
{
        struct cpl_abort_req *req;
        int wrlen = roundup(sizeof(*req), 16);

        PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
        skb = get_skb(skb, wrlen, gfp);
        if (!skb) {
                printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
                       __func__);
                return -ENOMEM;
        }
        set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
        t4_set_arp_err_handler(skb, &ep->com.dev->rdev, abort_arp_failure);
        req = (struct cpl_abort_req *) skb_put(skb, wrlen);
        memset(req, 0, wrlen);
        INIT_TP_WR(req, ep->hwtid);
        OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid));
        req->cmd = CPL_ABORT_SEND_RST;
        return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}

#define VLAN_NONE 0xfff
#define FILTER_SEL_VLAN_NONE 0xffff
#define FILTER_SEL_WIDTH_P_FC (3+1) /* port uses 3 bits, FCoE one bit */
#define FILTER_SEL_WIDTH_VIN_P_FC \
        (6 + 7 + FILTER_SEL_WIDTH_P_FC) /* 6 bits are unused, VF uses 7 bits */
#define FILTER_SEL_WIDTH_TAG_P_FC \
        (3 + FILTER_SEL_WIDTH_VIN_P_FC) /* PF uses 3 bits */
#define FILTER_SEL_WIDTH_VLD_TAG_P_FC (1 + FILTER_SEL_WIDTH_TAG_P_FC)

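/*
 * Build the filter ntuple the hardware uses to match this connection.
 * Roughly (a sketch based on the macros above, low bits first):
 *
 *   [FCoE:1 | port:3 | vlan or VIN+PF fields | vlan-valid | proto]
 *
 * with the middle fields depending on whether the configured filter
 * mode keys on the inner VLAN (IV) or the outer VLAN/VIID (OV).
 */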
static unsigned int select_ntuple(struct c4iw_dev *dev, struct dst_entry *dst,
                                  struct l2t_entry *l2t)
{
        unsigned int ntuple = 0;
        u32 viid;

        switch (dev->rdev.lldi.filt_mode) {

        /* default filter mode */
        case HW_TPL_FR_MT_PR_IV_P_FC:
                if (l2t->vlan == VLAN_NONE)
                        ntuple |= FILTER_SEL_VLAN_NONE << FILTER_SEL_WIDTH_P_FC;
                else {
                        ntuple |= l2t->vlan << FILTER_SEL_WIDTH_P_FC;
                        ntuple |= 1 << FILTER_SEL_WIDTH_VLD_TAG_P_FC;
                }
                ntuple |= l2t->lport << S_PORT | IPPROTO_TCP <<
                          FILTER_SEL_WIDTH_VLD_TAG_P_FC;
                break;
        case HW_TPL_FR_MT_PR_OV_P_FC: {
                viid = cxgb4_port_viid(l2t->neigh->dev);

                ntuple |= FW_VIID_VIN_GET(viid) << FILTER_SEL_WIDTH_P_FC;
                ntuple |= FW_VIID_PFN_GET(viid) << FILTER_SEL_WIDTH_VIN_P_FC;
                ntuple |= FW_VIID_VIVLD_GET(viid) << FILTER_SEL_WIDTH_TAG_P_FC;
                ntuple |= l2t->lport << S_PORT | IPPROTO_TCP <<
                          FILTER_SEL_WIDTH_VLD_TAG_P_FC;
                break;
        }
        default:
                break;
        }
        return ntuple;
}

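/*
 * Active open: build a CPL_ACT_OPEN_REQ. opt0 carries the L2T index,
 * tx channel, SMAC index, MSS index, window scale and the receive
 * buffer size in 1KB units (rcv_win >> 10); opt2 selects the RSS queue
 * and the optional TCP features. The tid field packs the rss_qid above
 * the atid ((rss_qid << 14) | atid), apparently so the completion can
 * be steered back to the right queue.
 */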
static int send_connect(struct c4iw_ep *ep)
{
        struct cpl_act_open_req *req;
        struct sk_buff *skb;
        u64 opt0;
        u32 opt2;
        unsigned int mtu_idx;
        int wscale;
        int wrlen = roundup(sizeof(*req), 16);

        PDBG("%s ep %p atid %u\n", __func__, ep, ep->atid);

        skb = get_skb(NULL, wrlen, GFP_KERNEL);
        if (!skb) {
                printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
                       __func__);
                return -ENOMEM;
        }
        set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);

        cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
        wscale = compute_wscale(rcv_win);
        opt0 = (nocong ? NO_CONG(1) : 0) |
               KEEP_ALIVE(1) |
               DELACK(1) |
               WND_SCALE(wscale) |
               MSS_IDX(mtu_idx) |
               L2T_IDX(ep->l2t->idx) |
               TX_CHAN(ep->tx_chan) |
               SMAC_SEL(ep->smac_idx) |
               DSCP(ep->tos) |
               ULP_MODE(ULP_MODE_TCPDDP) |
               RCV_BUFSIZ(rcv_win>>10);
        opt2 = RX_CHANNEL(0) |
               CCTRL_ECN(enable_ecn) |
               RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid);
        if (enable_tcp_timestamps)
                opt2 |= TSTAMPS_EN(1);
        if (enable_tcp_sack)
                opt2 |= SACK_EN(1);
        if (wscale && enable_tcp_window_scaling)
                opt2 |= WND_SCALE_EN(1);
        t4_set_arp_err_handler(skb, NULL, act_open_req_arp_failure);

        req = (struct cpl_act_open_req *) skb_put(skb, wrlen);
        INIT_TP_WR(req, 0);
        OPCODE_TID(req) = cpu_to_be32(
                MK_OPCODE_TID(CPL_ACT_OPEN_REQ, ((ep->rss_qid<<14)|ep->atid)));
        req->local_port = ep->com.local_addr.sin_port;
        req->peer_port = ep->com.remote_addr.sin_port;
        req->local_ip = ep->com.local_addr.sin_addr.s_addr;
        req->peer_ip = ep->com.remote_addr.sin_addr.s_addr;
        req->opt0 = cpu_to_be64(opt0);
        req->params = cpu_to_be32(select_ntuple(ep->com.dev, ep->dst, ep->l2t));
        req->opt2 = cpu_to_be32(opt2);
        set_bit(ACT_OPEN_REQ, &ep->com.history);
        return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}

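/*
 * Send the MPA start request as immediate data in a FW_OFLD_TX_DATA_WR.
 * Wire layout (sketch):
 *
 *   struct fw_ofld_tx_data_wr
 *   struct mpa_message            key, flags, revision, private_data_size
 *   [struct mpa_v2_conn_params]   rev 2 only: ird/ord plus RTR bits
 *   [private data]
 *
 * For rev 2 the advertised private_data_size includes the v2 params.
 */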
static void send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb,
                         u8 mpa_rev_to_use)
{
        int mpalen, wrlen;
        struct fw_ofld_tx_data_wr *req;
        struct mpa_message *mpa;
        struct mpa_v2_conn_params mpa_v2_params;

        PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);

        BUG_ON(skb_cloned(skb));

        mpalen = sizeof(*mpa) + ep->plen;
        if (mpa_rev_to_use == 2)
                mpalen += sizeof(struct mpa_v2_conn_params);
        wrlen = roundup(mpalen + sizeof(*req), 16);
        skb = get_skb(skb, wrlen, GFP_KERNEL);
        if (!skb) {
                connect_reply_upcall(ep, -ENOMEM);
                return;
        }
        set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);

        req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen);
        memset(req, 0, wrlen);
        req->op_to_immdlen = cpu_to_be32(
                FW_WR_OP(FW_OFLD_TX_DATA_WR) |
                FW_WR_COMPL(1) |
                FW_WR_IMMDLEN(mpalen));
        req->flowid_len16 = cpu_to_be32(
                FW_WR_FLOWID(ep->hwtid) |
                FW_WR_LEN16(wrlen >> 4));
        req->plen = cpu_to_be32(mpalen);
        req->tunnel_to_proxy = cpu_to_be32(
                FW_OFLD_TX_DATA_WR_FLUSH(1) |
                FW_OFLD_TX_DATA_WR_SHOVE(1));

        mpa = (struct mpa_message *)(req + 1);
        memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key));
        mpa->flags = (crc_enabled ? MPA_CRC : 0) |
                     (markers_enabled ? MPA_MARKERS : 0) |
                     (mpa_rev_to_use == 2 ? MPA_ENHANCED_RDMA_CONN : 0);
        mpa->private_data_size = htons(ep->plen);
        mpa->revision = mpa_rev_to_use;
        if (mpa_rev_to_use == 1) {
                ep->tried_with_mpa_v1 = 1;
                ep->retry_with_mpa_v1 = 0;
        }

        if (mpa_rev_to_use == 2) {
                mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
                                               sizeof(struct mpa_v2_conn_params));
                mpa_v2_params.ird = htons((u16)ep->ird);
                mpa_v2_params.ord = htons((u16)ep->ord);

                if (peer2peer) {
                        mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL);
                        if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE)
                                mpa_v2_params.ord |=
                                        htons(MPA_V2_RDMA_WRITE_RTR);
                        else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ)
                                mpa_v2_params.ord |=
                                        htons(MPA_V2_RDMA_READ_RTR);
                }
                memcpy(mpa->private_data, &mpa_v2_params,
                       sizeof(struct mpa_v2_conn_params));

                if (ep->plen)
                        memcpy(mpa->private_data +
                               sizeof(struct mpa_v2_conn_params),
                               ep->mpa_pkt + sizeof(*mpa), ep->plen);
        } else
                if (ep->plen)
                        memcpy(mpa->private_data,
                               ep->mpa_pkt + sizeof(*mpa), ep->plen);

        /*
         * Reference the mpa skb. This ensures the data area
         * will remain in memory until the hw acks the tx.
         * Function fw4_ack() will deref it.
         */
        skb_get(skb);
        t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
        BUG_ON(ep->mpa_skb);
        ep->mpa_skb = skb;
        c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
        start_ep_timer(ep);
        state_set(&ep->com, MPA_REQ_SENT);
        ep->mpa_attr.initiator = 1;
        return;
}

static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
{
        int mpalen, wrlen;
        struct fw_ofld_tx_data_wr *req;
        struct mpa_message *mpa;
        struct sk_buff *skb;
        struct mpa_v2_conn_params mpa_v2_params;

        PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);

        mpalen = sizeof(*mpa) + plen;
        if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn)
                mpalen += sizeof(struct mpa_v2_conn_params);
        wrlen = roundup(mpalen + sizeof(*req), 16);

        skb = get_skb(NULL, wrlen, GFP_KERNEL);
        if (!skb) {
                printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
                return -ENOMEM;
        }
        set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);

        req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen);
        memset(req, 0, wrlen);
        req->op_to_immdlen = cpu_to_be32(
                FW_WR_OP(FW_OFLD_TX_DATA_WR) |
                FW_WR_COMPL(1) |
                FW_WR_IMMDLEN(mpalen));
        req->flowid_len16 = cpu_to_be32(
                FW_WR_FLOWID(ep->hwtid) |
                FW_WR_LEN16(wrlen >> 4));
        req->plen = cpu_to_be32(mpalen);
        req->tunnel_to_proxy = cpu_to_be32(
                FW_OFLD_TX_DATA_WR_FLUSH(1) |
                FW_OFLD_TX_DATA_WR_SHOVE(1));

        mpa = (struct mpa_message *)(req + 1);
        memset(mpa, 0, sizeof(*mpa));
        memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
        mpa->flags = MPA_REJECT;
        mpa->revision = ep->mpa_attr.version;
        mpa->private_data_size = htons(plen);

        if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
                mpa->flags |= MPA_ENHANCED_RDMA_CONN;
                mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
                                               sizeof(struct mpa_v2_conn_params));
                mpa_v2_params.ird = htons(((u16)ep->ird) |
                                          (peer2peer ? MPA_V2_PEER2PEER_MODEL :
                                           0));
                mpa_v2_params.ord = htons(((u16)ep->ord) | (peer2peer ?
                                          (p2p_type ==
                                           FW_RI_INIT_P2PTYPE_RDMA_WRITE ?
                                           MPA_V2_RDMA_WRITE_RTR : p2p_type ==
                                           FW_RI_INIT_P2PTYPE_READ_REQ ?
                                           MPA_V2_RDMA_READ_RTR : 0) : 0));
                memcpy(mpa->private_data, &mpa_v2_params,
                       sizeof(struct mpa_v2_conn_params));

                if (ep->plen)
                        memcpy(mpa->private_data +
                               sizeof(struct mpa_v2_conn_params), pdata, plen);
        } else
                if (plen)
                        memcpy(mpa->private_data, pdata, plen);

        /*
         * Reference the mpa skb again. This ensures the data area
         * will remain in memory until the hw acks the tx.
         * Function fw4_ack() will deref it.
         */
        skb_get(skb);
        set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
        t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
        BUG_ON(ep->mpa_skb);
        ep->mpa_skb = skb;
        return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}

static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen)
{
        int mpalen, wrlen;
        struct fw_ofld_tx_data_wr *req;
        struct mpa_message *mpa;
        struct sk_buff *skb;
        struct mpa_v2_conn_params mpa_v2_params;

        PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);

        mpalen = sizeof(*mpa) + plen;
        if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn)
                mpalen += sizeof(struct mpa_v2_conn_params);
        wrlen = roundup(mpalen + sizeof(*req), 16);

        skb = get_skb(NULL, wrlen, GFP_KERNEL);
        if (!skb) {
                printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
                return -ENOMEM;
        }
        set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);

        req = (struct fw_ofld_tx_data_wr *) skb_put(skb, wrlen);
        memset(req, 0, wrlen);
        req->op_to_immdlen = cpu_to_be32(
                FW_WR_OP(FW_OFLD_TX_DATA_WR) |
                FW_WR_COMPL(1) |
                FW_WR_IMMDLEN(mpalen));
        req->flowid_len16 = cpu_to_be32(
                FW_WR_FLOWID(ep->hwtid) |
                FW_WR_LEN16(wrlen >> 4));
        req->plen = cpu_to_be32(mpalen);
        req->tunnel_to_proxy = cpu_to_be32(
                FW_OFLD_TX_DATA_WR_FLUSH(1) |
                FW_OFLD_TX_DATA_WR_SHOVE(1));

        mpa = (struct mpa_message *)(req + 1);
        memset(mpa, 0, sizeof(*mpa));
        memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
        mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) |
                     (markers_enabled ? MPA_MARKERS : 0);
        mpa->revision = ep->mpa_attr.version;
        mpa->private_data_size = htons(plen);

        if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
                mpa->flags |= MPA_ENHANCED_RDMA_CONN;
                mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
                                               sizeof(struct mpa_v2_conn_params));
                mpa_v2_params.ird = htons((u16)ep->ird);
                mpa_v2_params.ord = htons((u16)ep->ord);
                if (peer2peer && (ep->mpa_attr.p2p_type !=
                                        FW_RI_INIT_P2PTYPE_DISABLED)) {
                        mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL);

                        if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE)
                                mpa_v2_params.ord |=
                                        htons(MPA_V2_RDMA_WRITE_RTR);
                        else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ)
                                mpa_v2_params.ord |=
                                        htons(MPA_V2_RDMA_READ_RTR);
                }

                memcpy(mpa->private_data, &mpa_v2_params,
                       sizeof(struct mpa_v2_conn_params));

                if (ep->plen)
                        memcpy(mpa->private_data +
                               sizeof(struct mpa_v2_conn_params), pdata, plen);
        } else
                if (plen)
                        memcpy(mpa->private_data, pdata, plen);

        /*
         * Reference the mpa skb. This ensures the data area
         * will remain in memory until the hw acks the tx.
         * Function fw4_ack() will deref it.
         */
        skb_get(skb);
        t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
        ep->mpa_skb = skb;
        state_set(&ep->com, MPA_REP_SENT);
        return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}

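/*
 * CPL_ACT_ESTABLISH completes an active open: trade the atid for the
 * hardware tid, record the ISNs, derive emss from the echoed TCP
 * options, then kick off MPA negotiation (FLOWC followed by the MPA
 * request, using MPA v1 when this connection is a fallback retry).
 */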
static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
{
        struct c4iw_ep *ep;
        struct cpl_act_establish *req = cplhdr(skb);
        unsigned int tid = GET_TID(req);
        unsigned int atid = GET_TID_TID(ntohl(req->tos_atid));
        struct tid_info *t = dev->rdev.lldi.tids;

        ep = lookup_atid(t, atid);

        PDBG("%s ep %p tid %u snd_isn %u rcv_isn %u\n", __func__, ep, tid,
             be32_to_cpu(req->snd_isn), be32_to_cpu(req->rcv_isn));

        dst_confirm(ep->dst);

        /* setup the hwtid for this connection */
        ep->hwtid = tid;
        cxgb4_insert_tid(t, ep, tid);
        insert_handle(dev, &dev->hwtid_idr, ep, ep->hwtid);

        ep->snd_seq = be32_to_cpu(req->snd_isn);
        ep->rcv_seq = be32_to_cpu(req->rcv_isn);

        set_emss(ep, ntohs(req->tcp_opt));

        /* dealloc the atid */
        remove_handle(ep->com.dev, &ep->com.dev->atid_idr, atid);
        cxgb4_free_atid(t, atid);
        set_bit(ACT_ESTAB, &ep->com.history);

        /* start MPA negotiation */
        send_flowc(ep, NULL);
        if (ep->retry_with_mpa_v1)
                send_mpa_req(ep, skb, 1);
        else
                send_mpa_req(ep, skb, mpa_rev);

        return 0;
}

static void close_complete_upcall(struct c4iw_ep *ep)
{
        struct iw_cm_event event;

        PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
        memset(&event, 0, sizeof(event));
        event.event = IW_CM_EVENT_CLOSE;
        if (ep->com.cm_id) {
                PDBG("close complete delivered ep %p cm_id %p tid %u\n",
                     ep, ep->com.cm_id, ep->hwtid);
                ep->com.cm_id->event_handler(ep->com.cm_id, &event);
                ep->com.cm_id->rem_ref(ep->com.cm_id);
                ep->com.cm_id = NULL;
                set_bit(CLOSE_UPCALL, &ep->com.history);
        }
}

static int abort_connection(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
{
        PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
        close_complete_upcall(ep);
        state_set(&ep->com, ABORTING);
        set_bit(ABORT_CONN, &ep->com.history);
        return send_abort(ep, skb, gfp);
}

static void peer_close_upcall(struct c4iw_ep *ep)
{
        struct iw_cm_event event;

        PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
        memset(&event, 0, sizeof(event));
        event.event = IW_CM_EVENT_DISCONNECT;
        if (ep->com.cm_id) {
                PDBG("peer close delivered ep %p cm_id %p tid %u\n",
                     ep, ep->com.cm_id, ep->hwtid);
                ep->com.cm_id->event_handler(ep->com.cm_id, &event);
                set_bit(DISCONN_UPCALL, &ep->com.history);
        }
}

static void peer_abort_upcall(struct c4iw_ep *ep)
{
        struct iw_cm_event event;

        PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
        memset(&event, 0, sizeof(event));
        event.event = IW_CM_EVENT_CLOSE;
        event.status = -ECONNRESET;
        if (ep->com.cm_id) {
                PDBG("abort delivered ep %p cm_id %p tid %u\n", ep,
                     ep->com.cm_id, ep->hwtid);
                ep->com.cm_id->event_handler(ep->com.cm_id, &event);
                ep->com.cm_id->rem_ref(ep->com.cm_id);
                ep->com.cm_id = NULL;
                set_bit(ABORT_UPCALL, &ep->com.history);
        }
}

static void connect_reply_upcall(struct c4iw_ep *ep, int status)
{
        struct iw_cm_event event;

        PDBG("%s ep %p tid %u status %d\n", __func__, ep, ep->hwtid, status);
        memset(&event, 0, sizeof(event));
        event.event = IW_CM_EVENT_CONNECT_REPLY;
        event.status = status;
        event.local_addr = ep->com.local_addr;
        event.remote_addr = ep->com.remote_addr;

        if ((status == 0) || (status == -ECONNREFUSED)) {
                if (!ep->tried_with_mpa_v1) {
                        /* this means MPA_v2 is used */
                        event.private_data_len = ep->plen -
                                sizeof(struct mpa_v2_conn_params);
                        event.private_data = ep->mpa_pkt +
                                sizeof(struct mpa_message) +
                                sizeof(struct mpa_v2_conn_params);
                } else {
                        /* this means MPA_v1 is used */
                        event.private_data_len = ep->plen;
                        event.private_data = ep->mpa_pkt +
                                sizeof(struct mpa_message);
                }
        }

        PDBG("%s ep %p tid %u status %d\n", __func__, ep,
             ep->hwtid, status);
        set_bit(CONN_RPL_UPCALL, &ep->com.history);
        ep->com.cm_id->event_handler(ep->com.cm_id, &event);

        if (status < 0) {
                ep->com.cm_id->rem_ref(ep->com.cm_id);
                ep->com.cm_id = NULL;
        }
}

static void connect_request_upcall(struct c4iw_ep *ep)
{
        struct iw_cm_event event;

        PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
        memset(&event, 0, sizeof(event));
        event.event = IW_CM_EVENT_CONNECT_REQUEST;
        event.local_addr = ep->com.local_addr;
        event.remote_addr = ep->com.remote_addr;
        event.provider_data = ep;
        if (!ep->tried_with_mpa_v1) {
                /* this means MPA_v2 is used */
                event.ord = ep->ord;
                event.ird = ep->ird;
                event.private_data_len = ep->plen -
                        sizeof(struct mpa_v2_conn_params);
                event.private_data = ep->mpa_pkt + sizeof(struct mpa_message) +
                        sizeof(struct mpa_v2_conn_params);
        } else {
                /* this means MPA_v1 is used. Send max supported */
                event.ord = c4iw_max_read_depth;
                event.ird = c4iw_max_read_depth;
                event.private_data_len = ep->plen;
                event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
        }
        if (state_read(&ep->parent_ep->com) != DEAD) {
                c4iw_get_ep(&ep->com);
                ep->parent_ep->com.cm_id->event_handler(
                                                ep->parent_ep->com.cm_id,
                                                &event);
        }
        set_bit(CONNREQ_UPCALL, &ep->com.history);
        c4iw_put_ep(&ep->parent_ep->com);
        ep->parent_ep = NULL;
}

static void established_upcall(struct c4iw_ep *ep)
{
        struct iw_cm_event event;

        PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
        memset(&event, 0, sizeof(event));
        event.event = IW_CM_EVENT_ESTABLISHED;
        event.ird = ep->ird;
        event.ord = ep->ord;
        if (ep->com.cm_id) {
                PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
                ep->com.cm_id->event_handler(ep->com.cm_id, &event);
                set_bit(ESTAB_UPCALL, &ep->com.history);
        }
}

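/*
 * Return RX credits to the hardware with CPL_RX_DATA_ACK so the TCP
 * window reopens as MPA bytes are consumed; RX_FORCE_ACK forces an
 * immediate ACK and dack_mode selects the delayed-ack behaviour.
 */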
static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
{
        struct cpl_rx_data_ack *req;
        struct sk_buff *skb;
        int wrlen = roundup(sizeof(*req), 16);

        PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits);
        skb = get_skb(NULL, wrlen, GFP_KERNEL);
        if (!skb) {
                printk(KERN_ERR MOD "update_rx_credits - cannot alloc skb!\n");
                return 0;
        }

        req = (struct cpl_rx_data_ack *) skb_put(skb, wrlen);
        memset(req, 0, wrlen);
        INIT_TP_WR(req, ep->hwtid);
        OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
                                                    ep->hwtid));
        req->credit_dack = cpu_to_be32(credits | RX_FORCE_ACK(1) |
                                       F_RX_DACK_CHANGE |
                                       V_RX_DACK_MODE(dack_mode));
        set_wr_txq(skb, CPL_PRIORITY_ACK, ep->ctrlq_idx);
        c4iw_ofld_send(&ep->com.dev->rdev, skb);
        return credits;
}

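/*
 * Active side: accumulate the peer's MPA start reply in ep->mpa_pkt,
 * returning early until the full message (header plus private data)
 * has arrived. Once validated, the negotiated attributes are applied
 * and the QP is moved to RTS; an RTR-type or IRD/ORD mismatch still
 * moves to RTS but then fires a TERMINATE, per the MPA v2
 * enhanced-connection rules.
 */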
static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
{
        struct mpa_message *mpa;
        struct mpa_v2_conn_params *mpa_v2_params;
        u16 plen;
        u16 resp_ird, resp_ord;
        u8 rtr_mismatch = 0, insuff_ird = 0;
        struct c4iw_qp_attributes attrs;
        enum c4iw_qp_attr_mask mask;
        int err;

        PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);

        /*
         * Stop mpa timer. If it expired, then the state has
         * changed and we bail since ep_timeout already aborted
         * the connection.
         */
        stop_ep_timer(ep);
        if (state_read(&ep->com) != MPA_REQ_SENT)
                return;

        /*
         * If we get more than the supported amount of private data
         * then we must fail this connection.
         */
        if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
                err = -EINVAL;
                goto err;
        }

        /*
         * Copy the new data into our accumulation buffer.
         */
        skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
                                  skb->len);
        ep->mpa_pkt_len += skb->len;

        /*
         * If we don't even have the mpa message yet, then bail.
         */
        if (ep->mpa_pkt_len < sizeof(*mpa))
                return;
        mpa = (struct mpa_message *) ep->mpa_pkt;

        /* Validate MPA header. */
        if (mpa->revision > mpa_rev) {
                printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d,"
                       " Received = %d\n", __func__, mpa_rev, mpa->revision);
                err = -EPROTO;
                goto err;
        }
        if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) {
                err = -EPROTO;
                goto err;
        }

        plen = ntohs(mpa->private_data_size);

        /*
         * Fail if there's too much private data.
         */
        if (plen > MPA_MAX_PRIVATE_DATA) {
                err = -EPROTO;
                goto err;
        }

        /*
         * Fail if plen does not account for the packet size.
         */
        if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
                err = -EPROTO;
                goto err;
        }

        ep->plen = (u8) plen;

        /*
         * If we don't have all the pdata yet, then bail.
         * We'll continue processing when more data arrives.
         */
        if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
                return;

        if (mpa->flags & MPA_REJECT) {
                err = -ECONNREFUSED;
                goto err;
        }

        /*
         * If we get here we have accumulated the entire mpa
         * start reply message including private data. And
         * the MPA header is valid.
         */
        state_set(&ep->com, FPDU_MODE);
        ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
        ep->mpa_attr.recv_marker_enabled = markers_enabled;
        ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
        ep->mpa_attr.version = mpa->revision;
        ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;

        if (mpa->revision == 2) {
                ep->mpa_attr.enhanced_rdma_conn =
                        mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0;
                if (ep->mpa_attr.enhanced_rdma_conn) {
                        mpa_v2_params = (struct mpa_v2_conn_params *)
                                (ep->mpa_pkt + sizeof(*mpa));
                        resp_ird = ntohs(mpa_v2_params->ird) &
                                MPA_V2_IRD_ORD_MASK;
                        resp_ord = ntohs(mpa_v2_params->ord) &
                                MPA_V2_IRD_ORD_MASK;

                        /*
                         * This is a double-check. Ideally, the checks below
                         * are not required since ird/ord have already been
                         * taken care of in c4iw_accept_cr.
                         */
                        if ((ep->ird < resp_ord) || (ep->ord > resp_ird)) {
                                err = -ENOMEM;
                                ep->ird = resp_ord;
                                ep->ord = resp_ird;
                                insuff_ird = 1;
                        }

                        if (ntohs(mpa_v2_params->ird) &
                                        MPA_V2_PEER2PEER_MODEL) {
                                if (ntohs(mpa_v2_params->ord) &
                                                MPA_V2_RDMA_WRITE_RTR)
                                        ep->mpa_attr.p2p_type =
                                                FW_RI_INIT_P2PTYPE_RDMA_WRITE;
                                else if (ntohs(mpa_v2_params->ord) &
                                                MPA_V2_RDMA_READ_RTR)
                                        ep->mpa_attr.p2p_type =
                                                FW_RI_INIT_P2PTYPE_READ_REQ;
                        }
                }
        } else if (mpa->revision == 1)
                if (peer2peer)
                        ep->mpa_attr.p2p_type = p2p_type;

        PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
             "xmit_marker_enabled=%d, version=%d p2p_type=%d local-p2p_type = "
             "%d\n", __func__, ep->mpa_attr.crc_enabled,
             ep->mpa_attr.recv_marker_enabled,
             ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
             ep->mpa_attr.p2p_type, p2p_type);

        /*
         * If the responder's RTR does not match that of the initiator,
         * assign FW_RI_INIT_P2PTYPE_DISABLED in the mpa attributes so that
         * no RTR is generated when moving the QP to RTS state.
         * A TERM message will be sent after the QP has moved to RTS state.
         */
        if ((ep->mpa_attr.version == 2) && peer2peer &&
            (ep->mpa_attr.p2p_type != p2p_type)) {
                ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;
                rtr_mismatch = 1;
        }

        attrs.mpa_attr = ep->mpa_attr;
        attrs.max_ird = ep->ird;
        attrs.max_ord = ep->ord;
        attrs.llp_stream_handle = ep;
        attrs.next_state = C4IW_QP_STATE_RTS;

        mask = C4IW_QP_ATTR_NEXT_STATE |
               C4IW_QP_ATTR_LLP_STREAM_HANDLE | C4IW_QP_ATTR_MPA_ATTR |
               C4IW_QP_ATTR_MAX_IRD | C4IW_QP_ATTR_MAX_ORD;

        /* bind QP and TID with INIT_WR */
        err = c4iw_modify_qp(ep->com.qp->rhp,
                             ep->com.qp, mask, &attrs, 1);
        if (err)
                goto err;

        /*
         * If the responder's RTR requirement did not match what the
         * initiator supports, generate a TERM message.
         */
        if (rtr_mismatch) {
                printk(KERN_ERR "%s: RTR mismatch, sending TERM\n", __func__);
                attrs.layer_etype = LAYER_MPA | DDP_LLP;
                attrs.ecode = MPA_NOMATCH_RTR;
                attrs.next_state = C4IW_QP_STATE_TERMINATE;
                err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
                                     C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
                err = -ENOMEM;
                goto out;
        }

        /*
         * Generate a TERM if the initiator IRD is not sufficient for the
         * responder-provided ORD. Currently, we do the same even when the
         * responder-provided IRD is insufficient for the initiator ORD.
         */
        if (insuff_ird) {
                printk(KERN_ERR "%s: Insufficient IRD, sending TERM\n",
                       __func__);
                attrs.layer_etype = LAYER_MPA | DDP_LLP;
                attrs.ecode = MPA_INSUFF_IRD;
                attrs.next_state = C4IW_QP_STATE_TERMINATE;
                err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
                                     C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
                err = -ENOMEM;
                goto out;
        }
        goto out;
err:
        state_set(&ep->com, ABORTING);
        send_abort(ep, skb, GFP_KERNEL);
out:
        connect_reply_upcall(ep, err);
        return;
}

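/*
 * Passive-side twin of process_mpa_reply(): accumulate and validate the
 * peer's MPA start request, then upcall IW_CM_EVENT_CONNECT_REQUEST so
 * the ULP can accept or reject the connection.
 */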
static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
{
        struct mpa_message *mpa;
        struct mpa_v2_conn_params *mpa_v2_params;
        u16 plen;

        PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);

        if (state_read(&ep->com) != MPA_REQ_WAIT)
                return;

        /*
         * If we get more than the supported amount of private data
         * then we must fail this connection.
         */
        if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
                stop_ep_timer(ep);
                abort_connection(ep, skb, GFP_KERNEL);
                return;
        }

        PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);

        /*
         * Copy the new data into our accumulation buffer.
         */
        skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
                                  skb->len);
        ep->mpa_pkt_len += skb->len;

        /*
         * If we don't even have the mpa message yet, then bail.
         * We'll continue processing when more data arrives.
         */
        if (ep->mpa_pkt_len < sizeof(*mpa))
                return;

        PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
        stop_ep_timer(ep);
        mpa = (struct mpa_message *) ep->mpa_pkt;

        /*
         * Validate MPA Header.
         */
        if (mpa->revision > mpa_rev) {
                printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d,"
                       " Received = %d\n", __func__, mpa_rev, mpa->revision);
                stop_ep_timer(ep);
                abort_connection(ep, skb, GFP_KERNEL);
                return;
        }

        if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key))) {
                stop_ep_timer(ep);
                abort_connection(ep, skb, GFP_KERNEL);
                return;
        }

        plen = ntohs(mpa->private_data_size);

        /*
         * Fail if there's too much private data.
         */
        if (plen > MPA_MAX_PRIVATE_DATA) {
                stop_ep_timer(ep);
                abort_connection(ep, skb, GFP_KERNEL);
                return;
        }

        /*
         * Fail if plen does not account for the packet size.
         */
        if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
                stop_ep_timer(ep);
                abort_connection(ep, skb, GFP_KERNEL);
                return;
        }
        ep->plen = (u8) plen;

        /*
         * If we don't have all the pdata yet, then bail.
         */
        if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
                return;

        /*
         * If we get here we have accumulated the entire MPA
         * start request message including private data.
         */
        ep->mpa_attr.initiator = 0;
        ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
        ep->mpa_attr.recv_marker_enabled = markers_enabled;
        ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
        ep->mpa_attr.version = mpa->revision;
        if (mpa->revision == 1)
                ep->tried_with_mpa_v1 = 1;
        ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;

        if (mpa->revision == 2) {
                ep->mpa_attr.enhanced_rdma_conn =
                        mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0;
                if (ep->mpa_attr.enhanced_rdma_conn) {
                        mpa_v2_params = (struct mpa_v2_conn_params *)
                                (ep->mpa_pkt + sizeof(*mpa));
                        ep->ird = ntohs(mpa_v2_params->ird) &
                                MPA_V2_IRD_ORD_MASK;
                        ep->ord = ntohs(mpa_v2_params->ord) &
                                MPA_V2_IRD_ORD_MASK;
                        if (ntohs(mpa_v2_params->ird) & MPA_V2_PEER2PEER_MODEL)
                                if (peer2peer) {
                                        if (ntohs(mpa_v2_params->ord) &
                                                        MPA_V2_RDMA_WRITE_RTR)
                                                ep->mpa_attr.p2p_type =
                                                FW_RI_INIT_P2PTYPE_RDMA_WRITE;
                                        else if (ntohs(mpa_v2_params->ord) &
                                                        MPA_V2_RDMA_READ_RTR)
                                                ep->mpa_attr.p2p_type =
                                                FW_RI_INIT_P2PTYPE_READ_REQ;
                                }
                }
        } else if (mpa->revision == 1)
                if (peer2peer)
                        ep->mpa_attr.p2p_type = p2p_type;

        PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
             "xmit_marker_enabled=%d, version=%d p2p_type=%d\n", __func__,
             ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
             ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
             ep->mpa_attr.p2p_type);

        state_set(&ep->com, MPA_REQ_RCVD);

        /* drive upcall */
        connect_request_upcall(ep);
        return;
}

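/*
 * CPL_RX_DATA carries streaming-mode (pre-RDMA) payload, which should
 * only arrive while MPA negotiation is in progress. Anything received
 * in FPDU_MODE is unexpected streaming data, so the QP is moved to
 * ERROR and the connection is torn down.
 */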
static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
{
        struct c4iw_ep *ep;
        struct cpl_rx_data *hdr = cplhdr(skb);
        unsigned int dlen = ntohs(hdr->len);
        unsigned int tid = GET_TID(hdr);
        struct tid_info *t = dev->rdev.lldi.tids;
        __u8 status = hdr->status;

        ep = lookup_tid(t, tid);
        PDBG("%s ep %p tid %u dlen %u\n", __func__, ep, ep->hwtid, dlen);
        skb_pull(skb, sizeof(*hdr));
        skb_trim(skb, dlen);

        /* update RX credits */
        update_rx_credits(ep, dlen);

        switch (state_read(&ep->com)) {
        case MPA_REQ_SENT:
                ep->rcv_seq += dlen;
                process_mpa_reply(ep, skb);
                break;
        case MPA_REQ_WAIT:
                ep->rcv_seq += dlen;
                process_mpa_request(ep, skb);
                break;
        case FPDU_MODE: {
                struct c4iw_qp_attributes attrs;
                BUG_ON(!ep->com.qp);
                if (status)
                        pr_err("%s Unexpected streaming data." \
                               " qpid %u ep %p state %d tid %u status %d\n",
                               __func__, ep->com.qp->wq.sq.qid, ep,
                               state_read(&ep->com), ep->hwtid, status);
                attrs.next_state = C4IW_QP_STATE_ERROR;
                c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
                               C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
                c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
                break;
        }
        default:
                break;
        }
        return 0;
}

static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
        struct c4iw_ep *ep;
        struct cpl_abort_rpl_rss *rpl = cplhdr(skb);
        int release = 0;
        unsigned int tid = GET_TID(rpl);
        struct tid_info *t = dev->rdev.lldi.tids;

        ep = lookup_tid(t, tid);
        if (!ep) {
                printk(KERN_WARNING MOD "Abort rpl to freed endpoint\n");
                return 0;
        }
        PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
        mutex_lock(&ep->com.mutex);
        switch (ep->com.state) {
        case ABORTING:
                c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
                __state_set(&ep->com, DEAD);
                release = 1;
                break;
        default:
                printk(KERN_ERR "%s ep %p state %d\n",
                       __func__, ep, ep->com.state);
                break;
        }
        mutex_unlock(&ep->com.mutex);

        if (release)
                release_ep_resources(ep);
        return 0;
}

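/*
 * Retry an active open through the firmware's offload-connection path
 * (FW_OFLD_CONNECTION_WR), apparently used when the plain
 * CPL_ACT_OPEN_REQ cannot be (e.g. filter/TCAM pressure). opt0/opt2
 * are assembled host-endian, hence the __force casts, and byte-swapped
 * once at the end; tx_max is seeded with jiffies as a timestamp base.
 */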
Vipul Pandya5be78ee2012-12-10 09:30:54 +00001472static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
1473{
1474 struct sk_buff *skb;
1475 struct fw_ofld_connection_wr *req;
1476 unsigned int mtu_idx;
1477 int wscale;
1478
1479 skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
1480 req = (struct fw_ofld_connection_wr *)__skb_put(skb, sizeof(*req));
1481 memset(req, 0, sizeof(*req));
1482 req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR));
1483 req->len16_pkd = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*req), 16)));
1484 req->le.filter = cpu_to_be32(select_ntuple(ep->com.dev, ep->dst,
1485 ep->l2t));
1486 req->le.lport = ep->com.local_addr.sin_port;
1487 req->le.pport = ep->com.remote_addr.sin_port;
1488 req->le.u.ipv4.lip = ep->com.local_addr.sin_addr.s_addr;
1489 req->le.u.ipv4.pip = ep->com.remote_addr.sin_addr.s_addr;
1490 req->tcb.t_state_to_astid =
1491 htonl(V_FW_OFLD_CONNECTION_WR_T_STATE(TCP_SYN_SENT) |
1492 V_FW_OFLD_CONNECTION_WR_ASTID(atid));
1493 req->tcb.cplrxdataack_cplpassacceptrpl =
1494 htons(F_FW_OFLD_CONNECTION_WR_CPLRXDATAACK);
Vipul Pandyaef5d6352013-01-07 13:12:00 +00001495 req->tcb.tx_max = (__force __be32) jiffies;
Vipul Pandya793dad92012-12-10 09:30:56 +00001496 req->tcb.rcv_adv = htons(1);
Vipul Pandya5be78ee2012-12-10 09:30:54 +00001497 cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
1498 wscale = compute_wscale(rcv_win);
Vipul Pandyaef5d6352013-01-07 13:12:00 +00001499 req->tcb.opt0 = (__force __be64) (TCAM_BYPASS(1) |
Vipul Pandya5be78ee2012-12-10 09:30:54 +00001500 (nocong ? NO_CONG(1) : 0) |
1501 KEEP_ALIVE(1) |
1502 DELACK(1) |
1503 WND_SCALE(wscale) |
1504 MSS_IDX(mtu_idx) |
1505 L2T_IDX(ep->l2t->idx) |
1506 TX_CHAN(ep->tx_chan) |
1507 SMAC_SEL(ep->smac_idx) |
1508 DSCP(ep->tos) |
1509 ULP_MODE(ULP_MODE_TCPDDP) |
Vipul Pandyaef5d6352013-01-07 13:12:00 +00001510 RCV_BUFSIZ(rcv_win >> 10));
1511 req->tcb.opt2 = (__force __be32) (PACE(1) |
Vipul Pandya5be78ee2012-12-10 09:30:54 +00001512 TX_QUEUE(ep->com.dev->rdev.lldi.tx_modq[ep->tx_chan]) |
1513 RX_CHANNEL(0) |
1514 CCTRL_ECN(enable_ecn) |
Vipul Pandyaef5d6352013-01-07 13:12:00 +00001515 RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid));
Vipul Pandya5be78ee2012-12-10 09:30:54 +00001516 if (enable_tcp_timestamps)
Vipul Pandyaef5d6352013-01-07 13:12:00 +00001517 req->tcb.opt2 |= (__force __be32) TSTAMPS_EN(1);
Vipul Pandya5be78ee2012-12-10 09:30:54 +00001518 if (enable_tcp_sack)
Vipul Pandyaef5d6352013-01-07 13:12:00 +00001519 req->tcb.opt2 |= (__force __be32) SACK_EN(1);
Vipul Pandya5be78ee2012-12-10 09:30:54 +00001520 if (wscale && enable_tcp_window_scaling)
Vipul Pandyaef5d6352013-01-07 13:12:00 +00001521 req->tcb.opt2 |= (__force __be32) WND_SCALE_EN(1);
1522 req->tcb.opt0 = cpu_to_be64((__force u64) req->tcb.opt0);
1523 req->tcb.opt2 = cpu_to_be32((__force u32) req->tcb.opt2);
Vipul Pandya793dad92012-12-10 09:30:56 +00001524 set_wr_txq(skb, CPL_PRIORITY_CONTROL, ep->ctrlq_idx);
1525 set_bit(ACT_OFLD_CONN, &ep->com.history);
Vipul Pandya5be78ee2012-12-10 09:30:54 +00001526 c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
1527}
1528
Steve Wisecfdda9d2010-04-21 15:30:06 -07001529/*
1530 * Return whether a failed active open has allocated a TID
1531 */
1532static inline int act_open_has_tid(int status)
1533{
1534 return status != CPL_ERR_TCAM_FULL && status != CPL_ERR_CONN_EXIST &&
1535 status != CPL_ERR_ARP_MISS;
1536}
1537
Vipul Pandya793dad92012-12-10 09:30:56 +00001538#define ACT_OPEN_RETRY_COUNT 2
1539
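/*
 * Re-initiate an active open after a transient failure: allocate a
 * fresh atid, re-resolve the route and L2T entry, and resend the
 * connect request. The upper layer is never told this is a retry;
 * it is still waiting on its original connect request.
 */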
1540static int c4iw_reconnect(struct c4iw_ep *ep)
1541{
1542 int err = 0;
1543 struct rtable *rt;
1544 struct port_info *pi;
1545 struct net_device *pdev;
1546 int step;
1547 struct neighbour *neigh;
1548
1549 PDBG("%s qp %p cm_id %p\n", __func__, ep->com.qp, ep->com.cm_id);
1550 init_timer(&ep->timer);
1551
1552 /*
1553 * Allocate an active TID to initiate a TCP connection.
1554 */
1555 ep->atid = cxgb4_alloc_atid(ep->com.dev->rdev.lldi.tids, ep);
1556 if (ep->atid == -1) {
1557 pr_err("%s - cannot alloc atid.\n", __func__);
1558 err = -ENOMEM;
1559 goto fail2;
1560 }
1561 insert_handle(ep->com.dev, &ep->com.dev->atid_idr, ep, ep->atid);
1562
1563 /* find a route */
1564 rt = find_route(ep->com.dev,
1565 ep->com.cm_id->local_addr.sin_addr.s_addr,
1566 ep->com.cm_id->remote_addr.sin_addr.s_addr,
1567 ep->com.cm_id->local_addr.sin_port,
1568 ep->com.cm_id->remote_addr.sin_port, 0);
1569 if (!rt) {
1570 pr_err("%s - cannot find route.\n", __func__);
1571 err = -EHOSTUNREACH;
1572 goto fail3;
1573 }
1574 ep->dst = &rt->dst;
1575
1576 neigh = dst_neigh_lookup(ep->dst,
1577 &ep->com.cm_id->remote_addr.sin_addr.s_addr);
Zhouyi Zhouaaa0c232013-03-14 17:21:50 +00001578 if (!neigh) {
1579 pr_err("%s - cannot alloc neigh.\n", __func__);
1580 err = -ENOMEM;
1581 goto fail4;
1582 }
1583
Vipul Pandya793dad92012-12-10 09:30:56 +00001584 /* get a l2t entry */
1585 if (neigh->dev->flags & IFF_LOOPBACK) {
1586 PDBG("%s LOOPBACK\n", __func__);
1587 pdev = ip_dev_find(&init_net,
1588 ep->com.cm_id->remote_addr.sin_addr.s_addr);
1589 ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t,
1590 neigh, pdev, 0);
1591 pi = (struct port_info *)netdev_priv(pdev);
1592 ep->mtu = pdev->mtu;
1593 ep->tx_chan = cxgb4_port_chan(pdev);
1594 ep->smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1;
1595 dev_put(pdev);
1596 } else {
1597 ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t,
1598 neigh, neigh->dev, 0);
1599 pi = (struct port_info *)netdev_priv(neigh->dev);
1600 ep->mtu = dst_mtu(ep->dst);
1601 ep->tx_chan = cxgb4_port_chan(neigh->dev);
1602 ep->smac_idx = (cxgb4_port_viid(neigh->dev) &
1603 0x7F) << 1;
1604 }
1605
1606 step = ep->com.dev->rdev.lldi.ntxq / ep->com.dev->rdev.lldi.nchan;
1607 ep->txq_idx = pi->port_id * step;
1608 ep->ctrlq_idx = pi->port_id;
1609 step = ep->com.dev->rdev.lldi.nrxq / ep->com.dev->rdev.lldi.nchan;
1610 ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[pi->port_id * step];
1611
1612 if (!ep->l2t) {
1613 pr_err("%s - cannot alloc l2e.\n", __func__);
1614 err = -ENOMEM;
1615 goto fail4;
1616 }
1617
1618 PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
1619 __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
1620 ep->l2t->idx);
1621
1622 state_set(&ep->com, CONNECTING);
1623 ep->tos = 0;
1624
1625 /* send connect request to rnic */
1626 err = send_connect(ep);
1627 if (!err)
1628 goto out;
1629
1630 cxgb4_l2t_release(ep->l2t);
1631fail4:
1632 dst_release(ep->dst);
1633fail3:
1634 remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid);
1635 cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
1636fail2:
1637 /*
 1638	 * Remember to send a notification to the upper layer. We are
 1639	 * here because the upper layer is not aware that this is a
 1640	 * reconnect attempt, so it is still waiting for the response
 1641	 * to the first connect request.
1642 */
1643 connect_reply_upcall(ep, -ECONNRESET);
1644 c4iw_put_ep(&ep->com);
1645out:
1646 return err;
1647}
1648
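/*
 * Handle CPL_ACT_OPEN_RPL, the reply to an active open. TCAM-full
 * failures are retried through the firmware offload path and
 * connection-exists failures via c4iw_reconnect(); any other
 * failure is reported upward and the endpoint is released.
 */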
Steve Wisecfdda9d2010-04-21 15:30:06 -07001649static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
1650{
1651 struct c4iw_ep *ep;
1652 struct cpl_act_open_rpl *rpl = cplhdr(skb);
1653 unsigned int atid = GET_TID_TID(GET_AOPEN_ATID(
1654 ntohl(rpl->atid_status)));
1655 struct tid_info *t = dev->rdev.lldi.tids;
1656 int status = GET_AOPEN_STATUS(ntohl(rpl->atid_status));
1657
1658 ep = lookup_atid(t, atid);
1659
1660 PDBG("%s ep %p atid %u status %u errno %d\n", __func__, ep, atid,
1661 status, status2errno(status));
1662
1663 if (status == CPL_ERR_RTX_NEG_ADVICE) {
1664 printk(KERN_WARNING MOD "Connection problems for atid %u\n",
1665 atid);
1666 return 0;
1667 }
1668
Vipul Pandya793dad92012-12-10 09:30:56 +00001669 set_bit(ACT_OPEN_RPL, &ep->com.history);
1670
Vipul Pandyad716a2a2012-05-18 15:29:31 +05301671 /*
1672 * Log interesting failures.
1673 */
1674 switch (status) {
1675 case CPL_ERR_CONN_RESET:
1676 case CPL_ERR_CONN_TIMEDOUT:
1677 break;
Vipul Pandya5be78ee2012-12-10 09:30:54 +00001678 case CPL_ERR_TCAM_FULL:
Vipul Pandya793dad92012-12-10 09:30:56 +00001679 if (dev->rdev.lldi.enable_fw_ofld_conn) {
1680 mutex_lock(&dev->rdev.stats.lock);
1681 dev->rdev.stats.tcam_full++;
1682 mutex_unlock(&dev->rdev.stats.lock);
1683 send_fw_act_open_req(ep,
1684 GET_TID_TID(GET_AOPEN_ATID(
1685 ntohl(rpl->atid_status))));
1686 return 0;
1687 }
1688 break;
1689 case CPL_ERR_CONN_EXIST:
1690 if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) {
1691 set_bit(ACT_RETRY_INUSE, &ep->com.history);
1692 remove_handle(ep->com.dev, &ep->com.dev->atid_idr,
1693 atid);
1694 cxgb4_free_atid(t, atid);
1695 dst_release(ep->dst);
1696 cxgb4_l2t_release(ep->l2t);
1697 c4iw_reconnect(ep);
1698 return 0;
1699 }
Vipul Pandya5be78ee2012-12-10 09:30:54 +00001700 break;
Vipul Pandyad716a2a2012-05-18 15:29:31 +05301701 default:
1702 printk(KERN_INFO MOD "Active open failure - "
1703 "atid %u status %u errno %d %pI4:%u->%pI4:%u\n",
1704 atid, status, status2errno(status),
1705 &ep->com.local_addr.sin_addr.s_addr,
1706 ntohs(ep->com.local_addr.sin_port),
1707 &ep->com.remote_addr.sin_addr.s_addr,
1708 ntohs(ep->com.remote_addr.sin_port));
1709 break;
1710 }
1711
Steve Wisecfdda9d2010-04-21 15:30:06 -07001712 connect_reply_upcall(ep, status2errno(status));
1713 state_set(&ep->com, DEAD);
1714
1715 if (status && act_open_has_tid(status))
1716 cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, GET_TID(rpl));
1717
Vipul Pandya793dad92012-12-10 09:30:56 +00001718 remove_handle(ep->com.dev, &ep->com.dev->atid_idr, atid);
Steve Wisecfdda9d2010-04-21 15:30:06 -07001719 cxgb4_free_atid(t, atid);
1720 dst_release(ep->dst);
1721 cxgb4_l2t_release(ep->l2t);
1722 c4iw_put_ep(&ep->com);
1723
1724 return 0;
1725}
1726
1727static int pass_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
1728{
1729 struct cpl_pass_open_rpl *rpl = cplhdr(skb);
1730 struct tid_info *t = dev->rdev.lldi.tids;
1731 unsigned int stid = GET_TID(rpl);
1732 struct c4iw_listen_ep *ep = lookup_stid(t, stid);
1733
1734 if (!ep) {
Vipul Pandya1cab7752012-12-10 09:30:55 +00001735 PDBG("%s stid %d lookup failure!\n", __func__, stid);
1736 goto out;
Steve Wisecfdda9d2010-04-21 15:30:06 -07001737 }
1738 PDBG("%s ep %p status %d error %d\n", __func__, ep,
1739 rpl->status, status2errno(rpl->status));
Steve Wised9594d92011-05-09 22:06:22 -07001740 c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status));
Steve Wisecfdda9d2010-04-21 15:30:06 -07001741
Vipul Pandya1cab7752012-12-10 09:30:55 +00001742out:
Steve Wisecfdda9d2010-04-21 15:30:06 -07001743 return 0;
1744}
1745
1746static int listen_stop(struct c4iw_listen_ep *ep)
1747{
1748 struct sk_buff *skb;
1749 struct cpl_close_listsvr_req *req;
1750
1751 PDBG("%s ep %p\n", __func__, ep);
1752 skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
1753 if (!skb) {
1754 printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__);
1755 return -ENOMEM;
1756 }
1757 req = (struct cpl_close_listsvr_req *) skb_put(skb, sizeof(*req));
1758 INIT_TP_WR(req, 0);
1759 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ,
1760 ep->stid));
1761 req->reply_ctrl = cpu_to_be16(
1762 QUEUENO(ep->com.dev->rdev.lldi.rxq_ids[0]));
1763 set_wr_txq(skb, CPL_PRIORITY_SETUP, 0);
1764 return c4iw_ofld_send(&ep->com.dev->rdev, skb);
1765}
1766
1767static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
1768{
1769 struct cpl_close_listsvr_rpl *rpl = cplhdr(skb);
1770 struct tid_info *t = dev->rdev.lldi.tids;
1771 unsigned int stid = GET_TID(rpl);
1772 struct c4iw_listen_ep *ep = lookup_stid(t, stid);
1773
1774 PDBG("%s ep %p\n", __func__, ep);
Steve Wised9594d92011-05-09 22:06:22 -07001775 c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status));
Steve Wisecfdda9d2010-04-21 15:30:06 -07001776 return 0;
1777}
1778
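/*
 * Build and send the CPL_PASS_ACCEPT_RPL that accepts an incoming
 * connection request, encoding the MSS index, window scale, and
 * per-port queue selections into opt0/opt2.
 */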
1779static void accept_cr(struct c4iw_ep *ep, __be32 peer_ip, struct sk_buff *skb,
1780 struct cpl_pass_accept_req *req)
1781{
1782 struct cpl_pass_accept_rpl *rpl;
1783 unsigned int mtu_idx;
1784 u64 opt0;
1785 u32 opt2;
1786 int wscale;
1787
1788 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
1789 BUG_ON(skb_cloned(skb));
1790 skb_trim(skb, sizeof(*rpl));
1791 skb_get(skb);
1792 cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
1793 wscale = compute_wscale(rcv_win);
Vipul Pandya5be78ee2012-12-10 09:30:54 +00001794 opt0 = (nocong ? NO_CONG(1) : 0) |
1795 KEEP_ALIVE(1) |
Steve Wiseba6d3922010-06-23 15:46:49 +00001796 DELACK(1) |
Steve Wisecfdda9d2010-04-21 15:30:06 -07001797 WND_SCALE(wscale) |
1798 MSS_IDX(mtu_idx) |
1799 L2T_IDX(ep->l2t->idx) |
1800 TX_CHAN(ep->tx_chan) |
1801 SMAC_SEL(ep->smac_idx) |
Vipul Pandya5be78ee2012-12-10 09:30:54 +00001802 DSCP(ep->tos >> 2) |
Steve Wiseb48f3b92011-03-11 22:30:21 +00001803 ULP_MODE(ULP_MODE_TCPDDP) |
Steve Wisecfdda9d2010-04-21 15:30:06 -07001804 RCV_BUFSIZ(rcv_win>>10);
1805 opt2 = RX_CHANNEL(0) |
1806 RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid);
1807
1808 if (enable_tcp_timestamps && req->tcpopt.tstamp)
1809 opt2 |= TSTAMPS_EN(1);
1810 if (enable_tcp_sack && req->tcpopt.sack)
1811 opt2 |= SACK_EN(1);
1812 if (wscale && enable_tcp_window_scaling)
1813 opt2 |= WND_SCALE_EN(1);
Vipul Pandya5be78ee2012-12-10 09:30:54 +00001814 if (enable_ecn) {
1815 const struct tcphdr *tcph;
1816 u32 hlen = ntohl(req->hdr_len);
1817
1818 tcph = (const void *)(req + 1) + G_ETH_HDR_LEN(hlen) +
1819 G_IP_HDR_LEN(hlen);
1820 if (tcph->ece && tcph->cwr)
1821 opt2 |= CCTRL_ECN(1);
1822 }
Steve Wisecfdda9d2010-04-21 15:30:06 -07001823
1824 rpl = cplhdr(skb);
1825 INIT_TP_WR(rpl, ep->hwtid);
1826 OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
1827 ep->hwtid));
1828 rpl->opt0 = cpu_to_be64(opt0);
1829 rpl->opt2 = cpu_to_be32(opt2);
Steve Wised4f1a5c2010-07-23 19:12:32 +00001830 set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);
Steve Wisecfdda9d2010-04-21 15:30:06 -07001831 c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
1832
1833 return;
1834}
1835
1836static void reject_cr(struct c4iw_dev *dev, u32 hwtid, __be32 peer_ip,
1837 struct sk_buff *skb)
1838{
1839 PDBG("%s c4iw_dev %p tid %u peer_ip %x\n", __func__, dev, hwtid,
1840 peer_ip);
1841 BUG_ON(skb_cloned(skb));
1842 skb_trim(skb, sizeof(struct cpl_tid_release));
1843 skb_get(skb);
1844 release_tid(&dev->rdev, hwtid, skb);
1845 return;
1846}
1847
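/*
 * Extract the TCP 4-tuple, in network byte order, from the
 * Ethernet/IP/TCP headers that follow the cpl_pass_accept_req.
 */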
1848static void get_4tuple(struct cpl_pass_accept_req *req,
1849 __be32 *local_ip, __be32 *peer_ip,
1850 __be16 *local_port, __be16 *peer_port)
1851{
1852 int eth_len = G_ETH_HDR_LEN(be32_to_cpu(req->hdr_len));
1853 int ip_len = G_IP_HDR_LEN(be32_to_cpu(req->hdr_len));
1854 struct iphdr *ip = (struct iphdr *)((u8 *)(req + 1) + eth_len);
1855 struct tcphdr *tcp = (struct tcphdr *)
1856 ((u8 *)(req + 1) + eth_len + ip_len);
1857
1858 PDBG("%s saddr 0x%x daddr 0x%x sport %u dport %u\n", __func__,
1859 ntohl(ip->saddr), ntohl(ip->daddr), ntohs(tcp->source),
1860 ntohs(tcp->dest));
1861
1862 *peer_ip = ip->saddr;
1863 *local_ip = ip->daddr;
1864 *peer_port = tcp->source;
1865 *local_port = tcp->dest;
1866
1867 return;
1868}
1869
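/*
 * Resolve the neighbour for the peer, grab an L2T entry, and fill
 * in the endpoint's transport parameters (MTU, tx channel, SMAC
 * index, queue indices), treating loopback as a special case.
 */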
David Miller3786cf12011-12-02 16:52:31 +00001870static int import_ep(struct c4iw_ep *ep, __be32 peer_ip, struct dst_entry *dst,
1871 struct c4iw_dev *cdev, bool clear_mpa_v1)
1872{
1873 struct neighbour *n;
1874 int err, step;
1875
David Miller64b70072012-01-24 13:15:57 +00001876 n = dst_neigh_lookup(dst, &peer_ip);
David Miller3786cf12011-12-02 16:52:31 +00001877 if (!n)
David Miller64b70072012-01-24 13:15:57 +00001878 return -ENODEV;
1879
1880 rcu_read_lock();
David Miller3786cf12011-12-02 16:52:31 +00001881 err = -ENOMEM;
1882 if (n->dev->flags & IFF_LOOPBACK) {
1883 struct net_device *pdev;
1884
1885 pdev = ip_dev_find(&init_net, peer_ip);
Thadeu Lima de Souza Cascardo71b43fd2012-05-17 17:51:53 -03001886 if (!pdev) {
1887 err = -ENODEV;
1888 goto out;
1889 }
David Miller3786cf12011-12-02 16:52:31 +00001890 ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
1891 n, pdev, 0);
1892 if (!ep->l2t)
1893 goto out;
1894 ep->mtu = pdev->mtu;
1895 ep->tx_chan = cxgb4_port_chan(pdev);
1896 ep->smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1;
1897 step = cdev->rdev.lldi.ntxq /
1898 cdev->rdev.lldi.nchan;
1899 ep->txq_idx = cxgb4_port_idx(pdev) * step;
1900 step = cdev->rdev.lldi.nrxq /
1901 cdev->rdev.lldi.nchan;
1902 ep->ctrlq_idx = cxgb4_port_idx(pdev);
1903 ep->rss_qid = cdev->rdev.lldi.rxq_ids[
1904 cxgb4_port_idx(pdev) * step];
1905 dev_put(pdev);
1906 } else {
1907 ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
1908 n, n->dev, 0);
1909 if (!ep->l2t)
1910 goto out;
Steve Wisebd61baa2012-04-27 10:24:33 -05001911 ep->mtu = dst_mtu(dst);
David Miller3786cf12011-12-02 16:52:31 +00001912 ep->tx_chan = cxgb4_port_chan(n->dev);
1913 ep->smac_idx = (cxgb4_port_viid(n->dev) & 0x7F) << 1;
1914 step = cdev->rdev.lldi.ntxq /
1915 cdev->rdev.lldi.nchan;
1916 ep->txq_idx = cxgb4_port_idx(n->dev) * step;
1917 ep->ctrlq_idx = cxgb4_port_idx(n->dev);
1918 step = cdev->rdev.lldi.nrxq /
1919 cdev->rdev.lldi.nchan;
1920 ep->rss_qid = cdev->rdev.lldi.rxq_ids[
1921 cxgb4_port_idx(n->dev) * step];
1922
1923 if (clear_mpa_v1) {
1924 ep->retry_with_mpa_v1 = 0;
1925 ep->tried_with_mpa_v1 = 0;
1926 }
1927 }
1928 err = 0;
1929out:
1930 rcu_read_unlock();
1931
David Miller64b70072012-01-24 13:15:57 +00001932 neigh_release(n);
1933
David Miller3786cf12011-12-02 16:52:31 +00001934 return err;
1935}
1936
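/*
 * Handle CPL_PASS_ACCEPT_REQ: an incoming SYN matched one of our
 * listening server TIDs. Allocate a child endpoint, find a route
 * back to the peer, and accept or reject the request.
 */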
Steve Wisecfdda9d2010-04-21 15:30:06 -07001937static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
1938{
Vipul Pandya793dad92012-12-10 09:30:56 +00001939 struct c4iw_ep *child_ep = NULL, *parent_ep;
Steve Wisecfdda9d2010-04-21 15:30:06 -07001940 struct cpl_pass_accept_req *req = cplhdr(skb);
1941 unsigned int stid = GET_POPEN_TID(ntohl(req->tos_stid));
1942 struct tid_info *t = dev->rdev.lldi.tids;
1943 unsigned int hwtid = GET_TID(req);
1944 struct dst_entry *dst;
Steve Wisecfdda9d2010-04-21 15:30:06 -07001945 struct rtable *rt;
Vipul Pandya1cab7752012-12-10 09:30:55 +00001946 __be32 local_ip, peer_ip = 0;
Steve Wisecfdda9d2010-04-21 15:30:06 -07001947 __be16 local_port, peer_port;
David Miller3786cf12011-12-02 16:52:31 +00001948 int err;
Vipul Pandya1cab7752012-12-10 09:30:55 +00001949 u16 peer_mss = ntohs(req->tcpopt.mss);
Steve Wisecfdda9d2010-04-21 15:30:06 -07001950
1951 parent_ep = lookup_stid(t, stid);
Vipul Pandya1cab7752012-12-10 09:30:55 +00001952 if (!parent_ep) {
1953 PDBG("%s connect request on invalid stid %d\n", __func__, stid);
1954 goto reject;
1955 }
Steve Wisecfdda9d2010-04-21 15:30:06 -07001956 get_4tuple(req, &local_ip, &peer_ip, &local_port, &peer_port);
1957
Vipul Pandya1cab7752012-12-10 09:30:55 +00001958 PDBG("%s parent ep %p hwtid %u laddr 0x%x raddr 0x%x lport %d " \
1959 "rport %d peer_mss %d\n", __func__, parent_ep, hwtid,
1960 ntohl(local_ip), ntohl(peer_ip), ntohs(local_port),
1961 ntohs(peer_port), peer_mss);
1962
Steve Wisecfdda9d2010-04-21 15:30:06 -07001963 if (state_read(&parent_ep->com) != LISTEN) {
1964 printk(KERN_ERR "%s - listening ep not in LISTEN\n",
1965 __func__);
1966 goto reject;
1967 }
1968
1969 /* Find output route */
1970 rt = find_route(dev, local_ip, peer_ip, local_port, peer_port,
1971 GET_POPEN_TOS(ntohl(req->tos_stid)));
1972 if (!rt) {
1973 printk(KERN_ERR MOD "%s - failed to find dst entry!\n",
1974 __func__);
1975 goto reject;
1976 }
Changli Gaod8d1f302010-06-10 23:31:35 -07001977 dst = &rt->dst;
Steve Wisecfdda9d2010-04-21 15:30:06 -07001978
1979 child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL);
1980 if (!child_ep) {
1981 printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n",
1982 __func__);
Steve Wisecfdda9d2010-04-21 15:30:06 -07001983 dst_release(dst);
1984 goto reject;
1985 }
David Miller3786cf12011-12-02 16:52:31 +00001986
1987 err = import_ep(child_ep, peer_ip, dst, dev, false);
1988 if (err) {
1989 printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
1990 __func__);
1991 dst_release(dst);
1992 kfree(child_ep);
1993 goto reject;
1994 }
1995
Vipul Pandya1cab7752012-12-10 09:30:55 +00001996 if (peer_mss && child_ep->mtu > (peer_mss + 40))
1997 child_ep->mtu = peer_mss + 40;
1998
Steve Wisecfdda9d2010-04-21 15:30:06 -07001999 state_set(&child_ep->com, CONNECTING);
2000 child_ep->com.dev = dev;
2001 child_ep->com.cm_id = NULL;
2002 child_ep->com.local_addr.sin_family = PF_INET;
2003 child_ep->com.local_addr.sin_port = local_port;
2004 child_ep->com.local_addr.sin_addr.s_addr = local_ip;
2005 child_ep->com.remote_addr.sin_family = PF_INET;
2006 child_ep->com.remote_addr.sin_port = peer_port;
2007 child_ep->com.remote_addr.sin_addr.s_addr = peer_ip;
2008 c4iw_get_ep(&parent_ep->com);
2009 child_ep->parent_ep = parent_ep;
2010 child_ep->tos = GET_POPEN_TOS(ntohl(req->tos_stid));
Steve Wisecfdda9d2010-04-21 15:30:06 -07002011 child_ep->dst = dst;
2012 child_ep->hwtid = hwtid;
Steve Wisecfdda9d2010-04-21 15:30:06 -07002013
2014 PDBG("%s tx_chan %u smac_idx %u rss_qid %u\n", __func__,
David Miller3786cf12011-12-02 16:52:31 +00002015 child_ep->tx_chan, child_ep->smac_idx, child_ep->rss_qid);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002016
2017 init_timer(&child_ep->timer);
2018 cxgb4_insert_tid(t, child_ep, hwtid);
Vipul Pandyab3de6cf2013-01-07 13:11:59 +00002019 insert_handle(dev, &dev->hwtid_idr, child_ep, child_ep->hwtid);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002020 accept_cr(child_ep, peer_ip, skb, req);
Vipul Pandya793dad92012-12-10 09:30:56 +00002021 set_bit(PASS_ACCEPT_REQ, &child_ep->com.history);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002022 goto out;
2023reject:
2024 reject_cr(dev, hwtid, peer_ip, skb);
2025out:
2026 return 0;
2027}
2028
2029static int pass_establish(struct c4iw_dev *dev, struct sk_buff *skb)
2030{
2031 struct c4iw_ep *ep;
2032 struct cpl_pass_establish *req = cplhdr(skb);
2033 struct tid_info *t = dev->rdev.lldi.tids;
2034 unsigned int tid = GET_TID(req);
2035
2036 ep = lookup_tid(t, tid);
2037 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
2038 ep->snd_seq = be32_to_cpu(req->snd_isn);
2039 ep->rcv_seq = be32_to_cpu(req->rcv_isn);
2040
Vipul Pandya1cab7752012-12-10 09:30:55 +00002041 PDBG("%s ep %p hwtid %u tcp_opt 0x%02x\n", __func__, ep, tid,
2042 ntohs(req->tcp_opt));
2043
Steve Wisecfdda9d2010-04-21 15:30:06 -07002044 set_emss(ep, ntohs(req->tcp_opt));
2045
2046 dst_confirm(ep->dst);
2047 state_set(&ep->com, MPA_REQ_WAIT);
2048 start_ep_timer(ep);
2049 send_flowc(ep, skb);
Vipul Pandya793dad92012-12-10 09:30:56 +00002050 set_bit(PASS_ESTAB, &ep->com.history);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002051
2052 return 0;
2053}
2054
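/*
 * Handle CPL_PEER_CLOSE: the peer sent a FIN. The response depends
 * on the connection state; in FPDU_MODE the QP moves to CLOSING
 * and an orderly shutdown begins.
 */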
2055static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
2056{
2057 struct cpl_peer_close *hdr = cplhdr(skb);
2058 struct c4iw_ep *ep;
2059 struct c4iw_qp_attributes attrs;
Steve Wisecfdda9d2010-04-21 15:30:06 -07002060 int disconnect = 1;
2061 int release = 0;
Steve Wisecfdda9d2010-04-21 15:30:06 -07002062 struct tid_info *t = dev->rdev.lldi.tids;
2063 unsigned int tid = GET_TID(hdr);
Steve Wise8da7e7a2011-06-14 20:59:27 +00002064 int ret;
Steve Wisecfdda9d2010-04-21 15:30:06 -07002065
2066 ep = lookup_tid(t, tid);
2067 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
2068 dst_confirm(ep->dst);
2069
Vipul Pandya793dad92012-12-10 09:30:56 +00002070 set_bit(PEER_CLOSE, &ep->com.history);
Steve Wise2f5b48c2010-09-10 11:15:36 -05002071 mutex_lock(&ep->com.mutex);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002072 switch (ep->com.state) {
2073 case MPA_REQ_WAIT:
2074 __state_set(&ep->com, CLOSING);
2075 break;
2076 case MPA_REQ_SENT:
2077 __state_set(&ep->com, CLOSING);
2078 connect_reply_upcall(ep, -ECONNRESET);
2079 break;
2080 case MPA_REQ_RCVD:
2081
2082 /*
2083 * We're gonna mark this puppy DEAD, but keep
2084 * the reference on it until the ULP accepts or
2085 * rejects the CR. Also wake up anyone waiting
2086 * in rdma connection migration (see c4iw_accept_cr()).
2087 */
2088 __state_set(&ep->com, CLOSING);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002089 PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
Steve Wised9594d92011-05-09 22:06:22 -07002090 c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002091 break;
2092 case MPA_REP_SENT:
2093 __state_set(&ep->com, CLOSING);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002094 PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
Steve Wised9594d92011-05-09 22:06:22 -07002095 c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002096 break;
2097 case FPDU_MODE:
Steve Wiseca5a2202010-07-23 19:12:37 +00002098 start_ep_timer(ep);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002099 __state_set(&ep->com, CLOSING);
Steve Wise30c95c22011-05-09 22:06:22 -07002100 attrs.next_state = C4IW_QP_STATE_CLOSING;
Steve Wise8da7e7a2011-06-14 20:59:27 +00002101 ret = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
Steve Wise30c95c22011-05-09 22:06:22 -07002102 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
Steve Wise8da7e7a2011-06-14 20:59:27 +00002103 if (ret != -ECONNRESET) {
2104 peer_close_upcall(ep);
2105 disconnect = 1;
2106 }
Steve Wisecfdda9d2010-04-21 15:30:06 -07002107 break;
2108 case ABORTING:
2109 disconnect = 0;
2110 break;
2111 case CLOSING:
2112 __state_set(&ep->com, MORIBUND);
2113 disconnect = 0;
2114 break;
2115 case MORIBUND:
Steve Wiseca5a2202010-07-23 19:12:37 +00002116 stop_ep_timer(ep);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002117 if (ep->com.cm_id && ep->com.qp) {
2118 attrs.next_state = C4IW_QP_STATE_IDLE;
2119 c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
2120 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
2121 }
2122 close_complete_upcall(ep);
2123 __state_set(&ep->com, DEAD);
2124 release = 1;
2125 disconnect = 0;
2126 break;
2127 case DEAD:
2128 disconnect = 0;
2129 break;
2130 default:
2131 BUG_ON(1);
2132 }
Steve Wise2f5b48c2010-09-10 11:15:36 -05002133 mutex_unlock(&ep->com.mutex);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002134 if (disconnect)
2135 c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
2136 if (release)
2137 release_ep_resources(ep);
2138 return 0;
2139}
2140
2141/*
 2142 * Returns whether an ABORT_REQ_RSS message is negative advice.
2143 */
2144static int is_neg_adv_abort(unsigned int status)
2145{
2146 return status == CPL_ERR_RTX_NEG_ADVICE ||
2147 status == CPL_ERR_PERSIST_NEG_ADVICE;
2148}
2149
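/*
 * Handle CPL_ABORT_REQ_RSS: the connection was aborted. Negative
 * advice is ignored; otherwise move the QP to ERROR as needed,
 * reply with CPL_ABORT_RPL, and release the endpoint unless we are
 * going to retry the connection with MPA v1.
 */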
2150static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
2151{
2152 struct cpl_abort_req_rss *req = cplhdr(skb);
2153 struct c4iw_ep *ep;
2154 struct cpl_abort_rpl *rpl;
2155 struct sk_buff *rpl_skb;
2156 struct c4iw_qp_attributes attrs;
2157 int ret;
2158 int release = 0;
Steve Wisecfdda9d2010-04-21 15:30:06 -07002159 struct tid_info *t = dev->rdev.lldi.tids;
2160 unsigned int tid = GET_TID(req);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002161
2162 ep = lookup_tid(t, tid);
2163 if (is_neg_adv_abort(req->status)) {
2164 PDBG("%s neg_adv_abort ep %p tid %u\n", __func__, ep,
2165 ep->hwtid);
2166 return 0;
2167 }
Steve Wisecfdda9d2010-04-21 15:30:06 -07002168 PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
2169 ep->com.state);
Vipul Pandya793dad92012-12-10 09:30:56 +00002170 set_bit(PEER_ABORT, &ep->com.history);
Steve Wise2f5b48c2010-09-10 11:15:36 -05002171
2172 /*
2173 * Wake up any threads in rdma_init() or rdma_fini().
Kumar Sanghvid2fe99e2011-09-25 20:17:44 +05302174 * However, this is not needed if the com state is just
 2175 * MPA_REQ_SENT.
Steve Wise2f5b48c2010-09-10 11:15:36 -05002176 */
Kumar Sanghvid2fe99e2011-09-25 20:17:44 +05302177 if (ep->com.state != MPA_REQ_SENT)
2178 c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
Steve Wise2f5b48c2010-09-10 11:15:36 -05002179
2180 mutex_lock(&ep->com.mutex);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002181 switch (ep->com.state) {
2182 case CONNECTING:
2183 break;
2184 case MPA_REQ_WAIT:
Steve Wiseca5a2202010-07-23 19:12:37 +00002185 stop_ep_timer(ep);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002186 break;
2187 case MPA_REQ_SENT:
Steve Wiseca5a2202010-07-23 19:12:37 +00002188 stop_ep_timer(ep);
Vipul Pandyafe7e0a42013-01-07 13:11:57 +00002189 if (mpa_rev == 1 || (mpa_rev == 2 && ep->tried_with_mpa_v1))
Kumar Sanghvid2fe99e2011-09-25 20:17:44 +05302190 connect_reply_upcall(ep, -ECONNRESET);
2191 else {
2192 /*
 2193 * We just don't send a notification upwards because we
 2194 * want to retry with mpa_v1 without the upper layers even
 2195 * knowing it.
 2196 *
 2197 * Do some housekeeping so as to re-initiate the
 2198 * connection.
2199 */
2200 PDBG("%s: mpa_rev=%d. Retrying with mpav1\n", __func__,
2201 mpa_rev);
2202 ep->retry_with_mpa_v1 = 1;
2203 }
Steve Wisecfdda9d2010-04-21 15:30:06 -07002204 break;
2205 case MPA_REP_SENT:
Steve Wisecfdda9d2010-04-21 15:30:06 -07002206 break;
2207 case MPA_REQ_RCVD:
Steve Wisecfdda9d2010-04-21 15:30:06 -07002208 break;
2209 case MORIBUND:
2210 case CLOSING:
Steve Wiseca5a2202010-07-23 19:12:37 +00002211 stop_ep_timer(ep);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002212 /*FALLTHROUGH*/
2213 case FPDU_MODE:
2214 if (ep->com.cm_id && ep->com.qp) {
2215 attrs.next_state = C4IW_QP_STATE_ERROR;
2216 ret = c4iw_modify_qp(ep->com.qp->rhp,
2217 ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
2218 &attrs, 1);
2219 if (ret)
2220 printk(KERN_ERR MOD
2221 "%s - qp <- error failed!\n",
2222 __func__);
2223 }
2224 peer_abort_upcall(ep);
2225 break;
2226 case ABORTING:
2227 break;
2228 case DEAD:
2229 PDBG("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__);
Steve Wise2f5b48c2010-09-10 11:15:36 -05002230 mutex_unlock(&ep->com.mutex);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002231 return 0;
2232 default:
2233 BUG_ON(1);
2234 break;
2235 }
2236 dst_confirm(ep->dst);
2237 if (ep->com.state != ABORTING) {
2238 __state_set(&ep->com, DEAD);
Kumar Sanghvid2fe99e2011-09-25 20:17:44 +05302239 /* we don't release if we want to retry with mpa_v1 */
2240 if (!ep->retry_with_mpa_v1)
2241 release = 1;
Steve Wisecfdda9d2010-04-21 15:30:06 -07002242 }
Steve Wise2f5b48c2010-09-10 11:15:36 -05002243 mutex_unlock(&ep->com.mutex);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002244
2245 rpl_skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL);
2246 if (!rpl_skb) {
2247 printk(KERN_ERR MOD "%s - cannot allocate skb!\n",
2248 __func__);
2249 release = 1;
2250 goto out;
2251 }
2252 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
2253 rpl = (struct cpl_abort_rpl *) skb_put(rpl_skb, sizeof(*rpl));
2254 INIT_TP_WR(rpl, ep->hwtid);
2255 OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, ep->hwtid));
2256 rpl->cmd = CPL_ABORT_NO_RST;
2257 c4iw_ofld_send(&ep->com.dev->rdev, rpl_skb);
2258out:
Steve Wisecfdda9d2010-04-21 15:30:06 -07002259 if (release)
2260 release_ep_resources(ep);
Vipul Pandyafe7e0a42013-01-07 13:11:57 +00002261 else if (ep->retry_with_mpa_v1) {
2262 remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid);
Kumar Sanghvid2fe99e2011-09-25 20:17:44 +05302263 cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid);
2264 dst_release(ep->dst);
2265 cxgb4_l2t_release(ep->l2t);
2266 c4iw_reconnect(ep);
2267 }
2268
Steve Wisecfdda9d2010-04-21 15:30:06 -07002269 return 0;
2270}
2271
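/*
 * Handle CPL_CLOSE_CON_RPL: our half-close completed. Advance
 * CLOSING to MORIBUND, or finish the close and release the
 * endpoint if it was already MORIBUND.
 */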
2272static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
2273{
2274 struct c4iw_ep *ep;
2275 struct c4iw_qp_attributes attrs;
2276 struct cpl_close_con_rpl *rpl = cplhdr(skb);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002277 int release = 0;
2278 struct tid_info *t = dev->rdev.lldi.tids;
2279 unsigned int tid = GET_TID(rpl);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002280
2281 ep = lookup_tid(t, tid);
2282
2283 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
2284 BUG_ON(!ep);
2285
2286 /* The cm_id may be null if we failed to connect */
Steve Wise2f5b48c2010-09-10 11:15:36 -05002287 mutex_lock(&ep->com.mutex);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002288 switch (ep->com.state) {
2289 case CLOSING:
2290 __state_set(&ep->com, MORIBUND);
2291 break;
2292 case MORIBUND:
Steve Wiseca5a2202010-07-23 19:12:37 +00002293 stop_ep_timer(ep);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002294 if ((ep->com.cm_id) && (ep->com.qp)) {
2295 attrs.next_state = C4IW_QP_STATE_IDLE;
2296 c4iw_modify_qp(ep->com.qp->rhp,
2297 ep->com.qp,
2298 C4IW_QP_ATTR_NEXT_STATE,
2299 &attrs, 1);
2300 }
2301 close_complete_upcall(ep);
2302 __state_set(&ep->com, DEAD);
2303 release = 1;
2304 break;
2305 case ABORTING:
2306 case DEAD:
2307 break;
2308 default:
2309 BUG_ON(1);
2310 break;
2311 }
Steve Wise2f5b48c2010-09-10 11:15:36 -05002312 mutex_unlock(&ep->com.mutex);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002313 if (release)
2314 release_ep_resources(ep);
2315 return 0;
2316}
2317
2318static int terminate(struct c4iw_dev *dev, struct sk_buff *skb)
2319{
Steve Wise0e42c1f2010-09-10 11:15:09 -05002320 struct cpl_rdma_terminate *rpl = cplhdr(skb);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002321 struct tid_info *t = dev->rdev.lldi.tids;
Steve Wise0e42c1f2010-09-10 11:15:09 -05002322 unsigned int tid = GET_TID(rpl);
2323 struct c4iw_ep *ep;
2324 struct c4iw_qp_attributes attrs;
Steve Wisecfdda9d2010-04-21 15:30:06 -07002325
2326 ep = lookup_tid(t, tid);
Steve Wise0e42c1f2010-09-10 11:15:09 -05002327 BUG_ON(!ep);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002328
Steve Wise30c95c22011-05-09 22:06:22 -07002329 if (ep && ep->com.qp) {
Steve Wise0e42c1f2010-09-10 11:15:09 -05002330 printk(KERN_WARNING MOD "TERM received tid %u qpid %u\n", tid,
2331 ep->com.qp->wq.sq.qid);
2332 attrs.next_state = C4IW_QP_STATE_TERMINATE;
2333 c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
2334 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
2335 } else
Steve Wise30c95c22011-05-09 22:06:22 -07002336 printk(KERN_WARNING MOD "TERM received tid %u no ep/qp\n", tid);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002337
Steve Wisecfdda9d2010-04-21 15:30:06 -07002338 return 0;
2339}
2340
2341/*
2342 * Upcall from the adapter indicating data has been transmitted.
 2343 * For us it's just the single MPA request or reply. We can now free
2344 * the skb holding the mpa message.
2345 */
2346static int fw4_ack(struct c4iw_dev *dev, struct sk_buff *skb)
2347{
2348 struct c4iw_ep *ep;
2349 struct cpl_fw4_ack *hdr = cplhdr(skb);
2350 u8 credits = hdr->credits;
2351 unsigned int tid = GET_TID(hdr);
2352 struct tid_info *t = dev->rdev.lldi.tids;
2353
2354
2355 ep = lookup_tid(t, tid);
2356 PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits);
2357 if (credits == 0) {
Joe Perchesaa1ad262010-10-25 19:44:22 -07002358 PDBG("%s 0 credit ack ep %p tid %u state %u\n",
2359 __func__, ep, ep->hwtid, state_read(&ep->com));
Steve Wisecfdda9d2010-04-21 15:30:06 -07002360 return 0;
2361 }
2362
2363 dst_confirm(ep->dst);
2364 if (ep->mpa_skb) {
2365 PDBG("%s last streaming msg ack ep %p tid %u state %u "
2366 "initiator %u freeing skb\n", __func__, ep, ep->hwtid,
2367 state_read(&ep->com), ep->mpa_attr.initiator ? 1 : 0);
2368 kfree_skb(ep->mpa_skb);
2369 ep->mpa_skb = NULL;
2370 }
2371 return 0;
2372}
2373
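/*
 * iw_cm reject: the ULP declined an inbound connection request.
 * Send an MPA reject (or abort the connection for mpa_rev 0) and
 * begin teardown.
 */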
Steve Wisecfdda9d2010-04-21 15:30:06 -07002374int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
2375{
2376 int err;
2377 struct c4iw_ep *ep = to_ep(cm_id);
2378 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
2379
2380 if (state_read(&ep->com) == DEAD) {
2381 c4iw_put_ep(&ep->com);
2382 return -ECONNRESET;
2383 }
Vipul Pandya793dad92012-12-10 09:30:56 +00002384 set_bit(ULP_REJECT, &ep->com.history);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002385 BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
2386 if (mpa_rev == 0)
2387 abort_connection(ep, NULL, GFP_KERNEL);
2388 else {
2389 err = send_mpa_reject(ep, pdata, pdata_len);
2390 err = c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
2391 }
2392 c4iw_put_ep(&ep->com);
2393 return 0;
2394}
2395
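/*
 * iw_cm accept: the ULP accepted an inbound connection request.
 * Negotiate IRD/ORD, bind the QP to the endpoint, move it to RTS,
 * and send the MPA reply.
 */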
2396int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
2397{
2398 int err;
2399 struct c4iw_qp_attributes attrs;
2400 enum c4iw_qp_attr_mask mask;
2401 struct c4iw_ep *ep = to_ep(cm_id);
2402 struct c4iw_dev *h = to_c4iw_dev(cm_id->device);
2403 struct c4iw_qp *qp = get_qhp(h, conn_param->qpn);
2404
2405 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
2406 if (state_read(&ep->com) == DEAD) {
2407 err = -ECONNRESET;
2408 goto err;
2409 }
2410
2411 BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
2412 BUG_ON(!qp);
2413
Vipul Pandya793dad92012-12-10 09:30:56 +00002414 set_bit(ULP_ACCEPT, &ep->com.history);
Roland Dreierbe4c9ba2010-05-05 14:45:40 -07002415 if ((conn_param->ord > c4iw_max_read_depth) ||
2416 (conn_param->ird > c4iw_max_read_depth)) {
Steve Wisecfdda9d2010-04-21 15:30:06 -07002417 abort_connection(ep, NULL, GFP_KERNEL);
2418 err = -EINVAL;
2419 goto err;
2420 }
2421
Kumar Sanghvid2fe99e2011-09-25 20:17:44 +05302422 if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
2423 if (conn_param->ord > ep->ird) {
2424 ep->ird = conn_param->ird;
2425 ep->ord = conn_param->ord;
2426 send_mpa_reject(ep, conn_param->private_data,
2427 conn_param->private_data_len);
2428 abort_connection(ep, NULL, GFP_KERNEL);
2429 err = -ENOMEM;
2430 goto err;
2431 }
2432 if (conn_param->ird > ep->ord) {
2433 if (!ep->ord)
2434 conn_param->ird = 1;
2435 else {
2436 abort_connection(ep, NULL, GFP_KERNEL);
2437 err = -ENOMEM;
2438 goto err;
2439 }
2440 }
Steve Wisecfdda9d2010-04-21 15:30:06 -07002441
Kumar Sanghvid2fe99e2011-09-25 20:17:44 +05302442 }
Steve Wisecfdda9d2010-04-21 15:30:06 -07002443 ep->ird = conn_param->ird;
2444 ep->ord = conn_param->ord;
2445
Kumar Sanghvid2fe99e2011-09-25 20:17:44 +05302446 if (ep->mpa_attr.version != 2)
2447 if (peer2peer && ep->ird == 0)
2448 ep->ird = 1;
Steve Wisecfdda9d2010-04-21 15:30:06 -07002449
2450 PDBG("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord);
2451
Kumar Sanghvid2fe99e2011-09-25 20:17:44 +05302452 cm_id->add_ref(cm_id);
2453 ep->com.cm_id = cm_id;
2454 ep->com.qp = qp;
Vipul Pandya325abea2013-01-07 13:11:53 +00002455 ref_qp(ep);
Kumar Sanghvid2fe99e2011-09-25 20:17:44 +05302456
Steve Wisecfdda9d2010-04-21 15:30:06 -07002457 /* bind QP to EP and move to RTS */
2458 attrs.mpa_attr = ep->mpa_attr;
2459 attrs.max_ird = ep->ird;
2460 attrs.max_ord = ep->ord;
2461 attrs.llp_stream_handle = ep;
2462 attrs.next_state = C4IW_QP_STATE_RTS;
2463
2464 /* bind QP and TID with INIT_WR */
2465 mask = C4IW_QP_ATTR_NEXT_STATE |
2466 C4IW_QP_ATTR_LLP_STREAM_HANDLE |
2467 C4IW_QP_ATTR_MPA_ATTR |
2468 C4IW_QP_ATTR_MAX_IRD |
2469 C4IW_QP_ATTR_MAX_ORD;
2470
2471 err = c4iw_modify_qp(ep->com.qp->rhp,
2472 ep->com.qp, mask, &attrs, 1);
2473 if (err)
2474 goto err1;
2475 err = send_mpa_reply(ep, conn_param->private_data,
2476 conn_param->private_data_len);
2477 if (err)
2478 goto err1;
2479
2480 state_set(&ep->com, FPDU_MODE);
2481 established_upcall(ep);
2482 c4iw_put_ep(&ep->com);
2483 return 0;
2484err1:
2485 ep->com.cm_id = NULL;
Steve Wisecfdda9d2010-04-21 15:30:06 -07002486 cm_id->rem_ref(cm_id);
2487err:
2488 c4iw_put_ep(&ep->com);
2489 return err;
2490}
2491
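/*
 * iw_cm connect: start an active open. Allocate an endpoint and an
 * atid, resolve the route and L2T entry, and send the connect
 * request to the hardware.
 */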
2492int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
2493{
Steve Wisecfdda9d2010-04-21 15:30:06 -07002494 struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
2495 struct c4iw_ep *ep;
2496 struct rtable *rt;
David Miller3786cf12011-12-02 16:52:31 +00002497 int err = 0;
Steve Wisecfdda9d2010-04-21 15:30:06 -07002498
Roland Dreierbe4c9ba2010-05-05 14:45:40 -07002499 if ((conn_param->ord > c4iw_max_read_depth) ||
2500 (conn_param->ird > c4iw_max_read_depth)) {
2501 err = -EINVAL;
2502 goto out;
2503 }
Steve Wisecfdda9d2010-04-21 15:30:06 -07002504 ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
2505 if (!ep) {
2506 printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
2507 err = -ENOMEM;
2508 goto out;
2509 }
2510 init_timer(&ep->timer);
2511 ep->plen = conn_param->private_data_len;
2512 if (ep->plen)
2513 memcpy(ep->mpa_pkt + sizeof(struct mpa_message),
2514 conn_param->private_data, ep->plen);
2515 ep->ird = conn_param->ird;
2516 ep->ord = conn_param->ord;
2517
2518 if (peer2peer && ep->ord == 0)
2519 ep->ord = 1;
2520
2521 cm_id->add_ref(cm_id);
2522 ep->com.dev = dev;
2523 ep->com.cm_id = cm_id;
2524 ep->com.qp = get_qhp(dev, conn_param->qpn);
2525 BUG_ON(!ep->com.qp);
Vipul Pandya325abea2013-01-07 13:11:53 +00002526 ref_qp(ep);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002527 PDBG("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn,
2528 ep->com.qp, cm_id);
2529
2530 /*
2531 * Allocate an active TID to initiate a TCP connection.
2532 */
2533 ep->atid = cxgb4_alloc_atid(dev->rdev.lldi.tids, ep);
2534 if (ep->atid == -1) {
2535 printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__);
2536 err = -ENOMEM;
2537 goto fail2;
2538 }
Vipul Pandya793dad92012-12-10 09:30:56 +00002539 insert_handle(dev, &dev->atid_idr, ep, ep->atid);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002540
2541 PDBG("%s saddr 0x%x sport 0x%x raddr 0x%x rport 0x%x\n", __func__,
2542 ntohl(cm_id->local_addr.sin_addr.s_addr),
2543 ntohs(cm_id->local_addr.sin_port),
2544 ntohl(cm_id->remote_addr.sin_addr.s_addr),
2545 ntohs(cm_id->remote_addr.sin_port));
2546
2547 /* find a route */
2548 rt = find_route(dev,
2549 cm_id->local_addr.sin_addr.s_addr,
2550 cm_id->remote_addr.sin_addr.s_addr,
2551 cm_id->local_addr.sin_port,
2552 cm_id->remote_addr.sin_port, 0);
2553 if (!rt) {
2554 printk(KERN_ERR MOD "%s - cannot find route.\n", __func__);
2555 err = -EHOSTUNREACH;
2556 goto fail3;
2557 }
Changli Gaod8d1f302010-06-10 23:31:35 -07002558 ep->dst = &rt->dst;
Steve Wisecfdda9d2010-04-21 15:30:06 -07002559
David Miller3786cf12011-12-02 16:52:31 +00002560 err = import_ep(ep, cm_id->remote_addr.sin_addr.s_addr,
2561 ep->dst, ep->com.dev, true);
2562 if (err) {
Steve Wisecfdda9d2010-04-21 15:30:06 -07002563 printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002564 goto fail4;
2565 }
2566
2567 PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
2568 __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
2569 ep->l2t->idx);
2570
2571 state_set(&ep->com, CONNECTING);
2572 ep->tos = 0;
2573 ep->com.local_addr = cm_id->local_addr;
2574 ep->com.remote_addr = cm_id->remote_addr;
2575
2576 /* send connect request to rnic */
2577 err = send_connect(ep);
2578 if (!err)
2579 goto out;
2580
2581 cxgb4_l2t_release(ep->l2t);
2582fail4:
2583 dst_release(ep->dst);
2584fail3:
Vipul Pandya793dad92012-12-10 09:30:56 +00002585 remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002586 cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
2587fail2:
2588 cm_id->rem_ref(cm_id);
2589 c4iw_put_ep(&ep->com);
2590out:
2591 return err;
2592}
2593
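/*
 * iw_cm listen: allocate a server TID and start listening, either
 * through a server filter (when firmware offload connections are
 * enabled) or through a regular hardware server entry.
 */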
2594int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
2595{
2596 int err = 0;
2597 struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
2598 struct c4iw_listen_ep *ep;
2599
Steve Wisecfdda9d2010-04-21 15:30:06 -07002600 might_sleep();
2601
2602 ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
2603 if (!ep) {
2604 printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
2605 err = -ENOMEM;
2606 goto fail1;
2607 }
2608 PDBG("%s ep %p\n", __func__, ep);
2609 cm_id->add_ref(cm_id);
2610 ep->com.cm_id = cm_id;
2611 ep->com.dev = dev;
2612 ep->backlog = backlog;
2613 ep->com.local_addr = cm_id->local_addr;
2614
2615 /*
2616 * Allocate a server TID.
2617 */
Vipul Pandya1cab7752012-12-10 09:30:55 +00002618 if (dev->rdev.lldi.enable_fw_ofld_conn)
2619 ep->stid = cxgb4_alloc_sftid(dev->rdev.lldi.tids, PF_INET, ep);
2620 else
2621 ep->stid = cxgb4_alloc_stid(dev->rdev.lldi.tids, PF_INET, ep);
2622
Steve Wisecfdda9d2010-04-21 15:30:06 -07002623 if (ep->stid == -1) {
Roland Dreierbe4c9ba2010-05-05 14:45:40 -07002624 printk(KERN_ERR MOD "%s - cannot alloc stid.\n", __func__);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002625 err = -ENOMEM;
2626 goto fail2;
2627 }
Vipul Pandya793dad92012-12-10 09:30:56 +00002628 insert_handle(dev, &dev->stid_idr, ep, ep->stid);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002629 state_set(&ep->com, LISTEN);
Vipul Pandya1cab7752012-12-10 09:30:55 +00002630 if (dev->rdev.lldi.enable_fw_ofld_conn) {
2631 do {
2632 err = cxgb4_create_server_filter(
2633 ep->com.dev->rdev.lldi.ports[0], ep->stid,
2634 ep->com.local_addr.sin_addr.s_addr,
2635 ep->com.local_addr.sin_port,
Vipul Pandya793dad92012-12-10 09:30:56 +00002636 0,
2637 ep->com.dev->rdev.lldi.rxq_ids[0],
2638 0,
2639 0);
Vipul Pandya1cab7752012-12-10 09:30:55 +00002640 if (err == -EBUSY) {
2641 set_current_state(TASK_UNINTERRUPTIBLE);
2642 schedule_timeout(usecs_to_jiffies(100));
2643 }
2644 } while (err == -EBUSY);
2645 } else {
2646 c4iw_init_wr_wait(&ep->com.wr_wait);
2647 err = cxgb4_create_server(ep->com.dev->rdev.lldi.ports[0],
2648 ep->stid, ep->com.local_addr.sin_addr.s_addr,
2649 ep->com.local_addr.sin_port,
Vipul Pandya793dad92012-12-10 09:30:56 +00002650 0,
Vipul Pandya1cab7752012-12-10 09:30:55 +00002651 ep->com.dev->rdev.lldi.rxq_ids[0]);
2652 if (!err)
2653 err = c4iw_wait_for_reply(&ep->com.dev->rdev,
2654 &ep->com.wr_wait,
2655 0, 0, __func__);
2656 }
Steve Wisecfdda9d2010-04-21 15:30:06 -07002657 if (!err) {
2658 cm_id->provider_data = ep;
2659 goto out;
2660 }
Vipul Pandya1cab7752012-12-10 09:30:55 +00002661 pr_err("%s cxgb4_create_server/filter failed err %d " \
2662 "stid %d laddr %08x lport %d\n", \
2663 __func__, err, ep->stid,
2664 ntohl(ep->com.local_addr.sin_addr.s_addr),
2665 ntohs(ep->com.local_addr.sin_port));
Steve Wisecfdda9d2010-04-21 15:30:06 -07002666 cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, PF_INET);
2667fail2:
2668 cm_id->rem_ref(cm_id);
2669 c4iw_put_ep(&ep->com);
2670fail1:
2671out:
2672 return err;
2673}
2674
2675int c4iw_destroy_listen(struct iw_cm_id *cm_id)
2676{
2677 int err;
2678 struct c4iw_listen_ep *ep = to_listen_ep(cm_id);
2679
2680 PDBG("%s ep %p\n", __func__, ep);
2681
2682 might_sleep();
2683 state_set(&ep->com, DEAD);
Vipul Pandya1cab7752012-12-10 09:30:55 +00002684 if (ep->com.dev->rdev.lldi.enable_fw_ofld_conn) {
2685 err = cxgb4_remove_server_filter(
2686 ep->com.dev->rdev.lldi.ports[0], ep->stid,
2687 ep->com.dev->rdev.lldi.rxq_ids[0], 0);
2688 } else {
2689 c4iw_init_wr_wait(&ep->com.wr_wait);
2690 err = listen_stop(ep);
2691 if (err)
2692 goto done;
2693 err = c4iw_wait_for_reply(&ep->com.dev->rdev, &ep->com.wr_wait,
2694 0, 0, __func__);
2695 }
Vipul Pandya793dad92012-12-10 09:30:56 +00002696 remove_handle(ep->com.dev, &ep->com.dev->stid_idr, ep->stid);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002697 cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, PF_INET);
2698done:
Steve Wisecfdda9d2010-04-21 15:30:06 -07002699 cm_id->rem_ref(cm_id);
2700 c4iw_put_ep(&ep->com);
2701 return err;
2702}
2703
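/*
 * Initiate a disconnect, abortive or orderly according to the
 * abrupt flag and current state. On a fatal error the endpoint
 * resources are released before returning.
 */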
2704int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
2705{
2706 int ret = 0;
Steve Wisecfdda9d2010-04-21 15:30:06 -07002707 int close = 0;
2708 int fatal = 0;
2709 struct c4iw_rdev *rdev;
Steve Wisecfdda9d2010-04-21 15:30:06 -07002710
Steve Wise2f5b48c2010-09-10 11:15:36 -05002711 mutex_lock(&ep->com.mutex);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002712
2713 PDBG("%s ep %p state %s, abrupt %d\n", __func__, ep,
2714 states[ep->com.state], abrupt);
2715
2716 rdev = &ep->com.dev->rdev;
2717 if (c4iw_fatal_error(rdev)) {
2718 fatal = 1;
2719 close_complete_upcall(ep);
2720 ep->com.state = DEAD;
2721 }
2722 switch (ep->com.state) {
2723 case MPA_REQ_WAIT:
2724 case MPA_REQ_SENT:
2725 case MPA_REQ_RCVD:
2726 case MPA_REP_SENT:
2727 case FPDU_MODE:
2728 close = 1;
2729 if (abrupt)
2730 ep->com.state = ABORTING;
2731 else {
2732 ep->com.state = CLOSING;
Steve Wiseca5a2202010-07-23 19:12:37 +00002733 start_ep_timer(ep);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002734 }
2735 set_bit(CLOSE_SENT, &ep->com.flags);
2736 break;
2737 case CLOSING:
2738 if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) {
2739 close = 1;
2740 if (abrupt) {
Steve Wiseca5a2202010-07-23 19:12:37 +00002741 stop_ep_timer(ep);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002742 ep->com.state = ABORTING;
2743 } else
2744 ep->com.state = MORIBUND;
2745 }
2746 break;
2747 case MORIBUND:
2748 case ABORTING:
2749 case DEAD:
2750 PDBG("%s ignoring disconnect ep %p state %u\n",
2751 __func__, ep, ep->com.state);
2752 break;
2753 default:
2754 BUG();
2755 break;
2756 }
2757
Steve Wisecfdda9d2010-04-21 15:30:06 -07002758 if (close) {
Steve Wise8da7e7a2011-06-14 20:59:27 +00002759 if (abrupt) {
Vipul Pandya793dad92012-12-10 09:30:56 +00002760 set_bit(EP_DISC_ABORT, &ep->com.history);
Steve Wise8da7e7a2011-06-14 20:59:27 +00002761 close_complete_upcall(ep);
2762 ret = send_abort(ep, NULL, gfp);
Vipul Pandya793dad92012-12-10 09:30:56 +00002763 } else {
2764 set_bit(EP_DISC_CLOSE, &ep->com.history);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002765 ret = send_halfclose(ep, gfp);
Vipul Pandya793dad92012-12-10 09:30:56 +00002766 }
Steve Wisecfdda9d2010-04-21 15:30:06 -07002767 if (ret)
2768 fatal = 1;
2769 }
Steve Wise8da7e7a2011-06-14 20:59:27 +00002770 mutex_unlock(&ep->com.mutex);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002771 if (fatal)
2772 release_ep_resources(ep);
2773 return ret;
2774}
2775
Vipul Pandya1cab7752012-12-10 09:30:55 +00002776static void active_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb,
2777 struct cpl_fw6_msg_ofld_connection_wr_rpl *req)
2778{
2779 struct c4iw_ep *ep;
Vipul Pandya793dad92012-12-10 09:30:56 +00002780 int atid = be32_to_cpu(req->tid);
Vipul Pandya1cab7752012-12-10 09:30:55 +00002781
Vipul Pandyaef5d6352013-01-07 13:12:00 +00002782 ep = (struct c4iw_ep *)lookup_atid(dev->rdev.lldi.tids,
2783 (__force u32) req->tid);
Vipul Pandya1cab7752012-12-10 09:30:55 +00002784 if (!ep)
2785 return;
2786
2787 switch (req->retval) {
2788 case FW_ENOMEM:
Vipul Pandya793dad92012-12-10 09:30:56 +00002789 set_bit(ACT_RETRY_NOMEM, &ep->com.history);
2790 if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) {
2791 send_fw_act_open_req(ep, atid);
2792 return;
2793 }
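		/* fall through */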
Vipul Pandya1cab7752012-12-10 09:30:55 +00002794 case FW_EADDRINUSE:
Vipul Pandya793dad92012-12-10 09:30:56 +00002795 set_bit(ACT_RETRY_INUSE, &ep->com.history);
2796 if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) {
2797 send_fw_act_open_req(ep, atid);
2798 return;
2799 }
Vipul Pandya1cab7752012-12-10 09:30:55 +00002800 break;
2801 default:
2802 pr_info("%s unexpected ofld conn wr retval %d\n",
2803 __func__, req->retval);
2804 break;
2805 }
Vipul Pandya793dad92012-12-10 09:30:56 +00002806 pr_err("active ofld_connect_wr failure %d atid %d\n",
2807 req->retval, atid);
2808 mutex_lock(&dev->rdev.stats.lock);
2809 dev->rdev.stats.act_ofld_conn_fails++;
2810 mutex_unlock(&dev->rdev.stats.lock);
Vipul Pandya1cab7752012-12-10 09:30:55 +00002811 connect_reply_upcall(ep, status2errno(req->retval));
Vipul Pandya793dad92012-12-10 09:30:56 +00002812 state_set(&ep->com, DEAD);
2813 remove_handle(dev, &dev->atid_idr, atid);
2814 cxgb4_free_atid(dev->rdev.lldi.tids, atid);
2815 dst_release(ep->dst);
2816 cxgb4_l2t_release(ep->l2t);
2817 c4iw_put_ep(&ep->com);
Vipul Pandya1cab7752012-12-10 09:30:55 +00002818}
2819
2820static void passive_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb,
2821 struct cpl_fw6_msg_ofld_connection_wr_rpl *req)
2822{
2823 struct sk_buff *rpl_skb;
2824 struct cpl_pass_accept_req *cpl;
2825 int ret;
2826
Paul Bolle710a3112013-02-05 20:51:30 +00002827 rpl_skb = (struct sk_buff *)(unsigned long)req->cookie;
Vipul Pandya1cab7752012-12-10 09:30:55 +00002828 BUG_ON(!rpl_skb);
2829 if (req->retval) {
2830 PDBG("%s passive open failure %d\n", __func__, req->retval);
Vipul Pandya793dad92012-12-10 09:30:56 +00002831 mutex_lock(&dev->rdev.stats.lock);
2832 dev->rdev.stats.pas_ofld_conn_fails++;
2833 mutex_unlock(&dev->rdev.stats.lock);
Vipul Pandya1cab7752012-12-10 09:30:55 +00002834 kfree_skb(rpl_skb);
2835 } else {
2836 cpl = (struct cpl_pass_accept_req *)cplhdr(rpl_skb);
2837 OPCODE_TID(cpl) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_REQ,
Vipul Pandyaef5d6352013-01-07 13:12:00 +00002838 (__force u32) htonl(
2839 (__force u32) req->tid)));
Vipul Pandya1cab7752012-12-10 09:30:55 +00002840 ret = pass_accept_req(dev, rpl_skb);
2841 if (!ret)
2842 kfree_skb(rpl_skb);
2843 }
2844 return;
2845}
2846
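/*
 * Deferred handler for CPL_FW6_MSG: dispatches CQE events and
 * FW_OFLD_CONNECTION_WR replies for the active and passive
 * firmware-offload connection paths.
 */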
2847static int deferred_fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
Steve Wise2f5b48c2010-09-10 11:15:36 -05002848{
2849 struct cpl_fw6_msg *rpl = cplhdr(skb);
Vipul Pandya1cab7752012-12-10 09:30:55 +00002850 struct cpl_fw6_msg_ofld_connection_wr_rpl *req;
2851
2852 switch (rpl->type) {
2853 case FW6_TYPE_CQE:
2854 c4iw_ev_dispatch(dev, (struct t4_cqe *)&rpl->data[0]);
2855 break;
2856 case FW6_TYPE_OFLD_CONNECTION_WR_RPL:
2857 req = (struct cpl_fw6_msg_ofld_connection_wr_rpl *)rpl->data;
2858 switch (req->t_state) {
2859 case TCP_SYN_SENT:
2860 active_ofld_conn_reply(dev, skb, req);
2861 break;
2862 case TCP_SYN_RECV:
2863 passive_ofld_conn_reply(dev, skb, req);
2864 break;
2865 default:
2866 pr_err("%s unexpected ofld conn wr state %d\n",
2867 __func__, req->t_state);
2868 break;
2869 }
2870 break;
2871 }
2872 return 0;
2873}
2874
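/*
 * Rewrite a CPL_RX_PKT carrying a raw SYN into a synthesized
 * cpl_pass_accept_req in place, parsing the TCP options from the
 * SYN so the skb can be fed through the normal passive-open path.
 */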
 2875static void build_cpl_pass_accept_req(struct sk_buff *skb, int stid, u8 tos)
2876{
2877 u32 l2info;
2878 u16 vlantag, len, hdr_len;
2879 u8 intf;
2880 struct cpl_rx_pkt *cpl = cplhdr(skb);
2881 struct cpl_pass_accept_req *req;
2882 struct tcp_options_received tmp_opt;
2883
2884 /* Store values from cpl_rx_pkt in temporary location. */
Vipul Pandyaef5d6352013-01-07 13:12:00 +00002885 vlantag = (__force u16) cpl->vlan;
2886 len = (__force u16) cpl->len;
2887 l2info = (__force u32) cpl->l2info;
2888 hdr_len = (__force u16) cpl->hdr_len;
Vipul Pandya1cab7752012-12-10 09:30:55 +00002889 intf = cpl->iff;
2890
2891 __skb_pull(skb, sizeof(*req) + sizeof(struct rss_header));
2892
2893 /*
 2894 * We need to parse the TCP options from the SYN packet
 2895 * to generate the cpl_pass_accept_req.
2896 */
2897 memset(&tmp_opt, 0, sizeof(tmp_opt));
2898 tcp_clear_options(&tmp_opt);
Vipul Pandyaef5d6352013-01-07 13:12:00 +00002899 tcp_parse_options(skb, &tmp_opt, NULL, 0, NULL);
Vipul Pandya1cab7752012-12-10 09:30:55 +00002900
2901 req = (struct cpl_pass_accept_req *)__skb_push(skb, sizeof(*req));
2902 memset(req, 0, sizeof(*req));
2903 req->l2info = cpu_to_be16(V_SYN_INTF(intf) |
Vipul Pandyaef5d6352013-01-07 13:12:00 +00002904 V_SYN_MAC_IDX(G_RX_MACIDX(
2905 (__force int) htonl(l2info))) |
Vipul Pandya1cab7752012-12-10 09:30:55 +00002906 F_SYN_XACT_MATCH);
Vipul Pandyaef5d6352013-01-07 13:12:00 +00002907 req->hdr_len = cpu_to_be32(V_SYN_RX_CHAN(G_RX_CHAN(
2908 (__force int) htonl(l2info))) |
2909 V_TCP_HDR_LEN(G_RX_TCPHDR_LEN(
2910 (__force int) htons(hdr_len))) |
2911 V_IP_HDR_LEN(G_RX_IPHDR_LEN(
2912 (__force int) htons(hdr_len))) |
2913 V_ETH_HDR_LEN(G_RX_ETHHDR_LEN(
2914 (__force int) htonl(l2info))));
2915 req->vlan = (__force __be16) vlantag;
2916 req->len = (__force __be16) len;
Vipul Pandya1cab7752012-12-10 09:30:55 +00002917 req->tos_stid = cpu_to_be32(PASS_OPEN_TID(stid) |
2918 PASS_OPEN_TOS(tos));
2919 req->tcpopt.mss = htons(tmp_opt.mss_clamp);
2920 if (tmp_opt.wscale_ok)
2921 req->tcpopt.wsf = tmp_opt.snd_wscale;
2922 req->tcpopt.tstamp = tmp_opt.saw_tstamp;
2923 if (tmp_opt.sack_ok)
2924 req->tcpopt.sack = 1;
2925 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_REQ, 0));
2926 return;
2927}
2928
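/*
 * Ask the firmware to complete the passive open with an
 * FW_OFLD_CONNECTION_WR. The original skb is stashed in the wr
 * cookie so the reply handler can resubmit it as a
 * cpl_pass_accept_req once the firmware supplies a TID.
 */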
2929static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb,
2930 __be32 laddr, __be16 lport,
2931 __be32 raddr, __be16 rport,
2932 u32 rcv_isn, u32 filter, u16 window,
2933 u32 rss_qid, u8 port_id)
2934{
2935 struct sk_buff *req_skb;
2936 struct fw_ofld_connection_wr *req;
2937 struct cpl_pass_accept_req *cpl = cplhdr(skb);
2938
2939 req_skb = alloc_skb(sizeof(struct fw_ofld_connection_wr), GFP_KERNEL);
2940 req = (struct fw_ofld_connection_wr *)__skb_put(req_skb, sizeof(*req));
2941 memset(req, 0, sizeof(*req));
2942 req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR) | FW_WR_COMPL(1));
2943 req->len16_pkd = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*req), 16)));
2944 req->le.version_cpl = htonl(F_FW_OFLD_CONNECTION_WR_CPL);
Vipul Pandyaef5d6352013-01-07 13:12:00 +00002945 req->le.filter = (__force __be32) filter;
Vipul Pandya1cab7752012-12-10 09:30:55 +00002946 req->le.lport = lport;
2947 req->le.pport = rport;
2948 req->le.u.ipv4.lip = laddr;
2949 req->le.u.ipv4.pip = raddr;
2950 req->tcb.rcv_nxt = htonl(rcv_isn + 1);
2951 req->tcb.rcv_adv = htons(window);
2952 req->tcb.t_state_to_astid =
2953 htonl(V_FW_OFLD_CONNECTION_WR_T_STATE(TCP_SYN_RECV) |
2954 V_FW_OFLD_CONNECTION_WR_RCV_SCALE(cpl->tcpopt.wsf) |
2955 V_FW_OFLD_CONNECTION_WR_ASTID(
2956 GET_PASS_OPEN_TID(ntohl(cpl->tos_stid))));
2957
2958 /*
2959 * We store the qid in opt2 which will be used by the firmware
2960 * to send us the wr response.
2961 */
2962 req->tcb.opt2 = htonl(V_RSS_QUEUE(rss_qid));
2963
2964 /*
 2965	 * We initialize the MSS index in the TCB to 0xF so that
 2966	 * when the driver sends cpl_pass_accept_rpl the TCB picks
 2967	 * up the correct value. If this were 0, TP would ignore
 2968	 * any value > 0 for the MSS index.
2969 */
2970 req->tcb.opt0 = cpu_to_be64(V_MSS_IDX(0xF));
Paul Bolle710a3112013-02-05 20:51:30 +00002971 req->cookie = (unsigned long)skb;
Vipul Pandya1cab7752012-12-10 09:30:55 +00002972
2973 set_wr_txq(req_skb, CPL_PRIORITY_CONTROL, port_id);
2974 cxgb4_ofld_send(dev->rdev.lldi.ports[0], req_skb);
2975}
2976
2977/*
 2978 * Handler for CPL_RX_PKT messages. These arrive when a filter is
 2979 * being used instead of a server to redirect a SYN packet. When a
 2980 * packet hits the filter it is redirected to the offload queue and
 2981 * the driver tries to establish the connection using a firmware
 2982 * work request.
2983 */
2984static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
2985{
2986 int stid;
2987 unsigned int filter;
2988 struct ethhdr *eh = NULL;
2989 struct vlan_ethhdr *vlan_eh = NULL;
2990 struct iphdr *iph;
2991 struct tcphdr *tcph;
2992 struct rss_header *rss = (void *)skb->data;
2993 struct cpl_rx_pkt *cpl = (void *)skb->data;
2994 struct cpl_pass_accept_req *req = (void *)(rss + 1);
2995 struct l2t_entry *e;
2996 struct dst_entry *dst;
2997 struct rtable *rt;
2998 struct c4iw_ep *lep;
2999 u16 window;
3000 struct port_info *pi;
3001 struct net_device *pdev;
3002 u16 rss_qid;
3003 int step;
3004 u32 tx_chan;
3005 struct neighbour *neigh;
3006
3007 /* Drop all non-SYN packets */
3008 if (!(cpl->l2info & cpu_to_be32(F_RXF_SYN)))
3009 goto reject;
3010
3011 /*
3012 * Drop all packets which did not hit the filter.
3013 * Unlikely to happen.
3014 */
3015 if (!(rss->filter_hit && rss->filter_tid))
3016 goto reject;
3017
3018 /*
 3019 * Calculate the server tid from the filter hit index in cpl_rx_pkt.
3020 */
Vipul Pandyaef5d6352013-01-07 13:12:00 +00003021 stid = (__force int) cpu_to_be32((__force u32) rss->hash_val)
3022 - dev->rdev.lldi.tids->sftid_base
Vipul Pandya1cab7752012-12-10 09:30:55 +00003023 + dev->rdev.lldi.tids->nstids;

        lep = (struct c4iw_ep *)lookup_stid(dev->rdev.lldi.tids, stid);
        if (!lep) {
                PDBG("%s connect request on invalid stid %d\n", __func__, stid);
                goto reject;
        }

        if (G_RX_ETHHDR_LEN(ntohl(cpl->l2info)) == ETH_HLEN) {
                eh = (struct ethhdr *)(req + 1);
                iph = (struct iphdr *)(eh + 1);
        } else {
                vlan_eh = (struct vlan_ethhdr *)(req + 1);
                iph = (struct iphdr *)(vlan_eh + 1);
                skb->vlan_tci = ntohs(cpl->vlan);
        }

        if (iph->version != 0x4)
                goto reject;

        tcph = (struct tcphdr *)(iph + 1);
        skb_set_network_header(skb, (void *)iph - (void *)rss);
        skb_set_transport_header(skb, (void *)tcph - (void *)rss);
        skb_get(skb);

        PDBG("%s lip 0x%x lport %u pip 0x%x pport %u tos %d\n", __func__,
             ntohl(iph->daddr), ntohs(tcph->dest), ntohl(iph->saddr),
             ntohs(tcph->source), iph->tos);

        rt = find_route(dev, iph->daddr, iph->saddr, tcph->dest, tcph->source,
                        iph->tos);
        if (!rt) {
                pr_err("%s - failed to find dst entry!\n", __func__);
                goto reject;
        }
        dst = &rt->dst;
        neigh = dst_neigh_lookup_skb(dst, skb);

        if (!neigh) {
                pr_err("%s - failed to allocate neigh!\n", __func__);
                goto free_dst;
        }

        if (neigh->dev->flags & IFF_LOOPBACK) {
                pdev = ip_dev_find(&init_net, iph->daddr);
                e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh, pdev, 0);
                pi = (struct port_info *)netdev_priv(pdev);
                tx_chan = cxgb4_port_chan(pdev);
                dev_put(pdev);
        } else {
                e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh, neigh->dev, 0);
                pi = (struct port_info *)netdev_priv(neigh->dev);
                tx_chan = cxgb4_port_chan(neigh->dev);
        }
        if (!e) {
                pr_err("%s - failed to allocate l2t entry!\n", __func__);
                goto free_dst;
        }

        step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
        rss_qid = dev->rdev.lldi.rxq_ids[pi->port_id * step];
        window = (__force u16) htons((__force u16)tcph->window);
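
        /*
         * htons() and ntohs() perform the same 16-bit byte swap, so the
         * expression above is effectively ntohs(tcph->window), i.e. the
         * TCP window in host order; the __force casts only serve to keep
         * sparse quiet about mixing __be16 and u16.
         */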

        /* Calculate the filter portion for the LE region. */
        filter = (__force unsigned int) cpu_to_be32(select_ntuple(dev, dst, e));

        /*
         * Synthesize the cpl_pass_accept_req. We have everything except
         * the TID. Once the firmware sends a reply with the TID, we
         * update the TID field in the cpl and pass it through the
         * regular cpl_pass_accept_req path.
         */
        build_cpl_pass_accept_req(skb, stid, iph->tos);
        send_fw_pass_open_req(dev, skb, iph->daddr, tcph->dest, iph->saddr,
                              tcph->source, ntohl(tcph->seq), filter, window,
                              rss_qid, pi->port_id);
        cxgb4_l2t_release(e);
free_dst:
        dst_release(dst);
reject:
        return 0;
}

/*
 * These are the real handlers that are called from a
 * work queue.
 */
static c4iw_handler_func work_handlers[NUM_CPL_CMDS] = {
        [CPL_ACT_ESTABLISH] = act_establish,
        [CPL_ACT_OPEN_RPL] = act_open_rpl,
        [CPL_RX_DATA] = rx_data,
        [CPL_ABORT_RPL_RSS] = abort_rpl,
        [CPL_ABORT_RPL] = abort_rpl,
        [CPL_PASS_OPEN_RPL] = pass_open_rpl,
        [CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl,
        [CPL_PASS_ACCEPT_REQ] = pass_accept_req,
        [CPL_PASS_ESTABLISH] = pass_establish,
        [CPL_PEER_CLOSE] = peer_close,
        [CPL_ABORT_REQ_RSS] = peer_abort,
        [CPL_CLOSE_CON_RPL] = close_con_rpl,
        [CPL_RDMA_TERMINATE] = terminate,
        [CPL_FW4_ACK] = fw4_ack,
        [CPL_FW6_MSG] = deferred_fw6_msg,
        [CPL_RX_PKT] = rx_pkt
};

static void process_timeout(struct c4iw_ep *ep)
{
        struct c4iw_qp_attributes attrs;
        int abort = 1;

        mutex_lock(&ep->com.mutex);
        PDBG("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid,
             ep->com.state);
        set_bit(TIMEDOUT, &ep->com.history);
        switch (ep->com.state) {
        case MPA_REQ_SENT:
                __state_set(&ep->com, ABORTING);
                connect_reply_upcall(ep, -ETIMEDOUT);
                break;
        case MPA_REQ_WAIT:
                __state_set(&ep->com, ABORTING);
                break;
        case CLOSING:
        case MORIBUND:
                if (ep->com.cm_id && ep->com.qp) {
                        attrs.next_state = C4IW_QP_STATE_ERROR;
                        c4iw_modify_qp(ep->com.qp->rhp,
                                       ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
                                       &attrs, 1);
                }
                __state_set(&ep->com, ABORTING);
                break;
        default:
                WARN(1, "%s unexpected state ep %p tid %u state %u\n",
                     __func__, ep, ep->hwtid, ep->com.state);
                abort = 0;
        }
        mutex_unlock(&ep->com.mutex);
        if (abort)
                abort_connection(ep, NULL, GFP_KERNEL);
        c4iw_put_ep(&ep->com);
}
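
/*
 * Note that abort_connection() above runs only after ep->com.mutex has
 * been dropped: the state decision is made under the mutex, but the
 * abort itself is issued outside it, presumably so the abort path can
 * take the endpoint's locks without deadlocking against this handler.
 */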

static void process_timedout_eps(void)
{
        struct c4iw_ep *ep;

        spin_lock_irq(&timeout_lock);
        while (!list_empty(&timeout_list)) {
                struct list_head *tmp;

                tmp = timeout_list.next;
                list_del(tmp);
                spin_unlock_irq(&timeout_lock);
                ep = list_entry(tmp, struct c4iw_ep, entry);
                process_timeout(ep);
                spin_lock_irq(&timeout_lock);
        }
        spin_unlock_irq(&timeout_lock);
}
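
/*
 * The loop above is the classic "drain a list under a spinlock" pattern:
 * each entry is unlinked while the lock is held, then the lock is dropped
 * so the (potentially sleeping) handler can run, and reacquired before
 * the next list_empty() check. A minimal generic sketch of the same
 * pattern, with hypothetical names:
 */
#if 0
static void drain_pending(spinlock_t *lock, struct list_head *pending,
                          void (*handle)(struct list_head *))
{
        spin_lock_irq(lock);
        while (!list_empty(pending)) {
                struct list_head *entry = pending->next;

                list_del(entry);
                spin_unlock_irq(lock);  /* handler may sleep */
                handle(entry);
                spin_lock_irq(lock);
        }
        spin_unlock_irq(lock);
}
#endif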

static void process_work(struct work_struct *work)
{
        struct sk_buff *skb = NULL;
        struct c4iw_dev *dev;
        struct cpl_act_establish *rpl;
        unsigned int opcode;
        int ret;

        while ((skb = skb_dequeue(&rxq))) {
                rpl = cplhdr(skb);
                dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *)));
                opcode = rpl->ot.opcode;

                BUG_ON(!work_handlers[opcode]);
                ret = work_handlers[opcode](dev, skb);
                if (!ret)
                        kfree_skb(skb);
        }
        process_timedout_eps();
}

static DECLARE_WORK(skb_work, process_work);

static void ep_timeout(unsigned long arg)
{
        struct c4iw_ep *ep = (struct c4iw_ep *)arg;
        int kickit = 0;

        spin_lock(&timeout_lock);
        if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) {
                list_add_tail(&ep->entry, &timeout_list);
                kickit = 1;
        }
        spin_unlock(&timeout_lock);
        if (kickit)
                queue_work(workq, &skb_work);
}
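
/*
 * The test_and_set_bit(TIMEOUT, ...) above guarantees that an endpoint
 * is linked onto timeout_list at most once, even if the timer fires
 * again before the worker runs: only the caller that first sets the
 * bit queues the work.
 */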

/*
 * All the CM events are handled on a work queue so that they are
 * processed in a safe, sleepable context.
 */
static int sched(struct c4iw_dev *dev, struct sk_buff *skb)
{
        /*
         * Save dev in the skb->cb area.
         */
        *((struct c4iw_dev **) (skb->cb + sizeof(void *))) = dev;

        /*
         * Queue the skb and schedule the worker thread.
         */
        skb_queue_tail(&rxq, skb);
        queue_work(workq, &skb_work);
        return 0;
}
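
/*
 * The store into skb->cb above pairs with the load in process_work():
 *
 *      dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *)));
 *
 * i.e. the second pointer-sized slot of the skb control-block scratch
 * area carries the c4iw_dev pointer across the queue/dequeue boundary.
 */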

static int set_tcb_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
        struct cpl_set_tcb_rpl *rpl = cplhdr(skb);

        if (rpl->status != CPL_ERR_NONE) {
                printk(KERN_ERR MOD "Unexpected SET_TCB_RPL status %u "
                       "for tid %u\n", rpl->status, GET_TID(rpl));
        }
        kfree_skb(skb);
        return 0;
}

static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
{
        struct cpl_fw6_msg *rpl = cplhdr(skb);
        struct c4iw_wr_wait *wr_waitp;
        int ret;

        PDBG("%s type %u\n", __func__, rpl->type);

        switch (rpl->type) {
        case FW6_TYPE_WR_RPL:
                ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff);
                wr_waitp = (struct c4iw_wr_wait *)(__force unsigned long) rpl->data[1];
                PDBG("%s wr_waitp %p ret %u\n", __func__, wr_waitp, ret);
                if (wr_waitp)
                        c4iw_wake_up(wr_waitp, ret ? -ret : 0);
                kfree_skb(skb);
                break;
        case FW6_TYPE_CQE:
        case FW6_TYPE_OFLD_CONNECTION_WR_RPL:
                sched(dev, skb);
                break;
        default:
                printk(KERN_ERR MOD "%s unexpected fw6 msg type %u\n",
                       __func__, rpl->type);
                kfree_skb(skb);
                break;
        }
        return 0;
}
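
/*
 * A minimal sketch (assumed usage, not a verbatim call site) of the
 * posting side that FW6_TYPE_WR_RPL completes: the poster embeds the
 * address of a c4iw_wr_wait in the work request cookie and blocks until
 * the c4iw_wake_up() above fires.
 */
#if 0
struct c4iw_wr_wait wr_wait;
int ret;

c4iw_init_wr_wait(&wr_wait);
/* ... build the FW WR so its reply cookie carries &wr_wait ... */
ret = c4iw_wait_for_reply(&dev->rdev, &wr_wait, 0, 0, __func__);
#endif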

static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb)
{
        struct cpl_abort_req_rss *req = cplhdr(skb);
        struct c4iw_ep *ep;
        struct tid_info *t = dev->rdev.lldi.tids;
        unsigned int tid = GET_TID(req);

        ep = lookup_tid(t, tid);
        if (!ep) {
                printk(KERN_WARNING MOD
                       "Abort on non-existent endpoint, tid %d\n", tid);
                kfree_skb(skb);
                return 0;
        }
        if (is_neg_adv_abort(req->status)) {
                PDBG("%s neg_adv_abort ep %p tid %u\n", __func__, ep,
                     ep->hwtid);
                kfree_skb(skb);
                return 0;
        }
        PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
             ep->com.state);

        /*
         * Wake up any threads in rdma_init() or rdma_fini().
         * However, if we are on MPAv2 and want to retry with MPAv1,
         * then don't wake up yet.
         */
        if (mpa_rev == 2 && !ep->tried_with_mpa_v1) {
                if (ep->com.state != MPA_REQ_SENT)
                        c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
        } else
                c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
        sched(dev, skb);
        return 0;
}
3324
Roland Dreierbe4c9ba2010-05-05 14:45:40 -07003325/*
3326 * Most upcalls from the T4 Core go to sched() to
3327 * schedule the processing on a work queue.
3328 */
3329c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS] = {
3330 [CPL_ACT_ESTABLISH] = sched,
3331 [CPL_ACT_OPEN_RPL] = sched,
3332 [CPL_RX_DATA] = sched,
3333 [CPL_ABORT_RPL_RSS] = sched,
3334 [CPL_ABORT_RPL] = sched,
3335 [CPL_PASS_OPEN_RPL] = sched,
3336 [CPL_CLOSE_LISTSRV_RPL] = sched,
3337 [CPL_PASS_ACCEPT_REQ] = sched,
3338 [CPL_PASS_ESTABLISH] = sched,
3339 [CPL_PEER_CLOSE] = sched,
3340 [CPL_CLOSE_CON_RPL] = sched,
Steve Wise8da7e7a2011-06-14 20:59:27 +00003341 [CPL_ABORT_REQ_RSS] = peer_abort_intr,
Roland Dreierbe4c9ba2010-05-05 14:45:40 -07003342 [CPL_RDMA_TERMINATE] = sched,
3343 [CPL_FW4_ACK] = sched,
3344 [CPL_SET_TCB_RPL] = set_tcb_rpl,
Vipul Pandya1cab7752012-12-10 09:30:55 +00003345 [CPL_FW6_MSG] = fw6_msg,
3346 [CPL_RX_PKT] = sched
Roland Dreierbe4c9ba2010-05-05 14:45:40 -07003347};

int __init c4iw_cm_init(void)
{
        spin_lock_init(&timeout_lock);
        skb_queue_head_init(&rxq);

        workq = create_singlethread_workqueue("iw_cxgb4");
        if (!workq)
                return -ENOMEM;

        return 0;
}
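
/*
 * A single-threaded workqueue is used so that all deferred CM work runs
 * serialized on one kernel thread; presumably this is what lets the CPL
 * handlers process a connection's events in the order they arrived
 * without extra per-endpoint locking in the dispatch path.
 */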

void __exit c4iw_cm_term(void)
{
        WARN_ON(!list_empty(&timeout_list));
        flush_workqueue(workq);
        destroy_workqueue(workq);
}
3366}