/*
 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_mad.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_sa.h>
#include <rdma/ib_cache.h>

#include <linux/mlx4/cmd.h>
#include <linux/gfp.h>
#include <rdma/ib_pma.h>

#include "mlx4_ib.h"

enum {
	MLX4_IB_VENDOR_CLASS1 = 0x9,
	MLX4_IB_VENDOR_CLASS2 = 0xa
};

#define MLX4_TUN_SEND_WRID_SHIFT 34
#define MLX4_TUN_QPN_SHIFT 32
#define MLX4_TUN_WRID_RECV (((u64) 1) << MLX4_TUN_SEND_WRID_SHIFT)
#define MLX4_TUN_SET_WRID_QPN(a) (((u64) ((a) & 0x3)) << MLX4_TUN_QPN_SHIFT)

#define MLX4_TUN_IS_RECV(a) (((a) >> MLX4_TUN_SEND_WRID_SHIFT) & 0x1)
#define MLX4_TUN_WRID_QPN(a) (((a) >> MLX4_TUN_QPN_SHIFT) & 0x3)
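
/*
 * A sketch of the 64-bit work-request ID layout implied by the macros
 * above: bits [31:0] carry the ring-buffer index, bits [33:32] carry the
 * proxy QP type (0 = SMI, 1 = GSI), and bit 34 is set on receive WRs so
 * that receive completions can be told apart from send completions.
 */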

struct mlx4_mad_rcv_buf {
	struct ib_grh grh;
	u8 payload[256];
} __packed;

struct mlx4_mad_snd_buf {
	u8 payload[256];
} __packed;

struct mlx4_tunnel_mad {
	struct ib_grh grh;
	struct mlx4_ib_tunnel_header hdr;
	struct ib_mad mad;
} __packed;

struct mlx4_rcv_tunnel_mad {
	struct mlx4_rcv_tunnel_hdr hdr;
	struct ib_grh grh;
	struct ib_mad mad;
} __packed;

static void handle_client_rereg_event(struct mlx4_ib_dev *dev, u8 port_num);

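/*
 * Demux TIDs generated here carry 0xff in the most-significant byte of
 * the TID. mlx4_ib_demux_mad() below reads that byte back as a slave id
 * when a response arrives, with 255 meaning the MAD belongs to dom0, so
 * master-initiated transactions are routed back to the master.
 */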
__be64 mlx4_ib_get_new_demux_tid(struct mlx4_ib_demux_ctx *ctx)
{
	return cpu_to_be64(atomic_inc_return(&ctx->tid)) |
		cpu_to_be64(0xff00000000000000LL);
}

int mlx4_MAD_IFC(struct mlx4_ib_dev *dev, int mad_ifc_flags,
		 int port, struct ib_wc *in_wc, struct ib_grh *in_grh,
		 void *in_mad, void *response_mad)
{
	struct mlx4_cmd_mailbox *inmailbox, *outmailbox;
	void *inbox;
	int err;
	u32 in_modifier = port;
	u8 op_modifier = 0;

	inmailbox = mlx4_alloc_cmd_mailbox(dev->dev);
	if (IS_ERR(inmailbox))
		return PTR_ERR(inmailbox);
	inbox = inmailbox->buf;

	outmailbox = mlx4_alloc_cmd_mailbox(dev->dev);
	if (IS_ERR(outmailbox)) {
		mlx4_free_cmd_mailbox(dev->dev, inmailbox);
		return PTR_ERR(outmailbox);
	}

	memcpy(inbox, in_mad, 256);

	/*
	 * Key check traps can't be generated unless we have in_wc to
	 * tell us where to send the trap.
	 */
	if ((mad_ifc_flags & MLX4_MAD_IFC_IGNORE_MKEY) || !in_wc)
		op_modifier |= 0x1;
	if ((mad_ifc_flags & MLX4_MAD_IFC_IGNORE_BKEY) || !in_wc)
		op_modifier |= 0x2;
	if (mlx4_is_mfunc(dev->dev) &&
	    (mad_ifc_flags & MLX4_MAD_IFC_NET_VIEW || in_wc))
		op_modifier |= 0x8;
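
	/*
	 * A summary of the MAD_IFC op_modifier bits as used in this
	 * function: 0x1 skips the M_Key check, 0x2 skips the B_Key check,
	 * 0x4 (set below) tells firmware that WC info follows the MAD,
	 * and 0x8 requests network view via a native (non-wrapped) command.
	 */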

	if (in_wc) {
		struct {
			__be32 my_qpn;
			u32 reserved1;
			__be32 rqpn;
			u8 sl;
			u8 g_path;
			u16 reserved2[2];
			__be16 pkey;
			u32 reserved3[11];
			u8 grh[40];
		} *ext_info;

		memset(inbox + 256, 0, 256);
		ext_info = inbox + 256;

		ext_info->my_qpn = cpu_to_be32(in_wc->qp->qp_num);
		ext_info->rqpn = cpu_to_be32(in_wc->src_qp);
		ext_info->sl = in_wc->sl << 4;
		ext_info->g_path = in_wc->dlid_path_bits |
			(in_wc->wc_flags & IB_WC_GRH ? 0x80 : 0);
		ext_info->pkey = cpu_to_be16(in_wc->pkey_index);

		if (in_grh)
			memcpy(ext_info->grh, in_grh, 40);

		op_modifier |= 0x4;

		in_modifier |= in_wc->slid << 16;
	}

	err = mlx4_cmd_box(dev->dev, inmailbox->dma, outmailbox->dma, in_modifier,
			   mlx4_is_master(dev->dev) ? (op_modifier & ~0x8) : op_modifier,
			   MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
			   (op_modifier & 0x8) ? MLX4_CMD_NATIVE : MLX4_CMD_WRAPPED);

	if (!err)
		memcpy(response_mad, outmailbox->buf, 256);

	mlx4_free_cmd_mailbox(dev->dev, inmailbox);
	mlx4_free_cmd_mailbox(dev->dev, outmailbox);

	return err;
}

static void update_sm_ah(struct mlx4_ib_dev *dev, u8 port_num, u16 lid, u8 sl)
{
	struct ib_ah *new_ah;
	struct ib_ah_attr ah_attr;
	unsigned long flags;

	if (!dev->send_agent[port_num - 1][0])
		return;

	memset(&ah_attr, 0, sizeof ah_attr);
	ah_attr.dlid = lid;
	ah_attr.sl = sl;
	ah_attr.port_num = port_num;

	new_ah = ib_create_ah(dev->send_agent[port_num - 1][0]->qp->pd,
			      &ah_attr);
	if (IS_ERR(new_ah))
		return;

	spin_lock_irqsave(&dev->sm_lock, flags);
	if (dev->sm_ah[port_num - 1])
		ib_destroy_ah(dev->sm_ah[port_num - 1]);
	dev->sm_ah[port_num - 1] = new_ah;
	spin_unlock_irqrestore(&dev->sm_lock, flags);
}

/*
 * Snoop SM MADs for port info, GUID info, and P_Key table sets, so we can
 * synthesize LID change, Client-Rereg, GID change, and P_Key change events.
 */
static void smp_snoop(struct ib_device *ibdev, u8 port_num, struct ib_mad *mad,
		      u16 prev_lid)
{
	struct ib_port_info *pinfo;
	u16 lid;
	__be16 *base;
	u32 bn, pkey_change_bitmap;
	int i;


	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
	     mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
	    mad->mad_hdr.method == IB_MGMT_METHOD_SET)
		switch (mad->mad_hdr.attr_id) {
		case IB_SMP_ATTR_PORT_INFO:
			pinfo = (struct ib_port_info *) ((struct ib_smp *) mad)->data;
			lid = be16_to_cpu(pinfo->lid);

			update_sm_ah(dev, port_num,
				     be16_to_cpu(pinfo->sm_lid),
				     pinfo->neighbormtu_mastersmsl & 0xf);

			if (pinfo->clientrereg_resv_subnetto & 0x80)
				handle_client_rereg_event(dev, port_num);

			if (prev_lid != lid)
				mlx4_ib_dispatch_event(dev, port_num,
						       IB_EVENT_LID_CHANGE);
			break;

		case IB_SMP_ATTR_PKEY_TABLE:
			if (!mlx4_is_mfunc(dev->dev)) {
				mlx4_ib_dispatch_event(dev, port_num,
						       IB_EVENT_PKEY_CHANGE);
				break;
			}

			bn = be32_to_cpu(((struct ib_smp *)mad)->attr_mod) & 0xFFFF;
			base = (__be16 *) &(((struct ib_smp *)mad)->data[0]);
			pkey_change_bitmap = 0;
			for (i = 0; i < 32; i++) {
				pr_debug("PKEY[%d] = x%x\n",
					 i + bn*32, be16_to_cpu(base[i]));
				if (be16_to_cpu(base[i]) !=
				    dev->pkeys.phys_pkey_cache[port_num - 1][i + bn*32]) {
					pkey_change_bitmap |= (1 << i);
					dev->pkeys.phys_pkey_cache[port_num - 1][i + bn*32] =
						be16_to_cpu(base[i]);
				}
			}
			pr_debug("PKEY Change event: port=%d, "
				 "block=0x%x, change_bitmap=0x%x\n",
				 port_num, bn, pkey_change_bitmap);

			if (pkey_change_bitmap)
				mlx4_ib_dispatch_event(dev, port_num,
						       IB_EVENT_PKEY_CHANGE);

			break;

		case IB_SMP_ATTR_GUID_INFO:
			/* paravirtualized master's guid is guid 0 -- does not change */
			if (!mlx4_is_master(dev->dev))
				mlx4_ib_dispatch_event(dev, port_num,
						       IB_EVENT_GID_CHANGE);
			break;
		default:
			break;
		}
}

static void node_desc_override(struct ib_device *dev,
			       struct ib_mad *mad)
{
	unsigned long flags;

	if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
	     mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
	    mad->mad_hdr.method == IB_MGMT_METHOD_GET_RESP &&
	    mad->mad_hdr.attr_id == IB_SMP_ATTR_NODE_DESC) {
		spin_lock_irqsave(&to_mdev(dev)->sm_lock, flags);
		memcpy(((struct ib_smp *) mad)->data, dev->node_desc, 64);
		spin_unlock_irqrestore(&to_mdev(dev)->sm_lock, flags);
	}
}

static void forward_trap(struct mlx4_ib_dev *dev, u8 port_num, struct ib_mad *mad)
{
	int qpn = mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED;
	struct ib_mad_send_buf *send_buf;
	struct ib_mad_agent *agent = dev->send_agent[port_num - 1][qpn];
	int ret;
	unsigned long flags;

	if (agent) {
		send_buf = ib_create_send_mad(agent, qpn, 0, 0, IB_MGMT_MAD_HDR,
					      IB_MGMT_MAD_DATA, GFP_ATOMIC);
		if (IS_ERR(send_buf))
			return;
		/*
		 * We rely here on the fact that MLX QPs don't use the
		 * address handle after the send is posted (this is
		 * wrong following the IB spec strictly, but we know
		 * it's OK for our devices).
		 */
		spin_lock_irqsave(&dev->sm_lock, flags);
		memcpy(send_buf->mad, mad, sizeof *mad);
		if ((send_buf->ah = dev->sm_ah[port_num - 1]))
			ret = ib_post_send_mad(send_buf, NULL);
		else
			ret = -EINVAL;
		spin_unlock_irqrestore(&dev->sm_lock, flags);

		if (ret)
			ib_free_send_mad(send_buf);
	}
}

static int mlx4_ib_demux_sa_handler(struct ib_device *ibdev, int port, int slave,
				    struct ib_sa_mad *sa_mad)
{
	int ret = 0;

	/* dispatch to different sa handlers */
	switch (be16_to_cpu(sa_mad->mad_hdr.attr_id)) {
	case IB_SA_ATTR_MC_MEMBER_REC:
		ret = mlx4_ib_mcg_demux_handler(ibdev, port, slave, sa_mad);
		break;
	default:
		break;
	}
	return ret;
}

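/*
 * Map a port GUID back to the slave that owns it by scanning the cached
 * per-slave GUIDs for the port; returns the slave number, or -1 if no
 * slave matches.
 */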
int mlx4_ib_find_real_gid(struct ib_device *ibdev, u8 port, __be64 guid)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	int i;

	for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
		if (dev->sriov.demux[port - 1].guid_cache[i] == guid)
			return i;
	}
	return -1;
}


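/*
 * P_Key membership note (per the IB spec): the top bit of a P_Key
 * distinguishes full membership (set) from limited/partial membership
 * (clear). Given a physical pkey index, the helper below reports the
 * indices of both the full and the partial variants of that pkey.
 */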
static int get_pkey_phys_indices(struct mlx4_ib_dev *ibdev, u8 port, u8 ph_pkey_ix,
				 u8 *full_pk_ix, u8 *partial_pk_ix,
				 int *is_full_member)
{
	u16 search_pkey;
	int fm;
	int err = 0;
	u16 pk;

	err = ib_get_cached_pkey(&ibdev->ib_dev, port, ph_pkey_ix, &search_pkey);
	if (err)
		return err;

	fm = (search_pkey & 0x8000) ? 1 : 0;
	if (fm) {
		*full_pk_ix = ph_pkey_ix;
		search_pkey &= 0x7FFF;
	} else {
		*partial_pk_ix = ph_pkey_ix;
		search_pkey |= 0x8000;
	}

	if (ib_find_exact_cached_pkey(&ibdev->ib_dev, port, search_pkey, &pk))
		pk = 0xFFFF;

	if (fm)
		*partial_pk_ix = (pk & 0xFF);
	else
		*full_pk_ix = (pk & 0xFF);

	*is_full_member = fm;
	return err;
}

int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
			  enum ib_qp_type dest_qpt, struct ib_wc *wc,
			  struct ib_grh *grh, struct ib_mad *mad)
{
	struct ib_sge list;
	struct ib_send_wr wr, *bad_wr;
	struct mlx4_ib_demux_pv_ctx *tun_ctx;
	struct mlx4_ib_demux_pv_qp *tun_qp;
	struct mlx4_rcv_tunnel_mad *tun_mad;
	struct ib_ah_attr attr;
	struct ib_ah *ah;
	struct ib_qp *src_qp = NULL;
	unsigned tun_tx_ix = 0;
	int dqpn;
	int ret = 0;
	int i;
	int is_full_member = 0;
	u16 tun_pkey_ix;
	u8 ph_pkey_ix, full_pk_ix = 0, partial_pk_ix = 0;

	if (dest_qpt > IB_QPT_GSI)
		return -EINVAL;

	tun_ctx = dev->sriov.demux[port-1].tun[slave];

	/* check if proxy qp created */
	if (!tun_ctx || tun_ctx->state != DEMUX_PV_STATE_ACTIVE)
		return -EAGAIN;

	/* QP0 forwarding only for Dom0 */
	if (!dest_qpt && (mlx4_master_func_num(dev->dev) != slave))
		return -EINVAL;

	if (!dest_qpt)
		tun_qp = &tun_ctx->qp[0];
	else
		tun_qp = &tun_ctx->qp[1];

	/* compute pkey index for slave */
	/* get physical pkey -- virtualized Dom0 pkey to phys */
	if (dest_qpt) {
		ph_pkey_ix =
			dev->pkeys.virt2phys_pkey[mlx4_master_func_num(dev->dev)][port - 1][wc->pkey_index];

		/* now, translate this to the slave pkey index */
		ret = get_pkey_phys_indices(dev, port, ph_pkey_ix, &full_pk_ix,
					    &partial_pk_ix, &is_full_member);
		if (ret)
			return -EINVAL;

		for (i = 0; i < dev->dev->caps.pkey_table_len[port]; i++) {
			if ((dev->pkeys.virt2phys_pkey[slave][port - 1][i] == full_pk_ix) ||
			    (is_full_member &&
			     (dev->pkeys.virt2phys_pkey[slave][port - 1][i] == partial_pk_ix)))
				break;
		}
		if (i == dev->dev->caps.pkey_table_len[port])
			return -EINVAL;
		tun_pkey_ix = i;
	} else
		tun_pkey_ix = dev->pkeys.virt2phys_pkey[slave][port - 1][0];

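	/*
	 * Destination proxy QP number, as a sketch of the layout used
	 * here: each slave owns a block of eight QPNs starting at
	 * sqp_start + 8 * slave, and the offset within the block encodes
	 * the port and whether this targets the QP0 or QP1 proxy.
	 */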
	dqpn = dev->dev->caps.sqp_start + 8 * slave + port + (dest_qpt * 2) - 1;

	/* get tunnel tx data buf for slave */
	src_qp = tun_qp->qp;

	/* create ah. Just need an empty one with the port num for the post send.
	 * The driver will set the force loopback bit in post_send */
	memset(&attr, 0, sizeof attr);
	attr.port_num = port;
	ah = ib_create_ah(tun_ctx->pd, &attr);
	if (IS_ERR(ah))
		return -ENOMEM;

	/* allocate the tunnel tx buffer only after the failure checks above have passed */
	spin_lock(&tun_qp->tx_lock);
	if (tun_qp->tx_ix_head - tun_qp->tx_ix_tail >=
	    (MLX4_NUM_TUNNEL_BUFS - 1))
		ret = -EAGAIN;
	else
		tun_tx_ix = (++tun_qp->tx_ix_head) & (MLX4_NUM_TUNNEL_BUFS - 1);
	spin_unlock(&tun_qp->tx_lock);
	if (ret)
		goto out;

	tun_mad = (struct mlx4_rcv_tunnel_mad *) (tun_qp->tx_ring[tun_tx_ix].buf.addr);
	if (tun_qp->tx_ring[tun_tx_ix].ah)
		ib_destroy_ah(tun_qp->tx_ring[tun_tx_ix].ah);
	tun_qp->tx_ring[tun_tx_ix].ah = ah;
	ib_dma_sync_single_for_cpu(&dev->ib_dev,
				   tun_qp->tx_ring[tun_tx_ix].buf.map,
				   sizeof (struct mlx4_rcv_tunnel_mad),
				   DMA_TO_DEVICE);

	/* copy over to tunnel buffer */
	if (grh)
		memcpy(&tun_mad->grh, grh, sizeof *grh);
	memcpy(&tun_mad->mad, mad, sizeof *mad);

	/* adjust tunnel data */
	tun_mad->hdr.pkey_index = cpu_to_be16(tun_pkey_ix);
	tun_mad->hdr.sl_vid = cpu_to_be16(((u16)(wc->sl)) << 12);
	tun_mad->hdr.slid_mac_47_32 = cpu_to_be16(wc->slid);
	tun_mad->hdr.flags_src_qp = cpu_to_be32(wc->src_qp & 0xFFFFFF);
	tun_mad->hdr.g_ml_path = (grh && (wc->wc_flags & IB_WC_GRH)) ? 0x80 : 0;

	ib_dma_sync_single_for_device(&dev->ib_dev,
				      tun_qp->tx_ring[tun_tx_ix].buf.map,
				      sizeof (struct mlx4_rcv_tunnel_mad),
				      DMA_TO_DEVICE);

	list.addr = tun_qp->tx_ring[tun_tx_ix].buf.map;
	list.length = sizeof (struct mlx4_rcv_tunnel_mad);
	list.lkey = tun_ctx->mr->lkey;

	wr.wr.ud.ah = ah;
	wr.wr.ud.port_num = port;
	wr.wr.ud.remote_qkey = IB_QP_SET_QKEY;
	wr.wr.ud.remote_qpn = dqpn;
	wr.next = NULL;
	wr.wr_id = ((u64) tun_tx_ix) | MLX4_TUN_SET_WRID_QPN(dest_qpt);
	wr.sg_list = &list;
	wr.num_sge = 1;
	wr.opcode = IB_WR_SEND;
	wr.send_flags = IB_SEND_SIGNALED;

	ret = ib_post_send(src_qp, &wr, &bad_wr);
out:
	if (ret)
		ib_destroy_ah(ah);
	return ret;
}

static int mlx4_ib_demux_mad(struct ib_device *ibdev, u8 port,
			     struct ib_wc *wc, struct ib_grh *grh,
			     struct ib_mad *mad)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	int err;
	int slave;
	u8 *slave_id;

	/* Initially assume that this mad is for us */
	slave = mlx4_master_func_num(dev->dev);

	/* See if the slave id is encoded in a response mad */
	if (mad->mad_hdr.method & 0x80) {
		slave_id = (u8 *) &mad->mad_hdr.tid;
		slave = *slave_id;
		if (slave != 255) /* 255 indicates the dom0 */
			*slave_id = 0; /* remap tid */
	}

	/* If a grh is present, we demux according to it */
	if (wc->wc_flags & IB_WC_GRH) {
		slave = mlx4_ib_find_real_gid(ibdev, port, grh->dgid.global.interface_id);
		if (slave < 0) {
			mlx4_ib_warn(ibdev, "failed matching grh\n");
			return -ENOENT;
		}
	}
	/* Class-specific handling */
	switch (mad->mad_hdr.mgmt_class) {
	case IB_MGMT_CLASS_SUBN_ADM:
		if (mlx4_ib_demux_sa_handler(ibdev, port, slave,
					     (struct ib_sa_mad *) mad))
			return 0;
		break;
	case IB_MGMT_CLASS_CM:
		if (mlx4_ib_demux_cm_handler(ibdev, port, &slave, mad))
			return 0;
		break;
	case IB_MGMT_CLASS_DEVICE_MGMT:
		if (mad->mad_hdr.method != IB_MGMT_METHOD_GET_RESP)
			return 0;
		break;
	default:
		/* Drop unsupported classes for slaves in tunnel mode */
		if (slave != mlx4_master_func_num(dev->dev)) {
			pr_debug("dropping unsupported ingress mad from class:%d "
				 "for slave:%d\n", mad->mad_hdr.mgmt_class, slave);
			return 0;
		}
	}
	/* make sure a slave id of 255 (the dom0 marker) did not slip through unhandled */
	if (slave >= dev->dev->caps.sqp_demux) {
		mlx4_ib_warn(ibdev, "slave id: %d is bigger than allowed:%d\n",
			     slave, dev->dev->caps.sqp_demux);
		return -ENOENT;
	}

	err = mlx4_ib_send_to_slave(dev, slave, port, wc->qp->qp_type, wc, grh, mad);
	if (err)
		pr_debug("failed sending to slave %d via tunnel qp (%d)\n",
			 slave, err);
	return 0;
}

static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
			  struct ib_wc *in_wc, struct ib_grh *in_grh,
			  struct ib_mad *in_mad, struct ib_mad *out_mad)
{
	u16 slid, prev_lid = 0;
	int err;
	struct ib_port_attr pattr;

	if (in_wc && in_wc->qp->qp_num) {
		pr_debug("received MAD: slid:%d sqpn:%d "
			 "dlid_bits:%d dqpn:%d wc_flags:0x%x, cls %x, mtd %x, atr %x\n",
			 in_wc->slid, in_wc->src_qp,
			 in_wc->dlid_path_bits,
			 in_wc->qp->qp_num,
			 in_wc->wc_flags,
			 in_mad->mad_hdr.mgmt_class, in_mad->mad_hdr.method,
			 be16_to_cpu(in_mad->mad_hdr.attr_id));
		if (in_wc->wc_flags & IB_WC_GRH) {
			pr_debug("sgid_hi:0x%016llx sgid_lo:0x%016llx\n",
				 be64_to_cpu(in_grh->sgid.global.subnet_prefix),
				 be64_to_cpu(in_grh->sgid.global.interface_id));
			pr_debug("dgid_hi:0x%016llx dgid_lo:0x%016llx\n",
				 be64_to_cpu(in_grh->dgid.global.subnet_prefix),
				 be64_to_cpu(in_grh->dgid.global.interface_id));
		}
	}

	slid = in_wc ? in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE);

	if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP && slid == 0) {
		forward_trap(to_mdev(ibdev), port_num, in_mad);
		return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
	}

	if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
	    in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
		if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET &&
		    in_mad->mad_hdr.method != IB_MGMT_METHOD_SET &&
		    in_mad->mad_hdr.method != IB_MGMT_METHOD_TRAP_REPRESS)
			return IB_MAD_RESULT_SUCCESS;

		/*
		 * Don't process SMInfo queries -- the SMA can't handle them.
		 */
		if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO)
			return IB_MAD_RESULT_SUCCESS;
	} else if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT ||
		   in_mad->mad_hdr.mgmt_class == MLX4_IB_VENDOR_CLASS1 ||
		   in_mad->mad_hdr.mgmt_class == MLX4_IB_VENDOR_CLASS2 ||
		   in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_CONG_MGMT) {
		if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET &&
		    in_mad->mad_hdr.method != IB_MGMT_METHOD_SET)
			return IB_MAD_RESULT_SUCCESS;
	} else
		return IB_MAD_RESULT_SUCCESS;

	if ((in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
	     in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
	    in_mad->mad_hdr.method == IB_MGMT_METHOD_SET &&
	    in_mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO &&
	    !ib_query_port(ibdev, port_num, &pattr))
		prev_lid = pattr.lid;

	err = mlx4_MAD_IFC(to_mdev(ibdev),
			   (mad_flags & IB_MAD_IGNORE_MKEY ? MLX4_MAD_IFC_IGNORE_MKEY : 0) |
			   (mad_flags & IB_MAD_IGNORE_BKEY ? MLX4_MAD_IFC_IGNORE_BKEY : 0) |
			   MLX4_MAD_IFC_NET_VIEW,
			   port_num, in_wc, in_grh, in_mad, out_mad);
	if (err)
		return IB_MAD_RESULT_FAILURE;

	if (!out_mad->mad_hdr.status) {
		if (!(to_mdev(ibdev)->dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV))
			smp_snoop(ibdev, port_num, in_mad, prev_lid);
		node_desc_override(ibdev, out_mad);
	}

	/* set return bit in status of directed route responses */
	if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
		out_mad->mad_hdr.status |= cpu_to_be16(1 << 15);

	if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS)
		/* no response for trap repress */
		return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;

	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}

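/*
 * Convert raw mlx4 flow counters into PMA PortCounters fields. The byte
 * counters are shifted right by two because the PMA PortXmitData and
 * PortRcvData attributes count 32-bit words rather than bytes.
 */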
static void edit_counter(struct mlx4_counter *cnt,
			 struct ib_pma_portcounters *pma_cnt)
{
	pma_cnt->port_xmit_data = cpu_to_be32((be64_to_cpu(cnt->tx_bytes)>>2));
	pma_cnt->port_rcv_data = cpu_to_be32((be64_to_cpu(cnt->rx_bytes)>>2));
	pma_cnt->port_xmit_packets = cpu_to_be32(be64_to_cpu(cnt->tx_frames));
	pma_cnt->port_rcv_packets = cpu_to_be32(be64_to_cpu(cnt->rx_frames));
}

static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
			    struct ib_wc *in_wc, struct ib_grh *in_grh,
			    struct ib_mad *in_mad, struct ib_mad *out_mad)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	int err;
	u32 inmod = dev->counters[port_num - 1] & 0xffff;
	u8 mode;

	if (in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_PERF_MGMT)
		return -EINVAL;

	mailbox = mlx4_alloc_cmd_mailbox(dev->dev);
	if (IS_ERR(mailbox))
		return IB_MAD_RESULT_FAILURE;

	err = mlx4_cmd_box(dev->dev, 0, mailbox->dma, inmod, 0,
			   MLX4_CMD_QUERY_IF_STAT, MLX4_CMD_TIME_CLASS_C,
			   MLX4_CMD_WRAPPED);
	if (err)
		err = IB_MAD_RESULT_FAILURE;
	else {
		memset(out_mad->data, 0, sizeof out_mad->data);
		mode = ((struct mlx4_counter *)mailbox->buf)->counter_mode;
		switch (mode & 0xf) {
		case 0:
			edit_counter(mailbox->buf,
				     (void *)(out_mad->data + 40));
			err = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
			break;
		default:
			err = IB_MAD_RESULT_FAILURE;
		}
	}

	mlx4_free_cmd_mailbox(dev->dev, mailbox);

	return err;
}

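/*
 * Single entry point for incoming MADs: IB ports go through the firmware
 * MAD_IFC path above, while Ethernet (RoCE) ports only answer
 * performance-management queries, synthesized from the HW flow counters.
 */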
int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
			struct ib_wc *in_wc, struct ib_grh *in_grh,
			struct ib_mad *in_mad, struct ib_mad *out_mad)
{
	switch (rdma_port_get_link_layer(ibdev, port_num)) {
	case IB_LINK_LAYER_INFINIBAND:
		return ib_process_mad(ibdev, mad_flags, port_num, in_wc,
				      in_grh, in_mad, out_mad);
	case IB_LINK_LAYER_ETHERNET:
		return iboe_process_mad(ibdev, mad_flags, port_num, in_wc,
					in_grh, in_mad, out_mad);
	default:
		return -EINVAL;
	}
}

static void send_handler(struct ib_mad_agent *agent,
			 struct ib_mad_send_wc *mad_send_wc)
{
	ib_free_send_mad(mad_send_wc->send_buf);
}

int mlx4_ib_mad_init(struct mlx4_ib_dev *dev)
{
	struct ib_mad_agent *agent;
	int p, q;
	int ret;
	enum rdma_link_layer ll;

	for (p = 0; p < dev->num_ports; ++p) {
		ll = rdma_port_get_link_layer(&dev->ib_dev, p + 1);
		for (q = 0; q <= 1; ++q) {
			if (ll == IB_LINK_LAYER_INFINIBAND) {
				agent = ib_register_mad_agent(&dev->ib_dev, p + 1,
							      q ? IB_QPT_GSI : IB_QPT_SMI,
							      NULL, 0, send_handler,
							      NULL, NULL);
				if (IS_ERR(agent)) {
					ret = PTR_ERR(agent);
					goto err;
				}
				dev->send_agent[p][q] = agent;
			} else
				dev->send_agent[p][q] = NULL;
		}
	}

	return 0;

err:
	for (p = 0; p < dev->num_ports; ++p)
		for (q = 0; q <= 1; ++q)
			if (dev->send_agent[p][q])
				ib_unregister_mad_agent(dev->send_agent[p][q]);

	return ret;
}

void mlx4_ib_mad_cleanup(struct mlx4_ib_dev *dev)
{
	struct ib_mad_agent *agent;
	int p, q;

	for (p = 0; p < dev->num_ports; ++p) {
		for (q = 0; q <= 1; ++q) {
			agent = dev->send_agent[p][q];
			if (agent) {
				dev->send_agent[p][q] = NULL;
				ib_unregister_mad_agent(agent);
			}
		}

		if (dev->sm_ah[p])
			ib_destroy_ah(dev->sm_ah[p]);
	}
}

static void handle_client_rereg_event(struct mlx4_ib_dev *dev, u8 port_num)
{
	/* re-configure the alias-guid and mcg's */
	if (mlx4_is_master(dev->dev)) {
		mlx4_ib_invalidate_all_guid_record(dev, port_num);

		if (!dev->sriov.is_going_down)
			mlx4_ib_mcg_port_cleanup(&dev->sriov.demux[port_num - 1], 0);
	}
	mlx4_ib_dispatch_event(dev, port_num, IB_EVENT_CLIENT_REREGISTER);
}

void handle_port_mgmt_change_event(struct work_struct *work)
{
	struct ib_event_work *ew = container_of(work, struct ib_event_work, work);
	struct mlx4_ib_dev *dev = ew->ib_dev;
	struct mlx4_eqe *eqe = &(ew->ib_eqe);
	u8 port = eqe->event.port_mgmt_change.port;
	u32 changed_attr;

	switch (eqe->subtype) {
	case MLX4_DEV_PMC_SUBTYPE_PORT_INFO:
		changed_attr = be32_to_cpu(eqe->event.port_mgmt_change.params.port_info.changed_attr);

		/* Update the SM ah - This should be done before handling
		   the other changed attributes so that MADs can be sent to the SM */
		if (changed_attr & MSTR_SM_CHANGE_MASK) {
			u16 lid = be16_to_cpu(eqe->event.port_mgmt_change.params.port_info.mstr_sm_lid);
			u8 sl = eqe->event.port_mgmt_change.params.port_info.mstr_sm_sl & 0xf;
			update_sm_ah(dev, port, lid, sl);
		}

		/* Check if it is a lid change event */
		if (changed_attr & MLX4_EQ_PORT_INFO_LID_CHANGE_MASK)
			mlx4_ib_dispatch_event(dev, port, IB_EVENT_LID_CHANGE);

		/* Generate GUID changed event */
		if (changed_attr & MLX4_EQ_PORT_INFO_GID_PFX_CHANGE_MASK)
			mlx4_ib_dispatch_event(dev, port, IB_EVENT_GID_CHANGE);

		if (changed_attr & MLX4_EQ_PORT_INFO_CLIENT_REREG_MASK)
			handle_client_rereg_event(dev, port);
		break;

	case MLX4_DEV_PMC_SUBTYPE_PKEY_TABLE:
		mlx4_ib_dispatch_event(dev, port, IB_EVENT_PKEY_CHANGE);
		break;
	case MLX4_DEV_PMC_SUBTYPE_GUID_INFO:
		/* paravirtualized master's guid is guid 0 -- does not change */
		if (!mlx4_is_master(dev->dev))
			mlx4_ib_dispatch_event(dev, port, IB_EVENT_GID_CHANGE);
		break;
	default:
		pr_warn("Unsupported subtype 0x%x for "
			"Port Management Change event\n", eqe->subtype);
	}

	kfree(ew);
}

void mlx4_ib_dispatch_event(struct mlx4_ib_dev *dev, u8 port_num,
			    enum ib_event_type type)
{
	struct ib_event event;

	event.device = &dev->ib_dev;
	event.element.port_num = port_num;
	event.event = type;

	ib_dispatch_event(&event);
}

static void mlx4_ib_tunnel_comp_handler(struct ib_cq *cq, void *arg)
{
	unsigned long flags;
	struct mlx4_ib_demux_pv_ctx *ctx = cq->cq_context;
	struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev);
	spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
	if (!dev->sriov.is_going_down && ctx->state == DEMUX_PV_STATE_ACTIVE)
		queue_work(ctx->wq, &ctx->work);
	spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
}

static int mlx4_ib_post_pv_qp_buf(struct mlx4_ib_demux_pv_ctx *ctx,
				  struct mlx4_ib_demux_pv_qp *tun_qp,
				  int index)
{
	struct ib_sge sg_list;
	struct ib_recv_wr recv_wr, *bad_recv_wr;
	int size;

	size = (tun_qp->qp->qp_type == IB_QPT_UD) ?
		sizeof (struct mlx4_tunnel_mad) : sizeof (struct mlx4_mad_rcv_buf);

	sg_list.addr = tun_qp->ring[index].map;
	sg_list.length = size;
	sg_list.lkey = ctx->mr->lkey;

	recv_wr.next = NULL;
	recv_wr.sg_list = &sg_list;
	recv_wr.num_sge = 1;
	recv_wr.wr_id = (u64) index | MLX4_TUN_WRID_RECV |
		MLX4_TUN_SET_WRID_QPN(tun_qp->proxy_qpt);
	ib_dma_sync_single_for_device(ctx->ib_dev, tun_qp->ring[index].map,
				      size, DMA_FROM_DEVICE);
	return ib_post_recv(tun_qp->qp, &recv_wr, &bad_recv_wr);
}

static int mlx4_ib_multiplex_sa_handler(struct ib_device *ibdev, int port,
					int slave, struct ib_sa_mad *sa_mad)
{
	int ret = 0;

	/* dispatch to different sa handlers */
	switch (be16_to_cpu(sa_mad->mad_hdr.attr_id)) {
	case IB_SA_ATTR_MC_MEMBER_REC:
		ret = mlx4_ib_mcg_multiplex_handler(ibdev, port, slave, sa_mad);
		break;
	default:
		break;
	}
	return ret;
}

static int is_proxy_qp0(struct mlx4_ib_dev *dev, int qpn, int slave)
{
	int slave_start = dev->dev->caps.sqp_start + 8 * slave;

	return (qpn >= slave_start && qpn <= slave_start + 1);
}
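
/*
 * A sketch of the proxy QPN layout implied by the checks in
 * mlx4_ib_multiplex_mad() below: within each slave's block of eight QPNs
 * starting at sqp_start + 8 * slave, bit 0 encodes the port, bit 1
 * distinguishes QP1 from QP0 traffic, and bit 2 must be clear for a
 * valid proxy QP.
 */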


int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
			 enum ib_qp_type dest_qpt, u16 pkey_index, u32 remote_qpn,
			 u32 qkey, struct ib_ah_attr *attr, struct ib_mad *mad)
{
	struct ib_sge list;
	struct ib_send_wr wr, *bad_wr;
	struct mlx4_ib_demux_pv_ctx *sqp_ctx;
	struct mlx4_ib_demux_pv_qp *sqp;
	struct mlx4_mad_snd_buf *sqp_mad;
	struct ib_ah *ah;
	struct ib_qp *send_qp = NULL;
	unsigned wire_tx_ix = 0;
	int ret = 0;
	u16 wire_pkey_ix;
	int src_qpnum;
	u8 sgid_index;


	sqp_ctx = dev->sriov.sqps[port-1];

	/* check if proxy qp created */
	if (!sqp_ctx || sqp_ctx->state != DEMUX_PV_STATE_ACTIVE)
		return -EAGAIN;

	/* QP0 forwarding only for Dom0 */
	if (dest_qpt == IB_QPT_SMI && (mlx4_master_func_num(dev->dev) != slave))
		return -EINVAL;

	if (dest_qpt == IB_QPT_SMI) {
		src_qpnum = 0;
		sqp = &sqp_ctx->qp[0];
		wire_pkey_ix = dev->pkeys.virt2phys_pkey[slave][port - 1][0];
	} else {
		src_qpnum = 1;
		sqp = &sqp_ctx->qp[1];
		wire_pkey_ix = dev->pkeys.virt2phys_pkey[slave][port - 1][pkey_index];
	}

	send_qp = sqp->qp;

	/* create ah */
	sgid_index = attr->grh.sgid_index;
	attr->grh.sgid_index = 0;
	ah = ib_create_ah(sqp_ctx->pd, attr);
	if (IS_ERR(ah))
		return -ENOMEM;
	attr->grh.sgid_index = sgid_index;
	to_mah(ah)->av.ib.gid_index = sgid_index;
	/* get rid of force-loopback bit */
	to_mah(ah)->av.ib.port_pd &= cpu_to_be32(0x7FFFFFFF);
	spin_lock(&sqp->tx_lock);
	if (sqp->tx_ix_head - sqp->tx_ix_tail >=
	    (MLX4_NUM_TUNNEL_BUFS - 1))
		ret = -EAGAIN;
	else
		wire_tx_ix = (++sqp->tx_ix_head) & (MLX4_NUM_TUNNEL_BUFS - 1);
	spin_unlock(&sqp->tx_lock);
	if (ret)
		goto out;

	sqp_mad = (struct mlx4_mad_snd_buf *) (sqp->tx_ring[wire_tx_ix].buf.addr);
	if (sqp->tx_ring[wire_tx_ix].ah)
		ib_destroy_ah(sqp->tx_ring[wire_tx_ix].ah);
	sqp->tx_ring[wire_tx_ix].ah = ah;
	ib_dma_sync_single_for_cpu(&dev->ib_dev,
				   sqp->tx_ring[wire_tx_ix].buf.map,
				   sizeof (struct mlx4_mad_snd_buf),
				   DMA_TO_DEVICE);

	memcpy(&sqp_mad->payload, mad, sizeof *mad);

	ib_dma_sync_single_for_device(&dev->ib_dev,
				      sqp->tx_ring[wire_tx_ix].buf.map,
				      sizeof (struct mlx4_mad_snd_buf),
				      DMA_TO_DEVICE);

	list.addr = sqp->tx_ring[wire_tx_ix].buf.map;
	list.length = sizeof (struct mlx4_mad_snd_buf);
	list.lkey = sqp_ctx->mr->lkey;

	wr.wr.ud.ah = ah;
	wr.wr.ud.port_num = port;
	wr.wr.ud.pkey_index = wire_pkey_ix;
	wr.wr.ud.remote_qkey = qkey;
	wr.wr.ud.remote_qpn = remote_qpn;
	wr.next = NULL;
	wr.wr_id = ((u64) wire_tx_ix) | MLX4_TUN_SET_WRID_QPN(src_qpnum);
	wr.sg_list = &list;
	wr.num_sge = 1;
	wr.opcode = IB_WR_SEND;
	wr.send_flags = IB_SEND_SIGNALED;

	ret = ib_post_send(send_qp, &wr, &bad_wr);
out:
	if (ret)
		ib_destroy_ah(ah);
	return ret;
}

static void mlx4_ib_multiplex_mad(struct mlx4_ib_demux_pv_ctx *ctx, struct ib_wc *wc)
{
	struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev);
	struct mlx4_ib_demux_pv_qp *tun_qp = &ctx->qp[MLX4_TUN_WRID_QPN(wc->wr_id)];
	int wr_ix = wc->wr_id & (MLX4_NUM_TUNNEL_BUFS - 1);
	struct mlx4_tunnel_mad *tunnel = tun_qp->ring[wr_ix].addr;
	struct mlx4_ib_ah ah;
	struct ib_ah_attr ah_attr;
	u8 *slave_id;
	int slave;

	/* Get slave that sent this packet */
	if (wc->src_qp < dev->dev->caps.sqp_start ||
	    wc->src_qp >= dev->dev->caps.base_tunnel_sqpn ||
	    (wc->src_qp & 0x1) != ctx->port - 1 ||
	    wc->src_qp & 0x4) {
		mlx4_ib_warn(ctx->ib_dev, "can't multiplex bad sqp:%d\n", wc->src_qp);
		return;
	}
	slave = ((wc->src_qp & ~0x7) - dev->dev->caps.sqp_start) / 8;
	if (slave != ctx->slave) {
		mlx4_ib_warn(ctx->ib_dev, "can't multiplex bad sqp:%d: "
			     "belongs to another slave\n", wc->src_qp);
		return;
	}
	if (slave != mlx4_master_func_num(dev->dev) && !(wc->src_qp & 0x2)) {
		mlx4_ib_warn(ctx->ib_dev, "can't multiplex bad sqp:%d: "
			     "non-master trying to send QP0 packets\n", wc->src_qp);
		return;
	}

	/* Map transaction ID */
	ib_dma_sync_single_for_cpu(ctx->ib_dev, tun_qp->ring[wr_ix].map,
				   sizeof (struct mlx4_tunnel_mad),
				   DMA_FROM_DEVICE);
	switch (tunnel->mad.mad_hdr.method) {
	case IB_MGMT_METHOD_SET:
	case IB_MGMT_METHOD_GET:
	case IB_MGMT_METHOD_REPORT:
	case IB_SA_METHOD_GET_TABLE:
	case IB_SA_METHOD_DELETE:
	case IB_SA_METHOD_GET_MULTI:
	case IB_SA_METHOD_GET_TRACE_TBL:
		slave_id = (u8 *) &tunnel->mad.mad_hdr.tid;
		if (*slave_id) {
			mlx4_ib_warn(ctx->ib_dev, "egress mad has non-null tid msb:%d "
				     "class:%d slave:%d\n", *slave_id,
				     tunnel->mad.mad_hdr.mgmt_class, slave);
			return;
		} else
			*slave_id = slave;
	default:
		/* nothing */;
	}

	/* Class-specific handling */
	switch (tunnel->mad.mad_hdr.mgmt_class) {
	case IB_MGMT_CLASS_SUBN_ADM:
		if (mlx4_ib_multiplex_sa_handler(ctx->ib_dev, ctx->port, slave,
						 (struct ib_sa_mad *) &tunnel->mad))
			return;
		break;
	case IB_MGMT_CLASS_CM:
		if (mlx4_ib_multiplex_cm_handler(ctx->ib_dev, ctx->port, slave,
						 (struct ib_mad *) &tunnel->mad))
			return;
		break;
	case IB_MGMT_CLASS_DEVICE_MGMT:
		if (tunnel->mad.mad_hdr.method != IB_MGMT_METHOD_GET &&
		    tunnel->mad.mad_hdr.method != IB_MGMT_METHOD_SET)
			return;
		break;
	default:
		/* Drop unsupported classes for slaves in tunnel mode */
		if (slave != mlx4_master_func_num(dev->dev)) {
			mlx4_ib_warn(ctx->ib_dev, "dropping unsupported egress mad from class:%d "
				     "for slave:%d\n", tunnel->mad.mad_hdr.mgmt_class, slave);
			return;
		}
	}

	/* We are using standard ib_core services to send the mad, so generate a
	 * standard address handle by decoding the tunnelled mlx4_ah fields */
	memcpy(&ah.av, &tunnel->hdr.av, sizeof (struct mlx4_av));
	ah.ibah.device = ctx->ib_dev;
	mlx4_ib_query_ah(&ah.ibah, &ah_attr);
	if ((ah_attr.ah_flags & IB_AH_GRH) &&
	    (ah_attr.grh.sgid_index != slave)) {
		mlx4_ib_warn(ctx->ib_dev, "slave:%d accessed invalid sgid_index:%d\n",
			     slave, ah_attr.grh.sgid_index);
		return;
	}

	mlx4_ib_send_to_wire(dev, slave, ctx->port,
			     is_proxy_qp0(dev, wc->src_qp, slave) ?
			     IB_QPT_SMI : IB_QPT_GSI,
			     be16_to_cpu(tunnel->hdr.pkey_index),
			     be32_to_cpu(tunnel->hdr.remote_qpn),
			     be32_to_cpu(tunnel->hdr.qkey),
			     &ah_attr, &tunnel->mad);
}

static int mlx4_ib_alloc_pv_bufs(struct mlx4_ib_demux_pv_ctx *ctx,
				 enum ib_qp_type qp_type, int is_tun)
{
	int i;
	struct mlx4_ib_demux_pv_qp *tun_qp;
	int rx_buf_size, tx_buf_size;

	if (qp_type > IB_QPT_GSI)
		return -EINVAL;

	tun_qp = &ctx->qp[qp_type];

	tun_qp->ring = kzalloc(sizeof (struct mlx4_ib_buf) * MLX4_NUM_TUNNEL_BUFS,
			       GFP_KERNEL);
	if (!tun_qp->ring)
		return -ENOMEM;

	tun_qp->tx_ring = kcalloc(MLX4_NUM_TUNNEL_BUFS,
				  sizeof (struct mlx4_ib_tun_tx_buf),
				  GFP_KERNEL);
	if (!tun_qp->tx_ring) {
		kfree(tun_qp->ring);
		tun_qp->ring = NULL;
		return -ENOMEM;
	}

	if (is_tun) {
		rx_buf_size = sizeof (struct mlx4_tunnel_mad);
		tx_buf_size = sizeof (struct mlx4_rcv_tunnel_mad);
	} else {
		rx_buf_size = sizeof (struct mlx4_mad_rcv_buf);
		tx_buf_size = sizeof (struct mlx4_mad_snd_buf);
	}

	for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
		tun_qp->ring[i].addr = kmalloc(rx_buf_size, GFP_KERNEL);
		if (!tun_qp->ring[i].addr)
			goto err;
		tun_qp->ring[i].map = ib_dma_map_single(ctx->ib_dev,
							tun_qp->ring[i].addr,
							rx_buf_size,
							DMA_FROM_DEVICE);
	}

	for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
		tun_qp->tx_ring[i].buf.addr =
			kmalloc(tx_buf_size, GFP_KERNEL);
		if (!tun_qp->tx_ring[i].buf.addr)
			goto tx_err;
		tun_qp->tx_ring[i].buf.map =
			ib_dma_map_single(ctx->ib_dev,
					  tun_qp->tx_ring[i].buf.addr,
					  tx_buf_size,
					  DMA_TO_DEVICE);
		tun_qp->tx_ring[i].ah = NULL;
	}
	spin_lock_init(&tun_qp->tx_lock);
	tun_qp->tx_ix_head = 0;
	tun_qp->tx_ix_tail = 0;
	tun_qp->proxy_qpt = qp_type;

	return 0;

tx_err:
	while (i > 0) {
		--i;
		ib_dma_unmap_single(ctx->ib_dev, tun_qp->tx_ring[i].buf.map,
				    tx_buf_size, DMA_TO_DEVICE);
		kfree(tun_qp->tx_ring[i].buf.addr);
	}
	kfree(tun_qp->tx_ring);
	tun_qp->tx_ring = NULL;
	i = MLX4_NUM_TUNNEL_BUFS;
err:
	while (i > 0) {
		--i;
		ib_dma_unmap_single(ctx->ib_dev, tun_qp->ring[i].map,
				    rx_buf_size, DMA_FROM_DEVICE);
		kfree(tun_qp->ring[i].addr);
	}
	kfree(tun_qp->ring);
	tun_qp->ring = NULL;
	return -ENOMEM;
}

static void mlx4_ib_free_pv_qp_bufs(struct mlx4_ib_demux_pv_ctx *ctx,
				    enum ib_qp_type qp_type, int is_tun)
{
	int i;
	struct mlx4_ib_demux_pv_qp *tun_qp;
	int rx_buf_size, tx_buf_size;

	if (qp_type > IB_QPT_GSI)
		return;

	tun_qp = &ctx->qp[qp_type];
	if (is_tun) {
		rx_buf_size = sizeof (struct mlx4_tunnel_mad);
		tx_buf_size = sizeof (struct mlx4_rcv_tunnel_mad);
	} else {
		rx_buf_size = sizeof (struct mlx4_mad_rcv_buf);
		tx_buf_size = sizeof (struct mlx4_mad_snd_buf);
	}


	for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
		ib_dma_unmap_single(ctx->ib_dev, tun_qp->ring[i].map,
				    rx_buf_size, DMA_FROM_DEVICE);
		kfree(tun_qp->ring[i].addr);
	}

	for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
		ib_dma_unmap_single(ctx->ib_dev, tun_qp->tx_ring[i].buf.map,
				    tx_buf_size, DMA_TO_DEVICE);
		kfree(tun_qp->tx_ring[i].buf.addr);
		if (tun_qp->tx_ring[i].ah)
			ib_destroy_ah(tun_qp->tx_ring[i].ah);
	}
	kfree(tun_qp->tx_ring);
	kfree(tun_qp->ring);
}

static void mlx4_ib_tunnel_comp_worker(struct work_struct *work)
{
	struct mlx4_ib_demux_pv_ctx *ctx;
	struct mlx4_ib_demux_pv_qp *tun_qp;
	struct ib_wc wc;
	int ret;
	ctx = container_of(work, struct mlx4_ib_demux_pv_ctx, work);
	ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP);

	while (ib_poll_cq(ctx->cq, 1, &wc) == 1) {
		tun_qp = &ctx->qp[MLX4_TUN_WRID_QPN(wc.wr_id)];
		if (wc.status == IB_WC_SUCCESS) {
			switch (wc.opcode) {
			case IB_WC_RECV:
				mlx4_ib_multiplex_mad(ctx, &wc);
				ret = mlx4_ib_post_pv_qp_buf(ctx, tun_qp,
							     wc.wr_id &
							     (MLX4_NUM_TUNNEL_BUFS - 1));
				if (ret)
					pr_err("Failed reposting tunnel "
					       "buf:%lld\n", wc.wr_id);
				break;
			case IB_WC_SEND:
				pr_debug("received tunnel send completion:"
					 "wrid=0x%llx, status=0x%x\n",
					 wc.wr_id, wc.status);
				ib_destroy_ah(tun_qp->tx_ring[wc.wr_id &
					      (MLX4_NUM_TUNNEL_BUFS - 1)].ah);
				tun_qp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
					= NULL;
				spin_lock(&tun_qp->tx_lock);
				tun_qp->tx_ix_tail++;
				spin_unlock(&tun_qp->tx_lock);

				break;
			default:
				break;
			}
		} else {
			pr_debug("mlx4_ib: completion error in tunnel: %d."
				 " status = %d, wrid = 0x%llx\n",
				 ctx->slave, wc.status, wc.wr_id);
			if (!MLX4_TUN_IS_RECV(wc.wr_id)) {
				ib_destroy_ah(tun_qp->tx_ring[wc.wr_id &
					      (MLX4_NUM_TUNNEL_BUFS - 1)].ah);
				tun_qp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
					= NULL;
				spin_lock(&tun_qp->tx_lock);
				tun_qp->tx_ix_tail++;
				spin_unlock(&tun_qp->tx_lock);
			}
		}
	}
}

static void pv_qp_event_handler(struct ib_event *event, void *qp_context)
{
	struct mlx4_ib_demux_pv_ctx *sqp = qp_context;

	/* It's worse than that! He's dead, Jim! */
	pr_err("Fatal error (%d) on a MAD QP on port %d\n",
	       event->event, sqp->port);
}

static int create_pv_sqp(struct mlx4_ib_demux_pv_ctx *ctx,
			 enum ib_qp_type qp_type, int create_tun)
{
	int i, ret;
	struct mlx4_ib_demux_pv_qp *tun_qp;
	struct mlx4_ib_qp_tunnel_init_attr qp_init_attr;
	struct ib_qp_attr attr;
	int qp_attr_mask_INIT;

	if (qp_type > IB_QPT_GSI)
		return -EINVAL;

	tun_qp = &ctx->qp[qp_type];

	memset(&qp_init_attr, 0, sizeof qp_init_attr);
	qp_init_attr.init_attr.send_cq = ctx->cq;
	qp_init_attr.init_attr.recv_cq = ctx->cq;
	qp_init_attr.init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
	qp_init_attr.init_attr.cap.max_send_wr = MLX4_NUM_TUNNEL_BUFS;
	qp_init_attr.init_attr.cap.max_recv_wr = MLX4_NUM_TUNNEL_BUFS;
	qp_init_attr.init_attr.cap.max_send_sge = 1;
	qp_init_attr.init_attr.cap.max_recv_sge = 1;
	if (create_tun) {
		qp_init_attr.init_attr.qp_type = IB_QPT_UD;
		qp_init_attr.init_attr.create_flags = MLX4_IB_SRIOV_TUNNEL_QP;
		qp_init_attr.port = ctx->port;
		qp_init_attr.slave = ctx->slave;
		qp_init_attr.proxy_qp_type = qp_type;
		qp_attr_mask_INIT = IB_QP_STATE | IB_QP_PKEY_INDEX |
			   IB_QP_QKEY | IB_QP_PORT;
	} else {
		qp_init_attr.init_attr.qp_type = qp_type;
		qp_init_attr.init_attr.create_flags = MLX4_IB_SRIOV_SQP;
		qp_attr_mask_INIT = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_QKEY;
	}
	qp_init_attr.init_attr.port_num = ctx->port;
	qp_init_attr.init_attr.qp_context = ctx;
	qp_init_attr.init_attr.event_handler = pv_qp_event_handler;
	tun_qp->qp = ib_create_qp(ctx->pd, &qp_init_attr.init_attr);
	if (IS_ERR(tun_qp->qp)) {
		ret = PTR_ERR(tun_qp->qp);
		tun_qp->qp = NULL;
		pr_err("Couldn't create %s QP (%d)\n",
		       create_tun ? "tunnel" : "special", ret);
		return ret;
	}

	memset(&attr, 0, sizeof attr);
	attr.qp_state = IB_QPS_INIT;
	attr.pkey_index =
		to_mdev(ctx->ib_dev)->pkeys.virt2phys_pkey[ctx->slave][ctx->port - 1][0];
	attr.qkey = IB_QP1_QKEY;
	attr.port_num = ctx->port;
	ret = ib_modify_qp(tun_qp->qp, &attr, qp_attr_mask_INIT);
	if (ret) {
		pr_err("Couldn't change %s qp state to INIT (%d)\n",
		       create_tun ? "tunnel" : "special", ret);
		goto err_qp;
	}
	attr.qp_state = IB_QPS_RTR;
	ret = ib_modify_qp(tun_qp->qp, &attr, IB_QP_STATE);
	if (ret) {
		pr_err("Couldn't change %s qp state to RTR (%d)\n",
		       create_tun ? "tunnel" : "special", ret);
		goto err_qp;
	}
	attr.qp_state = IB_QPS_RTS;
	attr.sq_psn = 0;
	ret = ib_modify_qp(tun_qp->qp, &attr, IB_QP_STATE | IB_QP_SQ_PSN);
	if (ret) {
		pr_err("Couldn't change %s qp state to RTS (%d)\n",
		       create_tun ? "tunnel" : "special", ret);
		goto err_qp;
	}

	for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
		ret = mlx4_ib_post_pv_qp_buf(ctx, tun_qp, i);
		if (ret) {
			pr_err(" mlx4_ib_post_pv_buf error"
			       " (err = %d, i = %d)\n", ret, i);
			goto err_qp;
		}
	}
	return 0;

err_qp:
	ib_destroy_qp(tun_qp->qp);
	tun_qp->qp = NULL;
	return ret;
}

/*
 * IB MAD completion callback for real SQPs
 */
static void mlx4_ib_sqp_comp_worker(struct work_struct *work)
{
	struct mlx4_ib_demux_pv_ctx *ctx;
	struct mlx4_ib_demux_pv_qp *sqp;
	struct ib_wc wc;
	struct ib_grh *grh;
	struct ib_mad *mad;

	ctx = container_of(work, struct mlx4_ib_demux_pv_ctx, work);
	ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP);

	while (mlx4_ib_poll_cq(ctx->cq, 1, &wc) == 1) {
		sqp = &ctx->qp[MLX4_TUN_WRID_QPN(wc.wr_id)];
		if (wc.status == IB_WC_SUCCESS) {
			switch (wc.opcode) {
			case IB_WC_SEND:
				ib_destroy_ah(sqp->tx_ring[wc.wr_id &
					      (MLX4_NUM_TUNNEL_BUFS - 1)].ah);
				sqp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
					= NULL;
				spin_lock(&sqp->tx_lock);
				sqp->tx_ix_tail++;
				spin_unlock(&sqp->tx_lock);
				break;
			case IB_WC_RECV:
				mad = (struct ib_mad *) &(((struct mlx4_mad_rcv_buf *)
						(sqp->ring[wc.wr_id &
						(MLX4_NUM_TUNNEL_BUFS - 1)].addr))->payload);
				grh = &(((struct mlx4_mad_rcv_buf *)
						(sqp->ring[wc.wr_id &
						(MLX4_NUM_TUNNEL_BUFS - 1)].addr))->grh);
				mlx4_ib_demux_mad(ctx->ib_dev, ctx->port, &wc, grh, mad);
				if (mlx4_ib_post_pv_qp_buf(ctx, sqp, wc.wr_id &
							   (MLX4_NUM_TUNNEL_BUFS - 1)))
					pr_err("Failed reposting SQP "
					       "buf:%lld\n", wc.wr_id);
				break;
			default:
				BUG_ON(1);
				break;
			}
		} else {
			pr_debug("mlx4_ib: completion error in tunnel: %d."
				 " status = %d, wrid = 0x%llx\n",
				 ctx->slave, wc.status, wc.wr_id);
			if (!MLX4_TUN_IS_RECV(wc.wr_id)) {
				ib_destroy_ah(sqp->tx_ring[wc.wr_id &
					      (MLX4_NUM_TUNNEL_BUFS - 1)].ah);
				sqp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
					= NULL;
				spin_lock(&sqp->tx_lock);
				sqp->tx_ix_tail++;
				spin_unlock(&sqp->tx_lock);
			}
		}
	}
}

static int alloc_pv_object(struct mlx4_ib_dev *dev, int slave, int port,
			   struct mlx4_ib_demux_pv_ctx **ret_ctx)
{
	struct mlx4_ib_demux_pv_ctx *ctx;

	*ret_ctx = NULL;
	ctx = kzalloc(sizeof (struct mlx4_ib_demux_pv_ctx), GFP_KERNEL);
	if (!ctx) {
		pr_err("failed allocating pv resource context "
		       "for port %d, slave %d\n", port, slave);
		return -ENOMEM;
	}

	ctx->ib_dev = &dev->ib_dev;
	ctx->port = port;
	ctx->slave = slave;
	*ret_ctx = ctx;
	return 0;
}

static void free_pv_object(struct mlx4_ib_dev *dev, int slave, int port)
{
	if (dev->sriov.demux[port - 1].tun[slave]) {
		kfree(dev->sriov.demux[port - 1].tun[slave]);
		dev->sriov.demux[port - 1].tun[slave] = NULL;
	}
}

static int create_pv_resources(struct ib_device *ibdev, int slave, int port,
			       int create_tun, struct mlx4_ib_demux_pv_ctx *ctx)
{
	int ret, cq_size;

	ctx->state = DEMUX_PV_STATE_STARTING;
	/* have QP0 only on port owner, and only if link layer is IB */
	if (ctx->slave == mlx4_master_func_num(to_mdev(ctx->ib_dev)->dev) &&
	    rdma_port_get_link_layer(ibdev, ctx->port) == IB_LINK_LAYER_INFINIBAND)
		ctx->has_smi = 1;

	if (ctx->has_smi) {
		ret = mlx4_ib_alloc_pv_bufs(ctx, IB_QPT_SMI, create_tun);
		if (ret) {
			pr_err("Failed allocating qp0 tunnel bufs (%d)\n", ret);
			goto err_out;
		}
	}

	ret = mlx4_ib_alloc_pv_bufs(ctx, IB_QPT_GSI, create_tun);
	if (ret) {
		pr_err("Failed allocating qp1 tunnel bufs (%d)\n", ret);
		goto err_out_qp0;
	}

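	/*
	 * CQ sizing, as a sketch of the reasoning: one completion slot per
	 * send and per receive buffer of the GSI proxy QP, doubled when an
	 * SMI proxy QP shares the same CQ.
	 */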
1516 cq_size = 2 * MLX4_NUM_TUNNEL_BUFS;
1517 if (ctx->has_smi)
1518 cq_size *= 2;
1519
1520 ctx->cq = ib_create_cq(ctx->ib_dev, mlx4_ib_tunnel_comp_handler,
1521 NULL, ctx, cq_size, 0);
1522 if (IS_ERR(ctx->cq)) {
1523 ret = PTR_ERR(ctx->cq);
1524 pr_err("Couldn't create tunnel CQ (%d)\n", ret);
1525 goto err_buf;
1526 }
1527
1528 ctx->pd = ib_alloc_pd(ctx->ib_dev);
1529 if (IS_ERR(ctx->pd)) {
1530 ret = PTR_ERR(ctx->pd);
1531 pr_err("Couldn't create tunnel PD (%d)\n", ret);
1532 goto err_cq;
1533 }
1534
1535 ctx->mr = ib_get_dma_mr(ctx->pd, IB_ACCESS_LOCAL_WRITE);
1536 if (IS_ERR(ctx->mr)) {
1537 ret = PTR_ERR(ctx->mr);
1538 pr_err("Couldn't get tunnel DMA MR (%d)\n", ret);
1539 goto err_pd;
1540 }
1541
1542 if (ctx->has_smi) {
1543 ret = create_pv_sqp(ctx, IB_QPT_SMI, create_tun);
1544 if (ret) {
1545 pr_err("Couldn't create %s QP0 (%d)\n",
1546 create_tun ? "tunnel for" : "", ret);
1547 goto err_mr;
1548 }
1549 }
1550
1551 ret = create_pv_sqp(ctx, IB_QPT_GSI, create_tun);
1552 if (ret) {
1553 pr_err("Couldn't create %s QP1 (%d)\n",
1554 create_tun ? "tunnel for" : "", ret);
1555 goto err_qp0;
1556 }
1557
1558 if (create_tun)
1559 INIT_WORK(&ctx->work, mlx4_ib_tunnel_comp_worker);
1560 else
1561 INIT_WORK(&ctx->work, mlx4_ib_sqp_comp_worker);
1562
1563 ctx->wq = to_mdev(ibdev)->sriov.demux[port - 1].wq;
1564
1565 ret = ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP);
1566 if (ret) {
1567 pr_err("Couldn't arm tunnel cq (%d)\n", ret);
1568 goto err_wq;
1569 }
1570 ctx->state = DEMUX_PV_STATE_ACTIVE;
1571 return 0;
1572
1573err_wq:
1574 ctx->wq = NULL;
1575 ib_destroy_qp(ctx->qp[1].qp);
1576 ctx->qp[1].qp = NULL;
1577
1578
1579err_qp0:
1580 if (ctx->has_smi)
1581 ib_destroy_qp(ctx->qp[0].qp);
1582 ctx->qp[0].qp = NULL;
1583
1584err_mr:
1585 ib_dereg_mr(ctx->mr);
1586 ctx->mr = NULL;
1587
1588err_pd:
1589 ib_dealloc_pd(ctx->pd);
1590 ctx->pd = NULL;
1591
1592err_cq:
1593 ib_destroy_cq(ctx->cq);
1594 ctx->cq = NULL;
1595
1596err_buf:
1597 mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_GSI, create_tun);
1598
1599err_out_qp0:
1600 if (ctx->has_smi)
1601 mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_SMI, create_tun);
1602err_out:
1603 ctx->state = DEMUX_PV_STATE_DOWN;
1604 return ret;
1605}
1606
1607static void destroy_pv_resources(struct mlx4_ib_dev *dev, int slave, int port,
1608 struct mlx4_ib_demux_pv_ctx *ctx, int flush)
1609{
1610 if (!ctx)
1611 return;
1612 if (ctx->state > DEMUX_PV_STATE_DOWN) {
1613 ctx->state = DEMUX_PV_STATE_DOWNING;
1614 if (flush)
1615 flush_workqueue(ctx->wq);
1616 if (ctx->has_smi) {
1617 ib_destroy_qp(ctx->qp[0].qp);
1618 ctx->qp[0].qp = NULL;
1619 mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_SMI, 1);
1620 }
1621 ib_destroy_qp(ctx->qp[1].qp);
1622 ctx->qp[1].qp = NULL;
1623 mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_GSI, 1);
1624 ib_dereg_mr(ctx->mr);
1625 ctx->mr = NULL;
1626 ib_dealloc_pd(ctx->pd);
1627 ctx->pd = NULL;
1628 ib_destroy_cq(ctx->cq);
1629 ctx->cq = NULL;
1630 ctx->state = DEMUX_PV_STATE_DOWN;
1631 }
1632}
1633
static int mlx4_ib_tunnels_update(struct mlx4_ib_dev *dev, int slave,
				  int port, int do_init)
{
	int ret = 0;

	if (!do_init) {
		clean_vf_mcast(&dev->sriov.demux[port - 1], slave);
		/* for master, destroy real sqp resources */
		if (slave == mlx4_master_func_num(dev->dev))
			destroy_pv_resources(dev, slave, port,
					     dev->sriov.sqps[port - 1], 1);
		/* destroy the tunnel qp resources */
		destroy_pv_resources(dev, slave, port,
				     dev->sriov.demux[port - 1].tun[slave], 1);
		return 0;
	}

	/* create the tunnel qp resources */
	ret = create_pv_resources(&dev->ib_dev, slave, port, 1,
				  dev->sriov.demux[port - 1].tun[slave]);

	/* for master, create the real sqp resources */
	if (!ret && slave == mlx4_master_func_num(dev->dev))
		ret = create_pv_resources(&dev->ib_dev, slave, port, 0,
					  dev->sriov.sqps[port - 1]);
	return ret;
}

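/*
 * Deferred-work wrapper around mlx4_ib_tunnels_update(): the demux work
 * element carries the slave/port/do_init arguments and is freed here
 * once the update has run.
 */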
void mlx4_ib_tunnels_update_work(struct work_struct *work)
{
	struct mlx4_ib_demux_work *dmxw;

	dmxw = container_of(work, struct mlx4_ib_demux_work, work);
	mlx4_ib_tunnels_update(dmxw->dev, dmxw->slave, (int) dmxw->port,
			       dmxw->do_init);
	kfree(dmxw);
}

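/*
 * Set up the per-port demux context on the master: one tunnel QP
 * context per slave (dev->dev->caps.sqp_demux of them), the multicast
 * para-virtualization state, and two single-threaded workqueues.
 */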
static int mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev *dev,
				   struct mlx4_ib_demux_ctx *ctx,
				   int port)
{
	char name[12];
	int ret = 0;
	int i;

	ctx->tun = kcalloc(dev->dev->caps.sqp_demux,
			   sizeof (struct mlx4_ib_demux_pv_ctx *), GFP_KERNEL);
	if (!ctx->tun)
		return -ENOMEM;

	ctx->dev = dev;
	ctx->port = port;
	ctx->ib_dev = &dev->ib_dev;

	for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
		ret = alloc_pv_object(dev, i, port, &ctx->tun[i]);
		if (ret) {
			ret = -ENOMEM;
			goto err_mcg;
		}
	}

	ret = mlx4_ib_mcg_port_init(ctx);
	if (ret) {
		pr_err("Failed initializing mcg para-virt (%d)\n", ret);
		goto err_mcg;
	}

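	/*
	 * Two dedicated workqueues per port: "mlx4_ibt<port>" runs the
	 * tunnel/SQP completion workers, "mlx4_ibud<port>" runs the
	 * port up/down (tunnel create/destroy) work items.
	 */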
	snprintf(name, sizeof name, "mlx4_ibt%d", port);
	ctx->wq = create_singlethread_workqueue(name);
	if (!ctx->wq) {
		pr_err("Failed to create tunnelling WQ for port %d\n", port);
		ret = -ENOMEM;
		goto err_wq;
	}

	snprintf(name, sizeof name, "mlx4_ibud%d", port);
	ctx->ud_wq = create_singlethread_workqueue(name);
	if (!ctx->ud_wq) {
		pr_err("Failed to create up/down WQ for port %d\n", port);
		ret = -ENOMEM;
		goto err_udwq;
	}

	return 0;

err_udwq:
	destroy_workqueue(ctx->wq);
	ctx->wq = NULL;

err_wq:
	mlx4_ib_mcg_port_cleanup(ctx, 1);
err_mcg:
	for (i = 0; i < dev->dev->caps.sqp_demux; i++)
		free_pv_object(dev, i, port);
	kfree(ctx->tun);
	ctx->tun = NULL;
	return ret;
}

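/*
 * Free the master's real SQP context for one port.  Same teardown order
 * as destroy_pv_resources(), but the QP buffers are freed with a last
 * argument of 0 since these are the real SQPs rather than tunnel QPs.
 */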
static void mlx4_ib_free_sqp_ctx(struct mlx4_ib_demux_pv_ctx *sqp_ctx)
{
	if (sqp_ctx->state > DEMUX_PV_STATE_DOWN) {
		sqp_ctx->state = DEMUX_PV_STATE_DOWNING;
		flush_workqueue(sqp_ctx->wq);
		if (sqp_ctx->has_smi) {
			ib_destroy_qp(sqp_ctx->qp[0].qp);
			sqp_ctx->qp[0].qp = NULL;
			mlx4_ib_free_pv_qp_bufs(sqp_ctx, IB_QPT_SMI, 0);
		}
		ib_destroy_qp(sqp_ctx->qp[1].qp);
		sqp_ctx->qp[1].qp = NULL;
		mlx4_ib_free_pv_qp_bufs(sqp_ctx, IB_QPT_GSI, 0);
		ib_dereg_mr(sqp_ctx->mr);
		sqp_ctx->mr = NULL;
		ib_dealloc_pd(sqp_ctx->pd);
		sqp_ctx->pd = NULL;
		ib_destroy_cq(sqp_ctx->cq);
		sqp_ctx->cq = NULL;
		sqp_ctx->state = DEMUX_PV_STATE_DOWN;
	}
}

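/*
 * Undo mlx4_ib_alloc_demux_ctx() and release every slave's tunnel
 * context on the port: mark all contexts DOWNING, drain the tunnel
 * workqueue, then destroy the resources and both workqueues.
 */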
static void mlx4_ib_free_demux_ctx(struct mlx4_ib_demux_ctx *ctx)
{
	int i;
	if (ctx) {
		struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev);
		mlx4_ib_mcg_port_cleanup(ctx, 1);
		for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
			if (!ctx->tun[i])
				continue;
			if (ctx->tun[i]->state > DEMUX_PV_STATE_DOWN)
				ctx->tun[i]->state = DEMUX_PV_STATE_DOWNING;
		}
		flush_workqueue(ctx->wq);
		for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
			destroy_pv_resources(dev, i, ctx->port, ctx->tun[i], 0);
			free_pv_object(dev, i, ctx->port);
		}
		kfree(ctx->tun);
		destroy_workqueue(ctx->ud_wq);
		destroy_workqueue(ctx->wq);
	}
}

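/*
 * Bring the master function's own tunnel QPs up or down on every port.
 * Tunnels for the other slaves are handled separately, as each slave
 * comes up or goes down.
 */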
static void mlx4_ib_master_tunnels(struct mlx4_ib_dev *dev, int do_init)
{
	int i;

	if (!mlx4_is_master(dev->dev))
		return;
	/* initialize or tear down tunnel QPs for the master */
	for (i = 0; i < dev->dev->caps.num_ports; i++)
		mlx4_ib_tunnels_update(dev, mlx4_master_func_num(dev->dev), i + 1, do_init);
}

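/*
 * Initialize SR-IOV para-virtualization support.  On a slave this only
 * sets up the CM para-virtualization state; on the master it also
 * starts the alias GUID service, seeds each port's GUID cache, and
 * allocates the SQP and demux contexts before creating the master's
 * tunnel QPs.
 */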
int mlx4_ib_init_sriov(struct mlx4_ib_dev *dev)
{
	int i = 0;
	int err;

	if (!mlx4_is_mfunc(dev->dev))
		return 0;

	dev->sriov.is_going_down = 0;
	spin_lock_init(&dev->sriov.going_down_lock);
	mlx4_ib_cm_paravirt_init(dev);

	mlx4_ib_warn(&dev->ib_dev, "multi-function enabled\n");

	if (mlx4_is_slave(dev->dev)) {
		mlx4_ib_warn(&dev->ib_dev, "operating in qp1 tunnel mode\n");
		return 0;
	}

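	/*
	 * Only the master gets here; it runs the alias GUID service and
	 * the per-port demux machinery on behalf of all slaves.
	 */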
	err = mlx4_ib_init_alias_guid_service(dev);
	if (err) {
		mlx4_ib_warn(&dev->ib_dev, "Failed init alias guid process.\n");
		goto paravirt_err;
	}

	mlx4_ib_warn(&dev->ib_dev, "initializing demux service for %d qp1 clients\n",
		     dev->dev->caps.sqp_demux);
	for (i = 0; i < dev->num_ports; i++) {
		union ib_gid gid;
		err = __mlx4_ib_query_gid(&dev->ib_dev, i + 1, 0, &gid, 1);
		if (err)
			goto demux_err;
		dev->sriov.demux[i].guid_cache[0] = gid.global.interface_id;
		err = alloc_pv_object(dev, mlx4_master_func_num(dev->dev), i + 1,
				      &dev->sriov.sqps[i]);
		if (err)
			goto demux_err;
		err = mlx4_ib_alloc_demux_ctx(dev, &dev->sriov.demux[i], i + 1);
		if (err)
			goto demux_err;
	}
	mlx4_ib_master_tunnels(dev, 1);
	return 0;

demux_err:
	/* unwind only the ports that were fully initialized above; the
	 * failing port's partial state was already cleaned up by its
	 * own error path
	 */
	while (--i >= 0) {
		free_pv_object(dev, mlx4_master_func_num(dev->dev), i + 1);
		mlx4_ib_free_demux_ctx(&dev->sriov.demux[i]);
	}
	mlx4_ib_destroy_alias_guid_service(dev);

paravirt_err:
	mlx4_ib_cm_paravirt_clean(dev, -1);

	return err;
}

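/*
 * Shut down SR-IOV para-virtualization support.  Once is_going_down is
 * set, no new demux work is queued; the master then drains each port's
 * up/down workqueue and frees the SQP and demux contexts.
 */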
void mlx4_ib_close_sriov(struct mlx4_ib_dev *dev)
{
	int i;
	unsigned long flags;

	if (!mlx4_is_mfunc(dev->dev))
		return;

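	/*
	 * Raise is_going_down under the lock so that paths which test
	 * it under the same lock (e.g. the demux event handlers) stop
	 * queueing new tunnel work before the teardown below starts.
	 */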
	spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
	dev->sriov.is_going_down = 1;
	spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
	if (mlx4_is_master(dev->dev)) {
		for (i = 0; i < dev->num_ports; i++) {
			flush_workqueue(dev->sriov.demux[i].ud_wq);
			mlx4_ib_free_sqp_ctx(dev->sriov.sqps[i]);
			kfree(dev->sriov.sqps[i]);
			dev->sriov.sqps[i] = NULL;
			mlx4_ib_free_demux_ctx(&dev->sriov.demux[i]);
		}

		mlx4_ib_cm_paravirt_clean(dev, -1);
		mlx4_ib_destroy_alias_guid_service(dev);
	}
}