/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "iw_cxgb4.h"

static int destroy_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
		      struct c4iw_dev_ucontext *uctx)
{
	/*
	 * uP clears EQ contexts when the connection exits rdma mode,
	 * so no need to post a RESET WR for these EQs.
	 */
	dma_free_coherent(&(rdev->lldi.pdev->dev),
			  wq->rq.memsize, wq->rq.queue,
			  dma_unmap_addr(&wq->rq, mapping));
	dma_free_coherent(&(rdev->lldi.pdev->dev),
			  wq->sq.memsize, wq->sq.queue,
			  dma_unmap_addr(&wq->sq, mapping));
	c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
	kfree(wq->rq.sw_rq);
	kfree(wq->sq.sw_sq);
	c4iw_put_qpid(rdev, wq->rq.qid, uctx);
	c4iw_put_qpid(rdev, wq->sq.qid, uctx);
	return 0;
}
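
/*
 * Note on the DMA bookkeeping above: the rings are allocated with
 * dma_alloc_coherent() in create_qp() and the bus address is stashed
 * with dma_unmap_addr_set() so it can be recovered here via
 * dma_unmap_addr().  A minimal sketch of the pairing (field names as
 * used in this file):
 *
 *	q = dma_alloc_coherent(dev, memsize, &dma_addr, GFP_KERNEL);
 *	dma_unmap_addr_set(&wq->sq, mapping, dma_addr);
 *	...
 *	dma_free_coherent(dev, memsize, q, dma_unmap_addr(&wq->sq, mapping));
 */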

static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
		     struct t4_cq *rcq, struct t4_cq *scq,
		     struct c4iw_dev_ucontext *uctx)
{
	int user = (uctx != &rdev->uctx);
	struct fw_ri_res_wr *res_wr;
	struct fw_ri_res *res;
	int wr_len;
	struct c4iw_wr_wait wr_wait;
	struct sk_buff *skb;
	int ret;
	int eqsize;

	wq->sq.qid = c4iw_get_qpid(rdev, uctx);
	if (!wq->sq.qid)
		return -ENOMEM;

	wq->rq.qid = c4iw_get_qpid(rdev, uctx);
	if (!wq->rq.qid)
		goto err1;

	if (!user) {
		wq->sq.sw_sq = kzalloc(wq->sq.size * sizeof *wq->sq.sw_sq,
				       GFP_KERNEL);
		if (!wq->sq.sw_sq)
			goto err2;

		wq->rq.sw_rq = kzalloc(wq->rq.size * sizeof *wq->rq.sw_rq,
				       GFP_KERNEL);
		if (!wq->rq.sw_rq)
			goto err3;
	}

	/*
	 * RQT must be a power of 2.
	 */
	wq->rq.rqt_size = roundup_pow_of_two(wq->rq.size);
	wq->rq.rqt_hwaddr = c4iw_rqtpool_alloc(rdev, wq->rq.rqt_size);
	if (!wq->rq.rqt_hwaddr)
		goto err4;

	wq->sq.queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev),
					  wq->sq.memsize, &(wq->sq.dma_addr),
					  GFP_KERNEL);
	if (!wq->sq.queue)
		goto err5;
	memset(wq->sq.queue, 0, wq->sq.memsize);
	dma_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr);

	wq->rq.queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev),
					  wq->rq.memsize, &(wq->rq.dma_addr),
					  GFP_KERNEL);
	if (!wq->rq.queue)
		goto err6;
	PDBG("%s sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx\n",
	     __func__, wq->sq.queue,
	     (unsigned long long)virt_to_phys(wq->sq.queue),
	     wq->rq.queue,
	     (unsigned long long)virt_to_phys(wq->rq.queue));
	memset(wq->rq.queue, 0, wq->rq.memsize);
	dma_unmap_addr_set(&wq->rq, mapping, wq->rq.dma_addr);

	wq->db = rdev->lldi.db_reg;
	wq->gts = rdev->lldi.gts_reg;
	if (user) {
		wq->sq.udb = (u64)pci_resource_start(rdev->lldi.pdev, 2) +
			     (wq->sq.qid << rdev->qpshift);
		wq->sq.udb &= PAGE_MASK;
		wq->rq.udb = (u64)pci_resource_start(rdev->lldi.pdev, 2) +
			     (wq->rq.qid << rdev->qpshift);
		wq->rq.udb &= PAGE_MASK;
	}
	wq->rdev = rdev;
	wq->rq.msn = 1;

	/* build fw_ri_res_wr */
	wr_len = sizeof *res_wr + 2 * sizeof *res;

	skb = alloc_skb(wr_len, GFP_KERNEL);
	if (!skb) {
		ret = -ENOMEM;
		goto err7;
	}
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

	res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
	memset(res_wr, 0, wr_len);
	res_wr->op_nres = cpu_to_be32(
			FW_WR_OP(FW_RI_RES_WR) |
			V_FW_RI_RES_WR_NRES(2) |
			FW_WR_COMPL(1));
	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
	res_wr->cookie = (unsigned long) &wr_wait;
	res = res_wr->res;
	res->u.sqrq.restype = FW_RI_RES_TYPE_SQ;
	res->u.sqrq.op = FW_RI_RES_OP_WRITE;

	/*
	 * eqsize is the number of 64B entries plus the status page size.
	 */
	eqsize = wq->sq.size * T4_SQ_NUM_SLOTS + T4_EQ_STATUS_ENTRIES;

	res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
		V_FW_RI_RES_WR_HOSTFCMODE(0) |	/* no host cidx updates */
		V_FW_RI_RES_WR_CPRIO(0) |	/* don't keep in chip cache */
		V_FW_RI_RES_WR_PCIECHN(0) |	/* set by uP at ri_init time */
		V_FW_RI_RES_WR_IQID(scq->cqid));
	res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
		V_FW_RI_RES_WR_DCAEN(0) |
		V_FW_RI_RES_WR_DCACPU(0) |
		V_FW_RI_RES_WR_FBMIN(2) |
		V_FW_RI_RES_WR_FBMAX(3) |
		V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
		V_FW_RI_RES_WR_CIDXFTHRESH(0) |
		V_FW_RI_RES_WR_EQSIZE(eqsize));
	res->u.sqrq.eqid = cpu_to_be32(wq->sq.qid);
	res->u.sqrq.eqaddr = cpu_to_be64(wq->sq.dma_addr);
	res++;
	res->u.sqrq.restype = FW_RI_RES_TYPE_RQ;
	res->u.sqrq.op = FW_RI_RES_OP_WRITE;

	/*
	 * eqsize is the number of 64B entries plus the status page size.
	 */
	eqsize = wq->rq.size * T4_RQ_NUM_SLOTS + T4_EQ_STATUS_ENTRIES;
	res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
		V_FW_RI_RES_WR_HOSTFCMODE(0) |	/* no host cidx updates */
		V_FW_RI_RES_WR_CPRIO(0) |	/* don't keep in chip cache */
		V_FW_RI_RES_WR_PCIECHN(0) |	/* set by uP at ri_init time */
		V_FW_RI_RES_WR_IQID(rcq->cqid));
	res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
		V_FW_RI_RES_WR_DCAEN(0) |
		V_FW_RI_RES_WR_DCACPU(0) |
		V_FW_RI_RES_WR_FBMIN(2) |
		V_FW_RI_RES_WR_FBMAX(3) |
		V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
		V_FW_RI_RES_WR_CIDXFTHRESH(0) |
		V_FW_RI_RES_WR_EQSIZE(eqsize));
	res->u.sqrq.eqid = cpu_to_be32(wq->rq.qid);
	res->u.sqrq.eqaddr = cpu_to_be64(wq->rq.dma_addr);

	c4iw_init_wr_wait(&wr_wait);

	ret = c4iw_ofld_send(rdev, skb);
	if (ret)
		goto err7;
	ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, wq->sq.qid, __func__);
	if (ret)
		goto err7;

	PDBG("%s sqid 0x%x rqid 0x%x kdb 0x%p squdb 0x%llx rqudb 0x%llx\n",
	     __func__, wq->sq.qid, wq->rq.qid, wq->db,
	     (unsigned long long)wq->sq.udb, (unsigned long long)wq->rq.udb);

	return 0;
err7:
	dma_free_coherent(&(rdev->lldi.pdev->dev),
			  wq->rq.memsize, wq->rq.queue,
			  dma_unmap_addr(&wq->rq, mapping));
err6:
	dma_free_coherent(&(rdev->lldi.pdev->dev),
			  wq->sq.memsize, wq->sq.queue,
			  dma_unmap_addr(&wq->sq, mapping));
err5:
	c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
err4:
	kfree(wq->rq.sw_rq);
err3:
	kfree(wq->sq.sw_sq);
err2:
	c4iw_put_qpid(rdev, wq->rq.qid, uctx);
err1:
	c4iw_put_qpid(rdev, wq->sq.qid, uctx);
	return -ENOMEM;
}
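
/*
 * create_qp() programs both hardware egress queues with a single
 * FW_RI_RES_WR carrying two fw_ri_res entries (NRES == 2): one for the
 * SQ and one for the RQ.  Each EQ is sized in 64B slots plus room for
 * the status page:
 *
 *	eqsize = wq->sq.size * T4_SQ_NUM_SLOTS + T4_EQ_STATUS_ENTRIES;
 *
 * so, with illustrative numbers, an SQ of 128 entries at (say) 4 slots
 * each plus one status entry would ask the firmware for a 513-slot EQ.
 * The wr_wait cookie stamped into the WR lets the caller sleep in
 * c4iw_wait_for_reply() until the firmware completion for this WR
 * arrives.
 */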

static int build_immd(struct t4_sq *sq, struct fw_ri_immd *immdp,
		      struct ib_send_wr *wr, int max, u32 *plenp)
{
	u8 *dstp, *srcp;
	u32 plen = 0;
	int i;
	int rem, len;

	dstp = (u8 *)immdp->data;
	for (i = 0; i < wr->num_sge; i++) {
		if ((plen + wr->sg_list[i].length) > max)
			return -EMSGSIZE;
		srcp = (u8 *)(unsigned long)wr->sg_list[i].addr;
		plen += wr->sg_list[i].length;
		rem = wr->sg_list[i].length;
		while (rem) {
			if (dstp == (u8 *)&sq->queue[sq->size])
				dstp = (u8 *)sq->queue;
			if (rem <= (u8 *)&sq->queue[sq->size] - dstp)
				len = rem;
			else
				len = (u8 *)&sq->queue[sq->size] - dstp;
			memcpy(dstp, srcp, len);
			dstp += len;
			srcp += len;
			rem -= len;
		}
	}
	len = roundup(plen + sizeof *immdp, 16) - (plen + sizeof *immdp);
	if (len)
		memset(dstp, 0, len);
	immdp->op = FW_RI_DATA_IMMD;
	immdp->r1 = 0;
	immdp->r2 = 0;
	immdp->immdlen = cpu_to_be32(plen);
	*plenp = plen;
	return 0;
}
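
/*
 * build_immd() copies the caller's gather list straight into the SQ as
 * FW_RI_DATA_IMMD payload, wrapping at the end of the ring and then
 * zero-padding the total to a 16B multiple.  Because the destination
 * pointer wraps back to sq->queue when it reaches &sq->queue[sq->size],
 * a single SGE may be split into at most two memcpy() segments.  The
 * padding length is the usual round-up idiom:
 *
 *	pad = roundup(plen + sizeof *immdp, 16) - (plen + sizeof *immdp);
 *
 * which is 0 when the immediate header plus payload already ends on a
 * 16B boundary.
 */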

static int build_isgl(__be64 *queue_start, __be64 *queue_end,
		      struct fw_ri_isgl *isglp, struct ib_sge *sg_list,
		      int num_sge, u32 *plenp)
{
	int i;
	u32 plen = 0;
	__be64 *flitp = (__be64 *)isglp->sge;

	for (i = 0; i < num_sge; i++) {
		if ((plen + sg_list[i].length) < plen)
			return -EMSGSIZE;
		plen += sg_list[i].length;
		*flitp = cpu_to_be64(((u64)sg_list[i].lkey << 32) |
				     sg_list[i].length);
		if (++flitp == queue_end)
			flitp = queue_start;
		*flitp = cpu_to_be64(sg_list[i].addr);
		if (++flitp == queue_end)
			flitp = queue_start;
	}
	*flitp = (__force __be64)0;
	isglp->op = FW_RI_DATA_ISGL;
	isglp->r1 = 0;
	isglp->nsge = cpu_to_be16(num_sge);
	isglp->r2 = 0;
	if (plenp)
		*plenp = plen;
	return 0;
}
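
/*
 * Each SGE in the isgl is emitted as two big-endian 64b flits: the
 * first packs the lkey in the high 32 bits and the length in the low
 * 32, the second is the buffer address.  In other words, for one
 * ib_sge:
 *
 *	flit0 = cpu_to_be64(((u64)sge->lkey << 32) | sge->length);
 *	flit1 = cpu_to_be64(sge->addr);
 *
 * The flit pointer wraps from queue_end back to queue_start between
 * writes, which is why both stores are followed by the same wrap check.
 */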

static int build_rdma_send(struct t4_sq *sq, union t4_wr *wqe,
			   struct ib_send_wr *wr, u8 *len16)
{
	u32 plen;
	int size;
	int ret;

	if (wr->num_sge > T4_MAX_SEND_SGE)
		return -EINVAL;
	switch (wr->opcode) {
	case IB_WR_SEND:
		if (wr->send_flags & IB_SEND_SOLICITED)
			wqe->send.sendop_pkd = cpu_to_be32(
				V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_SE));
		else
			wqe->send.sendop_pkd = cpu_to_be32(
				V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND));
		wqe->send.stag_inv = 0;
		break;
	case IB_WR_SEND_WITH_INV:
		if (wr->send_flags & IB_SEND_SOLICITED)
			wqe->send.sendop_pkd = cpu_to_be32(
				V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_SE_INV));
		else
			wqe->send.sendop_pkd = cpu_to_be32(
				V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_INV));
		wqe->send.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
		break;

	default:
		return -EINVAL;
	}

	plen = 0;
	if (wr->num_sge) {
		if (wr->send_flags & IB_SEND_INLINE) {
			ret = build_immd(sq, wqe->send.u.immd_src, wr,
					 T4_MAX_SEND_INLINE, &plen);
			if (ret)
				return ret;
			size = sizeof wqe->send + sizeof(struct fw_ri_immd) +
			       plen;
		} else {
			ret = build_isgl((__be64 *)sq->queue,
					 (__be64 *)&sq->queue[sq->size],
					 wqe->send.u.isgl_src,
					 wr->sg_list, wr->num_sge, &plen);
			if (ret)
				return ret;
			size = sizeof wqe->send + sizeof(struct fw_ri_isgl) +
			       wr->num_sge * sizeof(struct fw_ri_sge);
		}
	} else {
		wqe->send.u.immd_src[0].op = FW_RI_DATA_IMMD;
		wqe->send.u.immd_src[0].r1 = 0;
		wqe->send.u.immd_src[0].r2 = 0;
		wqe->send.u.immd_src[0].immdlen = 0;
		size = sizeof wqe->send + sizeof(struct fw_ri_immd);
		plen = 0;
	}
	*len16 = DIV_ROUND_UP(size, 16);
	wqe->send.plen = cpu_to_be32(plen);
	return 0;
}

static int build_rdma_write(struct t4_sq *sq, union t4_wr *wqe,
			    struct ib_send_wr *wr, u8 *len16)
{
	u32 plen;
	int size;
	int ret;

	if (wr->num_sge > T4_MAX_SEND_SGE)
		return -EINVAL;
	wqe->write.r2 = 0;
	wqe->write.stag_sink = cpu_to_be32(wr->wr.rdma.rkey);
	wqe->write.to_sink = cpu_to_be64(wr->wr.rdma.remote_addr);
	if (wr->num_sge) {
		if (wr->send_flags & IB_SEND_INLINE) {
			ret = build_immd(sq, wqe->write.u.immd_src, wr,
					 T4_MAX_WRITE_INLINE, &plen);
			if (ret)
				return ret;
			size = sizeof wqe->write + sizeof(struct fw_ri_immd) +
			       plen;
		} else {
			ret = build_isgl((__be64 *)sq->queue,
					 (__be64 *)&sq->queue[sq->size],
					 wqe->write.u.isgl_src,
					 wr->sg_list, wr->num_sge, &plen);
			if (ret)
				return ret;
			size = sizeof wqe->write + sizeof(struct fw_ri_isgl) +
			       wr->num_sge * sizeof(struct fw_ri_sge);
		}
	} else {
		wqe->write.u.immd_src[0].op = FW_RI_DATA_IMMD;
		wqe->write.u.immd_src[0].r1 = 0;
		wqe->write.u.immd_src[0].r2 = 0;
		wqe->write.u.immd_src[0].immdlen = 0;
		size = sizeof wqe->write + sizeof(struct fw_ri_immd);
		plen = 0;
	}
	*len16 = DIV_ROUND_UP(size, 16);
	wqe->write.plen = cpu_to_be32(plen);
	return 0;
}

static int build_rdma_read(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
{
	if (wr->num_sge > 1)
		return -EINVAL;
	if (wr->num_sge) {
		wqe->read.stag_src = cpu_to_be32(wr->wr.rdma.rkey);
		wqe->read.to_src_hi = cpu_to_be32((u32)(wr->wr.rdma.remote_addr
							>> 32));
		wqe->read.to_src_lo = cpu_to_be32((u32)wr->wr.rdma.remote_addr);
		wqe->read.stag_sink = cpu_to_be32(wr->sg_list[0].lkey);
		wqe->read.plen = cpu_to_be32(wr->sg_list[0].length);
		wqe->read.to_sink_hi = cpu_to_be32((u32)(wr->sg_list[0].addr
							 >> 32));
		wqe->read.to_sink_lo = cpu_to_be32((u32)(wr->sg_list[0].addr));
	} else {
		wqe->read.stag_src = cpu_to_be32(2);
		wqe->read.to_src_hi = 0;
		wqe->read.to_src_lo = 0;
		wqe->read.stag_sink = cpu_to_be32(2);
		wqe->read.plen = 0;
		wqe->read.to_sink_hi = 0;
		wqe->read.to_sink_lo = 0;
	}
	wqe->read.r2 = 0;
	wqe->read.r5 = 0;
	*len16 = DIV_ROUND_UP(sizeof wqe->read, 16);
	return 0;
}
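
/*
 * For a zero-length read (num_sge == 0) a WQE is still built: plen is 0
 * and a small dummy stag (2 here, 1 in c4iw_post_zb_read() below) is
 * plugged into the source and sink stag fields so the firmware has
 * something well-formed to validate even though no data moves.
 */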

static int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe,
			   struct ib_recv_wr *wr, u8 *len16)
{
	int ret;

	ret = build_isgl((__be64 *)qhp->wq.rq.queue,
			 (__be64 *)&qhp->wq.rq.queue[qhp->wq.rq.size],
			 &wqe->recv.isgl, wr->sg_list, wr->num_sge, NULL);
	if (ret)
		return ret;
	*len16 = DIV_ROUND_UP(sizeof wqe->recv +
			      wr->num_sge * sizeof(struct fw_ri_sge), 16);
	return 0;
}

static int build_fastreg(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
{
	struct fw_ri_immd *imdp;
	__be64 *p;
	int i;
	int pbllen = roundup(wr->wr.fast_reg.page_list_len * sizeof(u64), 32);

	if (wr->wr.fast_reg.page_list_len > T4_MAX_FR_DEPTH)
		return -EINVAL;

	wqe->fr.qpbinde_to_dcacpu = 0;
	wqe->fr.pgsz_shift = wr->wr.fast_reg.page_shift - 12;
	wqe->fr.addr_type = FW_RI_VA_BASED_TO;
	wqe->fr.mem_perms = c4iw_ib_to_tpt_access(wr->wr.fast_reg.access_flags);
	wqe->fr.len_hi = 0;
	wqe->fr.len_lo = cpu_to_be32(wr->wr.fast_reg.length);
	wqe->fr.stag = cpu_to_be32(wr->wr.fast_reg.rkey);
	wqe->fr.va_hi = cpu_to_be32(wr->wr.fast_reg.iova_start >> 32);
	wqe->fr.va_lo_fbo = cpu_to_be32(wr->wr.fast_reg.iova_start &
					0xffffffff);
	if (pbllen > T4_MAX_FR_IMMD) {
		struct c4iw_fr_page_list *c4pl =
			to_c4iw_fr_page_list(wr->wr.fast_reg.page_list);
		struct fw_ri_dsgl *sglp;

		sglp = (struct fw_ri_dsgl *)(&wqe->fr + 1);
		sglp->op = FW_RI_DATA_DSGL;
		sglp->r1 = 0;
		sglp->nsge = cpu_to_be16(1);
		sglp->addr0 = cpu_to_be64(c4pl->dma_addr);
		sglp->len0 = cpu_to_be32(pbllen);

		*len16 = DIV_ROUND_UP(sizeof wqe->fr + sizeof *sglp, 16);
	} else {
		imdp = (struct fw_ri_immd *)(&wqe->fr + 1);
		imdp->op = FW_RI_DATA_IMMD;
		imdp->r1 = 0;
		imdp->r2 = 0;
		imdp->immdlen = cpu_to_be32(pbllen);
		p = (__be64 *)(imdp + 1);
		for (i = 0; i < wr->wr.fast_reg.page_list_len; i++, p++)
			*p = cpu_to_be64(
				(u64)wr->wr.fast_reg.page_list->page_list[i]);
		*len16 = DIV_ROUND_UP(sizeof wqe->fr + sizeof *imdp + pbllen,
				      16);
	}
	return 0;
}
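
/*
 * The fastreg PBL (physical buffer list) travels with the WQE in one of
 * two forms, chosen purely by size: up to T4_MAX_FR_IMMD bytes it is
 * copied inline as FW_RI_DATA_IMMD; beyond that the WQE instead carries
 * a one-entry FW_RI_DATA_DSGL pointing at the DMA-mapped page list and
 * the hardware fetches the PBL itself.  The 32B roundup of pbllen
 * above is presumably there to satisfy the alignment the DSGL path
 * wants, and it applies to both branches so the WQE length calculation
 * stays uniform.
 */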

static int build_inv_stag(union t4_wr *wqe, struct ib_send_wr *wr,
			  u8 *len16)
{
	wqe->inv.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
	wqe->inv.r2 = 0;
	*len16 = DIV_ROUND_UP(sizeof wqe->inv, 16);
	return 0;
}

void c4iw_qp_add_ref(struct ib_qp *qp)
{
	PDBG("%s ib_qp %p\n", __func__, qp);
	atomic_inc(&(to_c4iw_qp(qp)->refcnt));
}

void c4iw_qp_rem_ref(struct ib_qp *qp)
{
	PDBG("%s ib_qp %p\n", __func__, qp);
	if (atomic_dec_and_test(&(to_c4iw_qp(qp)->refcnt)))
		wake_up(&(to_c4iw_qp(qp)->wait));
}

int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		   struct ib_send_wr **bad_wr)
{
	int err = 0;
	u8 len16 = 0;
	enum fw_wr_opcodes fw_opcode = 0;
	enum fw_ri_wr_flags fw_flags;
	struct c4iw_qp *qhp;
	union t4_wr *wqe;
	u32 num_wrs;
	struct t4_swsqe *swsqe;
	unsigned long flag;
	u16 idx = 0;

	qhp = to_c4iw_qp(ibqp);
	spin_lock_irqsave(&qhp->lock, flag);
	if (t4_wq_in_error(&qhp->wq)) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		return -EINVAL;
	}
	num_wrs = t4_sq_avail(&qhp->wq);
	if (num_wrs == 0) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		return -ENOMEM;
	}
	while (wr) {
		if (num_wrs == 0) {
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}
		wqe = (union t4_wr *)((u8 *)qhp->wq.sq.queue +
		      qhp->wq.sq.wq_pidx * T4_EQ_ENTRY_SIZE);

		fw_flags = 0;
		if (wr->send_flags & IB_SEND_SOLICITED)
			fw_flags |= FW_RI_SOLICITED_EVENT_FLAG;
		if (wr->send_flags & IB_SEND_SIGNALED)
			fw_flags |= FW_RI_COMPLETION_FLAG;
		swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx];
		switch (wr->opcode) {
		case IB_WR_SEND_WITH_INV:
		case IB_WR_SEND:
			if (wr->send_flags & IB_SEND_FENCE)
				fw_flags |= FW_RI_READ_FENCE_FLAG;
			fw_opcode = FW_RI_SEND_WR;
			if (wr->opcode == IB_WR_SEND)
				swsqe->opcode = FW_RI_SEND;
			else
				swsqe->opcode = FW_RI_SEND_WITH_INV;
			err = build_rdma_send(&qhp->wq.sq, wqe, wr, &len16);
			break;
		case IB_WR_RDMA_WRITE:
			fw_opcode = FW_RI_RDMA_WRITE_WR;
			swsqe->opcode = FW_RI_RDMA_WRITE;
			err = build_rdma_write(&qhp->wq.sq, wqe, wr, &len16);
			break;
		case IB_WR_RDMA_READ:
		case IB_WR_RDMA_READ_WITH_INV:
			fw_opcode = FW_RI_RDMA_READ_WR;
			swsqe->opcode = FW_RI_READ_REQ;
			if (wr->opcode == IB_WR_RDMA_READ_WITH_INV)
				fw_flags |= FW_RI_RDMA_READ_INVALIDATE;
			else
				fw_flags = 0;
			err = build_rdma_read(wqe, wr, &len16);
			if (err)
				break;
			swsqe->read_len = wr->sg_list[0].length;
			if (!qhp->wq.sq.oldest_read)
				qhp->wq.sq.oldest_read = swsqe;
			break;
		case IB_WR_FAST_REG_MR:
			fw_opcode = FW_RI_FR_NSMR_WR;
			swsqe->opcode = FW_RI_FAST_REGISTER;
			err = build_fastreg(wqe, wr, &len16);
			break;
		case IB_WR_LOCAL_INV:
			if (wr->send_flags & IB_SEND_FENCE)
				fw_flags |= FW_RI_LOCAL_FENCE_FLAG;
			fw_opcode = FW_RI_INV_LSTAG_WR;
			swsqe->opcode = FW_RI_LOCAL_INV;
			err = build_inv_stag(wqe, wr, &len16);
			break;
		default:
			PDBG("%s post of type=%d TBD!\n", __func__,
			     wr->opcode);
			err = -EINVAL;
		}
		if (err) {
			*bad_wr = wr;
			break;
		}
		swsqe->idx = qhp->wq.sq.pidx;
		swsqe->complete = 0;
		swsqe->signaled = (wr->send_flags & IB_SEND_SIGNALED);
		swsqe->wr_id = wr->wr_id;

		init_wr_hdr(wqe, qhp->wq.sq.pidx, fw_opcode, fw_flags, len16);

		PDBG("%s cookie 0x%llx pidx 0x%x opcode 0x%x read_len %u\n",
		     __func__, (unsigned long long)wr->wr_id, qhp->wq.sq.pidx,
		     swsqe->opcode, swsqe->read_len);
		wr = wr->next;
		num_wrs--;
		t4_sq_produce(&qhp->wq, len16);
		idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
	}
	if (t4_wq_db_enabled(&qhp->wq))
		t4_ring_sq_db(&qhp->wq, idx);
	spin_unlock_irqrestore(&qhp->lock, flag);
	return err;
}
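
/*
 * c4iw_post_send() is wired up as the ib_device post_send verb, so a
 * kernel ULP reaches it through the usual ib_post_send() path.  A
 * minimal sketch of a signaled two-SGE send as a caller would build it
 * (assuming an already-connected QP and a registered MR):
 *
 *	struct ib_sge sge[2] = { ... };
 *	struct ib_send_wr swr = { 0 }, *bad_wr;
 *
 *	swr.opcode = IB_WR_SEND;
 *	swr.send_flags = IB_SEND_SIGNALED;
 *	swr.sg_list = sge;
 *	swr.num_sge = 2;
 *	ret = ib_post_send(qp, &swr, &bad_wr);
 *
 * Everything after that happens under qhp->lock: a T4 WQE is built at
 * wq_pidx, the sw_sq shadow entry records wr_id and opcode for
 * completion processing, and one doorbell write covers the whole chain.
 */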

int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr)
{
	int err = 0;
	struct c4iw_qp *qhp;
	union t4_recv_wr *wqe;
	u32 num_wrs;
	u8 len16 = 0;
	unsigned long flag;
	u16 idx = 0;

	qhp = to_c4iw_qp(ibqp);
	spin_lock_irqsave(&qhp->lock, flag);
	if (t4_wq_in_error(&qhp->wq)) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		return -EINVAL;
	}
	num_wrs = t4_rq_avail(&qhp->wq);
	if (num_wrs == 0) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		return -ENOMEM;
	}
	while (wr) {
		if (wr->num_sge > T4_MAX_RECV_SGE) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}
		wqe = (union t4_recv_wr *)((u8 *)qhp->wq.rq.queue +
					   qhp->wq.rq.wq_pidx *
					   T4_EQ_ENTRY_SIZE);
		if (num_wrs)
			err = build_rdma_recv(qhp, wqe, wr, &len16);
		else
			err = -ENOMEM;
		if (err) {
			*bad_wr = wr;
			break;
		}

		qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].wr_id = wr->wr_id;

		wqe->recv.opcode = FW_RI_RECV_WR;
		wqe->recv.r1 = 0;
		wqe->recv.wrid = qhp->wq.rq.pidx;
		wqe->recv.r2[0] = 0;
		wqe->recv.r2[1] = 0;
		wqe->recv.r2[2] = 0;
		wqe->recv.len16 = len16;
		PDBG("%s cookie 0x%llx pidx %u\n", __func__,
		     (unsigned long long) wr->wr_id, qhp->wq.rq.pidx);
		t4_rq_produce(&qhp->wq, len16);
		idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
		wr = wr->next;
		num_wrs--;
	}
	if (t4_wq_db_enabled(&qhp->wq))
		t4_ring_rq_db(&qhp->wq, idx);
	spin_unlock_irqrestore(&qhp->lock, flag);
	return err;
}

int c4iw_bind_mw(struct ib_qp *qp, struct ib_mw *mw, struct ib_mw_bind *mw_bind)
{
	return -ENOSYS;
}

static inline void build_term_codes(struct t4_cqe *err_cqe, u8 *layer_type,
				    u8 *ecode)
{
	int status;
	int tagged;
	int opcode;
	int rqtype;
	int send_inv;

	if (!err_cqe) {
		*layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;
		*ecode = 0;
		return;
	}

	status = CQE_STATUS(err_cqe);
	opcode = CQE_OPCODE(err_cqe);
	rqtype = RQ_TYPE(err_cqe);
	send_inv = (opcode == FW_RI_SEND_WITH_INV) ||
		   (opcode == FW_RI_SEND_WITH_SE_INV);
	tagged = (opcode == FW_RI_RDMA_WRITE) ||
		 (rqtype && (opcode == FW_RI_READ_RESP));

	switch (status) {
	case T4_ERR_STAG:
		if (send_inv) {
			*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
			*ecode = RDMAP_CANT_INV_STAG;
		} else {
			*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
			*ecode = RDMAP_INV_STAG;
		}
		break;
	case T4_ERR_PDID:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		if ((opcode == FW_RI_SEND_WITH_INV) ||
		    (opcode == FW_RI_SEND_WITH_SE_INV))
			*ecode = RDMAP_CANT_INV_STAG;
		else
			*ecode = RDMAP_STAG_NOT_ASSOC;
		break;
	case T4_ERR_QPID:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		*ecode = RDMAP_STAG_NOT_ASSOC;
		break;
	case T4_ERR_ACCESS:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		*ecode = RDMAP_ACC_VIOL;
		break;
	case T4_ERR_WRAP:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		*ecode = RDMAP_TO_WRAP;
		break;
	case T4_ERR_BOUND:
		if (tagged) {
			*layer_type = LAYER_DDP|DDP_TAGGED_ERR;
			*ecode = DDPT_BASE_BOUNDS;
		} else {
			*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
			*ecode = RDMAP_BASE_BOUNDS;
		}
		break;
	case T4_ERR_INVALIDATE_SHARED_MR:
	case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
		*ecode = RDMAP_CANT_INV_STAG;
		break;
	case T4_ERR_ECC:
	case T4_ERR_ECC_PSTAG:
	case T4_ERR_INTERNAL_ERR:
		*layer_type = LAYER_RDMAP|RDMAP_LOCAL_CATA;
		*ecode = 0;
		break;
	case T4_ERR_OUT_OF_RQE:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_MSN_NOBUF;
		break;
	case T4_ERR_PBL_ADDR_BOUND:
		*layer_type = LAYER_DDP|DDP_TAGGED_ERR;
		*ecode = DDPT_BASE_BOUNDS;
		break;
	case T4_ERR_CRC:
		*layer_type = LAYER_MPA|DDP_LLP;
		*ecode = MPA_CRC_ERR;
		break;
	case T4_ERR_MARKER:
		*layer_type = LAYER_MPA|DDP_LLP;
		*ecode = MPA_MARKER_ERR;
		break;
	case T4_ERR_PDU_LEN_ERR:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_MSG_TOOBIG;
		break;
	case T4_ERR_DDP_VERSION:
		if (tagged) {
			*layer_type = LAYER_DDP|DDP_TAGGED_ERR;
			*ecode = DDPT_INV_VERS;
		} else {
			*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
			*ecode = DDPU_INV_VERS;
		}
		break;
	case T4_ERR_RDMA_VERSION:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
		*ecode = RDMAP_INV_VERS;
		break;
	case T4_ERR_OPCODE:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
		*ecode = RDMAP_INV_OPCODE;
		break;
	case T4_ERR_DDP_QUEUE_NUM:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_QN;
		break;
	case T4_ERR_MSN:
	case T4_ERR_MSN_GAP:
	case T4_ERR_MSN_RANGE:
	case T4_ERR_IRD_OVERFLOW:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_MSN_RANGE;
		break;
	case T4_ERR_TBIT:
		*layer_type = LAYER_DDP|DDP_LOCAL_CATA;
		*ecode = 0;
		break;
	case T4_ERR_MO:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_MO;
		break;
	default:
		*layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;
		*ecode = 0;
		break;
	}
}
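
/*
 * build_term_codes() translates a hardware CQE status into the
 * layer/etype and error-code octets carried in an iWARP TERMINATE
 * message (RFC 5040/5041 semantics): RDMAP protection and operation
 * errors, DDP tagged vs untagged buffer errors, and MPA CRC/marker
 * failures.  "tagged" here means the failing operation addressed a
 * tagged buffer (an RDMA write, or a read response landing on the RQ
 * side), which selects the DDP tagged-buffer error space instead of
 * the untagged one.
 */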

int c4iw_post_zb_read(struct c4iw_qp *qhp)
{
	union t4_wr *wqe;
	struct sk_buff *skb;
	u8 len16;

	PDBG("%s enter\n", __func__);
	skb = alloc_skb(40, GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR "%s cannot send zb_read!!\n", __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);

	wqe = (union t4_wr *)skb_put(skb, sizeof wqe->read);
	memset(wqe, 0, sizeof wqe->read);
	wqe->read.r2 = cpu_to_be64(0);
	wqe->read.stag_sink = cpu_to_be32(1);
	wqe->read.to_sink_hi = cpu_to_be32(0);
	wqe->read.to_sink_lo = cpu_to_be32(1);
	wqe->read.stag_src = cpu_to_be32(1);
	wqe->read.plen = cpu_to_be32(0);
	wqe->read.to_src_hi = cpu_to_be32(0);
	wqe->read.to_src_lo = cpu_to_be32(1);
	len16 = DIV_ROUND_UP(sizeof wqe->read, 16);
	init_wr_hdr(wqe, 0, FW_RI_RDMA_READ_WR, FW_RI_COMPLETION_FLAG, len16);

	return c4iw_ofld_send(&qhp->rhp->rdev, skb);
}

static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe,
			   gfp_t gfp)
{
	struct fw_ri_wr *wqe;
	struct sk_buff *skb;
	struct terminate_message *term;

	PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
	     qhp->ep->hwtid);

	skb = alloc_skb(sizeof *wqe, gfp);
	if (!skb)
		return;
	set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);

	wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
	memset(wqe, 0, sizeof *wqe);
	wqe->op_compl = cpu_to_be32(FW_WR_OP(FW_RI_INIT_WR));
	wqe->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID(qhp->ep->hwtid) |
		FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));

	wqe->u.terminate.type = FW_RI_TYPE_TERMINATE;
	wqe->u.terminate.immdlen = cpu_to_be32(sizeof *term);
	term = (struct terminate_message *)wqe->u.terminate.termmsg;
	build_term_codes(err_cqe, &term->layer_etype, &term->ecode);
	c4iw_ofld_send(&qhp->rhp->rdev, skb);
}

/*
 * Assumes qhp lock is held.
 */
static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
		       struct c4iw_cq *schp, unsigned long *flag)
{
	int count;
	int flushed;

	PDBG("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp);
	/* take a ref on the qhp since we must release the lock */
	atomic_inc(&qhp->refcnt);
	spin_unlock_irqrestore(&qhp->lock, *flag);

	/* locking hierarchy: cq lock first, then qp lock. */
	spin_lock_irqsave(&rchp->lock, *flag);
	spin_lock(&qhp->lock);
	c4iw_flush_hw_cq(&rchp->cq);
	c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count);
	flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
	spin_unlock(&qhp->lock);
	spin_unlock_irqrestore(&rchp->lock, *flag);
	if (flushed)
		(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);

	/* locking hierarchy: cq lock first, then qp lock. */
	spin_lock_irqsave(&schp->lock, *flag);
	spin_lock(&qhp->lock);
	c4iw_flush_hw_cq(&schp->cq);
	c4iw_count_scqes(&schp->cq, &qhp->wq, &count);
	flushed = c4iw_flush_sq(&qhp->wq, &schp->cq, count);
	spin_unlock(&qhp->lock);
	spin_unlock_irqrestore(&schp->lock, *flag);
	if (flushed)
		(*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context);

	/* deref */
	if (atomic_dec_and_test(&qhp->refcnt))
		wake_up(&qhp->wait);

	spin_lock_irqsave(&qhp->lock, *flag);
}
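
/*
 * The dance above is all about lock ordering: the CQ lock must be taken
 * before the QP lock, but __flush_qp() is entered with only the QP lock
 * held.  So it drops qhp->lock (after taking a reference so the QP
 * cannot vanish), reacquires in cq-then-qp order for each CQ, and
 * retakes qhp->lock before returning so the caller's locking picture is
 * unchanged.  The comp_handler upcalls are made with no locks held.
 */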

static void flush_qp(struct c4iw_qp *qhp, unsigned long *flag)
{
	struct c4iw_cq *rchp, *schp;

	rchp = get_chp(qhp->rhp, qhp->attr.rcq);
	schp = get_chp(qhp->rhp, qhp->attr.scq);

	if (qhp->ibqp.uobject) {
		t4_set_wq_in_error(&qhp->wq);
		t4_set_cq_in_error(&rchp->cq);
		if (schp != rchp)
			t4_set_cq_in_error(&schp->cq);
		return;
	}
	__flush_qp(qhp, rchp, schp, flag);
}

static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
		     struct c4iw_ep *ep)
{
	struct fw_ri_wr *wqe;
	int ret;
	struct c4iw_wr_wait wr_wait;
	struct sk_buff *skb;

	PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
	     ep->hwtid);

	skb = alloc_skb(sizeof *wqe, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);

	wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
	memset(wqe, 0, sizeof *wqe);
	wqe->op_compl = cpu_to_be32(
		FW_WR_OP(FW_RI_INIT_WR) |
		FW_WR_COMPL(1));
	wqe->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID(ep->hwtid) |
		FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));
	wqe->cookie = (unsigned long) &wr_wait;

	wqe->u.fini.type = FW_RI_TYPE_FINI;
	c4iw_init_wr_wait(&wr_wait);
	ret = c4iw_ofld_send(&rhp->rdev, skb);
	if (ret)
		goto out;

	ret = c4iw_wait_for_reply(&rhp->rdev, &wr_wait, qhp->ep->hwtid,
				  qhp->wq.sq.qid, __func__);
out:
	PDBG("%s ret %d\n", __func__, ret);
	return ret;
}
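
/*
 * rdma_fini() uses the same fire-and-wait pattern as create_qp() and
 * rdma_init(): stamp the address of an on-stack c4iw_wr_wait into the
 * WR cookie, post the WR, then block in c4iw_wait_for_reply() until
 * the firmware echoes the cookie back in its completion.  Roughly:
 *
 *	struct c4iw_wr_wait wr_wait;
 *
 *	wqe->cookie = (unsigned long) &wr_wait;
 *	c4iw_init_wr_wait(&wr_wait);
 *	ret = c4iw_ofld_send(&rhp->rdev, skb);
 *	if (!ret)
 *		ret = c4iw_wait_for_reply(&rhp->rdev, &wr_wait, ...);
 *
 * This is only safe because the caller sleeps until the reply arrives,
 * so the stack frame outlives the firmware's use of the cookie.
 */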

static void build_rtr_msg(u8 p2p_type, struct fw_ri_init *init)
{
	memset(&init->u, 0, sizeof init->u);
	switch (p2p_type) {
	case FW_RI_INIT_P2PTYPE_RDMA_WRITE:
		init->u.write.opcode = FW_RI_RDMA_WRITE_WR;
		init->u.write.stag_sink = cpu_to_be32(1);
		init->u.write.to_sink = cpu_to_be64(1);
		init->u.write.u.immd_src[0].op = FW_RI_DATA_IMMD;
		init->u.write.len16 = DIV_ROUND_UP(sizeof init->u.write +
						   sizeof(struct fw_ri_immd),
						   16);
		break;
	case FW_RI_INIT_P2PTYPE_READ_REQ:
		init->u.write.opcode = FW_RI_RDMA_READ_WR;
		init->u.read.stag_src = cpu_to_be32(1);
		init->u.read.to_src_lo = cpu_to_be32(1);
		init->u.read.stag_sink = cpu_to_be32(1);
		init->u.read.to_sink_lo = cpu_to_be32(1);
		init->u.read.len16 = DIV_ROUND_UP(sizeof init->u.read, 16);
		break;
	}
}

static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
{
	struct fw_ri_wr *wqe;
	int ret;
	struct c4iw_wr_wait wr_wait;
	struct sk_buff *skb;

	PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
	     qhp->ep->hwtid);

	skb = alloc_skb(sizeof *wqe, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;
	set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);

	wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
	memset(wqe, 0, sizeof *wqe);
	wqe->op_compl = cpu_to_be32(
		FW_WR_OP(FW_RI_INIT_WR) |
		FW_WR_COMPL(1));
	wqe->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID(qhp->ep->hwtid) |
		FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));

	wqe->cookie = (unsigned long) &wr_wait;

	wqe->u.init.type = FW_RI_TYPE_INIT;
	wqe->u.init.mpareqbit_p2ptype =
		V_FW_RI_WR_MPAREQBIT(qhp->attr.mpa_attr.initiator) |
		V_FW_RI_WR_P2PTYPE(qhp->attr.mpa_attr.p2p_type);
	wqe->u.init.mpa_attrs = FW_RI_MPA_IETF_ENABLE;
	if (qhp->attr.mpa_attr.recv_marker_enabled)
		wqe->u.init.mpa_attrs |= FW_RI_MPA_RX_MARKER_ENABLE;
	if (qhp->attr.mpa_attr.xmit_marker_enabled)
		wqe->u.init.mpa_attrs |= FW_RI_MPA_TX_MARKER_ENABLE;
	if (qhp->attr.mpa_attr.crc_enabled)
		wqe->u.init.mpa_attrs |= FW_RI_MPA_CRC_ENABLE;

	wqe->u.init.qp_caps = FW_RI_QP_RDMA_READ_ENABLE |
			      FW_RI_QP_RDMA_WRITE_ENABLE |
			      FW_RI_QP_BIND_ENABLE;
	if (!qhp->ibqp.uobject)
		wqe->u.init.qp_caps |= FW_RI_QP_FAST_REGISTER_ENABLE |
				       FW_RI_QP_STAG0_ENABLE;
	wqe->u.init.nrqe = cpu_to_be16(t4_rqes_posted(&qhp->wq));
	wqe->u.init.pdid = cpu_to_be32(qhp->attr.pd);
	wqe->u.init.qpid = cpu_to_be32(qhp->wq.sq.qid);
	wqe->u.init.sq_eqid = cpu_to_be32(qhp->wq.sq.qid);
	wqe->u.init.rq_eqid = cpu_to_be32(qhp->wq.rq.qid);
	wqe->u.init.scqid = cpu_to_be32(qhp->attr.scq);
	wqe->u.init.rcqid = cpu_to_be32(qhp->attr.rcq);
	wqe->u.init.ord_max = cpu_to_be32(qhp->attr.max_ord);
	wqe->u.init.ird_max = cpu_to_be32(qhp->attr.max_ird);
	wqe->u.init.iss = cpu_to_be32(qhp->ep->snd_seq);
	wqe->u.init.irs = cpu_to_be32(qhp->ep->rcv_seq);
	wqe->u.init.hwrqsize = cpu_to_be32(qhp->wq.rq.rqt_size);
	wqe->u.init.hwrqaddr = cpu_to_be32(qhp->wq.rq.rqt_hwaddr -
					   rhp->rdev.lldi.vr->rq.start);
	if (qhp->attr.mpa_attr.initiator)
		build_rtr_msg(qhp->attr.mpa_attr.p2p_type, &wqe->u.init);

	c4iw_init_wr_wait(&wr_wait);
	ret = c4iw_ofld_send(&rhp->rdev, skb);
	if (ret)
		goto out;

	ret = c4iw_wait_for_reply(&rhp->rdev, &wr_wait, qhp->ep->hwtid,
				  qhp->wq.sq.qid, __func__);
out:
	PDBG("%s ret %d\n", __func__, ret);
	return ret;
}

int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
		   enum c4iw_qp_attr_mask mask,
		   struct c4iw_qp_attributes *attrs,
		   int internal)
{
	int ret = 0;
	struct c4iw_qp_attributes newattr = qhp->attr;
	unsigned long flag;
	int disconnect = 0;
	int terminate = 0;
	int abort = 0;
	int free = 0;
	struct c4iw_ep *ep = NULL;

	PDBG("%s qhp %p sqid 0x%x rqid 0x%x ep %p state %d -> %d\n", __func__,
	     qhp, qhp->wq.sq.qid, qhp->wq.rq.qid, qhp->ep, qhp->attr.state,
	     (mask & C4IW_QP_ATTR_NEXT_STATE) ? attrs->next_state : -1);

	spin_lock_irqsave(&qhp->lock, flag);

	/* Process attr changes if in IDLE */
	if (mask & C4IW_QP_ATTR_VALID_MODIFY) {
		if (qhp->attr.state != C4IW_QP_STATE_IDLE) {
			ret = -EIO;
			goto out;
		}
		if (mask & C4IW_QP_ATTR_ENABLE_RDMA_READ)
			newattr.enable_rdma_read = attrs->enable_rdma_read;
		if (mask & C4IW_QP_ATTR_ENABLE_RDMA_WRITE)
			newattr.enable_rdma_write = attrs->enable_rdma_write;
		if (mask & C4IW_QP_ATTR_ENABLE_RDMA_BIND)
			newattr.enable_bind = attrs->enable_bind;
		if (mask & C4IW_QP_ATTR_MAX_ORD) {
			if (attrs->max_ord > c4iw_max_read_depth) {
				ret = -EINVAL;
				goto out;
			}
			newattr.max_ord = attrs->max_ord;
		}
		if (mask & C4IW_QP_ATTR_MAX_IRD) {
			if (attrs->max_ird > c4iw_max_read_depth) {
				ret = -EINVAL;
				goto out;
			}
			newattr.max_ird = attrs->max_ird;
		}
		qhp->attr = newattr;
	}

	if (!(mask & C4IW_QP_ATTR_NEXT_STATE))
		goto out;
	if (qhp->attr.state == attrs->next_state)
		goto out;

	switch (qhp->attr.state) {
	case C4IW_QP_STATE_IDLE:
		switch (attrs->next_state) {
		case C4IW_QP_STATE_RTS:
			if (!(mask & C4IW_QP_ATTR_LLP_STREAM_HANDLE)) {
				ret = -EINVAL;
				goto out;
			}
			if (!(mask & C4IW_QP_ATTR_MPA_ATTR)) {
				ret = -EINVAL;
				goto out;
			}
			qhp->attr.mpa_attr = attrs->mpa_attr;
			qhp->attr.llp_stream_handle = attrs->llp_stream_handle;
			qhp->ep = qhp->attr.llp_stream_handle;
			qhp->attr.state = C4IW_QP_STATE_RTS;

			/*
			 * Ref the endpoint here and deref when we
			 * disassociate the endpoint from the QP.  This
			 * happens in CLOSING->IDLE transition or *->ERROR
			 * transition.
			 */
			c4iw_get_ep(&qhp->ep->com);
			spin_unlock_irqrestore(&qhp->lock, flag);
			ret = rdma_init(rhp, qhp);
			spin_lock_irqsave(&qhp->lock, flag);
			if (ret)
				goto err;
			break;
		case C4IW_QP_STATE_ERROR:
			qhp->attr.state = C4IW_QP_STATE_ERROR;
			flush_qp(qhp, &flag);
			break;
		default:
			ret = -EINVAL;
			goto out;
		}
		break;
	case C4IW_QP_STATE_RTS:
		switch (attrs->next_state) {
		case C4IW_QP_STATE_CLOSING:
			BUG_ON(atomic_read(&qhp->ep->com.kref.refcount) < 2);
			qhp->attr.state = C4IW_QP_STATE_CLOSING;
			ep = qhp->ep;
			if (!internal) {
				abort = 0;
				disconnect = 1;
				c4iw_get_ep(&ep->com);
			}
			spin_unlock_irqrestore(&qhp->lock, flag);
			ret = rdma_fini(rhp, qhp, ep);
			spin_lock_irqsave(&qhp->lock, flag);
			if (ret) {
				c4iw_get_ep(&ep->com);
				disconnect = abort = 1;
				goto err;
			}
			break;
		case C4IW_QP_STATE_TERMINATE:
			qhp->attr.state = C4IW_QP_STATE_TERMINATE;
			if (qhp->ibqp.uobject)
				t4_set_wq_in_error(&qhp->wq);
			ep = qhp->ep;
			c4iw_get_ep(&ep->com);
			if (!internal)
				terminate = 1;
			disconnect = 1;
			break;
		case C4IW_QP_STATE_ERROR:
			qhp->attr.state = C4IW_QP_STATE_ERROR;
			if (!internal) {
				abort = 1;
				disconnect = 1;
				ep = qhp->ep;
				c4iw_get_ep(&ep->com);
			}
			goto err;
			break;
		default:
			ret = -EINVAL;
			goto out;
		}
		break;
	case C4IW_QP_STATE_CLOSING:
		if (!internal) {
			ret = -EINVAL;
			goto out;
		}
		switch (attrs->next_state) {
		case C4IW_QP_STATE_IDLE:
			flush_qp(qhp, &flag);
			qhp->attr.state = C4IW_QP_STATE_IDLE;
			qhp->attr.llp_stream_handle = NULL;
			c4iw_put_ep(&qhp->ep->com);
			qhp->ep = NULL;
			wake_up(&qhp->wait);
			break;
		case C4IW_QP_STATE_ERROR:
			goto err;
		default:
			ret = -EINVAL;
			goto err;
		}
		break;
	case C4IW_QP_STATE_ERROR:
		if (attrs->next_state != C4IW_QP_STATE_IDLE) {
			ret = -EINVAL;
			goto out;
		}
		if (!t4_sq_empty(&qhp->wq) || !t4_rq_empty(&qhp->wq)) {
			ret = -EINVAL;
			goto out;
		}
		qhp->attr.state = C4IW_QP_STATE_IDLE;
		break;
	case C4IW_QP_STATE_TERMINATE:
		if (!internal) {
			ret = -EINVAL;
			goto out;
		}
		goto err;
		break;
	default:
		printk(KERN_ERR "%s in a bad state %d\n",
		       __func__, qhp->attr.state);
		ret = -EINVAL;
		goto err;
		break;
	}
	goto out;
err:
	PDBG("%s disassociating ep %p qpid 0x%x\n", __func__, qhp->ep,
	     qhp->wq.sq.qid);

	/* disassociate the LLP connection */
	qhp->attr.llp_stream_handle = NULL;
	if (!ep)
		ep = qhp->ep;
	qhp->ep = NULL;
	qhp->attr.state = C4IW_QP_STATE_ERROR;
	free = 1;
	wake_up(&qhp->wait);
	BUG_ON(!ep);
	flush_qp(qhp, &flag);
out:
	spin_unlock_irqrestore(&qhp->lock, flag);

	if (terminate)
		post_terminate(qhp, NULL, internal ? GFP_ATOMIC : GFP_KERNEL);

	/*
	 * If disconnect is 1, then we need to initiate a disconnect
	 * on the EP.  This can be a normal close (RTS->CLOSING) or
	 * an abnormal close (RTS/CLOSING->ERROR).
	 */
	if (disconnect) {
		c4iw_ep_disconnect(ep, abort, internal ? GFP_ATOMIC :
				   GFP_KERNEL);
		c4iw_put_ep(&ep->com);
	}

	/*
	 * If free is 1, then we've disassociated the EP from the QP
	 * and we need to dereference the EP.
	 */
	if (free)
		c4iw_put_ep(&ep->com);

	PDBG("%s exit state %d\n", __func__, qhp->attr.state);
	return ret;
}
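
/*
 * c4iw_modify_qp() implements the iWARP QP state machine.  The legal
 * transitions driven above, in brief:
 *
 *	IDLE    -> RTS        post FW_RI_TYPE_INIT (rdma_init)
 *	RTS     -> CLOSING    post FW_RI_TYPE_FINI (rdma_fini), normal close
 *	RTS     -> TERMINATE  post a TERMINATE message, then disconnect
 *	RTS     -> ERROR      abortive close
 *	CLOSING -> IDLE       flush and disassociate the ep (internal only)
 *	ERROR   -> IDLE       allowed only once both queues are empty
 *
 * Anything that fails mid-transition funnels through the err: label,
 * which forces ERROR state, flushes the queues, and drops the ep
 * reference taken at IDLE->RTS time.
 */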

int c4iw_destroy_qp(struct ib_qp *ib_qp)
{
	struct c4iw_dev *rhp;
	struct c4iw_qp *qhp;
	struct c4iw_qp_attributes attrs;
	struct c4iw_ucontext *ucontext;

	qhp = to_c4iw_qp(ib_qp);
	rhp = qhp->rhp;

	attrs.next_state = C4IW_QP_STATE_ERROR;
	c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
	wait_event(qhp->wait, !qhp->ep);

	remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
	atomic_dec(&qhp->refcnt);
	wait_event(qhp->wait, !atomic_read(&qhp->refcnt));

	ucontext = ib_qp->uobject ?
		   to_c4iw_ucontext(ib_qp->uobject->context) : NULL;
	destroy_qp(&rhp->rdev, &qhp->wq,
		   ucontext ? &ucontext->uctx : &rhp->rdev.uctx);

	PDBG("%s ib_qp %p qpid 0x%0x\n", __func__, ib_qp, qhp->wq.sq.qid);
	kfree(qhp);
	return 0;
}

struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
			     struct ib_udata *udata)
{
	struct c4iw_dev *rhp;
	struct c4iw_qp *qhp;
	struct c4iw_pd *php;
	struct c4iw_cq *schp;
	struct c4iw_cq *rchp;
	struct c4iw_create_qp_resp uresp;
	int sqsize, rqsize;
	struct c4iw_ucontext *ucontext;
	int ret;
	struct c4iw_mm_entry *mm1, *mm2, *mm3, *mm4;

	PDBG("%s ib_pd %p\n", __func__, pd);

	if (attrs->qp_type != IB_QPT_RC)
		return ERR_PTR(-EINVAL);

	php = to_c4iw_pd(pd);
	rhp = php->rhp;
	schp = get_chp(rhp, ((struct c4iw_cq *)attrs->send_cq)->cq.cqid);
	rchp = get_chp(rhp, ((struct c4iw_cq *)attrs->recv_cq)->cq.cqid);
	if (!schp || !rchp)
		return ERR_PTR(-EINVAL);

	if (attrs->cap.max_inline_data > T4_MAX_SEND_INLINE)
		return ERR_PTR(-EINVAL);

	rqsize = roundup(attrs->cap.max_recv_wr + 1, 16);
	if (rqsize > T4_MAX_RQ_SIZE)
		return ERR_PTR(-E2BIG);

	sqsize = roundup(attrs->cap.max_send_wr + 1, 16);
	if (sqsize > T4_MAX_SQ_SIZE)
		return ERR_PTR(-E2BIG);

	ucontext = pd->uobject ? to_c4iw_ucontext(pd->uobject->context) : NULL;

	qhp = kzalloc(sizeof(*qhp), GFP_KERNEL);
	if (!qhp)
		return ERR_PTR(-ENOMEM);
	qhp->wq.sq.size = sqsize;
	qhp->wq.sq.memsize = (sqsize + 1) * sizeof *qhp->wq.sq.queue;
	qhp->wq.rq.size = rqsize;
	qhp->wq.rq.memsize = (rqsize + 1) * sizeof *qhp->wq.rq.queue;

	if (ucontext) {
		qhp->wq.sq.memsize = roundup(qhp->wq.sq.memsize, PAGE_SIZE);
		qhp->wq.rq.memsize = roundup(qhp->wq.rq.memsize, PAGE_SIZE);
	}

	PDBG("%s sqsize %u sqmemsize %zu rqsize %u rqmemsize %zu\n",
	     __func__, sqsize, qhp->wq.sq.memsize, rqsize, qhp->wq.rq.memsize);

	ret = create_qp(&rhp->rdev, &qhp->wq, &schp->cq, &rchp->cq,
			ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
	if (ret)
		goto err1;

	attrs->cap.max_recv_wr = rqsize - 1;
	attrs->cap.max_send_wr = sqsize - 1;
	attrs->cap.max_inline_data = T4_MAX_SEND_INLINE;

	qhp->rhp = rhp;
	qhp->attr.pd = php->pdid;
	qhp->attr.scq = ((struct c4iw_cq *) attrs->send_cq)->cq.cqid;
	qhp->attr.rcq = ((struct c4iw_cq *) attrs->recv_cq)->cq.cqid;
	qhp->attr.sq_num_entries = attrs->cap.max_send_wr;
	qhp->attr.rq_num_entries = attrs->cap.max_recv_wr;
	qhp->attr.sq_max_sges = attrs->cap.max_send_sge;
	qhp->attr.sq_max_sges_rdma_write = attrs->cap.max_send_sge;
	qhp->attr.rq_max_sges = attrs->cap.max_recv_sge;
	qhp->attr.state = C4IW_QP_STATE_IDLE;
	qhp->attr.next_state = C4IW_QP_STATE_IDLE;
	qhp->attr.enable_rdma_read = 1;
	qhp->attr.enable_rdma_write = 1;
	qhp->attr.enable_bind = 1;
	qhp->attr.max_ord = 1;
	qhp->attr.max_ird = 1;
	spin_lock_init(&qhp->lock);
	init_waitqueue_head(&qhp->wait);
	atomic_set(&qhp->refcnt, 1);

	ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
	if (ret)
		goto err2;

	if (udata) {
		mm1 = kmalloc(sizeof *mm1, GFP_KERNEL);
		if (!mm1) {
			ret = -ENOMEM;
			goto err3;
		}
		mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
		if (!mm2) {
			ret = -ENOMEM;
			goto err4;
		}
		mm3 = kmalloc(sizeof *mm3, GFP_KERNEL);
		if (!mm3) {
			ret = -ENOMEM;
			goto err5;
		}
		mm4 = kmalloc(sizeof *mm4, GFP_KERNEL);
		if (!mm4) {
			ret = -ENOMEM;
			goto err6;
		}

		uresp.qid_mask = rhp->rdev.qpmask;
		uresp.sqid = qhp->wq.sq.qid;
		uresp.sq_size = qhp->wq.sq.size;
		uresp.sq_memsize = qhp->wq.sq.memsize;
		uresp.rqid = qhp->wq.rq.qid;
		uresp.rq_size = qhp->wq.rq.size;
		uresp.rq_memsize = qhp->wq.rq.memsize;
		spin_lock(&ucontext->mmap_lock);
		uresp.sq_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.rq_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.sq_db_gts_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.rq_db_gts_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		spin_unlock(&ucontext->mmap_lock);
		ret = ib_copy_to_udata(udata, &uresp, sizeof uresp);
		if (ret)
			goto err7;
		mm1->key = uresp.sq_key;
		mm1->addr = virt_to_phys(qhp->wq.sq.queue);
		mm1->len = PAGE_ALIGN(qhp->wq.sq.memsize);
		insert_mmap(ucontext, mm1);
		mm2->key = uresp.rq_key;
		mm2->addr = virt_to_phys(qhp->wq.rq.queue);
		mm2->len = PAGE_ALIGN(qhp->wq.rq.memsize);
		insert_mmap(ucontext, mm2);
		mm3->key = uresp.sq_db_gts_key;
		mm3->addr = qhp->wq.sq.udb;
		mm3->len = PAGE_SIZE;
		insert_mmap(ucontext, mm3);
		mm4->key = uresp.rq_db_gts_key;
		mm4->addr = qhp->wq.rq.udb;
		mm4->len = PAGE_SIZE;
		insert_mmap(ucontext, mm4);
	}
	qhp->ibqp.qp_num = qhp->wq.sq.qid;
	init_timer(&(qhp->timer));
	PDBG("%s qhp %p sq_num_entries %d, rq_num_entries %d qpid 0x%0x\n",
	     __func__, qhp, qhp->attr.sq_num_entries, qhp->attr.rq_num_entries,
	     qhp->wq.sq.qid);
	return &qhp->ibqp;
err7:
	kfree(mm4);
err6:
	kfree(mm3);
err5:
	kfree(mm2);
err4:
	kfree(mm1);
err3:
	remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
err2:
	destroy_qp(&rhp->rdev, &qhp->wq,
		   ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
err1:
	kfree(qhp);
	return ERR_PTR(ret);
}
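
/*
 * For userspace QPs, c4iw_create_qp() hands the queue memory back to
 * the process via four mmap cookies: the SQ ring, the RQ ring, and the
 * per-queue user doorbell/GTS pages (wq.sq.udb and wq.rq.udb).  Each
 * key is a PAGE_SIZE-spaced offset minted under ucontext->mmap_lock;
 * the userspace library is expected to mmap() the device file at each
 * returned key, e.g. (userspace sketch, names illustrative):
 *
 *	sq = mmap(NULL, uresp.sq_memsize, PROT_READ | PROT_WRITE,
 *		  MAP_SHARED, ctx->cmd_fd, uresp.sq_key);
 *
 * and the matching c4iw_mm_entry inserted here is what the driver's
 * mmap handler looks up to resolve that offset.
 */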

int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata)
{
	struct c4iw_dev *rhp;
	struct c4iw_qp *qhp;
	enum c4iw_qp_attr_mask mask = 0;
	struct c4iw_qp_attributes attrs;

	PDBG("%s ib_qp %p\n", __func__, ibqp);

	/* iwarp does not support the RTR state */
	if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR))
		attr_mask &= ~IB_QP_STATE;

	/* Make sure we still have something left to do */
	if (!attr_mask)
		return 0;

	memset(&attrs, 0, sizeof attrs);
	qhp = to_c4iw_qp(ibqp);
	rhp = qhp->rhp;

	attrs.next_state = c4iw_convert_state(attr->qp_state);
	attrs.enable_rdma_read = (attr->qp_access_flags &
				  IB_ACCESS_REMOTE_READ) ? 1 : 0;
	attrs.enable_rdma_write = (attr->qp_access_flags &
				   IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
	attrs.enable_bind = (attr->qp_access_flags & IB_ACCESS_MW_BIND) ? 1 : 0;

	mask |= (attr_mask & IB_QP_STATE) ? C4IW_QP_ATTR_NEXT_STATE : 0;
	mask |= (attr_mask & IB_QP_ACCESS_FLAGS) ?
		(C4IW_QP_ATTR_ENABLE_RDMA_READ |
		 C4IW_QP_ATTR_ENABLE_RDMA_WRITE |
		 C4IW_QP_ATTR_ENABLE_RDMA_BIND) : 0;

	return c4iw_modify_qp(rhp, qhp, mask, &attrs, 0);
}

struct ib_qp *c4iw_get_qp(struct ib_device *dev, int qpn)
{
	PDBG("%s ib_dev %p qpn 0x%x\n", __func__, dev, qpn);
	return (struct ib_qp *)get_qhp(to_c4iw_dev(dev), qpn);
}