/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
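
/*
 * Completion queue (CQ) support for the Chelsio T4 iWARP driver:
 * CQ creation and teardown via firmware resource work requests,
 * software flushing of in-flight work requests, and translation of
 * hardware CQEs into ib_wc entries for the RDMA midlayer.
 */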

#include "iw_cxgb4.h"

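/*
 * Tear down a hardware CQ: post a FW_RI_RES_WR with the RESET opcode
 * and wait (bounded by C4IW_WR_TO) for the firmware reply before
 * freeing the host queue memory, so the hardware cannot DMA into a
 * buffer that has already been released.
 */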
static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
		      struct c4iw_dev_ucontext *uctx)
{
	struct fw_ri_res_wr *res_wr;
	struct fw_ri_res *res;
	int wr_len;
	struct c4iw_wr_wait wr_wait;
	struct sk_buff *skb;
	int ret;

	wr_len = sizeof *res_wr + sizeof *res;
	skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
	if (!skb)
		return -ENOMEM;
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

	res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
	memset(res_wr, 0, wr_len);
	res_wr->op_nres = cpu_to_be32(
			FW_WR_OP(FW_RI_RES_WR) |
			V_FW_RI_RES_WR_NRES(1) |
			FW_WR_COMPL(1));
	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
	res_wr->cookie = (u64)&wr_wait;
	res = res_wr->res;
	res->u.cq.restype = FW_RI_RES_TYPE_CQ;
	res->u.cq.op = FW_RI_RES_OP_RESET;
	res->u.cq.iqid = cpu_to_be32(cq->cqid);

	c4iw_init_wr_wait(&wr_wait);
	ret = c4iw_ofld_send(rdev, skb);
	if (!ret) {
		wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO);
		if (!wr_wait.done) {
			printk(KERN_ERR MOD "Device %s not responding!\n",
			       pci_name(rdev->lldi.pdev));
			rdev->flags = T4_FATAL_ERROR;
			ret = -EIO;
		} else
			ret = wr_wait.ret;
	}

	kfree(cq->sw_queue);
	dma_free_coherent(&(rdev->lldi.pdev->dev),
			  cq->memsize, cq->queue,
			  pci_unmap_addr(cq, mapping));
	c4iw_put_cqid(rdev, cq->cqid, uctx);
	return ret;
}

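/*
 * Allocate and initialize a hardware CQ: reserve a cqid, allocate the
 * software shadow queue (kernel CQs only) and the DMA-coherent queue
 * memory, then post a FW_RI_RES_WR with the WRITE opcode to hand the
 * queue to the firmware and wait for its reply.
 */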
static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
		     struct c4iw_dev_ucontext *uctx)
{
	struct fw_ri_res_wr *res_wr;
	struct fw_ri_res *res;
	int wr_len;
	int user = (uctx != &rdev->uctx);
	struct c4iw_wr_wait wr_wait;
	int ret;
	struct sk_buff *skb;

	cq->cqid = c4iw_get_cqid(rdev, uctx);
	if (!cq->cqid) {
		ret = -ENOMEM;
		goto err1;
	}

	if (!user) {
		cq->sw_queue = kzalloc(cq->memsize, GFP_KERNEL);
		if (!cq->sw_queue) {
			ret = -ENOMEM;
			goto err2;
		}
	}
	cq->queue = dma_alloc_coherent(&rdev->lldi.pdev->dev, cq->memsize,
				       &cq->dma_addr, GFP_KERNEL);
	if (!cq->queue) {
		ret = -ENOMEM;
		goto err3;
	}
	pci_unmap_addr_set(cq, mapping, cq->dma_addr);
	memset(cq->queue, 0, cq->memsize);

	/* build fw_ri_res_wr */
	wr_len = sizeof *res_wr + sizeof *res;

	skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
	if (!skb) {
		ret = -ENOMEM;
		goto err4;
	}
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

	res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
	memset(res_wr, 0, wr_len);
	res_wr->op_nres = cpu_to_be32(
			FW_WR_OP(FW_RI_RES_WR) |
			V_FW_RI_RES_WR_NRES(1) |
			FW_WR_COMPL(1));
	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
	res_wr->cookie = (u64)&wr_wait;
	res = res_wr->res;
	res->u.cq.restype = FW_RI_RES_TYPE_CQ;
	res->u.cq.op = FW_RI_RES_OP_WRITE;
	res->u.cq.iqid = cpu_to_be32(cq->cqid);
	res->u.cq.iqandst_to_iqandstindex = cpu_to_be32(
			V_FW_RI_RES_WR_IQANUS(0) |
			V_FW_RI_RES_WR_IQANUD(1) |
			F_FW_RI_RES_WR_IQANDST |
			V_FW_RI_RES_WR_IQANDSTINDEX(*rdev->lldi.rxq_ids));
	res->u.cq.iqdroprss_to_iqesize = cpu_to_be16(
			F_FW_RI_RES_WR_IQDROPRSS |
			V_FW_RI_RES_WR_IQPCIECH(2) |
			V_FW_RI_RES_WR_IQINTCNTTHRESH(0) |
			F_FW_RI_RES_WR_IQO |
			V_FW_RI_RES_WR_IQESIZE(1));
	res->u.cq.iqsize = cpu_to_be16(cq->size);
	res->u.cq.iqaddr = cpu_to_be64(cq->dma_addr);

	c4iw_init_wr_wait(&wr_wait);

	ret = c4iw_ofld_send(rdev, skb);
	if (ret)
		goto err4;
	PDBG("%s wait_event wr_wait %p\n", __func__, &wr_wait);
	wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO);
	if (!wr_wait.done) {
		printk(KERN_ERR MOD "Device %s not responding!\n",
		       pci_name(rdev->lldi.pdev));
		rdev->flags = T4_FATAL_ERROR;
		ret = -EIO;
	} else
		ret = wr_wait.ret;
	if (ret)
		goto err4;

	cq->gen = 1;
	cq->gts = rdev->lldi.gts_reg;
	cq->rdev = rdev;
	if (user) {
		cq->ugts = (u64)pci_resource_start(rdev->lldi.pdev, 2) +
					(cq->cqid << rdev->cqshift);
		cq->ugts &= PAGE_MASK;
	}
	return 0;
err4:
	dma_free_coherent(&rdev->lldi.pdev->dev, cq->memsize, cq->queue,
			  pci_unmap_addr(cq, mapping));
err3:
	kfree(cq->sw_queue);
err2:
	c4iw_put_cqid(rdev, cq->cqid, uctx);
err1:
	return ret;
}

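/*
 * The insert_*_cqe()/c4iw_flush_*() helpers below synthesize
 * T4_ERR_SWFLUSH completions in the software CQ for work requests
 * that were still outstanding when the QP was flushed, so consumers
 * see a flush status for every posted WR.
 */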
static void insert_recv_cqe(struct t4_wq *wq, struct t4_cq *cq)
{
	struct t4_cqe cqe;

	PDBG("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__,
	     wq, cq, cq->sw_cidx, cq->sw_pidx);
	memset(&cqe, 0, sizeof(cqe));
	cqe.header = cpu_to_be32(V_CQE_STATUS(T4_ERR_SWFLUSH) |
				 V_CQE_OPCODE(FW_RI_SEND) |
				 V_CQE_TYPE(0) |
				 V_CQE_SWCQE(1) |
				 V_CQE_QPID(wq->rq.qid));
	cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen));
	cq->sw_queue[cq->sw_pidx] = cqe;
	t4_swcq_produce(cq);
}

int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count)
{
	int flushed = 0;
	int in_use = wq->rq.in_use - count;

	BUG_ON(in_use < 0);
	PDBG("%s wq %p cq %p rq.in_use %u skip count %u\n", __func__,
	     wq, cq, wq->rq.in_use, count);
	while (in_use--) {
		insert_recv_cqe(wq, cq);
		flushed++;
	}
	return flushed;
}

static void insert_sq_cqe(struct t4_wq *wq, struct t4_cq *cq,
			  struct t4_swsqe *swcqe)
{
	struct t4_cqe cqe;

	PDBG("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__,
	     wq, cq, cq->sw_cidx, cq->sw_pidx);
	memset(&cqe, 0, sizeof(cqe));
	cqe.header = cpu_to_be32(V_CQE_STATUS(T4_ERR_SWFLUSH) |
				 V_CQE_OPCODE(swcqe->opcode) |
				 V_CQE_TYPE(1) |
				 V_CQE_SWCQE(1) |
				 V_CQE_QPID(wq->sq.qid));
	CQE_WRID_SQ_IDX(&cqe) = swcqe->idx;
	cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen));
	cq->sw_queue[cq->sw_pidx] = cqe;
	t4_swcq_produce(cq);
}

int c4iw_flush_sq(struct t4_wq *wq, struct t4_cq *cq, int count)
{
	int flushed = 0;
	/* Start past the CQEs already counted, wrapping at the SQ size. */
	struct t4_swsqe *swsqe = &wq->sq.sw_sq[(wq->sq.cidx + count) %
					       wq->sq.size];
	int in_use = wq->sq.in_use - count;

	BUG_ON(in_use < 0);
	while (in_use--) {
		swsqe->signaled = 0;
		insert_sq_cqe(wq, cq, swsqe);
		swsqe++;
		if (swsqe == (wq->sq.sw_sq + wq->sq.size))
			swsqe = wq->sq.sw_sq;
		flushed++;
	}
	return flushed;
}

/*
 * Move all CQEs from the HWCQ into the SWCQ.
 */
void c4iw_flush_hw_cq(struct t4_cq *cq)
{
	struct t4_cqe *cqe = NULL, *swcqe;
	int ret;

	PDBG("%s cq %p cqid 0x%x\n", __func__, cq, cq->cqid);
	ret = t4_next_hw_cqe(cq, &cqe);
	while (!ret) {
		PDBG("%s flushing hwcq cidx 0x%x swcq pidx 0x%x\n",
		     __func__, cq->cidx, cq->sw_pidx);
		swcqe = &cq->sw_queue[cq->sw_pidx];
		*swcqe = *cqe;
		swcqe->header |= cpu_to_be32(V_CQE_SWCQE(1));
		t4_swcq_produce(cq);
		t4_hwcq_consume(cq);
		ret = t4_next_hw_cqe(cq, &cqe);
	}
}

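/*
 * cqe_completes_wr() reports whether a CQE actually consumes a posted
 * WR: terminate CQEs, RQ-side RDMA-write CQEs, SQ-side read-response
 * CQEs, and SEND CQEs arriving on an empty RQ do not.
 */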
static int cqe_completes_wr(struct t4_cqe *cqe, struct t4_wq *wq)
{
	if (CQE_OPCODE(cqe) == FW_RI_TERMINATE)
		return 0;

	if ((CQE_OPCODE(cqe) == FW_RI_RDMA_WRITE) && RQ_TYPE(cqe))
		return 0;

	if ((CQE_OPCODE(cqe) == FW_RI_READ_RESP) && SQ_TYPE(cqe))
		return 0;

	if (CQE_SEND_OPCODE(cqe) && RQ_TYPE(cqe) && t4_rq_empty(wq))
		return 0;
	return 1;
}

void c4iw_count_scqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
{
	struct t4_cqe *cqe;
	u32 ptr;

	*count = 0;
	ptr = cq->sw_cidx;
	while (ptr != cq->sw_pidx) {
		cqe = &cq->sw_queue[ptr];
		if ((SQ_TYPE(cqe) || ((CQE_OPCODE(cqe) == FW_RI_READ_RESP) &&
				      wq->sq.oldest_read)) &&
		    (CQE_QPID(cqe) == wq->sq.qid))
			(*count)++;
		if (++ptr == cq->size)
			ptr = 0;
	}
	PDBG("%s cq %p count %d\n", __func__, cq, *count);
}

void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
{
	struct t4_cqe *cqe;
	u32 ptr;

	*count = 0;
	PDBG("%s count zero %d\n", __func__, *count);
	ptr = cq->sw_cidx;
	while (ptr != cq->sw_pidx) {
		cqe = &cq->sw_queue[ptr];
		if (RQ_TYPE(cqe) && (CQE_OPCODE(cqe) != FW_RI_READ_RESP) &&
		    (CQE_QPID(cqe) == wq->rq.qid) && cqe_completes_wr(cqe, wq))
			(*count)++;
		if (++ptr == cq->size)
			ptr = 0;
	}
	PDBG("%s cq %p count %d\n", __func__, cq, *count);
}

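/*
 * Walk the software SQ from the consumer index, skipping unsignaled
 * WRs until a signaled WR with a saved completion is found; move that
 * completion into the software CQ and retire the unsignaled WRs it
 * implicitly completes.
 */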
static void flush_completed_wrs(struct t4_wq *wq, struct t4_cq *cq)
{
	struct t4_swsqe *swsqe;
	u16 ptr = wq->sq.cidx;
	int count = wq->sq.in_use;
	int unsignaled = 0;

	swsqe = &wq->sq.sw_sq[ptr];
	while (count--)
		if (!swsqe->signaled) {
			if (++ptr == wq->sq.size)
				ptr = 0;
			swsqe = &wq->sq.sw_sq[ptr];
			unsignaled++;
		} else if (swsqe->complete) {

			/*
			 * Insert this completed cqe into the swcq.
			 */
			PDBG("%s moving cqe into swcq sq idx %u cq idx %u\n",
			     __func__, ptr, cq->sw_pidx);
			swsqe->cqe.header |= htonl(V_CQE_SWCQE(1));
			cq->sw_queue[cq->sw_pidx] = swsqe->cqe;
			t4_swcq_produce(cq);
			swsqe->signaled = 0;
			wq->sq.in_use -= unsignaled;
			break;
		} else
			break;
}

static void create_read_req_cqe(struct t4_wq *wq, struct t4_cqe *hw_cqe,
				struct t4_cqe *read_cqe)
{
	read_cqe->u.scqe.cidx = wq->sq.oldest_read->idx;
	read_cqe->len = cpu_to_be32(wq->sq.oldest_read->read_len);
	read_cqe->header = htonl(V_CQE_QPID(CQE_QPID(hw_cqe)) |
				 V_CQE_SWCQE(SW_CQE(hw_cqe)) |
				 V_CQE_OPCODE(FW_RI_READ_REQ) |
				 V_CQE_TYPE(1));
	read_cqe->bits_type_ts = hw_cqe->bits_type_ts;
}

/*
 * Advance wq->sq.oldest_read to the next read WR in the SWSQ, or set
 * it to NULL if no further read WR is pending.
 */
static void advance_oldest_read(struct t4_wq *wq)
{
	u32 rptr = wq->sq.oldest_read - wq->sq.sw_sq + 1;

	if (rptr == wq->sq.size)
		rptr = 0;
	while (rptr != wq->sq.pidx) {
		wq->sq.oldest_read = &wq->sq.sw_sq[rptr];

		if (wq->sq.oldest_read->opcode == FW_RI_READ_REQ)
			return;
		if (++rptr == wq->sq.size)
			rptr = 0;
	}
	wq->sq.oldest_read = NULL;
}

/*
 * poll_cq
 *
 * Caller must:
 *     check the validity of the first CQE,
 *     supply the wq associated with the qpid.
 *
 * credit: cq credit to return to sge.
 * cqe_flushed: 1 iff the CQE is flushed.
 * cqe: copy of the polled CQE.
 *
 * return value:
 *    0		    CQE returned ok.
 *    -EAGAIN       CQE skipped, try again.
 *    -EOVERFLOW    CQ overflow detected.
 */
static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
		   u8 *cqe_flushed, u64 *cookie, u32 *credit)
{
	int ret = 0;
	struct t4_cqe *hw_cqe, read_cqe;

	*cqe_flushed = 0;
	*credit = 0;
	ret = t4_next_cqe(cq, &hw_cqe);
	if (ret)
		return ret;

	PDBG("%s CQE OVF %u qpid 0x%0x genbit %u type %u status 0x%0x"
	     " opcode 0x%0x len 0x%0x wrid_hi_stag 0x%x wrid_low_msn 0x%x\n",
	     __func__, CQE_OVFBIT(hw_cqe), CQE_QPID(hw_cqe),
	     CQE_GENBIT(hw_cqe), CQE_TYPE(hw_cqe), CQE_STATUS(hw_cqe),
	     CQE_OPCODE(hw_cqe), CQE_LEN(hw_cqe), CQE_WRID_HI(hw_cqe),
	     CQE_WRID_LOW(hw_cqe));

	/*
	 * Skip CQEs not affiliated with a QP.
	 */
	if (wq == NULL) {
		ret = -EAGAIN;
		goto skip_cqe;
	}

	/*
	 * Gotta tweak READ completions:
	 *	1) the cqe doesn't contain the sq_wptr from the wr.
	 *	2) opcode not reflected from the wr.
	 *	3) read_len not reflected from the wr.
	 *	4) cq_type is RQ_TYPE not SQ_TYPE.
	 */
	if (RQ_TYPE(hw_cqe) && (CQE_OPCODE(hw_cqe) == FW_RI_READ_RESP)) {

		/*
		 * If this is an unsolicited read response, then the read
		 * was generated by the kernel driver as part of peer-2-peer
		 * connection setup.  So ignore the completion.
		 */
		if (!wq->sq.oldest_read) {
			if (CQE_STATUS(hw_cqe))
				t4_set_wq_in_error(wq);
			ret = -EAGAIN;
			goto skip_cqe;
		}

		/*
		 * Don't write to the HWCQ, so create a new read req CQE
		 * in local memory.
		 */
		create_read_req_cqe(wq, hw_cqe, &read_cqe);
		hw_cqe = &read_cqe;
		advance_oldest_read(wq);
	}

	if (CQE_STATUS(hw_cqe) || t4_wq_in_error(wq)) {
		*cqe_flushed = t4_wq_in_error(wq);
		t4_set_wq_in_error(wq);
		goto proc_cqe;
	}

	/*
	 * RECV completion.
	 */
	if (RQ_TYPE(hw_cqe)) {

		/*
		 * HW only validates 4 bits of MSN.  So we must validate that
		 * the MSN in the SEND is the next expected MSN.  If it's not,
		 * then we complete this with T4_ERR_MSN and mark the wq in
		 * error.
		 */

		if (t4_rq_empty(wq)) {
			t4_set_wq_in_error(wq);
			ret = -EAGAIN;
			goto skip_cqe;
		}
		if (unlikely((CQE_WRID_MSN(hw_cqe) != (wq->rq.msn)))) {
			t4_set_wq_in_error(wq);
			hw_cqe->header |= htonl(V_CQE_STATUS(T4_ERR_MSN));
			goto proc_cqe;
		}
		goto proc_cqe;
	}

	/*
	 * If we get here it's a send completion.
	 *
	 * Handle out of order completion. These get stuffed
	 * in the SW SQ. Then the SW SQ is walked to move any
	 * now in-order completions into the SW CQ.  This handles
	 * 2 cases:
	 *	1) reaping unsignaled WRs when the first subsequent
	 *	   signaled WR is completed.
	 *	2) out of order read completions.
	 */
	if (!SW_CQE(hw_cqe) && (CQE_WRID_SQ_IDX(hw_cqe) != wq->sq.cidx)) {
		struct t4_swsqe *swsqe;

		PDBG("%s out of order completion going in sw_sq at idx %u\n",
		     __func__, CQE_WRID_SQ_IDX(hw_cqe));
		swsqe = &wq->sq.sw_sq[CQE_WRID_SQ_IDX(hw_cqe)];
		swsqe->cqe = *hw_cqe;
		swsqe->complete = 1;
		ret = -EAGAIN;
		goto flush_wq;
	}

proc_cqe:
	*cqe = *hw_cqe;

	/*
	 * Reap the associated WR(s) that are freed up with this
	 * completion.
	 */
	if (SQ_TYPE(hw_cqe)) {
		wq->sq.cidx = CQE_WRID_SQ_IDX(hw_cqe);
		PDBG("%s completing sq idx %u\n", __func__, wq->sq.cidx);
		*cookie = wq->sq.sw_sq[wq->sq.cidx].wr_id;
		t4_sq_consume(wq);
	} else {
		PDBG("%s completing rq idx %u\n", __func__, wq->rq.cidx);
		*cookie = wq->rq.sw_rq[wq->rq.cidx].wr_id;
		BUG_ON(t4_rq_empty(wq));
		t4_rq_consume(wq);
	}

flush_wq:
	/*
	 * Flush any completed cqes that are now in-order.
	 */
	flush_completed_wrs(wq, cq);

skip_cqe:
	if (SW_CQE(hw_cqe)) {
		PDBG("%s cq %p cqid 0x%x skip sw cqe cidx %u\n",
		     __func__, cq, cq->cqid, cq->sw_cidx);
		t4_swcq_consume(cq);
	} else {
		PDBG("%s cq %p cqid 0x%x skip hw cqe cidx %u\n",
		     __func__, cq, cq->cqid, cq->cidx);
		t4_hwcq_consume(cq);
	}
	return ret;
}

/*
 * Get one CQ entry from c4iw and map it to openib.
 *
 * Returns:
 *	0			cqe returned
 *	-ENODATA		CQ empty
 *	-EAGAIN			caller must try again
 *	any other -errno	fatal error
 */
static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
{
	struct c4iw_qp *qhp = NULL;
	struct t4_cqe cqe = {0, 0}, *rd_cqe;
	struct t4_wq *wq;
	u32 credit = 0;
	u8 cqe_flushed;
	u64 cookie = 0;
	int ret;

	ret = t4_next_cqe(&chp->cq, &rd_cqe);

	if (ret)
		return ret;

	qhp = get_qhp(chp->rhp, CQE_QPID(rd_cqe));
	if (!qhp)
		wq = NULL;
	else {
		spin_lock(&qhp->lock);
		wq = &(qhp->wq);
	}
	ret = poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie, &credit);
	if (ret)
		goto out;

	wc->wr_id = cookie;
	wc->qp = &qhp->ibqp;
	wc->vendor_err = CQE_STATUS(&cqe);
	wc->wc_flags = 0;

	PDBG("%s qpid 0x%x type %d opcode %d status 0x%x len %u wrid hi 0x%x "
	     "lo 0x%x cookie 0x%llx\n", __func__, CQE_QPID(&cqe),
	     CQE_TYPE(&cqe), CQE_OPCODE(&cqe), CQE_STATUS(&cqe), CQE_LEN(&cqe),
	     CQE_WRID_HI(&cqe), CQE_WRID_LOW(&cqe), (unsigned long long)cookie);

	if (CQE_TYPE(&cqe) == 0) {
		if (!CQE_STATUS(&cqe))
			wc->byte_len = CQE_LEN(&cqe);
		else
			wc->byte_len = 0;
		wc->opcode = IB_WC_RECV;
		if (CQE_OPCODE(&cqe) == FW_RI_SEND_WITH_INV ||
		    CQE_OPCODE(&cqe) == FW_RI_SEND_WITH_SE_INV) {
			wc->ex.invalidate_rkey = CQE_WRID_STAG(&cqe);
			wc->wc_flags |= IB_WC_WITH_INVALIDATE;
		}
	} else {
		switch (CQE_OPCODE(&cqe)) {
		case FW_RI_RDMA_WRITE:
			wc->opcode = IB_WC_RDMA_WRITE;
			break;
		case FW_RI_READ_REQ:
			wc->opcode = IB_WC_RDMA_READ;
			wc->byte_len = CQE_LEN(&cqe);
			break;
		case FW_RI_SEND_WITH_INV:
		case FW_RI_SEND_WITH_SE_INV:
			wc->opcode = IB_WC_SEND;
			wc->wc_flags |= IB_WC_WITH_INVALIDATE;
			break;
		case FW_RI_SEND:
		case FW_RI_SEND_WITH_SE:
			wc->opcode = IB_WC_SEND;
			break;
		case FW_RI_BIND_MW:
			wc->opcode = IB_WC_BIND_MW;
			break;
		case FW_RI_LOCAL_INV:
			wc->opcode = IB_WC_LOCAL_INV;
			break;
		case FW_RI_FAST_REGISTER:
			wc->opcode = IB_WC_FAST_REG_MR;
			break;
		default:
			printk(KERN_ERR MOD "Unexpected opcode %d "
			       "in the CQE received for QPID=0x%0x\n",
			       CQE_OPCODE(&cqe), CQE_QPID(&cqe));
			ret = -EINVAL;
			goto out;
		}
	}

	if (cqe_flushed)
		wc->status = IB_WC_WR_FLUSH_ERR;
	else {
		switch (CQE_STATUS(&cqe)) {
		case T4_ERR_SUCCESS:
			wc->status = IB_WC_SUCCESS;
			break;
		case T4_ERR_STAG:
			wc->status = IB_WC_LOC_ACCESS_ERR;
			break;
		case T4_ERR_PDID:
			wc->status = IB_WC_LOC_PROT_ERR;
			break;
		case T4_ERR_QPID:
		case T4_ERR_ACCESS:
			wc->status = IB_WC_LOC_ACCESS_ERR;
			break;
		case T4_ERR_WRAP:
			wc->status = IB_WC_GENERAL_ERR;
			break;
		case T4_ERR_BOUND:
			wc->status = IB_WC_LOC_LEN_ERR;
			break;
		case T4_ERR_INVALIDATE_SHARED_MR:
		case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
			wc->status = IB_WC_MW_BIND_ERR;
			break;
		case T4_ERR_CRC:
		case T4_ERR_MARKER:
		case T4_ERR_PDU_LEN_ERR:
		case T4_ERR_OUT_OF_RQE:
		case T4_ERR_DDP_VERSION:
		case T4_ERR_RDMA_VERSION:
		case T4_ERR_DDP_QUEUE_NUM:
		case T4_ERR_MSN:
		case T4_ERR_TBIT:
		case T4_ERR_MO:
		case T4_ERR_MSN_RANGE:
		case T4_ERR_IRD_OVERFLOW:
		case T4_ERR_OPCODE:
			wc->status = IB_WC_FATAL_ERR;
			break;
		case T4_ERR_SWFLUSH:
			wc->status = IB_WC_WR_FLUSH_ERR;
			break;
		default:
			printk(KERN_ERR MOD
			       "Unexpected cqe_status 0x%x for QPID=0x%0x\n",
			       CQE_STATUS(&cqe), CQE_QPID(&cqe));
			ret = -EINVAL;
		}
	}
out:
	if (wq)
		spin_unlock(&qhp->lock);
	return ret;
}

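/*
 * ib_poll_cq entry point: poll up to num_entries completions under the
 * CQ lock.  c4iw_poll_cq_one() returns -EAGAIN for CQEs that must be
 * skipped (e.g. no associated QP), so each slot retries until a real
 * completion, an empty CQ (-ENODATA), or a fatal error is seen.
 */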
int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	struct c4iw_cq *chp;
	unsigned long flags;
	int npolled;
	int err = 0;

	chp = to_c4iw_cq(ibcq);

	spin_lock_irqsave(&chp->lock, flags);
	for (npolled = 0; npolled < num_entries; ++npolled) {
		do {
			err = c4iw_poll_cq_one(chp, wc + npolled);
		} while (err == -EAGAIN);
		if (err)
			break;
	}
	spin_unlock_irqrestore(&chp->lock, flags);
	return !err || err == -ENODATA ? npolled : err;
}

int c4iw_destroy_cq(struct ib_cq *ib_cq)
{
	struct c4iw_cq *chp;
	struct c4iw_ucontext *ucontext;

	PDBG("%s ib_cq %p\n", __func__, ib_cq);
	chp = to_c4iw_cq(ib_cq);

	remove_handle(chp->rhp, &chp->rhp->cqidr, chp->cq.cqid);
	atomic_dec(&chp->refcnt);
	wait_event(chp->wait, !atomic_read(&chp->refcnt));

	ucontext = ib_cq->uobject ? to_c4iw_ucontext(ib_cq->uobject->context)
				  : NULL;
	destroy_cq(&chp->rhp->rdev, &chp->cq,
		   ucontext ? &ucontext->uctx : &chp->cq.rdev->uctx);
	kfree(chp);
	return 0;
}

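/*
 * ib_create_cq entry point.  The requested depth is padded for the
 * status page, for the extra IQ slot that distinguishes full from
 * empty, and up to a multiple of 16 as required by the hardware.  For
 * user CQs, two mmap keys are published: one for the queue memory and
 * one for the per-CQ GTS doorbell page.
 */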
struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
			     int vector, struct ib_ucontext *ib_context,
			     struct ib_udata *udata)
{
	struct c4iw_dev *rhp;
	struct c4iw_cq *chp;
	struct c4iw_create_cq_resp uresp;
	struct c4iw_ucontext *ucontext = NULL;
	int ret;
	size_t memsize;
	struct c4iw_mm_entry *mm, *mm2;

	PDBG("%s ib_dev %p entries %d\n", __func__, ibdev, entries);

	rhp = to_c4iw_dev(ibdev);

	chp = kzalloc(sizeof(*chp), GFP_KERNEL);
	if (!chp)
		return ERR_PTR(-ENOMEM);

	if (ib_context)
		ucontext = to_c4iw_ucontext(ib_context);

	/* account for the status page. */
	entries++;

	/* IQ needs one extra entry to differentiate full vs empty. */
	entries++;

	/*
	 * entries must be multiple of 16 for HW.
	 */
	entries = roundup(entries, 16);
	memsize = entries * sizeof *chp->cq.queue;

	/*
	 * memsize must be a multiple of the page size if it's a user CQ.
	 */
	if (ucontext)
		memsize = roundup(memsize, PAGE_SIZE);
	chp->cq.size = entries;
	chp->cq.memsize = memsize;

	ret = create_cq(&rhp->rdev, &chp->cq,
			ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
	if (ret)
		goto err1;

	chp->rhp = rhp;
	chp->cq.size--;				/* status page */
	chp->ibcq.cqe = chp->cq.size - 1;
	spin_lock_init(&chp->lock);
	atomic_set(&chp->refcnt, 1);
	init_waitqueue_head(&chp->wait);
	ret = insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid);
	if (ret)
		goto err2;

	if (ucontext) {
		mm = kmalloc(sizeof *mm, GFP_KERNEL);
		if (!mm) {
			ret = -ENOMEM;
			goto err3;
		}
		mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
		if (!mm2) {
			ret = -ENOMEM;
			goto err4;
		}

		uresp.qid_mask = rhp->rdev.cqmask;
		uresp.cqid = chp->cq.cqid;
		uresp.size = chp->cq.size;
		uresp.memsize = chp->cq.memsize;
		spin_lock(&ucontext->mmap_lock);
		uresp.key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.gts_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		spin_unlock(&ucontext->mmap_lock);
		ret = ib_copy_to_udata(udata, &uresp, sizeof uresp);
		if (ret)
			goto err5;

		mm->key = uresp.key;
		mm->addr = virt_to_phys(chp->cq.queue);
		mm->len = chp->cq.memsize;
		insert_mmap(ucontext, mm);

		mm2->key = uresp.gts_key;
		mm2->addr = chp->cq.ugts;
		mm2->len = PAGE_SIZE;
		insert_mmap(ucontext, mm2);
	}
	PDBG("%s cqid 0x%0x chp %p size %u memsize %zu, dma_addr 0x%0llx\n",
	     __func__, chp->cq.cqid, chp, chp->cq.size,
	     chp->cq.memsize,
	     (unsigned long long) chp->cq.dma_addr);
	return &chp->ibcq;
err5:
	kfree(mm2);
err4:
	kfree(mm);
err3:
	remove_handle(rhp, &rhp->cqidr, chp->cq.cqid);
err2:
	destroy_cq(&chp->rhp->rdev, &chp->cq,
		   ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
err1:
	kfree(chp);
	return ERR_PTR(ret);
}

int c4iw_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
{
	return -ENOSYS;
}

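/*
 * ib_req_notify_cq entry point: arm the CQ for the next solicited or
 * any completion event via its GTS register.  t4_arm_cq() reports
 * whether CQEs were already pending; that is only propagated when the
 * consumer asked for IB_CQ_REPORT_MISSED_EVENTS.
 */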
int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct c4iw_cq *chp;
	int ret;
	unsigned long flag;

	chp = to_c4iw_cq(ibcq);
	spin_lock_irqsave(&chp->lock, flag);
	ret = t4_arm_cq(&chp->cq,
			(flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED);
	spin_unlock_irqrestore(&chp->lock, flag);
	if (ret && !(flags & IB_CQ_REPORT_MISSED_EVENTS))
		ret = 0;
	return ret;
}