/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/in.h>
#include <linux/device.h>
#include <linux/dmapool.h>

#include "rds.h"
#include "ib.h"

static char *rds_ib_wc_status_strings[] = {
#define RDS_IB_WC_STATUS_STR(foo) \
		[IB_WC_##foo] = __stringify(IB_WC_##foo)
	RDS_IB_WC_STATUS_STR(SUCCESS),
	RDS_IB_WC_STATUS_STR(LOC_LEN_ERR),
	RDS_IB_WC_STATUS_STR(LOC_QP_OP_ERR),
	RDS_IB_WC_STATUS_STR(LOC_EEC_OP_ERR),
	RDS_IB_WC_STATUS_STR(LOC_PROT_ERR),
	RDS_IB_WC_STATUS_STR(WR_FLUSH_ERR),
	RDS_IB_WC_STATUS_STR(MW_BIND_ERR),
	RDS_IB_WC_STATUS_STR(BAD_RESP_ERR),
	RDS_IB_WC_STATUS_STR(LOC_ACCESS_ERR),
	RDS_IB_WC_STATUS_STR(REM_INV_REQ_ERR),
	RDS_IB_WC_STATUS_STR(REM_ACCESS_ERR),
	RDS_IB_WC_STATUS_STR(REM_OP_ERR),
	RDS_IB_WC_STATUS_STR(RETRY_EXC_ERR),
	RDS_IB_WC_STATUS_STR(RNR_RETRY_EXC_ERR),
	RDS_IB_WC_STATUS_STR(LOC_RDD_VIOL_ERR),
	RDS_IB_WC_STATUS_STR(REM_INV_RD_REQ_ERR),
	RDS_IB_WC_STATUS_STR(REM_ABORT_ERR),
	RDS_IB_WC_STATUS_STR(INV_EECN_ERR),
	RDS_IB_WC_STATUS_STR(INV_EEC_STATE_ERR),
	RDS_IB_WC_STATUS_STR(FATAL_ERR),
	RDS_IB_WC_STATUS_STR(RESP_TIMEOUT_ERR),
	RDS_IB_WC_STATUS_STR(GENERAL_ERR),
#undef RDS_IB_WC_STATUS_STR
};
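/*
 * RDS_IB_WC_STATUS_STR(foo) expands to a designated initializer, e.g.
 * RDS_IB_WC_STATUS_STR(SUCCESS) becomes
 * [IB_WC_SUCCESS] = "IB_WC_SUCCESS", so the array above is indexed
 * directly by the ib_wc_status enum values.
 */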

char *rds_ib_wc_status_str(enum ib_wc_status status)
{
	return rds_str_array(rds_ib_wc_status_strings,
			     ARRAY_SIZE(rds_ib_wc_status_strings), status);
}

/*
 * Convert IB-specific error message to RDS error message and call core
 * completion handler.
 */
static void rds_ib_send_complete(struct rds_message *rm,
				 int wc_status,
				 void (*complete)(struct rds_message *rm, int status))
{
	int notify_status;

	switch (wc_status) {
	case IB_WC_WR_FLUSH_ERR:
		return;

	case IB_WC_SUCCESS:
		notify_status = RDS_RDMA_SUCCESS;
		break;

	case IB_WC_REM_ACCESS_ERR:
		notify_status = RDS_RDMA_REMOTE_ERROR;
		break;

	default:
		notify_status = RDS_RDMA_OTHER_ERROR;
		break;
	}
	complete(rm, notify_status);
}

static void rds_ib_send_unmap_data(struct rds_ib_connection *ic,
				   struct rm_data_op *op,
				   int wc_status)
{
	if (op->op_nents)
		ib_dma_unmap_sg(ic->i_cm_id->device,
				op->op_sg, op->op_nents,
				DMA_TO_DEVICE);
}

static void rds_ib_send_unmap_rdma(struct rds_ib_connection *ic,
				   struct rm_rdma_op *op,
				   int wc_status)
{
	if (op->op_mapped) {
		ib_dma_unmap_sg(ic->i_cm_id->device,
				op->op_sg, op->op_nents,
				op->op_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
		op->op_mapped = 0;
	}

	/* If the user asked for a completion notification on this
	 * message, we can implement three different semantics:
	 *  1.	Notify when we receive the ACK on the RDS message
	 *	that was queued with the RDMA. This provides reliable
	 *	notification of RDMA status at the expense of a one-way
	 *	packet delay.
	 *  2.	Notify when the IB stack gives us the completion event for
	 *	the RDMA operation.
	 *  3.	Notify when the IB stack gives us the completion event for
	 *	the accompanying RDS message.
	 * Here, we implement approach #3. To implement approach #2,
	 * we would need to take an event for the rdma WR. To implement #1,
	 * don't call rds_rdma_send_complete at all, and fall back to the notify
	 * handling in the ACK processing code.
	 *
	 * Note: There's no need to explicitly sync any RDMA buffers using
	 * ib_dma_sync_sg_for_cpu - the completion for the RDMA
	 * operation itself unmapped the RDMA buffers, which takes care
	 * of synching.
	 */
	rds_ib_send_complete(container_of(op, struct rds_message, rdma),
			     wc_status, rds_rdma_send_complete);

	if (op->op_write)
		rds_stats_add(s_send_rdma_bytes, op->op_bytes);
	else
		rds_stats_add(s_recv_rdma_bytes, op->op_bytes);
}

static void rds_ib_send_unmap_atomic(struct rds_ib_connection *ic,
				     struct rm_atomic_op *op,
				     int wc_status)
{
	/* unmap atomic recvbuf */
	if (op->op_mapped) {
		ib_dma_unmap_sg(ic->i_cm_id->device, op->op_sg, 1,
				DMA_FROM_DEVICE);
		op->op_mapped = 0;
	}

	rds_ib_send_complete(container_of(op, struct rds_message, atomic),
			     wc_status, rds_atomic_send_complete);

	if (op->op_type == RDS_ATOMIC_TYPE_CSWP)
		rds_ib_stats_inc(s_ib_atomic_cswp);
	else
		rds_ib_stats_inc(s_ib_atomic_fadd);
}

/*
 * Unmap the resources associated with a struct send_work.
 *
 * Returns the rm for no good reason other than it is unobtainable
 * other than by switching on wr.opcode, currently, and the caller,
 * the event handler, needs it.
 */
static struct rds_message *rds_ib_send_unmap_op(struct rds_ib_connection *ic,
						struct rds_ib_send_work *send,
						int wc_status)
{
	struct rds_message *rm = NULL;

	/* In the error case, wc.opcode sometimes contains garbage */
	switch (send->s_wr.opcode) {
	case IB_WR_SEND:
		if (send->s_op) {
			rm = container_of(send->s_op, struct rds_message, data);
			rds_ib_send_unmap_data(ic, send->s_op, wc_status);
		}
		break;
	case IB_WR_RDMA_WRITE:
	case IB_WR_RDMA_READ:
		if (send->s_op) {
			rm = container_of(send->s_op, struct rds_message, rdma);
			rds_ib_send_unmap_rdma(ic, send->s_op, wc_status);
		}
		break;
	case IB_WR_ATOMIC_FETCH_AND_ADD:
	case IB_WR_ATOMIC_CMP_AND_SWP:
		if (send->s_op) {
			rm = container_of(send->s_op, struct rds_message, atomic);
			rds_ib_send_unmap_atomic(ic, send->s_op, wc_status);
		}
		break;
	default:
		if (printk_ratelimit())
			printk(KERN_NOTICE
			       "RDS/IB: %s: unexpected opcode 0x%x in WR!\n",
			       __func__, send->s_wr.opcode);
		break;
	}

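	/* Flag the slot so rds_ib_send_clear_ring() can tell that this
	 * entry has already been unmapped: 0xdead is not a valid IB
	 * opcode. */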
	send->s_wr.opcode = 0xdead;

	return rm;
}

void rds_ib_send_init_ring(struct rds_ib_connection *ic)
{
	struct rds_ib_send_work *send;
	u32 i;

	for (i = 0, send = ic->i_sends; i < ic->i_send_ring.w_nr; i++, send++) {
		struct ib_sge *sge;

		send->s_op = NULL;

		send->s_wr.wr_id = i;
		send->s_wr.sg_list = send->s_sge;
		send->s_wr.ex.imm_data = 0;

		sge = &send->s_sge[0];
		sge->addr = ic->i_send_hdrs_dma + (i * sizeof(struct rds_header));
		sge->length = sizeof(struct rds_header);
		sge->lkey = ic->i_mr->lkey;

		send->s_sge[1].lkey = ic->i_mr->lkey;
	}
}

void rds_ib_send_clear_ring(struct rds_ib_connection *ic)
{
	struct rds_ib_send_work *send;
	u32 i;

	for (i = 0, send = ic->i_sends; i < ic->i_send_ring.w_nr; i++, send++) {
		if (send->s_op && send->s_wr.opcode != 0xdead)
			rds_ib_send_unmap_op(ic, send, IB_WC_WR_FLUSH_ERR);
	}
}

/*
 * The only fast path caller always has a non-zero nr, so we don't
 * bother testing nr before performing the atomic sub.
 */
static void rds_ib_sub_signaled(struct rds_ib_connection *ic, int nr)
{
	if ((atomic_sub_return(nr, &ic->i_signaled_sends) == 0) &&
	    waitqueue_active(&rds_ib_ring_empty_wait))
		wake_up(&rds_ib_ring_empty_wait);
	BUG_ON(atomic_read(&ic->i_signaled_sends) < 0);
}
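
/*
 * i_signaled_sends counts signaled WRs still in flight; connection
 * shutdown waits on rds_ib_ring_empty_wait for it to drain to zero,
 * which is why the subtraction above wakes that queue.
 */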

/*
 * The _oldest/_free ring operations here race cleanly with the alloc/unalloc
 * operations performed in the send path.  As the sender allocs and potentially
 * unallocs the next free entry in the ring, it doesn't alter which entry is
 * the next to be freed, which is what this is concerned with.
 */
void rds_ib_send_cq_comp_handler(struct ib_cq *cq, void *context)
{
	struct rds_connection *conn = context;
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct rds_message *rm = NULL;
	struct ib_wc wc;
	struct rds_ib_send_work *send;
	u32 completed;
	u32 oldest;
	u32 i = 0;
	int ret;
	int nr_sig = 0;

	rdsdebug("cq %p conn %p\n", cq, conn);
	rds_ib_stats_inc(s_ib_tx_cq_call);
	ret = ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
	if (ret)
		rdsdebug("ib_req_notify_cq send failed: %d\n", ret);

	while (ib_poll_cq(cq, 1, &wc) > 0) {
		rdsdebug("wc wr_id 0x%llx status %u (%s) byte_len %u imm_data %u\n",
			 (unsigned long long)wc.wr_id, wc.status,
			 rds_ib_wc_status_str(wc.status), wc.byte_len,
			 be32_to_cpu(wc.ex.imm_data));
		rds_ib_stats_inc(s_ib_tx_cq_event);

		if (wc.wr_id == RDS_IB_ACK_WR_ID) {
			if (ic->i_ack_queued + HZ/2 < jiffies)
				rds_ib_stats_inc(s_ib_tx_stalled);
			rds_ib_ack_send_complete(ic);
			continue;
		}

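		/* One completion can retire a whole run of ring entries:
		 * walk from the oldest posted send up to the entry whose
		 * wr_id was just polled. */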
		oldest = rds_ib_ring_oldest(&ic->i_send_ring);

		completed = rds_ib_ring_completed(&ic->i_send_ring, wc.wr_id, oldest);

		for (i = 0; i < completed; i++) {
			send = &ic->i_sends[oldest];
			if (send->s_wr.send_flags & IB_SEND_SIGNALED)
				nr_sig++;

			rm = rds_ib_send_unmap_op(ic, send, wc.status);

			if (send->s_queued + HZ/2 < jiffies)
				rds_ib_stats_inc(s_ib_tx_stalled);

			if (send->s_op) {
				if (send->s_op == rm->m_final_op) {
					/* If anyone waited for this message to get flushed out, wake
					 * them up now */
					rds_message_unmapped(rm);
				}
				rds_message_put(rm);
				send->s_op = NULL;
			}

			oldest = (oldest + 1) % ic->i_send_ring.w_nr;
		}

		rds_ib_ring_free(&ic->i_send_ring, completed);
		rds_ib_sub_signaled(ic, nr_sig);
		nr_sig = 0;

		if (test_and_clear_bit(RDS_LL_SEND_FULL, &conn->c_flags) ||
		    test_bit(0, &conn->c_map_queued))
			queue_delayed_work(rds_wq, &conn->c_send_w, 0);

		/* We expect errors as the qp is drained during shutdown */
		if (wc.status != IB_WC_SUCCESS && rds_conn_up(conn)) {
			rds_ib_conn_error(conn, "send completion on %pI4 had status "
					  "%u (%s), disconnecting and reconnecting\n",
					  &conn->c_faddr, wc.status,
					  rds_ib_wc_status_str(wc.status));
		}
	}
}

/*
 * This is the main function for allocating credits when sending
 * messages.
 *
 * Conceptually, we have two counters:
 *  -	send credits: this tells us how many WRs we're allowed
 *	to submit without overrunning the receiver's queue. For
 *	each SEND WR we post, we decrement this by one.
 *
 *  -	posted credits: this tells us how many WRs we recently
 *	posted to the receive queue. This value is transferred
 *	to the peer as a "credit update" in an RDS header field.
 *	Every time we transmit credits to the peer, we subtract
 *	the number of transferred credits from this counter.
 *
 * It is essential that we avoid situations where both sides have
 * exhausted their send credits, and are unable to send new credits
 * to the peer. We achieve this by requiring that we send at least
 * one credit update to the peer before exhausting our credits.
 * When new credits arrive, we subtract one credit that is withheld
 * until we've posted new buffers and are ready to transmit these
 * credits (see rds_ib_send_add_credits below).
 *
 * The RDS send code is essentially single-threaded; rds_send_xmit
 * sets RDS_IN_XMIT to ensure exclusive access to the send ring.
 * However, the ACK sending code is independent and can race with
 * message SENDs.
 *
 * In the send path, we need to update the counters for send credits
 * and the counter of posted buffers atomically - when we use the
 * last available credit, we cannot allow another thread to race us
 * and grab the posted credits counter.  Hence, we have to use a
 * spinlock to protect the credit counter, or use atomics.
 *
 * Spinlocks shared between the send and the receive path are bad,
 * because they create unnecessary delays. An early implementation
 * using a spinlock showed a 5% degradation in throughput at some
 * loads.
 *
 * This implementation avoids spinlocks completely, putting both
 * counters into a single atomic, and updating that atomic using
 * atomic_add (in the receive path, when receiving fresh credits),
 * and using atomic_cmpxchg when updating the two counters.
 */
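/*
 * For illustration (the authoritative macros live in ib.h): both
 * counters fit in one atomic_t, with the send credits in the low
 * 16 bits and the posted credits in the high 16 bits, roughly
 *
 *	#define IB_SET_SEND_CREDITS(v)	((v) & 0xffff)
 *	#define IB_SET_POST_CREDITS(v)	((v) << 16)
 *	#define IB_GET_SEND_CREDITS(v)	((v) & 0xffff)
 *	#define IB_GET_POST_CREDITS(v)	((v) >> 16)
 */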
int rds_ib_send_grab_credits(struct rds_ib_connection *ic,
			     u32 wanted, u32 *adv_credits, int need_posted, int max_posted)
{
	unsigned int avail, posted, got = 0, advertise;
	long oldval, newval;

	*adv_credits = 0;
	if (!ic->i_flowctl)
		return wanted;

try_again:
	advertise = 0;
	oldval = newval = atomic_read(&ic->i_credits);
	posted = IB_GET_POST_CREDITS(oldval);
	avail = IB_GET_SEND_CREDITS(oldval);

	rdsdebug("rds_ib_send_grab_credits(%u): credits=%u posted=%u\n",
			wanted, avail, posted);

	/* The last credit must be used to send a credit update. */
	if (avail && !posted)
		avail--;

	if (avail < wanted) {
		struct rds_connection *conn = ic->i_cm_id->context;

		/* Oops, there aren't that many credits left! */
		set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
		got = avail;
	} else {
		/* Sometimes you get what you want, lalala. */
		got = wanted;
	}
	newval -= IB_SET_SEND_CREDITS(got);

	/*
	 * If need_posted is non-zero, then the caller wants
	 * the posted count regardless of whether any send credits are
	 * available.
	 */
	if (posted && (got || need_posted)) {
		advertise = min_t(unsigned int, posted, max_posted);
		newval -= IB_SET_POST_CREDITS(advertise);
	}

	/* Finally bill everything */
	if (atomic_cmpxchg(&ic->i_credits, oldval, newval) != oldval)
		goto try_again;

	*adv_credits = advertise;
	return got;
}
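
/*
 * Worked example of the loop above: with avail = 4, posted = 0 and
 * wanted = 4, one credit is withheld (avail drops to 3), so got = 3
 * and RDS_LL_SEND_FULL is set; the withheld credit is only spent once
 * new receive buffers have been posted and a credit update can ride
 * along on the next send.
 */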

void rds_ib_send_add_credits(struct rds_connection *conn, unsigned int credits)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	if (credits == 0)
		return;

	rdsdebug("rds_ib_send_add_credits(%u): current=%u%s\n",
			credits,
			IB_GET_SEND_CREDITS(atomic_read(&ic->i_credits)),
			test_bit(RDS_LL_SEND_FULL, &conn->c_flags) ? ", ll_send_full" : "");

	atomic_add(IB_SET_SEND_CREDITS(credits), &ic->i_credits);
	if (test_and_clear_bit(RDS_LL_SEND_FULL, &conn->c_flags))
		queue_delayed_work(rds_wq, &conn->c_send_w, 0);

	WARN_ON(IB_GET_SEND_CREDITS(credits) >= 16384);

	rds_ib_stats_inc(s_ib_rx_credit_updates);
}

void rds_ib_advertise_credits(struct rds_connection *conn, unsigned int posted)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	if (posted == 0)
		return;

	atomic_add(IB_SET_POST_CREDITS(posted), &ic->i_credits);

	/* Decide whether to send an update to the peer now.
	 * If we would send a credit update for every single buffer we
	 * post, we would end up with an ACK storm (ACK arrives,
	 * consumes buffer, we refill the ring, send ACK to remote
	 * advertising the newly posted buffer... ad inf)
	 *
	 * Performance pretty much depends on how often we send
	 * credit updates - too frequent updates mean lots of ACKs.
	 * Too infrequent updates, and the peer will run out of
	 * credits and have to throttle.
	 * For the time being, 16 seems to be a good compromise.
	 */
	if (IB_GET_POST_CREDITS(atomic_read(&ic->i_credits)) >= 16)
		set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
}

static inline int rds_ib_set_wr_signal_state(struct rds_ib_connection *ic,
					     struct rds_ib_send_work *send,
					     bool notify)
{
	/*
	 * We want to delay signaling completions just enough to get
	 * the batching benefits but not so much that we create dead time
	 * on the wire.
	 */
	if (ic->i_unsignaled_wrs-- == 0 || notify) {
		ic->i_unsignaled_wrs = rds_ib_sysctl_max_unsig_wrs;
		send->s_wr.send_flags |= IB_SEND_SIGNALED;
		return 1;
	}
	return 0;
}
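
/*
 * With rds_ib_sysctl_max_unsig_wrs == N, the helper above signals (and
 * returns 1 for) one out of every N + 1 posted work requests, unless
 * the caller forces notification.
 */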

/*
 * This can be called multiple times for a given message.  The first time
 * we see a message we map its scatterlist into the IB device so that
 * we can provide that mapped address to the IB scatter gather entries
 * in the IB work requests.  We translate the scatterlist into a series
 * of work requests that fragment the message.  These work requests complete
 * in order so we pass ownership of the message to the completion handler
 * once we send the final fragment.
 *
 * The RDS core sets RDS_IN_XMIT to only enter this function once
 * per connection.  This makes sure that the tx ring alloc/unalloc pairs
 * don't get out of sync and confuse the ring.
 */
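/*
 * Fragmentation sketch: a message of h_len payload bytes becomes
 * ceil(h_len / RDS_FRAG_SIZE) work requests, each carrying s_sge[0]
 * (a prebuilt rds_header) plus s_sge[1] (up to RDS_FRAG_SIZE bytes of
 * mapped payload).
 */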
int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
		unsigned int hdr_off, unsigned int sg, unsigned int off)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct ib_device *dev = ic->i_cm_id->device;
	struct rds_ib_send_work *send = NULL;
	struct rds_ib_send_work *first;
	struct rds_ib_send_work *prev;
	struct ib_send_wr *failed_wr;
	struct scatterlist *scat;
	u32 pos;
	u32 i;
	u32 work_alloc;
	u32 credit_alloc = 0;
	u32 posted;
	u32 adv_credits = 0;
	int send_flags = 0;
	int bytes_sent = 0;
	int ret;
	int flow_controlled = 0;
	int nr_sig = 0;

	BUG_ON(off % RDS_FRAG_SIZE);
	BUG_ON(hdr_off != 0 && hdr_off != sizeof(struct rds_header));

	/* Do not send cong updates to IB loopback */
	if (conn->c_loopback
	    && rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) {
		rds_cong_map_updated(conn->c_fcong, ~(u64) 0);
		return sizeof(struct rds_header) + RDS_CONG_MAP_BYTES;
	}

	/* FIXME we may overallocate here */
	if (be32_to_cpu(rm->m_inc.i_hdr.h_len) == 0)
		i = 1;
	else
		i = ceil(be32_to_cpu(rm->m_inc.i_hdr.h_len), RDS_FRAG_SIZE);

	work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, i, &pos);
	if (work_alloc == 0) {
		set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
		rds_ib_stats_inc(s_ib_tx_ring_full);
		ret = -ENOMEM;
		goto out;
	}

	if (ic->i_flowctl) {
		credit_alloc = rds_ib_send_grab_credits(ic, work_alloc, &posted, 0, RDS_MAX_ADV_CREDIT);
		adv_credits += posted;
		if (credit_alloc < work_alloc) {
			rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - credit_alloc);
			work_alloc = credit_alloc;
			flow_controlled = 1;
		}
		if (work_alloc == 0) {
			set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
			rds_ib_stats_inc(s_ib_tx_throttle);
			ret = -ENOMEM;
			goto out;
		}
	}

	/* map the message the first time we see it */
	if (!ic->i_data_op) {
		if (rm->data.op_nents) {
			rm->data.op_count = ib_dma_map_sg(dev,
							  rm->data.op_sg,
							  rm->data.op_nents,
							  DMA_TO_DEVICE);
			rdsdebug("ic %p mapping rm %p: %d\n", ic, rm, rm->data.op_count);
			if (rm->data.op_count == 0) {
				rds_ib_stats_inc(s_ib_tx_sg_mapping_failure);
				rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
				ret = -ENOMEM; /* XXX ? */
				goto out;
			}
		} else {
			rm->data.op_count = 0;
		}

		rds_message_addref(rm);
		ic->i_data_op = &rm->data;

		/* Finalize the header */
		if (test_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags))
			rm->m_inc.i_hdr.h_flags |= RDS_FLAG_ACK_REQUIRED;
		if (test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags))
			rm->m_inc.i_hdr.h_flags |= RDS_FLAG_RETRANSMITTED;

		/* If it has an RDMA op, tell the peer we did it. This is
		 * used by the peer to release use-once RDMA MRs. */
		if (rm->rdma.op_active) {
			struct rds_ext_header_rdma ext_hdr;

			ext_hdr.h_rdma_rkey = cpu_to_be32(rm->rdma.op_rkey);
			rds_message_add_extension(&rm->m_inc.i_hdr,
					RDS_EXTHDR_RDMA, &ext_hdr, sizeof(ext_hdr));
		}
		if (rm->m_rdma_cookie) {
			rds_message_add_rdma_dest_extension(&rm->m_inc.i_hdr,
					rds_rdma_cookie_key(rm->m_rdma_cookie),
					rds_rdma_cookie_offset(rm->m_rdma_cookie));
		}

		/* Note - rds_ib_piggyb_ack clears the ACK_REQUIRED bit, so
		 * we should not do this unless we have a chance of at least
		 * sticking the header into the send ring. Which is why we
		 * should call rds_ib_ring_alloc first. */
		rm->m_inc.i_hdr.h_ack = cpu_to_be64(rds_ib_piggyb_ack(ic));
		rds_message_make_checksum(&rm->m_inc.i_hdr);

		/*
		 * Update adv_credits since we reset the ACK_REQUIRED bit.
		 */
		if (ic->i_flowctl) {
			rds_ib_send_grab_credits(ic, 0, &posted, 1, RDS_MAX_ADV_CREDIT - adv_credits);
			adv_credits += posted;
			BUG_ON(adv_credits > 255);
		}
	}

	/* Sometimes you want to put a fence between an RDMA
	 * READ and the following SEND.
	 * We could either do this all the time
	 * or when requested by the user. Right now, we let
	 * the application choose.
	 */
	if (rm->rdma.op_active && rm->rdma.op_fence)
		send_flags = IB_SEND_FENCE;

	/* Each frag gets a header. Msgs may be 0 bytes */
	send = &ic->i_sends[pos];
	first = send;
	prev = NULL;
	scat = &ic->i_data_op->op_sg[sg];
	i = 0;
	do {
		unsigned int len = 0;

		/* Set up the header */
		send->s_wr.send_flags = send_flags;
		send->s_wr.opcode = IB_WR_SEND;
		send->s_wr.num_sge = 1;
		send->s_wr.next = NULL;
		send->s_queued = jiffies;
		send->s_op = NULL;

		send->s_sge[0].addr = ic->i_send_hdrs_dma
			+ (pos * sizeof(struct rds_header));
		send->s_sge[0].length = sizeof(struct rds_header);

		memcpy(&ic->i_send_hdrs[pos], &rm->m_inc.i_hdr, sizeof(struct rds_header));

		/* Set up the data, if present */
		if (i < work_alloc
		    && scat != &rm->data.op_sg[rm->data.op_count]) {
			len = min(RDS_FRAG_SIZE, ib_sg_dma_len(dev, scat) - off);
			send->s_wr.num_sge = 2;

			send->s_sge[1].addr = ib_sg_dma_address(dev, scat) + off;
			send->s_sge[1].length = len;

			bytes_sent += len;
			off += len;
			if (off == ib_sg_dma_len(dev, scat)) {
				scat++;
				off = 0;
			}
		}

		rds_ib_set_wr_signal_state(ic, send, 0);

		/*
		 * Always signal the last one if we're stopping due to flow control.
		 */
		if (ic->i_flowctl && flow_controlled && i == (work_alloc-1))
			send->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;

		if (send->s_wr.send_flags & IB_SEND_SIGNALED)
			nr_sig++;

		rdsdebug("send %p wr %p num_sge %u next %p\n", send,
			 &send->s_wr, send->s_wr.num_sge, send->s_wr.next);

		if (ic->i_flowctl && adv_credits) {
			struct rds_header *hdr = &ic->i_send_hdrs[pos];

			/* add credit and redo the header checksum */
			hdr->h_credit = adv_credits;
			rds_message_make_checksum(hdr);
			adv_credits = 0;
			rds_ib_stats_inc(s_ib_tx_credit_updates);
		}

		if (prev)
			prev->s_wr.next = &send->s_wr;
		prev = send;

		pos = (pos + 1) % ic->i_send_ring.w_nr;
		send = &ic->i_sends[pos];
		i++;

	} while (i < work_alloc
		 && scat != &rm->data.op_sg[rm->data.op_count]);

	/* Account the RDS header in the number of bytes we sent, but just once.
	 * The caller has no concept of fragmentation. */
	if (hdr_off == 0)
		bytes_sent += sizeof(struct rds_header);

	/* if we finished the message then send completion owns it */
	if (scat == &rm->data.op_sg[rm->data.op_count]) {
		prev->s_op = ic->i_data_op;
		prev->s_wr.send_flags |= IB_SEND_SOLICITED;
		ic->i_data_op = NULL;
	}

	/* Put back wrs & credits we didn't use */
	if (i < work_alloc) {
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - i);
		work_alloc = i;
	}
	if (ic->i_flowctl && i < credit_alloc)
		rds_ib_send_add_credits(conn, credit_alloc - i);

	if (nr_sig)
		atomic_add(nr_sig, &ic->i_signaled_sends);

	/* XXX need to worry about failed_wr and partial sends. */
	failed_wr = &first->s_wr;
	ret = ib_post_send(ic->i_cm_id->qp, &first->s_wr, &failed_wr);
	rdsdebug("ic %p first %p (wr %p) ret %d wr %p\n", ic,
		 first, &first->s_wr, ret, failed_wr);
	BUG_ON(failed_wr != &first->s_wr);
	if (ret) {
		printk(KERN_WARNING "RDS/IB: ib_post_send to %pI4 "
		       "returned %d\n", &conn->c_faddr, ret);
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
		rds_ib_sub_signaled(ic, nr_sig);
		if (prev->s_op) {
			ic->i_data_op = prev->s_op;
			prev->s_op = NULL;
		}

		rds_ib_conn_error(ic->conn, "ib_post_send failed\n");
		goto out;
	}

	ret = bytes_sent;
out:
	BUG_ON(adv_credits);
	return ret;
}

/*
 * Issue atomic operation.
 * A simplified version of the rdma case: we always map one SG, and
 * only 8 bytes, for the return value from the atomic operation.
 */
int rds_ib_xmit_atomic(struct rds_connection *conn, struct rm_atomic_op *op)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct rds_ib_send_work *send = NULL;
	struct ib_send_wr *failed_wr;
	struct rds_ib_device *rds_ibdev;
	u32 pos;
	u32 work_alloc;
	int ret;
	int nr_sig = 0;

	rds_ibdev = ib_get_client_data(ic->i_cm_id->device, &rds_ib_client);

	work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, 1, &pos);
	if (work_alloc != 1) {
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
		rds_ib_stats_inc(s_ib_tx_ring_full);
		ret = -ENOMEM;
		goto out;
	}

	/* address of send request in ring */
	send = &ic->i_sends[pos];
	send->s_queued = jiffies;

	if (op->op_type == RDS_ATOMIC_TYPE_CSWP) {
		send->s_wr.opcode = IB_WR_MASKED_ATOMIC_CMP_AND_SWP;
		send->s_wr.wr.atomic.compare_add = op->op_m_cswp.compare;
		send->s_wr.wr.atomic.swap = op->op_m_cswp.swap;
		send->s_wr.wr.atomic.compare_add_mask = op->op_m_cswp.compare_mask;
		send->s_wr.wr.atomic.swap_mask = op->op_m_cswp.swap_mask;
	} else { /* FADD */
		send->s_wr.opcode = IB_WR_MASKED_ATOMIC_FETCH_AND_ADD;
		send->s_wr.wr.atomic.compare_add = op->op_m_fadd.add;
		send->s_wr.wr.atomic.swap = 0;
		send->s_wr.wr.atomic.compare_add_mask = op->op_m_fadd.nocarry_mask;
		send->s_wr.wr.atomic.swap_mask = 0;
	}
	nr_sig = rds_ib_set_wr_signal_state(ic, send, op->op_notify);
	send->s_wr.num_sge = 1;
	send->s_wr.next = NULL;
	send->s_wr.wr.atomic.remote_addr = op->op_remote_addr;
	send->s_wr.wr.atomic.rkey = op->op_rkey;
	send->s_op = op;
	rds_message_addref(container_of(send->s_op, struct rds_message, atomic));

	/* map 8 byte retval buffer to the device */
	ret = ib_dma_map_sg(ic->i_cm_id->device, op->op_sg, 1, DMA_FROM_DEVICE);
	rdsdebug("ic %p mapping atomic op %p. mapped %d pg\n", ic, op, ret);
	if (ret != 1) {
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
		rds_ib_stats_inc(s_ib_tx_sg_mapping_failure);
		ret = -ENOMEM; /* XXX ? */
		goto out;
	}

	/* Convert our struct scatterlist to struct ib_sge */
	send->s_sge[0].addr = ib_sg_dma_address(ic->i_cm_id->device, op->op_sg);
	send->s_sge[0].length = ib_sg_dma_len(ic->i_cm_id->device, op->op_sg);
	send->s_sge[0].lkey = ic->i_mr->lkey;

	rdsdebug("rva %Lx rpa %Lx len %u\n", op->op_remote_addr,
		 send->s_sge[0].addr, send->s_sge[0].length);

	if (nr_sig)
		atomic_add(nr_sig, &ic->i_signaled_sends);

	failed_wr = &send->s_wr;
	ret = ib_post_send(ic->i_cm_id->qp, &send->s_wr, &failed_wr);
	rdsdebug("ic %p send %p (wr %p) ret %d wr %p\n", ic,
		 send, &send->s_wr, ret, failed_wr);
	BUG_ON(failed_wr != &send->s_wr);
	if (ret) {
		printk(KERN_WARNING "RDS/IB: atomic ib_post_send to %pI4 "
		       "returned %d\n", &conn->c_faddr, ret);
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
		rds_ib_sub_signaled(ic, nr_sig);
		goto out;
	}

	if (unlikely(failed_wr != &send->s_wr)) {
		printk(KERN_WARNING "RDS/IB: atomic ib_post_send() rc=%d, but failed_wqe updated!\n", ret);
		BUG_ON(failed_wr != &send->s_wr);
	}

out:
	return ret;
}

int rds_ib_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct rds_ib_send_work *send = NULL;
	struct rds_ib_send_work *first;
	struct rds_ib_send_work *prev;
	struct ib_send_wr *failed_wr;
	struct scatterlist *scat;
	unsigned long len;
	u64 remote_addr = op->op_remote_addr;
	u32 max_sge = ic->rds_ibdev->max_sge;
	u32 pos;
	u32 work_alloc;
	u32 i;
	u32 j;
	int sent;
	int ret;
	int num_sge;
	int nr_sig = 0;

	/* map the op the first time we see it */
	if (!op->op_mapped) {
		op->op_count = ib_dma_map_sg(ic->i_cm_id->device,
					     op->op_sg, op->op_nents, (op->op_write) ?
					     DMA_TO_DEVICE : DMA_FROM_DEVICE);
		rdsdebug("ic %p mapping op %p: %d\n", ic, op, op->op_count);
		if (op->op_count == 0) {
			rds_ib_stats_inc(s_ib_tx_sg_mapping_failure);
			ret = -ENOMEM; /* XXX ? */
			goto out;
		}

		op->op_mapped = 1;
	}

	/*
	 * Instead of knowing how to return a partial rdma read/write we insist that there
	 * be enough work requests to send the entire message.
	 */
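	/* e.g. 70 mapped sges with max_sge == 32 become three WRs
	 * carrying 32, 32 and 6 sges respectively */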
	i = ceil(op->op_count, max_sge);

	work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, i, &pos);
	if (work_alloc != i) {
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
		rds_ib_stats_inc(s_ib_tx_ring_full);
		ret = -ENOMEM;
		goto out;
	}

	send = &ic->i_sends[pos];
	first = send;
	prev = NULL;
	scat = &op->op_sg[0];
	sent = 0;
	num_sge = op->op_count;

	for (i = 0; i < work_alloc && scat != &op->op_sg[op->op_count]; i++) {
		send->s_wr.send_flags = 0;
		send->s_queued = jiffies;
		send->s_op = NULL;

		nr_sig += rds_ib_set_wr_signal_state(ic, send, op->op_notify);

		send->s_wr.opcode = op->op_write ? IB_WR_RDMA_WRITE : IB_WR_RDMA_READ;
		send->s_wr.wr.rdma.remote_addr = remote_addr;
		send->s_wr.wr.rdma.rkey = op->op_rkey;

		if (num_sge > max_sge) {
			send->s_wr.num_sge = max_sge;
			num_sge -= max_sge;
		} else {
			send->s_wr.num_sge = num_sge;
		}

		send->s_wr.next = NULL;

		if (prev)
			prev->s_wr.next = &send->s_wr;

		for (j = 0; j < send->s_wr.num_sge && scat != &op->op_sg[op->op_count]; j++) {
			len = ib_sg_dma_len(ic->i_cm_id->device, scat);
			send->s_sge[j].addr =
				 ib_sg_dma_address(ic->i_cm_id->device, scat);
			send->s_sge[j].length = len;
			send->s_sge[j].lkey = ic->i_mr->lkey;

			sent += len;
			rdsdebug("ic %p sent %d remote_addr %llu\n", ic, sent, remote_addr);

			remote_addr += len;
			scat++;
		}

		rdsdebug("send %p wr %p num_sge %u next %p\n", send,
			&send->s_wr, send->s_wr.num_sge, send->s_wr.next);

		prev = send;
		if (++send == &ic->i_sends[ic->i_send_ring.w_nr])
			send = ic->i_sends;
	}

	/* give a reference to the last op */
	if (scat == &op->op_sg[op->op_count]) {
		prev->s_op = op;
		rds_message_addref(container_of(op, struct rds_message, rdma));
	}

	if (i < work_alloc) {
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - i);
		work_alloc = i;
	}

	if (nr_sig)
		atomic_add(nr_sig, &ic->i_signaled_sends);

	failed_wr = &first->s_wr;
	ret = ib_post_send(ic->i_cm_id->qp, &first->s_wr, &failed_wr);
	rdsdebug("ic %p first %p (wr %p) ret %d wr %p\n", ic,
		 first, &first->s_wr, ret, failed_wr);
	BUG_ON(failed_wr != &first->s_wr);
	if (ret) {
		printk(KERN_WARNING "RDS/IB: rdma ib_post_send to %pI4 "
		       "returned %d\n", &conn->c_faddr, ret);
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
		rds_ib_sub_signaled(ic, nr_sig);
		goto out;
	}

	if (unlikely(failed_wr != &first->s_wr)) {
		printk(KERN_WARNING "RDS/IB: ib_post_send() rc=%d, but failed_wqe updated!\n", ret);
		BUG_ON(failed_wr != &first->s_wr);
	}

out:
	return ret;
}

void rds_ib_xmit_complete(struct rds_connection *conn)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	/* We may have a pending ACK or window update we were unable
	 * to send previously (due to flow control). Try again. */
	rds_ib_attempt_ack(ic);
}