/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/gfp.h>
#include <net/sock.h>
#include <linux/in.h>
#include <linux/list.h>
#include <linux/ratelimit.h>
#include <linux/export.h>

#include "rds.h"

/* When transmitting messages in rds_send_xmit, we need to emerge from
 * time to time and briefly release the CPU. Otherwise the softlockup
 * watchdog will kick our shin.
 * Also, it seems fairer to not let one busy connection stall all the
 * others.
 *
 * send_batch_count is the number of times we'll loop in send_xmit. Setting
 * it to 0 will restore the old behavior (where we looped until we had
 * drained the queue).
 */
static int send_batch_count = 64;
module_param(send_batch_count, int, 0444);
MODULE_PARM_DESC(send_batch_count, " batch factor when working the send queue");
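
/* With 0444 permissions the batch factor is read-only at runtime: it can
 * only be set at module load time (e.g. "modprobe rds send_batch_count=128")
 * and then inspected under /sys/module/rds/parameters/send_batch_count.
 */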

static void rds_send_remove_from_sock(struct list_head *messages, int status);

/*
 * Reset the send state.  Callers must ensure that this doesn't race with
 * rds_send_xmit().
 */
void rds_send_reset(struct rds_connection *conn)
{
	struct rds_message *rm, *tmp;
	unsigned long flags;

	if (conn->c_xmit_rm) {
		rm = conn->c_xmit_rm;
		conn->c_xmit_rm = NULL;
		/* Tell the user the RDMA op is no longer mapped by the
		 * transport. This isn't entirely true (it's flushed out
		 * independently) but as the connection is down, there's
		 * no ongoing RDMA to/from that memory */
		rds_message_unmapped(rm);
		rds_message_put(rm);
	}

	conn->c_xmit_sg = 0;
	conn->c_xmit_hdr_off = 0;
	conn->c_xmit_data_off = 0;
	conn->c_xmit_atomic_sent = 0;
	conn->c_xmit_rdma_sent = 0;
	conn->c_xmit_data_sent = 0;

	conn->c_map_queued = 0;

	conn->c_unacked_packets = rds_sysctl_max_unacked_packets;
	conn->c_unacked_bytes = rds_sysctl_max_unacked_bytes;

	/* Mark messages as retransmissions, and move them to the send q */
	spin_lock_irqsave(&conn->c_lock, flags);
	list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
		set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);
		set_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags);
	}
	list_splice_init(&conn->c_retrans, &conn->c_send_queue);
	spin_unlock_irqrestore(&conn->c_lock, flags);
}

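/* RDS_IN_XMIT acts as a hand-rolled trylock around the transmit path: only
 * the caller that flips the bit from 0 to 1 wins (test_and_set_bit() returns
 * the bit's previous value), and everyone else backs off.
 */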
static int acquire_in_xmit(struct rds_connection *conn)
{
	return test_and_set_bit(RDS_IN_XMIT, &conn->c_flags) == 0;
}

static void release_in_xmit(struct rds_connection *conn)
{
	clear_bit(RDS_IN_XMIT, &conn->c_flags);
	smp_mb__after_clear_bit();
	/*
	 * We don't use wait_on_bit()/wake_up_bit() because our waking is in a
	 * hot path and finding waiters is very rare.  We don't want to walk
	 * the system-wide hashed waitqueue buckets in the fast path only to
	 * almost never find waiters.
	 */
	if (waitqueue_active(&conn->c_waitq))
		wake_up_all(&conn->c_waitq);
}

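/* A minimal sketch of the waiter side, assuming the pattern used by
 * rds_conn_shutdown(): set the connection state first, then sleep until
 * the transmit path drops RDS_IN_XMIT and wakes c_waitq:
 *
 *	wait_event(conn->c_waitq,
 *		   !test_bit(RDS_IN_XMIT, &conn->c_flags));
 */
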
/*
 * We're making the conscious trade-off here to only send one message
 * down the connection at a time.
 *   Pro:
 *      - tx queueing is a simple fifo list
 *      - reassembly is optional and easily done by transports per conn
 *      - no per flow rx lookup at all, straight to the socket
 *      - less per-frag memory and wire overhead
 *   Con:
 *      - queued acks can be delayed behind large messages
 *   Depends:
 *      - small message latency is higher behind queued large messages
 *      - large message latency isn't starved by intervening small sends
 */
int rds_send_xmit(struct rds_connection *conn)
{
	struct rds_message *rm;
	unsigned long flags;
	unsigned int tmp;
	struct scatterlist *sg;
	int ret = 0;
	LIST_HEAD(to_be_dropped);

restart:

	/*
	 * sendmsg calls here after having queued its message on the send
	 * queue.  We only have one task feeding the connection at a time.  If
	 * another thread is already feeding the queue then we back off.  This
	 * avoids blocking the caller and trading per-connection data between
	 * caches per message.
	 */
	if (!acquire_in_xmit(conn)) {
		rds_stats_inc(s_send_lock_contention);
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * rds_conn_shutdown() sets the conn state and then tests RDS_IN_XMIT;
	 * we do the opposite to avoid races.
	 */
	if (!rds_conn_up(conn)) {
		release_in_xmit(conn);
		ret = 0;
		goto out;
	}

	if (conn->c_trans->xmit_prepare)
		conn->c_trans->xmit_prepare(conn);

	/*
	 * spin trying to push headers and data down the connection until
	 * the connection doesn't make forward progress.
	 */
	while (1) {

		rm = conn->c_xmit_rm;

		/*
		 * If between sending messages, we can send a pending congestion
		 * map update.
		 */
		if (!rm && test_and_clear_bit(0, &conn->c_map_queued)) {
			rm = rds_cong_update_alloc(conn);
			if (IS_ERR(rm)) {
				ret = PTR_ERR(rm);
				break;
			}
			rm->data.op_active = 1;

			conn->c_xmit_rm = rm;
		}

		/*
		 * If not already working on one, grab the next message.
		 *
		 * c_xmit_rm holds a ref while we're sending this message down
		 * the connection.  We can use this ref while holding the
		 * send_sem.. rds_send_reset() is serialized with it.
		 */
		if (!rm) {
			unsigned int len;

			spin_lock_irqsave(&conn->c_lock, flags);

			if (!list_empty(&conn->c_send_queue)) {
				rm = list_entry(conn->c_send_queue.next,
						struct rds_message,
						m_conn_item);
				rds_message_addref(rm);

				/*
				 * Move the message from the send queue to the retransmit
				 * list right away.
				 */
				list_move_tail(&rm->m_conn_item, &conn->c_retrans);
			}

			spin_unlock_irqrestore(&conn->c_lock, flags);

			if (!rm)
				break;

			/* Unfortunately, the way Infiniband deals with
			 * RDMA to a bad MR key is by moving the entire
			 * queue pair to error state. We could possibly
			 * recover from that, but right now we drop the
			 * connection.
			 * Therefore, we never retransmit messages with RDMA ops.
			 */
			if (rm->rdma.op_active &&
			    test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags)) {
				spin_lock_irqsave(&conn->c_lock, flags);
				if (test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags))
					list_move(&rm->m_conn_item, &to_be_dropped);
				spin_unlock_irqrestore(&conn->c_lock, flags);
				continue;
			}

			/* Require an ACK every once in a while */
			len = ntohl(rm->m_inc.i_hdr.h_len);
			if (conn->c_unacked_packets == 0 ||
			    conn->c_unacked_bytes < len) {
				__set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);

				conn->c_unacked_packets = rds_sysctl_max_unacked_packets;
				conn->c_unacked_bytes = rds_sysctl_max_unacked_bytes;
				rds_stats_inc(s_send_ack_required);
			} else {
				conn->c_unacked_bytes -= len;
				conn->c_unacked_packets--;
			}

			conn->c_xmit_rm = rm;
		}

		/* The transport either sends the whole rdma or none of it */
		if (rm->rdma.op_active && !conn->c_xmit_rdma_sent) {
			rm->m_final_op = &rm->rdma;
			ret = conn->c_trans->xmit_rdma(conn, &rm->rdma);
			if (ret)
				break;
			conn->c_xmit_rdma_sent = 1;

			/* The transport owns the mapped memory for now.
			 * You can't unmap it while it's on the send queue */
			set_bit(RDS_MSG_MAPPED, &rm->m_flags);
		}

		if (rm->atomic.op_active && !conn->c_xmit_atomic_sent) {
			rm->m_final_op = &rm->atomic;
			ret = conn->c_trans->xmit_atomic(conn, &rm->atomic);
			if (ret)
				break;
			conn->c_xmit_atomic_sent = 1;

			/* The transport owns the mapped memory for now.
			 * You can't unmap it while it's on the send queue */
			set_bit(RDS_MSG_MAPPED, &rm->m_flags);
		}

		/*
		 * A number of cases require an RDS header to be sent
		 * even if there is no data.
		 * We permit 0-byte sends; rds-ping depends on this.
		 * However, if there are exclusively attached silent ops,
		 * we skip the hdr/data send, to enable silent operation.
		 */
		if (rm->data.op_nents == 0) {
			int ops_present;
			int all_ops_are_silent = 1;

			ops_present = (rm->atomic.op_active || rm->rdma.op_active);
			if (rm->atomic.op_active && !rm->atomic.op_silent)
				all_ops_are_silent = 0;
			if (rm->rdma.op_active && !rm->rdma.op_silent)
				all_ops_are_silent = 0;

			if (ops_present && all_ops_are_silent &&
			    !rm->m_rdma_cookie)
				rm->data.op_active = 0;
		}

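		/* ->xmit() below returns how many bytes the transport accepted.
		 * Those bytes are charged against the header first and then
		 * walked off the data scatterlist, so a partial send resumes
		 * exactly where it left off on the next pass.
		 */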
		if (rm->data.op_active && !conn->c_xmit_data_sent) {
			rm->m_final_op = &rm->data;
			ret = conn->c_trans->xmit(conn, rm,
						  conn->c_xmit_hdr_off,
						  conn->c_xmit_sg,
						  conn->c_xmit_data_off);
			if (ret <= 0)
				break;

			if (conn->c_xmit_hdr_off < sizeof(struct rds_header)) {
				tmp = min_t(int, ret,
					    sizeof(struct rds_header) -
					    conn->c_xmit_hdr_off);
				conn->c_xmit_hdr_off += tmp;
				ret -= tmp;
			}

			sg = &rm->data.op_sg[conn->c_xmit_sg];
			while (ret) {
				tmp = min_t(int, ret, sg->length -
						      conn->c_xmit_data_off);
				conn->c_xmit_data_off += tmp;
				ret -= tmp;
				if (conn->c_xmit_data_off == sg->length) {
					conn->c_xmit_data_off = 0;
					sg++;
					conn->c_xmit_sg++;
					BUG_ON(ret != 0 &&
					       conn->c_xmit_sg == rm->data.op_nents);
				}
			}

			if (conn->c_xmit_hdr_off == sizeof(struct rds_header) &&
			    (conn->c_xmit_sg == rm->data.op_nents))
				conn->c_xmit_data_sent = 1;
		}

		/*
		 * An rm only takes multiple passes through this loop
		 * if it has a data op. Thus, if the data is sent (or there
		 * was none), then we're done with the rm.
		 */
		if (!rm->data.op_active || conn->c_xmit_data_sent) {
			conn->c_xmit_rm = NULL;
			conn->c_xmit_sg = 0;
			conn->c_xmit_hdr_off = 0;
			conn->c_xmit_data_off = 0;
			conn->c_xmit_rdma_sent = 0;
			conn->c_xmit_atomic_sent = 0;
			conn->c_xmit_data_sent = 0;

			rds_message_put(rm);
		}
	}

	if (conn->c_trans->xmit_complete)
		conn->c_trans->xmit_complete(conn);

	release_in_xmit(conn);

	/* Nuke any messages we decided not to retransmit. */
	if (!list_empty(&to_be_dropped)) {
		/* irqs on here, so we can put(), unlike above */
		list_for_each_entry(rm, &to_be_dropped, m_conn_item)
			rds_message_put(rm);
		rds_send_remove_from_sock(&to_be_dropped, RDS_RDMA_DROPPED);
	}

	/*
	 * Other senders can queue a message after we last test the send queue
	 * but before we clear RDS_IN_XMIT.  In that case they'd back off and
	 * not try and send their newly queued message.  We need to check the
	 * send queue after having cleared RDS_IN_XMIT so that their message
	 * doesn't get stuck on the send queue.
	 *
	 * If the transport cannot continue (i.e. ret != 0), then it must
	 * call us when more room is available, such as from the tx
	 * completion handler.
	 */
	if (ret == 0) {
		smp_mb();
		if (!list_empty(&conn->c_send_queue)) {
			rds_stats_inc(s_send_lock_queue_raced);
			goto restart;
		}
	}
out:
	return ret;
}

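/* Sndbuf accounting: rds_send_queue_rm() charges a message's payload length
 * against rs_snd_bytes when it is queued; this is the matching credit when
 * the message leaves the socket.
 */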
static void rds_send_sndbuf_remove(struct rds_sock *rs, struct rds_message *rm)
{
	u32 len = be32_to_cpu(rm->m_inc.i_hdr.h_len);

	assert_spin_locked(&rs->rs_lock);

	BUG_ON(rs->rs_snd_bytes < len);
	rs->rs_snd_bytes -= len;

	if (rs->rs_snd_bytes == 0)
		rds_stats_inc(s_send_queue_empty);
}

static inline int rds_send_is_acked(struct rds_message *rm, u64 ack,
				    is_acked_func is_acked)
{
	if (is_acked)
		return is_acked(rm, ack);
	return be64_to_cpu(rm->m_inc.i_hdr.h_sequence) <= ack;
}
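
/* Transports may override the plain sequence-number comparison above.  The
 * comment atop rds_send_drop_acked() notes the TCP case: a message may not
 * have been assigned its ack sequence yet, so a stream transport's is_acked
 * callback first tests RDS_MSG_HAS_ACK_SEQ before comparing (a sketch,
 * assuming a u64 m_ack_seq field guarded by that flag):
 *
 *	if (!test_bit(RDS_MSG_HAS_ACK_SEQ, &rm->m_flags))
 *		return 0;
 *	return rm->m_ack_seq <= ack;
 */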

/*
 * This is pretty similar to what happens below in the ACK
 * handling code - except that we call here as soon as we get
 * the IB send completion on the RDMA op and the accompanying
 * message.
 */
void rds_rdma_send_complete(struct rds_message *rm, int status)
{
	struct rds_sock *rs = NULL;
	struct rm_rdma_op *ro;
	struct rds_notifier *notifier;
	unsigned long flags;

	spin_lock_irqsave(&rm->m_rs_lock, flags);

	ro = &rm->rdma;
	if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags) &&
	    ro->op_active && ro->op_notify && ro->op_notifier) {
		notifier = ro->op_notifier;
		rs = rm->m_rs;
		sock_hold(rds_rs_to_sk(rs));

		notifier->n_status = status;
		spin_lock(&rs->rs_lock);
		list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
		spin_unlock(&rs->rs_lock);

		ro->op_notifier = NULL;
	}

	spin_unlock_irqrestore(&rm->m_rs_lock, flags);

	if (rs) {
		rds_wake_sk_sleep(rs);
		sock_put(rds_rs_to_sk(rs));
	}
}
EXPORT_SYMBOL_GPL(rds_rdma_send_complete);

/*
 * Just like above, except it looks at the atomic op instead.
 */
void rds_atomic_send_complete(struct rds_message *rm, int status)
{
	struct rds_sock *rs = NULL;
	struct rm_atomic_op *ao;
	struct rds_notifier *notifier;
	unsigned long flags;

	spin_lock_irqsave(&rm->m_rs_lock, flags);

	ao = &rm->atomic;
	if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags) &&
	    ao->op_active && ao->op_notify && ao->op_notifier) {
		notifier = ao->op_notifier;
		rs = rm->m_rs;
		sock_hold(rds_rs_to_sk(rs));

		notifier->n_status = status;
		spin_lock(&rs->rs_lock);
		list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
		spin_unlock(&rs->rs_lock);

		ao->op_notifier = NULL;
	}

	spin_unlock_irqrestore(&rm->m_rs_lock, flags);

	if (rs) {
		rds_wake_sk_sleep(rs);
		sock_put(rds_rs_to_sk(rs));
	}
}
EXPORT_SYMBOL_GPL(rds_atomic_send_complete);

/*
 * This is the same as rds_rdma_send_complete except we
 * don't do any locking - we have all the ingredients (message,
 * socket, socket lock) and can just move the notifier.
 */
static inline void
__rds_send_complete(struct rds_sock *rs, struct rds_message *rm, int status)
{
	struct rm_rdma_op *ro;
	struct rm_atomic_op *ao;

	ro = &rm->rdma;
	if (ro->op_active && ro->op_notify && ro->op_notifier) {
		ro->op_notifier->n_status = status;
		list_add_tail(&ro->op_notifier->n_list, &rs->rs_notify_queue);
		ro->op_notifier = NULL;
	}

	ao = &rm->atomic;
	if (ao->op_active && ao->op_notify && ao->op_notifier) {
		ao->op_notifier->n_status = status;
		list_add_tail(&ao->op_notifier->n_list, &rs->rs_notify_queue);
		ao->op_notifier = NULL;
	}

	/* No need to wake the app - caller does this */
}

/*
 * This is called from the IB send completion when we detect
 * an RDMA operation that failed with remote access error.
 * So speed is not an issue here.
 */
struct rds_message *rds_send_get_message(struct rds_connection *conn,
					 struct rm_rdma_op *op)
{
	struct rds_message *rm, *tmp, *found = NULL;
	unsigned long flags;

	spin_lock_irqsave(&conn->c_lock, flags);

	list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
		if (&rm->rdma == op) {
			atomic_inc(&rm->m_refcount);
			found = rm;
			goto out;
		}
	}

	list_for_each_entry_safe(rm, tmp, &conn->c_send_queue, m_conn_item) {
		if (&rm->rdma == op) {
			atomic_inc(&rm->m_refcount);
			found = rm;
			break;
		}
	}

out:
	spin_unlock_irqrestore(&conn->c_lock, flags);

	return found;
}
EXPORT_SYMBOL_GPL(rds_send_get_message);

/*
 * This removes messages from the socket's list if they're on it.  The list
 * argument must be private to the caller, we must be able to modify it
 * without locks.  The messages must have a reference held for their
 * position on the list.  This function will drop that reference after
 * removing the messages from the 'messages' list regardless of whether it
 * found the messages on the socket list or not.
 */
static void rds_send_remove_from_sock(struct list_head *messages, int status)
{
	unsigned long flags;
	struct rds_sock *rs = NULL;
	struct rds_message *rm;

	while (!list_empty(messages)) {
		int was_on_sock = 0;

		rm = list_entry(messages->next, struct rds_message,
				m_conn_item);
		list_del_init(&rm->m_conn_item);

		/*
		 * If we see this flag cleared then we're *sure* that someone
		 * else beat us to removing it from the sock.  If we race
		 * with their flag update we'll get the lock and then really
		 * see that the flag has been cleared.
		 *
		 * The message spinlock makes sure nobody clears rm->m_rs
		 * while we're messing with it. It does not prevent the
		 * message from being removed from the socket, though.
		 */
		spin_lock_irqsave(&rm->m_rs_lock, flags);
		if (!test_bit(RDS_MSG_ON_SOCK, &rm->m_flags))
			goto unlock_and_drop;

		if (rs != rm->m_rs) {
			if (rs) {
				rds_wake_sk_sleep(rs);
				sock_put(rds_rs_to_sk(rs));
			}
			rs = rm->m_rs;
			sock_hold(rds_rs_to_sk(rs));
		}
		spin_lock(&rs->rs_lock);

		if (test_and_clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags)) {
			struct rm_rdma_op *ro = &rm->rdma;
			struct rds_notifier *notifier;

			list_del_init(&rm->m_sock_item);
			rds_send_sndbuf_remove(rs, rm);

			if (ro->op_active && ro->op_notifier &&
			    (ro->op_notify || (ro->op_recverr && status))) {
				notifier = ro->op_notifier;
				list_add_tail(&notifier->n_list,
						&rs->rs_notify_queue);
				if (!notifier->n_status)
					notifier->n_status = status;
				rm->rdma.op_notifier = NULL;
			}
			was_on_sock = 1;
			rm->m_rs = NULL;
		}
		spin_unlock(&rs->rs_lock);

unlock_and_drop:
		spin_unlock_irqrestore(&rm->m_rs_lock, flags);
		rds_message_put(rm);
		if (was_on_sock)
			rds_message_put(rm);
	}

	if (rs) {
		rds_wake_sk_sleep(rs);
		sock_put(rds_rs_to_sk(rs));
	}
}

/*
 * Transports call here when they've determined that the receiver queued
 * messages up to, and including, the given sequence number.  Messages are
 * moved to the retrans queue when rds_send_xmit picks them off the send
 * queue. This means that in the TCP case, the message may not have been
 * assigned the m_ack_seq yet - but that's fine as long as tcp_is_acked
 * checks the RDS_MSG_HAS_ACK_SEQ bit.
 *
 * XXX It's not clear to me how this is safely serialized with socket
 * destruction.  Maybe it should bail if it sees SOCK_DEAD.
 */
void rds_send_drop_acked(struct rds_connection *conn, u64 ack,
			 is_acked_func is_acked)
{
	struct rds_message *rm, *tmp;
	unsigned long flags;
	LIST_HEAD(list);

	spin_lock_irqsave(&conn->c_lock, flags);

	list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
		if (!rds_send_is_acked(rm, ack, is_acked))
			break;

		list_move(&rm->m_conn_item, &list);
		clear_bit(RDS_MSG_ON_CONN, &rm->m_flags);
	}

	/* order flag updates with spin locks */
	if (!list_empty(&list))
		smp_mb__after_clear_bit();

	spin_unlock_irqrestore(&conn->c_lock, flags);

	/* now remove the messages from the sock list as needed */
	rds_send_remove_from_sock(&list, RDS_RDMA_SUCCESS);
}
EXPORT_SYMBOL_GPL(rds_send_drop_acked);

 | 673 | void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest) | 
 | 674 | { | 
 | 675 | 	struct rds_message *rm, *tmp; | 
 | 676 | 	struct rds_connection *conn; | 
| Andy Grover | 7c82eaf | 2010-02-19 18:01:41 -0800 | [diff] [blame] | 677 | 	unsigned long flags; | 
| Andy Grover | 5c11559 | 2009-02-24 15:30:27 +0000 | [diff] [blame] | 678 | 	LIST_HEAD(list); | 
| Andy Grover | 5c11559 | 2009-02-24 15:30:27 +0000 | [diff] [blame] | 679 |  | 
 | 680 | 	/* get all the messages we're dropping under the rs lock */ | 
 | 681 | 	spin_lock_irqsave(&rs->rs_lock, flags); | 
 | 682 |  | 
 | 683 | 	list_for_each_entry_safe(rm, tmp, &rs->rs_send_queue, m_sock_item) { | 
 | 684 | 		if (dest && (dest->sin_addr.s_addr != rm->m_daddr || | 
 | 685 | 			     dest->sin_port != rm->m_inc.i_hdr.h_dport)) | 
 | 686 | 			continue; | 
 | 687 |  | 
| Andy Grover | 5c11559 | 2009-02-24 15:30:27 +0000 | [diff] [blame] | 688 | 		list_move(&rm->m_sock_item, &list); | 
 | 689 | 		rds_send_sndbuf_remove(rs, rm); | 
 | 690 | 		clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags); | 
| Andy Grover | 5c11559 | 2009-02-24 15:30:27 +0000 | [diff] [blame] | 691 | 	} | 
 | 692 |  | 
 | 693 | 	/* order flag updates with the rs lock */ | 
| Andy Grover | 7c82eaf | 2010-02-19 18:01:41 -0800 | [diff] [blame] | 694 | 	smp_mb__after_clear_bit(); | 
| Andy Grover | 5c11559 | 2009-02-24 15:30:27 +0000 | [diff] [blame] | 695 |  | 
 | 696 | 	spin_unlock_irqrestore(&rs->rs_lock, flags); | 
 | 697 |  | 
| Andy Grover | 7c82eaf | 2010-02-19 18:01:41 -0800 | [diff] [blame] | 698 | 	if (list_empty(&list)) | 
 | 699 | 		return; | 
| Andy Grover | 5c11559 | 2009-02-24 15:30:27 +0000 | [diff] [blame] | 700 |  | 
| Andy Grover | 7c82eaf | 2010-02-19 18:01:41 -0800 | [diff] [blame] | 701 | 	/* Remove the messages from the conn */ | 
| Andy Grover | 5c11559 | 2009-02-24 15:30:27 +0000 | [diff] [blame] | 702 | 	list_for_each_entry(rm, &list, m_sock_item) { | 
| Andy Grover | 7c82eaf | 2010-02-19 18:01:41 -0800 | [diff] [blame] | 703 |  | 
 | 704 | 		conn = rm->m_inc.i_conn; | 
| Andy Grover | 7c82eaf | 2010-02-19 18:01:41 -0800 | [diff] [blame] | 705 |  | 
| Andy Grover | 9de0864 | 2010-03-29 16:50:54 -0700 | [diff] [blame] | 706 | 		spin_lock_irqsave(&conn->c_lock, flags); | 
| Andy Grover | 7c82eaf | 2010-02-19 18:01:41 -0800 | [diff] [blame] | 707 | 		/* | 
 | 708 | 		 * Maybe someone else beat us to removing rm from the conn. | 
 | 709 | 		 * If we race with their flag update we'll get the lock and | 
 | 710 | 		 * then really see that the flag has been cleared. | 
 | 711 | 		 */ | 
 | 712 | 		if (!test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags)) { | 
 | 713 | 			spin_unlock_irqrestore(&conn->c_lock, flags); | 
 | 714 | 			continue; | 
 | 715 | 		} | 
| Andy Grover | 9de0864 | 2010-03-29 16:50:54 -0700 | [diff] [blame] | 716 | 		list_del_init(&rm->m_conn_item); | 
 | 717 | 		spin_unlock_irqrestore(&conn->c_lock, flags); | 
| Andy Grover | 7c82eaf | 2010-02-19 18:01:41 -0800 | [diff] [blame] | 718 |  | 
 | 719 | 		/* | 
 | 720 | 		 * Couldn't grab m_rs_lock in top loop (lock ordering), | 
 | 721 | 		 * but we can now. | 
 | 722 | 		 */ | 
| Andy Grover | 9de0864 | 2010-03-29 16:50:54 -0700 | [diff] [blame] | 723 | 		spin_lock_irqsave(&rm->m_rs_lock, flags); | 
| Andy Grover | 7c82eaf | 2010-02-19 18:01:41 -0800 | [diff] [blame] | 724 |  | 
| Tina Yang | 550a800 | 2010-03-11 13:50:03 +0000 | [diff] [blame] | 725 | 		spin_lock(&rs->rs_lock); | 
| Andy Grover | 940786e | 2010-02-19 18:04:58 -0800 | [diff] [blame] | 726 | 		__rds_send_complete(rs, rm, RDS_RDMA_CANCELED); | 
| Tina Yang | 550a800 | 2010-03-11 13:50:03 +0000 | [diff] [blame] | 727 | 		spin_unlock(&rs->rs_lock); | 
| Andy Grover | 7c82eaf | 2010-02-19 18:01:41 -0800 | [diff] [blame] | 728 |  | 
| Andy Grover | 5c11559 | 2009-02-24 15:30:27 +0000 | [diff] [blame] | 729 | 		rm->m_rs = NULL; | 
| Andy Grover | 9de0864 | 2010-03-29 16:50:54 -0700 | [diff] [blame] | 730 | 		spin_unlock_irqrestore(&rm->m_rs_lock, flags); | 
| Andy Grover | 5c11559 | 2009-02-24 15:30:27 +0000 | [diff] [blame] | 731 |  | 
| Andy Grover | 7c82eaf | 2010-02-19 18:01:41 -0800 | [diff] [blame] | 732 | 		rds_message_put(rm); | 
| Andy Grover | 5c11559 | 2009-02-24 15:30:27 +0000 | [diff] [blame] | 733 | 	} | 
 | 734 |  | 
| Andy Grover | 7c82eaf | 2010-02-19 18:01:41 -0800 | [diff] [blame] | 735 | 	rds_wake_sk_sleep(rs); | 
| Tina Yang | 550a800 | 2010-03-11 13:50:03 +0000 | [diff] [blame] | 736 |  | 
| Andy Grover | 5c11559 | 2009-02-24 15:30:27 +0000 | [diff] [blame] | 737 | 	while (!list_empty(&list)) { | 
 | 738 | 		rm = list_entry(list.next, struct rds_message, m_sock_item); | 
 | 739 | 		list_del_init(&rm->m_sock_item); | 
 | 740 |  | 
 | 741 | 		rds_message_wait(rm); | 
 | 742 | 		rds_message_put(rm); | 
 | 743 | 	} | 
 | 744 | } | 
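
/* A note on the tail loop above: rds_message_wait() blocks until the
 * transport drops its mapping of the message (rds_message_unmapped()
 * clears RDS_MSG_MAPPED and wakes the waiter), so the final put never
 * frees memory that may still be under DMA.
 */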

/*
 * we only want this to fire once so we use the caller's 'queued'.  It's
 * possible that another thread can race with us and remove the
 * message from the flow with RDS_CANCEL_SENT_TO.
 */
static int rds_send_queue_rm(struct rds_sock *rs, struct rds_connection *conn,
			     struct rds_message *rm, __be16 sport,
			     __be16 dport, int *queued)
{
	unsigned long flags;
	u32 len;

	if (*queued)
		goto out;

	len = be32_to_cpu(rm->m_inc.i_hdr.h_len);

	/* this is the only place which holds both the socket's rs_lock
	 * and the connection's c_lock */
	spin_lock_irqsave(&rs->rs_lock, flags);

	/*
	 * If there is a little space in sndbuf, we don't queue anything,
	 * and userspace gets -EAGAIN. But poll() indicates there's send
	 * room. This can lead to bad behavior (spinning) if snd_bytes isn't
	 * freed up by incoming acks. So we check the *old* value of
	 * rs_snd_bytes here to allow the last msg to exceed the buffer,
	 * and poll() now knows no more data can be sent.
	 */
	if (rs->rs_snd_bytes < rds_sk_sndbuf(rs)) {
		rs->rs_snd_bytes += len;

		/* let recv side know we are close to send space exhaustion.
		 * This is probably not the optimal way to do it, as this
		 * means we set the flag on *all* messages as soon as our
		 * throughput hits a certain threshold.
		 */
		if (rs->rs_snd_bytes >= rds_sk_sndbuf(rs) / 2)
			__set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);

		list_add_tail(&rm->m_sock_item, &rs->rs_send_queue);
		set_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
		rds_message_addref(rm);
		rm->m_rs = rs;

		/* The code ordering is a little weird, but we're
		   trying to minimize the time we hold c_lock */
		rds_message_populate_header(&rm->m_inc.i_hdr, sport, dport, 0);
		rm->m_inc.i_conn = conn;
		rds_message_addref(rm);

		spin_lock(&conn->c_lock);
		rm->m_inc.i_hdr.h_sequence = cpu_to_be64(conn->c_next_tx_seq++);
		list_add_tail(&rm->m_conn_item, &conn->c_send_queue);
		set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
		spin_unlock(&conn->c_lock);

		rdsdebug("queued msg %p len %d, rs %p bytes %d seq %llu\n",
			 rm, len, rs, rs->rs_snd_bytes,
			 (unsigned long long)be64_to_cpu(rm->m_inc.i_hdr.h_sequence));

		*queued = 1;
	}

	spin_unlock_irqrestore(&rs->rs_lock, flags);
out:
	return *queued;
}
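
/* Note the two rds_message_addref() calls above: the message ends up with
 * one reference for its spot on the socket's rs_send_queue and a second
 * for its spot on the connection's c_send_queue; each list drops its own
 * reference when the message comes off it.
 */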

/*
 * rds_message is getting to be quite complicated, and we'd like to allocate
 * it all in one go. This figures out how big it needs to be up front.
 */
static int rds_rm_size(struct msghdr *msg, int data_len)
{
	struct cmsghdr *cmsg;
	int size = 0;
	int cmsg_groups = 0;
	int retval;

	for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;

		if (cmsg->cmsg_level != SOL_RDS)
			continue;

		switch (cmsg->cmsg_type) {
		case RDS_CMSG_RDMA_ARGS:
			cmsg_groups |= 1;
			retval = rds_rdma_extra_size(CMSG_DATA(cmsg));
			if (retval < 0)
				return retval;
			size += retval;

			break;

		case RDS_CMSG_RDMA_DEST:
		case RDS_CMSG_RDMA_MAP:
			cmsg_groups |= 2;
			/* these are valid but do not add any size */
			break;

		case RDS_CMSG_ATOMIC_CSWP:
		case RDS_CMSG_ATOMIC_FADD:
		case RDS_CMSG_MASKED_ATOMIC_CSWP:
		case RDS_CMSG_MASKED_ATOMIC_FADD:
			cmsg_groups |= 1;
			size += sizeof(struct scatterlist);
			break;

		default:
			return -EINVAL;
		}

	}

	size += ceil(data_len, PAGE_SIZE) * sizeof(struct scatterlist);

	/* Ensure (DEST, MAP) are never used with (ARGS, ATOMIC) */
	if (cmsg_groups == 3)
		return -EINVAL;

	return size;
}
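
/* A worked example of the sizing above (assuming ceil() is RDS's round-up
 * division helper from rds.h): a 9000-byte payload on a 4096-byte-page box
 * needs ceil(9000, 4096) = 3 data scatterlist entries, and an attached
 * atomic op adds one more, so rds_message_alloc() is asked for
 * 4 * sizeof(struct scatterlist) of extra room.
 */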

static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm,
			 struct msghdr *msg, int *allocated_mr)
{
	struct cmsghdr *cmsg;
	int ret = 0;

	for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;

		if (cmsg->cmsg_level != SOL_RDS)
			continue;

		/* As a side effect, RDMA_DEST and RDMA_MAP will set
		 * rm->rdma.m_rdma_cookie and rm->rdma.m_rdma_mr.
		 */
		switch (cmsg->cmsg_type) {
		case RDS_CMSG_RDMA_ARGS:
			ret = rds_cmsg_rdma_args(rs, rm, cmsg);
			break;

		case RDS_CMSG_RDMA_DEST:
			ret = rds_cmsg_rdma_dest(rs, rm, cmsg);
			break;

		case RDS_CMSG_RDMA_MAP:
			ret = rds_cmsg_rdma_map(rs, rm, cmsg);
			if (!ret)
				*allocated_mr = 1;
			break;
		case RDS_CMSG_ATOMIC_CSWP:
		case RDS_CMSG_ATOMIC_FADD:
		case RDS_CMSG_MASKED_ATOMIC_CSWP:
		case RDS_CMSG_MASKED_ATOMIC_FADD:
			ret = rds_cmsg_atomic(rs, rm, cmsg);
			break;

		default:
			return -EINVAL;
		}

		if (ret)
			break;
	}

	return ret;
}

int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
		size_t payload_len)
{
	struct sock *sk = sock->sk;
	struct rds_sock *rs = rds_sk_to_rs(sk);
	struct sockaddr_in *usin = (struct sockaddr_in *)msg->msg_name;
	__be32 daddr;
	__be16 dport;
	struct rds_message *rm = NULL;
	struct rds_connection *conn;
	int ret = 0;
	int queued = 0, allocated_mr = 0;
	int nonblock = msg->msg_flags & MSG_DONTWAIT;
	long timeo = sock_sndtimeo(sk, nonblock);

	/* Mirror Linux UDP, which in turn mirrors BSD's error message
	 * compatibility */
	/* XXX: Perhaps MSG_MORE someday */
	if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_CMSG_COMPAT)) {
		printk(KERN_INFO "msg_flags 0x%08X\n", msg->msg_flags);
		ret = -EOPNOTSUPP;
		goto out;
	}

	if (msg->msg_namelen) {
		/* XXX fail non-unicast destination IPs? */
		if (msg->msg_namelen < sizeof(*usin) || usin->sin_family != AF_INET) {
			ret = -EINVAL;
			goto out;
		}
		daddr = usin->sin_addr.s_addr;
		dport = usin->sin_port;
	} else {
		/* We only care about consistency with ->connect() */
		lock_sock(sk);
		daddr = rs->rs_conn_addr;
		dport = rs->rs_conn_port;
		release_sock(sk);
	}

	/* racing with another thread binding seems ok here */
	if (daddr == 0 || rs->rs_bound_addr == 0) {
		ret = -ENOTCONN; /* XXX not a great errno */
		goto out;
	}

	/* size of rm including all sgs */
	ret = rds_rm_size(msg, payload_len);
	if (ret < 0)
		goto out;

	rm = rds_message_alloc(ret, GFP_KERNEL);
	if (!rm) {
		ret = -ENOMEM;
		goto out;
	}

	/* Attach data to the rm */
	if (payload_len) {
		rm->data.op_sg = rds_message_alloc_sgs(rm, ceil(payload_len, PAGE_SIZE));
		if (!rm->data.op_sg) {
			ret = -ENOMEM;
			goto out;
		}
		ret = rds_message_copy_from_user(rm, msg->msg_iov, payload_len);
		if (ret)
			goto out;
	}
	rm->data.op_active = 1;

	rm->m_daddr = daddr;

	/* rds_conn_create has a spinlock that runs with IRQ off.
	 * Caching the conn in the socket helps a lot. */
	if (rs->rs_conn && rs->rs_conn->c_faddr == daddr)
		conn = rs->rs_conn;
	else {
		conn = rds_conn_create_outgoing(rs->rs_bound_addr, daddr,
					rs->rs_transport,
					sock->sk->sk_allocation);
		if (IS_ERR(conn)) {
			ret = PTR_ERR(conn);
			goto out;
		}
		rs->rs_conn = conn;
	}

	/* Parse any control messages the user may have included. */
	ret = rds_cmsg_send(rs, rm, msg, &allocated_mr);
	if (ret)
		goto out;

	if (rm->rdma.op_active && !conn->c_trans->xmit_rdma) {
		printk_ratelimited(KERN_NOTICE "rdma_op %p conn xmit_rdma %p\n",
			       &rm->rdma, conn->c_trans->xmit_rdma);
		ret = -EOPNOTSUPP;
		goto out;
	}

	if (rm->atomic.op_active && !conn->c_trans->xmit_atomic) {
		printk_ratelimited(KERN_NOTICE "atomic_op %p conn xmit_atomic %p\n",
			       &rm->atomic, conn->c_trans->xmit_atomic);
		ret = -EOPNOTSUPP;
		goto out;
	}

	rds_conn_connect_if_down(conn);

	ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs);
	if (ret) {
		rs->rs_seen_congestion = 1;
		goto out;
	}

	while (!rds_send_queue_rm(rs, conn, rm, rs->rs_bound_port,
				  dport, &queued)) {
		rds_stats_inc(s_send_queue_full);
		/* XXX make sure this is reasonable */
		if (payload_len > rds_sk_sndbuf(rs)) {
			ret = -EMSGSIZE;
			goto out;
		}
		if (nonblock) {
			ret = -EAGAIN;
			goto out;
		}

		timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
					rds_send_queue_rm(rs, conn, rm,
							  rs->rs_bound_port,
							  dport,
							  &queued),
					timeo);
		rdsdebug("sendmsg woke queued %d timeo %ld\n", queued, timeo);
		if (timeo > 0 || timeo == MAX_SCHEDULE_TIMEOUT)
			continue;

		ret = timeo;
		if (ret == 0)
			ret = -ETIMEDOUT;
		goto out;
	}

	/*
	 * By now we've committed to the send.  We reuse rds_send_worker()
	 * to retry sends in the rds thread if the transport asks us to.
	 */
	rds_stats_inc(s_send_queued);

	if (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags))
		rds_send_xmit(conn);

	rds_message_put(rm);
	return payload_len;

out:
	/* If the user included a RDMA_MAP cmsg, we allocated an MR on the fly.
	 * If the sendmsg goes through, we keep the MR. If it fails with EAGAIN
	 * or in any other way, we need to destroy the MR again */
	if (allocated_mr)
		rds_rdma_unuse(rs, rds_rdma_cookie_key(rm->m_rdma_cookie), 1);

	if (rm)
		rds_message_put(rm);
	return ret;
}
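
/* A minimal userspace sketch of driving this path (assuming the PF_RDS
 * constant from the system's rds.h; error handling omitted):
 *
 *	int fd = socket(PF_RDS, SOCK_SEQPACKET, 0);
 *	struct sockaddr_in sin = { .sin_family = AF_INET };
 *	sin.sin_addr.s_addr = inet_addr("192.168.0.1");
 *	sin.sin_port = htons(4000);
 *	bind(fd, (struct sockaddr *)&sin, sizeof(sin));
 *
 *	struct sockaddr_in dst = sin;
 *	dst.sin_addr.s_addr = inet_addr("192.168.0.2");
 *	sendto(fd, buf, len, 0, (struct sockaddr *)&dst, sizeof(dst));
 */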

/*
 * Reply to a ping packet.
 */
int
rds_send_pong(struct rds_connection *conn, __be16 dport)
{
	struct rds_message *rm;
	unsigned long flags;
	int ret = 0;

	rm = rds_message_alloc(0, GFP_ATOMIC);
	if (!rm) {
		ret = -ENOMEM;
		goto out;
	}

	rm->m_daddr = conn->c_faddr;
	rm->data.op_active = 1;

	rds_conn_connect_if_down(conn);

	ret = rds_cong_wait(conn->c_fcong, dport, 1, NULL);
	if (ret)
		goto out;

	spin_lock_irqsave(&conn->c_lock, flags);
	list_add_tail(&rm->m_conn_item, &conn->c_send_queue);
	set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
	rds_message_addref(rm);
	rm->m_inc.i_conn = conn;

	rds_message_populate_header(&rm->m_inc.i_hdr, 0, dport,
				    conn->c_next_tx_seq);
	conn->c_next_tx_seq++;
	spin_unlock_irqrestore(&conn->c_lock, flags);

	rds_stats_inc(s_send_queued);
	rds_stats_inc(s_send_pong);

	if (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags))
		rds_send_xmit(conn);

	rds_message_put(rm);
	return 0;

out:
	if (rm)
		rds_message_put(rm);
	return ret;
}