/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <net/inet_hashtables.h>

#include "rds.h"
#include "loop.h"

#define RDS_CONNECTION_HASH_BITS 12
#define RDS_CONNECTION_HASH_ENTRIES (1 << RDS_CONNECTION_HASH_BITS)
#define RDS_CONNECTION_HASH_MASK (RDS_CONNECTION_HASH_ENTRIES - 1)

/* converting this to RCU is a chore for another day.. */
static DEFINE_SPINLOCK(rds_conn_lock);
static unsigned long rds_conn_count;
static struct hlist_head rds_conn_hash[RDS_CONNECTION_HASH_ENTRIES];
static struct kmem_cache *rds_conn_slab;

static struct hlist_head *rds_conn_bucket(__be32 laddr, __be32 faddr)
{
	/* Pass NULL, don't need struct net for hash */
	unsigned long hash = inet_ehashfn(NULL,
					  be32_to_cpu(laddr), 0,
					  be32_to_cpu(faddr), 0);
	return &rds_conn_hash[hash & RDS_CONNECTION_HASH_MASK];
}
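
/*
 * Note on sizing (an editorial aside, not from the original source):
 * 12 hash bits give 4096 buckets, and RDS_CONNECTION_HASH_MASK folds
 * the 32-bit inet_ehashfn() value into that range.  Ports are passed
 * as 0 above because connections are hashed on the address pair alone;
 * the transport is compared during lookup rather than hashed.
 */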

#define rds_conn_info_set(var, test, suffix) do {		\
	if (test)						\
		var |= RDS_INFO_CONNECTION_FLAG_##suffix;	\
} while (0)

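/*
 * Illustrative expansion of the macro above:
 *
 *	rds_conn_info_set(cinfo->flags, conn_up, CONNECTED);
 *
 * becomes
 *
 *	if (conn_up)
 *		cinfo->flags |= RDS_INFO_CONNECTION_FLAG_CONNECTED;
 */
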
/* rcu read lock must be held or the connection spinlock */
static struct rds_connection *rds_conn_lookup(struct hlist_head *head,
					      __be32 laddr, __be32 faddr,
					      struct rds_transport *trans)
{
	struct rds_connection *conn, *ret = NULL;
	struct hlist_node *pos;

	hlist_for_each_entry_rcu(conn, pos, head, c_hash_node) {
		if (conn->c_faddr == faddr && conn->c_laddr == laddr &&
		    conn->c_trans == trans) {
			ret = conn;
			break;
		}
	}
	rdsdebug("returning conn %p for %pI4 -> %pI4\n", ret,
		 &laddr, &faddr);
	return ret;
}

/*
 * This is called by transports as they're bringing down a connection.
 * It clears partial message state so that the transport can start sending
 * and receiving over this connection again in the future.  It is up to
 * the transport to have serialized this call with its send and recv.
 */
static void rds_conn_reset(struct rds_connection *conn)
{
	rdsdebug("connection %pI4 to %pI4 reset\n",
		 &conn->c_laddr, &conn->c_faddr);

	rds_stats_inc(s_conn_reset);
	rds_send_reset(conn);
	conn->c_flags = 0;

	/* Do not clear next_rx_seq here, else we cannot distinguish
	 * retransmitted packets from new packets, and will hand all
	 * of them to the application. That is not consistent with the
	 * reliability guarantees of RDS. */
}

/*
 * There is only ever one 'conn' for a given pair of addresses in the
 * system at a time.  They contain messages to be retransmitted and so
 * span the lifetime of the actual underlying transport connections.
 *
 * For now they are not garbage collected once they're created.  They
 * are torn down as the module is removed, if ever.
 */
static struct rds_connection *__rds_conn_create(__be32 laddr, __be32 faddr,
				       struct rds_transport *trans, gfp_t gfp,
				       int is_outgoing)
{
	struct rds_connection *conn, *parent = NULL;
	struct hlist_head *head = rds_conn_bucket(laddr, faddr);
	struct rds_transport *loop_trans;
	unsigned long flags;
	int ret;

	rcu_read_lock();
	conn = rds_conn_lookup(head, laddr, faddr, trans);
	if (conn && conn->c_loopback && conn->c_trans != &rds_loop_transport &&
	    !is_outgoing) {
		/* This is a looped back IB connection, and we're
		 * called by the code handling the incoming connect.
		 * We need a second connection object into which we
		 * can stick the other QP. */
		parent = conn;
		conn = parent->c_passive;
	}
	rcu_read_unlock();
	if (conn)
		goto out;

	conn = kmem_cache_zalloc(rds_conn_slab, gfp);
	if (!conn) {
		conn = ERR_PTR(-ENOMEM);
		goto out;
	}

	INIT_HLIST_NODE(&conn->c_hash_node);
	conn->c_laddr = laddr;
	conn->c_faddr = faddr;
	spin_lock_init(&conn->c_lock);
	conn->c_next_tx_seq = 1;

	init_waitqueue_head(&conn->c_waitq);
	INIT_LIST_HEAD(&conn->c_send_queue);
	INIT_LIST_HEAD(&conn->c_retrans);

	ret = rds_cong_get_maps(conn);
	if (ret) {
		kmem_cache_free(rds_conn_slab, conn);
		conn = ERR_PTR(ret);
		goto out;
	}

	/*
	 * This is where a connection becomes loopback.  If *any* RDS sockets
	 * can bind to the destination address then we'd rather the messages
	 * flow through loopback rather than either transport.
	 */
	loop_trans = rds_trans_get_preferred(faddr);
	if (loop_trans) {
		rds_trans_put(loop_trans);
		conn->c_loopback = 1;
		if (is_outgoing && trans->t_prefer_loopback) {
			/* "outgoing" connection - and the transport
			 * says it wants the connection handled by the
			 * loopback transport. This is what TCP does.
			 */
			trans = &rds_loop_transport;
		}
	}

	conn->c_trans = trans;

	ret = trans->conn_alloc(conn, gfp);
	if (ret) {
		kmem_cache_free(rds_conn_slab, conn);
		conn = ERR_PTR(ret);
		goto out;
	}

	atomic_set(&conn->c_state, RDS_CONN_DOWN);
	conn->c_reconnect_jiffies = 0;
	INIT_DELAYED_WORK(&conn->c_send_w, rds_send_worker);
	INIT_DELAYED_WORK(&conn->c_recv_w, rds_recv_worker);
	INIT_DELAYED_WORK(&conn->c_conn_w, rds_connect_worker);
	INIT_WORK(&conn->c_down_w, rds_shutdown_worker);
	mutex_init(&conn->c_cm_lock);
	conn->c_flags = 0;

	rdsdebug("allocated conn %p for %pI4 -> %pI4 over %s %s\n",
		 conn, &laddr, &faddr,
		 trans->t_name ? trans->t_name : "[unknown]",
		 is_outgoing ? "(outgoing)" : "");

	/*
	 * Since we ran without holding the conn lock, someone could
	 * have created the same conn (either normal or passive) in the
	 * interim. We check while holding the lock. If we won, we complete
	 * init and return our conn. If we lost, we rollback and return the
	 * other one.
	 */
	spin_lock_irqsave(&rds_conn_lock, flags);
	if (parent) {
		/* Creating passive conn */
		if (parent->c_passive) {
			trans->conn_free(conn->c_transport_data);
			kmem_cache_free(rds_conn_slab, conn);
			conn = parent->c_passive;
		} else {
			parent->c_passive = conn;
			rds_cong_add_conn(conn);
			rds_conn_count++;
		}
	} else {
		/* Creating normal conn */
		struct rds_connection *found;

		found = rds_conn_lookup(head, laddr, faddr, trans);
		if (found) {
			trans->conn_free(conn->c_transport_data);
			kmem_cache_free(rds_conn_slab, conn);
			conn = found;
		} else {
			hlist_add_head_rcu(&conn->c_hash_node, head);
			rds_cong_add_conn(conn);
			rds_conn_count++;
		}
	}
	spin_unlock_irqrestore(&rds_conn_lock, flags);

out:
	return conn;
}

struct rds_connection *rds_conn_create(__be32 laddr, __be32 faddr,
				       struct rds_transport *trans, gfp_t gfp)
{
	return __rds_conn_create(laddr, faddr, trans, gfp, 0);
}
EXPORT_SYMBOL_GPL(rds_conn_create);

struct rds_connection *rds_conn_create_outgoing(__be32 laddr, __be32 faddr,
				       struct rds_transport *trans, gfp_t gfp)
{
	return __rds_conn_create(laddr, faddr, trans, gfp, 1);
}
EXPORT_SYMBOL_GPL(rds_conn_create_outgoing);

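/*
 * Sketch of a typical caller (illustrative only; see rds_sendmsg() in
 * send.c for the real thing):
 *
 *	conn = rds_conn_create_outgoing(rs->rs_bound_addr, daddr,
 *					rs->rs_transport,
 *					sock->sk->sk_allocation);
 *	if (IS_ERR(conn))
 *		return PTR_ERR(conn);
 *	rds_conn_connect_if_down(conn);
 *
 * Incoming connects instead go through rds_conn_create(), which may hand
 * back a second, passive conn for IB loopback (see __rds_conn_create()).
 */
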
void rds_conn_shutdown(struct rds_connection *conn)
{
	/* shut it down unless it's down already */
	if (!rds_conn_transition(conn, RDS_CONN_DOWN, RDS_CONN_DOWN)) {
		/*
		 * Quiesce the connection mgmt handlers before we start tearing
		 * things down. We don't hold the mutex for the entire
		 * duration of the shutdown operation, else we may be
		 * deadlocking with the CM handler. Instead, the CM event
		 * handler is supposed to check for state DISCONNECTING.
		 */
		mutex_lock(&conn->c_cm_lock);
		if (!rds_conn_transition(conn, RDS_CONN_UP, RDS_CONN_DISCONNECTING)
		 && !rds_conn_transition(conn, RDS_CONN_ERROR, RDS_CONN_DISCONNECTING)) {
			rds_conn_error(conn, "shutdown called in state %d\n",
					atomic_read(&conn->c_state));
			mutex_unlock(&conn->c_cm_lock);
			return;
		}
		mutex_unlock(&conn->c_cm_lock);

		wait_event(conn->c_waitq,
			   !test_bit(RDS_IN_XMIT, &conn->c_flags));

		conn->c_trans->conn_shutdown(conn);
		rds_conn_reset(conn);

		if (!rds_conn_transition(conn, RDS_CONN_DISCONNECTING, RDS_CONN_DOWN)) {
			/* This can happen - e.g. when we're in the middle of
			 * tearing down the connection, and someone unloads
			 * the rds module. Quite reproducible with loopback
			 * connections. Mostly harmless.
			 */
			rds_conn_error(conn,
				"%s: failed to transition to state DOWN, "
				"current state is %d\n",
				__func__,
				atomic_read(&conn->c_state));
			return;
		}
	}

	/* Then reconnect if it's still live.
	 * The passive side of an IB loopback connection is never added
	 * to the conn hash, so we never trigger a reconnect on this
	 * conn - the reconnect is always triggered by the active peer. */
	cancel_delayed_work_sync(&conn->c_conn_w);
	rcu_read_lock();
	if (!hlist_unhashed(&conn->c_hash_node)) {
		rcu_read_unlock();
		rds_queue_reconnect(conn);
	} else {
		rcu_read_unlock();
	}
}

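/*
 * Informal summary of the state machine driven above (the CONNECTING ->
 * UP step happens in the transports' connect paths, not in this file):
 *
 *	DOWN -> CONNECTING         rds_connect_worker()
 *	CONNECTING -> UP           transport completes the handshake
 *	UP/ERROR -> DISCONNECTING  rds_conn_shutdown()
 *	DISCONNECTING -> DOWN      teardown finished above
 *	any -> ERROR               rds_conn_drop()
 */
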
/*
 * Stop and free a connection.
 *
 * This can only be used in very limited circumstances.  It assumes that once
 * the conn has been shutdown that no one else is referencing the connection.
 * We can only ensure this in the rmmod path in the current code.
 */
void rds_conn_destroy(struct rds_connection *conn)
{
	struct rds_message *rm, *rtmp;
	unsigned long flags;

	rdsdebug("freeing conn %p for %pI4 -> %pI4\n",
		 conn, &conn->c_laddr, &conn->c_faddr);

	/* Ensure conn will not be scheduled for reconnect */
	spin_lock_irq(&rds_conn_lock);
	hlist_del_init_rcu(&conn->c_hash_node);
	spin_unlock_irq(&rds_conn_lock);
	synchronize_rcu();

	/* shut the connection down */
	rds_conn_drop(conn);
	flush_work(&conn->c_down_w);

	/* make sure lingering queued work won't try to ref the conn */
	cancel_delayed_work_sync(&conn->c_send_w);
	cancel_delayed_work_sync(&conn->c_recv_w);

	/* tear down queued messages */
	list_for_each_entry_safe(rm, rtmp,
				 &conn->c_send_queue,
				 m_conn_item) {
		list_del_init(&rm->m_conn_item);
		BUG_ON(!list_empty(&rm->m_sock_item));
		rds_message_put(rm);
	}
	if (conn->c_xmit_rm)
		rds_message_put(conn->c_xmit_rm);

	conn->c_trans->conn_free(conn->c_transport_data);

	/*
	 * The congestion maps aren't freed up here.  They're
	 * freed by rds_cong_exit() after all the connections
	 * have been freed.
	 */
	rds_cong_remove_conn(conn);

	BUG_ON(!list_empty(&conn->c_retrans));
	kmem_cache_free(rds_conn_slab, conn);

	spin_lock_irqsave(&rds_conn_lock, flags);
	rds_conn_count--;
	spin_unlock_irqrestore(&rds_conn_lock, flags);
}
EXPORT_SYMBOL_GPL(rds_conn_destroy);

static void rds_conn_message_info(struct socket *sock, unsigned int len,
				  struct rds_info_iterator *iter,
				  struct rds_info_lengths *lens,
				  int want_send)
{
	struct hlist_head *head;
	struct hlist_node *pos;
	struct list_head *list;
	struct rds_connection *conn;
	struct rds_message *rm;
	unsigned int total = 0;
	unsigned long flags;
	size_t i;

	len /= sizeof(struct rds_info_message);

	rcu_read_lock();

	for (i = 0, head = rds_conn_hash; i < ARRAY_SIZE(rds_conn_hash);
	     i++, head++) {
		hlist_for_each_entry_rcu(conn, pos, head, c_hash_node) {
			if (want_send)
				list = &conn->c_send_queue;
			else
				list = &conn->c_retrans;

			spin_lock_irqsave(&conn->c_lock, flags);

			/* XXX too lazy to maintain counts.. */
			list_for_each_entry(rm, list, m_conn_item) {
				total++;
				if (total <= len)
					rds_inc_info_copy(&rm->m_inc, iter,
							  conn->c_laddr,
							  conn->c_faddr, 0);
			}

			spin_unlock_irqrestore(&conn->c_lock, flags);
		}
	}
	rcu_read_unlock();

	lens->nr = total;
	lens->each = sizeof(struct rds_info_message);
}

static void rds_conn_message_info_send(struct socket *sock, unsigned int len,
				       struct rds_info_iterator *iter,
				       struct rds_info_lengths *lens)
{
	rds_conn_message_info(sock, len, iter, lens, 1);
}

static void rds_conn_message_info_retrans(struct socket *sock,
					  unsigned int len,
					  struct rds_info_iterator *iter,
					  struct rds_info_lengths *lens)
{
	rds_conn_message_info(sock, len, iter, lens, 0);
}

void rds_for_each_conn_info(struct socket *sock, unsigned int len,
			  struct rds_info_iterator *iter,
			  struct rds_info_lengths *lens,
			  int (*visitor)(struct rds_connection *, void *),
			  size_t item_len)
{
	uint64_t buffer[(item_len + 7) / 8];
	struct hlist_head *head;
	struct hlist_node *pos;
	struct rds_connection *conn;
	size_t i;

	rcu_read_lock();

	lens->nr = 0;
	lens->each = item_len;

	for (i = 0, head = rds_conn_hash; i < ARRAY_SIZE(rds_conn_hash);
	     i++, head++) {
		hlist_for_each_entry_rcu(conn, pos, head, c_hash_node) {

			/* XXX no c_lock usage.. */
			if (!visitor(conn, buffer))
				continue;

			/* We copy as much as we can fit in the buffer,
			 * but we count all items so that the caller
			 * can resize the buffer. */
			if (len >= item_len) {
				rds_info_copy(iter, buffer, item_len);
				len -= item_len;
			}
			lens->nr++;
		}
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rds_for_each_conn_info);

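/*
 * rds_conn_info_visitor() below is the canonical visitor for the iterator
 * above: it fills one fixed-size struct rds_info_connection per conn and
 * returns nonzero so that rds_for_each_conn_info() copies the record out
 * to userspace.
 */
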
static int rds_conn_info_visitor(struct rds_connection *conn,
				  void *buffer)
{
	struct rds_info_connection *cinfo = buffer;

	cinfo->next_tx_seq = conn->c_next_tx_seq;
	cinfo->next_rx_seq = conn->c_next_rx_seq;
	cinfo->laddr = conn->c_laddr;
	cinfo->faddr = conn->c_faddr;
	strncpy(cinfo->transport, conn->c_trans->t_name,
		sizeof(cinfo->transport));
	cinfo->flags = 0;

	rds_conn_info_set(cinfo->flags, test_bit(RDS_IN_XMIT, &conn->c_flags),
			  SENDING);
	/* XXX Future: return the state rather than these funky bits */
	rds_conn_info_set(cinfo->flags,
			  atomic_read(&conn->c_state) == RDS_CONN_CONNECTING,
			  CONNECTING);
	rds_conn_info_set(cinfo->flags,
			  atomic_read(&conn->c_state) == RDS_CONN_UP,
			  CONNECTED);
	return 1;
}

static void rds_conn_info(struct socket *sock, unsigned int len,
			  struct rds_info_iterator *iter,
			  struct rds_info_lengths *lens)
{
	rds_for_each_conn_info(sock, len, iter, lens,
				rds_conn_info_visitor,
				sizeof(struct rds_info_connection));
}

int rds_conn_init(void)
{
	rds_conn_slab = kmem_cache_create("rds_connection",
					  sizeof(struct rds_connection),
					  0, 0, NULL);
	if (!rds_conn_slab)
		return -ENOMEM;

	rds_info_register_func(RDS_INFO_CONNECTIONS, rds_conn_info);
	rds_info_register_func(RDS_INFO_SEND_MESSAGES,
			       rds_conn_message_info_send);
	rds_info_register_func(RDS_INFO_RETRANS_MESSAGES,
			       rds_conn_message_info_retrans);

	return 0;
}

void rds_conn_exit(void)
{
	rds_loop_exit();

	WARN_ON(!hlist_empty(rds_conn_hash));

	kmem_cache_destroy(rds_conn_slab);

	rds_info_deregister_func(RDS_INFO_CONNECTIONS, rds_conn_info);
	rds_info_deregister_func(RDS_INFO_SEND_MESSAGES,
				 rds_conn_message_info_send);
	rds_info_deregister_func(RDS_INFO_RETRANS_MESSAGES,
				 rds_conn_message_info_retrans);
}

/*
 * Force a disconnect
 */
void rds_conn_drop(struct rds_connection *conn)
{
	atomic_set(&conn->c_state, RDS_CONN_ERROR);
	queue_work(rds_wq, &conn->c_down_w);
}
EXPORT_SYMBOL_GPL(rds_conn_drop);

/*
 * If the connection is down, trigger a connect. We may have scheduled a
 * delayed reconnect however - in this case we should not interfere.
 */
void rds_conn_connect_if_down(struct rds_connection *conn)
{
	if (rds_conn_state(conn) == RDS_CONN_DOWN &&
	    !test_and_set_bit(RDS_RECONNECT_PENDING, &conn->c_flags))
		queue_delayed_work(rds_wq, &conn->c_conn_w, 0);
}
EXPORT_SYMBOL_GPL(rds_conn_connect_if_down);

/*
 * An error occurred on the connection
 */
void
__rds_conn_error(struct rds_connection *conn, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vprintk(fmt, ap);
	va_end(ap);

	rds_conn_drop(conn);
}