/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <net/tcp.h>

#include "rds.h"
#include "tcp.h"

static struct kmem_cache *rds_tcp_incoming_slab;

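/* Free the skb clones queued on an incoming message. */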
static void rds_tcp_inc_purge(struct rds_incoming *inc)
{
	struct rds_tcp_incoming *tinc;
	tinc = container_of(inc, struct rds_tcp_incoming, ti_inc);
	rdsdebug("purging tinc %p inc %p\n", tinc, inc);
	skb_queue_purge(&tinc->ti_skb_list);
}

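/* Purge the skb list and return the rds_tcp_incoming to its slab cache. */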
void rds_tcp_inc_free(struct rds_incoming *inc)
{
	struct rds_tcp_incoming *tinc;
	tinc = container_of(inc, struct rds_tcp_incoming, ti_inc);
	rds_tcp_inc_purge(inc);
	rdsdebug("freeing tinc %p inc %p\n", tinc, inc);
	kmem_cache_free(rds_tcp_incoming_slab, tinc);
}

/*
 * Copy the payload skbs queued on this incoming message into the
 * receiver's iovec.  Not pretty, but it does the job.
 */
int rds_tcp_inc_copy_to_user(struct rds_incoming *inc, struct iovec *first_iov,
			     size_t size)
{
	struct rds_tcp_incoming *tinc;
	struct iovec *iov, tmp;
	struct sk_buff *skb;
	unsigned long to_copy, skb_off;
	int ret = 0;

	if (size == 0)
		goto out;

	tinc = container_of(inc, struct rds_tcp_incoming, ti_inc);
	iov = first_iov;
	tmp = *iov;

	skb_queue_walk(&tinc->ti_skb_list, skb) {
		skb_off = 0;
		while (skb_off < skb->len) {
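			/* advance past any exhausted iovec entries */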
			while (tmp.iov_len == 0) {
				iov++;
				tmp = *iov;
			}

			to_copy = min(tmp.iov_len, size);
			to_copy = min(to_copy, skb->len - skb_off);

			rdsdebug("ret %d size %zu skb %p skb_off %lu "
				 "skblen %d iov_base %p iov_len %zu cpy %lu\n",
				 ret, size, skb, skb_off, skb->len,
				 tmp.iov_base, tmp.iov_len, to_copy);

			/* modifies tmp as it copies */
			if (skb_copy_datagram_iovec(skb, skb_off, &tmp,
						    to_copy)) {
				ret = -EFAULT;
				goto out;
			}

			rds_stats_add(s_copy_to_user, to_copy);
			size -= to_copy;
			ret += to_copy;
			skb_off += to_copy;
			if (size == 0)
				goto out;
		}
	}
out:
	return ret;
}

/*
 * We have a series of skbs that have fragmented pieces of the congestion
 * bitmap.  They must add up to the exact size of the congestion bitmap.  We
 * use the skb helpers to copy those into the pages that make up the in-memory
 * congestion bitmap for the remote address of this connection.  We then tell
 * the congestion core that the bitmap has been changed so that it can wake up
 * sleepers.
 *
 * This is racing with sending paths which are using test_bit to see if the
 * bitmap indicates that their recipient is congested.
 */

static void rds_tcp_cong_recv(struct rds_connection *conn,
			      struct rds_tcp_incoming *tinc)
{
	struct sk_buff *skb;
	unsigned int to_copy, skb_off;
	unsigned int map_off;
	unsigned int map_page;
	struct rds_cong_map *map;
	int ret;

	/* catch completely corrupt packets */
	if (be32_to_cpu(tinc->ti_inc.i_hdr.h_len) != RDS_CONG_MAP_BYTES)
		return;

	map_page = 0;
	map_off = 0;
	map = conn->c_fcong;

	skb_queue_walk(&tinc->ti_skb_list, skb) {
		skb_off = 0;
		while (skb_off < skb->len) {
			to_copy = min_t(unsigned int, PAGE_SIZE - map_off,
					skb->len - skb_off);

			BUG_ON(map_page >= RDS_CONG_MAP_PAGES);

			/* only returns 0 or -error */
			ret = skb_copy_bits(skb, skb_off,
				(void *)map->m_page_addrs[map_page] + map_off,
				to_copy);
			BUG_ON(ret != 0);

			skb_off += to_copy;
			map_off += to_copy;
			if (map_off == PAGE_SIZE) {
				map_off = 0;
				map_page++;
			}
		}
	}

	rds_cong_map_updated(map, ~(u64) 0);
}

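/*
 * Argument block handed to tcp_read_sock() via read_descriptor_t and
 * passed through to the rds_tcp_data_recv() callback below.
 */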
struct rds_tcp_desc_arg {
	struct rds_connection *conn;
	gfp_t gfp;
	enum km_type km;
};

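/*
 * tcp_read_sock() callback: carve RDS headers and payload out of the TCP
 * byte stream and accumulate them in the connection's current
 * rds_tcp_incoming, delivering each message once it is complete.
 */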
static int rds_tcp_data_recv(read_descriptor_t *desc, struct sk_buff *skb,
			     unsigned int offset, size_t len)
{
	struct rds_tcp_desc_arg *arg = desc->arg.data;
	struct rds_connection *conn = arg->conn;
	struct rds_tcp_connection *tc = conn->c_transport_data;
	struct rds_tcp_incoming *tinc = tc->t_tinc;
	struct sk_buff *clone;
	size_t left = len, to_copy;

	rdsdebug("tcp data tc %p skb %p offset %u len %zu\n", tc, skb, offset,
		 len);

	/*
	 * tcp_read_sock() interprets partial progress as an indication to stop
	 * processing.
	 */
	while (left) {
		if (!tinc) {
			tinc = kmem_cache_alloc(rds_tcp_incoming_slab,
						arg->gfp);
			if (!tinc) {
				desc->error = -ENOMEM;
				goto out;
			}
			tc->t_tinc = tinc;
			rdsdebug("alloced tinc %p\n", tinc);
			rds_inc_init(&tinc->ti_inc, conn, conn->c_faddr);
			/*
			 * XXX we might be able to use the __ variants when
			 * we've already serialized at a higher level.
			 */
			skb_queue_head_init(&tinc->ti_skb_list);
		}

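		/* copy header bytes, which may be split across callbacks */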
		if (left && tc->t_tinc_hdr_rem) {
			to_copy = min(tc->t_tinc_hdr_rem, left);
			rdsdebug("copying %zu header from skb %p\n", to_copy,
				 skb);
			skb_copy_bits(skb, offset,
				      (char *)&tinc->ti_inc.i_hdr +
						sizeof(struct rds_header) -
						tc->t_tinc_hdr_rem,
				      to_copy);
			tc->t_tinc_hdr_rem -= to_copy;
			left -= to_copy;
			offset += to_copy;

			if (tc->t_tinc_hdr_rem == 0) {
				/* could be 0 for a 0 len message */
				tc->t_tinc_data_rem =
					be32_to_cpu(tinc->ti_inc.i_hdr.h_len);
			}
		}

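		/* queue an skb clone covering just this message's payload */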
		if (left && tc->t_tinc_data_rem) {
			clone = skb_clone(skb, arg->gfp);
			if (!clone) {
				desc->error = -ENOMEM;
				goto out;
			}

			to_copy = min(tc->t_tinc_data_rem, left);
			pskb_pull(clone, offset);
			pskb_trim(clone, to_copy);
			skb_queue_tail(&tinc->ti_skb_list, clone);

			rdsdebug("skb %p data %p len %d off %u to_copy %zu -> "
				 "clone %p data %p len %d\n",
				 skb, skb->data, skb->len, offset, to_copy,
				 clone, clone->data, clone->len);

			tc->t_tinc_data_rem -= to_copy;
			left -= to_copy;
			offset += to_copy;
		}

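		/* message complete: deliver to the cong map or recv path */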
		if (tc->t_tinc_hdr_rem == 0 && tc->t_tinc_data_rem == 0) {
			if (tinc->ti_inc.i_hdr.h_flags == RDS_FLAG_CONG_BITMAP)
				rds_tcp_cong_recv(conn, tinc);
			else
				rds_recv_incoming(conn, conn->c_faddr,
						  conn->c_laddr, &tinc->ti_inc,
						  arg->gfp, arg->km);

			tc->t_tinc_hdr_rem = sizeof(struct rds_header);
			tc->t_tinc_data_rem = 0;
			tc->t_tinc = NULL;
			rds_inc_put(&tinc->ti_inc);
			tinc = NULL;
		}
	}
out:
	rdsdebug("returning len %zu left %zu skb len %d rx queue depth %d\n",
		 len, left, skb->len,
		 skb_queue_len(&tc->t_sock->sk->sk_receive_queue));
	return len - left;
}

/* the caller has to hold the sock lock */
static int rds_tcp_read_sock(struct rds_connection *conn, gfp_t gfp,
			     enum km_type km)
{
	struct rds_tcp_connection *tc = conn->c_transport_data;
	struct socket *sock = tc->t_sock;
	read_descriptor_t desc;
	struct rds_tcp_desc_arg arg;

	/* It's like glib in the kernel! */
	arg.conn = conn;
	arg.gfp = gfp;
	arg.km = km;
	desc.arg.data = &arg;
	desc.error = 0;
	desc.count = 1; /* give more than one skb per call */

	tcp_read_sock(sock->sk, &desc, rds_tcp_data_recv);
	rdsdebug("tcp_read_sock for tc %p gfp 0x%x returned %d\n", tc, gfp,
		 desc.error);

	return desc.error;
}

/*
 * We hold the sock lock to serialize our rds_tcp_recv->tcp_read_sock from
 * data_ready.
 *
 * If we fail to allocate we're in trouble; blindly wait some time before
 * trying again to see if the VM can free up something for us.
 */
int rds_tcp_recv(struct rds_connection *conn)
{
	struct rds_tcp_connection *tc = conn->c_transport_data;
	struct socket *sock = tc->t_sock;
	int ret = 0;

	rdsdebug("recv worker conn %p tc %p sock %p\n", conn, tc, sock);

	lock_sock(sock->sk);
	ret = rds_tcp_read_sock(conn, GFP_KERNEL, KM_USER0);
	release_sock(sock->sk);

	return ret;
}

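/*
 * sk_data_ready callback installed on the RDS TCP socket; it runs in
 * softirq context, so it reads with GFP_ATOMIC and defers to the recv
 * worker if an allocation fails.
 */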
void rds_tcp_data_ready(struct sock *sk, int bytes)
{
	void (*ready)(struct sock *sk, int bytes);
	struct rds_connection *conn;
	struct rds_tcp_connection *tc;

	rdsdebug("data ready sk %p bytes %d\n", sk, bytes);

	read_lock_bh(&sk->sk_callback_lock);
	conn = sk->sk_user_data;
	if (!conn) { /* check for teardown race */
		ready = sk->sk_data_ready;
		goto out;
	}

	tc = conn->c_transport_data;
	ready = tc->t_orig_data_ready;
	rds_tcp_stats_inc(s_tcp_data_ready_calls);

	if (rds_tcp_read_sock(conn, GFP_ATOMIC, KM_SOFTIRQ0) == -ENOMEM)
		queue_delayed_work(rds_wq, &conn->c_recv_w, 0);
out:
	read_unlock_bh(&sk->sk_callback_lock);
	ready(sk, bytes);
}

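/* set up the slab cache used for rds_tcp_incoming allocations */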
int rds_tcp_recv_init(void)
{
	rds_tcp_incoming_slab = kmem_cache_create("rds_tcp_incoming",
					sizeof(struct rds_tcp_incoming),
					0, 0, NULL);
	if (!rds_tcp_incoming_slab)
		return -ENOMEM;
	return 0;
}

void rds_tcp_recv_exit(void)
{
	kmem_cache_destroy(rds_tcp_incoming_slab);
}