/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <net/sock.h>
#include <linux/in.h>
#include <linux/list.h>

#include "rds.h"

/* When transmitting messages in rds_send_xmit, we need to emerge from
 * time to time and briefly release the CPU. Otherwise the soft lockup
 * watchdog will kick our shin.
 * Also, it seems fairer to not let one busy connection stall all the
 * others.
 *
 * send_batch_count is the number of times we'll loop in send_xmit. Setting
 * it to 0 will restore the old behavior (where we looped until we had
 * drained the queue).
 */
static int send_batch_count = 64;
module_param(send_batch_count, int, 0444);
MODULE_PARM_DESC(send_batch_count, "batch factor when working the send queue");
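/*
 * Illustrative usage, not from the source: assuming the module is built
 * as rds.ko, the 0444 permissions make this parameter read-only at
 * runtime, so it can only be set at load time, e.g.
 * "modprobe rds send_batch_count=128", and afterwards inspected via
 * /sys/module/rds/parameters/send_batch_count.
 */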

static void rds_send_remove_from_sock(struct list_head *messages, int status);

/*
 * Reset the send state.  Callers must ensure that this doesn't race with
 * rds_send_xmit().
 */
void rds_send_reset(struct rds_connection *conn)
{
	struct rds_message *rm, *tmp;
	unsigned long flags;

	if (conn->c_xmit_rm) {
		rm = conn->c_xmit_rm;
		conn->c_xmit_rm = NULL;
		/* Tell the user the RDMA op is no longer mapped by the
		 * transport. This isn't entirely true (it's flushed out
		 * independently) but as the connection is down, there's
		 * no ongoing RDMA to/from that memory */
		rds_message_unmapped(rm);
		rds_message_put(rm);
	}

	conn->c_xmit_sg = 0;
	conn->c_xmit_hdr_off = 0;
	conn->c_xmit_data_off = 0;
	conn->c_xmit_atomic_sent = 0;
	conn->c_xmit_rdma_sent = 0;
	conn->c_xmit_data_sent = 0;

	conn->c_map_queued = 0;

	conn->c_unacked_packets = rds_sysctl_max_unacked_packets;
	conn->c_unacked_bytes = rds_sysctl_max_unacked_bytes;

	/* Mark messages as retransmissions, and move them to the send queue */
	spin_lock_irqsave(&conn->c_lock, flags);
	list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
		set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);
		set_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags);
	}
	list_splice_init(&conn->c_retrans, &conn->c_send_queue);
	spin_unlock_irqrestore(&conn->c_lock, flags);
}
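/*
 * The "callers must ensure" contract above is met in practice by the
 * teardown path: per the ordering comment in rds_send_xmit() below,
 * rds_conn_shutdown() marks the connection down and then tests
 * RDS_IN_XMIT (sleeping on conn->c_waitq until it clears), so a reset
 * never overlaps a transmit that already holds the bit.
 */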

static int acquire_in_xmit(struct rds_connection *conn)
{
	return test_and_set_bit(RDS_IN_XMIT, &conn->c_flags) == 0;
}

static void release_in_xmit(struct rds_connection *conn)
{
	clear_bit(RDS_IN_XMIT, &conn->c_flags);
	smp_mb__after_clear_bit();
	/*
	 * We don't use wait_on_bit()/wake_up_bit() because our waking is in a
	 * hot path and finding waiters is very rare.  We don't want to walk
	 * the system-wide hashed waitqueue buckets in the fast path only to
	 * almost never find waiters.
	 */
	if (waitqueue_active(&conn->c_waitq))
		wake_up_all(&conn->c_waitq);
}
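/*
 * Minimal usage sketch for the pair above (illustrative only;
 * rds_send_xmit() below is the real user):
 *
 *	if (!acquire_in_xmit(conn))
 *		return;		- someone else is already transmitting
 *	... push messages down the transport ...
 *	release_in_xmit(conn);	- wakes any waiter sleeping on c_waitq
 */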

/*
 * We're making the conscious trade-off here to only send one message
 * down the connection at a time.
 *   Pro:
 *      - tx queueing is a simple fifo list
 *      - reassembly is optional and easily done by transports per conn
 *      - no per flow rx lookup at all, straight to the socket
 *      - less per-frag memory and wire overhead
 *   Con:
 *      - queued acks can be delayed behind large messages
 *   Depends:
 *      - small message latency is higher behind queued large messages
 *      - large message latency isn't starved by intervening small sends
 */
int rds_send_xmit(struct rds_connection *conn)
{
	struct rds_message *rm;
	unsigned long flags;
	unsigned int tmp;
	struct scatterlist *sg;
	int ret = 0;
	LIST_HEAD(to_be_dropped);

restart:

	/*
	 * sendmsg calls here after having queued its message on the send
	 * queue.  We only have one task feeding the connection at a time.  If
	 * another thread is already feeding the queue then we back off.  This
	 * avoids blocking the caller and trading per-connection data between
	 * caches per message.
	 */
	if (!acquire_in_xmit(conn)) {
		rds_stats_inc(s_send_lock_contention);
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * rds_conn_shutdown() sets the conn state and then tests RDS_IN_XMIT;
	 * we do the opposite to avoid races.
	 */
	if (!rds_conn_up(conn)) {
		release_in_xmit(conn);
		ret = 0;
		goto out;
	}

	if (conn->c_trans->xmit_prepare)
		conn->c_trans->xmit_prepare(conn);

	/*
	 * spin trying to push headers and data down the connection until
	 * the connection stops making forward progress.
	 */
	while (1) {

		rm = conn->c_xmit_rm;

		/*
		 * If we are between messages, we can send a pending congestion
		 * map update.
		 */
		if (!rm && test_and_clear_bit(0, &conn->c_map_queued)) {
			rm = rds_cong_update_alloc(conn);
			if (IS_ERR(rm)) {
				ret = PTR_ERR(rm);
				break;
			}
			rm->data.op_active = 1;

			conn->c_xmit_rm = rm;
		}

		/*
		 * If not already working on one, grab the next message.
		 *
		 * c_xmit_rm holds a ref while we're sending this message down
		 * the connection.  We can use this ref while we hold
		 * RDS_IN_XMIT; rds_send_reset() is serialized with it.
		 */
		if (!rm) {
			unsigned int len;

			spin_lock_irqsave(&conn->c_lock, flags);

			if (!list_empty(&conn->c_send_queue)) {
				rm = list_entry(conn->c_send_queue.next,
						struct rds_message,
						m_conn_item);
				rds_message_addref(rm);

				/*
				 * Move the message from the send queue to the retransmit
				 * list right away.
				 */
				list_move_tail(&rm->m_conn_item, &conn->c_retrans);
			}

			spin_unlock_irqrestore(&conn->c_lock, flags);

			if (!rm)
				break;

			/* Unfortunately, the way InfiniBand deals with
			 * RDMA to a bad MR key is by moving the entire
			 * queue pair to error state. We could possibly
			 * recover from that, but right now we drop the
			 * connection.
			 * Therefore, we never retransmit messages with RDMA ops.
			 */
			if (rm->rdma.op_active &&
			    test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags)) {
				spin_lock_irqsave(&conn->c_lock, flags);
				if (test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags))
					list_move(&rm->m_conn_item, &to_be_dropped);
				spin_unlock_irqrestore(&conn->c_lock, flags);
				continue;
			}

			/* Require an ACK every once in a while */
			len = ntohl(rm->m_inc.i_hdr.h_len);
			if (conn->c_unacked_packets == 0 ||
			    conn->c_unacked_bytes < len) {
				__set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);

				conn->c_unacked_packets = rds_sysctl_max_unacked_packets;
				conn->c_unacked_bytes = rds_sysctl_max_unacked_bytes;
				rds_stats_inc(s_send_ack_required);
			} else {
				conn->c_unacked_bytes -= len;
				conn->c_unacked_packets--;
			}
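
			/*
			 * Example with hypothetical sysctl values: if
			 * max_unacked_packets were 8 and max_unacked_bytes
			 * 64K, the counters count down from those limits and
			 * the first message after they are exhausted (the
			 * 9th, or the first that no longer fits in the byte
			 * budget) carries RDS_MSG_ACK_REQUIRED and refills
			 * both counters.
			 */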

			conn->c_xmit_rm = rm;
		}

		/* The transport either sends the whole rdma or none of it */
		if (rm->rdma.op_active && !conn->c_xmit_rdma_sent) {
			rm->m_final_op = &rm->rdma;
			ret = conn->c_trans->xmit_rdma(conn, &rm->rdma);
			if (ret)
				break;
			conn->c_xmit_rdma_sent = 1;

			/* The transport owns the mapped memory for now.
			 * You can't unmap it while it's on the send queue */
			set_bit(RDS_MSG_MAPPED, &rm->m_flags);
		}

		if (rm->atomic.op_active && !conn->c_xmit_atomic_sent) {
			rm->m_final_op = &rm->atomic;
			ret = conn->c_trans->xmit_atomic(conn, &rm->atomic);
			if (ret)
				break;
			conn->c_xmit_atomic_sent = 1;

			/* The transport owns the mapped memory for now.
			 * You can't unmap it while it's on the send queue */
			set_bit(RDS_MSG_MAPPED, &rm->m_flags);
		}

		/*
		 * A number of cases require an RDS header to be sent
		 * even if there is no data.
		 * We permit 0-byte sends; rds-ping depends on this.
		 * However, if there are exclusively attached silent ops,
		 * we skip the hdr/data send, to enable silent operation.
		 */
		if (rm->data.op_nents == 0) {
			int ops_present;
			int all_ops_are_silent = 1;

			ops_present = (rm->atomic.op_active || rm->rdma.op_active);
			if (rm->atomic.op_active && !rm->atomic.op_silent)
				all_ops_are_silent = 0;
			if (rm->rdma.op_active && !rm->rdma.op_silent)
				all_ops_are_silent = 0;

			if (ops_present && all_ops_are_silent
			    && !rm->m_rdma_cookie)
				rm->data.op_active = 0;
		}

		if (rm->data.op_active && !conn->c_xmit_data_sent) {
			rm->m_final_op = &rm->data;
			ret = conn->c_trans->xmit(conn, rm,
						  conn->c_xmit_hdr_off,
						  conn->c_xmit_sg,
						  conn->c_xmit_data_off);
			if (ret <= 0)
				break;

			if (conn->c_xmit_hdr_off < sizeof(struct rds_header)) {
				tmp = min_t(int, ret,
					    sizeof(struct rds_header) -
					    conn->c_xmit_hdr_off);
				conn->c_xmit_hdr_off += tmp;
				ret -= tmp;
			}

			sg = &rm->data.op_sg[conn->c_xmit_sg];
			while (ret) {
				tmp = min_t(int, ret, sg->length -
						      conn->c_xmit_data_off);
				conn->c_xmit_data_off += tmp;
				ret -= tmp;
				if (conn->c_xmit_data_off == sg->length) {
					conn->c_xmit_data_off = 0;
					sg++;
					conn->c_xmit_sg++;
					BUG_ON(ret != 0 &&
					       conn->c_xmit_sg == rm->data.op_nents);
				}
			}

			if (conn->c_xmit_hdr_off == sizeof(struct rds_header) &&
			    (conn->c_xmit_sg == rm->data.op_nents))
				conn->c_xmit_data_sent = 1;
		}
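
		/*
		 * Worked example of the accounting above (assuming the
		 * usual 48-byte struct rds_header): if the transport
		 * reported 4096 bytes sent on the first call, the first 48
		 * bytes complete the header and the remaining 4048 advance
		 * c_xmit_data_off/c_xmit_sg through the data scatterlist;
		 * a later partial send resumes from exactly those offsets.
		 */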

		/*
		 * An rm will only take multiple passes through this loop
		 * if there is a data op. Thus, if the data is sent (or there was
		 * none), then we're done with the rm.
		 */
		if (!rm->data.op_active || conn->c_xmit_data_sent) {
			conn->c_xmit_rm = NULL;
			conn->c_xmit_sg = 0;
			conn->c_xmit_hdr_off = 0;
			conn->c_xmit_data_off = 0;
			conn->c_xmit_rdma_sent = 0;
			conn->c_xmit_atomic_sent = 0;
			conn->c_xmit_data_sent = 0;

			rds_message_put(rm);
		}
	}

	if (conn->c_trans->xmit_complete)
		conn->c_trans->xmit_complete(conn);

	release_in_xmit(conn);

	/* Nuke any messages we decided not to retransmit. */
	if (!list_empty(&to_be_dropped)) {
		/* irqs on here, so we can put(), unlike above */
		list_for_each_entry(rm, &to_be_dropped, m_conn_item)
			rds_message_put(rm);
		rds_send_remove_from_sock(&to_be_dropped, RDS_RDMA_DROPPED);
	}

	/*
	 * Other senders can queue a message after we last test the send queue
	 * but before we clear RDS_IN_XMIT.  In that case they'd back off and
	 * not try and send their newly queued message.  We need to check the
	 * send queue after having cleared RDS_IN_XMIT so that their message
	 * doesn't get stuck on the send queue.
	 *
	 * If the transport cannot continue (i.e. ret != 0), then it must
	 * call us when more room is available, such as from the tx
	 * completion handler.
	 */
	if (ret == 0) {
		smp_mb();
		if (!list_empty(&conn->c_send_queue)) {
			rds_stats_inc(s_send_lock_queue_raced);
			goto restart;
		}
	}
out:
	return ret;
}

static void rds_send_sndbuf_remove(struct rds_sock *rs, struct rds_message *rm)
{
	u32 len = be32_to_cpu(rm->m_inc.i_hdr.h_len);

	assert_spin_locked(&rs->rs_lock);

	BUG_ON(rs->rs_snd_bytes < len);
	rs->rs_snd_bytes -= len;

	if (rs->rs_snd_bytes == 0)
		rds_stats_inc(s_send_queue_empty);
}

static inline int rds_send_is_acked(struct rds_message *rm, u64 ack,
				    is_acked_func is_acked)
{
	if (is_acked)
		return is_acked(rm, ack);
	return be64_to_cpu(rm->m_inc.i_hdr.h_sequence) <= ack;
}
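
/*
 * Sketch of a transport-supplied is_acked_func (illustrative, loosely
 * modeled on the TCP transport, which cannot trust m_ack_seq until
 * RDS_MSG_HAS_ACK_SEQ has been set):
 *
 *	static int example_is_acked(struct rds_message *rm, u64 ack)
 *	{
 *		if (!test_bit(RDS_MSG_HAS_ACK_SEQ, &rm->m_flags))
 *			return 0;
 *		return (__s64)ack - (__s64)rm->m_ack_seq >= 0;
 *	}
 */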

/*
 * This is pretty similar to what happens below in the ACK
 * handling code - except that we call here as soon as we get
 * the IB send completion on the RDMA op and the accompanying
 * message.
 */
void rds_rdma_send_complete(struct rds_message *rm, int status)
{
	struct rds_sock *rs = NULL;
	struct rm_rdma_op *ro;
	struct rds_notifier *notifier;
	unsigned long flags;

	spin_lock_irqsave(&rm->m_rs_lock, flags);

	ro = &rm->rdma;
	if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags) &&
	    ro->op_active && ro->op_notify && ro->op_notifier) {
		notifier = ro->op_notifier;
		rs = rm->m_rs;
		sock_hold(rds_rs_to_sk(rs));

		notifier->n_status = status;
		spin_lock(&rs->rs_lock);
		list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
		spin_unlock(&rs->rs_lock);

		ro->op_notifier = NULL;
	}

	spin_unlock_irqrestore(&rm->m_rs_lock, flags);

	if (rs) {
		rds_wake_sk_sleep(rs);
		sock_put(rds_rs_to_sk(rs));
	}
}
EXPORT_SYMBOL_GPL(rds_rdma_send_complete);

/*
 * Just like above, except it looks at the atomic op.
 */
void rds_atomic_send_complete(struct rds_message *rm, int status)
{
	struct rds_sock *rs = NULL;
	struct rm_atomic_op *ao;
	struct rds_notifier *notifier;
	unsigned long flags;

	spin_lock_irqsave(&rm->m_rs_lock, flags);

	ao = &rm->atomic;
	if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags)
	    && ao->op_active && ao->op_notify && ao->op_notifier) {
		notifier = ao->op_notifier;
		rs = rm->m_rs;
		sock_hold(rds_rs_to_sk(rs));

		notifier->n_status = status;
		spin_lock(&rs->rs_lock);
		list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
		spin_unlock(&rs->rs_lock);

		ao->op_notifier = NULL;
	}

	spin_unlock_irqrestore(&rm->m_rs_lock, flags);

	if (rs) {
		rds_wake_sk_sleep(rs);
		sock_put(rds_rs_to_sk(rs));
	}
}
EXPORT_SYMBOL_GPL(rds_atomic_send_complete);

/*
 * This is the same as rds_rdma_send_complete except we
 * don't do any locking - we have all the ingredients (message,
 * socket, socket lock) and can just move the notifier.
 */
static inline void
__rds_send_complete(struct rds_sock *rs, struct rds_message *rm, int status)
{
	struct rm_rdma_op *ro;
	struct rm_atomic_op *ao;

	ro = &rm->rdma;
	if (ro->op_active && ro->op_notify && ro->op_notifier) {
		ro->op_notifier->n_status = status;
		list_add_tail(&ro->op_notifier->n_list, &rs->rs_notify_queue);
		ro->op_notifier = NULL;
	}

	ao = &rm->atomic;
	if (ao->op_active && ao->op_notify && ao->op_notifier) {
		ao->op_notifier->n_status = status;
		list_add_tail(&ao->op_notifier->n_list, &rs->rs_notify_queue);
		ao->op_notifier = NULL;
	}

	/* No need to wake the app - caller does this */
}

/*
 * This is called from the IB send completion when we detect
 * an RDMA operation that failed with remote access error.
 * So speed is not an issue here.
 */
struct rds_message *rds_send_get_message(struct rds_connection *conn,
					 struct rm_rdma_op *op)
{
	struct rds_message *rm, *tmp, *found = NULL;
	unsigned long flags;

	spin_lock_irqsave(&conn->c_lock, flags);

	list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
		if (&rm->rdma == op) {
			atomic_inc(&rm->m_refcount);
			found = rm;
			goto out;
		}
	}

	list_for_each_entry_safe(rm, tmp, &conn->c_send_queue, m_conn_item) {
		if (&rm->rdma == op) {
			atomic_inc(&rm->m_refcount);
			found = rm;
			break;
		}
	}

out:
	spin_unlock_irqrestore(&conn->c_lock, flags);

	return found;
}
EXPORT_SYMBOL_GPL(rds_send_get_message);

/*
 * This removes messages from the socket's list if they're on it.  The list
 * argument must be private to the caller; we must be able to modify it
 * without locks.  The messages must have a reference held for their
 * position on the list.  This function will drop that reference after
 * removing the messages from the 'messages' list regardless of whether it
 * found the messages on the socket list or not.
 */
static void rds_send_remove_from_sock(struct list_head *messages, int status)
{
	unsigned long flags;
	struct rds_sock *rs = NULL;
	struct rds_message *rm;

	while (!list_empty(messages)) {
		int was_on_sock = 0;

		rm = list_entry(messages->next, struct rds_message,
				m_conn_item);
		list_del_init(&rm->m_conn_item);

		/*
		 * If we see this flag cleared then we're *sure* that someone
		 * else beat us to removing it from the sock.  If we race
		 * with their flag update we'll get the lock and then really
		 * see that the flag has been cleared.
		 *
		 * The message spinlock makes sure nobody clears rm->m_rs
		 * while we're messing with it. It does not prevent the
		 * message from being removed from the socket, though.
		 */
		spin_lock_irqsave(&rm->m_rs_lock, flags);
		if (!test_bit(RDS_MSG_ON_SOCK, &rm->m_flags))
			goto unlock_and_drop;

		if (rs != rm->m_rs) {
			if (rs) {
				rds_wake_sk_sleep(rs);
				sock_put(rds_rs_to_sk(rs));
			}
			rs = rm->m_rs;
			sock_hold(rds_rs_to_sk(rs));
		}
		spin_lock(&rs->rs_lock);

		if (test_and_clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags)) {
			struct rm_rdma_op *ro = &rm->rdma;
			struct rds_notifier *notifier;

			list_del_init(&rm->m_sock_item);
			rds_send_sndbuf_remove(rs, rm);

			if (ro->op_active && ro->op_notifier &&
			    (ro->op_notify || (ro->op_recverr && status))) {
				notifier = ro->op_notifier;
				list_add_tail(&notifier->n_list,
					      &rs->rs_notify_queue);
				if (!notifier->n_status)
					notifier->n_status = status;
				rm->rdma.op_notifier = NULL;
			}
			was_on_sock = 1;
			rm->m_rs = NULL;
		}
		spin_unlock(&rs->rs_lock);

unlock_and_drop:
		spin_unlock_irqrestore(&rm->m_rs_lock, flags);
		rds_message_put(rm);
		if (was_on_sock)
			rds_message_put(rm);
	}

	if (rs) {
		rds_wake_sk_sleep(rs);
		sock_put(rds_rs_to_sk(rs));
	}
}

/*
 * Transports call here when they've determined that the receiver queued
 * messages up to, and including, the given sequence number.  Messages are
 * moved to the retrans queue when rds_send_xmit picks them off the send
 * queue. This means that in the TCP case, the message may not have been
 * assigned the m_ack_seq yet - but that's fine as long as tcp_is_acked
 * checks the RDS_MSG_HAS_ACK_SEQ bit.
 *
 * XXX It's not clear to me how this is safely serialized with socket
 * destruction.  Maybe it should bail if it sees SOCK_DEAD.
 */
void rds_send_drop_acked(struct rds_connection *conn, u64 ack,
			 is_acked_func is_acked)
{
	struct rds_message *rm, *tmp;
	unsigned long flags;
	LIST_HEAD(list);

	spin_lock_irqsave(&conn->c_lock, flags);

	list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
		if (!rds_send_is_acked(rm, ack, is_acked))
			break;

		list_move(&rm->m_conn_item, &list);
		clear_bit(RDS_MSG_ON_CONN, &rm->m_flags);
	}

	/* order flag updates with spin locks */
	if (!list_empty(&list))
		smp_mb__after_clear_bit();

	spin_unlock_irqrestore(&conn->c_lock, flags);

	/* now remove the messages from the sock list as needed */
	rds_send_remove_from_sock(&list, RDS_RDMA_SUCCESS);
}
EXPORT_SYMBOL_GPL(rds_send_drop_acked);

void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest)
{
	struct rds_message *rm, *tmp;
	struct rds_connection *conn;
	unsigned long flags;
	LIST_HEAD(list);

	/* get all the messages we're dropping under the rs lock */
	spin_lock_irqsave(&rs->rs_lock, flags);

	list_for_each_entry_safe(rm, tmp, &rs->rs_send_queue, m_sock_item) {
		if (dest && (dest->sin_addr.s_addr != rm->m_daddr ||
			     dest->sin_port != rm->m_inc.i_hdr.h_dport))
			continue;

		list_move(&rm->m_sock_item, &list);
		rds_send_sndbuf_remove(rs, rm);
		clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
	}

	/* order flag updates with the rs lock */
	smp_mb__after_clear_bit();

	spin_unlock_irqrestore(&rs->rs_lock, flags);

	if (list_empty(&list))
		return;

	/* Remove the messages from the conn */
	list_for_each_entry(rm, &list, m_sock_item) {

		conn = rm->m_inc.i_conn;

		spin_lock_irqsave(&conn->c_lock, flags);
		/*
		 * Maybe someone else beat us to removing rm from the conn.
		 * If we race with their flag update we'll get the lock and
		 * then really see that the flag has been cleared.
		 */
		if (!test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags)) {
			spin_unlock_irqrestore(&conn->c_lock, flags);
			continue;
		}
		list_del_init(&rm->m_conn_item);
		spin_unlock_irqrestore(&conn->c_lock, flags);

		/*
		 * Couldn't grab m_rs_lock in top loop (lock ordering),
		 * but we can now.
		 */
		spin_lock_irqsave(&rm->m_rs_lock, flags);

		spin_lock(&rs->rs_lock);
		__rds_send_complete(rs, rm, RDS_RDMA_CANCELED);
		spin_unlock(&rs->rs_lock);

		rm->m_rs = NULL;
		spin_unlock_irqrestore(&rm->m_rs_lock, flags);

		rds_message_put(rm);
	}

	rds_wake_sk_sleep(rs);

	while (!list_empty(&list)) {
		rm = list_entry(list.next, struct rds_message, m_sock_item);
		list_del_init(&rm->m_sock_item);

		rds_message_wait(rm);
		rds_message_put(rm);
	}
}

/*
 * We only want this to fire once so we use the caller's 'queued'.  It's
 * possible that another thread can race with us and remove the
 * message from the flow with RDS_CANCEL_SENT_TO.
 */
static int rds_send_queue_rm(struct rds_sock *rs, struct rds_connection *conn,
			     struct rds_message *rm, __be16 sport,
			     __be16 dport, int *queued)
{
	unsigned long flags;
	u32 len;

	if (*queued)
		goto out;

	len = be32_to_cpu(rm->m_inc.i_hdr.h_len);

	/* this is the only place which holds both the socket's rs_lock
	 * and the connection's c_lock */
	spin_lock_irqsave(&rs->rs_lock, flags);

	/*
	 * If there is only a little space left in sndbuf, we don't queue
	 * anything, and userspace gets -EAGAIN. But poll() indicates there's
	 * send room. This can lead to bad behavior (spinning) if snd_bytes
	 * isn't freed up by incoming acks. So we check the *old* value of
	 * rs_snd_bytes here to allow the last msg to exceed the buffer,
	 * and poll() now knows no more data can be sent.
	 */
	if (rs->rs_snd_bytes < rds_sk_sndbuf(rs)) {
		rs->rs_snd_bytes += len;

		/* let recv side know we are close to send space exhaustion.
		 * This is probably not the optimal way to do it, as this
		 * means we set the flag on *all* messages as soon as our
		 * throughput hits a certain threshold.
		 */
		if (rs->rs_snd_bytes >= rds_sk_sndbuf(rs) / 2)
			__set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);

		list_add_tail(&rm->m_sock_item, &rs->rs_send_queue);
		set_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
		rds_message_addref(rm);
		rm->m_rs = rs;

		/* The code ordering is a little weird, but we're
		 * trying to minimize the time we hold c_lock */
		rds_message_populate_header(&rm->m_inc.i_hdr, sport, dport, 0);
		rm->m_inc.i_conn = conn;
		rds_message_addref(rm);

		spin_lock(&conn->c_lock);
		rm->m_inc.i_hdr.h_sequence = cpu_to_be64(conn->c_next_tx_seq++);
		list_add_tail(&rm->m_conn_item, &conn->c_send_queue);
		set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
		spin_unlock(&conn->c_lock);

		rdsdebug("queued msg %p len %d, rs %p bytes %d seq %llu\n",
			 rm, len, rs, rs->rs_snd_bytes,
			 (unsigned long long)be64_to_cpu(rm->m_inc.i_hdr.h_sequence));

		*queued = 1;
	}

	spin_unlock_irqrestore(&rs->rs_lock, flags);
out:
	return *queued;
}
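
/*
 * Example of the *old*-value check above, with a hypothetical 64K
 * sndbuf: at rs_snd_bytes == 60K a 16K message still queues (60K < 64K
 * at test time) and pushes rs_snd_bytes to 76K; every later send fails
 * the test until ACKs drain it back under 64K, and poll() - which
 * compares against the same limit - stops reporting send room in the
 * meantime.
 */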

/*
 * rds_message is getting to be quite complicated, and we'd like to allocate
 * it all in one go. This figures out how big it needs to be up front.
 */
static int rds_rm_size(struct msghdr *msg, int data_len)
{
	struct cmsghdr *cmsg;
	int size = 0;
	int cmsg_groups = 0;
	int retval;

	for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;

		if (cmsg->cmsg_level != SOL_RDS)
			continue;

		switch (cmsg->cmsg_type) {
		case RDS_CMSG_RDMA_ARGS:
			cmsg_groups |= 1;
			retval = rds_rdma_extra_size(CMSG_DATA(cmsg));
			if (retval < 0)
				return retval;
			size += retval;

			break;

		case RDS_CMSG_RDMA_DEST:
		case RDS_CMSG_RDMA_MAP:
			cmsg_groups |= 2;
			/* these are valid but do not add any size */
			break;

		case RDS_CMSG_ATOMIC_CSWP:
		case RDS_CMSG_ATOMIC_FADD:
		case RDS_CMSG_MASKED_ATOMIC_CSWP:
		case RDS_CMSG_MASKED_ATOMIC_FADD:
			cmsg_groups |= 1;
			size += sizeof(struct scatterlist);
			break;

		default:
			return -EINVAL;
		}

	}

	size += ceil(data_len, PAGE_SIZE) * sizeof(struct scatterlist);

	/* Ensure (DEST, MAP) are never used with (ARGS, ATOMIC) */
	if (cmsg_groups == 3)
		return -EINVAL;

	return size;
}
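
/*
 * Example (assuming 4K pages): a 9000-byte payload plus a single
 * RDS_CMSG_ATOMIC_FADD cmsg needs ceil(9000, PAGE_SIZE) == 3 data
 * scatterlist entries and one more for the atomic op, so this returns
 * 4 * sizeof(struct scatterlist) of extra space to allocate alongside
 * the rds_message.
 */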

static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm,
			 struct msghdr *msg, int *allocated_mr)
{
	struct cmsghdr *cmsg;
	int ret = 0;

	for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;

		if (cmsg->cmsg_level != SOL_RDS)
			continue;

		/* As a side effect, RDMA_DEST and RDMA_MAP will set
		 * rm->rdma.m_rdma_cookie and rm->rdma.m_rdma_mr.
		 */
		switch (cmsg->cmsg_type) {
		case RDS_CMSG_RDMA_ARGS:
			ret = rds_cmsg_rdma_args(rs, rm, cmsg);
			break;

		case RDS_CMSG_RDMA_DEST:
			ret = rds_cmsg_rdma_dest(rs, rm, cmsg);
			break;

		case RDS_CMSG_RDMA_MAP:
			ret = rds_cmsg_rdma_map(rs, rm, cmsg);
			if (!ret)
				*allocated_mr = 1;
			break;

		case RDS_CMSG_ATOMIC_CSWP:
		case RDS_CMSG_ATOMIC_FADD:
		case RDS_CMSG_MASKED_ATOMIC_CSWP:
		case RDS_CMSG_MASKED_ATOMIC_FADD:
			ret = rds_cmsg_atomic(rs, rm, cmsg);
			break;

		default:
			return -EINVAL;
		}

		if (ret)
			break;
	}

	return ret;
}
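
/*
 * Userspace reaches this parser by attaching SOL_RDS control messages
 * to sendmsg().  A hedged sketch for the RDMA-args case (error handling
 * omitted; layout per the rds.h uapi of this era):
 *
 *	struct rds_rdma_args args = { ... };
 *	char buf[CMSG_SPACE(sizeof(args))];
 *	struct msghdr msg = { .msg_control = buf,
 *			      .msg_controllen = sizeof(buf) };
 *	struct cmsghdr *c = CMSG_FIRSTHDR(&msg);
 *
 *	c->cmsg_level = SOL_RDS;
 *	c->cmsg_type  = RDS_CMSG_RDMA_ARGS;
 *	c->cmsg_len   = CMSG_LEN(sizeof(args));
 *	memcpy(CMSG_DATA(c), &args, sizeof(args));
 *	sendmsg(fd, &msg, 0);
 */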

int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
		size_t payload_len)
{
	struct sock *sk = sock->sk;
	struct rds_sock *rs = rds_sk_to_rs(sk);
	struct sockaddr_in *usin = (struct sockaddr_in *)msg->msg_name;
	__be32 daddr;
	__be16 dport;
	struct rds_message *rm = NULL;
	struct rds_connection *conn;
	int ret = 0;
	int queued = 0, allocated_mr = 0;
	int nonblock = msg->msg_flags & MSG_DONTWAIT;
	long timeo = sock_sndtimeo(sk, nonblock);

	/* Mirror Linux UDP's mirroring of BSD error message compatibility */
	/* XXX: Perhaps MSG_MORE someday */
	if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_CMSG_COMPAT)) {
		printk(KERN_INFO "msg_flags 0x%08X\n", msg->msg_flags);
		ret = -EOPNOTSUPP;
		goto out;
	}

	if (msg->msg_namelen) {
		/* XXX fail non-unicast destination IPs? */
		if (msg->msg_namelen < sizeof(*usin) || usin->sin_family != AF_INET) {
			ret = -EINVAL;
			goto out;
		}
		daddr = usin->sin_addr.s_addr;
		dport = usin->sin_port;
	} else {
		/* We only care about consistency with ->connect() */
		lock_sock(sk);
		daddr = rs->rs_conn_addr;
		dport = rs->rs_conn_port;
		release_sock(sk);
	}

	/* racing with another thread binding seems ok here */
	if (daddr == 0 || rs->rs_bound_addr == 0) {
		ret = -ENOTCONN; /* XXX not a great errno */
		goto out;
	}

	/* size of rm including all sgs */
	ret = rds_rm_size(msg, payload_len);
	if (ret < 0)
		goto out;

	rm = rds_message_alloc(ret, GFP_KERNEL);
	if (!rm) {
		ret = -ENOMEM;
		goto out;
	}

	/* Attach data to the rm */
	if (payload_len) {
		rm->data.op_sg = rds_message_alloc_sgs(rm, ceil(payload_len, PAGE_SIZE));
		if (!rm->data.op_sg) {
			ret = -ENOMEM;
			goto out;
		}
		ret = rds_message_copy_from_user(rm, msg->msg_iov, payload_len);
		if (ret)
			goto out;
	}
	rm->data.op_active = 1;

	rm->m_daddr = daddr;

	/* rds_conn_create has a spinlock that runs with IRQ off.
	 * Caching the conn in the socket helps a lot. */
	if (rs->rs_conn && rs->rs_conn->c_faddr == daddr)
		conn = rs->rs_conn;
	else {
		conn = rds_conn_create_outgoing(rs->rs_bound_addr, daddr,
						rs->rs_transport,
						sock->sk->sk_allocation);
		if (IS_ERR(conn)) {
			ret = PTR_ERR(conn);
			goto out;
		}
		rs->rs_conn = conn;
	}

	/* Parse any control messages the user may have included. */
	ret = rds_cmsg_send(rs, rm, msg, &allocated_mr);
	if (ret)
		goto out;

	if (rm->rdma.op_active && !conn->c_trans->xmit_rdma) {
		if (printk_ratelimit())
			printk(KERN_NOTICE "rdma_op %p conn xmit_rdma %p\n",
			       &rm->rdma, conn->c_trans->xmit_rdma);
		ret = -EOPNOTSUPP;
		goto out;
	}

	if (rm->atomic.op_active && !conn->c_trans->xmit_atomic) {
		if (printk_ratelimit())
			printk(KERN_NOTICE "atomic_op %p conn xmit_atomic %p\n",
			       &rm->atomic, conn->c_trans->xmit_atomic);
		ret = -EOPNOTSUPP;
		goto out;
	}

	rds_conn_connect_if_down(conn);

	ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs);
	if (ret) {
		rs->rs_seen_congestion = 1;
		goto out;
	}

	while (!rds_send_queue_rm(rs, conn, rm, rs->rs_bound_port,
				  dport, &queued)) {
		rds_stats_inc(s_send_queue_full);
		/* XXX make sure this is reasonable */
		if (payload_len > rds_sk_sndbuf(rs)) {
			ret = -EMSGSIZE;
			goto out;
		}
		if (nonblock) {
			ret = -EAGAIN;
			goto out;
		}

		timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
					rds_send_queue_rm(rs, conn, rm,
							  rs->rs_bound_port,
							  dport,
							  &queued),
					timeo);
		rdsdebug("sendmsg woke queued %d timeo %ld\n", queued, timeo);
		if (timeo > 0 || timeo == MAX_SCHEDULE_TIMEOUT)
			continue;

		ret = timeo;
		if (ret == 0)
			ret = -ETIMEDOUT;
		goto out;
	}

	/*
	 * By now we've committed to the send.  We reuse rds_send_worker()
	 * to retry sends in the rds thread if the transport asks us to.
	 */
	rds_stats_inc(s_send_queued);

	if (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags))
		rds_send_xmit(conn);

	rds_message_put(rm);
	return payload_len;

out:
	/* If the user included an RDMA_MAP cmsg, we allocated an MR on the
	 * fly. If the sendmsg goes through, we keep the MR. If it fails with
	 * EAGAIN or in any other way, we need to destroy the MR again */
	if (allocated_mr)
		rds_rdma_unuse(rs, rds_rdma_cookie_key(rm->m_rdma_cookie), 1);

	if (rm)
		rds_message_put(rm);
	return ret;
}

/*
 * Reply to a ping packet.
 */
int
rds_send_pong(struct rds_connection *conn, __be16 dport)
{
	struct rds_message *rm;
	unsigned long flags;
	int ret = 0;

	rm = rds_message_alloc(0, GFP_ATOMIC);
	if (!rm) {
		ret = -ENOMEM;
		goto out;
	}

	rm->m_daddr = conn->c_faddr;
	rm->data.op_active = 1;

	rds_conn_connect_if_down(conn);

	ret = rds_cong_wait(conn->c_fcong, dport, 1, NULL);
	if (ret)
		goto out;

	spin_lock_irqsave(&conn->c_lock, flags);
	list_add_tail(&rm->m_conn_item, &conn->c_send_queue);
	set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
	rds_message_addref(rm);
	rm->m_inc.i_conn = conn;

	rds_message_populate_header(&rm->m_inc.i_hdr, 0, dport,
				    conn->c_next_tx_seq);
	conn->c_next_tx_seq++;
	spin_unlock_irqrestore(&conn->c_lock, flags);

	rds_stats_inc(s_send_queued);
	rds_stats_inc(s_send_pong);

	if (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags))
		rds_send_xmit(conn);

	rds_message_put(rm);
	return 0;

out:
	if (rm)
		rds_message_put(rm);
	return ret;
}