#ifndef _RDS_RDS_H
#define _RDS_RDS_H

#include <net/sock.h>
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <rdma/rdma_cm.h>
#include <linux/mutex.h>
#include <linux/rds.h>

#include "info.h"

/*
 * RDS Network protocol version
 */
#define RDS_PROTOCOL_3_0	0x0300
#define RDS_PROTOCOL_3_1	0x0301
#define RDS_PROTOCOL_VERSION	RDS_PROTOCOL_3_1
#define RDS_PROTOCOL_MAJOR(v)	((v) >> 8)
#define RDS_PROTOCOL_MINOR(v)	((v) & 255)
#define RDS_PROTOCOL(maj, min)	(((maj) << 8) | (min))
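/*
 * e.g. RDS_PROTOCOL(3, 1) == 0x0301 == RDS_PROTOCOL_3_1, and
 * RDS_PROTOCOL_MAJOR(0x0301) == 3, RDS_PROTOCOL_MINOR(0x0301) == 1.
 */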

/*
 * XXX randomly chosen, but at least seems to be unused:
 * # 18464-18768 Unassigned
 * We should do better.  We want a reserved port to discourage unpriv'ed
 * userspace from listening.
 */
#define RDS_PORT	18634

#ifdef ATOMIC64_INIT
#define KERNEL_HAS_ATOMIC64
#endif

#ifdef DEBUG
#define rdsdebug(fmt, args...) pr_debug("%s(): " fmt, __func__, ##args)
#else
/* sigh, pr_debug() causes unused variable warnings */
static inline void __attribute__ ((format (printf, 1, 2)))
rdsdebug(char *fmt, ...)
{
}
#endif

/* XXX is there one of these somewhere? */
#define ceil(x, y) \
	({ unsigned long __x = (x), __y = (y); (__x + __y - 1) / __y; })

#define RDS_FRAG_SHIFT	12
#define RDS_FRAG_SIZE	((unsigned int)(1 << RDS_FRAG_SHIFT))
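/*
 * e.g. a 9000 byte payload spans ceil(9000, RDS_FRAG_SIZE) == 3 of the
 * 4096 byte fragments above.
 */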

#define RDS_CONG_MAP_BYTES	(65536 / 8)
#define RDS_CONG_MAP_LONGS	(RDS_CONG_MAP_BYTES / sizeof(unsigned long))
#define RDS_CONG_MAP_PAGES	(PAGE_ALIGN(RDS_CONG_MAP_BYTES) / PAGE_SIZE)
#define RDS_CONG_MAP_PAGE_BITS	(PAGE_SIZE * 8)

struct rds_cong_map {
	struct rb_node		m_rb_node;
	__be32			m_addr;
	wait_queue_head_t	m_waitq;
	struct list_head	m_conn_list;
	unsigned long		m_page_addrs[RDS_CONG_MAP_PAGES];
};
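/*
 * The map carries one bit per 16-bit port.  A port indexes into
 * m_page_addrs along the lines of the bit helpers in cong.c (sketch):
 *
 *	i   = be16_to_cpu(port) / RDS_CONG_MAP_PAGE_BITS;
 *	off = be16_to_cpu(port) % RDS_CONG_MAP_PAGE_BITS;
 *	set bit 'off' in the page at m_page_addrs[i]
 */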


/*
 * This is how we will track the connection state:
 * A connection is always in one of the following
 * states. Updates to the state are atomic and imply
 * a memory barrier.
 */
enum {
	RDS_CONN_DOWN = 0,
	RDS_CONN_CONNECTING,
	RDS_CONN_DISCONNECTING,
	RDS_CONN_UP,
	RDS_CONN_ERROR,
};

/* Bits for c_flags */
#define RDS_LL_SEND_FULL	0
#define RDS_RECONNECT_PENDING	1

struct rds_connection {
	struct hlist_node	c_hash_node;
	__be32			c_laddr;
	__be32			c_faddr;
	unsigned int		c_loopback:1;
	struct rds_connection	*c_passive;

	struct rds_cong_map	*c_lcong;
	struct rds_cong_map	*c_fcong;

	struct mutex		c_send_lock;	/* protect send ring */
	struct rds_message	*c_xmit_rm;
	unsigned long		c_xmit_sg;
	unsigned int		c_xmit_hdr_off;
	unsigned int		c_xmit_data_off;
	unsigned int		c_xmit_rdma_sent;

	spinlock_t		c_lock;		/* protect msg queues */
	u64			c_next_tx_seq;
	struct list_head	c_send_queue;
	struct list_head	c_retrans;

	u64			c_next_rx_seq;

	struct rds_transport	*c_trans;
	void			*c_transport_data;

	atomic_t		c_state;
	unsigned long		c_flags;
	unsigned long		c_reconnect_jiffies;
	struct delayed_work	c_send_w;
	struct delayed_work	c_recv_w;
	struct delayed_work	c_conn_w;
	struct work_struct	c_down_w;
	struct mutex		c_cm_lock;	/* protect conn state & cm */

	struct list_head	c_map_item;
	unsigned long		c_map_queued;
	unsigned long		c_map_offset;
	unsigned long		c_map_bytes;

	unsigned int		c_unacked_packets;
	unsigned int		c_unacked_bytes;

	/* Protocol version */
	unsigned int		c_version;
};

#define RDS_FLAG_CONG_BITMAP	0x01
#define RDS_FLAG_ACK_REQUIRED	0x02
#define RDS_FLAG_RETRANSMITTED	0x04
#define RDS_MAX_ADV_CREDIT	255

/*
 * Maximum space available for extension headers.
 */
#define RDS_HEADER_EXT_SPACE	16

struct rds_header {
	__be64	h_sequence;
	__be64	h_ack;
	__be32	h_len;
	__be16	h_sport;
	__be16	h_dport;
	u8	h_flags;
	u8	h_credit;
	u8	h_padding[4];
	__sum16	h_csum;

	u8	h_exthdr[RDS_HEADER_EXT_SPACE];
};
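/*
 * The wire header above is 48 bytes with no compiler padding:
 * 8 + 8 + 4 + 2 + 2 + 1 + 1 + 4 + 2 + 16 == 48, every member on its
 * natural alignment.  A build-time check could assert this, e.g.
 *
 *	BUILD_BUG_ON(sizeof(struct rds_header) != 48);
 */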

/*
 * Reserved - indicates end of extensions
 */
#define RDS_EXTHDR_NONE		0

/*
 * This extension header is included in the very
 * first message that is sent on a new connection,
 * and identifies the protocol level. This will help
 * rolling updates if a future change requires breaking
 * the protocol.
 * NB: This is no longer true for IB, where we do a version
 * negotiation during the connection setup phase (protocol
 * version information is included in the RDMA CM private data).
 */
#define RDS_EXTHDR_VERSION	1
struct rds_ext_header_version {
	__be32			h_version;
};

/*
 * This extension header is included in the RDS message
 * chasing an RDMA operation.
 */
#define RDS_EXTHDR_RDMA		2
struct rds_ext_header_rdma {
	__be32			h_rdma_rkey;
};

/*
 * This extension header tells the peer about the
 * destination <R_Key,offset> of the requested RDMA
 * operation.
 */
#define RDS_EXTHDR_RDMA_DEST	3
struct rds_ext_header_rdma_dest {
	__be32			h_rdma_rkey;
	__be32			h_rdma_offset;
};

#define __RDS_EXTHDR_MAX	16 /* for now */
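/*
 * Extensions are packed back to back in h_exthdr and terminated by
 * RDS_EXTHDR_NONE.  A sketch of a receive-side walk using
 * rds_message_next_extension(), declared below (hypothetical caller):
 *
 *	unsigned int pos = 0, len;
 *	char buf[RDS_HEADER_EXT_SPACE];
 *	int type;
 *
 *	while (1) {
 *		len = sizeof(buf);
 *		type = rds_message_next_extension(hdr, &pos, buf, &len);
 *		if (type == RDS_EXTHDR_NONE)
 *			break;
 *		... len bytes of extension 'type' are now in buf ...
 *	}
 */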

struct rds_incoming {
	atomic_t		i_refcount;
	struct list_head	i_item;
	struct rds_connection	*i_conn;
	struct rds_header	i_hdr;
	unsigned long		i_rx_jiffies;
	__be32			i_saddr;

	rds_rdma_cookie_t	i_rdma_cookie;
};

/*
 * m_sock_item and m_conn_item are on lists that are serialized under
 * conn->c_lock.  m_sock_item has additional meaning in that once it is empty
 * the message will not be put back on the retransmit list after being sent.
 * Messages that are canceled while being sent rely on this.
 *
 * m_inc is used by loopback so that it can pass an incoming message straight
 * back up into the rx path.  It embeds a wire header which is also used by
 * the send path, which is kind of awkward.
 *
 * m_sock_item indicates the message's presence on a socket's send or receive
 * queue.  m_rs will point to that socket.
 *
 * m_daddr is used by cancellation to prune messages to a given destination.
 *
 * The RDS_MSG_ON_SOCK and RDS_MSG_ON_CONN flags are used to avoid lock
 * nesting.  As paths iterate over messages on a sock, or conn, they must
 * also lock the conn, or sock, to remove the message from those lists too.
 * Testing the flag to determine if the message is still on the lists lets
 * us avoid testing the list_head directly.  That means each path can use
 * the message's list_head to keep it on a local list while juggling locks
 * without confusing the other path.
 *
 * m_ack_seq is an optional field set by transports that need a different
 * sequence number range to invalidate.  They can use this in a callback
 * that they pass to rds_send_drop_acked() to see if each message has been
 * acked.  The HAS_ACK_SEQ flag can be used to detect messages which haven't
 * had ack_seq set yet.
 */
#define RDS_MSG_ON_SOCK		1
#define RDS_MSG_ON_CONN		2
#define RDS_MSG_HAS_ACK_SEQ	3
#define RDS_MSG_ACK_REQUIRED	4
#define RDS_MSG_RETRANSMITTED	5
#define RDS_MSG_MAPPED		6
#define RDS_MSG_PAGEVEC		7
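/*
 * An illustrative is_acked callback built on m_ack_seq (a sketch, not an
 * existing function): a transport that assigns m_ack_seq might pass
 * something like this to rds_send_drop_acked().  With no callback, the
 * send path falls back to comparing the header's h_sequence to the ack.
 *
 *	static int example_is_acked(struct rds_message *rm, uint64_t ack)
 *	{
 *		if (!test_bit(RDS_MSG_HAS_ACK_SEQ, &rm->m_flags))
 *			return 0;	(not yet assigned a tx sequence)
 *		return rm->m_ack_seq <= ack;
 *	}
 */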

struct rds_message {
	atomic_t		m_refcount;
	struct list_head	m_sock_item;
	struct list_head	m_conn_item;
	struct rds_incoming	m_inc;
	u64			m_ack_seq;
	__be32			m_daddr;
	unsigned long		m_flags;

	/* Never access m_rs without holding m_rs_lock.
	 * Lock nesting is
	 *  rm->m_rs_lock
	 *   -> rs->rs_lock
	 */
	spinlock_t		m_rs_lock;
	struct rds_sock		*m_rs;
	rds_rdma_cookie_t	m_rdma_cookie;
	struct {
		struct {
			struct rds_rdma_op	*m_rdma_op;
			struct rds_mr		*m_rdma_mr;
		} rdma;
		struct {
			unsigned int		m_nents;
			unsigned int		m_count;
			struct scatterlist	*m_sg;
		} data;
	};
	unsigned int		m_used_sgs;
	unsigned int		m_total_sgs;
};

/*
 * The RDS notifier is used (optionally) to tell the application about
 * completed RDMA operations.  Rather than keeping the whole rds message
 * around on the queue, we allocate a small notifier that is put on the
 * socket's notifier_list.  Notifications are delivered to the application
 * through control messages.
 */
struct rds_notifier {
	struct list_head	n_list;
	uint64_t		n_user_token;
	int			n_status;
};

/**
 * struct rds_transport - transport specific behavioural hooks
 *
 * @xmit: .xmit is called by rds_send_xmit() to tell the transport to send
 *        part of a message.  The caller serializes on the send_sem so this
 *        doesn't need to be reentrant for a given conn.  The header must be
 *        sent before the data payload.  .xmit must be prepared to send a
 *        message with no data payload.  .xmit should return the number of
 *        bytes that were sent down the connection, including header bytes.
 *        Returning 0 tells the caller that it doesn't need to perform any
 *        additional work now.  This is usually the case when the transport
 *        has filled the sending queue for its connection and will handle
 *        triggering the rds thread to continue the send when space becomes
 *        available.  Returning -EAGAIN tells the caller to retry the send
 *        immediately.  Returning -ENOMEM tells the caller to retry the send
 *        at some point in the future.
 *
 * @conn_shutdown: conn_shutdown stops traffic on the given connection.  Once
 *                 it returns the connection can not call rds_recv_incoming().
 *                 This will only be called once after conn_connect returns
 *                 non-zero success.  The caller serializes this with
 *                 the send and connecting paths (xmit_* and conn_*).  The
 *                 transport is responsible for other serialization, including
 *                 rds_recv_incoming().  This is called in process context but
 *                 should try hard not to block.
 *
 * @xmit_cong_map: This asks the transport to send the local bitmap down the
 *                 given connection.  XXX get a better story about the bitmap
 *                 flag and header.
 */
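/*
 * A sketch of the .xmit return convention described above (hypothetical
 * transport; ring_is_full() and post_message() are placeholders, not
 * RDS APIs):
 *
 *	static int example_xmit(struct rds_connection *conn,
 *				struct rds_message *rm, unsigned int hdr_off,
 *				unsigned int sg, unsigned int off)
 *	{
 *		int sent;
 *
 *		if (ring_is_full(conn))
 *			return 0;	(transport re-arms the send itself)
 *
 *		sent = post_message(conn, rm, hdr_off, sg, off);
 *		if (sent < 0)
 *			return -ENOMEM;	(caller retries later)
 *		return sent;		(header + payload bytes consumed)
 *	}
 */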

#define RDS_TRANS_IB	0
#define RDS_TRANS_IWARP	1
#define RDS_TRANS_TCP	2
#define RDS_TRANS_COUNT	3

struct rds_transport {
	char			t_name[TRANSNAMSIZ];
	struct list_head	t_item;
	struct module		*t_owner;
	unsigned int		t_prefer_loopback:1;
	unsigned int		t_type;

	int (*laddr_check)(__be32 addr);
	int (*conn_alloc)(struct rds_connection *conn, gfp_t gfp);
	void (*conn_free)(void *data);
	int (*conn_connect)(struct rds_connection *conn);
	void (*conn_shutdown)(struct rds_connection *conn);
	void (*xmit_prepare)(struct rds_connection *conn);
	void (*xmit_complete)(struct rds_connection *conn);
	int (*xmit)(struct rds_connection *conn, struct rds_message *rm,
		    unsigned int hdr_off, unsigned int sg, unsigned int off);
	int (*xmit_cong_map)(struct rds_connection *conn,
			     struct rds_cong_map *map, unsigned long offset);
	int (*xmit_rdma)(struct rds_connection *conn, struct rds_rdma_op *op);
	int (*recv)(struct rds_connection *conn);
	int (*inc_copy_to_user)(struct rds_incoming *inc, struct iovec *iov,
				size_t size);
	void (*inc_purge)(struct rds_incoming *inc);
	void (*inc_free)(struct rds_incoming *inc);

	int (*cm_handle_connect)(struct rdma_cm_id *cm_id,
				 struct rdma_cm_event *event);
	int (*cm_initiate_connect)(struct rdma_cm_id *cm_id);
	void (*cm_connect_complete)(struct rds_connection *conn,
				    struct rdma_cm_event *event);

	unsigned int (*stats_info_copy)(struct rds_info_iterator *iter,
					unsigned int avail);
	void (*exit)(void);
	void *(*get_mr)(struct scatterlist *sg, unsigned long nr_sg,
			struct rds_sock *rs, u32 *key_ret);
	void (*sync_mr)(void *trans_private, int direction);
	void (*free_mr)(void *trans_private, int invalidate);
	void (*flush_mrs)(void);
};

struct rds_sock {
	struct sock		rs_sk;

	u64			rs_user_addr;
	u64			rs_user_bytes;

	/*
	 * bound_addr used for both incoming and outgoing, no INADDR_ANY
	 * support.
	 */
	struct rb_node		rs_bound_node;
	__be32			rs_bound_addr;
	__be32			rs_conn_addr;
	__be16			rs_bound_port;
	__be16			rs_conn_port;

	/*
	 * This is only used to communicate the transport between bind and
	 * initiating connections.  All other trans use is referenced through
	 * the connection.
	 */
	struct rds_transport	*rs_transport;

	/*
	 * rds_sendmsg caches the conn it used the last time around.
	 * This helps avoid costly lookups.
	 */
	struct rds_connection	*rs_conn;

	/* flag indicating we were congested or not */
	int			rs_congested;
	/* seen congestion (ENOBUFS) when sending? */
	int			rs_seen_congestion;

	/* rs_lock protects all these adjacent members before the newline */
	spinlock_t		rs_lock;
	struct list_head	rs_send_queue;
	u32			rs_snd_bytes;
	int			rs_rcv_bytes;
	struct list_head	rs_notify_queue; /* currently used for failed RDMAs */

	/* Congestion wake_up. If rs_cong_monitor is set, we use cong_mask
	 * to decide whether the application should be woken up.
	 * If not set, we use rs_cong_track to find out whether a cong map
	 * update arrived.
	 */
	uint64_t		rs_cong_mask;
	uint64_t		rs_cong_notify;
	struct list_head	rs_cong_list;
	unsigned long		rs_cong_track;

	/*
	 * rs_recv_lock protects the receive queue, and is
	 * used to serialize with rds_release.
	 */
	rwlock_t		rs_recv_lock;
	struct list_head	rs_recv_queue;

	/* just for stats reporting */
	struct list_head	rs_item;

	/* these have their own lock */
	spinlock_t		rs_rdma_lock;
	struct rb_root		rs_rdma_keys;

	/* Socket options - in case there will be more */
	unsigned char		rs_recverr,
				rs_cong_monitor;
};

static inline struct rds_sock *rds_sk_to_rs(const struct sock *sk)
{
	return container_of(sk, struct rds_sock, rs_sk);
}
static inline struct sock *rds_rs_to_sk(struct rds_sock *rs)
{
	return &rs->rs_sk;
}

/*
 * The stack assigns sk_sndbuf and sk_rcvbuf to twice the specified value
 * to account for overhead.  We don't account for overhead, we just apply
 * the number of payload bytes to the specified value.
 */
static inline int rds_sk_sndbuf(struct rds_sock *rs)
{
	return rds_rs_to_sk(rs)->sk_sndbuf / 2;
}
static inline int rds_sk_rcvbuf(struct rds_sock *rs)
{
	return rds_rs_to_sk(rs)->sk_rcvbuf / 2;
}
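/*
 * e.g. setting SO_SNDBUF to 8192 makes the socket core store
 * sk_sndbuf == 16384 (it doubles the value to allow for overhead);
 * rds_sk_sndbuf() halves it back, so RDS limits queued payload to the
 * 8192 bytes the application asked for.
 */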

struct rds_statistics {
	uint64_t	s_conn_reset;
	uint64_t	s_recv_drop_bad_checksum;
	uint64_t	s_recv_drop_old_seq;
	uint64_t	s_recv_drop_no_sock;
	uint64_t	s_recv_drop_dead_sock;
	uint64_t	s_recv_deliver_raced;
	uint64_t	s_recv_delivered;
	uint64_t	s_recv_queued;
	uint64_t	s_recv_immediate_retry;
	uint64_t	s_recv_delayed_retry;
	uint64_t	s_recv_ack_required;
	uint64_t	s_recv_rdma_bytes;
	uint64_t	s_recv_ping;
	uint64_t	s_send_queue_empty;
	uint64_t	s_send_queue_full;
	uint64_t	s_send_sem_contention;
	uint64_t	s_send_sem_queue_raced;
	uint64_t	s_send_immediate_retry;
	uint64_t	s_send_delayed_retry;
	uint64_t	s_send_drop_acked;
	uint64_t	s_send_ack_required;
	uint64_t	s_send_queued;
	uint64_t	s_send_rdma;
	uint64_t	s_send_rdma_bytes;
	uint64_t	s_send_pong;
	uint64_t	s_page_remainder_hit;
	uint64_t	s_page_remainder_miss;
	uint64_t	s_copy_to_user;
	uint64_t	s_copy_from_user;
	uint64_t	s_cong_update_queued;
	uint64_t	s_cong_update_received;
	uint64_t	s_cong_send_error;
	uint64_t	s_cong_send_blocked;
};

/* af_rds.c */
void rds_sock_addref(struct rds_sock *rs);
void rds_sock_put(struct rds_sock *rs);
void rds_wake_sk_sleep(struct rds_sock *rs);
static inline void __rds_wake_sk_sleep(struct sock *sk)
{
	wait_queue_head_t *waitq = sk_sleep(sk);

	if (!sock_flag(sk, SOCK_DEAD) && waitq)
		wake_up(waitq);
}
extern wait_queue_head_t rds_poll_waitq;


/* bind.c */
int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len);
void rds_remove_bound(struct rds_sock *rs);
struct rds_sock *rds_find_bound(__be32 addr, __be16 port);

/* cong.c */
int rds_cong_get_maps(struct rds_connection *conn);
void rds_cong_add_conn(struct rds_connection *conn);
void rds_cong_remove_conn(struct rds_connection *conn);
void rds_cong_set_bit(struct rds_cong_map *map, __be16 port);
void rds_cong_clear_bit(struct rds_cong_map *map, __be16 port);
int rds_cong_wait(struct rds_cong_map *map, __be16 port, int nonblock, struct rds_sock *rs);
void rds_cong_queue_updates(struct rds_cong_map *map);
void rds_cong_map_updated(struct rds_cong_map *map, uint64_t);
int rds_cong_updated_since(unsigned long *recent);
void rds_cong_add_socket(struct rds_sock *);
void rds_cong_remove_socket(struct rds_sock *);
void rds_cong_exit(void);
struct rds_message *rds_cong_update_alloc(struct rds_connection *conn);

/* conn.c */
int __init rds_conn_init(void);
void rds_conn_exit(void);
struct rds_connection *rds_conn_create(__be32 laddr, __be32 faddr,
				       struct rds_transport *trans, gfp_t gfp);
struct rds_connection *rds_conn_create_outgoing(__be32 laddr, __be32 faddr,
				       struct rds_transport *trans, gfp_t gfp);
void rds_conn_shutdown(struct rds_connection *conn);
void rds_conn_destroy(struct rds_connection *conn);
void rds_conn_reset(struct rds_connection *conn);
void rds_conn_drop(struct rds_connection *conn);
void rds_for_each_conn_info(struct socket *sock, unsigned int len,
			    struct rds_info_iterator *iter,
			    struct rds_info_lengths *lens,
			    int (*visitor)(struct rds_connection *, void *),
			    size_t item_len);
void __rds_conn_error(struct rds_connection *conn, const char *, ...)
				__attribute__ ((format (printf, 2, 3)));
#define rds_conn_error(conn, fmt...) \
	__rds_conn_error(conn, KERN_WARNING "RDS: " fmt)

static inline int
rds_conn_transition(struct rds_connection *conn, int old, int new)
{
	return atomic_cmpxchg(&conn->c_state, old, new) == old;
}
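/*
 * Sketch of how a path claims a state change (this mirrors what conn.c
 * and threads.c do; not a new API):
 *
 *	if (rds_conn_transition(conn, RDS_CONN_DOWN, RDS_CONN_CONNECTING))
 *		queue_delayed_work(rds_wq, &conn->c_conn_w, 0);
 *
 * Only the path that wins the cmpxchg queues the work, so concurrent
 * callers can race on the transition without double-queueing.
 */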

static inline int
rds_conn_state(struct rds_connection *conn)
{
	return atomic_read(&conn->c_state);
}

static inline int
rds_conn_up(struct rds_connection *conn)
{
	return atomic_read(&conn->c_state) == RDS_CONN_UP;
}

static inline int
rds_conn_connecting(struct rds_connection *conn)
{
	return atomic_read(&conn->c_state) == RDS_CONN_CONNECTING;
}

/* message.c */
struct rds_message *rds_message_alloc(unsigned int nents, gfp_t gfp);
struct scatterlist *rds_message_alloc_sgs(struct rds_message *rm, int nents);
int rds_message_copy_from_user(struct rds_message *rm, struct iovec *first_iov,
			       size_t total_len);
struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned int total_len);
void rds_message_populate_header(struct rds_header *hdr, __be16 sport,
				 __be16 dport, u64 seq);
int rds_message_add_extension(struct rds_header *hdr,
			      unsigned int type, const void *data, unsigned int len);
int rds_message_next_extension(struct rds_header *hdr,
			       unsigned int *pos, void *buf, unsigned int *buflen);
int rds_message_add_version_extension(struct rds_header *hdr, unsigned int version);
int rds_message_get_version_extension(struct rds_header *hdr, unsigned int *version);
int rds_message_add_rdma_dest_extension(struct rds_header *hdr, u32 r_key, u32 offset);
int rds_message_inc_copy_to_user(struct rds_incoming *inc,
				 struct iovec *first_iov, size_t size);
void rds_message_inc_purge(struct rds_incoming *inc);
void rds_message_inc_free(struct rds_incoming *inc);
void rds_message_addref(struct rds_message *rm);
void rds_message_put(struct rds_message *rm);
void rds_message_wait(struct rds_message *rm);
void rds_message_unmapped(struct rds_message *rm);

static inline void rds_message_make_checksum(struct rds_header *hdr)
{
	hdr->h_csum = 0;
	hdr->h_csum = ip_fast_csum((void *) hdr, sizeof(*hdr) >> 2);
}

static inline int rds_message_verify_checksum(const struct rds_header *hdr)
{
	return !hdr->h_csum || ip_fast_csum((void *) hdr, sizeof(*hdr) >> 2) == 0;
}
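/*
 * h_csum == 0 means "no checksum", so an unchecksummed header always
 * verifies.  ip_fast_csum() works on 32-bit words, hence the >> 2; a
 * correctly checksummed header sums to zero.  Receive-path sketch
 * (matches what recv.c does):
 *
 *	if (!rds_message_verify_checksum(&inc->i_hdr)) {
 *		rds_stats_inc(s_recv_drop_bad_checksum);
 *		goto out;
 *	}
 */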


/* page.c */
int rds_page_remainder_alloc(struct scatterlist *scat, unsigned long bytes,
			     gfp_t gfp);
int rds_page_copy_user(struct page *page, unsigned long offset,
		       void __user *ptr, unsigned long bytes,
		       int to_user);
#define rds_page_copy_to_user(page, offset, ptr, bytes) \
	rds_page_copy_user(page, offset, ptr, bytes, 1)
#define rds_page_copy_from_user(page, offset, ptr, bytes) \
	rds_page_copy_user(page, offset, ptr, bytes, 0)
void rds_page_exit(void);

/* recv.c */
void rds_inc_init(struct rds_incoming *inc, struct rds_connection *conn,
		  __be32 saddr);
void rds_inc_addref(struct rds_incoming *inc);
void rds_inc_put(struct rds_incoming *inc);
void rds_recv_incoming(struct rds_connection *conn, __be32 saddr, __be32 daddr,
		       struct rds_incoming *inc, gfp_t gfp, enum km_type km);
int rds_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
		size_t size, int msg_flags);
void rds_clear_recv_queue(struct rds_sock *rs);
int rds_notify_queue_get(struct rds_sock *rs, struct msghdr *msg);
void rds_inc_info_copy(struct rds_incoming *inc,
		       struct rds_info_iterator *iter,
		       __be32 saddr, __be32 daddr, int flip);

/* send.c */
int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
		size_t payload_len);
void rds_send_reset(struct rds_connection *conn);
int rds_send_xmit(struct rds_connection *conn);
struct sockaddr_in;
void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest);
typedef int (*is_acked_func)(struct rds_message *rm, uint64_t ack);
void rds_send_drop_acked(struct rds_connection *conn, u64 ack,
			 is_acked_func is_acked);
int rds_send_acked_before(struct rds_connection *conn, u64 seq);
void rds_send_remove_from_sock(struct list_head *messages, int status);
int rds_send_pong(struct rds_connection *conn, __be16 dport);
struct rds_message *rds_send_get_message(struct rds_connection *,
					 struct rds_rdma_op *);

/* rdma.c */
void rds_rdma_unuse(struct rds_sock *rs, u32 r_key, int force);

/* stats.c */
DECLARE_PER_CPU_SHARED_ALIGNED(struct rds_statistics, rds_stats);
#define rds_stats_inc_which(which, member) do {		\
	per_cpu(which, get_cpu()).member++;		\
	put_cpu();					\
} while (0)
#define rds_stats_inc(member) rds_stats_inc_which(rds_stats, member)
#define rds_stats_add_which(which, member, count) do {		\
	per_cpu(which, get_cpu()).member += count;		\
	put_cpu();						\
} while (0)
#define rds_stats_add(member, count) rds_stats_add_which(rds_stats, member, count)
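/*
 * e.g. rds_stats_inc(s_recv_delivered) bumps the counter in this cpu's
 * copy with preemption disabled via get_cpu(), so no lock is needed;
 * readers sum the per-cpu copies.  Transports wrap the _which variants
 * around their own per-cpu stats structs the same way.
 */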
int __init rds_stats_init(void);
void rds_stats_exit(void);
void rds_stats_info_copy(struct rds_info_iterator *iter,
			 uint64_t *values, const char *const *names,
			 size_t nr);

/* sysctl.c */
int __init rds_sysctl_init(void);
void rds_sysctl_exit(void);
extern unsigned long rds_sysctl_sndbuf_min;
extern unsigned long rds_sysctl_sndbuf_default;
extern unsigned long rds_sysctl_sndbuf_max;
extern unsigned long rds_sysctl_reconnect_min_jiffies;
extern unsigned long rds_sysctl_reconnect_max_jiffies;
extern unsigned int  rds_sysctl_max_unacked_packets;
extern unsigned int  rds_sysctl_max_unacked_bytes;
extern unsigned int  rds_sysctl_ping_enable;
extern unsigned long rds_sysctl_trace_flags;
extern unsigned int  rds_sysctl_trace_level;

/* threads.c */
int __init rds_threads_init(void);
void rds_threads_exit(void);
extern struct workqueue_struct *rds_wq;
void rds_queue_reconnect(struct rds_connection *conn);
void rds_connect_worker(struct work_struct *);
void rds_shutdown_worker(struct work_struct *);
void rds_send_worker(struct work_struct *);
void rds_recv_worker(struct work_struct *);
void rds_connect_complete(struct rds_connection *conn);

/* transport.c */
int rds_trans_register(struct rds_transport *trans);
void rds_trans_unregister(struct rds_transport *trans);
struct rds_transport *rds_trans_get_preferred(__be32 addr);
unsigned int rds_trans_stats_info_copy(struct rds_info_iterator *iter,
				       unsigned int avail);
int __init rds_trans_init(void);
void rds_trans_exit(void);

#endif