| Vasiliy Kulikov | c319b4d | 2011-05-13 10:01:00 +0000 | [diff] [blame] | 1 | /* | 
|  | 2 | * INET		An implementation of the TCP/IP protocol suite for the LINUX | 
|  | 3 | *		operating system.  INET is implemented using the  BSD Socket | 
|  | 4 | *		interface as the means of communication with the user level. | 
|  | 5 | * | 
|  | 6 | *		"Ping" sockets | 
|  | 7 | * | 
|  | 8 | *		This program is free software; you can redistribute it and/or | 
|  | 9 | *		modify it under the terms of the GNU General Public License | 
|  | 10 | *		as published by the Free Software Foundation; either version | 
|  | 11 | *		2 of the License, or (at your option) any later version. | 
|  | 12 | * | 
|  | 13 | * Based on ipv4/udp.c code. | 
|  | 14 | * | 
|  | 15 | * Authors:	Vasiliy Kulikov / Openwall (for Linux 2.6), | 
|  | 16 | *		Pavel Kankovsky (for Linux 2.4.32) | 
|  | 17 | * | 
|  | 18 | * Pavel gave all rights to bugs to Vasiliy, | 
|  | 19 | * none of the bugs are Pavel's now. | 
|  | 20 | * | 
|  | 21 | */ | 
|  | 22 |  | 
| Vasiliy Kulikov | c319b4d | 2011-05-13 10:01:00 +0000 | [diff] [blame] | 23 | #include <linux/uaccess.h> | 
| Vasiliy Kulikov | c319b4d | 2011-05-13 10:01:00 +0000 | [diff] [blame] | 24 | #include <linux/types.h> | 
|  | 25 | #include <linux/fcntl.h> | 
|  | 26 | #include <linux/socket.h> | 
|  | 27 | #include <linux/sockios.h> | 
|  | 28 | #include <linux/in.h> | 
|  | 29 | #include <linux/errno.h> | 
|  | 30 | #include <linux/timer.h> | 
|  | 31 | #include <linux/mm.h> | 
|  | 32 | #include <linux/inet.h> | 
|  | 33 | #include <linux/netdevice.h> | 
|  | 34 | #include <net/snmp.h> | 
|  | 35 | #include <net/ip.h> | 
|  | 36 | #include <net/ipv6.h> | 
|  | 37 | #include <net/icmp.h> | 
|  | 38 | #include <net/protocol.h> | 
|  | 39 | #include <linux/skbuff.h> | 
|  | 40 | #include <linux/proc_fs.h> | 
| Paul Gortmaker | bc3b2d7 | 2011-07-15 11:47:34 -0400 | [diff] [blame] | 41 | #include <linux/export.h> | 
| Vasiliy Kulikov | c319b4d | 2011-05-13 10:01:00 +0000 | [diff] [blame] | 42 | #include <net/sock.h> | 
|  | 43 | #include <net/ping.h> | 
| Vasiliy Kulikov | c319b4d | 2011-05-13 10:01:00 +0000 | [diff] [blame] | 44 | #include <net/udp.h> | 
|  | 45 | #include <net/route.h> | 
|  | 46 | #include <net/inet_common.h> | 
|  | 47 | #include <net/checksum.h> | 
|  | 48 |  | 
|  | 49 |  | 
/* Global hash table of all IPv4 ping sockets, keyed by ICMP echo ident. */
static struct ping_table ping_table;

/* Last ident handed out by automatic (ident == 0) selection in
 * ping_v4_get_port(); protected by ping_table.lock. */
static u16 ping_port_rover;
| Vasiliy Kulikov | c319b4d | 2011-05-13 10:01:00 +0000 | [diff] [blame] | 53 |  | 
static inline int ping_hashfn(struct net *net, unsigned int num, unsigned int mask)
{
	/* Mix the ident with a per-netns value so hash chains differ
	 * between network namespaces, then mask down to a table slot.
	 */
	int slot = (num + net_hash_mix(net)) & mask;

	pr_debug("hash(%d) = %d\n", num, slot);
	return slot;
}
|  | 61 |  | 
|  | 62 | static inline struct hlist_nulls_head *ping_hashslot(struct ping_table *table, | 
| Eric Dumazet | 95c9617 | 2012-04-15 05:58:06 +0000 | [diff] [blame] | 63 | struct net *net, unsigned int num) | 
| Vasiliy Kulikov | c319b4d | 2011-05-13 10:01:00 +0000 | [diff] [blame] | 64 | { | 
|  | 65 | return &table->hash[ping_hashfn(net, num, PING_HTABLE_MASK)]; | 
|  | 66 | } | 
|  | 67 |  | 
/*
 * Allocate an ICMP echo ident (the ping "port") for @sk and insert the
 * socket into the ping hash table.  ident == 0 requests automatic
 * assignment via a rover, like ephemeral port selection.
 * Returns 0 on success, 1 if the ident is taken or none is free.
 */
static int ping_v4_get_port(struct sock *sk, unsigned short ident)
{
	struct hlist_nulls_node *node;
	struct hlist_nulls_head *hlist;
	struct inet_sock *isk, *isk2;
	struct sock *sk2 = NULL;

	isk = inet_sk(sk);
	write_lock_bh(&ping_table.lock);
	if (ident == 0) {
		u32 i;
		u16 result = ping_port_rover + 1;

		/* Scan the full 16-bit ident space, starting just past
		 * the last ident handed out. */
		for (i = 0; i < (1L << 16); i++, result++) {
			if (!result)
				result++; /* avoid zero */
			hlist = ping_hashslot(&ping_table, sock_net(sk),
					    result);
			ping_portaddr_for_each_entry(sk2, node, hlist) {
				isk2 = inet_sk(sk2);

				if (isk2->inet_num == result)
					goto next_port;
			}

			/* found */
			ping_port_rover = ident = result;
			break;
next_port:
			;
		}
		if (i >= (1L << 16))
			goto fail;	/* all 65535 idents in use */
	} else {
		/* Explicit ident: allow sharing only when both sockets
		 * have SO_REUSEADDR set. */
		hlist = ping_hashslot(&ping_table, sock_net(sk), ident);
		ping_portaddr_for_each_entry(sk2, node, hlist) {
			isk2 = inet_sk(sk2);

			if ((isk2->inet_num == ident) &&
			    (sk2 != sk) &&
			    (!sk2->sk_reuse || !sk->sk_reuse))
				goto fail;
		}
	}

	pr_debug("found port/ident = %d\n", ident);
	isk->inet_num = ident;
	if (sk_unhashed(sk)) {
		pr_debug("was not hashed\n");
		sock_hold(sk);	/* hash table holds a reference */
		hlist_nulls_add_head(&sk->sk_nulls_node, hlist);
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
	}
	write_unlock_bh(&ping_table.lock);
	return 0;

fail:
	write_unlock_bh(&ping_table.lock);
	return 1;
}
|  | 128 |  | 
/*
 * Ping sockets are hashed only from ping_v4_get_port(); reaching this
 * generic ->hash() hook indicates a logic error, hence the BUG().
 */
static void ping_v4_hash(struct sock *sk)
{
	pr_debug("ping_v4_hash(sk->port=%u)\n", inet_sk(sk)->inet_num);
	BUG(); /* "Please do not press this button again." */
}
|  | 134 |  | 
/*
 * Remove @sk from the ping hash table, drop the table's reference and
 * clear the cached ident/source port.
 */
static void ping_v4_unhash(struct sock *sk)
{
	struct inet_sock *isk = inet_sk(sk);
	pr_debug("ping_v4_unhash(isk=%p,isk->num=%u)\n", isk, isk->inet_num);
	if (sk_hashed(sk)) {
		write_lock_bh(&ping_table.lock);
		hlist_nulls_del(&sk->sk_nulls_node);
		sock_put(sk);	/* release the hash table's reference */
		isk->inet_num = 0;
		isk->inet_sport = 0;
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
		write_unlock_bh(&ping_table.lock);
	}
}
|  | 149 |  | 
/*
 * Find the ping socket matching @ident whose bound address (if any)
 * matches @daddr and whose bound device (if any) matches @dif.
 * Returns the socket with an extra reference held (caller must
 * sock_put()), or NULL if no socket matches.
 */
static struct sock *ping_v4_lookup(struct net *net, __be32 saddr, __be32 daddr,
				   u16 ident, int dif)
{
	struct hlist_nulls_head *hslot = ping_hashslot(&ping_table, net, ident);
	struct sock *sk = NULL;
	struct inet_sock *isk;
	struct hlist_nulls_node *hnode;

	pr_debug("try to find: num = %d, daddr = %pI4, dif = %d\n",
		 (int)ident, &daddr, dif);
	read_lock_bh(&ping_table.lock);

	ping_portaddr_for_each_entry(sk, hnode, hslot) {
		isk = inet_sk(sk);

		pr_debug("found: %p: num = %d, daddr = %pI4, dif = %d\n", sk,
			 (int)isk->inet_num, &isk->inet_rcv_saddr,
			 sk->sk_bound_dev_if);

		pr_debug("iterate\n");
		if (isk->inet_num != ident)
			continue;
		/* A wildcard-bound socket (rcv_saddr == 0) matches any
		 * destination. */
		if (isk->inet_rcv_saddr && isk->inet_rcv_saddr != daddr)
			continue;
		if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif)
			continue;

		sock_hold(sk);	/* reference returned to the caller */
		goto exit;
	}

	sk = NULL;
exit:
	read_unlock_bh(&ping_table.lock);

	return sk;
}
|  | 187 |  | 
/*
 * Read the net.ipv4.ping_group_range sysctl pair for @net into
 * *low/*high, retrying until a consistent snapshot is obtained.
 * NOTE(review): this reuses sysctl_local_ports.lock as the seqlock
 * guarding the gid range — apparently intentional sharing; confirm
 * against the sysctl writer side.
 */
static void inet_get_ping_group_range_net(struct net *net, gid_t *low,
					  gid_t *high)
{
	gid_t *data = net->ipv4.sysctl_ping_group_range;
	unsigned int seq;

	do {
		seq = read_seqbegin(&sysctl_local_ports.lock);

		*low = data[0];
		*high = data[1];
	} while (read_seqretry(&sysctl_local_ports.lock, seq));
}
|  | 201 |  | 
|  | 202 |  | 
| Vasiliy Kulikov | c319b4d | 2011-05-13 10:01:00 +0000 | [diff] [blame] | 203 | static int ping_init_sock(struct sock *sk) | 
|  | 204 | { | 
|  | 205 | struct net *net = sock_net(sk); | 
|  | 206 | gid_t group = current_egid(); | 
|  | 207 | gid_t range[2]; | 
|  | 208 | struct group_info *group_info = get_current_groups(); | 
|  | 209 | int i, j, count = group_info->ngroups; | 
| Eric W. Biederman | ae2975b | 2011-11-14 15:56:38 -0800 | [diff] [blame] | 210 | kgid_t low, high; | 
| Vasiliy Kulikov | c319b4d | 2011-05-13 10:01:00 +0000 | [diff] [blame] | 211 |  | 
|  | 212 | inet_get_ping_group_range_net(net, range, range+1); | 
| Eric W. Biederman | ae2975b | 2011-11-14 15:56:38 -0800 | [diff] [blame] | 213 | low = make_kgid(&init_user_ns, range[0]); | 
|  | 214 | high = make_kgid(&init_user_ns, range[1]); | 
|  | 215 | if (!gid_valid(low) || !gid_valid(high) || gid_lt(high, low)) | 
|  | 216 | return -EACCES; | 
|  | 217 |  | 
| Vasiliy Kulikov | c319b4d | 2011-05-13 10:01:00 +0000 | [diff] [blame] | 218 | if (range[0] <= group && group <= range[1]) | 
|  | 219 | return 0; | 
|  | 220 |  | 
|  | 221 | for (i = 0; i < group_info->nblocks; i++) { | 
|  | 222 | int cp_count = min_t(int, NGROUPS_PER_BLOCK, count); | 
| Vasiliy Kulikov | c319b4d | 2011-05-13 10:01:00 +0000 | [diff] [blame] | 223 | for (j = 0; j < cp_count; j++) { | 
| Eric W. Biederman | ae2975b | 2011-11-14 15:56:38 -0800 | [diff] [blame] | 224 | kgid_t gid = group_info->blocks[i][j]; | 
|  | 225 | if (gid_lte(low, gid) && gid_lte(gid, high)) | 
| Vasiliy Kulikov | c319b4d | 2011-05-13 10:01:00 +0000 | [diff] [blame] | 226 | return 0; | 
|  | 227 | } | 
|  | 228 |  | 
|  | 229 | count -= cp_count; | 
|  | 230 | } | 
|  | 231 |  | 
|  | 232 | return -EACCES; | 
|  | 233 | } | 
|  | 234 |  | 
|  | 235 | static void ping_close(struct sock *sk, long timeout) | 
|  | 236 | { | 
|  | 237 | pr_debug("ping_close(sk=%p,sk->num=%u)\n", | 
| Joe Perches | 058bd4d | 2012-03-11 18:36:11 +0000 | [diff] [blame] | 238 | inet_sk(sk), inet_sk(sk)->inet_num); | 
| Vasiliy Kulikov | c319b4d | 2011-05-13 10:01:00 +0000 | [diff] [blame] | 239 | pr_debug("isk->refcnt = %d\n", sk->sk_refcnt.counter); | 
|  | 240 |  | 
|  | 241 | sk_common_release(sk); | 
|  | 242 | } | 
|  | 243 |  | 
|  | 244 | /* | 
|  | 245 | * We need our own bind because there are no privileged id's == local ports. | 
|  | 246 | * Moreover, we don't allow binding to multi- and broadcast addresses. | 
|  | 247 | */ | 
|  | 248 |  | 
/*
 * Bind a ping socket to a local address and ICMP ident.
 * Returns 0 on success, -EINVAL for short/duplicate binds,
 * -EADDRNOTAVAIL for non-local, multicast or broadcast addresses,
 * -EADDRINUSE when the requested ident is taken.
 */
static int ping_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct sockaddr_in *addr = (struct sockaddr_in *)uaddr;
	struct inet_sock *isk = inet_sk(sk);
	unsigned short snum;
	int chk_addr_ret;
	int err;

	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	pr_debug("ping_v4_bind(sk=%p,sa_addr=%08x,sa_port=%d)\n",
		 sk, addr->sin_addr.s_addr, ntohs(addr->sin_port));

	chk_addr_ret = inet_addr_type(sock_net(sk), addr->sin_addr.s_addr);
	if (addr->sin_addr.s_addr == htonl(INADDR_ANY))
		chk_addr_ret = RTN_LOCAL;

	/* Non-local addresses are allowed only with ip_nonlocal_bind,
	 * IP_FREEBIND or IP_TRANSPARENT; multicast/broadcast never. */
	if ((sysctl_ip_nonlocal_bind == 0 &&
	    isk->freebind == 0 && isk->transparent == 0 &&
	     chk_addr_ret != RTN_LOCAL) ||
	    chk_addr_ret == RTN_MULTICAST ||
	    chk_addr_ret == RTN_BROADCAST)
		return -EADDRNOTAVAIL;

	lock_sock(sk);

	err = -EINVAL;
	if (isk->inet_num != 0)
		goto out;	/* already bound */

	err = -EADDRINUSE;
	isk->inet_rcv_saddr = isk->inet_saddr = addr->sin_addr.s_addr;
	snum = ntohs(addr->sin_port);
	if (ping_v4_get_port(sk, snum) != 0) {
		/* roll back the address assignment on failure */
		isk->inet_saddr = isk->inet_rcv_saddr = 0;
		goto out;
	}

	pr_debug("after bind(): num = %d, daddr = %pI4, dif = %d\n",
		 (int)isk->inet_num,
		 &isk->inet_rcv_saddr,
		 (int)sk->sk_bound_dev_if);

	err = 0;
	if (isk->inet_rcv_saddr)
		sk->sk_userlocks |= SOCK_BINDADDR_LOCK;
	if (snum)
		sk->sk_userlocks |= SOCK_BINDPORT_LOCK;
	isk->inet_sport = htons(isk->inet_num);
	/* a fresh bind clears any previous connect() state */
	isk->inet_daddr = 0;
	isk->inet_dport = 0;
	sk_dst_reset(sk);
out:
	release_sock(sk);
	pr_debug("ping_v4_bind -> %d\n", err);
	return err;
}
|  | 307 |  | 
|  | 308 | /* | 
|  | 309 | * Is this a supported type of ICMP message? | 
|  | 310 | */ | 
|  | 311 |  | 
static inline int ping_supported(int type, int code)
{
	/* Ping sockets may only carry ICMP Echo Requests with a zero
	 * code field; everything else is rejected.
	 */
	return (type == ICMP_ECHO && code == 0) ? 1 : 0;
}
|  | 318 |  | 
|  | 319 | /* | 
|  | 320 | * This routine is called by the ICMP module when it gets some | 
|  | 321 | * sort of error condition. | 
|  | 322 | */ | 
|  | 323 |  | 
|  | 324 | static int ping_queue_rcv_skb(struct sock *sk, struct sk_buff *skb); | 
|  | 325 |  | 
/*
 * ICMP error handler: called by the ICMP module when an error (dest
 * unreachable, time exceeded, ...) arrives in response to an echo
 * request sent from a ping socket.  Translates the ICMP error into an
 * errno and reports it to the owning socket.
 */
void ping_err(struct sk_buff *skb, u32 info)
{
	struct iphdr *iph = (struct iphdr *)skb->data;
	struct icmphdr *icmph = (struct icmphdr *)(skb->data+(iph->ihl<<2));
	struct inet_sock *inet_sock;
	int type = icmph->type;
	int code = icmph->code;
	struct net *net = dev_net(skb->dev);
	struct sock *sk;
	int harderr;
	int err;

	/* We assume the packet has already been checked by icmp_unreach */

	if (!ping_supported(icmph->type, icmph->code))
		return;

	pr_debug("ping_err(type=%04x,code=%04x,id=%04x,seq=%04x)\n", type,
		 code, ntohs(icmph->un.echo.id), ntohs(icmph->un.echo.sequence));

	/* Look up the socket by the echoed ident.  saddr/daddr belong to
	 * the quoted (inner) packet, hence the swapped arguments. */
	sk = ping_v4_lookup(net, iph->daddr, iph->saddr,
			    ntohs(icmph->un.echo.id), skb->dev->ifindex);
	if (sk == NULL) {
		pr_debug("no socket, dropping\n");
		return;	/* No socket for error */
	}
	pr_debug("err on socket %p\n", sk);

	err = 0;
	harderr = 0;	/* "hard" errors are reported even without recverr */
	inet_sock = inet_sk(sk);

	switch (type) {
	default:
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	case ICMP_SOURCE_QUENCH:
		/* This is not a real error but ping wants to see it.
		 * Report it with some fake errno. */
		err = EREMOTEIO;
		break;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		harderr = 1;
		break;
	case ICMP_DEST_UNREACH:
		if (code == ICMP_FRAG_NEEDED) { /* Path MTU discovery */
			ipv4_sk_update_pmtu(skb, sk, info);
			if (inet_sock->pmtudisc != IP_PMTUDISC_DONT) {
				err = EMSGSIZE;
				harderr = 1;
				break;
			}
			goto out;
		}
		err = EHOSTUNREACH;
		if (code <= NR_ICMP_UNREACH) {
			harderr = icmp_err_convert[code].fatal;
			err = icmp_err_convert[code].errno;
		}
		break;
	case ICMP_REDIRECT:
		/* See ICMP_SOURCE_QUENCH */
		/* NOTE(review): a redirect is not really an error, yet
		 * this path still falls through and reports EREMOTEIO
		 * when recverr is set; confirm whether it should return
		 * right after the route update instead. */
		ipv4_sk_redirect(skb, sk);
		err = EREMOTEIO;
		break;
	}

	/*
	 *      RFC1122: OK.  Passes ICMP errors back to application, as per
	 *	4.1.3.3.
	 */
	if (!inet_sock->recverr) {
		if (!harderr || sk->sk_state != TCP_ESTABLISHED)
			goto out;
	} else {
		ip_icmp_error(sk, skb, err, 0 /* no remote port */,
			 info, (u8 *)icmph);
	}
	sk->sk_err = err;
	sk->sk_error_report(sk);
out:
	sock_put(sk);	/* drop the reference from ping_v4_lookup() */
}
|  | 411 |  | 
|  | 412 | /* | 
|  | 413 | *	Copy and checksum an ICMP Echo packet from user space into a buffer. | 
|  | 414 | */ | 
|  | 415 |  | 
/* State threaded through ip_append_data() while building an outgoing
 * echo request: the ICMP header to emit, the user iovec still to be
 * copied, and the checksum accumulated over the copied payload. */
struct pingfakehdr {
	struct icmphdr icmph;
	struct iovec *iov;
	__wsum wcheck;
};
|  | 421 |  | 
/*
 * ip_append_data() getfrag callback: copy a fragment of the echo
 * payload from the user iovec into @to, folding the copied bytes into
 * pfh->wcheck.  The first fragment leaves room for the ICMP header,
 * which is written later by ping_push_pending_frames().
 * Returns 0 on success, -EFAULT on a failed userspace copy.
 */
static int ping_getfrag(void *from, char *to,
			int offset, int fraglen, int odd, struct sk_buff *skb)
{
	struct pingfakehdr *pfh = (struct pingfakehdr *)from;

	if (offset == 0) {
		/* First fragment must at least hold the ICMP header. */
		if (fraglen < sizeof(struct icmphdr))
			BUG();
		if (csum_partial_copy_fromiovecend(to + sizeof(struct icmphdr),
			    pfh->iov, 0, fraglen - sizeof(struct icmphdr),
			    &pfh->wcheck))
			return -EFAULT;

		return 0;
	}
	/* Later fragments: offsets are relative to the start of the ICMP
	 * message, so they can never fall inside the header. */
	if (offset < sizeof(struct icmphdr))
		BUG();
	if (csum_partial_copy_fromiovecend
			(to, pfh->iov, offset - sizeof(struct icmphdr),
			 fraglen, &pfh->wcheck))
		return -EFAULT;
	return 0;
}
|  | 445 |  | 
/*
 * Finalize the queued datagram: fold the ICMP header into the checksum
 * accumulated over the payload, write the completed header into the
 * first skb, and hand the frame(s) to IP for transmission.
 */
static int ping_push_pending_frames(struct sock *sk, struct pingfakehdr *pfh,
				    struct flowi4 *fl4)
{
	struct sk_buff *skb = skb_peek(&sk->sk_write_queue);

	pfh->wcheck = csum_partial((char *)&pfh->icmph,
		sizeof(struct icmphdr), pfh->wcheck);
	pfh->icmph.checksum = csum_fold(pfh->wcheck);
	memcpy(icmp_hdr(skb), &pfh->icmph, sizeof(struct icmphdr));
	skb->ip_summed = CHECKSUM_NONE;	/* checksum already computed above */
	return ip_push_pending_frames(sk, fl4);
}
|  | 458 |  | 
/*
 * Transmit one ICMP Echo Request supplied by userspace.  The caller
 * provides a full ICMP header at the front of the iovec; only its
 * type, code and sequence number are honoured — the ident is forced to
 * the socket's bound ident and the checksum is recomputed.
 * Returns the number of bytes sent (== len) or a negative errno.
 */
static int ping_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
			size_t len)
{
	struct net *net = sock_net(sk);
	struct flowi4 fl4;
	struct inet_sock *inet = inet_sk(sk);
	struct ipcm_cookie ipc;
	struct icmphdr user_icmph;
	struct pingfakehdr pfh;
	struct rtable *rt = NULL;
	struct ip_options_data opt_copy;
	int free = 0;
	__be32 saddr, daddr, faddr;
	u8  tos;
	int err;

	pr_debug("ping_sendmsg(sk=%p,sk->num=%u)\n", inet, inet->inet_num);


	if (len > 0xFFFF)
		return -EMSGSIZE;	/* must fit one IP datagram */

	/*
	 *	Check the flags.
	 */

	/* Mirror BSD error message compatibility */
	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	/*
	 *	Fetch the ICMP header provided by the userland.
	 *	iovec is modified!
	 */

	if (memcpy_fromiovec((u8 *)&user_icmph, msg->msg_iov,
	    sizeof(struct icmphdr)))
		return -EFAULT;
	if (!ping_supported(user_icmph.type, user_icmph.code))
		return -EINVAL;

	/*
	 *	Get and verify the address.
	 */

	if (msg->msg_name) {
		struct sockaddr_in *usin = (struct sockaddr_in *)msg->msg_name;
		if (msg->msg_namelen < sizeof(*usin))
			return -EINVAL;
		if (usin->sin_family != AF_INET)
			return -EINVAL;
		daddr = usin->sin_addr.s_addr;
		/* no remote port */
	} else {
		/* No destination supplied: socket must be connected. */
		if (sk->sk_state != TCP_ESTABLISHED)
			return -EDESTADDRREQ;
		daddr = inet->inet_daddr;
		/* no remote port */
	}

	ipc.addr = inet->inet_saddr;
	ipc.opt = NULL;
	ipc.oif = sk->sk_bound_dev_if;
	ipc.tx_flags = 0;
	err = sock_tx_timestamp(sk, &ipc.tx_flags);
	if (err)
		return err;

	/* Per-packet IP options from cmsg take precedence over the
	 * socket's stored options. */
	if (msg->msg_controllen) {
		err = ip_cmsg_send(sock_net(sk), msg, &ipc);
		if (err)
			return err;
		if (ipc.opt)
			free = 1;	/* ip_cmsg_send() allocated it */
	}
	if (!ipc.opt) {
		struct ip_options_rcu *inet_opt;

		rcu_read_lock();
		inet_opt = rcu_dereference(inet->inet_opt);
		if (inet_opt) {
			memcpy(&opt_copy, inet_opt,
			       sizeof(*inet_opt) + inet_opt->opt.optlen);
			ipc.opt = &opt_copy.opt;
		}
		rcu_read_unlock();
	}

	saddr = ipc.addr;
	ipc.addr = faddr = daddr;

	if (ipc.opt && ipc.opt->opt.srr) {
		/* Source routing: route towards the first hop instead
		 * of the final destination. */
		if (!daddr)
			return -EINVAL;
		faddr = ipc.opt->opt.faddr;
	}
	tos = RT_TOS(inet->tos);
	if (sock_flag(sk, SOCK_LOCALROUTE) ||
	    (msg->msg_flags & MSG_DONTROUTE) ||
	    (ipc.opt && ipc.opt->opt.is_strictroute)) {
		tos |= RTO_ONLINK;
	}

	/* Choose the outgoing interface / source for multicast vs
	 * unicast destinations. */
	if (ipv4_is_multicast(daddr)) {
		if (!ipc.oif)
			ipc.oif = inet->mc_index;
		if (!saddr)
			saddr = inet->mc_addr;
	} else if (!ipc.oif)
		ipc.oif = inet->uc_index;

	flowi4_init_output(&fl4, ipc.oif, sk->sk_mark, tos,
			   RT_SCOPE_UNIVERSE, sk->sk_protocol,
			   inet_sk_flowi_flags(sk), faddr, saddr, 0, 0);

	security_sk_classify_flow(sk, flowi4_to_flowi(&fl4));
	rt = ip_route_output_flow(net, &fl4, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		rt = NULL;
		if (err == -ENETUNREACH)
			IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
		goto out;
	}

	/* Broadcast destinations require SO_BROADCAST. */
	err = -EACCES;
	if ((rt->rt_flags & RTCF_BROADCAST) &&
	    !sock_flag(sk, SOCK_BROADCAST))
		goto out;

	if (msg->msg_flags & MSG_CONFIRM)
		goto do_confirm;
back_from_confirm:

	if (!ipc.addr)
		ipc.addr = fl4.daddr;

	lock_sock(sk);

	/* Build the outgoing ICMP header: ident comes from the bound
	 * "port"; checksum is filled by ping_push_pending_frames(). */
	pfh.icmph.type = user_icmph.type; /* already checked */
	pfh.icmph.code = user_icmph.code; /* ditto */
	pfh.icmph.checksum = 0;
	pfh.icmph.un.echo.id = inet->inet_sport;
	pfh.icmph.un.echo.sequence = user_icmph.un.echo.sequence;
	pfh.iov = msg->msg_iov;
	pfh.wcheck = 0;

	err = ip_append_data(sk, &fl4, ping_getfrag, &pfh, len,
			0, &ipc, &rt, msg->msg_flags);
	if (err)
		ip_flush_pending_frames(sk);
	else
		err = ping_push_pending_frames(sk, &pfh, &fl4);
	release_sock(sk);

out:
	ip_rt_put(rt);
	if (free)
		kfree(ipc.opt);
	if (!err) {
		icmp_out_count(sock_net(sk), user_icmph.type);
		return len;
	}
	return err;

do_confirm:
	/* MSG_CONFIRM: confirm neighbour reachability; send nothing
	 * unless there is actual payload (or MSG_PROBE is absent). */
	dst_confirm(&rt->dst);
	if (!(msg->msg_flags & MSG_PROBE) || len)
		goto back_from_confirm;
	err = 0;
	goto out;
}
|  | 631 |  | 
/*
 * Receive one queued echo reply (or, with MSG_ERRQUEUE, a queued ICMP
 * error).  Returns the number of payload bytes copied or a negative
 * errno.
 */
static int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
			size_t len, int noblock, int flags, int *addr_len)
{
	struct inet_sock *isk = inet_sk(sk);
	struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name;
	struct sk_buff *skb;
	int copied, err;

	pr_debug("ping_recvmsg(sk=%p,sk->num=%u)\n", isk, isk->inet_num);

	err = -EOPNOTSUPP;
	if (flags & MSG_OOB)
		goto out;

	/* NOTE(review): *addr_len is written before we know whether an
	 * address will actually be returned; later kernels set it only
	 * together with msg_name (alongside matching core recvmsg
	 * changes) — confirm against this tree's socket layer before
	 * touching it. */
	if (addr_len)
		*addr_len = sizeof(*sin);

	if (flags & MSG_ERRQUEUE)
		return ip_recv_error(sk, msg, len);

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		goto out;

	copied = skb->len;
	if (copied > len) {
		msg->msg_flags |= MSG_TRUNC;	/* datagram truncated */
		copied = len;
	}

	/* Don't bother checking the checksum */
	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
	if (err)
		goto done;

	sock_recv_timestamp(msg, sk, skb);

	/* Copy the address. */
	if (sin) {
		sin->sin_family = AF_INET;
		sin->sin_port = 0 /* skb->h.uh->source */;
		sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
		memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
	}
	if (isk->cmsg_flags)
		ip_cmsg_recv(msg, skb);
	err = copied;

done:
	skb_free_datagram(sk, skb);
out:
	pr_debug("ping_recvmsg -> %d\n", err);
	return err;
}
|  | 686 |  | 
|  | 687 | static int ping_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) | 
|  | 688 | { | 
|  | 689 | pr_debug("ping_queue_rcv_skb(sk=%p,sk->num=%d,skb=%p)\n", | 
| Joe Perches | 058bd4d | 2012-03-11 18:36:11 +0000 | [diff] [blame] | 690 | inet_sk(sk), inet_sk(sk)->inet_num, skb); | 
| Vasiliy Kulikov | c319b4d | 2011-05-13 10:01:00 +0000 | [diff] [blame] | 691 | if (sock_queue_rcv_skb(sk, skb) < 0) { | 
| Vasiliy Kulikov | c319b4d | 2011-05-13 10:01:00 +0000 | [diff] [blame] | 692 | kfree_skb(skb); | 
|  | 693 | pr_debug("ping_queue_rcv_skb -> failed\n"); | 
|  | 694 | return -1; | 
|  | 695 | } | 
|  | 696 | return 0; | 
|  | 697 | } | 
|  | 698 |  | 
|  | 699 |  | 
|  | 700 | /* | 
|  | 701 | *	All we need to do is get the socket. | 
|  | 702 | */ | 
|  | 703 |  | 
/*
 * Entry point from icmp_rcv() for incoming Echo Replies: find the
 * socket registered for the echoed ident and queue a clone of the skb
 * on it.  The caller (icmp_rcv) owns and frees the original skb.
 */
void ping_rcv(struct sk_buff *skb)
{
	struct sock *sk;
	struct net *net = dev_net(skb->dev);
	struct iphdr *iph = ip_hdr(skb);
	struct icmphdr *icmph = icmp_hdr(skb);
	__be32 saddr = iph->saddr;
	__be32 daddr = iph->daddr;

	/* We assume the packet has already been checked by icmp_rcv */

	pr_debug("ping_rcv(skb=%p,id=%04x,seq=%04x)\n",
		 skb, ntohs(icmph->un.echo.id), ntohs(icmph->un.echo.sequence));

	/* Push ICMP header back */
	skb_push(skb, skb->data - (u8 *)icmph);

	sk = ping_v4_lookup(net, saddr, daddr, ntohs(icmph->un.echo.id),
			    skb->dev->ifindex);
	if (sk != NULL) {
		pr_debug("rcv on socket %p\n", sk);
		/* skb_get(): the queueing path consumes one reference */
		ping_queue_rcv_skb(sk, skb_get(skb));
		sock_put(sk);	/* drop ref from ping_v4_lookup() */
		return;
	}
	pr_debug("no socket, dropping\n");

	/* We're called from icmp_rcv(). kfree_skb() is done there. */
}
|  | 733 |  | 
/* Protocol operations for IPv4 "ping" (unprivileged ICMP echo)
 * sockets.  Generic inet/udp helpers are reused where the behaviour
 * is identical (connect, disconnect, sockopts). */
struct proto ping_prot = {
	.name =		"PING",
	.owner =	THIS_MODULE,
	.init =		ping_init_sock,
	.close =	ping_close,
	.connect =	ip4_datagram_connect,
	.disconnect =	udp_disconnect,
	.setsockopt =	ip_setsockopt,
	.getsockopt =	ip_getsockopt,
	.sendmsg =	ping_sendmsg,
	.recvmsg =	ping_recvmsg,
	.bind =		ping_bind,
	.backlog_rcv =	ping_queue_rcv_skb,
	.hash =		ping_v4_hash,
	.unhash =	ping_v4_unhash,
	.get_port =	ping_v4_get_port,
	.obj_size =	sizeof(struct inet_sock),
};
EXPORT_SYMBOL(ping_prot);
|  | 753 |  | 
|  | 754 | #ifdef CONFIG_PROC_FS | 
|  | 755 |  | 
|  | 756 | static struct sock *ping_get_first(struct seq_file *seq, int start) | 
|  | 757 | { | 
|  | 758 | struct sock *sk; | 
|  | 759 | struct ping_iter_state *state = seq->private; | 
|  | 760 | struct net *net = seq_file_net(seq); | 
|  | 761 |  | 
|  | 762 | for (state->bucket = start; state->bucket < PING_HTABLE_SIZE; | 
|  | 763 | ++state->bucket) { | 
|  | 764 | struct hlist_nulls_node *node; | 
| Changli Gao | 75e308c | 2011-05-18 21:16:01 +0000 | [diff] [blame] | 765 | struct hlist_nulls_head *hslot; | 
|  | 766 |  | 
|  | 767 | hslot = &ping_table.hash[state->bucket]; | 
| Vasiliy Kulikov | c319b4d | 2011-05-13 10:01:00 +0000 | [diff] [blame] | 768 |  | 
|  | 769 | if (hlist_nulls_empty(hslot)) | 
|  | 770 | continue; | 
|  | 771 |  | 
|  | 772 | sk_nulls_for_each(sk, node, hslot) { | 
|  | 773 | if (net_eq(sock_net(sk), net)) | 
|  | 774 | goto found; | 
|  | 775 | } | 
|  | 776 | } | 
|  | 777 | sk = NULL; | 
|  | 778 | found: | 
|  | 779 | return sk; | 
|  | 780 | } | 
|  | 781 |  | 
|  | 782 | static struct sock *ping_get_next(struct seq_file *seq, struct sock *sk) | 
|  | 783 | { | 
|  | 784 | struct ping_iter_state *state = seq->private; | 
|  | 785 | struct net *net = seq_file_net(seq); | 
|  | 786 |  | 
|  | 787 | do { | 
|  | 788 | sk = sk_nulls_next(sk); | 
|  | 789 | } while (sk && (!net_eq(sock_net(sk), net))); | 
|  | 790 |  | 
|  | 791 | if (!sk) | 
|  | 792 | return ping_get_first(seq, state->bucket + 1); | 
|  | 793 | return sk; | 
|  | 794 | } | 
|  | 795 |  | 
|  | 796 | static struct sock *ping_get_idx(struct seq_file *seq, loff_t pos) | 
|  | 797 | { | 
|  | 798 | struct sock *sk = ping_get_first(seq, 0); | 
|  | 799 |  | 
|  | 800 | if (sk) | 
|  | 801 | while (pos && (sk = ping_get_next(seq, sk)) != NULL) | 
|  | 802 | --pos; | 
|  | 803 | return pos ? NULL : sk; | 
|  | 804 | } | 
|  | 805 |  | 
|  | 806 | static void *ping_seq_start(struct seq_file *seq, loff_t *pos) | 
|  | 807 | { | 
|  | 808 | struct ping_iter_state *state = seq->private; | 
|  | 809 | state->bucket = 0; | 
|  | 810 |  | 
|  | 811 | read_lock_bh(&ping_table.lock); | 
|  | 812 |  | 
|  | 813 | return *pos ? ping_get_idx(seq, *pos-1) : SEQ_START_TOKEN; | 
|  | 814 | } | 
|  | 815 |  | 
|  | 816 | static void *ping_seq_next(struct seq_file *seq, void *v, loff_t *pos) | 
|  | 817 | { | 
|  | 818 | struct sock *sk; | 
|  | 819 |  | 
|  | 820 | if (v == SEQ_START_TOKEN) | 
|  | 821 | sk = ping_get_idx(seq, 0); | 
|  | 822 | else | 
|  | 823 | sk = ping_get_next(seq, v); | 
|  | 824 |  | 
|  | 825 | ++*pos; | 
|  | 826 | return sk; | 
|  | 827 | } | 
|  | 828 |  | 
/* seq_file ->stop: drop the lock taken in ping_seq_start(). */
static void ping_seq_stop(struct seq_file *seq, void *v)
{
	read_unlock_bh(&ping_table.lock);
}
|  | 833 |  | 
/*
 * Emit one /proc/net/icmp line for socket @sp in hash bucket @bucket.
 * The trailing %n stores the number of characters written into *len so
 * the caller can pad the line to a fixed width.  The constant 0/0L
 * fields (timer, tm->when, retrnsmt, timeout) have no meaning for ping
 * sockets and are printed as zeros to keep the udp-style column layout.
 */
static void ping_format_sock(struct sock *sp, struct seq_file *f,
		int bucket, int *len)
{
	struct inet_sock *inet = inet_sk(sp);
	__be32 dest = inet->inet_daddr;
	__be32 src = inet->inet_rcv_saddr;
	__u16 destp = ntohs(inet->inet_dport);
	__u16 srcp = ntohs(inet->inet_sport);

	seq_printf(f, "%5d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %d%n",
		bucket, src, srcp, dest, destp, sp->sk_state,
		sk_wmem_alloc_get(sp),
		sk_rmem_alloc_get(sp),
		0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
		atomic_read(&sp->sk_refcnt), sp,
		atomic_read(&sp->sk_drops), len);
}
|  | 852 |  | 
|  | 853 | static int ping_seq_show(struct seq_file *seq, void *v) | 
|  | 854 | { | 
|  | 855 | if (v == SEQ_START_TOKEN) | 
|  | 856 | seq_printf(seq, "%-127s\n", | 
|  | 857 | "  sl  local_address rem_address   st tx_queue " | 
|  | 858 | "rx_queue tr tm->when retrnsmt   uid  timeout " | 
|  | 859 | "inode ref pointer drops"); | 
|  | 860 | else { | 
|  | 861 | struct ping_iter_state *state = seq->private; | 
|  | 862 | int len; | 
|  | 863 |  | 
|  | 864 | ping_format_sock(v, seq, state->bucket, &len); | 
|  | 865 | seq_printf(seq, "%*s\n", 127 - len, ""); | 
|  | 866 | } | 
|  | 867 | return 0; | 
|  | 868 | } | 
|  | 869 |  | 
/* Iterator callbacks for the /proc/net/icmp seq_file. */
static const struct seq_operations ping_seq_ops = {
	.show		= ping_seq_show,
	.start		= ping_seq_start,
	.next		= ping_seq_next,
	.stop		= ping_seq_stop,
};
|  | 876 |  | 
/* Open /proc/net/icmp: per-netns seq_file with ping_iter_state private data. */
static int ping_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &ping_seq_ops,
			   sizeof(struct ping_iter_state));
}
|  | 882 |  | 
/* File operations for /proc/net/icmp. */
static const struct file_operations ping_seq_fops = {
	.open		= ping_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_net,
};
|  | 889 |  | 
|  | 890 | static int ping_proc_register(struct net *net) | 
|  | 891 | { | 
|  | 892 | struct proc_dir_entry *p; | 
|  | 893 | int rc = 0; | 
|  | 894 |  | 
|  | 895 | p = proc_net_fops_create(net, "icmp", S_IRUGO, &ping_seq_fops); | 
|  | 896 | if (!p) | 
|  | 897 | rc = -ENOMEM; | 
|  | 898 | return rc; | 
|  | 899 | } | 
|  | 900 |  | 
/* Remove the per-namespace /proc/net/icmp entry. */
static void ping_proc_unregister(struct net *net)
{
	proc_net_remove(net, "icmp");
}
|  | 905 |  | 
|  | 906 |  | 
/* pernet init hook: set up the proc entry for a new namespace. */
static int __net_init ping_proc_init_net(struct net *net)
{
	return ping_proc_register(net);
}
|  | 911 |  | 
/* pernet exit hook: tear down the proc entry for a dying namespace. */
static void __net_exit ping_proc_exit_net(struct net *net)
{
	ping_proc_unregister(net);
}
|  | 916 |  | 
/* Per-network-namespace lifecycle hooks for the proc interface. */
static struct pernet_operations ping_net_ops = {
	.init = ping_proc_init_net,
	.exit = ping_proc_exit_net,
};
|  | 921 |  | 
/* Register the pernet ops; creates /proc/net/icmp in every namespace. */
int __init ping_proc_init(void)
{
	return register_pernet_subsys(&ping_net_ops);
}
|  | 926 |  | 
/* Unregister the pernet ops; removes /proc/net/icmp everywhere. */
void ping_proc_exit(void)
{
	unregister_pernet_subsys(&ping_net_ops);
}
|  | 931 |  | 
|  | 932 | #endif | 
|  | 933 |  | 
|  | 934 | void __init ping_init(void) | 
|  | 935 | { | 
|  | 936 | int i; | 
|  | 937 |  | 
|  | 938 | for (i = 0; i < PING_HTABLE_SIZE; i++) | 
|  | 939 | INIT_HLIST_NULLS_HEAD(&ping_table.hash[i], i); | 
|  | 940 | rwlock_init(&ping_table.lock); | 
|  | 941 | } |