| Pavel Emelyanov | 22931d3 | 2011-12-15 02:44:35 +0000 | [diff] [blame] | 1 | #include <linux/types.h> | 
 | 2 | #include <linux/spinlock.h> | 
 | 3 | #include <linux/sock_diag.h> | 
 | 4 | #include <linux/unix_diag.h> | 
 | 5 | #include <linux/skbuff.h> | 
| Cyrill Gorcunov | 2ea744a | 2011-12-20 04:33:03 +0000 | [diff] [blame] | 6 | #include <linux/module.h> | 
| Pavel Emelyanov | 22931d3 | 2011-12-15 02:44:35 +0000 | [diff] [blame] | 7 | #include <net/netlink.h> | 
 | 8 | #include <net/af_unix.h> | 
 | 9 | #include <net/tcp_states.h> | 
 | 10 |  | 
| Pavel Emelyanov | f5248b4 | 2011-12-15 02:45:24 +0000 | [diff] [blame] | 11 | static int sk_diag_dump_name(struct sock *sk, struct sk_buff *nlskb) | 
 | 12 | { | 
 | 13 | 	struct unix_address *addr = unix_sk(sk)->addr; | 
| Pavel Emelyanov | f5248b4 | 2011-12-15 02:45:24 +0000 | [diff] [blame] | 14 |  | 
| Thomas Graf | 4245375 | 2012-06-26 23:36:10 +0000 | [diff] [blame] | 15 | 	if (!addr) | 
 | 16 | 		return 0; | 
| Pavel Emelyanov | f5248b4 | 2011-12-15 02:45:24 +0000 | [diff] [blame] | 17 |  | 
| Thomas Graf | 4245375 | 2012-06-26 23:36:10 +0000 | [diff] [blame] | 18 | 	return nla_put(nlskb, UNIX_DIAG_NAME, addr->len - sizeof(short), | 
 | 19 | 		       addr->name->sun_path); | 
| Pavel Emelyanov | f5248b4 | 2011-12-15 02:45:24 +0000 | [diff] [blame] | 20 | } | 
 | 21 |  | 
| Pavel Emelyanov | 5f7b056 | 2011-12-15 02:45:43 +0000 | [diff] [blame] | 22 | static int sk_diag_dump_vfs(struct sock *sk, struct sk_buff *nlskb) | 
 | 23 | { | 
| Al Viro | 40ffe67 | 2012-03-14 21:54:32 -0400 | [diff] [blame] | 24 | 	struct dentry *dentry = unix_sk(sk)->path.dentry; | 
| Pavel Emelyanov | 5f7b056 | 2011-12-15 02:45:43 +0000 | [diff] [blame] | 25 |  | 
 | 26 | 	if (dentry) { | 
| Thomas Graf | 4245375 | 2012-06-26 23:36:10 +0000 | [diff] [blame] | 27 | 		struct unix_diag_vfs uv = { | 
 | 28 | 			.udiag_vfs_ino = dentry->d_inode->i_ino, | 
 | 29 | 			.udiag_vfs_dev = dentry->d_sb->s_dev, | 
 | 30 | 		}; | 
 | 31 |  | 
 | 32 | 		return nla_put(nlskb, UNIX_DIAG_VFS, sizeof(uv), &uv); | 
| Pavel Emelyanov | 5f7b056 | 2011-12-15 02:45:43 +0000 | [diff] [blame] | 33 | 	} | 
 | 34 |  | 
 | 35 | 	return 0; | 
| Pavel Emelyanov | 5f7b056 | 2011-12-15 02:45:43 +0000 | [diff] [blame] | 36 | } | 
 | 37 |  | 
| Pavel Emelyanov | ac02be8 | 2011-12-15 02:45:58 +0000 | [diff] [blame] | 38 | static int sk_diag_dump_peer(struct sock *sk, struct sk_buff *nlskb) | 
 | 39 | { | 
 | 40 | 	struct sock *peer; | 
 | 41 | 	int ino; | 
 | 42 |  | 
 | 43 | 	peer = unix_peer_get(sk); | 
 | 44 | 	if (peer) { | 
 | 45 | 		unix_state_lock(peer); | 
 | 46 | 		ino = sock_i_ino(peer); | 
 | 47 | 		unix_state_unlock(peer); | 
 | 48 | 		sock_put(peer); | 
 | 49 |  | 
| Thomas Graf | 4245375 | 2012-06-26 23:36:10 +0000 | [diff] [blame] | 50 | 		return nla_put_u32(nlskb, UNIX_DIAG_PEER, ino); | 
| Pavel Emelyanov | ac02be8 | 2011-12-15 02:45:58 +0000 | [diff] [blame] | 51 | 	} | 
 | 52 |  | 
 | 53 | 	return 0; | 
| Pavel Emelyanov | ac02be8 | 2011-12-15 02:45:58 +0000 | [diff] [blame] | 54 | } | 
 | 55 |  | 
| Pavel Emelyanov | 2aac7a2 | 2011-12-15 02:46:14 +0000 | [diff] [blame] | 56 | static int sk_diag_dump_icons(struct sock *sk, struct sk_buff *nlskb) | 
 | 57 | { | 
 | 58 | 	struct sk_buff *skb; | 
| Thomas Graf | 4245375 | 2012-06-26 23:36:10 +0000 | [diff] [blame] | 59 | 	struct nlattr *attr; | 
| Pavel Emelyanov | 2aac7a2 | 2011-12-15 02:46:14 +0000 | [diff] [blame] | 60 | 	u32 *buf; | 
 | 61 | 	int i; | 
 | 62 |  | 
 | 63 | 	if (sk->sk_state == TCP_LISTEN) { | 
 | 64 | 		spin_lock(&sk->sk_receive_queue.lock); | 
| Thomas Graf | 4245375 | 2012-06-26 23:36:10 +0000 | [diff] [blame] | 65 |  | 
 | 66 | 		attr = nla_reserve(nlskb, UNIX_DIAG_ICONS, | 
 | 67 | 				   sk->sk_receive_queue.qlen * sizeof(u32)); | 
 | 68 | 		if (!attr) | 
 | 69 | 			goto errout; | 
 | 70 |  | 
 | 71 | 		buf = nla_data(attr); | 
| Pavel Emelyanov | 2aac7a2 | 2011-12-15 02:46:14 +0000 | [diff] [blame] | 72 | 		i = 0; | 
 | 73 | 		skb_queue_walk(&sk->sk_receive_queue, skb) { | 
 | 74 | 			struct sock *req, *peer; | 
 | 75 |  | 
 | 76 | 			req = skb->sk; | 
 | 77 | 			/* | 
 | 78 | 			 * The state lock is outer for the same sk's | 
 | 79 | 			 * queue lock. With the other's queue locked it's | 
 | 80 | 			 * OK to lock the state. | 
 | 81 | 			 */ | 
 | 82 | 			unix_state_lock_nested(req); | 
 | 83 | 			peer = unix_sk(req)->peer; | 
| David S. Miller | e09e9d1 | 2011-12-26 14:41:55 -0500 | [diff] [blame] | 84 | 			buf[i++] = (peer ? sock_i_ino(peer) : 0); | 
| Pavel Emelyanov | 2aac7a2 | 2011-12-15 02:46:14 +0000 | [diff] [blame] | 85 | 			unix_state_unlock(req); | 
 | 86 | 		} | 
 | 87 | 		spin_unlock(&sk->sk_receive_queue.lock); | 
 | 88 | 	} | 
 | 89 |  | 
 | 90 | 	return 0; | 
 | 91 |  | 
| Thomas Graf | 4245375 | 2012-06-26 23:36:10 +0000 | [diff] [blame] | 92 | errout: | 
| Pavel Emelyanov | 2aac7a2 | 2011-12-15 02:46:14 +0000 | [diff] [blame] | 93 | 	spin_unlock(&sk->sk_receive_queue.lock); | 
 | 94 | 	return -EMSGSIZE; | 
 | 95 | } | 
 | 96 |  | 
| Pavel Emelyanov | cbf3919 | 2011-12-15 02:46:31 +0000 | [diff] [blame] | 97 | static int sk_diag_show_rqlen(struct sock *sk, struct sk_buff *nlskb) | 
 | 98 | { | 
| Thomas Graf | 4245375 | 2012-06-26 23:36:10 +0000 | [diff] [blame] | 99 | 	struct unix_diag_rqlen rql; | 
| Pavel Emelyanov | c9da99e | 2011-12-30 00:54:39 +0000 | [diff] [blame] | 100 |  | 
 | 101 | 	if (sk->sk_state == TCP_LISTEN) { | 
| Thomas Graf | 4245375 | 2012-06-26 23:36:10 +0000 | [diff] [blame] | 102 | 		rql.udiag_rqueue = sk->sk_receive_queue.qlen; | 
 | 103 | 		rql.udiag_wqueue = sk->sk_max_ack_backlog; | 
| Pavel Emelyanov | c9da99e | 2011-12-30 00:54:39 +0000 | [diff] [blame] | 104 | 	} else { | 
| Thomas Graf | 4245375 | 2012-06-26 23:36:10 +0000 | [diff] [blame] | 105 | 		rql.udiag_rqueue = (u32) unix_inq_len(sk); | 
 | 106 | 		rql.udiag_wqueue = (u32) unix_outq_len(sk); | 
| Pavel Emelyanov | c9da99e | 2011-12-30 00:54:39 +0000 | [diff] [blame] | 107 | 	} | 
 | 108 |  | 
| Thomas Graf | 4245375 | 2012-06-26 23:36:10 +0000 | [diff] [blame] | 109 | 	return nla_put(nlskb, UNIX_DIAG_RQLEN, sizeof(rql), &rql); | 
| Pavel Emelyanov | cbf3919 | 2011-12-15 02:46:31 +0000 | [diff] [blame] | 110 | } | 
 | 111 |  | 
| Pavel Emelyanov | 45a96b9 | 2011-12-15 02:44:52 +0000 | [diff] [blame] | 112 | static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req, | 
| Eric W. Biederman | 15e4730 | 2012-09-07 20:12:54 +0000 | [diff] [blame] | 113 | 		u32 portid, u32 seq, u32 flags, int sk_ino) | 
| Pavel Emelyanov | 45a96b9 | 2011-12-15 02:44:52 +0000 | [diff] [blame] | 114 | { | 
| Pavel Emelyanov | 45a96b9 | 2011-12-15 02:44:52 +0000 | [diff] [blame] | 115 | 	struct nlmsghdr *nlh; | 
 | 116 | 	struct unix_diag_msg *rep; | 
 | 117 |  | 
| Eric W. Biederman | 15e4730 | 2012-09-07 20:12:54 +0000 | [diff] [blame] | 118 | 	nlh = nlmsg_put(skb, portid, seq, SOCK_DIAG_BY_FAMILY, sizeof(*rep), | 
| Thomas Graf | 4245375 | 2012-06-26 23:36:10 +0000 | [diff] [blame] | 119 | 			flags); | 
| David S. Miller | b61bb01 | 2012-06-26 21:41:00 -0700 | [diff] [blame] | 120 | 	if (!nlh) | 
| Thomas Graf | 4245375 | 2012-06-26 23:36:10 +0000 | [diff] [blame] | 121 | 		return -EMSGSIZE; | 
| Pavel Emelyanov | 45a96b9 | 2011-12-15 02:44:52 +0000 | [diff] [blame] | 122 |  | 
| David S. Miller | b61bb01 | 2012-06-26 21:41:00 -0700 | [diff] [blame] | 123 | 	rep = nlmsg_data(nlh); | 
| Pavel Emelyanov | 45a96b9 | 2011-12-15 02:44:52 +0000 | [diff] [blame] | 124 | 	rep->udiag_family = AF_UNIX; | 
 | 125 | 	rep->udiag_type = sk->sk_type; | 
 | 126 | 	rep->udiag_state = sk->sk_state; | 
 | 127 | 	rep->udiag_ino = sk_ino; | 
 | 128 | 	sock_diag_save_cookie(sk, rep->udiag_cookie); | 
 | 129 |  | 
| Pavel Emelyanov | f5248b4 | 2011-12-15 02:45:24 +0000 | [diff] [blame] | 130 | 	if ((req->udiag_show & UDIAG_SHOW_NAME) && | 
| Pavel Emelyanov | 257b529 | 2011-12-30 09:27:43 +0000 | [diff] [blame] | 131 | 	    sk_diag_dump_name(sk, skb)) | 
| David S. Miller | b61bb01 | 2012-06-26 21:41:00 -0700 | [diff] [blame] | 132 | 		goto out_nlmsg_trim; | 
| Pavel Emelyanov | f5248b4 | 2011-12-15 02:45:24 +0000 | [diff] [blame] | 133 |  | 
| Pavel Emelyanov | 5f7b056 | 2011-12-15 02:45:43 +0000 | [diff] [blame] | 134 | 	if ((req->udiag_show & UDIAG_SHOW_VFS) && | 
| Pavel Emelyanov | 257b529 | 2011-12-30 09:27:43 +0000 | [diff] [blame] | 135 | 	    sk_diag_dump_vfs(sk, skb)) | 
| David S. Miller | b61bb01 | 2012-06-26 21:41:00 -0700 | [diff] [blame] | 136 | 		goto out_nlmsg_trim; | 
| Pavel Emelyanov | 5f7b056 | 2011-12-15 02:45:43 +0000 | [diff] [blame] | 137 |  | 
| Pavel Emelyanov | ac02be8 | 2011-12-15 02:45:58 +0000 | [diff] [blame] | 138 | 	if ((req->udiag_show & UDIAG_SHOW_PEER) && | 
| Pavel Emelyanov | 257b529 | 2011-12-30 09:27:43 +0000 | [diff] [blame] | 139 | 	    sk_diag_dump_peer(sk, skb)) | 
| David S. Miller | b61bb01 | 2012-06-26 21:41:00 -0700 | [diff] [blame] | 140 | 		goto out_nlmsg_trim; | 
| Pavel Emelyanov | ac02be8 | 2011-12-15 02:45:58 +0000 | [diff] [blame] | 141 |  | 
| Pavel Emelyanov | 2aac7a2 | 2011-12-15 02:46:14 +0000 | [diff] [blame] | 142 | 	if ((req->udiag_show & UDIAG_SHOW_ICONS) && | 
| Pavel Emelyanov | 257b529 | 2011-12-30 09:27:43 +0000 | [diff] [blame] | 143 | 	    sk_diag_dump_icons(sk, skb)) | 
| David S. Miller | b61bb01 | 2012-06-26 21:41:00 -0700 | [diff] [blame] | 144 | 		goto out_nlmsg_trim; | 
| Pavel Emelyanov | 2aac7a2 | 2011-12-15 02:46:14 +0000 | [diff] [blame] | 145 |  | 
| Pavel Emelyanov | cbf3919 | 2011-12-15 02:46:31 +0000 | [diff] [blame] | 146 | 	if ((req->udiag_show & UDIAG_SHOW_RQLEN) && | 
| Pavel Emelyanov | 257b529 | 2011-12-30 09:27:43 +0000 | [diff] [blame] | 147 | 	    sk_diag_show_rqlen(sk, skb)) | 
| David S. Miller | b61bb01 | 2012-06-26 21:41:00 -0700 | [diff] [blame] | 148 | 		goto out_nlmsg_trim; | 
| Pavel Emelyanov | 257b529 | 2011-12-30 09:27:43 +0000 | [diff] [blame] | 149 |  | 
 | 150 | 	if ((req->udiag_show & UDIAG_SHOW_MEMINFO) && | 
 | 151 | 	    sock_diag_put_meminfo(sk, skb, UNIX_DIAG_MEMINFO)) | 
| David S. Miller | b61bb01 | 2012-06-26 21:41:00 -0700 | [diff] [blame] | 152 | 		goto out_nlmsg_trim; | 
| Pavel Emelyanov | cbf3919 | 2011-12-15 02:46:31 +0000 | [diff] [blame] | 153 |  | 
| Pavel Emelyanov | e4e541a | 2012-10-23 22:29:56 +0400 | [diff] [blame] | 154 | 	if (nla_put_u8(skb, UNIX_DIAG_SHUTDOWN, sk->sk_shutdown)) | 
 | 155 | 		goto out_nlmsg_trim; | 
 | 156 |  | 
| Thomas Graf | 4245375 | 2012-06-26 23:36:10 +0000 | [diff] [blame] | 157 | 	return nlmsg_end(skb, nlh); | 
| Pavel Emelyanov | 45a96b9 | 2011-12-15 02:44:52 +0000 | [diff] [blame] | 158 |  | 
| David S. Miller | b61bb01 | 2012-06-26 21:41:00 -0700 | [diff] [blame] | 159 | out_nlmsg_trim: | 
| Thomas Graf | 4245375 | 2012-06-26 23:36:10 +0000 | [diff] [blame] | 160 | 	nlmsg_cancel(skb, nlh); | 
| Pavel Emelyanov | 45a96b9 | 2011-12-15 02:44:52 +0000 | [diff] [blame] | 161 | 	return -EMSGSIZE; | 
 | 162 | } | 
 | 163 |  | 
 | 164 | static int sk_diag_dump(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req, | 
| Eric W. Biederman | 15e4730 | 2012-09-07 20:12:54 +0000 | [diff] [blame] | 165 | 		u32 portid, u32 seq, u32 flags) | 
| Pavel Emelyanov | 45a96b9 | 2011-12-15 02:44:52 +0000 | [diff] [blame] | 166 | { | 
 | 167 | 	int sk_ino; | 
 | 168 |  | 
 | 169 | 	unix_state_lock(sk); | 
 | 170 | 	sk_ino = sock_i_ino(sk); | 
 | 171 | 	unix_state_unlock(sk); | 
 | 172 |  | 
 | 173 | 	if (!sk_ino) | 
 | 174 | 		return 0; | 
 | 175 |  | 
| Eric W. Biederman | 15e4730 | 2012-09-07 20:12:54 +0000 | [diff] [blame] | 176 | 	return sk_diag_fill(sk, skb, req, portid, seq, flags, sk_ino); | 
| Pavel Emelyanov | 45a96b9 | 2011-12-15 02:44:52 +0000 | [diff] [blame] | 177 | } | 
 | 178 |  | 
| Pavel Emelyanov | 22931d3 | 2011-12-15 02:44:35 +0000 | [diff] [blame] | 179 | static int unix_diag_dump(struct sk_buff *skb, struct netlink_callback *cb) | 
 | 180 | { | 
| Pavel Emelyanov | 45a96b9 | 2011-12-15 02:44:52 +0000 | [diff] [blame] | 181 | 	struct unix_diag_req *req; | 
 | 182 | 	int num, s_num, slot, s_slot; | 
| Andrey Vagin | 51d7ccc | 2012-07-16 04:28:49 +0000 | [diff] [blame] | 183 | 	struct net *net = sock_net(skb->sk); | 
| Pavel Emelyanov | 45a96b9 | 2011-12-15 02:44:52 +0000 | [diff] [blame] | 184 |  | 
| David S. Miller | b61bb01 | 2012-06-26 21:41:00 -0700 | [diff] [blame] | 185 | 	req = nlmsg_data(cb->nlh); | 
| Pavel Emelyanov | 45a96b9 | 2011-12-15 02:44:52 +0000 | [diff] [blame] | 186 |  | 
 | 187 | 	s_slot = cb->args[0]; | 
 | 188 | 	num = s_num = cb->args[1]; | 
 | 189 |  | 
 | 190 | 	spin_lock(&unix_table_lock); | 
| Eric Dumazet | 7123aaa | 2012-06-08 05:03:21 +0000 | [diff] [blame] | 191 | 	for (slot = s_slot; | 
 | 192 | 	     slot < ARRAY_SIZE(unix_socket_table); | 
 | 193 | 	     s_num = 0, slot++) { | 
| Pavel Emelyanov | 45a96b9 | 2011-12-15 02:44:52 +0000 | [diff] [blame] | 194 | 		struct sock *sk; | 
| Pavel Emelyanov | 45a96b9 | 2011-12-15 02:44:52 +0000 | [diff] [blame] | 195 |  | 
 | 196 | 		num = 0; | 
| Sasha Levin | b67bfe0 | 2013-02-27 17:06:00 -0800 | [diff] [blame] | 197 | 		sk_for_each(sk, &unix_socket_table[slot]) { | 
| Andrey Vagin | 51d7ccc | 2012-07-16 04:28:49 +0000 | [diff] [blame] | 198 | 			if (!net_eq(sock_net(sk), net)) | 
 | 199 | 				continue; | 
| Pavel Emelyanov | 45a96b9 | 2011-12-15 02:44:52 +0000 | [diff] [blame] | 200 | 			if (num < s_num) | 
 | 201 | 				goto next; | 
 | 202 | 			if (!(req->udiag_states & (1 << sk->sk_state))) | 
 | 203 | 				goto next; | 
 | 204 | 			if (sk_diag_dump(sk, skb, req, | 
| Eric W. Biederman | 15e4730 | 2012-09-07 20:12:54 +0000 | [diff] [blame] | 205 | 					 NETLINK_CB(cb->skb).portid, | 
| Pavel Emelyanov | 257b529 | 2011-12-30 09:27:43 +0000 | [diff] [blame] | 206 | 					 cb->nlh->nlmsg_seq, | 
 | 207 | 					 NLM_F_MULTI) < 0) | 
| Pavel Emelyanov | 45a96b9 | 2011-12-15 02:44:52 +0000 | [diff] [blame] | 208 | 				goto done; | 
 | 209 | next: | 
 | 210 | 			num++; | 
 | 211 | 		} | 
 | 212 | 	} | 
 | 213 | done: | 
 | 214 | 	spin_unlock(&unix_table_lock); | 
 | 215 | 	cb->args[0] = slot; | 
 | 216 | 	cb->args[1] = num; | 
 | 217 |  | 
 | 218 | 	return skb->len; | 
| Pavel Emelyanov | 22931d3 | 2011-12-15 02:44:35 +0000 | [diff] [blame] | 219 | } | 
 | 220 |  | 
| Pavel Emelyanov | 5d3cae8 | 2011-12-15 02:45:07 +0000 | [diff] [blame] | 221 | static struct sock *unix_lookup_by_ino(int ino) | 
 | 222 | { | 
 | 223 | 	int i; | 
 | 224 | 	struct sock *sk; | 
 | 225 |  | 
 | 226 | 	spin_lock(&unix_table_lock); | 
| Eric Dumazet | 7123aaa | 2012-06-08 05:03:21 +0000 | [diff] [blame] | 227 | 	for (i = 0; i < ARRAY_SIZE(unix_socket_table); i++) { | 
| Sasha Levin | b67bfe0 | 2013-02-27 17:06:00 -0800 | [diff] [blame] | 228 | 		sk_for_each(sk, &unix_socket_table[i]) | 
| Pavel Emelyanov | 5d3cae8 | 2011-12-15 02:45:07 +0000 | [diff] [blame] | 229 | 			if (ino == sock_i_ino(sk)) { | 
 | 230 | 				sock_hold(sk); | 
 | 231 | 				spin_unlock(&unix_table_lock); | 
 | 232 |  | 
 | 233 | 				return sk; | 
 | 234 | 			} | 
 | 235 | 	} | 
 | 236 |  | 
 | 237 | 	spin_unlock(&unix_table_lock); | 
 | 238 | 	return NULL; | 
 | 239 | } | 
 | 240 |  | 
| Pavel Emelyanov | 22931d3 | 2011-12-15 02:44:35 +0000 | [diff] [blame] | 241 | static int unix_diag_get_exact(struct sk_buff *in_skb, | 
 | 242 | 			       const struct nlmsghdr *nlh, | 
 | 243 | 			       struct unix_diag_req *req) | 
 | 244 | { | 
| Pavel Emelyanov | 5d3cae8 | 2011-12-15 02:45:07 +0000 | [diff] [blame] | 245 | 	int err = -EINVAL; | 
 | 246 | 	struct sock *sk; | 
 | 247 | 	struct sk_buff *rep; | 
 | 248 | 	unsigned int extra_len; | 
| Andrey Vagin | 51d7ccc | 2012-07-16 04:28:49 +0000 | [diff] [blame] | 249 | 	struct net *net = sock_net(in_skb->sk); | 
| Pavel Emelyanov | 5d3cae8 | 2011-12-15 02:45:07 +0000 | [diff] [blame] | 250 |  | 
 | 251 | 	if (req->udiag_ino == 0) | 
 | 252 | 		goto out_nosk; | 
 | 253 |  | 
 | 254 | 	sk = unix_lookup_by_ino(req->udiag_ino); | 
 | 255 | 	err = -ENOENT; | 
 | 256 | 	if (sk == NULL) | 
 | 257 | 		goto out_nosk; | 
 | 258 |  | 
 | 259 | 	err = sock_diag_check_cookie(sk, req->udiag_cookie); | 
 | 260 | 	if (err) | 
 | 261 | 		goto out; | 
 | 262 |  | 
 | 263 | 	extra_len = 256; | 
 | 264 | again: | 
 | 265 | 	err = -ENOMEM; | 
| Thomas Graf | 4245375 | 2012-06-26 23:36:10 +0000 | [diff] [blame] | 266 | 	rep = nlmsg_new(sizeof(struct unix_diag_msg) + extra_len, GFP_KERNEL); | 
| Pavel Emelyanov | 5d3cae8 | 2011-12-15 02:45:07 +0000 | [diff] [blame] | 267 | 	if (!rep) | 
 | 268 | 		goto out; | 
 | 269 |  | 
| Eric W. Biederman | 15e4730 | 2012-09-07 20:12:54 +0000 | [diff] [blame] | 270 | 	err = sk_diag_fill(sk, rep, req, NETLINK_CB(in_skb).portid, | 
| Pavel Emelyanov | 5d3cae8 | 2011-12-15 02:45:07 +0000 | [diff] [blame] | 271 | 			   nlh->nlmsg_seq, 0, req->udiag_ino); | 
 | 272 | 	if (err < 0) { | 
| Thomas Graf | 4245375 | 2012-06-26 23:36:10 +0000 | [diff] [blame] | 273 | 		nlmsg_free(rep); | 
| Pavel Emelyanov | 5d3cae8 | 2011-12-15 02:45:07 +0000 | [diff] [blame] | 274 | 		extra_len += 256; | 
 | 275 | 		if (extra_len >= PAGE_SIZE) | 
 | 276 | 			goto out; | 
 | 277 |  | 
 | 278 | 		goto again; | 
 | 279 | 	} | 
| Eric W. Biederman | 15e4730 | 2012-09-07 20:12:54 +0000 | [diff] [blame] | 280 | 	err = netlink_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid, | 
| Pavel Emelyanov | 5d3cae8 | 2011-12-15 02:45:07 +0000 | [diff] [blame] | 281 | 			      MSG_DONTWAIT); | 
 | 282 | 	if (err > 0) | 
 | 283 | 		err = 0; | 
 | 284 | out: | 
 | 285 | 	if (sk) | 
 | 286 | 		sock_put(sk); | 
 | 287 | out_nosk: | 
 | 288 | 	return err; | 
| Pavel Emelyanov | 22931d3 | 2011-12-15 02:44:35 +0000 | [diff] [blame] | 289 | } | 
 | 290 |  | 
 | 291 | static int unix_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h) | 
 | 292 | { | 
 | 293 | 	int hdrlen = sizeof(struct unix_diag_req); | 
| Andrey Vagin | 51d7ccc | 2012-07-16 04:28:49 +0000 | [diff] [blame] | 294 | 	struct net *net = sock_net(skb->sk); | 
| Pavel Emelyanov | 22931d3 | 2011-12-15 02:44:35 +0000 | [diff] [blame] | 295 |  | 
 | 296 | 	if (nlmsg_len(h) < hdrlen) | 
 | 297 | 		return -EINVAL; | 
 | 298 |  | 
| Pablo Neira Ayuso | 80d326f | 2012-02-24 14:30:15 +0000 | [diff] [blame] | 299 | 	if (h->nlmsg_flags & NLM_F_DUMP) { | 
 | 300 | 		struct netlink_dump_control c = { | 
 | 301 | 			.dump = unix_diag_dump, | 
 | 302 | 		}; | 
| Andrey Vagin | 51d7ccc | 2012-07-16 04:28:49 +0000 | [diff] [blame] | 303 | 		return netlink_dump_start(net->diag_nlsk, skb, h, &c); | 
| Pablo Neira Ayuso | 80d326f | 2012-02-24 14:30:15 +0000 | [diff] [blame] | 304 | 	} else | 
| David S. Miller | b61bb01 | 2012-06-26 21:41:00 -0700 | [diff] [blame] | 305 | 		return unix_diag_get_exact(skb, h, nlmsg_data(h)); | 
| Pavel Emelyanov | 22931d3 | 2011-12-15 02:44:35 +0000 | [diff] [blame] | 306 | } | 
 | 307 |  | 
/* Routes sock_diag requests with sdiag_family == AF_UNIX to this module. */
static const struct sock_diag_handler unix_diag_handler = {
	.family = AF_UNIX,
	.dump = unix_diag_handler_dump,
};
 | 312 |  | 
/* Module init: register the AF_UNIX handler with the sock_diag core. */
static int __init unix_diag_init(void)
{
	return sock_diag_register(&unix_diag_handler);
}
 | 317 |  | 
/* Module exit: unregister the AF_UNIX handler. */
static void __exit unix_diag_exit(void)
{
	sock_diag_unregister(&unix_diag_handler);
}
 | 322 |  | 
module_init(unix_diag_init);
module_exit(unix_diag_exit);
MODULE_LICENSE("GPL");
/* Autoload this module for NETLINK_SOCK_DIAG requests naming family 1 (AF_LOCAL). */
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 1 /* AF_LOCAL */);