/*
 * taskstats.c - Export per-task statistics to userland
 *
 * Copyright (C) Shailabh Nagar, IBM Corp. 2006
 *           (C) Balbir Singh,   IBM Corp. 2006
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/kernel.h>
#include <linux/taskstats_kern.h>
#include <linux/tsacct_kern.h>
#include <linux/delayacct.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/cgroupstats.h>
#include <linux/cgroup.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/pid_namespace.h>
#include <net/genetlink.h>
#include <linux/atomic.h>

/*
 * Maximum length of a cpumask that can be specified in
 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
 */
#define TASKSTATS_CPUMASK_MAXLEN	(100+6*NR_CPUS)

static DEFINE_PER_CPU(__u32, taskstats_seqnum);
static int family_registered;
struct kmem_cache *taskstats_cache;

static struct genl_family family = {
	.id		= GENL_ID_GENERATE,
	.name		= TASKSTATS_GENL_NAME,
	.version	= TASKSTATS_GENL_VERSION,
	.maxattr	= TASKSTATS_CMD_ATTR_MAX,
};

static const struct nla_policy taskstats_cmd_get_policy[TASKSTATS_CMD_ATTR_MAX+1] = {
	[TASKSTATS_CMD_ATTR_PID]  = { .type = NLA_U32 },
	[TASKSTATS_CMD_ATTR_TGID] = { .type = NLA_U32 },
	[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK] = { .type = NLA_STRING },
	[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK] = { .type = NLA_STRING },
};

static const struct nla_policy cgroupstats_cmd_get_policy[CGROUPSTATS_CMD_ATTR_MAX+1] = {
	[CGROUPSTATS_CMD_ATTR_FD] = { .type = NLA_U32 },
};

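/*
 * Per-CPU bookkeeping of registered listeners: each possible CPU keeps
 * its own rwsem-protected list so that exit data is only sent to
 * listeners registered for that CPU.
 */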
struct listener {
	struct list_head list;
	pid_t pid;
	char valid;
};

struct listener_list {
	struct rw_semaphore sem;
	struct list_head list;
};
static DEFINE_PER_CPU(struct listener_list, listener_array);

enum actions {
	REGISTER,
	DEREGISTER,
	CPU_DONT_CARE
};

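/*
 * Allocate a new sk_buff of @size and start a genetlink message in it.
 * Without @info the message is a kernel-initiated notification using the
 * per-cpu sequence number; otherwise it is a reply to @info's sender.
 */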
static int prepare_reply(struct genl_info *info, u8 cmd, struct sk_buff **skbp,
				size_t size)
{
	struct sk_buff *skb;
	void *reply;

	/*
	 * If new attributes are added, please revisit this allocation
	 */
	skb = genlmsg_new(size, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	if (!info) {
		int seq = this_cpu_inc_return(taskstats_seqnum) - 1;

		reply = genlmsg_put(skb, 0, seq, &family, 0, cmd);
	} else
		reply = genlmsg_put_reply(skb, info, &family, 0, cmd);
	if (reply == NULL) {
		nlmsg_free(skb);
		return -EINVAL;
	}

	*skbp = skb;
	return 0;
}

/*
 * Send taskstats data in @skb as a reply to the sender of @info
 */
static int send_reply(struct sk_buff *skb, struct genl_info *info)
{
	struct genlmsghdr *genlhdr = nlmsg_data(nlmsg_hdr(skb));
	void *reply = genlmsg_data(genlhdr);
	int rc;

	rc = genlmsg_end(skb, reply);
	if (rc < 0) {
		nlmsg_free(skb);
		return rc;
	}

	return genlmsg_reply(skb, info);
}

/*
 * Send taskstats data in @skb to listeners registered for @cpu's exit data
 */
static void send_cpu_listeners(struct sk_buff *skb,
					struct listener_list *listeners)
{
	struct genlmsghdr *genlhdr = nlmsg_data(nlmsg_hdr(skb));
	struct listener *s, *tmp;
	struct sk_buff *skb_next, *skb_cur = skb;
	void *reply = genlmsg_data(genlhdr);
	int rc, delcount = 0;

	rc = genlmsg_end(skb, reply);
	if (rc < 0) {
		nlmsg_free(skb);
		return;
	}

	rc = 0;
	down_read(&listeners->sem);
	list_for_each_entry(s, &listeners->list, list) {
		skb_next = NULL;
		if (!list_is_last(&s->list, &listeners->list)) {
			skb_next = skb_clone(skb_cur, GFP_KERNEL);
			if (!skb_next)
				break;
		}
		rc = genlmsg_unicast(&init_net, skb_cur, s->pid);
		if (rc == -ECONNREFUSED) {
			s->valid = 0;
			delcount++;
		}
		skb_cur = skb_next;
	}
	up_read(&listeners->sem);

	if (skb_cur)
		nlmsg_free(skb_cur);

	if (!delcount)
		return;

	/* Delete invalidated entries */
	down_write(&listeners->sem);
	list_for_each_entry_safe(s, tmp, &listeners->list, list) {
		if (!s->valid) {
			list_del(&s->list);
			kfree(s);
		}
	}
	up_write(&listeners->sem);
}

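/*
 * Fill @stats for a single task. Delay accounting, basic accounting and
 * extended accounting each contribute their fields; ids are reported
 * relative to @user_ns and @pid_ns.
 */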
static void fill_stats(struct user_namespace *user_ns,
		       struct pid_namespace *pid_ns,
		       struct task_struct *tsk, struct taskstats *stats)
{
	memset(stats, 0, sizeof(*stats));
	/*
	 * Each accounting subsystem adds calls to its functions to
	 * fill in relevant parts of struct taskstats as follows
	 *
	 *	per-task-foo(stats, tsk);
	 */

	delayacct_add_tsk(stats, tsk);

	/* fill in basic acct fields */
	stats->version = TASKSTATS_VERSION;
	stats->nvcsw = tsk->nvcsw;
	stats->nivcsw = tsk->nivcsw;
	bacct_add_tsk(user_ns, pid_ns, stats, tsk);

	/* fill in extended acct fields */
	xacct_add_tsk(stats, tsk);
}

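/* Look up the task for @pid in the caller's pid namespace and fill @stats */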
static int fill_stats_for_pid(pid_t pid, struct taskstats *stats)
{
	struct task_struct *tsk;

	rcu_read_lock();
	tsk = find_task_by_vpid(pid);
	if (tsk)
		get_task_struct(tsk);
	rcu_read_unlock();
	if (!tsk)
		return -ESRCH;
	fill_stats(current_user_ns(), task_active_pid_ns(current), tsk, stats);
	put_task_struct(tsk);
	return 0;
}

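/*
 * Fill @stats for the whole thread group of @tgid: start from the stats
 * already accumulated from dead threads, then add each live thread's
 * contribution under siglock.
 */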
static int fill_stats_for_tgid(pid_t tgid, struct taskstats *stats)
{
	struct task_struct *tsk, *first;
	unsigned long flags;
	int rc = -ESRCH;

	/*
	 * Add additional stats from live tasks except zombie thread group
	 * leaders who are already counted with the dead tasks
	 */
	rcu_read_lock();
	first = find_task_by_vpid(tgid);

	if (!first || !lock_task_sighand(first, &flags))
		goto out;

	if (first->signal->stats)
		memcpy(stats, first->signal->stats, sizeof(*stats));
	else
		memset(stats, 0, sizeof(*stats));

	tsk = first;
	do {
		if (tsk->exit_state)
			continue;
		/*
		 * Accounting subsystem can call its functions here to
		 * fill in relevant parts of struct taskstats as follows
		 *
		 *	per-task-foo(stats, tsk);
		 */
		delayacct_add_tsk(stats, tsk);

		stats->nvcsw += tsk->nvcsw;
		stats->nivcsw += tsk->nivcsw;
	} while_each_thread(first, tsk);

	unlock_task_sighand(first, &flags);
	rc = 0;
out:
	rcu_read_unlock();

	stats->version = TASKSTATS_VERSION;
	/*
	 * Accounting subsystems can also add calls here to modify
	 * fields of taskstats.
	 */
	return rc;
}

static void fill_tgid_exit(struct task_struct *tsk)
{
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	if (!tsk->signal->stats)
		goto ret;

	/*
	 * Each accounting subsystem calls its functions here to
	 * accumulate its per-task stats for tsk into the per-tgid structure
	 *
	 *	per-task-foo(tsk->signal->stats, tsk);
	 */
	delayacct_add_tsk(tsk->signal->stats, tsk);
ret:
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
	return;
}

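/*
 * Register or deregister @pid as an exit-data listener on every CPU in
 * @mask. Only tasks in the initial user and pid namespaces may listen.
 */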
static int add_del_listener(pid_t pid, const struct cpumask *mask, int isadd)
{
	struct listener_list *listeners;
	struct listener *s, *tmp, *s2;
	unsigned int cpu;

	if (!cpumask_subset(mask, cpu_possible_mask))
		return -EINVAL;

	if (current_user_ns() != &init_user_ns)
		return -EINVAL;

	if (task_active_pid_ns(current) != &init_pid_ns)
		return -EINVAL;

	if (isadd == REGISTER) {
		for_each_cpu(cpu, mask) {
			s = kmalloc_node(sizeof(struct listener),
					GFP_KERNEL, cpu_to_node(cpu));
			if (!s)
				goto cleanup;

			s->pid = pid;
			s->valid = 1;

			listeners = &per_cpu(listener_array, cpu);
			down_write(&listeners->sem);
			list_for_each_entry(s2, &listeners->list, list) {
				if (s2->pid == pid && s2->valid)
					goto exists;
			}
			list_add(&s->list, &listeners->list);
			s = NULL;
exists:
			up_write(&listeners->sem);
			kfree(s); /* nop if NULL */
		}
		return 0;
	}

	/* Deregister or cleanup */
cleanup:
	for_each_cpu(cpu, mask) {
		listeners = &per_cpu(listener_array, cpu);
		down_write(&listeners->sem);
		list_for_each_entry_safe(s, tmp, &listeners->list, list) {
			if (s->pid == pid) {
				list_del(&s->list);
				kfree(s);
				break;
			}
		}
		up_write(&listeners->sem);
	}
	return 0;
}

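/*
 * Parse the cpulist string in attribute @na into @mask.
 * Returns 1 when the attribute is absent.
 */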
static int parse(struct nlattr *na, struct cpumask *mask)
{
	char *data;
	int len;
	int ret;

	if (na == NULL)
		return 1;
	len = nla_len(na);
	if (len > TASKSTATS_CPUMASK_MAXLEN)
		return -E2BIG;
	if (len < 1)
		return -EINVAL;
	data = kmalloc(len, GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	nla_strlcpy(data, na, len);
	ret = cpulist_parse(data, mask);
	kfree(data);
	return ret;
}

#if defined(CONFIG_64BIT) && !defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
#define TASKSTATS_NEEDS_PADDING 1
#endif

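/*
 * Reserve space for an aggregate PID/TGID + taskstats reply in @skb and
 * return a pointer to the reserved taskstats payload
 */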
static struct taskstats *mk_reply(struct sk_buff *skb, int type, u32 pid)
{
	struct nlattr *na, *ret;
	int aggr;

	aggr = (type == TASKSTATS_TYPE_PID)
			? TASKSTATS_TYPE_AGGR_PID
			: TASKSTATS_TYPE_AGGR_TGID;

	/*
	 * The taskstats structure is internally aligned on 8 byte
	 * boundaries but the layout of the aggregate reply, with
	 * two NLA headers and the pid (each 4 bytes), actually
	 * forces the entire structure to be unaligned. This causes
	 * the kernel to issue unaligned access warnings on some
	 * architectures like ia64. Unfortunately, some software out there
	 * doesn't properly unroll the NLA packet and assumes that the start
	 * of the taskstats structure will always be 20 bytes from the start
	 * of the netlink payload. Aligning the start of the taskstats
	 * structure breaks this software, which we don't want. So, for now
	 * the alignment only happens on architectures that require it
	 * and those users will have to update to fixed versions of those
	 * packages. Space is reserved in the packet only when needed.
	 * This ifdef should be removed in several years e.g. 2012 once
	 * we can be confident that fixed versions are installed on most
	 * systems. We add the padding before the aggregate since the
	 * aggregate is already a defined type.
	 */
#ifdef TASKSTATS_NEEDS_PADDING
	if (nla_put(skb, TASKSTATS_TYPE_NULL, 0, NULL) < 0)
		goto err;
#endif
	na = nla_nest_start(skb, aggr);
	if (!na)
		goto err;

	if (nla_put(skb, type, sizeof(pid), &pid) < 0)
		goto err;
	ret = nla_reserve(skb, TASKSTATS_TYPE_STATS, sizeof(struct taskstats));
	if (!ret)
		goto err;
	nla_nest_end(skb, na);

	return nla_data(ret);
err:
	return NULL;
}

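/*
 * CGROUPSTATS_CMD_GET handler: build a cgroupstats reply for the cgroup
 * identified by the file descriptor in CGROUPSTATS_CMD_ATTR_FD
 */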
static int cgroupstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
{
	int rc = 0;
	struct sk_buff *rep_skb;
	struct cgroupstats *stats;
	struct nlattr *na;
	size_t size;
	u32 fd;
	struct fd f;

	na = info->attrs[CGROUPSTATS_CMD_ATTR_FD];
	if (!na)
		return -EINVAL;

	fd = nla_get_u32(info->attrs[CGROUPSTATS_CMD_ATTR_FD]);
	f = fdget(fd);
	if (!f.file)
		return 0;

	size = nla_total_size(sizeof(struct cgroupstats));

	rc = prepare_reply(info, CGROUPSTATS_CMD_NEW, &rep_skb,
				size);
	if (rc < 0)
		goto err;

	na = nla_reserve(rep_skb, CGROUPSTATS_TYPE_CGROUP_STATS,
				sizeof(struct cgroupstats));
	if (na == NULL) {
		nlmsg_free(rep_skb);
		rc = -EMSGSIZE;
		goto err;
	}

	stats = nla_data(na);
	memset(stats, 0, sizeof(*stats));

	rc = cgroupstats_build(stats, f.file->f_dentry);
	if (rc < 0) {
		nlmsg_free(rep_skb);
		goto err;
	}

	rc = send_reply(rep_skb, info);

err:
	fdput(f);
	return rc;
}

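/*
 * Handlers for the REGISTER/DEREGISTER_CPUMASK attributes: parse the
 * cpumask and (de)register the sender as a per-cpu exit-data listener
 */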
static int cmd_attr_register_cpumask(struct genl_info *info)
{
	cpumask_var_t mask;
	int rc;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;
	rc = parse(info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK], mask);
	if (rc < 0)
		goto out;
	rc = add_del_listener(info->snd_portid, mask, REGISTER);
out:
	free_cpumask_var(mask);
	return rc;
}

static int cmd_attr_deregister_cpumask(struct genl_info *info)
{
	cpumask_var_t mask;
	int rc;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;
	rc = parse(info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK], mask);
	if (rc < 0)
		goto out;
	rc = add_del_listener(info->snd_portid, mask, DEREGISTER);
out:
	free_cpumask_var(mask);
	return rc;
}

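/*
 * Reply size for one PID/TGID + stats pair, including the padding
 * attribute on architectures that need aligned access
 */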
static size_t taskstats_packet_size(void)
{
	size_t size;

	size = nla_total_size(sizeof(u32)) +
		nla_total_size(sizeof(struct taskstats)) + nla_total_size(0);
#ifdef TASKSTATS_NEEDS_PADDING
	size += nla_total_size(0); /* Padding for alignment */
#endif
	return size;
}

static int cmd_attr_pid(struct genl_info *info)
{
	struct taskstats *stats;
	struct sk_buff *rep_skb;
	size_t size;
	u32 pid;
	int rc;

	size = taskstats_packet_size();

	rc = prepare_reply(info, TASKSTATS_CMD_NEW, &rep_skb, size);
	if (rc < 0)
		return rc;

	rc = -EINVAL;
	pid = nla_get_u32(info->attrs[TASKSTATS_CMD_ATTR_PID]);
	stats = mk_reply(rep_skb, TASKSTATS_TYPE_PID, pid);
	if (!stats)
		goto err;

	rc = fill_stats_for_pid(pid, stats);
	if (rc < 0)
		goto err;
	return send_reply(rep_skb, info);
err:
	nlmsg_free(rep_skb);
	return rc;
}

static int cmd_attr_tgid(struct genl_info *info)
{
	struct taskstats *stats;
	struct sk_buff *rep_skb;
	size_t size;
	u32 tgid;
	int rc;

	size = taskstats_packet_size();

	rc = prepare_reply(info, TASKSTATS_CMD_NEW, &rep_skb, size);
	if (rc < 0)
		return rc;

	rc = -EINVAL;
	tgid = nla_get_u32(info->attrs[TASKSTATS_CMD_ATTR_TGID]);
	stats = mk_reply(rep_skb, TASKSTATS_TYPE_TGID, tgid);
	if (!stats)
		goto err;

	rc = fill_stats_for_tgid(tgid, stats);
	if (rc < 0)
		goto err;
	return send_reply(rep_skb, info);
err:
	nlmsg_free(rep_skb);
	return rc;
}

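/* TASKSTATS_CMD_GET handler: dispatch on whichever attribute was sent */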
static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
{
	if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
		return cmd_attr_register_cpumask(info);
	else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
		return cmd_attr_deregister_cpumask(info);
	else if (info->attrs[TASKSTATS_CMD_ATTR_PID])
		return cmd_attr_pid(info);
	else if (info->attrs[TASKSTATS_CMD_ATTR_TGID])
		return cmd_attr_tgid(info);
	else
		return -EINVAL;
}

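/*
 * Allocate the per-thread-group taskstats accumulator on first use.
 * Racing allocators are resolved under siglock; the loser frees its copy.
 */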
static struct taskstats *taskstats_tgid_alloc(struct task_struct *tsk)
{
	struct signal_struct *sig = tsk->signal;
	struct taskstats *stats;

	if (sig->stats || thread_group_empty(tsk))
		goto ret;

	/* No problem if kmem_cache_zalloc() fails */
	stats = kmem_cache_zalloc(taskstats_cache, GFP_KERNEL);

	spin_lock_irq(&tsk->sighand->siglock);
	if (!sig->stats) {
		sig->stats = stats;
		stats = NULL;
	}
	spin_unlock_irq(&tsk->sighand->siglock);

	if (stats)
		kmem_cache_free(taskstats_cache, stats);
ret:
	return sig->stats;
}

/* Send pid data out on exit */
void taskstats_exit(struct task_struct *tsk, int group_dead)
{
	int rc;
	struct listener_list *listeners;
	struct taskstats *stats;
	struct sk_buff *rep_skb;
	size_t size;
	int is_thread_group;

	if (!family_registered)
		return;

	/*
	 * Size includes space for nested attributes
	 */
	size = taskstats_packet_size();

	is_thread_group = !!taskstats_tgid_alloc(tsk);
	if (is_thread_group) {
		/* PID + STATS + TGID + STATS */
		size = 2 * size;
		/* fill the tsk->signal->stats structure */
		fill_tgid_exit(tsk);
	}

	listeners = __this_cpu_ptr(&listener_array);
	if (list_empty(&listeners->list))
		return;

	rc = prepare_reply(NULL, TASKSTATS_CMD_NEW, &rep_skb, size);
	if (rc < 0)
		return;

	stats = mk_reply(rep_skb, TASKSTATS_TYPE_PID,
			 task_pid_nr_ns(tsk, &init_pid_ns));
	if (!stats)
		goto err;

	fill_stats(&init_user_ns, &init_pid_ns, tsk, stats);

	/*
	 * Doesn't matter if tsk is the leader or the last group member leaving
	 */
	if (!is_thread_group || !group_dead)
		goto send;

	stats = mk_reply(rep_skb, TASKSTATS_TYPE_TGID,
			 task_tgid_nr_ns(tsk, &init_pid_ns));
	if (!stats)
		goto err;

	memcpy(stats, tsk->signal->stats, sizeof(*stats));

send:
	send_cpu_listeners(rep_skb, listeners);
	return;
err:
	nlmsg_free(rep_skb);
}


static struct genl_ops taskstats_ops = {
	.cmd		= TASKSTATS_CMD_GET,
	.doit		= taskstats_user_cmd,
	.policy		= taskstats_cmd_get_policy,
	.flags		= GENL_ADMIN_PERM,
};

static struct genl_ops cgroupstats_ops = {
	.cmd		= CGROUPSTATS_CMD_GET,
	.doit		= cgroupstats_user_cmd,
	.policy		= cgroupstats_cmd_get_policy,
};

/* Needed early in initialization */
void __init taskstats_init_early(void)
{
	unsigned int i;

	taskstats_cache = KMEM_CACHE(taskstats, SLAB_PANIC);
	for_each_possible_cpu(i) {
		INIT_LIST_HEAD(&(per_cpu(listener_array, i).list));
		init_rwsem(&(per_cpu(listener_array, i).sem));
	}
}


static int __init taskstats_init(void)
{
	int rc;

	rc = genl_register_family(&family);
	if (rc)
		return rc;

	rc = genl_register_ops(&family, &taskstats_ops);
	if (rc < 0)
		goto err;

	rc = genl_register_ops(&family, &cgroupstats_ops);
	if (rc < 0)
		goto err_cgroup_ops;

	family_registered = 1;
	pr_info("registered taskstats version %d\n", TASKSTATS_GENL_VERSION);
	return 0;
err_cgroup_ops:
	genl_unregister_ops(&family, &taskstats_ops);
err:
	genl_unregister_family(&family);
	return rc;
}

/*
 * late initcall ensures initialization of statistics collection
 * mechanisms precedes initialization of the taskstats interface
 */
late_initcall(taskstats_init);