/*
 * cn_proc.c - process events connector
 *
 * Copyright (C) Matt Helsley, IBM Corp. 2005
 * Based on cn_fork.c by Guillaume Thouvenin <guillaume.thouvenin@bull.net>
 * Original copyright notice follows:
 * Copyright (C) 2005 BULL SA.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/ktime.h>
#include <linux/init.h>
#include <linux/connector.h>
#include <linux/gfp.h>
#include <linux/ptrace.h>
#include <linux/atomic.h>

#include <asm/unaligned.h>

#include <linux/cn_proc.h>

#define CN_PROC_MSG_SIZE (sizeof(struct cn_msg) + sizeof(struct proc_event))

static atomic_t proc_event_num_listeners = ATOMIC_INIT(0);
static struct cb_id cn_proc_event_id = { CN_IDX_PROC, CN_VAL_PROC };

/* proc_event_counts is used as the sequence number of the netlink message */
static DEFINE_PER_CPU(__u32, proc_event_counts) = { 0 };

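/*
 * get_seq - grab the next per-cpu event sequence number
 * @ts: filled with the sequence number
 * @cpu: filled with the cpu the counter was read from
 *
 * Preemption is disabled so the increment and smp_processor_id() refer
 * to the same cpu; reporting the (cpu, seq) pair lets listeners notice
 * gaps in the per-cpu sequence.
 */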
static inline void get_seq(__u32 *ts, int *cpu)
{
	preempt_disable();
	*ts = __this_cpu_inc_return(proc_event_counts) - 1;
	*cpu = smp_processor_id();
	preempt_enable();
}

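/*
 * proc_fork_connector - report a newly forked task to listeners
 * @task: the child task
 *
 * The parent's pid/tgid are read under rcu_read_lock() because
 * task->real_parent is RCU-protected.
 */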
void proc_fork_connector(struct task_struct *task)
{
	struct cn_msg *msg;
	struct proc_event *ev;
	__u8 buffer[CN_PROC_MSG_SIZE];
	struct timespec ts;
	struct task_struct *parent;

	if (atomic_read(&proc_event_num_listeners) < 1)
		return;

	msg = (struct cn_msg *)buffer;
	ev = (struct proc_event *)msg->data;
	get_seq(&msg->seq, &ev->cpu);
	ktime_get_ts(&ts); /* get high res monotonic timestamp */
	put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
	ev->what = PROC_EVENT_FORK;
	rcu_read_lock();
	parent = rcu_dereference(task->real_parent);
	ev->event_data.fork.parent_pid = parent->pid;
	ev->event_data.fork.parent_tgid = parent->tgid;
	rcu_read_unlock();
	ev->event_data.fork.child_pid = task->pid;
	ev->event_data.fork.child_tgid = task->tgid;

	memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
	msg->ack = 0; /* not used */
	msg->len = sizeof(*ev);
	/* If cn_netlink_send() fails, the event is silently dropped */
	cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
}

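/*
 * proc_exec_connector - report an exec() to listeners
 * @task: the task that executed a new program
 */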
void proc_exec_connector(struct task_struct *task)
{
	struct cn_msg *msg;
	struct proc_event *ev;
	struct timespec ts;
	__u8 buffer[CN_PROC_MSG_SIZE];

	if (atomic_read(&proc_event_num_listeners) < 1)
		return;

	msg = (struct cn_msg *)buffer;
	ev = (struct proc_event *)msg->data;
	get_seq(&msg->seq, &ev->cpu);
	ktime_get_ts(&ts); /* get high res monotonic timestamp */
	put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
	ev->what = PROC_EVENT_EXEC;
	ev->event_data.exec.process_pid = task->pid;
	ev->event_data.exec.process_tgid = task->tgid;

	memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
	msg->ack = 0; /* not used */
	msg->len = sizeof(*ev);
	cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
}

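/*
 * proc_id_connector - report a change of real/effective credentials
 * @task: the task whose credentials changed
 * @which_id: PROC_EVENT_UID or PROC_EVENT_GID; any other value is
 *            silently ignored
 */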
void proc_id_connector(struct task_struct *task, int which_id)
{
	struct cn_msg *msg;
	struct proc_event *ev;
	__u8 buffer[CN_PROC_MSG_SIZE];
	struct timespec ts;
	const struct cred *cred;

	if (atomic_read(&proc_event_num_listeners) < 1)
		return;

	msg = (struct cn_msg *)buffer;
	ev = (struct proc_event *)msg->data;
	ev->what = which_id;
	ev->event_data.id.process_pid = task->pid;
	ev->event_data.id.process_tgid = task->tgid;
	rcu_read_lock();
	cred = __task_cred(task);
	if (which_id == PROC_EVENT_UID) {
		ev->event_data.id.r.ruid = cred->uid;
		ev->event_data.id.e.euid = cred->euid;
	} else if (which_id == PROC_EVENT_GID) {
		ev->event_data.id.r.rgid = cred->gid;
		ev->event_data.id.e.egid = cred->egid;
	} else {
		rcu_read_unlock();
		return;
	}
	rcu_read_unlock();
	get_seq(&msg->seq, &ev->cpu);
	ktime_get_ts(&ts); /* get high res monotonic timestamp */
	put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);

	memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
	msg->ack = 0; /* not used */
	msg->len = sizeof(*ev);
	cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
}

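/*
 * proc_sid_connector - report that a task started a new session
 * @task: the new session leader
 */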
void proc_sid_connector(struct task_struct *task)
{
	struct cn_msg *msg;
	struct proc_event *ev;
	struct timespec ts;
	__u8 buffer[CN_PROC_MSG_SIZE];

	if (atomic_read(&proc_event_num_listeners) < 1)
		return;

	msg = (struct cn_msg *)buffer;
	ev = (struct proc_event *)msg->data;
	get_seq(&msg->seq, &ev->cpu);
	ktime_get_ts(&ts); /* get high res monotonic timestamp */
	put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
	ev->what = PROC_EVENT_SID;
	ev->event_data.sid.process_pid = task->pid;
	ev->event_data.sid.process_tgid = task->tgid;

	memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
	msg->ack = 0; /* not used */
	msg->len = sizeof(*ev);
	cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
}

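/*
 * proc_ptrace_connector - report a ptrace attach or detach
 * @task: the tracee
 * @ptrace_id: PTRACE_ATTACH (current is reported as the tracer) or
 *             PTRACE_DETACH (a zero tracer pid/tgid is reported);
 *             any other value is silently ignored
 */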
void proc_ptrace_connector(struct task_struct *task, int ptrace_id)
{
	struct cn_msg *msg;
	struct proc_event *ev;
	struct timespec ts;
	__u8 buffer[CN_PROC_MSG_SIZE];

	if (atomic_read(&proc_event_num_listeners) < 1)
		return;

	msg = (struct cn_msg *)buffer;
	ev = (struct proc_event *)msg->data;
	get_seq(&msg->seq, &ev->cpu);
	ktime_get_ts(&ts); /* get high res monotonic timestamp */
	put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
	ev->what = PROC_EVENT_PTRACE;
	ev->event_data.ptrace.process_pid  = task->pid;
	ev->event_data.ptrace.process_tgid = task->tgid;
	if (ptrace_id == PTRACE_ATTACH) {
		ev->event_data.ptrace.tracer_pid  = current->pid;
		ev->event_data.ptrace.tracer_tgid = current->tgid;
	} else if (ptrace_id == PTRACE_DETACH) {
		ev->event_data.ptrace.tracer_pid  = 0;
		ev->event_data.ptrace.tracer_tgid = 0;
	} else
		return;

	memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
	msg->ack = 0; /* not used */
	msg->len = sizeof(*ev);
	cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
}

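/*
 * proc_comm_connector - report a change of a task's comm (thread name)
 * @task: the renamed task, e.g. after prctl(PR_SET_NAME)
 */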
void proc_comm_connector(struct task_struct *task)
{
	struct cn_msg *msg;
	struct proc_event *ev;
	struct timespec ts;
	__u8 buffer[CN_PROC_MSG_SIZE];

	if (atomic_read(&proc_event_num_listeners) < 1)
		return;

	msg = (struct cn_msg *)buffer;
	ev = (struct proc_event *)msg->data;
	get_seq(&msg->seq, &ev->cpu);
	ktime_get_ts(&ts); /* get high res monotonic timestamp */
	put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
	ev->what = PROC_EVENT_COMM;
	ev->event_data.comm.process_pid  = task->pid;
	ev->event_data.comm.process_tgid = task->tgid;
	get_task_comm(ev->event_data.comm.comm, task);

	memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
	msg->ack = 0; /* not used */
	msg->len = sizeof(*ev);
	cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
}

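/*
 * proc_exit_connector - report a task's exit code and signal to listeners
 * @task: the exiting task
 */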
void proc_exit_connector(struct task_struct *task)
{
	struct cn_msg *msg;
	struct proc_event *ev;
	__u8 buffer[CN_PROC_MSG_SIZE];
	struct timespec ts;

	if (atomic_read(&proc_event_num_listeners) < 1)
		return;

	msg = (struct cn_msg *)buffer;
	ev = (struct proc_event *)msg->data;
	get_seq(&msg->seq, &ev->cpu);
	ktime_get_ts(&ts); /* get high res monotonic timestamp */
	put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
	ev->what = PROC_EVENT_EXIT;
	ev->event_data.exit.process_pid = task->pid;
	ev->event_data.exit.process_tgid = task->tgid;
	ev->event_data.exit.exit_code = task->exit_code;
	ev->event_data.exit.exit_signal = task->exit_signal;

	memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
	msg->ack = 0; /* not used */
	msg->len = sizeof(*ev);
	cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
}

/*
 * cn_proc_ack - send an acknowledgement message to userspace
 *
 * @err is 0 for success, a positive EFOO value otherwise.
 * Note: this is positive rather than the conventional negative kernel
 * error value because it is not being returned via syscall return
 * mechanisms.
 */
static void cn_proc_ack(int err, int rcvd_seq, int rcvd_ack)
{
	struct cn_msg *msg;
	struct proc_event *ev;
	__u8 buffer[CN_PROC_MSG_SIZE];
	struct timespec ts;

	if (atomic_read(&proc_event_num_listeners) < 1)
		return;

	msg = (struct cn_msg *)buffer;
	ev = (struct proc_event *)msg->data;
	msg->seq = rcvd_seq;
	ktime_get_ts(&ts); /* get high res monotonic timestamp */
	put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
	ev->cpu = -1;
	ev->what = PROC_EVENT_NONE;
	ev->event_data.ack.err = err;
	memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
	msg->ack = rcvd_ack + 1;
	msg->len = sizeof(*ev);
	cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
}

/**
 * cn_proc_mcast_ctl - handle a listen/ignore request from userspace
 * @msg: message sent from userspace via the connector
 * @nsp: netlink socket buffer parameters (unused here)
 */
static void cn_proc_mcast_ctl(struct cn_msg *msg,
			      struct netlink_skb_parms *nsp)
{
	enum proc_cn_mcast_op *mc_op = NULL;
	int err = 0;

	if (msg->len != sizeof(*mc_op))
		return;

	mc_op = (enum proc_cn_mcast_op *)msg->data;
	switch (*mc_op) {
	case PROC_CN_MCAST_LISTEN:
		atomic_inc(&proc_event_num_listeners);
		break;
	case PROC_CN_MCAST_IGNORE:
		atomic_dec(&proc_event_num_listeners);
		break;
	default:
		err = EINVAL;
		break;
	}
	cn_proc_ack(err, msg->seq, msg->ack);
}

/*
 * cn_proc_init - initialization entry point
 *
 * Adds the connector callback to the connector driver.
 */
static int __init cn_proc_init(void)
{
	int err;

	err = cn_add_callback(&cn_proc_event_id, "cn_proc",
			      &cn_proc_mcast_ctl);
	if (err) {
		printk(KERN_WARNING "cn_proc failed to register\n");
		return err;
	}
	return 0;
}

module_init(cn_proc_init);
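
/*
 * Example userspace subscriber (a minimal sketch, not part of this
 * module): to start receiving the events emitted above, bind a
 * NETLINK_CONNECTOR socket to the CN_IDX_PROC multicast group and send
 * a PROC_CN_MCAST_LISTEN request, which cn_proc_mcast_ctl() acks with
 * a PROC_EVENT_NONE message.  Error handling is omitted; assumes the
 * <linux/netlink.h>, <linux/connector.h> and <linux/cn_proc.h> headers:
 *
 *	char buf[NLMSG_SPACE(CN_PROC_MSG_SIZE)];
 *	struct nlmsghdr *nlh = (struct nlmsghdr *)buf;
 *	struct cn_msg *cn = NLMSG_DATA(nlh);
 *	enum proc_cn_mcast_op *op = (enum proc_cn_mcast_op *)cn->data;
 *	struct sockaddr_nl sa = {
 *		.nl_family = AF_NETLINK,
 *		.nl_groups = CN_IDX_PROC,
 *		.nl_pid    = getpid(),
 *	};
 *	int sk = socket(PF_NETLINK, SOCK_DGRAM, NETLINK_CONNECTOR);
 *
 *	bind(sk, (struct sockaddr *)&sa, sizeof(sa));
 *
 *	memset(buf, 0, sizeof(buf));
 *	nlh->nlmsg_len  = NLMSG_LENGTH(sizeof(*cn) + sizeof(*op));
 *	nlh->nlmsg_pid  = getpid();
 *	nlh->nlmsg_type = NLMSG_DONE;
 *	cn->id.idx = CN_IDX_PROC;
 *	cn->id.val = CN_VAL_PROC;
 *	cn->len    = sizeof(*op);
 *	*op = PROC_CN_MCAST_LISTEN;
 *	send(sk, nlh, nlh->nlmsg_len, 0);
 *
 * Each datagram subsequently read from sk carries a struct nlmsghdr,
 * a struct cn_msg and a struct proc_event; msg->seq is the per-cpu
 * sequence number set by get_seq() above.
 */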