/*
 * bcm.c - Broadcast Manager to filter/send (cyclic) CAN content
 *
 * Copyright (c) 2002-2007 Volkswagen Group Electronic Research
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Volkswagen nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * Alternatively, provided that this notice is retained in full, this
 * software may be distributed under the terms of the GNU General
 * Public License ("GPL") version 2, in which case the provisions of the
 * GPL apply INSTEAD OF those given above.
 *
 * The provided data structures and external interfaces from this code
 * are not restricted to be used by modules with a GPL compatible license.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * Send feedback to <socketcan-users@lists.berlios.de>
 *
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/hrtimer.h>
#include <linux/list.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/uio.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/socket.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <linux/can.h>
#include <linux/can/core.h>
#include <linux/can/bcm.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <net/net_namespace.h>

/*
 * To send multiple CAN frame content within TX_SETUP or to filter
 * CAN messages with multiplex index within RX_SETUP, the number of
 * different filters is limited to 256 due to the one byte index value.
 */
#define MAX_NFRAMES 256

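/*
 * Userspace note (illustrative sketch, not a definition of the API):
 * a BCM user talks to this protocol by writing a struct bcm_msg_head
 * directly followed by msg_head.nframes struct can_frame to a connected
 * CAN_BCM socket, e.g. roughly:
 *
 *	struct { struct bcm_msg_head head; struct can_frame frame; } msg = {
 *		.head = {
 *			.opcode  = TX_SETUP,
 *			.can_id  = 0x123,
 *			.flags   = SETTIMER | STARTTIMER | TX_CP_CAN_ID,
 *			.nframes = 1,
 *			.ival2   = { 0, 100000 },	(100ms cycle time)
 *		},
 *		.frame = { .can_dlc = 2, .data = { 0x11, 0x22 } },
 *	};
 *	write(s, &msg, sizeof(msg));
 *
 * The field values above are example choices only.
 */
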
/* use of last_frames[index].can_dlc */
#define RX_RECV    0x40 /* received data for this element */
#define RX_THR     0x80 /* element not yet sent due to throttle feature */
#define BCM_CAN_DLC_MASK 0x0F /* clean private flags in can_dlc by masking */

/* get best masking value for can_rx_register() for a given single can_id */
#define REGMASK(id) ((id & CAN_EFF_FLAG) ? \
		     (CAN_EFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG) : \
		     (CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG))
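/*
 * e.g. for a plain SFF can_id like 0x123 the resulting filter mask keeps
 * CAN_EFF_FLAG and CAN_RTR_FLAG set, so only data frames with exactly this
 * standard identifier reach bcm_rx_handler() (no EFF or RTR frames).
 */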

#define CAN_BCM_VERSION CAN_VERSION
static __initdata const char banner[] = KERN_INFO
	"can: broadcast manager protocol (rev " CAN_BCM_VERSION " t)\n";

MODULE_DESCRIPTION("PF_CAN broadcast manager protocol");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Oliver Hartkopp <oliver.hartkopp@volkswagen.de>");
MODULE_ALIAS("can-proto-2");

/* easy access to can_frame payload */
static inline u64 GET_U64(const struct can_frame *cp)
{
	return *(u64 *)cp->data;
}

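/*
 * bcm_op - one tx or rx job of a BCM socket (descriptive summary):
 * for tx ops 'frames' holds the CAN frame(s) sent cyclically, for rx ops
 * 'frames' holds the content filter / MUX masks and 'last_frames' caches
 * the last received content per index for change detection and throttling.
 */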
struct bcm_op {
	struct list_head list;
	int ifindex;
	canid_t can_id;
	u32 flags;
	unsigned long frames_abs, frames_filtered;
	struct timeval ival1, ival2;
	struct hrtimer timer, thrtimer;
	struct tasklet_struct tsklet, thrtsklet;
	ktime_t rx_stamp, kt_ival1, kt_ival2, kt_lastmsg;
	int rx_ifindex;
	u32 count;
	u32 nframes;
	u32 currframe;
	struct can_frame *frames;
	struct can_frame *last_frames;
	struct can_frame sframe;
	struct can_frame last_sframe;
	struct sock *sk;
	struct net_device *rx_reg_dev;
};

static struct proc_dir_entry *proc_dir;

struct bcm_sock {
	struct sock sk;
	int bound;
	int ifindex;
	struct notifier_block notifier;
	struct list_head rx_ops;
	struct list_head tx_ops;
	unsigned long dropped_usr_msgs;
	struct proc_dir_entry *bcm_proc_read;
	char procname[32]; /* inode number in decimal with \0 */
};

static inline struct bcm_sock *bcm_sk(const struct sock *sk)
{
	return (struct bcm_sock *)sk;
}

#define CFSIZ sizeof(struct can_frame)
#define OPSIZ sizeof(struct bcm_op)
#define MHSIZ sizeof(struct bcm_msg_head)

/*
 * procfs functions
 */
static char *bcm_proc_getifname(char *result, int ifindex)
{
	struct net_device *dev;

	if (!ifindex)
		return "any";

	rcu_read_lock();
	dev = dev_get_by_index_rcu(&init_net, ifindex);
	if (dev)
		strcpy(result, dev->name);
	else
		strcpy(result, "???");
	rcu_read_unlock();

	return result;
}

static int bcm_proc_show(struct seq_file *m, void *v)
{
	char ifname[IFNAMSIZ];
	struct sock *sk = (struct sock *)m->private;
	struct bcm_sock *bo = bcm_sk(sk);
	struct bcm_op *op;

	seq_printf(m, ">>> socket %pK", sk->sk_socket);
	seq_printf(m, " / sk %pK", sk);
	seq_printf(m, " / bo %pK", bo);
	seq_printf(m, " / dropped %lu", bo->dropped_usr_msgs);
	seq_printf(m, " / bound %s", bcm_proc_getifname(ifname, bo->ifindex));
	seq_printf(m, " <<<\n");

	list_for_each_entry(op, &bo->rx_ops, list) {

		unsigned long reduction;

		/* print only active entries & prevent division by zero */
		if (!op->frames_abs)
			continue;

		seq_printf(m, "rx_op: %03X %-5s ",
			   op->can_id, bcm_proc_getifname(ifname, op->ifindex));
		seq_printf(m, "[%u]%c ", op->nframes,
			   (op->flags & RX_CHECK_DLC) ? 'd' : ' ');
		if (op->kt_ival1.tv64)
			seq_printf(m, "timeo=%lld ",
				   (long long)ktime_to_us(op->kt_ival1));

		if (op->kt_ival2.tv64)
			seq_printf(m, "thr=%lld ",
				   (long long)ktime_to_us(op->kt_ival2));

		seq_printf(m, "# recv %ld (%ld) => reduction: ",
			   op->frames_filtered, op->frames_abs);

		reduction = 100 - (op->frames_filtered * 100) / op->frames_abs;

		seq_printf(m, "%s%ld%%\n",
			   (reduction == 100) ? "near " : "", reduction);
	}

	list_for_each_entry(op, &bo->tx_ops, list) {

		seq_printf(m, "tx_op: %03X %s [%u] ",
			   op->can_id,
			   bcm_proc_getifname(ifname, op->ifindex),
			   op->nframes);

		if (op->kt_ival1.tv64)
			seq_printf(m, "t1=%lld ",
				   (long long)ktime_to_us(op->kt_ival1));

		if (op->kt_ival2.tv64)
			seq_printf(m, "t2=%lld ",
				   (long long)ktime_to_us(op->kt_ival2));

		seq_printf(m, "# sent %ld\n", op->frames_abs);
	}
	seq_putc(m, '\n');
	return 0;
}

static int bcm_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, bcm_proc_show, PDE(inode)->data);
}

static const struct file_operations bcm_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= bcm_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/*
 * bcm_can_tx - send the (next) CAN frame to the appropriate CAN interface
 *              of the given bcm tx op
 */
static void bcm_can_tx(struct bcm_op *op)
{
	struct sk_buff *skb;
	struct net_device *dev;
	struct can_frame *cf = &op->frames[op->currframe];

	/* no target device? => exit */
	if (!op->ifindex)
		return;

	dev = dev_get_by_index(&init_net, op->ifindex);
	if (!dev) {
		/* RFC: should this bcm_op remove itself here? */
		return;
	}

	skb = alloc_skb(CFSIZ, gfp_any());
	if (!skb)
		goto out;

	memcpy(skb_put(skb, CFSIZ), cf, CFSIZ);

	/* send with loopback */
	skb->dev = dev;
	skb->sk = op->sk;
	can_send(skb, 1);

	/* update statistics */
	op->currframe++;
	op->frames_abs++;

	/* reached last frame? */
	if (op->currframe >= op->nframes)
		op->currframe = 0;
 out:
	dev_put(dev);
}

/*
 * bcm_send_to_user - send a BCM message to the userspace
 *                    (consisting of bcm_msg_head + x CAN frames)
 */
static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head,
			     struct can_frame *frames, int has_timestamp)
{
	struct sk_buff *skb;
	struct can_frame *firstframe;
	struct sockaddr_can *addr;
	struct sock *sk = op->sk;
	unsigned int datalen = head->nframes * CFSIZ;
	int err;

	skb = alloc_skb(sizeof(*head) + datalen, gfp_any());
	if (!skb)
		return;

	memcpy(skb_put(skb, sizeof(*head)), head, sizeof(*head));

	if (head->nframes) {
		/* can_frames starting here */
		firstframe = (struct can_frame *)skb_tail_pointer(skb);

		memcpy(skb_put(skb, datalen), frames, datalen);

		/*
		 * the BCM uses the can_dlc-element of the can_frame
		 * structure for internal purposes. This is only
		 * relevant for updates that are generated by the
		 * BCM, where nframes is 1
		 */
		if (head->nframes == 1)
			firstframe->can_dlc &= BCM_CAN_DLC_MASK;
	}

	if (has_timestamp) {
		/* restore rx timestamp */
		skb->tstamp = op->rx_stamp;
	}

	/*
	 * Put the datagram to the queue so that bcm_recvmsg() can
	 * get it from there. We need to pass the interface index to
	 * bcm_recvmsg(). We pass a whole struct sockaddr_can in skb->cb
	 * containing the interface index.
	 */

	BUILD_BUG_ON(sizeof(skb->cb) < sizeof(struct sockaddr_can));
	addr = (struct sockaddr_can *)skb->cb;
	memset(addr, 0, sizeof(*addr));
	addr->can_family  = AF_CAN;
	addr->can_ifindex = op->rx_ifindex;

	err = sock_queue_rcv_skb(sk, skb);
	if (err < 0) {
		struct bcm_sock *bo = bcm_sk(sk);

		kfree_skb(skb);
		/* don't care about overflows in this statistic */
		bo->dropped_usr_msgs++;
	}
}

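/*
 * bcm_tx_start_timer - (re)arm the tx hrtimer: ival1 while 'count'
 * transmissions are still outstanding, ival2 afterwards (a zero interval
 * leaves the timer stopped)
 */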
static void bcm_tx_start_timer(struct bcm_op *op)
{
	if (op->kt_ival1.tv64 && op->count)
		hrtimer_start(&op->timer,
			      ktime_add(ktime_get(), op->kt_ival1),
			      HRTIMER_MODE_ABS);
	else if (op->kt_ival2.tv64)
		hrtimer_start(&op->timer,
			      ktime_add(ktime_get(), op->kt_ival2),
			      HRTIMER_MODE_ABS);
}

static void bcm_tx_timeout_tsklet(unsigned long data)
{
	struct bcm_op *op = (struct bcm_op *)data;
	struct bcm_msg_head msg_head;

	if (op->kt_ival1.tv64 && (op->count > 0)) {

		op->count--;
		if (!op->count && (op->flags & TX_COUNTEVT)) {

			/* create notification to user */
			msg_head.opcode  = TX_EXPIRED;
			msg_head.flags   = op->flags;
			msg_head.count   = op->count;
			msg_head.ival1   = op->ival1;
			msg_head.ival2   = op->ival2;
			msg_head.can_id  = op->can_id;
			msg_head.nframes = 0;

			bcm_send_to_user(op, &msg_head, NULL, 0);
		}
		bcm_can_tx(op);

	} else if (op->kt_ival2.tv64)
		bcm_can_tx(op);

	bcm_tx_start_timer(op);
}

/*
 * bcm_tx_timeout_handler - performs cyclic CAN frame transmissions
 */
static enum hrtimer_restart bcm_tx_timeout_handler(struct hrtimer *hrtimer)
{
	struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer);

	tasklet_schedule(&op->tsklet);

	return HRTIMER_NORESTART;
}

/*
 * bcm_rx_changed - create a RX_CHANGED notification due to changed content
 */
static void bcm_rx_changed(struct bcm_op *op, struct can_frame *data)
{
	struct bcm_msg_head head;

	/* update statistics */
	op->frames_filtered++;

	/* prevent statistics overflow */
	if (op->frames_filtered > ULONG_MAX/100)
		op->frames_filtered = op->frames_abs = 0;

	/* this element is not throttled anymore */
	data->can_dlc &= (BCM_CAN_DLC_MASK|RX_RECV);

	head.opcode  = RX_CHANGED;
	head.flags   = op->flags;
	head.count   = op->count;
	head.ival1   = op->ival1;
	head.ival2   = op->ival2;
	head.can_id  = op->can_id;
	head.nframes = 1;

	bcm_send_to_user(op, &head, data, 1);
}

/*
 * bcm_rx_update_and_send - process a detected relevant receive content change
 *                          1. update the last received data
 *                          2. send a notification to the user (if possible)
 */
static void bcm_rx_update_and_send(struct bcm_op *op,
				   struct can_frame *lastdata,
				   const struct can_frame *rxdata)
{
	memcpy(lastdata, rxdata, CFSIZ);

	/* mark as used and throttled by default */
	lastdata->can_dlc |= (RX_RECV|RX_THR);

	/* throttling mode inactive? */
	if (!op->kt_ival2.tv64) {
		/* send RX_CHANGED to the user immediately */
		bcm_rx_changed(op, lastdata);
		return;
	}

	/* with active throttling timer we are just done here */
	if (hrtimer_active(&op->thrtimer))
		return;

	/* first reception with enabled throttling mode */
	if (!op->kt_lastmsg.tv64)
		goto rx_changed_settime;

	/* got a second frame inside a potential throttle period? */
	if (ktime_us_delta(ktime_get(), op->kt_lastmsg) <
	    ktime_to_us(op->kt_ival2)) {
		/* do not send the saved data - only start throttle timer */
		hrtimer_start(&op->thrtimer,
			      ktime_add(op->kt_lastmsg, op->kt_ival2),
			      HRTIMER_MODE_ABS);
		return;
	}

	/* the gap was big enough, so throttling was not needed here */
rx_changed_settime:
	bcm_rx_changed(op, lastdata);
	op->kt_lastmsg = ktime_get();
}

/*
 * bcm_rx_cmp_to_index - (bit)compares the currently received data to formerly
 *                       received data stored in op->last_frames[]
 */
static void bcm_rx_cmp_to_index(struct bcm_op *op, unsigned int index,
				const struct can_frame *rxdata)
{
	/*
	 * no one uses the MSBs of can_dlc for comparison,
	 * so we use them here to detect the first time of reception
	 */

	if (!(op->last_frames[index].can_dlc & RX_RECV)) {
		/* received data for the first time => send update to user */
		bcm_rx_update_and_send(op, &op->last_frames[index], rxdata);
		return;
	}

	/* do a real check in can_frame data section */

	if ((GET_U64(&op->frames[index]) & GET_U64(rxdata)) !=
	    (GET_U64(&op->frames[index]) & GET_U64(&op->last_frames[index]))) {
		bcm_rx_update_and_send(op, &op->last_frames[index], rxdata);
		return;
	}

	if (op->flags & RX_CHECK_DLC) {
		/* do a real check in can_frame dlc */
		if (rxdata->can_dlc != (op->last_frames[index].can_dlc &
					BCM_CAN_DLC_MASK)) {
			bcm_rx_update_and_send(op, &op->last_frames[index],
					       rxdata);
			return;
		}
	}
}

/*
 * bcm_rx_starttimer - enable timeout monitoring for CAN frame reception
 */
static void bcm_rx_starttimer(struct bcm_op *op)
{
	if (op->flags & RX_NO_AUTOTIMER)
		return;

	if (op->kt_ival1.tv64)
		hrtimer_start(&op->timer, op->kt_ival1, HRTIMER_MODE_REL);
}

static void bcm_rx_timeout_tsklet(unsigned long data)
{
	struct bcm_op *op = (struct bcm_op *)data;
	struct bcm_msg_head msg_head;

	/* create notification to user */
	msg_head.opcode  = RX_TIMEOUT;
	msg_head.flags   = op->flags;
	msg_head.count   = op->count;
	msg_head.ival1   = op->ival1;
	msg_head.ival2   = op->ival2;
	msg_head.can_id  = op->can_id;
	msg_head.nframes = 0;

	bcm_send_to_user(op, &msg_head, NULL, 0);
}

/*
 * bcm_rx_timeout_handler - when the (cyclic) CAN frame reception timed out
 */
static enum hrtimer_restart bcm_rx_timeout_handler(struct hrtimer *hrtimer)
{
	struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer);

	/* schedule before NET_RX_SOFTIRQ */
	tasklet_hi_schedule(&op->tsklet);

	/* no restart of the timer is done here! */

	/* if the user wants to be informed when cyclic CAN messages come back */
	if ((op->flags & RX_ANNOUNCE_RESUME) && op->last_frames) {
		/* clear received can_frames to indicate 'nothing received' */
		memset(op->last_frames, 0, op->nframes * CFSIZ);
	}

	return HRTIMER_NORESTART;
}

/*
 * bcm_rx_do_flush - helper for bcm_rx_thr_flush
 */
static inline int bcm_rx_do_flush(struct bcm_op *op, int update,
				  unsigned int index)
{
	if ((op->last_frames) && (op->last_frames[index].can_dlc & RX_THR)) {
		if (update)
			bcm_rx_changed(op, &op->last_frames[index]);
		return 1;
	}
	return 0;
}

/*
 * bcm_rx_thr_flush - Check for throttled data and send it to the userspace
 *
 * update == 0 : just check if throttled data is available  (any irq context)
 * update == 1 : check and send throttled data to userspace (soft_irq context)
 */
static int bcm_rx_thr_flush(struct bcm_op *op, int update)
{
	int updated = 0;

	if (op->nframes > 1) {
		unsigned int i;

		/* for MUX filter we start at index 1 */
		for (i = 1; i < op->nframes; i++)
			updated += bcm_rx_do_flush(op, update, i);

	} else {
		/* for RX_FILTER_ID and simple filter */
		updated += bcm_rx_do_flush(op, update, 0);
	}

	return updated;
}

static void bcm_rx_thr_tsklet(unsigned long data)
{
	struct bcm_op *op = (struct bcm_op *)data;

	/* push the changed data to the userspace */
	bcm_rx_thr_flush(op, 1);
}

/*
 * bcm_rx_thr_handler - the time for blocked content updates is over now:
 *                      Check for throttled data and send it to the userspace
 */
static enum hrtimer_restart bcm_rx_thr_handler(struct hrtimer *hrtimer)
{
	struct bcm_op *op = container_of(hrtimer, struct bcm_op, thrtimer);

	tasklet_schedule(&op->thrtsklet);

	if (bcm_rx_thr_flush(op, 0)) {
		hrtimer_forward(hrtimer, ktime_get(), op->kt_ival2);
		return HRTIMER_RESTART;
	} else {
		/* rearm throttle handling */
		op->kt_lastmsg = ktime_set(0, 0);
		return HRTIMER_NORESTART;
	}
}

/*
 * bcm_rx_handler - handle a CAN frame reception
 */
static void bcm_rx_handler(struct sk_buff *skb, void *data)
{
	struct bcm_op *op = (struct bcm_op *)data;
	const struct can_frame *rxframe = (struct can_frame *)skb->data;
	unsigned int i;

	/* disable timeout */
	hrtimer_cancel(&op->timer);

	if (op->can_id != rxframe->can_id)
		return;

	/* save rx timestamp */
	op->rx_stamp = skb->tstamp;
	/* save originator for recvfrom() */
	op->rx_ifindex = skb->dev->ifindex;
	/* update statistics */
	op->frames_abs++;

	if (op->flags & RX_RTR_FRAME) {
		/* send reply for RTR-request (placed in op->frames[0]) */
		bcm_can_tx(op);
		return;
	}

	if (op->flags & RX_FILTER_ID) {
		/* the easiest case */
		bcm_rx_update_and_send(op, &op->last_frames[0], rxframe);
		goto rx_starttimer;
	}

	if (op->nframes == 1) {
		/* simple compare with index 0 */
		bcm_rx_cmp_to_index(op, 0, rxframe);
		goto rx_starttimer;
	}

	if (op->nframes > 1) {
		/*
		 * multiplex compare
		 *
		 * find the first multiplex mask that fits.
		 * Remark: The MUX-mask is stored in index 0
		 */

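		/*
		 * e.g. if frames[0].data[0] is 0xFF (MUX mask in data byte 0),
		 * each frames[i].data[0] carries one MUX value and selects
		 * which last_frames[i] slot the received frame is compared to.
		 */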
		for (i = 1; i < op->nframes; i++) {
			if ((GET_U64(&op->frames[0]) & GET_U64(rxframe)) ==
			    (GET_U64(&op->frames[0]) &
			     GET_U64(&op->frames[i]))) {
				bcm_rx_cmp_to_index(op, i, rxframe);
				break;
			}
		}
	}

rx_starttimer:
	bcm_rx_starttimer(op);
}

/*
 * helpers for bcm_op handling: find & delete bcm [rx|tx] op elements
 */
static struct bcm_op *bcm_find_op(struct list_head *ops, canid_t can_id,
				  int ifindex)
{
	struct bcm_op *op;

	list_for_each_entry(op, ops, list) {
		if ((op->can_id == can_id) && (op->ifindex == ifindex))
			return op;
	}

	return NULL;
}

static void bcm_remove_op(struct bcm_op *op)
{
	hrtimer_cancel(&op->timer);
	hrtimer_cancel(&op->thrtimer);

	if (op->tsklet.func)
		tasklet_kill(&op->tsklet);

	if (op->thrtsklet.func)
		tasklet_kill(&op->thrtsklet);

	if ((op->frames) && (op->frames != &op->sframe))
		kfree(op->frames);

	if ((op->last_frames) && (op->last_frames != &op->last_sframe))
		kfree(op->last_frames);

	kfree(op);
}

static void bcm_rx_unreg(struct net_device *dev, struct bcm_op *op)
{
	if (op->rx_reg_dev == dev) {
		can_rx_unregister(dev, op->can_id, REGMASK(op->can_id),
				  bcm_rx_handler, op);

		/* mark as removed subscription */
		op->rx_reg_dev = NULL;
	} else
		printk(KERN_ERR "can-bcm: bcm_rx_unreg: registered device "
		       "mismatch %p %p\n", op->rx_reg_dev, dev);
}

/*
 * bcm_delete_rx_op - find and remove a rx op (returns number of removed ops)
 */
static int bcm_delete_rx_op(struct list_head *ops, canid_t can_id, int ifindex)
{
	struct bcm_op *op, *n;

	list_for_each_entry_safe(op, n, ops, list) {
		if ((op->can_id == can_id) && (op->ifindex == ifindex)) {

			/*
			 * Don't care if we're bound or not (due to netdev
			 * problems), can_rx_unregister() is always a safe
			 * thing to do here.
			 */
			if (op->ifindex) {
				/*
				 * Only remove subscriptions that had not
				 * been removed due to NETDEV_UNREGISTER
				 * in bcm_notifier()
				 */
				if (op->rx_reg_dev) {
					struct net_device *dev;

					dev = dev_get_by_index(&init_net,
							       op->ifindex);
					if (dev) {
						bcm_rx_unreg(dev, op);
						dev_put(dev);
					}
				}
			} else
				can_rx_unregister(NULL, op->can_id,
						  REGMASK(op->can_id),
						  bcm_rx_handler, op);

			list_del(&op->list);
			bcm_remove_op(op);
			return 1; /* done */
		}
	}

	return 0; /* not found */
}

/*
 * bcm_delete_tx_op - find and remove a tx op (returns number of removed ops)
 */
static int bcm_delete_tx_op(struct list_head *ops, canid_t can_id, int ifindex)
{
	struct bcm_op *op, *n;

	list_for_each_entry_safe(op, n, ops, list) {
		if ((op->can_id == can_id) && (op->ifindex == ifindex)) {
			list_del(&op->list);
			bcm_remove_op(op);
			return 1; /* done */
		}
	}

	return 0; /* not found */
}

/*
 * bcm_read_op - read out a bcm_op and send it to the user (for bcm_sendmsg)
 */
static int bcm_read_op(struct list_head *ops, struct bcm_msg_head *msg_head,
		       int ifindex)
{
	struct bcm_op *op = bcm_find_op(ops, msg_head->can_id, ifindex);

	if (!op)
		return -EINVAL;

	/* put current values into msg_head */
	msg_head->flags   = op->flags;
	msg_head->count   = op->count;
	msg_head->ival1   = op->ival1;
	msg_head->ival2   = op->ival2;
	msg_head->nframes = op->nframes;

	bcm_send_to_user(op, msg_head, op->frames, 0);

	return MHSIZ;
}

/*
 * bcm_tx_setup - create or update a bcm tx op (for bcm_sendmsg)
 */
static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
			int ifindex, struct sock *sk)
{
	struct bcm_sock *bo = bcm_sk(sk);
	struct bcm_op *op;
	unsigned int i;
	int err;

	/* we need a real device to send frames */
	if (!ifindex)
		return -ENODEV;

	/* check nframes boundaries - we need at least one can_frame */
	if (msg_head->nframes < 1 || msg_head->nframes > MAX_NFRAMES)
		return -EINVAL;

	/* check the given can_id */
	op = bcm_find_op(&bo->tx_ops, msg_head->can_id, ifindex);

	if (op) {
		/* update existing BCM operation */

		/*
		 * Do we need more space for the can_frames than currently
		 * allocated? -> This is a _really_ unusual use-case and
		 * therefore (complexity / locking) it is not supported.
		 */
		if (msg_head->nframes > op->nframes)
			return -E2BIG;

		/* update can_frames content */
		for (i = 0; i < msg_head->nframes; i++) {
			err = memcpy_fromiovec((u8 *)&op->frames[i],
					       msg->msg_iov, CFSIZ);

			if (op->frames[i].can_dlc > 8)
				err = -EINVAL;

			if (err < 0)
				return err;

			if (msg_head->flags & TX_CP_CAN_ID) {
				/* copy can_id into frame */
				op->frames[i].can_id = msg_head->can_id;
			}
		}

	} else {
		/* insert new BCM operation for the given can_id */

		op = kzalloc(OPSIZ, GFP_KERNEL);
		if (!op)
			return -ENOMEM;

		op->can_id = msg_head->can_id;

		/* create array for can_frames and copy the data */
		if (msg_head->nframes > 1) {
			op->frames = kmalloc(msg_head->nframes * CFSIZ,
					     GFP_KERNEL);
			if (!op->frames) {
				kfree(op);
				return -ENOMEM;
			}
		} else
			op->frames = &op->sframe;

		for (i = 0; i < msg_head->nframes; i++) {
			err = memcpy_fromiovec((u8 *)&op->frames[i],
					       msg->msg_iov, CFSIZ);

			if (op->frames[i].can_dlc > 8)
				err = -EINVAL;

			if (err < 0) {
				if (op->frames != &op->sframe)
					kfree(op->frames);
				kfree(op);
				return err;
			}

			if (msg_head->flags & TX_CP_CAN_ID) {
				/* copy can_id into frame */
				op->frames[i].can_id = msg_head->can_id;
			}
		}

		/* tx_ops never compare with previous received messages */
		op->last_frames = NULL;

		/* bcm_can_tx / bcm_tx_timeout_handler needs this */
		op->sk = sk;
		op->ifindex = ifindex;

		/* initialize uninitialized (kzalloc) structure */
		hrtimer_init(&op->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		op->timer.function = bcm_tx_timeout_handler;

		/* initialize tasklet for tx countevent notification */
		tasklet_init(&op->tsklet, bcm_tx_timeout_tsklet,
			     (unsigned long) op);

		/* currently unused in tx_ops */
		hrtimer_init(&op->thrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);

		/* add this bcm_op to the list of the tx_ops */
		list_add(&op->list, &bo->tx_ops);

	} /* if ((op = bcm_find_op(&bo->tx_ops, msg_head->can_id, ifindex))) */

	if (op->nframes != msg_head->nframes) {
		op->nframes = msg_head->nframes;
		/* start multiple frame transmission with index 0 */
		op->currframe = 0;
	}

	/* check flags */

	op->flags = msg_head->flags;

	if (op->flags & TX_RESET_MULTI_IDX) {
		/* start multiple frame transmission with index 0 */
		op->currframe = 0;
	}

	if (op->flags & SETTIMER) {
		/* set timer values */
		op->count = msg_head->count;
		op->ival1 = msg_head->ival1;
		op->ival2 = msg_head->ival2;
		op->kt_ival1 = timeval_to_ktime(msg_head->ival1);
		op->kt_ival2 = timeval_to_ktime(msg_head->ival2);

		/* disable an active timer due to zero values? */
		if (!op->kt_ival1.tv64 && !op->kt_ival2.tv64)
			hrtimer_cancel(&op->timer);
	}

	if (op->flags & STARTTIMER) {
		hrtimer_cancel(&op->timer);
		/* spec: send can_frame when starting timer */
		op->flags |= TX_ANNOUNCE;
	}

	if (op->flags & TX_ANNOUNCE) {
		bcm_can_tx(op);
		if (op->count)
			op->count--;
	}

	if (op->flags & STARTTIMER)
		bcm_tx_start_timer(op);

	return msg_head->nframes * CFSIZ + MHSIZ;
}

/*
 * bcm_rx_setup - create or update a bcm rx op (for bcm_sendmsg)
 */
static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
			int ifindex, struct sock *sk)
{
	struct bcm_sock *bo = bcm_sk(sk);
	struct bcm_op *op;
	int do_rx_register;
	int err = 0;

	if ((msg_head->flags & RX_FILTER_ID) || (!(msg_head->nframes))) {
		/* be robust against wrong usage ... */
		msg_head->flags |= RX_FILTER_ID;
		/* ignore trailing garbage */
		msg_head->nframes = 0;
	}

	/* the first element contains the mux-mask => MAX_NFRAMES + 1 */
	if (msg_head->nframes > MAX_NFRAMES + 1)
		return -EINVAL;

	if ((msg_head->flags & RX_RTR_FRAME) &&
	    ((msg_head->nframes != 1) ||
	     (!(msg_head->can_id & CAN_RTR_FLAG))))
		return -EINVAL;

	/* check the given can_id */
	op = bcm_find_op(&bo->rx_ops, msg_head->can_id, ifindex);
	if (op) {
		/* update existing BCM operation */

		/*
		 * Do we need more space for the can_frames than currently
		 * allocated? -> This is a _really_ unusual use-case and
		 * therefore (complexity / locking) it is not supported.
		 */
		if (msg_head->nframes > op->nframes)
			return -E2BIG;

		if (msg_head->nframes) {
			/* update can_frames content */
			err = memcpy_fromiovec((u8 *)op->frames,
					       msg->msg_iov,
					       msg_head->nframes * CFSIZ);
			if (err < 0)
				return err;

			/* clear last_frames to indicate 'nothing received' */
			memset(op->last_frames, 0, msg_head->nframes * CFSIZ);
		}

		op->nframes = msg_head->nframes;

		/* Only an update -> do not call can_rx_register() */
		do_rx_register = 0;

	} else {
		/* insert new BCM operation for the given can_id */
		op = kzalloc(OPSIZ, GFP_KERNEL);
		if (!op)
			return -ENOMEM;

		op->can_id = msg_head->can_id;
		op->nframes = msg_head->nframes;

		if (msg_head->nframes > 1) {
			/* create array for can_frames and copy the data */
			op->frames = kmalloc(msg_head->nframes * CFSIZ,
					     GFP_KERNEL);
			if (!op->frames) {
				kfree(op);
				return -ENOMEM;
			}

			/* create and init array for received can_frames */
			op->last_frames = kzalloc(msg_head->nframes * CFSIZ,
						  GFP_KERNEL);
			if (!op->last_frames) {
				kfree(op->frames);
				kfree(op);
				return -ENOMEM;
			}

		} else {
			op->frames = &op->sframe;
			op->last_frames = &op->last_sframe;
		}

		if (msg_head->nframes) {
			err = memcpy_fromiovec((u8 *)op->frames, msg->msg_iov,
					       msg_head->nframes * CFSIZ);
			if (err < 0) {
				if (op->frames != &op->sframe)
					kfree(op->frames);
				if (op->last_frames != &op->last_sframe)
					kfree(op->last_frames);
				kfree(op);
				return err;
			}
		}

		/* bcm_can_tx / bcm_tx_timeout_handler needs this */
		op->sk = sk;
		op->ifindex = ifindex;

		/* initialize uninitialized (kzalloc) structure */
		hrtimer_init(&op->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		op->timer.function = bcm_rx_timeout_handler;

		/* initialize tasklet for rx timeout notification */
		tasklet_init(&op->tsklet, bcm_rx_timeout_tsklet,
			     (unsigned long) op);

		hrtimer_init(&op->thrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		op->thrtimer.function = bcm_rx_thr_handler;

		/* initialize tasklet for rx throttle handling */
		tasklet_init(&op->thrtsklet, bcm_rx_thr_tsklet,
			     (unsigned long) op);

		/* add this bcm_op to the list of the rx_ops */
		list_add(&op->list, &bo->rx_ops);

		/* call can_rx_register() */
		do_rx_register = 1;

	} /* if ((op = bcm_find_op(&bo->rx_ops, msg_head->can_id, ifindex))) */

	/* check flags */
	op->flags = msg_head->flags;

	if (op->flags & RX_RTR_FRAME) {

		/* no timers in RTR-mode */
		hrtimer_cancel(&op->thrtimer);
		hrtimer_cancel(&op->timer);

		/*
		 * funny feature in RX(!)_SETUP only for RTR-mode:
		 * copy can_id into frame BUT without RTR-flag to
		 * prevent a full-load-loopback-test ... ;-]
		 */
		if ((op->flags & TX_CP_CAN_ID) ||
		    (op->frames[0].can_id == op->can_id))
			op->frames[0].can_id = op->can_id & ~CAN_RTR_FLAG;

	} else {
		if (op->flags & SETTIMER) {

			/* set timer value */
			op->ival1 = msg_head->ival1;
			op->ival2 = msg_head->ival2;
			op->kt_ival1 = timeval_to_ktime(msg_head->ival1);
			op->kt_ival2 = timeval_to_ktime(msg_head->ival2);

			/* disable an active timer due to zero value? */
			if (!op->kt_ival1.tv64)
				hrtimer_cancel(&op->timer);

			/*
			 * In any case cancel the throttle timer, flush
			 * potentially blocked msgs and reset throttle handling
			 */
			op->kt_lastmsg = ktime_set(0, 0);
			hrtimer_cancel(&op->thrtimer);
			bcm_rx_thr_flush(op, 1);
		}

		if ((op->flags & STARTTIMER) && op->kt_ival1.tv64)
			hrtimer_start(&op->timer, op->kt_ival1,
				      HRTIMER_MODE_REL);
	}

	/* now we can register for can_ids, if we added a new bcm_op */
	if (do_rx_register) {
		if (ifindex) {
			struct net_device *dev;

			dev = dev_get_by_index(&init_net, ifindex);
			if (dev) {
				err = can_rx_register(dev, op->can_id,
						      REGMASK(op->can_id),
						      bcm_rx_handler, op,
						      "bcm");

				op->rx_reg_dev = dev;
				dev_put(dev);
			}

		} else
			err = can_rx_register(NULL, op->can_id,
					      REGMASK(op->can_id),
					      bcm_rx_handler, op, "bcm");
		if (err) {
			/* this bcm rx op is broken -> remove it */
			list_del(&op->list);
			bcm_remove_op(op);
			return err;
		}
	}

	return msg_head->nframes * CFSIZ + MHSIZ;
}

/*
 * bcm_tx_send - send a single CAN frame to the CAN interface (for bcm_sendmsg)
 */
static int bcm_tx_send(struct msghdr *msg, int ifindex, struct sock *sk)
{
	struct sk_buff *skb;
	struct net_device *dev;
	int err;

	/* we need a real device to send frames */
	if (!ifindex)
		return -ENODEV;

	skb = alloc_skb(CFSIZ, GFP_KERNEL);

	if (!skb)
		return -ENOMEM;

	err = memcpy_fromiovec(skb_put(skb, CFSIZ), msg->msg_iov, CFSIZ);
	if (err < 0) {
		kfree_skb(skb);
		return err;
	}

	dev = dev_get_by_index(&init_net, ifindex);
	if (!dev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	skb->dev = dev;
	skb->sk  = sk;
	err = can_send(skb, 1); /* send with loopback */
	dev_put(dev);

	if (err)
		return err;

	return CFSIZ + MHSIZ;
}

/*
 * bcm_sendmsg - process BCM commands (opcodes) from the userspace
 */
static int bcm_sendmsg(struct kiocb *iocb, struct socket *sock,
		       struct msghdr *msg, size_t size)
{
	struct sock *sk = sock->sk;
	struct bcm_sock *bo = bcm_sk(sk);
	int ifindex = bo->ifindex; /* default ifindex for this bcm_op */
	struct bcm_msg_head msg_head;
	int ret; /* read bytes or error codes as return value */

	if (!bo->bound)
		return -ENOTCONN;

	/* check for valid message length from userspace */
	if (size < MHSIZ || (size - MHSIZ) % CFSIZ)
		return -EINVAL;

	/* check for alternative ifindex for this bcm_op */

	if (!ifindex && msg->msg_name) {
		/* no bound device as default => check msg_name */
		struct sockaddr_can *addr =
			(struct sockaddr_can *)msg->msg_name;

		if (msg->msg_namelen < sizeof(*addr))
			return -EINVAL;

		if (addr->can_family != AF_CAN)
			return -EINVAL;

		/* ifindex from sendto() */
		ifindex = addr->can_ifindex;

		if (ifindex) {
			struct net_device *dev;

			dev = dev_get_by_index(&init_net, ifindex);
			if (!dev)
				return -ENODEV;

			if (dev->type != ARPHRD_CAN) {
				dev_put(dev);
				return -ENODEV;
			}

			dev_put(dev);
		}
	}

	/* read message head information */

	ret = memcpy_fromiovec((u8 *)&msg_head, msg->msg_iov, MHSIZ);
	if (ret < 0)
		return ret;

	lock_sock(sk);

	switch (msg_head.opcode) {

	case TX_SETUP:
		ret = bcm_tx_setup(&msg_head, msg, ifindex, sk);
		break;

	case RX_SETUP:
		ret = bcm_rx_setup(&msg_head, msg, ifindex, sk);
		break;

	case TX_DELETE:
		if (bcm_delete_tx_op(&bo->tx_ops, msg_head.can_id, ifindex))
			ret = MHSIZ;
		else
			ret = -EINVAL;
		break;

	case RX_DELETE:
		if (bcm_delete_rx_op(&bo->rx_ops, msg_head.can_id, ifindex))
			ret = MHSIZ;
		else
			ret = -EINVAL;
		break;

	case TX_READ:
		/* reuse msg_head for the reply to TX_READ */
		msg_head.opcode = TX_STATUS;
		ret = bcm_read_op(&bo->tx_ops, &msg_head, ifindex);
		break;

	case RX_READ:
		/* reuse msg_head for the reply to RX_READ */
		msg_head.opcode = RX_STATUS;
		ret = bcm_read_op(&bo->rx_ops, &msg_head, ifindex);
		break;

	case TX_SEND:
		/* we need exactly one can_frame behind the msg head */
		if ((msg_head.nframes != 1) || (size != CFSIZ + MHSIZ))
			ret = -EINVAL;
		else
			ret = bcm_tx_send(msg, ifindex, sk);
		break;

	default:
		ret = -EINVAL;
		break;
	}

	release_sock(sk);

	return ret;
}

/*
 * notification handler for netdevice status changes
 */
static int bcm_notifier(struct notifier_block *nb, unsigned long msg,
			void *data)
{
	struct net_device *dev = (struct net_device *)data;
	struct bcm_sock *bo = container_of(nb, struct bcm_sock, notifier);
	struct sock *sk = &bo->sk;
	struct bcm_op *op;
	int notify_enodev = 0;

	if (!net_eq(dev_net(dev), &init_net))
		return NOTIFY_DONE;

	if (dev->type != ARPHRD_CAN)
		return NOTIFY_DONE;

	switch (msg) {

	case NETDEV_UNREGISTER:
		lock_sock(sk);

		/* remove device specific receive entries */
		list_for_each_entry(op, &bo->rx_ops, list)
			if (op->rx_reg_dev == dev)
				bcm_rx_unreg(dev, op);

		/* remove device reference, if this is our bound device */
		if (bo->bound && bo->ifindex == dev->ifindex) {
			bo->bound   = 0;
			bo->ifindex = 0;
			notify_enodev = 1;
		}

		release_sock(sk);

		if (notify_enodev) {
			sk->sk_err = ENODEV;
			if (!sock_flag(sk, SOCK_DEAD))
				sk->sk_error_report(sk);
		}
		break;

	case NETDEV_DOWN:
		if (bo->bound && bo->ifindex == dev->ifindex) {
			sk->sk_err = ENETDOWN;
			if (!sock_flag(sk, SOCK_DEAD))
				sk->sk_error_report(sk);
		}
	}

	return NOTIFY_DONE;
}

/*
 * initial settings for all BCM sockets to be set at socket creation time
 */
static int bcm_init(struct sock *sk)
{
	struct bcm_sock *bo = bcm_sk(sk);

	bo->bound            = 0;
	bo->ifindex          = 0;
	bo->dropped_usr_msgs = 0;
	bo->bcm_proc_read    = NULL;

	INIT_LIST_HEAD(&bo->tx_ops);
	INIT_LIST_HEAD(&bo->rx_ops);

	/* set notifier */
	bo->notifier.notifier_call = bcm_notifier;

	register_netdevice_notifier(&bo->notifier);

	return 0;
}

/*
 * standard socket functions
 */
static int bcm_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct bcm_sock *bo;
	struct bcm_op *op, *next;

	if (sk == NULL)
		return 0;

	bo = bcm_sk(sk);

	/* remove bcm_ops, timer, rx_unregister(), etc. */

	unregister_netdevice_notifier(&bo->notifier);

	lock_sock(sk);

	list_for_each_entry_safe(op, next, &bo->tx_ops, list)
		bcm_remove_op(op);

	list_for_each_entry_safe(op, next, &bo->rx_ops, list) {
		/*
		 * Don't care if we're bound or not (due to netdev problems),
		 * can_rx_unregister() is always a safe thing to do here.
		 */
		if (op->ifindex) {
			/*
			 * Only remove subscriptions that had not
			 * been removed due to NETDEV_UNREGISTER
			 * in bcm_notifier()
			 */
			if (op->rx_reg_dev) {
				struct net_device *dev;

				dev = dev_get_by_index(&init_net, op->ifindex);
				if (dev) {
					bcm_rx_unreg(dev, op);
					dev_put(dev);
				}
			}
		} else
			can_rx_unregister(NULL, op->can_id,
					  REGMASK(op->can_id),
					  bcm_rx_handler, op);

		bcm_remove_op(op);
	}

	/* remove procfs entry */
	if (proc_dir && bo->bcm_proc_read)
		remove_proc_entry(bo->procname, proc_dir);

	/* remove device reference */
	if (bo->bound) {
		bo->bound   = 0;
		bo->ifindex = 0;
	}

	sock_orphan(sk);
	sock->sk = NULL;

	release_sock(sk);
	sock_put(sk);

	return 0;
}

static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len,
		       int flags)
{
	struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
	struct sock *sk = sock->sk;
	struct bcm_sock *bo = bcm_sk(sk);

	if (len < sizeof(*addr))
		return -EINVAL;

	if (bo->bound)
		return -EISCONN;

	/* bind a device to this socket */
	if (addr->can_ifindex) {
		struct net_device *dev;

		dev = dev_get_by_index(&init_net, addr->can_ifindex);
		if (!dev)
			return -ENODEV;

		if (dev->type != ARPHRD_CAN) {
			dev_put(dev);
			return -ENODEV;
		}

		bo->ifindex = dev->ifindex;
		dev_put(dev);

	} else {
		/* no interface reference for ifindex = 0 ('any' CAN device) */
		bo->ifindex = 0;
	}

	bo->bound = 1;

	if (proc_dir) {
		/* unique socket address as filename */
		sprintf(bo->procname, "%lu", sock_i_ino(sk));
		bo->bcm_proc_read = proc_create_data(bo->procname, 0644,
						     proc_dir,
						     &bcm_proc_fops, sk);
	}

	return 0;
}

static int bcm_recvmsg(struct kiocb *iocb, struct socket *sock,
		       struct msghdr *msg, size_t size, int flags)
{
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int error = 0;
	int noblock;
	int err;

	noblock = flags & MSG_DONTWAIT;
	flags  &= ~MSG_DONTWAIT;
	skb = skb_recv_datagram(sk, flags, noblock, &error);
	if (!skb)
		return error;

	if (skb->len < size)
		size = skb->len;

	err = memcpy_toiovec(msg->msg_iov, skb->data, size);
	if (err < 0) {
		skb_free_datagram(sk, skb);
		return err;
	}

	sock_recv_ts_and_drops(msg, sk, skb);

	if (msg->msg_name) {
		msg->msg_namelen = sizeof(struct sockaddr_can);
		memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
	}

	skb_free_datagram(sk, skb);

	return size;
}

static const struct proto_ops bcm_ops = {
	.family        = PF_CAN,
	.release       = bcm_release,
	.bind          = sock_no_bind,
	.connect       = bcm_connect,
	.socketpair    = sock_no_socketpair,
	.accept        = sock_no_accept,
	.getname       = sock_no_getname,
	.poll          = datagram_poll,
	.ioctl         = can_ioctl,	/* use can_ioctl() from af_can.c */
	.listen        = sock_no_listen,
	.shutdown      = sock_no_shutdown,
	.setsockopt    = sock_no_setsockopt,
	.getsockopt    = sock_no_getsockopt,
	.sendmsg       = bcm_sendmsg,
	.recvmsg       = bcm_recvmsg,
	.mmap          = sock_no_mmap,
	.sendpage      = sock_no_sendpage,
};

static struct proto bcm_proto __read_mostly = {
	.name       = "CAN_BCM",
	.owner      = THIS_MODULE,
	.obj_size   = sizeof(struct bcm_sock),
	.init       = bcm_init,
};

static const struct can_proto bcm_can_proto = {
	.type       = SOCK_DGRAM,
	.protocol   = CAN_BCM,
	.ops        = &bcm_ops,
	.prot       = &bcm_proto,
};

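/*
 * Registering bcm_can_proto below is what makes
 * socket(PF_CAN, SOCK_DGRAM, CAN_BCM) available to userspace; the
 * /proc/net/can-bcm directory holds one statistics file per bound socket.
 */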
static int __init bcm_module_init(void)
{
	int err;

	printk(banner);

	err = can_proto_register(&bcm_can_proto);
	if (err < 0) {
		printk(KERN_ERR "can: registration of bcm protocol failed\n");
		return err;
	}

	/* create /proc/net/can-bcm directory */
	proc_dir = proc_mkdir("can-bcm", init_net.proc_net);
	return 0;
}

static void __exit bcm_module_exit(void)
{
	can_proto_unregister(&bcm_can_proto);

	if (proc_dir)
		proc_net_remove(&init_net, "can-bcm");
}

module_init(bcm_module_init);
module_exit(bcm_module_exit);