blob: 98c5998988e63b320814f9b1b8ee7069f89d0dff [file] [log] [blame]
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001/* arch/arm/mach-msm/smd_rpcrouter.c
2 *
3 * Copyright (C) 2007 Google, Inc.
4 * Copyright (c) 2007-2011, Code Aurora Forum. All rights reserved.
5 * Author: San Mehat <san@android.com>
6 *
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 */
17
18/* TODO: handle cases where smd_write() will tempfail due to full fifo */
19/* TODO: thread priority? schedule a work to bump it? */
20/* TODO: maybe make server_list_lock a mutex */
21/* TODO: pool fragments to avoid kmalloc/kfree churn */
22
23#include <linux/slab.h>
24#include <linux/module.h>
25#include <linux/kernel.h>
26#include <linux/string.h>
27#include <linux/errno.h>
28#include <linux/cdev.h>
29#include <linux/init.h>
30#include <linux/device.h>
31#include <linux/types.h>
32#include <linux/delay.h>
33#include <linux/fs.h>
34#include <linux/err.h>
35#include <linux/sched.h>
36#include <linux/poll.h>
37#include <linux/wakelock.h>
38#include <asm/uaccess.h>
39#include <asm/byteorder.h>
40#include <linux/platform_device.h>
41#include <linux/uaccess.h>
42#include <linux/debugfs.h>
43
44#include <asm/byteorder.h>
45
46#include <mach/msm_smd.h>
47#include <mach/smem_log.h>
48#include <mach/subsystem_notif.h>
49
50#include "smd_rpcrouter.h"
51#include "modem_notifier.h"
52#include "smd_rpc_sym.h"
53#include "smd_private.h"
54
/* Bit flags for the smd_rpcrouter debug_mask module parameter; each bit
 * enables one class of debug logging (consumed by the D/RR/RAW/IO/NTFY
 * printk macros below).
 */
enum {
	SMEM_LOG = 1U << 0,	/* log router events to the shared-memory log */
	RTR_DBG = 1U << 1,	/* general router debug (D) */
	R2R_MSG = 1U << 2,	/* router-to-router control messages (RR) */
	R2R_RAW = 1U << 3,	/* raw router-to-router payloads (RAW) */
	RPC_MSG = 1U << 4,	/* RPC read/write traffic (IO) */
	NTFY_MSG = 1U << 5,	/* notification callbacks (NTFY) */
	RAW_PMR = 1U << 6,	/* raw pacmark reads */
	RAW_PMW = 1U << 7,	/* raw pacmark writes */
	R2R_RAW_HDR = 1U << 8,	/* raw router header dumps (RAW_HDR) */
};
66static int msm_rpc_connect_timeout_ms;
67module_param_named(connect_timeout, msm_rpc_connect_timeout_ms,
68 int, S_IRUGO | S_IWUSR | S_IWGRP);
69
70static int smd_rpcrouter_debug_mask;
71module_param_named(debug_mask, smd_rpcrouter_debug_mask,
72 int, S_IRUGO | S_IWUSR | S_IWGRP);
73
74#define DIAG(x...) printk(KERN_ERR "[RR] ERROR " x)
75
76#if defined(CONFIG_MSM_ONCRPCROUTER_DEBUG)
77#define D(x...) do { \
78if (smd_rpcrouter_debug_mask & RTR_DBG) \
79 printk(KERN_ERR x); \
80} while (0)
81
82#define RR(x...) do { \
83if (smd_rpcrouter_debug_mask & R2R_MSG) \
84 printk(KERN_ERR "[RR] "x); \
85} while (0)
86
87#define RAW(x...) do { \
88if (smd_rpcrouter_debug_mask & R2R_RAW) \
89 printk(KERN_ERR "[RAW] "x); \
90} while (0)
91
92#define RAW_HDR(x...) do { \
93if (smd_rpcrouter_debug_mask & R2R_RAW_HDR) \
94 printk(KERN_ERR "[HDR] "x); \
95} while (0)
96
97#define RAW_PMR(x...) do { \
98if (smd_rpcrouter_debug_mask & RAW_PMR) \
99 printk(KERN_ERR "[PMR] "x); \
100} while (0)
101
102#define RAW_PMR_NOMASK(x...) do { \
103 printk(KERN_ERR "[PMR] "x); \
104} while (0)
105
106#define RAW_PMW(x...) do { \
107if (smd_rpcrouter_debug_mask & RAW_PMW) \
108 printk(KERN_ERR "[PMW] "x); \
109} while (0)
110
111#define RAW_PMW_NOMASK(x...) do { \
112 printk(KERN_ERR "[PMW] "x); \
113} while (0)
114
115#define IO(x...) do { \
116if (smd_rpcrouter_debug_mask & RPC_MSG) \
117 printk(KERN_ERR "[RPC] "x); \
118} while (0)
119
120#define NTFY(x...) do { \
121if (smd_rpcrouter_debug_mask & NTFY_MSG) \
122 printk(KERN_ERR "[NOTIFY] "x); \
123} while (0)
124#else
125#define D(x...) do { } while (0)
126#define RR(x...) do { } while (0)
127#define RAW(x...) do { } while (0)
128#define RAW_HDR(x...) do { } while (0)
129#define RAW_PMR(x...) do { } while (0)
130#define RAW_PMR_NO_MASK(x...) do { } while (0)
131#define RAW_PMW(x...) do { } while (0)
132#define RAW_PMW_NO_MASK(x...) do { } while (0)
133#define IO(x...) do { } while (0)
134#define NTFY(x...) do { } while (0)
135#endif
136
137
138static LIST_HEAD(local_endpoints);
139static LIST_HEAD(remote_endpoints);
140
141static LIST_HEAD(server_list);
142
143static wait_queue_head_t newserver_wait;
144static wait_queue_head_t subsystem_restart_wait;
145
146static DEFINE_SPINLOCK(local_endpoints_lock);
147static DEFINE_SPINLOCK(remote_endpoints_lock);
148static DEFINE_SPINLOCK(server_list_lock);
149
150static LIST_HEAD(rpc_board_dev_list);
151static DEFINE_SPINLOCK(rpc_board_dev_list_lock);
152
153static struct workqueue_struct *rpcrouter_workqueue;
154
155static atomic_t next_xid = ATOMIC_INIT(1);
156static atomic_t pm_mid = ATOMIC_INIT(1);
157
158static void do_read_data(struct work_struct *work);
159static void do_create_pdevs(struct work_struct *work);
160static void do_create_rpcrouter_pdev(struct work_struct *work);
161
162static DECLARE_WORK(work_create_pdevs, do_create_pdevs);
163static DECLARE_WORK(work_create_rpcrouter_pdev, do_create_rpcrouter_pdev);
164
165#define RR_STATE_IDLE 0
166#define RR_STATE_HEADER 1
167#define RR_STATE_BODY 2
168#define RR_STATE_ERROR 3
169
170/* State for remote ep following restart */
171#define RESTART_QUOTA_ABORT 1
172
/* Incremental reassembly state for data arriving from a transport,
 * driven through the RR_STATE_* values above.
 */
struct rr_context {
	struct rr_packet *pkt;	/* packet currently being assembled */
	uint8_t *ptr;		/* write cursor into the current buffer */
	uint32_t state; /* current assembly state */
	uint32_t count; /* bytes needed in this state */
};

struct rr_context the_rr_context;
181
182struct rpc_board_dev_info {
183 struct list_head list;
184
185 struct rpc_board_dev *dev;
186};
187
188static struct platform_device rpcrouter_pdev = {
189 .name = "oncrpc_router",
190 .id = -1,
191};
192
/* Per-transport bookkeeping for one registered rpcrouter_xprt. */
struct rpcrouter_xprt_info {
	struct list_head list;		/* node on xprt_info_list */

	struct rpcrouter_xprt *xprt;	/* underlying transport ops */

	/* Remote processor id; -1 until learned from the first
	 * router-addressed message (see do_read_data). */
	int remote_pid;
	uint32_t initialized;		/* set once HELLO handshake completes */
	wait_queue_head_t read_wait;	/* readers sleep here for more data */
	struct wake_lock wakelock;	/* held while data is pending */
	spinlock_t lock;		/* guards transport read/write access */
	uint32_t need_len;		/* bytes the blocked reader is waiting for */
	struct work_struct read_data;	/* deferred read handler (do_read_data) */
	struct workqueue_struct *workqueue;
	int abort_data_read;		/* set to unblock/abort readers on teardown */
	unsigned char r2r_buf[RPCROUTER_MSGSIZE_MAX];	/* control-msg scratch */
};
209
210static LIST_HEAD(xprt_info_list);
211static DEFINE_MUTEX(xprt_info_list_lock);
212
213DECLARE_COMPLETION(rpc_remote_router_up);
214static atomic_t pending_close_count = ATOMIC_INIT(0);
215
216/*
217 * Search for transport (xprt) that matches the provided PID.
218 *
219 * Note: The calling function must ensure that the mutex
220 * xprt_info_list_lock is locked when this function
221 * is called.
222 *
223 * @remote_pid Remote PID for the transport
224 *
225 * @returns Pointer to transport or NULL if not found
226 */
227static struct rpcrouter_xprt_info *rpcrouter_get_xprt_info(uint32_t remote_pid)
228{
229 struct rpcrouter_xprt_info *xprt_info;
230
231 list_for_each_entry(xprt_info, &xprt_info_list, list) {
232 if (xprt_info->remote_pid == remote_pid)
233 return xprt_info;
234 }
235 return NULL;
236}
237
/*
 * Send one router-to-router control message on the given transport.
 *
 * Builds an rr_header addressed to the remote router port and writes
 * header + payload atomically with respect to other writers (the
 * transport lock is held across both writes).
 *
 * @xprt_info  Destination transport
 * @msg        Control message; msg->cmd selects the header type
 *
 * @returns 0 on success (including the local-pid no-op case),
 *          -EINVAL if the router is not yet initialized and the
 *          message is not a HELLO
 */
static int rpcrouter_send_control_msg(struct rpcrouter_xprt_info *xprt_info,
				      union rr_control_msg *msg)
{
	struct rr_header hdr;
	unsigned long flags = 0;
	int need;

	/* Loopback transport: nothing to transmit. */
	if (xprt_info->remote_pid == RPCROUTER_PID_LOCAL)
		return 0;

	/* Only HELLO may be sent before the handshake completes. */
	if (!(msg->cmd == RPCROUTER_CTRL_CMD_HELLO) &&
	    !xprt_info->initialized) {
		printk(KERN_ERR "rpcrouter_send_control_msg(): Warning, "
		       "router not initialized\n");
		return -EINVAL;
	}

	hdr.version = RPCROUTER_VERSION;
	hdr.type = msg->cmd;
	hdr.src_pid = RPCROUTER_PID_LOCAL;
	hdr.src_cid = RPCROUTER_ROUTER_ADDRESS;
	hdr.confirm_rx = 0;
	hdr.size = sizeof(*msg);
	hdr.dst_pid = xprt_info->remote_pid;
	hdr.dst_cid = RPCROUTER_ROUTER_ADDRESS;

	/* TODO: what if channel is full? */

	/* Poll for write space: the lock is dropped around msleep() so
	 * the transport can drain; re-check after each sleep.  NOTE:
	 * this relies on the caller being in sleepable context. */
	need = sizeof(hdr) + hdr.size;
	spin_lock_irqsave(&xprt_info->lock, flags);
	while (xprt_info->xprt->write_avail() < need) {
		spin_unlock_irqrestore(&xprt_info->lock, flags);
		msleep(250);
		spin_lock_irqsave(&xprt_info->lock, flags);
	}
	/* Header and payload must go out back-to-back under one lock hold. */
	xprt_info->xprt->write(&hdr, sizeof(hdr), HEADER);
	xprt_info->xprt->write(msg, hdr.size, PAYLOAD);
	spin_unlock_irqrestore(&xprt_info->lock, flags);

	return 0;
}
279
/*
 * Tear down all state associated with a transport whose remote
 * processor has restarted.
 *
 * For every local endpoint bound to the restarted pid this:
 *   - invokes the client's restart-teardown callback,
 *   - frees all pending and available replies,
 *   - marks the endpoint RESTART_PEND_NTFY_SVR,
 *   - discards incomplete and fully-read-queued packets,
 *   - wakes any reader blocked on the endpoint.
 * It then aborts tx-quota waits on all remote endpoints.
 *
 * Locking: nested spinlocks are taken strictly inside
 * local_endpoints_lock (reply_q_lock, then restart_lock around
 * incomplete_lock/read_q_lock), all with IRQs already disabled by the
 * outermost irqsave.
 */
static void modem_reset_cleanup(struct rpcrouter_xprt_info *xprt_info)
{
	struct msm_rpc_endpoint *ept;
	struct rr_remote_endpoint *r_ept;
	struct rr_packet *pkt, *tmp_pkt;
	struct rr_fragment *frag, *next;
	struct msm_rpc_reply *reply, *reply_tmp;
	unsigned long flags;

	spin_lock_irqsave(&local_endpoints_lock, flags);
	/* remove all partial packets received */
	list_for_each_entry(ept, &local_endpoints, list) {
		RR("%s EPT DST PID %x, remote_pid:%d\n", __func__,
		   ept->dst_pid, xprt_info->remote_pid);

		/* Only endpoints talking to the restarted processor. */
		if (xprt_info->remote_pid != ept->dst_pid)
			continue;

		D("calling teardown cb %p\n", ept->cb_restart_teardown);
		if (ept->cb_restart_teardown)
			ept->cb_restart_teardown(ept->client_data);
		/* Remember to deliver the setup callback once the remote
		 * side comes back (see modem_reset_startup). */
		ept->do_setup_notif = 1;

		/* remove replies */
		spin_lock(&ept->reply_q_lock);
		list_for_each_entry_safe(reply, reply_tmp,
					 &ept->reply_pend_q, list) {
			list_del(&reply->list);
			kfree(reply);
		}
		list_for_each_entry_safe(reply, reply_tmp,
					 &ept->reply_avail_q, list) {
			list_del(&reply->list);
			kfree(reply);
		}
		ept->reply_cnt = 0;
		spin_unlock(&ept->reply_q_lock);

		/* Set restart state for local ep */
		RR("EPT:0x%p, State %d RESTART_PEND_NTFY_SVR "
		   "PROG:0x%08x VERS:0x%08x\n",
		   ept, ept->restart_state,
		   be32_to_cpu(ept->dst_prog),
		   be32_to_cpu(ept->dst_vers));
		spin_lock(&ept->restart_lock);
		ept->restart_state = RESTART_PEND_NTFY_SVR;

		/* remove incomplete packets */
		spin_lock(&ept->incomplete_lock);
		list_for_each_entry_safe(pkt, tmp_pkt,
					 &ept->incomplete, list) {
			list_del(&pkt->list);
			/* Free the whole fragment chain of each packet. */
			frag = pkt->first;
			while (frag != NULL) {
				next = frag->next;
				kfree(frag);
				frag = next;
			}
			kfree(pkt);
		}
		spin_unlock(&ept->incomplete_lock);

		/* remove all completed packets waiting to be read */
		spin_lock(&ept->read_q_lock);
		list_for_each_entry_safe(pkt, tmp_pkt, &ept->read_q,
					 list) {
			list_del(&pkt->list);
			frag = pkt->first;
			while (frag != NULL) {
				next = frag->next;
				kfree(frag);
				frag = next;
			}
			kfree(pkt);
		}
		spin_unlock(&ept->read_q_lock);

		spin_unlock(&ept->restart_lock);
		/* Wake blocked readers so they observe the restart state. */
		wake_up(&ept->wait_q);
	}

	spin_unlock_irqrestore(&local_endpoints_lock, flags);

	/* Unblock endpoints waiting for quota ack*/
	spin_lock_irqsave(&remote_endpoints_lock, flags);
	list_for_each_entry(r_ept, &remote_endpoints, list) {
		spin_lock(&r_ept->quota_lock);
		r_ept->quota_restart_state = RESTART_QUOTA_ABORT;
		RR("Set STATE_PENDING PID:0x%08x CID:0x%08x \n", r_ept->pid,
		   r_ept->cid);
		spin_unlock(&r_ept->quota_lock);
		wake_up(&r_ept->quota_wait);
	}
	spin_unlock_irqrestore(&remote_endpoints_lock, flags);
}
375
376static void modem_reset_startup(struct rpcrouter_xprt_info *xprt_info)
377{
378 struct msm_rpc_endpoint *ept;
379 unsigned long flags;
380
381 spin_lock_irqsave(&local_endpoints_lock, flags);
382
383 /* notify all endpoints that we are coming back up */
384 list_for_each_entry(ept, &local_endpoints, list) {
385 RR("%s EPT DST PID %x, remote_pid:%d\n", __func__,
386 ept->dst_pid, xprt_info->remote_pid);
387
388 if (xprt_info->remote_pid != ept->dst_pid)
389 continue;
390
391 D("calling setup cb %d:%p\n", ept->do_setup_notif,
392 ept->cb_restart_setup);
393 if (ept->do_setup_notif && ept->cb_restart_setup)
394 ept->cb_restart_setup(ept->client_data);
395 ept->do_setup_notif = 0;
396 }
397
398 spin_unlock_irqrestore(&local_endpoints_lock, flags);
399}
400
401/*
402 * Blocks and waits for endpoint if a reset is in progress.
403 *
404 * @returns
405 * ENETRESET Reset is in progress and a notification needed
406 * ERESTARTSYS Signal occurred
407 * 0 Reset is not in progress
408 */
static int wait_for_restart_and_notify(struct msm_rpc_endpoint *ept)
{
	unsigned long flags;
	int ret = 0;
	DEFINE_WAIT(__wait);

	for (;;) {
		/* Enqueue on the wait queue *before* sampling restart_state
		 * so a wake_up between the check and schedule() is not
		 * lost (standard prepare_to_wait pattern). */
		prepare_to_wait(&ept->restart_wait, &__wait,
				TASK_INTERRUPTIBLE);

		spin_lock_irqsave(&ept->restart_lock, flags);
		if (ept->restart_state == RESTART_NORMAL) {
			/* No reset in progress. */
			spin_unlock_irqrestore(&ept->restart_lock, flags);
			break;
		} else if (ept->restart_state & RESTART_PEND_NTFY) {
			/* A notification is owed to the caller: consume the
			 * pending-notify bit and report -ENETRESET. */
			ept->restart_state &= ~RESTART_PEND_NTFY;
			spin_unlock_irqrestore(&ept->restart_lock, flags);
			ret = -ENETRESET;
			break;
		}
		/* Signals only interrupt interruptible endpoints. */
		if (signal_pending(current) &&
		    ((!(ept->flags & MSM_RPC_UNINTERRUPTIBLE)))) {
			spin_unlock_irqrestore(&ept->restart_lock, flags);
			ret = -ERESTARTSYS;
			break;
		}
		spin_unlock_irqrestore(&ept->restart_lock, flags);
		schedule();
	}
	finish_wait(&ept->restart_wait, &__wait);
	return ret;
}
441
442static struct rr_server *rpcrouter_create_server(uint32_t pid,
443 uint32_t cid,
444 uint32_t prog,
445 uint32_t ver)
446{
447 struct rr_server *server;
448 unsigned long flags;
449 int rc;
450
451 server = kmalloc(sizeof(struct rr_server), GFP_KERNEL);
452 if (!server)
453 return ERR_PTR(-ENOMEM);
454
455 memset(server, 0, sizeof(struct rr_server));
456 server->pid = pid;
457 server->cid = cid;
458 server->prog = prog;
459 server->vers = ver;
460
461 spin_lock_irqsave(&server_list_lock, flags);
462 list_add_tail(&server->list, &server_list);
463 spin_unlock_irqrestore(&server_list_lock, flags);
464
465 rc = msm_rpcrouter_create_server_cdev(server);
466 if (rc < 0)
467 goto out_fail;
468
469 return server;
470out_fail:
471 spin_lock_irqsave(&server_list_lock, flags);
472 list_del(&server->list);
473 spin_unlock_irqrestore(&server_list_lock, flags);
474 kfree(server);
475 return ERR_PTR(rc);
476}
477
478static void rpcrouter_destroy_server(struct rr_server *server)
479{
480 unsigned long flags;
481
482 spin_lock_irqsave(&server_list_lock, flags);
483 list_del(&server->list);
484 spin_unlock_irqrestore(&server_list_lock, flags);
485 device_destroy(msm_rpcrouter_class, server->device_number);
486 kfree(server);
487}
488
489int msm_rpc_add_board_dev(struct rpc_board_dev *devices, int num)
490{
491 unsigned long flags;
492 struct rpc_board_dev_info *board_info;
493 int i;
494
495 for (i = 0; i < num; i++) {
496 board_info = kzalloc(sizeof(struct rpc_board_dev_info),
497 GFP_KERNEL);
498 if (!board_info)
499 return -ENOMEM;
500
501 board_info->dev = &devices[i];
502 D("%s: adding program %x\n", __func__, board_info->dev->prog);
503 spin_lock_irqsave(&rpc_board_dev_list_lock, flags);
504 list_add_tail(&board_info->list, &rpc_board_dev_list);
505 spin_unlock_irqrestore(&rpc_board_dev_list_lock, flags);
506 }
507
508 return 0;
509}
510EXPORT_SYMBOL(msm_rpc_add_board_dev);
511
512static void rpcrouter_register_board_dev(struct rr_server *server)
513{
514 struct rpc_board_dev_info *board_info;
515 unsigned long flags;
516 int rc;
517
518 spin_lock_irqsave(&rpc_board_dev_list_lock, flags);
519 list_for_each_entry(board_info, &rpc_board_dev_list, list) {
520 if (server->prog == board_info->dev->prog) {
521 D("%s: registering device %x\n",
522 __func__, board_info->dev->prog);
523 list_del(&board_info->list);
524 rc = platform_device_register(&board_info->dev->pdev);
525 if (rc)
526 pr_err("%s: board dev register failed %d\n",
527 __func__, rc);
528 kfree(board_info);
529 break;
530 }
531 }
532 spin_unlock_irqrestore(&rpc_board_dev_list_lock, flags);
533}
534
535static struct rr_server *rpcrouter_lookup_server(uint32_t prog, uint32_t ver)
536{
537 struct rr_server *server;
538 unsigned long flags;
539
540 spin_lock_irqsave(&server_list_lock, flags);
541 list_for_each_entry(server, &server_list, list) {
542 if (server->prog == prog
543 && server->vers == ver) {
544 spin_unlock_irqrestore(&server_list_lock, flags);
545 return server;
546 }
547 }
548 spin_unlock_irqrestore(&server_list_lock, flags);
549 return NULL;
550}
551
552static struct rr_server *rpcrouter_lookup_server_by_dev(dev_t dev)
553{
554 struct rr_server *server;
555 unsigned long flags;
556
557 spin_lock_irqsave(&server_list_lock, flags);
558 list_for_each_entry(server, &server_list, list) {
559 if (server->device_number == dev) {
560 spin_unlock_irqrestore(&server_list_lock, flags);
561 return server;
562 }
563 }
564 spin_unlock_irqrestore(&server_list_lock, flags);
565 return NULL;
566}
567
568struct msm_rpc_endpoint *msm_rpcrouter_create_local_endpoint(dev_t dev)
569{
570 struct msm_rpc_endpoint *ept;
571 unsigned long flags;
572
573 ept = kmalloc(sizeof(struct msm_rpc_endpoint), GFP_KERNEL);
574 if (!ept)
575 return NULL;
576 memset(ept, 0, sizeof(struct msm_rpc_endpoint));
577 ept->cid = (uint32_t) ept;
578 ept->pid = RPCROUTER_PID_LOCAL;
579 ept->dev = dev;
580
581 if ((dev != msm_rpcrouter_devno) && (dev != MKDEV(0, 0))) {
582 struct rr_server *srv;
583 /*
584 * This is a userspace client which opened
585 * a program/ver devicenode. Bind the client
586 * to that destination
587 */
588 srv = rpcrouter_lookup_server_by_dev(dev);
589 /* TODO: bug? really? */
590 BUG_ON(!srv);
591
592 ept->dst_pid = srv->pid;
593 ept->dst_cid = srv->cid;
594 ept->dst_prog = cpu_to_be32(srv->prog);
595 ept->dst_vers = cpu_to_be32(srv->vers);
596 } else {
597 /* mark not connected */
598 ept->dst_pid = 0xffffffff;
599 }
600
601 init_waitqueue_head(&ept->wait_q);
602 INIT_LIST_HEAD(&ept->read_q);
603 spin_lock_init(&ept->read_q_lock);
604 INIT_LIST_HEAD(&ept->reply_avail_q);
605 INIT_LIST_HEAD(&ept->reply_pend_q);
606 spin_lock_init(&ept->reply_q_lock);
607 spin_lock_init(&ept->restart_lock);
608 init_waitqueue_head(&ept->restart_wait);
609 ept->restart_state = RESTART_NORMAL;
610 wake_lock_init(&ept->read_q_wake_lock, WAKE_LOCK_SUSPEND, "rpc_read");
611 wake_lock_init(&ept->reply_q_wake_lock, WAKE_LOCK_SUSPEND, "rpc_reply");
612 INIT_LIST_HEAD(&ept->incomplete);
613 spin_lock_init(&ept->incomplete_lock);
614
615 spin_lock_irqsave(&local_endpoints_lock, flags);
616 list_add_tail(&ept->list, &local_endpoints);
617 spin_unlock_irqrestore(&local_endpoints_lock, flags);
618 return ept;
619}
620
/*
 * Tear down a local endpoint: unlink it, tell every transport's remote
 * router the client is gone, free queued replies, and release it.
 *
 * @returns 0 on success, or the negative error from
 *          rpcrouter_send_control_msg.
 *
 * NOTE(review): on a send failure this returns early with the endpoint
 * already unlinked but not freed and its wakelocks still initialized —
 * the caller has no handle to recover it.  Looks like a leak path;
 * confirm before changing.
 */
int msm_rpcrouter_destroy_local_endpoint(struct msm_rpc_endpoint *ept)
{
	int rc;
	union rr_control_msg msg;
	struct msm_rpc_reply *reply, *reply_tmp;
	unsigned long flags;
	struct rpcrouter_xprt_info *xprt_info;

	/* Endpoint with dst_pid = 0xffffffff corresponds to that of
	** router port. So don't send a REMOVE CLIENT message while
	** destroying it.*/
	spin_lock_irqsave(&local_endpoints_lock, flags);
	list_del(&ept->list);
	spin_unlock_irqrestore(&local_endpoints_lock, flags);
	if (ept->dst_pid != 0xffffffff) {
		msg.cmd = RPCROUTER_CTRL_CMD_REMOVE_CLIENT;
		msg.cli.pid = ept->pid;
		msg.cli.cid = ept->cid;

		RR("x REMOVE_CLIENT id=%d:%08x\n", ept->pid, ept->cid);
		/* Broadcast the removal on every registered transport. */
		mutex_lock(&xprt_info_list_lock);
		list_for_each_entry(xprt_info, &xprt_info_list, list) {
			rc = rpcrouter_send_control_msg(xprt_info, &msg);
			if (rc < 0) {
				mutex_unlock(&xprt_info_list_lock);
				return rc;
			}
		}
		mutex_unlock(&xprt_info_list_lock);
	}

	/* Free replies */
	spin_lock_irqsave(&ept->reply_q_lock, flags);
	list_for_each_entry_safe(reply, reply_tmp, &ept->reply_pend_q, list) {
		list_del(&reply->list);
		kfree(reply);
	}
	list_for_each_entry_safe(reply, reply_tmp, &ept->reply_avail_q, list) {
		list_del(&reply->list);
		kfree(reply);
	}
	spin_unlock_irqrestore(&ept->reply_q_lock, flags);

	wake_lock_destroy(&ept->read_q_wake_lock);
	wake_lock_destroy(&ept->reply_q_wake_lock);
	kfree(ept);
	return 0;
}
669
670static int rpcrouter_create_remote_endpoint(uint32_t pid, uint32_t cid)
671{
672 struct rr_remote_endpoint *new_c;
673 unsigned long flags;
674
675 new_c = kmalloc(sizeof(struct rr_remote_endpoint), GFP_KERNEL);
676 if (!new_c)
677 return -ENOMEM;
678 memset(new_c, 0, sizeof(struct rr_remote_endpoint));
679
680 new_c->cid = cid;
681 new_c->pid = pid;
682 init_waitqueue_head(&new_c->quota_wait);
683 spin_lock_init(&new_c->quota_lock);
684
685 spin_lock_irqsave(&remote_endpoints_lock, flags);
686 list_add_tail(&new_c->list, &remote_endpoints);
687 new_c->quota_restart_state = RESTART_NORMAL;
688 spin_unlock_irqrestore(&remote_endpoints_lock, flags);
689 return 0;
690}
691
/*
 * Find the local endpoint with the given channel id, or NULL.
 *
 * Caller must hold local_endpoints_lock — the list is walked without
 * taking it here (see the lookup in do_read_data, which locks around
 * this call and its use of the result).
 */
static struct msm_rpc_endpoint *rpcrouter_lookup_local_endpoint(uint32_t cid)
{
	struct msm_rpc_endpoint *ept;

	list_for_each_entry(ept, &local_endpoints, list) {
		if (ept->cid == cid)
			return ept;
	}
	return NULL;
}
702
703static struct rr_remote_endpoint *rpcrouter_lookup_remote_endpoint(uint32_t pid,
704 uint32_t cid)
705{
706 struct rr_remote_endpoint *ept;
707 unsigned long flags;
708
709 spin_lock_irqsave(&remote_endpoints_lock, flags);
710 list_for_each_entry(ept, &remote_endpoints, list) {
711 if ((ept->pid == pid) && (ept->cid == cid)) {
712 spin_unlock_irqrestore(&remote_endpoints_lock, flags);
713 return ept;
714 }
715 }
716 spin_unlock_irqrestore(&remote_endpoints_lock, flags);
717 return NULL;
718}
719
/*
 * A NEW_SERVER arrived for an already-known server at the same pid:cid
 * — the remote side has restarted.  Reset the remote endpoint's tx
 * quota and clear RESTART_PEND_SVR on any local endpoint bound to this
 * prog:vers, waking everyone blocked on either.
 *
 * NOTE(review): quota_restart_state is read before quota_lock is taken;
 * presumably benign (re-checked writers also wake the queue) — confirm.
 */
static void handle_server_restart(struct rr_server *server,
				  uint32_t pid, uint32_t cid,
				  uint32_t prog, uint32_t vers)
{
	struct rr_remote_endpoint *r_ept;
	struct msm_rpc_endpoint *ept;
	unsigned long flags;
	r_ept = rpcrouter_lookup_remote_endpoint(pid, cid);
	if (r_ept && (r_ept->quota_restart_state !=
		      RESTART_NORMAL)) {
		spin_lock_irqsave(&r_ept->quota_lock, flags);
		r_ept->tx_quota_cntr = 0;
		r_ept->quota_restart_state =
			RESTART_NORMAL;
		spin_unlock_irqrestore(&r_ept->quota_lock, flags);
		D(KERN_INFO "rpcrouter: Remote EPT Reset %0x\n",
		  (unsigned int)r_ept);
		wake_up(&r_ept->quota_wait);
	}
	spin_lock_irqsave(&local_endpoints_lock, flags);
	list_for_each_entry(ept, &local_endpoints, list) {
		if ((be32_to_cpu(ept->dst_prog) == prog) &&
		    (be32_to_cpu(ept->dst_vers) == vers) &&
		    (ept->restart_state & RESTART_PEND_SVR)) {
			spin_lock(&ept->restart_lock);
			ept->restart_state &= ~RESTART_PEND_SVR;
			spin_unlock(&ept->restart_lock);
			D("rpcrouter: Local EPT Reset %08x:%08x \n",
			  prog, vers);
			wake_up(&ept->restart_wait);
			wake_up(&ept->wait_q);
		}
	}
	spin_unlock_irqrestore(&local_endpoints_lock, flags);
}
755
756static int process_control_msg(struct rpcrouter_xprt_info *xprt_info,
757 union rr_control_msg *msg, int len)
758{
759 union rr_control_msg ctl;
760 struct rr_server *server;
761 struct rr_remote_endpoint *r_ept;
762 int rc = 0;
763 unsigned long flags;
764 static int first = 1;
765
766 if (len != sizeof(*msg)) {
767 RR(KERN_ERR "rpcrouter: r2r msg size %d != %d\n",
768 len, sizeof(*msg));
769 return -EINVAL;
770 }
771
772 switch (msg->cmd) {
773 case RPCROUTER_CTRL_CMD_HELLO:
774 RR("o HELLO PID %d\n", xprt_info->remote_pid);
775 memset(&ctl, 0, sizeof(ctl));
776 ctl.cmd = RPCROUTER_CTRL_CMD_HELLO;
777 rpcrouter_send_control_msg(xprt_info, &ctl);
778
779 xprt_info->initialized = 1;
780
781 /* Send list of servers one at a time */
782 ctl.cmd = RPCROUTER_CTRL_CMD_NEW_SERVER;
783
784 /* TODO: long time to hold a spinlock... */
785 spin_lock_irqsave(&server_list_lock, flags);
786 list_for_each_entry(server, &server_list, list) {
787 if (server->pid != RPCROUTER_PID_LOCAL)
788 continue;
789 ctl.srv.pid = server->pid;
790 ctl.srv.cid = server->cid;
791 ctl.srv.prog = server->prog;
792 ctl.srv.vers = server->vers;
793
794 RR("x NEW_SERVER id=%d:%08x prog=%08x:%08x\n",
795 server->pid, server->cid,
796 server->prog, server->vers);
797
798 rpcrouter_send_control_msg(xprt_info, &ctl);
799 }
800 spin_unlock_irqrestore(&server_list_lock, flags);
801
802 if (first) {
803 first = 0;
804 queue_work(rpcrouter_workqueue,
805 &work_create_rpcrouter_pdev);
806 }
807 break;
808
809 case RPCROUTER_CTRL_CMD_RESUME_TX:
810 RR("o RESUME_TX id=%d:%08x\n", msg->cli.pid, msg->cli.cid);
811
812 r_ept = rpcrouter_lookup_remote_endpoint(msg->cli.pid,
813 msg->cli.cid);
814 if (!r_ept) {
815 printk(KERN_ERR
816 "rpcrouter: Unable to resume client\n");
817 break;
818 }
819 spin_lock_irqsave(&r_ept->quota_lock, flags);
820 r_ept->tx_quota_cntr = 0;
821 spin_unlock_irqrestore(&r_ept->quota_lock, flags);
822 wake_up(&r_ept->quota_wait);
823 break;
824
825 case RPCROUTER_CTRL_CMD_NEW_SERVER:
826 if (msg->srv.vers == 0) {
827 pr_err(
828 "rpcrouter: Server create rejected, version = 0, "
829 "program = %08x\n", msg->srv.prog);
830 break;
831 }
832
833 RR("o NEW_SERVER id=%d:%08x prog=%08x:%08x\n",
834 msg->srv.pid, msg->srv.cid, msg->srv.prog, msg->srv.vers);
835
836 server = rpcrouter_lookup_server(msg->srv.prog, msg->srv.vers);
837
838 if (!server) {
839 server = rpcrouter_create_server(
840 msg->srv.pid, msg->srv.cid,
841 msg->srv.prog, msg->srv.vers);
842 if (!server)
843 return -ENOMEM;
844 /*
845 * XXX: Verify that its okay to add the
846 * client to our remote client list
847 * if we get a NEW_SERVER notification
848 */
849 if (!rpcrouter_lookup_remote_endpoint(msg->srv.pid,
850 msg->srv.cid)) {
851 rc = rpcrouter_create_remote_endpoint(
852 msg->srv.pid, msg->srv.cid);
853 if (rc < 0)
854 printk(KERN_ERR
855 "rpcrouter:Client create"
856 "error (%d)\n", rc);
857 }
858 rpcrouter_register_board_dev(server);
859 schedule_work(&work_create_pdevs);
860 wake_up(&newserver_wait);
861 } else {
862 if ((server->pid == msg->srv.pid) &&
863 (server->cid == msg->srv.cid)) {
864 handle_server_restart(server,
865 msg->srv.pid,
866 msg->srv.cid,
867 msg->srv.prog,
868 msg->srv.vers);
869 } else {
870 server->pid = msg->srv.pid;
871 server->cid = msg->srv.cid;
872 }
873 }
874 break;
875
876 case RPCROUTER_CTRL_CMD_REMOVE_SERVER:
877 RR("o REMOVE_SERVER prog=%08x:%d\n",
878 msg->srv.prog, msg->srv.vers);
879 server = rpcrouter_lookup_server(msg->srv.prog, msg->srv.vers);
880 if (server)
881 rpcrouter_destroy_server(server);
882 break;
883
884 case RPCROUTER_CTRL_CMD_REMOVE_CLIENT:
885 RR("o REMOVE_CLIENT id=%d:%08x\n", msg->cli.pid, msg->cli.cid);
886 if (msg->cli.pid == RPCROUTER_PID_LOCAL) {
887 printk(KERN_ERR
888 "rpcrouter: Denying remote removal of "
889 "local client\n");
890 break;
891 }
892 r_ept = rpcrouter_lookup_remote_endpoint(msg->cli.pid,
893 msg->cli.cid);
894 if (r_ept) {
895 spin_lock_irqsave(&remote_endpoints_lock, flags);
896 list_del(&r_ept->list);
897 spin_unlock_irqrestore(&remote_endpoints_lock, flags);
898 kfree(r_ept);
899 }
900
901 /* Notify local clients of this event */
902 printk(KERN_ERR "rpcrouter: LOCAL NOTIFICATION NOT IMP\n");
903 rc = -ENOSYS;
904
905 break;
906 case RPCROUTER_CTRL_CMD_PING:
907 /* No action needed for ping messages received */
908 RR("o PING\n");
909 break;
910 default:
911 RR("o UNKNOWN(%08x)\n", msg->cmd);
912 rc = -ENOSYS;
913 }
914
915 return rc;
916}
917
/*
 * One-shot workqueue handler run after the first HELLO: register the
 * "oncrpc_router" platform device, then release everyone blocked on
 * rpc_remote_router_up (registration must precede the completion).
 */
static void do_create_rpcrouter_pdev(struct work_struct *work)
{
	D("%s: modem rpc router up\n", __func__);
	platform_device_register(&rpcrouter_pdev);
	complete_all(&rpc_remote_router_up);
}
924
/*
 * Workqueue handler: create a platform device for every remote server
 * that does not have one yet (pdev_name[0] == 0 marks "not created").
 *
 * Only one device is handled per invocation: the lock is dropped before
 * msm_rpcrouter_create_server_pdev (presumably because it can sleep —
 * TODO confirm), after which the list iteration is no longer safe, so
 * the work re-queues itself until a full pass finds nothing to do.
 */
static void do_create_pdevs(struct work_struct *work)
{
	unsigned long flags;
	struct rr_server *server;

	/* TODO: race if destroyed while being registered */
	spin_lock_irqsave(&server_list_lock, flags);
	list_for_each_entry(server, &server_list, list) {
		if (server->pid != RPCROUTER_PID_LOCAL) {
			if (server->pdev_name[0] == 0) {
				/* Name doubles as the "created" flag. */
				sprintf(server->pdev_name, "rs%.8x",
					server->prog);
				spin_unlock_irqrestore(&server_list_lock,
						       flags);
				msm_rpcrouter_create_server_pdev(server);
				schedule_work(&work_create_pdevs);
				return;
			}
		}
	}
	spin_unlock_irqrestore(&server_list_lock, flags);
}
947
948static void *rr_malloc(unsigned sz)
949{
950 void *ptr = kmalloc(sz, GFP_KERNEL);
951 if (ptr)
952 return ptr;
953
954 printk(KERN_ERR "rpcrouter: kmalloc of %d failed, retrying...\n", sz);
955 do {
956 ptr = kmalloc(sz, GFP_KERNEL);
957 } while (!ptr);
958
959 return ptr;
960}
961
/*
 * Read exactly len bytes from the transport into data, blocking until
 * that much is buffered or the read is aborted.
 *
 * @returns 0 on success; -EIO on abort or on a short/failed transport
 *          read.
 */
static int rr_read(struct rpcrouter_xprt_info *xprt_info,
		   void *data, uint32_t len)
{
	int rc;
	unsigned long flags;

	while (!xprt_info->abort_data_read) {
		spin_lock_irqsave(&xprt_info->lock, flags);
		if (xprt_info->xprt->read_avail() >= len) {
			rc = xprt_info->xprt->read(data, len);
			spin_unlock_irqrestore(&xprt_info->lock, flags);
			if (rc == len && !xprt_info->abort_data_read)
				return 0;
			else
				return -EIO;
		}
		/* Not enough buffered yet: record how much we need, drop
		 * the wakelock so the system may suspend while we sleep,
		 * and wait for the transport to signal read_wait. */
		xprt_info->need_len = len;
		wake_unlock(&xprt_info->wakelock);
		spin_unlock_irqrestore(&xprt_info->lock, flags);

		wait_event(xprt_info->read_wait,
			   xprt_info->xprt->read_avail() >= len
			   || xprt_info->abort_data_read);
	}
	return -EIO;
}
988
989#if defined(CONFIG_MSM_ONCRPCROUTER_DEBUG)
990static char *type_to_str(int i)
991{
992 switch (i) {
993 case RPCROUTER_CTRL_CMD_DATA:
994 return "data ";
995 case RPCROUTER_CTRL_CMD_HELLO:
996 return "hello ";
997 case RPCROUTER_CTRL_CMD_BYE:
998 return "bye ";
999 case RPCROUTER_CTRL_CMD_NEW_SERVER:
1000 return "new_srvr";
1001 case RPCROUTER_CTRL_CMD_REMOVE_SERVER:
1002 return "rmv_srvr";
1003 case RPCROUTER_CTRL_CMD_REMOVE_CLIENT:
1004 return "rmv_clnt";
1005 case RPCROUTER_CTRL_CMD_RESUME_TX:
1006 return "resum_tx";
1007 case RPCROUTER_CTRL_CMD_EXIT:
1008 return "cmd_exit";
1009 default:
1010 return "invalid";
1011 }
1012}
1013#endif
1014
1015static void do_read_data(struct work_struct *work)
1016{
1017 struct rr_header hdr;
1018 struct rr_packet *pkt;
1019 struct rr_fragment *frag;
1020 struct msm_rpc_endpoint *ept;
1021#if defined(CONFIG_MSM_ONCRPCROUTER_DEBUG)
1022 struct rpc_request_hdr *rq;
1023#endif
1024 uint32_t pm, mid;
1025 unsigned long flags;
1026
1027 struct rpcrouter_xprt_info *xprt_info =
1028 container_of(work,
1029 struct rpcrouter_xprt_info,
1030 read_data);
1031
1032 if (rr_read(xprt_info, &hdr, sizeof(hdr)))
1033 goto fail_io;
1034
1035 RR("- ver=%d type=%d src=%d:%08x crx=%d siz=%d dst=%d:%08x\n",
1036 hdr.version, hdr.type, hdr.src_pid, hdr.src_cid,
1037 hdr.confirm_rx, hdr.size, hdr.dst_pid, hdr.dst_cid);
1038 RAW_HDR("[r rr_h] "
1039 "ver=%i,type=%s,src_pid=%08x,src_cid=%08x,"
1040 "confirm_rx=%i,size=%3i,dst_pid=%08x,dst_cid=%08x\n",
1041 hdr.version, type_to_str(hdr.type), hdr.src_pid, hdr.src_cid,
1042 hdr.confirm_rx, hdr.size, hdr.dst_pid, hdr.dst_cid);
1043
1044 if (hdr.version != RPCROUTER_VERSION) {
1045 DIAG("version %d != %d\n", hdr.version, RPCROUTER_VERSION);
1046 goto fail_data;
1047 }
1048 if (hdr.size > RPCROUTER_MSGSIZE_MAX) {
1049 DIAG("msg size %d > max %d\n", hdr.size, RPCROUTER_MSGSIZE_MAX);
1050 goto fail_data;
1051 }
1052
1053 if (hdr.dst_cid == RPCROUTER_ROUTER_ADDRESS) {
1054 if (xprt_info->remote_pid == -1) {
1055 xprt_info->remote_pid = hdr.src_pid;
1056
1057 /* do restart notification */
1058 modem_reset_startup(xprt_info);
1059 }
1060
1061 if (rr_read(xprt_info, xprt_info->r2r_buf, hdr.size))
1062 goto fail_io;
1063 process_control_msg(xprt_info,
1064 (void *) xprt_info->r2r_buf, hdr.size);
1065 goto done;
1066 }
1067
1068 if (hdr.size < sizeof(pm)) {
1069 DIAG("runt packet (no pacmark)\n");
1070 goto fail_data;
1071 }
1072 if (rr_read(xprt_info, &pm, sizeof(pm)))
1073 goto fail_io;
1074
1075 hdr.size -= sizeof(pm);
1076
1077 frag = rr_malloc(sizeof(*frag));
1078 frag->next = NULL;
1079 frag->length = hdr.size;
1080 if (rr_read(xprt_info, frag->data, hdr.size)) {
1081 kfree(frag);
1082 goto fail_io;
1083 }
1084
1085#if defined(CONFIG_MSM_ONCRPCROUTER_DEBUG)
1086 if ((smd_rpcrouter_debug_mask & RAW_PMR) &&
1087 ((pm >> 30 & 0x1) || (pm >> 31 & 0x1))) {
1088 uint32_t xid = 0;
1089 if (pm >> 30 & 0x1) {
1090 rq = (struct rpc_request_hdr *) frag->data;
1091 xid = ntohl(rq->xid);
1092 }
1093 if ((pm >> 31 & 0x1) || (pm >> 30 & 0x1))
1094 RAW_PMR_NOMASK("xid:0x%03x first=%i,last=%i,mid=%3i,"
1095 "len=%3i,dst_cid=%08x\n",
1096 xid,
1097 pm >> 30 & 0x1,
1098 pm >> 31 & 0x1,
1099 pm >> 16 & 0xFF,
1100 pm & 0xFFFF, hdr.dst_cid);
1101 }
1102
1103 if (smd_rpcrouter_debug_mask & SMEM_LOG) {
1104 rq = (struct rpc_request_hdr *) frag->data;
1105 if (rq->xid == 0)
1106 smem_log_event(SMEM_LOG_PROC_ID_APPS |
1107 RPC_ROUTER_LOG_EVENT_MID_READ,
1108 PACMARK_MID(pm),
1109 hdr.dst_cid,
1110 hdr.src_cid);
1111 else
1112 smem_log_event(SMEM_LOG_PROC_ID_APPS |
1113 RPC_ROUTER_LOG_EVENT_MSG_READ,
1114 ntohl(rq->xid),
1115 hdr.dst_cid,
1116 hdr.src_cid);
1117 }
1118#endif
1119
Karthikeyan Ramasubramanian768567a2011-08-11 11:53:21 -06001120 spin_lock_irqsave(&local_endpoints_lock, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001121 ept = rpcrouter_lookup_local_endpoint(hdr.dst_cid);
1122 if (!ept) {
Karthikeyan Ramasubramanian768567a2011-08-11 11:53:21 -06001123 spin_unlock_irqrestore(&local_endpoints_lock, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001124 DIAG("no local ept for cid %08x\n", hdr.dst_cid);
1125 kfree(frag);
1126 goto done;
1127 }
1128
1129 /* See if there is already a partial packet that matches our mid
1130 * and if so, append this fragment to that packet.
1131 */
1132 mid = PACMARK_MID(pm);
Karthikeyan Ramasubramanian768567a2011-08-11 11:53:21 -06001133 spin_lock(&ept->incomplete_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001134 list_for_each_entry(pkt, &ept->incomplete, list) {
1135 if (pkt->mid == mid) {
1136 pkt->last->next = frag;
1137 pkt->last = frag;
1138 pkt->length += frag->length;
1139 if (PACMARK_LAST(pm)) {
1140 list_del(&pkt->list);
Karthikeyan Ramasubramanian768567a2011-08-11 11:53:21 -06001141 spin_unlock(&ept->incomplete_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001142 goto packet_complete;
1143 }
Karthikeyan Ramasubramanian768567a2011-08-11 11:53:21 -06001144 spin_unlock(&ept->incomplete_lock);
1145 spin_unlock_irqrestore(&local_endpoints_lock, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001146 goto done;
1147 }
1148 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001149 /* This mid is new -- create a packet for it, and put it on
1150 * the incomplete list if this fragment is not a last fragment,
1151 * otherwise put it on the read queue.
1152 */
1153 pkt = rr_malloc(sizeof(struct rr_packet));
1154 pkt->first = frag;
1155 pkt->last = frag;
1156 memcpy(&pkt->hdr, &hdr, sizeof(hdr));
1157 pkt->mid = mid;
1158 pkt->length = frag->length;
1159 if (!PACMARK_LAST(pm)) {
1160 list_add_tail(&pkt->list, &ept->incomplete);
Karthikeyan Ramasubramanian768567a2011-08-11 11:53:21 -06001161 spin_unlock(&ept->incomplete_lock);
1162 spin_unlock_irqrestore(&local_endpoints_lock, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001163 goto done;
1164 }
Karthikeyan Ramasubramanian768567a2011-08-11 11:53:21 -06001165 spin_unlock(&ept->incomplete_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001166
1167packet_complete:
Karthikeyan Ramasubramanian768567a2011-08-11 11:53:21 -06001168 spin_lock(&ept->read_q_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001169 D("%s: take read lock on ept %p\n", __func__, ept);
1170 wake_lock(&ept->read_q_wake_lock);
1171 list_add_tail(&pkt->list, &ept->read_q);
1172 wake_up(&ept->wait_q);
Karthikeyan Ramasubramanian768567a2011-08-11 11:53:21 -06001173 spin_unlock(&ept->read_q_lock);
1174 spin_unlock_irqrestore(&local_endpoints_lock, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001175done:
1176
1177 if (hdr.confirm_rx) {
1178 union rr_control_msg msg;
1179
1180 msg.cmd = RPCROUTER_CTRL_CMD_RESUME_TX;
1181 msg.cli.pid = hdr.dst_pid;
1182 msg.cli.cid = hdr.dst_cid;
1183
1184 RR("x RESUME_TX id=%d:%08x\n", msg.cli.pid, msg.cli.cid);
1185 rpcrouter_send_control_msg(xprt_info, &msg);
1186
1187#if defined(CONFIG_MSM_ONCRPCROUTER_DEBUG)
1188 if (smd_rpcrouter_debug_mask & SMEM_LOG)
1189 smem_log_event(SMEM_LOG_PROC_ID_APPS |
1190 RPC_ROUTER_LOG_EVENT_MSG_CFM_SNT,
1191 RPCROUTER_PID_LOCAL,
1192 hdr.dst_cid,
1193 hdr.src_cid);
1194#endif
1195
1196 }
1197
1198 /* don't requeue if we should be shutting down */
1199 if (!xprt_info->abort_data_read) {
1200 queue_work(xprt_info->workqueue, &xprt_info->read_data);
1201 return;
1202 }
1203
1204 D("rpc_router terminating for '%s'\n",
1205 xprt_info->xprt->name);
1206
1207fail_io:
1208fail_data:
1209 D(KERN_ERR "rpc_router has died for '%s'\n",
1210 xprt_info->xprt->name);
1211}
1212
1213void msm_rpc_setup_req(struct rpc_request_hdr *hdr, uint32_t prog,
1214 uint32_t vers, uint32_t proc)
1215{
1216 memset(hdr, 0, sizeof(struct rpc_request_hdr));
1217 hdr->xid = cpu_to_be32(atomic_add_return(1, &next_xid));
1218 hdr->rpc_vers = cpu_to_be32(2);
1219 hdr->prog = cpu_to_be32(prog);
1220 hdr->vers = cpu_to_be32(vers);
1221 hdr->procedure = cpu_to_be32(proc);
1222}
1223EXPORT_SYMBOL(msm_rpc_setup_req);
1224
1225struct msm_rpc_endpoint *msm_rpc_open(void)
1226{
1227 struct msm_rpc_endpoint *ept;
1228
1229 ept = msm_rpcrouter_create_local_endpoint(MKDEV(0, 0));
1230 if (ept == NULL)
1231 return ERR_PTR(-ENOMEM);
1232
1233 return ept;
1234}
1235
1236void msm_rpc_read_wakeup(struct msm_rpc_endpoint *ept)
1237{
1238 ept->forced_wakeup = 1;
1239 wake_up(&ept->wait_q);
1240}
1241
1242int msm_rpc_close(struct msm_rpc_endpoint *ept)
1243{
1244 if (!ept)
1245 return -EINVAL;
1246 return msm_rpcrouter_destroy_local_endpoint(ept);
1247}
1248EXPORT_SYMBOL(msm_rpc_close);
1249
/*
 * Transmit one pacmark-framed fragment of an RPC message.
 *
 * Builds the router header for @count payload bytes, honours the remote
 * endpoint's receive-quota flow control (blocking until quota frees up),
 * then writes header + pacmark word + payload to the destination
 * transport while holding the transport lock and the endpoint restart
 * lock.
 *
 * Returns the number of bytes consumed on the wire (router header plus
 * hdr->size) on success, -ENETRESET if a subsystem restart intervened,
 * or -ERESTARTSYS on a signal (unless MSM_RPC_UNINTERRUPTIBLE is set).
 */
static int msm_rpc_write_pkt(
	struct rr_header *hdr,
	struct msm_rpc_endpoint *ept,
	struct rr_remote_endpoint *r_ept,
	void *buffer,
	int count,
	int first,
	int last,
	uint32_t mid
	)
{
#if defined(CONFIG_MSM_ONCRPCROUTER_DEBUG)
	struct rpc_request_hdr *rq = buffer;
	uint32_t event_id;
#endif
	uint32_t pacmark;
	unsigned long flags = 0;
	int rc;
	struct rpcrouter_xprt_info *xprt_info;
	int needed;

	DEFINE_WAIT(__wait);

	/* Create routing header */
	hdr->type = RPCROUTER_CTRL_CMD_DATA;
	hdr->version = RPCROUTER_VERSION;
	hdr->src_pid = ept->pid;
	hdr->src_cid = ept->cid;
	hdr->confirm_rx = 0;
	hdr->size = count + sizeof(uint32_t);	/* payload + pacmark word */

	rc = wait_for_restart_and_notify(ept);
	if (rc)
		return rc;

	if (r_ept) {
		/* Wait until the remote side has receive quota left, a
		 * restart is flagged, or a signal arrives.  NOTE: every
		 * exit from this loop leaves quota_lock held; it is
		 * released on each of the paths below. */
		for (;;) {
			prepare_to_wait(&r_ept->quota_wait, &__wait,
					TASK_INTERRUPTIBLE);
			spin_lock_irqsave(&r_ept->quota_lock, flags);
			if ((r_ept->tx_quota_cntr <
			     RPCROUTER_DEFAULT_RX_QUOTA) ||
			    (r_ept->quota_restart_state != RESTART_NORMAL))
				break;
			if (signal_pending(current) &&
			    (!(ept->flags & MSM_RPC_UNINTERRUPTIBLE)))
				break;
			spin_unlock_irqrestore(&r_ept->quota_lock, flags);
			schedule();
		}
		finish_wait(&r_ept->quota_wait, &__wait);

		if (r_ept->quota_restart_state != RESTART_NORMAL) {
			/* restart hit while waiting: clear the pending
			 * notification bit and bail */
			spin_lock(&ept->restart_lock);
			ept->restart_state &= ~RESTART_PEND_NTFY;
			spin_unlock(&ept->restart_lock);
			spin_unlock_irqrestore(&r_ept->quota_lock, flags);
			return -ENETRESET;
		}

		if (signal_pending(current) &&
		    (!(ept->flags & MSM_RPC_UNINTERRUPTIBLE))) {
			spin_unlock_irqrestore(&r_ept->quota_lock, flags);
			return -ERESTARTSYS;
		}
		r_ept->tx_quota_cntr++;
		/* hitting the quota: ask the peer to confirm reception so
		 * it can grant us more (RESUME_TX comes back later) */
		if (r_ept->tx_quota_cntr == RPCROUTER_DEFAULT_RX_QUOTA) {
			hdr->confirm_rx = 1;

#if defined(CONFIG_MSM_ONCRPCROUTER_DEBUG)
			if (smd_rpcrouter_debug_mask & SMEM_LOG) {
				event_id = (rq->xid == 0) ?
					RPC_ROUTER_LOG_EVENT_MID_CFM_REQ :
					RPC_ROUTER_LOG_EVENT_MSG_CFM_REQ;

				smem_log_event(SMEM_LOG_PROC_ID_APPS | event_id,
					       hdr->dst_pid,
					       hdr->dst_cid,
					       hdr->src_cid);
			}
#endif

		}
	}
	pacmark = PACMARK(count, mid, first, last);

	if (r_ept)
		spin_unlock_irqrestore(&r_ept->quota_lock, flags);

	/* look up the destination transport; hold its lock across the
	 * three-part write so fragments are not interleaved */
	mutex_lock(&xprt_info_list_lock);
	xprt_info = rpcrouter_get_xprt_info(hdr->dst_pid);
	if (!xprt_info) {
		mutex_unlock(&xprt_info_list_lock);
		return -ENETRESET;
	}
	spin_lock_irqsave(&xprt_info->lock, flags);
	mutex_unlock(&xprt_info_list_lock);
	spin_lock(&ept->restart_lock);
	if (ept->restart_state != RESTART_NORMAL) {
		ept->restart_state &= ~RESTART_PEND_NTFY;
		spin_unlock(&ept->restart_lock);
		spin_unlock_irqrestore(&xprt_info->lock, flags);
		return -ENETRESET;
	}

	/* poll (250ms) until the transport fifo can take the whole
	 * packet; locks are dropped while sleeping */
	needed = sizeof(*hdr) + hdr->size;
	while ((ept->restart_state == RESTART_NORMAL) &&
	       (xprt_info->xprt->write_avail() < needed)) {
		spin_unlock(&ept->restart_lock);
		spin_unlock_irqrestore(&xprt_info->lock, flags);
		msleep(250);

		/* refresh xprt pointer to ensure that it hasn't
		 * been deleted since our last retrieval */
		mutex_lock(&xprt_info_list_lock);
		xprt_info = rpcrouter_get_xprt_info(hdr->dst_pid);
		if (!xprt_info) {
			mutex_unlock(&xprt_info_list_lock);
			return -ENETRESET;
		}
		spin_lock_irqsave(&xprt_info->lock, flags);
		mutex_unlock(&xprt_info_list_lock);
		spin_lock(&ept->restart_lock);
	}
	if (ept->restart_state != RESTART_NORMAL) {
		ept->restart_state &= ~RESTART_PEND_NTFY;
		spin_unlock(&ept->restart_lock);
		spin_unlock_irqrestore(&xprt_info->lock, flags);
		return -ENETRESET;
	}

	/* TODO: deal with full fifo */
	xprt_info->xprt->write(hdr, sizeof(*hdr), HEADER);
	RAW_HDR("[w rr_h] "
		"ver=%i,type=%s,src_pid=%08x,src_cid=%08x,"
		"confirm_rx=%i,size=%3i,dst_pid=%08x,dst_cid=%08x\n",
		hdr->version, type_to_str(hdr->type),
		hdr->src_pid, hdr->src_cid,
		hdr->confirm_rx, hdr->size, hdr->dst_pid, hdr->dst_cid);
	xprt_info->xprt->write(&pacmark, sizeof(pacmark), PACKMARK);

#if defined(CONFIG_MSM_ONCRPCROUTER_DEBUG)
	/* bits 30/31 of the pacmark word are the first/last flags */
	if ((smd_rpcrouter_debug_mask & RAW_PMW) &&
	    ((pacmark >> 30 & 0x1) || (pacmark >> 31 & 0x1))) {
		uint32_t xid = 0;
		if (pacmark >> 30 & 0x1)
			xid = ntohl(rq->xid);
		if ((pacmark >> 31 & 0x1) || (pacmark >> 30 & 0x1))
			RAW_PMW_NOMASK("xid:0x%03x first=%i,last=%i,mid=%3i,"
				       "len=%3i,src_cid=%x\n",
				       xid,
				       pacmark >> 30 & 0x1,
				       pacmark >> 31 & 0x1,
				       pacmark >> 16 & 0xFF,
				       pacmark & 0xFFFF, hdr->src_cid);
	}
#endif

	xprt_info->xprt->write(buffer, count, PAYLOAD);
	spin_unlock(&ept->restart_lock);
	spin_unlock_irqrestore(&xprt_info->lock, flags);

#if defined(CONFIG_MSM_ONCRPCROUTER_DEBUG)
	if (smd_rpcrouter_debug_mask & SMEM_LOG) {
		if (rq->xid == 0)
			smem_log_event(SMEM_LOG_PROC_ID_APPS |
				       RPC_ROUTER_LOG_EVENT_MID_WRITTEN,
				       PACMARK_MID(pacmark),
				       hdr->dst_cid,
				       hdr->src_cid);
		else
			smem_log_event(SMEM_LOG_PROC_ID_APPS |
				       RPC_ROUTER_LOG_EVENT_MSG_WRITTEN,
				       ntohl(rq->xid),
				       hdr->dst_cid,
				       hdr->src_cid);
	}
#endif

	return needed;
}
1431
1432static struct msm_rpc_reply *get_pend_reply(struct msm_rpc_endpoint *ept,
1433 uint32_t xid)
1434{
1435 unsigned long flags;
1436 struct msm_rpc_reply *reply;
1437 spin_lock_irqsave(&ept->reply_q_lock, flags);
1438 list_for_each_entry(reply, &ept->reply_pend_q, list) {
1439 if (reply->xid == xid) {
1440 list_del(&reply->list);
1441 spin_unlock_irqrestore(&ept->reply_q_lock, flags);
1442 return reply;
1443 }
1444 }
1445 spin_unlock_irqrestore(&ept->reply_q_lock, flags);
1446 return NULL;
1447}
1448
1449void get_requesting_client(struct msm_rpc_endpoint *ept, uint32_t xid,
1450 struct msm_rpc_client_info *clnt_info)
1451{
1452 unsigned long flags;
1453 struct msm_rpc_reply *reply;
1454
1455 if (!clnt_info)
1456 return;
1457
1458 spin_lock_irqsave(&ept->reply_q_lock, flags);
1459 list_for_each_entry(reply, &ept->reply_pend_q, list) {
1460 if (reply->xid == xid) {
1461 clnt_info->pid = reply->pid;
1462 clnt_info->cid = reply->cid;
1463 clnt_info->prog = reply->prog;
1464 clnt_info->vers = reply->vers;
1465 spin_unlock_irqrestore(&ept->reply_q_lock, flags);
1466 return;
1467 }
1468 }
1469 spin_unlock_irqrestore(&ept->reply_q_lock, flags);
1470 return;
1471}
1472
1473static void set_avail_reply(struct msm_rpc_endpoint *ept,
1474 struct msm_rpc_reply *reply)
1475{
1476 unsigned long flags;
1477 spin_lock_irqsave(&ept->reply_q_lock, flags);
1478 list_add_tail(&reply->list, &ept->reply_avail_q);
1479 spin_unlock_irqrestore(&ept->reply_q_lock, flags);
1480}
1481
1482static struct msm_rpc_reply *get_avail_reply(struct msm_rpc_endpoint *ept)
1483{
1484 struct msm_rpc_reply *reply;
1485 unsigned long flags;
1486 if (list_empty(&ept->reply_avail_q)) {
1487 if (ept->reply_cnt >= RPCROUTER_PEND_REPLIES_MAX) {
1488 printk(KERN_ERR
1489 "exceeding max replies of %d \n",
1490 RPCROUTER_PEND_REPLIES_MAX);
1491 return 0;
1492 }
1493 reply = kmalloc(sizeof(struct msm_rpc_reply), GFP_KERNEL);
1494 if (!reply)
1495 return 0;
1496 D("Adding reply 0x%08x \n", (unsigned int)reply);
1497 memset(reply, 0, sizeof(struct msm_rpc_reply));
1498 spin_lock_irqsave(&ept->reply_q_lock, flags);
1499 ept->reply_cnt++;
1500 spin_unlock_irqrestore(&ept->reply_q_lock, flags);
1501 } else {
1502 spin_lock_irqsave(&ept->reply_q_lock, flags);
1503 reply = list_first_entry(&ept->reply_avail_q,
1504 struct msm_rpc_reply,
1505 list);
1506 list_del(&reply->list);
1507 spin_unlock_irqrestore(&ept->reply_q_lock, flags);
1508 }
1509 return reply;
1510}
1511
1512static void set_pend_reply(struct msm_rpc_endpoint *ept,
1513 struct msm_rpc_reply *reply)
1514{
1515 unsigned long flags;
1516 spin_lock_irqsave(&ept->reply_q_lock, flags);
1517 D("%s: take reply lock on ept %p\n", __func__, ept);
1518 wake_lock(&ept->reply_q_wake_lock);
1519 list_add_tail(&reply->list, &ept->reply_pend_q);
1520 spin_unlock_irqrestore(&ept->reply_q_lock, flags);
1521}
1522
/*
 * Write one RPC message (call or reply) of @count bytes to the peer
 * bound to @ept, fragmenting it into pacmark-framed packets of at most
 * max_tx bytes via msm_rpc_write_pkt().
 *
 * For a call (rq->type == 0) the destination is the endpoint's bound
 * server and prog/vers are validated first; for a reply the pending
 * reply tag matching rq->xid supplies the destination, and the tag is
 * returned to the pending (on failure) or free (on success) list at
 * the end, releasing the reply wakelock when nothing is pending.
 *
 * Returns @count on success or a negative errno.
 */
int msm_rpc_write(struct msm_rpc_endpoint *ept, void *buffer, int count)
{
	struct rr_header hdr;
	struct rpc_request_hdr *rq = buffer;
	struct rr_remote_endpoint *r_ept;
	struct msm_rpc_reply *reply = NULL;
	int max_tx;
	int tx_cnt;
	char *tx_buf;
	int rc;
	int first_pkt = 1;
	uint32_t mid;
	unsigned long flags;

	/* snoop the RPC packet and enforce permissions */

	/* has to have at least the xid and type fields */
	if (count < (sizeof(uint32_t) * 2)) {
		printk(KERN_ERR "rr_write: rejecting runt packet\n");
		return -EINVAL;
	}

	if (rq->type == 0) {
		/* RPC CALL */
		if (count < (sizeof(uint32_t) * 6)) {
			printk(KERN_ERR
			       "rr_write: rejecting runt call packet\n");
			return -EINVAL;
		}
		if (ept->dst_pid == 0xffffffff) {
			printk(KERN_ERR "rr_write: not connected\n");
			return -ENOTCONN;
		}
		/* only the major-version field (bits 16-27) must match */
		if ((ept->dst_prog != rq->prog) ||
		    ((be32_to_cpu(ept->dst_vers) & 0x0fff0000) !=
		     (be32_to_cpu(rq->vers) & 0x0fff0000))) {
			printk(KERN_ERR
			       "rr_write: cannot write to %08x:%08x "
			       "(bound to %08x:%08x)\n",
			       be32_to_cpu(rq->prog), be32_to_cpu(rq->vers),
			       be32_to_cpu(ept->dst_prog),
			       be32_to_cpu(ept->dst_vers));
			return -EINVAL;
		}
		hdr.dst_pid = ept->dst_pid;
		hdr.dst_cid = ept->dst_cid;
		IO("CALL to %08x:%d @ %d:%08x (%d bytes)\n",
		   be32_to_cpu(rq->prog), be32_to_cpu(rq->vers),
		   ept->dst_pid, ept->dst_cid, count);
	} else {
		/* RPC REPLY: route back to whoever issued the call */
		reply = get_pend_reply(ept, rq->xid);
		if (!reply) {
			printk(KERN_ERR
			       "rr_write: rejecting, reply not found \n");
			return -EINVAL;
		}
		hdr.dst_pid = reply->pid;
		hdr.dst_cid = reply->cid;
		IO("REPLY to xid=%d @ %d:%08x (%d bytes)\n",
		   be32_to_cpu(rq->xid), hdr.dst_pid, hdr.dst_cid, count);
	}

	r_ept = rpcrouter_lookup_remote_endpoint(hdr.dst_pid, hdr.dst_cid);

	if ((!r_ept) && (hdr.dst_pid != RPCROUTER_PID_LOCAL)) {
		printk(KERN_ERR
			"msm_rpc_write(): No route to ept "
			"[PID %x CID %x]\n", hdr.dst_pid, hdr.dst_cid);
		count = -EHOSTUNREACH;
		goto write_release_lock;
	}

	tx_cnt = count;
	tx_buf = buffer;
	/* message id ties fragments of one message together on rx side */
	mid = atomic_add_return(1, &pm_mid) & 0xFF;
	/* The modem's router can only take 500 bytes of data. The
	   first 8 bytes it uses on the modem side for addressing,
	   the next 4 bytes are for the pacmark header. */
	max_tx = RPCROUTER_MSGSIZE_MAX - 8 - sizeof(uint32_t);
	IO("Writing %d bytes, max pkt size is %d\n",
	   tx_cnt, max_tx);
	while (tx_cnt > 0) {
		if (tx_cnt > max_tx) {
			/* middle fragment: last = 0 */
			rc = msm_rpc_write_pkt(&hdr, ept, r_ept,
					       tx_buf, max_tx,
					       first_pkt, 0, mid);
			if (rc < 0) {
				count = rc;
				goto write_release_lock;
			}
			IO("Wrote %d bytes First %d, Last 0 mid %d\n",
			   rc, first_pkt, mid);
			tx_cnt -= max_tx;
			tx_buf += max_tx;
		} else {
			/* final fragment: last = 1 */
			rc = msm_rpc_write_pkt(&hdr, ept, r_ept,
					       tx_buf, tx_cnt,
					       first_pkt, 1, mid);
			if (rc < 0) {
				count = rc;
				goto write_release_lock;
			}
			IO("Wrote %d bytes First %d Last 1 mid %d\n",
			   rc, first_pkt, mid);
			break;
		}
		first_pkt = 0;
	}

 write_release_lock:
	/* if reply, release wakelock after writing to the transport */
	if (rq->type != 0) {
		/* Upon failure, add reply tag to the pending list.
		** Else add reply tag to the avail/free list. */
		if (count < 0)
			set_pend_reply(ept, reply);
		else
			set_avail_reply(ept, reply);

		spin_lock_irqsave(&ept->reply_q_lock, flags);
		if (list_empty(&ept->reply_pend_q)) {
			D("%s: release reply lock on ept %p\n", __func__, ept);
			wake_unlock(&ept->reply_q_wake_lock);
		}
		spin_unlock_irqrestore(&ept->reply_q_lock, flags);
	}

	return count;
}
EXPORT_SYMBOL(msm_rpc_write);
1654
1655/*
1656 * NOTE: It is the responsibility of the caller to kfree buffer
1657 */
1658int msm_rpc_read(struct msm_rpc_endpoint *ept, void **buffer,
1659 unsigned user_len, long timeout)
1660{
1661 struct rr_fragment *frag, *next;
1662 char *buf;
1663 int rc;
1664
1665 rc = __msm_rpc_read(ept, &frag, user_len, timeout);
1666 if (rc <= 0)
1667 return rc;
1668
1669 /* single-fragment messages conveniently can be
1670 * returned as-is (the buffer is at the front)
1671 */
1672 if (frag->next == 0) {
1673 *buffer = (void*) frag;
1674 return rc;
1675 }
1676
1677 /* multi-fragment messages, we have to do it the
1678 * hard way, which is rather disgusting right now
1679 */
1680 buf = rr_malloc(rc);
1681 *buffer = buf;
1682
1683 while (frag != NULL) {
1684 memcpy(buf, frag->data, frag->length);
1685 next = frag->next;
1686 buf += frag->length;
1687 kfree(frag);
1688 frag = next;
1689 }
1690
1691 return rc;
1692}
1693EXPORT_SYMBOL(msm_rpc_read);
1694
1695int msm_rpc_call(struct msm_rpc_endpoint *ept, uint32_t proc,
1696 void *_request, int request_size,
1697 long timeout)
1698{
1699 return msm_rpc_call_reply(ept, proc,
1700 _request, request_size,
1701 NULL, 0, timeout);
1702}
1703EXPORT_SYMBOL(msm_rpc_call);
1704
1705int msm_rpc_call_reply(struct msm_rpc_endpoint *ept, uint32_t proc,
1706 void *_request, int request_size,
1707 void *_reply, int reply_size,
1708 long timeout)
1709{
1710 struct rpc_request_hdr *req = _request;
1711 struct rpc_reply_hdr *reply;
1712 int rc;
1713
1714 if (request_size < sizeof(*req))
1715 return -ETOOSMALL;
1716
1717 if (ept->dst_pid == 0xffffffff)
1718 return -ENOTCONN;
1719
1720 memset(req, 0, sizeof(*req));
1721 req->xid = cpu_to_be32(atomic_add_return(1, &next_xid));
1722 req->rpc_vers = cpu_to_be32(2);
1723 req->prog = ept->dst_prog;
1724 req->vers = ept->dst_vers;
1725 req->procedure = cpu_to_be32(proc);
1726
1727 rc = msm_rpc_write(ept, req, request_size);
1728 if (rc < 0)
1729 return rc;
1730
1731 for (;;) {
1732 rc = msm_rpc_read(ept, (void*) &reply, -1, timeout);
1733 if (rc < 0)
1734 return rc;
1735 if (rc < (3 * sizeof(uint32_t))) {
1736 rc = -EIO;
1737 break;
1738 }
1739 /* we should not get CALL packets -- ignore them */
1740 if (reply->type == 0) {
1741 kfree(reply);
1742 continue;
1743 }
1744 /* If an earlier call timed out, we could get the (no
1745 * longer wanted) reply for it. Ignore replies that
1746 * we don't expect
1747 */
1748 if (reply->xid != req->xid) {
1749 kfree(reply);
1750 continue;
1751 }
1752 if (reply->reply_stat != 0) {
1753 rc = -EPERM;
1754 break;
1755 }
1756 if (reply->data.acc_hdr.accept_stat != 0) {
1757 rc = -EINVAL;
1758 break;
1759 }
1760 if (_reply == NULL) {
1761 rc = 0;
1762 break;
1763 }
1764 if (rc > reply_size) {
1765 rc = -ENOMEM;
1766 } else {
1767 memcpy(_reply, reply, rc);
1768 }
1769 break;
1770 }
1771 kfree(reply);
1772 return rc;
1773}
1774EXPORT_SYMBOL(msm_rpc_call_reply);
1775
1776
1777static inline int ept_packet_available(struct msm_rpc_endpoint *ept)
1778{
1779 unsigned long flags;
1780 int ret;
1781 spin_lock_irqsave(&ept->read_q_lock, flags);
1782 ret = !list_empty(&ept->read_q);
1783 spin_unlock_irqrestore(&ept->read_q_lock, flags);
1784 return ret;
1785}
1786
/*
 * Core read path: block (per the endpoint's interruptibility flag and
 * @timeout; negative timeout = wait forever) until a complete packet,
 * a forced wakeup, or a restart, then dequeue the head packet and hand
 * its fragment chain to the caller via @frag_ret.
 *
 * If the packet is an RPC call, a reply tag is queued so the eventual
 * msm_rpc_write() of the reply can find its way back to the caller.
 *
 * Returns the packet length, 0 on forced wakeup, or a negative errno
 * (-ENETRESET, -ETIMEDOUT, -ERESTARTSYS, -EAGAIN, -ETOOSMALL, -ENOMEM).
 */
int __msm_rpc_read(struct msm_rpc_endpoint *ept,
		   struct rr_fragment **frag_ret,
		   unsigned len, long timeout)
{
	struct rr_packet *pkt;
	struct rpc_request_hdr *rq;
	struct msm_rpc_reply *reply;
	unsigned long flags;
	int rc;

	rc = wait_for_restart_and_notify(ept);
	if (rc)
		return rc;

	IO("READ on ept %p\n", ept);
	/* four wait variants: {uninterruptible, interruptible} x
	 * {infinite, bounded}; all bail with -ENETRESET on restart */
	if (ept->flags & MSM_RPC_UNINTERRUPTIBLE) {
		if (timeout < 0) {
			wait_event(ept->wait_q, (ept_packet_available(ept) ||
						 ept->forced_wakeup ||
						 ept->restart_state));
			if (!msm_rpc_clear_netreset(ept))
				return -ENETRESET;
		} else {
			rc = wait_event_timeout(
				ept->wait_q,
				(ept_packet_available(ept) ||
				 ept->forced_wakeup ||
				 ept->restart_state),
				timeout);
			if (!msm_rpc_clear_netreset(ept))
				return -ENETRESET;
			if (rc == 0)
				return -ETIMEDOUT;
		}
	} else {
		if (timeout < 0) {
			rc = wait_event_interruptible(
				ept->wait_q, (ept_packet_available(ept) ||
					      ept->forced_wakeup ||
					      ept->restart_state));
			if (!msm_rpc_clear_netreset(ept))
				return -ENETRESET;
			if (rc < 0)
				return rc;
		} else {
			rc = wait_event_interruptible_timeout(
				ept->wait_q,
				(ept_packet_available(ept) ||
				 ept->forced_wakeup ||
				 ept->restart_state),
				timeout);
			if (!msm_rpc_clear_netreset(ept))
				return -ENETRESET;
			if (rc == 0)
				return -ETIMEDOUT;
		}
	}

	/* msm_rpc_read_wakeup() path: report "nothing read" */
	if (ept->forced_wakeup) {
		ept->forced_wakeup = 0;
		return 0;
	}

	spin_lock_irqsave(&ept->read_q_lock, flags);
	if (list_empty(&ept->read_q)) {
		spin_unlock_irqrestore(&ept->read_q_lock, flags);
		return -EAGAIN;
	}
	pkt = list_first_entry(&ept->read_q, struct rr_packet, list);
	/* packet stays queued if the caller's buffer is too small */
	if (pkt->length > len) {
		spin_unlock_irqrestore(&ept->read_q_lock, flags);
		return -ETOOSMALL;
	}
	list_del(&pkt->list);
	spin_unlock_irqrestore(&ept->read_q_lock, flags);

	rc = pkt->length;

	*frag_ret = pkt->first;
	rq = (void*) pkt->first->data;
	if ((rc >= (sizeof(uint32_t) * 3)) && (rq->type == 0)) {
		/* RPC CALL: remember the caller so the reply can be routed */
		reply = get_avail_reply(ept);
		if (!reply) {
			rc = -ENOMEM;
			goto read_release_lock;
		}
		reply->cid = pkt->hdr.src_cid;
		reply->pid = pkt->hdr.src_pid;
		reply->xid = rq->xid;
		reply->prog = rq->prog;
		reply->vers = rq->vers;
		set_pend_reply(ept, reply);
	}

	kfree(pkt);

	IO("READ on ept %p (%d bytes)\n", ept, rc);

 read_release_lock:

	/* release read wakelock after taking reply wakelock */
	spin_lock_irqsave(&ept->read_q_lock, flags);
	if (list_empty(&ept->read_q)) {
		D("%s: release read lock on ept %p\n", __func__, ept);
		wake_unlock(&ept->read_q_wake_lock);
	}
	spin_unlock_irqrestore(&ept->read_q_lock, flags);

	return rc;
}
1898
1899int msm_rpc_is_compatible_version(uint32_t server_version,
1900 uint32_t client_version)
1901{
1902
1903 if ((server_version & RPC_VERSION_MODE_MASK) !=
1904 (client_version & RPC_VERSION_MODE_MASK))
1905 return 0;
1906
1907 if (server_version & RPC_VERSION_MODE_MASK)
1908 return server_version == client_version;
1909
1910 return ((server_version & RPC_VERSION_MAJOR_MASK) ==
1911 (client_version & RPC_VERSION_MAJOR_MASK)) &&
1912 ((server_version & RPC_VERSION_MINOR_MASK) >=
1913 (client_version & RPC_VERSION_MINOR_MASK));
1914}
1915EXPORT_SYMBOL(msm_rpc_is_compatible_version);
1916
1917static struct rr_server *msm_rpc_get_server(uint32_t prog, uint32_t vers,
1918 uint32_t accept_compatible,
1919 uint32_t *found_prog)
1920{
1921 struct rr_server *server;
1922 unsigned long flags;
1923
1924 if (found_prog == NULL)
1925 return NULL;
1926
1927 *found_prog = 0;
1928 spin_lock_irqsave(&server_list_lock, flags);
1929 list_for_each_entry(server, &server_list, list) {
1930 if (server->prog == prog) {
1931 *found_prog = 1;
1932 spin_unlock_irqrestore(&server_list_lock, flags);
1933 if (accept_compatible) {
1934 if (msm_rpc_is_compatible_version(server->vers,
1935 vers)) {
1936 return server;
1937 } else {
1938 return NULL;
1939 }
1940 } else if (server->vers == vers) {
1941 return server;
1942 } else
1943 return NULL;
1944 }
1945 }
1946 spin_unlock_irqrestore(&server_list_lock, flags);
1947 return NULL;
1948}
1949
/*
 * Common connect path for msm_rpc_connect()/_compatible(): wait (in
 * msm_rpc_connect_timeout_ms slices) for a server matching prog/vers
 * to register, then open a local endpoint bound to it.
 *
 * Returns the endpoint or an ERR_PTR: -EHOSTUNREACH when the program
 * exists with an unacceptable version (or waiting is disabled),
 * -ETIMEDOUT, -ERESTARTSYS, or -ENOMEM from msm_rpc_open().
 */
static struct msm_rpc_endpoint *__msm_rpc_connect(uint32_t prog, uint32_t vers,
						  uint32_t accept_compatible,
						  unsigned flags)
{
	struct msm_rpc_endpoint *ept;
	struct rr_server *server;
	uint32_t found_prog;
	int rc = 0;

	DEFINE_WAIT(__wait);

	for (;;) {
		/* register on newserver_wait before re-checking so a
		 * concurrent registration cannot be missed */
		prepare_to_wait(&newserver_wait, &__wait,
				TASK_INTERRUPTIBLE);

		server = msm_rpc_get_server(prog, vers, accept_compatible,
					    &found_prog);
		if (server)
			break;

		/* program exists with an incompatible version: waiting
		 * will not help, fail immediately */
		if (found_prog) {
			pr_info("%s: server not found %x:%x\n",
				__func__, prog, vers);
			rc = -EHOSTUNREACH;
			break;
		}

		if (msm_rpc_connect_timeout_ms == 0) {
			rc = -EHOSTUNREACH;
			break;
		}

		if (signal_pending(current)) {
			rc = -ERESTARTSYS;
			break;
		}

		rc = schedule_timeout(
			msecs_to_jiffies(msm_rpc_connect_timeout_ms));
		if (!rc) {
			rc = -ETIMEDOUT;
			break;
		}
	}
	finish_wait(&newserver_wait, &__wait);

	if (!server)
		return ERR_PTR(rc);

	if (accept_compatible && (server->vers != vers)) {
		D("RPC Using new version 0x%08x(0x%08x) prog 0x%08x",
		  vers, server->vers, prog);
		D(" ... Continuing\n");
	}

	ept = msm_rpc_open();
	if (IS_ERR(ept))
		return ept;

	/* bind the endpoint to the server we found (wire byte order) */
	ept->flags = flags;
	ept->dst_pid = server->pid;
	ept->dst_cid = server->cid;
	ept->dst_prog = cpu_to_be32(prog);
	ept->dst_vers = cpu_to_be32(server->vers);

	return ept;
}
2017
2018struct msm_rpc_endpoint *msm_rpc_connect_compatible(uint32_t prog,
2019 uint32_t vers, unsigned flags)
2020{
2021 return __msm_rpc_connect(prog, vers, 1, flags);
2022}
2023EXPORT_SYMBOL(msm_rpc_connect_compatible);
2024
2025struct msm_rpc_endpoint *msm_rpc_connect(uint32_t prog,
2026 uint32_t vers, unsigned flags)
2027{
2028 return __msm_rpc_connect(prog, vers, 0, flags);
2029}
2030EXPORT_SYMBOL(msm_rpc_connect);
2031
2032/* TODO: permission check? */
2033int msm_rpc_register_server(struct msm_rpc_endpoint *ept,
2034 uint32_t prog, uint32_t vers)
2035{
2036 int rc;
2037 union rr_control_msg msg;
2038 struct rr_server *server;
2039 struct rpcrouter_xprt_info *xprt_info;
2040
2041 server = rpcrouter_create_server(ept->pid, ept->cid,
2042 prog, vers);
2043 if (!server)
2044 return -ENODEV;
2045
2046 msg.srv.cmd = RPCROUTER_CTRL_CMD_NEW_SERVER;
2047 msg.srv.pid = ept->pid;
2048 msg.srv.cid = ept->cid;
2049 msg.srv.prog = prog;
2050 msg.srv.vers = vers;
2051
2052 RR("x NEW_SERVER id=%d:%08x prog=%08x:%08x\n",
2053 ept->pid, ept->cid, prog, vers);
2054
2055 mutex_lock(&xprt_info_list_lock);
2056 list_for_each_entry(xprt_info, &xprt_info_list, list) {
2057 rc = rpcrouter_send_control_msg(xprt_info, &msg);
2058 if (rc < 0) {
2059 mutex_unlock(&xprt_info_list_lock);
2060 return rc;
2061 }
2062 }
2063 mutex_unlock(&xprt_info_list_lock);
2064 return 0;
2065}
2066
2067int msm_rpc_clear_netreset(struct msm_rpc_endpoint *ept)
2068{
2069 unsigned long flags;
2070 int rc = 1;
2071 spin_lock_irqsave(&ept->restart_lock, flags);
2072 if (ept->restart_state != RESTART_NORMAL) {
2073 ept->restart_state &= ~RESTART_PEND_NTFY;
2074 rc = 0;
2075 }
2076 spin_unlock_irqrestore(&ept->restart_lock, flags);
2077 return rc;
2078}
2079
2080/* TODO: permission check -- disallow unreg of somebody else's server */
2081int msm_rpc_unregister_server(struct msm_rpc_endpoint *ept,
2082 uint32_t prog, uint32_t vers)
2083{
2084 struct rr_server *server;
2085 server = rpcrouter_lookup_server(prog, vers);
2086
2087 if (!server)
2088 return -ENOENT;
2089 rpcrouter_destroy_server(server);
2090 return 0;
2091}
2092
2093int msm_rpc_get_curr_pkt_size(struct msm_rpc_endpoint *ept)
2094{
2095 unsigned long flags;
2096 struct rr_packet *pkt;
2097 int rc = 0;
2098
2099 if (!ept)
2100 return -EINVAL;
2101
2102 if (!msm_rpc_clear_netreset(ept))
2103 return -ENETRESET;
2104
2105 spin_lock_irqsave(&ept->read_q_lock, flags);
2106 if (!list_empty(&ept->read_q)) {
2107 pkt = list_first_entry(&ept->read_q, struct rr_packet, list);
2108 rc = pkt->length;
2109 }
2110 spin_unlock_irqrestore(&ept->read_q_lock, flags);
2111
2112 return rc;
2113}
2114
2115int msm_rpcrouter_close(void)
2116{
2117 struct rpcrouter_xprt_info *xprt_info, *tmp_xprt_info;
2118 union rr_control_msg ctl;
2119
2120 ctl.cmd = RPCROUTER_CTRL_CMD_BYE;
2121 mutex_lock(&xprt_info_list_lock);
2122 list_for_each_entry_safe(xprt_info, tmp_xprt_info,
2123 &xprt_info_list, list) {
2124 rpcrouter_send_control_msg(xprt_info, &ctl);
2125 xprt_info->xprt->close();
2126 list_del(&xprt_info->list);
2127 kfree(xprt_info);
2128 }
2129 mutex_unlock(&xprt_info_list_lock);
2130 return 0;
2131}
2132
2133#if defined(CONFIG_DEBUG_FS)
2134static int dump_servers(char *buf, int max)
2135{
2136 int i = 0;
2137 unsigned long flags;
2138 struct rr_server *svr;
2139 const char *sym;
2140
2141 spin_lock_irqsave(&server_list_lock, flags);
2142 list_for_each_entry(svr, &server_list, list) {
2143 i += scnprintf(buf + i, max - i, "pdev_name: %s\n",
2144 svr->pdev_name);
2145 i += scnprintf(buf + i, max - i, "pid: 0x%08x\n", svr->pid);
2146 i += scnprintf(buf + i, max - i, "cid: 0x%08x\n", svr->cid);
2147 i += scnprintf(buf + i, max - i, "prog: 0x%08x", svr->prog);
2148 sym = smd_rpc_get_sym(svr->prog);
2149 if (sym)
2150 i += scnprintf(buf + i, max - i, " (%s)\n", sym);
2151 else
2152 i += scnprintf(buf + i, max - i, "\n");
2153 i += scnprintf(buf + i, max - i, "vers: 0x%08x\n", svr->vers);
2154 i += scnprintf(buf + i, max - i, "\n");
2155 }
2156 spin_unlock_irqrestore(&server_list_lock, flags);
2157
2158 return i;
2159}
2160
2161static int dump_remote_endpoints(char *buf, int max)
2162{
2163 int i = 0;
2164 unsigned long flags;
2165 struct rr_remote_endpoint *ept;
2166
2167 spin_lock_irqsave(&remote_endpoints_lock, flags);
2168 list_for_each_entry(ept, &remote_endpoints, list) {
2169 i += scnprintf(buf + i, max - i, "pid: 0x%08x\n", ept->pid);
2170 i += scnprintf(buf + i, max - i, "cid: 0x%08x\n", ept->cid);
2171 i += scnprintf(buf + i, max - i, "tx_quota_cntr: %i\n",
2172 ept->tx_quota_cntr);
2173 i += scnprintf(buf + i, max - i, "quota_restart_state: %i\n",
2174 ept->quota_restart_state);
2175 i += scnprintf(buf + i, max - i, "\n");
2176 }
2177 spin_unlock_irqrestore(&remote_endpoints_lock, flags);
2178
2179 return i;
2180}
2181
/* Render every local RPC endpoint — its identity, destination binding,
 * outstanding reply xids and unread packets — into buf (at most max
 * bytes).  Returns the number of bytes written.  debugfs fill callback.
 */
static int dump_msm_rpc_endpoint(char *buf, int max)
{
	int i = 0;
	unsigned long flags;
	struct msm_rpc_reply *reply;
	struct msm_rpc_endpoint *ept;
	struct rr_packet *pkt;
	const char *sym;

	spin_lock_irqsave(&local_endpoints_lock, flags);
	list_for_each_entry(ept, &local_endpoints, list) {
		i += scnprintf(buf + i, max - i, "pid: 0x%08x\n", ept->pid);
		i += scnprintf(buf + i, max - i, "cid: 0x%08x\n", ept->cid);
		i += scnprintf(buf + i, max - i, "dst_pid: 0x%08x\n",
			       ept->dst_pid);
		i += scnprintf(buf + i, max - i, "dst_cid: 0x%08x\n",
			       ept->dst_cid);
		/* dst_prog/dst_vers are stored big-endian (RPC wire order);
		 * convert for display. */
		i += scnprintf(buf + i, max - i, "dst_prog: 0x%08x",
			       be32_to_cpu(ept->dst_prog));
		sym = smd_rpc_get_sym(be32_to_cpu(ept->dst_prog));
		if (sym)
			i += scnprintf(buf + i, max - i, " (%s)\n", sym);
		else
			i += scnprintf(buf + i, max - i, "\n");
		i += scnprintf(buf + i, max - i, "dst_vers: 0x%08x\n",
			       be32_to_cpu(ept->dst_vers));
		i += scnprintf(buf + i, max - i, "reply_cnt: %i\n",
			       ept->reply_cnt);
		i += scnprintf(buf + i, max - i, "restart_state: %i\n",
			       ept->restart_state);

		i += scnprintf(buf + i, max - i, "outstanding xids:\n");
		/* Plain spin_lock is sufficient here: interrupts are
		 * already disabled by the outer irqsave. */
		spin_lock(&ept->reply_q_lock);
		list_for_each_entry(reply, &ept->reply_pend_q, list)
			i += scnprintf(buf + i, max - i, "    xid = %u\n",
				       ntohl(reply->xid));
		spin_unlock(&ept->reply_q_lock);

		i += scnprintf(buf + i, max - i, "complete unread packets:\n");
		spin_lock(&ept->read_q_lock);
		list_for_each_entry(pkt, &ept->read_q, list) {
			i += scnprintf(buf + i, max - i, "    mid = %i\n",
				       pkt->mid);
			i += scnprintf(buf + i, max - i, "    length = %i\n",
				       pkt->length);
		}
		spin_unlock(&ept->read_q_lock);
		i += scnprintf(buf + i, max - i, "\n");
	}
	spin_unlock_irqrestore(&local_endpoints_lock, flags);

	return i;
}
2235
/* Single shared scratch buffer into which debug_read() renders dumps. */
#define DEBUG_BUFMAX 4096
static char debug_buffer[DEBUG_BUFMAX];
2238
/* debugfs read handler: regenerates the dump on every read by invoking
 * the fill callback that debug_open() stashed in private_data, then
 * copies the result to userspace.
 *
 * NOTE(review): debug_buffer is one static buffer shared by all router
 * debugfs files with no locking, so concurrent readers can interleave
 * or clobber each other's output — confirm this is acceptable here.
 */
static ssize_t debug_read(struct file *file, char __user *buf,
			  size_t count, loff_t *ppos)
{
	int (*fill)(char *buf, int max) = file->private_data;
	int bsize = fill(debug_buffer, DEBUG_BUFMAX);
	return simple_read_from_buffer(buf, count, ppos, debug_buffer, bsize);
}
2246
2247static int debug_open(struct inode *inode, struct file *file)
2248{
2249 file->private_data = inode->i_private;
2250 return 0;
2251}
2252
/* Shared fops for all router debugfs files; the per-file dump
 * generator travels via inode->i_private (see debug_create()). */
static const struct file_operations debug_ops = {
	.read = debug_read,
	.open = debug_open,
};
2257
2258static void debug_create(const char *name, mode_t mode,
2259 struct dentry *dent,
2260 int (*fill)(char *buf, int max))
2261{
2262 debugfs_create_file(name, mode, dent, fill, &debug_ops);
2263}
2264
2265static void debugfs_init(void)
2266{
2267 struct dentry *dent;
2268
2269 dent = debugfs_create_dir("smd_rpcrouter", 0);
2270 if (IS_ERR(dent))
2271 return;
2272
2273 debug_create("dump_msm_rpc_endpoints", 0444, dent,
2274 dump_msm_rpc_endpoint);
2275 debug_create("dump_remote_endpoints", 0444, dent,
2276 dump_remote_endpoints);
2277 debug_create("dump_servers", 0444, dent,
2278 dump_servers);
2279
2280}
2281
2282#else
2283static void debugfs_init(void) {}
2284#endif
2285
2286static int msm_rpcrouter_add_xprt(struct rpcrouter_xprt *xprt)
2287{
2288 struct rpcrouter_xprt_info *xprt_info;
2289
2290 D("Registering xprt %s to RPC Router\n", xprt->name);
2291
2292 xprt_info = kmalloc(sizeof(struct rpcrouter_xprt_info), GFP_KERNEL);
2293 if (!xprt_info)
2294 return -ENOMEM;
2295
2296 xprt_info->xprt = xprt;
2297 xprt_info->initialized = 0;
2298 xprt_info->remote_pid = -1;
2299 init_waitqueue_head(&xprt_info->read_wait);
2300 spin_lock_init(&xprt_info->lock);
2301 wake_lock_init(&xprt_info->wakelock,
2302 WAKE_LOCK_SUSPEND, xprt->name);
2303 xprt_info->need_len = 0;
2304 xprt_info->abort_data_read = 0;
2305 INIT_WORK(&xprt_info->read_data, do_read_data);
2306 INIT_LIST_HEAD(&xprt_info->list);
2307
2308 xprt_info->workqueue = create_singlethread_workqueue(xprt->name);
2309 if (!xprt_info->workqueue) {
2310 kfree(xprt_info);
2311 return -ENOMEM;
2312 }
2313
2314 if (!strcmp(xprt->name, "rpcrouter_loopback_xprt")) {
2315 xprt_info->remote_pid = RPCROUTER_PID_LOCAL;
2316 xprt_info->initialized = 1;
2317 } else {
2318 smsm_change_state(SMSM_APPS_STATE, 0, SMSM_RPCINIT);
2319 }
2320
2321 mutex_lock(&xprt_info_list_lock);
2322 list_add_tail(&xprt_info->list, &xprt_info_list);
2323 mutex_unlock(&xprt_info_list_lock);
2324
2325 queue_work(xprt_info->workqueue, &xprt_info->read_data);
2326
2327 xprt->priv = xprt_info;
2328
2329 return 0;
2330}
2331
/* Unregister a transport from the RPC router: abort its reader, remove
 * it from the transport list, tear down its workqueue and wakelock,
 * and free the per-transport state.  No-op if the transport was never
 * registered (xprt->priv == NULL). */
static void msm_rpcrouter_remove_xprt(struct rpcrouter_xprt *xprt)
{
	struct rpcrouter_xprt_info *xprt_info;
	unsigned long flags;

	if (xprt && xprt->priv) {
		xprt_info = xprt->priv;

		/* abort rr_read thread */
		xprt_info->abort_data_read = 1;
		wake_up(&xprt_info->read_wait);

		/* remove xprt from available xprts */
		mutex_lock(&xprt_info_list_lock);
		spin_lock_irqsave(&xprt_info->lock, flags);
		list_del(&xprt_info->list);

		/* unlock the spinlock last to avoid a race
		 * condition with rpcrouter_get_xprt_info
		 * in msm_rpc_write_pkt in which the
		 * xprt is returned from rpcrouter_get_xprt_info
		 * and then deleted here. */
		mutex_unlock(&xprt_info_list_lock);
		spin_unlock_irqrestore(&xprt_info->lock, flags);

		/* cleanup workqueues and wakelocks */
		/* flush first so any in-flight do_read_data sees the
		 * abort flag and drains before the queue is destroyed. */
		flush_workqueue(xprt_info->workqueue);
		destroy_workqueue(xprt_info->workqueue);
		wake_lock_destroy(&xprt_info->wakelock);


		/* free memory */
		xprt->priv = 0;
		kfree(xprt_info);
	}
}
2368
/* Deferred transport OPEN/CLOSE event: carries the transport pointer
 * to the worker that runs on rpcrouter_workqueue.  Allocated in
 * msm_rpcrouter_xprt_notify(), freed by the worker. */
struct rpcrouter_xprt_work {
	struct rpcrouter_xprt *xprt;
	struct work_struct work;
};
2373
2374static void xprt_open_worker(struct work_struct *work)
2375{
2376 struct rpcrouter_xprt_work *xprt_work =
2377 container_of(work, struct rpcrouter_xprt_work, work);
2378
2379 msm_rpcrouter_add_xprt(xprt_work->xprt);
2380
2381 kfree(xprt_work);
2382}
2383
2384static void xprt_close_worker(struct work_struct *work)
2385{
2386 struct rpcrouter_xprt_work *xprt_work =
2387 container_of(work, struct rpcrouter_xprt_work, work);
2388
2389 modem_reset_cleanup(xprt_work->xprt->priv);
2390 msm_rpcrouter_remove_xprt(xprt_work->xprt);
2391
2392 if (atomic_dec_return(&pending_close_count) == 0)
2393 wake_up(&subsystem_restart_wait);
2394
2395 kfree(xprt_work);
2396}
2397
2398void msm_rpcrouter_xprt_notify(struct rpcrouter_xprt *xprt, unsigned event)
2399{
2400 struct rpcrouter_xprt_info *xprt_info;
2401 struct rpcrouter_xprt_work *xprt_work;
2402
2403 /* Workqueue is created in init function which works for all existing
2404 * clients. If this fails in the future, then it will need to be
2405 * created earlier. */
2406 BUG_ON(!rpcrouter_workqueue);
2407
2408 switch (event) {
2409 case RPCROUTER_XPRT_EVENT_OPEN:
2410 D("open event for '%s'\n", xprt->name);
2411 xprt_work = kmalloc(sizeof(struct rpcrouter_xprt_work),
2412 GFP_ATOMIC);
2413 xprt_work->xprt = xprt;
2414 INIT_WORK(&xprt_work->work, xprt_open_worker);
2415 queue_work(rpcrouter_workqueue, &xprt_work->work);
2416 break;
2417
2418 case RPCROUTER_XPRT_EVENT_CLOSE:
2419 D("close event for '%s'\n", xprt->name);
2420
2421 atomic_inc(&pending_close_count);
2422
2423 xprt_work = kmalloc(sizeof(struct rpcrouter_xprt_work),
2424 GFP_ATOMIC);
2425 xprt_work->xprt = xprt;
2426 INIT_WORK(&xprt_work->work, xprt_close_worker);
2427 queue_work(rpcrouter_workqueue, &xprt_work->work);
2428 break;
2429 }
2430
2431 xprt_info = xprt->priv;
2432 if (xprt_info) {
2433 /* Check read_avail even for OPEN event to handle missed
2434 DATA events while processing the OPEN event*/
2435 if (xprt->read_avail() >= xprt_info->need_len)
2436 wake_lock(&xprt_info->wakelock);
2437 wake_up(&xprt_info->read_wait);
2438 }
2439}
2440
/* Forward declaration: the callback is referenced by the notifier
 * block below but defined after it. */
static int modem_restart_notifier_cb(struct notifier_block *this,
				unsigned long code,
				void *data);
/* Notifier block registered for modem subsystem restart events
 * (see modem_restart_late_init()). */
static struct notifier_block nb = {
	.notifier_call = modem_restart_notifier_cb,
};
2447
2448static int modem_restart_notifier_cb(struct notifier_block *this,
2449 unsigned long code,
2450 void *data)
2451{
2452 switch (code) {
2453 case SUBSYS_BEFORE_SHUTDOWN:
2454 D("%s: SUBSYS_BEFORE_SHUTDOWN\n", __func__);
2455 break;
2456
2457 case SUBSYS_BEFORE_POWERUP:
2458 D("%s: waiting for RPC restart to complete\n", __func__);
2459 wait_event(subsystem_restart_wait,
2460 atomic_read(&pending_close_count) == 0);
2461 D("%s: finished restart wait\n", __func__);
2462 break;
2463
2464 default:
2465 break;
2466 }
2467
2468 return NOTIFY_DONE;
2469}
2470
/* Opaque handle returned by the subsystem-notif framework; kept so the
 * registration could be undone (currently never unregistered). */
static void *restart_notifier_handle;
/* Register for modem restart notifications; a late_initcall so the
 * subsystem-notif framework is certain to be up first. */
static __init int modem_restart_late_init(void)
{
	restart_notifier_handle = subsys_notif_register_notifier("modem", &nb);
	return 0;
}
late_initcall(modem_restart_late_init);
2478
2479static int __init rpcrouter_init(void)
2480{
2481 int ret;
2482
2483 msm_rpc_connect_timeout_ms = 0;
2484 smd_rpcrouter_debug_mask |= SMEM_LOG;
2485 debugfs_init();
2486
2487
2488 /* Initialize what we need to start processing */
2489 rpcrouter_workqueue =
2490 create_singlethread_workqueue("rpcrouter");
2491 if (!rpcrouter_workqueue) {
2492 msm_rpcrouter_exit_devices();
2493 return -ENOMEM;
2494 }
2495
2496 init_waitqueue_head(&newserver_wait);
2497 init_waitqueue_head(&subsystem_restart_wait);
2498
2499 ret = msm_rpcrouter_init_devices();
2500 if (ret < 0)
2501 return ret;
2502
2503 return ret;
2504}
2505
2506module_init(rpcrouter_init);
2507MODULE_DESCRIPTION("MSM RPC Router");
2508MODULE_AUTHOR("San Mehat <san@android.com>");
2509MODULE_LICENSE("GPL");