blob: 5b5de03554b1ca39c8f92f1c9815680596948b42 [file] [log] [blame]
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001/* drivers/tty/n_smux.c
2 *
3 * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
4 *
5 * This software is licensed under the terms of the GNU General Public
6 * License version 2, as published by the Free Software Foundation, and
7 * may be copied, distributed, and modified under those terms.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 */
15#include <linux/module.h>
16#include <linux/types.h>
17#include <linux/errno.h>
18#include <linux/tty.h>
19#include <linux/tty_flip.h>
20#include <linux/tty_driver.h>
21#include <linux/smux.h>
22#include <linux/list.h>
23#include <linux/kfifo.h>
24#include <linux/slab.h>
25#include <linux/types.h>
26#include <linux/platform_device.h>
27#include <linux/delay.h>
28#include <mach/msm_serial_hs.h>
29#include "smux_private.h"
30#include "smux_loopback.h"
31
32#define SMUX_NOTIFY_FIFO_SIZE 128
33#define SMUX_TX_QUEUE_SIZE 256
Eric Holmberg8ed30f22012-05-10 19:16:51 -060034#define SMUX_WM_LOW 2
35#define SMUX_WM_HIGH 4
36#define SMUX_PKT_LOG_SIZE 80
37
38/* Maximum size we can accept in a single RX buffer */
39#define TTY_RECEIVE_ROOM 65536
40#define TTY_BUFFER_FULL_WAIT_MS 50
41
42/* maximum sleep time between wakeup attempts */
43#define SMUX_WAKEUP_DELAY_MAX (1 << 20)
44
45/* minimum delay for scheduling delayed work */
46#define SMUX_WAKEUP_DELAY_MIN (1 << 15)
47
48/* inactivity timeout for no rx/tx activity */
49#define SMUX_INACTIVITY_TIMEOUT_MS 1000
50
Eric Holmbergb8435c82012-06-05 14:51:29 -060051/* RX get_rx_buffer retry timeout values */
52#define SMUX_RX_RETRY_MIN_MS (1 << 0) /* 1 ms */
53#define SMUX_RX_RETRY_MAX_MS (1 << 10) /* 1024 ms */
54
Eric Holmberg8ed30f22012-05-10 19:16:51 -060055enum {
56 MSM_SMUX_DEBUG = 1U << 0,
57 MSM_SMUX_INFO = 1U << 1,
58 MSM_SMUX_POWER_INFO = 1U << 2,
59 MSM_SMUX_PKT = 1U << 3,
60};
61
62static int smux_debug_mask;
63module_param_named(debug_mask, smux_debug_mask,
64 int, S_IRUGO | S_IWUSR | S_IWGRP);
65
66/* Simulated wakeup used for testing */
67int smux_byte_loopback;
68module_param_named(byte_loopback, smux_byte_loopback,
69 int, S_IRUGO | S_IWUSR | S_IWGRP);
70int smux_simulate_wakeup_delay = 1;
71module_param_named(simulate_wakeup_delay, smux_simulate_wakeup_delay,
72 int, S_IRUGO | S_IWUSR | S_IWGRP);
73
74#define SMUX_DBG(x...) do { \
75 if (smux_debug_mask & MSM_SMUX_DEBUG) \
76 pr_info(x); \
77} while (0)
78
79#define SMUX_LOG_PKT_RX(pkt) do { \
80 if (smux_debug_mask & MSM_SMUX_PKT) \
81 smux_log_pkt(pkt, 1); \
82} while (0)
83
84#define SMUX_LOG_PKT_TX(pkt) do { \
85 if (smux_debug_mask & MSM_SMUX_PKT) \
86 smux_log_pkt(pkt, 0); \
87} while (0)
88
89/**
90 * Return true if channel is fully opened (both
91 * local and remote sides are in the OPENED state).
92 */
93#define IS_FULLY_OPENED(ch) \
94 (ch && (ch)->local_state == SMUX_LCH_LOCAL_OPENED \
95 && (ch)->remote_state == SMUX_LCH_REMOTE_OPENED)
96
97static struct platform_device smux_devs[] = {
98 {.name = "SMUX_CTL", .id = -1},
99 {.name = "SMUX_RMNET", .id = -1},
100 {.name = "SMUX_DUN_DATA_HSUART", .id = 0},
101 {.name = "SMUX_RMNET_DATA_HSUART", .id = 1},
102 {.name = "SMUX_RMNET_CTL_HSUART", .id = 0},
103 {.name = "SMUX_DIAG", .id = -1},
104};
105
106enum {
107 SMUX_CMD_STATUS_RTC = 1 << 0,
108 SMUX_CMD_STATUS_RTR = 1 << 1,
109 SMUX_CMD_STATUS_RI = 1 << 2,
110 SMUX_CMD_STATUS_DCD = 1 << 3,
111 SMUX_CMD_STATUS_FLOW_CNTL = 1 << 4,
112};
113
114/* Channel mode */
115enum {
116 SMUX_LCH_MODE_NORMAL,
117 SMUX_LCH_MODE_LOCAL_LOOPBACK,
118 SMUX_LCH_MODE_REMOTE_LOOPBACK,
119};
120
121enum {
122 SMUX_RX_IDLE,
123 SMUX_RX_MAGIC,
124 SMUX_RX_HDR,
125 SMUX_RX_PAYLOAD,
126 SMUX_RX_FAILURE,
127};
128
129/**
130 * Power states.
131 *
132 * The _FLUSH states are internal transitional states and are not part of the
133 * official state machine.
134 */
135enum {
136 SMUX_PWR_OFF,
137 SMUX_PWR_TURNING_ON,
138 SMUX_PWR_ON,
139 SMUX_PWR_TURNING_OFF_FLUSH,
140 SMUX_PWR_TURNING_OFF,
141 SMUX_PWR_OFF_FLUSH,
142};
143
144/**
145 * Logical Channel Structure. One instance per channel.
146 *
147 * Locking Hierarchy
148 * Each lock has a postfix that describes the locking level. If multiple locks
149 * are required, only increasing lock hierarchy numbers may be locked which
150 * ensures avoiding a deadlock.
151 *
152 * Locking Example
153 * If state_lock_lhb1 is currently held and the TX list needs to be
154 * manipulated, then tx_lock_lhb2 may be locked since it's locking hierarchy
155 * is greater. However, if tx_lock_lhb2 is held, then state_lock_lhb1 may
156 * not be acquired since it would result in a deadlock.
157 *
158 * Note that the Line Discipline locks (*_lha) should always be acquired
159 * before the logical channel locks.
160 */
161struct smux_lch_t {
162 /* channel state */
163 spinlock_t state_lock_lhb1;
164 uint8_t lcid;
165 unsigned local_state;
166 unsigned local_mode;
167 uint8_t local_tiocm;
168
169 unsigned remote_state;
170 unsigned remote_mode;
171 uint8_t remote_tiocm;
172
173 int tx_flow_control;
174
175 /* client callbacks and private data */
176 void *priv;
177 void (*notify)(void *priv, int event_type, const void *metadata);
178 int (*get_rx_buffer)(void *priv, void **pkt_priv, void **buffer,
179 int size);
180
Eric Holmbergb8435c82012-06-05 14:51:29 -0600181 /* RX Info */
182 struct list_head rx_retry_queue;
183 unsigned rx_retry_queue_cnt;
184 struct delayed_work rx_retry_work;
185
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600186 /* TX Info */
187 spinlock_t tx_lock_lhb2;
188 struct list_head tx_queue;
189 struct list_head tx_ready_list;
190 unsigned tx_pending_data_cnt;
191 unsigned notify_lwm;
192};
193
194union notifier_metadata {
195 struct smux_meta_disconnected disconnected;
196 struct smux_meta_read read;
197 struct smux_meta_write write;
198 struct smux_meta_tiocm tiocm;
199};
200
201struct smux_notify_handle {
202 void (*notify)(void *priv, int event_type, const void *metadata);
203 void *priv;
204 int event_type;
205 union notifier_metadata *metadata;
206};
207
208/**
Eric Holmbergb8435c82012-06-05 14:51:29 -0600209 * Get RX Buffer Retry structure.
210 *
211 * This is used for clients that are unable to provide an RX buffer
212 * immediately. This temporary structure will be used to temporarily hold the
213 * data and perform a retry.
214 */
215struct smux_rx_pkt_retry {
216 struct smux_pkt_t *pkt;
217 struct list_head rx_retry_list;
218 unsigned timeout_in_ms;
219};
220
221/**
Eric Holmberg0560f7a2012-05-31 15:50:26 -0600222 * Receive worker data structure.
223 *
224 * One instance is created for every call to smux_rx_state_machine.
225 */
226struct smux_rx_worker_data {
227 const unsigned char *data;
228 int len;
229 int flag;
230
231 struct work_struct work;
232 struct completion work_complete;
233};
234
235/**
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600236 * Line discipline and module structure.
237 *
238 * Only one instance since multiple instances of line discipline are not
239 * allowed.
240 */
241struct smux_ldisc_t {
242 spinlock_t lock_lha0;
243
244 int is_initialized;
245 int in_reset;
246 int ld_open_count;
247 struct tty_struct *tty;
248
Eric Holmberg0560f7a2012-05-31 15:50:26 -0600249 /* RX State Machine (singled-threaded access by smux_rx_wq) */
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600250 unsigned char recv_buf[SMUX_MAX_PKT_SIZE];
251 unsigned int recv_len;
252 unsigned int pkt_remain;
253 unsigned rx_state;
Eric Holmberg0560f7a2012-05-31 15:50:26 -0600254
255 /* RX Activity - accessed by multiple threads */
256 spinlock_t rx_lock_lha1;
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600257 unsigned rx_activity_flag;
258
259 /* TX / Power */
260 spinlock_t tx_lock_lha2;
261 struct list_head lch_tx_ready_list;
262 unsigned power_state;
263 unsigned pwr_wakeup_delay_us;
264 unsigned tx_activity_flag;
265 unsigned powerdown_enabled;
266};
267
268
269/* data structures */
270static struct smux_lch_t smux_lch[SMUX_NUM_LOGICAL_CHANNELS];
271static struct smux_ldisc_t smux;
272static const char *tty_error_type[] = {
273 [TTY_NORMAL] = "normal",
274 [TTY_OVERRUN] = "overrun",
275 [TTY_BREAK] = "break",
276 [TTY_PARITY] = "parity",
277 [TTY_FRAME] = "framing",
278};
279
280static const char *smux_cmds[] = {
281 [SMUX_CMD_DATA] = "DATA",
282 [SMUX_CMD_OPEN_LCH] = "OPEN",
283 [SMUX_CMD_CLOSE_LCH] = "CLOSE",
284 [SMUX_CMD_STATUS] = "STATUS",
285 [SMUX_CMD_PWR_CTL] = "PWR",
286 [SMUX_CMD_BYTE] = "Raw Byte",
287};
288
289static void smux_notify_local_fn(struct work_struct *work);
290static DECLARE_WORK(smux_notify_local, smux_notify_local_fn);
291
292static struct workqueue_struct *smux_notify_wq;
293static size_t handle_size;
294static struct kfifo smux_notify_fifo;
295static int queued_fifo_notifications;
296static DEFINE_SPINLOCK(notify_lock_lhc1);
297
298static struct workqueue_struct *smux_tx_wq;
Eric Holmberg0560f7a2012-05-31 15:50:26 -0600299static struct workqueue_struct *smux_rx_wq;
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600300static void smux_tx_worker(struct work_struct *work);
301static DECLARE_WORK(smux_tx_work, smux_tx_worker);
302
303static void smux_wakeup_worker(struct work_struct *work);
Eric Holmbergb8435c82012-06-05 14:51:29 -0600304static void smux_rx_retry_worker(struct work_struct *work);
Eric Holmberg0560f7a2012-05-31 15:50:26 -0600305static void smux_rx_worker(struct work_struct *work);
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600306static DECLARE_WORK(smux_wakeup_work, smux_wakeup_worker);
307static DECLARE_DELAYED_WORK(smux_wakeup_delayed_work, smux_wakeup_worker);
308
309static void smux_inactivity_worker(struct work_struct *work);
310static DECLARE_WORK(smux_inactivity_work, smux_inactivity_worker);
311static DECLARE_DELAYED_WORK(smux_delayed_inactivity_work,
312 smux_inactivity_worker);
313
314static long msm_smux_tiocm_get_atomic(struct smux_lch_t *ch);
315static void list_channel(struct smux_lch_t *ch);
316static int smux_send_status_cmd(struct smux_lch_t *ch);
317static int smux_dispatch_rx_pkt(struct smux_pkt_t *pkt);
318
319/**
320 * Convert TTY Error Flags to string for logging purposes.
321 *
322 * @flag TTY_* flag
323 * @returns String description or NULL if unknown
324 */
325static const char *tty_flag_to_str(unsigned flag)
326{
327 if (flag < ARRAY_SIZE(tty_error_type))
328 return tty_error_type[flag];
329 return NULL;
330}
331
332/**
333 * Convert SMUX Command to string for logging purposes.
334 *
335 * @cmd SMUX command
336 * @returns String description or NULL if unknown
337 */
338static const char *cmd_to_str(unsigned cmd)
339{
340 if (cmd < ARRAY_SIZE(smux_cmds))
341 return smux_cmds[cmd];
342 return NULL;
343}
344
345/**
346 * Set the reset state due to an unrecoverable failure.
347 */
348static void smux_enter_reset(void)
349{
350 pr_err("%s: unrecoverable failure, waiting for ssr\n", __func__);
351 smux.in_reset = 1;
352}
353
354static int lch_init(void)
355{
356 unsigned int id;
357 struct smux_lch_t *ch;
358 int i = 0;
359
360 handle_size = sizeof(struct smux_notify_handle *);
361
362 smux_notify_wq = create_singlethread_workqueue("smux_notify_wq");
363 smux_tx_wq = create_singlethread_workqueue("smux_tx_wq");
Eric Holmberg0560f7a2012-05-31 15:50:26 -0600364 smux_rx_wq = create_singlethread_workqueue("smux_rx_wq");
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600365
366 if (IS_ERR(smux_notify_wq) || IS_ERR(smux_tx_wq)) {
367 SMUX_DBG("%s: create_singlethread_workqueue ENOMEM\n",
368 __func__);
369 return -ENOMEM;
370 }
371
372 i |= kfifo_alloc(&smux_notify_fifo,
373 SMUX_NOTIFY_FIFO_SIZE * handle_size,
374 GFP_KERNEL);
375 i |= smux_loopback_init();
376
377 if (i) {
378 pr_err("%s: out of memory error\n", __func__);
379 return -ENOMEM;
380 }
381
382 for (id = 0 ; id < SMUX_NUM_LOGICAL_CHANNELS; id++) {
383 ch = &smux_lch[id];
384
385 spin_lock_init(&ch->state_lock_lhb1);
386 ch->lcid = id;
387 ch->local_state = SMUX_LCH_LOCAL_CLOSED;
388 ch->local_mode = SMUX_LCH_MODE_NORMAL;
389 ch->local_tiocm = 0x0;
390 ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
391 ch->remote_mode = SMUX_LCH_MODE_NORMAL;
392 ch->remote_tiocm = 0x0;
393 ch->tx_flow_control = 0;
394 ch->priv = 0;
395 ch->notify = 0;
396 ch->get_rx_buffer = 0;
397
Eric Holmbergb8435c82012-06-05 14:51:29 -0600398 INIT_LIST_HEAD(&ch->rx_retry_queue);
399 ch->rx_retry_queue_cnt = 0;
400 INIT_DELAYED_WORK(&ch->rx_retry_work, smux_rx_retry_worker);
401
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600402 spin_lock_init(&ch->tx_lock_lhb2);
403 INIT_LIST_HEAD(&ch->tx_queue);
404 INIT_LIST_HEAD(&ch->tx_ready_list);
405 ch->tx_pending_data_cnt = 0;
406 ch->notify_lwm = 0;
407 }
408
409 return 0;
410}
411
412int smux_assert_lch_id(uint32_t lcid)
413{
414 if (lcid >= SMUX_NUM_LOGICAL_CHANNELS)
415 return -ENXIO;
416 else
417 return 0;
418}
419
420/**
421 * Log packet information for debug purposes.
422 *
423 * @pkt Packet to log
424 * @is_recv 1 = RX packet; 0 = TX Packet
425 *
426 * [DIR][LCID] [LOCAL_STATE][LOCAL_MODE]:[REMOTE_STATE][REMOTE_MODE] PKT Info
427 *
428 * PKT Info:
429 * [CMD] flags [flags] len [PAYLOAD_LEN]:[PAD_LEN] [Payload hex bytes]
430 *
431 * Direction: R = Receive, S = Send
432 * Local State: C = Closed; c = closing; o = opening; O = Opened
433 * Local Mode: L = Local loopback; R = Remote loopback; N = Normal
434 * Remote State: C = Closed; O = Opened
435 * Remote Mode: R = Remote loopback; N = Normal
436 */
437static void smux_log_pkt(struct smux_pkt_t *pkt, int is_recv)
438{
439 char logbuf[SMUX_PKT_LOG_SIZE];
440 char cmd_extra[16];
441 int i = 0;
442 int count;
443 int len;
444 char local_state;
445 char local_mode;
446 char remote_state;
447 char remote_mode;
448 struct smux_lch_t *ch;
449 unsigned char *data;
450
451 ch = &smux_lch[pkt->hdr.lcid];
452
453 switch (ch->local_state) {
454 case SMUX_LCH_LOCAL_CLOSED:
455 local_state = 'C';
456 break;
457 case SMUX_LCH_LOCAL_OPENING:
458 local_state = 'o';
459 break;
460 case SMUX_LCH_LOCAL_OPENED:
461 local_state = 'O';
462 break;
463 case SMUX_LCH_LOCAL_CLOSING:
464 local_state = 'c';
465 break;
466 default:
467 local_state = 'U';
468 break;
469 }
470
471 switch (ch->local_mode) {
472 case SMUX_LCH_MODE_LOCAL_LOOPBACK:
473 local_mode = 'L';
474 break;
475 case SMUX_LCH_MODE_REMOTE_LOOPBACK:
476 local_mode = 'R';
477 break;
478 case SMUX_LCH_MODE_NORMAL:
479 local_mode = 'N';
480 break;
481 default:
482 local_mode = 'U';
483 break;
484 }
485
486 switch (ch->remote_state) {
487 case SMUX_LCH_REMOTE_CLOSED:
488 remote_state = 'C';
489 break;
490 case SMUX_LCH_REMOTE_OPENED:
491 remote_state = 'O';
492 break;
493
494 default:
495 remote_state = 'U';
496 break;
497 }
498
499 switch (ch->remote_mode) {
500 case SMUX_LCH_MODE_REMOTE_LOOPBACK:
501 remote_mode = 'R';
502 break;
503 case SMUX_LCH_MODE_NORMAL:
504 remote_mode = 'N';
505 break;
506 default:
507 remote_mode = 'U';
508 break;
509 }
510
511 /* determine command type (ACK, etc) */
512 cmd_extra[0] = '\0';
513 switch (pkt->hdr.cmd) {
514 case SMUX_CMD_OPEN_LCH:
515 if (pkt->hdr.flags & SMUX_CMD_OPEN_ACK)
516 snprintf(cmd_extra, sizeof(cmd_extra), " ACK");
517 break;
518 case SMUX_CMD_CLOSE_LCH:
519 if (pkt->hdr.flags & SMUX_CMD_CLOSE_ACK)
520 snprintf(cmd_extra, sizeof(cmd_extra), " ACK");
521 break;
522 };
523
524 i += snprintf(logbuf + i, SMUX_PKT_LOG_SIZE - i,
525 "smux: %c%d %c%c:%c%c %s%s flags %x len %d:%d ",
526 is_recv ? 'R' : 'S', pkt->hdr.lcid,
527 local_state, local_mode,
528 remote_state, remote_mode,
529 cmd_to_str(pkt->hdr.cmd), cmd_extra, pkt->hdr.flags,
530 pkt->hdr.payload_len, pkt->hdr.pad_len);
531
532 len = (pkt->hdr.payload_len > 16) ? 16 : pkt->hdr.payload_len;
533 data = (unsigned char *)pkt->payload;
534 for (count = 0; count < len; count++)
535 i += snprintf(logbuf + i, SMUX_PKT_LOG_SIZE - i,
536 "%02x ", (unsigned)data[count]);
537
538 pr_info("%s\n", logbuf);
539}
540
541static void smux_notify_local_fn(struct work_struct *work)
542{
543 struct smux_notify_handle *notify_handle = NULL;
544 union notifier_metadata *metadata = NULL;
545 unsigned long flags;
546 int i;
547
548 for (;;) {
549 /* retrieve notification */
550 spin_lock_irqsave(&notify_lock_lhc1, flags);
551 if (kfifo_len(&smux_notify_fifo) >= handle_size) {
552 i = kfifo_out(&smux_notify_fifo,
553 &notify_handle,
554 handle_size);
555 if (i != handle_size) {
556 pr_err("%s: unable to retrieve handle %d expected %d\n",
557 __func__, i, handle_size);
558 spin_unlock_irqrestore(&notify_lock_lhc1, flags);
559 break;
560 }
561 } else {
562 spin_unlock_irqrestore(&notify_lock_lhc1, flags);
563 break;
564 }
565 --queued_fifo_notifications;
566 spin_unlock_irqrestore(&notify_lock_lhc1, flags);
567
568 /* notify client */
569 metadata = notify_handle->metadata;
570 notify_handle->notify(notify_handle->priv,
571 notify_handle->event_type,
572 metadata);
573
574 kfree(metadata);
575 kfree(notify_handle);
576 }
577}
578
579/**
580 * Initialize existing packet.
581 */
582void smux_init_pkt(struct smux_pkt_t *pkt)
583{
584 memset(pkt, 0x0, sizeof(*pkt));
585 pkt->hdr.magic = SMUX_MAGIC;
586 INIT_LIST_HEAD(&pkt->list);
587}
588
589/**
590 * Allocate and initialize packet.
591 *
592 * If a payload is needed, either set it directly and ensure that it's freed or
593 * use smd_alloc_pkt_payload() to allocate a packet and it will be freed
594 * automatically when smd_free_pkt() is called.
595 */
596struct smux_pkt_t *smux_alloc_pkt(void)
597{
598 struct smux_pkt_t *pkt;
599
600 /* Consider a free list implementation instead of kmalloc */
601 pkt = kmalloc(sizeof(struct smux_pkt_t), GFP_ATOMIC);
602 if (!pkt) {
603 pr_err("%s: out of memory\n", __func__);
604 return NULL;
605 }
606 smux_init_pkt(pkt);
607 pkt->allocated = 1;
608
609 return pkt;
610}
611
612/**
613 * Free packet.
614 *
615 * @pkt Packet to free (may be NULL)
616 *
617 * If payload was allocated using smux_alloc_pkt_payload(), then it is freed as
618 * well. Otherwise, the caller is responsible for freeing the payload.
619 */
620void smux_free_pkt(struct smux_pkt_t *pkt)
621{
622 if (pkt) {
623 if (pkt->free_payload)
624 kfree(pkt->payload);
625 if (pkt->allocated)
626 kfree(pkt);
627 }
628}
629
630/**
631 * Allocate packet payload.
632 *
633 * @pkt Packet to add payload to
634 *
635 * @returns 0 on success, <0 upon error
636 *
637 * A flag is set to signal smux_free_pkt() to free the payload.
638 */
639int smux_alloc_pkt_payload(struct smux_pkt_t *pkt)
640{
641 if (!pkt)
642 return -EINVAL;
643
644 pkt->payload = kmalloc(pkt->hdr.payload_len, GFP_ATOMIC);
645 pkt->free_payload = 1;
646 if (!pkt->payload) {
647 pr_err("%s: unable to malloc %d bytes for payload\n",
648 __func__, pkt->hdr.payload_len);
649 return -ENOMEM;
650 }
651
652 return 0;
653}
654
655static int schedule_notify(uint8_t lcid, int event,
656 const union notifier_metadata *metadata)
657{
658 struct smux_notify_handle *notify_handle = 0;
659 union notifier_metadata *meta_copy = 0;
660 struct smux_lch_t *ch;
661 int i;
662 unsigned long flags;
663 int ret = 0;
664
665 ch = &smux_lch[lcid];
666 notify_handle = kzalloc(sizeof(struct smux_notify_handle),
667 GFP_ATOMIC);
668 if (!notify_handle) {
669 pr_err("%s: out of memory\n", __func__);
670 ret = -ENOMEM;
671 goto free_out;
672 }
673
674 notify_handle->notify = ch->notify;
675 notify_handle->priv = ch->priv;
676 notify_handle->event_type = event;
677 if (metadata) {
678 meta_copy = kzalloc(sizeof(union notifier_metadata),
679 GFP_ATOMIC);
680 if (!meta_copy) {
681 pr_err("%s: out of memory\n", __func__);
682 ret = -ENOMEM;
683 goto free_out;
684 }
685 *meta_copy = *metadata;
686 notify_handle->metadata = meta_copy;
687 } else {
688 notify_handle->metadata = NULL;
689 }
690
691 spin_lock_irqsave(&notify_lock_lhc1, flags);
692 i = kfifo_avail(&smux_notify_fifo);
693 if (i < handle_size) {
694 pr_err("%s: fifo full error %d expected %d\n",
695 __func__, i, handle_size);
696 ret = -ENOMEM;
697 goto unlock_out;
698 }
699
700 i = kfifo_in(&smux_notify_fifo, &notify_handle, handle_size);
701 if (i < 0 || i != handle_size) {
702 pr_err("%s: fifo not available error %d (expected %d)\n",
703 __func__, i, handle_size);
704 ret = -ENOSPC;
705 goto unlock_out;
706 }
707 ++queued_fifo_notifications;
708
709unlock_out:
710 spin_unlock_irqrestore(&notify_lock_lhc1, flags);
711
712free_out:
713 queue_work(smux_notify_wq, &smux_notify_local);
714 if (ret < 0 && notify_handle) {
715 kfree(notify_handle->metadata);
716 kfree(notify_handle);
717 }
718 return ret;
719}
720
721/**
722 * Returns the serialized size of a packet.
723 *
724 * @pkt Packet to serialize
725 *
726 * @returns Serialized length of packet
727 */
728static unsigned int smux_serialize_size(struct smux_pkt_t *pkt)
729{
730 unsigned int size;
731
732 size = sizeof(struct smux_hdr_t);
733 size += pkt->hdr.payload_len;
734 size += pkt->hdr.pad_len;
735
736 return size;
737}
738
739/**
740 * Serialize packet @pkt into output buffer @data.
741 *
742 * @pkt Packet to serialize
743 * @out Destination buffer pointer
744 * @out_len Size of serialized packet
745 *
746 * @returns 0 for success
747 */
748int smux_serialize(struct smux_pkt_t *pkt, char *out,
749 unsigned int *out_len)
750{
751 char *data_start = out;
752
753 if (smux_serialize_size(pkt) > SMUX_MAX_PKT_SIZE) {
754 pr_err("%s: packet size %d too big\n",
755 __func__, smux_serialize_size(pkt));
756 return -E2BIG;
757 }
758
759 memcpy(out, &pkt->hdr, sizeof(struct smux_hdr_t));
760 out += sizeof(struct smux_hdr_t);
761 if (pkt->payload) {
762 memcpy(out, pkt->payload, pkt->hdr.payload_len);
763 out += pkt->hdr.payload_len;
764 }
765 if (pkt->hdr.pad_len) {
766 memset(out, 0x0, pkt->hdr.pad_len);
767 out += pkt->hdr.pad_len;
768 }
769 *out_len = out - data_start;
770 return 0;
771}
772
773/**
774 * Serialize header and provide pointer to the data.
775 *
776 * @pkt Packet
777 * @out[out] Pointer to the serialized header data
778 * @out_len[out] Pointer to the serialized header length
779 */
780static void smux_serialize_hdr(struct smux_pkt_t *pkt, char **out,
781 unsigned int *out_len)
782{
783 *out = (char *)&pkt->hdr;
784 *out_len = sizeof(struct smux_hdr_t);
785}
786
787/**
788 * Serialize payload and provide pointer to the data.
789 *
790 * @pkt Packet
791 * @out[out] Pointer to the serialized payload data
792 * @out_len[out] Pointer to the serialized payload length
793 */
794static void smux_serialize_payload(struct smux_pkt_t *pkt, char **out,
795 unsigned int *out_len)
796{
797 *out = pkt->payload;
798 *out_len = pkt->hdr.payload_len;
799}
800
801/**
802 * Serialize padding and provide pointer to the data.
803 *
804 * @pkt Packet
805 * @out[out] Pointer to the serialized padding (always NULL)
806 * @out_len[out] Pointer to the serialized payload length
807 *
808 * Since the padding field value is undefined, only the size of the patting
809 * (@out_len) is set and the buffer pointer (@out) will always be NULL.
810 */
811static void smux_serialize_padding(struct smux_pkt_t *pkt, char **out,
812 unsigned int *out_len)
813{
814 *out = NULL;
815 *out_len = pkt->hdr.pad_len;
816}
817
818/**
819 * Write data to TTY framework and handle breaking the writes up if needed.
820 *
821 * @data Data to write
822 * @len Length of data
823 *
824 * @returns 0 for success, < 0 for failure
825 */
826static int write_to_tty(char *data, unsigned len)
827{
828 int data_written;
829
830 if (!data)
831 return 0;
832
833 while (len > 0) {
834 data_written = smux.tty->ops->write(smux.tty, data, len);
835 if (data_written >= 0) {
836 len -= data_written;
837 data += data_written;
838 } else {
839 pr_err("%s: TTY write returned error %d\n",
840 __func__, data_written);
841 return data_written;
842 }
843
844 if (len)
845 tty_wait_until_sent(smux.tty,
846 msecs_to_jiffies(TTY_BUFFER_FULL_WAIT_MS));
847
848 /* FUTURE - add SSR logic */
849 }
850 return 0;
851}
852
853/**
854 * Write packet to TTY.
855 *
856 * @pkt packet to write
857 *
858 * @returns 0 on success
859 */
860static int smux_tx_tty(struct smux_pkt_t *pkt)
861{
862 char *data;
863 unsigned int len;
864 int ret;
865
866 if (!smux.tty) {
867 pr_err("%s: TTY not initialized", __func__);
868 return -ENOTTY;
869 }
870
871 if (pkt->hdr.cmd == SMUX_CMD_BYTE) {
872 SMUX_DBG("%s: tty send single byte\n", __func__);
873 ret = write_to_tty(&pkt->hdr.flags, 1);
874 return ret;
875 }
876
877 smux_serialize_hdr(pkt, &data, &len);
878 ret = write_to_tty(data, len);
879 if (ret) {
880 pr_err("%s: failed %d to write header %d\n",
881 __func__, ret, len);
882 return ret;
883 }
884
885 smux_serialize_payload(pkt, &data, &len);
886 ret = write_to_tty(data, len);
887 if (ret) {
888 pr_err("%s: failed %d to write payload %d\n",
889 __func__, ret, len);
890 return ret;
891 }
892
893 smux_serialize_padding(pkt, &data, &len);
894 while (len > 0) {
895 char zero = 0x0;
896 ret = write_to_tty(&zero, 1);
897 if (ret) {
898 pr_err("%s: failed %d to write padding %d\n",
899 __func__, ret, len);
900 return ret;
901 }
902 --len;
903 }
904 return 0;
905}
906
907/**
908 * Send a single character.
909 *
910 * @ch Character to send
911 */
912static void smux_send_byte(char ch)
913{
914 struct smux_pkt_t pkt;
915
916 smux_init_pkt(&pkt);
917
918 pkt.hdr.cmd = SMUX_CMD_BYTE;
919 pkt.hdr.flags = ch;
920 pkt.hdr.lcid = 0;
921 pkt.hdr.flags = ch;
922 SMUX_LOG_PKT_TX(&pkt);
923 if (!smux_byte_loopback)
924 smux_tx_tty(&pkt);
925 else
926 smux_tx_loopback(&pkt);
927}
928
929/**
930 * Receive a single-character packet (used for internal testing).
931 *
932 * @ch Character to receive
933 * @lcid Logical channel ID for packet
934 *
935 * @returns 0 for success
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600936 */
937static int smux_receive_byte(char ch, int lcid)
938{
939 struct smux_pkt_t pkt;
940
941 smux_init_pkt(&pkt);
942 pkt.hdr.lcid = lcid;
943 pkt.hdr.cmd = SMUX_CMD_BYTE;
944 pkt.hdr.flags = ch;
945
946 return smux_dispatch_rx_pkt(&pkt);
947}
948
949/**
950 * Queue packet for transmit.
951 *
952 * @pkt_ptr Packet to queue
953 * @ch Channel to queue packet on
954 * @queue Queue channel on ready list
955 */
956static void smux_tx_queue(struct smux_pkt_t *pkt_ptr, struct smux_lch_t *ch,
957 int queue)
958{
959 unsigned long flags;
960
961 SMUX_DBG("%s: queuing pkt %p\n", __func__, pkt_ptr);
962
963 spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
964 list_add_tail(&pkt_ptr->list, &ch->tx_queue);
965 spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);
966
967 if (queue)
968 list_channel(ch);
969}
970
971/**
972 * Handle receive OPEN ACK command.
973 *
974 * @pkt Received packet
975 *
976 * @returns 0 for success
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600977 */
978static int smux_handle_rx_open_ack(struct smux_pkt_t *pkt)
979{
980 uint8_t lcid;
981 int ret;
982 struct smux_lch_t *ch;
983 int enable_powerdown = 0;
984
985 lcid = pkt->hdr.lcid;
986 ch = &smux_lch[lcid];
987
988 spin_lock(&ch->state_lock_lhb1);
989 if (ch->local_state == SMUX_LCH_LOCAL_OPENING) {
990 SMUX_DBG("lcid %d local state 0x%x -> 0x%x\n", lcid,
991 ch->local_state,
992 SMUX_LCH_LOCAL_OPENED);
993
994 if (pkt->hdr.flags & SMUX_CMD_OPEN_POWER_COLLAPSE)
995 enable_powerdown = 1;
996
997 ch->local_state = SMUX_LCH_LOCAL_OPENED;
998 if (ch->remote_state == SMUX_LCH_REMOTE_OPENED)
999 schedule_notify(lcid, SMUX_CONNECTED, NULL);
1000 ret = 0;
1001 } else if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
1002 SMUX_DBG("Remote loopback OPEN ACK received\n");
1003 ret = 0;
1004 } else {
1005 pr_err("%s: lcid %d state 0x%x open ack invalid\n",
1006 __func__, lcid, ch->local_state);
1007 ret = -EINVAL;
1008 }
1009 spin_unlock(&ch->state_lock_lhb1);
1010
1011 if (enable_powerdown) {
1012 spin_lock(&smux.tx_lock_lha2);
1013 if (!smux.powerdown_enabled) {
1014 smux.powerdown_enabled = 1;
1015 SMUX_DBG("%s: enabling power-collapse support\n",
1016 __func__);
1017 }
1018 spin_unlock(&smux.tx_lock_lha2);
1019 }
1020
1021 return ret;
1022}
1023
1024static int smux_handle_close_ack(struct smux_pkt_t *pkt)
1025{
1026 uint8_t lcid;
1027 int ret;
1028 struct smux_lch_t *ch;
1029 union notifier_metadata meta_disconnected;
1030 unsigned long flags;
1031
1032 lcid = pkt->hdr.lcid;
1033 ch = &smux_lch[lcid];
1034 meta_disconnected.disconnected.is_ssr = 0;
1035
1036 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
1037
1038 if (ch->local_state == SMUX_LCH_LOCAL_CLOSING) {
1039 SMUX_DBG("lcid %d local state 0x%x -> 0x%x\n", lcid,
1040 SMUX_LCH_LOCAL_CLOSING,
1041 SMUX_LCH_LOCAL_CLOSED);
1042 ch->local_state = SMUX_LCH_LOCAL_CLOSED;
1043 if (ch->remote_state == SMUX_LCH_REMOTE_CLOSED)
1044 schedule_notify(lcid, SMUX_DISCONNECTED,
1045 &meta_disconnected);
1046 ret = 0;
1047 } else if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
1048 SMUX_DBG("Remote loopback CLOSE ACK received\n");
1049 ret = 0;
1050 } else {
1051 pr_err("%s: lcid %d state 0x%x close ack invalid\n",
1052 __func__, lcid, ch->local_state);
1053 ret = -EINVAL;
1054 }
1055 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
1056 return ret;
1057}
1058
1059/**
1060 * Handle receive OPEN command.
1061 *
1062 * @pkt Received packet
1063 *
1064 * @returns 0 for success
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001065 */
1066static int smux_handle_rx_open_cmd(struct smux_pkt_t *pkt)
1067{
1068 uint8_t lcid;
1069 int ret;
1070 struct smux_lch_t *ch;
1071 struct smux_pkt_t *ack_pkt;
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001072 unsigned long flags;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001073 int tx_ready = 0;
1074 int enable_powerdown = 0;
1075
1076 if (pkt->hdr.flags & SMUX_CMD_OPEN_ACK)
1077 return smux_handle_rx_open_ack(pkt);
1078
1079 lcid = pkt->hdr.lcid;
1080 ch = &smux_lch[lcid];
1081
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001082 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001083
1084 if (ch->remote_state == SMUX_LCH_REMOTE_CLOSED) {
1085 SMUX_DBG("lcid %d remote state 0x%x -> 0x%x\n", lcid,
1086 SMUX_LCH_REMOTE_CLOSED,
1087 SMUX_LCH_REMOTE_OPENED);
1088
1089 ch->remote_state = SMUX_LCH_REMOTE_OPENED;
1090 if (pkt->hdr.flags & SMUX_CMD_OPEN_POWER_COLLAPSE)
1091 enable_powerdown = 1;
1092
1093 /* Send Open ACK */
1094 ack_pkt = smux_alloc_pkt();
1095 if (!ack_pkt) {
1096 /* exit out to allow retrying this later */
1097 ret = -ENOMEM;
1098 goto out;
1099 }
1100 ack_pkt->hdr.cmd = SMUX_CMD_OPEN_LCH;
1101 ack_pkt->hdr.flags = SMUX_CMD_OPEN_ACK
1102 | SMUX_CMD_OPEN_POWER_COLLAPSE;
1103 ack_pkt->hdr.lcid = lcid;
1104 ack_pkt->hdr.payload_len = 0;
1105 ack_pkt->hdr.pad_len = 0;
1106 if (pkt->hdr.flags & SMUX_CMD_OPEN_REMOTE_LOOPBACK) {
1107 ch->remote_mode = SMUX_LCH_MODE_REMOTE_LOOPBACK;
1108 ack_pkt->hdr.flags |= SMUX_CMD_OPEN_REMOTE_LOOPBACK;
1109 }
1110 smux_tx_queue(ack_pkt, ch, 0);
1111 tx_ready = 1;
1112
1113 if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
1114 /*
1115 * Send an Open command to the remote side to
1116 * simulate our local client doing it.
1117 */
1118 ack_pkt = smux_alloc_pkt();
1119 if (ack_pkt) {
1120 ack_pkt->hdr.lcid = lcid;
1121 ack_pkt->hdr.cmd = SMUX_CMD_OPEN_LCH;
1122 ack_pkt->hdr.flags =
1123 SMUX_CMD_OPEN_POWER_COLLAPSE;
1124 ack_pkt->hdr.payload_len = 0;
1125 ack_pkt->hdr.pad_len = 0;
1126 smux_tx_queue(ack_pkt, ch, 0);
1127 tx_ready = 1;
1128 } else {
1129 pr_err("%s: Remote loopack allocation failure\n",
1130 __func__);
1131 }
1132 } else if (ch->local_state == SMUX_LCH_LOCAL_OPENED) {
1133 schedule_notify(lcid, SMUX_CONNECTED, NULL);
1134 }
1135 ret = 0;
1136 } else {
1137 pr_err("%s: lcid %d remote state 0x%x open invalid\n",
1138 __func__, lcid, ch->remote_state);
1139 ret = -EINVAL;
1140 }
1141
1142out:
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001143 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001144
1145 if (enable_powerdown) {
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001146 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
Eric Holmberg8b9a6402012-06-05 13:32:57 -06001147 if (!smux.powerdown_enabled) {
1148 smux.powerdown_enabled = 1;
1149 SMUX_DBG("%s: enabling power-collapse support\n",
1150 __func__);
1151 }
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001152 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001153 }
1154
1155 if (tx_ready)
1156 list_channel(ch);
1157
1158 return ret;
1159}
1160
1161/**
1162 * Handle receive CLOSE command.
1163 *
1164 * @pkt Received packet
1165 *
1166 * @returns 0 for success
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001167 */
1168static int smux_handle_rx_close_cmd(struct smux_pkt_t *pkt)
1169{
1170 uint8_t lcid;
1171 int ret;
1172 struct smux_lch_t *ch;
1173 struct smux_pkt_t *ack_pkt;
1174 union notifier_metadata meta_disconnected;
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001175 unsigned long flags;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001176 int tx_ready = 0;
1177
1178 if (pkt->hdr.flags & SMUX_CMD_CLOSE_ACK)
1179 return smux_handle_close_ack(pkt);
1180
1181 lcid = pkt->hdr.lcid;
1182 ch = &smux_lch[lcid];
1183 meta_disconnected.disconnected.is_ssr = 0;
1184
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001185 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001186 if (ch->remote_state == SMUX_LCH_REMOTE_OPENED) {
1187 SMUX_DBG("lcid %d remote state 0x%x -> 0x%x\n", lcid,
1188 SMUX_LCH_REMOTE_OPENED,
1189 SMUX_LCH_REMOTE_CLOSED);
1190
1191 ack_pkt = smux_alloc_pkt();
1192 if (!ack_pkt) {
1193 /* exit out to allow retrying this later */
1194 ret = -ENOMEM;
1195 goto out;
1196 }
1197 ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
1198 ack_pkt->hdr.cmd = SMUX_CMD_CLOSE_LCH;
1199 ack_pkt->hdr.flags = SMUX_CMD_CLOSE_ACK;
1200 ack_pkt->hdr.lcid = lcid;
1201 ack_pkt->hdr.payload_len = 0;
1202 ack_pkt->hdr.pad_len = 0;
1203 smux_tx_queue(ack_pkt, ch, 0);
1204 tx_ready = 1;
1205
1206 if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
1207 /*
1208 * Send a Close command to the remote side to simulate
1209 * our local client doing it.
1210 */
1211 ack_pkt = smux_alloc_pkt();
1212 if (ack_pkt) {
1213 ack_pkt->hdr.lcid = lcid;
1214 ack_pkt->hdr.cmd = SMUX_CMD_CLOSE_LCH;
1215 ack_pkt->hdr.flags = 0;
1216 ack_pkt->hdr.payload_len = 0;
1217 ack_pkt->hdr.pad_len = 0;
1218 smux_tx_queue(ack_pkt, ch, 0);
1219 tx_ready = 1;
1220 } else {
1221 pr_err("%s: Remote loopack allocation failure\n",
1222 __func__);
1223 }
1224 }
1225
1226 if (ch->local_state == SMUX_LCH_LOCAL_CLOSED)
1227 schedule_notify(lcid, SMUX_DISCONNECTED,
1228 &meta_disconnected);
1229 ret = 0;
1230 } else {
1231 pr_err("%s: lcid %d remote state 0x%x close invalid\n",
1232 __func__, lcid, ch->remote_state);
1233 ret = -EINVAL;
1234 }
1235out:
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001236 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001237 if (tx_ready)
1238 list_channel(ch);
1239
1240 return ret;
1241}
1242
1243/*
1244 * Handle receive DATA command.
1245 *
1246 * @pkt Received packet
1247 *
1248 * @returns 0 for success
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001249 */
1250static int smux_handle_rx_data_cmd(struct smux_pkt_t *pkt)
1251{
1252 uint8_t lcid;
Eric Holmbergb8435c82012-06-05 14:51:29 -06001253 int ret = 0;
1254 int do_retry = 0;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001255 int tmp;
1256 int rx_len;
1257 struct smux_lch_t *ch;
1258 union notifier_metadata metadata;
1259 int remote_loopback;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001260 struct smux_pkt_t *ack_pkt;
1261 unsigned long flags;
1262
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001263 if (!pkt || smux_assert_lch_id(pkt->hdr.lcid)) {
1264 ret = -ENXIO;
1265 goto out;
1266 }
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001267
Eric Holmbergb8435c82012-06-05 14:51:29 -06001268 rx_len = pkt->hdr.payload_len;
1269 if (rx_len == 0) {
1270 ret = -EINVAL;
1271 goto out;
1272 }
1273
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001274 lcid = pkt->hdr.lcid;
1275 ch = &smux_lch[lcid];
1276 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
1277 remote_loopback = ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK;
1278
1279 if (ch->local_state != SMUX_LCH_LOCAL_OPENED
1280 && !remote_loopback) {
1281 pr_err("smux: ch %d error data on local state 0x%x",
1282 lcid, ch->local_state);
1283 ret = -EIO;
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001284 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001285 goto out;
1286 }
1287
1288 if (ch->remote_state != SMUX_LCH_REMOTE_OPENED) {
1289 pr_err("smux: ch %d error data on remote state 0x%x",
1290 lcid, ch->remote_state);
1291 ret = -EIO;
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001292 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001293 goto out;
1294 }
Eric Holmbergb8435c82012-06-05 14:51:29 -06001295
1296 if (!list_empty(&ch->rx_retry_queue)) {
1297 do_retry = 1;
1298 if ((ch->rx_retry_queue_cnt + 1) > SMUX_RX_RETRY_MAX_PKTS) {
1299 /* retry queue full */
1300 schedule_notify(lcid, SMUX_READ_FAIL, NULL);
1301 ret = -ENOMEM;
1302 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
1303 goto out;
1304 }
1305 }
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001306 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001307
Eric Holmbergb8435c82012-06-05 14:51:29 -06001308 if (remote_loopback) {
1309 /* Echo the data back to the remote client. */
1310 ack_pkt = smux_alloc_pkt();
1311 if (ack_pkt) {
1312 ack_pkt->hdr.lcid = lcid;
1313 ack_pkt->hdr.cmd = SMUX_CMD_DATA;
1314 ack_pkt->hdr.flags = 0;
1315 ack_pkt->hdr.payload_len = pkt->hdr.payload_len;
1316 if (ack_pkt->hdr.payload_len) {
1317 smux_alloc_pkt_payload(ack_pkt);
1318 memcpy(ack_pkt->payload, pkt->payload,
1319 ack_pkt->hdr.payload_len);
1320 }
1321 ack_pkt->hdr.pad_len = pkt->hdr.pad_len;
1322 smux_tx_queue(ack_pkt, ch, 0);
1323 list_channel(ch);
1324 } else {
1325 pr_err("%s: Remote loopack allocation failure\n",
1326 __func__);
1327 }
1328 } else if (!do_retry) {
1329 /* request buffer from client */
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001330 metadata.read.pkt_priv = 0;
1331 metadata.read.buffer = 0;
Eric Holmbergb8435c82012-06-05 14:51:29 -06001332 tmp = ch->get_rx_buffer(ch->priv,
1333 (void **)&metadata.read.pkt_priv,
1334 (void **)&metadata.read.buffer,
1335 rx_len);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001336
Eric Holmbergb8435c82012-06-05 14:51:29 -06001337 if (tmp == 0 && metadata.read.buffer) {
1338 /* place data into RX buffer */
1339 memcpy(metadata.read.buffer, pkt->payload,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001340 rx_len);
Eric Holmbergb8435c82012-06-05 14:51:29 -06001341 metadata.read.len = rx_len;
1342 schedule_notify(lcid, SMUX_READ_DONE,
1343 &metadata);
1344 } else if (tmp == -EAGAIN ||
1345 (tmp == 0 && !metadata.read.buffer)) {
1346 /* buffer allocation failed - add to retry queue */
1347 do_retry = 1;
1348 } else if (tmp < 0) {
1349 schedule_notify(lcid, SMUX_READ_FAIL, NULL);
1350 ret = -ENOMEM;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001351 }
1352 }
1353
Eric Holmbergb8435c82012-06-05 14:51:29 -06001354 if (do_retry) {
1355 struct smux_rx_pkt_retry *retry;
1356
1357 retry = kmalloc(sizeof(struct smux_rx_pkt_retry), GFP_KERNEL);
1358 if (!retry) {
1359 pr_err("%s: retry alloc failure\n", __func__);
1360 ret = -ENOMEM;
1361 schedule_notify(lcid, SMUX_READ_FAIL, NULL);
1362 goto out;
1363 }
1364 INIT_LIST_HEAD(&retry->rx_retry_list);
1365 retry->timeout_in_ms = SMUX_RX_RETRY_MIN_MS;
1366
1367 /* copy packet */
1368 retry->pkt = smux_alloc_pkt();
1369 if (!retry->pkt) {
1370 kfree(retry);
1371 pr_err("%s: pkt alloc failure\n", __func__);
1372 ret = -ENOMEM;
1373 schedule_notify(lcid, SMUX_READ_FAIL, NULL);
1374 goto out;
1375 }
1376 retry->pkt->hdr.lcid = lcid;
1377 retry->pkt->hdr.payload_len = pkt->hdr.payload_len;
1378 retry->pkt->hdr.pad_len = pkt->hdr.pad_len;
1379 if (retry->pkt->hdr.payload_len) {
1380 smux_alloc_pkt_payload(retry->pkt);
1381 memcpy(retry->pkt->payload, pkt->payload,
1382 retry->pkt->hdr.payload_len);
1383 }
1384
1385 /* add to retry queue */
1386 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
1387 list_add_tail(&retry->rx_retry_list, &ch->rx_retry_queue);
1388 ++ch->rx_retry_queue_cnt;
1389 if (ch->rx_retry_queue_cnt == 1)
1390 queue_delayed_work(smux_rx_wq, &ch->rx_retry_work,
1391 msecs_to_jiffies(retry->timeout_in_ms));
1392 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
1393 }
1394
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001395out:
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001396 return ret;
1397}
1398
1399/**
1400 * Handle receive byte command for testing purposes.
1401 *
1402 * @pkt Received packet
1403 *
1404 * @returns 0 for success
1405 */
1406static int smux_handle_rx_byte_cmd(struct smux_pkt_t *pkt)
1407{
1408 uint8_t lcid;
1409 int ret;
1410 struct smux_lch_t *ch;
1411 union notifier_metadata metadata;
1412 unsigned long flags;
1413
1414 if (!pkt || smux_assert_lch_id(pkt->hdr.lcid))
1415 return -ENXIO;
1416
1417 lcid = pkt->hdr.lcid;
1418 ch = &smux_lch[lcid];
1419 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
1420
1421 if (ch->local_state != SMUX_LCH_LOCAL_OPENED) {
1422 pr_err("smux: ch %d error data on local state 0x%x",
1423 lcid, ch->local_state);
1424 ret = -EIO;
1425 goto out;
1426 }
1427
1428 if (ch->remote_state != SMUX_LCH_REMOTE_OPENED) {
1429 pr_err("smux: ch %d error data on remote state 0x%x",
1430 lcid, ch->remote_state);
1431 ret = -EIO;
1432 goto out;
1433 }
1434
1435 metadata.read.pkt_priv = (void *)(int)pkt->hdr.flags;
1436 metadata.read.buffer = 0;
1437 schedule_notify(lcid, SMUX_READ_DONE, &metadata);
1438 ret = 0;
1439
1440out:
1441 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
1442 return ret;
1443}
1444
1445/**
1446 * Handle receive status command.
1447 *
1448 * @pkt Received packet
1449 *
1450 * @returns 0 for success
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001451 */
1452static int smux_handle_rx_status_cmd(struct smux_pkt_t *pkt)
1453{
1454 uint8_t lcid;
1455 int ret;
1456 struct smux_lch_t *ch;
1457 union notifier_metadata meta;
1458 unsigned long flags;
1459 int tx_ready = 0;
1460
1461 lcid = pkt->hdr.lcid;
1462 ch = &smux_lch[lcid];
1463
1464 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
1465 meta.tiocm.tiocm_old = ch->remote_tiocm;
1466 meta.tiocm.tiocm_new = pkt->hdr.flags;
1467
1468 /* update logical channel flow control */
1469 if ((meta.tiocm.tiocm_old & SMUX_CMD_STATUS_FLOW_CNTL) ^
1470 (meta.tiocm.tiocm_new & SMUX_CMD_STATUS_FLOW_CNTL)) {
1471 /* logical channel flow control changed */
1472 if (pkt->hdr.flags & SMUX_CMD_STATUS_FLOW_CNTL) {
1473 /* disabled TX */
1474 SMUX_DBG("TX Flow control enabled\n");
1475 ch->tx_flow_control = 1;
1476 } else {
1477 /* re-enable channel */
1478 SMUX_DBG("TX Flow control disabled\n");
1479 ch->tx_flow_control = 0;
1480 tx_ready = 1;
1481 }
1482 }
1483 meta.tiocm.tiocm_old = msm_smux_tiocm_get_atomic(ch);
1484 ch->remote_tiocm = pkt->hdr.flags;
1485 meta.tiocm.tiocm_new = msm_smux_tiocm_get_atomic(ch);
1486
1487 /* client notification for status change */
1488 if (IS_FULLY_OPENED(ch)) {
1489 if (meta.tiocm.tiocm_old != meta.tiocm.tiocm_new)
1490 schedule_notify(lcid, SMUX_TIOCM_UPDATE, &meta);
1491 ret = 0;
1492 }
1493 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
1494 if (tx_ready)
1495 list_channel(ch);
1496
1497 return ret;
1498}
1499
1500/**
1501 * Handle receive power command.
1502 *
1503 * @pkt Received packet
1504 *
1505 * @returns 0 for success
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001506 */
1507static int smux_handle_rx_power_cmd(struct smux_pkt_t *pkt)
1508{
1509 int tx_ready = 0;
1510 struct smux_pkt_t *ack_pkt;
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001511 unsigned long flags;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001512
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001513 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001514 if (pkt->hdr.flags & SMUX_CMD_PWR_CTL_ACK) {
1515 /* local sleep request ack */
1516 if (smux.power_state == SMUX_PWR_TURNING_OFF) {
1517 /* Power-down complete, turn off UART */
1518 SMUX_DBG("%s: Power %d->%d\n", __func__,
1519 smux.power_state, SMUX_PWR_OFF_FLUSH);
1520 smux.power_state = SMUX_PWR_OFF_FLUSH;
1521 queue_work(smux_tx_wq, &smux_inactivity_work);
1522 } else {
1523 pr_err("%s: sleep request ack invalid in state %d\n",
1524 __func__, smux.power_state);
1525 }
1526 } else {
1527 /* remote sleep request */
1528 if (smux.power_state == SMUX_PWR_ON
1529 || smux.power_state == SMUX_PWR_TURNING_OFF) {
1530 ack_pkt = smux_alloc_pkt();
1531 if (ack_pkt) {
1532 SMUX_DBG("%s: Power %d->%d\n", __func__,
1533 smux.power_state,
1534 SMUX_PWR_TURNING_OFF_FLUSH);
1535
1536 /* send power-down request */
1537 ack_pkt->hdr.cmd = SMUX_CMD_PWR_CTL;
1538 ack_pkt->hdr.flags = SMUX_CMD_PWR_CTL_ACK;
1539 ack_pkt->hdr.lcid = pkt->hdr.lcid;
1540 smux_tx_queue(ack_pkt,
1541 &smux_lch[ack_pkt->hdr.lcid], 0);
1542 tx_ready = 1;
1543 smux.power_state = SMUX_PWR_TURNING_OFF_FLUSH;
1544 queue_delayed_work(smux_tx_wq,
1545 &smux_delayed_inactivity_work,
1546 msecs_to_jiffies(
1547 SMUX_INACTIVITY_TIMEOUT_MS));
1548 }
1549 } else {
1550 pr_err("%s: sleep request invalid in state %d\n",
1551 __func__, smux.power_state);
1552 }
1553 }
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001554 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001555
1556 if (tx_ready)
1557 list_channel(&smux_lch[ack_pkt->hdr.lcid]);
1558
1559 return 0;
1560}
1561
1562/**
1563 * Handle dispatching a completed packet for receive processing.
1564 *
1565 * @pkt Packet to process
1566 *
1567 * @returns 0 for success
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001568 */
1569static int smux_dispatch_rx_pkt(struct smux_pkt_t *pkt)
1570{
1571 int ret;
1572
1573 SMUX_LOG_PKT_RX(pkt);
1574
1575 switch (pkt->hdr.cmd) {
1576 case SMUX_CMD_OPEN_LCH:
1577 ret = smux_handle_rx_open_cmd(pkt);
1578 break;
1579
1580 case SMUX_CMD_DATA:
1581 ret = smux_handle_rx_data_cmd(pkt);
1582 break;
1583
1584 case SMUX_CMD_CLOSE_LCH:
1585 ret = smux_handle_rx_close_cmd(pkt);
1586 break;
1587
1588 case SMUX_CMD_STATUS:
1589 ret = smux_handle_rx_status_cmd(pkt);
1590 break;
1591
1592 case SMUX_CMD_PWR_CTL:
1593 ret = smux_handle_rx_power_cmd(pkt);
1594 break;
1595
1596 case SMUX_CMD_BYTE:
1597 ret = smux_handle_rx_byte_cmd(pkt);
1598 break;
1599
1600 default:
1601 pr_err("%s: command %d unknown\n", __func__, pkt->hdr.cmd);
1602 ret = -EINVAL;
1603 }
1604 return ret;
1605}
1606
1607/**
1608 * Deserializes a packet and dispatches it to the packet receive logic.
1609 *
1610 * @data Raw data for one packet
1611 * @len Length of the data
1612 *
1613 * @returns 0 for success
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001614 */
1615static int smux_deserialize(unsigned char *data, int len)
1616{
1617 struct smux_pkt_t recv;
1618 uint8_t lcid;
1619
1620 smux_init_pkt(&recv);
1621
1622 /*
1623 * It may be possible to optimize this to not use the
1624 * temporary buffer.
1625 */
1626 memcpy(&recv.hdr, data, sizeof(struct smux_hdr_t));
1627
1628 if (recv.hdr.magic != SMUX_MAGIC) {
1629 pr_err("%s: invalid header magic\n", __func__);
1630 return -EINVAL;
1631 }
1632
1633 lcid = recv.hdr.lcid;
1634 if (smux_assert_lch_id(lcid)) {
1635 pr_err("%s: invalid channel id %d\n", __func__, lcid);
1636 return -ENXIO;
1637 }
1638
1639 if (recv.hdr.payload_len)
1640 recv.payload = data + sizeof(struct smux_hdr_t);
1641
1642 return smux_dispatch_rx_pkt(&recv);
1643}
1644
1645/**
1646 * Handle wakeup request byte.
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001647 */
1648static void smux_handle_wakeup_req(void)
1649{
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001650 unsigned long flags;
1651
1652 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001653 if (smux.power_state == SMUX_PWR_OFF
1654 || smux.power_state == SMUX_PWR_TURNING_ON) {
1655 /* wakeup system */
1656 SMUX_DBG("%s: Power %d->%d\n", __func__,
1657 smux.power_state, SMUX_PWR_ON);
1658 smux.power_state = SMUX_PWR_ON;
1659 queue_work(smux_tx_wq, &smux_wakeup_work);
1660 queue_work(smux_tx_wq, &smux_tx_work);
1661 queue_delayed_work(smux_tx_wq, &smux_delayed_inactivity_work,
1662 msecs_to_jiffies(SMUX_INACTIVITY_TIMEOUT_MS));
1663 smux_send_byte(SMUX_WAKEUP_ACK);
1664 } else {
1665 smux_send_byte(SMUX_WAKEUP_ACK);
1666 }
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001667 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001668}
1669
1670/**
1671 * Handle wakeup request ack.
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001672 */
1673static void smux_handle_wakeup_ack(void)
1674{
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001675 unsigned long flags;
1676
1677 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001678 if (smux.power_state == SMUX_PWR_TURNING_ON) {
1679 /* received response to wakeup request */
1680 SMUX_DBG("%s: Power %d->%d\n", __func__,
1681 smux.power_state, SMUX_PWR_ON);
1682 smux.power_state = SMUX_PWR_ON;
1683 queue_work(smux_tx_wq, &smux_tx_work);
1684 queue_delayed_work(smux_tx_wq, &smux_delayed_inactivity_work,
1685 msecs_to_jiffies(SMUX_INACTIVITY_TIMEOUT_MS));
1686
1687 } else if (smux.power_state != SMUX_PWR_ON) {
1688 /* invalid message */
1689 pr_err("%s: wakeup request ack invalid in state %d\n",
1690 __func__, smux.power_state);
1691 }
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001692 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001693}
1694
1695/**
1696 * RX State machine - IDLE state processing.
1697 *
1698 * @data New RX data to process
1699 * @len Length of the data
1700 * @used Return value of length processed
1701 * @flag Error flag - TTY_NORMAL 0 for no failure
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001702 */
1703static void smux_rx_handle_idle(const unsigned char *data,
1704 int len, int *used, int flag)
1705{
1706 int i;
1707
1708 if (flag) {
1709 if (smux_byte_loopback)
1710 smux_receive_byte(SMUX_UT_ECHO_ACK_FAIL,
1711 smux_byte_loopback);
1712 pr_err("%s: TTY error 0x%x - ignoring\n", __func__, flag);
1713 ++*used;
1714 return;
1715 }
1716
1717 for (i = *used; i < len && smux.rx_state == SMUX_RX_IDLE; i++) {
1718 switch (data[i]) {
1719 case SMUX_MAGIC_WORD1:
1720 smux.rx_state = SMUX_RX_MAGIC;
1721 break;
1722 case SMUX_WAKEUP_REQ:
1723 smux_handle_wakeup_req();
1724 break;
1725 case SMUX_WAKEUP_ACK:
1726 smux_handle_wakeup_ack();
1727 break;
1728 default:
1729 /* unexpected character */
1730 if (smux_byte_loopback && data[i] == SMUX_UT_ECHO_REQ)
1731 smux_receive_byte(SMUX_UT_ECHO_ACK_OK,
1732 smux_byte_loopback);
1733 pr_err("%s: parse error 0x%02x - ignoring\n", __func__,
1734 (unsigned)data[i]);
1735 break;
1736 }
1737 }
1738
1739 *used = i;
1740}
1741
1742/**
1743 * RX State machine - Header Magic state processing.
1744 *
1745 * @data New RX data to process
1746 * @len Length of the data
1747 * @used Return value of length processed
1748 * @flag Error flag - TTY_NORMAL 0 for no failure
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001749 */
1750static void smux_rx_handle_magic(const unsigned char *data,
1751 int len, int *used, int flag)
1752{
1753 int i;
1754
1755 if (flag) {
1756 pr_err("%s: TTY RX error %d\n", __func__, flag);
1757 smux_enter_reset();
1758 smux.rx_state = SMUX_RX_FAILURE;
1759 ++*used;
1760 return;
1761 }
1762
1763 for (i = *used; i < len && smux.rx_state == SMUX_RX_MAGIC; i++) {
1764 /* wait for completion of the magic */
1765 if (data[i] == SMUX_MAGIC_WORD2) {
1766 smux.recv_len = 0;
1767 smux.recv_buf[smux.recv_len++] = SMUX_MAGIC_WORD1;
1768 smux.recv_buf[smux.recv_len++] = SMUX_MAGIC_WORD2;
1769 smux.rx_state = SMUX_RX_HDR;
1770 } else {
1771 /* unexpected / trash character */
1772 pr_err("%s: rx parse error for char %c; *used=%d, len=%d\n",
1773 __func__, data[i], *used, len);
1774 smux.rx_state = SMUX_RX_IDLE;
1775 }
1776 }
1777
1778 *used = i;
1779}
1780
1781/**
1782 * RX State machine - Packet Header state processing.
1783 *
1784 * @data New RX data to process
1785 * @len Length of the data
1786 * @used Return value of length processed
1787 * @flag Error flag - TTY_NORMAL 0 for no failure
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001788 */
1789static void smux_rx_handle_hdr(const unsigned char *data,
1790 int len, int *used, int flag)
1791{
1792 int i;
1793 struct smux_hdr_t *hdr;
1794
1795 if (flag) {
1796 pr_err("%s: TTY RX error %d\n", __func__, flag);
1797 smux_enter_reset();
1798 smux.rx_state = SMUX_RX_FAILURE;
1799 ++*used;
1800 return;
1801 }
1802
1803 for (i = *used; i < len && smux.rx_state == SMUX_RX_HDR; i++) {
1804 smux.recv_buf[smux.recv_len++] = data[i];
1805
1806 if (smux.recv_len == sizeof(struct smux_hdr_t)) {
1807 /* complete header received */
1808 hdr = (struct smux_hdr_t *)smux.recv_buf;
1809 smux.pkt_remain = hdr->payload_len + hdr->pad_len;
1810 smux.rx_state = SMUX_RX_PAYLOAD;
1811 }
1812 }
1813 *used = i;
1814}
1815
1816/**
1817 * RX State machine - Packet Payload state processing.
1818 *
1819 * @data New RX data to process
1820 * @len Length of the data
1821 * @used Return value of length processed
1822 * @flag Error flag - TTY_NORMAL 0 for no failure
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001823 */
1824static void smux_rx_handle_pkt_payload(const unsigned char *data,
1825 int len, int *used, int flag)
1826{
1827 int remaining;
1828
1829 if (flag) {
1830 pr_err("%s: TTY RX error %d\n", __func__, flag);
1831 smux_enter_reset();
1832 smux.rx_state = SMUX_RX_FAILURE;
1833 ++*used;
1834 return;
1835 }
1836
1837 /* copy data into rx buffer */
1838 if (smux.pkt_remain < (len - *used))
1839 remaining = smux.pkt_remain;
1840 else
1841 remaining = len - *used;
1842
1843 memcpy(&smux.recv_buf[smux.recv_len], &data[*used], remaining);
1844 smux.recv_len += remaining;
1845 smux.pkt_remain -= remaining;
1846 *used += remaining;
1847
1848 if (smux.pkt_remain == 0) {
1849 /* complete packet received */
1850 smux_deserialize(smux.recv_buf, smux.recv_len);
1851 smux.rx_state = SMUX_RX_IDLE;
1852 }
1853}
1854
1855/**
1856 * Feed data to the receive state machine.
1857 *
1858 * @data Pointer to data block
1859 * @len Length of data
1860 * @flag TTY_NORMAL (0) for no error, otherwise TTY Error Flag
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001861 */
1862void smux_rx_state_machine(const unsigned char *data,
1863 int len, int flag)
1864{
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001865 struct smux_rx_worker_data work;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001866
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001867 work.data = data;
1868 work.len = len;
1869 work.flag = flag;
1870 INIT_WORK_ONSTACK(&work.work, smux_rx_worker);
1871 work.work_complete = COMPLETION_INITIALIZER_ONSTACK(work.work_complete);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001872
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001873 queue_work(smux_rx_wq, &work.work);
1874 wait_for_completion(&work.work_complete);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001875}
1876
1877/**
1878 * Add channel to transmit-ready list and trigger transmit worker.
1879 *
1880 * @ch Channel to add
1881 */
1882static void list_channel(struct smux_lch_t *ch)
1883{
1884 unsigned long flags;
1885
1886 SMUX_DBG("%s: listing channel %d\n",
1887 __func__, ch->lcid);
1888
1889 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
1890 spin_lock(&ch->tx_lock_lhb2);
1891 smux.tx_activity_flag = 1;
1892 if (list_empty(&ch->tx_ready_list))
1893 list_add_tail(&ch->tx_ready_list, &smux.lch_tx_ready_list);
1894 spin_unlock(&ch->tx_lock_lhb2);
1895 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
1896
1897 queue_work(smux_tx_wq, &smux_tx_work);
1898}
1899
1900/**
1901 * Transmit packet on correct transport and then perform client
1902 * notification.
1903 *
1904 * @ch Channel to transmit on
1905 * @pkt Packet to transmit
1906 */
1907static void smux_tx_pkt(struct smux_lch_t *ch, struct smux_pkt_t *pkt)
1908{
1909 union notifier_metadata meta_write;
1910 int ret;
1911
1912 if (ch && pkt) {
1913 SMUX_LOG_PKT_TX(pkt);
1914 if (ch->local_mode == SMUX_LCH_MODE_LOCAL_LOOPBACK)
1915 ret = smux_tx_loopback(pkt);
1916 else
1917 ret = smux_tx_tty(pkt);
1918
1919 if (pkt->hdr.cmd == SMUX_CMD_DATA) {
1920 /* notify write-done */
1921 meta_write.write.pkt_priv = pkt->priv;
1922 meta_write.write.buffer = pkt->payload;
1923 meta_write.write.len = pkt->hdr.payload_len;
1924 if (ret >= 0) {
1925 SMUX_DBG("%s: PKT write done", __func__);
1926 schedule_notify(ch->lcid, SMUX_WRITE_DONE,
1927 &meta_write);
1928 } else {
1929 pr_err("%s: failed to write pkt %d\n",
1930 __func__, ret);
1931 schedule_notify(ch->lcid, SMUX_WRITE_FAIL,
1932 &meta_write);
1933 }
1934 }
1935 }
1936}
1937
1938/**
1939 * Power-up the UART.
1940 */
1941static void smux_uart_power_on(void)
1942{
1943 struct uart_state *state;
1944
1945 if (!smux.tty || !smux.tty->driver_data) {
1946 pr_err("%s: unable to find UART port for tty %p\n",
1947 __func__, smux.tty);
1948 return;
1949 }
1950 state = smux.tty->driver_data;
1951 msm_hs_request_clock_on(state->uart_port);
1952}
1953
1954/**
1955 * Power down the UART.
1956 */
1957static void smux_uart_power_off(void)
1958{
1959 struct uart_state *state;
1960
1961 if (!smux.tty || !smux.tty->driver_data) {
1962 pr_err("%s: unable to find UART port for tty %p\n",
1963 __func__, smux.tty);
1964 return;
1965 }
1966 state = smux.tty->driver_data;
1967 msm_hs_request_clock_off(state->uart_port);
1968}
1969
1970/**
1971 * TX Wakeup Worker
1972 *
1973 * @work Not used
1974 *
1975 * Do an exponential back-off wakeup sequence with a maximum period
1976 * of approximately 1 second (1 << 20 microseconds).
1977 */
1978static void smux_wakeup_worker(struct work_struct *work)
1979{
1980 unsigned long flags;
1981 unsigned wakeup_delay;
1982 int complete = 0;
1983
1984 for (;;) {
1985 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
1986 if (smux.power_state == SMUX_PWR_ON) {
1987 /* wakeup complete */
1988 complete = 1;
1989 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
1990 break;
1991 } else {
1992 /* retry */
1993 wakeup_delay = smux.pwr_wakeup_delay_us;
1994 smux.pwr_wakeup_delay_us <<= 1;
1995 if (smux.pwr_wakeup_delay_us > SMUX_WAKEUP_DELAY_MAX)
1996 smux.pwr_wakeup_delay_us =
1997 SMUX_WAKEUP_DELAY_MAX;
1998 }
1999 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2000 SMUX_DBG("%s: triggering wakeup\n", __func__);
2001 smux_send_byte(SMUX_WAKEUP_REQ);
2002
2003 if (wakeup_delay < SMUX_WAKEUP_DELAY_MIN) {
2004 SMUX_DBG("%s: sleeping for %u us\n", __func__,
2005 wakeup_delay);
2006 usleep_range(wakeup_delay, 2*wakeup_delay);
2007 } else {
2008 /* schedule delayed work */
2009 SMUX_DBG("%s: scheduling delayed wakeup in %u ms\n",
2010 __func__, wakeup_delay / 1000);
2011 queue_delayed_work(smux_tx_wq,
2012 &smux_wakeup_delayed_work,
2013 msecs_to_jiffies(wakeup_delay / 1000));
2014 break;
2015 }
2016 }
2017
2018 if (complete) {
2019 SMUX_DBG("%s: wakeup complete\n", __func__);
2020 /*
2021 * Cancel any pending retry. This avoids a race condition with
2022 * a new power-up request because:
2023 * 1) this worker doesn't modify the state
2024 * 2) this worker is processed on the same single-threaded
2025 * workqueue as new TX wakeup requests
2026 */
2027 cancel_delayed_work(&smux_wakeup_delayed_work);
2028 }
2029}
2030
2031
2032/**
2033 * Inactivity timeout worker. Periodically scheduled when link is active.
2034 * When it detects inactivity, it will power-down the UART link.
2035 *
2036 * @work Work structure (not used)
2037 */
2038static void smux_inactivity_worker(struct work_struct *work)
2039{
2040 int tx_ready = 0;
2041 struct smux_pkt_t *pkt;
2042 unsigned long flags;
2043
2044 spin_lock_irqsave(&smux.rx_lock_lha1, flags);
2045 spin_lock(&smux.tx_lock_lha2);
2046
2047 if (!smux.tx_activity_flag && !smux.rx_activity_flag) {
2048 /* no activity */
2049 if (smux.powerdown_enabled) {
2050 if (smux.power_state == SMUX_PWR_ON) {
2051 /* start power-down sequence */
2052 pkt = smux_alloc_pkt();
2053 if (pkt) {
2054 SMUX_DBG("%s: Power %d->%d\n", __func__,
2055 smux.power_state,
2056 SMUX_PWR_TURNING_OFF);
2057 smux.power_state = SMUX_PWR_TURNING_OFF;
2058
2059 /* send power-down request */
2060 pkt->hdr.cmd = SMUX_CMD_PWR_CTL;
2061 pkt->hdr.flags = 0;
2062 pkt->hdr.lcid = 0;
2063 smux_tx_queue(pkt,
2064 &smux_lch[SMUX_TEST_LCID],
2065 0);
2066 tx_ready = 1;
2067 }
2068 }
2069 } else {
2070 SMUX_DBG("%s: link inactive, but powerdown disabled\n",
2071 __func__);
2072 }
2073 }
2074 smux.tx_activity_flag = 0;
2075 smux.rx_activity_flag = 0;
2076
2077 spin_unlock(&smux.tx_lock_lha2);
2078 spin_unlock_irqrestore(&smux.rx_lock_lha1, flags);
2079
2080 if (tx_ready)
2081 list_channel(&smux_lch[SMUX_TEST_LCID]);
2082
2083 if ((smux.power_state == SMUX_PWR_OFF_FLUSH) ||
2084 (smux.power_state == SMUX_PWR_TURNING_OFF_FLUSH)) {
2085 /* ready to power-down the UART */
2086 SMUX_DBG("%s: Power %d->%d\n", __func__,
2087 smux.power_state, SMUX_PWR_OFF);
2088 smux_uart_power_off();
2089 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
2090 smux.power_state = SMUX_PWR_OFF;
2091 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2092 }
2093
2094 /* reschedule inactivity worker */
2095 if (smux.power_state != SMUX_PWR_OFF)
2096 queue_delayed_work(smux_tx_wq, &smux_delayed_inactivity_work,
2097 msecs_to_jiffies(SMUX_INACTIVITY_TIMEOUT_MS));
2098}
2099
2100/**
Eric Holmbergb8435c82012-06-05 14:51:29 -06002101 * Remove RX retry packet from channel and free it.
2102 *
2103 * Must be called with state_lock_lhb1 locked.
2104 *
2105 * @ch Channel for retry packet
2106 * @retry Retry packet to remove
2107 */
2108void smux_remove_rx_retry(struct smux_lch_t *ch,
2109 struct smux_rx_pkt_retry *retry)
2110{
2111 list_del(&retry->rx_retry_list);
2112 --ch->rx_retry_queue_cnt;
2113 smux_free_pkt(retry->pkt);
2114 kfree(retry);
2115}
2116
2117/**
Eric Holmberg0560f7a2012-05-31 15:50:26 -06002118 * RX worker handles all receive operations.
2119 *
2120 * @work Work structure contained in TBD structure
2121 */
2122static void smux_rx_worker(struct work_struct *work)
2123{
2124 unsigned long flags;
2125 int used;
2126 int initial_rx_state;
2127 struct smux_rx_worker_data *w;
2128 const unsigned char *data;
2129 int len;
2130 int flag;
2131
2132 w = container_of(work, struct smux_rx_worker_data, work);
2133 data = w->data;
2134 len = w->len;
2135 flag = w->flag;
2136
2137 spin_lock_irqsave(&smux.rx_lock_lha1, flags);
2138 smux.rx_activity_flag = 1;
2139 spin_unlock_irqrestore(&smux.rx_lock_lha1, flags);
2140
2141 SMUX_DBG("%s: %p, len=%d, flag=%d\n", __func__, data, len, flag);
2142 used = 0;
2143 do {
2144 SMUX_DBG("%s: state %d; %d of %d\n",
2145 __func__, smux.rx_state, used, len);
2146 initial_rx_state = smux.rx_state;
2147
2148 switch (smux.rx_state) {
2149 case SMUX_RX_IDLE:
2150 smux_rx_handle_idle(data, len, &used, flag);
2151 break;
2152 case SMUX_RX_MAGIC:
2153 smux_rx_handle_magic(data, len, &used, flag);
2154 break;
2155 case SMUX_RX_HDR:
2156 smux_rx_handle_hdr(data, len, &used, flag);
2157 break;
2158 case SMUX_RX_PAYLOAD:
2159 smux_rx_handle_pkt_payload(data, len, &used, flag);
2160 break;
2161 default:
2162 SMUX_DBG("%s: invalid state %d\n",
2163 __func__, smux.rx_state);
2164 smux.rx_state = SMUX_RX_IDLE;
2165 break;
2166 }
2167 } while (used < len || smux.rx_state != initial_rx_state);
2168
2169 complete(&w->work_complete);
2170}
2171
2172/**
Eric Holmbergb8435c82012-06-05 14:51:29 -06002173 * RX Retry worker handles retrying get_rx_buffer calls that previously failed
2174 * because the client was not ready (-EAGAIN).
2175 *
2176 * @work Work structure contained in smux_lch_t structure
2177 */
2178static void smux_rx_retry_worker(struct work_struct *work)
2179{
2180 struct smux_lch_t *ch;
2181 struct smux_rx_pkt_retry *retry;
2182 union notifier_metadata metadata;
2183 int tmp;
2184 unsigned long flags;
2185
2186 ch = container_of(work, struct smux_lch_t, rx_retry_work.work);
2187
2188 /* get next retry packet */
2189 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2190 if (ch->local_state != SMUX_LCH_LOCAL_OPENED) {
2191 /* port has been closed - remove all retries */
2192 while (!list_empty(&ch->rx_retry_queue)) {
2193 retry = list_first_entry(&ch->rx_retry_queue,
2194 struct smux_rx_pkt_retry,
2195 rx_retry_list);
2196 smux_remove_rx_retry(ch, retry);
2197 }
2198 }
2199
2200 if (list_empty(&ch->rx_retry_queue)) {
2201 SMUX_DBG("%s: retry list empty for channel %d\n",
2202 __func__, ch->lcid);
2203 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2204 return;
2205 }
2206 retry = list_first_entry(&ch->rx_retry_queue,
2207 struct smux_rx_pkt_retry,
2208 rx_retry_list);
2209 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2210
2211 SMUX_DBG("%s: retrying rx pkt %p\n", __func__, retry);
2212 metadata.read.pkt_priv = 0;
2213 metadata.read.buffer = 0;
2214 tmp = ch->get_rx_buffer(ch->priv,
2215 (void **)&metadata.read.pkt_priv,
2216 (void **)&metadata.read.buffer,
2217 retry->pkt->hdr.payload_len);
2218 if (tmp == 0 && metadata.read.buffer) {
2219 /* have valid RX buffer */
2220 memcpy(metadata.read.buffer, retry->pkt->payload,
2221 retry->pkt->hdr.payload_len);
2222 metadata.read.len = retry->pkt->hdr.payload_len;
2223
2224 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2225 smux_remove_rx_retry(ch, retry);
2226 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2227
2228 schedule_notify(ch->lcid, SMUX_READ_DONE, &metadata);
2229 } else if (tmp == -EAGAIN ||
2230 (tmp == 0 && !metadata.read.buffer)) {
2231 /* retry again */
2232 retry->timeout_in_ms <<= 1;
2233 if (retry->timeout_in_ms > SMUX_RX_RETRY_MAX_MS) {
2234 /* timed out */
2235 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2236 smux_remove_rx_retry(ch, retry);
2237 schedule_notify(ch->lcid, SMUX_READ_FAIL, NULL);
2238 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2239 }
2240 } else {
2241 /* client error - drop packet */
2242 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2243 smux_remove_rx_retry(ch, retry);
2244 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2245
2246 schedule_notify(ch->lcid, SMUX_READ_FAIL, NULL);
2247 }
2248
2249 /* schedule next retry */
2250 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2251 if (!list_empty(&ch->rx_retry_queue)) {
2252 retry = list_first_entry(&ch->rx_retry_queue,
2253 struct smux_rx_pkt_retry,
2254 rx_retry_list);
2255 queue_delayed_work(smux_rx_wq, &ch->rx_retry_work,
2256 msecs_to_jiffies(retry->timeout_in_ms));
2257 }
2258 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2259}
2260
2261/**
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002262 * Transmit worker handles serializing and transmitting packets onto the
2263 * underlying transport.
2264 *
2265 * @work Work structure (not used)
2266 */
2267static void smux_tx_worker(struct work_struct *work)
2268{
2269 struct smux_pkt_t *pkt;
2270 struct smux_lch_t *ch;
2271 unsigned low_wm_notif;
2272 unsigned lcid;
2273 unsigned long flags;
2274
2275
2276 /*
2277 * Transmit packets in round-robin fashion based upon ready
2278 * channels.
2279 *
2280 * To eliminate the need to hold a lock for the entire
2281 * iteration through the channel ready list, the head of the
2282 * ready-channel list is always the next channel to be
2283 * processed. To send a packet, the first valid packet in
2284 * the head channel is removed and the head channel is then
2285 * rescheduled at the end of the queue by removing it and
2286 * inserting after the tail. The locks can then be released
2287 * while the packet is processed.
2288 */
2289 for (;;) {
2290 pkt = NULL;
2291 low_wm_notif = 0;
2292
2293 /* get the next ready channel */
2294 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
2295 if (list_empty(&smux.lch_tx_ready_list)) {
2296 /* no ready channels */
2297 SMUX_DBG("%s: no more ready channels, exiting\n",
2298 __func__);
2299 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2300 break;
2301 }
2302 smux.tx_activity_flag = 1;
2303
2304 if (smux.power_state != SMUX_PWR_ON
2305 && smux.power_state != SMUX_PWR_TURNING_OFF
2306 && smux.power_state != SMUX_PWR_TURNING_OFF_FLUSH) {
2307 /* Link isn't ready to transmit */
2308 if (smux.power_state == SMUX_PWR_OFF) {
2309 /* link is off, trigger wakeup */
2310 smux.pwr_wakeup_delay_us = 1;
2311 SMUX_DBG("%s: Power %d->%d\n", __func__,
2312 smux.power_state,
2313 SMUX_PWR_TURNING_ON);
2314 smux.power_state = SMUX_PWR_TURNING_ON;
2315 spin_unlock_irqrestore(&smux.tx_lock_lha2,
2316 flags);
2317 smux_uart_power_on();
2318 queue_work(smux_tx_wq, &smux_wakeup_work);
2319 } else {
2320 SMUX_DBG("%s: can not tx with power state %d\n",
2321 __func__,
2322 smux.power_state);
2323 spin_unlock_irqrestore(&smux.tx_lock_lha2,
2324 flags);
2325 }
2326 break;
2327 }
2328
2329 /* get the next packet to send and rotate channel list */
2330 ch = list_first_entry(&smux.lch_tx_ready_list,
2331 struct smux_lch_t,
2332 tx_ready_list);
2333
2334 spin_lock(&ch->state_lock_lhb1);
2335 spin_lock(&ch->tx_lock_lhb2);
2336 if (!list_empty(&ch->tx_queue)) {
2337 /*
2338 * If remote TX flow control is enabled or
2339 * the channel is not fully opened, then only
2340 * send command packets.
2341 */
2342 if (ch->tx_flow_control || !IS_FULLY_OPENED(ch)) {
2343 struct smux_pkt_t *curr;
2344 list_for_each_entry(curr, &ch->tx_queue, list) {
2345 if (curr->hdr.cmd != SMUX_CMD_DATA) {
2346 pkt = curr;
2347 break;
2348 }
2349 }
2350 } else {
2351 /* get next cmd/data packet to send */
2352 pkt = list_first_entry(&ch->tx_queue,
2353 struct smux_pkt_t, list);
2354 }
2355 }
2356
2357 if (pkt) {
2358 list_del(&pkt->list);
2359
2360 /* update packet stats */
2361 if (pkt->hdr.cmd == SMUX_CMD_DATA) {
2362 --ch->tx_pending_data_cnt;
2363 if (ch->notify_lwm &&
2364 ch->tx_pending_data_cnt
2365 <= SMUX_WM_LOW) {
2366 ch->notify_lwm = 0;
2367 low_wm_notif = 1;
2368 }
2369 }
2370
2371 /* advance to the next ready channel */
2372 list_rotate_left(&smux.lch_tx_ready_list);
2373 } else {
2374 /* no data in channel to send, remove from ready list */
2375 list_del(&ch->tx_ready_list);
2376 INIT_LIST_HEAD(&ch->tx_ready_list);
2377 }
2378 lcid = ch->lcid;
2379 spin_unlock(&ch->tx_lock_lhb2);
2380 spin_unlock(&ch->state_lock_lhb1);
2381 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2382
2383 if (low_wm_notif)
2384 schedule_notify(lcid, SMUX_LOW_WM_HIT, NULL);
2385
2386 /* send the packet */
2387 smux_tx_pkt(ch, pkt);
2388 smux_free_pkt(pkt);
2389 }
2390}
2391
2392
2393/**********************************************************************/
2394/* Kernel API */
2395/**********************************************************************/
2396
2397/**
2398 * Set or clear channel option using the SMUX_CH_OPTION_* channel
2399 * flags.
2400 *
2401 * @lcid Logical channel ID
2402 * @set Options to set
2403 * @clear Options to clear
2404 *
2405 * @returns 0 for success, < 0 for failure
2406 */
2407int msm_smux_set_ch_option(uint8_t lcid, uint32_t set, uint32_t clear)
2408{
2409 unsigned long flags;
2410 struct smux_lch_t *ch;
2411 int tx_ready = 0;
2412 int ret = 0;
2413
2414 if (smux_assert_lch_id(lcid))
2415 return -ENXIO;
2416
2417 ch = &smux_lch[lcid];
2418 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2419
2420 /* Local loopback mode */
2421 if (set & SMUX_CH_OPTION_LOCAL_LOOPBACK)
2422 ch->local_mode = SMUX_LCH_MODE_LOCAL_LOOPBACK;
2423
2424 if (clear & SMUX_CH_OPTION_LOCAL_LOOPBACK)
2425 ch->local_mode = SMUX_LCH_MODE_NORMAL;
2426
2427 /* Remote loopback mode */
2428 if (set & SMUX_CH_OPTION_REMOTE_LOOPBACK)
2429 ch->local_mode = SMUX_LCH_MODE_REMOTE_LOOPBACK;
2430
2431 if (clear & SMUX_CH_OPTION_REMOTE_LOOPBACK)
2432 ch->local_mode = SMUX_LCH_MODE_NORMAL;
2433
2434 /* Flow control */
2435 if (set & SMUX_CH_OPTION_REMOTE_TX_STOP) {
2436 ch->local_tiocm |= SMUX_CMD_STATUS_FLOW_CNTL;
2437 ret = smux_send_status_cmd(ch);
2438 tx_ready = 1;
2439 }
2440
2441 if (clear & SMUX_CH_OPTION_REMOTE_TX_STOP) {
2442 ch->local_tiocm &= ~SMUX_CMD_STATUS_FLOW_CNTL;
2443 ret = smux_send_status_cmd(ch);
2444 tx_ready = 1;
2445 }
2446
2447 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2448
2449 if (tx_ready)
2450 list_channel(ch);
2451
2452 return ret;
2453}
2454
2455/**
2456 * Starts the opening sequence for a logical channel.
2457 *
2458 * @lcid Logical channel ID
2459 * @priv Free for client usage
2460 * @notify Event notification function
2461 * @get_rx_buffer Function used to provide a receive buffer to SMUX
2462 *
2463 * @returns 0 for success, <0 otherwise
2464 *
2465 * A channel must be fully closed (either not previously opened or
2466 * msm_smux_close() has been called and the SMUX_DISCONNECTED has been
2467 * received.
2468 *
2469 * One the remote side is opened, the client will receive a SMUX_CONNECTED
2470 * event.
2471 */
2472int msm_smux_open(uint8_t lcid, void *priv,
2473 void (*notify)(void *priv, int event_type, const void *metadata),
2474 int (*get_rx_buffer)(void *priv, void **pkt_priv, void **buffer,
2475 int size))
2476{
2477 int ret;
2478 struct smux_lch_t *ch;
2479 struct smux_pkt_t *pkt;
2480 int tx_ready = 0;
2481 unsigned long flags;
2482
2483 if (smux_assert_lch_id(lcid))
2484 return -ENXIO;
2485
2486 ch = &smux_lch[lcid];
2487 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2488
2489 if (ch->local_state == SMUX_LCH_LOCAL_CLOSING) {
2490 ret = -EAGAIN;
2491 goto out;
2492 }
2493
2494 if (ch->local_state != SMUX_LCH_LOCAL_CLOSED) {
2495 pr_err("%s: open lcid %d local state %x invalid\n",
2496 __func__, lcid, ch->local_state);
2497 ret = -EINVAL;
2498 goto out;
2499 }
2500
2501 SMUX_DBG("lcid %d local state 0x%x -> 0x%x\n", lcid,
2502 ch->local_state,
2503 SMUX_LCH_LOCAL_OPENING);
2504
2505 ch->local_state = SMUX_LCH_LOCAL_OPENING;
2506
2507 ch->priv = priv;
2508 ch->notify = notify;
2509 ch->get_rx_buffer = get_rx_buffer;
2510 ret = 0;
2511
2512 /* Send Open Command */
2513 pkt = smux_alloc_pkt();
2514 if (!pkt) {
2515 ret = -ENOMEM;
2516 goto out;
2517 }
2518 pkt->hdr.magic = SMUX_MAGIC;
2519 pkt->hdr.cmd = SMUX_CMD_OPEN_LCH;
2520 pkt->hdr.flags = SMUX_CMD_OPEN_POWER_COLLAPSE;
2521 if (ch->local_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK)
2522 pkt->hdr.flags |= SMUX_CMD_OPEN_REMOTE_LOOPBACK;
2523 pkt->hdr.lcid = lcid;
2524 pkt->hdr.payload_len = 0;
2525 pkt->hdr.pad_len = 0;
2526 smux_tx_queue(pkt, ch, 0);
2527 tx_ready = 1;
2528
2529out:
2530 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2531 if (tx_ready)
2532 list_channel(ch);
2533 return ret;
2534}
2535
2536/**
2537 * Starts the closing sequence for a logical channel.
2538 *
2539 * @lcid Logical channel ID
2540 *
2541 * @returns 0 for success, <0 otherwise
2542 *
2543 * Once the close event has been acknowledge by the remote side, the client
2544 * will receive a SMUX_DISCONNECTED notification.
2545 */
2546int msm_smux_close(uint8_t lcid)
2547{
2548 int ret = 0;
2549 struct smux_lch_t *ch;
2550 struct smux_pkt_t *pkt;
2551 int tx_ready = 0;
2552 unsigned long flags;
2553
2554 if (smux_assert_lch_id(lcid))
2555 return -ENXIO;
2556
2557 ch = &smux_lch[lcid];
2558 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2559 ch->local_tiocm = 0x0;
2560 ch->remote_tiocm = 0x0;
2561 ch->tx_pending_data_cnt = 0;
2562 ch->notify_lwm = 0;
2563
2564 /* Purge TX queue */
2565 spin_lock(&ch->tx_lock_lhb2);
2566 while (!list_empty(&ch->tx_queue)) {
2567 pkt = list_first_entry(&ch->tx_queue, struct smux_pkt_t,
2568 list);
2569 list_del(&pkt->list);
2570
2571 if (pkt->hdr.cmd == SMUX_CMD_OPEN_LCH) {
2572 /* Open was never sent, just force to closed state */
2573 union notifier_metadata meta_disconnected;
2574
2575 ch->local_state = SMUX_LCH_LOCAL_CLOSED;
2576 meta_disconnected.disconnected.is_ssr = 0;
2577 schedule_notify(lcid, SMUX_DISCONNECTED,
2578 &meta_disconnected);
2579 } else if (pkt->hdr.cmd == SMUX_CMD_DATA) {
2580 /* Notify client of failed write */
2581 union notifier_metadata meta_write;
2582
2583 meta_write.write.pkt_priv = pkt->priv;
2584 meta_write.write.buffer = pkt->payload;
2585 meta_write.write.len = pkt->hdr.payload_len;
2586 schedule_notify(ch->lcid, SMUX_WRITE_FAIL, &meta_write);
2587 }
2588 smux_free_pkt(pkt);
2589 }
2590 spin_unlock(&ch->tx_lock_lhb2);
2591
2592 /* Send Close Command */
2593 if (ch->local_state == SMUX_LCH_LOCAL_OPENED ||
2594 ch->local_state == SMUX_LCH_LOCAL_OPENING) {
2595 SMUX_DBG("lcid %d local state 0x%x -> 0x%x\n", lcid,
2596 ch->local_state,
2597 SMUX_LCH_LOCAL_CLOSING);
2598
2599 ch->local_state = SMUX_LCH_LOCAL_CLOSING;
2600 pkt = smux_alloc_pkt();
2601 if (pkt) {
2602 pkt->hdr.cmd = SMUX_CMD_CLOSE_LCH;
2603 pkt->hdr.flags = 0;
2604 pkt->hdr.lcid = lcid;
2605 pkt->hdr.payload_len = 0;
2606 pkt->hdr.pad_len = 0;
2607 smux_tx_queue(pkt, ch, 0);
2608 tx_ready = 1;
2609 } else {
2610 pr_err("%s: pkt allocation failed\n", __func__);
2611 ret = -ENOMEM;
2612 }
Eric Holmbergb8435c82012-06-05 14:51:29 -06002613
2614 /* Purge RX retry queue */
2615 if (ch->rx_retry_queue_cnt)
2616 queue_delayed_work(smux_rx_wq, &ch->rx_retry_work, 0);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002617 }
2618 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2619
2620 if (tx_ready)
2621 list_channel(ch);
2622
2623 return ret;
2624}
2625
2626/**
2627 * Write data to a logical channel.
2628 *
2629 * @lcid Logical channel ID
2630 * @pkt_priv Client data that will be returned with the SMUX_WRITE_DONE or
2631 * SMUX_WRITE_FAIL notification.
2632 * @data Data to write
2633 * @len Length of @data
2634 *
2635 * @returns 0 for success, <0 otherwise
2636 *
2637 * Data may be written immediately after msm_smux_open() is called,
2638 * but the data will wait in the transmit queue until the channel has
2639 * been fully opened.
2640 *
2641 * Once the data has been written, the client will receive either a completion
2642 * (SMUX_WRITE_DONE) or a failure notice (SMUX_WRITE_FAIL).
2643 */
2644int msm_smux_write(uint8_t lcid, void *pkt_priv, const void *data, int len)
2645{
2646 struct smux_lch_t *ch;
2647 struct smux_pkt_t *pkt;
2648 int tx_ready = 0;
2649 unsigned long flags;
2650 int ret;
2651
2652 if (smux_assert_lch_id(lcid))
2653 return -ENXIO;
2654
2655 ch = &smux_lch[lcid];
2656 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2657
2658 if (ch->local_state != SMUX_LCH_LOCAL_OPENED &&
2659 ch->local_state != SMUX_LCH_LOCAL_OPENING) {
2660 pr_err("%s: hdr.invalid local state %d channel %d\n",
2661 __func__, ch->local_state, lcid);
2662 ret = -EINVAL;
2663 goto out;
2664 }
2665
2666 if (len > SMUX_MAX_PKT_SIZE - sizeof(struct smux_hdr_t)) {
2667 pr_err("%s: payload %d too large\n",
2668 __func__, len);
2669 ret = -E2BIG;
2670 goto out;
2671 }
2672
2673 pkt = smux_alloc_pkt();
2674 if (!pkt) {
2675 ret = -ENOMEM;
2676 goto out;
2677 }
2678
2679 pkt->hdr.cmd = SMUX_CMD_DATA;
2680 pkt->hdr.lcid = lcid;
2681 pkt->hdr.flags = 0;
2682 pkt->hdr.payload_len = len;
2683 pkt->payload = (void *)data;
2684 pkt->priv = pkt_priv;
2685 pkt->hdr.pad_len = 0;
2686
2687 spin_lock(&ch->tx_lock_lhb2);
2688 /* verify high watermark */
2689 SMUX_DBG("%s: pending %d", __func__, ch->tx_pending_data_cnt);
2690
2691 if (ch->tx_pending_data_cnt >= SMUX_WM_HIGH) {
2692 pr_err("%s: ch %d high watermark %d exceeded %d\n",
2693 __func__, lcid, SMUX_WM_HIGH,
2694 ch->tx_pending_data_cnt);
2695 ret = -EAGAIN;
2696 goto out_inner;
2697 }
2698
2699 /* queue packet for transmit */
2700 if (++ch->tx_pending_data_cnt == SMUX_WM_HIGH) {
2701 ch->notify_lwm = 1;
2702 pr_err("%s: high watermark hit\n", __func__);
2703 schedule_notify(lcid, SMUX_HIGH_WM_HIT, NULL);
2704 }
2705 list_add_tail(&pkt->list, &ch->tx_queue);
2706
2707 /* add to ready list */
2708 if (IS_FULLY_OPENED(ch))
2709 tx_ready = 1;
2710
2711 ret = 0;
2712
2713out_inner:
2714 spin_unlock(&ch->tx_lock_lhb2);
2715
2716out:
2717 if (ret)
2718 smux_free_pkt(pkt);
2719 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2720
2721 if (tx_ready)
2722 list_channel(ch);
2723
2724 return ret;
2725}
2726
2727/**
2728 * Returns true if the TX queue is currently full (high water mark).
2729 *
2730 * @lcid Logical channel ID
2731 * @returns 0 if channel is not full
2732 * 1 if it is full
2733 * < 0 for error
2734 */
2735int msm_smux_is_ch_full(uint8_t lcid)
2736{
2737 struct smux_lch_t *ch;
2738 unsigned long flags;
2739 int is_full = 0;
2740
2741 if (smux_assert_lch_id(lcid))
2742 return -ENXIO;
2743
2744 ch = &smux_lch[lcid];
2745
2746 spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
2747 if (ch->tx_pending_data_cnt >= SMUX_WM_HIGH)
2748 is_full = 1;
2749 spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);
2750
2751 return is_full;
2752}
2753
2754/**
2755 * Returns true if the TX queue has space for more packets it is at or
2756 * below the low water mark).
2757 *
2758 * @lcid Logical channel ID
2759 * @returns 0 if channel is above low watermark
2760 * 1 if it's at or below the low watermark
2761 * < 0 for error
2762 */
2763int msm_smux_is_ch_low(uint8_t lcid)
2764{
2765 struct smux_lch_t *ch;
2766 unsigned long flags;
2767 int is_low = 0;
2768
2769 if (smux_assert_lch_id(lcid))
2770 return -ENXIO;
2771
2772 ch = &smux_lch[lcid];
2773
2774 spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
2775 if (ch->tx_pending_data_cnt <= SMUX_WM_LOW)
2776 is_low = 1;
2777 spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);
2778
2779 return is_low;
2780}
2781
2782/**
2783 * Send TIOCM status update.
2784 *
2785 * @ch Channel for update
2786 *
2787 * @returns 0 for success, <0 for failure
2788 *
2789 * Channel lock must be held before calling.
2790 */
2791static int smux_send_status_cmd(struct smux_lch_t *ch)
2792{
2793 struct smux_pkt_t *pkt;
2794
2795 if (!ch)
2796 return -EINVAL;
2797
2798 pkt = smux_alloc_pkt();
2799 if (!pkt)
2800 return -ENOMEM;
2801
2802 pkt->hdr.lcid = ch->lcid;
2803 pkt->hdr.cmd = SMUX_CMD_STATUS;
2804 pkt->hdr.flags = ch->local_tiocm;
2805 pkt->hdr.payload_len = 0;
2806 pkt->hdr.pad_len = 0;
2807 smux_tx_queue(pkt, ch, 0);
2808
2809 return 0;
2810}
2811
2812/**
2813 * Internal helper function for getting the TIOCM status with
2814 * state_lock_lhb1 already locked.
2815 *
2816 * @ch Channel pointer
2817 *
2818 * @returns TIOCM status
2819 */
2820static long msm_smux_tiocm_get_atomic(struct smux_lch_t *ch)
2821{
2822 long status = 0x0;
2823
2824 status |= (ch->remote_tiocm & SMUX_CMD_STATUS_RTC) ? TIOCM_DSR : 0;
2825 status |= (ch->remote_tiocm & SMUX_CMD_STATUS_RTR) ? TIOCM_CTS : 0;
2826 status |= (ch->remote_tiocm & SMUX_CMD_STATUS_RI) ? TIOCM_RI : 0;
2827 status |= (ch->remote_tiocm & SMUX_CMD_STATUS_DCD) ? TIOCM_CD : 0;
2828
2829 status |= (ch->local_tiocm & SMUX_CMD_STATUS_RTC) ? TIOCM_DTR : 0;
2830 status |= (ch->local_tiocm & SMUX_CMD_STATUS_RTR) ? TIOCM_RTS : 0;
2831
2832 return status;
2833}
2834
2835/**
2836 * Get the TIOCM status bits.
2837 *
2838 * @lcid Logical channel ID
2839 *
2840 * @returns >= 0 TIOCM status bits
2841 * < 0 Error condition
2842 */
2843long msm_smux_tiocm_get(uint8_t lcid)
2844{
2845 struct smux_lch_t *ch;
2846 unsigned long flags;
2847 long status = 0x0;
2848
2849 if (smux_assert_lch_id(lcid))
2850 return -ENXIO;
2851
2852 ch = &smux_lch[lcid];
2853 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2854 status = msm_smux_tiocm_get_atomic(ch);
2855 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2856
2857 return status;
2858}
2859
2860/**
2861 * Set/clear the TIOCM status bits.
2862 *
2863 * @lcid Logical channel ID
2864 * @set Bits to set
2865 * @clear Bits to clear
2866 *
2867 * @returns 0 for success; < 0 for failure
2868 *
2869 * If a bit is specified in both the @set and @clear masks, then the clear bit
2870 * definition will dominate and the bit will be cleared.
2871 */
2872int msm_smux_tiocm_set(uint8_t lcid, uint32_t set, uint32_t clear)
2873{
2874 struct smux_lch_t *ch;
2875 unsigned long flags;
2876 uint8_t old_status;
2877 uint8_t status_set = 0x0;
2878 uint8_t status_clear = 0x0;
2879 int tx_ready = 0;
2880 int ret = 0;
2881
2882 if (smux_assert_lch_id(lcid))
2883 return -ENXIO;
2884
2885 ch = &smux_lch[lcid];
2886 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2887
2888 status_set |= (set & TIOCM_DTR) ? SMUX_CMD_STATUS_RTC : 0;
2889 status_set |= (set & TIOCM_RTS) ? SMUX_CMD_STATUS_RTR : 0;
2890 status_set |= (set & TIOCM_RI) ? SMUX_CMD_STATUS_RI : 0;
2891 status_set |= (set & TIOCM_CD) ? SMUX_CMD_STATUS_DCD : 0;
2892
2893 status_clear |= (clear & TIOCM_DTR) ? SMUX_CMD_STATUS_RTC : 0;
2894 status_clear |= (clear & TIOCM_RTS) ? SMUX_CMD_STATUS_RTR : 0;
2895 status_clear |= (clear & TIOCM_RI) ? SMUX_CMD_STATUS_RI : 0;
2896 status_clear |= (clear & TIOCM_CD) ? SMUX_CMD_STATUS_DCD : 0;
2897
2898 old_status = ch->local_tiocm;
2899 ch->local_tiocm |= status_set;
2900 ch->local_tiocm &= ~status_clear;
2901
2902 if (ch->local_tiocm != old_status) {
2903 ret = smux_send_status_cmd(ch);
2904 tx_ready = 1;
2905 }
2906 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2907
2908 if (tx_ready)
2909 list_channel(ch);
2910
2911 return ret;
2912}
2913
2914/**********************************************************************/
2915/* Line Discipline Interface */
2916/**********************************************************************/
2917static int smuxld_open(struct tty_struct *tty)
2918{
2919 int i;
2920 int tmp;
2921 unsigned long flags;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002922
2923 if (!smux.is_initialized)
2924 return -ENODEV;
2925
2926 spin_lock_irqsave(&smux.lock_lha0, flags);
2927 if (smux.ld_open_count) {
2928 pr_err("%s: %p multiple instances not supported\n",
2929 __func__, tty);
Eric Holmberg902c51e2012-05-29 12:12:16 -06002930 spin_unlock_irqrestore(&smux.lock_lha0, flags);
2931 return -EEXIST;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002932 }
2933
2934 ++smux.ld_open_count;
2935 if (tty->ops->write == NULL) {
Eric Holmberg902c51e2012-05-29 12:12:16 -06002936 spin_unlock_irqrestore(&smux.lock_lha0, flags);
2937 return -EINVAL;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002938 }
2939
2940 /* connect to TTY */
2941 smux.tty = tty;
2942 tty->disc_data = &smux;
2943 tty->receive_room = TTY_RECEIVE_ROOM;
2944 tty_driver_flush_buffer(tty);
2945
2946 /* power-down the UART if we are idle */
2947 spin_lock(&smux.tx_lock_lha2);
2948 if (smux.power_state == SMUX_PWR_OFF) {
2949 SMUX_DBG("%s: powering off uart\n", __func__);
2950 smux.power_state = SMUX_PWR_OFF_FLUSH;
2951 spin_unlock(&smux.tx_lock_lha2);
2952 queue_work(smux_tx_wq, &smux_inactivity_work);
2953 } else {
2954 spin_unlock(&smux.tx_lock_lha2);
2955 }
Eric Holmberg902c51e2012-05-29 12:12:16 -06002956 spin_unlock_irqrestore(&smux.lock_lha0, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002957
2958 /* register platform devices */
2959 for (i = 0; i < ARRAY_SIZE(smux_devs); ++i) {
2960 tmp = platform_device_register(&smux_devs[i]);
2961 if (tmp)
2962 pr_err("%s: error %d registering device %s\n",
2963 __func__, tmp, smux_devs[i].name);
2964 }
Eric Holmberg902c51e2012-05-29 12:12:16 -06002965 return 0;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002966}
2967
2968static void smuxld_close(struct tty_struct *tty)
2969{
2970 unsigned long flags;
2971 int i;
2972
2973 spin_lock_irqsave(&smux.lock_lha0, flags);
2974 if (smux.ld_open_count <= 0) {
2975 pr_err("%s: invalid ld count %d\n", __func__,
2976 smux.ld_open_count);
Eric Holmberg902c51e2012-05-29 12:12:16 -06002977 spin_unlock_irqrestore(&smux.lock_lha0, flags);
2978 return;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002979 }
Eric Holmberg902c51e2012-05-29 12:12:16 -06002980 spin_unlock_irqrestore(&smux.lock_lha0, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002981
2982 for (i = 0; i < ARRAY_SIZE(smux_devs); ++i)
2983 platform_device_unregister(&smux_devs[i]);
2984
2985 --smux.ld_open_count;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002986}
2987
2988/**
2989 * Receive data from TTY Line Discipline.
2990 *
2991 * @tty TTY structure
2992 * @cp Character data
2993 * @fp Flag data
2994 * @count Size of character and flag data
2995 */
2996void smuxld_receive_buf(struct tty_struct *tty, const unsigned char *cp,
2997 char *fp, int count)
2998{
2999 int i;
3000 int last_idx = 0;
3001 const char *tty_name = NULL;
3002 char *f;
3003
3004 if (smux_debug_mask & MSM_SMUX_DEBUG)
3005 print_hex_dump(KERN_INFO, "smux tty rx: ", DUMP_PREFIX_OFFSET,
3006 16, 1, cp, count, true);
3007
3008 /* verify error flags */
3009 for (i = 0, f = fp; i < count; ++i, ++f) {
3010 if (*f != TTY_NORMAL) {
3011 if (tty)
3012 tty_name = tty->name;
3013 pr_err("%s: TTY %s Error %d (%s)\n", __func__,
3014 tty_name, *f, tty_flag_to_str(*f));
3015
3016 /* feed all previous valid data to the parser */
3017 smux_rx_state_machine(cp + last_idx, i - last_idx,
3018 TTY_NORMAL);
3019
3020 /* feed bad data to parser */
3021 smux_rx_state_machine(cp + i, 1, *f);
3022 last_idx = i + 1;
3023 }
3024 }
3025
3026 /* feed data to RX state machine */
3027 smux_rx_state_machine(cp + last_idx, count - last_idx, TTY_NORMAL);
3028}
3029
3030static void smuxld_flush_buffer(struct tty_struct *tty)
3031{
3032 pr_err("%s: not supported\n", __func__);
3033}
3034
3035static ssize_t smuxld_chars_in_buffer(struct tty_struct *tty)
3036{
3037 pr_err("%s: not supported\n", __func__);
3038 return -ENODEV;
3039}
3040
3041static ssize_t smuxld_read(struct tty_struct *tty, struct file *file,
3042 unsigned char __user *buf, size_t nr)
3043{
3044 pr_err("%s: not supported\n", __func__);
3045 return -ENODEV;
3046}
3047
3048static ssize_t smuxld_write(struct tty_struct *tty, struct file *file,
3049 const unsigned char *buf, size_t nr)
3050{
3051 pr_err("%s: not supported\n", __func__);
3052 return -ENODEV;
3053}
3054
3055static int smuxld_ioctl(struct tty_struct *tty, struct file *file,
3056 unsigned int cmd, unsigned long arg)
3057{
3058 pr_err("%s: not supported\n", __func__);
3059 return -ENODEV;
3060}
3061
3062static unsigned int smuxld_poll(struct tty_struct *tty, struct file *file,
3063 struct poll_table_struct *tbl)
3064{
3065 pr_err("%s: not supported\n", __func__);
3066 return -ENODEV;
3067}
3068
3069static void smuxld_write_wakeup(struct tty_struct *tty)
3070{
3071 pr_err("%s: not supported\n", __func__);
3072}
3073
3074static struct tty_ldisc_ops smux_ldisc_ops = {
3075 .owner = THIS_MODULE,
3076 .magic = TTY_LDISC_MAGIC,
3077 .name = "n_smux",
3078 .open = smuxld_open,
3079 .close = smuxld_close,
3080 .flush_buffer = smuxld_flush_buffer,
3081 .chars_in_buffer = smuxld_chars_in_buffer,
3082 .read = smuxld_read,
3083 .write = smuxld_write,
3084 .ioctl = smuxld_ioctl,
3085 .poll = smuxld_poll,
3086 .receive_buf = smuxld_receive_buf,
3087 .write_wakeup = smuxld_write_wakeup
3088};
3089
3090static int __init smux_init(void)
3091{
3092 int ret;
3093
3094 spin_lock_init(&smux.lock_lha0);
3095
3096 spin_lock_init(&smux.rx_lock_lha1);
3097 smux.rx_state = SMUX_RX_IDLE;
3098 smux.power_state = SMUX_PWR_OFF;
3099 smux.pwr_wakeup_delay_us = 1;
3100 smux.powerdown_enabled = 0;
3101 smux.rx_activity_flag = 0;
3102 smux.tx_activity_flag = 0;
3103 smux.recv_len = 0;
3104 smux.tty = NULL;
3105 smux.ld_open_count = 0;
3106 smux.in_reset = 0;
3107 smux.is_initialized = 1;
3108 smux_byte_loopback = 0;
3109
3110 spin_lock_init(&smux.tx_lock_lha2);
3111 INIT_LIST_HEAD(&smux.lch_tx_ready_list);
3112
3113 ret = tty_register_ldisc(N_SMUX, &smux_ldisc_ops);
3114 if (ret != 0) {
3115 pr_err("%s: error %d registering line discipline\n",
3116 __func__, ret);
3117 return ret;
3118 }
3119
3120 ret = lch_init();
3121 if (ret != 0) {
3122 pr_err("%s: lch_init failed\n", __func__);
3123 return ret;
3124 }
3125
3126 return 0;
3127}
3128
3129static void __exit smux_exit(void)
3130{
3131 int ret;
3132
3133 ret = tty_unregister_ldisc(N_SMUX);
3134 if (ret != 0) {
3135 pr_err("%s error %d unregistering line discipline\n",
3136 __func__, ret);
3137 return;
3138 }
3139}
3140
3141module_init(smux_init);
3142module_exit(smux_exit);
3143
3144MODULE_DESCRIPTION("Serial Mux TTY Line Discipline");
3145MODULE_LICENSE("GPL v2");
3146MODULE_ALIAS_LDISC(N_SMUX);