1/* drivers/tty/n_smux.c
2 *
3 * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
4 *
5 * This software is licensed under the terms of the GNU General Public
6 * License version 2, as published by the Free Software Foundation, and
7 * may be copied, distributed, and modified under those terms.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 */
15#include <linux/module.h>
16#include <linux/types.h>
17#include <linux/errno.h>
18#include <linux/tty.h>
19#include <linux/tty_flip.h>
20#include <linux/tty_driver.h>
21#include <linux/smux.h>
22#include <linux/list.h>
23#include <linux/kfifo.h>
24#include <linux/slab.h>
25#include <linux/types.h>
26#include <linux/platform_device.h>
27#include <linux/delay.h>
28#include <mach/subsystem_notif.h>
29#include <mach/subsystem_restart.h>
30#include <mach/msm_serial_hs.h>
31#include "smux_private.h"
32#include "smux_loopback.h"
33
34#define SMUX_NOTIFY_FIFO_SIZE 128
35#define SMUX_TX_QUEUE_SIZE 256
36#define SMUX_WM_LOW 2
37#define SMUX_WM_HIGH 4
38#define SMUX_PKT_LOG_SIZE 80
39
40/* Maximum size we can accept in a single RX buffer */
41#define TTY_RECEIVE_ROOM 65536
42#define TTY_BUFFER_FULL_WAIT_MS 50
43
44/* maximum sleep time between wakeup attempts */
45#define SMUX_WAKEUP_DELAY_MAX (1 << 20)
46
47/* minimum delay for scheduling delayed work */
48#define SMUX_WAKEUP_DELAY_MIN (1 << 15)
49
50/* inactivity timeout for no rx/tx activity */
51#define SMUX_INACTIVITY_TIMEOUT_MS 1000
52
53/* RX get_rx_buffer retry timeout values */
54#define SMUX_RX_RETRY_MIN_MS (1 << 0) /* 1 ms */
55#define SMUX_RX_RETRY_MAX_MS (1 << 10) /* 1024 ms */
56
57enum {
58 MSM_SMUX_DEBUG = 1U << 0,
59 MSM_SMUX_INFO = 1U << 1,
60 MSM_SMUX_POWER_INFO = 1U << 2,
61 MSM_SMUX_PKT = 1U << 3,
62};
63
64static int smux_debug_mask;
65module_param_named(debug_mask, smux_debug_mask,
66 int, S_IRUGO | S_IWUSR | S_IWGRP);
67
68/* Simulated wakeup used for testing */
69int smux_byte_loopback;
70module_param_named(byte_loopback, smux_byte_loopback,
71 int, S_IRUGO | S_IWUSR | S_IWGRP);
72int smux_simulate_wakeup_delay = 1;
73module_param_named(simulate_wakeup_delay, smux_simulate_wakeup_delay,
74 int, S_IRUGO | S_IWUSR | S_IWGRP);
75
76#define SMUX_DBG(x...) do { \
77 if (smux_debug_mask & MSM_SMUX_DEBUG) \
78 pr_info(x); \
79} while (0)
80
81#define SMUX_PWR(x...) do { \
82 if (smux_debug_mask & MSM_SMUX_POWER_INFO) \
83 pr_info(x); \
84} while (0)
85
86#define SMUX_LOG_PKT_RX(pkt) do { \
87 if (smux_debug_mask & MSM_SMUX_PKT) \
88 smux_log_pkt(pkt, 1); \
89} while (0)
90
91#define SMUX_LOG_PKT_TX(pkt) do { \
92 if (smux_debug_mask & MSM_SMUX_PKT) \
93 smux_log_pkt(pkt, 0); \
94} while (0)
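/*
 * Editorial usage note: the macros above are gated by the debug_mask
 * module parameter declared earlier in this file.  Assuming the module is
 * built as n_smux, the categories can be enabled at runtime with, e.g.:
 *
 *   echo 0x0d > /sys/module/n_smux/parameters/debug_mask
 *
 * which sets MSM_SMUX_DEBUG (1<<0), MSM_SMUX_POWER_INFO (1<<2), and
 * MSM_SMUX_PKT (1<<3), turning on SMUX_DBG, SMUX_PWR, and the packet dumps
 * from SMUX_LOG_PKT_RX/TX.  The sysfs path is an assumption based on the
 * module_param_named() declarations, not taken from this file.
 */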
95
96/**
97 * Return true if channel is fully opened (both
98 * local and remote sides are in the OPENED state).
99 */
100#define IS_FULLY_OPENED(ch) \
101 (ch && (ch)->local_state == SMUX_LCH_LOCAL_OPENED \
102 && (ch)->remote_state == SMUX_LCH_REMOTE_OPENED)
103
104static struct platform_device smux_devs[] = {
105 {.name = "SMUX_CTL", .id = -1},
106 {.name = "SMUX_RMNET", .id = -1},
107 {.name = "SMUX_DUN_DATA_HSUART", .id = 0},
108 {.name = "SMUX_RMNET_DATA_HSUART", .id = 1},
109 {.name = "SMUX_RMNET_CTL_HSUART", .id = 0},
110 {.name = "SMUX_DIAG", .id = -1},
111};
112
113enum {
114 SMUX_CMD_STATUS_RTC = 1 << 0,
115 SMUX_CMD_STATUS_RTR = 1 << 1,
116 SMUX_CMD_STATUS_RI = 1 << 2,
117 SMUX_CMD_STATUS_DCD = 1 << 3,
118 SMUX_CMD_STATUS_FLOW_CNTL = 1 << 4,
119};
120
121/* Channel mode */
122enum {
123 SMUX_LCH_MODE_NORMAL,
124 SMUX_LCH_MODE_LOCAL_LOOPBACK,
125 SMUX_LCH_MODE_REMOTE_LOOPBACK,
126};
127
128enum {
129 SMUX_RX_IDLE,
130 SMUX_RX_MAGIC,
131 SMUX_RX_HDR,
132 SMUX_RX_PAYLOAD,
133 SMUX_RX_FAILURE,
134};
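/*
 * Editorial note: these states drive the byte-level RX parser implemented
 * by the smux_rx_handle_*() functions later in this file.  For a
 * well-formed packet the flow is:
 *
 *   SMUX_RX_IDLE  --SMUX_MAGIC_WORD1--> SMUX_RX_MAGIC
 *   SMUX_RX_MAGIC --SMUX_MAGIC_WORD2--> SMUX_RX_HDR
 *   SMUX_RX_HDR   --header complete --> SMUX_RX_PAYLOAD
 *
 * Single-byte wakeup characters are handled directly in SMUX_RX_IDLE, and
 * SMUX_RX_FAILURE is entered if a TTY error arrives mid-packet.
 */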
135
136/**
137 * Power states.
138 *
139 * The _FLUSH states are internal transitional states and are not part of the
140 * official state machine.
141 */
142enum {
143 SMUX_PWR_OFF,
144 SMUX_PWR_TURNING_ON,
145 SMUX_PWR_ON,
146 SMUX_PWR_TURNING_OFF_FLUSH,
147 SMUX_PWR_TURNING_OFF,
148 SMUX_PWR_OFF_FLUSH,
149};
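/*
 * Editorial sketch of the usual transitions, inferred from the handlers in
 * this file: a wakeup request/ack exchange moves OFF -> TURNING_ON -> ON,
 * while an inactivity timeout or a remote sleep request moves
 * ON -> TURNING_OFF_FLUSH -> TURNING_OFF -> OFF_FLUSH -> OFF once the
 * power-control packets have been flushed to the TTY.
 */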
150
151/**
152 * Logical Channel Structure. One instance per channel.
153 *
154 * Locking Hierarchy
155 * Each lock has a postfix that describes the locking level. If multiple locks
156 * are required, only increasing lock hierarchy numbers may be locked, which
157 * ensures that deadlocks are avoided.
158 *
159 * Locking Example
160 * If state_lock_lhb1 is currently held and the TX list needs to be
161 * manipulated, then tx_lock_lhb2 may be locked since its locking hierarchy
162 * is greater. However, if tx_lock_lhb2 is held, then state_lock_lhb1 may
163 * not be acquired since it would result in a deadlock.
164 *
165 * Note that the Line Discipline locks (*_lha) should always be acquired
166 * before the logical channel locks.
167 */
168struct smux_lch_t {
169 /* channel state */
170 spinlock_t state_lock_lhb1;
171 uint8_t lcid;
172 unsigned local_state;
173 unsigned local_mode;
174 uint8_t local_tiocm;
175
176 unsigned remote_state;
177 unsigned remote_mode;
178 uint8_t remote_tiocm;
179
180 int tx_flow_control;
181
182 /* client callbacks and private data */
183 void *priv;
184 void (*notify)(void *priv, int event_type, const void *metadata);
185 int (*get_rx_buffer)(void *priv, void **pkt_priv, void **buffer,
186 int size);
187
188 /* RX Info */
189 struct list_head rx_retry_queue;
190 unsigned rx_retry_queue_cnt;
191 struct delayed_work rx_retry_work;
192
193 /* TX Info */
194 spinlock_t tx_lock_lhb2;
195 struct list_head tx_queue;
196 struct list_head tx_ready_list;
197 unsigned tx_pending_data_cnt;
198 unsigned notify_lwm;
199};
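/*
 * Editorial example (not part of the original source): following the lock
 * hierarchy described above, code that already holds state_lock_lhb1 may
 * take tx_lock_lhb2, never the reverse:
 *
 *   spin_lock_irqsave(&ch->state_lock_lhb1, flags);
 *   spin_lock(&ch->tx_lock_lhb2);
 *   ...
 *   spin_unlock(&ch->tx_lock_lhb2);
 *   spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
 *
 * smux_lch_purge() below uses exactly this ordering when it empties the
 * per-channel TX queues.
 */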
200
201union notifier_metadata {
202 struct smux_meta_disconnected disconnected;
203 struct smux_meta_read read;
204 struct smux_meta_write write;
205 struct smux_meta_tiocm tiocm;
206};
207
208struct smux_notify_handle {
209 void (*notify)(void *priv, int event_type, const void *metadata);
210 void *priv;
211 int event_type;
212 union notifier_metadata *metadata;
213};
214
215/**
216 * Get RX Buffer Retry structure.
217 *
218 * This is used for clients that are unable to provide an RX buffer
219 * immediately. This structure temporarily holds the packet data so that
220 * the buffer request can be retried later.
221 */
222struct smux_rx_pkt_retry {
223 struct smux_pkt_t *pkt;
224 struct list_head rx_retry_list;
225 unsigned timeout_in_ms;
226};
227
228/**
229 * Receive worker data structure.
230 *
231 * One instance is created for every call to smux_rx_state_machine.
232 */
233struct smux_rx_worker_data {
234 const unsigned char *data;
235 int len;
236 int flag;
237
238 struct work_struct work;
239 struct completion work_complete;
240};
241
242/**
243 * Line discipline and module structure.
244 *
245 * Only one instance exists, since multiple instances of the line
246 * discipline are not allowed.
247 */
248struct smux_ldisc_t {
249 struct mutex mutex_lha0;
250
251 int is_initialized;
252 int in_reset;
253 int ld_open_count;
254 struct tty_struct *tty;
255
256 /* RX State Machine (single-threaded access by smux_rx_wq) */
257 unsigned char recv_buf[SMUX_MAX_PKT_SIZE];
258 unsigned int recv_len;
259 unsigned int pkt_remain;
260 unsigned rx_state;
261
262 /* RX Activity - accessed by multiple threads */
263 spinlock_t rx_lock_lha1;
264 unsigned rx_activity_flag;
265
266 /* TX / Power */
267 spinlock_t tx_lock_lha2;
268 struct list_head lch_tx_ready_list;
269 unsigned power_state;
270 unsigned pwr_wakeup_delay_us;
271 unsigned tx_activity_flag;
272 unsigned powerdown_enabled;
273 struct list_head power_queue;
274};
275
276
277/* data structures */
278static struct smux_lch_t smux_lch[SMUX_NUM_LOGICAL_CHANNELS];
279static struct smux_ldisc_t smux;
280static const char *tty_error_type[] = {
281 [TTY_NORMAL] = "normal",
282 [TTY_OVERRUN] = "overrun",
283 [TTY_BREAK] = "break",
284 [TTY_PARITY] = "parity",
285 [TTY_FRAME] = "framing",
286};
287
288static const char *smux_cmds[] = {
289 [SMUX_CMD_DATA] = "DATA",
290 [SMUX_CMD_OPEN_LCH] = "OPEN",
291 [SMUX_CMD_CLOSE_LCH] = "CLOSE",
292 [SMUX_CMD_STATUS] = "STATUS",
293 [SMUX_CMD_PWR_CTL] = "PWR",
294 [SMUX_CMD_BYTE] = "Raw Byte",
295};
296
297static void smux_notify_local_fn(struct work_struct *work);
298static DECLARE_WORK(smux_notify_local, smux_notify_local_fn);
299
300static struct workqueue_struct *smux_notify_wq;
301static size_t handle_size;
302static struct kfifo smux_notify_fifo;
303static int queued_fifo_notifications;
304static DEFINE_SPINLOCK(notify_lock_lhc1);
305
306static struct workqueue_struct *smux_tx_wq;
307static struct workqueue_struct *smux_rx_wq;
308static void smux_tx_worker(struct work_struct *work);
309static DECLARE_WORK(smux_tx_work, smux_tx_worker);
310
311static void smux_wakeup_worker(struct work_struct *work);
312static void smux_rx_retry_worker(struct work_struct *work);
313static void smux_rx_worker(struct work_struct *work);
314static DECLARE_WORK(smux_wakeup_work, smux_wakeup_worker);
315static DECLARE_DELAYED_WORK(smux_wakeup_delayed_work, smux_wakeup_worker);
316
317static void smux_inactivity_worker(struct work_struct *work);
318static DECLARE_WORK(smux_inactivity_work, smux_inactivity_worker);
319static DECLARE_DELAYED_WORK(smux_delayed_inactivity_work,
320 smux_inactivity_worker);
321
322static long msm_smux_tiocm_get_atomic(struct smux_lch_t *ch);
323static void list_channel(struct smux_lch_t *ch);
324static int smux_send_status_cmd(struct smux_lch_t *ch);
325static int smux_dispatch_rx_pkt(struct smux_pkt_t *pkt);
326static void smux_flush_tty(void);
327static void smux_purge_ch_tx_queue(struct smux_lch_t *ch);
328static int schedule_notify(uint8_t lcid, int event,
329 const union notifier_metadata *metadata);
330static int ssr_notifier_cb(struct notifier_block *this,
331 unsigned long code,
332 void *data);
333static void smux_uart_power_on_atomic(void);
334
335/**
336 * Convert TTY Error Flags to string for logging purposes.
337 *
338 * @flag TTY_* flag
339 * @returns String description or NULL if unknown
340 */
341static const char *tty_flag_to_str(unsigned flag)
342{
343 if (flag < ARRAY_SIZE(tty_error_type))
344 return tty_error_type[flag];
345 return NULL;
346}
347
348/**
349 * Convert SMUX Command to string for logging purposes.
350 *
351 * @cmd SMUX command
352 * @returns String description or NULL if unknown
353 */
354static const char *cmd_to_str(unsigned cmd)
355{
356 if (cmd < ARRAY_SIZE(smux_cmds))
357 return smux_cmds[cmd];
358 return NULL;
359}
360
361/**
362 * Set the reset state due to an unrecoverable failure.
363 */
364static void smux_enter_reset(void)
365{
366 pr_err("%s: unrecoverable failure, waiting for ssr\n", __func__);
367 smux.in_reset = 1;
368}
369
370static int lch_init(void)
371{
372 unsigned int id;
373 struct smux_lch_t *ch;
374 int i = 0;
375
376 handle_size = sizeof(struct smux_notify_handle *);
377
378 smux_notify_wq = create_singlethread_workqueue("smux_notify_wq");
379 smux_tx_wq = create_singlethread_workqueue("smux_tx_wq");
380 smux_rx_wq = create_singlethread_workqueue("smux_rx_wq");
381
382 if (!smux_notify_wq || !smux_tx_wq || !smux_rx_wq) {
383 SMUX_DBG("%s: create_singlethread_workqueue ENOMEM\n",
384 __func__);
385 return -ENOMEM;
386 }
387
388 i |= kfifo_alloc(&smux_notify_fifo,
389 SMUX_NOTIFY_FIFO_SIZE * handle_size,
390 GFP_KERNEL);
391 i |= smux_loopback_init();
392
393 if (i) {
394 pr_err("%s: out of memory error\n", __func__);
395 return -ENOMEM;
396 }
397
398 for (id = 0 ; id < SMUX_NUM_LOGICAL_CHANNELS; id++) {
399 ch = &smux_lch[id];
400
401 spin_lock_init(&ch->state_lock_lhb1);
402 ch->lcid = id;
403 ch->local_state = SMUX_LCH_LOCAL_CLOSED;
404 ch->local_mode = SMUX_LCH_MODE_NORMAL;
405 ch->local_tiocm = 0x0;
406 ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
407 ch->remote_mode = SMUX_LCH_MODE_NORMAL;
408 ch->remote_tiocm = 0x0;
409 ch->tx_flow_control = 0;
410 ch->priv = 0;
411 ch->notify = 0;
412 ch->get_rx_buffer = 0;
413
414 INIT_LIST_HEAD(&ch->rx_retry_queue);
415 ch->rx_retry_queue_cnt = 0;
416 INIT_DELAYED_WORK(&ch->rx_retry_work, smux_rx_retry_worker);
417
418 spin_lock_init(&ch->tx_lock_lhb2);
419 INIT_LIST_HEAD(&ch->tx_queue);
420 INIT_LIST_HEAD(&ch->tx_ready_list);
421 ch->tx_pending_data_cnt = 0;
422 ch->notify_lwm = 0;
423 }
424
425 return 0;
426}
427
428/**
429 * Empty and cleanup all SMUX logical channels for subsystem restart or line
430 * discipline disconnect.
431 */
432static void smux_lch_purge(void)
433{
434 struct smux_lch_t *ch;
435 unsigned long flags;
436 int i;
437
438 /* Empty TX ready list */
439 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
440 while (!list_empty(&smux.lch_tx_ready_list)) {
441 SMUX_DBG("%s: emptying ready list %p\n",
442 __func__, smux.lch_tx_ready_list.next);
443 ch = list_first_entry(&smux.lch_tx_ready_list,
444 struct smux_lch_t,
445 tx_ready_list);
446 list_del(&ch->tx_ready_list);
447 INIT_LIST_HEAD(&ch->tx_ready_list);
448 }
449
450 /* Purge Power Queue */
451 while (!list_empty(&smux.power_queue)) {
452 struct smux_pkt_t *pkt;
453
454 pkt = list_first_entry(&smux.power_queue,
455 struct smux_pkt_t,
456 list);
457 list_del(&pkt->list);
458 SMUX_DBG("%s: emptying power queue pkt=%p\n",
459 __func__, pkt);
460 smux_free_pkt(pkt);
461 }
462 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
463
464 /* Close all ports */
465 for (i = 0 ; i < SMUX_NUM_LOGICAL_CHANNELS; i++) {
466 ch = &smux_lch[i];
467 SMUX_DBG("%s: cleaning up lcid %d\n", __func__, i);
468
469 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
470
471 /* Purge TX queue */
472 spin_lock(&ch->tx_lock_lhb2);
473 smux_purge_ch_tx_queue(ch);
474 spin_unlock(&ch->tx_lock_lhb2);
475
476 /* Notify user of disconnect and reset channel state */
477 if (ch->local_state == SMUX_LCH_LOCAL_OPENED ||
478 ch->local_state == SMUX_LCH_LOCAL_CLOSING) {
479 union notifier_metadata meta;
480
481 meta.disconnected.is_ssr = smux.in_reset;
482 schedule_notify(ch->lcid, SMUX_DISCONNECTED, &meta);
483 }
484
485 ch->local_state = SMUX_LCH_LOCAL_CLOSED;
486 ch->local_mode = SMUX_LCH_MODE_NORMAL;
487 ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
488 ch->remote_mode = SMUX_LCH_MODE_NORMAL;
489 ch->tx_flow_control = 0;
490
491 /* Purge RX retry queue */
492 if (ch->rx_retry_queue_cnt)
493 queue_delayed_work(smux_rx_wq, &ch->rx_retry_work, 0);
494
495 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
496 }
497
498 /* Flush TX/RX workqueues */
499 SMUX_DBG("%s: flushing tx wq\n", __func__);
500 flush_workqueue(smux_tx_wq);
501 SMUX_DBG("%s: flushing rx wq\n", __func__);
502 flush_workqueue(smux_rx_wq);
503}
504
505int smux_assert_lch_id(uint32_t lcid)
506{
507 if (lcid >= SMUX_NUM_LOGICAL_CHANNELS)
508 return -ENXIO;
509 else
510 return 0;
511}
512
513/**
514 * Log packet information for debug purposes.
515 *
516 * @pkt Packet to log
517 * @is_recv 1 = RX packet; 0 = TX Packet
518 *
519 * [DIR][LCID] [LOCAL_STATE][LOCAL_MODE]:[REMOTE_STATE][REMOTE_MODE] PKT Info
520 *
521 * PKT Info:
522 * [CMD] flags [flags] len [PAYLOAD_LEN]:[PAD_LEN] [Payload hex bytes]
523 *
524 * Direction: R = Receive, S = Send
525 * Local State: C = Closed; c = closing; o = opening; O = Opened
526 * Local Mode: L = Local loopback; R = Remote loopback; N = Normal
527 * Remote State: C = Closed; O = Opened
528 * Remote Mode: R = Remote loopback; N = Normal
529 */
530static void smux_log_pkt(struct smux_pkt_t *pkt, int is_recv)
531{
532 char logbuf[SMUX_PKT_LOG_SIZE];
533 char cmd_extra[16];
534 int i = 0;
535 int count;
536 int len;
537 char local_state;
538 char local_mode;
539 char remote_state;
540 char remote_mode;
541 struct smux_lch_t *ch = NULL;
542 unsigned char *data;
543
544 if (!smux_assert_lch_id(pkt->hdr.lcid))
545 ch = &smux_lch[pkt->hdr.lcid];
546
547 if (ch) {
548 switch (ch->local_state) {
549 case SMUX_LCH_LOCAL_CLOSED:
550 local_state = 'C';
551 break;
552 case SMUX_LCH_LOCAL_OPENING:
553 local_state = 'o';
554 break;
555 case SMUX_LCH_LOCAL_OPENED:
556 local_state = 'O';
557 break;
558 case SMUX_LCH_LOCAL_CLOSING:
559 local_state = 'c';
560 break;
561 default:
562 local_state = 'U';
563 break;
564 }
565
566 switch (ch->local_mode) {
567 case SMUX_LCH_MODE_LOCAL_LOOPBACK:
568 local_mode = 'L';
569 break;
570 case SMUX_LCH_MODE_REMOTE_LOOPBACK:
571 local_mode = 'R';
572 break;
573 case SMUX_LCH_MODE_NORMAL:
574 local_mode = 'N';
575 break;
576 default:
577 local_mode = 'U';
578 break;
579 }
580
581 switch (ch->remote_state) {
582 case SMUX_LCH_REMOTE_CLOSED:
583 remote_state = 'C';
584 break;
585 case SMUX_LCH_REMOTE_OPENED:
586 remote_state = 'O';
587 break;
588
589 default:
590 remote_state = 'U';
591 break;
592 }
593
594 switch (ch->remote_mode) {
595 case SMUX_LCH_MODE_REMOTE_LOOPBACK:
596 remote_mode = 'R';
597 break;
598 case SMUX_LCH_MODE_NORMAL:
599 remote_mode = 'N';
600 break;
601 default:
602 remote_mode = 'U';
603 break;
604 }
605 } else {
606 /* broadcast channel */
607 local_state = '-';
608 local_mode = '-';
609 remote_state = '-';
610 remote_mode = '-';
611 }
612
613 /* determine command type (ACK, etc) */
614 cmd_extra[0] = '\0';
615 switch (pkt->hdr.cmd) {
616 case SMUX_CMD_OPEN_LCH:
617 if (pkt->hdr.flags & SMUX_CMD_OPEN_ACK)
618 snprintf(cmd_extra, sizeof(cmd_extra), " ACK");
619 break;
620 case SMUX_CMD_CLOSE_LCH:
621 if (pkt->hdr.flags & SMUX_CMD_CLOSE_ACK)
622 snprintf(cmd_extra, sizeof(cmd_extra), " ACK");
623 break;
624 };
625
626 i += snprintf(logbuf + i, SMUX_PKT_LOG_SIZE - i,
627 "smux: %c%d %c%c:%c%c %s%s flags %x len %d:%d ",
628 is_recv ? 'R' : 'S', pkt->hdr.lcid,
629 local_state, local_mode,
630 remote_state, remote_mode,
631 cmd_to_str(pkt->hdr.cmd), cmd_extra, pkt->hdr.flags,
632 pkt->hdr.payload_len, pkt->hdr.pad_len);
633
634 len = (pkt->hdr.payload_len > 16) ? 16 : pkt->hdr.payload_len;
635 data = (unsigned char *)pkt->payload;
636 for (count = 0; count < len; count++)
637 i += snprintf(logbuf + i, SMUX_PKT_LOG_SIZE - i,
638 "%02x ", (unsigned)data[count]);
639
640 pr_info("%s\n", logbuf);
641}
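/*
 * Illustrative example (added editorially): with MSM_SMUX_PKT logging
 * enabled, a received 4-byte DATA packet on an open, normal-mode channel 3
 * would be logged roughly as
 *
 *   smux: R3 ON:ON DATA flags 0 len 4:0 01 02 03 04
 *
 * per the format documented above smux_log_pkt().
 */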
642
643static void smux_notify_local_fn(struct work_struct *work)
644{
645 struct smux_notify_handle *notify_handle = NULL;
646 union notifier_metadata *metadata = NULL;
647 unsigned long flags;
648 int i;
649
650 for (;;) {
651 /* retrieve notification */
652 spin_lock_irqsave(&notify_lock_lhc1, flags);
653 if (kfifo_len(&smux_notify_fifo) >= handle_size) {
654 i = kfifo_out(&smux_notify_fifo,
655 &notify_handle,
656 handle_size);
657 if (i != handle_size) {
658 pr_err("%s: unable to retrieve handle %d expected %d\n",
659 __func__, i, handle_size);
660 spin_unlock_irqrestore(&notify_lock_lhc1, flags);
661 break;
662 }
663 } else {
664 spin_unlock_irqrestore(&notify_lock_lhc1, flags);
665 break;
666 }
667 --queued_fifo_notifications;
668 spin_unlock_irqrestore(&notify_lock_lhc1, flags);
669
670 /* notify client */
671 metadata = notify_handle->metadata;
672 notify_handle->notify(notify_handle->priv,
673 notify_handle->event_type,
674 metadata);
675
676 kfree(metadata);
677 kfree(notify_handle);
678 }
679}
680
681/**
682 * Initialize existing packet.
683 */
684void smux_init_pkt(struct smux_pkt_t *pkt)
685{
686 memset(pkt, 0x0, sizeof(*pkt));
687 pkt->hdr.magic = SMUX_MAGIC;
688 INIT_LIST_HEAD(&pkt->list);
689}
690
691/**
692 * Allocate and initialize packet.
693 *
694 * If a payload is needed, either set it directly and ensure that it's freed or
695 * use smux_alloc_pkt_payload() to allocate the payload, and it will be freed
696 * automatically when smux_free_pkt() is called.
697 */
698struct smux_pkt_t *smux_alloc_pkt(void)
699{
700 struct smux_pkt_t *pkt;
701
702 /* Consider a free list implementation instead of kmalloc */
703 pkt = kmalloc(sizeof(struct smux_pkt_t), GFP_ATOMIC);
704 if (!pkt) {
705 pr_err("%s: out of memory\n", __func__);
706 return NULL;
707 }
708 smux_init_pkt(pkt);
709 pkt->allocated = 1;
710
711 return pkt;
712}
713
714/**
715 * Free packet.
716 *
717 * @pkt Packet to free (may be NULL)
718 *
719 * If payload was allocated using smux_alloc_pkt_payload(), then it is freed as
720 * well. Otherwise, the caller is responsible for freeing the payload.
721 */
722void smux_free_pkt(struct smux_pkt_t *pkt)
723{
724 if (pkt) {
725 if (pkt->free_payload)
726 kfree(pkt->payload);
727 if (pkt->allocated)
728 kfree(pkt);
729 }
730}
731
732/**
733 * Allocate packet payload.
734 *
735 * @pkt Packet to add payload to
736 *
737 * @returns 0 on success, <0 upon error
738 *
739 * A flag is set to signal smux_free_pkt() to free the payload.
740 */
741int smux_alloc_pkt_payload(struct smux_pkt_t *pkt)
742{
743 if (!pkt)
744 return -EINVAL;
745
746 pkt->payload = kmalloc(pkt->hdr.payload_len, GFP_ATOMIC);
747 pkt->free_payload = 1;
748 if (!pkt->payload) {
749 pr_err("%s: unable to malloc %d bytes for payload\n",
750 __func__, pkt->hdr.payload_len);
751 return -ENOMEM;
752 }
753
754 return 0;
755}
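/*
 * Editorial sketch of typical packet construction, modeled on the command
 * handlers in this file (the function names are real, the sequence is
 * illustrative only):
 *
 *   struct smux_pkt_t *pkt = smux_alloc_pkt();
 *   if (pkt) {
 *           pkt->hdr.cmd = SMUX_CMD_DATA;
 *           pkt->hdr.lcid = lcid;
 *           pkt->hdr.flags = 0;
 *           pkt->hdr.payload_len = len;
 *           pkt->hdr.pad_len = 0;
 *           if (!smux_alloc_pkt_payload(pkt))
 *                   memcpy(pkt->payload, buf, len);
 *           smux_tx_queue(pkt, ch, 1);
 *   }
 *
 * Because smux_alloc_pkt_payload() sets free_payload, the payload is
 * released along with the packet when the TX path calls smux_free_pkt().
 */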
756
757static int schedule_notify(uint8_t lcid, int event,
758 const union notifier_metadata *metadata)
759{
760 struct smux_notify_handle *notify_handle = 0;
761 union notifier_metadata *meta_copy = 0;
762 struct smux_lch_t *ch;
763 int i;
764 unsigned long flags;
765 int ret = 0;
766
767 ch = &smux_lch[lcid];
768 notify_handle = kzalloc(sizeof(struct smux_notify_handle),
769 GFP_ATOMIC);
770 if (!notify_handle) {
771 pr_err("%s: out of memory\n", __func__);
772 ret = -ENOMEM;
773 goto free_out;
774 }
775
776 notify_handle->notify = ch->notify;
777 notify_handle->priv = ch->priv;
778 notify_handle->event_type = event;
779 if (metadata) {
780 meta_copy = kzalloc(sizeof(union notifier_metadata),
781 GFP_ATOMIC);
782 if (!meta_copy) {
783 pr_err("%s: out of memory\n", __func__);
784 ret = -ENOMEM;
785 goto free_out;
786 }
787 *meta_copy = *metadata;
788 notify_handle->metadata = meta_copy;
789 } else {
790 notify_handle->metadata = NULL;
791 }
792
793 spin_lock_irqsave(&notify_lock_lhc1, flags);
794 i = kfifo_avail(&smux_notify_fifo);
795 if (i < handle_size) {
796 pr_err("%s: fifo full error %d expected %d\n",
797 __func__, i, handle_size);
798 ret = -ENOMEM;
799 goto unlock_out;
800 }
801
802 i = kfifo_in(&smux_notify_fifo, &notify_handle, handle_size);
803 if (i < 0 || i != handle_size) {
804 pr_err("%s: fifo not available error %d (expected %d)\n",
805 __func__, i, handle_size);
806 ret = -ENOSPC;
807 goto unlock_out;
808 }
809 ++queued_fifo_notifications;
810
811unlock_out:
812 spin_unlock_irqrestore(&notify_lock_lhc1, flags);
813
814free_out:
815 queue_work(smux_notify_wq, &smux_notify_local);
816 if (ret < 0 && notify_handle) {
817 kfree(notify_handle->metadata);
818 kfree(notify_handle);
819 }
820 return ret;
821}
822
823/**
824 * Returns the serialized size of a packet.
825 *
826 * @pkt Packet to serialize
827 *
828 * @returns Serialized length of packet
829 */
830static unsigned int smux_serialize_size(struct smux_pkt_t *pkt)
831{
832 unsigned int size;
833
834 size = sizeof(struct smux_hdr_t);
835 size += pkt->hdr.payload_len;
836 size += pkt->hdr.pad_len;
837
838 return size;
839}
840
841/**
842 * Serialize packet @pkt into output buffer @data.
843 *
844 * @pkt Packet to serialize
845 * @out Destination buffer pointer
846 * @out_len Size of serialized packet
847 *
848 * @returns 0 for success
849 */
850int smux_serialize(struct smux_pkt_t *pkt, char *out,
851 unsigned int *out_len)
852{
853 char *data_start = out;
854
855 if (smux_serialize_size(pkt) > SMUX_MAX_PKT_SIZE) {
856 pr_err("%s: packet size %d too big\n",
857 __func__, smux_serialize_size(pkt));
858 return -E2BIG;
859 }
860
861 memcpy(out, &pkt->hdr, sizeof(struct smux_hdr_t));
862 out += sizeof(struct smux_hdr_t);
863 if (pkt->payload) {
864 memcpy(out, pkt->payload, pkt->hdr.payload_len);
865 out += pkt->hdr.payload_len;
866 }
867 if (pkt->hdr.pad_len) {
868 memset(out, 0x0, pkt->hdr.pad_len);
869 out += pkt->hdr.pad_len;
870 }
871 *out_len = out - data_start;
872 return 0;
873}
874
875/**
876 * Serialize header and provide pointer to the data.
877 *
878 * @pkt Packet
879 * @out[out] Pointer to the serialized header data
880 * @out_len[out] Pointer to the serialized header length
881 */
882static void smux_serialize_hdr(struct smux_pkt_t *pkt, char **out,
883 unsigned int *out_len)
884{
885 *out = (char *)&pkt->hdr;
886 *out_len = sizeof(struct smux_hdr_t);
887}
888
889/**
890 * Serialize payload and provide pointer to the data.
891 *
892 * @pkt Packet
893 * @out[out] Pointer to the serialized payload data
894 * @out_len[out] Pointer to the serialized payload length
895 */
896static void smux_serialize_payload(struct smux_pkt_t *pkt, char **out,
897 unsigned int *out_len)
898{
899 *out = pkt->payload;
900 *out_len = pkt->hdr.payload_len;
901}
902
903/**
904 * Serialize padding and provide pointer to the data.
905 *
906 * @pkt Packet
907 * @out[out] Pointer to the serialized padding (always NULL)
908 * @out_len[out] Pointer to the serialized payload length
909 *
910 * Since the padding field value is undefined, only the size of the padding
911 * (@out_len) is set and the buffer pointer (@out) will always be NULL.
912 */
913static void smux_serialize_padding(struct smux_pkt_t *pkt, char **out,
914 unsigned int *out_len)
915{
916 *out = NULL;
917 *out_len = pkt->hdr.pad_len;
918}
919
920/**
921 * Write data to TTY framework and handle breaking the writes up if needed.
922 *
923 * @data Data to write
924 * @len Length of data
925 *
926 * @returns 0 for success, < 0 for failure
927 */
928static int write_to_tty(char *data, unsigned len)
929{
930 int data_written;
931
932 if (!data)
933 return 0;
934
935 while (len > 0 && !smux.in_reset) {
936 data_written = smux.tty->ops->write(smux.tty, data, len);
937 if (data_written >= 0) {
938 len -= data_written;
939 data += data_written;
940 } else {
941 pr_err("%s: TTY write returned error %d\n",
942 __func__, data_written);
943 return data_written;
944 }
945
946 if (len)
947 tty_wait_until_sent(smux.tty,
948 msecs_to_jiffies(TTY_BUFFER_FULL_WAIT_MS));
949 }
950 return 0;
951}
952
953/**
954 * Write packet to TTY.
955 *
956 * @pkt packet to write
957 *
958 * @returns 0 on success
959 */
960static int smux_tx_tty(struct smux_pkt_t *pkt)
961{
962 char *data;
963 unsigned int len;
964 int ret;
965
966 if (!smux.tty) {
967 pr_err("%s: TTY not initialized", __func__);
968 return -ENOTTY;
969 }
970
971 if (pkt->hdr.cmd == SMUX_CMD_BYTE) {
972 SMUX_DBG("%s: tty send single byte\n", __func__);
973 ret = write_to_tty(&pkt->hdr.flags, 1);
974 return ret;
975 }
976
977 smux_serialize_hdr(pkt, &data, &len);
978 ret = write_to_tty(data, len);
979 if (ret) {
980 pr_err("%s: failed %d to write header %d\n",
981 __func__, ret, len);
982 return ret;
983 }
984
985 smux_serialize_payload(pkt, &data, &len);
986 ret = write_to_tty(data, len);
987 if (ret) {
988 pr_err("%s: failed %d to write payload %d\n",
989 __func__, ret, len);
990 return ret;
991 }
992
993 smux_serialize_padding(pkt, &data, &len);
994 while (len > 0) {
995 char zero = 0x0;
996 ret = write_to_tty(&zero, 1);
997 if (ret) {
998 pr_err("%s: failed %d to write padding %d\n",
999 __func__, ret, len);
1000 return ret;
1001 }
1002 --len;
1003 }
1004 return 0;
1005}
1006
1007/**
1008 * Send a single character.
1009 *
1010 * @ch Character to send
1011 */
1012static void smux_send_byte(char ch)
1013{
1014 struct smux_pkt_t *pkt;
1015
1016 pkt = smux_alloc_pkt();
1017 if (!pkt) {
1018 pr_err("%s: alloc failure for byte %x\n", __func__, ch);
1019 return;
1020 }
1021 pkt->hdr.cmd = SMUX_CMD_BYTE;
1022 pkt->hdr.flags = ch;
1023 pkt->hdr.lcid = SMUX_BROADCAST_LCID;
1024
1025 list_add_tail(&pkt->list, &smux.power_queue);
1026 queue_work(smux_tx_wq, &smux_tx_work);
1027}
1028
1029/**
1030 * Receive a single-character packet (used for internal testing).
1031 *
1032 * @ch Character to receive
1033 * @lcid Logical channel ID for packet
1034 *
1035 * @returns 0 for success
1036 */
1037static int smux_receive_byte(char ch, int lcid)
1038{
1039 struct smux_pkt_t pkt;
1040
1041 smux_init_pkt(&pkt);
1042 pkt.hdr.lcid = lcid;
1043 pkt.hdr.cmd = SMUX_CMD_BYTE;
1044 pkt.hdr.flags = ch;
1045
1046 return smux_dispatch_rx_pkt(&pkt);
1047}
1048
1049/**
1050 * Queue packet for transmit.
1051 *
1052 * @pkt_ptr Packet to queue
1053 * @ch Channel to queue packet on
1054 * @queue Queue channel on ready list
1055 */
1056static void smux_tx_queue(struct smux_pkt_t *pkt_ptr, struct smux_lch_t *ch,
1057 int queue)
1058{
1059 unsigned long flags;
1060
1061 SMUX_DBG("%s: queuing pkt %p\n", __func__, pkt_ptr);
1062
1063 spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
1064 list_add_tail(&pkt_ptr->list, &ch->tx_queue);
1065 spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);
1066
1067 if (queue)
1068 list_channel(ch);
1069}
1070
1071/**
1072 * Handle receive OPEN ACK command.
1073 *
1074 * @pkt Received packet
1075 *
1076 * @returns 0 for success
1077 */
1078static int smux_handle_rx_open_ack(struct smux_pkt_t *pkt)
1079{
1080 uint8_t lcid;
1081 int ret;
1082 struct smux_lch_t *ch;
1083 int enable_powerdown = 0;
1084
1085 lcid = pkt->hdr.lcid;
1086 ch = &smux_lch[lcid];
1087
1088 spin_lock(&ch->state_lock_lhb1);
1089 if (ch->local_state == SMUX_LCH_LOCAL_OPENING) {
1090 SMUX_DBG("lcid %d local state 0x%x -> 0x%x\n", lcid,
1091 ch->local_state,
1092 SMUX_LCH_LOCAL_OPENED);
1093
1094 if (pkt->hdr.flags & SMUX_CMD_OPEN_POWER_COLLAPSE)
1095 enable_powerdown = 1;
1096
1097 ch->local_state = SMUX_LCH_LOCAL_OPENED;
1098 if (ch->remote_state == SMUX_LCH_REMOTE_OPENED)
1099 schedule_notify(lcid, SMUX_CONNECTED, NULL);
1100 ret = 0;
1101 } else if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
1102 SMUX_DBG("Remote loopback OPEN ACK received\n");
1103 ret = 0;
1104 } else {
1105 pr_err("%s: lcid %d state 0x%x open ack invalid\n",
1106 __func__, lcid, ch->local_state);
1107 ret = -EINVAL;
1108 }
1109 spin_unlock(&ch->state_lock_lhb1);
1110
1111 if (enable_powerdown) {
1112 spin_lock(&smux.tx_lock_lha2);
1113 if (!smux.powerdown_enabled) {
1114 smux.powerdown_enabled = 1;
1115 SMUX_DBG("%s: enabling power-collapse support\n",
1116 __func__);
1117 }
1118 spin_unlock(&smux.tx_lock_lha2);
1119 }
1120
1121 return ret;
1122}
1123
1124static int smux_handle_close_ack(struct smux_pkt_t *pkt)
1125{
1126 uint8_t lcid;
1127 int ret;
1128 struct smux_lch_t *ch;
1129 union notifier_metadata meta_disconnected;
1130 unsigned long flags;
1131
1132 lcid = pkt->hdr.lcid;
1133 ch = &smux_lch[lcid];
1134 meta_disconnected.disconnected.is_ssr = 0;
1135
1136 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
1137
1138 if (ch->local_state == SMUX_LCH_LOCAL_CLOSING) {
1139 SMUX_DBG("lcid %d local state 0x%x -> 0x%x\n", lcid,
1140 SMUX_LCH_LOCAL_CLOSING,
1141 SMUX_LCH_LOCAL_CLOSED);
1142 ch->local_state = SMUX_LCH_LOCAL_CLOSED;
1143 if (ch->remote_state == SMUX_LCH_REMOTE_CLOSED)
1144 schedule_notify(lcid, SMUX_DISCONNECTED,
1145 &meta_disconnected);
1146 ret = 0;
1147 } else if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
1148 SMUX_DBG("Remote loopback CLOSE ACK received\n");
1149 ret = 0;
1150 } else {
1151 pr_err("%s: lcid %d state 0x%x close ack invalid\n",
1152 __func__, lcid, ch->local_state);
1153 ret = -EINVAL;
1154 }
1155 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
1156 return ret;
1157}
1158
1159/**
1160 * Handle receive OPEN command.
1161 *
1162 * @pkt Received packet
1163 *
1164 * @returns 0 for success
1165 */
1166static int smux_handle_rx_open_cmd(struct smux_pkt_t *pkt)
1167{
1168 uint8_t lcid;
1169 int ret;
1170 struct smux_lch_t *ch;
1171 struct smux_pkt_t *ack_pkt;
1172 unsigned long flags;
1173 int tx_ready = 0;
1174 int enable_powerdown = 0;
1175
1176 if (pkt->hdr.flags & SMUX_CMD_OPEN_ACK)
1177 return smux_handle_rx_open_ack(pkt);
1178
1179 lcid = pkt->hdr.lcid;
1180 ch = &smux_lch[lcid];
1181
1182 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
1183
1184 if (ch->remote_state == SMUX_LCH_REMOTE_CLOSED) {
1185 SMUX_DBG("lcid %d remote state 0x%x -> 0x%x\n", lcid,
1186 SMUX_LCH_REMOTE_CLOSED,
1187 SMUX_LCH_REMOTE_OPENED);
1188
1189 ch->remote_state = SMUX_LCH_REMOTE_OPENED;
1190 if (pkt->hdr.flags & SMUX_CMD_OPEN_POWER_COLLAPSE)
1191 enable_powerdown = 1;
1192
1193 /* Send Open ACK */
1194 ack_pkt = smux_alloc_pkt();
1195 if (!ack_pkt) {
1196 /* exit out to allow retrying this later */
1197 ret = -ENOMEM;
1198 goto out;
1199 }
1200 ack_pkt->hdr.cmd = SMUX_CMD_OPEN_LCH;
1201 ack_pkt->hdr.flags = SMUX_CMD_OPEN_ACK
1202 | SMUX_CMD_OPEN_POWER_COLLAPSE;
1203 ack_pkt->hdr.lcid = lcid;
1204 ack_pkt->hdr.payload_len = 0;
1205 ack_pkt->hdr.pad_len = 0;
1206 if (pkt->hdr.flags & SMUX_CMD_OPEN_REMOTE_LOOPBACK) {
1207 ch->remote_mode = SMUX_LCH_MODE_REMOTE_LOOPBACK;
1208 ack_pkt->hdr.flags |= SMUX_CMD_OPEN_REMOTE_LOOPBACK;
1209 }
1210 smux_tx_queue(ack_pkt, ch, 0);
1211 tx_ready = 1;
1212
1213 if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
1214 /*
1215 * Send an Open command to the remote side to
1216 * simulate our local client doing it.
1217 */
1218 ack_pkt = smux_alloc_pkt();
1219 if (ack_pkt) {
1220 ack_pkt->hdr.lcid = lcid;
1221 ack_pkt->hdr.cmd = SMUX_CMD_OPEN_LCH;
1222 ack_pkt->hdr.flags =
1223 SMUX_CMD_OPEN_POWER_COLLAPSE;
1224 ack_pkt->hdr.payload_len = 0;
1225 ack_pkt->hdr.pad_len = 0;
1226 smux_tx_queue(ack_pkt, ch, 0);
1227 tx_ready = 1;
1228 } else {
1229 pr_err("%s: Remote loopback allocation failure\n",
1230 __func__);
1231 }
1232 } else if (ch->local_state == SMUX_LCH_LOCAL_OPENED) {
1233 schedule_notify(lcid, SMUX_CONNECTED, NULL);
1234 }
1235 ret = 0;
1236 } else {
1237 pr_err("%s: lcid %d remote state 0x%x open invalid\n",
1238 __func__, lcid, ch->remote_state);
1239 ret = -EINVAL;
1240 }
1241
1242out:
1243 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
1244
1245 if (enable_powerdown) {
1246 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
1247 if (!smux.powerdown_enabled) {
1248 smux.powerdown_enabled = 1;
1249 SMUX_DBG("%s: enabling power-collapse support\n",
1250 __func__);
1251 }
1252 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
1253 }
1254
1255 if (tx_ready)
1256 list_channel(ch);
1257
1258 return ret;
1259}
1260
1261/**
1262 * Handle receive CLOSE command.
1263 *
1264 * @pkt Received packet
1265 *
1266 * @returns 0 for success
1267 */
1268static int smux_handle_rx_close_cmd(struct smux_pkt_t *pkt)
1269{
1270 uint8_t lcid;
1271 int ret;
1272 struct smux_lch_t *ch;
1273 struct smux_pkt_t *ack_pkt;
1274 union notifier_metadata meta_disconnected;
1275 unsigned long flags;
1276 int tx_ready = 0;
1277
1278 if (pkt->hdr.flags & SMUX_CMD_CLOSE_ACK)
1279 return smux_handle_close_ack(pkt);
1280
1281 lcid = pkt->hdr.lcid;
1282 ch = &smux_lch[lcid];
1283 meta_disconnected.disconnected.is_ssr = 0;
1284
1285 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
1286 if (ch->remote_state == SMUX_LCH_REMOTE_OPENED) {
1287 SMUX_DBG("lcid %d remote state 0x%x -> 0x%x\n", lcid,
1288 SMUX_LCH_REMOTE_OPENED,
1289 SMUX_LCH_REMOTE_CLOSED);
1290
1291 ack_pkt = smux_alloc_pkt();
1292 if (!ack_pkt) {
1293 /* exit out to allow retrying this later */
1294 ret = -ENOMEM;
1295 goto out;
1296 }
1297 ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
1298 ack_pkt->hdr.cmd = SMUX_CMD_CLOSE_LCH;
1299 ack_pkt->hdr.flags = SMUX_CMD_CLOSE_ACK;
1300 ack_pkt->hdr.lcid = lcid;
1301 ack_pkt->hdr.payload_len = 0;
1302 ack_pkt->hdr.pad_len = 0;
1303 smux_tx_queue(ack_pkt, ch, 0);
1304 tx_ready = 1;
1305
1306 if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
1307 /*
1308 * Send a Close command to the remote side to simulate
1309 * our local client doing it.
1310 */
1311 ack_pkt = smux_alloc_pkt();
1312 if (ack_pkt) {
1313 ack_pkt->hdr.lcid = lcid;
1314 ack_pkt->hdr.cmd = SMUX_CMD_CLOSE_LCH;
1315 ack_pkt->hdr.flags = 0;
1316 ack_pkt->hdr.payload_len = 0;
1317 ack_pkt->hdr.pad_len = 0;
1318 smux_tx_queue(ack_pkt, ch, 0);
1319 tx_ready = 1;
1320 } else {
1321 pr_err("%s: Remote loopback allocation failure\n",
1322 __func__);
1323 }
1324 }
1325
1326 if (ch->local_state == SMUX_LCH_LOCAL_CLOSED)
1327 schedule_notify(lcid, SMUX_DISCONNECTED,
1328 &meta_disconnected);
1329 ret = 0;
1330 } else {
1331 pr_err("%s: lcid %d remote state 0x%x close invalid\n",
1332 __func__, lcid, ch->remote_state);
1333 ret = -EINVAL;
1334 }
1335out:
1336 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
1337 if (tx_ready)
1338 list_channel(ch);
1339
1340 return ret;
1341}
1342
1343/*
1344 * Handle receive DATA command.
1345 *
1346 * @pkt Received packet
1347 *
1348 * @returns 0 for success
1349 */
1350static int smux_handle_rx_data_cmd(struct smux_pkt_t *pkt)
1351{
1352 uint8_t lcid;
1353 int ret = 0;
1354 int do_retry = 0;
1355 int tmp;
1356 int rx_len;
1357 struct smux_lch_t *ch;
1358 union notifier_metadata metadata;
1359 int remote_loopback;
1360 struct smux_pkt_t *ack_pkt;
1361 unsigned long flags;
1362
1363 if (!pkt || smux_assert_lch_id(pkt->hdr.lcid)) {
1364 ret = -ENXIO;
1365 goto out;
1366 }
1367
1368 rx_len = pkt->hdr.payload_len;
1369 if (rx_len == 0) {
1370 ret = -EINVAL;
1371 goto out;
1372 }
1373
1374 lcid = pkt->hdr.lcid;
1375 ch = &smux_lch[lcid];
1376 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
1377 remote_loopback = ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK;
1378
1379 if (ch->local_state != SMUX_LCH_LOCAL_OPENED
1380 && !remote_loopback) {
1381 pr_err("smux: ch %d error data on local state 0x%x",
1382 lcid, ch->local_state);
1383 ret = -EIO;
1384 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
1385 goto out;
1386 }
1387
1388 if (ch->remote_state != SMUX_LCH_REMOTE_OPENED) {
1389 pr_err("smux: ch %d error data on remote state 0x%x",
1390 lcid, ch->remote_state);
1391 ret = -EIO;
1392 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
1393 goto out;
1394 }
1395
1396 if (!list_empty(&ch->rx_retry_queue)) {
1397 do_retry = 1;
1398 if ((ch->rx_retry_queue_cnt + 1) > SMUX_RX_RETRY_MAX_PKTS) {
1399 /* retry queue full */
1400 schedule_notify(lcid, SMUX_READ_FAIL, NULL);
1401 ret = -ENOMEM;
1402 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
1403 goto out;
1404 }
1405 }
1406 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
1407
1408 if (remote_loopback) {
1409 /* Echo the data back to the remote client. */
1410 ack_pkt = smux_alloc_pkt();
1411 if (ack_pkt) {
1412 ack_pkt->hdr.lcid = lcid;
1413 ack_pkt->hdr.cmd = SMUX_CMD_DATA;
1414 ack_pkt->hdr.flags = 0;
1415 ack_pkt->hdr.payload_len = pkt->hdr.payload_len;
1416 if (ack_pkt->hdr.payload_len) {
1417 smux_alloc_pkt_payload(ack_pkt);
1418 memcpy(ack_pkt->payload, pkt->payload,
1419 ack_pkt->hdr.payload_len);
1420 }
1421 ack_pkt->hdr.pad_len = pkt->hdr.pad_len;
1422 smux_tx_queue(ack_pkt, ch, 0);
1423 list_channel(ch);
1424 } else {
1425 pr_err("%s: Remote loopback allocation failure\n",
1426 __func__);
1427 }
1428 } else if (!do_retry) {
1429 /* request buffer from client */
1430 metadata.read.pkt_priv = 0;
1431 metadata.read.buffer = 0;
1432 tmp = ch->get_rx_buffer(ch->priv,
1433 (void **)&metadata.read.pkt_priv,
1434 (void **)&metadata.read.buffer,
1435 rx_len);
1436
1437 if (tmp == 0 && metadata.read.buffer) {
1438 /* place data into RX buffer */
1439 memcpy(metadata.read.buffer, pkt->payload,
1440 rx_len);
1441 metadata.read.len = rx_len;
1442 schedule_notify(lcid, SMUX_READ_DONE,
1443 &metadata);
1444 } else if (tmp == -EAGAIN ||
1445 (tmp == 0 && !metadata.read.buffer)) {
1446 /* buffer allocation failed - add to retry queue */
1447 do_retry = 1;
1448 } else if (tmp < 0) {
1449 schedule_notify(lcid, SMUX_READ_FAIL, NULL);
1450 ret = -ENOMEM;
1451 }
1452 }
1453
1454 if (do_retry) {
1455 struct smux_rx_pkt_retry *retry;
1456
1457 retry = kmalloc(sizeof(struct smux_rx_pkt_retry), GFP_KERNEL);
1458 if (!retry) {
1459 pr_err("%s: retry alloc failure\n", __func__);
1460 ret = -ENOMEM;
1461 schedule_notify(lcid, SMUX_READ_FAIL, NULL);
1462 goto out;
1463 }
1464 INIT_LIST_HEAD(&retry->rx_retry_list);
1465 retry->timeout_in_ms = SMUX_RX_RETRY_MIN_MS;
1466
1467 /* copy packet */
1468 retry->pkt = smux_alloc_pkt();
1469 if (!retry->pkt) {
1470 kfree(retry);
1471 pr_err("%s: pkt alloc failure\n", __func__);
1472 ret = -ENOMEM;
1473 schedule_notify(lcid, SMUX_READ_FAIL, NULL);
1474 goto out;
1475 }
1476 retry->pkt->hdr.lcid = lcid;
1477 retry->pkt->hdr.payload_len = pkt->hdr.payload_len;
1478 retry->pkt->hdr.pad_len = pkt->hdr.pad_len;
1479 if (retry->pkt->hdr.payload_len) {
1480 smux_alloc_pkt_payload(retry->pkt);
1481 memcpy(retry->pkt->payload, pkt->payload,
1482 retry->pkt->hdr.payload_len);
1483 }
1484
1485 /* add to retry queue */
1486 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
1487 list_add_tail(&retry->rx_retry_list, &ch->rx_retry_queue);
1488 ++ch->rx_retry_queue_cnt;
1489 if (ch->rx_retry_queue_cnt == 1)
1490 queue_delayed_work(smux_rx_wq, &ch->rx_retry_work,
1491 msecs_to_jiffies(retry->timeout_in_ms));
1492 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
1493 }
1494
1495out:
1496 return ret;
1497}
1498
1499/**
1500 * Handle receive byte command for testing purposes.
1501 *
1502 * @pkt Received packet
1503 *
1504 * @returns 0 for success
1505 */
1506static int smux_handle_rx_byte_cmd(struct smux_pkt_t *pkt)
1507{
1508 uint8_t lcid;
1509 int ret;
1510 struct smux_lch_t *ch;
1511 union notifier_metadata metadata;
1512 unsigned long flags;
1513
1514 if (!pkt || smux_assert_lch_id(pkt->hdr.lcid)) {
1515 pr_err("%s: invalid packet or channel id\n", __func__);
1516 return -ENXIO;
1517 }
1518
1519 lcid = pkt->hdr.lcid;
1520 ch = &smux_lch[lcid];
1521 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
1522
1523 if (ch->local_state != SMUX_LCH_LOCAL_OPENED) {
1524 pr_err("smux: ch %d error data on local state 0x%x",
1525 lcid, ch->local_state);
1526 ret = -EIO;
1527 goto out;
1528 }
1529
1530 if (ch->remote_state != SMUX_LCH_REMOTE_OPENED) {
1531 pr_err("smux: ch %d error data on remote state 0x%x",
1532 lcid, ch->remote_state);
1533 ret = -EIO;
1534 goto out;
1535 }
1536
1537 metadata.read.pkt_priv = (void *)(int)pkt->hdr.flags;
1538 metadata.read.buffer = 0;
1539 schedule_notify(lcid, SMUX_READ_DONE, &metadata);
1540 ret = 0;
1541
1542out:
1543 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
1544 return ret;
1545}
1546
1547/**
1548 * Handle receive status command.
1549 *
1550 * @pkt Received packet
1551 *
1552 * @returns 0 for success
1553 */
1554static int smux_handle_rx_status_cmd(struct smux_pkt_t *pkt)
1555{
1556 uint8_t lcid;
1557 int ret = 0;
1558 struct smux_lch_t *ch;
1559 union notifier_metadata meta;
1560 unsigned long flags;
1561 int tx_ready = 0;
1562
1563 lcid = pkt->hdr.lcid;
1564 ch = &smux_lch[lcid];
1565
1566 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
1567 meta.tiocm.tiocm_old = ch->remote_tiocm;
1568 meta.tiocm.tiocm_new = pkt->hdr.flags;
1569
1570 /* update logical channel flow control */
1571 if ((meta.tiocm.tiocm_old & SMUX_CMD_STATUS_FLOW_CNTL) ^
1572 (meta.tiocm.tiocm_new & SMUX_CMD_STATUS_FLOW_CNTL)) {
1573 /* logical channel flow control changed */
1574 if (pkt->hdr.flags & SMUX_CMD_STATUS_FLOW_CNTL) {
1575 /* disabled TX */
1576 SMUX_DBG("TX Flow control enabled\n");
1577 ch->tx_flow_control = 1;
1578 } else {
1579 /* re-enable channel */
1580 SMUX_DBG("TX Flow control disabled\n");
1581 ch->tx_flow_control = 0;
1582 tx_ready = 1;
1583 }
1584 }
1585 meta.tiocm.tiocm_old = msm_smux_tiocm_get_atomic(ch);
1586 ch->remote_tiocm = pkt->hdr.flags;
1587 meta.tiocm.tiocm_new = msm_smux_tiocm_get_atomic(ch);
1588
1589 /* client notification for status change */
1590 if (IS_FULLY_OPENED(ch)) {
1591 if (meta.tiocm.tiocm_old != meta.tiocm.tiocm_new)
1592 schedule_notify(lcid, SMUX_TIOCM_UPDATE, &meta);
1593 ret = 0;
1594 }
1595 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
1596 if (tx_ready)
1597 list_channel(ch);
1598
1599 return ret;
1600}
1601
1602/**
1603 * Handle receive power command.
1604 *
1605 * @pkt Received packet
1606 *
1607 * @returns 0 for success
1608 */
1609static int smux_handle_rx_power_cmd(struct smux_pkt_t *pkt)
1610{
1611 struct smux_pkt_t *ack_pkt = NULL;
1612 unsigned long flags;
1613
1614 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
1615 if (pkt->hdr.flags & SMUX_CMD_PWR_CTL_ACK) {
1616 /* local sleep request ack */
1617 if (smux.power_state == SMUX_PWR_TURNING_OFF) {
1618 /* Power-down complete, turn off UART */
1619 SMUX_PWR("%s: Power %d->%d\n", __func__,
1620 smux.power_state, SMUX_PWR_OFF_FLUSH);
1621 smux.power_state = SMUX_PWR_OFF_FLUSH;
1622 queue_work(smux_tx_wq, &smux_inactivity_work);
1623 } else {
1624 pr_err("%s: sleep request ack invalid in state %d\n",
1625 __func__, smux.power_state);
1626 }
1627 } else {
1628 /*
1629 * Remote sleep request
1630 *
1631 * Even if we have data pending, we need to transition to the
1632 * POWER_OFF state and then perform a wakeup since the remote
1633 * side has requested a power-down.
1634 *
1635 * The state here is set to SMUX_PWR_TURNING_OFF_FLUSH and
1636 * the TX thread will set the state to SMUX_PWR_TURNING_OFF
1637 * when it sends the packet.
1638 */
1639 if (smux.power_state == SMUX_PWR_ON
1640 || smux.power_state == SMUX_PWR_TURNING_OFF) {
1641 ack_pkt = smux_alloc_pkt();
1642 if (ack_pkt) {
1643 SMUX_PWR("%s: Power %d->%d\n", __func__,
1644 smux.power_state,
1645 SMUX_PWR_TURNING_OFF_FLUSH);
1646
1647 smux.power_state = SMUX_PWR_TURNING_OFF_FLUSH;
1648
1649 /* send power-down ack */
1650 ack_pkt->hdr.cmd = SMUX_CMD_PWR_CTL;
1651 ack_pkt->hdr.flags = SMUX_CMD_PWR_CTL_ACK;
1652 ack_pkt->hdr.lcid = SMUX_BROADCAST_LCID;
1653 list_add_tail(&ack_pkt->list,
1654 &smux.power_queue);
1655 queue_work(smux_tx_wq, &smux_tx_work);
1656 }
1657 } else {
1658 pr_err("%s: sleep request invalid in state %d\n",
1659 __func__, smux.power_state);
1660 }
1661 }
1662 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
1663
1664 return 0;
1665}
1666
1667/**
1668 * Handle dispatching a completed packet for receive processing.
1669 *
1670 * @pkt Packet to process
1671 *
1672 * @returns 0 for success
1673 */
1674static int smux_dispatch_rx_pkt(struct smux_pkt_t *pkt)
1675{
1676 int ret = -ENXIO;
1677
1678 SMUX_LOG_PKT_RX(pkt);
1679
1680 switch (pkt->hdr.cmd) {
1681 case SMUX_CMD_OPEN_LCH:
1682 if (smux_assert_lch_id(pkt->hdr.lcid)) {
1683 pr_err("%s: invalid channel id %d\n",
1684 __func__, pkt->hdr.lcid);
1685 break;
1686 }
1687 ret = smux_handle_rx_open_cmd(pkt);
1688 break;
1689
1690 case SMUX_CMD_DATA:
1691 if (smux_assert_lch_id(pkt->hdr.lcid)) {
1692 pr_err("%s: invalid channel id %d\n",
1693 __func__, pkt->hdr.lcid);
1694 break;
1695 }
1696 ret = smux_handle_rx_data_cmd(pkt);
1697 break;
1698
1699 case SMUX_CMD_CLOSE_LCH:
1700 if (smux_assert_lch_id(pkt->hdr.lcid)) {
1701 pr_err("%s: invalid channel id %d\n",
1702 __func__, pkt->hdr.lcid);
1703 break;
1704 }
1705 ret = smux_handle_rx_close_cmd(pkt);
1706 break;
1707
1708 case SMUX_CMD_STATUS:
1709 if (smux_assert_lch_id(pkt->hdr.lcid)) {
1710 pr_err("%s: invalid channel id %d\n",
1711 __func__, pkt->hdr.lcid);
1712 break;
1713 }
1714 ret = smux_handle_rx_status_cmd(pkt);
1715 break;
1716
1717 case SMUX_CMD_PWR_CTL:
1718 ret = smux_handle_rx_power_cmd(pkt);
1719 break;
1720
1721 case SMUX_CMD_BYTE:
1722 ret = smux_handle_rx_byte_cmd(pkt);
1723 break;
1724
1725 default:
1726 pr_err("%s: command %d unknown\n", __func__, pkt->hdr.cmd);
1727 ret = -EINVAL;
1728 }
1729 return ret;
1730}
1731
1732/**
1733 * Deserializes a packet and dispatches it to the packet receive logic.
1734 *
1735 * @data Raw data for one packet
1736 * @len Length of the data
1737 *
1738 * @returns 0 for success
1739 */
1740static int smux_deserialize(unsigned char *data, int len)
1741{
1742 struct smux_pkt_t recv;
1743
1744 smux_init_pkt(&recv);
1745
1746 /*
1747 * It may be possible to optimize this to not use the
1748 * temporary buffer.
1749 */
1750 memcpy(&recv.hdr, data, sizeof(struct smux_hdr_t));
1751
1752 if (recv.hdr.magic != SMUX_MAGIC) {
1753 pr_err("%s: invalid header magic\n", __func__);
1754 return -EINVAL;
1755 }
1756
1757 if (recv.hdr.payload_len)
1758 recv.payload = data + sizeof(struct smux_hdr_t);
1759
1760 return smux_dispatch_rx_pkt(&recv);
1761}
1762
1763/**
1764 * Handle wakeup request byte.
1765 */
1766static void smux_handle_wakeup_req(void)
1767{
1768 unsigned long flags;
1769
1770 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
1771 if (smux.power_state == SMUX_PWR_OFF
1772 || smux.power_state == SMUX_PWR_TURNING_ON) {
1773 /* wakeup system */
1774 SMUX_PWR("%s: Power %d->%d\n", __func__,
1775 smux.power_state, SMUX_PWR_ON);
1776 smux.power_state = SMUX_PWR_ON;
1777 queue_work(smux_tx_wq, &smux_wakeup_work);
1778 queue_work(smux_tx_wq, &smux_tx_work);
1779 queue_delayed_work(smux_tx_wq, &smux_delayed_inactivity_work,
1780 msecs_to_jiffies(SMUX_INACTIVITY_TIMEOUT_MS));
1781 smux_send_byte(SMUX_WAKEUP_ACK);
1782 } else {
1783 smux_send_byte(SMUX_WAKEUP_ACK);
1784 }
1785 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
1786}
1787
1788/**
1789 * Handle wakeup request ack.
1790 */
1791static void smux_handle_wakeup_ack(void)
1792{
1793 unsigned long flags;
1794
1795 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
1796 if (smux.power_state == SMUX_PWR_TURNING_ON) {
1797 /* received response to wakeup request */
1798 SMUX_PWR("%s: Power %d->%d\n", __func__,
1799 smux.power_state, SMUX_PWR_ON);
1800 smux.power_state = SMUX_PWR_ON;
1801 queue_work(smux_tx_wq, &smux_tx_work);
1802 queue_delayed_work(smux_tx_wq, &smux_delayed_inactivity_work,
1803 msecs_to_jiffies(SMUX_INACTIVITY_TIMEOUT_MS));
1804
1805 } else if (smux.power_state != SMUX_PWR_ON) {
1806 /* invalid message */
1807 pr_err("%s: wakeup request ack invalid in state %d\n",
1808 __func__, smux.power_state);
1809 }
1810 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
1811}
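/*
 * Editorial summary of the wakeup handshake handled by the two functions
 * above: the side with pending TX data sends the single-byte
 * SMUX_WAKEUP_REQ (repeating with the SMUX_WAKEUP_DELAY_* backoff while in
 * SMUX_PWR_TURNING_ON), the peer answers with SMUX_WAKEUP_ACK, and both
 * sides settle in SMUX_PWR_ON with the inactivity timer re-armed.  The
 * retry behavior lives in the wakeup worker, which is outside this
 * excerpt.
 */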
1812
1813/**
1814 * RX State machine - IDLE state processing.
1815 *
1816 * @data New RX data to process
1817 * @len Length of the data
1818 * @used Return value of length processed
 1819 * @flag Error flag - TTY_NORMAL (0) for no failure
 1819 * @flag Error flag - TTY_NORMAL (0) for no failure
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001820 */
1821static void smux_rx_handle_idle(const unsigned char *data,
1822 int len, int *used, int flag)
1823{
1824 int i;
1825
1826 if (flag) {
1827 if (smux_byte_loopback)
1828 smux_receive_byte(SMUX_UT_ECHO_ACK_FAIL,
1829 smux_byte_loopback);
1830 pr_err("%s: TTY error 0x%x - ignoring\n", __func__, flag);
1831 ++*used;
1832 return;
1833 }
1834
1835 for (i = *used; i < len && smux.rx_state == SMUX_RX_IDLE; i++) {
1836 switch (data[i]) {
1837 case SMUX_MAGIC_WORD1:
1838 smux.rx_state = SMUX_RX_MAGIC;
1839 break;
1840 case SMUX_WAKEUP_REQ:
1841 smux_handle_wakeup_req();
1842 break;
1843 case SMUX_WAKEUP_ACK:
1844 smux_handle_wakeup_ack();
1845 break;
1846 default:
1847 /* unexpected character */
1848 if (smux_byte_loopback && data[i] == SMUX_UT_ECHO_REQ)
1849 smux_receive_byte(SMUX_UT_ECHO_ACK_OK,
1850 smux_byte_loopback);
1851 pr_err("%s: parse error 0x%02x - ignoring\n", __func__,
1852 (unsigned)data[i]);
1853 break;
1854 }
1855 }
1856
1857 *used = i;
1858}
1859
1860/**
1861 * RX State machine - Header Magic state processing.
1862 *
1863 * @data New RX data to process
1864 * @len Length of the data
1865 * @used Return value of length processed
 1866 * @flag Error flag - TTY_NORMAL (0) for no failure
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001867 */
1868static void smux_rx_handle_magic(const unsigned char *data,
1869 int len, int *used, int flag)
1870{
1871 int i;
1872
1873 if (flag) {
1874 pr_err("%s: TTY RX error %d\n", __func__, flag);
1875 smux_enter_reset();
1876 smux.rx_state = SMUX_RX_FAILURE;
1877 ++*used;
1878 return;
1879 }
1880
1881 for (i = *used; i < len && smux.rx_state == SMUX_RX_MAGIC; i++) {
1882 /* wait for completion of the magic */
1883 if (data[i] == SMUX_MAGIC_WORD2) {
1884 smux.recv_len = 0;
1885 smux.recv_buf[smux.recv_len++] = SMUX_MAGIC_WORD1;
1886 smux.recv_buf[smux.recv_len++] = SMUX_MAGIC_WORD2;
1887 smux.rx_state = SMUX_RX_HDR;
1888 } else {
1889 /* unexpected / trash character */
1890 pr_err("%s: rx parse error for char %c; *used=%d, len=%d\n",
1891 __func__, data[i], *used, len);
1892 smux.rx_state = SMUX_RX_IDLE;
1893 }
1894 }
1895
1896 *used = i;
1897}
1898
1899/**
1900 * RX State machine - Packet Header state processing.
1901 *
1902 * @data New RX data to process
1903 * @len Length of the data
1904 * @used Return value of length processed
 1905 * @flag Error flag - TTY_NORMAL (0) for no failure
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001906 */
1907static void smux_rx_handle_hdr(const unsigned char *data,
1908 int len, int *used, int flag)
1909{
1910 int i;
1911 struct smux_hdr_t *hdr;
1912
1913 if (flag) {
1914 pr_err("%s: TTY RX error %d\n", __func__, flag);
1915 smux_enter_reset();
1916 smux.rx_state = SMUX_RX_FAILURE;
1917 ++*used;
1918 return;
1919 }
1920
1921 for (i = *used; i < len && smux.rx_state == SMUX_RX_HDR; i++) {
1922 smux.recv_buf[smux.recv_len++] = data[i];
1923
1924 if (smux.recv_len == sizeof(struct smux_hdr_t)) {
1925 /* complete header received */
1926 hdr = (struct smux_hdr_t *)smux.recv_buf;
1927 smux.pkt_remain = hdr->payload_len + hdr->pad_len;
1928 smux.rx_state = SMUX_RX_PAYLOAD;
1929 }
1930 }
1931 *used = i;
1932}
1933
1934/**
1935 * RX State machine - Packet Payload state processing.
1936 *
1937 * @data New RX data to process
1938 * @len Length of the data
1939 * @used Return value of length processed
 1940 * @flag Error flag - TTY_NORMAL (0) for no failure
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001941 */
1942static void smux_rx_handle_pkt_payload(const unsigned char *data,
1943 int len, int *used, int flag)
1944{
1945 int remaining;
1946
1947 if (flag) {
1948 pr_err("%s: TTY RX error %d\n", __func__, flag);
1949 smux_enter_reset();
1950 smux.rx_state = SMUX_RX_FAILURE;
1951 ++*used;
1952 return;
1953 }
1954
1955 /* copy data into rx buffer */
1956 if (smux.pkt_remain < (len - *used))
1957 remaining = smux.pkt_remain;
1958 else
1959 remaining = len - *used;
1960
1961 memcpy(&smux.recv_buf[smux.recv_len], &data[*used], remaining);
1962 smux.recv_len += remaining;
1963 smux.pkt_remain -= remaining;
1964 *used += remaining;
1965
1966 if (smux.pkt_remain == 0) {
1967 /* complete packet received */
1968 smux_deserialize(smux.recv_buf, smux.recv_len);
1969 smux.rx_state = SMUX_RX_IDLE;
1970 }
1971}
1972
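/*
 * Parser walk-through (editorial sketch; byte values are symbolic, not
 * actual wire values): for one well-formed packet the handlers above move
 * the receive state machine through
 *
 *	SMUX_RX_IDLE    -- SMUX_MAGIC_WORD1 seen           --> SMUX_RX_MAGIC
 *	SMUX_RX_MAGIC   -- SMUX_MAGIC_WORD2 seen           --> SMUX_RX_HDR
 *	SMUX_RX_HDR     -- sizeof(struct smux_hdr_t) bytes --> SMUX_RX_PAYLOAD
 *	SMUX_RX_PAYLOAD -- payload_len + pad_len bytes     --> SMUX_RX_IDLE
 *
 * and the completed buffer is handed to smux_deserialize().  Single-byte
 * wakeup request/ack commands are handled directly in the idle state.
 */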
1973/**
1974 * Feed data to the receive state machine.
1975 *
1976 * @data Pointer to data block
1977 * @len Length of data
1978 * @flag TTY_NORMAL (0) for no error, otherwise TTY Error Flag
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001979 */
1980void smux_rx_state_machine(const unsigned char *data,
1981 int len, int flag)
1982{
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001983 struct smux_rx_worker_data work;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001984
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001985 work.data = data;
1986 work.len = len;
1987 work.flag = flag;
1988 INIT_WORK_ONSTACK(&work.work, smux_rx_worker);
1989 work.work_complete = COMPLETION_INITIALIZER_ONSTACK(work.work_complete);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001990
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001991 queue_work(smux_rx_wq, &work.work);
1992 wait_for_completion(&work.work_complete);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001993}
1994
1995/**
1996 * Add channel to transmit-ready list and trigger transmit worker.
1997 *
1998 * @ch Channel to add
1999 */
2000static void list_channel(struct smux_lch_t *ch)
2001{
2002 unsigned long flags;
2003
2004 SMUX_DBG("%s: listing channel %d\n",
2005 __func__, ch->lcid);
2006
2007 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
2008 spin_lock(&ch->tx_lock_lhb2);
2009 smux.tx_activity_flag = 1;
2010 if (list_empty(&ch->tx_ready_list))
2011 list_add_tail(&ch->tx_ready_list, &smux.lch_tx_ready_list);
2012 spin_unlock(&ch->tx_lock_lhb2);
2013 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2014
2015 queue_work(smux_tx_wq, &smux_tx_work);
2016}
2017
2018/**
2019 * Transmit packet on correct transport and then perform client
2020 * notification.
2021 *
2022 * @ch Channel to transmit on
2023 * @pkt Packet to transmit
2024 */
2025static void smux_tx_pkt(struct smux_lch_t *ch, struct smux_pkt_t *pkt)
2026{
2027 union notifier_metadata meta_write;
2028 int ret;
2029
2030 if (ch && pkt) {
2031 SMUX_LOG_PKT_TX(pkt);
2032 if (ch->local_mode == SMUX_LCH_MODE_LOCAL_LOOPBACK)
2033 ret = smux_tx_loopback(pkt);
2034 else
2035 ret = smux_tx_tty(pkt);
2036
2037 if (pkt->hdr.cmd == SMUX_CMD_DATA) {
2038 /* notify write-done */
2039 meta_write.write.pkt_priv = pkt->priv;
2040 meta_write.write.buffer = pkt->payload;
2041 meta_write.write.len = pkt->hdr.payload_len;
2042 if (ret >= 0) {
2043				SMUX_DBG("%s: PKT write done\n", __func__);
2044 schedule_notify(ch->lcid, SMUX_WRITE_DONE,
2045 &meta_write);
2046 } else {
2047 pr_err("%s: failed to write pkt %d\n",
2048 __func__, ret);
2049 schedule_notify(ch->lcid, SMUX_WRITE_FAIL,
2050 &meta_write);
2051 }
2052 }
2053 }
2054}
2055
2056/**
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002057 * Flush pending TTY TX data.
2058 */
2059static void smux_flush_tty(void)
2060{
Eric Holmberg92a67df2012-06-25 13:56:24 -06002061 mutex_lock(&smux.mutex_lha0);
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002062 if (!smux.tty) {
2063 pr_err("%s: ldisc not loaded\n", __func__);
Eric Holmberg92a67df2012-06-25 13:56:24 -06002064 mutex_unlock(&smux.mutex_lha0);
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002065 return;
2066 }
2067
2068 tty_wait_until_sent(smux.tty,
2069 msecs_to_jiffies(TTY_BUFFER_FULL_WAIT_MS));
2070
2071 if (tty_chars_in_buffer(smux.tty) > 0)
2072 pr_err("%s: unable to flush UART queue\n", __func__);
Eric Holmberg92a67df2012-06-25 13:56:24 -06002073
2074 mutex_unlock(&smux.mutex_lha0);
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002075}
2076
2077/**
Eric Holmberged1f00c2012-06-07 09:45:18 -06002078 * Purge TX queue for logical channel.
2079 *
2080 * @ch Logical channel pointer
2081 *
2082 * Must be called with the following spinlocks locked:
2083 * state_lock_lhb1
2084 * tx_lock_lhb2
2085 */
2086static void smux_purge_ch_tx_queue(struct smux_lch_t *ch)
2087{
2088 struct smux_pkt_t *pkt;
2089 int send_disconnect = 0;
2090
2091 while (!list_empty(&ch->tx_queue)) {
2092 pkt = list_first_entry(&ch->tx_queue, struct smux_pkt_t,
2093 list);
2094 list_del(&pkt->list);
2095
2096 if (pkt->hdr.cmd == SMUX_CMD_OPEN_LCH) {
2097 /* Open was never sent, just force to closed state */
2098 ch->local_state = SMUX_LCH_LOCAL_CLOSED;
2099 send_disconnect = 1;
2100 } else if (pkt->hdr.cmd == SMUX_CMD_DATA) {
2101 /* Notify client of failed write */
2102 union notifier_metadata meta_write;
2103
2104 meta_write.write.pkt_priv = pkt->priv;
2105 meta_write.write.buffer = pkt->payload;
2106 meta_write.write.len = pkt->hdr.payload_len;
2107 schedule_notify(ch->lcid, SMUX_WRITE_FAIL, &meta_write);
2108 }
2109 smux_free_pkt(pkt);
2110 }
2111
2112 if (send_disconnect) {
2113 union notifier_metadata meta_disconnected;
2114
2115 meta_disconnected.disconnected.is_ssr = smux.in_reset;
2116 schedule_notify(ch->lcid, SMUX_DISCONNECTED,
2117 &meta_disconnected);
2118 }
2119}
2120
2121/**
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002122 * Power-up the UART.
Eric Holmberg92a67df2012-06-25 13:56:24 -06002123 *
2124 * Must be called with smux.mutex_lha0 already locked.
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002125 */
Eric Holmberg92a67df2012-06-25 13:56:24 -06002126static void smux_uart_power_on_atomic(void)
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002127{
2128 struct uart_state *state;
2129
2130 if (!smux.tty || !smux.tty->driver_data) {
2131 pr_err("%s: unable to find UART port for tty %p\n",
2132 __func__, smux.tty);
2133 return;
2134 }
2135 state = smux.tty->driver_data;
2136 msm_hs_request_clock_on(state->uart_port);
2137}
2138
2139/**
Eric Holmberg92a67df2012-06-25 13:56:24 -06002140 * Power-up the UART.
2141 */
2142static void smux_uart_power_on(void)
2143{
2144 mutex_lock(&smux.mutex_lha0);
2145 smux_uart_power_on_atomic();
2146 mutex_unlock(&smux.mutex_lha0);
2147}
2148
2149/**
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002150 * Power down the UART.
2151 */
2152static void smux_uart_power_off(void)
2153{
2154 struct uart_state *state;
2155
Eric Holmberg92a67df2012-06-25 13:56:24 -06002156 mutex_lock(&smux.mutex_lha0);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002157 if (!smux.tty || !smux.tty->driver_data) {
2158 pr_err("%s: unable to find UART port for tty %p\n",
2159 __func__, smux.tty);
Eric Holmberg92a67df2012-06-25 13:56:24 -06002160 mutex_unlock(&smux.mutex_lha0);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002161 return;
2162 }
2163 state = smux.tty->driver_data;
2164 msm_hs_request_clock_off(state->uart_port);
Eric Holmberg92a67df2012-06-25 13:56:24 -06002165 mutex_unlock(&smux.mutex_lha0);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002166}
2167
2168/**
2169 * TX Wakeup Worker
2170 *
2171 * @work Not used
2172 *
2173 * Do an exponential back-off wakeup sequence with a maximum period
2174 * of approximately 1 second (1 << 20 microseconds).
2175 */
2176static void smux_wakeup_worker(struct work_struct *work)
2177{
2178 unsigned long flags;
2179 unsigned wakeup_delay;
2180 int complete = 0;
2181
Eric Holmberged1f00c2012-06-07 09:45:18 -06002182 while (!smux.in_reset) {
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002183 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
2184 if (smux.power_state == SMUX_PWR_ON) {
2185 /* wakeup complete */
2186 complete = 1;
2187 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2188 break;
2189 } else {
2190 /* retry */
2191 wakeup_delay = smux.pwr_wakeup_delay_us;
2192 smux.pwr_wakeup_delay_us <<= 1;
2193 if (smux.pwr_wakeup_delay_us > SMUX_WAKEUP_DELAY_MAX)
2194 smux.pwr_wakeup_delay_us =
2195 SMUX_WAKEUP_DELAY_MAX;
2196 }
2197 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2198 SMUX_DBG("%s: triggering wakeup\n", __func__);
2199 smux_send_byte(SMUX_WAKEUP_REQ);
2200
2201 if (wakeup_delay < SMUX_WAKEUP_DELAY_MIN) {
2202 SMUX_DBG("%s: sleeping for %u us\n", __func__,
2203 wakeup_delay);
2204 usleep_range(wakeup_delay, 2*wakeup_delay);
2205 } else {
2206 /* schedule delayed work */
2207 SMUX_DBG("%s: scheduling delayed wakeup in %u ms\n",
2208 __func__, wakeup_delay / 1000);
2209 queue_delayed_work(smux_tx_wq,
2210 &smux_wakeup_delayed_work,
2211 msecs_to_jiffies(wakeup_delay / 1000));
2212 break;
2213 }
2214 }
2215
2216 if (complete) {
2217 SMUX_DBG("%s: wakeup complete\n", __func__);
2218 /*
2219 * Cancel any pending retry. This avoids a race condition with
2220 * a new power-up request because:
2221 * 1) this worker doesn't modify the state
2222 * 2) this worker is processed on the same single-threaded
2223 * workqueue as new TX wakeup requests
2224 */
2225 cancel_delayed_work(&smux_wakeup_delayed_work);
2226 }
2227}
2228
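/*
 * Back-off example (editorial sketch): pwr_wakeup_delay_us is reset to 1
 * when a wakeup is initiated, and the retry delay doubles on every pass
 * (1, 2, 4, ... microseconds).  Short delays are slept in-line with
 * usleep_range(); once the delay reaches SMUX_WAKEUP_DELAY_MIN the retry
 * is deferred to delayed work instead, and the delay is clamped at
 * SMUX_WAKEUP_DELAY_MAX (roughly one second).
 */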
2229
2230/**
2231 * Inactivity timeout worker. Periodically scheduled when link is active.
2232 * When it detects inactivity, it will power-down the UART link.
2233 *
2234 * @work Work structure (not used)
2235 */
2236static void smux_inactivity_worker(struct work_struct *work)
2237{
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002238 struct smux_pkt_t *pkt;
2239 unsigned long flags;
2240
2241 spin_lock_irqsave(&smux.rx_lock_lha1, flags);
2242 spin_lock(&smux.tx_lock_lha2);
2243
2244 if (!smux.tx_activity_flag && !smux.rx_activity_flag) {
2245 /* no activity */
2246 if (smux.powerdown_enabled) {
2247 if (smux.power_state == SMUX_PWR_ON) {
2248 /* start power-down sequence */
2249 pkt = smux_alloc_pkt();
2250 if (pkt) {
Eric Holmbergff0b0112012-06-08 15:06:57 -06002251 SMUX_PWR("%s: Power %d->%d\n", __func__,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002252 smux.power_state,
2253 SMUX_PWR_TURNING_OFF);
2254 smux.power_state = SMUX_PWR_TURNING_OFF;
2255
2256 /* send power-down request */
2257 pkt->hdr.cmd = SMUX_CMD_PWR_CTL;
2258 pkt->hdr.flags = 0;
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002259 pkt->hdr.lcid = SMUX_BROADCAST_LCID;
2260 list_add_tail(&pkt->list,
2261 &smux.power_queue);
2262 queue_work(smux_tx_wq, &smux_tx_work);
2263 } else {
2264 pr_err("%s: packet alloc failed\n",
2265 __func__);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002266 }
2267 }
2268 } else {
2269 SMUX_DBG("%s: link inactive, but powerdown disabled\n",
2270 __func__);
2271 }
2272 }
2273 smux.tx_activity_flag = 0;
2274 smux.rx_activity_flag = 0;
2275
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002276 if (smux.power_state == SMUX_PWR_OFF_FLUSH) {
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002277 /* ready to power-down the UART */
Eric Holmbergff0b0112012-06-08 15:06:57 -06002278 SMUX_PWR("%s: Power %d->%d\n", __func__,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002279 smux.power_state, SMUX_PWR_OFF);
Eric Holmbergff0b0112012-06-08 15:06:57 -06002280 smux.power_state = SMUX_PWR_OFF;
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002281
2282 /* if data is pending, schedule a new wakeup */
2283 if (!list_empty(&smux.lch_tx_ready_list) ||
2284 !list_empty(&smux.power_queue))
2285 queue_work(smux_tx_wq, &smux_tx_work);
2286
2287 spin_unlock(&smux.tx_lock_lha2);
2288 spin_unlock_irqrestore(&smux.rx_lock_lha1, flags);
2289
2290 /* flush UART output queue and power down */
2291 smux_flush_tty();
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002292 smux_uart_power_off();
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002293 } else {
2294 spin_unlock(&smux.tx_lock_lha2);
2295 spin_unlock_irqrestore(&smux.rx_lock_lha1, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002296 }
2297
2298 /* reschedule inactivity worker */
2299 if (smux.power_state != SMUX_PWR_OFF)
2300 queue_delayed_work(smux_tx_wq, &smux_delayed_inactivity_work,
2301 msecs_to_jiffies(SMUX_INACTIVITY_TIMEOUT_MS));
2302}
2303
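/*
 * Power-down sequence (editorial sketch based on this worker and
 * smux_tx_worker; the step into SMUX_PWR_TURNING_OFF_FLUSH is assumed here
 * to be driven by the received power-control response): with no RX/TX
 * activity and power-down enabled, the state goes SMUX_PWR_ON ->
 * SMUX_PWR_TURNING_OFF and a SMUX_CMD_PWR_CTL request is queued.  Once the
 * TX worker sends a SMUX_CMD_PWR_CTL packet carrying SMUX_CMD_PWR_CTL_ACK,
 * the state moves to SMUX_PWR_OFF_FLUSH, and on the next pass of this
 * worker the TTY is flushed, the state becomes SMUX_PWR_OFF and the UART
 * clock is released.
 */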
2304/**
Eric Holmbergb8435c82012-06-05 14:51:29 -06002305 * Remove RX retry packet from channel and free it.
2306 *
2307 * Must be called with state_lock_lhb1 locked.
2308 *
2309 * @ch Channel for retry packet
2310 * @retry Retry packet to remove
2311 */
2312void smux_remove_rx_retry(struct smux_lch_t *ch,
2313 struct smux_rx_pkt_retry *retry)
2314{
2315 list_del(&retry->rx_retry_list);
2316 --ch->rx_retry_queue_cnt;
2317 smux_free_pkt(retry->pkt);
2318 kfree(retry);
2319}
2320
2321/**
Eric Holmberg0560f7a2012-05-31 15:50:26 -06002322 * RX worker handles all receive operations.
2323 *
2324 * @work Work structure contained in struct smux_rx_worker_data
2325 */
2326static void smux_rx_worker(struct work_struct *work)
2327{
2328 unsigned long flags;
2329 int used;
2330 int initial_rx_state;
2331 struct smux_rx_worker_data *w;
2332 const unsigned char *data;
2333 int len;
2334 int flag;
2335
2336 w = container_of(work, struct smux_rx_worker_data, work);
2337 data = w->data;
2338 len = w->len;
2339 flag = w->flag;
2340
2341 spin_lock_irqsave(&smux.rx_lock_lha1, flags);
2342 smux.rx_activity_flag = 1;
2343 spin_unlock_irqrestore(&smux.rx_lock_lha1, flags);
2344
2345 SMUX_DBG("%s: %p, len=%d, flag=%d\n", __func__, data, len, flag);
2346 used = 0;
2347 do {
2348 SMUX_DBG("%s: state %d; %d of %d\n",
2349 __func__, smux.rx_state, used, len);
2350 initial_rx_state = smux.rx_state;
2351
2352 switch (smux.rx_state) {
2353 case SMUX_RX_IDLE:
2354 smux_rx_handle_idle(data, len, &used, flag);
2355 break;
2356 case SMUX_RX_MAGIC:
2357 smux_rx_handle_magic(data, len, &used, flag);
2358 break;
2359 case SMUX_RX_HDR:
2360 smux_rx_handle_hdr(data, len, &used, flag);
2361 break;
2362 case SMUX_RX_PAYLOAD:
2363 smux_rx_handle_pkt_payload(data, len, &used, flag);
2364 break;
2365 default:
2366 SMUX_DBG("%s: invalid state %d\n",
2367 __func__, smux.rx_state);
2368 smux.rx_state = SMUX_RX_IDLE;
2369 break;
2370 }
2371 } while (used < len || smux.rx_state != initial_rx_state);
2372
2373 complete(&w->work_complete);
2374}
2375
2376/**
Eric Holmbergb8435c82012-06-05 14:51:29 -06002377 * RX Retry worker handles retrying get_rx_buffer calls that previously failed
2378 * because the client was not ready (-EAGAIN).
2379 *
2380 * @work Work structure contained in smux_lch_t structure
2381 */
2382static void smux_rx_retry_worker(struct work_struct *work)
2383{
2384 struct smux_lch_t *ch;
2385 struct smux_rx_pkt_retry *retry;
2386 union notifier_metadata metadata;
2387 int tmp;
2388 unsigned long flags;
2389
2390 ch = container_of(work, struct smux_lch_t, rx_retry_work.work);
2391
2392 /* get next retry packet */
2393 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2394 if (ch->local_state != SMUX_LCH_LOCAL_OPENED) {
2395 /* port has been closed - remove all retries */
2396 while (!list_empty(&ch->rx_retry_queue)) {
2397 retry = list_first_entry(&ch->rx_retry_queue,
2398 struct smux_rx_pkt_retry,
2399 rx_retry_list);
2400 smux_remove_rx_retry(ch, retry);
2401 }
2402 }
2403
2404 if (list_empty(&ch->rx_retry_queue)) {
2405 SMUX_DBG("%s: retry list empty for channel %d\n",
2406 __func__, ch->lcid);
2407 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2408 return;
2409 }
2410 retry = list_first_entry(&ch->rx_retry_queue,
2411 struct smux_rx_pkt_retry,
2412 rx_retry_list);
2413 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2414
2415 SMUX_DBG("%s: retrying rx pkt %p\n", __func__, retry);
2416 metadata.read.pkt_priv = 0;
2417 metadata.read.buffer = 0;
2418 tmp = ch->get_rx_buffer(ch->priv,
2419 (void **)&metadata.read.pkt_priv,
2420 (void **)&metadata.read.buffer,
2421 retry->pkt->hdr.payload_len);
2422 if (tmp == 0 && metadata.read.buffer) {
2423 /* have valid RX buffer */
2424 memcpy(metadata.read.buffer, retry->pkt->payload,
2425 retry->pkt->hdr.payload_len);
2426 metadata.read.len = retry->pkt->hdr.payload_len;
2427
2428 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2429 smux_remove_rx_retry(ch, retry);
2430 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2431
2432 schedule_notify(ch->lcid, SMUX_READ_DONE, &metadata);
2433 } else if (tmp == -EAGAIN ||
2434 (tmp == 0 && !metadata.read.buffer)) {
2435 /* retry again */
2436 retry->timeout_in_ms <<= 1;
2437 if (retry->timeout_in_ms > SMUX_RX_RETRY_MAX_MS) {
2438 /* timed out */
2439 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2440 smux_remove_rx_retry(ch, retry);
2441 schedule_notify(ch->lcid, SMUX_READ_FAIL, NULL);
2442 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2443 }
2444 } else {
2445 /* client error - drop packet */
2446 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2447 smux_remove_rx_retry(ch, retry);
2448 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2449
2450 schedule_notify(ch->lcid, SMUX_READ_FAIL, NULL);
2451 }
2452
2453 /* schedule next retry */
2454 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2455 if (!list_empty(&ch->rx_retry_queue)) {
2456 retry = list_first_entry(&ch->rx_retry_queue,
2457 struct smux_rx_pkt_retry,
2458 rx_retry_list);
2459 queue_delayed_work(smux_rx_wq, &ch->rx_retry_work,
2460 msecs_to_jiffies(retry->timeout_in_ms));
2461 }
2462 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2463}
2464
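/*
 * Retry timing example (editorial sketch): a packet whose get_rx_buffer()
 * call returned -EAGAIN is re-attempted with a doubling delay, starting
 * from SMUX_RX_RETRY_MIN_MS (the assumed initial value) and growing 2x per
 * retry; once the delay would exceed SMUX_RX_RETRY_MAX_MS the packet is
 * dropped and the client receives a SMUX_READ_FAIL notification.
 */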
2465/**
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002466 * Transmit worker handles serializing and transmitting packets onto the
2467 * underlying transport.
2468 *
2469 * @work Work structure (not used)
2470 */
2471static void smux_tx_worker(struct work_struct *work)
2472{
2473 struct smux_pkt_t *pkt;
2474 struct smux_lch_t *ch;
2475 unsigned low_wm_notif;
2476 unsigned lcid;
2477 unsigned long flags;
2478
2479
2480 /*
2481 * Transmit packets in round-robin fashion based upon ready
2482 * channels.
2483 *
2484 * To eliminate the need to hold a lock for the entire
2485 * iteration through the channel ready list, the head of the
2486 * ready-channel list is always the next channel to be
2487 * processed. To send a packet, the first valid packet in
2488 * the head channel is removed and the head channel is then
2489 * rescheduled at the end of the queue by removing it and
2490 * inserting after the tail. The locks can then be released
2491 * while the packet is processed.
2492 */
Eric Holmberged1f00c2012-06-07 09:45:18 -06002493 while (!smux.in_reset) {
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002494 pkt = NULL;
2495 low_wm_notif = 0;
2496
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002497 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002498
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002499 /* handle wakeup if needed */
2500 if (smux.power_state == SMUX_PWR_OFF) {
2501 if (!list_empty(&smux.lch_tx_ready_list) ||
2502 !list_empty(&smux.power_queue)) {
2503 /* data to transmit, do wakeup */
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002504 smux.pwr_wakeup_delay_us = 1;
Eric Holmbergff0b0112012-06-08 15:06:57 -06002505 SMUX_PWR("%s: Power %d->%d\n", __func__,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002506 smux.power_state,
2507 SMUX_PWR_TURNING_ON);
2508 smux.power_state = SMUX_PWR_TURNING_ON;
2509 spin_unlock_irqrestore(&smux.tx_lock_lha2,
2510 flags);
2511 smux_uart_power_on();
2512 queue_work(smux_tx_wq, &smux_wakeup_work);
2513 } else {
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002514 /* no activity -- stay asleep */
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002515 spin_unlock_irqrestore(&smux.tx_lock_lha2,
2516 flags);
2517 }
2518 break;
2519 }
2520
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002521 /* process any pending power packets */
2522 if (!list_empty(&smux.power_queue)) {
2523 pkt = list_first_entry(&smux.power_queue,
2524 struct smux_pkt_t, list);
2525 list_del(&pkt->list);
2526 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2527
2528 /* send the packet */
2529 SMUX_LOG_PKT_TX(pkt);
2530 if (!smux_byte_loopback) {
2531 smux_tx_tty(pkt);
2532 smux_flush_tty();
2533 } else {
2534 smux_tx_loopback(pkt);
2535 }
2536
2537 /* Adjust power state if this is a flush command */
2538 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
2539 if (smux.power_state == SMUX_PWR_TURNING_OFF_FLUSH &&
2540 pkt->hdr.cmd == SMUX_CMD_PWR_CTL &&
2541 (pkt->hdr.flags & SMUX_CMD_PWR_CTL_ACK)) {
Eric Holmbergff0b0112012-06-08 15:06:57 -06002542 SMUX_PWR("%s: Power %d->%d\n", __func__,
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002543 smux.power_state,
2544 SMUX_PWR_OFF_FLUSH);
2545 smux.power_state = SMUX_PWR_OFF_FLUSH;
2546 queue_work(smux_tx_wq, &smux_inactivity_work);
2547 }
2548 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2549
2550 smux_free_pkt(pkt);
2551 continue;
2552 }
2553
2554 /* get the next ready channel */
2555 if (list_empty(&smux.lch_tx_ready_list)) {
2556 /* no ready channels */
2557 SMUX_DBG("%s: no more ready channels, exiting\n",
2558 __func__);
2559 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2560 break;
2561 }
2562 smux.tx_activity_flag = 1;
2563
2564 if (smux.power_state != SMUX_PWR_ON) {
2565 /* channel not ready to transmit */
2566 SMUX_DBG("%s: can not tx with power state %d\n",
2567 __func__,
2568 smux.power_state);
2569 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2570 break;
2571 }
2572
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002573 /* get the next packet to send and rotate channel list */
2574 ch = list_first_entry(&smux.lch_tx_ready_list,
2575 struct smux_lch_t,
2576 tx_ready_list);
2577
2578 spin_lock(&ch->state_lock_lhb1);
2579 spin_lock(&ch->tx_lock_lhb2);
2580 if (!list_empty(&ch->tx_queue)) {
2581 /*
2582 * If remote TX flow control is enabled or
2583 * the channel is not fully opened, then only
2584 * send command packets.
2585 */
2586 if (ch->tx_flow_control || !IS_FULLY_OPENED(ch)) {
2587 struct smux_pkt_t *curr;
2588 list_for_each_entry(curr, &ch->tx_queue, list) {
2589 if (curr->hdr.cmd != SMUX_CMD_DATA) {
2590 pkt = curr;
2591 break;
2592 }
2593 }
2594 } else {
2595 /* get next cmd/data packet to send */
2596 pkt = list_first_entry(&ch->tx_queue,
2597 struct smux_pkt_t, list);
2598 }
2599 }
2600
2601 if (pkt) {
2602 list_del(&pkt->list);
2603
2604 /* update packet stats */
2605 if (pkt->hdr.cmd == SMUX_CMD_DATA) {
2606 --ch->tx_pending_data_cnt;
2607 if (ch->notify_lwm &&
2608 ch->tx_pending_data_cnt
2609 <= SMUX_WM_LOW) {
2610 ch->notify_lwm = 0;
2611 low_wm_notif = 1;
2612 }
2613 }
2614
2615 /* advance to the next ready channel */
2616 list_rotate_left(&smux.lch_tx_ready_list);
2617 } else {
2618 /* no data in channel to send, remove from ready list */
2619 list_del(&ch->tx_ready_list);
2620 INIT_LIST_HEAD(&ch->tx_ready_list);
2621 }
2622 lcid = ch->lcid;
2623 spin_unlock(&ch->tx_lock_lhb2);
2624 spin_unlock(&ch->state_lock_lhb1);
2625 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2626
2627 if (low_wm_notif)
2628 schedule_notify(lcid, SMUX_LOW_WM_HIT, NULL);
2629
2630 /* send the packet */
2631 smux_tx_pkt(ch, pkt);
2632 smux_free_pkt(pkt);
2633 }
2634}
2635
2636
2637/**********************************************************************/
2638/* Kernel API */
2639/**********************************************************************/
2640
2641/**
2642 * Set or clear channel option using the SMUX_CH_OPTION_* channel
2643 * flags.
2644 *
2645 * @lcid Logical channel ID
2646 * @set Options to set
2647 * @clear Options to clear
2648 *
2649 * @returns 0 for success, < 0 for failure
2650 */
2651int msm_smux_set_ch_option(uint8_t lcid, uint32_t set, uint32_t clear)
2652{
2653 unsigned long flags;
2654 struct smux_lch_t *ch;
2655 int tx_ready = 0;
2656 int ret = 0;
2657
2658 if (smux_assert_lch_id(lcid))
2659 return -ENXIO;
2660
2661 ch = &smux_lch[lcid];
2662 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2663
2664 /* Local loopback mode */
2665 if (set & SMUX_CH_OPTION_LOCAL_LOOPBACK)
2666 ch->local_mode = SMUX_LCH_MODE_LOCAL_LOOPBACK;
2667
2668 if (clear & SMUX_CH_OPTION_LOCAL_LOOPBACK)
2669 ch->local_mode = SMUX_LCH_MODE_NORMAL;
2670
2671 /* Remote loopback mode */
2672 if (set & SMUX_CH_OPTION_REMOTE_LOOPBACK)
2673 ch->local_mode = SMUX_LCH_MODE_REMOTE_LOOPBACK;
2674
2675 if (clear & SMUX_CH_OPTION_REMOTE_LOOPBACK)
2676 ch->local_mode = SMUX_LCH_MODE_NORMAL;
2677
2678 /* Flow control */
2679 if (set & SMUX_CH_OPTION_REMOTE_TX_STOP) {
2680 ch->local_tiocm |= SMUX_CMD_STATUS_FLOW_CNTL;
2681 ret = smux_send_status_cmd(ch);
2682 tx_ready = 1;
2683 }
2684
2685 if (clear & SMUX_CH_OPTION_REMOTE_TX_STOP) {
2686 ch->local_tiocm &= ~SMUX_CMD_STATUS_FLOW_CNTL;
2687 ret = smux_send_status_cmd(ch);
2688 tx_ready = 1;
2689 }
2690
2691 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2692
2693 if (tx_ready)
2694 list_channel(ch);
2695
2696 return ret;
2697}
2698
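/*
 * Example usage (illustrative only; 'lcid' is assumed to be a channel the
 * caller owns):
 *
 *	ret = msm_smux_set_ch_option(lcid, SMUX_CH_OPTION_LOCAL_LOOPBACK, 0);
 *
 * puts the channel into local loopback for testing, and
 *
 *	ret = msm_smux_set_ch_option(lcid, 0, SMUX_CH_OPTION_LOCAL_LOOPBACK);
 *
 * restores normal routing.  Setting SMUX_CH_OPTION_REMOTE_TX_STOP also
 * queues a status command asking the remote end to stop transmitting.
 */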
2699/**
2700 * Starts the opening sequence for a logical channel.
2701 *
2702 * @lcid Logical channel ID
2703 * @priv Free for client usage
2704 * @notify Event notification function
2705 * @get_rx_buffer Function used to provide a receive buffer to SMUX
2706 *
2707 * @returns 0 for success, <0 otherwise
2708 *
2709 * A channel must be fully closed (either not previously opened or
2710 * msm_smux_close() has been called and the SMUX_DISCONNECTED notification
2711 * has been received).
2712 *
2713 * Once the remote side is opened, the client will receive a SMUX_CONNECTED
2714 * event.
2715 */
2716int msm_smux_open(uint8_t lcid, void *priv,
2717 void (*notify)(void *priv, int event_type, const void *metadata),
2718 int (*get_rx_buffer)(void *priv, void **pkt_priv, void **buffer,
2719 int size))
2720{
2721 int ret;
2722 struct smux_lch_t *ch;
2723 struct smux_pkt_t *pkt;
2724 int tx_ready = 0;
2725 unsigned long flags;
2726
2727 if (smux_assert_lch_id(lcid))
2728 return -ENXIO;
2729
2730 ch = &smux_lch[lcid];
2731 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2732
2733 if (ch->local_state == SMUX_LCH_LOCAL_CLOSING) {
2734 ret = -EAGAIN;
2735 goto out;
2736 }
2737
2738 if (ch->local_state != SMUX_LCH_LOCAL_CLOSED) {
2739 pr_err("%s: open lcid %d local state %x invalid\n",
2740 __func__, lcid, ch->local_state);
2741 ret = -EINVAL;
2742 goto out;
2743 }
2744
2745 SMUX_DBG("lcid %d local state 0x%x -> 0x%x\n", lcid,
2746 ch->local_state,
2747 SMUX_LCH_LOCAL_OPENING);
2748
2749 ch->local_state = SMUX_LCH_LOCAL_OPENING;
2750
2751 ch->priv = priv;
2752 ch->notify = notify;
2753 ch->get_rx_buffer = get_rx_buffer;
2754 ret = 0;
2755
2756 /* Send Open Command */
2757 pkt = smux_alloc_pkt();
2758 if (!pkt) {
2759 ret = -ENOMEM;
2760 goto out;
2761 }
2762 pkt->hdr.magic = SMUX_MAGIC;
2763 pkt->hdr.cmd = SMUX_CMD_OPEN_LCH;
2764 pkt->hdr.flags = SMUX_CMD_OPEN_POWER_COLLAPSE;
2765 if (ch->local_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK)
2766 pkt->hdr.flags |= SMUX_CMD_OPEN_REMOTE_LOOPBACK;
2767 pkt->hdr.lcid = lcid;
2768 pkt->hdr.payload_len = 0;
2769 pkt->hdr.pad_len = 0;
2770 smux_tx_queue(pkt, ch, 0);
2771 tx_ready = 1;
2772
2773out:
2774 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2775 if (tx_ready)
2776 list_channel(ch);
2777 return ret;
2778}
2779
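/*
 * Example client usage (editorial sketch, not part of this driver; the
 * callback names, channel id MY_LCID, and the GFP_ATOMIC allocation policy
 * are illustrative assumptions):
 *
 *	static int my_get_rx_buffer(void *priv, void **pkt_priv,
 *				    void **buffer, int size)
 *	{
 *		*pkt_priv = NULL;
 *		*buffer = kmalloc(size, GFP_ATOMIC);
 *		return *buffer ? 0 : -EAGAIN;	returning -EAGAIN triggers
 *						the RX retry worker above
 *	}
 *
 *	static void my_notify(void *priv, int event, const void *metadata)
 *	{
 *		const union notifier_metadata *meta = metadata;
 *
 *		switch (event) {
 *		case SMUX_CONNECTED:
 *			break;			channel fully open, writes flow
 *		case SMUX_READ_DONE:
 *			kfree(meta->read.buffer);
 *			break;
 *		default:
 *			break;
 *		}
 *	}
 *
 *	ret = msm_smux_open(MY_LCID, my_ctx, my_notify, my_get_rx_buffer);
 *
 * A non-zero return means the open was not started; on success the
 * SMUX_CONNECTED event arrives asynchronously once the remote side opens.
 */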
2780/**
2781 * Starts the closing sequence for a logical channel.
2782 *
2783 * @lcid Logical channel ID
2784 *
2785 * @returns 0 for success, <0 otherwise
2786 *
2787 * Once the close event has been acknowledged by the remote side, the client
2788 * will receive a SMUX_DISCONNECTED notification.
2789 */
2790int msm_smux_close(uint8_t lcid)
2791{
2792 int ret = 0;
2793 struct smux_lch_t *ch;
2794 struct smux_pkt_t *pkt;
2795 int tx_ready = 0;
2796 unsigned long flags;
2797
2798 if (smux_assert_lch_id(lcid))
2799 return -ENXIO;
2800
2801 ch = &smux_lch[lcid];
2802 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2803 ch->local_tiocm = 0x0;
2804 ch->remote_tiocm = 0x0;
2805 ch->tx_pending_data_cnt = 0;
2806 ch->notify_lwm = 0;
2807
2808 /* Purge TX queue */
2809 spin_lock(&ch->tx_lock_lhb2);
Eric Holmberged1f00c2012-06-07 09:45:18 -06002810 smux_purge_ch_tx_queue(ch);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002811 spin_unlock(&ch->tx_lock_lhb2);
2812
2813 /* Send Close Command */
2814 if (ch->local_state == SMUX_LCH_LOCAL_OPENED ||
2815 ch->local_state == SMUX_LCH_LOCAL_OPENING) {
2816 SMUX_DBG("lcid %d local state 0x%x -> 0x%x\n", lcid,
2817 ch->local_state,
2818 SMUX_LCH_LOCAL_CLOSING);
2819
2820 ch->local_state = SMUX_LCH_LOCAL_CLOSING;
2821 pkt = smux_alloc_pkt();
2822 if (pkt) {
2823 pkt->hdr.cmd = SMUX_CMD_CLOSE_LCH;
2824 pkt->hdr.flags = 0;
2825 pkt->hdr.lcid = lcid;
2826 pkt->hdr.payload_len = 0;
2827 pkt->hdr.pad_len = 0;
2828 smux_tx_queue(pkt, ch, 0);
2829 tx_ready = 1;
2830 } else {
2831 pr_err("%s: pkt allocation failed\n", __func__);
2832 ret = -ENOMEM;
2833 }
Eric Holmbergb8435c82012-06-05 14:51:29 -06002834
2835 /* Purge RX retry queue */
2836 if (ch->rx_retry_queue_cnt)
2837 queue_delayed_work(smux_rx_wq, &ch->rx_retry_work, 0);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002838 }
2839 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2840
2841 if (tx_ready)
2842 list_channel(ch);
2843
2844 return ret;
2845}
2846
2847/**
2848 * Write data to a logical channel.
2849 *
2850 * @lcid Logical channel ID
2851 * @pkt_priv Client data that will be returned with the SMUX_WRITE_DONE or
2852 * SMUX_WRITE_FAIL notification.
2853 * @data Data to write
2854 * @len Length of @data
2855 *
2856 * @returns 0 for success, <0 otherwise
2857 *
2858 * Data may be written immediately after msm_smux_open() is called,
2859 * but the data will wait in the transmit queue until the channel has
2860 * been fully opened.
2861 *
2862 * Once the data has been written, the client will receive either a completion
2863 * (SMUX_WRITE_DONE) or a failure notice (SMUX_WRITE_FAIL).
2864 */
2865int msm_smux_write(uint8_t lcid, void *pkt_priv, const void *data, int len)
2866{
2867 struct smux_lch_t *ch;
2868	struct smux_pkt_t *pkt = NULL;
2869 int tx_ready = 0;
2870 unsigned long flags;
2871 int ret;
2872
2873 if (smux_assert_lch_id(lcid))
2874 return -ENXIO;
2875
2876 ch = &smux_lch[lcid];
2877 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2878
2879 if (ch->local_state != SMUX_LCH_LOCAL_OPENED &&
2880 ch->local_state != SMUX_LCH_LOCAL_OPENING) {
2881		pr_err("%s: invalid local state %d channel %d\n",
2882 __func__, ch->local_state, lcid);
2883 ret = -EINVAL;
2884 goto out;
2885 }
2886
2887 if (len > SMUX_MAX_PKT_SIZE - sizeof(struct smux_hdr_t)) {
2888 pr_err("%s: payload %d too large\n",
2889 __func__, len);
2890 ret = -E2BIG;
2891 goto out;
2892 }
2893
2894 pkt = smux_alloc_pkt();
2895 if (!pkt) {
2896 ret = -ENOMEM;
2897 goto out;
2898 }
2899
2900 pkt->hdr.cmd = SMUX_CMD_DATA;
2901 pkt->hdr.lcid = lcid;
2902 pkt->hdr.flags = 0;
2903 pkt->hdr.payload_len = len;
2904 pkt->payload = (void *)data;
2905 pkt->priv = pkt_priv;
2906 pkt->hdr.pad_len = 0;
2907
2908 spin_lock(&ch->tx_lock_lhb2);
2909 /* verify high watermark */
2910	SMUX_DBG("%s: pending %d\n", __func__, ch->tx_pending_data_cnt);
2911
2912 if (ch->tx_pending_data_cnt >= SMUX_WM_HIGH) {
2913 pr_err("%s: ch %d high watermark %d exceeded %d\n",
2914 __func__, lcid, SMUX_WM_HIGH,
2915 ch->tx_pending_data_cnt);
2916 ret = -EAGAIN;
2917 goto out_inner;
2918 }
2919
2920 /* queue packet for transmit */
2921 if (++ch->tx_pending_data_cnt == SMUX_WM_HIGH) {
2922 ch->notify_lwm = 1;
2923 pr_err("%s: high watermark hit\n", __func__);
2924 schedule_notify(lcid, SMUX_HIGH_WM_HIT, NULL);
2925 }
2926 list_add_tail(&pkt->list, &ch->tx_queue);
2927
2928 /* add to ready list */
2929 if (IS_FULLY_OPENED(ch))
2930 tx_ready = 1;
2931
2932 ret = 0;
2933
2934out_inner:
2935 spin_unlock(&ch->tx_lock_lhb2);
2936
2937out:
2938	if (ret && pkt)
2939		smux_free_pkt(pkt);
2940 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2941
2942 if (tx_ready)
2943 list_channel(ch);
2944
2945 return ret;
2946}
2947
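/*
 * Example write sequence (editorial sketch; MY_LCID, my_buf and my_len are
 * placeholders, and my_buf must stay valid until the matching
 * SMUX_WRITE_DONE or SMUX_WRITE_FAIL notification arrives, because the
 * driver queues the caller's buffer rather than copying it):
 *
 *	if (!msm_smux_is_ch_full(MY_LCID)) {
 *		ret = msm_smux_write(MY_LCID, my_buf, my_buf, my_len);
 *		if (ret == -EAGAIN)
 *			;	high watermark hit; wait for SMUX_LOW_WM_HIT
 *	}
 *
 * msm_smux_is_ch_full() is defined just below.  The pkt_priv value (here
 * the buffer pointer itself) is echoed back in the write notification
 * metadata so the client can identify which buffer completed.
 */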
2948/**
2949 * Returns true if the TX queue is currently full (high water mark).
2950 *
2951 * @lcid Logical channel ID
2952 * @returns 0 if channel is not full
2953 * 1 if it is full
2954 * < 0 for error
2955 */
2956int msm_smux_is_ch_full(uint8_t lcid)
2957{
2958 struct smux_lch_t *ch;
2959 unsigned long flags;
2960 int is_full = 0;
2961
2962 if (smux_assert_lch_id(lcid))
2963 return -ENXIO;
2964
2965 ch = &smux_lch[lcid];
2966
2967 spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
2968 if (ch->tx_pending_data_cnt >= SMUX_WM_HIGH)
2969 is_full = 1;
2970 spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);
2971
2972 return is_full;
2973}
2974
2975/**
2976 * Returns true if the TX queue has space for more packets (it is at or
2977 * below the low water mark).
2978 *
2979 * @lcid Logical channel ID
2980 * @returns 0 if channel is above low watermark
2981 * 1 if it's at or below the low watermark
2982 * < 0 for error
2983 */
2984int msm_smux_is_ch_low(uint8_t lcid)
2985{
2986 struct smux_lch_t *ch;
2987 unsigned long flags;
2988 int is_low = 0;
2989
2990 if (smux_assert_lch_id(lcid))
2991 return -ENXIO;
2992
2993 ch = &smux_lch[lcid];
2994
2995 spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
2996 if (ch->tx_pending_data_cnt <= SMUX_WM_LOW)
2997 is_low = 1;
2998 spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);
2999
3000 return is_low;
3001}
3002
3003/**
3004 * Send TIOCM status update.
3005 *
3006 * @ch Channel for update
3007 *
3008 * @returns 0 for success, <0 for failure
3009 *
3010 * Channel lock must be held before calling.
3011 */
3012static int smux_send_status_cmd(struct smux_lch_t *ch)
3013{
3014 struct smux_pkt_t *pkt;
3015
3016 if (!ch)
3017 return -EINVAL;
3018
3019 pkt = smux_alloc_pkt();
3020 if (!pkt)
3021 return -ENOMEM;
3022
3023 pkt->hdr.lcid = ch->lcid;
3024 pkt->hdr.cmd = SMUX_CMD_STATUS;
3025 pkt->hdr.flags = ch->local_tiocm;
3026 pkt->hdr.payload_len = 0;
3027 pkt->hdr.pad_len = 0;
3028 smux_tx_queue(pkt, ch, 0);
3029
3030 return 0;
3031}
3032
3033/**
3034 * Internal helper function for getting the TIOCM status with
3035 * state_lock_lhb1 already locked.
3036 *
3037 * @ch Channel pointer
3038 *
3039 * @returns TIOCM status
3040 */
3041static long msm_smux_tiocm_get_atomic(struct smux_lch_t *ch)
3042{
3043 long status = 0x0;
3044
3045 status |= (ch->remote_tiocm & SMUX_CMD_STATUS_RTC) ? TIOCM_DSR : 0;
3046 status |= (ch->remote_tiocm & SMUX_CMD_STATUS_RTR) ? TIOCM_CTS : 0;
3047 status |= (ch->remote_tiocm & SMUX_CMD_STATUS_RI) ? TIOCM_RI : 0;
3048 status |= (ch->remote_tiocm & SMUX_CMD_STATUS_DCD) ? TIOCM_CD : 0;
3049
3050 status |= (ch->local_tiocm & SMUX_CMD_STATUS_RTC) ? TIOCM_DTR : 0;
3051 status |= (ch->local_tiocm & SMUX_CMD_STATUS_RTR) ? TIOCM_RTS : 0;
3052
3053 return status;
3054}
3055
3056/**
3057 * Get the TIOCM status bits.
3058 *
3059 * @lcid Logical channel ID
3060 *
3061 * @returns >= 0 TIOCM status bits
3062 * < 0 Error condition
3063 */
3064long msm_smux_tiocm_get(uint8_t lcid)
3065{
3066 struct smux_lch_t *ch;
3067 unsigned long flags;
3068 long status = 0x0;
3069
3070 if (smux_assert_lch_id(lcid))
3071 return -ENXIO;
3072
3073 ch = &smux_lch[lcid];
3074 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
3075 status = msm_smux_tiocm_get_atomic(ch);
3076 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
3077
3078 return status;
3079}
3080
3081/**
3082 * Set/clear the TIOCM status bits.
3083 *
3084 * @lcid Logical channel ID
3085 * @set Bits to set
3086 * @clear Bits to clear
3087 *
3088 * @returns 0 for success; < 0 for failure
3089 *
3090 * If a bit is specified in both the @set and @clear masks, then the clear bit
3091 * definition will dominate and the bit will be cleared.
3092 */
3093int msm_smux_tiocm_set(uint8_t lcid, uint32_t set, uint32_t clear)
3094{
3095 struct smux_lch_t *ch;
3096 unsigned long flags;
3097 uint8_t old_status;
3098 uint8_t status_set = 0x0;
3099 uint8_t status_clear = 0x0;
3100 int tx_ready = 0;
3101 int ret = 0;
3102
3103 if (smux_assert_lch_id(lcid))
3104 return -ENXIO;
3105
3106 ch = &smux_lch[lcid];
3107 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
3108
3109 status_set |= (set & TIOCM_DTR) ? SMUX_CMD_STATUS_RTC : 0;
3110 status_set |= (set & TIOCM_RTS) ? SMUX_CMD_STATUS_RTR : 0;
3111 status_set |= (set & TIOCM_RI) ? SMUX_CMD_STATUS_RI : 0;
3112 status_set |= (set & TIOCM_CD) ? SMUX_CMD_STATUS_DCD : 0;
3113
3114 status_clear |= (clear & TIOCM_DTR) ? SMUX_CMD_STATUS_RTC : 0;
3115 status_clear |= (clear & TIOCM_RTS) ? SMUX_CMD_STATUS_RTR : 0;
3116 status_clear |= (clear & TIOCM_RI) ? SMUX_CMD_STATUS_RI : 0;
3117 status_clear |= (clear & TIOCM_CD) ? SMUX_CMD_STATUS_DCD : 0;
3118
3119 old_status = ch->local_tiocm;
3120 ch->local_tiocm |= status_set;
3121 ch->local_tiocm &= ~status_clear;
3122
3123 if (ch->local_tiocm != old_status) {
3124 ret = smux_send_status_cmd(ch);
3125 tx_ready = 1;
3126 }
3127 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
3128
3129 if (tx_ready)
3130 list_channel(ch);
3131
3132 return ret;
3133}
3134
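/*
 * Example (illustrative only; MY_LCID is a placeholder): asserting DTR and
 * RTS on a channel and reading back the remote status bits:
 *
 *	ret = msm_smux_tiocm_set(MY_LCID, TIOCM_DTR | TIOCM_RTS, 0);
 *	status = msm_smux_tiocm_get(MY_LCID);
 *	if (status & TIOCM_CTS)
 *		;	remote end is ready to receive
 *
 * Locally asserted DTR/RTS are reported by msm_smux_tiocm_get() as
 * TIOCM_DTR/TIOCM_RTS, while the remote side's RTC/RTR bits show up as
 * TIOCM_DSR/TIOCM_CTS.
 */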
3135/**********************************************************************/
Eric Holmberged1f00c2012-06-07 09:45:18 -06003136/* Subsystem Restart */
3137/**********************************************************************/
3138static struct notifier_block ssr_notifier = {
3139 .notifier_call = ssr_notifier_cb,
3140};
3141
3142/**
3143 * Handle Subsystem Restart (SSR) notifications.
3144 *
3145 * @this Pointer to ssr_notifier
3146 * @code SSR Code
3147 * @data Data pointer (not used)
3148 */
3149static int ssr_notifier_cb(struct notifier_block *this,
3150 unsigned long code,
3151 void *data)
3152{
3153 unsigned long flags;
3154 int power_off_uart = 0;
3155
Eric Holmbergd2697902012-06-15 09:58:46 -06003156 if (code == SUBSYS_BEFORE_SHUTDOWN) {
3157 SMUX_DBG("%s: ssr - before shutdown\n", __func__);
3158 mutex_lock(&smux.mutex_lha0);
3159 smux.in_reset = 1;
3160 mutex_unlock(&smux.mutex_lha0);
Eric Holmberged1f00c2012-06-07 09:45:18 -06003161 return NOTIFY_DONE;
Eric Holmbergd2697902012-06-15 09:58:46 -06003162 } else if (code != SUBSYS_AFTER_SHUTDOWN) {
3163 return NOTIFY_DONE;
3164 }
3165 SMUX_DBG("%s: ssr - after shutdown\n", __func__);
Eric Holmberged1f00c2012-06-07 09:45:18 -06003166
3167 /* Cleanup channels */
Eric Holmbergd2697902012-06-15 09:58:46 -06003168 mutex_lock(&smux.mutex_lha0);
Eric Holmberged1f00c2012-06-07 09:45:18 -06003169 smux_lch_purge();
Eric Holmbergd2697902012-06-15 09:58:46 -06003170 if (smux.tty)
3171 tty_driver_flush_buffer(smux.tty);
Eric Holmberged1f00c2012-06-07 09:45:18 -06003172
3173 /* Power-down UART */
3174 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
3175 if (smux.power_state != SMUX_PWR_OFF) {
Eric Holmbergff0b0112012-06-08 15:06:57 -06003176 SMUX_PWR("%s: SSR - turning off UART\n", __func__);
Eric Holmberged1f00c2012-06-07 09:45:18 -06003177 smux.power_state = SMUX_PWR_OFF;
3178 power_off_uart = 1;
3179 }
Eric Holmbergd2697902012-06-15 09:58:46 -06003180 smux.powerdown_enabled = 0;
Eric Holmberged1f00c2012-06-07 09:45:18 -06003181 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
3182
3183 if (power_off_uart)
3184 smux_uart_power_off();
3185
Eric Holmbergd2697902012-06-15 09:58:46 -06003186 smux.in_reset = 0;
3187 mutex_unlock(&smux.mutex_lha0);
3188
Eric Holmberged1f00c2012-06-07 09:45:18 -06003189 return NOTIFY_DONE;
3190}
3191
3192/**********************************************************************/
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003193/* Line Discipline Interface */
3194/**********************************************************************/
Eric Holmberged1f00c2012-06-07 09:45:18 -06003195static void smux_pdev_release(struct device *dev)
3196{
3197 struct platform_device *pdev;
3198
3199 pdev = container_of(dev, struct platform_device, dev);
3200 SMUX_DBG("%s: releasing pdev %p '%s'\n", __func__, pdev, pdev->name);
3201 memset(&pdev->dev, 0x0, sizeof(pdev->dev));
3202}
3203
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003204static int smuxld_open(struct tty_struct *tty)
3205{
3206 int i;
3207 int tmp;
3208 unsigned long flags;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003209
3210 if (!smux.is_initialized)
3211 return -ENODEV;
3212
Eric Holmberged1f00c2012-06-07 09:45:18 -06003213 mutex_lock(&smux.mutex_lha0);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003214 if (smux.ld_open_count) {
3215 pr_err("%s: %p multiple instances not supported\n",
3216 __func__, tty);
Eric Holmberged1f00c2012-06-07 09:45:18 -06003217 mutex_unlock(&smux.mutex_lha0);
Eric Holmberg902c51e2012-05-29 12:12:16 -06003218 return -EEXIST;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003219 }
3220
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003221 if (tty->ops->write == NULL) {
Eric Holmberged1f00c2012-06-07 09:45:18 -06003222 pr_err("%s: tty->ops->write already NULL\n", __func__);
3223 mutex_unlock(&smux.mutex_lha0);
Eric Holmberg902c51e2012-05-29 12:12:16 -06003224 return -EINVAL;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003225 }
3226
3227 /* connect to TTY */
Eric Holmberged1f00c2012-06-07 09:45:18 -06003228 ++smux.ld_open_count;
3229 smux.in_reset = 0;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003230 smux.tty = tty;
3231 tty->disc_data = &smux;
3232 tty->receive_room = TTY_RECEIVE_ROOM;
3233 tty_driver_flush_buffer(tty);
3234
3235 /* power-down the UART if we are idle */
Eric Holmberged1f00c2012-06-07 09:45:18 -06003236 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003237 if (smux.power_state == SMUX_PWR_OFF) {
Eric Holmbergff0b0112012-06-08 15:06:57 -06003238 SMUX_PWR("%s: powering off uart\n", __func__);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003239 smux.power_state = SMUX_PWR_OFF_FLUSH;
Eric Holmberged1f00c2012-06-07 09:45:18 -06003240 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003241 queue_work(smux_tx_wq, &smux_inactivity_work);
3242 } else {
Eric Holmberged1f00c2012-06-07 09:45:18 -06003243 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003244 }
3245
3246 /* register platform devices */
3247 for (i = 0; i < ARRAY_SIZE(smux_devs); ++i) {
Eric Holmberged1f00c2012-06-07 09:45:18 -06003248 SMUX_DBG("%s: register pdev '%s'\n",
3249 __func__, smux_devs[i].name);
3250 smux_devs[i].dev.release = smux_pdev_release;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003251 tmp = platform_device_register(&smux_devs[i]);
3252 if (tmp)
3253 pr_err("%s: error %d registering device %s\n",
3254 __func__, tmp, smux_devs[i].name);
3255 }
Eric Holmberged1f00c2012-06-07 09:45:18 -06003256 mutex_unlock(&smux.mutex_lha0);
Eric Holmberg902c51e2012-05-29 12:12:16 -06003257 return 0;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003258}
3259
3260static void smuxld_close(struct tty_struct *tty)
3261{
3262 unsigned long flags;
Eric Holmberged1f00c2012-06-07 09:45:18 -06003263 int power_up_uart = 0;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003264 int i;
3265
Eric Holmberged1f00c2012-06-07 09:45:18 -06003266 SMUX_DBG("%s: ldisc unload\n", __func__);
3267 mutex_lock(&smux.mutex_lha0);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003268 if (smux.ld_open_count <= 0) {
3269 pr_err("%s: invalid ld count %d\n", __func__,
3270 smux.ld_open_count);
Eric Holmberged1f00c2012-06-07 09:45:18 -06003271 mutex_unlock(&smux.mutex_lha0);
Eric Holmberg902c51e2012-05-29 12:12:16 -06003272 return;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003273 }
Eric Holmberged1f00c2012-06-07 09:45:18 -06003274 smux.in_reset = 1;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003275 --smux.ld_open_count;
Eric Holmberged1f00c2012-06-07 09:45:18 -06003276
3277 /* Cleanup channels */
3278 smux_lch_purge();
3279
3280 /* Unregister platform devices */
3281 for (i = 0; i < ARRAY_SIZE(smux_devs); ++i) {
3282 SMUX_DBG("%s: unregister pdev '%s'\n",
3283 __func__, smux_devs[i].name);
3284 platform_device_unregister(&smux_devs[i]);
3285 }
3286
3287 /* Schedule UART power-up if it's down */
3288 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
Eric Holmbergffddd4c2012-06-08 12:37:51 -06003289 if (smux.power_state == SMUX_PWR_OFF)
Eric Holmberged1f00c2012-06-07 09:45:18 -06003290 power_up_uart = 1;
Eric Holmbergffddd4c2012-06-08 12:37:51 -06003291 smux.power_state = SMUX_PWR_OFF;
Eric Holmbergd2697902012-06-15 09:58:46 -06003292 smux.powerdown_enabled = 0;
Eric Holmberged1f00c2012-06-07 09:45:18 -06003293 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
3294
3295 if (power_up_uart)
Eric Holmberg92a67df2012-06-25 13:56:24 -06003296 smux_uart_power_on_atomic();
Eric Holmberged1f00c2012-06-07 09:45:18 -06003297
3298 /* Disconnect from TTY */
3299 smux.tty = NULL;
3300 mutex_unlock(&smux.mutex_lha0);
3301 SMUX_DBG("%s: ldisc complete\n", __func__);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003302}
3303
3304/**
3305 * Receive data from TTY Line Discipline.
3306 *
3307 * @tty TTY structure
3308 * @cp Character data
3309 * @fp Flag data
3310 * @count Size of character and flag data
3311 */
3312void smuxld_receive_buf(struct tty_struct *tty, const unsigned char *cp,
3313 char *fp, int count)
3314{
3315 int i;
3316 int last_idx = 0;
3317 const char *tty_name = NULL;
3318 char *f;
3319
3320 if (smux_debug_mask & MSM_SMUX_DEBUG)
3321 print_hex_dump(KERN_INFO, "smux tty rx: ", DUMP_PREFIX_OFFSET,
3322 16, 1, cp, count, true);
3323
3324 /* verify error flags */
3325 for (i = 0, f = fp; i < count; ++i, ++f) {
3326 if (*f != TTY_NORMAL) {
3327 if (tty)
3328 tty_name = tty->name;
3329 pr_err("%s: TTY %s Error %d (%s)\n", __func__,
3330 tty_name, *f, tty_flag_to_str(*f));
3331
3332 /* feed all previous valid data to the parser */
3333 smux_rx_state_machine(cp + last_idx, i - last_idx,
3334 TTY_NORMAL);
3335
3336 /* feed bad data to parser */
3337 smux_rx_state_machine(cp + i, 1, *f);
3338 last_idx = i + 1;
3339 }
3340 }
3341
3342 /* feed data to RX state machine */
3343 smux_rx_state_machine(cp + last_idx, count - last_idx, TTY_NORMAL);
3344}
3345
3346static void smuxld_flush_buffer(struct tty_struct *tty)
3347{
3348 pr_err("%s: not supported\n", __func__);
3349}
3350
3351static ssize_t smuxld_chars_in_buffer(struct tty_struct *tty)
3352{
3353 pr_err("%s: not supported\n", __func__);
3354 return -ENODEV;
3355}
3356
3357static ssize_t smuxld_read(struct tty_struct *tty, struct file *file,
3358 unsigned char __user *buf, size_t nr)
3359{
3360 pr_err("%s: not supported\n", __func__);
3361 return -ENODEV;
3362}
3363
3364static ssize_t smuxld_write(struct tty_struct *tty, struct file *file,
3365 const unsigned char *buf, size_t nr)
3366{
3367 pr_err("%s: not supported\n", __func__);
3368 return -ENODEV;
3369}
3370
3371static int smuxld_ioctl(struct tty_struct *tty, struct file *file,
3372 unsigned int cmd, unsigned long arg)
3373{
3374 pr_err("%s: not supported\n", __func__);
3375 return -ENODEV;
3376}
3377
3378static unsigned int smuxld_poll(struct tty_struct *tty, struct file *file,
3379 struct poll_table_struct *tbl)
3380{
3381 pr_err("%s: not supported\n", __func__);
3382 return -ENODEV;
3383}
3384
3385static void smuxld_write_wakeup(struct tty_struct *tty)
3386{
3387 pr_err("%s: not supported\n", __func__);
3388}
3389
3390static struct tty_ldisc_ops smux_ldisc_ops = {
3391 .owner = THIS_MODULE,
3392 .magic = TTY_LDISC_MAGIC,
3393 .name = "n_smux",
3394 .open = smuxld_open,
3395 .close = smuxld_close,
3396 .flush_buffer = smuxld_flush_buffer,
3397 .chars_in_buffer = smuxld_chars_in_buffer,
3398 .read = smuxld_read,
3399 .write = smuxld_write,
3400 .ioctl = smuxld_ioctl,
3401 .poll = smuxld_poll,
3402 .receive_buf = smuxld_receive_buf,
3403 .write_wakeup = smuxld_write_wakeup
3404};
3405
3406static int __init smux_init(void)
3407{
3408 int ret;
3409
Eric Holmberged1f00c2012-06-07 09:45:18 -06003410 mutex_init(&smux.mutex_lha0);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003411
3412 spin_lock_init(&smux.rx_lock_lha1);
3413 smux.rx_state = SMUX_RX_IDLE;
3414 smux.power_state = SMUX_PWR_OFF;
3415 smux.pwr_wakeup_delay_us = 1;
3416 smux.powerdown_enabled = 0;
Eric Holmbergffddd4c2012-06-08 12:37:51 -06003417 INIT_LIST_HEAD(&smux.power_queue);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003418 smux.rx_activity_flag = 0;
3419 smux.tx_activity_flag = 0;
3420 smux.recv_len = 0;
3421 smux.tty = NULL;
3422 smux.ld_open_count = 0;
3423 smux.in_reset = 0;
3424 smux.is_initialized = 1;
3425 smux_byte_loopback = 0;
3426
3427 spin_lock_init(&smux.tx_lock_lha2);
3428 INIT_LIST_HEAD(&smux.lch_tx_ready_list);
3429
3430 ret = tty_register_ldisc(N_SMUX, &smux_ldisc_ops);
3431 if (ret != 0) {
3432 pr_err("%s: error %d registering line discipline\n",
3433 __func__, ret);
3434 return ret;
3435 }
3436
Eric Holmberg6c9f2a52012-06-14 10:49:04 -06003437 subsys_notif_register_notifier("external_modem", &ssr_notifier);
Eric Holmberged1f00c2012-06-07 09:45:18 -06003438
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003439 ret = lch_init();
3440 if (ret != 0) {
3441 pr_err("%s: lch_init failed\n", __func__);
3442 return ret;
3443 }
3444
3445 return 0;
3446}
3447
3448static void __exit smux_exit(void)
3449{
3450 int ret;
3451
3452 ret = tty_unregister_ldisc(N_SMUX);
3453 if (ret != 0) {
3454 pr_err("%s error %d unregistering line discipline\n",
3455 __func__, ret);
3456 return;
3457 }
3458}
3459
3460module_init(smux_init);
3461module_exit(smux_exit);
3462
3463MODULE_DESCRIPTION("Serial Mux TTY Line Discipline");
3464MODULE_LICENSE("GPL v2");
3465MODULE_ALIAS_LDISC(N_SMUX);