/* drivers/tty/n_smux.c
 *
 * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/tty_driver.h>
#include <linux/smux.h>
#include <linux/list.h>
#include <linux/kfifo.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <mach/subsystem_notif.h>
#include <mach/subsystem_restart.h>
#include <mach/msm_serial_hs.h>
#include "smux_private.h"
#include "smux_loopback.h"

#define SMUX_NOTIFY_FIFO_SIZE	128
#define SMUX_TX_QUEUE_SIZE	256
#define SMUX_PKT_LOG_SIZE	128

/* Maximum size we can accept in a single RX buffer */
#define TTY_RECEIVE_ROOM	65536
#define TTY_BUFFER_FULL_WAIT_MS	50

/* maximum sleep time between wakeup attempts */
#define SMUX_WAKEUP_DELAY_MAX	(1 << 20)

/* minimum delay for scheduling delayed work */
#define SMUX_WAKEUP_DELAY_MIN	(1 << 15)

/* inactivity timeout for no rx/tx activity */
#define SMUX_INACTIVITY_TIMEOUT_MS	1000000

/* RX get_rx_buffer retry timeout values */
#define SMUX_RX_RETRY_MIN_MS	(1 << 0)  /* 1 ms */
#define SMUX_RX_RETRY_MAX_MS	(1 << 10) /* 1024 ms */

enum {
	MSM_SMUX_DEBUG = 1U << 0,
	MSM_SMUX_INFO = 1U << 1,
	MSM_SMUX_POWER_INFO = 1U << 2,
	MSM_SMUX_PKT = 1U << 3,
};

static int smux_debug_mask;
module_param_named(debug_mask, smux_debug_mask,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);

/* Simulated wakeup used for testing */
int smux_byte_loopback;
module_param_named(byte_loopback, smux_byte_loopback,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);
int smux_simulate_wakeup_delay = 1;
module_param_named(simulate_wakeup_delay, smux_simulate_wakeup_delay,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);
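
/*
 * Example (a sketch; the sysfs path assumes this file builds as the
 * n_smux module):
 *
 *	echo 0xf > /sys/module/n_smux/parameters/debug_mask
 *
 * enables all four log classes above and routes the SMUX_*() logging
 * macros below to pr_info().
 */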

#define SMUX_DBG(x...) do {                              \
	if (smux_debug_mask & MSM_SMUX_DEBUG)            \
		pr_info(x);                              \
} while (0)

#define SMUX_PWR(x...) do {                              \
	if (smux_debug_mask & MSM_SMUX_POWER_INFO)       \
		pr_info(x);                              \
} while (0)

#define SMUX_PWR_PKT_RX(pkt) do {                        \
	if (smux_debug_mask & MSM_SMUX_POWER_INFO)       \
		smux_log_pkt(pkt, 1);                    \
} while (0)

#define SMUX_PWR_PKT_TX(pkt) do {                            \
	if (smux_debug_mask & MSM_SMUX_POWER_INFO) {         \
		if (pkt->hdr.cmd == SMUX_CMD_BYTE &&         \
			pkt->hdr.flags == SMUX_WAKEUP_ACK)   \
			pr_info("smux: TX Wakeup ACK\n");    \
		else if (pkt->hdr.cmd == SMUX_CMD_BYTE &&    \
			pkt->hdr.flags == SMUX_WAKEUP_REQ)   \
			pr_info("smux: TX Wakeup REQ\n");    \
		else                                         \
			smux_log_pkt(pkt, 0);                \
	}                                                    \
} while (0)

#define SMUX_PWR_BYTE_TX(pkt) do {                       \
	if (smux_debug_mask & MSM_SMUX_POWER_INFO) {     \
		smux_log_pkt(pkt, 0);                    \
	}                                                \
} while (0)

#define SMUX_LOG_PKT_RX(pkt) do {                        \
	if (smux_debug_mask & MSM_SMUX_PKT)              \
		smux_log_pkt(pkt, 1);                    \
} while (0)

#define SMUX_LOG_PKT_TX(pkt) do {                        \
	if (smux_debug_mask & MSM_SMUX_PKT)              \
		smux_log_pkt(pkt, 0);                    \
} while (0)

/**
 * Return true if channel is fully opened (both
 * local and remote sides are in the OPENED state).
 */
#define IS_FULLY_OPENED(ch) \
	(ch && (ch)->local_state == SMUX_LCH_LOCAL_OPENED \
	 && (ch)->remote_state == SMUX_LCH_REMOTE_OPENED)

static struct platform_device smux_devs[] = {
	{.name = "SMUX_CTL", .id = -1},
	{.name = "SMUX_RMNET", .id = -1},
	{.name = "SMUX_DUN_DATA_HSUART", .id = 0},
	{.name = "SMUX_RMNET_DATA_HSUART", .id = 1},
	{.name = "SMUX_RMNET_CTL_HSUART", .id = 0},
	{.name = "SMUX_DIAG", .id = -1},
};

enum {
	SMUX_CMD_STATUS_RTC = 1 << 0,
	SMUX_CMD_STATUS_RTR = 1 << 1,
	SMUX_CMD_STATUS_RI = 1 << 2,
	SMUX_CMD_STATUS_DCD = 1 << 3,
	SMUX_CMD_STATUS_FLOW_CNTL = 1 << 4,
};

/* Channel mode */
enum {
	SMUX_LCH_MODE_NORMAL,
	SMUX_LCH_MODE_LOCAL_LOOPBACK,
	SMUX_LCH_MODE_REMOTE_LOOPBACK,
};

enum {
	SMUX_RX_IDLE,
	SMUX_RX_MAGIC,
	SMUX_RX_HDR,
	SMUX_RX_PAYLOAD,
	SMUX_RX_FAILURE,
};

/**
 * Power states.
 *
 * The _FLUSH states are internal transitional states and are not part of the
 * official state machine.
 */
enum {
	SMUX_PWR_OFF,
	SMUX_PWR_TURNING_ON,
	SMUX_PWR_ON,
	SMUX_PWR_TURNING_OFF_FLUSH, /* power-off req/ack in TX queue */
	SMUX_PWR_TURNING_OFF,
	SMUX_PWR_OFF_FLUSH,
};
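
/*
 * Summary sketch of the transitions driven by the wakeup and power-control
 * handlers in this file (OFF_FLUSH -> OFF is completed by the inactivity
 * worker, which is outside this excerpt, so that last step is an
 * assumption):
 *
 *	SMUX_PWR_OFF -> SMUX_PWR_TURNING_ON -> SMUX_PWR_ON
 *	SMUX_PWR_ON  -> SMUX_PWR_TURNING_OFF_FLUSH -> SMUX_PWR_TURNING_OFF
 *	             -> SMUX_PWR_OFF_FLUSH -> SMUX_PWR_OFF
 */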

/**
 * Logical Channel Structure.  One instance per channel.
 *
 * Locking Hierarchy
 * Each lock has a postfix that describes the locking level.  If multiple
 * locks are required, they may only be acquired in increasing hierarchy
 * order, which prevents deadlocks.
 *
 * Locking Example
 * If state_lock_lhb1 is currently held and the TX list needs to be
 * manipulated, then tx_lock_lhb2 may be locked since its locking hierarchy
 * is greater.  However, if tx_lock_lhb2 is held, then state_lock_lhb1 may
 * not be acquired since that would result in a deadlock.
 *
 * Note that the Line Discipline locks (*_lha) must always be acquired
 * before the logical channel locks.
 */
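
/*
 * Ordering sketch using the locks defined below (see smux_lch_purge() for
 * a real occurrence of this nesting):
 *
 *	spin_lock_irqsave(&ch->state_lock_lhb1, flags);	(hierarchy 1)
 *	spin_lock(&ch->tx_lock_lhb2);			(hierarchy 2)
 *	...
 *	spin_unlock(&ch->tx_lock_lhb2);
 *	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
 */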
struct smux_lch_t {
	/* channel state */
	spinlock_t state_lock_lhb1;
	uint8_t lcid;
	unsigned local_state;
	unsigned local_mode;
	uint8_t local_tiocm;
	unsigned options;

	unsigned remote_state;
	unsigned remote_mode;
	uint8_t remote_tiocm;

	int tx_flow_control;
	int rx_flow_control_auto;
	int rx_flow_control_client;

	/* client callbacks and private data */
	void *priv;
	void (*notify)(void *priv, int event_type, const void *metadata);
	int (*get_rx_buffer)(void *priv, void **pkt_priv, void **buffer,
								int size);

	/* RX Info */
	struct list_head rx_retry_queue;
	unsigned rx_retry_queue_cnt;
	struct delayed_work rx_retry_work;

	/* TX Info */
	spinlock_t tx_lock_lhb2;
	struct list_head tx_queue;
	struct list_head tx_ready_list;
	unsigned tx_pending_data_cnt;
	unsigned notify_lwm;
};

union notifier_metadata {
	struct smux_meta_disconnected disconnected;
	struct smux_meta_read read;
	struct smux_meta_write write;
	struct smux_meta_tiocm tiocm;
};

struct smux_notify_handle {
	void (*notify)(void *priv, int event_type, const void *metadata);
	void *priv;
	int event_type;
	union notifier_metadata *metadata;
};

/**
 * Get RX Buffer Retry structure.
 *
 * This is used for clients that are unable to provide an RX buffer
 * immediately.  The structure temporarily holds the received data while
 * the buffer request is retried.
 */
struct smux_rx_pkt_retry {
	struct smux_pkt_t *pkt;
	struct list_head rx_retry_list;
	unsigned timeout_in_ms;
};

/**
 * Receive worker data structure.
 *
 * One instance is created for every call to smux_rx_state_machine.
 */
struct smux_rx_worker_data {
	const unsigned char *data;
	int len;
	int flag;

	struct work_struct work;
	struct completion work_complete;
};

/**
 * Line discipline and module structure.
 *
 * Only one instance since multiple instances of line discipline are not
 * allowed.
 */
struct smux_ldisc_t {
	struct mutex mutex_lha0;

	int is_initialized;
	int platform_devs_registered;
	int in_reset;
	int ld_open_count;
	struct tty_struct *tty;

	/* RX State Machine (single-threaded access by smux_rx_wq) */
	unsigned char recv_buf[SMUX_MAX_PKT_SIZE];
	unsigned int recv_len;
	unsigned int pkt_remain;
	unsigned rx_state;

	/* RX Activity - accessed by multiple threads */
	spinlock_t rx_lock_lha1;
	unsigned rx_activity_flag;

	/* TX / Power */
	spinlock_t tx_lock_lha2;
	struct list_head lch_tx_ready_list;
	unsigned power_state;
	unsigned pwr_wakeup_delay_us;
	unsigned tx_activity_flag;
	unsigned powerdown_enabled;
	unsigned power_ctl_remote_req_received;
	struct list_head power_queue;
};


/* data structures */
static struct smux_lch_t smux_lch[SMUX_NUM_LOGICAL_CHANNELS];
static struct smux_ldisc_t smux;
static const char *tty_error_type[] = {
	[TTY_NORMAL] = "normal",
	[TTY_OVERRUN] = "overrun",
	[TTY_BREAK] = "break",
	[TTY_PARITY] = "parity",
	[TTY_FRAME] = "framing",
};

static const char *smux_cmds[] = {
	[SMUX_CMD_DATA] = "DATA",
	[SMUX_CMD_OPEN_LCH] = "OPEN",
	[SMUX_CMD_CLOSE_LCH] = "CLOSE",
	[SMUX_CMD_STATUS] = "STATUS",
	[SMUX_CMD_PWR_CTL] = "PWR",
	[SMUX_CMD_BYTE] = "Raw Byte",
};

static void smux_notify_local_fn(struct work_struct *work);
static DECLARE_WORK(smux_notify_local, smux_notify_local_fn);

static struct workqueue_struct *smux_notify_wq;
static size_t handle_size;
static struct kfifo smux_notify_fifo;
static int queued_fifo_notifications;
static DEFINE_SPINLOCK(notify_lock_lhc1);

static struct workqueue_struct *smux_tx_wq;
static struct workqueue_struct *smux_rx_wq;
static void smux_tx_worker(struct work_struct *work);
static DECLARE_WORK(smux_tx_work, smux_tx_worker);

static void smux_wakeup_worker(struct work_struct *work);
static void smux_rx_retry_worker(struct work_struct *work);
static void smux_rx_worker(struct work_struct *work);
static DECLARE_WORK(smux_wakeup_work, smux_wakeup_worker);
static DECLARE_DELAYED_WORK(smux_wakeup_delayed_work, smux_wakeup_worker);

static void smux_inactivity_worker(struct work_struct *work);
static DECLARE_WORK(smux_inactivity_work, smux_inactivity_worker);
static DECLARE_DELAYED_WORK(smux_delayed_inactivity_work,
		smux_inactivity_worker);

static long msm_smux_tiocm_get_atomic(struct smux_lch_t *ch);
static void list_channel(struct smux_lch_t *ch);
static int smux_send_status_cmd(struct smux_lch_t *ch);
static int smux_dispatch_rx_pkt(struct smux_pkt_t *pkt);
static void smux_flush_tty(void);
static void smux_purge_ch_tx_queue(struct smux_lch_t *ch, int is_ssr);
static int schedule_notify(uint8_t lcid, int event,
			const union notifier_metadata *metadata);
static int ssr_notifier_cb(struct notifier_block *this,
				unsigned long code,
				void *data);
static void smux_uart_power_on_atomic(void);
static int smux_rx_flow_control_updated(struct smux_lch_t *ch);
static void smux_flush_workqueues(void);
static void smux_pdev_release(struct device *dev);

/**
 * Convert TTY Error Flags to string for logging purposes.
 *
 * @flag    TTY_* flag
 * @returns String description or NULL if unknown
 */
static const char *tty_flag_to_str(unsigned flag)
{
	if (flag < ARRAY_SIZE(tty_error_type))
		return tty_error_type[flag];
	return NULL;
}

/**
 * Convert SMUX Command to string for logging purposes.
 *
 * @cmd     SMUX command
 * @returns String description or NULL if unknown
 */
static const char *cmd_to_str(unsigned cmd)
{
	if (cmd < ARRAY_SIZE(smux_cmds))
		return smux_cmds[cmd];
	return NULL;
}

/**
 * Set the reset state due to an unrecoverable failure.
 */
static void smux_enter_reset(void)
{
	pr_err("%s: unrecoverable failure, waiting for ssr\n", __func__);
	smux.in_reset = 1;
}

static int lch_init(void)
{
	unsigned int id;
	struct smux_lch_t *ch;
	int i = 0;

	handle_size = sizeof(struct smux_notify_handle *);

	smux_notify_wq = create_singlethread_workqueue("smux_notify_wq");
	smux_tx_wq = create_singlethread_workqueue("smux_tx_wq");
	smux_rx_wq = create_singlethread_workqueue("smux_rx_wq");

	/* create_singlethread_workqueue() returns NULL on failure */
	if (!smux_notify_wq || !smux_tx_wq || !smux_rx_wq) {
		SMUX_DBG("%s: create_singlethread_workqueue ENOMEM\n",
							__func__);
		return -ENOMEM;
	}

	i |= kfifo_alloc(&smux_notify_fifo,
			SMUX_NOTIFY_FIFO_SIZE * handle_size,
			GFP_KERNEL);
	i |= smux_loopback_init();

	if (i) {
		pr_err("%s: out of memory error\n", __func__);
		return -ENOMEM;
	}

	for (id = 0 ; id < SMUX_NUM_LOGICAL_CHANNELS; id++) {
		ch = &smux_lch[id];

		spin_lock_init(&ch->state_lock_lhb1);
		ch->lcid = id;
		ch->local_state = SMUX_LCH_LOCAL_CLOSED;
		ch->local_mode = SMUX_LCH_MODE_NORMAL;
		ch->local_tiocm = 0x0;
		ch->options = SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP;
		ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
		ch->remote_mode = SMUX_LCH_MODE_NORMAL;
		ch->remote_tiocm = 0x0;
		ch->tx_flow_control = 0;
		ch->rx_flow_control_auto = 0;
		ch->rx_flow_control_client = 0;
		ch->priv = 0;
		ch->notify = 0;
		ch->get_rx_buffer = 0;

		INIT_LIST_HEAD(&ch->rx_retry_queue);
		ch->rx_retry_queue_cnt = 0;
		INIT_DELAYED_WORK(&ch->rx_retry_work, smux_rx_retry_worker);

		spin_lock_init(&ch->tx_lock_lhb2);
		INIT_LIST_HEAD(&ch->tx_queue);
		INIT_LIST_HEAD(&ch->tx_ready_list);
		ch->tx_pending_data_cnt = 0;
		ch->notify_lwm = 0;
	}

	return 0;
}

/**
 * Empty and cleanup all SMUX logical channels for subsystem restart or line
 * discipline disconnect.
 */
static void smux_lch_purge(void)
{
	struct smux_lch_t *ch;
	unsigned long flags;
	int i;

	/* Empty TX ready list */
	spin_lock_irqsave(&smux.tx_lock_lha2, flags);
	while (!list_empty(&smux.lch_tx_ready_list)) {
		SMUX_DBG("%s: emptying ready list %p\n",
				__func__, smux.lch_tx_ready_list.next);
		ch = list_first_entry(&smux.lch_tx_ready_list,
						struct smux_lch_t,
						tx_ready_list);
		list_del(&ch->tx_ready_list);
		INIT_LIST_HEAD(&ch->tx_ready_list);
	}

	/* Purge Power Queue */
	while (!list_empty(&smux.power_queue)) {
		struct smux_pkt_t *pkt;

		pkt = list_first_entry(&smux.power_queue,
						struct smux_pkt_t,
						list);
		list_del(&pkt->list);
		SMUX_DBG("%s: emptying power queue pkt=%p\n",
				__func__, pkt);
		smux_free_pkt(pkt);
	}
	spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);

	/* Close all ports */
	for (i = 0 ; i < SMUX_NUM_LOGICAL_CHANNELS; i++) {
		ch = &smux_lch[i];
		SMUX_DBG("%s: cleaning up lcid %d\n", __func__, i);

		spin_lock_irqsave(&ch->state_lock_lhb1, flags);

		/* Purge TX queue */
		spin_lock(&ch->tx_lock_lhb2);
		smux_purge_ch_tx_queue(ch, 1);
		spin_unlock(&ch->tx_lock_lhb2);

		/* Notify user of disconnect and reset channel state */
		if (ch->local_state == SMUX_LCH_LOCAL_OPENED ||
			ch->local_state == SMUX_LCH_LOCAL_CLOSING) {
			union notifier_metadata meta;

			meta.disconnected.is_ssr = smux.in_reset;
			schedule_notify(ch->lcid, SMUX_DISCONNECTED, &meta);
		}

		ch->local_state = SMUX_LCH_LOCAL_CLOSED;
		ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
		ch->remote_mode = SMUX_LCH_MODE_NORMAL;
		ch->tx_flow_control = 0;
		ch->rx_flow_control_auto = 0;
		ch->rx_flow_control_client = 0;

		/* Purge RX retry queue */
		if (ch->rx_retry_queue_cnt)
			queue_delayed_work(smux_rx_wq, &ch->rx_retry_work, 0);

		spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
	}
}

int smux_assert_lch_id(uint32_t lcid)
{
	if (lcid >= SMUX_NUM_LOGICAL_CHANNELS)
		return -ENXIO;
	else
		return 0;
}
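
/*
 * Note the inverted sense: 0 means the lcid is valid, so callers test
 * validity with the negation, e.g. as smux_log_pkt() does below:
 *
 *	if (!smux_assert_lch_id(pkt->hdr.lcid))
 *		ch = &smux_lch[pkt->hdr.lcid];
 */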

/**
 * Log packet information for debug purposes.
 *
 * @pkt     Packet to log
 * @is_recv 1 = RX packet; 0 = TX Packet
 *
 * [DIR][LCID] [LOCAL_STATE][LOCAL_MODE]:[REMOTE_STATE][REMOTE_MODE] PKT Info
 *
 * PKT Info:
 *   [CMD] flags [flags] len [PAYLOAD_LEN]:[PAD_LEN] [Payload hex bytes]
 *
 * Direction:    R = Receive, S = Send
 * Local State:  C = Closed; c = closing; o = opening; O = Opened
 * Local Mode:   L = Local loopback; R = Remote loopback; N = Normal
 * Remote State: C = Closed; O = Opened
 * Remote Mode:  R = Remote loopback; N = Normal
 */
static void smux_log_pkt(struct smux_pkt_t *pkt, int is_recv)
{
	char logbuf[SMUX_PKT_LOG_SIZE];
	char cmd_extra[16];
	int i = 0;
	int count;
	int len;
	char local_state;
	char local_mode;
	char remote_state;
	char remote_mode;
	struct smux_lch_t *ch = NULL;
	unsigned char *data;

	if (!smux_assert_lch_id(pkt->hdr.lcid))
		ch = &smux_lch[pkt->hdr.lcid];

	if (ch) {
		switch (ch->local_state) {
		case SMUX_LCH_LOCAL_CLOSED:
			local_state = 'C';
			break;
		case SMUX_LCH_LOCAL_OPENING:
			local_state = 'o';
			break;
		case SMUX_LCH_LOCAL_OPENED:
			local_state = 'O';
			break;
		case SMUX_LCH_LOCAL_CLOSING:
			local_state = 'c';
			break;
		default:
			local_state = 'U';
			break;
		}

		switch (ch->local_mode) {
		case SMUX_LCH_MODE_LOCAL_LOOPBACK:
			local_mode = 'L';
			break;
		case SMUX_LCH_MODE_REMOTE_LOOPBACK:
			local_mode = 'R';
			break;
		case SMUX_LCH_MODE_NORMAL:
			local_mode = 'N';
			break;
		default:
			local_mode = 'U';
			break;
		}

		switch (ch->remote_state) {
		case SMUX_LCH_REMOTE_CLOSED:
			remote_state = 'C';
			break;
		case SMUX_LCH_REMOTE_OPENED:
			remote_state = 'O';
			break;
		default:
			remote_state = 'U';
			break;
		}

		switch (ch->remote_mode) {
		case SMUX_LCH_MODE_REMOTE_LOOPBACK:
			remote_mode = 'R';
			break;
		case SMUX_LCH_MODE_NORMAL:
			remote_mode = 'N';
			break;
		default:
			remote_mode = 'U';
			break;
		}
	} else {
		/* broadcast channel */
		local_state = '-';
		local_mode = '-';
		remote_state = '-';
		remote_mode = '-';
	}

	/* determine command type (ACK, etc) */
	cmd_extra[0] = '\0';
	switch (pkt->hdr.cmd) {
	case SMUX_CMD_OPEN_LCH:
		if (pkt->hdr.flags & SMUX_CMD_OPEN_ACK)
			snprintf(cmd_extra, sizeof(cmd_extra), " ACK");
		break;
	case SMUX_CMD_CLOSE_LCH:
		if (pkt->hdr.flags & SMUX_CMD_CLOSE_ACK)
			snprintf(cmd_extra, sizeof(cmd_extra), " ACK");
		break;

	case SMUX_CMD_PWR_CTL:
		if (pkt->hdr.flags & SMUX_CMD_PWR_CTL_ACK)
			snprintf(cmd_extra, sizeof(cmd_extra), " ACK");
		break;
	}

	i += snprintf(logbuf + i, SMUX_PKT_LOG_SIZE - i,
		"smux: %c%d %c%c:%c%c %s%s flags %x len %d:%d ",
		is_recv ? 'R' : 'S', pkt->hdr.lcid,
		local_state, local_mode,
		remote_state, remote_mode,
		cmd_to_str(pkt->hdr.cmd), cmd_extra, pkt->hdr.flags,
		pkt->hdr.payload_len, pkt->hdr.pad_len);

	len = (pkt->hdr.payload_len > 16) ? 16 : pkt->hdr.payload_len;
	data = (unsigned char *)pkt->payload;
	for (count = 0; count < len; count++)
		i += snprintf(logbuf + i, SMUX_PKT_LOG_SIZE - i,
					"%02x ", (unsigned)data[count]);

	pr_info("%s\n", logbuf);
}
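
/*
 * Hypothetical sample of the format above: a received DATA packet on
 * lcid 8, both ends opened in normal mode, 4-byte payload, no padding:
 *
 *	smux: R8 ON:ON DATA flags 0 len 4:0 de ad be ef
 */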

static void smux_notify_local_fn(struct work_struct *work)
{
	struct smux_notify_handle *notify_handle = NULL;
	union notifier_metadata *metadata = NULL;
	unsigned long flags;
	int i;

	for (;;) {
		/* retrieve notification */
		spin_lock_irqsave(&notify_lock_lhc1, flags);
		if (kfifo_len(&smux_notify_fifo) >= handle_size) {
			i = kfifo_out(&smux_notify_fifo,
				&notify_handle,
				handle_size);
			if (i != handle_size) {
				pr_err("%s: unable to retrieve handle %d expected %zu\n",
						__func__, i, handle_size);
				spin_unlock_irqrestore(&notify_lock_lhc1,
						flags);
				break;
			}
		} else {
			spin_unlock_irqrestore(&notify_lock_lhc1, flags);
			break;
		}
		--queued_fifo_notifications;
		spin_unlock_irqrestore(&notify_lock_lhc1, flags);

		/* notify client */
		metadata = notify_handle->metadata;
		notify_handle->notify(notify_handle->priv,
			notify_handle->event_type,
			metadata);

		kfree(metadata);
		kfree(notify_handle);
	}
}

/**
 * Initialize existing packet.
 */
void smux_init_pkt(struct smux_pkt_t *pkt)
{
	memset(pkt, 0x0, sizeof(*pkt));
	pkt->hdr.magic = SMUX_MAGIC;
	INIT_LIST_HEAD(&pkt->list);
}

/**
 * Allocate and initialize packet.
 *
 * If a payload is needed, either set it directly and ensure that it's freed
 * or use smux_alloc_pkt_payload() to allocate a payload and it will be freed
 * automatically when smux_free_pkt() is called.
 */
struct smux_pkt_t *smux_alloc_pkt(void)
{
	struct smux_pkt_t *pkt;

	/* Consider a free list implementation instead of kmalloc */
	pkt = kmalloc(sizeof(struct smux_pkt_t), GFP_ATOMIC);
	if (!pkt) {
		pr_err("%s: out of memory\n", __func__);
		return NULL;
	}
	smux_init_pkt(pkt);
	pkt->allocated = 1;

	return pkt;
}

/**
 * Free packet.
 *
 * @pkt Packet to free (may be NULL)
 *
 * If payload was allocated using smux_alloc_pkt_payload(), then it is freed
 * as well.  Otherwise, the caller is responsible for freeing the payload.
 */
void smux_free_pkt(struct smux_pkt_t *pkt)
{
	if (pkt) {
		if (pkt->free_payload)
			kfree(pkt->payload);
		if (pkt->allocated)
			kfree(pkt);
	}
}

/**
 * Allocate packet payload.
 *
 * @pkt Packet to add payload to
 *
 * @returns 0 on success, <0 upon error
 *
 * A flag is set to signal smux_free_pkt() to free the payload.
 */
int smux_alloc_pkt_payload(struct smux_pkt_t *pkt)
{
	if (!pkt)
		return -EINVAL;

	pkt->payload = kmalloc(pkt->hdr.payload_len, GFP_ATOMIC);
	pkt->free_payload = 1;
	if (!pkt->payload) {
		pr_err("%s: unable to malloc %d bytes for payload\n",
				__func__, pkt->hdr.payload_len);
		return -ENOMEM;
	}

	return 0;
}
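
/*
 * Lifecycle sketch for the packet helpers above, mirroring the pattern the
 * command handlers later in this file use:
 *
 *	struct smux_pkt_t *pkt = smux_alloc_pkt();
 *	if (pkt) {
 *		pkt->hdr.cmd = SMUX_CMD_DATA;
 *		pkt->hdr.lcid = lcid;
 *		pkt->hdr.payload_len = len;
 *		if (smux_alloc_pkt_payload(pkt) == 0) {
 *			memcpy(pkt->payload, data, len);
 *			smux_tx_queue(pkt, ch, 1);
 *		} else {
 *			smux_free_pkt(pkt);
 *		}
 *	}
 *
 * smux_free_pkt() also frees the payload because smux_alloc_pkt_payload()
 * sets pkt->free_payload.
 */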

static int schedule_notify(uint8_t lcid, int event,
			const union notifier_metadata *metadata)
{
	struct smux_notify_handle *notify_handle = 0;
	union notifier_metadata *meta_copy = 0;
	struct smux_lch_t *ch;
	int i;
	unsigned long flags;
	int ret = 0;

	ch = &smux_lch[lcid];
	notify_handle = kzalloc(sizeof(struct smux_notify_handle),
						GFP_ATOMIC);
	if (!notify_handle) {
		pr_err("%s: out of memory\n", __func__);
		ret = -ENOMEM;
		goto free_out;
	}

	notify_handle->notify = ch->notify;
	notify_handle->priv = ch->priv;
	notify_handle->event_type = event;
	if (metadata) {
		meta_copy = kzalloc(sizeof(union notifier_metadata),
						GFP_ATOMIC);
		if (!meta_copy) {
			pr_err("%s: out of memory\n", __func__);
			ret = -ENOMEM;
			goto free_out;
		}
		*meta_copy = *metadata;
		notify_handle->metadata = meta_copy;
	} else {
		notify_handle->metadata = NULL;
	}

	spin_lock_irqsave(&notify_lock_lhc1, flags);
	i = kfifo_avail(&smux_notify_fifo);
	if (i < handle_size) {
		pr_err("%s: fifo full error %d expected %zu\n",
					__func__, i, handle_size);
		ret = -ENOMEM;
		goto unlock_out;
	}

	i = kfifo_in(&smux_notify_fifo, &notify_handle, handle_size);
	if (i < 0 || i != handle_size) {
		pr_err("%s: fifo not available error %d (expected %zu)\n",
				__func__, i, handle_size);
		ret = -ENOSPC;
		goto unlock_out;
	}
	++queued_fifo_notifications;

unlock_out:
	spin_unlock_irqrestore(&notify_lock_lhc1, flags);

free_out:
	queue_work(smux_notify_wq, &smux_notify_local);
	if (ret < 0 && notify_handle) {
		kfree(notify_handle->metadata);
		kfree(notify_handle);
	}
	return ret;
}

/**
 * Returns the serialized size of a packet.
 *
 * @pkt Packet to serialize
 *
 * @returns Serialized length of packet
 */
static unsigned int smux_serialize_size(struct smux_pkt_t *pkt)
{
	unsigned int size;

	size = sizeof(struct smux_hdr_t);
	size += pkt->hdr.payload_len;
	size += pkt->hdr.pad_len;

	return size;
}

/**
 * Serialize packet @pkt into output buffer @data.
 *
 * @pkt     Packet to serialize
 * @out     Destination buffer pointer
 * @out_len Size of serialized packet
 *
 * @returns 0 for success
 */
int smux_serialize(struct smux_pkt_t *pkt, char *out,
					unsigned int *out_len)
{
	char *data_start = out;

	if (smux_serialize_size(pkt) > SMUX_MAX_PKT_SIZE) {
		pr_err("%s: packet size %d too big\n",
				__func__, smux_serialize_size(pkt));
		return -E2BIG;
	}

	memcpy(out, &pkt->hdr, sizeof(struct smux_hdr_t));
	out += sizeof(struct smux_hdr_t);
	if (pkt->payload) {
		memcpy(out, pkt->payload, pkt->hdr.payload_len);
		out += pkt->hdr.payload_len;
	}
	if (pkt->hdr.pad_len) {
		memset(out, 0x0, pkt->hdr.pad_len);
		out += pkt->hdr.pad_len;
	}
	*out_len = out - data_start;
	return 0;
}
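
/*
 * Resulting wire layout (header fields as defined by struct smux_hdr_t in
 * smux_private.h):
 *
 *	+-------------------+------------------+---------------+
 *	| struct smux_hdr_t | payload          | zero padding  |
 *	| sizeof(hdr)       | hdr.payload_len  | hdr.pad_len   |
 *	+-------------------+------------------+---------------+
 *
 * so on success *out_len equals smux_serialize_size(pkt).
 */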

/**
 * Serialize header and provide pointer to the data.
 *
 * @pkt          Packet
 * @out[out]     Pointer to the serialized header data
 * @out_len[out] Pointer to the serialized header length
 */
static void smux_serialize_hdr(struct smux_pkt_t *pkt, char **out,
					unsigned int *out_len)
{
	*out = (char *)&pkt->hdr;
	*out_len = sizeof(struct smux_hdr_t);
}

/**
 * Serialize payload and provide pointer to the data.
 *
 * @pkt          Packet
 * @out[out]     Pointer to the serialized payload data
 * @out_len[out] Pointer to the serialized payload length
 */
static void smux_serialize_payload(struct smux_pkt_t *pkt, char **out,
					unsigned int *out_len)
{
	*out = pkt->payload;
	*out_len = pkt->hdr.payload_len;
}

/**
 * Serialize padding and provide pointer to the data.
 *
 * @pkt          Packet
 * @out[out]     Pointer to the serialized padding (always NULL)
 * @out_len[out] Pointer to the serialized padding length
 *
 * Since the padding field value is undefined, only the size of the padding
 * (@out_len) is set and the buffer pointer (@out) will always be NULL.
 */
static void smux_serialize_padding(struct smux_pkt_t *pkt, char **out,
					unsigned int *out_len)
{
	*out = NULL;
	*out_len = pkt->hdr.pad_len;
}

/**
 * Write data to TTY framework and handle breaking the writes up if needed.
 *
 * @data Data to write
 * @len  Length of data
 *
 * @returns 0 for success, < 0 for failure
 */
static int write_to_tty(char *data, unsigned len)
{
	int data_written;

	if (!data)
		return 0;

	while (len > 0 && !smux.in_reset) {
		data_written = smux.tty->ops->write(smux.tty, data, len);
		if (data_written >= 0) {
			len -= data_written;
			data += data_written;
		} else {
			pr_err("%s: TTY write returned error %d\n",
					__func__, data_written);
			return data_written;
		}

		if (len)
			tty_wait_until_sent(smux.tty,
				msecs_to_jiffies(TTY_BUFFER_FULL_WAIT_MS));
	}
	return 0;
}

/**
 * Write packet to TTY.
 *
 * @pkt packet to write
 *
 * @returns 0 on success
 */
static int smux_tx_tty(struct smux_pkt_t *pkt)
{
	char *data;
	unsigned int len;
	int ret;

	if (!smux.tty) {
		pr_err("%s: TTY not initialized\n", __func__);
		return -ENOTTY;
	}

	if (pkt->hdr.cmd == SMUX_CMD_BYTE) {
		SMUX_DBG("%s: tty send single byte\n", __func__);
		ret = write_to_tty(&pkt->hdr.flags, 1);
		return ret;
	}

	smux_serialize_hdr(pkt, &data, &len);
	ret = write_to_tty(data, len);
	if (ret) {
		pr_err("%s: failed %d to write header %d\n",
				__func__, ret, len);
		return ret;
	}

	smux_serialize_payload(pkt, &data, &len);
	ret = write_to_tty(data, len);
	if (ret) {
		pr_err("%s: failed %d to write payload %d\n",
				__func__, ret, len);
		return ret;
	}

	smux_serialize_padding(pkt, &data, &len);
	while (len > 0) {
		char zero = 0x0;
		ret = write_to_tty(&zero, 1);
		if (ret) {
			pr_err("%s: failed %d to write padding %d\n",
					__func__, ret, len);
			return ret;
		}
		--len;
	}
	return 0;
}

/**
 * Send a single character.
 *
 * @ch Character to send
 */
static void smux_send_byte(char ch)
{
	struct smux_pkt_t *pkt;

	pkt = smux_alloc_pkt();
	if (!pkt) {
		pr_err("%s: alloc failure for byte %x\n", __func__, ch);
		return;
	}
	pkt->hdr.cmd = SMUX_CMD_BYTE;
	pkt->hdr.flags = ch;
	pkt->hdr.lcid = SMUX_BROADCAST_LCID;

	list_add_tail(&pkt->list, &smux.power_queue);
	queue_work(smux_tx_wq, &smux_tx_work);
}

/**
 * Receive a single-character packet (used for internal testing).
 *
 * @ch   Character to receive
 * @lcid Logical channel ID for packet
 *
 * @returns 0 for success
 */
static int smux_receive_byte(char ch, int lcid)
{
	struct smux_pkt_t pkt;

	smux_init_pkt(&pkt);
	pkt.hdr.lcid = lcid;
	pkt.hdr.cmd = SMUX_CMD_BYTE;
	pkt.hdr.flags = ch;

	return smux_dispatch_rx_pkt(&pkt);
}

/**
 * Queue packet for transmit.
 *
 * @pkt_ptr Packet to queue
 * @ch      Channel to queue packet on
 * @queue   Queue channel on ready list
 */
static void smux_tx_queue(struct smux_pkt_t *pkt_ptr, struct smux_lch_t *ch,
		int queue)
{
	unsigned long flags;

	SMUX_DBG("%s: queuing pkt %p\n", __func__, pkt_ptr);

	spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
	list_add_tail(&pkt_ptr->list, &ch->tx_queue);
	spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);

	if (queue)
		list_channel(ch);
}
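
/*
 * Usage sketch of the queue flag, as the RX command handlers below do it:
 * queue with queue == 0, set a local tx_ready flag, and schedule the
 * channel once after all packets for it are queued:
 *
 *	smux_tx_queue(ack_pkt, ch, 0);
 *	tx_ready = 1;
 *	...
 *	if (tx_ready)
 *		list_channel(ch);
 */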

/**
 * Handle receive OPEN ACK command.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_open_ack(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret;
	struct smux_lch_t *ch;
	int enable_powerdown = 0;

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];

	spin_lock(&ch->state_lock_lhb1);
	if (ch->local_state == SMUX_LCH_LOCAL_OPENING) {
		SMUX_DBG("lcid %d local state 0x%x -> 0x%x\n", lcid,
				ch->local_state,
				SMUX_LCH_LOCAL_OPENED);

		if (pkt->hdr.flags & SMUX_CMD_OPEN_POWER_COLLAPSE)
			enable_powerdown = 1;

		ch->local_state = SMUX_LCH_LOCAL_OPENED;
		if (ch->remote_state == SMUX_LCH_REMOTE_OPENED)
			schedule_notify(lcid, SMUX_CONNECTED, NULL);
		ret = 0;
	} else if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
		SMUX_DBG("Remote loopback OPEN ACK received\n");
		ret = 0;
	} else {
		pr_err("%s: lcid %d state 0x%x open ack invalid\n",
				__func__, lcid, ch->local_state);
		ret = -EINVAL;
	}
	spin_unlock(&ch->state_lock_lhb1);

	if (enable_powerdown) {
		spin_lock(&smux.tx_lock_lha2);
		if (!smux.powerdown_enabled) {
			smux.powerdown_enabled = 1;
			SMUX_DBG("%s: enabling power-collapse support\n",
					__func__);
		}
		spin_unlock(&smux.tx_lock_lha2);
	}

	return ret;
}

static int smux_handle_close_ack(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret;
	struct smux_lch_t *ch;
	union notifier_metadata meta_disconnected;
	unsigned long flags;

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];
	meta_disconnected.disconnected.is_ssr = 0;

	spin_lock_irqsave(&ch->state_lock_lhb1, flags);

	if (ch->local_state == SMUX_LCH_LOCAL_CLOSING) {
		SMUX_DBG("lcid %d local state 0x%x -> 0x%x\n", lcid,
				SMUX_LCH_LOCAL_CLOSING,
				SMUX_LCH_LOCAL_CLOSED);
		ch->local_state = SMUX_LCH_LOCAL_CLOSED;
		if (ch->remote_state == SMUX_LCH_REMOTE_CLOSED)
			schedule_notify(lcid, SMUX_DISCONNECTED,
				&meta_disconnected);
		ret = 0;
	} else if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
		SMUX_DBG("Remote loopback CLOSE ACK received\n");
		ret = 0;
	} else {
		pr_err("%s: lcid %d state 0x%x close ack invalid\n",
				__func__, lcid, ch->local_state);
		ret = -EINVAL;
	}
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
	return ret;
}

/**
 * Handle receive OPEN command.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_open_cmd(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret;
	struct smux_lch_t *ch;
	struct smux_pkt_t *ack_pkt;
	unsigned long flags;
	int tx_ready = 0;
	int enable_powerdown = 0;

	if (pkt->hdr.flags & SMUX_CMD_OPEN_ACK)
		return smux_handle_rx_open_ack(pkt);

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];

	spin_lock_irqsave(&ch->state_lock_lhb1, flags);

	if (ch->remote_state == SMUX_LCH_REMOTE_CLOSED) {
		SMUX_DBG("lcid %d remote state 0x%x -> 0x%x\n", lcid,
				SMUX_LCH_REMOTE_CLOSED,
				SMUX_LCH_REMOTE_OPENED);

		ch->remote_state = SMUX_LCH_REMOTE_OPENED;
		if (pkt->hdr.flags & SMUX_CMD_OPEN_POWER_COLLAPSE)
			enable_powerdown = 1;

		/* Send Open ACK */
		ack_pkt = smux_alloc_pkt();
		if (!ack_pkt) {
			/* exit out to allow retrying this later */
			ret = -ENOMEM;
			goto out;
		}
		ack_pkt->hdr.cmd = SMUX_CMD_OPEN_LCH;
		ack_pkt->hdr.flags = SMUX_CMD_OPEN_ACK
			| SMUX_CMD_OPEN_POWER_COLLAPSE;
		ack_pkt->hdr.lcid = lcid;
		ack_pkt->hdr.payload_len = 0;
		ack_pkt->hdr.pad_len = 0;
		if (pkt->hdr.flags & SMUX_CMD_OPEN_REMOTE_LOOPBACK) {
			ch->remote_mode = SMUX_LCH_MODE_REMOTE_LOOPBACK;
			ack_pkt->hdr.flags |= SMUX_CMD_OPEN_REMOTE_LOOPBACK;
		}
		smux_tx_queue(ack_pkt, ch, 0);
		tx_ready = 1;

		if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
			/*
			 * Send an Open command to the remote side to
			 * simulate our local client doing it.
			 */
			ack_pkt = smux_alloc_pkt();
			if (ack_pkt) {
				ack_pkt->hdr.lcid = lcid;
				ack_pkt->hdr.cmd = SMUX_CMD_OPEN_LCH;
				ack_pkt->hdr.flags =
					SMUX_CMD_OPEN_POWER_COLLAPSE;
				ack_pkt->hdr.payload_len = 0;
				ack_pkt->hdr.pad_len = 0;
				smux_tx_queue(ack_pkt, ch, 0);
				tx_ready = 1;
			} else {
				pr_err("%s: Remote loopback allocation failure\n",
						__func__);
			}
		} else if (ch->local_state == SMUX_LCH_LOCAL_OPENED) {
			schedule_notify(lcid, SMUX_CONNECTED, NULL);
		}
		ret = 0;
	} else {
		pr_err("%s: lcid %d remote state 0x%x open invalid\n",
				__func__, lcid, ch->remote_state);
		ret = -EINVAL;
	}

out:
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);

	if (enable_powerdown) {
		spin_lock_irqsave(&smux.tx_lock_lha2, flags);
		if (!smux.powerdown_enabled) {
			smux.powerdown_enabled = 1;
			SMUX_DBG("%s: enabling power-collapse support\n",
					__func__);
		}
		spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
	}

	if (tx_ready)
		list_channel(ch);

	return ret;
}

/**
 * Handle receive CLOSE command.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_close_cmd(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret;
	struct smux_lch_t *ch;
	struct smux_pkt_t *ack_pkt;
	union notifier_metadata meta_disconnected;
	unsigned long flags;
	int tx_ready = 0;

	if (pkt->hdr.flags & SMUX_CMD_CLOSE_ACK)
		return smux_handle_close_ack(pkt);

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];
	meta_disconnected.disconnected.is_ssr = 0;

	spin_lock_irqsave(&ch->state_lock_lhb1, flags);
	if (ch->remote_state == SMUX_LCH_REMOTE_OPENED) {
		SMUX_DBG("lcid %d remote state 0x%x -> 0x%x\n", lcid,
				SMUX_LCH_REMOTE_OPENED,
				SMUX_LCH_REMOTE_CLOSED);

		ack_pkt = smux_alloc_pkt();
		if (!ack_pkt) {
			/* exit out to allow retrying this later */
			ret = -ENOMEM;
			goto out;
		}
		ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
		ack_pkt->hdr.cmd = SMUX_CMD_CLOSE_LCH;
		ack_pkt->hdr.flags = SMUX_CMD_CLOSE_ACK;
		ack_pkt->hdr.lcid = lcid;
		ack_pkt->hdr.payload_len = 0;
		ack_pkt->hdr.pad_len = 0;
		smux_tx_queue(ack_pkt, ch, 0);
		tx_ready = 1;

		if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
			/*
			 * Send a Close command to the remote side to simulate
			 * our local client doing it.
			 */
			ack_pkt = smux_alloc_pkt();
			if (ack_pkt) {
				ack_pkt->hdr.lcid = lcid;
				ack_pkt->hdr.cmd = SMUX_CMD_CLOSE_LCH;
				ack_pkt->hdr.flags = 0;
				ack_pkt->hdr.payload_len = 0;
				ack_pkt->hdr.pad_len = 0;
				smux_tx_queue(ack_pkt, ch, 0);
				tx_ready = 1;
			} else {
				pr_err("%s: Remote loopback allocation failure\n",
						__func__);
			}
		}

		if (ch->local_state == SMUX_LCH_LOCAL_CLOSED)
			schedule_notify(lcid, SMUX_DISCONNECTED,
				&meta_disconnected);
		ret = 0;
	} else {
		pr_err("%s: lcid %d remote state 0x%x close invalid\n",
				__func__, lcid, ch->remote_state);
		ret = -EINVAL;
	}
out:
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
	if (tx_ready)
		list_channel(ch);

	return ret;
}

/*
 * Handle receive DATA command.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_data_cmd(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret = 0;
	int do_retry = 0;
	int tx_ready = 0;
	int tmp;
	int rx_len;
	struct smux_lch_t *ch;
	union notifier_metadata metadata;
	int remote_loopback;
	struct smux_pkt_t *ack_pkt;
	unsigned long flags;

	if (!pkt || smux_assert_lch_id(pkt->hdr.lcid)) {
		ret = -ENXIO;
		goto out;
	}

	rx_len = pkt->hdr.payload_len;
	if (rx_len == 0) {
		ret = -EINVAL;
		goto out;
	}

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];
	spin_lock_irqsave(&ch->state_lock_lhb1, flags);
	remote_loopback = ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK;

	if (ch->local_state != SMUX_LCH_LOCAL_OPENED
		&& !remote_loopback) {
		pr_err("smux: ch %d error data on local state 0x%x\n",
					lcid, ch->local_state);
		ret = -EIO;
		spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
		goto out;
	}

	if (ch->remote_state != SMUX_LCH_REMOTE_OPENED) {
		pr_err("smux: ch %d error data on remote state 0x%x\n",
					lcid, ch->remote_state);
		ret = -EIO;
		spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
		goto out;
	}

	if (!list_empty(&ch->rx_retry_queue)) {
		do_retry = 1;

		if ((ch->options & SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP) &&
			!ch->rx_flow_control_auto &&
			((ch->rx_retry_queue_cnt + 1) >= SMUX_RX_WM_HIGH)) {
			/* need to flow control RX */
			ch->rx_flow_control_auto = 1;
			tx_ready |= smux_rx_flow_control_updated(ch);
			schedule_notify(ch->lcid, SMUX_RX_RETRY_HIGH_WM_HIT,
					NULL);
		}
		if ((ch->rx_retry_queue_cnt + 1) > SMUX_RX_RETRY_MAX_PKTS) {
			/* retry queue full */
			pr_err("%s: ch %d RX retry queue full\n",
					__func__, lcid);
			schedule_notify(lcid, SMUX_READ_FAIL, NULL);
			ret = -ENOMEM;
			spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
			goto out;
		}
	}
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);

	if (remote_loopback) {
		/* Echo the data back to the remote client. */
		ack_pkt = smux_alloc_pkt();
		if (ack_pkt) {
			ack_pkt->hdr.lcid = lcid;
			ack_pkt->hdr.cmd = SMUX_CMD_DATA;
			ack_pkt->hdr.flags = 0;
			ack_pkt->hdr.payload_len = pkt->hdr.payload_len;
			if (ack_pkt->hdr.payload_len) {
				smux_alloc_pkt_payload(ack_pkt);
				memcpy(ack_pkt->payload, pkt->payload,
						ack_pkt->hdr.payload_len);
			}
			ack_pkt->hdr.pad_len = pkt->hdr.pad_len;
			smux_tx_queue(ack_pkt, ch, 0);
			tx_ready = 1;
		} else {
			pr_err("%s: Remote loopback allocation failure\n",
					__func__);
		}
	} else if (!do_retry) {
		/* request buffer from client */
		metadata.read.pkt_priv = 0;
		metadata.read.buffer = 0;
		tmp = ch->get_rx_buffer(ch->priv,
				(void **)&metadata.read.pkt_priv,
				(void **)&metadata.read.buffer,
				rx_len);

		if (tmp == 0 && metadata.read.buffer) {
			/* place data into RX buffer */
			memcpy(metadata.read.buffer, pkt->payload,
							rx_len);
			metadata.read.len = rx_len;
			schedule_notify(lcid, SMUX_READ_DONE,
							&metadata);
		} else if (tmp == -EAGAIN ||
				(tmp == 0 && !metadata.read.buffer)) {
			/* buffer allocation failed - add to retry queue */
			do_retry = 1;
		} else if (tmp < 0) {
			pr_err("%s: ch %d Client RX buffer alloc failed %d\n",
					__func__, lcid, tmp);
			schedule_notify(lcid, SMUX_READ_FAIL, NULL);
			ret = -ENOMEM;
		}
	}

	if (do_retry) {
		struct smux_rx_pkt_retry *retry;

		retry = kmalloc(sizeof(struct smux_rx_pkt_retry), GFP_KERNEL);
		if (!retry) {
			pr_err("%s: retry alloc failure\n", __func__);
			ret = -ENOMEM;
			schedule_notify(lcid, SMUX_READ_FAIL, NULL);
			goto out;
		}
		INIT_LIST_HEAD(&retry->rx_retry_list);
		retry->timeout_in_ms = SMUX_RX_RETRY_MIN_MS;

		/* copy packet */
		retry->pkt = smux_alloc_pkt();
		if (!retry->pkt) {
			kfree(retry);
			pr_err("%s: pkt alloc failure\n", __func__);
			ret = -ENOMEM;
			schedule_notify(lcid, SMUX_READ_FAIL, NULL);
			goto out;
		}
		retry->pkt->hdr.lcid = lcid;
		retry->pkt->hdr.payload_len = pkt->hdr.payload_len;
		retry->pkt->hdr.pad_len = pkt->hdr.pad_len;
		if (retry->pkt->hdr.payload_len) {
			smux_alloc_pkt_payload(retry->pkt);
			memcpy(retry->pkt->payload, pkt->payload,
					retry->pkt->hdr.payload_len);
		}

		/* add to retry queue */
		spin_lock_irqsave(&ch->state_lock_lhb1, flags);
		list_add_tail(&retry->rx_retry_list, &ch->rx_retry_queue);
		++ch->rx_retry_queue_cnt;
		if (ch->rx_retry_queue_cnt == 1)
			queue_delayed_work(smux_rx_wq, &ch->rx_retry_work,
				msecs_to_jiffies(retry->timeout_in_ms));
		spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
	}

	if (tx_ready)
		list_channel(ch);
out:
	return ret;
}

/**
 * Handle receive byte command for testing purposes.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_byte_cmd(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret;
	struct smux_lch_t *ch;
	union notifier_metadata metadata;
	unsigned long flags;

	if (!pkt || smux_assert_lch_id(pkt->hdr.lcid)) {
		pr_err("%s: invalid packet or channel id\n", __func__);
		return -ENXIO;
	}

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];
	spin_lock_irqsave(&ch->state_lock_lhb1, flags);

	if (ch->local_state != SMUX_LCH_LOCAL_OPENED) {
		pr_err("smux: ch %d error data on local state 0x%x\n",
					lcid, ch->local_state);
		ret = -EIO;
		goto out;
	}

	if (ch->remote_state != SMUX_LCH_REMOTE_OPENED) {
		pr_err("smux: ch %d error data on remote state 0x%x\n",
					lcid, ch->remote_state);
		ret = -EIO;
		goto out;
	}

	metadata.read.pkt_priv = (void *)(int)pkt->hdr.flags;
	metadata.read.buffer = 0;
	schedule_notify(lcid, SMUX_READ_DONE, &metadata);
	ret = 0;

out:
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
	return ret;
}

/**
 * Handle receive status command.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_status_cmd(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret = 0;	/* ret is only set again if the channel is open */
	struct smux_lch_t *ch;
	union notifier_metadata meta;
	unsigned long flags;
	int tx_ready = 0;

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];

	spin_lock_irqsave(&ch->state_lock_lhb1, flags);
	meta.tiocm.tiocm_old = ch->remote_tiocm;
	meta.tiocm.tiocm_new = pkt->hdr.flags;

	/* update logical channel flow control */
	if ((meta.tiocm.tiocm_old & SMUX_CMD_STATUS_FLOW_CNTL) ^
		(meta.tiocm.tiocm_new & SMUX_CMD_STATUS_FLOW_CNTL)) {
		/* logical channel flow control changed */
		if (pkt->hdr.flags & SMUX_CMD_STATUS_FLOW_CNTL) {
			/* disabled TX */
			SMUX_DBG("TX Flow control enabled\n");
			ch->tx_flow_control = 1;
		} else {
			/* re-enable channel */
			SMUX_DBG("TX Flow control disabled\n");
			ch->tx_flow_control = 0;
			tx_ready = 1;
		}
	}
	meta.tiocm.tiocm_old = msm_smux_tiocm_get_atomic(ch);
	ch->remote_tiocm = pkt->hdr.flags;
	meta.tiocm.tiocm_new = msm_smux_tiocm_get_atomic(ch);

	/* client notification for status change */
	if (IS_FULLY_OPENED(ch)) {
		if (meta.tiocm.tiocm_old != meta.tiocm.tiocm_new)
			schedule_notify(lcid, SMUX_TIOCM_UPDATE, &meta);
		ret = 0;
	}
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
	if (tx_ready)
		list_channel(ch);

	return ret;
}

/**
 * Handle receive power command.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_power_cmd(struct smux_pkt_t *pkt)
{
	struct smux_pkt_t *ack_pkt = NULL;
	int power_down = 0;
	unsigned long flags;

	SMUX_PWR_PKT_RX(pkt);

	spin_lock_irqsave(&smux.tx_lock_lha2, flags);
	if (pkt->hdr.flags & SMUX_CMD_PWR_CTL_ACK) {
		/* local sleep request ack */
		if (smux.power_state == SMUX_PWR_TURNING_OFF)
			/* Power-down complete, turn off UART */
			power_down = 1;
		else
			pr_err("%s: sleep request ack invalid in state %d\n",
					__func__, smux.power_state);
	} else {
		/*
		 * Remote sleep request
		 *
		 * Even if we have data pending, we need to transition to the
		 * POWER_OFF state and then perform a wakeup since the remote
		 * side has requested a power-down.
		 *
		 * The state here is set to SMUX_PWR_TURNING_OFF_FLUSH and
		 * the TX thread will set the state to SMUX_PWR_TURNING_OFF
		 * when it sends the packet.
		 *
		 * If we are already powering down, then no ACK is sent.
		 */
		if (smux.power_state == SMUX_PWR_ON) {
			ack_pkt = smux_alloc_pkt();
			if (ack_pkt) {
				SMUX_PWR("%s: Power %d->%d\n", __func__,
						smux.power_state,
						SMUX_PWR_TURNING_OFF_FLUSH);

				smux.power_state = SMUX_PWR_TURNING_OFF_FLUSH;

				/* send power-down ack */
				ack_pkt->hdr.cmd = SMUX_CMD_PWR_CTL;
				ack_pkt->hdr.flags = SMUX_CMD_PWR_CTL_ACK;
				ack_pkt->hdr.lcid = SMUX_BROADCAST_LCID;
				list_add_tail(&ack_pkt->list,
						&smux.power_queue);
				queue_work(smux_tx_wq, &smux_tx_work);
			}
		} else if (smux.power_state == SMUX_PWR_TURNING_OFF_FLUSH) {
			/* Local power-down request still in TX queue */
			SMUX_PWR("%s: Power-down shortcut - no ack\n",
					__func__);
			smux.power_ctl_remote_req_received = 1;
		} else if (smux.power_state == SMUX_PWR_TURNING_OFF) {
			/*
			 * Local power-down request already sent to remote
			 * side, so this request gets treated as an ACK.
			 */
			SMUX_PWR("%s: Power-down shortcut - no ack\n",
					__func__);
			power_down = 1;
		} else {
			pr_err("%s: sleep request invalid in state %d\n",
					__func__, smux.power_state);
		}
	}

	if (power_down) {
		SMUX_PWR("%s: Power %d->%d\n", __func__,
				smux.power_state, SMUX_PWR_OFF_FLUSH);
		smux.power_state = SMUX_PWR_OFF_FLUSH;
		queue_work(smux_tx_wq, &smux_inactivity_work);
	}
	spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);

	return 0;
}

/**
 * Handle dispatching a completed packet for receive processing.
 *
 * @pkt Packet to process
 *
 * @returns 0 for success
 */
static int smux_dispatch_rx_pkt(struct smux_pkt_t *pkt)
{
	int ret = -ENXIO;

	switch (pkt->hdr.cmd) {
	case SMUX_CMD_OPEN_LCH:
		SMUX_LOG_PKT_RX(pkt);
		if (smux_assert_lch_id(pkt->hdr.lcid)) {
			pr_err("%s: invalid channel id %d\n",
					__func__, pkt->hdr.lcid);
			break;
		}
		ret = smux_handle_rx_open_cmd(pkt);
		break;

	case SMUX_CMD_DATA:
		SMUX_LOG_PKT_RX(pkt);
		if (smux_assert_lch_id(pkt->hdr.lcid)) {
			pr_err("%s: invalid channel id %d\n",
					__func__, pkt->hdr.lcid);
			break;
		}
		ret = smux_handle_rx_data_cmd(pkt);
		break;

	case SMUX_CMD_CLOSE_LCH:
		SMUX_LOG_PKT_RX(pkt);
		if (smux_assert_lch_id(pkt->hdr.lcid)) {
			pr_err("%s: invalid channel id %d\n",
					__func__, pkt->hdr.lcid);
			break;
		}
		ret = smux_handle_rx_close_cmd(pkt);
		break;

	case SMUX_CMD_STATUS:
		SMUX_LOG_PKT_RX(pkt);
		if (smux_assert_lch_id(pkt->hdr.lcid)) {
			pr_err("%s: invalid channel id %d\n",
					__func__, pkt->hdr.lcid);
			break;
		}
		ret = smux_handle_rx_status_cmd(pkt);
		break;

	case SMUX_CMD_PWR_CTL:
		ret = smux_handle_rx_power_cmd(pkt);
		break;

	case SMUX_CMD_BYTE:
		SMUX_LOG_PKT_RX(pkt);
		ret = smux_handle_rx_byte_cmd(pkt);
		break;

	default:
		SMUX_LOG_PKT_RX(pkt);
		pr_err("%s: command %d unknown\n", __func__, pkt->hdr.cmd);
		ret = -EINVAL;
	}
	return ret;
}
1805
1806/**
1807 * Deserializes a packet and dispatches it to the packet receive logic.
1808 *
1809 * @data Raw data for one packet
1810 * @len Length of the data
1811 *
1812 * @returns 0 for success
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001813 */
1814static int smux_deserialize(unsigned char *data, int len)
1815{
1816 struct smux_pkt_t recv;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001817
1818 smux_init_pkt(&recv);
1819
1820 /*
1821 * It may be possible to optimize this to not use the
1822 * temporary buffer.
1823 */
1824 memcpy(&recv.hdr, data, sizeof(struct smux_hdr_t));
1825
1826 if (recv.hdr.magic != SMUX_MAGIC) {
1827 pr_err("%s: invalid header magic\n", __func__);
1828 return -EINVAL;
1829 }
1830
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001831 if (recv.hdr.payload_len)
1832 recv.payload = data + sizeof(struct smux_hdr_t);
1833
1834 return smux_dispatch_rx_pkt(&recv);
1835}
1836
1837/**
1838 * Handle wakeup request byte.
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001839 */
1840static void smux_handle_wakeup_req(void)
1841{
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001842 unsigned long flags;
1843
1844 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001845 if (smux.power_state == SMUX_PWR_OFF
1846 || smux.power_state == SMUX_PWR_TURNING_ON) {
1847 /* wakeup system */
Eric Holmbergff0b0112012-06-08 15:06:57 -06001848 SMUX_PWR("%s: Power %d->%d\n", __func__,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001849 smux.power_state, SMUX_PWR_ON);
1850 smux.power_state = SMUX_PWR_ON;
1851 queue_work(smux_tx_wq, &smux_wakeup_work);
1852 queue_work(smux_tx_wq, &smux_tx_work);
1853 queue_delayed_work(smux_tx_wq, &smux_delayed_inactivity_work,
1854 msecs_to_jiffies(SMUX_INACTIVITY_TIMEOUT_MS));
1855 smux_send_byte(SMUX_WAKEUP_ACK);
Eric Holmberga9b06472012-06-22 09:46:34 -06001856 } else if (smux.power_state == SMUX_PWR_ON) {
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001857 smux_send_byte(SMUX_WAKEUP_ACK);
Eric Holmberga9b06472012-06-22 09:46:34 -06001858 } else {
1859 /* stale wakeup request from previous wakeup */
1860 SMUX_PWR("%s: stale Wakeup REQ in state %d\n",
1861 __func__, smux.power_state);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001862 }
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001863 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001864}
1865
1866/**
1867 * Handle wakeup request ack.
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001868 */
1869static void smux_handle_wakeup_ack(void)
1870{
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001871 unsigned long flags;
1872
1873 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001874 if (smux.power_state == SMUX_PWR_TURNING_ON) {
1875 /* received response to wakeup request */
Eric Holmbergff0b0112012-06-08 15:06:57 -06001876 SMUX_PWR("%s: Power %d->%d\n", __func__,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001877 smux.power_state, SMUX_PWR_ON);
1878 smux.power_state = SMUX_PWR_ON;
1879 queue_work(smux_tx_wq, &smux_tx_work);
1880 queue_delayed_work(smux_tx_wq, &smux_delayed_inactivity_work,
1881 msecs_to_jiffies(SMUX_INACTIVITY_TIMEOUT_MS));
1882
1883 } else if (smux.power_state != SMUX_PWR_ON) {
1884 /* invalid message */
Eric Holmberga9b06472012-06-22 09:46:34 -06001885 SMUX_PWR("%s: stale Wakeup ACK in state %d\n",
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001886 __func__, smux.power_state);
1887 }
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001888 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001889}
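
/*
 * Wakeup handshake sketch (illustrative comment, not driver code):
 *
 *   initiator                                peer
 *     SMUX_WAKEUP_REQ  ------------------->   PWR_OFF -> PWR_ON
 *     TURNING_ON -> PWR_ON  <--------------   SMUX_WAKEUP_ACK
 *
 * Both sides then queue TX work and arm the inactivity timer. A REQ
 * received while already PWR_ON is simply re-ACKed; anything else is
 * logged as a stale wakeup byte.
 */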
1890
1891/**
1892 * RX State machine - IDLE state processing.
1893 *
1894 * @data New RX data to process
1895 * @len Length of the data
1896 * @used Return value of length processed
1897 * @flag Error flag - TTY_NORMAL 0 for no failure
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001898 */
1899static void smux_rx_handle_idle(const unsigned char *data,
1900 int len, int *used, int flag)
1901{
1902 int i;
1903
1904 if (flag) {
1905 if (smux_byte_loopback)
1906 smux_receive_byte(SMUX_UT_ECHO_ACK_FAIL,
1907 smux_byte_loopback);
1908 pr_err("%s: TTY error 0x%x - ignoring\n", __func__, flag);
1909 ++*used;
1910 return;
1911 }
1912
1913 for (i = *used; i < len && smux.rx_state == SMUX_RX_IDLE; i++) {
1914 switch (data[i]) {
1915 case SMUX_MAGIC_WORD1:
1916 smux.rx_state = SMUX_RX_MAGIC;
1917 break;
1918 case SMUX_WAKEUP_REQ:
Eric Holmberg4dd6e1a2012-06-26 13:46:29 -06001919 SMUX_PWR("smux: RX Wakeup REQ\n");
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001920 smux_handle_wakeup_req();
1921 break;
1922 case SMUX_WAKEUP_ACK:
Eric Holmberg4dd6e1a2012-06-26 13:46:29 -06001923 SMUX_PWR("smux: RX Wakeup ACK\n");
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001924 smux_handle_wakeup_ack();
1925 break;
1926 default:
1927 /* unexpected character */
1928 if (smux_byte_loopback && data[i] == SMUX_UT_ECHO_REQ)
1929 smux_receive_byte(SMUX_UT_ECHO_ACK_OK,
1930 smux_byte_loopback);
1931 pr_err("%s: parse error 0x%02x - ignoring\n", __func__,
1932 (unsigned)data[i]);
1933 break;
1934 }
1935 }
1936
1937 *used = i;
1938}
1939
1940/**
1941 * RX State machine - Header Magic state processing.
1942 *
1943 * @data New RX data to process
1944 * @len Length of the data
1945 * @used Return value of length processed
1946 * @flag Error flag - TTY_NORMAL 0 for no failure
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001947 */
1948static void smux_rx_handle_magic(const unsigned char *data,
1949 int len, int *used, int flag)
1950{
1951 int i;
1952
1953 if (flag) {
1954 pr_err("%s: TTY RX error %d\n", __func__, flag);
1955 smux_enter_reset();
1956 smux.rx_state = SMUX_RX_FAILURE;
1957 ++*used;
1958 return;
1959 }
1960
1961 for (i = *used; i < len && smux.rx_state == SMUX_RX_MAGIC; i++) {
1962 /* wait for completion of the magic */
1963 if (data[i] == SMUX_MAGIC_WORD2) {
1964 smux.recv_len = 0;
1965 smux.recv_buf[smux.recv_len++] = SMUX_MAGIC_WORD1;
1966 smux.recv_buf[smux.recv_len++] = SMUX_MAGIC_WORD2;
1967 smux.rx_state = SMUX_RX_HDR;
1968 } else {
1969 /* unexpected / trash character */
1970 pr_err("%s: rx parse error for char %c; *used=%d, len=%d\n",
1971 __func__, data[i], *used, len);
1972 smux.rx_state = SMUX_RX_IDLE;
1973 }
1974 }
1975
1976 *used = i;
1977}
1978
1979/**
1980 * RX State machine - Packet Header state processing.
1981 *
1982 * @data New RX data to process
1983 * @len Length of the data
1984 * @used Return value of length processed
1985 * @flag Error flag - TTY_NORMAL 0 for no failure
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001986 */
1987static void smux_rx_handle_hdr(const unsigned char *data,
1988 int len, int *used, int flag)
1989{
1990 int i;
1991 struct smux_hdr_t *hdr;
1992
1993 if (flag) {
1994 pr_err("%s: TTY RX error %d\n", __func__, flag);
1995 smux_enter_reset();
1996 smux.rx_state = SMUX_RX_FAILURE;
1997 ++*used;
1998 return;
1999 }
2000
2001 for (i = *used; i < len && smux.rx_state == SMUX_RX_HDR; i++) {
2002 smux.recv_buf[smux.recv_len++] = data[i];
2003
2004 if (smux.recv_len == sizeof(struct smux_hdr_t)) {
2005 /* complete header received */
2006 hdr = (struct smux_hdr_t *)smux.recv_buf;
2007 smux.pkt_remain = hdr->payload_len + hdr->pad_len;
2008 smux.rx_state = SMUX_RX_PAYLOAD;
2009 }
2010 }
2011 *used = i;
2012}
2013
2014/**
2015 * RX State machine - Packet Payload state processing.
2016 *
2017 * @data New RX data to process
2018 * @len Length of the data
2019 * @used Return value of length processed
2020 * @flag Error flag - TTY_NORMAL 0 for no failure
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002021 */
2022static void smux_rx_handle_pkt_payload(const unsigned char *data,
2023 int len, int *used, int flag)
2024{
2025 int remaining;
2026
2027 if (flag) {
2028 pr_err("%s: TTY RX error %d\n", __func__, flag);
2029 smux_enter_reset();
2030 smux.rx_state = SMUX_RX_FAILURE;
2031 ++*used;
2032 return;
2033 }
2034
2035 /* copy data into rx buffer */
2036 if (smux.pkt_remain < (len - *used))
2037 remaining = smux.pkt_remain;
2038 else
2039 remaining = len - *used;
2040
2041 memcpy(&smux.recv_buf[smux.recv_len], &data[*used], remaining);
2042 smux.recv_len += remaining;
2043 smux.pkt_remain -= remaining;
2044 *used += remaining;
2045
2046 if (smux.pkt_remain == 0) {
2047 /* complete packet received */
2048 smux_deserialize(smux.recv_buf, smux.recv_len);
2049 smux.rx_state = SMUX_RX_IDLE;
2050 }
2051}
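
/*
 * On-the-wire framing implied by the RX states above (illustrative
 * comment, not driver code):
 *
 *   SMUX_MAGIC_WORD1 | SMUX_MAGIC_WORD2 | rest of struct smux_hdr_t
 *   | payload_len bytes of payload | pad_len bytes of padding
 *
 * IDLE hunts for MAGIC_WORD1, MAGIC waits for MAGIC_WORD2, HDR
 * accumulates sizeof(struct smux_hdr_t) bytes (the magic included),
 * and PAYLOAD copies payload_len + pad_len bytes before handing the
 * buffer to smux_deserialize().
 */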
2052
2053/**
2054 * Feed data to the receive state machine.
2055 *
2056 * @data Pointer to data block
2057 * @len Length of data
2058 * @flag TTY_NORMAL (0) for no error, otherwise TTY Error Flag
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002059 */
2060void smux_rx_state_machine(const unsigned char *data,
2061 int len, int flag)
2062{
Eric Holmberg0560f7a2012-05-31 15:50:26 -06002063 struct smux_rx_worker_data work;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002064
Eric Holmberg0560f7a2012-05-31 15:50:26 -06002065 work.data = data;
2066 work.len = len;
2067 work.flag = flag;
2068 INIT_WORK_ONSTACK(&work.work, smux_rx_worker);
2069 work.work_complete = COMPLETION_INITIALIZER_ONSTACK(work.work_complete);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002070
Eric Holmberg0560f7a2012-05-31 15:50:26 -06002071 queue_work(smux_rx_wq, &work.work);
2072 wait_for_completion(&work.work_complete);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002073}
2074
2075/**
2076 * Add channel to transmit-ready list and trigger transmit worker.
2077 *
2078 * @ch Channel to add
2079 */
2080static void list_channel(struct smux_lch_t *ch)
2081{
2082 unsigned long flags;
2083
2084 SMUX_DBG("%s: listing channel %d\n",
2085 __func__, ch->lcid);
2086
2087 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
2088 spin_lock(&ch->tx_lock_lhb2);
2089 smux.tx_activity_flag = 1;
2090 if (list_empty(&ch->tx_ready_list))
2091 list_add_tail(&ch->tx_ready_list, &smux.lch_tx_ready_list);
2092 spin_unlock(&ch->tx_lock_lhb2);
2093 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2094
2095 queue_work(smux_tx_wq, &smux_tx_work);
2096}
2097
2098/**
2099 * Transmit packet on correct transport and then perform client
2100 * notification.
2101 *
2102 * @ch Channel to transmit on
2103 * @pkt Packet to transmit
2104 */
2105static void smux_tx_pkt(struct smux_lch_t *ch, struct smux_pkt_t *pkt)
2106{
2107 union notifier_metadata meta_write;
2108 int ret;
2109
2110 if (ch && pkt) {
2111 SMUX_LOG_PKT_TX(pkt);
2112 if (ch->local_mode == SMUX_LCH_MODE_LOCAL_LOOPBACK)
2113 ret = smux_tx_loopback(pkt);
2114 else
2115 ret = smux_tx_tty(pkt);
2116
2117 if (pkt->hdr.cmd == SMUX_CMD_DATA) {
2118 /* notify write-done */
2119 meta_write.write.pkt_priv = pkt->priv;
2120 meta_write.write.buffer = pkt->payload;
2121 meta_write.write.len = pkt->hdr.payload_len;
2122 if (ret >= 0) {
2123 SMUX_DBG("%s: PKT write done\n", __func__);
2124 schedule_notify(ch->lcid, SMUX_WRITE_DONE,
2125 &meta_write);
2126 } else {
2127 pr_err("%s: failed to write pkt %d\n",
2128 __func__, ret);
2129 schedule_notify(ch->lcid, SMUX_WRITE_FAIL,
2130 &meta_write);
2131 }
2132 }
2133 }
2134}
2135
2136/**
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002137 * Flush pending TTY TX data.
2138 */
2139static void smux_flush_tty(void)
2140{
Eric Holmberg92a67df2012-06-25 13:56:24 -06002141 mutex_lock(&smux.mutex_lha0);
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002142 if (!smux.tty) {
2143 pr_err("%s: ldisc not loaded\n", __func__);
Eric Holmberg92a67df2012-06-25 13:56:24 -06002144 mutex_unlock(&smux.mutex_lha0);
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002145 return;
2146 }
2147
2148 tty_wait_until_sent(smux.tty,
2149 msecs_to_jiffies(TTY_BUFFER_FULL_WAIT_MS));
2150
2151 if (tty_chars_in_buffer(smux.tty) > 0)
2152 pr_err("%s: unable to flush UART queue\n", __func__);
Eric Holmberg92a67df2012-06-25 13:56:24 -06002153
2154 mutex_unlock(&smux.mutex_lha0);
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002155}
2156
2157/**
Eric Holmberged1f00c2012-06-07 09:45:18 -06002158 * Purge TX queue for logical channel.
2159 *
2160 * @ch Logical channel pointer
Eric Holmberg6fcf5322012-07-11 11:46:28 -06002161 * @is_ssr 1 = this is a subsystem restart purge
Eric Holmberged1f00c2012-06-07 09:45:18 -06002162 *
2163 * Must be called with the following spinlocks locked:
2164 * state_lock_lhb1
2165 * tx_lock_lhb2
2166 */
Eric Holmberg6fcf5322012-07-11 11:46:28 -06002167static void smux_purge_ch_tx_queue(struct smux_lch_t *ch, int is_ssr)
Eric Holmberged1f00c2012-06-07 09:45:18 -06002168{
2169 struct smux_pkt_t *pkt;
2170 int send_disconnect = 0;
Eric Holmberg6fcf5322012-07-11 11:46:28 -06002171 struct smux_pkt_t *pkt_tmp;
2172 int is_state_pkt;
Eric Holmberged1f00c2012-06-07 09:45:18 -06002173
Eric Holmberg6fcf5322012-07-11 11:46:28 -06002174 list_for_each_entry_safe(pkt, pkt_tmp, &ch->tx_queue, list) {
2175 is_state_pkt = 0;
Eric Holmberged1f00c2012-06-07 09:45:18 -06002176 if (pkt->hdr.cmd == SMUX_CMD_OPEN_LCH) {
Eric Holmberg6fcf5322012-07-11 11:46:28 -06002177 if (pkt->hdr.flags & SMUX_CMD_OPEN_ACK) {
2178 /* Open ACK must still be sent */
2179 is_state_pkt = 1;
2180 } else {
2181 /* Open never sent -- force to closed state */
2182 ch->local_state = SMUX_LCH_LOCAL_CLOSED;
2183 send_disconnect = 1;
2184 }
2185 } else if (pkt->hdr.cmd == SMUX_CMD_CLOSE_LCH) {
2186 if (pkt->hdr.flags & SMUX_CMD_CLOSE_ACK)
2187 is_state_pkt = 1;
2188 if (!send_disconnect)
2189 is_state_pkt = 1;
Eric Holmberged1f00c2012-06-07 09:45:18 -06002190 } else if (pkt->hdr.cmd == SMUX_CMD_DATA) {
2191 /* Notify client of failed write */
2192 union notifier_metadata meta_write;
2193
2194 meta_write.write.pkt_priv = pkt->priv;
2195 meta_write.write.buffer = pkt->payload;
2196 meta_write.write.len = pkt->hdr.payload_len;
2197 schedule_notify(ch->lcid, SMUX_WRITE_FAIL, &meta_write);
2198 }
Eric Holmberg6fcf5322012-07-11 11:46:28 -06002199
2200 if (!is_state_pkt || is_ssr) {
2201 list_del(&pkt->list);
2202 smux_free_pkt(pkt);
2203 }
Eric Holmberged1f00c2012-06-07 09:45:18 -06002204 }
2205
2206 if (send_disconnect) {
2207 union notifier_metadata meta_disconnected;
2208
2209 meta_disconnected.disconnected.is_ssr = smux.in_reset;
2210 schedule_notify(ch->lcid, SMUX_DISCONNECTED,
2211 &meta_disconnected);
2212 }
2213}
2214
2215/**
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002216 * Power up the UART.
Eric Holmberg92a67df2012-06-25 13:56:24 -06002217 *
2218 * Must be called with smux.mutex_lha0 already locked.
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002219 */
Eric Holmberg92a67df2012-06-25 13:56:24 -06002220static void smux_uart_power_on_atomic(void)
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002221{
2222 struct uart_state *state;
2223
2224 if (!smux.tty || !smux.tty->driver_data) {
2225 pr_err("%s: unable to find UART port for tty %p\n",
2226 __func__, smux.tty);
2227 return;
2228 }
2229 state = smux.tty->driver_data;
2230 msm_hs_request_clock_on(state->uart_port);
2231}
2232
2233/**
Eric Holmberg92a67df2012-06-25 13:56:24 -06002234 * Power up the UART.
2235 */
2236static void smux_uart_power_on(void)
2237{
2238 mutex_lock(&smux.mutex_lha0);
2239 smux_uart_power_on_atomic();
2240 mutex_unlock(&smux.mutex_lha0);
2241}
2242
2243/**
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002244 * Power down the UART.
Eric Holmberg06011322012-07-06 18:17:03 -06002245 *
2246 * Must be called with mutex_lha0 locked.
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002247 */
Eric Holmberg06011322012-07-06 18:17:03 -06002248static void smux_uart_power_off_atomic(void)
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002249{
2250 struct uart_state *state;
2251
2252 if (!smux.tty || !smux.tty->driver_data) {
2253 pr_err("%s: unable to find UART port for tty %p\n",
2254 __func__, smux.tty);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002256 return;
2257 }
2258 state = smux.tty->driver_data;
2259 msm_hs_request_clock_off(state->uart_port);
Eric Holmberg06011322012-07-06 18:17:03 -06002260}
2261
2262/**
2263 * Power down the UART.
2264 */
2265static void smux_uart_power_off(void)
2266{
2267 mutex_lock(&smux.mutex_lha0);
2268 smux_uart_power_off_atomic();
Eric Holmberg92a67df2012-06-25 13:56:24 -06002269 mutex_unlock(&smux.mutex_lha0);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002270}
2271
2272/**
2273 * TX Wakeup Worker
2274 *
2275 * @work Not used
2276 *
2277 * Do an exponential back-off wakeup sequence with a maximum period
2278 * of approximately 1 second (1 << 20 microseconds).
2279 */
2280static void smux_wakeup_worker(struct work_struct *work)
2281{
2282 unsigned long flags;
2283 unsigned wakeup_delay;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002284
Eric Holmberg2d4f9e82012-06-21 13:12:39 -06002285 if (smux.in_reset)
2286 return;
2287
2288 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
2289 if (smux.power_state == SMUX_PWR_ON) {
2290 /* wakeup complete */
Eric Holmberga9b06472012-06-22 09:46:34 -06002291 smux.pwr_wakeup_delay_us = 1;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002292 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002293 SMUX_DBG("%s: wakeup complete\n", __func__);
Eric Holmberg2d4f9e82012-06-21 13:12:39 -06002294
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002295 /*
2296 * Cancel any pending retry. This avoids a race condition with
2297 * a new power-up request because:
2298 * 1) this worker doesn't modify the state
2299 * 2) this worker is processed on the same single-threaded
2300 * workqueue as new TX wakeup requests
2301 */
2302 cancel_delayed_work(&smux_wakeup_delayed_work);
Eric Holmbergd032f5b2012-06-29 19:02:00 -06002303 queue_work(smux_tx_wq, &smux_tx_work);
Eric Holmberga9b06472012-06-22 09:46:34 -06002304 } else if (smux.power_state == SMUX_PWR_TURNING_ON) {
Eric Holmberg2d4f9e82012-06-21 13:12:39 -06002305 /* retry wakeup */
2306 wakeup_delay = smux.pwr_wakeup_delay_us;
2307 smux.pwr_wakeup_delay_us <<= 1;
2308 if (smux.pwr_wakeup_delay_us > SMUX_WAKEUP_DELAY_MAX)
2309 smux.pwr_wakeup_delay_us =
2310 SMUX_WAKEUP_DELAY_MAX;
2311
2312 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
Eric Holmberga9b06472012-06-22 09:46:34 -06002313 SMUX_PWR("%s: triggering wakeup\n", __func__);
Eric Holmberg2d4f9e82012-06-21 13:12:39 -06002314 smux_send_byte(SMUX_WAKEUP_REQ);
2315
2316 if (wakeup_delay < SMUX_WAKEUP_DELAY_MIN) {
2317 SMUX_DBG("%s: sleeping for %u us\n", __func__,
2318 wakeup_delay);
2319 usleep_range(wakeup_delay, 2*wakeup_delay);
2320 queue_work(smux_tx_wq, &smux_wakeup_work);
2321 } else {
2322 /* schedule delayed work */
2323 SMUX_DBG("%s: scheduling delayed wakeup in %u ms\n",
2324 __func__, wakeup_delay / 1000);
2325 queue_delayed_work(smux_tx_wq,
2326 &smux_wakeup_delayed_work,
2327 msecs_to_jiffies(wakeup_delay / 1000));
2328 }
Eric Holmberga9b06472012-06-22 09:46:34 -06002329 } else {
2330 /* wakeup aborted */
2331 smux.pwr_wakeup_delay_us = 1;
2332 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2333 SMUX_PWR("%s: wakeup aborted\n", __func__);
2334 cancel_delayed_work(&smux_wakeup_delayed_work);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002335 }
2336}
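
/*
 * Back-off progression sketch (illustrative only, compiled out): the
 * retry delay doubles from 1 us up to SMUX_WAKEUP_DELAY_MAX (1 << 20
 * us, ~1.05 s). Delays below SMUX_WAKEUP_DELAY_MIN (1 << 15 us,
 * ~32.8 ms) are slept inline in the worker; longer delays go through
 * delayed work instead.
 */
#if 0
static void smux_wakeup_backoff_example(void)
{
	unsigned delay_us = 1;

	while (delay_us <= SMUX_WAKEUP_DELAY_MAX) {
		pr_info("retry in %u us via %s\n", delay_us,
			delay_us < SMUX_WAKEUP_DELAY_MIN ?
				"usleep_range" : "delayed work");
		delay_us <<= 1;	/* 1, 2, 4, ..., 1048576 us */
	}
}
#endif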
2337
2338
2339/**
2340 * Inactivity timeout worker. Periodically scheduled when link is active.
2341 * When it detects inactivity, it powers down the UART link.
2342 *
2343 * @work Work structure (not used)
2344 */
2345static void smux_inactivity_worker(struct work_struct *work)
2346{
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002347 struct smux_pkt_t *pkt;
2348 unsigned long flags;
2349
Eric Holmberg06011322012-07-06 18:17:03 -06002350 if (smux.in_reset)
2351 return;
2352
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002353 spin_lock_irqsave(&smux.rx_lock_lha1, flags);
2354 spin_lock(&smux.tx_lock_lha2);
2355
2356 if (!smux.tx_activity_flag && !smux.rx_activity_flag) {
2357 /* no activity */
2358 if (smux.powerdown_enabled) {
2359 if (smux.power_state == SMUX_PWR_ON) {
2360 /* start power-down sequence */
2361 pkt = smux_alloc_pkt();
2362 if (pkt) {
Eric Holmbergff0b0112012-06-08 15:06:57 -06002363 SMUX_PWR("%s: Power %d->%d\n", __func__,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002364 smux.power_state,
Eric Holmberga9b06472012-06-22 09:46:34 -06002365 SMUX_PWR_TURNING_OFF_FLUSH);
2366 smux.power_state =
2367 SMUX_PWR_TURNING_OFF_FLUSH;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002368
2369 /* send power-down request */
2370 pkt->hdr.cmd = SMUX_CMD_PWR_CTL;
2371 pkt->hdr.flags = 0;
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002372 pkt->hdr.lcid = SMUX_BROADCAST_LCID;
2373 list_add_tail(&pkt->list,
2374 &smux.power_queue);
2375 queue_work(smux_tx_wq, &smux_tx_work);
2376 } else {
2377 pr_err("%s: packet alloc failed\n",
2378 __func__);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002379 }
2380 }
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002381 }
2382 }
2383 smux.tx_activity_flag = 0;
2384 smux.rx_activity_flag = 0;
2385
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002386 if (smux.power_state == SMUX_PWR_OFF_FLUSH) {
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002387 /* ready to power-down the UART */
Eric Holmbergff0b0112012-06-08 15:06:57 -06002388 SMUX_PWR("%s: Power %d->%d\n", __func__,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002389 smux.power_state, SMUX_PWR_OFF);
Eric Holmbergff0b0112012-06-08 15:06:57 -06002390 smux.power_state = SMUX_PWR_OFF;
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002391
2392 /* if data is pending, schedule a new wakeup */
2393 if (!list_empty(&smux.lch_tx_ready_list) ||
2394 !list_empty(&smux.power_queue))
2395 queue_work(smux_tx_wq, &smux_tx_work);
2396
2397 spin_unlock(&smux.tx_lock_lha2);
2398 spin_unlock_irqrestore(&smux.rx_lock_lha1, flags);
2399
2400 /* flush UART output queue and power down */
2401 smux_flush_tty();
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002402 smux_uart_power_off();
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002403 } else {
2404 spin_unlock(&smux.tx_lock_lha2);
2405 spin_unlock_irqrestore(&smux.rx_lock_lha1, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002406 }
2407
2408 /* reschedule inactivity worker */
2409 if (smux.power_state != SMUX_PWR_OFF)
2410 queue_delayed_work(smux_tx_wq, &smux_delayed_inactivity_work,
2411 msecs_to_jiffies(SMUX_INACTIVITY_TIMEOUT_MS));
2412}
2413
2414/**
Eric Holmbergb8435c82012-06-05 14:51:29 -06002415 * Remove RX retry packet from channel and free it.
2416 *
Eric Holmbergb8435c82012-06-05 14:51:29 -06002417 * @ch Channel for retry packet
2418 * @retry Retry packet to remove
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002419 *
2420 * @returns 1 if flow control updated; 0 otherwise
2421 *
2422 * Must be called with state_lock_lhb1 locked.
Eric Holmbergb8435c82012-06-05 14:51:29 -06002423 */
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002424int smux_remove_rx_retry(struct smux_lch_t *ch,
Eric Holmbergb8435c82012-06-05 14:51:29 -06002425 struct smux_rx_pkt_retry *retry)
2426{
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002427 int tx_ready = 0;
2428
Eric Holmbergb8435c82012-06-05 14:51:29 -06002429 list_del(&retry->rx_retry_list);
2430 --ch->rx_retry_queue_cnt;
2431 smux_free_pkt(retry->pkt);
2432 kfree(retry);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002433
2434 if ((ch->options & SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP) &&
2435 (ch->rx_retry_queue_cnt <= SMUX_RX_WM_LOW) &&
2436 ch->rx_flow_control_auto) {
2437 ch->rx_flow_control_auto = 0;
2438 smux_rx_flow_control_updated(ch);
2439 schedule_notify(ch->lcid, SMUX_RX_RETRY_LOW_WM_HIT, NULL);
2440 tx_ready = 1;
2441 }
2442 return tx_ready;
Eric Holmbergb8435c82012-06-05 14:51:29 -06002443}
2444
2445/**
Eric Holmberg0560f7a2012-05-31 15:50:26 -06002446 * RX worker handles all receive operations.
2447 *
2448 * @work Work structure contained in struct smux_rx_worker_data
2449 */
2450static void smux_rx_worker(struct work_struct *work)
2451{
2452 unsigned long flags;
2453 int used;
2454 int initial_rx_state;
2455 struct smux_rx_worker_data *w;
2456 const unsigned char *data;
2457 int len;
2458 int flag;
2459
2460 w = container_of(work, struct smux_rx_worker_data, work);
2461 data = w->data;
2462 len = w->len;
2463 flag = w->flag;
2464
2465 spin_lock_irqsave(&smux.rx_lock_lha1, flags);
2466 smux.rx_activity_flag = 1;
2467 spin_unlock_irqrestore(&smux.rx_lock_lha1, flags);
2468
2469 SMUX_DBG("%s: %p, len=%d, flag=%d\n", __func__, data, len, flag);
2470 used = 0;
2471 do {
Eric Holmberg06011322012-07-06 18:17:03 -06002472 if (smux.in_reset) {
2473 SMUX_DBG("%s: abort RX due to reset\n", __func__);
2474 smux.rx_state = SMUX_RX_IDLE;
2475 break;
2476 }
2477
Eric Holmberg0560f7a2012-05-31 15:50:26 -06002478 SMUX_DBG("%s: state %d; %d of %d\n",
2479 __func__, smux.rx_state, used, len);
2480 initial_rx_state = smux.rx_state;
2481
2482 switch (smux.rx_state) {
2483 case SMUX_RX_IDLE:
2484 smux_rx_handle_idle(data, len, &used, flag);
2485 break;
2486 case SMUX_RX_MAGIC:
2487 smux_rx_handle_magic(data, len, &used, flag);
2488 break;
2489 case SMUX_RX_HDR:
2490 smux_rx_handle_hdr(data, len, &used, flag);
2491 break;
2492 case SMUX_RX_PAYLOAD:
2493 smux_rx_handle_pkt_payload(data, len, &used, flag);
2494 break;
2495 default:
2496 SMUX_DBG("%s: invalid state %d\n",
2497 __func__, smux.rx_state);
2498 smux.rx_state = SMUX_RX_IDLE;
2499 break;
2500 }
2501 } while (used < len || smux.rx_state != initial_rx_state);
2502
2503 complete(&w->work_complete);
2504}
2505
2506/**
Eric Holmbergb8435c82012-06-05 14:51:29 -06002507 * RX Retry worker handles retrying get_rx_buffer calls that previously failed
2508 * because the client was not ready (-EAGAIN).
2509 *
2510 * @work Work structure contained in smux_lch_t structure
2511 */
2512static void smux_rx_retry_worker(struct work_struct *work)
2513{
2514 struct smux_lch_t *ch;
2515 struct smux_rx_pkt_retry *retry;
2516 union notifier_metadata metadata;
2517 int tmp;
2518 unsigned long flags;
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002519 int immediate_retry = 0;
2520 int tx_ready = 0;
Eric Holmbergb8435c82012-06-05 14:51:29 -06002521
2522 ch = container_of(work, struct smux_lch_t, rx_retry_work.work);
2523
2524 /* get next retry packet */
2525 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
Eric Holmberg06011322012-07-06 18:17:03 -06002526 if ((ch->local_state != SMUX_LCH_LOCAL_OPENED) || smux.in_reset) {
Eric Holmbergb8435c82012-06-05 14:51:29 -06002527 /* port has been closed - remove all retries */
2528 while (!list_empty(&ch->rx_retry_queue)) {
2529 retry = list_first_entry(&ch->rx_retry_queue,
2530 struct smux_rx_pkt_retry,
2531 rx_retry_list);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002532 (void)smux_remove_rx_retry(ch, retry);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002533 }
2534 }
2535
2536 if (list_empty(&ch->rx_retry_queue)) {
2537 SMUX_DBG("%s: retry list empty for channel %d\n",
2538 __func__, ch->lcid);
2539 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2540 return;
2541 }
2542 retry = list_first_entry(&ch->rx_retry_queue,
2543 struct smux_rx_pkt_retry,
2544 rx_retry_list);
2545 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2546
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002547 SMUX_DBG("%s: ch %d retrying rx pkt %p\n",
2548 __func__, ch->lcid, retry);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002549 metadata.read.pkt_priv = 0;
2550 metadata.read.buffer = 0;
2551 tmp = ch->get_rx_buffer(ch->priv,
2552 (void **)&metadata.read.pkt_priv,
2553 (void **)&metadata.read.buffer,
2554 retry->pkt->hdr.payload_len);
2555 if (tmp == 0 && metadata.read.buffer) {
2556 /* have valid RX buffer */
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002557
Eric Holmbergb8435c82012-06-05 14:51:29 -06002558 memcpy(metadata.read.buffer, retry->pkt->payload,
2559 retry->pkt->hdr.payload_len);
2560 metadata.read.len = retry->pkt->hdr.payload_len;
2561
2562 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002563 tx_ready = smux_remove_rx_retry(ch, retry);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002564 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002565 schedule_notify(ch->lcid, SMUX_READ_DONE, &metadata);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002566 if (tx_ready)
2567 list_channel(ch);
2568
2569 immediate_retry = 1;
Eric Holmbergb8435c82012-06-05 14:51:29 -06002570 } else if (tmp == -EAGAIN ||
2571 (tmp == 0 && !metadata.read.buffer)) {
2572 /* retry again */
2573 retry->timeout_in_ms <<= 1;
2574 if (retry->timeout_in_ms > SMUX_RX_RETRY_MAX_MS) {
2575 /* timed out */
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002576 pr_err("%s: ch %d RX retry client timeout\n",
2577 __func__, ch->lcid);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002578 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002579 tx_ready = smux_remove_rx_retry(ch, retry);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002580 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002581 schedule_notify(ch->lcid, SMUX_READ_FAIL, NULL);
2582 if (tx_ready)
2583 list_channel(ch);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002584 }
2585 } else {
2586 /* client error - drop packet */
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002587 pr_err("%s: ch %d RX retry client failed (%d)\n",
2588 __func__, ch->lcid, tmp);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002589 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002590 tx_ready = smux_remove_rx_retry(ch, retry);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002591 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002592 schedule_notify(ch->lcid, SMUX_READ_FAIL, NULL);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002593 if (tx_ready)
2594 list_channel(ch);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002595 }
2596
2597 /* schedule next retry */
2598 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2599 if (!list_empty(&ch->rx_retry_queue)) {
2600 retry = list_first_entry(&ch->rx_retry_queue,
2601 struct smux_rx_pkt_retry,
2602 rx_retry_list);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002603
2604 if (immediate_retry)
2605 queue_delayed_work(smux_rx_wq, &ch->rx_retry_work, 0);
2606 else
2607 queue_delayed_work(smux_rx_wq, &ch->rx_retry_work,
2608 msecs_to_jiffies(retry->timeout_in_ms));
Eric Holmbergb8435c82012-06-05 14:51:29 -06002609 }
2610 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2611}
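
/*
 * RX retry timing sketch (illustrative comment, not driver code): a
 * get_rx_buffer() call that fails with -EAGAIN (or returns a NULL
 * buffer) is retried with a doubling delay starting at
 * SMUX_RX_RETRY_MIN_MS (1 ms); once the delay exceeds
 * SMUX_RX_RETRY_MAX_MS (1024 ms) the packet is dropped and the client
 * is notified with SMUX_READ_FAIL. A successful retry schedules the
 * next queued packet immediately.
 */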
2612
2613/**
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002614 * Transmit worker handles serializing and transmitting packets onto the
2615 * underlying transport.
2616 *
2617 * @work Work structure (not used)
2618 */
2619static void smux_tx_worker(struct work_struct *work)
2620{
2621 struct smux_pkt_t *pkt;
2622 struct smux_lch_t *ch;
2623 unsigned low_wm_notif;
2624 unsigned lcid;
2625 unsigned long flags;
2626
2627
2628 /*
2629 * Transmit packets in round-robin fashion based upon ready
2630 * channels.
2631 *
2632 * To eliminate the need to hold a lock for the entire
2633 * iteration through the channel ready list, the head of the
2634 * ready-channel list is always the next channel to be
2635 * processed. To send a packet, the first valid packet in
2636 * the head channel is removed and the head channel is then
2637 * rescheduled at the end of the queue by removing it and
2638 * inserting after the tail. The locks can then be released
2639 * while the packet is processed.
2640 */
Eric Holmberged1f00c2012-06-07 09:45:18 -06002641 while (!smux.in_reset) {
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002642 pkt = NULL;
2643 low_wm_notif = 0;
2644
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002645 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002646
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002647 /* handle wakeup if needed */
2648 if (smux.power_state == SMUX_PWR_OFF) {
2649 if (!list_empty(&smux.lch_tx_ready_list) ||
2650 !list_empty(&smux.power_queue)) {
2651 /* data to transmit, do wakeup */
Eric Holmbergff0b0112012-06-08 15:06:57 -06002652 SMUX_PWR("%s: Power %d->%d\n", __func__,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002653 smux.power_state,
2654 SMUX_PWR_TURNING_ON);
2655 smux.power_state = SMUX_PWR_TURNING_ON;
2656 spin_unlock_irqrestore(&smux.tx_lock_lha2,
2657 flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002658 queue_work(smux_tx_wq, &smux_wakeup_work);
2659 } else {
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002660 /* no activity -- stay asleep */
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002661 spin_unlock_irqrestore(&smux.tx_lock_lha2,
2662 flags);
2663 }
2664 break;
2665 }
2666
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002667 /* process any pending power packets */
2668 if (!list_empty(&smux.power_queue)) {
2669 pkt = list_first_entry(&smux.power_queue,
2670 struct smux_pkt_t, list);
2671 list_del(&pkt->list);
2672 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2673
Eric Holmberga9b06472012-06-22 09:46:34 -06002674 /* Adjust power state if this is a flush command */
2675 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
2676 if (smux.power_state == SMUX_PWR_TURNING_OFF_FLUSH &&
2677 pkt->hdr.cmd == SMUX_CMD_PWR_CTL) {
2678 if (pkt->hdr.flags & SMUX_CMD_PWR_CTL_ACK ||
2679 smux.power_ctl_remote_req_received) {
2680 /*
2681 * Sending remote power-down request ACK
2682 * or sending local power-down request
2683 * and we already received a remote
2684 * power-down request.
2685 */
2686 SMUX_PWR("%s: Power %d->%d\n", __func__,
2687 smux.power_state,
2688 SMUX_PWR_OFF_FLUSH);
2689 smux.power_state = SMUX_PWR_OFF_FLUSH;
2690 smux.power_ctl_remote_req_received = 0;
2691 queue_work(smux_tx_wq,
2692 &smux_inactivity_work);
2693 } else {
2694 /* sending local power-down request */
2695 SMUX_PWR("%s: Power %d->%d\n", __func__,
2696 smux.power_state,
2697 SMUX_PWR_TURNING_OFF);
2698 smux.power_state = SMUX_PWR_TURNING_OFF;
2699 }
2700 }
2701 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2702
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002703 /* send the packet */
Eric Holmberga9b06472012-06-22 09:46:34 -06002704 smux_uart_power_on();
2705 smux.tx_activity_flag = 1;
Eric Holmberg4dd6e1a2012-06-26 13:46:29 -06002706 SMUX_PWR_PKT_TX(pkt);
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002707 if (!smux_byte_loopback) {
2708 smux_tx_tty(pkt);
2709 smux_flush_tty();
2710 } else {
2711 smux_tx_loopback(pkt);
2712 }
2713
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002714 smux_free_pkt(pkt);
2715 continue;
2716 }
2717
2718 /* get the next ready channel */
2719 if (list_empty(&smux.lch_tx_ready_list)) {
2720 /* no ready channels */
2721 SMUX_DBG("%s: no more ready channels, exiting\n",
2722 __func__);
2723 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2724 break;
2725 }
2726 smux.tx_activity_flag = 1;
2727
2728 if (smux.power_state != SMUX_PWR_ON) {
2729 /* channel not ready to transmit */
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002730 SMUX_DBG("%s: waiting for link up (state %d)\n",
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002731 __func__,
2732 smux.power_state);
2733 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2734 break;
2735 }
2736
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002737 /* get the next packet to send and rotate channel list */
2738 ch = list_first_entry(&smux.lch_tx_ready_list,
2739 struct smux_lch_t,
2740 tx_ready_list);
2741
2742 spin_lock(&ch->state_lock_lhb1);
2743 spin_lock(&ch->tx_lock_lhb2);
2744 if (!list_empty(&ch->tx_queue)) {
2745 /*
2746 * If remote TX flow control is enabled or
2747 * the channel is not fully opened, then only
2748 * send command packets.
2749 */
2750 if (ch->tx_flow_control || !IS_FULLY_OPENED(ch)) {
2751 struct smux_pkt_t *curr;
2752 list_for_each_entry(curr, &ch->tx_queue, list) {
2753 if (curr->hdr.cmd != SMUX_CMD_DATA) {
2754 pkt = curr;
2755 break;
2756 }
2757 }
2758 } else {
2759 /* get next cmd/data packet to send */
2760 pkt = list_first_entry(&ch->tx_queue,
2761 struct smux_pkt_t, list);
2762 }
2763 }
2764
2765 if (pkt) {
2766 list_del(&pkt->list);
2767
2768 /* update packet stats */
2769 if (pkt->hdr.cmd == SMUX_CMD_DATA) {
2770 --ch->tx_pending_data_cnt;
2771 if (ch->notify_lwm &&
2772 ch->tx_pending_data_cnt
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002773 <= SMUX_TX_WM_LOW) {
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002774 ch->notify_lwm = 0;
2775 low_wm_notif = 1;
2776 }
2777 }
2778
2779 /* advance to the next ready channel */
2780 list_rotate_left(&smux.lch_tx_ready_list);
2781 } else {
2782 /* no data in channel to send, remove from ready list */
2783 list_del(&ch->tx_ready_list);
2784 INIT_LIST_HEAD(&ch->tx_ready_list);
2785 }
2786 lcid = ch->lcid;
2787 spin_unlock(&ch->tx_lock_lhb2);
2788 spin_unlock(&ch->state_lock_lhb1);
2789 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2790
2791 if (low_wm_notif)
2792 schedule_notify(lcid, SMUX_LOW_WM_HIT, NULL);
2793
2794 /* send the packet */
2795 smux_tx_pkt(ch, pkt);
2796 smux_free_pkt(pkt);
2797 }
2798}
2799
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002800/**
2801 * Update the RX flow control (sent in the TIOCM Status command).
2802 *
2803 * @ch Channel for update
2804 *
2805 * @returns 1 for updated, 0 for not updated
2806 *
2807 * Must be called with ch->state_lock_lhb1 locked.
2808 */
2809static int smux_rx_flow_control_updated(struct smux_lch_t *ch)
2810{
2811 int updated = 0;
2812 int prev_state;
2813
2814 prev_state = ch->local_tiocm & SMUX_CMD_STATUS_FLOW_CNTL;
2815
2816 if (ch->rx_flow_control_client || ch->rx_flow_control_auto)
2817 ch->local_tiocm |= SMUX_CMD_STATUS_FLOW_CNTL;
2818 else
2819 ch->local_tiocm &= ~SMUX_CMD_STATUS_FLOW_CNTL;
2820
2821 if (prev_state != (ch->local_tiocm & SMUX_CMD_STATUS_FLOW_CNTL)) {
2822 smux_send_status_cmd(ch);
2823 updated = 1;
2824 }
2825
2826 return updated;
2827}
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002828
Eric Holmberg06011322012-07-06 18:17:03 -06002829/**
2830 * Flush all SMUX workqueues.
2831 *
2832 * This sets the reset bit to abort any processing loops and then
2833 * flushes the workqueues to ensure that no new pending work is
2834 * running. Do not call with any locks used by workers held as
2835 * this will result in a deadlock.
2836 */
2837static void smux_flush_workqueues(void)
2838{
2839 smux.in_reset = 1;
2840
2841 SMUX_DBG("%s: flushing tx wq\n", __func__);
2842 flush_workqueue(smux_tx_wq);
2843 SMUX_DBG("%s: flushing rx wq\n", __func__);
2844 flush_workqueue(smux_rx_wq);
2845 SMUX_DBG("%s: flushing notify wq\n", __func__);
2846 flush_workqueue(smux_notify_wq);
2847}
2848
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002849/**********************************************************************/
2850/* Kernel API */
2851/**********************************************************************/
2852
2853/**
2854 * Set or clear channel option using the SMUX_CH_OPTION_* channel
2855 * flags.
2856 *
2857 * @lcid Logical channel ID
2858 * @set Options to set
2859 * @clear Options to clear
2860 *
2861 * @returns 0 for success, < 0 for failure
2862 */
2863int msm_smux_set_ch_option(uint8_t lcid, uint32_t set, uint32_t clear)
2864{
2865 unsigned long flags;
2866 struct smux_lch_t *ch;
2867 int tx_ready = 0;
2868 int ret = 0;
2869
2870 if (smux_assert_lch_id(lcid))
2871 return -ENXIO;
2872
2873 ch = &smux_lch[lcid];
2874 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2875
2876 /* Local loopback mode */
2877 if (set & SMUX_CH_OPTION_LOCAL_LOOPBACK)
2878 ch->local_mode = SMUX_LCH_MODE_LOCAL_LOOPBACK;
2879
2880 if (clear & SMUX_CH_OPTION_LOCAL_LOOPBACK)
2881 ch->local_mode = SMUX_LCH_MODE_NORMAL;
2882
2883 /* Remote loopback mode */
2884 if (set & SMUX_CH_OPTION_REMOTE_LOOPBACK)
2885 ch->local_mode = SMUX_LCH_MODE_REMOTE_LOOPBACK;
2886
2887 if (clear & SMUX_CH_OPTION_REMOTE_LOOPBACK)
2888 ch->local_mode = SMUX_LCH_MODE_NORMAL;
2889
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002890 /* RX Flow control */
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002891 if (set & SMUX_CH_OPTION_REMOTE_TX_STOP) {
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002892 ch->rx_flow_control_client = 1;
2893 tx_ready |= smux_rx_flow_control_updated(ch);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002894 }
2895
2896 if (clear & SMUX_CH_OPTION_REMOTE_TX_STOP) {
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002897 ch->rx_flow_control_client = 0;
2898 tx_ready |= smux_rx_flow_control_updated(ch);
2899 }
2900
2901 /* Auto RX Flow Control */
2902 if (set & SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP) {
2903 SMUX_DBG("%s: auto rx flow control option enabled\n",
2904 __func__);
2905 ch->options |= SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP;
2906 }
2907
2908 if (clear & SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP) {
2909 SMUX_DBG("%s: auto rx flow control option disabled\n",
2910 __func__);
2911 ch->options &= ~SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP;
2912 ch->rx_flow_control_auto = 0;
2913 tx_ready |= smux_rx_flow_control_updated(ch);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002914 }
2915
2916 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2917
2918 if (tx_ready)
2919 list_channel(ch);
2920
2921 return ret;
2922}
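
/*
 * Usage sketch (illustrative only, compiled out; the channel ID is
 * hypothetical): enable local loopback and automatic RX flow control
 * on one channel, then restore normal mode later.
 */
#if 0
static void example_ch_options(void)
{
	/* set the options on hypothetical channel 0 */
	msm_smux_set_ch_option(0,
			SMUX_CH_OPTION_LOCAL_LOOPBACK |
			SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP, 0);

	/* clear them again */
	msm_smux_set_ch_option(0, 0,
			SMUX_CH_OPTION_LOCAL_LOOPBACK |
			SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP);
}
#endif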
2923
2924/**
2925 * Starts the opening sequence for a logical channel.
2926 *
2927 * @lcid Logical channel ID
2928 * @priv Free for client usage
2929 * @notify Event notification function
2930 * @get_rx_buffer Function used to provide a receive buffer to SMUX
2931 *
2932 * @returns 0 for success, <0 otherwise
2933 *
2934 * A channel must be fully closed (either not previously opened, or
2935 * msm_smux_close() has been called and the SMUX_DISCONNECTED
2936 * notification has been received) before it can be opened.
2937 *
2938 * Once the remote side is opened, the client will receive a SMUX_CONNECTED
2939 * event.
2940 */
2941int msm_smux_open(uint8_t lcid, void *priv,
2942 void (*notify)(void *priv, int event_type, const void *metadata),
2943 int (*get_rx_buffer)(void *priv, void **pkt_priv, void **buffer,
2944 int size))
2945{
2946 int ret;
2947 struct smux_lch_t *ch;
2948 struct smux_pkt_t *pkt;
2949 int tx_ready = 0;
2950 unsigned long flags;
2951
2952 if (smux_assert_lch_id(lcid))
2953 return -ENXIO;
2954
2955 ch = &smux_lch[lcid];
2956 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2957
2958 if (ch->local_state == SMUX_LCH_LOCAL_CLOSING) {
2959 ret = -EAGAIN;
2960 goto out;
2961 }
2962
2963 if (ch->local_state != SMUX_LCH_LOCAL_CLOSED) {
2964 pr_err("%s: open lcid %d local state %x invalid\n",
2965 __func__, lcid, ch->local_state);
2966 ret = -EINVAL;
2967 goto out;
2968 }
2969
2970 SMUX_DBG("lcid %d local state 0x%x -> 0x%x\n", lcid,
2971 ch->local_state,
2972 SMUX_LCH_LOCAL_OPENING);
2973
Eric Holmberg06011322012-07-06 18:17:03 -06002974 ch->rx_flow_control_auto = 0;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002975 ch->local_state = SMUX_LCH_LOCAL_OPENING;
2976
2977 ch->priv = priv;
2978 ch->notify = notify;
2979 ch->get_rx_buffer = get_rx_buffer;
2980 ret = 0;
2981
2982 /* Send Open Command */
2983 pkt = smux_alloc_pkt();
2984 if (!pkt) {
2985 ret = -ENOMEM;
2986 goto out;
2987 }
2988 pkt->hdr.magic = SMUX_MAGIC;
2989 pkt->hdr.cmd = SMUX_CMD_OPEN_LCH;
2990 pkt->hdr.flags = SMUX_CMD_OPEN_POWER_COLLAPSE;
2991 if (ch->local_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK)
2992 pkt->hdr.flags |= SMUX_CMD_OPEN_REMOTE_LOOPBACK;
2993 pkt->hdr.lcid = lcid;
2994 pkt->hdr.payload_len = 0;
2995 pkt->hdr.pad_len = 0;
2996 smux_tx_queue(pkt, ch, 0);
2997 tx_ready = 1;
2998
2999out:
3000 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
Eric Holmberg06011322012-07-06 18:17:03 -06003001 smux_rx_flow_control_updated(ch);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003002 if (tx_ready)
3003 list_channel(ch);
3004 return ret;
3005}
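
/*
 * Client usage sketch (illustrative only, compiled out): the callback
 * and function names below are hypothetical. Returning -EAGAIN (or a
 * NULL buffer) from get_rx_buffer() makes SMUX retry delivery later.
 */
#if 0
static int example_get_rx_buffer(void *priv, void **pkt_priv,
				 void **buffer, int size)
{
	*pkt_priv = NULL;
	*buffer = kmalloc(size, GFP_ATOMIC);
	return *buffer ? 0 : -EAGAIN;
}

static void example_notify(void *priv, int event_type,
			   const void *metadata)
{
	if (event_type == SMUX_CONNECTED)
		pr_info("example: channel is fully open\n");
}

static int example_open_channel(uint8_t lcid)
{
	return msm_smux_open(lcid, NULL, example_notify,
			     example_get_rx_buffer);
}
#endif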
3006
3007/**
3008 * Starts the closing sequence for a logical channel.
3009 *
3010 * @lcid Logical channel ID
3011 *
3012 * @returns 0 for success, <0 otherwise
3013 *
3014 * Once the close event has been acknowledged by the remote side, the client
3015 * will receive a SMUX_DISCONNECTED notification.
3016 */
3017int msm_smux_close(uint8_t lcid)
3018{
3019 int ret = 0;
3020 struct smux_lch_t *ch;
3021 struct smux_pkt_t *pkt;
3022 int tx_ready = 0;
3023 unsigned long flags;
3024
3025 if (smux_assert_lch_id(lcid))
3026 return -ENXIO;
3027
3028 ch = &smux_lch[lcid];
3029 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
3030 ch->local_tiocm = 0x0;
3031 ch->remote_tiocm = 0x0;
3032 ch->tx_pending_data_cnt = 0;
3033 ch->notify_lwm = 0;
3034
3035 /* Purge TX queue */
3036 spin_lock(&ch->tx_lock_lhb2);
Eric Holmberg6fcf5322012-07-11 11:46:28 -06003037 smux_purge_ch_tx_queue(ch, 0);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003038 spin_unlock(&ch->tx_lock_lhb2);
3039
3040 /* Send Close Command */
3041 if (ch->local_state == SMUX_LCH_LOCAL_OPENED ||
3042 ch->local_state == SMUX_LCH_LOCAL_OPENING) {
3043 SMUX_DBG("lcid %d local state 0x%x -> 0x%x\n", lcid,
3044 ch->local_state,
3045 SMUX_LCH_LOCAL_CLOSING);
3046
3047 ch->local_state = SMUX_LCH_LOCAL_CLOSING;
3048 pkt = smux_alloc_pkt();
3049 if (pkt) {
3050 pkt->hdr.cmd = SMUX_CMD_CLOSE_LCH;
3051 pkt->hdr.flags = 0;
3052 pkt->hdr.lcid = lcid;
3053 pkt->hdr.payload_len = 0;
3054 pkt->hdr.pad_len = 0;
3055 smux_tx_queue(pkt, ch, 0);
3056 tx_ready = 1;
3057 } else {
3058 pr_err("%s: pkt allocation failed\n", __func__);
3059 ret = -ENOMEM;
3060 }
Eric Holmbergb8435c82012-06-05 14:51:29 -06003061
3062 /* Purge RX retry queue */
3063 if (ch->rx_retry_queue_cnt)
3064 queue_delayed_work(smux_rx_wq, &ch->rx_retry_work, 0);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003065 }
3066 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
3067
3068 if (tx_ready)
3069 list_channel(ch);
3070
3071 return ret;
3072}
3073
3074/**
3075 * Write data to a logical channel.
3076 *
3077 * @lcid Logical channel ID
3078 * @pkt_priv Client data that will be returned with the SMUX_WRITE_DONE or
3079 * SMUX_WRITE_FAIL notification.
3080 * @data Data to write
3081 * @len Length of @data
3082 *
3083 * @returns 0 for success, <0 otherwise
3084 *
3085 * Data may be written immediately after msm_smux_open() is called,
3086 * but the data will wait in the transmit queue until the channel has
3087 * been fully opened.
3088 *
3089 * Once the data has been written, the client will receive either a completion
3090 * (SMUX_WRITE_DONE) or a failure notice (SMUX_WRITE_FAIL).
3091 */
3092int msm_smux_write(uint8_t lcid, void *pkt_priv, const void *data, int len)
3093{
3094 struct smux_lch_t *ch;
3095 struct smux_pkt_t *pkt;
3096 int tx_ready = 0;
3097 unsigned long flags;
3098 int ret;
3099
3100 if (smux_assert_lch_id(lcid))
3101 return -ENXIO;
3102
3103 ch = &smux_lch[lcid];
3104 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
3105
3106 if (ch->local_state != SMUX_LCH_LOCAL_OPENED &&
3107 ch->local_state != SMUX_LCH_LOCAL_OPENING) {
3108 pr_err("%s: hdr.invalid local state %d channel %d\n",
3109 __func__, ch->local_state, lcid);
3110 ret = -EINVAL;
3111 goto out;
3112 }
3113
3114 if (len > SMUX_MAX_PKT_SIZE - sizeof(struct smux_hdr_t)) {
3115 pr_err("%s: payload %d too large\n",
3116 __func__, len);
3117 ret = -E2BIG;
3118 goto out;
3119 }
3120
3121 pkt = smux_alloc_pkt();
3122 if (!pkt) {
3123 ret = -ENOMEM;
3124 goto out;
3125 }
3126
3127 pkt->hdr.cmd = SMUX_CMD_DATA;
3128 pkt->hdr.lcid = lcid;
3129 pkt->hdr.flags = 0;
3130 pkt->hdr.payload_len = len;
3131 pkt->payload = (void *)data;
3132 pkt->priv = pkt_priv;
3133 pkt->hdr.pad_len = 0;
3134
3135 spin_lock(&ch->tx_lock_lhb2);
3136 /* verify high watermark */
3137 SMUX_DBG("%s: pending %d\n", __func__, ch->tx_pending_data_cnt);
3138
Eric Holmberg2e0906f2012-06-26 13:29:14 -06003139 if (ch->tx_pending_data_cnt >= SMUX_TX_WM_HIGH) {
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003140 pr_err("%s: ch %d high watermark %d exceeded %d\n",
Eric Holmberg2e0906f2012-06-26 13:29:14 -06003141 __func__, lcid, SMUX_TX_WM_HIGH,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003142 ch->tx_pending_data_cnt);
3143 ret = -EAGAIN;
3144 goto out_inner;
3145 }
3146
3147 /* queue packet for transmit */
Eric Holmberg2e0906f2012-06-26 13:29:14 -06003148 if (++ch->tx_pending_data_cnt == SMUX_TX_WM_HIGH) {
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003149 ch->notify_lwm = 1;
3150 pr_err("%s: high watermark hit\n", __func__);
3151 schedule_notify(lcid, SMUX_HIGH_WM_HIT, NULL);
3152 }
3153 list_add_tail(&pkt->list, &ch->tx_queue);
3154
3155 /* add to ready list */
3156 if (IS_FULLY_OPENED(ch))
3157 tx_ready = 1;
3158
3159 ret = 0;
3160
3161out_inner:
3162 spin_unlock(&ch->tx_lock_lhb2);
3163
3164out:
3165 if (ret)
3166 smux_free_pkt(pkt);
3167 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
3168
3169 if (tx_ready)
3170 list_channel(ch);
3171
3172 return ret;
3173}
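
/*
 * Write usage sketch (illustrative only, compiled out): data is only
 * queued here; completion arrives later as SMUX_WRITE_DONE or
 * SMUX_WRITE_FAIL, and -EAGAIN means the high watermark was hit.
 */
#if 0
static int example_write(uint8_t lcid, const void *buf, int len)
{
	int rc = msm_smux_write(lcid, NULL, buf, len);

	if (rc == -EAGAIN)
		pr_info("example: TX full, wait for SMUX_LOW_WM_HIT\n");
	return rc;
}
#endif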
3174
3175/**
3176 * Returns true if the TX queue is currently full (high water mark).
3177 *
3178 * @lcid Logical channel ID
3179 * @returns 0 if channel is not full
3180 * 1 if it is full
3181 * < 0 for error
3182 */
3183int msm_smux_is_ch_full(uint8_t lcid)
3184{
3185 struct smux_lch_t *ch;
3186 unsigned long flags;
3187 int is_full = 0;
3188
3189 if (smux_assert_lch_id(lcid))
3190 return -ENXIO;
3191
3192 ch = &smux_lch[lcid];
3193
3194 spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06003195 if (ch->tx_pending_data_cnt >= SMUX_TX_WM_HIGH)
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003196 is_full = 1;
3197 spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);
3198
3199 return is_full;
3200}
3201
3202/**
3203 * Returns true if the TX queue has space for more packets (it is at or
3204 * below the low water mark).
3205 *
3206 * @lcid Logical channel ID
3207 * @returns 0 if channel is above low watermark
3208 * 1 if it's at or below the low watermark
3209 * < 0 for error
3210 */
3211int msm_smux_is_ch_low(uint8_t lcid)
3212{
3213 struct smux_lch_t *ch;
3214 unsigned long flags;
3215 int is_low = 0;
3216
3217 if (smux_assert_lch_id(lcid))
3218 return -ENXIO;
3219
3220 ch = &smux_lch[lcid];
3221
3222 spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06003223 if (ch->tx_pending_data_cnt <= SMUX_TX_WM_LOW)
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003224 is_low = 1;
3225 spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);
3226
3227 return is_low;
3228}
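
/*
 * Watermark polling sketch (illustrative only, compiled out): a
 * client may poll these helpers instead of (or in addition to)
 * waiting for the SMUX_HIGH_WM_HIT / SMUX_LOW_WM_HIT notifications.
 */
#if 0
static int example_ok_to_write(uint8_t lcid)
{
	if (msm_smux_is_ch_full(lcid) > 0)
		return 0;	/* back off until the low watermark */
	return 1;
}
#endif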
3229
3230/**
3231 * Send TIOCM status update.
3232 *
3233 * @ch Channel for update
3234 *
3235 * @returns 0 for success, <0 for failure
3236 *
3237 * Channel lock must be held before calling.
3238 */
3239static int smux_send_status_cmd(struct smux_lch_t *ch)
3240{
3241 struct smux_pkt_t *pkt;
3242
3243 if (!ch)
3244 return -EINVAL;
3245
3246 pkt = smux_alloc_pkt();
3247 if (!pkt)
3248 return -ENOMEM;
3249
3250 pkt->hdr.lcid = ch->lcid;
3251 pkt->hdr.cmd = SMUX_CMD_STATUS;
3252 pkt->hdr.flags = ch->local_tiocm;
3253 pkt->hdr.payload_len = 0;
3254 pkt->hdr.pad_len = 0;
3255 smux_tx_queue(pkt, ch, 0);
3256
3257 return 0;
3258}
3259
3260/**
3261 * Internal helper function for getting the TIOCM status with
3262 * state_lock_lhb1 already locked.
3263 *
3264 * @ch Channel pointer
3265 *
3266 * @returns TIOCM status
3267 */
3268static long msm_smux_tiocm_get_atomic(struct smux_lch_t *ch)
3269{
3270 long status = 0x0;
3271
3272 status |= (ch->remote_tiocm & SMUX_CMD_STATUS_RTC) ? TIOCM_DSR : 0;
3273 status |= (ch->remote_tiocm & SMUX_CMD_STATUS_RTR) ? TIOCM_CTS : 0;
3274 status |= (ch->remote_tiocm & SMUX_CMD_STATUS_RI) ? TIOCM_RI : 0;
3275 status |= (ch->remote_tiocm & SMUX_CMD_STATUS_DCD) ? TIOCM_CD : 0;
3276
3277 status |= (ch->local_tiocm & SMUX_CMD_STATUS_RTC) ? TIOCM_DTR : 0;
3278 status |= (ch->local_tiocm & SMUX_CMD_STATUS_RTR) ? TIOCM_RTS : 0;
3279
3280 return status;
3281}
3282
3283/**
3284 * Get the TIOCM status bits.
3285 *
3286 * @lcid Logical channel ID
3287 *
3288 * @returns >= 0 TIOCM status bits
3289 * < 0 Error condition
3290 */
3291long msm_smux_tiocm_get(uint8_t lcid)
3292{
3293 struct smux_lch_t *ch;
3294 unsigned long flags;
3295 long status = 0x0;
3296
3297 if (smux_assert_lch_id(lcid))
3298 return -ENXIO;
3299
3300 ch = &smux_lch[lcid];
3301 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
3302 status = msm_smux_tiocm_get_atomic(ch);
3303 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
3304
3305 return status;
3306}
3307
3308/**
3309 * Set/clear the TIOCM status bits.
3310 *
3311 * @lcid Logical channel ID
3312 * @set Bits to set
3313 * @clear Bits to clear
3314 *
3315 * @returns 0 for success; < 0 for failure
3316 *
3317 * If a bit is specified in both the @set and @clear masks, then the clear bit
3318 * definition will dominate and the bit will be cleared.
3319 */
3320int msm_smux_tiocm_set(uint8_t lcid, uint32_t set, uint32_t clear)
3321{
3322 struct smux_lch_t *ch;
3323 unsigned long flags;
3324 uint8_t old_status;
3325 uint8_t status_set = 0x0;
3326 uint8_t status_clear = 0x0;
3327 int tx_ready = 0;
3328 int ret = 0;
3329
3330 if (smux_assert_lch_id(lcid))
3331 return -ENXIO;
3332
3333 ch = &smux_lch[lcid];
3334 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
3335
3336 status_set |= (set & TIOCM_DTR) ? SMUX_CMD_STATUS_RTC : 0;
3337 status_set |= (set & TIOCM_RTS) ? SMUX_CMD_STATUS_RTR : 0;
3338 status_set |= (set & TIOCM_RI) ? SMUX_CMD_STATUS_RI : 0;
3339 status_set |= (set & TIOCM_CD) ? SMUX_CMD_STATUS_DCD : 0;
3340
3341 status_clear |= (clear & TIOCM_DTR) ? SMUX_CMD_STATUS_RTC : 0;
3342 status_clear |= (clear & TIOCM_RTS) ? SMUX_CMD_STATUS_RTR : 0;
3343 status_clear |= (clear & TIOCM_RI) ? SMUX_CMD_STATUS_RI : 0;
3344 status_clear |= (clear & TIOCM_CD) ? SMUX_CMD_STATUS_DCD : 0;
3345
3346 old_status = ch->local_tiocm;
3347 ch->local_tiocm |= status_set;
3348 ch->local_tiocm &= ~status_clear;
3349
3350 if (ch->local_tiocm != old_status) {
3351 ret = smux_send_status_cmd(ch);
3352 tx_ready = 1;
3353 }
3354 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
3355
3356 if (tx_ready)
3357 list_channel(ch);
3358
3359 return ret;
3360}
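
/*
 * TIOCM usage sketch (illustrative only, compiled out): assert DTR
 * and drop RTS in one call; a bit present in both masks ends up
 * cleared.
 */
#if 0
static void example_tiocm(uint8_t lcid)
{
	long bits;

	msm_smux_tiocm_set(lcid, TIOCM_DTR, TIOCM_RTS);
	bits = msm_smux_tiocm_get(lcid);
	pr_info("example: remote CTS %s\n",
		(bits & TIOCM_CTS) ? "asserted" : "deasserted");
}
#endif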

/**********************************************************************/
/* Subsystem Restart */
/**********************************************************************/
static struct notifier_block ssr_notifier = {
	.notifier_call = ssr_notifier_cb,
};

/**
 * Handle Subsystem Restart (SSR) notifications.
 *
 * @this	Pointer to ssr_notifier
 * @code	SSR Code
 * @data	Data pointer (not used)
 */
static int ssr_notifier_cb(struct notifier_block *this,
				unsigned long code,
				void *data)
{
	unsigned long flags;
	int i;
	int tmp;
	int power_off_uart = 0;

	if (code == SUBSYS_BEFORE_SHUTDOWN) {
		SMUX_DBG("%s: ssr - before shutdown\n", __func__);
		mutex_lock(&smux.mutex_lha0);
		smux.in_reset = 1;
		mutex_unlock(&smux.mutex_lha0);
		return NOTIFY_DONE;
	} else if (code == SUBSYS_AFTER_POWERUP) {
		/* re-register platform devices */
		SMUX_DBG("%s: ssr - after power-up\n", __func__);
		mutex_lock(&smux.mutex_lha0);
		if (smux.ld_open_count > 0
				&& !smux.platform_devs_registered) {
			for (i = 0; i < ARRAY_SIZE(smux_devs); ++i) {
				SMUX_DBG("%s: register pdev '%s'\n",
					__func__, smux_devs[i].name);
				smux_devs[i].dev.release = smux_pdev_release;
				tmp = platform_device_register(&smux_devs[i]);
				if (tmp)
					pr_err("%s: error %d registering device %s\n",
						__func__, tmp,
						smux_devs[i].name);
			}
			smux.platform_devs_registered = 1;
		}
		mutex_unlock(&smux.mutex_lha0);
		return NOTIFY_DONE;
	} else if (code != SUBSYS_AFTER_SHUTDOWN) {
		return NOTIFY_DONE;
	}
	SMUX_DBG("%s: ssr - after shutdown\n", __func__);

	/* Cleanup channels */
	smux_flush_workqueues();
	mutex_lock(&smux.mutex_lha0);
	if (smux.ld_open_count > 0) {
		smux_lch_purge();
		if (smux.tty)
			tty_driver_flush_buffer(smux.tty);

		/* Unregister platform devices */
		if (smux.platform_devs_registered) {
			for (i = 0; i < ARRAY_SIZE(smux_devs); ++i) {
				SMUX_DBG("%s: unregister pdev '%s'\n",
					__func__, smux_devs[i].name);
				platform_device_unregister(&smux_devs[i]);
			}
			smux.platform_devs_registered = 0;
		}

		/* Power-down UART */
		spin_lock_irqsave(&smux.tx_lock_lha2, flags);
		if (smux.power_state != SMUX_PWR_OFF) {
			SMUX_PWR("%s: SSR - turning off UART\n", __func__);
			smux.power_state = SMUX_PWR_OFF;
			power_off_uart = 1;
		}
		smux.powerdown_enabled = 0;
		spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);

		if (power_off_uart)
			smux_uart_power_off_atomic();
	}
	smux.tx_activity_flag = 0;
	smux.rx_activity_flag = 0;
	smux.rx_state = SMUX_RX_IDLE;
	smux.in_reset = 0;
	mutex_unlock(&smux.mutex_lha0);

	return NOTIFY_DONE;
}
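
/*
 * For reference, the callback above assumes the subsystem-restart
 * framework delivers events in the order SUBSYS_BEFORE_SHUTDOWN ->
 * SUBSYS_AFTER_SHUTDOWN -> SUBSYS_AFTER_POWERUP. A minimal client
 * notifier following the same pattern might look like this (sketch
 * only; example_cb/example_nb are hypothetical names):
 *
 *	static int example_cb(struct notifier_block *nb,
 *				unsigned long code, void *data)
 *	{
 *		if (code == SUBSYS_BEFORE_SHUTDOWN)
 *			;	// stop queuing new work for the subsystem
 *		else if (code == SUBSYS_AFTER_POWERUP)
 *			;	// safe to resume
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block example_nb = {
 *		.notifier_call = example_cb,
 *	};
 *
 * and would be registered with
 * subsys_notif_register_notifier("external_modem", &example_nb),
 * as smux_init() does for ssr_notifier below.
 */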

/**********************************************************************/
/* Line Discipline Interface */
/**********************************************************************/
static void smux_pdev_release(struct device *dev)
{
	struct platform_device *pdev;

	pdev = container_of(dev, struct platform_device, dev);
	SMUX_DBG("%s: releasing pdev %p '%s'\n", __func__, pdev, pdev->name);

	/* clear the device struct so the pdev can be registered again
	 * after an SSR or ldisc reopen */
	memset(&pdev->dev, 0x0, sizeof(pdev->dev));
}

static int smuxld_open(struct tty_struct *tty)
{
	int i;
	int tmp;
	unsigned long flags;

	if (!smux.is_initialized)
		return -ENODEV;

	mutex_lock(&smux.mutex_lha0);
	if (smux.ld_open_count) {
		pr_err("%s: %p multiple instances not supported\n",
			__func__, tty);
		mutex_unlock(&smux.mutex_lha0);
		return -EEXIST;
	}

	if (tty->ops->write == NULL) {
		pr_err("%s: tty->ops->write is NULL\n", __func__);
		mutex_unlock(&smux.mutex_lha0);
		return -EINVAL;
	}

	/* connect to TTY */
	++smux.ld_open_count;
	smux.in_reset = 0;
	smux.tty = tty;
	tty->disc_data = &smux;
	tty->receive_room = TTY_RECEIVE_ROOM;
	tty_driver_flush_buffer(tty);

	/* power-down the UART if we are idle */
	spin_lock_irqsave(&smux.tx_lock_lha2, flags);
	if (smux.power_state == SMUX_PWR_OFF) {
		SMUX_PWR("%s: powering off uart\n", __func__);
		smux.power_state = SMUX_PWR_OFF_FLUSH;
		spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
		queue_work(smux_tx_wq, &smux_inactivity_work);
	} else {
		spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
	}

	/* register platform devices */
	for (i = 0; i < ARRAY_SIZE(smux_devs); ++i) {
		SMUX_DBG("%s: register pdev '%s'\n",
			__func__, smux_devs[i].name);
		smux_devs[i].dev.release = smux_pdev_release;
		tmp = platform_device_register(&smux_devs[i]);
		if (tmp)
			pr_err("%s: error %d registering device %s\n",
				__func__, tmp, smux_devs[i].name);
	}
	smux.platform_devs_registered = 1;
	mutex_unlock(&smux.mutex_lha0);
	return 0;
}

static void smuxld_close(struct tty_struct *tty)
{
	unsigned long flags;
	int power_up_uart = 0;
	int i;

	SMUX_DBG("%s: ldisc unload\n", __func__);
	smux_flush_workqueues();

	mutex_lock(&smux.mutex_lha0);
	if (smux.ld_open_count <= 0) {
		pr_err("%s: invalid ld count %d\n", __func__,
			smux.ld_open_count);
		mutex_unlock(&smux.mutex_lha0);
		return;
	}
	--smux.ld_open_count;

	/* Cleanup channels */
	smux_lch_purge();

	/* Unregister platform devices */
	if (smux.platform_devs_registered) {
		for (i = 0; i < ARRAY_SIZE(smux_devs); ++i) {
			SMUX_DBG("%s: unregister pdev '%s'\n",
				__func__, smux_devs[i].name);
			platform_device_unregister(&smux_devs[i]);
		}
		smux.platform_devs_registered = 0;
	}

	/* Schedule UART power-up if it's down */
	spin_lock_irqsave(&smux.tx_lock_lha2, flags);
	if (smux.power_state == SMUX_PWR_OFF)
		power_up_uart = 1;
	smux.power_state = SMUX_PWR_OFF;
	smux.powerdown_enabled = 0;
	smux.tx_activity_flag = 0;
	smux.rx_activity_flag = 0;
	spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);

	if (power_up_uart)
		smux_uart_power_on_atomic();

	smux.rx_state = SMUX_RX_IDLE;

	/* Disconnect from TTY */
	smux.tty = NULL;
	mutex_unlock(&smux.mutex_lha0);
	SMUX_DBG("%s: ldisc complete\n", __func__);
}

/**
 * Receive data from TTY Line Discipline.
 *
 * @tty	TTY structure
 * @cp	Character data
 * @fp	Flag data
 * @count	Size of character and flag data
 */
static void smuxld_receive_buf(struct tty_struct *tty, const unsigned char *cp,
			char *fp, int count)
{
	int i;
	int last_idx = 0;
	const char *tty_name = NULL;
	char *f;

	if (smux_debug_mask & MSM_SMUX_DEBUG)
		print_hex_dump(KERN_INFO, "smux tty rx: ", DUMP_PREFIX_OFFSET,
			16, 1, cp, count, true);

	/* verify error flags */
	for (i = 0, f = fp; i < count; ++i, ++f) {
		if (*f != TTY_NORMAL) {
			if (tty)
				tty_name = tty->name;
			pr_err("%s: TTY %s Error %d (%s)\n", __func__,
				tty_name, *f, tty_flag_to_str(*f));

			/* feed all previous valid data to the parser */
			smux_rx_state_machine(cp + last_idx, i - last_idx,
					TTY_NORMAL);

			/* feed bad data to parser */
			smux_rx_state_machine(cp + i, 1, *f);
			last_idx = i + 1;
		}
	}

	/* feed data to RX state machine */
	smux_rx_state_machine(cp + last_idx, count - last_idx, TTY_NORMAL);
}
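
/*
 * Worked example (illustrative): for count == 5 with flag bytes
 * { TTY_NORMAL, TTY_NORMAL, TTY_FRAME, TTY_NORMAL, TTY_NORMAL },
 * the loop above hits the error at i == 2 and the parser receives
 * three calls:
 *
 *	smux_rx_state_machine(cp + 0, 2, TTY_NORMAL);	// bytes 0-1
 *	smux_rx_state_machine(cp + 2, 1, TTY_FRAME);	// the bad byte
 *	smux_rx_state_machine(cp + 3, 2, TTY_NORMAL);	// bytes 3-4
 *
 * so valid data is always fed with TTY_NORMAL and each bad byte is
 * fed individually with its own flag.
 */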

static void smuxld_flush_buffer(struct tty_struct *tty)
{
	pr_err("%s: not supported\n", __func__);
}

static ssize_t smuxld_chars_in_buffer(struct tty_struct *tty)
{
	pr_err("%s: not supported\n", __func__);
	return -ENODEV;
}

static ssize_t smuxld_read(struct tty_struct *tty, struct file *file,
		unsigned char __user *buf, size_t nr)
{
	pr_err("%s: not supported\n", __func__);
	return -ENODEV;
}

static ssize_t smuxld_write(struct tty_struct *tty, struct file *file,
		const unsigned char *buf, size_t nr)
{
	pr_err("%s: not supported\n", __func__);
	return -ENODEV;
}

static int smuxld_ioctl(struct tty_struct *tty, struct file *file,
		unsigned int cmd, unsigned long arg)
{
	pr_err("%s: not supported\n", __func__);
	return -ENODEV;
}

static unsigned int smuxld_poll(struct tty_struct *tty, struct file *file,
		struct poll_table_struct *tbl)
{
	pr_err("%s: not supported\n", __func__);
	return -ENODEV;
}

static void smuxld_write_wakeup(struct tty_struct *tty)
{
	pr_err("%s: not supported\n", __func__);
}

static struct tty_ldisc_ops smux_ldisc_ops = {
	.owner = THIS_MODULE,
	.magic = TTY_LDISC_MAGIC,
	.name = "n_smux",
	.open = smuxld_open,
	.close = smuxld_close,
	.flush_buffer = smuxld_flush_buffer,
	.chars_in_buffer = smuxld_chars_in_buffer,
	.read = smuxld_read,
	.write = smuxld_write,
	.ioctl = smuxld_ioctl,
	.poll = smuxld_poll,
	.receive_buf = smuxld_receive_buf,
	.write_wakeup = smuxld_write_wakeup,
};
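
/*
 * Illustrative userspace attach sequence (sketch only, not part of
 * the driver): a process pushes this line discipline onto the muxed
 * UART with the TIOCSETD ioctl. The device path and the ldisc number
 * are placeholders; the number must equal the kernel's N_SMUX value.
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <sys/ioctl.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int ldisc = 25;	// placeholder; must match N_SMUX
 *		int fd = open("/dev/ttyHS0", O_RDWR); // assumed UART node
 *
 *		if (fd < 0 || ioctl(fd, TIOCSETD, &ldisc) < 0) {
 *			perror("attach n_smux");
 *			return 1;
 *		}
 *		pause();	// hold the ldisc while the mux is in use
 *		return 0;
 *	}
 */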

static int __init smux_init(void)
{
	int ret;

	mutex_init(&smux.mutex_lha0);

	spin_lock_init(&smux.rx_lock_lha1);
	smux.rx_state = SMUX_RX_IDLE;
	smux.power_state = SMUX_PWR_OFF;
	smux.pwr_wakeup_delay_us = 1;
	smux.powerdown_enabled = 0;
	smux.power_ctl_remote_req_received = 0;
	INIT_LIST_HEAD(&smux.power_queue);
	smux.rx_activity_flag = 0;
	smux.tx_activity_flag = 0;
	smux.recv_len = 0;
	smux.tty = NULL;
	smux.ld_open_count = 0;
	smux.in_reset = 0;
	smux.is_initialized = 1;
	smux.platform_devs_registered = 0;
	smux_byte_loopback = 0;

	spin_lock_init(&smux.tx_lock_lha2);
	INIT_LIST_HEAD(&smux.lch_tx_ready_list);

	ret = tty_register_ldisc(N_SMUX, &smux_ldisc_ops);
	if (ret != 0) {
		pr_err("%s: error %d registering line discipline\n",
			__func__, ret);
		return ret;
	}

	subsys_notif_register_notifier("external_modem", &ssr_notifier);

	ret = lch_init();
	if (ret != 0) {
		pr_err("%s: lch_init failed\n", __func__);
		/* don't leave the ldisc registered on failure */
		tty_unregister_ldisc(N_SMUX);
		return ret;
	}

	return 0;
}

static void __exit smux_exit(void)
{
	int ret;

	ret = tty_unregister_ldisc(N_SMUX);
	if (ret != 0) {
		pr_err("%s: error %d unregistering line discipline\n",
			__func__, ret);
		return;
	}
}

module_init(smux_init);
module_exit(smux_exit);

MODULE_DESCRIPTION("Serial Mux TTY Line Discipline");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_LDISC(N_SMUX);