/* drivers/tty/n_smux.c
 *
 * Copyright (c) 2012, The Linux Foundation. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/tty_driver.h>
#include <linux/smux.h>
#include <linux/list.h>
#include <linux/kfifo.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <mach/subsystem_notif.h>
#include <mach/subsystem_restart.h>
#include <mach/msm_serial_hs.h>
#include <mach/msm_ipc_logging.h>
#include "smux_private.h"
#include "smux_loopback.h"

#define SMUX_NOTIFY_FIFO_SIZE	128
#define SMUX_TX_QUEUE_SIZE	256
#define SMUX_PKT_LOG_SIZE	128

/* Maximum size we can accept in a single RX buffer */
#define TTY_RECEIVE_ROOM	65536
#define TTY_BUFFER_FULL_WAIT_MS	50

/* maximum sleep time between wakeup attempts */
#define SMUX_WAKEUP_DELAY_MAX	(1 << 20)

/* minimum delay for scheduling delayed work */
#define SMUX_WAKEUP_DELAY_MIN	(1 << 15)

/* inactivity timeout for no rx/tx activity */
#define SMUX_INACTIVITY_TIMEOUT_MS	1000000

/* RX get_rx_buffer retry timeout values */
#define SMUX_RX_RETRY_MIN_MS	(1 << 0)	/* 1 ms */
#define SMUX_RX_RETRY_MAX_MS	(1 << 10)	/* 1024 ms */

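/* Debug message categories enabled via the debug_mask module parameter */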
enum {
	MSM_SMUX_DEBUG = 1U << 0,
	MSM_SMUX_INFO = 1U << 1,
	MSM_SMUX_POWER_INFO = 1U << 2,
	MSM_SMUX_PKT = 1U << 3,
};

static int smux_debug_mask = MSM_SMUX_DEBUG | MSM_SMUX_POWER_INFO;
module_param_named(debug_mask, smux_debug_mask,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);

static int disable_ipc_logging;

/* Simulated wakeup used for testing */
int smux_byte_loopback;
module_param_named(byte_loopback, smux_byte_loopback,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);
int smux_simulate_wakeup_delay = 1;
module_param_named(simulate_wakeup_delay, smux_simulate_wakeup_delay,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);

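/* Log a string to the IPC logging context, if logging is enabled and a
 * context has been created */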
#define IPC_LOG_STR(x...) do { \
	if (!disable_ipc_logging && log_ctx) \
		ipc_log_string(log_ctx, x); \
} while (0)

#define SMUX_DBG(x...) do { \
	if (smux_debug_mask & MSM_SMUX_DEBUG) \
		IPC_LOG_STR(x); \
} while (0)

#define SMUX_ERR(x...) do { \
	pr_err(x); \
	IPC_LOG_STR(x); \
} while (0)

#define SMUX_PWR(x...) do { \
	if (smux_debug_mask & MSM_SMUX_POWER_INFO) \
		IPC_LOG_STR(x); \
} while (0)

#define SMUX_PWR_PKT_RX(pkt) do { \
	if (smux_debug_mask & MSM_SMUX_POWER_INFO) \
		smux_log_pkt(pkt, 1); \
} while (0)

#define SMUX_PWR_PKT_TX(pkt) do { \
	if (smux_debug_mask & MSM_SMUX_POWER_INFO) { \
		if (pkt->hdr.cmd == SMUX_CMD_BYTE && \
			pkt->hdr.flags == SMUX_WAKEUP_ACK) \
			IPC_LOG_STR("smux: TX Wakeup ACK\n"); \
		else if (pkt->hdr.cmd == SMUX_CMD_BYTE && \
			pkt->hdr.flags == SMUX_WAKEUP_REQ) \
			IPC_LOG_STR("smux: TX Wakeup REQ\n"); \
		else \
			smux_log_pkt(pkt, 0); \
	} \
} while (0)

#define SMUX_PWR_BYTE_TX(pkt) do { \
	if (smux_debug_mask & MSM_SMUX_POWER_INFO) { \
		smux_log_pkt(pkt, 0); \
	} \
} while (0)

#define SMUX_LOG_PKT_RX(pkt) do { \
	if (smux_debug_mask & MSM_SMUX_PKT) \
		smux_log_pkt(pkt, 1); \
} while (0)

#define SMUX_LOG_PKT_TX(pkt) do { \
	if (smux_debug_mask & MSM_SMUX_PKT) \
		smux_log_pkt(pkt, 0); \
} while (0)

/**
 * Return true if channel is fully opened (both
 * local and remote sides are in the OPENED state).
 */
#define IS_FULLY_OPENED(ch) \
	(ch && (ch)->local_state == SMUX_LCH_LOCAL_OPENED \
	 && (ch)->remote_state == SMUX_LCH_REMOTE_OPENED)

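/*
 * Platform devices for the SMUX client drivers; registration is tracked
 * by platform_devs_registered in struct smux_ldisc_t.
 */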
static struct platform_device smux_devs[] = {
	{.name = "SMUX_CTL", .id = -1},
	{.name = "SMUX_RMNET", .id = -1},
	{.name = "SMUX_DUN_DATA_HSUART", .id = 0},
	{.name = "SMUX_RMNET_DATA_HSUART", .id = 1},
	{.name = "SMUX_RMNET_CTL_HSUART", .id = 0},
	{.name = "SMUX_DIAG", .id = -1},
};

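/* Modem status bits carried in the flags field of a STATUS command */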
enum {
	SMUX_CMD_STATUS_RTC = 1 << 0,
	SMUX_CMD_STATUS_RTR = 1 << 1,
	SMUX_CMD_STATUS_RI = 1 << 2,
	SMUX_CMD_STATUS_DCD = 1 << 3,
	SMUX_CMD_STATUS_FLOW_CNTL = 1 << 4,
};

/* Channel mode */
enum {
	SMUX_LCH_MODE_NORMAL,
	SMUX_LCH_MODE_LOCAL_LOOPBACK,
	SMUX_LCH_MODE_REMOTE_LOOPBACK,
};

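/* RX state machine states */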
enum {
	SMUX_RX_IDLE,
	SMUX_RX_MAGIC,
	SMUX_RX_HDR,
	SMUX_RX_PAYLOAD,
	SMUX_RX_FAILURE,
};

/**
 * Power states.
 *
 * The _FLUSH states are internal transitional states and are not part of the
 * official state machine.
 */
enum {
	SMUX_PWR_OFF,
	SMUX_PWR_TURNING_ON,
	SMUX_PWR_ON,
	SMUX_PWR_TURNING_OFF_FLUSH, /* power-off req/ack in TX queue */
	SMUX_PWR_TURNING_OFF,
	SMUX_PWR_OFF_FLUSH,
};

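/* Event metadata passed to client notification callbacks */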
union notifier_metadata {
	struct smux_meta_disconnected disconnected;
	struct smux_meta_read read;
	struct smux_meta_write write;
	struct smux_meta_tiocm tiocm;
};

struct smux_notify_handle {
	void (*notify)(void *priv, int event_type, const void *metadata);
	void *priv;
	int event_type;
	union notifier_metadata *metadata;
};

/**
 * Get RX Buffer Retry structure.
 *
 * This is used when a client is unable to provide an RX buffer
 * immediately. The structure temporarily holds the packet data while a
 * retry is scheduled.
 */
struct smux_rx_pkt_retry {
	struct smux_pkt_t *pkt;
	struct list_head rx_retry_list;
	unsigned timeout_in_ms;
};

/**
 * Receive worker data structure.
 *
 * One instance is created for every call to smux_rx_state_machine.
 */
struct smux_rx_worker_data {
	const unsigned char *data;
	int len;
	int flag;

	struct work_struct work;
	struct completion work_complete;
};

/**
 * Line discipline and module structure.
 *
 * Only one instance exists, since multiple instances of the line
 * discipline are not allowed.
 */
struct smux_ldisc_t {
	struct mutex mutex_lha0;

	int is_initialized;
	int platform_devs_registered;
	int in_reset;
	int remote_is_alive;
	int ld_open_count;
	struct tty_struct *tty;

	/* RX State Machine (single-threaded access by smux_rx_wq) */
	unsigned char recv_buf[SMUX_MAX_PKT_SIZE];
	unsigned int recv_len;
	unsigned int pkt_remain;
	unsigned rx_state;

	/* RX Activity - accessed by multiple threads */
	spinlock_t rx_lock_lha1;
	unsigned rx_activity_flag;

	/* TX / Power */
	spinlock_t tx_lock_lha2;
	struct list_head lch_tx_ready_list;
	unsigned power_state;
	unsigned pwr_wakeup_delay_us;
	unsigned tx_activity_flag;
	unsigned powerdown_enabled;
	unsigned power_ctl_remote_req_received;
	struct list_head power_queue;
};


/* data structures */
struct smux_lch_t smux_lch[SMUX_NUM_LOGICAL_CHANNELS];
static struct smux_ldisc_t smux;
static const char *tty_error_type[] = {
	[TTY_NORMAL] = "normal",
	[TTY_OVERRUN] = "overrun",
	[TTY_BREAK] = "break",
	[TTY_PARITY] = "parity",
	[TTY_FRAME] = "framing",
};

static const char * const smux_cmds[] = {
	[SMUX_CMD_DATA] = "DATA",
	[SMUX_CMD_OPEN_LCH] = "OPEN",
	[SMUX_CMD_CLOSE_LCH] = "CLOSE",
	[SMUX_CMD_STATUS] = "STATUS",
	[SMUX_CMD_PWR_CTL] = "PWR",
	[SMUX_CMD_BYTE] = "Raw Byte",
};

static const char * const smux_events[] = {
	[SMUX_CONNECTED] = "CONNECTED",
	[SMUX_DISCONNECTED] = "DISCONNECTED",
	[SMUX_READ_DONE] = "READ_DONE",
	[SMUX_READ_FAIL] = "READ_FAIL",
	[SMUX_WRITE_DONE] = "WRITE_DONE",
	[SMUX_WRITE_FAIL] = "WRITE_FAIL",
	[SMUX_TIOCM_UPDATE] = "TIOCM_UPDATE",
	[SMUX_LOW_WM_HIT] = "LOW_WM_HIT",
	[SMUX_HIGH_WM_HIT] = "HIGH_WM_HIT",
	[SMUX_RX_RETRY_HIGH_WM_HIT] = "RX_RETRY_HIGH_WM_HIT",
	[SMUX_RX_RETRY_LOW_WM_HIT] = "RX_RETRY_LOW_WM_HIT",
};

static const char * const smux_local_state[] = {
	[SMUX_LCH_LOCAL_CLOSED] = "CLOSED",
	[SMUX_LCH_LOCAL_OPENING] = "OPENING",
	[SMUX_LCH_LOCAL_OPENED] = "OPENED",
	[SMUX_LCH_LOCAL_CLOSING] = "CLOSING",
};

static const char * const smux_remote_state[] = {
	[SMUX_LCH_REMOTE_CLOSED] = "CLOSED",
	[SMUX_LCH_REMOTE_OPENED] = "OPENED",
};

static const char * const smux_mode[] = {
	[SMUX_LCH_MODE_NORMAL] = "N",
	[SMUX_LCH_MODE_LOCAL_LOOPBACK] = "L",
	[SMUX_LCH_MODE_REMOTE_LOOPBACK] = "R",
};

static const char * const smux_undef[] = {
	[SMUX_UNDEF_LONG] = "UNDEF",
	[SMUX_UNDEF_SHORT] = "U",
};

static void *log_ctx;
static void smux_notify_local_fn(struct work_struct *work);
static DECLARE_WORK(smux_notify_local, smux_notify_local_fn);

static struct workqueue_struct *smux_notify_wq;
static size_t handle_size;
static struct kfifo smux_notify_fifo;
static int queued_fifo_notifications;
static DEFINE_SPINLOCK(notify_lock_lhc1);

static struct workqueue_struct *smux_tx_wq;
static struct workqueue_struct *smux_rx_wq;
static void smux_tx_worker(struct work_struct *work);
static DECLARE_WORK(smux_tx_work, smux_tx_worker);

static void smux_wakeup_worker(struct work_struct *work);
static void smux_rx_retry_worker(struct work_struct *work);
static void smux_rx_worker(struct work_struct *work);
static DECLARE_WORK(smux_wakeup_work, smux_wakeup_worker);
static DECLARE_DELAYED_WORK(smux_wakeup_delayed_work, smux_wakeup_worker);

static void smux_inactivity_worker(struct work_struct *work);
static DECLARE_WORK(smux_inactivity_work, smux_inactivity_worker);
static DECLARE_DELAYED_WORK(smux_delayed_inactivity_work,
		smux_inactivity_worker);

static void list_channel(struct smux_lch_t *ch);
static int smux_send_status_cmd(struct smux_lch_t *ch);
static int smux_dispatch_rx_pkt(struct smux_pkt_t *pkt);
static void smux_flush_tty(void);
static void smux_purge_ch_tx_queue(struct smux_lch_t *ch, int is_ssr);
static int schedule_notify(uint8_t lcid, int event,
			const union notifier_metadata *metadata);
static int ssr_notifier_cb(struct notifier_block *this,
				unsigned long code,
				void *data);
static void smux_uart_power_on_atomic(void);
static int smux_rx_flow_control_updated(struct smux_lch_t *ch);
static void smux_flush_workqueues(void);
static void smux_pdev_release(struct device *dev);

/**
 * local_lch_state() - Return human readable form of local logical state.
 * @state: Local logical channel state enum.
 *
 */
const char *local_lch_state(unsigned state)
{
	if (state < ARRAY_SIZE(smux_local_state))
		return smux_local_state[state];
	else
		return smux_undef[SMUX_UNDEF_LONG];
}

/**
 * remote_lch_state() - Return human readable form of remote logical state.
 * @state: Remote logical channel state enum.
 *
 */
const char *remote_lch_state(unsigned state)
{
	if (state < ARRAY_SIZE(smux_remote_state))
		return smux_remote_state[state];
	else
		return smux_undef[SMUX_UNDEF_LONG];
}

/**
 * lch_mode() - Return human readable form of mode.
 * @mode: Mode of the logical channel.
 *
 */
const char *lch_mode(unsigned mode)
{
	if (mode < ARRAY_SIZE(smux_mode))
		return smux_mode[mode];
	else
		return smux_undef[SMUX_UNDEF_SHORT];
}

/**
 * Convert TTY Error Flags to string for logging purposes.
 *
 * @flag TTY_* flag
 * @returns String description or NULL if unknown
 */
static const char *tty_flag_to_str(unsigned flag)
{
	if (flag < ARRAY_SIZE(tty_error_type))
		return tty_error_type[flag];
	return NULL;
}

/**
 * Convert SMUX Command to string for logging purposes.
 *
 * @cmd SMUX command
 * @returns String description or NULL if unknown
 */
static const char *cmd_to_str(unsigned cmd)
{
	if (cmd < ARRAY_SIZE(smux_cmds))
		return smux_cmds[cmd];
	return NULL;
}

/**
 * Convert SMUX event to string for logging purposes.
 *
 * @event SMUX event
 * @returns String description or NULL if unknown
 */
static const char *event_to_str(unsigned cmd)
{
	if (cmd < ARRAY_SIZE(smux_events))
		return smux_events[cmd];
	return NULL;
}

/**
 * Set the reset state due to an unrecoverable failure.
 */
static void smux_enter_reset(void)
{
	SMUX_ERR("%s: unrecoverable failure, waiting for ssr\n", __func__);
	smux.in_reset = 1;
	smux.remote_is_alive = 0;
}

/**
 * Initialize the lch_structs.
 */
static int lch_init(void)
{
	unsigned int id;
	struct smux_lch_t *ch;
	int i = 0;

	handle_size = sizeof(struct smux_notify_handle *);

	smux_notify_wq = create_singlethread_workqueue("smux_notify_wq");
	smux_tx_wq = create_singlethread_workqueue("smux_tx_wq");
	smux_rx_wq = create_singlethread_workqueue("smux_rx_wq");

	if (IS_ERR(smux_notify_wq) || IS_ERR(smux_tx_wq)) {
		SMUX_DBG("smux: %s: create_singlethread_workqueue ENOMEM\n",
			__func__);
		return -ENOMEM;
	}

	i |= kfifo_alloc(&smux_notify_fifo,
			SMUX_NOTIFY_FIFO_SIZE * handle_size,
			GFP_KERNEL);
	i |= smux_loopback_init();

	if (i) {
		SMUX_ERR("%s: out of memory error\n", __func__);
		return -ENOMEM;
	}

	for (id = 0; id < SMUX_NUM_LOGICAL_CHANNELS; id++) {
		ch = &smux_lch[id];

		spin_lock_init(&ch->state_lock_lhb1);
		ch->lcid = id;
		ch->local_state = SMUX_LCH_LOCAL_CLOSED;
		ch->local_mode = SMUX_LCH_MODE_NORMAL;
		ch->local_tiocm = 0x0;
		ch->options = SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP;
		ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
		ch->remote_mode = SMUX_LCH_MODE_NORMAL;
		ch->remote_tiocm = 0x0;
		ch->tx_flow_control = 0;
		ch->rx_flow_control_auto = 0;
		ch->rx_flow_control_client = 0;
		ch->priv = 0;
		ch->notify = 0;
		ch->get_rx_buffer = 0;

		INIT_LIST_HEAD(&ch->rx_retry_queue);
		ch->rx_retry_queue_cnt = 0;
		INIT_DELAYED_WORK(&ch->rx_retry_work, smux_rx_retry_worker);

		spin_lock_init(&ch->tx_lock_lhb2);
		INIT_LIST_HEAD(&ch->tx_queue);
		INIT_LIST_HEAD(&ch->tx_ready_list);
		ch->tx_pending_data_cnt = 0;
		ch->notify_lwm = 0;
	}

	return 0;
}

/**
 * Empty and clean up all SMUX logical channels for subsystem restart or
 * line discipline disconnect.
 */
static void smux_lch_purge(void)
{
	struct smux_lch_t *ch;
	unsigned long flags;
	int i;

	/* Empty TX ready list */
	spin_lock_irqsave(&smux.tx_lock_lha2, flags);
	while (!list_empty(&smux.lch_tx_ready_list)) {
		SMUX_DBG("smux: %s: emptying ready list %p\n",
				__func__, smux.lch_tx_ready_list.next);
		ch = list_first_entry(&smux.lch_tx_ready_list,
						struct smux_lch_t,
						tx_ready_list);
		list_del(&ch->tx_ready_list);
		INIT_LIST_HEAD(&ch->tx_ready_list);
	}

	/* Purge Power Queue */
	while (!list_empty(&smux.power_queue)) {
		struct smux_pkt_t *pkt;

		pkt = list_first_entry(&smux.power_queue,
						struct smux_pkt_t,
						list);
		list_del(&pkt->list);
		SMUX_DBG("smux: %s: emptying power queue pkt=%p\n",
				__func__, pkt);
		smux_free_pkt(pkt);
	}
	spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);

	/* Close all ports */
	for (i = 0; i < SMUX_NUM_LOGICAL_CHANNELS; i++) {
		ch = &smux_lch[i];
		SMUX_DBG("smux: %s: cleaning up lcid %d\n", __func__, i);

		spin_lock_irqsave(&ch->state_lock_lhb1, flags);

		/* Purge TX queue */
		spin_lock(&ch->tx_lock_lhb2);
		smux_purge_ch_tx_queue(ch, 1);
		spin_unlock(&ch->tx_lock_lhb2);

		/* Notify user of disconnect and reset channel state */
		if (ch->local_state == SMUX_LCH_LOCAL_OPENED ||
			ch->local_state == SMUX_LCH_LOCAL_CLOSING) {
			union notifier_metadata meta;

			meta.disconnected.is_ssr = smux.in_reset;
			schedule_notify(ch->lcid, SMUX_DISCONNECTED, &meta);
		}

		ch->local_state = SMUX_LCH_LOCAL_CLOSED;
		ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
		ch->remote_mode = SMUX_LCH_MODE_NORMAL;
		ch->tx_flow_control = 0;
		ch->rx_flow_control_auto = 0;
		ch->rx_flow_control_client = 0;

		/* Purge RX retry queue */
		if (ch->rx_retry_queue_cnt)
			queue_delayed_work(smux_rx_wq, &ch->rx_retry_work, 0);

		spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
	}
}

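/**
 * Verify that a logical channel ID is valid.
 *
 * @lcid Logical channel ID
 *
 * @returns 0 if the ID is valid, -ENXIO otherwise
 */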
int smux_assert_lch_id(uint32_t lcid)
{
	if (lcid >= SMUX_NUM_LOGICAL_CHANNELS)
		return -ENXIO;
	else
		return 0;
}

/**
 * Log packet information for debug purposes.
 *
 * @pkt     Packet to log
 * @is_recv 1 = RX packet; 0 = TX Packet
 *
 * [DIR][LCID] [LOCAL_STATE][LOCAL_MODE]:[REMOTE_STATE][REMOTE_MODE] PKT Info
 *
 * PKT Info:
 *   [CMD] flags [flags] len [PAYLOAD_LEN]:[PAD_LEN] [Payload hex bytes]
 *
 * Direction:    R = Receive, S = Send
 * Local State:  C = Closed; c = closing; o = opening; O = Opened
 * Local Mode:   L = Local loopback; R = Remote loopback; N = Normal
 * Remote State: C = Closed; O = Opened
 * Remote Mode:  R = Remote loopback; N = Normal
 */
static void smux_log_pkt(struct smux_pkt_t *pkt, int is_recv)
{
	char logbuf[SMUX_PKT_LOG_SIZE];
	char cmd_extra[16];
	int i = 0;
	int count;
	int len;
	char local_state;
	char local_mode;
	char remote_state;
	char remote_mode;
	struct smux_lch_t *ch = NULL;
	unsigned char *data;

	if (!smux_assert_lch_id(pkt->hdr.lcid))
		ch = &smux_lch[pkt->hdr.lcid];

	if (ch) {
		switch (ch->local_state) {
		case SMUX_LCH_LOCAL_CLOSED:
			local_state = 'C';
			break;
		case SMUX_LCH_LOCAL_OPENING:
			local_state = 'o';
			break;
		case SMUX_LCH_LOCAL_OPENED:
			local_state = 'O';
			break;
		case SMUX_LCH_LOCAL_CLOSING:
			local_state = 'c';
			break;
		default:
			local_state = 'U';
			break;
		}

		switch (ch->local_mode) {
		case SMUX_LCH_MODE_LOCAL_LOOPBACK:
			local_mode = 'L';
			break;
		case SMUX_LCH_MODE_REMOTE_LOOPBACK:
			local_mode = 'R';
			break;
		case SMUX_LCH_MODE_NORMAL:
			local_mode = 'N';
			break;
		default:
			local_mode = 'U';
			break;
		}

		switch (ch->remote_state) {
		case SMUX_LCH_REMOTE_CLOSED:
			remote_state = 'C';
			break;
		case SMUX_LCH_REMOTE_OPENED:
			remote_state = 'O';
			break;

		default:
			remote_state = 'U';
			break;
		}

		switch (ch->remote_mode) {
		case SMUX_LCH_MODE_REMOTE_LOOPBACK:
			remote_mode = 'R';
			break;
		case SMUX_LCH_MODE_NORMAL:
			remote_mode = 'N';
			break;
		default:
			remote_mode = 'U';
			break;
		}
	} else {
		/* broadcast channel */
		local_state = '-';
		local_mode = '-';
		remote_state = '-';
		remote_mode = '-';
	}

	/* determine command type (ACK, etc) */
	cmd_extra[0] = '\0';
	switch (pkt->hdr.cmd) {
	case SMUX_CMD_OPEN_LCH:
		if (pkt->hdr.flags & SMUX_CMD_OPEN_ACK)
			snprintf(cmd_extra, sizeof(cmd_extra), " ACK");
		break;
	case SMUX_CMD_CLOSE_LCH:
		if (pkt->hdr.flags & SMUX_CMD_CLOSE_ACK)
			snprintf(cmd_extra, sizeof(cmd_extra), " ACK");
		break;

	case SMUX_CMD_PWR_CTL:
		if (pkt->hdr.flags & SMUX_CMD_PWR_CTL_ACK)
			snprintf(cmd_extra, sizeof(cmd_extra), " ACK");
		break;
	}

	i += snprintf(logbuf + i, SMUX_PKT_LOG_SIZE - i,
			"smux: %c%d %c%c:%c%c %s%s flags %x len %d:%d ",
			is_recv ? 'R' : 'S', pkt->hdr.lcid,
			local_state, local_mode,
			remote_state, remote_mode,
			cmd_to_str(pkt->hdr.cmd), cmd_extra, pkt->hdr.flags,
			pkt->hdr.payload_len, pkt->hdr.pad_len);

	len = (pkt->hdr.payload_len > 16) ? 16 : pkt->hdr.payload_len;
	data = (unsigned char *)pkt->payload;
	for (count = 0; count < len; count++)
		i += snprintf(logbuf + i, SMUX_PKT_LOG_SIZE - i,
				"%02x ", (unsigned)data[count]);

	IPC_LOG_STR(logbuf);
}

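/**
 * Deliver queued notifications to the clients.
 *
 * Runs on smux_notify_wq and drains the notification FIFO, calling each
 * client's notify() callback with the queued event and metadata.
 */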
static void smux_notify_local_fn(struct work_struct *work)
{
	struct smux_notify_handle *notify_handle = NULL;
	union notifier_metadata *metadata = NULL;
	unsigned long flags;
	int i;

	for (;;) {
		/* retrieve notification */
		spin_lock_irqsave(&notify_lock_lhc1, flags);
		if (kfifo_len(&smux_notify_fifo) >= handle_size) {
			i = kfifo_out(&smux_notify_fifo,
					&notify_handle,
					handle_size);
			if (i != handle_size) {
				SMUX_ERR(
					"%s: unable to retrieve handle %d expected %d\n",
					__func__, i, handle_size);
				spin_unlock_irqrestore(&notify_lock_lhc1, flags);
				break;
			}
		} else {
			spin_unlock_irqrestore(&notify_lock_lhc1, flags);
			break;
		}
		--queued_fifo_notifications;
		spin_unlock_irqrestore(&notify_lock_lhc1, flags);

		/* notify client */
		metadata = notify_handle->metadata;
		notify_handle->notify(notify_handle->priv,
			notify_handle->event_type,
			metadata);

		kfree(metadata);
		kfree(notify_handle);
	}
}

/**
 * Initialize existing packet.
 */
void smux_init_pkt(struct smux_pkt_t *pkt)
{
	memset(pkt, 0x0, sizeof(*pkt));
	pkt->hdr.magic = SMUX_MAGIC;
	INIT_LIST_HEAD(&pkt->list);
}

/**
 * Allocate and initialize packet.
 *
 * If a payload is needed, either set it directly and ensure that it's freed or
 * use smux_alloc_pkt_payload() to allocate the payload, and it will be freed
 * automatically when smux_free_pkt() is called.
 */
struct smux_pkt_t *smux_alloc_pkt(void)
{
	struct smux_pkt_t *pkt;

	/* Consider a free list implementation instead of kmalloc */
	pkt = kmalloc(sizeof(struct smux_pkt_t), GFP_ATOMIC);
	if (!pkt) {
		SMUX_ERR("%s: out of memory\n", __func__);
		return NULL;
	}
	smux_init_pkt(pkt);
	pkt->allocated = 1;

	return pkt;
}

/**
 * Free packet.
 *
 * @pkt Packet to free (may be NULL)
 *
 * If payload was allocated using smux_alloc_pkt_payload(), then it is freed as
 * well. Otherwise, the caller is responsible for freeing the payload.
 */
void smux_free_pkt(struct smux_pkt_t *pkt)
{
	if (pkt) {
		if (pkt->free_payload)
			kfree(pkt->payload);
		if (pkt->allocated)
			kfree(pkt);
	}
}

/**
 * Allocate packet payload.
 *
 * @pkt Packet to add payload to
 *
 * @returns 0 on success, <0 upon error
 *
 * A flag is set to signal smux_free_pkt() to free the payload.
 */
int smux_alloc_pkt_payload(struct smux_pkt_t *pkt)
{
	if (!pkt)
		return -EINVAL;

	pkt->payload = kmalloc(pkt->hdr.payload_len, GFP_ATOMIC);
	pkt->free_payload = 1;
	if (!pkt->payload) {
		SMUX_ERR("%s: unable to malloc %d bytes for payload\n",
				__func__, pkt->hdr.payload_len);
		return -ENOMEM;
	}

	return 0;
}

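/**
 * Queue an event notification for delivery to the channel client.
 *
 * @lcid     Logical channel ID
 * @event    SMUX event code
 * @metadata Event metadata (copied; may be NULL)
 *
 * @returns 0 for success, < 0 for failure
 */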
static int schedule_notify(uint8_t lcid, int event,
			const union notifier_metadata *metadata)
{
	struct smux_notify_handle *notify_handle = 0;
	union notifier_metadata *meta_copy = 0;
	struct smux_lch_t *ch;
	int i;
	unsigned long flags;
	int ret = 0;

	IPC_LOG_STR("smux: %s ch:%d\n", event_to_str(event), lcid);
	ch = &smux_lch[lcid];
	notify_handle = kzalloc(sizeof(struct smux_notify_handle),
						GFP_ATOMIC);
	if (!notify_handle) {
		SMUX_ERR("%s: out of memory\n", __func__);
		ret = -ENOMEM;
		goto free_out;
	}

	notify_handle->notify = ch->notify;
	notify_handle->priv = ch->priv;
	notify_handle->event_type = event;
	if (metadata) {
		meta_copy = kzalloc(sizeof(union notifier_metadata),
						GFP_ATOMIC);
		if (!meta_copy) {
			SMUX_ERR("%s: out of memory\n", __func__);
			ret = -ENOMEM;
			goto free_out;
		}
		*meta_copy = *metadata;
		notify_handle->metadata = meta_copy;
	} else {
		notify_handle->metadata = NULL;
	}

	spin_lock_irqsave(&notify_lock_lhc1, flags);
	i = kfifo_avail(&smux_notify_fifo);
	if (i < handle_size) {
		SMUX_ERR("%s: fifo full error %d expected %d\n",
					__func__, i, handle_size);
		ret = -ENOMEM;
		goto unlock_out;
	}

	i = kfifo_in(&smux_notify_fifo, &notify_handle, handle_size);
	if (i < 0 || i != handle_size) {
		SMUX_ERR("%s: fifo not available error %d (expected %d)\n",
				__func__, i, handle_size);
		ret = -ENOSPC;
		goto unlock_out;
	}
	++queued_fifo_notifications;

unlock_out:
	spin_unlock_irqrestore(&notify_lock_lhc1, flags);

free_out:
	queue_work(smux_notify_wq, &smux_notify_local);
	if (ret < 0 && notify_handle) {
		kfree(notify_handle->metadata);
		kfree(notify_handle);
	}
	return ret;
}

/**
 * Returns the serialized size of a packet.
 *
 * @pkt Packet to serialize
 *
 * @returns Serialized length of packet
 */
static unsigned int smux_serialize_size(struct smux_pkt_t *pkt)
{
	unsigned int size;

	size = sizeof(struct smux_hdr_t);
	size += pkt->hdr.payload_len;
	size += pkt->hdr.pad_len;

	return size;
}

/**
 * Serialize packet @pkt into output buffer @data.
 *
 * @pkt     Packet to serialize
 * @out     Destination buffer pointer
 * @out_len Size of serialized packet
 *
 * @returns 0 for success
 */
int smux_serialize(struct smux_pkt_t *pkt, char *out,
					unsigned int *out_len)
{
	char *data_start = out;

	if (smux_serialize_size(pkt) > SMUX_MAX_PKT_SIZE) {
		SMUX_ERR("%s: packet size %d too big\n",
				__func__, smux_serialize_size(pkt));
		return -E2BIG;
	}

	memcpy(out, &pkt->hdr, sizeof(struct smux_hdr_t));
	out += sizeof(struct smux_hdr_t);
	if (pkt->payload) {
		memcpy(out, pkt->payload, pkt->hdr.payload_len);
		out += pkt->hdr.payload_len;
	}
	if (pkt->hdr.pad_len) {
		memset(out, 0x0, pkt->hdr.pad_len);
		out += pkt->hdr.pad_len;
	}
	*out_len = out - data_start;
	return 0;
}

/**
 * Serialize header and provide pointer to the data.
 *
 * @pkt          Packet
 * @out[out]     Pointer to the serialized header data
 * @out_len[out] Pointer to the serialized header length
 */
static void smux_serialize_hdr(struct smux_pkt_t *pkt, char **out,
					unsigned int *out_len)
{
	*out = (char *)&pkt->hdr;
	*out_len = sizeof(struct smux_hdr_t);
}

/**
 * Serialize payload and provide pointer to the data.
 *
 * @pkt          Packet
 * @out[out]     Pointer to the serialized payload data
 * @out_len[out] Pointer to the serialized payload length
 */
static void smux_serialize_payload(struct smux_pkt_t *pkt, char **out,
					unsigned int *out_len)
{
	*out = pkt->payload;
	*out_len = pkt->hdr.payload_len;
}

/**
 * Serialize padding and provide pointer to the data.
 *
 * @pkt          Packet
 * @out[out]     Pointer to the serialized padding (always NULL)
 * @out_len[out] Pointer to the serialized payload length
 *
 * Since the padding field value is undefined, only the size of the padding
 * (@out_len) is set and the buffer pointer (@out) will always be NULL.
 */
static void smux_serialize_padding(struct smux_pkt_t *pkt, char **out,
					unsigned int *out_len)
{
	*out = NULL;
	*out_len = pkt->hdr.pad_len;
}

/**
 * Write data to TTY framework and handle breaking the writes up if needed.
 *
 * @data Data to write
 * @len  Length of data
 *
 * @returns 0 for success, < 0 for failure
 */
static int write_to_tty(char *data, unsigned len)
{
	int data_written;

	if (!data)
		return 0;

	while (len > 0 && !smux.in_reset) {
		data_written = smux.tty->ops->write(smux.tty, data, len);
		if (data_written >= 0) {
			len -= data_written;
			data += data_written;
		} else {
			SMUX_ERR("%s: TTY write returned error %d\n",
					__func__, data_written);
			return data_written;
		}

		if (len)
			tty_wait_until_sent(smux.tty,
				msecs_to_jiffies(TTY_BUFFER_FULL_WAIT_MS));
	}
	return 0;
}

/**
 * Write packet to TTY.
 *
 * @pkt packet to write
 *
 * @returns 0 on success
 */
static int smux_tx_tty(struct smux_pkt_t *pkt)
{
	char *data;
	unsigned int len;
	int ret;

	if (!smux.tty) {
		SMUX_ERR("%s: TTY not initialized", __func__);
		return -ENOTTY;
	}

	if (pkt->hdr.cmd == SMUX_CMD_BYTE) {
		SMUX_DBG("smux: %s: tty send single byte\n", __func__);
		ret = write_to_tty(&pkt->hdr.flags, 1);
		return ret;
	}

	smux_serialize_hdr(pkt, &data, &len);
	ret = write_to_tty(data, len);
	if (ret) {
		SMUX_ERR("%s: failed %d to write header %d\n",
				__func__, ret, len);
		return ret;
	}

	smux_serialize_payload(pkt, &data, &len);
	ret = write_to_tty(data, len);
	if (ret) {
		SMUX_ERR("%s: failed %d to write payload %d\n",
				__func__, ret, len);
		return ret;
	}

	smux_serialize_padding(pkt, &data, &len);
	while (len > 0) {
		char zero = 0x0;
		ret = write_to_tty(&zero, 1);
		if (ret) {
			SMUX_ERR("%s: failed %d to write padding %d\n",
					__func__, ret, len);
			return ret;
		}
		--len;
	}
	return 0;
}

/**
 * Send a single character.
 *
 * @ch Character to send
 */
static void smux_send_byte(char ch)
{
	struct smux_pkt_t *pkt;

	pkt = smux_alloc_pkt();
	if (!pkt) {
		SMUX_ERR("%s: alloc failure for byte %x\n", __func__, ch);
		return;
	}
	pkt->hdr.cmd = SMUX_CMD_BYTE;
	pkt->hdr.flags = ch;
	pkt->hdr.lcid = SMUX_BROADCAST_LCID;

	list_add_tail(&pkt->list, &smux.power_queue);
	queue_work(smux_tx_wq, &smux_tx_work);
}

/**
 * Receive a single-character packet (used for internal testing).
 *
 * @ch   Character to receive
 * @lcid Logical channel ID for packet
 *
 * @returns 0 for success
 */
static int smux_receive_byte(char ch, int lcid)
{
	struct smux_pkt_t pkt;

	smux_init_pkt(&pkt);
	pkt.hdr.lcid = lcid;
	pkt.hdr.cmd = SMUX_CMD_BYTE;
	pkt.hdr.flags = ch;

	return smux_dispatch_rx_pkt(&pkt);
}

/**
 * Queue packet for transmit.
 *
 * @pkt_ptr Packet to queue
 * @ch      Channel to queue packet on
 * @queue   Queue channel on ready list
 */
static void smux_tx_queue(struct smux_pkt_t *pkt_ptr, struct smux_lch_t *ch,
		int queue)
{
	unsigned long flags;

	SMUX_DBG("smux: %s: queuing pkt %p\n", __func__, pkt_ptr);

	spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
	list_add_tail(&pkt_ptr->list, &ch->tx_queue);
	spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);

	if (queue)
		list_channel(ch);
}

/**
 * Handle receive OPEN ACK command.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_open_ack(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret;
	struct smux_lch_t *ch;
	int enable_powerdown = 0;

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];

	spin_lock(&ch->state_lock_lhb1);
	if (ch->local_state == SMUX_LCH_LOCAL_OPENING) {
		SMUX_DBG("smux: lcid %d local state 0x%x -> 0x%x\n", lcid,
				ch->local_state,
				SMUX_LCH_LOCAL_OPENED);

		if (pkt->hdr.flags & SMUX_CMD_OPEN_POWER_COLLAPSE)
			enable_powerdown = 1;

		ch->local_state = SMUX_LCH_LOCAL_OPENED;
		if (ch->remote_state == SMUX_LCH_REMOTE_OPENED)
			schedule_notify(lcid, SMUX_CONNECTED, NULL);
		ret = 0;
	} else if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
		SMUX_DBG("smux: Remote loopback OPEN ACK received\n");
		ret = 0;
	} else {
		SMUX_ERR("%s: lcid %d state 0x%x open ack invalid\n",
				__func__, lcid, ch->local_state);
		ret = -EINVAL;
	}
	spin_unlock(&ch->state_lock_lhb1);

	if (enable_powerdown) {
		spin_lock(&smux.tx_lock_lha2);
		if (!smux.powerdown_enabled) {
			smux.powerdown_enabled = 1;
			SMUX_DBG("smux: %s: enabling power-collapse support\n",
					__func__);
		}
		spin_unlock(&smux.tx_lock_lha2);
	}

	return ret;
}

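/**
 * Handle receive CLOSE ACK command.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */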
static int smux_handle_close_ack(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret;
	struct smux_lch_t *ch;
	union notifier_metadata meta_disconnected;
	unsigned long flags;

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];
	meta_disconnected.disconnected.is_ssr = 0;

	spin_lock_irqsave(&ch->state_lock_lhb1, flags);

	if (ch->local_state == SMUX_LCH_LOCAL_CLOSING) {
		SMUX_DBG("smux: lcid %d local state 0x%x -> 0x%x\n", lcid,
				SMUX_LCH_LOCAL_CLOSING,
				SMUX_LCH_LOCAL_CLOSED);
		ch->local_state = SMUX_LCH_LOCAL_CLOSED;
		if (ch->remote_state == SMUX_LCH_REMOTE_CLOSED)
			schedule_notify(lcid, SMUX_DISCONNECTED,
				&meta_disconnected);
		ret = 0;
	} else if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
		SMUX_DBG("smux: Remote loopback CLOSE ACK received\n");
		ret = 0;
	} else {
		SMUX_ERR("%s: lcid %d state 0x%x close ack invalid\n",
				__func__, lcid, ch->local_state);
		ret = -EINVAL;
	}
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
	return ret;
}

/**
 * Handle receive OPEN command.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_open_cmd(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret;
	struct smux_lch_t *ch;
	struct smux_pkt_t *ack_pkt;
	unsigned long flags;
	int tx_ready = 0;
	int enable_powerdown = 0;

	if (pkt->hdr.flags & SMUX_CMD_OPEN_ACK)
		return smux_handle_rx_open_ack(pkt);

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];

	spin_lock_irqsave(&ch->state_lock_lhb1, flags);

	if (ch->remote_state == SMUX_LCH_REMOTE_CLOSED) {
		SMUX_DBG("smux: lcid %d remote state 0x%x -> 0x%x\n", lcid,
				SMUX_LCH_REMOTE_CLOSED,
				SMUX_LCH_REMOTE_OPENED);

		ch->remote_state = SMUX_LCH_REMOTE_OPENED;
		if (pkt->hdr.flags & SMUX_CMD_OPEN_POWER_COLLAPSE)
			enable_powerdown = 1;

		/* Send Open ACK */
		ack_pkt = smux_alloc_pkt();
		if (!ack_pkt) {
			/* exit out to allow retrying this later */
			ret = -ENOMEM;
			goto out;
		}
		ack_pkt->hdr.cmd = SMUX_CMD_OPEN_LCH;
		ack_pkt->hdr.flags = SMUX_CMD_OPEN_ACK;
		if (enable_powerdown)
			ack_pkt->hdr.flags |= SMUX_CMD_OPEN_POWER_COLLAPSE;
		ack_pkt->hdr.lcid = lcid;
		ack_pkt->hdr.payload_len = 0;
		ack_pkt->hdr.pad_len = 0;
		if (pkt->hdr.flags & SMUX_CMD_OPEN_REMOTE_LOOPBACK) {
			ch->remote_mode = SMUX_LCH_MODE_REMOTE_LOOPBACK;
			ack_pkt->hdr.flags |= SMUX_CMD_OPEN_REMOTE_LOOPBACK;
		}
		smux_tx_queue(ack_pkt, ch, 0);
		tx_ready = 1;

		if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
			/*
			 * Send an Open command to the remote side to
			 * simulate our local client doing it.
			 */
			ack_pkt = smux_alloc_pkt();
			if (ack_pkt) {
				ack_pkt->hdr.lcid = lcid;
				ack_pkt->hdr.cmd = SMUX_CMD_OPEN_LCH;
				if (enable_powerdown)
					ack_pkt->hdr.flags |=
						SMUX_CMD_OPEN_POWER_COLLAPSE;
				ack_pkt->hdr.payload_len = 0;
				ack_pkt->hdr.pad_len = 0;
				smux_tx_queue(ack_pkt, ch, 0);
				tx_ready = 1;
			} else {
				SMUX_ERR(
					"%s: Remote loopback allocation failure\n",
					__func__);
			}
		} else if (ch->local_state == SMUX_LCH_LOCAL_OPENED) {
			schedule_notify(lcid, SMUX_CONNECTED, NULL);
		}
		ret = 0;
	} else {
		SMUX_ERR("%s: lcid %d remote state 0x%x open invalid\n",
				__func__, lcid, ch->remote_state);
		ret = -EINVAL;
	}

out:
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);

	if (enable_powerdown) {
		spin_lock_irqsave(&smux.tx_lock_lha2, flags);
		if (!smux.powerdown_enabled) {
			smux.powerdown_enabled = 1;
			SMUX_DBG("smux: %s: enabling power-collapse support\n",
					__func__);
		}
		spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
	}

	if (tx_ready)
		list_channel(ch);

	return ret;
}

/**
 * Handle receive CLOSE command.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_close_cmd(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret;
	struct smux_lch_t *ch;
	struct smux_pkt_t *ack_pkt;
	union notifier_metadata meta_disconnected;
	unsigned long flags;
	int tx_ready = 0;

	if (pkt->hdr.flags & SMUX_CMD_CLOSE_ACK)
		return smux_handle_close_ack(pkt);

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];
	meta_disconnected.disconnected.is_ssr = 0;

	spin_lock_irqsave(&ch->state_lock_lhb1, flags);
	if (ch->remote_state == SMUX_LCH_REMOTE_OPENED) {
		SMUX_DBG("smux: lcid %d remote state 0x%x -> 0x%x\n", lcid,
				SMUX_LCH_REMOTE_OPENED,
				SMUX_LCH_REMOTE_CLOSED);

		ack_pkt = smux_alloc_pkt();
		if (!ack_pkt) {
			/* exit out to allow retrying this later */
			ret = -ENOMEM;
			goto out;
		}
		ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
		ack_pkt->hdr.cmd = SMUX_CMD_CLOSE_LCH;
		ack_pkt->hdr.flags = SMUX_CMD_CLOSE_ACK;
		ack_pkt->hdr.lcid = lcid;
		ack_pkt->hdr.payload_len = 0;
		ack_pkt->hdr.pad_len = 0;
		smux_tx_queue(ack_pkt, ch, 0);
		tx_ready = 1;

		if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
			/*
			 * Send a Close command to the remote side to simulate
			 * our local client doing it.
			 */
			ack_pkt = smux_alloc_pkt();
			if (ack_pkt) {
				ack_pkt->hdr.lcid = lcid;
				ack_pkt->hdr.cmd = SMUX_CMD_CLOSE_LCH;
				ack_pkt->hdr.flags = 0;
				ack_pkt->hdr.payload_len = 0;
				ack_pkt->hdr.pad_len = 0;
				smux_tx_queue(ack_pkt, ch, 0);
				tx_ready = 1;
			} else {
				SMUX_ERR(
					"%s: Remote loopback allocation failure\n",
					__func__);
			}
		}

		if (ch->local_state == SMUX_LCH_LOCAL_CLOSED)
			schedule_notify(lcid, SMUX_DISCONNECTED,
				&meta_disconnected);
		ret = 0;
	} else {
		SMUX_ERR("%s: lcid %d remote state 0x%x close invalid\n",
				__func__, lcid, ch->remote_state);
		ret = -EINVAL;
	}
out:
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
	if (tx_ready)
		list_channel(ch);

	return ret;
}

/**
 * Handle receive DATA command.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_data_cmd(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret = 0;
	int do_retry = 0;
	int tx_ready = 0;
	int tmp;
	int rx_len;
	struct smux_lch_t *ch;
	union notifier_metadata metadata;
	int remote_loopback;
	struct smux_pkt_t *ack_pkt;
	unsigned long flags;

	if (!pkt || smux_assert_lch_id(pkt->hdr.lcid)) {
		ret = -ENXIO;
		goto out;
	}

	rx_len = pkt->hdr.payload_len;
	if (rx_len == 0) {
		ret = -EINVAL;
		goto out;
	}

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];
	spin_lock_irqsave(&ch->state_lock_lhb1, flags);
	remote_loopback = ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK;

	if (ch->local_state != SMUX_LCH_LOCAL_OPENED
		&& !remote_loopback) {
		SMUX_ERR("smux: ch %d error data on local state 0x%x",
					lcid, ch->local_state);
		ret = -EIO;
		spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
		goto out;
	}

	if (ch->remote_state != SMUX_LCH_REMOTE_OPENED) {
		SMUX_ERR("smux: ch %d error data on remote state 0x%x",
					lcid, ch->remote_state);
		ret = -EIO;
		spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
		goto out;
	}

	if (!list_empty(&ch->rx_retry_queue)) {
		do_retry = 1;

		if ((ch->options & SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP) &&
			!ch->rx_flow_control_auto &&
			((ch->rx_retry_queue_cnt + 1) >= SMUX_RX_WM_HIGH)) {
			/* need to flow control RX */
			ch->rx_flow_control_auto = 1;
			tx_ready |= smux_rx_flow_control_updated(ch);
			schedule_notify(ch->lcid, SMUX_RX_RETRY_HIGH_WM_HIT,
					NULL);
		}
		if ((ch->rx_retry_queue_cnt + 1) > SMUX_RX_RETRY_MAX_PKTS) {
			/* retry queue full */
			SMUX_ERR(
				"%s: ch %d RX retry queue full; rx flow=%d\n",
				__func__, lcid, ch->rx_flow_control_auto);
			schedule_notify(lcid, SMUX_READ_FAIL, NULL);
			ret = -ENOMEM;
			spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
			goto out;
		}
	}
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);

	if (remote_loopback) {
		/* Echo the data back to the remote client. */
		ack_pkt = smux_alloc_pkt();
		if (ack_pkt) {
			ack_pkt->hdr.lcid = lcid;
			ack_pkt->hdr.cmd = SMUX_CMD_DATA;
			ack_pkt->hdr.flags = 0;
			ack_pkt->hdr.payload_len = pkt->hdr.payload_len;
			if (ack_pkt->hdr.payload_len) {
				smux_alloc_pkt_payload(ack_pkt);
				memcpy(ack_pkt->payload, pkt->payload,
						ack_pkt->hdr.payload_len);
			}
			ack_pkt->hdr.pad_len = pkt->hdr.pad_len;
			smux_tx_queue(ack_pkt, ch, 0);
			tx_ready = 1;
		} else {
			SMUX_ERR("%s: Remote loopback allocation failure\n",
Eric Holmbergb8435c82012-06-05 14:51:29 -06001533 __func__);
1534 }
1535 } else if (!do_retry) {
1536 /* request buffer from client */
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001537 metadata.read.pkt_priv = 0;
1538 metadata.read.buffer = 0;
Eric Holmbergb8435c82012-06-05 14:51:29 -06001539 tmp = ch->get_rx_buffer(ch->priv,
1540 (void **)&metadata.read.pkt_priv,
1541 (void **)&metadata.read.buffer,
1542 rx_len);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001543
Eric Holmbergb8435c82012-06-05 14:51:29 -06001544 if (tmp == 0 && metadata.read.buffer) {
1545 /* place data into RX buffer */
1546 memcpy(metadata.read.buffer, pkt->payload,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001547 rx_len);
Eric Holmbergb8435c82012-06-05 14:51:29 -06001548 metadata.read.len = rx_len;
1549 schedule_notify(lcid, SMUX_READ_DONE,
1550 &metadata);
1551 } else if (tmp == -EAGAIN ||
1552 (tmp == 0 && !metadata.read.buffer)) {
1553 /* buffer allocation failed - add to retry queue */
1554 do_retry = 1;
1555 } else if (tmp < 0) {
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06001556 SMUX_ERR("%s: ch %d Client RX buffer alloc failed %d\n",
Eric Holmberg2e0906f2012-06-26 13:29:14 -06001557 __func__, lcid, tmp);
Eric Holmbergb8435c82012-06-05 14:51:29 -06001558 schedule_notify(lcid, SMUX_READ_FAIL, NULL);
1559 ret = -ENOMEM;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001560 }
1561 }
1562
Eric Holmbergb8435c82012-06-05 14:51:29 -06001563 if (do_retry) {
1564 struct smux_rx_pkt_retry *retry;
1565
1566 retry = kmalloc(sizeof(struct smux_rx_pkt_retry), GFP_KERNEL);
1567 if (!retry) {
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06001568 SMUX_ERR("%s: retry alloc failure\n", __func__);
Eric Holmbergb8435c82012-06-05 14:51:29 -06001569 ret = -ENOMEM;
1570 schedule_notify(lcid, SMUX_READ_FAIL, NULL);
1571 goto out;
1572 }
1573 INIT_LIST_HEAD(&retry->rx_retry_list);
1574 retry->timeout_in_ms = SMUX_RX_RETRY_MIN_MS;
1575
1576 /* copy packet */
1577 retry->pkt = smux_alloc_pkt();
1578 if (!retry->pkt) {
1579 kfree(retry);
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06001580 SMUX_ERR("%s: pkt alloc failure\n", __func__);
Eric Holmbergb8435c82012-06-05 14:51:29 -06001581 ret = -ENOMEM;
1582 schedule_notify(lcid, SMUX_READ_FAIL, NULL);
1583 goto out;
1584 }
1585 retry->pkt->hdr.lcid = lcid;
1586 retry->pkt->hdr.payload_len = pkt->hdr.payload_len;
1587 retry->pkt->hdr.pad_len = pkt->hdr.pad_len;
1588 if (retry->pkt->hdr.payload_len) {
1589 smux_alloc_pkt_payload(retry->pkt);
1590 memcpy(retry->pkt->payload, pkt->payload,
1591 retry->pkt->hdr.payload_len);
1592 }
1593
1594 /* add to retry queue */
1595 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
1596 list_add_tail(&retry->rx_retry_list, &ch->rx_retry_queue);
1597 ++ch->rx_retry_queue_cnt;
1598 if (ch->rx_retry_queue_cnt == 1)
1599 queue_delayed_work(smux_rx_wq, &ch->rx_retry_work,
1600 msecs_to_jiffies(retry->timeout_in_ms));
1601 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
1602 }
1603
Eric Holmberg2e0906f2012-06-26 13:29:14 -06001604 if (tx_ready)
1605 list_channel(ch);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001606out:
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001607 return ret;
1608}
1609
1610/**
1611 * Handle receive byte command for testing purposes.
1612 *
1613 * @pkt Received packet
1614 *
1615 * @returns 0 for success
1616 */
1617static int smux_handle_rx_byte_cmd(struct smux_pkt_t *pkt)
1618{
1619 uint8_t lcid;
1620 int ret;
1621 struct smux_lch_t *ch;
1622 union notifier_metadata metadata;
1623 unsigned long flags;
1624
Eric Holmbergffddd4c2012-06-08 12:37:51 -06001625 if (!pkt || smux_assert_lch_id(pkt->hdr.lcid)) {
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06001626 SMUX_ERR("%s: invalid packet or channel id\n", __func__);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001627 return -ENXIO;
Eric Holmbergffddd4c2012-06-08 12:37:51 -06001628 }
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001629
1630 lcid = pkt->hdr.lcid;
1631 ch = &smux_lch[lcid];
1632 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
1633
1634 if (ch->local_state != SMUX_LCH_LOCAL_OPENED) {
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06001635 SMUX_ERR("smux: ch %d error data on local state 0x%x\n",
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001636 lcid, ch->local_state);
1637 ret = -EIO;
1638 goto out;
1639 }
1640
1641 if (ch->remote_state != SMUX_LCH_REMOTE_OPENED) {
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06001642 SMUX_ERR("smux: ch %d error data on remote state 0x%x\n",
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001643 lcid, ch->remote_state);
1644 ret = -EIO;
1645 goto out;
1646 }
1647
1648 metadata.read.pkt_priv = (void *)(int)pkt->hdr.flags;
1649 metadata.read.buffer = 0;
1650 schedule_notify(lcid, SMUX_READ_DONE, &metadata);
1651 ret = 0;
1652
1653out:
1654 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
1655 return ret;
1656}
1657
1658/**
1659 * Handle receive status command.
1660 *
1661 * @pkt Received packet
1662 *
1663 * @returns 0 for success
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001664 */
1665static int smux_handle_rx_status_cmd(struct smux_pkt_t *pkt)
1666{
1667 uint8_t lcid;
1668 int ret = 0;
1669 struct smux_lch_t *ch;
1670 union notifier_metadata meta;
1671 unsigned long flags;
1672 int tx_ready = 0;
1673
1674 lcid = pkt->hdr.lcid;
1675 ch = &smux_lch[lcid];
1676
1677 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
1678 meta.tiocm.tiocm_old = ch->remote_tiocm;
1679 meta.tiocm.tiocm_new = pkt->hdr.flags;
1680
1681 /* update logical channel flow control */
1682 if ((meta.tiocm.tiocm_old & SMUX_CMD_STATUS_FLOW_CNTL) ^
1683 (meta.tiocm.tiocm_new & SMUX_CMD_STATUS_FLOW_CNTL)) {
1684 /* logical channel flow control changed */
1685 if (pkt->hdr.flags & SMUX_CMD_STATUS_FLOW_CNTL) {
1686 /* disabled TX */
Angshuman Sarkar98f78122012-07-24 14:50:42 +05301687 SMUX_DBG("smux: TX Flow control enabled\n");
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001688 ch->tx_flow_control = 1;
1689 } else {
1690 /* re-enable channel */
Angshuman Sarkar98f78122012-07-24 14:50:42 +05301691 SMUX_DBG("smux: TX Flow control disabled\n");
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001692 ch->tx_flow_control = 0;
1693 tx_ready = 1;
1694 }
1695 }
1696 meta.tiocm.tiocm_old = msm_smux_tiocm_get_atomic(ch);
1697 ch->remote_tiocm = pkt->hdr.flags;
1698 meta.tiocm.tiocm_new = msm_smux_tiocm_get_atomic(ch);
1699
1700 /* client notification for status change */
1701 if (IS_FULLY_OPENED(ch)) {
1702 if (meta.tiocm.tiocm_old != meta.tiocm.tiocm_new)
1703 schedule_notify(lcid, SMUX_TIOCM_UPDATE, &meta);
1704 ret = 0;
1705 }
1706 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
1707 if (tx_ready)
1708 list_channel(ch);
1709
1710 return ret;
1711}
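
/*
 * The XOR above is an edge detector on the flow-control bit: it is
 * nonzero only when SMUX_CMD_STATUS_FLOW_CNTL differs between the
 * cached remote flags and the new packet, so a repeated status packet
 * with the same bit set does not re-trigger tx_flow_control or queue
 * a spurious tx_ready. Only a 1->0 transition re-lists the channel
 * for transmit.
 */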
1712
1713/**
1714 * Handle receive power command.
1715 *
1716 * @pkt Received packet
1717 *
1718 * @returns 0 for success
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001719 */
1720static int smux_handle_rx_power_cmd(struct smux_pkt_t *pkt)
1721{
Steve Mucklef132c6c2012-06-06 18:30:57 -07001722 struct smux_pkt_t *ack_pkt = NULL;
Eric Holmberga9b06472012-06-22 09:46:34 -06001723 int power_down = 0;
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001724 unsigned long flags;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001725
Eric Holmberg4dd6e1a2012-06-26 13:46:29 -06001726 SMUX_PWR_PKT_RX(pkt);
1727
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001728 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001729 if (pkt->hdr.flags & SMUX_CMD_PWR_CTL_ACK) {
1730 /* local sleep request ack */
Eric Holmberga9b06472012-06-22 09:46:34 -06001731 if (smux.power_state == SMUX_PWR_TURNING_OFF)
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001732 /* Power-down complete, turn off UART */
Eric Holmberga9b06472012-06-22 09:46:34 -06001733 power_down = 1;
1734 else
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06001735 SMUX_ERR("%s: sleep request ack invalid in state %d\n",
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001736 __func__, smux.power_state);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001737 } else {
Eric Holmbergffddd4c2012-06-08 12:37:51 -06001738 /*
1739 * Remote sleep request
1740 *
1741 * Even if we have data pending, we need to transition to the
1742 * POWER_OFF state and then perform a wakeup since the remote
1743 * side has requested a power-down.
1744 *
1745 * The state here is set to SMUX_PWR_TURNING_OFF_FLUSH and
1746 * the TX thread will set the state to SMUX_PWR_TURNING_OFF
1747 * when it sends the packet.
Eric Holmberga9b06472012-06-22 09:46:34 -06001748 *
1749 * If we are already powering down, then no ACK is sent.
Eric Holmbergffddd4c2012-06-08 12:37:51 -06001750 */
Eric Holmberga9b06472012-06-22 09:46:34 -06001751 if (smux.power_state == SMUX_PWR_ON) {
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001752 ack_pkt = smux_alloc_pkt();
1753 if (ack_pkt) {
Angshuman Sarkar98f78122012-07-24 14:50:42 +05301754 SMUX_PWR("smux: %s: Power %d->%d\n", __func__,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001755 smux.power_state,
1756 SMUX_PWR_TURNING_OFF_FLUSH);
1757
Eric Holmbergffddd4c2012-06-08 12:37:51 -06001758 smux.power_state = SMUX_PWR_TURNING_OFF_FLUSH;
1759
1760 /* send power-down ack */
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001761 ack_pkt->hdr.cmd = SMUX_CMD_PWR_CTL;
1762 ack_pkt->hdr.flags = SMUX_CMD_PWR_CTL_ACK;
Eric Holmbergffddd4c2012-06-08 12:37:51 -06001763 ack_pkt->hdr.lcid = SMUX_BROADCAST_LCID;
1764 list_add_tail(&ack_pkt->list,
1765 &smux.power_queue);
1766 queue_work(smux_tx_wq, &smux_tx_work);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001767 }
Eric Holmberga9b06472012-06-22 09:46:34 -06001768 } else if (smux.power_state == SMUX_PWR_TURNING_OFF_FLUSH) {
1769 /* Local power-down request still in TX queue */
Angshuman Sarkar98f78122012-07-24 14:50:42 +05301770 SMUX_PWR("smux: %s: Power-down shortcut - no ack\n",
Eric Holmberga9b06472012-06-22 09:46:34 -06001771 __func__);
1772 smux.power_ctl_remote_req_received = 1;
1773 } else if (smux.power_state == SMUX_PWR_TURNING_OFF) {
1774 /*
1775 * Local power-down request already sent to remote
1776 * side, so this request gets treated as an ACK.
1777 */
Angshuman Sarkar98f78122012-07-24 14:50:42 +05301778 SMUX_PWR("smux: %s: Power-down shortcut - no ack\n",
Eric Holmberga9b06472012-06-22 09:46:34 -06001779 __func__);
1780 power_down = 1;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001781 } else {
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06001782 SMUX_ERR("%s: sleep request invalid in state %d\n",
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001783 __func__, smux.power_state);
1784 }
1785 }
Eric Holmberga9b06472012-06-22 09:46:34 -06001786
1787 if (power_down) {
Angshuman Sarkar98f78122012-07-24 14:50:42 +05301788 SMUX_PWR("smux: %s: Power %d->%d\n", __func__,
Eric Holmberga9b06472012-06-22 09:46:34 -06001789 smux.power_state, SMUX_PWR_OFF_FLUSH);
1790 smux.power_state = SMUX_PWR_OFF_FLUSH;
1791 queue_work(smux_tx_wq, &smux_inactivity_work);
1792 }
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001793 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001794
1795 return 0;
1796}
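
/*
 * Receive-side power transitions implemented above, summarized
 * (sketch; state names from smux_private.h):
 *
 *	RX event	current state		action / next state
 *	------------	-------------------	----------------------------
 *	PWR_CTL ACK	TURNING_OFF		OFF_FLUSH, flush + UART off
 *	PWR_CTL req	ON			queue ACK, TURNING_OFF_FLUSH
 *	PWR_CTL req	TURNING_OFF_FLUSH	note request, no ACK sent
 *	PWR_CTL req	TURNING_OFF		treat as ACK -> OFF_FLUSH
 *	either		any other state		log error, no change
 */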
1797
1798/**
1799 * Handle dispatching a completed packet for receive processing.
1800 *
1801 * @pkt Packet to process
1802 *
1803 * @returns 0 for success
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001804 */
1805static int smux_dispatch_rx_pkt(struct smux_pkt_t *pkt)
1806{
Eric Holmbergf9622662012-06-13 15:55:45 -06001807 int ret = -ENXIO;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001808
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001809 switch (pkt->hdr.cmd) {
1810 case SMUX_CMD_OPEN_LCH:
Eric Holmberg4dd6e1a2012-06-26 13:46:29 -06001811 SMUX_LOG_PKT_RX(pkt);
Eric Holmbergf9622662012-06-13 15:55:45 -06001812 if (smux_assert_lch_id(pkt->hdr.lcid)) {
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06001813 SMUX_ERR("%s: invalid channel id %d\n",
Eric Holmbergf9622662012-06-13 15:55:45 -06001814 __func__, pkt->hdr.lcid);
1815 break;
1816 }
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001817 ret = smux_handle_rx_open_cmd(pkt);
1818 break;
1819
1820 case SMUX_CMD_DATA:
Eric Holmberg4dd6e1a2012-06-26 13:46:29 -06001821 SMUX_LOG_PKT_RX(pkt);
Eric Holmbergf9622662012-06-13 15:55:45 -06001822 if (smux_assert_lch_id(pkt->hdr.lcid)) {
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06001823 SMUX_ERR("%s: invalid channel id %d\n",
Eric Holmbergf9622662012-06-13 15:55:45 -06001824 __func__, pkt->hdr.lcid);
1825 break;
1826 }
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001827 ret = smux_handle_rx_data_cmd(pkt);
1828 break;
1829
1830 case SMUX_CMD_CLOSE_LCH:
Eric Holmberg4dd6e1a2012-06-26 13:46:29 -06001831 SMUX_LOG_PKT_RX(pkt);
Eric Holmbergf9622662012-06-13 15:55:45 -06001832 if (smux_assert_lch_id(pkt->hdr.lcid)) {
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06001833 SMUX_ERR("%s: invalid channel id %d\n",
Eric Holmbergf9622662012-06-13 15:55:45 -06001834 __func__, pkt->hdr.lcid);
1835 break;
1836 }
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001837 ret = smux_handle_rx_close_cmd(pkt);
1838 break;
1839
1840 case SMUX_CMD_STATUS:
Eric Holmberg4dd6e1a2012-06-26 13:46:29 -06001841 SMUX_LOG_PKT_RX(pkt);
Eric Holmbergf9622662012-06-13 15:55:45 -06001842 if (smux_assert_lch_id(pkt->hdr.lcid)) {
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06001843 SMUX_ERR("%s: invalid channel id %d\n",
Eric Holmbergf9622662012-06-13 15:55:45 -06001844 __func__, pkt->hdr.lcid);
1845 break;
1846 }
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001847 ret = smux_handle_rx_status_cmd(pkt);
1848 break;
1849
1850 case SMUX_CMD_PWR_CTL:
1851 ret = smux_handle_rx_power_cmd(pkt);
1852 break;
1853
1854 case SMUX_CMD_BYTE:
Eric Holmberg4dd6e1a2012-06-26 13:46:29 -06001855 SMUX_LOG_PKT_RX(pkt);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001856 ret = smux_handle_rx_byte_cmd(pkt);
1857 break;
1858
1859 default:
Eric Holmberg4dd6e1a2012-06-26 13:46:29 -06001860 SMUX_LOG_PKT_RX(pkt);
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06001861 SMUX_ERR("%s: command %d unknown\n", __func__, pkt->hdr.cmd);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001862 ret = -EINVAL;
1863 }
1864 return ret;
1865}
1866
1867/**
1868 * Deserializes a packet and dispatches it to the packet receive logic.
1869 *
1870 * @data Raw data for one packet
1871 * @len Length of the data
1872 *
1873 * @returns 0 for success
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001874 */
1875static int smux_deserialize(unsigned char *data, int len)
1876{
1877 struct smux_pkt_t recv;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001878
1879 smux_init_pkt(&recv);
1880
1881 /*
1882 * It may be possible to optimize this to not use the
1883 * temporary buffer.
1884 */
1885 memcpy(&recv.hdr, data, sizeof(struct smux_hdr_t));
1886
1887 if (recv.hdr.magic != SMUX_MAGIC) {
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06001888 SMUX_ERR("%s: invalid header magic\n", __func__);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001889 return -EINVAL;
1890 }
1891
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001892 if (recv.hdr.payload_len)
1893 recv.payload = data + sizeof(struct smux_hdr_t);
1894
1895 return smux_dispatch_rx_pkt(&recv);
1896}
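
/*
 * As reconstructed by the RX state machine below, one complete frame
 * on the wire is:
 *
 *	sizeof(struct smux_hdr_t) header bytes (beginning with the two
 *	magic bytes matched against SMUX_MAGIC above), followed by
 *	hdr.payload_len payload bytes and hdr.pad_len padding bytes.
 *
 * smux_deserialize() always receives exactly one such frame, which is
 * why recv.payload may simply alias the caller's buffer rather than
 * copying the payload.
 */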
1897
1898/**
1899 * Handle wakeup request byte.
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001900 */
1901static void smux_handle_wakeup_req(void)
1902{
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001903 unsigned long flags;
1904
1905 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001906 if (smux.power_state == SMUX_PWR_OFF
1907 || smux.power_state == SMUX_PWR_TURNING_ON) {
1908 /* wakeup system */
Angshuman Sarkar98f78122012-07-24 14:50:42 +05301909 SMUX_PWR("smux: %s: Power %d->%d\n", __func__,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001910 smux.power_state, SMUX_PWR_ON);
1911 smux.power_state = SMUX_PWR_ON;
1912 queue_work(smux_tx_wq, &smux_wakeup_work);
1913 queue_work(smux_tx_wq, &smux_tx_work);
1914 queue_delayed_work(smux_tx_wq, &smux_delayed_inactivity_work,
1915 msecs_to_jiffies(SMUX_INACTIVITY_TIMEOUT_MS));
1916 smux_send_byte(SMUX_WAKEUP_ACK);
Eric Holmberga9b06472012-06-22 09:46:34 -06001917 } else if (smux.power_state == SMUX_PWR_ON) {
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001918 smux_send_byte(SMUX_WAKEUP_ACK);
Eric Holmberga9b06472012-06-22 09:46:34 -06001919 } else {
1920 /* stale wakeup request from previous wakeup */
Angshuman Sarkar98f78122012-07-24 14:50:42 +05301921 SMUX_PWR("smux: %s: stale Wakeup REQ in state %d\n",
Eric Holmberga9b06472012-06-22 09:46:34 -06001922 __func__, smux.power_state);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001923 }
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001924 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001925}
1926
1927/**
1928 * Handle wakeup request ack.
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001929 */
1930static void smux_handle_wakeup_ack(void)
1931{
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001932 unsigned long flags;
1933
1934 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001935 if (smux.power_state == SMUX_PWR_TURNING_ON) {
1936 /* received response to wakeup request */
Angshuman Sarkar98f78122012-07-24 14:50:42 +05301937 SMUX_PWR("smux: %s: Power %d->%d\n", __func__,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001938 smux.power_state, SMUX_PWR_ON);
1939 smux.power_state = SMUX_PWR_ON;
1940 queue_work(smux_tx_wq, &smux_tx_work);
1941 queue_delayed_work(smux_tx_wq, &smux_delayed_inactivity_work,
1942 msecs_to_jiffies(SMUX_INACTIVITY_TIMEOUT_MS));
1943
1944 } else if (smux.power_state != SMUX_PWR_ON) {
1945 /* invalid message */
Angshuman Sarkar98f78122012-07-24 14:50:42 +05301946 SMUX_PWR("smux: %s: stale Wakeup REQ ACK in state %d\n",
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001947 __func__, smux.power_state);
1948 }
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001949 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001950}
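
/*
 * Together with smux_wakeup_worker(), the two handlers above form the
 * byte-level wakeup handshake (sketch, local side initiating):
 *
 *	local (TURNING_ON)		remote (OFF)
 *	------------------		------------
 *	SMUX_WAKEUP_REQ   ------->	go to ON, start inactivity timer
 *			  <-------	SMUX_WAKEUP_ACK
 *	go to ON, queue TX work,
 *	start inactivity timer
 *
 * A REQ received while already ON is simply re-ACKed; a REQ or ACK in
 * any power-down state is stale and only logged.
 */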
1951
1952/**
1953 * RX State machine - IDLE state processing.
1954 *
1955 * @data New RX data to process
1956 * @len Length of the data
1957 * @used Return value of length processed
1958 * @flag Error flag - TTY_NORMAL 0 for no failure
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001959 */
1960static void smux_rx_handle_idle(const unsigned char *data,
1961 int len, int *used, int flag)
1962{
1963 int i;
1964
1965 if (flag) {
1966 if (smux_byte_loopback)
1967 smux_receive_byte(SMUX_UT_ECHO_ACK_FAIL,
1968 smux_byte_loopback);
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06001969 SMUX_ERR("%s: TTY error 0x%x - ignoring\n", __func__, flag);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001970 ++*used;
1971 return;
1972 }
1973
1974 for (i = *used; i < len && smux.rx_state == SMUX_RX_IDLE; i++) {
1975 switch (data[i]) {
1976 case SMUX_MAGIC_WORD1:
1977 smux.rx_state = SMUX_RX_MAGIC;
1978 break;
1979 case SMUX_WAKEUP_REQ:
Angshuman Sarkar98f78122012-07-24 14:50:42 +05301980 SMUX_PWR("smux: RX Wakeup REQ\n");
Eric Holmbergf3f34592012-08-28 13:51:14 -06001981 if (unlikely(!smux.remote_is_alive)) {
1982 mutex_lock(&smux.mutex_lha0);
1983 smux.remote_is_alive = 1;
1984 mutex_unlock(&smux.mutex_lha0);
1985 }
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001986 smux_handle_wakeup_req();
1987 break;
1988 case SMUX_WAKEUP_ACK:
Angshuman Sarkar98f78122012-07-24 14:50:42 +05301989 SMUX_PWR("smux: RX Wakeup ACK\n");
Eric Holmbergf3f34592012-08-28 13:51:14 -06001990 if (unlikely(!smux.remote_is_alive)) {
1991 mutex_lock(&smux.mutex_lha0);
1992 smux.remote_is_alive = 1;
1993 mutex_unlock(&smux.mutex_lha0);
1994 }
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001995 smux_handle_wakeup_ack();
1996 break;
1997 default:
1998 /* unexpected character */
1999 if (smux_byte_loopback && data[i] == SMUX_UT_ECHO_REQ)
2000 smux_receive_byte(SMUX_UT_ECHO_ACK_OK,
2001 smux_byte_loopback);
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06002002 SMUX_ERR("%s: parse error 0x%02x - ignoring\n",
2003 __func__, (unsigned)data[i]);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002004 break;
2005 }
2006 }
2007
2008 *used = i;
2009}
2010
2011/**
2012 * RX State machine - Header Magic state processing.
2013 *
2014 * @data New RX data to process
2015 * @len Length of the data
2016 * @used Return value of length processed
2017 * @flag Error flag - TTY_NORMAL 0 for no failure
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002018 */
2019static void smux_rx_handle_magic(const unsigned char *data,
2020 int len, int *used, int flag)
2021{
2022 int i;
2023
2024 if (flag) {
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06002025 SMUX_ERR("%s: TTY RX error %d\n", __func__, flag);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002026 smux_enter_reset();
2027 smux.rx_state = SMUX_RX_FAILURE;
2028 ++*used;
2029 return;
2030 }
2031
2032 for (i = *used; i < len && smux.rx_state == SMUX_RX_MAGIC; i++) {
2033 /* wait for completion of the magic */
2034 if (data[i] == SMUX_MAGIC_WORD2) {
2035 smux.recv_len = 0;
2036 smux.recv_buf[smux.recv_len++] = SMUX_MAGIC_WORD1;
2037 smux.recv_buf[smux.recv_len++] = SMUX_MAGIC_WORD2;
2038 smux.rx_state = SMUX_RX_HDR;
2039 } else {
2040 /* unexpected / trash character */
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06002041 SMUX_ERR(
2042 "%s: rx parse error for char %c; *used=%d, len=%d\n",
2043 __func__, data[i], *used, len);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002044 smux.rx_state = SMUX_RX_IDLE;
2045 }
2046 }
2047
2048 *used = i;
2049}
2050
2051/**
2052 * RX State machine - Packet Header state processing.
2053 *
2054 * @data New RX data to process
2055 * @len Length of the data
2056 * @used Return value of length processed
2057 * @flag Error flag - TTY_NORMAL 0 for no failure
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002058 */
2059static void smux_rx_handle_hdr(const unsigned char *data,
2060 int len, int *used, int flag)
2061{
2062 int i;
2063 struct smux_hdr_t *hdr;
2064
2065 if (flag) {
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06002066 SMUX_ERR("%s: TTY RX error %d\n", __func__, flag);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002067 smux_enter_reset();
2068 smux.rx_state = SMUX_RX_FAILURE;
2069 ++*used;
2070 return;
2071 }
2072
2073 for (i = *used; i < len && smux.rx_state == SMUX_RX_HDR; i++) {
2074 smux.recv_buf[smux.recv_len++] = data[i];
2075
2076 if (smux.recv_len == sizeof(struct smux_hdr_t)) {
2077 /* complete header received */
2078 hdr = (struct smux_hdr_t *)smux.recv_buf;
2079 smux.pkt_remain = hdr->payload_len + hdr->pad_len;
2080 smux.rx_state = SMUX_RX_PAYLOAD;
2081 }
2082 }
2083 *used = i;
2084}
2085
2086/**
2087 * RX State machine - Packet Payload state processing.
2088 *
2089 * @data New RX data to process
2090 * @len Length of the data
2091 * @used Return value of length processed
2092 * @flag Error flag - TTY_NORMAL 0 for no failure
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002093 */
2094static void smux_rx_handle_pkt_payload(const unsigned char *data,
2095 int len, int *used, int flag)
2096{
2097 int remaining;
2098
2099 if (flag) {
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06002100 SMUX_ERR("%s: TTY RX error %d\n", __func__, flag);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002101 smux_enter_reset();
2102 smux.rx_state = SMUX_RX_FAILURE;
2103 ++*used;
2104 return;
2105 }
2106
2107 /* copy data into rx buffer */
2108 if (smux.pkt_remain < (len - *used))
2109 remaining = smux.pkt_remain;
2110 else
2111 remaining = len - *used;
2112
2113 memcpy(&smux.recv_buf[smux.recv_len], &data[*used], remaining);
2114 smux.recv_len += remaining;
2115 smux.pkt_remain -= remaining;
2116 *used += remaining;
2117
2118 if (smux.pkt_remain == 0) {
2119 /* complete packet received */
2120 smux_deserialize(smux.recv_buf, smux.recv_len);
2121 smux.rx_state = SMUX_RX_IDLE;
2122 }
2123}
2124
2125/**
2126 * Feed data to the receive state machine.
2127 *
2128 * @data Pointer to data block
2129 * @len Length of data
2130 * @flag TTY_NORMAL (0) for no error, otherwise TTY Error Flag
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002131 */
2132void smux_rx_state_machine(const unsigned char *data,
2133 int len, int flag)
2134{
Eric Holmberg0560f7a2012-05-31 15:50:26 -06002135 struct smux_rx_worker_data work;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002136
Eric Holmberg0560f7a2012-05-31 15:50:26 -06002137 work.data = data;
2138 work.len = len;
2139 work.flag = flag;
2140 INIT_WORK_ONSTACK(&work.work, smux_rx_worker);
2141 work.work_complete = COMPLETION_INITIALIZER_ONSTACK(work.work_complete);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002142
Eric Holmberg0560f7a2012-05-31 15:50:26 -06002143 queue_work(smux_rx_wq, &work.work);
2144 wait_for_completion(&work.work_complete);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002145}
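
/*
 * This is the on-stack work + completion idiom: the work item and the
 * completion live in the caller's stack frame, smux_rx_wq serializes
 * RX processing (the state machine requires in-order chunks), and the
 * caller blocks until its chunk has been consumed, so it must be in
 * sleepable context. A generic sketch of the same pattern (my_* names
 * are hypothetical):
 *
 *	struct my_job {
 *		struct work_struct work;
 *		struct completion done;
 *		int arg;
 *	};
 *
 *	static void my_worker(struct work_struct *work)
 *	{
 *		struct my_job *job = container_of(work, struct my_job,
 *						  work);
 *		// ... process job->arg ...
 *		complete(&job->done);
 *	}
 *
 *	struct my_job job = { .arg = 42 };
 *	INIT_WORK_ONSTACK(&job.work, my_worker);
 *	init_completion(&job.done);
 *	queue_work(my_wq, &job.work);	// my_wq: any workqueue
 *	wait_for_completion(&job.done);
 */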
2146
2147/**
Eric Holmbergf3f34592012-08-28 13:51:14 -06002148 * Returns true if the remote side has previously acknowledged a wakeup
2149 * request, indicating that the link is alive and active.
2150 *
2151 * @returns true if the remote is alive, false otherwise
2152 */
2153bool smux_remote_is_active(void)
2154{
2155 bool is_active = false;
2156
2157 mutex_lock(&smux.mutex_lha0);
2158 if (smux.remote_is_alive)
2159 is_active = true;
2160 mutex_unlock(&smux.mutex_lha0);
2161
2162 return is_active;
2163}
2164
2165/**
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002166 * Add channel to transmit-ready list and trigger transmit worker.
2167 *
2168 * @ch Channel to add
2169 */
2170static void list_channel(struct smux_lch_t *ch)
2171{
2172 unsigned long flags;
2173
Angshuman Sarkar98f78122012-07-24 14:50:42 +05302174 SMUX_DBG("smux: %s: listing channel %d\n",
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002175 __func__, ch->lcid);
2176
2177 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
2178 spin_lock(&ch->tx_lock_lhb2);
2179 smux.tx_activity_flag = 1;
2180 if (list_empty(&ch->tx_ready_list))
2181 list_add_tail(&ch->tx_ready_list, &smux.lch_tx_ready_list);
2182 spin_unlock(&ch->tx_lock_lhb2);
2183 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2184
2185 queue_work(smux_tx_wq, &smux_tx_work);
2186}
2187
2188/**
2189 * Transmit packet on correct transport and then perform client
2190 * notification.
2191 *
2192 * @ch Channel to transmit on
2193 * @pkt Packet to transmit
2194 */
2195static void smux_tx_pkt(struct smux_lch_t *ch, struct smux_pkt_t *pkt)
2196{
2197 union notifier_metadata meta_write;
2198 int ret;
2199
2200 if (ch && pkt) {
2201 SMUX_LOG_PKT_TX(pkt);
2202 if (ch->local_mode == SMUX_LCH_MODE_LOCAL_LOOPBACK)
2203 ret = smux_tx_loopback(pkt);
2204 else
2205 ret = smux_tx_tty(pkt);
2206
2207 if (pkt->hdr.cmd == SMUX_CMD_DATA) {
2208 /* notify write-done */
2209 meta_write.write.pkt_priv = pkt->priv;
2210 meta_write.write.buffer = pkt->payload;
2211 meta_write.write.len = pkt->hdr.payload_len;
2212 if (ret >= 0) {
Angshuman Sarkar98f78122012-07-24 14:50:42 +05302213 SMUX_DBG("smux: %s: PKT write done\n", __func__);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002214 schedule_notify(ch->lcid, SMUX_WRITE_DONE,
2215 &meta_write);
2216 } else {
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06002217 SMUX_ERR("%s: failed to write pkt %d\n",
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002218 __func__, ret);
2219 schedule_notify(ch->lcid, SMUX_WRITE_FAIL,
2220 &meta_write);
2221 }
2222 }
2223 }
2224}
2225
2226/**
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002227 * Flush pending TTY TX data.
2228 */
2229static void smux_flush_tty(void)
2230{
Eric Holmberg92a67df2012-06-25 13:56:24 -06002231 mutex_lock(&smux.mutex_lha0);
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002232 if (!smux.tty) {
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06002233 SMUX_ERR("%s: ldisc not loaded\n", __func__);
Eric Holmberg92a67df2012-06-25 13:56:24 -06002234 mutex_unlock(&smux.mutex_lha0);
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002235 return;
2236 }
2237
2238 tty_wait_until_sent(smux.tty,
2239 msecs_to_jiffies(TTY_BUFFER_FULL_WAIT_MS));
2240
2241 if (tty_chars_in_buffer(smux.tty) > 0)
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06002242 SMUX_ERR("%s: unable to flush UART queue\n", __func__);
Eric Holmberg92a67df2012-06-25 13:56:24 -06002243
2244 mutex_unlock(&smux.mutex_lha0);
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002245}
2246
2247/**
Eric Holmberged1f00c2012-06-07 09:45:18 -06002248 * Purge TX queue for logical channel.
2249 *
2250 * @ch Logical channel pointer
Eric Holmberg6fcf5322012-07-11 11:46:28 -06002251 * @is_ssr 1 = this is a subsystem restart purge
Eric Holmberged1f00c2012-06-07 09:45:18 -06002252 *
2253 * Must be called with the following spinlocks locked:
2254 * state_lock_lhb1
2255 * tx_lock_lhb2
2256 */
Eric Holmberg6fcf5322012-07-11 11:46:28 -06002257static void smux_purge_ch_tx_queue(struct smux_lch_t *ch, int is_ssr)
Eric Holmberged1f00c2012-06-07 09:45:18 -06002258{
2259 struct smux_pkt_t *pkt;
2260 int send_disconnect = 0;
Eric Holmberg6fcf5322012-07-11 11:46:28 -06002261 struct smux_pkt_t *pkt_tmp;
2262 int is_state_pkt;
Eric Holmberged1f00c2012-06-07 09:45:18 -06002263
Eric Holmberg6fcf5322012-07-11 11:46:28 -06002264 list_for_each_entry_safe(pkt, pkt_tmp, &ch->tx_queue, list) {
2265 is_state_pkt = 0;
Eric Holmberged1f00c2012-06-07 09:45:18 -06002266 if (pkt->hdr.cmd == SMUX_CMD_OPEN_LCH) {
Eric Holmberg6fcf5322012-07-11 11:46:28 -06002267 if (pkt->hdr.flags & SMUX_CMD_OPEN_ACK) {
2268 /* Open ACK must still be sent */
2269 is_state_pkt = 1;
2270 } else {
2271 /* Open never sent -- force to closed state */
2272 ch->local_state = SMUX_LCH_LOCAL_CLOSED;
2273 send_disconnect = 1;
2274 }
2275 } else if (pkt->hdr.cmd == SMUX_CMD_CLOSE_LCH) {
2276 if (pkt->hdr.flags & SMUX_CMD_CLOSE_ACK)
2277 is_state_pkt = 1;
2278 if (!send_disconnect)
2279 is_state_pkt = 1;
Eric Holmberged1f00c2012-06-07 09:45:18 -06002280 } else if (pkt->hdr.cmd == SMUX_CMD_DATA) {
2281 /* Notify client of failed write */
2282 union notifier_metadata meta_write;
2283
2284 meta_write.write.pkt_priv = pkt->priv;
2285 meta_write.write.buffer = pkt->payload;
2286 meta_write.write.len = pkt->hdr.payload_len;
2287 schedule_notify(ch->lcid, SMUX_WRITE_FAIL, &meta_write);
2288 }
Eric Holmberg6fcf5322012-07-11 11:46:28 -06002289
2290 if (!is_state_pkt || is_ssr) {
2291 list_del(&pkt->list);
2292 smux_free_pkt(pkt);
2293 }
Eric Holmberged1f00c2012-06-07 09:45:18 -06002294 }
2295
2296 if (send_disconnect) {
2297 union notifier_metadata meta_disconnected;
2298
2299 meta_disconnected.disconnected.is_ssr = smux.in_reset;
2300 schedule_notify(ch->lcid, SMUX_DISCONNECTED,
2301 &meta_disconnected);
2302 }
2303}
2304
2305/**
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002306 * Power-up the UART.
Eric Holmberg92a67df2012-06-25 13:56:24 -06002307 *
2308 * Must be called with smux.mutex_lha0 already locked.
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002309 */
Eric Holmberg92a67df2012-06-25 13:56:24 -06002310static void smux_uart_power_on_atomic(void)
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002311{
2312 struct uart_state *state;
2313
2314 if (!smux.tty || !smux.tty->driver_data) {
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06002315 SMUX_ERR("%s: unable to find UART port for tty %p\n",
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002316 __func__, smux.tty);
2317 return;
2318 }
2319 state = smux.tty->driver_data;
2320 msm_hs_request_clock_on(state->uart_port);
2321}
2322
2323/**
Eric Holmberg92a67df2012-06-25 13:56:24 -06002324 * Power-up the UART.
2325 */
2326static void smux_uart_power_on(void)
2327{
2328 mutex_lock(&smux.mutex_lha0);
2329 smux_uart_power_on_atomic();
2330 mutex_unlock(&smux.mutex_lha0);
2331}
2332
2333/**
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002334 * Power down the UART.
Eric Holmberg06011322012-07-06 18:17:03 -06002335 *
2336 * Must be called with mutex_lha0 locked.
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002337 */
Eric Holmberg06011322012-07-06 18:17:03 -06002338static void smux_uart_power_off_atomic(void)
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002339{
2340 struct uart_state *state;
2341
2342 if (!smux.tty || !smux.tty->driver_data) {
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06002343 SMUX_ERR("%s: unable to find UART port for tty %p\n",
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002344 __func__, smux.tty);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002346 return;
2347 }
2348 state = smux.tty->driver_data;
2349 msm_hs_request_clock_off(state->uart_port);
Eric Holmberg06011322012-07-06 18:17:03 -06002350}
2351
2352/**
2353 * Power down the UART.
2354 */
2355static void smux_uart_power_off(void)
2356{
2357 mutex_lock(&smux.mutex_lha0);
2358 smux_uart_power_off_atomic();
Eric Holmberg92a67df2012-06-25 13:56:24 -06002359 mutex_unlock(&smux.mutex_lha0);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002360}
2361
2362/**
2363 * TX Wakeup Worker
2364 *
2365 * @work Not used
2366 *
2367 * Do an exponential back-off wakeup sequence with a maximum period
2368 * of approximately 1 second (1 << 20 microseconds).
2369 */
2370static void smux_wakeup_worker(struct work_struct *work)
2371{
2372 unsigned long flags;
2373 unsigned wakeup_delay;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002374
Eric Holmberg2d4f9e82012-06-21 13:12:39 -06002375 if (smux.in_reset)
2376 return;
2377
2378 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
2379 if (smux.power_state == SMUX_PWR_ON) {
2380 /* wakeup complete */
Eric Holmberga9b06472012-06-22 09:46:34 -06002381 smux.pwr_wakeup_delay_us = 1;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002382 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
Angshuman Sarkar98f78122012-07-24 14:50:42 +05302383 SMUX_DBG("smux: %s: wakeup complete\n", __func__);
Eric Holmberg2d4f9e82012-06-21 13:12:39 -06002384
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002385 /*
2386 * Cancel any pending retry. This avoids a race condition with
2387 * a new power-up request because:
2388 * 1) this worker doesn't modify the state
2389 * 2) this worker is processed on the same single-threaded
2390 * workqueue as new TX wakeup requests
2391 */
2392 cancel_delayed_work(&smux_wakeup_delayed_work);
Eric Holmbergd032f5b2012-06-29 19:02:00 -06002393 queue_work(smux_tx_wq, &smux_tx_work);
Eric Holmberga9b06472012-06-22 09:46:34 -06002394 } else if (smux.power_state == SMUX_PWR_TURNING_ON) {
Eric Holmberg2d4f9e82012-06-21 13:12:39 -06002395 /* retry wakeup */
2396 wakeup_delay = smux.pwr_wakeup_delay_us;
2397 smux.pwr_wakeup_delay_us <<= 1;
2398 if (smux.pwr_wakeup_delay_us > SMUX_WAKEUP_DELAY_MAX)
2399 smux.pwr_wakeup_delay_us =
2400 SMUX_WAKEUP_DELAY_MAX;
2401
2402 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
Angshuman Sarkar98f78122012-07-24 14:50:42 +05302403 SMUX_PWR("smux: %s: triggering wakeup\n", __func__);
Eric Holmberg2d4f9e82012-06-21 13:12:39 -06002404 smux_send_byte(SMUX_WAKEUP_REQ);
2405
2406 if (wakeup_delay < SMUX_WAKEUP_DELAY_MIN) {
Angshuman Sarkar98f78122012-07-24 14:50:42 +05302407 SMUX_DBG("smux: %s: sleeping for %u us\n", __func__,
Eric Holmberg2d4f9e82012-06-21 13:12:39 -06002408 wakeup_delay);
2409 usleep_range(wakeup_delay, 2*wakeup_delay);
2410 queue_work(smux_tx_wq, &smux_wakeup_work);
2411 } else {
2412 /* schedule delayed work */
Angshuman Sarkar98f78122012-07-24 14:50:42 +05302413 SMUX_DBG(
2414 "smux: %s: scheduling delayed wakeup in %u ms\n",
Eric Holmberg2d4f9e82012-06-21 13:12:39 -06002415 __func__, wakeup_delay / 1000);
2416 queue_delayed_work(smux_tx_wq,
2417 &smux_wakeup_delayed_work,
2418 msecs_to_jiffies(wakeup_delay / 1000));
2419 }
Eric Holmberga9b06472012-06-22 09:46:34 -06002420 } else {
2421 /* wakeup aborted */
2422 smux.pwr_wakeup_delay_us = 1;
2423 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
Angshuman Sarkar98f78122012-07-24 14:50:42 +05302424 SMUX_PWR("smux: %s: wakeup aborted\n", __func__);
Eric Holmberga9b06472012-06-22 09:46:34 -06002425 cancel_delayed_work(&smux_wakeup_delayed_work);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002426 }
2427}
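
/*
 * Back-off arithmetic for the shifts above: pwr_wakeup_delay_us starts
 * at 1 and doubles after every unanswered SMUX_WAKEUP_REQ (1, 2, 4,
 * ... us). Below SMUX_WAKEUP_DELAY_MIN (1 << 15 us, ~32 ms) the worker
 * busy-waits in usleep_range() and immediately requeues itself; at or
 * above it, the retry becomes delayed work measured in milliseconds,
 * and the delay is clamped at SMUX_WAKEUP_DELAY_MAX (1 << 20 us, ~1 s).
 */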
2428
2429
2430/**
2431 * Inactivity timeout worker. Periodically scheduled when link is active.
2432 * When it detects inactivity, it will power-down the UART link.
2433 *
2434 * @work Work structure (not used)
2435 */
2436static void smux_inactivity_worker(struct work_struct *work)
2437{
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002438 struct smux_pkt_t *pkt;
2439 unsigned long flags;
2440
Eric Holmberg06011322012-07-06 18:17:03 -06002441 if (smux.in_reset)
2442 return;
2443
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002444 spin_lock_irqsave(&smux.rx_lock_lha1, flags);
2445 spin_lock(&smux.tx_lock_lha2);
2446
2447 if (!smux.tx_activity_flag && !smux.rx_activity_flag) {
2448 /* no activity */
2449 if (smux.powerdown_enabled) {
2450 if (smux.power_state == SMUX_PWR_ON) {
2451 /* start power-down sequence */
2452 pkt = smux_alloc_pkt();
2453 if (pkt) {
Angshuman Sarkar98f78122012-07-24 14:50:42 +05302454 SMUX_PWR(
2455 "smux: %s: Power %d->%d\n", __func__,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002456 smux.power_state,
Eric Holmberga9b06472012-06-22 09:46:34 -06002457 SMUX_PWR_TURNING_OFF_FLUSH);
2458 smux.power_state =
2459 SMUX_PWR_TURNING_OFF_FLUSH;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002460
2461 /* send power-down request */
2462 pkt->hdr.cmd = SMUX_CMD_PWR_CTL;
2463 pkt->hdr.flags = 0;
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002464 pkt->hdr.lcid = SMUX_BROADCAST_LCID;
2465 list_add_tail(&pkt->list,
2466 &smux.power_queue);
2467 queue_work(smux_tx_wq, &smux_tx_work);
2468 } else {
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06002469 SMUX_ERR("%s: packet alloc failed\n",
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002470 __func__);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002471 }
2472 }
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002473 }
2474 }
2475 smux.tx_activity_flag = 0;
2476 smux.rx_activity_flag = 0;
2477
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002478 if (smux.power_state == SMUX_PWR_OFF_FLUSH) {
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002479 /* ready to power-down the UART */
Angshuman Sarkar98f78122012-07-24 14:50:42 +05302480 SMUX_PWR("smux: %s: Power %d->%d\n", __func__,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002481 smux.power_state, SMUX_PWR_OFF);
Eric Holmbergff0b0112012-06-08 15:06:57 -06002482 smux.power_state = SMUX_PWR_OFF;
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002483
2484 /* if data is pending, schedule a new wakeup */
2485 if (!list_empty(&smux.lch_tx_ready_list) ||
2486 !list_empty(&smux.power_queue))
2487 queue_work(smux_tx_wq, &smux_tx_work);
2488
2489 spin_unlock(&smux.tx_lock_lha2);
2490 spin_unlock_irqrestore(&smux.rx_lock_lha1, flags);
2491
2492 /* flush UART output queue and power down */
2493 smux_flush_tty();
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002494 smux_uart_power_off();
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002495 } else {
2496 spin_unlock(&smux.tx_lock_lha2);
2497 spin_unlock_irqrestore(&smux.rx_lock_lha1, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002498 }
2499
2500 /* reschedule inactivity worker */
2501 if (smux.power_state != SMUX_PWR_OFF)
2502 queue_delayed_work(smux_tx_wq, &smux_delayed_inactivity_work,
2503 msecs_to_jiffies(SMUX_INACTIVITY_TIMEOUT_MS));
2504}
2505
2506/**
Eric Holmbergb8435c82012-06-05 14:51:29 -06002507 * Remove RX retry packet from channel and free it.
2508 *
Eric Holmbergb8435c82012-06-05 14:51:29 -06002509 * @ch Channel for retry packet
2510 * @retry Retry packet to remove
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002511 *
2512 * @returns 1 if flow control updated; 0 otherwise
2513 *
2514 * Must be called with state_lock_lhb1 locked.
Eric Holmbergb8435c82012-06-05 14:51:29 -06002515 */
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002516int smux_remove_rx_retry(struct smux_lch_t *ch,
Eric Holmbergb8435c82012-06-05 14:51:29 -06002517 struct smux_rx_pkt_retry *retry)
2518{
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002519 int tx_ready = 0;
2520
Eric Holmbergb8435c82012-06-05 14:51:29 -06002521 list_del(&retry->rx_retry_list);
2522 --ch->rx_retry_queue_cnt;
2523 smux_free_pkt(retry->pkt);
2524 kfree(retry);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002525
2526 if ((ch->options & SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP) &&
2527 (ch->rx_retry_queue_cnt <= SMUX_RX_WM_LOW) &&
2528 ch->rx_flow_control_auto) {
2529 ch->rx_flow_control_auto = 0;
2530 smux_rx_flow_control_updated(ch);
2531 schedule_notify(ch->lcid, SMUX_RX_RETRY_LOW_WM_HIT, NULL);
2532 tx_ready = 1;
2533 }
2534 return tx_ready;
Eric Holmbergb8435c82012-06-05 14:51:29 -06002535}
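
/*
 * Auto flow-control lifecycle sketch: with
 * SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP set, rx_flow_control_auto is
 * raised by the RX data path when the retry queue crosses the high
 * watermark, asserting FLOW_CNTL toward the remote. Every retry
 * removed here shrinks the queue; once it reaches SMUX_RX_WM_LOW the
 * bit is dropped, SMUX_RX_RETRY_LOW_WM_HIT is delivered, and the
 * returned tx_ready tells the caller to re-list the channel so the
 * updated status command actually goes out.
 */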
2536
2537/**
Eric Holmberg0560f7a2012-05-31 15:50:26 -06002538 * RX worker handles all receive operations.
2539 *
2540 * @work Work structure contained in struct smux_rx_worker_data
2541 */
2542static void smux_rx_worker(struct work_struct *work)
2543{
2544 unsigned long flags;
2545 int used;
2546 int initial_rx_state;
2547 struct smux_rx_worker_data *w;
2548 const unsigned char *data;
2549 int len;
2550 int flag;
2551
2552 w = container_of(work, struct smux_rx_worker_data, work);
2553 data = w->data;
2554 len = w->len;
2555 flag = w->flag;
2556
2557 spin_lock_irqsave(&smux.rx_lock_lha1, flags);
2558 smux.rx_activity_flag = 1;
2559 spin_unlock_irqrestore(&smux.rx_lock_lha1, flags);
2560
Angshuman Sarkar98f78122012-07-24 14:50:42 +05302561 SMUX_DBG("smux: %s: %p, len=%d, flag=%d\n", __func__, data, len, flag);
Eric Holmberg0560f7a2012-05-31 15:50:26 -06002562 used = 0;
2563 do {
Eric Holmberg06011322012-07-06 18:17:03 -06002564 if (smux.in_reset) {
Angshuman Sarkar98f78122012-07-24 14:50:42 +05302565 SMUX_DBG("smux: %s: abort RX due to reset\n", __func__);
Eric Holmberg06011322012-07-06 18:17:03 -06002566 smux.rx_state = SMUX_RX_IDLE;
2567 break;
2568 }
2569
Angshuman Sarkar98f78122012-07-24 14:50:42 +05302570 SMUX_DBG("smux: %s: state %d; %d of %d\n",
Eric Holmberg0560f7a2012-05-31 15:50:26 -06002571 __func__, smux.rx_state, used, len);
2572 initial_rx_state = smux.rx_state;
2573
2574 switch (smux.rx_state) {
2575 case SMUX_RX_IDLE:
2576 smux_rx_handle_idle(data, len, &used, flag);
2577 break;
2578 case SMUX_RX_MAGIC:
2579 smux_rx_handle_magic(data, len, &used, flag);
2580 break;
2581 case SMUX_RX_HDR:
2582 smux_rx_handle_hdr(data, len, &used, flag);
2583 break;
2584 case SMUX_RX_PAYLOAD:
2585 smux_rx_handle_pkt_payload(data, len, &used, flag);
2586 break;
2587 default:
Angshuman Sarkar98f78122012-07-24 14:50:42 +05302588 SMUX_DBG("smux: %s: invalid state %d\n",
Eric Holmberg0560f7a2012-05-31 15:50:26 -06002589 __func__, smux.rx_state);
2590 smux.rx_state = SMUX_RX_IDLE;
2591 break;
2592 }
2593 } while (used < len || smux.rx_state != initial_rx_state);
2594
2595 complete(&w->work_complete);
2596}
2597
2598/**
Eric Holmbergb8435c82012-06-05 14:51:29 -06002599 * RX Retry worker handles retrying get_rx_buffer calls that previously failed
2600 * because the client was not ready (-EAGAIN).
2601 *
2602 * @work Work structure contained in smux_lch_t structure
2603 */
2604static void smux_rx_retry_worker(struct work_struct *work)
2605{
2606 struct smux_lch_t *ch;
2607 struct smux_rx_pkt_retry *retry;
2608 union notifier_metadata metadata;
2609 int tmp;
2610 unsigned long flags;
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002611 int immediate_retry = 0;
2612 int tx_ready = 0;
Eric Holmbergb8435c82012-06-05 14:51:29 -06002613
2614 ch = container_of(work, struct smux_lch_t, rx_retry_work.work);
2615
2616 /* get next retry packet */
2617 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
Eric Holmberg06011322012-07-06 18:17:03 -06002618 if ((ch->local_state != SMUX_LCH_LOCAL_OPENED) || smux.in_reset) {
Eric Holmbergb8435c82012-06-05 14:51:29 -06002619 /* port has been closed - remove all retries */
2620 while (!list_empty(&ch->rx_retry_queue)) {
2621 retry = list_first_entry(&ch->rx_retry_queue,
2622 struct smux_rx_pkt_retry,
2623 rx_retry_list);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002624 (void)smux_remove_rx_retry(ch, retry);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002625 }
2626 }
2627
2628 if (list_empty(&ch->rx_retry_queue)) {
Angshuman Sarkar98f78122012-07-24 14:50:42 +05302629 SMUX_DBG("smux: %s: retry list empty for channel %d\n",
Eric Holmbergb8435c82012-06-05 14:51:29 -06002630 __func__, ch->lcid);
2631 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2632 return;
2633 }
2634 retry = list_first_entry(&ch->rx_retry_queue,
2635 struct smux_rx_pkt_retry,
2636 rx_retry_list);
2637 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2638
Angshuman Sarkar98f78122012-07-24 14:50:42 +05302639 SMUX_DBG("smux: %s: ch %d retrying rx pkt %p\n",
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002640 __func__, ch->lcid, retry);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002641 metadata.read.pkt_priv = 0;
2642 metadata.read.buffer = 0;
2643 tmp = ch->get_rx_buffer(ch->priv,
2644 (void **)&metadata.read.pkt_priv,
2645 (void **)&metadata.read.buffer,
2646 retry->pkt->hdr.payload_len);
2647 if (tmp == 0 && metadata.read.buffer) {
2648 /* have valid RX buffer */
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002649
Eric Holmbergb8435c82012-06-05 14:51:29 -06002650 memcpy(metadata.read.buffer, retry->pkt->payload,
2651 retry->pkt->hdr.payload_len);
2652 metadata.read.len = retry->pkt->hdr.payload_len;
2653
2654 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002655 tx_ready = smux_remove_rx_retry(ch, retry);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002656 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002657 schedule_notify(ch->lcid, SMUX_READ_DONE, &metadata);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002658 if (tx_ready)
2659 list_channel(ch);
2660
2661 immediate_retry = 1;
Eric Holmbergb8435c82012-06-05 14:51:29 -06002662 } else if (tmp == -EAGAIN ||
2663 (tmp == 0 && !metadata.read.buffer)) {
2664 /* retry again */
2665 retry->timeout_in_ms <<= 1;
2666 if (retry->timeout_in_ms > SMUX_RX_RETRY_MAX_MS) {
2667 /* timed out */
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06002668 SMUX_ERR("%s: ch %d RX retry client timeout\n",
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002669 __func__, ch->lcid);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002670 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002671 tx_ready = smux_remove_rx_retry(ch, retry);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002672 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002673 schedule_notify(ch->lcid, SMUX_READ_FAIL, NULL);
2674 if (tx_ready)
2675 list_channel(ch);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002676 }
2677 } else {
2678 /* client error - drop packet */
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06002679 SMUX_ERR("%s: ch %d RX retry client failed (%d)\n",
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002680 __func__, ch->lcid, tmp);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002681 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002682 tx_ready = smux_remove_rx_retry(ch, retry);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002683 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002684 schedule_notify(ch->lcid, SMUX_READ_FAIL, NULL);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002685 if (tx_ready)
2686 list_channel(ch);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002687 }
2688
2689 /* schedule next retry */
2690 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2691 if (!list_empty(&ch->rx_retry_queue)) {
2692 retry = list_first_entry(&ch->rx_retry_queue,
2693 struct smux_rx_pkt_retry,
2694 rx_retry_list);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002695
2696 if (immediate_retry)
2697 queue_delayed_work(smux_rx_wq, &ch->rx_retry_work, 0);
2698 else
2699 queue_delayed_work(smux_rx_wq, &ch->rx_retry_work,
2700 msecs_to_jiffies(retry->timeout_in_ms));
Eric Holmbergb8435c82012-06-05 14:51:29 -06002701 }
2702 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2703}
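
/*
 * Retry timing, worked through from the shifts above: timeout_in_ms
 * starts at SMUX_RX_RETRY_MIN_MS (1 ms) and doubles after each
 * -EAGAIN: 1, 2, 4, ..., 1024 ms. The doubling that would exceed
 * SMUX_RX_RETRY_MAX_MS (1024 ms) gives up instead, so a client that
 * never produces a buffer sees SMUX_READ_FAIL after roughly
 * 1 + 2 + ... + 1024 ~= 2 s of accumulated retries.
 */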
2704
2705/**
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002706 * Transmit worker handles serializing and transmitting packets onto the
2707 * underlying transport.
2708 *
2709 * @work Work structure (not used)
2710 */
2711static void smux_tx_worker(struct work_struct *work)
2712{
2713 struct smux_pkt_t *pkt;
2714 struct smux_lch_t *ch;
2715 unsigned low_wm_notif;
2716 unsigned lcid;
2717 unsigned long flags;
2718
2719
2720 /*
2721 * Transmit packets in round-robin fashion based upon ready
2722 * channels.
2723 *
2724 * To eliminate the need to hold a lock for the entire
2725 * iteration through the channel ready list, the head of the
2726 * ready-channel list is always the next channel to be
2727 * processed. To send a packet, the first valid packet in
2728 * the head channel is removed and the head channel is then
2729 * rescheduled at the end of the queue by removing it and
2730 * inserting after the tail. The locks can then be released
2731 * while the packet is processed.
2732 */
Eric Holmberged1f00c2012-06-07 09:45:18 -06002733 while (!smux.in_reset) {
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002734 pkt = NULL;
2735 low_wm_notif = 0;
2736
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002737 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002738
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002739 /* handle wakeup if needed */
2740 if (smux.power_state == SMUX_PWR_OFF) {
2741 if (!list_empty(&smux.lch_tx_ready_list) ||
2742 !list_empty(&smux.power_queue)) {
2743 /* data to transmit, do wakeup */
Angshuman Sarkar98f78122012-07-24 14:50:42 +05302744 SMUX_PWR("smux: %s: Power %d->%d\n", __func__,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002745 smux.power_state,
2746 SMUX_PWR_TURNING_ON);
2747 smux.power_state = SMUX_PWR_TURNING_ON;
2748 spin_unlock_irqrestore(&smux.tx_lock_lha2,
2749 flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002750 queue_work(smux_tx_wq, &smux_wakeup_work);
2751 } else {
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002752 /* no activity -- stay asleep */
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002753 spin_unlock_irqrestore(&smux.tx_lock_lha2,
2754 flags);
2755 }
2756 break;
2757 }
2758
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002759 /* process any pending power packets */
2760 if (!list_empty(&smux.power_queue)) {
2761 pkt = list_first_entry(&smux.power_queue,
2762 struct smux_pkt_t, list);
2763 list_del(&pkt->list);
2764 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2765
Eric Holmberga9b06472012-06-22 09:46:34 -06002766 /* Adjust power state if this is a flush command */
2767 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
2768 if (smux.power_state == SMUX_PWR_TURNING_OFF_FLUSH &&
2769 pkt->hdr.cmd == SMUX_CMD_PWR_CTL) {
2770 if (pkt->hdr.flags & SMUX_CMD_PWR_CTL_ACK ||
2771 smux.power_ctl_remote_req_received) {
2772 /*
2773 * Sending remote power-down request ACK
2774 * or sending local power-down request
2775 * and we already received a remote
2776 * power-down request.
2777 */
Angshuman Sarkar98f78122012-07-24 14:50:42 +05302778 SMUX_PWR(
2779 "smux: %s: Power %d->%d\n", __func__,
Eric Holmberga9b06472012-06-22 09:46:34 -06002780 smux.power_state,
2781 SMUX_PWR_OFF_FLUSH);
2782 smux.power_state = SMUX_PWR_OFF_FLUSH;
2783 smux.power_ctl_remote_req_received = 0;
2784 queue_work(smux_tx_wq,
2785 &smux_inactivity_work);
2786 } else {
2787 /* sending local power-down request */
Angshuman Sarkar98f78122012-07-24 14:50:42 +05302788 SMUX_PWR(
2789 "smux: %s: Power %d->%d\n", __func__,
Eric Holmberga9b06472012-06-22 09:46:34 -06002790 smux.power_state,
2791 SMUX_PWR_TURNING_OFF);
2792 smux.power_state = SMUX_PWR_TURNING_OFF;
2793 }
2794 }
2795 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2796
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002797 /* send the packet */
Eric Holmberga9b06472012-06-22 09:46:34 -06002798 smux_uart_power_on();
2799 smux.tx_activity_flag = 1;
Eric Holmberg4dd6e1a2012-06-26 13:46:29 -06002800 SMUX_PWR_PKT_TX(pkt);
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002801 if (!smux_byte_loopback) {
2802 smux_tx_tty(pkt);
2803 smux_flush_tty();
2804 } else {
2805 smux_tx_loopback(pkt);
2806 }
2807
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002808 smux_free_pkt(pkt);
2809 continue;
2810 }
2811
2812 /* get the next ready channel */
2813 if (list_empty(&smux.lch_tx_ready_list)) {
2814 /* no ready channels */
Angshuman Sarkar98f78122012-07-24 14:50:42 +05302815 SMUX_DBG("smux: %s: no more ready channels, exiting\n",
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002816 __func__);
2817 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2818 break;
2819 }
2820 smux.tx_activity_flag = 1;
2821
2822 if (smux.power_state != SMUX_PWR_ON) {
2823 /* channel not ready to transmit */
Angshuman Sarkar98f78122012-07-24 14:50:42 +05302824 SMUX_DBG("smux: %s: waiting for link up (state %d)\n",
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002825 __func__,
2826 smux.power_state);
2827 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2828 break;
2829 }
2830
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002831 /* get the next packet to send and rotate channel list */
2832 ch = list_first_entry(&smux.lch_tx_ready_list,
2833 struct smux_lch_t,
2834 tx_ready_list);
2835
2836 spin_lock(&ch->state_lock_lhb1);
2837 spin_lock(&ch->tx_lock_lhb2);
2838 if (!list_empty(&ch->tx_queue)) {
2839 /*
2840 * If remote TX flow control is enabled or
2841 * the channel is not fully opened, then only
2842 * send command packets.
2843 */
2844 if (ch->tx_flow_control || !IS_FULLY_OPENED(ch)) {
2845 struct smux_pkt_t *curr;
2846 list_for_each_entry(curr, &ch->tx_queue, list) {
2847 if (curr->hdr.cmd != SMUX_CMD_DATA) {
2848 pkt = curr;
2849 break;
2850 }
2851 }
2852 } else {
2853 /* get next cmd/data packet to send */
2854 pkt = list_first_entry(&ch->tx_queue,
2855 struct smux_pkt_t, list);
2856 }
2857 }
2858
2859 if (pkt) {
2860 list_del(&pkt->list);
2861
2862 /* update packet stats */
2863 if (pkt->hdr.cmd == SMUX_CMD_DATA) {
2864 --ch->tx_pending_data_cnt;
2865 if (ch->notify_lwm &&
2866 ch->tx_pending_data_cnt
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002867 <= SMUX_TX_WM_LOW) {
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002868 ch->notify_lwm = 0;
2869 low_wm_notif = 1;
2870 }
2871 }
2872
2873 /* advance to the next ready channel */
2874 list_rotate_left(&smux.lch_tx_ready_list);
2875 } else {
2876 /* no data in channel to send, remove from ready list */
2877 list_del(&ch->tx_ready_list);
2878 INIT_LIST_HEAD(&ch->tx_ready_list);
2879 }
2880 lcid = ch->lcid;
2881 spin_unlock(&ch->tx_lock_lhb2);
2882 spin_unlock(&ch->state_lock_lhb1);
2883 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2884
2885 if (low_wm_notif)
2886 schedule_notify(lcid, SMUX_LOW_WM_HIT, NULL);
2887
2888 /* send the packet */
2889 smux_tx_pkt(ch, pkt);
2890 smux_free_pkt(pkt);
2891 }
2892}
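
/*
 * Round-robin example: with ready channels [A, B, C], one packet is
 * taken from A, then list_rotate_left() leaves the list as [B, C, A]
 * so the next iteration services B. A channel whose tx_queue drains
 * is unlinked instead of rotated and costs nothing until
 * list_channel() re-adds it. All locks are dropped before
 * smux_tx_pkt(), so a slow UART write never prevents other channels
 * from being marked ready.
 */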
2893
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002894/**
2895 * Update the RX flow control (sent in the TIOCM Status command).
2896 *
2897 * @ch Channel for update
2898 *
2899 * @returns 1 for updated, 0 for not updated
2900 *
2901 * Must be called with ch->state_lock_lhb1 locked.
2902 */
2903static int smux_rx_flow_control_updated(struct smux_lch_t *ch)
2904{
2905 int updated = 0;
2906 int prev_state;
2907
2908 prev_state = ch->local_tiocm & SMUX_CMD_STATUS_FLOW_CNTL;
2909
2910 if (ch->rx_flow_control_client || ch->rx_flow_control_auto)
2911 ch->local_tiocm |= SMUX_CMD_STATUS_FLOW_CNTL;
2912 else
2913 ch->local_tiocm &= ~SMUX_CMD_STATUS_FLOW_CNTL;
2914
2915 if (prev_state != (ch->local_tiocm & SMUX_CMD_STATUS_FLOW_CNTL)) {
2916 smux_send_status_cmd(ch);
2917 updated = 1;
2918 }
2919
2920 return updated;
2921}
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002922
Eric Holmberg06011322012-07-06 18:17:03 -06002923/**
2924 * Flush all SMUX workqueues.
2925 *
2926 * This sets the reset bit to abort any processing loops and then
2927 * flushes the workqueues to ensure that no new pending work is
2928 * running. Do not call with any locks used by workers held as
2929 * this will result in a deadlock.
2930 */
2931static void smux_flush_workqueues(void)
2932{
2933 smux.in_reset = 1;
2934
Angshuman Sarkar98f78122012-07-24 14:50:42 +05302935 SMUX_DBG("smux: %s: flushing tx wq\n", __func__);
Eric Holmberg06011322012-07-06 18:17:03 -06002936 flush_workqueue(smux_tx_wq);
Angshuman Sarkar98f78122012-07-24 14:50:42 +05302937 SMUX_DBG("smux: %s: flushing rx wq\n", __func__);
Eric Holmberg06011322012-07-06 18:17:03 -06002938 flush_workqueue(smux_rx_wq);
Angshuman Sarkar98f78122012-07-24 14:50:42 +05302939 SMUX_DBG("smux: %s: flushing notify wq\n", __func__);
Eric Holmberg06011322012-07-06 18:17:03 -06002940 flush_workqueue(smux_notify_wq);
2941}
2942
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002943/**********************************************************************/
2944/* Kernel API */
2945/**********************************************************************/
2946
2947/**
2948 * Set or clear channel option using the SMUX_CH_OPTION_* channel
2949 * flags.
2950 *
2951 * @lcid Logical channel ID
2952 * @set Options to set
2953 * @clear Options to clear
2954 *
2955 * @returns 0 for success, < 0 for failure
2956 */
2957int msm_smux_set_ch_option(uint8_t lcid, uint32_t set, uint32_t clear)
2958{
2959 unsigned long flags;
2960 struct smux_lch_t *ch;
2961 int tx_ready = 0;
2962 int ret = 0;
2963
2964 if (smux_assert_lch_id(lcid))
2965 return -ENXIO;
2966
2967 ch = &smux_lch[lcid];
2968 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2969
2970 /* Local loopback mode */
2971 if (set & SMUX_CH_OPTION_LOCAL_LOOPBACK)
2972 ch->local_mode = SMUX_LCH_MODE_LOCAL_LOOPBACK;
2973
2974 if (clear & SMUX_CH_OPTION_LOCAL_LOOPBACK)
2975 ch->local_mode = SMUX_LCH_MODE_NORMAL;
2976
2977 /* Remote loopback mode */
2978 if (set & SMUX_CH_OPTION_REMOTE_LOOPBACK)
2979 ch->local_mode = SMUX_LCH_MODE_REMOTE_LOOPBACK;
2980
2981 if (clear & SMUX_CH_OPTION_REMOTE_LOOPBACK)
2982 ch->local_mode = SMUX_LCH_MODE_NORMAL;
2983
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002984 /* RX Flow control */
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002985 if (set & SMUX_CH_OPTION_REMOTE_TX_STOP) {
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002986 ch->rx_flow_control_client = 1;
2987 tx_ready |= smux_rx_flow_control_updated(ch);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002988 }
2989
2990 if (clear & SMUX_CH_OPTION_REMOTE_TX_STOP) {
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002991 ch->rx_flow_control_client = 0;
2992 tx_ready |= smux_rx_flow_control_updated(ch);
2993 }
2994
2995 /* Auto RX Flow Control */
2996 if (set & SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP) {
Angshuman Sarkar98f78122012-07-24 14:50:42 +05302997 SMUX_DBG("smux: %s: auto rx flow control option enabled\n",
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002998 __func__);
2999 ch->options |= SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP;
3000 }
3001
3002 if (clear & SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP) {
Angshuman Sarkar98f78122012-07-24 14:50:42 +05303003 SMUX_DBG("smux: %s: auto rx flow control option disabled\n",
Eric Holmberg2e0906f2012-06-26 13:29:14 -06003004 __func__);
3005 ch->options &= ~SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP;
3006 ch->rx_flow_control_auto = 0;
3007 tx_ready |= smux_rx_flow_control_updated(ch);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003008 }
3009
3010 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
3011
3012 if (tx_ready)
3013 list_channel(ch);
3014
3015 return ret;
3016}
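
/*
 * Usage sketch (SMUX_TEST_LCID is a hypothetical channel number):
 * putting a channel into local loopback for testing and then back:
 *
 *	ret = msm_smux_set_ch_option(SMUX_TEST_LCID,
 *				     SMUX_CH_OPTION_LOCAL_LOOPBACK, 0);
 *	...
 *	ret = msm_smux_set_ch_option(SMUX_TEST_LCID, 0,
 *				     SMUX_CH_OPTION_LOCAL_LOOPBACK);
 *
 * Note that clearing a flow-control option can make the channel
 * tx_ready, which is why list_channel() runs after the lock is
 * dropped above.
 */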
3017
3018/**
3019 * Starts the opening sequence for a logical channel.
3020 *
3021 * @lcid Logical channel ID
3022 * @priv Free for client usage
3023 * @notify Event notification function
3024 * @get_rx_buffer Function used to provide a receive buffer to SMUX
3025 *
3026 * @returns 0 for success, <0 otherwise
3027 *
3028 * A channel must be fully closed (either not previously opened, or
3029 * msm_smux_close() has been called and the SMUX_DISCONNECTED notification
3030 * has been received).
3031 *
3032 * Once the remote side is opened, the client will receive a SMUX_CONNECTED
3033 * event.
3034 */
int msm_smux_open(uint8_t lcid, void *priv,
	void (*notify)(void *priv, int event_type, const void *metadata),
	int (*get_rx_buffer)(void *priv, void **pkt_priv, void **buffer,
								int size))
{
	int ret;
	struct smux_lch_t *ch;
	struct smux_pkt_t *pkt;
	int tx_ready = 0;
	unsigned long flags;

	if (smux_assert_lch_id(lcid))
		return -ENXIO;

	ch = &smux_lch[lcid];
	spin_lock_irqsave(&ch->state_lock_lhb1, flags);

	if (ch->local_state == SMUX_LCH_LOCAL_CLOSING) {
		ret = -EAGAIN;
		goto out;
	}

	if (ch->local_state != SMUX_LCH_LOCAL_CLOSED) {
		SMUX_ERR("%s: open lcid %d local state %x invalid\n",
				__func__, lcid, ch->local_state);
		ret = -EINVAL;
		goto out;
	}

	SMUX_DBG("smux: lcid %d local state 0x%x -> 0x%x\n", lcid,
			ch->local_state,
			SMUX_LCH_LOCAL_OPENING);

	ch->rx_flow_control_auto = 0;
	ch->local_state = SMUX_LCH_LOCAL_OPENING;

	ch->priv = priv;
	ch->notify = notify;
	ch->get_rx_buffer = get_rx_buffer;
	ret = 0;

	/* Send Open Command */
	pkt = smux_alloc_pkt();
	if (!pkt) {
		ret = -ENOMEM;
		goto out;
	}
	pkt->hdr.magic = SMUX_MAGIC;
	pkt->hdr.cmd = SMUX_CMD_OPEN_LCH;
	pkt->hdr.flags = SMUX_CMD_OPEN_POWER_COLLAPSE;
	if (ch->local_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK)
		pkt->hdr.flags |= SMUX_CMD_OPEN_REMOTE_LOOPBACK;
	pkt->hdr.lcid = lcid;
	pkt->hdr.payload_len = 0;
	pkt->hdr.pad_len = 0;
	smux_tx_queue(pkt, ch, 0);
	tx_ready = 1;

out:
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
	smux_rx_flow_control_updated(ch);
	if (tx_ready)
		list_channel(ch);
	return ret;
}
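
/*
 * Example (illustrative sketch, not compiled): a minimal client open
 * sequence.  The names client_notify, client_rx_buf, client_ctx, open_done,
 * and lcid are hypothetical; the events and entry points are the ones
 * documented above.
 *
 *	static void client_notify(void *priv, int event_type,
 *				const void *metadata)
 *	{
 *		if (event_type == SMUX_CONNECTED)
 *			complete(&open_done);	(channel is fully open)
 *	}
 *
 *	static int client_rx_buf(void *priv, void **pkt_priv, void **buffer,
 *				int size)
 *	{
 *		*pkt_priv = NULL;
 *		*buffer = kmalloc(size, GFP_ATOMIC);
 *		return *buffer ? 0 : -ENOMEM;
 *	}
 *
 *	rc = msm_smux_open(lcid, client_ctx, client_notify, client_rx_buf);
 *	if (rc == -EAGAIN)
 *		... channel still closing; retry after SMUX_DISCONNECTED
 */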

/**
 * Starts the closing sequence for a logical channel.
 *
 * @lcid Logical channel ID
 *
 * @returns 0 for success, <0 otherwise
 *
 * Once the close event has been acknowledged by the remote side, the client
 * will receive a SMUX_DISCONNECTED notification.
 */
int msm_smux_close(uint8_t lcid)
{
	int ret = 0;
	struct smux_lch_t *ch;
	struct smux_pkt_t *pkt;
	int tx_ready = 0;
	unsigned long flags;

	if (smux_assert_lch_id(lcid))
		return -ENXIO;

	ch = &smux_lch[lcid];
	spin_lock_irqsave(&ch->state_lock_lhb1, flags);
	ch->local_tiocm = 0x0;
	ch->remote_tiocm = 0x0;
	ch->tx_pending_data_cnt = 0;
	ch->notify_lwm = 0;
	ch->tx_flow_control = 0;

	/* Purge TX queue */
	spin_lock(&ch->tx_lock_lhb2);
	smux_purge_ch_tx_queue(ch, 0);
	spin_unlock(&ch->tx_lock_lhb2);

	/* Send Close Command */
	if (ch->local_state == SMUX_LCH_LOCAL_OPENED ||
	    ch->local_state == SMUX_LCH_LOCAL_OPENING) {
		SMUX_DBG("smux: lcid %d local state 0x%x -> 0x%x\n", lcid,
				ch->local_state,
				SMUX_LCH_LOCAL_CLOSING);

		ch->local_state = SMUX_LCH_LOCAL_CLOSING;
		pkt = smux_alloc_pkt();
		if (pkt) {
			pkt->hdr.cmd = SMUX_CMD_CLOSE_LCH;
			pkt->hdr.flags = 0;
			pkt->hdr.lcid = lcid;
			pkt->hdr.payload_len = 0;
			pkt->hdr.pad_len = 0;
			smux_tx_queue(pkt, ch, 0);
			tx_ready = 1;
		} else {
			SMUX_ERR("%s: pkt allocation failed\n", __func__);
			ret = -ENOMEM;
		}

		/* Purge RX retry queue */
		if (ch->rx_retry_queue_cnt)
			queue_delayed_work(smux_rx_wq, &ch->rx_retry_work, 0);
	}
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);

	if (tx_ready)
		list_channel(ch);

	return ret;
}
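
/*
 * Example (illustrative sketch, not compiled): tearing down the channel
 * opened above.  A return of 0 only means the CLOSE command was queued;
 * the lcid is not reusable until the notify callback sees
 * SMUX_DISCONNECTED.
 *
 *	rc = msm_smux_close(lcid);
 *	if (rc < 0)
 *		pr_err("%s: close failed %d\n", __func__, rc);
 */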

/**
 * Write data to a logical channel.
 *
 * @lcid Logical channel ID
 * @pkt_priv Client data that will be returned with the SMUX_WRITE_DONE or
 *           SMUX_WRITE_FAIL notification.
 * @data Data to write
 * @len Length of @data
 *
 * @returns 0 for success, <0 otherwise
 *
 * Data may be written immediately after msm_smux_open() is called,
 * but the data will wait in the transmit queue until the channel has
 * been fully opened.
 *
 * Once the data has been written, the client will receive either a completion
 * (SMUX_WRITE_DONE) or a failure notice (SMUX_WRITE_FAIL).
 */
int msm_smux_write(uint8_t lcid, void *pkt_priv, const void *data, int len)
{
	struct smux_lch_t *ch;
	struct smux_pkt_t *pkt = NULL;
	int tx_ready = 0;
	unsigned long flags;
	int ret;

	if (smux_assert_lch_id(lcid))
		return -ENXIO;

	ch = &smux_lch[lcid];
	spin_lock_irqsave(&ch->state_lock_lhb1, flags);

	if (ch->local_state != SMUX_LCH_LOCAL_OPENED &&
	    ch->local_state != SMUX_LCH_LOCAL_OPENING) {
		SMUX_ERR("%s: invalid local state %d channel %d\n",
			__func__, ch->local_state, lcid);
		ret = -EINVAL;
		goto out;
	}

	if (len > SMUX_MAX_PKT_SIZE - sizeof(struct smux_hdr_t)) {
		SMUX_ERR("%s: payload %d too large\n",
				__func__, len);
		ret = -E2BIG;
		goto out;
	}

	pkt = smux_alloc_pkt();
	if (!pkt) {
		ret = -ENOMEM;
		goto out;
	}

	pkt->hdr.cmd = SMUX_CMD_DATA;
	pkt->hdr.lcid = lcid;
	pkt->hdr.flags = 0;
	pkt->hdr.payload_len = len;
	pkt->payload = (void *)data;
	pkt->priv = pkt_priv;
	pkt->hdr.pad_len = 0;

	spin_lock(&ch->tx_lock_lhb2);
	/* verify high watermark */
	SMUX_DBG("smux: %s: pending %d\n", __func__, ch->tx_pending_data_cnt);

	if (ch->tx_pending_data_cnt >= SMUX_TX_WM_HIGH) {
		SMUX_ERR("%s: ch %d high watermark %d exceeded %d\n",
				__func__, lcid, SMUX_TX_WM_HIGH,
				ch->tx_pending_data_cnt);
		ret = -EAGAIN;
		goto out_inner;
	}

	/* queue packet for transmit */
	if (++ch->tx_pending_data_cnt == SMUX_TX_WM_HIGH) {
		ch->notify_lwm = 1;
		SMUX_ERR("%s: high watermark hit\n", __func__);
		schedule_notify(lcid, SMUX_HIGH_WM_HIT, NULL);
	}
	list_add_tail(&pkt->list, &ch->tx_queue);

	/* add to ready list */
	if (IS_FULLY_OPENED(ch))
		tx_ready = 1;

	ret = 0;

out_inner:
	spin_unlock(&ch->tx_lock_lhb2);

out:
	/* free the packet only if it was actually allocated */
	if (ret && pkt)
		smux_free_pkt(pkt);
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);

	if (tx_ready)
		list_channel(ch);

	return ret;
}
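
/*
 * Example (illustrative sketch, not compiled): queuing a write.  The data
 * pointer is not copied and must stay valid until SMUX_WRITE_DONE or
 * SMUX_WRITE_FAIL arrives, so a heap buffer is passed as both the payload
 * and pkt_priv to make the free easy in the notify callback.  Names other
 * than the exported API are hypothetical.
 *
 *	buf = kmalloc(len, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	memcpy(buf, src, len);
 *	rc = msm_smux_write(lcid, buf, buf, len);
 *	if (rc == -EAGAIN)
 *		kfree(buf);	(high watermark; retry after queue drains)
 */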

/**
 * Returns true if the TX queue is currently full (high water mark).
 *
 * @lcid Logical channel ID
 * @returns 0 if channel is not full
 *          1 if it is full
 *          < 0 for error
 */
int msm_smux_is_ch_full(uint8_t lcid)
{
	struct smux_lch_t *ch;
	unsigned long flags;
	int is_full = 0;

	if (smux_assert_lch_id(lcid))
		return -ENXIO;

	ch = &smux_lch[lcid];

	spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
	if (ch->tx_pending_data_cnt >= SMUX_TX_WM_HIGH)
		is_full = 1;
	spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);

	return is_full;
}

/**
 * Returns true if the TX queue has space for more packets (it is at or
 * below the low water mark).
 *
 * @lcid Logical channel ID
 * @returns 0 if channel is above low watermark
 *          1 if it's at or below the low watermark
 *          < 0 for error
 */
int msm_smux_is_ch_low(uint8_t lcid)
{
	struct smux_lch_t *ch;
	unsigned long flags;
	int is_low = 0;

	if (smux_assert_lch_id(lcid))
		return -ENXIO;

	ch = &smux_lch[lcid];

	spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
	if (ch->tx_pending_data_cnt <= SMUX_TX_WM_LOW)
		is_low = 1;
	spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);

	return is_low;
}
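
/*
 * Example (illustrative sketch, not compiled): pacing a producer off the
 * two watermark queries above.  The SMUX_HIGH_WM_HIT notification in
 * msm_smux_write() reports the same condition asynchronously; the helper
 * names below are hypothetical.
 *
 *	if (msm_smux_is_ch_full(lcid) == 1)
 *		client_pause_tx();
 *	...
 *	if (msm_smux_is_ch_low(lcid) == 1)
 *		client_resume_tx();
 */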

/**
 * Send TIOCM status update.
 *
 * @ch Channel for update
 *
 * @returns 0 for success, <0 for failure
 *
 * Channel lock must be held before calling.
 */
static int smux_send_status_cmd(struct smux_lch_t *ch)
{
	struct smux_pkt_t *pkt;

	if (!ch)
		return -EINVAL;

	pkt = smux_alloc_pkt();
	if (!pkt)
		return -ENOMEM;

	pkt->hdr.lcid = ch->lcid;
	pkt->hdr.cmd = SMUX_CMD_STATUS;
	pkt->hdr.flags = ch->local_tiocm;
	pkt->hdr.payload_len = 0;
	pkt->hdr.pad_len = 0;
	smux_tx_queue(pkt, ch, 0);

	return 0;
}

/**
 * Internal helper function for getting the TIOCM status with
 * state_lock_lhb1 already locked.
 *
 * @ch Channel pointer
 *
 * @returns TIOCM status
 */
long msm_smux_tiocm_get_atomic(struct smux_lch_t *ch)
{
	long status = 0x0;

	status |= (ch->remote_tiocm & SMUX_CMD_STATUS_RTC) ? TIOCM_DSR : 0;
	status |= (ch->remote_tiocm & SMUX_CMD_STATUS_RTR) ? TIOCM_CTS : 0;
	status |= (ch->remote_tiocm & SMUX_CMD_STATUS_RI) ? TIOCM_RI : 0;
	status |= (ch->remote_tiocm & SMUX_CMD_STATUS_DCD) ? TIOCM_CD : 0;

	status |= (ch->local_tiocm & SMUX_CMD_STATUS_RTC) ? TIOCM_DTR : 0;
	status |= (ch->local_tiocm & SMUX_CMD_STATUS_RTR) ? TIOCM_RTS : 0;

	return status;
}

/**
 * Get the TIOCM status bits.
 *
 * @lcid Logical channel ID
 *
 * @returns >= 0 TIOCM status bits
 *          < 0 Error condition
 */
long msm_smux_tiocm_get(uint8_t lcid)
{
	struct smux_lch_t *ch;
	unsigned long flags;
	long status = 0x0;

	if (smux_assert_lch_id(lcid))
		return -ENXIO;

	ch = &smux_lch[lcid];
	spin_lock_irqsave(&ch->state_lock_lhb1, flags);
	status = msm_smux_tiocm_get_atomic(ch);
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);

	return status;
}

/**
 * Set/clear the TIOCM status bits.
 *
 * @lcid Logical channel ID
 * @set Bits to set
 * @clear Bits to clear
 *
 * @returns 0 for success; < 0 for failure
 *
 * If a bit is specified in both the @set and @clear masks, then the clear bit
 * definition will dominate and the bit will be cleared.
 */
int msm_smux_tiocm_set(uint8_t lcid, uint32_t set, uint32_t clear)
{
	struct smux_lch_t *ch;
	unsigned long flags;
	uint8_t old_status;
	uint8_t status_set = 0x0;
	uint8_t status_clear = 0x0;
	int tx_ready = 0;
	int ret = 0;

	if (smux_assert_lch_id(lcid))
		return -ENXIO;

	ch = &smux_lch[lcid];
	spin_lock_irqsave(&ch->state_lock_lhb1, flags);

	status_set |= (set & TIOCM_DTR) ? SMUX_CMD_STATUS_RTC : 0;
	status_set |= (set & TIOCM_RTS) ? SMUX_CMD_STATUS_RTR : 0;
	status_set |= (set & TIOCM_RI) ? SMUX_CMD_STATUS_RI : 0;
	status_set |= (set & TIOCM_CD) ? SMUX_CMD_STATUS_DCD : 0;

	status_clear |= (clear & TIOCM_DTR) ? SMUX_CMD_STATUS_RTC : 0;
	status_clear |= (clear & TIOCM_RTS) ? SMUX_CMD_STATUS_RTR : 0;
	status_clear |= (clear & TIOCM_RI) ? SMUX_CMD_STATUS_RI : 0;
	status_clear |= (clear & TIOCM_CD) ? SMUX_CMD_STATUS_DCD : 0;

	old_status = ch->local_tiocm;
	ch->local_tiocm |= status_set;
	ch->local_tiocm &= ~status_clear;

	if (ch->local_tiocm != old_status) {
		ret = smux_send_status_cmd(ch);
		tx_ready = 1;
	}
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);

	if (tx_ready)
		list_channel(ch);

	return ret;
}
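
/*
 * Example (illustrative sketch, not compiled): asserting DTR/RTS and then
 * checking whether the remote side reports ready-to-receive.  TIOCM_* are
 * the standard termios modem bits mapped in msm_smux_tiocm_get_atomic();
 * lcid, rc, and status are hypothetical client-side names.
 *
 *	rc = msm_smux_tiocm_set(lcid, TIOCM_DTR | TIOCM_RTS, 0);
 *	status = msm_smux_tiocm_get(lcid);
 *	if (status >= 0 && (status & TIOCM_CTS))
 *		... remote RTR is set; the peer can accept data
 */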

/**********************************************************************/
/* Subsystem Restart                                                  */
/**********************************************************************/
static struct notifier_block ssr_notifier = {
	.notifier_call = ssr_notifier_cb,
};

/**
 * Handle Subsystem Restart (SSR) notifications.
 *
 * @this Pointer to ssr_notifier
 * @code SSR Code
 * @data Data pointer (not used)
 */
static int ssr_notifier_cb(struct notifier_block *this,
				unsigned long code,
				void *data)
{
	unsigned long flags;
	int i;
	int tmp;
	int power_off_uart = 0;

	if (code == SUBSYS_BEFORE_SHUTDOWN) {
		SMUX_DBG("smux: %s: ssr - before shutdown\n", __func__);
		mutex_lock(&smux.mutex_lha0);
		smux.in_reset = 1;
		smux.remote_is_alive = 0;
		mutex_unlock(&smux.mutex_lha0);
		return NOTIFY_DONE;
	} else if (code == SUBSYS_AFTER_POWERUP) {
		/* re-register platform devices */
		SMUX_DBG("smux: %s: ssr - after power-up\n", __func__);
		mutex_lock(&smux.mutex_lha0);
		if (smux.ld_open_count > 0
				&& !smux.platform_devs_registered) {
			for (i = 0; i < ARRAY_SIZE(smux_devs); ++i) {
				SMUX_DBG("smux: %s: register pdev '%s'\n",
					__func__, smux_devs[i].name);
				smux_devs[i].dev.release = smux_pdev_release;
				tmp = platform_device_register(&smux_devs[i]);
				if (tmp)
					SMUX_ERR(
					"%s: error %d registering device %s\n",
					__func__, tmp, smux_devs[i].name);
			}
			smux.platform_devs_registered = 1;
		}
		mutex_unlock(&smux.mutex_lha0);
		return NOTIFY_DONE;
	} else if (code != SUBSYS_AFTER_SHUTDOWN) {
		return NOTIFY_DONE;
	}
	SMUX_DBG("smux: %s: ssr - after shutdown\n", __func__);

	/* Cleanup channels */
	smux_flush_workqueues();
	mutex_lock(&smux.mutex_lha0);
	if (smux.ld_open_count > 0) {
		smux_lch_purge();
		if (smux.tty)
			tty_driver_flush_buffer(smux.tty);

		/* Unregister platform devices */
		if (smux.platform_devs_registered) {
			for (i = 0; i < ARRAY_SIZE(smux_devs); ++i) {
				SMUX_DBG("smux: %s: unregister pdev '%s'\n",
					__func__, smux_devs[i].name);
				platform_device_unregister(&smux_devs[i]);
			}
			smux.platform_devs_registered = 0;
		}

		/* Power-down UART */
		spin_lock_irqsave(&smux.tx_lock_lha2, flags);
		if (smux.power_state != SMUX_PWR_OFF) {
			SMUX_PWR("smux: %s: SSR - turning off UART\n",
					__func__);
			smux.power_state = SMUX_PWR_OFF;
			power_off_uart = 1;
		}
		smux.powerdown_enabled = 0;
		spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);

		if (power_off_uart)
			smux_uart_power_off_atomic();
	}
	smux.tx_activity_flag = 0;
	smux.rx_activity_flag = 0;
	smux.rx_state = SMUX_RX_IDLE;
	smux.in_reset = 0;
	smux.remote_is_alive = 0;
	mutex_unlock(&smux.mutex_lha0);

	return NOTIFY_DONE;
}

/**********************************************************************/
/* Line Discipline Interface                                          */
/**********************************************************************/
static void smux_pdev_release(struct device *dev)
{
	struct platform_device *pdev;

	pdev = container_of(dev, struct platform_device, dev);
	SMUX_DBG("smux: %s: releasing pdev %p '%s'\n",
			__func__, pdev, pdev->name);
	memset(&pdev->dev, 0x0, sizeof(pdev->dev));
}

static int smuxld_open(struct tty_struct *tty)
{
	int i;
	int tmp;
	unsigned long flags;

	if (!smux.is_initialized)
		return -ENODEV;

	mutex_lock(&smux.mutex_lha0);
	if (smux.ld_open_count) {
		SMUX_ERR("%s: %p multiple instances not supported\n",
			__func__, tty);
		mutex_unlock(&smux.mutex_lha0);
		return -EEXIST;
	}

	if (tty->ops->write == NULL) {
		SMUX_ERR("%s: tty->ops->write is NULL\n", __func__);
		mutex_unlock(&smux.mutex_lha0);
		return -EINVAL;
	}

	/* connect to TTY */
	++smux.ld_open_count;
	smux.in_reset = 0;
	smux.tty = tty;
	tty->disc_data = &smux;
	tty->receive_room = TTY_RECEIVE_ROOM;
	tty_driver_flush_buffer(tty);

	/* power-down the UART if we are idle */
	spin_lock_irqsave(&smux.tx_lock_lha2, flags);
	if (smux.power_state == SMUX_PWR_OFF) {
		SMUX_PWR("smux: %s: powering off uart\n", __func__);
		smux.power_state = SMUX_PWR_OFF_FLUSH;
		spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
		queue_work(smux_tx_wq, &smux_inactivity_work);
	} else {
		spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
	}

	/* register platform devices */
	for (i = 0; i < ARRAY_SIZE(smux_devs); ++i) {
		SMUX_DBG("smux: %s: register pdev '%s'\n",
			__func__, smux_devs[i].name);
		smux_devs[i].dev.release = smux_pdev_release;
		tmp = platform_device_register(&smux_devs[i]);
		if (tmp)
			SMUX_ERR("%s: error %d registering device %s\n",
					__func__, tmp, smux_devs[i].name);
	}
	smux.platform_devs_registered = 1;
	mutex_unlock(&smux.mutex_lha0);
	return 0;
}

static void smuxld_close(struct tty_struct *tty)
{
	unsigned long flags;
	int power_up_uart = 0;
	int i;

	SMUX_DBG("smux: %s: ldisc unload\n", __func__);
	smux_flush_workqueues();

	mutex_lock(&smux.mutex_lha0);
	if (smux.ld_open_count <= 0) {
		SMUX_ERR("%s: invalid ld count %d\n", __func__,
			smux.ld_open_count);
		mutex_unlock(&smux.mutex_lha0);
		return;
	}
	--smux.ld_open_count;

	/* Cleanup channels */
	smux_lch_purge();

	/* Unregister platform devices */
	if (smux.platform_devs_registered) {
		for (i = 0; i < ARRAY_SIZE(smux_devs); ++i) {
			SMUX_DBG("smux: %s: unregister pdev '%s'\n",
				__func__, smux_devs[i].name);
			platform_device_unregister(&smux_devs[i]);
		}
		smux.platform_devs_registered = 0;
	}

	/* Schedule UART power-up if it's down */
	spin_lock_irqsave(&smux.tx_lock_lha2, flags);
	if (smux.power_state == SMUX_PWR_OFF)
		power_up_uart = 1;
	smux.power_state = SMUX_PWR_OFF;
	smux.powerdown_enabled = 0;
	smux.tx_activity_flag = 0;
	smux.rx_activity_flag = 0;
	spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);

	if (power_up_uart)
		smux_uart_power_on_atomic();

	smux.rx_state = SMUX_RX_IDLE;

	/* Disconnect from TTY */
	smux.tty = NULL;
	smux.remote_is_alive = 0;
	mutex_unlock(&smux.mutex_lha0);
	SMUX_DBG("smux: %s: ldisc complete\n", __func__);
}

/**
 * Receive data from TTY Line Discipline.
 *
 * @tty TTY structure
 * @cp Character data
 * @fp Flag data
 * @count Size of character and flag data
 */
void smuxld_receive_buf(struct tty_struct *tty, const unsigned char *cp,
			char *fp, int count)
{
	int i;
	int last_idx = 0;
	const char *tty_name = NULL;
	char *f;

	/* verify error flags */
	for (i = 0, f = fp; i < count; ++i, ++f) {
		if (*f != TTY_NORMAL) {
			if (tty)
				tty_name = tty->name;
			SMUX_ERR("%s: TTY %s Error %d (%s)\n", __func__,
				tty_name, *f, tty_flag_to_str(*f));

			/* feed all previous valid data to the parser */
			smux_rx_state_machine(cp + last_idx, i - last_idx,
					TTY_NORMAL);

			/* feed bad data to parser */
			smux_rx_state_machine(cp + i, 1, *f);
			last_idx = i + 1;
		}
	}

	/* feed data to RX state machine */
	smux_rx_state_machine(cp + last_idx, count - last_idx, TTY_NORMAL);
}

static void smuxld_flush_buffer(struct tty_struct *tty)
{
	SMUX_ERR("%s: not supported\n", __func__);
}

static ssize_t smuxld_chars_in_buffer(struct tty_struct *tty)
{
	SMUX_ERR("%s: not supported\n", __func__);
	return -ENODEV;
}

static ssize_t smuxld_read(struct tty_struct *tty, struct file *file,
		unsigned char __user *buf, size_t nr)
{
	SMUX_ERR("%s: not supported\n", __func__);
	return -ENODEV;
}

static ssize_t smuxld_write(struct tty_struct *tty, struct file *file,
		const unsigned char *buf, size_t nr)
{
	SMUX_ERR("%s: not supported\n", __func__);
	return -ENODEV;
}

static int smuxld_ioctl(struct tty_struct *tty, struct file *file,
		unsigned int cmd, unsigned long arg)
{
	SMUX_ERR("%s: not supported\n", __func__);
	return -ENODEV;
}

static unsigned int smuxld_poll(struct tty_struct *tty, struct file *file,
		struct poll_table_struct *tbl)
{
	SMUX_ERR("%s: not supported\n", __func__);
	return -ENODEV;
}

static void smuxld_write_wakeup(struct tty_struct *tty)
{
	SMUX_ERR("%s: not supported\n", __func__);
}

static struct tty_ldisc_ops smux_ldisc_ops = {
	.owner		= THIS_MODULE,
	.magic		= TTY_LDISC_MAGIC,
	.name		= "n_smux",
	.open		= smuxld_open,
	.close		= smuxld_close,
	.flush_buffer	= smuxld_flush_buffer,
	.chars_in_buffer = smuxld_chars_in_buffer,
	.read		= smuxld_read,
	.write		= smuxld_write,
	.ioctl		= smuxld_ioctl,
	.poll		= smuxld_poll,
	.receive_buf	= smuxld_receive_buf,
	.write_wakeup	= smuxld_write_wakeup
};

static int __init smux_init(void)
{
	int ret;

	mutex_init(&smux.mutex_lha0);

	spin_lock_init(&smux.rx_lock_lha1);
	smux.rx_state = SMUX_RX_IDLE;
	smux.power_state = SMUX_PWR_OFF;
	smux.pwr_wakeup_delay_us = 1;
	smux.powerdown_enabled = 0;
	smux.power_ctl_remote_req_received = 0;
	INIT_LIST_HEAD(&smux.power_queue);
	smux.rx_activity_flag = 0;
	smux.tx_activity_flag = 0;
	smux.recv_len = 0;
	smux.tty = NULL;
	smux.ld_open_count = 0;
	smux.in_reset = 0;
	smux.remote_is_alive = 0;
	smux.is_initialized = 1;
	smux.platform_devs_registered = 0;
	smux_byte_loopback = 0;

	spin_lock_init(&smux.tx_lock_lha2);
	INIT_LIST_HEAD(&smux.lch_tx_ready_list);

	ret = tty_register_ldisc(N_SMUX, &smux_ldisc_ops);
	if (ret != 0) {
		SMUX_ERR("%s: error %d registering line discipline\n",
				__func__, ret);
		return ret;
	}

	subsys_notif_register_notifier("external_modem", &ssr_notifier);

	ret = lch_init();
	if (ret != 0) {
		SMUX_ERR("%s: lch_init failed\n", __func__);
		return ret;
	}

	log_ctx = ipc_log_context_create(1, "smux");
	if (!log_ctx) {
		SMUX_ERR("%s: unable to create log context\n", __func__);
		disable_ipc_logging = 1;
	}

	return 0;
}

static void __exit smux_exit(void)
{
	int ret;

	ret = tty_unregister_ldisc(N_SMUX);
	if (ret != 0) {
		SMUX_ERR("%s: error %d unregistering line discipline\n",
				__func__, ret);
		return;
	}
}

module_init(smux_init);
module_exit(smux_exit);

MODULE_DESCRIPTION("Serial Mux TTY Line Discipline");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_LDISC(N_SMUX);