blob: 92f95055bec96cecaadf9fc227f543d51818223c [file] [log] [blame]
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001/* drivers/tty/n_smux.c
2 *
Eric Holmberg8570d442013-05-21 18:04:50 -06003 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
Eric Holmberg8ed30f22012-05-10 19:16:51 -06004 *
5 * This software is licensed under the terms of the GNU General Public
6 * License version 2, as published by the Free Software Foundation, and
7 * may be copied, distributed, and modified under those terms.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 */
15#include <linux/module.h>
16#include <linux/types.h>
17#include <linux/errno.h>
18#include <linux/tty.h>
19#include <linux/tty_flip.h>
20#include <linux/tty_driver.h>
21#include <linux/smux.h>
22#include <linux/list.h>
23#include <linux/kfifo.h>
24#include <linux/slab.h>
25#include <linux/types.h>
26#include <linux/platform_device.h>
27#include <linux/delay.h>
Eric Holmberged1f00c2012-06-07 09:45:18 -060028#include <mach/subsystem_notif.h>
29#include <mach/subsystem_restart.h>
Eric Holmberg8ed30f22012-05-10 19:16:51 -060030#include <mach/msm_serial_hs.h>
Angshuman Sarkar98f78122012-07-24 14:50:42 +053031#include <mach/msm_ipc_logging.h>
Eric Holmberg8ed30f22012-05-10 19:16:51 -060032#include "smux_private.h"
33#include "smux_loopback.h"
34
35#define SMUX_NOTIFY_FIFO_SIZE 128
36#define SMUX_TX_QUEUE_SIZE 256
Eric Holmberg33f82522012-08-30 15:38:11 -060037#define SMUX_PKT_LOG_SIZE 128
Eric Holmberg8ed30f22012-05-10 19:16:51 -060038
39/* Maximum size we can accept in a single RX buffer */
40#define TTY_RECEIVE_ROOM 65536
41#define TTY_BUFFER_FULL_WAIT_MS 50
42
43/* maximum sleep time between wakeup attempts */
44#define SMUX_WAKEUP_DELAY_MAX (1 << 20)
45
46/* minimum delay for scheduling delayed work */
47#define SMUX_WAKEUP_DELAY_MIN (1 << 15)
48
49/* inactivity timeout for no rx/tx activity */
Eric Holmberg05620172012-07-03 11:13:18 -060050#define SMUX_INACTIVITY_TIMEOUT_MS 1000000
Eric Holmberg8ed30f22012-05-10 19:16:51 -060051
Eric Holmbergb8435c82012-06-05 14:51:29 -060052/* RX get_rx_buffer retry timeout values */
53#define SMUX_RX_RETRY_MIN_MS (1 << 0) /* 1 ms */
54#define SMUX_RX_RETRY_MAX_MS (1 << 10) /* 1024 ms */
55
Eric Holmberg8ed30f22012-05-10 19:16:51 -060056enum {
57 MSM_SMUX_DEBUG = 1U << 0,
58 MSM_SMUX_INFO = 1U << 1,
59 MSM_SMUX_POWER_INFO = 1U << 2,
60 MSM_SMUX_PKT = 1U << 3,
61};
62
Angshuman Sarkar98f78122012-07-24 14:50:42 +053063static int smux_debug_mask = MSM_SMUX_DEBUG | MSM_SMUX_POWER_INFO;
Eric Holmberg8ed30f22012-05-10 19:16:51 -060064module_param_named(debug_mask, smux_debug_mask,
65 int, S_IRUGO | S_IWUSR | S_IWGRP);
66
Angshuman Sarkar98f78122012-07-24 14:50:42 +053067static int disable_ipc_logging;
68
Eric Holmberg8ed30f22012-05-10 19:16:51 -060069/* Simulated wakeup used for testing */
70int smux_byte_loopback;
71module_param_named(byte_loopback, smux_byte_loopback,
72 int, S_IRUGO | S_IWUSR | S_IWGRP);
73int smux_simulate_wakeup_delay = 1;
74module_param_named(simulate_wakeup_delay, smux_simulate_wakeup_delay,
75 int, S_IRUGO | S_IWUSR | S_IWGRP);
76
Angshuman Sarkar98f78122012-07-24 14:50:42 +053077#define IPC_LOG_STR(x...) do { \
78 if (!disable_ipc_logging && log_ctx) \
79 ipc_log_string(log_ctx, x); \
80} while (0)
81
Eric Holmberg8ed30f22012-05-10 19:16:51 -060082#define SMUX_DBG(x...) do { \
83 if (smux_debug_mask & MSM_SMUX_DEBUG) \
Angshuman Sarkar98f78122012-07-24 14:50:42 +053084 IPC_LOG_STR(x); \
Eric Holmberg8ed30f22012-05-10 19:16:51 -060085} while (0)
86
Eric Holmberg01778932012-08-21 16:28:12 -060087#define SMUX_ERR(x...) do { \
88 pr_err(x); \
89 IPC_LOG_STR(x); \
90} while (0)
91
Eric Holmbergff0b0112012-06-08 15:06:57 -060092#define SMUX_PWR(x...) do { \
93 if (smux_debug_mask & MSM_SMUX_POWER_INFO) \
Angshuman Sarkar98f78122012-07-24 14:50:42 +053094 IPC_LOG_STR(x); \
Eric Holmbergff0b0112012-06-08 15:06:57 -060095} while (0)
96
Eric Holmberg4dd6e1a2012-06-26 13:46:29 -060097#define SMUX_PWR_PKT_RX(pkt) do { \
98 if (smux_debug_mask & MSM_SMUX_POWER_INFO) \
99 smux_log_pkt(pkt, 1); \
100} while (0)
101
102#define SMUX_PWR_PKT_TX(pkt) do { \
103 if (smux_debug_mask & MSM_SMUX_POWER_INFO) { \
104 if (pkt->hdr.cmd == SMUX_CMD_BYTE && \
105 pkt->hdr.flags == SMUX_WAKEUP_ACK) \
Angshuman Sarkar98f78122012-07-24 14:50:42 +0530106 IPC_LOG_STR("smux: TX Wakeup ACK\n"); \
Eric Holmberg4dd6e1a2012-06-26 13:46:29 -0600107 else if (pkt->hdr.cmd == SMUX_CMD_BYTE && \
108 pkt->hdr.flags == SMUX_WAKEUP_REQ) \
Angshuman Sarkar98f78122012-07-24 14:50:42 +0530109 IPC_LOG_STR("smux: TX Wakeup REQ\n"); \
Eric Holmberg4dd6e1a2012-06-26 13:46:29 -0600110 else \
111 smux_log_pkt(pkt, 0); \
112 } \
113} while (0)
114
115#define SMUX_PWR_BYTE_TX(pkt) do { \
116 if (smux_debug_mask & MSM_SMUX_POWER_INFO) { \
117 smux_log_pkt(pkt, 0); \
118 } \
119} while (0)
120
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600121#define SMUX_LOG_PKT_RX(pkt) do { \
122 if (smux_debug_mask & MSM_SMUX_PKT) \
123 smux_log_pkt(pkt, 1); \
124} while (0)
125
126#define SMUX_LOG_PKT_TX(pkt) do { \
127 if (smux_debug_mask & MSM_SMUX_PKT) \
128 smux_log_pkt(pkt, 0); \
129} while (0)
130
131/**
132 * Return true if channel is fully opened (both
133 * local and remote sides are in the OPENED state).
134 */
135#define IS_FULLY_OPENED(ch) \
136 (ch && (ch)->local_state == SMUX_LCH_LOCAL_OPENED \
137 && (ch)->remote_state == SMUX_LCH_REMOTE_OPENED)
138
139static struct platform_device smux_devs[] = {
140 {.name = "SMUX_CTL", .id = -1},
141 {.name = "SMUX_RMNET", .id = -1},
142 {.name = "SMUX_DUN_DATA_HSUART", .id = 0},
143 {.name = "SMUX_RMNET_DATA_HSUART", .id = 1},
144 {.name = "SMUX_RMNET_CTL_HSUART", .id = 0},
145 {.name = "SMUX_DIAG", .id = -1},
146};
147
148enum {
149 SMUX_CMD_STATUS_RTC = 1 << 0,
150 SMUX_CMD_STATUS_RTR = 1 << 1,
151 SMUX_CMD_STATUS_RI = 1 << 2,
152 SMUX_CMD_STATUS_DCD = 1 << 3,
153 SMUX_CMD_STATUS_FLOW_CNTL = 1 << 4,
154};
155
156/* Channel mode */
157enum {
158 SMUX_LCH_MODE_NORMAL,
159 SMUX_LCH_MODE_LOCAL_LOOPBACK,
160 SMUX_LCH_MODE_REMOTE_LOOPBACK,
161};
162
163enum {
164 SMUX_RX_IDLE,
165 SMUX_RX_MAGIC,
166 SMUX_RX_HDR,
167 SMUX_RX_PAYLOAD,
168 SMUX_RX_FAILURE,
169};
170
171/**
172 * Power states.
173 *
174 * The _FLUSH states are internal transitional states and are not part of the
175 * official state machine.
176 */
177enum {
178 SMUX_PWR_OFF,
179 SMUX_PWR_TURNING_ON,
180 SMUX_PWR_ON,
Eric Holmberga9b06472012-06-22 09:46:34 -0600181 SMUX_PWR_TURNING_OFF_FLUSH, /* power-off req/ack in TX queue */
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600182 SMUX_PWR_TURNING_OFF,
183 SMUX_PWR_OFF_FLUSH,
184};
185
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600186union notifier_metadata {
187 struct smux_meta_disconnected disconnected;
188 struct smux_meta_read read;
189 struct smux_meta_write write;
190 struct smux_meta_tiocm tiocm;
191};
192
193struct smux_notify_handle {
194 void (*notify)(void *priv, int event_type, const void *metadata);
195 void *priv;
196 int event_type;
197 union notifier_metadata *metadata;
198};
199
200/**
Eric Holmbergb8435c82012-06-05 14:51:29 -0600201 * Get RX Buffer Retry structure.
202 *
203 * This is used for clients that are unable to provide an RX buffer
204 * immediately. This temporary structure will be used to temporarily hold the
205 * data and perform a retry.
206 */
207struct smux_rx_pkt_retry {
208 struct smux_pkt_t *pkt;
209 struct list_head rx_retry_list;
210 unsigned timeout_in_ms;
211};
212
213/**
Eric Holmberg0560f7a2012-05-31 15:50:26 -0600214 * Receive worker data structure.
215 *
216 * One instance is created for every call to smux_rx_state_machine.
217 */
218struct smux_rx_worker_data {
219 const unsigned char *data;
220 int len;
221 int flag;
222
223 struct work_struct work;
224 struct completion work_complete;
225};
226
227/**
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600228 * Line discipline and module structure.
229 *
230 * Only one instance since multiple instances of line discipline are not
231 * allowed.
232 */
233struct smux_ldisc_t {
Eric Holmberged1f00c2012-06-07 09:45:18 -0600234 struct mutex mutex_lha0;
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600235
236 int is_initialized;
Eric Holmberg2bf9c522012-08-09 13:23:21 -0600237 int platform_devs_registered;
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600238 int in_reset;
Eric Holmbergf3f34592012-08-28 13:51:14 -0600239 int remote_is_alive;
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600240 int ld_open_count;
241 struct tty_struct *tty;
242
Eric Holmberg0560f7a2012-05-31 15:50:26 -0600243 /* RX State Machine (singled-threaded access by smux_rx_wq) */
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600244 unsigned char recv_buf[SMUX_MAX_PKT_SIZE];
245 unsigned int recv_len;
246 unsigned int pkt_remain;
247 unsigned rx_state;
Eric Holmberg0560f7a2012-05-31 15:50:26 -0600248
249 /* RX Activity - accessed by multiple threads */
250 spinlock_t rx_lock_lha1;
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600251 unsigned rx_activity_flag;
252
253 /* TX / Power */
254 spinlock_t tx_lock_lha2;
255 struct list_head lch_tx_ready_list;
256 unsigned power_state;
257 unsigned pwr_wakeup_delay_us;
258 unsigned tx_activity_flag;
259 unsigned powerdown_enabled;
Eric Holmberga9b06472012-06-22 09:46:34 -0600260 unsigned power_ctl_remote_req_received;
Eric Holmbergffddd4c2012-06-08 12:37:51 -0600261 struct list_head power_queue;
Eric Holmberg8570d442013-05-21 18:04:50 -0600262 unsigned remote_initiated_wakeup_count;
263 unsigned local_initiated_wakeup_count;
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600264};
265
266
267/* data structures */
Eric Holmbergf44a8cc2012-06-13 17:58:13 -0600268struct smux_lch_t smux_lch[SMUX_NUM_LOGICAL_CHANNELS];
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600269static struct smux_ldisc_t smux;
270static const char *tty_error_type[] = {
271 [TTY_NORMAL] = "normal",
272 [TTY_OVERRUN] = "overrun",
273 [TTY_BREAK] = "break",
274 [TTY_PARITY] = "parity",
275 [TTY_FRAME] = "framing",
276};
277
Eric Holmbergf44a8cc2012-06-13 17:58:13 -0600278static const char * const smux_cmds[] = {
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600279 [SMUX_CMD_DATA] = "DATA",
280 [SMUX_CMD_OPEN_LCH] = "OPEN",
281 [SMUX_CMD_CLOSE_LCH] = "CLOSE",
282 [SMUX_CMD_STATUS] = "STATUS",
283 [SMUX_CMD_PWR_CTL] = "PWR",
Eric Holmberg8570d442013-05-21 18:04:50 -0600284 [SMUX_CMD_DELAY] = "DELAY",
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600285 [SMUX_CMD_BYTE] = "Raw Byte",
286};
287
Angshuman Sarkar98f78122012-07-24 14:50:42 +0530288static const char * const smux_events[] = {
289 [SMUX_CONNECTED] = "CONNECTED" ,
290 [SMUX_DISCONNECTED] = "DISCONNECTED",
291 [SMUX_READ_DONE] = "READ_DONE",
292 [SMUX_READ_FAIL] = "READ_FAIL",
293 [SMUX_WRITE_DONE] = "WRITE_DONE",
294 [SMUX_WRITE_FAIL] = "WRITE_FAIL",
295 [SMUX_TIOCM_UPDATE] = "TIOCM_UPDATE",
296 [SMUX_LOW_WM_HIT] = "LOW_WM_HIT",
297 [SMUX_HIGH_WM_HIT] = "HIGH_WM_HIT",
298 [SMUX_RX_RETRY_HIGH_WM_HIT] = "RX_RETRY_HIGH_WM_HIT",
299 [SMUX_RX_RETRY_LOW_WM_HIT] = "RX_RETRY_LOW_WM_HIT",
300};
301
Eric Holmbergf44a8cc2012-06-13 17:58:13 -0600302static const char * const smux_local_state[] = {
303 [SMUX_LCH_LOCAL_CLOSED] = "CLOSED",
304 [SMUX_LCH_LOCAL_OPENING] = "OPENING",
305 [SMUX_LCH_LOCAL_OPENED] = "OPENED",
306 [SMUX_LCH_LOCAL_CLOSING] = "CLOSING",
307};
Angshuman Sarkar98f78122012-07-24 14:50:42 +0530308
Eric Holmbergf44a8cc2012-06-13 17:58:13 -0600309static const char * const smux_remote_state[] = {
310 [SMUX_LCH_REMOTE_CLOSED] = "CLOSED",
311 [SMUX_LCH_REMOTE_OPENED] = "OPENED",
312};
313
314static const char * const smux_mode[] = {
315 [SMUX_LCH_MODE_NORMAL] = "N",
316 [SMUX_LCH_MODE_LOCAL_LOOPBACK] = "L",
317 [SMUX_LCH_MODE_REMOTE_LOOPBACK] = "R",
318};
319
320static const char * const smux_undef[] = {
321 [SMUX_UNDEF_LONG] = "UNDEF",
322 [SMUX_UNDEF_SHORT] = "U",
323};
324
325static void *log_ctx;
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600326static void smux_notify_local_fn(struct work_struct *work);
327static DECLARE_WORK(smux_notify_local, smux_notify_local_fn);
328
329static struct workqueue_struct *smux_notify_wq;
330static size_t handle_size;
331static struct kfifo smux_notify_fifo;
332static int queued_fifo_notifications;
333static DEFINE_SPINLOCK(notify_lock_lhc1);
334
335static struct workqueue_struct *smux_tx_wq;
Eric Holmberg0560f7a2012-05-31 15:50:26 -0600336static struct workqueue_struct *smux_rx_wq;
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600337static void smux_tx_worker(struct work_struct *work);
338static DECLARE_WORK(smux_tx_work, smux_tx_worker);
339
340static void smux_wakeup_worker(struct work_struct *work);
Eric Holmbergb8435c82012-06-05 14:51:29 -0600341static void smux_rx_retry_worker(struct work_struct *work);
Eric Holmberg0560f7a2012-05-31 15:50:26 -0600342static void smux_rx_worker(struct work_struct *work);
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600343static DECLARE_WORK(smux_wakeup_work, smux_wakeup_worker);
344static DECLARE_DELAYED_WORK(smux_wakeup_delayed_work, smux_wakeup_worker);
345
346static void smux_inactivity_worker(struct work_struct *work);
347static DECLARE_WORK(smux_inactivity_work, smux_inactivity_worker);
348static DECLARE_DELAYED_WORK(smux_delayed_inactivity_work,
349 smux_inactivity_worker);
350
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600351static void list_channel(struct smux_lch_t *ch);
352static int smux_send_status_cmd(struct smux_lch_t *ch);
353static int smux_dispatch_rx_pkt(struct smux_pkt_t *pkt);
Eric Holmbergffddd4c2012-06-08 12:37:51 -0600354static void smux_flush_tty(void);
Eric Holmberg6fcf5322012-07-11 11:46:28 -0600355static void smux_purge_ch_tx_queue(struct smux_lch_t *ch, int is_ssr);
Eric Holmberged1f00c2012-06-07 09:45:18 -0600356static int schedule_notify(uint8_t lcid, int event,
357 const union notifier_metadata *metadata);
358static int ssr_notifier_cb(struct notifier_block *this,
359 unsigned long code,
360 void *data);
Eric Holmberg92a67df2012-06-25 13:56:24 -0600361static void smux_uart_power_on_atomic(void);
Eric Holmberg2e0906f2012-06-26 13:29:14 -0600362static int smux_rx_flow_control_updated(struct smux_lch_t *ch);
Eric Holmberg06011322012-07-06 18:17:03 -0600363static void smux_flush_workqueues(void);
Eric Holmbergf6a364e2012-08-07 18:41:44 -0600364static void smux_pdev_release(struct device *dev);
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600365
366/**
Eric Holmbergf44a8cc2012-06-13 17:58:13 -0600367 * local_lch_state() - Return human readable form of local logical state.
368 * @state: Local logical channel state enum.
369 *
370 */
371const char *local_lch_state(unsigned state)
372{
373 if (state < ARRAY_SIZE(smux_local_state))
374 return smux_local_state[state];
375 else
376 return smux_undef[SMUX_UNDEF_LONG];
377}
378
379/**
380 * remote_lch_state() - Return human readable for of remote logical state.
381 * @state: Remote logical channel state enum.
382 *
383 */
384const char *remote_lch_state(unsigned state)
385{
386 if (state < ARRAY_SIZE(smux_remote_state))
387 return smux_remote_state[state];
388 else
389 return smux_undef[SMUX_UNDEF_LONG];
390}
391
392/**
393 * lch_mode() - Return human readable form of mode.
394 * @mode: Mode of the logical channel.
395 *
396 */
397const char *lch_mode(unsigned mode)
398{
399 if (mode < ARRAY_SIZE(smux_mode))
400 return smux_mode[mode];
401 else
402 return smux_undef[SMUX_UNDEF_SHORT];
403}
404
405/**
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600406 * Convert TTY Error Flags to string for logging purposes.
407 *
408 * @flag TTY_* flag
409 * @returns String description or NULL if unknown
410 */
411static const char *tty_flag_to_str(unsigned flag)
412{
413 if (flag < ARRAY_SIZE(tty_error_type))
414 return tty_error_type[flag];
415 return NULL;
416}
417
418/**
419 * Convert SMUX Command to string for logging purposes.
420 *
421 * @cmd SMUX command
422 * @returns String description or NULL if unknown
423 */
424static const char *cmd_to_str(unsigned cmd)
425{
426 if (cmd < ARRAY_SIZE(smux_cmds))
427 return smux_cmds[cmd];
428 return NULL;
429}
430
431/**
Angshuman Sarkar98f78122012-07-24 14:50:42 +0530432 * Convert SMUX event to string for logging purposes.
433 *
434 * @event SMUX event
435 * @returns String description or NULL if unknown
436 */
437static const char *event_to_str(unsigned cmd)
438{
439 if (cmd < ARRAY_SIZE(smux_events))
440 return smux_events[cmd];
441 return NULL;
442}
443
/**
 * Set the reset state due to an unrecoverable failure.
 *
 * Flags the mux as in-reset and the remote side as dead; recovery is
 * expected to come from subsystem restart (SSR), not from this driver.
 */
static void smux_enter_reset(void)
{
	SMUX_ERR("%s: unrecoverable failure, waiting for ssr\n", __func__);
	smux.in_reset = 1;
	smux.remote_is_alive = 0;
}
453
Eric Holmbergf44a8cc2012-06-13 17:58:13 -0600454/**
455 * Initialize the lch_structs.
456 */
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600457static int lch_init(void)
458{
459 unsigned int id;
460 struct smux_lch_t *ch;
461 int i = 0;
462
463 handle_size = sizeof(struct smux_notify_handle *);
464
465 smux_notify_wq = create_singlethread_workqueue("smux_notify_wq");
466 smux_tx_wq = create_singlethread_workqueue("smux_tx_wq");
Eric Holmberg0560f7a2012-05-31 15:50:26 -0600467 smux_rx_wq = create_singlethread_workqueue("smux_rx_wq");
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600468
469 if (IS_ERR(smux_notify_wq) || IS_ERR(smux_tx_wq)) {
Angshuman Sarkar98f78122012-07-24 14:50:42 +0530470 SMUX_DBG("smux: %s: create_singlethread_workqueue ENOMEM\n",
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600471 __func__);
472 return -ENOMEM;
473 }
474
475 i |= kfifo_alloc(&smux_notify_fifo,
476 SMUX_NOTIFY_FIFO_SIZE * handle_size,
477 GFP_KERNEL);
478 i |= smux_loopback_init();
479
480 if (i) {
Eric Holmbergb9c3cf72012-08-21 16:43:39 -0600481 SMUX_ERR("%s: out of memory error\n", __func__);
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600482 return -ENOMEM;
483 }
484
485 for (id = 0 ; id < SMUX_NUM_LOGICAL_CHANNELS; id++) {
486 ch = &smux_lch[id];
487
488 spin_lock_init(&ch->state_lock_lhb1);
489 ch->lcid = id;
490 ch->local_state = SMUX_LCH_LOCAL_CLOSED;
491 ch->local_mode = SMUX_LCH_MODE_NORMAL;
492 ch->local_tiocm = 0x0;
Eric Holmberg2e0906f2012-06-26 13:29:14 -0600493 ch->options = SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP;
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600494 ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
495 ch->remote_mode = SMUX_LCH_MODE_NORMAL;
496 ch->remote_tiocm = 0x0;
497 ch->tx_flow_control = 0;
Eric Holmberg2e0906f2012-06-26 13:29:14 -0600498 ch->rx_flow_control_auto = 0;
499 ch->rx_flow_control_client = 0;
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600500 ch->priv = 0;
501 ch->notify = 0;
502 ch->get_rx_buffer = 0;
503
Eric Holmbergb8435c82012-06-05 14:51:29 -0600504 INIT_LIST_HEAD(&ch->rx_retry_queue);
505 ch->rx_retry_queue_cnt = 0;
506 INIT_DELAYED_WORK(&ch->rx_retry_work, smux_rx_retry_worker);
507
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600508 spin_lock_init(&ch->tx_lock_lhb2);
509 INIT_LIST_HEAD(&ch->tx_queue);
510 INIT_LIST_HEAD(&ch->tx_ready_list);
511 ch->tx_pending_data_cnt = 0;
512 ch->notify_lwm = 0;
513 }
514
515 return 0;
516}
517
/**
 * Empty and cleanup all SMUX logical channels for subsystem restart or line
 * discipline disconnect.
 *
 * Drops all queued TX work, notifies clients of disconnect, and resets
 * per-channel state back to closed.  Lock order: smux.tx_lock_lha2 alone
 * for the global lists, then per-channel state_lock_lhb1 -> tx_lock_lhb2.
 */
static void smux_lch_purge(void)
{
	struct smux_lch_t *ch;
	unsigned long flags;
	int i;

	/* Empty TX ready list */
	spin_lock_irqsave(&smux.tx_lock_lha2, flags);
	while (!list_empty(&smux.lch_tx_ready_list)) {
		SMUX_DBG("smux: %s: emptying ready list %p\n",
				__func__, smux.lch_tx_ready_list.next);
		ch = list_first_entry(&smux.lch_tx_ready_list,
						struct smux_lch_t,
						tx_ready_list);
		list_del(&ch->tx_ready_list);
		/* re-init so a later list_empty() on the node is true */
		INIT_LIST_HEAD(&ch->tx_ready_list);
	}

	/* Purge Power Queue */
	while (!list_empty(&smux.power_queue)) {
		struct smux_pkt_t *pkt;

		pkt = list_first_entry(&smux.power_queue,
						struct smux_pkt_t,
						list);
		list_del(&pkt->list);
		SMUX_DBG("smux: %s: emptying power queue pkt=%p\n",
				__func__, pkt);
		smux_free_pkt(pkt);
	}
	spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);

	/* Close all ports */
	for (i = 0 ; i < SMUX_NUM_LOGICAL_CHANNELS; i++) {
		ch = &smux_lch[i];
		SMUX_DBG("smux: %s: cleaning up lcid %d\n", __func__, i);

		spin_lock_irqsave(&ch->state_lock_lhb1, flags);

		/* Purge TX queue (is_ssr=1: treat as subsystem restart) */
		spin_lock(&ch->tx_lock_lhb2);
		smux_purge_ch_tx_queue(ch, 1);
		spin_unlock(&ch->tx_lock_lhb2);

		/* Notify user of disconnect and reset channel state */
		if (ch->local_state == SMUX_LCH_LOCAL_OPENED ||
			ch->local_state == SMUX_LCH_LOCAL_CLOSING) {
			union notifier_metadata meta;

			meta.disconnected.is_ssr = smux.in_reset;
			schedule_notify(ch->lcid, SMUX_DISCONNECTED, &meta);
		}

		ch->local_state = SMUX_LCH_LOCAL_CLOSED;
		ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
		ch->remote_mode = SMUX_LCH_MODE_NORMAL;
		ch->tx_flow_control = 0;
		ch->rx_flow_control_auto = 0;
		ch->rx_flow_control_client = 0;

		/* Purge RX retry queue (worker does the actual cleanup) */
		if (ch->rx_retry_queue_cnt)
			queue_delayed_work(smux_rx_wq, &ch->rx_retry_work, 0);

		spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
	}
}
589
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600590int smux_assert_lch_id(uint32_t lcid)
591{
592 if (lcid >= SMUX_NUM_LOGICAL_CHANNELS)
593 return -ENXIO;
594 else
595 return 0;
596}
597
598/**
599 * Log packet information for debug purposes.
600 *
601 * @pkt Packet to log
602 * @is_recv 1 = RX packet; 0 = TX Packet
603 *
604 * [DIR][LCID] [LOCAL_STATE][LOCAL_MODE]:[REMOTE_STATE][REMOTE_MODE] PKT Info
605 *
606 * PKT Info:
607 * [CMD] flags [flags] len [PAYLOAD_LEN]:[PAD_LEN] [Payload hex bytes]
608 *
609 * Direction: R = Receive, S = Send
610 * Local State: C = Closed; c = closing; o = opening; O = Opened
611 * Local Mode: L = Local loopback; R = Remote loopback; N = Normal
612 * Remote State: C = Closed; O = Opened
613 * Remote Mode: R = Remote loopback; N = Normal
614 */
615static void smux_log_pkt(struct smux_pkt_t *pkt, int is_recv)
616{
617 char logbuf[SMUX_PKT_LOG_SIZE];
618 char cmd_extra[16];
619 int i = 0;
620 int count;
621 int len;
622 char local_state;
623 char local_mode;
624 char remote_state;
625 char remote_mode;
Eric Holmbergbb72c6c2012-07-02 14:51:34 -0600626 struct smux_lch_t *ch = NULL;
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600627 unsigned char *data;
628
Eric Holmbergbb72c6c2012-07-02 14:51:34 -0600629 if (!smux_assert_lch_id(pkt->hdr.lcid))
630 ch = &smux_lch[pkt->hdr.lcid];
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600631
Eric Holmbergbb72c6c2012-07-02 14:51:34 -0600632 if (ch) {
633 switch (ch->local_state) {
634 case SMUX_LCH_LOCAL_CLOSED:
635 local_state = 'C';
636 break;
637 case SMUX_LCH_LOCAL_OPENING:
638 local_state = 'o';
639 break;
640 case SMUX_LCH_LOCAL_OPENED:
641 local_state = 'O';
642 break;
643 case SMUX_LCH_LOCAL_CLOSING:
644 local_state = 'c';
645 break;
646 default:
647 local_state = 'U';
648 break;
649 }
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600650
Eric Holmbergbb72c6c2012-07-02 14:51:34 -0600651 switch (ch->local_mode) {
652 case SMUX_LCH_MODE_LOCAL_LOOPBACK:
653 local_mode = 'L';
654 break;
655 case SMUX_LCH_MODE_REMOTE_LOOPBACK:
656 local_mode = 'R';
657 break;
658 case SMUX_LCH_MODE_NORMAL:
659 local_mode = 'N';
660 break;
661 default:
662 local_mode = 'U';
663 break;
664 }
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600665
Eric Holmbergbb72c6c2012-07-02 14:51:34 -0600666 switch (ch->remote_state) {
667 case SMUX_LCH_REMOTE_CLOSED:
668 remote_state = 'C';
669 break;
670 case SMUX_LCH_REMOTE_OPENED:
671 remote_state = 'O';
672 break;
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600673
Eric Holmbergbb72c6c2012-07-02 14:51:34 -0600674 default:
675 remote_state = 'U';
676 break;
677 }
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600678
Eric Holmbergbb72c6c2012-07-02 14:51:34 -0600679 switch (ch->remote_mode) {
680 case SMUX_LCH_MODE_REMOTE_LOOPBACK:
681 remote_mode = 'R';
682 break;
683 case SMUX_LCH_MODE_NORMAL:
684 remote_mode = 'N';
685 break;
686 default:
687 remote_mode = 'U';
688 break;
689 }
690 } else {
691 /* broadcast channel */
692 local_state = '-';
693 local_mode = '-';
694 remote_state = '-';
695 remote_mode = '-';
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600696 }
697
698 /* determine command type (ACK, etc) */
699 cmd_extra[0] = '\0';
700 switch (pkt->hdr.cmd) {
701 case SMUX_CMD_OPEN_LCH:
702 if (pkt->hdr.flags & SMUX_CMD_OPEN_ACK)
703 snprintf(cmd_extra, sizeof(cmd_extra), " ACK");
704 break;
705 case SMUX_CMD_CLOSE_LCH:
706 if (pkt->hdr.flags & SMUX_CMD_CLOSE_ACK)
707 snprintf(cmd_extra, sizeof(cmd_extra), " ACK");
708 break;
Eric Holmberg4dd6e1a2012-06-26 13:46:29 -0600709
710 case SMUX_CMD_PWR_CTL:
711 if (pkt->hdr.flags & SMUX_CMD_PWR_CTL_ACK)
712 snprintf(cmd_extra, sizeof(cmd_extra), " ACK");
713 break;
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600714 };
715
716 i += snprintf(logbuf + i, SMUX_PKT_LOG_SIZE - i,
717 "smux: %c%d %c%c:%c%c %s%s flags %x len %d:%d ",
718 is_recv ? 'R' : 'S', pkt->hdr.lcid,
719 local_state, local_mode,
720 remote_state, remote_mode,
721 cmd_to_str(pkt->hdr.cmd), cmd_extra, pkt->hdr.flags,
722 pkt->hdr.payload_len, pkt->hdr.pad_len);
723
724 len = (pkt->hdr.payload_len > 16) ? 16 : pkt->hdr.payload_len;
725 data = (unsigned char *)pkt->payload;
726 for (count = 0; count < len; count++)
727 i += snprintf(logbuf + i, SMUX_PKT_LOG_SIZE - i,
728 "%02x ", (unsigned)data[count]);
729
Angshuman Sarkar98f78122012-07-24 14:50:42 +0530730 IPC_LOG_STR(logbuf);
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600731}
732
733static void smux_notify_local_fn(struct work_struct *work)
734{
735 struct smux_notify_handle *notify_handle = NULL;
736 union notifier_metadata *metadata = NULL;
737 unsigned long flags;
738 int i;
739
740 for (;;) {
741 /* retrieve notification */
742 spin_lock_irqsave(&notify_lock_lhc1, flags);
743 if (kfifo_len(&smux_notify_fifo) >= handle_size) {
744 i = kfifo_out(&smux_notify_fifo,
745 &notify_handle,
746 handle_size);
747 if (i != handle_size) {
Eric Holmbergb9c3cf72012-08-21 16:43:39 -0600748 SMUX_ERR(
749 "%s: unable to retrieve handle %d expected %d\n",
750 __func__, i, handle_size);
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600751 spin_unlock_irqrestore(&notify_lock_lhc1, flags);
752 break;
753 }
754 } else {
755 spin_unlock_irqrestore(&notify_lock_lhc1, flags);
756 break;
757 }
758 --queued_fifo_notifications;
759 spin_unlock_irqrestore(&notify_lock_lhc1, flags);
760
761 /* notify client */
762 metadata = notify_handle->metadata;
763 notify_handle->notify(notify_handle->priv,
764 notify_handle->event_type,
765 metadata);
766
767 kfree(metadata);
768 kfree(notify_handle);
769 }
770}
771
772/**
773 * Initialize existing packet.
774 */
775void smux_init_pkt(struct smux_pkt_t *pkt)
776{
777 memset(pkt, 0x0, sizeof(*pkt));
778 pkt->hdr.magic = SMUX_MAGIC;
779 INIT_LIST_HEAD(&pkt->list);
780}
781
782/**
783 * Allocate and initialize packet.
784 *
785 * If a payload is needed, either set it directly and ensure that it's freed or
786 * use smd_alloc_pkt_payload() to allocate a packet and it will be freed
787 * automatically when smd_free_pkt() is called.
788 */
789struct smux_pkt_t *smux_alloc_pkt(void)
790{
791 struct smux_pkt_t *pkt;
792
793 /* Consider a free list implementation instead of kmalloc */
794 pkt = kmalloc(sizeof(struct smux_pkt_t), GFP_ATOMIC);
795 if (!pkt) {
Eric Holmbergb9c3cf72012-08-21 16:43:39 -0600796 SMUX_ERR("%s: out of memory\n", __func__);
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600797 return NULL;
798 }
799 smux_init_pkt(pkt);
800 pkt->allocated = 1;
801
802 return pkt;
803}
804
805/**
806 * Free packet.
807 *
808 * @pkt Packet to free (may be NULL)
809 *
810 * If payload was allocated using smux_alloc_pkt_payload(), then it is freed as
811 * well. Otherwise, the caller is responsible for freeing the payload.
812 */
813void smux_free_pkt(struct smux_pkt_t *pkt)
814{
815 if (pkt) {
816 if (pkt->free_payload)
817 kfree(pkt->payload);
818 if (pkt->allocated)
819 kfree(pkt);
820 }
821}
822
/**
 * Allocate packet payload.
 *
 * @pkt Packet to add payload to (payload size taken from hdr.payload_len)
 *
 * @returns 0 on success, <0 upon error
 *
 * A flag is set to signal smux_free_pkt() to free the payload.
 */
int smux_alloc_pkt_payload(struct smux_pkt_t *pkt)
{
	if (!pkt)
		return -EINVAL;

	/*
	 * free_payload is set even if the allocation below fails; that is
	 * harmless because kfree(NULL) is a no-op in smux_free_pkt().
	 */
	pkt->payload = kmalloc(pkt->hdr.payload_len, GFP_ATOMIC);
	pkt->free_payload = 1;
	if (!pkt->payload) {
		SMUX_ERR("%s: unable to malloc %d bytes for payload\n",
				__func__, pkt->hdr.payload_len);
		return -ENOMEM;
	}

	return 0;
}
847
848static int schedule_notify(uint8_t lcid, int event,
849 const union notifier_metadata *metadata)
850{
851 struct smux_notify_handle *notify_handle = 0;
852 union notifier_metadata *meta_copy = 0;
853 struct smux_lch_t *ch;
854 int i;
855 unsigned long flags;
856 int ret = 0;
857
Angshuman Sarkar98f78122012-07-24 14:50:42 +0530858 IPC_LOG_STR("smux: %s ch:%d\n", event_to_str(event), lcid);
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600859 ch = &smux_lch[lcid];
860 notify_handle = kzalloc(sizeof(struct smux_notify_handle),
861 GFP_ATOMIC);
862 if (!notify_handle) {
Eric Holmbergb9c3cf72012-08-21 16:43:39 -0600863 SMUX_ERR("%s: out of memory\n", __func__);
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600864 ret = -ENOMEM;
865 goto free_out;
866 }
867
868 notify_handle->notify = ch->notify;
869 notify_handle->priv = ch->priv;
870 notify_handle->event_type = event;
871 if (metadata) {
872 meta_copy = kzalloc(sizeof(union notifier_metadata),
873 GFP_ATOMIC);
874 if (!meta_copy) {
Eric Holmbergb9c3cf72012-08-21 16:43:39 -0600875 SMUX_ERR("%s: out of memory\n", __func__);
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600876 ret = -ENOMEM;
877 goto free_out;
878 }
879 *meta_copy = *metadata;
880 notify_handle->metadata = meta_copy;
881 } else {
882 notify_handle->metadata = NULL;
883 }
884
885 spin_lock_irqsave(&notify_lock_lhc1, flags);
886 i = kfifo_avail(&smux_notify_fifo);
887 if (i < handle_size) {
Eric Holmbergb9c3cf72012-08-21 16:43:39 -0600888 SMUX_ERR("%s: fifo full error %d expected %d\n",
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600889 __func__, i, handle_size);
890 ret = -ENOMEM;
891 goto unlock_out;
892 }
893
894 i = kfifo_in(&smux_notify_fifo, &notify_handle, handle_size);
895 if (i < 0 || i != handle_size) {
Eric Holmbergb9c3cf72012-08-21 16:43:39 -0600896 SMUX_ERR("%s: fifo not available error %d (expected %d)\n",
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600897 __func__, i, handle_size);
898 ret = -ENOSPC;
899 goto unlock_out;
900 }
901 ++queued_fifo_notifications;
902
903unlock_out:
904 spin_unlock_irqrestore(&notify_lock_lhc1, flags);
905
906free_out:
907 queue_work(smux_notify_wq, &smux_notify_local);
908 if (ret < 0 && notify_handle) {
909 kfree(notify_handle->metadata);
910 kfree(notify_handle);
911 }
912 return ret;
913}
914
915/**
916 * Returns the serialized size of a packet.
917 *
918 * @pkt Packet to serialize
919 *
920 * @returns Serialized length of packet
921 */
922static unsigned int smux_serialize_size(struct smux_pkt_t *pkt)
923{
924 unsigned int size;
925
926 size = sizeof(struct smux_hdr_t);
927 size += pkt->hdr.payload_len;
928 size += pkt->hdr.pad_len;
929
930 return size;
931}
932
933/**
934 * Serialize packet @pkt into output buffer @data.
935 *
936 * @pkt Packet to serialize
937 * @out Destination buffer pointer
938 * @out_len Size of serialized packet
939 *
940 * @returns 0 for success
941 */
942int smux_serialize(struct smux_pkt_t *pkt, char *out,
943 unsigned int *out_len)
944{
945 char *data_start = out;
946
947 if (smux_serialize_size(pkt) > SMUX_MAX_PKT_SIZE) {
Eric Holmbergb9c3cf72012-08-21 16:43:39 -0600948 SMUX_ERR("%s: packet size %d too big\n",
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600949 __func__, smux_serialize_size(pkt));
950 return -E2BIG;
951 }
952
953 memcpy(out, &pkt->hdr, sizeof(struct smux_hdr_t));
954 out += sizeof(struct smux_hdr_t);
955 if (pkt->payload) {
956 memcpy(out, pkt->payload, pkt->hdr.payload_len);
957 out += pkt->hdr.payload_len;
958 }
959 if (pkt->hdr.pad_len) {
960 memset(out, 0x0, pkt->hdr.pad_len);
961 out += pkt->hdr.pad_len;
962 }
963 *out_len = out - data_start;
964 return 0;
965}
966
967/**
968 * Serialize header and provide pointer to the data.
969 *
970 * @pkt Packet
971 * @out[out] Pointer to the serialized header data
972 * @out_len[out] Pointer to the serialized header length
973 */
974static void smux_serialize_hdr(struct smux_pkt_t *pkt, char **out,
975 unsigned int *out_len)
976{
977 *out = (char *)&pkt->hdr;
978 *out_len = sizeof(struct smux_hdr_t);
979}
980
981/**
982 * Serialize payload and provide pointer to the data.
983 *
984 * @pkt Packet
985 * @out[out] Pointer to the serialized payload data
986 * @out_len[out] Pointer to the serialized payload length
987 */
988static void smux_serialize_payload(struct smux_pkt_t *pkt, char **out,
989 unsigned int *out_len)
990{
991 *out = pkt->payload;
992 *out_len = pkt->hdr.payload_len;
993}
994
995/**
996 * Serialize padding and provide pointer to the data.
997 *
998 * @pkt Packet
999 * @out[out] Pointer to the serialized padding (always NULL)
1000 * @out_len[out] Pointer to the serialized payload length
1001 *
1002 * Since the padding field value is undefined, only the size of the patting
1003 * (@out_len) is set and the buffer pointer (@out) will always be NULL.
1004 */
1005static void smux_serialize_padding(struct smux_pkt_t *pkt, char **out,
1006 unsigned int *out_len)
1007{
1008 *out = NULL;
1009 *out_len = pkt->hdr.pad_len;
1010}
1011
1012/**
1013 * Write data to TTY framework and handle breaking the writes up if needed.
1014 *
1015 * @data Data to write
1016 * @len Length of data
1017 *
1018 * @returns 0 for success, < 0 for failure
1019 */
1020static int write_to_tty(char *data, unsigned len)
1021{
1022 int data_written;
1023
1024 if (!data)
1025 return 0;
1026
Eric Holmberged1f00c2012-06-07 09:45:18 -06001027 while (len > 0 && !smux.in_reset) {
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001028 data_written = smux.tty->ops->write(smux.tty, data, len);
1029 if (data_written >= 0) {
1030 len -= data_written;
1031 data += data_written;
1032 } else {
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06001033 SMUX_ERR("%s: TTY write returned error %d\n",
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001034 __func__, data_written);
1035 return data_written;
1036 }
1037
1038 if (len)
1039 tty_wait_until_sent(smux.tty,
1040 msecs_to_jiffies(TTY_BUFFER_FULL_WAIT_MS));
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001041 }
1042 return 0;
1043}
1044
1045/**
1046 * Write packet to TTY.
1047 *
1048 * @pkt packet to write
1049 *
1050 * @returns 0 on success
1051 */
1052static int smux_tx_tty(struct smux_pkt_t *pkt)
1053{
1054 char *data;
1055 unsigned int len;
1056 int ret;
1057
1058 if (!smux.tty) {
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06001059 SMUX_ERR("%s: TTY not initialized", __func__);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001060 return -ENOTTY;
1061 }
1062
1063 if (pkt->hdr.cmd == SMUX_CMD_BYTE) {
Angshuman Sarkar98f78122012-07-24 14:50:42 +05301064 SMUX_DBG("smux: %s: tty send single byte\n", __func__);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001065 ret = write_to_tty(&pkt->hdr.flags, 1);
1066 return ret;
1067 }
1068
1069 smux_serialize_hdr(pkt, &data, &len);
1070 ret = write_to_tty(data, len);
1071 if (ret) {
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06001072 SMUX_ERR("%s: failed %d to write header %d\n",
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001073 __func__, ret, len);
1074 return ret;
1075 }
1076
1077 smux_serialize_payload(pkt, &data, &len);
1078 ret = write_to_tty(data, len);
1079 if (ret) {
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06001080 SMUX_ERR("%s: failed %d to write payload %d\n",
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001081 __func__, ret, len);
1082 return ret;
1083 }
1084
1085 smux_serialize_padding(pkt, &data, &len);
1086 while (len > 0) {
1087 char zero = 0x0;
1088 ret = write_to_tty(&zero, 1);
1089 if (ret) {
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06001090 SMUX_ERR("%s: failed %d to write padding %d\n",
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001091 __func__, ret, len);
1092 return ret;
1093 }
1094 --len;
1095 }
1096 return 0;
1097}
1098
1099/**
1100 * Send a single character.
1101 *
1102 * @ch Character to send
1103 */
1104static void smux_send_byte(char ch)
1105{
Eric Holmbergffddd4c2012-06-08 12:37:51 -06001106 struct smux_pkt_t *pkt;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001107
Eric Holmbergffddd4c2012-06-08 12:37:51 -06001108 pkt = smux_alloc_pkt();
1109 if (!pkt) {
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06001110 SMUX_ERR("%s: alloc failure for byte %x\n", __func__, ch);
Eric Holmbergffddd4c2012-06-08 12:37:51 -06001111 return;
1112 }
1113 pkt->hdr.cmd = SMUX_CMD_BYTE;
1114 pkt->hdr.flags = ch;
1115 pkt->hdr.lcid = SMUX_BROADCAST_LCID;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001116
Eric Holmbergffddd4c2012-06-08 12:37:51 -06001117 list_add_tail(&pkt->list, &smux.power_queue);
1118 queue_work(smux_tx_wq, &smux_tx_work);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001119}
1120
1121/**
1122 * Receive a single-character packet (used for internal testing).
1123 *
1124 * @ch Character to receive
1125 * @lcid Logical channel ID for packet
1126 *
1127 * @returns 0 for success
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001128 */
1129static int smux_receive_byte(char ch, int lcid)
1130{
1131 struct smux_pkt_t pkt;
1132
1133 smux_init_pkt(&pkt);
1134 pkt.hdr.lcid = lcid;
1135 pkt.hdr.cmd = SMUX_CMD_BYTE;
1136 pkt.hdr.flags = ch;
1137
1138 return smux_dispatch_rx_pkt(&pkt);
1139}
1140
1141/**
1142 * Queue packet for transmit.
1143 *
1144 * @pkt_ptr Packet to queue
1145 * @ch Channel to queue packet on
1146 * @queue Queue channel on ready list
1147 */
1148static void smux_tx_queue(struct smux_pkt_t *pkt_ptr, struct smux_lch_t *ch,
1149 int queue)
1150{
1151 unsigned long flags;
1152
Angshuman Sarkar98f78122012-07-24 14:50:42 +05301153 SMUX_DBG("smux: %s: queuing pkt %p\n", __func__, pkt_ptr);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001154
1155 spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
1156 list_add_tail(&pkt_ptr->list, &ch->tx_queue);
1157 spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);
1158
1159 if (queue)
1160 list_channel(ch);
1161}
1162
/**
 * Handle receive OPEN ACK command.
 *
 * @pkt Received packet
 *
 * @returns 0 for success, -EINVAL if the ACK arrives in an invalid
 *          local state
 *
 * Completes the local half of the channel-open handshake: moves the
 * channel from OPENING to OPENED and notifies the client once both
 * sides are open.  Power-collapse support is latched on (never off)
 * when the remote side advertises it.
 */
static int smux_handle_rx_open_ack(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret;
	struct smux_lch_t *ch;
	int enable_powerdown = 0;

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];

	/* channel state machine is protected by the per-channel lock */
	spin_lock(&ch->state_lock_lhb1);
	if (ch->local_state == SMUX_LCH_LOCAL_OPENING) {
		SMUX_DBG("smux: lcid %d local state 0x%x -> 0x%x\n", lcid,
				ch->local_state,
				SMUX_LCH_LOCAL_OPENED);

		if (pkt->hdr.flags & SMUX_CMD_OPEN_POWER_COLLAPSE)
			enable_powerdown = 1;

		ch->local_state = SMUX_LCH_LOCAL_OPENED;
		/* channel is fully open only when the remote end opened too */
		if (ch->remote_state == SMUX_LCH_REMOTE_OPENED)
			schedule_notify(lcid, SMUX_CONNECTED, NULL);
		ret = 0;
	} else if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
		/* ACK of the open we echoed back in loopback mode */
		SMUX_DBG("smux: Remote loopback OPEN ACK received\n");
		ret = 0;
	} else {
		SMUX_ERR("%s: lcid %d state 0x%x open ack invalid\n",
				__func__, lcid, ch->local_state);
		ret = -EINVAL;
	}
	spin_unlock(&ch->state_lock_lhb1);

	/*
	 * tx_lock_lha2 is taken only after the channel lock is dropped to
	 * preserve lock ordering.
	 */
	if (enable_powerdown) {
		spin_lock(&smux.tx_lock_lha2);
		if (!smux.powerdown_enabled) {
			smux.powerdown_enabled = 1;
			SMUX_DBG("smux: %s: enabling power-collapse support\n",
					__func__);
		}
		spin_unlock(&smux.tx_lock_lha2);
	}

	return ret;
}
1215
/**
 * Handle receive CLOSE ACK command.
 *
 * @pkt Received packet
 *
 * @returns 0 for success, -EINVAL if the ACK arrives in an invalid
 *          local state
 *
 * Completes the local half of the channel-close handshake and notifies
 * the client with SMUX_DISCONNECTED once both sides are closed.
 */
static int smux_handle_close_ack(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret;
	struct smux_lch_t *ch;
	union notifier_metadata meta_disconnected;
	unsigned long flags;

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];
	/* normal close, not a subsystem restart */
	meta_disconnected.disconnected.is_ssr = 0;

	spin_lock_irqsave(&ch->state_lock_lhb1, flags);

	if (ch->local_state == SMUX_LCH_LOCAL_CLOSING) {
		SMUX_DBG("smux: lcid %d local state 0x%x -> 0x%x\n", lcid,
				SMUX_LCH_LOCAL_CLOSING,
				SMUX_LCH_LOCAL_CLOSED);
		ch->local_state = SMUX_LCH_LOCAL_CLOSED;
		/* disconnect completes only when the remote side closed too */
		if (ch->remote_state == SMUX_LCH_REMOTE_CLOSED)
			schedule_notify(lcid, SMUX_DISCONNECTED,
				&meta_disconnected);
		ret = 0;
	} else if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
		/* ACK of the close we echoed back in loopback mode */
		SMUX_DBG("smux: Remote loopback CLOSE ACK received\n");
		ret = 0;
	} else {
		SMUX_ERR("%s: lcid %d state 0x%x close ack invalid\n",
				__func__, lcid, ch->local_state);
		ret = -EINVAL;
	}
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
	return ret;
}
1250
/**
 * Handle receive OPEN command.
 *
 * @pkt Received packet
 *
 * @returns 0 for success, -ENOMEM if the ACK packet could not be
 *          allocated (remote will retry), -EINVAL on invalid remote state
 *
 * Moves the remote side of the channel to OPENED, queues an OPEN ACK,
 * and - in remote loopback mode - also queues a simulated OPEN from our
 * side.  Power-collapse support is latched on when requested.
 */
static int smux_handle_rx_open_cmd(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret;
	struct smux_lch_t *ch;
	struct smux_pkt_t *ack_pkt;
	unsigned long flags;
	int tx_ready = 0;
	int enable_powerdown = 0;

	/* an OPEN with the ACK flag set is handled by the ACK path */
	if (pkt->hdr.flags & SMUX_CMD_OPEN_ACK)
		return smux_handle_rx_open_ack(pkt);

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];

	spin_lock_irqsave(&ch->state_lock_lhb1, flags);

	if (ch->remote_state == SMUX_LCH_REMOTE_CLOSED) {
		SMUX_DBG("smux: lcid %d remote state 0x%x -> 0x%x\n", lcid,
				SMUX_LCH_REMOTE_CLOSED,
				SMUX_LCH_REMOTE_OPENED);

		ch->remote_state = SMUX_LCH_REMOTE_OPENED;
		if (pkt->hdr.flags & SMUX_CMD_OPEN_POWER_COLLAPSE)
			enable_powerdown = 1;

		/* Send Open ACK */
		ack_pkt = smux_alloc_pkt();
		if (!ack_pkt) {
			/* exit out to allow retrying this later */
			ret = -ENOMEM;
			goto out;
		}
		ack_pkt->hdr.cmd = SMUX_CMD_OPEN_LCH;
		ack_pkt->hdr.flags = SMUX_CMD_OPEN_ACK;
		if (enable_powerdown)
			ack_pkt->hdr.flags |= SMUX_CMD_OPEN_POWER_COLLAPSE;
		ack_pkt->hdr.lcid = lcid;
		ack_pkt->hdr.payload_len = 0;
		ack_pkt->hdr.pad_len = 0;
		if (pkt->hdr.flags & SMUX_CMD_OPEN_REMOTE_LOOPBACK) {
			ch->remote_mode = SMUX_LCH_MODE_REMOTE_LOOPBACK;
			ack_pkt->hdr.flags |= SMUX_CMD_OPEN_REMOTE_LOOPBACK;
		}
		smux_tx_queue(ack_pkt, ch, 0);
		tx_ready = 1;

		if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
			/*
			 * Send an Open command to the remote side to
			 * simulate our local client doing it.
			 */
			ack_pkt = smux_alloc_pkt();
			if (ack_pkt) {
				ack_pkt->hdr.lcid = lcid;
				ack_pkt->hdr.cmd = SMUX_CMD_OPEN_LCH;
				/*
				 * NOTE(review): hdr.flags is only OR-ed here;
				 * presumably smux_alloc_pkt() zero-initializes
				 * the header - confirm.
				 */
				if (enable_powerdown)
					ack_pkt->hdr.flags |=
						SMUX_CMD_OPEN_POWER_COLLAPSE;
				ack_pkt->hdr.payload_len = 0;
				ack_pkt->hdr.pad_len = 0;
				smux_tx_queue(ack_pkt, ch, 0);
				tx_ready = 1;
			} else {
				SMUX_ERR(
					"%s: Remote loopack allocation failure\n",
					__func__);
			}
		} else if (ch->local_state == SMUX_LCH_LOCAL_OPENED) {
			/* both sides now open - tell the client */
			schedule_notify(lcid, SMUX_CONNECTED, NULL);
		}
		ret = 0;
	} else {
		SMUX_ERR("%s: lcid %d remote state 0x%x open invalid\n",
				__func__, lcid, ch->remote_state);
		ret = -EINVAL;
	}

out:
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);

	/* tx_lock_lha2 is acquired only after the channel lock is released */
	if (enable_powerdown) {
		spin_lock_irqsave(&smux.tx_lock_lha2, flags);
		if (!smux.powerdown_enabled) {
			smux.powerdown_enabled = 1;
			SMUX_DBG("smux: %s: enabling power-collapse support\n",
					__func__);
		}
		spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
	}

	if (tx_ready)
		list_channel(ch);

	return ret;
}
1355
/**
 * Handle receive CLOSE command.
 *
 * @pkt Received packet
 *
 * @returns 0 for success, -ENOMEM if the ACK packet could not be
 *          allocated (remote will retry), -EINVAL on invalid remote state
 *
 * Moves the remote side of the channel to CLOSED, queues a CLOSE ACK,
 * and - in remote loopback mode - also queues a simulated CLOSE from
 * our side.  The client gets SMUX_DISCONNECTED once both sides close.
 */
static int smux_handle_rx_close_cmd(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret;
	struct smux_lch_t *ch;
	struct smux_pkt_t *ack_pkt;
	union notifier_metadata meta_disconnected;
	unsigned long flags;
	int tx_ready = 0;

	/* a CLOSE with the ACK flag set is handled by the ACK path */
	if (pkt->hdr.flags & SMUX_CMD_CLOSE_ACK)
		return smux_handle_close_ack(pkt);

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];
	/* normal close, not a subsystem restart */
	meta_disconnected.disconnected.is_ssr = 0;

	spin_lock_irqsave(&ch->state_lock_lhb1, flags);
	if (ch->remote_state == SMUX_LCH_REMOTE_OPENED) {
		SMUX_DBG("smux: lcid %d remote state 0x%x -> 0x%x\n", lcid,
				SMUX_LCH_REMOTE_OPENED,
				SMUX_LCH_REMOTE_CLOSED);

		ack_pkt = smux_alloc_pkt();
		if (!ack_pkt) {
			/* exit out to allow retrying this later */
			ret = -ENOMEM;
			goto out;
		}
		/* state changes only after the ACK is guaranteed to queue */
		ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
		ack_pkt->hdr.cmd = SMUX_CMD_CLOSE_LCH;
		ack_pkt->hdr.flags = SMUX_CMD_CLOSE_ACK;
		ack_pkt->hdr.lcid = lcid;
		ack_pkt->hdr.payload_len = 0;
		ack_pkt->hdr.pad_len = 0;
		smux_tx_queue(ack_pkt, ch, 0);
		tx_ready = 1;

		if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
			/*
			 * Send a Close command to the remote side to simulate
			 * our local client doing it.
			 */
			ack_pkt = smux_alloc_pkt();
			if (ack_pkt) {
				ack_pkt->hdr.lcid = lcid;
				ack_pkt->hdr.cmd = SMUX_CMD_CLOSE_LCH;
				ack_pkt->hdr.flags = 0;
				ack_pkt->hdr.payload_len = 0;
				ack_pkt->hdr.pad_len = 0;
				smux_tx_queue(ack_pkt, ch, 0);
				tx_ready = 1;
			} else {
				SMUX_ERR(
					"%s: Remote loopack allocation failure\n",
					__func__);
			}
		}

		if (ch->local_state == SMUX_LCH_LOCAL_CLOSED)
			schedule_notify(lcid, SMUX_DISCONNECTED,
				&meta_disconnected);
		ret = 0;
	} else {
		SMUX_ERR("%s: lcid %d remote state 0x%x close invalid\n",
				__func__, lcid, ch->remote_state);
		ret = -EINVAL;
	}
out:
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
	if (tx_ready)
		list_channel(ch);

	return ret;
}
1438
/*
 * Handle receive DATA command.
 *
 * @pkt Received packet
 *
 * @returns 0 for success, -ENXIO on bad packet/channel id, -EINVAL on
 *          empty payload, -EIO on bad channel state, -ENOMEM on
 *          allocation failure or full retry queue
 *
 * Normal path: ask the client for an RX buffer, copy the payload in and
 * signal SMUX_READ_DONE.  If no buffer is available (or earlier packets
 * are already waiting), the packet is copied onto the channel's RX retry
 * queue and delivered later by rx_retry_work.  In remote loopback mode
 * the payload is echoed back instead of being delivered.
 */
static int smux_handle_rx_data_cmd(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret = 0;
	int do_retry = 0;
	int tx_ready = 0;
	int tmp;
	int rx_len;
	struct smux_lch_t *ch;
	union notifier_metadata metadata;
	int remote_loopback;
	struct smux_pkt_t *ack_pkt;
	unsigned long flags;

	if (!pkt || smux_assert_lch_id(pkt->hdr.lcid)) {
		ret = -ENXIO;
		goto out;
	}

	rx_len = pkt->hdr.payload_len;
	if (rx_len == 0) {
		ret = -EINVAL;
		goto out;
	}

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];
	spin_lock_irqsave(&ch->state_lock_lhb1, flags);
	remote_loopback = ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK;

	/* loopback mode accepts data even before the local side opens */
	if (ch->local_state != SMUX_LCH_LOCAL_OPENED
		&& !remote_loopback) {
		SMUX_ERR("smux: ch %d error data on local state 0x%x",
					lcid, ch->local_state);
		ret = -EIO;
		spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
		goto out;
	}

	if (ch->remote_state != SMUX_LCH_REMOTE_OPENED) {
		SMUX_ERR("smux: ch %d error data on remote state 0x%x",
					lcid, ch->remote_state);
		ret = -EIO;
		spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
		goto out;
	}

	/*
	 * If packets are already queued for retry, this one must queue
	 * behind them to preserve delivery order.
	 */
	if (!list_empty(&ch->rx_retry_queue)) {
		do_retry = 1;

		if ((ch->options & SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP) &&
			!ch->rx_flow_control_auto &&
			((ch->rx_retry_queue_cnt + 1) >= SMUX_RX_WM_HIGH)) {
			/* need to flow control RX */
			ch->rx_flow_control_auto = 1;
			tx_ready |= smux_rx_flow_control_updated(ch);
			schedule_notify(ch->lcid, SMUX_RX_RETRY_HIGH_WM_HIT,
					NULL);
		}
		if ((ch->rx_retry_queue_cnt + 1) > SMUX_RX_RETRY_MAX_PKTS) {
			/* retry queue full */
			SMUX_ERR(
				"%s: ch %d RX retry queue full; rx flow=%d\n",
				__func__, lcid, ch->rx_flow_control_auto);
			schedule_notify(lcid, SMUX_READ_FAIL, NULL);
			ret = -ENOMEM;
			spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
			goto out;
		}
	}
	/* lock dropped here; state was sampled above */
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);

	if (remote_loopback) {
		/* Echo the data back to the remote client. */
		ack_pkt = smux_alloc_pkt();
		if (ack_pkt) {
			ack_pkt->hdr.lcid = lcid;
			ack_pkt->hdr.cmd = SMUX_CMD_DATA;
			ack_pkt->hdr.flags = 0;
			ack_pkt->hdr.payload_len = pkt->hdr.payload_len;
			if (ack_pkt->hdr.payload_len) {
				smux_alloc_pkt_payload(ack_pkt);
				memcpy(ack_pkt->payload, pkt->payload,
						ack_pkt->hdr.payload_len);
			}
			ack_pkt->hdr.pad_len = pkt->hdr.pad_len;
			smux_tx_queue(ack_pkt, ch, 0);
			tx_ready = 1;
		} else {
			SMUX_ERR("%s: Remote loopack allocation failure\n",
					__func__);
		}
	} else if (!do_retry) {
		/* request buffer from client */
		metadata.read.pkt_priv = 0;
		metadata.read.buffer = 0;
		tmp = ch->get_rx_buffer(ch->priv,
				(void **)&metadata.read.pkt_priv,
				(void **)&metadata.read.buffer,
				rx_len);

		if (tmp == 0 && metadata.read.buffer) {
			/* place data into RX buffer */
			memcpy(metadata.read.buffer, pkt->payload,
					rx_len);
			metadata.read.len = rx_len;
			schedule_notify(lcid, SMUX_READ_DONE,
							&metadata);
		} else if (tmp == -EAGAIN ||
				(tmp == 0 && !metadata.read.buffer)) {
			/* buffer allocation failed - add to retry queue */
			do_retry = 1;
		} else if (tmp < 0) {
			/* any other client error is a hard read failure */
			SMUX_ERR("%s: ch %d Client RX buffer alloc failed %d\n",
					__func__, lcid, tmp);
			schedule_notify(lcid, SMUX_READ_FAIL, NULL);
			ret = -ENOMEM;
		}
	}

	if (do_retry) {
		struct smux_rx_pkt_retry *retry;

		retry = kmalloc(sizeof(struct smux_rx_pkt_retry), GFP_KERNEL);
		if (!retry) {
			SMUX_ERR("%s: retry alloc failure\n", __func__);
			ret = -ENOMEM;
			schedule_notify(lcid, SMUX_READ_FAIL, NULL);
			goto out;
		}
		INIT_LIST_HEAD(&retry->rx_retry_list);
		retry->timeout_in_ms = SMUX_RX_RETRY_MIN_MS;

		/* copy packet - @pkt is owned by the caller */
		retry->pkt = smux_alloc_pkt();
		if (!retry->pkt) {
			kfree(retry);
			SMUX_ERR("%s: pkt alloc failure\n", __func__);
			ret = -ENOMEM;
			schedule_notify(lcid, SMUX_READ_FAIL, NULL);
			goto out;
		}
		retry->pkt->hdr.lcid = lcid;
		retry->pkt->hdr.payload_len = pkt->hdr.payload_len;
		retry->pkt->hdr.pad_len = pkt->hdr.pad_len;
		if (retry->pkt->hdr.payload_len) {
			smux_alloc_pkt_payload(retry->pkt);
			memcpy(retry->pkt->payload, pkt->payload,
					retry->pkt->hdr.payload_len);
		}

		/* add to retry queue; first entry arms the retry timer */
		spin_lock_irqsave(&ch->state_lock_lhb1, flags);
		list_add_tail(&retry->rx_retry_list, &ch->rx_retry_queue);
		++ch->rx_retry_queue_cnt;
		if (ch->rx_retry_queue_cnt == 1)
			queue_delayed_work(smux_rx_wq, &ch->rx_retry_work,
				msecs_to_jiffies(retry->timeout_in_ms));
		spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
	}

	if (tx_ready)
		list_channel(ch);
out:
	return ret;
}
1612
1613/**
1614 * Handle receive byte command for testing purposes.
1615 *
1616 * @pkt Received packet
1617 *
1618 * @returns 0 for success
1619 */
1620static int smux_handle_rx_byte_cmd(struct smux_pkt_t *pkt)
1621{
1622 uint8_t lcid;
1623 int ret;
1624 struct smux_lch_t *ch;
1625 union notifier_metadata metadata;
1626 unsigned long flags;
1627
Eric Holmbergffddd4c2012-06-08 12:37:51 -06001628 if (!pkt || smux_assert_lch_id(pkt->hdr.lcid)) {
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06001629 SMUX_ERR("%s: invalid packet or channel id\n", __func__);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001630 return -ENXIO;
Eric Holmbergffddd4c2012-06-08 12:37:51 -06001631 }
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001632
1633 lcid = pkt->hdr.lcid;
1634 ch = &smux_lch[lcid];
1635 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
1636
1637 if (ch->local_state != SMUX_LCH_LOCAL_OPENED) {
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06001638 SMUX_ERR("smux: ch %d error data on local state 0x%x",
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001639 lcid, ch->local_state);
1640 ret = -EIO;
1641 goto out;
1642 }
1643
1644 if (ch->remote_state != SMUX_LCH_REMOTE_OPENED) {
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06001645 SMUX_ERR("smux: ch %d error data on remote state 0x%x",
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001646 lcid, ch->remote_state);
1647 ret = -EIO;
1648 goto out;
1649 }
1650
1651 metadata.read.pkt_priv = (void *)(int)pkt->hdr.flags;
1652 metadata.read.buffer = 0;
1653 schedule_notify(lcid, SMUX_READ_DONE, &metadata);
1654 ret = 0;
1655
1656out:
1657 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
1658 return ret;
1659}
1660
1661/**
1662 * Handle receive status command.
1663 *
1664 * @pkt Received packet
1665 *
1666 * @returns 0 for success
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001667 */
1668static int smux_handle_rx_status_cmd(struct smux_pkt_t *pkt)
1669{
1670 uint8_t lcid;
1671 int ret;
1672 struct smux_lch_t *ch;
1673 union notifier_metadata meta;
1674 unsigned long flags;
1675 int tx_ready = 0;
1676
1677 lcid = pkt->hdr.lcid;
1678 ch = &smux_lch[lcid];
1679
1680 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
1681 meta.tiocm.tiocm_old = ch->remote_tiocm;
1682 meta.tiocm.tiocm_new = pkt->hdr.flags;
1683
1684 /* update logical channel flow control */
1685 if ((meta.tiocm.tiocm_old & SMUX_CMD_STATUS_FLOW_CNTL) ^
1686 (meta.tiocm.tiocm_new & SMUX_CMD_STATUS_FLOW_CNTL)) {
1687 /* logical channel flow control changed */
1688 if (pkt->hdr.flags & SMUX_CMD_STATUS_FLOW_CNTL) {
1689 /* disabled TX */
Angshuman Sarkar98f78122012-07-24 14:50:42 +05301690 SMUX_DBG("smux: TX Flow control enabled\n");
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001691 ch->tx_flow_control = 1;
1692 } else {
1693 /* re-enable channel */
Angshuman Sarkar98f78122012-07-24 14:50:42 +05301694 SMUX_DBG("smux: TX Flow control disabled\n");
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001695 ch->tx_flow_control = 0;
1696 tx_ready = 1;
1697 }
1698 }
1699 meta.tiocm.tiocm_old = msm_smux_tiocm_get_atomic(ch);
1700 ch->remote_tiocm = pkt->hdr.flags;
1701 meta.tiocm.tiocm_new = msm_smux_tiocm_get_atomic(ch);
1702
1703 /* client notification for status change */
1704 if (IS_FULLY_OPENED(ch)) {
1705 if (meta.tiocm.tiocm_old != meta.tiocm.tiocm_new)
1706 schedule_notify(lcid, SMUX_TIOCM_UPDATE, &meta);
1707 ret = 0;
1708 }
1709 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
1710 if (tx_ready)
1711 list_channel(ch);
1712
1713 return ret;
1714}
1715
/**
 * Handle receive power command.
 *
 * @pkt Received packet
 *
 * @returns 0 for success (always; invalid states are only logged)
 *
 * Drives the power-collapse state machine under tx_lock_lha2.  A packet
 * with the ACK flag completes our own sleep request; otherwise it is a
 * sleep request from the remote side, which is ACKed, shortcut, or
 * rejected depending on the current power state.
 */
static int smux_handle_rx_power_cmd(struct smux_pkt_t *pkt)
{
	struct smux_pkt_t *ack_pkt = NULL;
	int power_down = 0;
	unsigned long flags;

	SMUX_PWR_PKT_RX(pkt);

	spin_lock_irqsave(&smux.tx_lock_lha2, flags);
	if (pkt->hdr.flags & SMUX_CMD_PWR_CTL_ACK) {
		/* local sleep request ack */
		if (smux.power_state == SMUX_PWR_TURNING_OFF)
			/* Power-down complete, turn off UART */
			power_down = 1;
		else
			SMUX_ERR("%s: sleep request ack invalid in state %d\n",
					__func__, smux.power_state);
	} else {
		/*
		 * Remote sleep request
		 *
		 * Even if we have data pending, we need to transition to the
		 * POWER_OFF state and then perform a wakeup since the remote
		 * side has requested a power-down.
		 *
		 * The state here is set to SMUX_PWR_TURNING_OFF_FLUSH and
		 * the TX thread will set the state to SMUX_PWR_TURNING_OFF
		 * when it sends the packet.
		 *
		 * If we are already powering down, then no ACK is sent.
		 */
		if (smux.power_state == SMUX_PWR_ON) {
			ack_pkt = smux_alloc_pkt();
			if (ack_pkt) {
				SMUX_PWR("smux: %s: Power %d->%d\n", __func__,
						smux.power_state,
						SMUX_PWR_TURNING_OFF_FLUSH);

				smux.power_state = SMUX_PWR_TURNING_OFF_FLUSH;

				/* send power-down ack */
				ack_pkt->hdr.cmd = SMUX_CMD_PWR_CTL;
				ack_pkt->hdr.flags = SMUX_CMD_PWR_CTL_ACK;
				ack_pkt->hdr.lcid = SMUX_BROADCAST_LCID;
				list_add_tail(&ack_pkt->list,
						&smux.power_queue);
				queue_work(smux_tx_wq, &smux_tx_work);
			}
			/* on alloc failure the request is silently dropped;
			 * the remote side is expected to retry */
		} else if (smux.power_state == SMUX_PWR_TURNING_OFF_FLUSH) {
			/* Local power-down request still in TX queue */
			SMUX_PWR("smux: %s: Power-down shortcut - no ack\n",
					__func__);
			smux.power_ctl_remote_req_received = 1;
		} else if (smux.power_state == SMUX_PWR_TURNING_OFF) {
			/*
			 * Local power-down request already sent to remote
			 * side, so this request gets treated as an ACK.
			 */
			SMUX_PWR("smux: %s: Power-down shortcut - no ack\n",
					__func__);
			power_down = 1;
		} else {
			SMUX_ERR("%s: sleep request invalid in state %d\n",
					__func__, smux.power_state);
		}
	}

	if (power_down) {
		SMUX_PWR("smux: %s: Power %d->%d\n", __func__,
				smux.power_state, SMUX_PWR_OFF_FLUSH);
		smux.power_state = SMUX_PWR_OFF_FLUSH;
		queue_work(smux_tx_wq, &smux_inactivity_work);
	}
	spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);

	return 0;
}
1800
1801/**
1802 * Handle dispatching a completed packet for receive processing.
1803 *
1804 * @pkt Packet to process
1805 *
1806 * @returns 0 for success
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001807 */
1808static int smux_dispatch_rx_pkt(struct smux_pkt_t *pkt)
1809{
Eric Holmbergf9622662012-06-13 15:55:45 -06001810 int ret = -ENXIO;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001811
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001812 switch (pkt->hdr.cmd) {
1813 case SMUX_CMD_OPEN_LCH:
Eric Holmberg4dd6e1a2012-06-26 13:46:29 -06001814 SMUX_LOG_PKT_RX(pkt);
Eric Holmbergf9622662012-06-13 15:55:45 -06001815 if (smux_assert_lch_id(pkt->hdr.lcid)) {
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06001816 SMUX_ERR("%s: invalid channel id %d\n",
Eric Holmbergf9622662012-06-13 15:55:45 -06001817 __func__, pkt->hdr.lcid);
1818 break;
1819 }
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001820 ret = smux_handle_rx_open_cmd(pkt);
1821 break;
1822
1823 case SMUX_CMD_DATA:
Eric Holmberg4dd6e1a2012-06-26 13:46:29 -06001824 SMUX_LOG_PKT_RX(pkt);
Eric Holmbergf9622662012-06-13 15:55:45 -06001825 if (smux_assert_lch_id(pkt->hdr.lcid)) {
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06001826 SMUX_ERR("%s: invalid channel id %d\n",
Eric Holmbergf9622662012-06-13 15:55:45 -06001827 __func__, pkt->hdr.lcid);
1828 break;
1829 }
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001830 ret = smux_handle_rx_data_cmd(pkt);
1831 break;
1832
1833 case SMUX_CMD_CLOSE_LCH:
Eric Holmberg4dd6e1a2012-06-26 13:46:29 -06001834 SMUX_LOG_PKT_RX(pkt);
Eric Holmbergf9622662012-06-13 15:55:45 -06001835 if (smux_assert_lch_id(pkt->hdr.lcid)) {
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06001836 SMUX_ERR("%s: invalid channel id %d\n",
Eric Holmbergf9622662012-06-13 15:55:45 -06001837 __func__, pkt->hdr.lcid);
1838 break;
1839 }
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001840 ret = smux_handle_rx_close_cmd(pkt);
1841 break;
1842
1843 case SMUX_CMD_STATUS:
Eric Holmberg4dd6e1a2012-06-26 13:46:29 -06001844 SMUX_LOG_PKT_RX(pkt);
Eric Holmbergf9622662012-06-13 15:55:45 -06001845 if (smux_assert_lch_id(pkt->hdr.lcid)) {
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06001846 SMUX_ERR("%s: invalid channel id %d\n",
Eric Holmbergf9622662012-06-13 15:55:45 -06001847 __func__, pkt->hdr.lcid);
1848 break;
1849 }
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001850 ret = smux_handle_rx_status_cmd(pkt);
1851 break;
1852
1853 case SMUX_CMD_PWR_CTL:
1854 ret = smux_handle_rx_power_cmd(pkt);
1855 break;
1856
1857 case SMUX_CMD_BYTE:
Eric Holmberg4dd6e1a2012-06-26 13:46:29 -06001858 SMUX_LOG_PKT_RX(pkt);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001859 ret = smux_handle_rx_byte_cmd(pkt);
1860 break;
1861
1862 default:
Eric Holmberg4dd6e1a2012-06-26 13:46:29 -06001863 SMUX_LOG_PKT_RX(pkt);
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06001864 SMUX_ERR("%s: command %d unknown\n", __func__, pkt->hdr.cmd);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001865 ret = -EINVAL;
1866 }
1867 return ret;
1868}
1869
1870/**
1871 * Deserializes a packet and dispatches it to the packet receive logic.
1872 *
1873 * @data Raw data for one packet
1874 * @len Length of the data
1875 *
1876 * @returns 0 for success
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001877 */
1878static int smux_deserialize(unsigned char *data, int len)
1879{
1880 struct smux_pkt_t recv;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001881
1882 smux_init_pkt(&recv);
1883
1884 /*
1885 * It may be possible to optimize this to not use the
1886 * temporary buffer.
1887 */
1888 memcpy(&recv.hdr, data, sizeof(struct smux_hdr_t));
1889
1890 if (recv.hdr.magic != SMUX_MAGIC) {
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06001891 SMUX_ERR("%s: invalid header magic\n", __func__);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001892 return -EINVAL;
1893 }
1894
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001895 if (recv.hdr.payload_len)
1896 recv.payload = data + sizeof(struct smux_hdr_t);
1897
1898 return smux_dispatch_rx_pkt(&recv);
1899}
1900
/**
 * Handle wakeup request byte.
 *
 * Called from the RX path when a SMUX_WAKEUP_REQ byte is received.
 * If the link is off (or we are concurrently trying to wake it), the
 * power state is forced to ON, the TX machinery is kicked, the
 * inactivity timer is armed, and the request is acknowledged with a
 * SMUX_WAKEUP_ACK byte.  Requests arriving while already powering down
 * are logged and ignored as stale.
 */
static void smux_handle_wakeup_req(void)
{
	unsigned long flags;

	spin_lock_irqsave(&smux.tx_lock_lha2, flags);
	if (smux.power_state == SMUX_PWR_OFF
		|| smux.power_state == SMUX_PWR_TURNING_ON) {
		/* wakeup system */
		SMUX_PWR("smux: %s: Power %d->%d\n", __func__,
				smux.power_state, SMUX_PWR_ON);
		smux.remote_initiated_wakeup_count++;
		smux.power_state = SMUX_PWR_ON;
		/* restart TX work and (re)arm the inactivity timeout */
		queue_work(smux_tx_wq, &smux_wakeup_work);
		queue_work(smux_tx_wq, &smux_tx_work);
		queue_delayed_work(smux_tx_wq, &smux_delayed_inactivity_work,
			msecs_to_jiffies(SMUX_INACTIVITY_TIMEOUT_MS));
		smux_send_byte(SMUX_WAKEUP_ACK);
	} else if (smux.power_state == SMUX_PWR_ON) {
		/* already awake - just acknowledge */
		smux_send_byte(SMUX_WAKEUP_ACK);
	} else {
		/* stale wakeup request from previous wakeup */
		SMUX_PWR("smux: %s: stale Wakeup REQ in state %d\n",
				__func__, smux.power_state);
	}
	spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
}
1930
1931/**
1932 * Handle wakeup request ack.
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001933 */
1934static void smux_handle_wakeup_ack(void)
1935{
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001936 unsigned long flags;
1937
1938 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001939 if (smux.power_state == SMUX_PWR_TURNING_ON) {
1940 /* received response to wakeup request */
Angshuman Sarkar98f78122012-07-24 14:50:42 +05301941 SMUX_PWR("smux: %s: Power %d->%d\n", __func__,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001942 smux.power_state, SMUX_PWR_ON);
1943 smux.power_state = SMUX_PWR_ON;
1944 queue_work(smux_tx_wq, &smux_tx_work);
1945 queue_delayed_work(smux_tx_wq, &smux_delayed_inactivity_work,
1946 msecs_to_jiffies(SMUX_INACTIVITY_TIMEOUT_MS));
1947
1948 } else if (smux.power_state != SMUX_PWR_ON) {
1949 /* invalid message */
Angshuman Sarkar98f78122012-07-24 14:50:42 +05301950 SMUX_PWR("smux: %s: stale Wakeup REQ ACK in state %d\n",
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001951 __func__, smux.power_state);
1952 }
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001953 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001954}
1955
/**
 * RX State machine - IDLE state processing.
 *
 * @data New RX data to process
 * @len Length of the data
 * @used Return value of length processed
 * @flag Error flag - TTY_NORMAL 0 for no failure
 *
 * In IDLE, bytes are scanned one at a time for single-byte power
 * commands (wakeup request/ack) or the first packet magic byte, which
 * transitions the state machine to SMUX_RX_MAGIC.
 */
static void smux_rx_handle_idle(const unsigned char *data,
		int len, int *used, int flag)
{
	int i;

	if (flag) {
		/* TTY reported a receive error on this byte - skip it */
		if (smux_byte_loopback)
			smux_receive_byte(SMUX_UT_ECHO_ACK_FAIL,
					smux_byte_loopback);
		SMUX_ERR("%s: TTY error 0x%x - ignoring\n", __func__, flag);
		++*used;
		return;
	}

	for (i = *used; i < len && smux.rx_state == SMUX_RX_IDLE; i++) {
		switch (data[i]) {
		case SMUX_MAGIC_WORD1:
			/* start of a framed packet */
			smux.rx_state = SMUX_RX_MAGIC;
			break;
		case SMUX_WAKEUP_REQ:
			SMUX_PWR("smux: smux: RX Wakeup REQ\n");
			/* any byte from the remote proves it is alive */
			if (unlikely(!smux.remote_is_alive)) {
				mutex_lock(&smux.mutex_lha0);
				smux.remote_is_alive = 1;
				mutex_unlock(&smux.mutex_lha0);
			}
			smux_handle_wakeup_req();
			break;
		case SMUX_WAKEUP_ACK:
			SMUX_PWR("smux: smux: RX Wakeup ACK\n");
			if (unlikely(!smux.remote_is_alive)) {
				mutex_lock(&smux.mutex_lha0);
				smux.remote_is_alive = 1;
				mutex_unlock(&smux.mutex_lha0);
			}
			smux_handle_wakeup_ack();
			break;
		default:
			/* unexpected character */
			if (smux_byte_loopback && data[i] == SMUX_UT_ECHO_REQ)
				smux_receive_byte(SMUX_UT_ECHO_ACK_OK,
						smux_byte_loopback);
			SMUX_ERR("%s: parse error 0x%02x - ignoring\n",
					__func__, (unsigned)data[i]);
			break;
		}
	}

	*used = i;
}
2014
2015/**
2016 * RX State machine - Header Magic state processing.
2017 *
2018 * @data New RX data to process
2019 * @len Length of the data
2020 * @used Return value of length processed
2021 * @flag Error flag - TTY_NORMAL 0 for no failure
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002022 */
2023static void smux_rx_handle_magic(const unsigned char *data,
2024 int len, int *used, int flag)
2025{
2026 int i;
2027
2028 if (flag) {
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06002029 SMUX_ERR("%s: TTY RX error %d\n", __func__, flag);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002030 smux_enter_reset();
2031 smux.rx_state = SMUX_RX_FAILURE;
2032 ++*used;
2033 return;
2034 }
2035
2036 for (i = *used; i < len && smux.rx_state == SMUX_RX_MAGIC; i++) {
2037 /* wait for completion of the magic */
2038 if (data[i] == SMUX_MAGIC_WORD2) {
2039 smux.recv_len = 0;
2040 smux.recv_buf[smux.recv_len++] = SMUX_MAGIC_WORD1;
2041 smux.recv_buf[smux.recv_len++] = SMUX_MAGIC_WORD2;
2042 smux.rx_state = SMUX_RX_HDR;
2043 } else {
2044 /* unexpected / trash character */
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06002045 SMUX_ERR(
2046 "%s: rx parse error for char %c; *used=%d, len=%d\n",
2047 __func__, data[i], *used, len);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002048 smux.rx_state = SMUX_RX_IDLE;
2049 }
2050 }
2051
2052 *used = i;
2053}
2054
2055/**
2056 * RX State machine - Packet Header state processing.
2057 *
2058 * @data New RX data to process
2059 * @len Length of the data
2060 * @used Return value of length processed
2061 * @flag Error flag - TTY_NORMAL 0 for no failure
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002062 */
2063static void smux_rx_handle_hdr(const unsigned char *data,
2064 int len, int *used, int flag)
2065{
2066 int i;
2067 struct smux_hdr_t *hdr;
2068
2069 if (flag) {
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06002070 SMUX_ERR("%s: TTY RX error %d\n", __func__, flag);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002071 smux_enter_reset();
2072 smux.rx_state = SMUX_RX_FAILURE;
2073 ++*used;
2074 return;
2075 }
2076
2077 for (i = *used; i < len && smux.rx_state == SMUX_RX_HDR; i++) {
2078 smux.recv_buf[smux.recv_len++] = data[i];
2079
2080 if (smux.recv_len == sizeof(struct smux_hdr_t)) {
2081 /* complete header received */
2082 hdr = (struct smux_hdr_t *)smux.recv_buf;
2083 smux.pkt_remain = hdr->payload_len + hdr->pad_len;
2084 smux.rx_state = SMUX_RX_PAYLOAD;
2085 }
2086 }
2087 *used = i;
2088}
2089
/**
 * RX State machine - Packet Payload state processing.
 *
 * @data New RX data to process
 * @len Length of the data
 * @used Return value of length processed
 * @flag Error flag - TTY_NORMAL 0 for no failure
 *
 * Copies up to smux.pkt_remain bytes into smux.recv_buf; once the full
 * payload (including pad) has arrived, the packet is deserialized and
 * the machine returns to SMUX_RX_IDLE.
 *
 * NOTE(review): pkt_remain comes from the remote-supplied
 * payload_len/pad_len header fields (see smux_rx_handle_hdr); this
 * function assumes recv_buf is sized for the largest possible packet -
 * verify the header fields cannot index past the end of recv_buf.
 */
static void smux_rx_handle_pkt_payload(const unsigned char *data,
		int len, int *used, int flag)
{
	int remaining;

	if (flag) {
		SMUX_ERR("%s: TTY RX error %d\n", __func__, flag);
		smux_enter_reset();
		smux.rx_state = SMUX_RX_FAILURE;
		++*used;
		return;
	}

	/* copy data into rx buffer */
	if (smux.pkt_remain < (len - *used))
		remaining = smux.pkt_remain;
	else
		remaining = len - *used;

	memcpy(&smux.recv_buf[smux.recv_len], &data[*used], remaining);
	smux.recv_len += remaining;
	smux.pkt_remain -= remaining;
	*used += remaining;

	if (smux.pkt_remain == 0) {
		/* complete packet received */
		smux_deserialize(smux.recv_buf, smux.recv_len);
		smux.rx_state = SMUX_RX_IDLE;
	}
}
2128
/**
 * Feed data to the receive state machine.
 *
 * @data Pointer to data block
 * @len Length of data
 * @flag TTY_NORMAL (0) for no error, otherwise TTY Error Flag
 *
 * Processing is handed off to the RX workqueue so all state-machine
 * handling runs on a single thread.  The work item and its completion
 * live on this stack frame, so this call must block until the worker
 * signals completion before returning.
 */
void smux_rx_state_machine(const unsigned char *data,
		int len, int flag)
{
	struct smux_rx_worker_data work;

	work.data = data;
	work.len = len;
	work.flag = flag;
	INIT_WORK_ONSTACK(&work.work, smux_rx_worker);
	work.work_complete = COMPLETION_INITIALIZER_ONSTACK(work.work_complete);

	queue_work(smux_rx_wq, &work.work);
	wait_for_completion(&work.work_complete);
}
2150
2151/**
Eric Holmbergf3f34592012-08-28 13:51:14 -06002152 * Returns true if the remote side has acknowledged a wakeup
2153 * request previously, so we know that the link is alive and active.
2154 *
2155 * @returns true for is alive, false for not alive
2156 */
2157bool smux_remote_is_active(void)
2158{
2159 bool is_active = false;
2160
2161 mutex_lock(&smux.mutex_lha0);
2162 if (smux.remote_is_alive)
2163 is_active = true;
2164 mutex_unlock(&smux.mutex_lha0);
2165
2166 return is_active;
2167}
2168
2169/**
Eric Holmberg8570d442013-05-21 18:04:50 -06002170 * Sends a delay command to the remote side.
2171 *
2172 * @ms: Time in milliseconds for the remote side to delay
2173 *
2174 * This command defines the delay that the remote side will use
2175 * to slow the response time for DATA commands.
2176 */
2177void smux_set_loopback_data_reply_delay(uint32_t ms)
2178{
2179 struct smux_lch_t *ch = &smux_lch[SMUX_TEST_LCID];
2180 struct smux_pkt_t *pkt;
2181
2182 pkt = smux_alloc_pkt();
2183 if (!pkt) {
2184 pr_err("%s: unable to allocate packet\n", __func__);
2185 return;
2186 }
2187
2188 pkt->hdr.lcid = ch->lcid;
2189 pkt->hdr.cmd = SMUX_CMD_DELAY;
2190 pkt->hdr.flags = 0;
2191 pkt->hdr.payload_len = sizeof(uint32_t);
2192 pkt->hdr.pad_len = 0;
2193
2194 if (smux_alloc_pkt_payload(pkt)) {
2195 pr_err("%s: unable to allocate payload\n", __func__);
2196 smux_free_pkt(pkt);
2197 return;
2198 }
2199 memcpy(pkt->payload, &ms, sizeof(uint32_t));
2200
2201 smux_tx_queue(pkt, ch, 1);
2202}
2203
2204/**
2205 * Retrieve wakeup counts.
2206 *
2207 * @local_cnt: Pointer to local wakeup count
2208 * @remote_cnt: Pointer to remote wakeup count
2209 */
2210void smux_get_wakeup_counts(int *local_cnt, int *remote_cnt)
2211{
2212 unsigned long flags;
2213
2214 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
2215
2216 if (local_cnt)
2217 *local_cnt = smux.local_initiated_wakeup_count;
2218
2219 if (remote_cnt)
2220 *remote_cnt = smux.remote_initiated_wakeup_count;
2221
2222 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2223}
2224
/**
 * Add channel to transmit-ready list and trigger transmit worker.
 *
 * @ch Channel to add
 *
 * Lock ordering: tx_lock_lha2 is taken before the per-channel
 * tx_lock_lhb2.  The channel is only appended if it is not already on
 * the ready list (list_empty on its own list node).
 */
static void list_channel(struct smux_lch_t *ch)
{
	unsigned long flags;

	SMUX_DBG("smux: %s: listing channel %d\n",
			__func__, ch->lcid);

	spin_lock_irqsave(&smux.tx_lock_lha2, flags);
	spin_lock(&ch->tx_lock_lhb2);
	smux.tx_activity_flag = 1;
	if (list_empty(&ch->tx_ready_list))
		list_add_tail(&ch->tx_ready_list, &smux.lch_tx_ready_list);
	spin_unlock(&ch->tx_lock_lhb2);
	spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);

	queue_work(smux_tx_wq, &smux_tx_work);
}
2247
2248/**
2249 * Transmit packet on correct transport and then perform client
2250 * notification.
2251 *
2252 * @ch Channel to transmit on
2253 * @pkt Packet to transmit
2254 */
2255static void smux_tx_pkt(struct smux_lch_t *ch, struct smux_pkt_t *pkt)
2256{
2257 union notifier_metadata meta_write;
2258 int ret;
2259
2260 if (ch && pkt) {
2261 SMUX_LOG_PKT_TX(pkt);
2262 if (ch->local_mode == SMUX_LCH_MODE_LOCAL_LOOPBACK)
2263 ret = smux_tx_loopback(pkt);
2264 else
2265 ret = smux_tx_tty(pkt);
2266
2267 if (pkt->hdr.cmd == SMUX_CMD_DATA) {
2268 /* notify write-done */
2269 meta_write.write.pkt_priv = pkt->priv;
2270 meta_write.write.buffer = pkt->payload;
2271 meta_write.write.len = pkt->hdr.payload_len;
2272 if (ret >= 0) {
Angshuman Sarkar98f78122012-07-24 14:50:42 +05302273 SMUX_DBG("smux: %s: PKT write done", __func__);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002274 schedule_notify(ch->lcid, SMUX_WRITE_DONE,
2275 &meta_write);
2276 } else {
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06002277 SMUX_ERR("%s: failed to write pkt %d\n",
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002278 __func__, ret);
2279 schedule_notify(ch->lcid, SMUX_WRITE_FAIL,
2280 &meta_write);
2281 }
2282 }
2283 }
2284}
2285
2286/**
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002287 * Flush pending TTY TX data.
2288 */
2289static void smux_flush_tty(void)
2290{
Eric Holmberg92a67df2012-06-25 13:56:24 -06002291 mutex_lock(&smux.mutex_lha0);
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002292 if (!smux.tty) {
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06002293 SMUX_ERR("%s: ldisc not loaded\n", __func__);
Eric Holmberg92a67df2012-06-25 13:56:24 -06002294 mutex_unlock(&smux.mutex_lha0);
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002295 return;
2296 }
2297
2298 tty_wait_until_sent(smux.tty,
2299 msecs_to_jiffies(TTY_BUFFER_FULL_WAIT_MS));
2300
2301 if (tty_chars_in_buffer(smux.tty) > 0)
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06002302 SMUX_ERR("%s: unable to flush UART queue\n", __func__);
Eric Holmberg92a67df2012-06-25 13:56:24 -06002303
2304 mutex_unlock(&smux.mutex_lha0);
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002305}
2306
/**
 * Purge TX queue for logical channel.
 *
 * @ch Logical channel pointer
 * @is_ssr 1 = this is a subsystem restart purge
 *
 * Must be called with the following spinlocks locked:
 *  state_lock_lhb1
 *  tx_lock_lhb2
 *
 * State packets (open/close ACKs, and close commands while no
 * disconnect is pending) are normally retained so handshakes complete;
 * during SSR (is_ssr) everything is dropped.  Queued DATA packets are
 * failed back to the client via SMUX_WRITE_FAIL.
 */
static void smux_purge_ch_tx_queue(struct smux_lch_t *ch, int is_ssr)
{
	struct smux_pkt_t *pkt;
	int send_disconnect = 0;
	struct smux_pkt_t *pkt_tmp;
	int is_state_pkt;

	list_for_each_entry_safe(pkt, pkt_tmp, &ch->tx_queue, list) {
		is_state_pkt = 0;
		if (pkt->hdr.cmd == SMUX_CMD_OPEN_LCH) {
			if (pkt->hdr.flags & SMUX_CMD_OPEN_ACK) {
				/* Open ACK must still be sent */
				is_state_pkt = 1;
			} else {
				/* Open never sent -- force to closed state */
				ch->local_state = SMUX_LCH_LOCAL_CLOSED;
				send_disconnect = 1;
			}
		} else if (pkt->hdr.cmd == SMUX_CMD_CLOSE_LCH) {
			/* keep close ACKs and, if no disconnect is being
			 * forced, keep the close command itself */
			if (pkt->hdr.flags & SMUX_CMD_CLOSE_ACK)
				is_state_pkt = 1;
			if (!send_disconnect)
				is_state_pkt = 1;
		} else if (pkt->hdr.cmd == SMUX_CMD_DATA) {
			/* Notify client of failed write */
			union notifier_metadata meta_write;

			meta_write.write.pkt_priv = pkt->priv;
			meta_write.write.buffer = pkt->payload;
			meta_write.write.len = pkt->hdr.payload_len;
			schedule_notify(ch->lcid, SMUX_WRITE_FAIL, &meta_write);
		}

		/* drop everything except retained state packets;
		 * SSR drops unconditionally */
		if (!is_state_pkt || is_ssr) {
			list_del(&pkt->list);
			smux_free_pkt(pkt);
		}
	}

	if (send_disconnect) {
		union notifier_metadata meta_disconnected;

		meta_disconnected.disconnected.is_ssr = smux.in_reset;
		schedule_notify(ch->lcid, SMUX_DISCONNECTED,
			&meta_disconnected);
	}
}
2364
2365/**
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002366 * Power-up the UART.
Eric Holmberg92a67df2012-06-25 13:56:24 -06002367 *
2368 * Must be called with smux.mutex_lha0 already locked.
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002369 */
Eric Holmberg92a67df2012-06-25 13:56:24 -06002370static void smux_uart_power_on_atomic(void)
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002371{
2372 struct uart_state *state;
2373
2374 if (!smux.tty || !smux.tty->driver_data) {
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06002375 SMUX_ERR("%s: unable to find UART port for tty %p\n",
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002376 __func__, smux.tty);
2377 return;
2378 }
2379 state = smux.tty->driver_data;
2380 msm_hs_request_clock_on(state->uart_port);
2381}
2382
/**
 * Power-up the UART.
 *
 * Locking wrapper: acquires mutex_lha0 around
 * smux_uart_power_on_atomic().
 */
static void smux_uart_power_on(void)
{
	mutex_lock(&smux.mutex_lha0);
	smux_uart_power_on_atomic();
	mutex_unlock(&smux.mutex_lha0);
}
2392
2393/**
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002394 * Power down the UART.
Eric Holmberg06011322012-07-06 18:17:03 -06002395 *
2396 * Must be called with mutex_lha0 locked.
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002397 */
Eric Holmberg06011322012-07-06 18:17:03 -06002398static void smux_uart_power_off_atomic(void)
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002399{
2400 struct uart_state *state;
2401
2402 if (!smux.tty || !smux.tty->driver_data) {
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06002403 SMUX_ERR("%s: unable to find UART port for tty %p\n",
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002404 __func__, smux.tty);
Eric Holmberg92a67df2012-06-25 13:56:24 -06002405 mutex_unlock(&smux.mutex_lha0);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002406 return;
2407 }
2408 state = smux.tty->driver_data;
2409 msm_hs_request_clock_off(state->uart_port);
Eric Holmberg06011322012-07-06 18:17:03 -06002410}
2411
/**
 * Power down the UART.
 *
 * Locking wrapper: acquires mutex_lha0 around
 * smux_uart_power_off_atomic() and releases it afterwards.
 */
static void smux_uart_power_off(void)
{
	mutex_lock(&smux.mutex_lha0);
	smux_uart_power_off_atomic();
	mutex_unlock(&smux.mutex_lha0);
}
2421
/**
 * TX Wakeup Worker
 *
 * @work Not used
 *
 * Do an exponential back-off wakeup sequence with a maximum period
 * of approximately 1 second (1 << 20 microseconds).
 *
 * Behavior by power state:
 *  - SMUX_PWR_ON: wakeup finished; reset the back-off and kick TX.
 *  - SMUX_PWR_TURNING_ON: send another SMUX_WAKEUP_REQ and reschedule
 *    with doubled delay (busy-wait below SMUX_WAKEUP_DELAY_MIN,
 *    delayed work above it).
 *  - anything else: wakeup was aborted; reset back-off and cancel
 *    the retry.
 */
static void smux_wakeup_worker(struct work_struct *work)
{
	unsigned long flags;
	unsigned wakeup_delay;

	if (smux.in_reset)
		return;

	spin_lock_irqsave(&smux.tx_lock_lha2, flags);
	if (smux.power_state == SMUX_PWR_ON) {
		/* wakeup complete */
		smux.pwr_wakeup_delay_us = 1;
		spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
		SMUX_DBG("smux: %s: wakeup complete\n", __func__);

		/*
		 * Cancel any pending retry. This avoids a race condition with
		 * a new power-up request because:
		 * 1) this worker doesn't modify the state
		 * 2) this worker is processed on the same single-threaded
		 *    workqueue as new TX wakeup requests
		 */
		cancel_delayed_work(&smux_wakeup_delayed_work);
		queue_work(smux_tx_wq, &smux_tx_work);
	} else if (smux.power_state == SMUX_PWR_TURNING_ON) {
		/* retry wakeup with exponentially growing delay */
		wakeup_delay = smux.pwr_wakeup_delay_us;
		smux.pwr_wakeup_delay_us <<= 1;
		if (smux.pwr_wakeup_delay_us > SMUX_WAKEUP_DELAY_MAX)
			smux.pwr_wakeup_delay_us =
				SMUX_WAKEUP_DELAY_MAX;

		spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
		SMUX_PWR("smux: %s: triggering wakeup\n", __func__);
		smux_send_byte(SMUX_WAKEUP_REQ);

		if (wakeup_delay < SMUX_WAKEUP_DELAY_MIN) {
			/* short delay: sleep inline and requeue immediately */
			SMUX_DBG("smux: %s: sleeping for %u us\n", __func__,
					wakeup_delay);
			usleep_range(wakeup_delay, 2*wakeup_delay);
			queue_work(smux_tx_wq, &smux_wakeup_work);
		} else {
			/* schedule delayed work */
			SMUX_DBG(
			"smux: %s: scheduling delayed wakeup in %u ms\n",
					__func__, wakeup_delay / 1000);
			queue_delayed_work(smux_tx_wq,
					&smux_wakeup_delayed_work,
					msecs_to_jiffies(wakeup_delay / 1000));
		}
	} else {
		/* wakeup aborted */
		smux.pwr_wakeup_delay_us = 1;
		spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
		SMUX_PWR("smux: %s: wakeup aborted\n", __func__);
		cancel_delayed_work(&smux_wakeup_delayed_work);
	}
}
2488
2489
/**
 * Inactivity timeout worker. Periodically scheduled when link is active.
 * When it detects inactivity, it will power-down the UART link.
 *
 * @work Work structure (not used)
 *
 * Lock ordering: rx_lock_lha1 is taken before tx_lock_lha2.  Both are
 * dropped before calling smux_flush_tty()/smux_uart_power_off(), which
 * sleep.
 */
static void smux_inactivity_worker(struct work_struct *work)
{
	struct smux_pkt_t *pkt;
	unsigned long flags;

	if (smux.in_reset)
		return;

	spin_lock_irqsave(&smux.rx_lock_lha1, flags);
	spin_lock(&smux.tx_lock_lha2);

	if (!smux.tx_activity_flag && !smux.rx_activity_flag) {
		/* no activity */
		if (smux.powerdown_enabled) {
			if (smux.power_state == SMUX_PWR_ON) {
				/* start power-down sequence */
				pkt = smux_alloc_pkt();
				if (pkt) {
					SMUX_PWR(
					"smux: %s: Power %d->%d\n", __func__,
						smux.power_state,
						SMUX_PWR_TURNING_OFF_FLUSH);
					smux.power_state =
						SMUX_PWR_TURNING_OFF_FLUSH;

					/* send power-down request */
					pkt->hdr.cmd = SMUX_CMD_PWR_CTL;
					pkt->hdr.flags = 0;
					pkt->hdr.lcid = SMUX_BROADCAST_LCID;
					list_add_tail(&pkt->list,
							&smux.power_queue);
					queue_work(smux_tx_wq, &smux_tx_work);
				} else {
					SMUX_ERR("%s: packet alloc failed\n",
							__func__);
				}
			}
		}
	}
	/* clear activity flags so the next interval starts fresh */
	smux.tx_activity_flag = 0;
	smux.rx_activity_flag = 0;

	if (smux.power_state == SMUX_PWR_OFF_FLUSH) {
		/* ready to power-down the UART */
		SMUX_PWR("smux: %s: Power %d->%d\n", __func__,
				smux.power_state, SMUX_PWR_OFF);
		smux.power_state = SMUX_PWR_OFF;

		/* if data is pending, schedule a new wakeup */
		if (!list_empty(&smux.lch_tx_ready_list) ||
			!list_empty(&smux.power_queue))
			queue_work(smux_tx_wq, &smux_tx_work);

		spin_unlock(&smux.tx_lock_lha2);
		spin_unlock_irqrestore(&smux.rx_lock_lha1, flags);

		/* flush UART output queue and power down */
		smux_flush_tty();
		smux_uart_power_off();
	} else {
		spin_unlock(&smux.tx_lock_lha2);
		spin_unlock_irqrestore(&smux.rx_lock_lha1, flags);
	}

	/* reschedule inactivity worker */
	if (smux.power_state != SMUX_PWR_OFF)
		queue_delayed_work(smux_tx_wq, &smux_delayed_inactivity_work,
			msecs_to_jiffies(SMUX_INACTIVITY_TIMEOUT_MS));
}
2565
2566/**
Eric Holmbergb8435c82012-06-05 14:51:29 -06002567 * Remove RX retry packet from channel and free it.
2568 *
Eric Holmbergb8435c82012-06-05 14:51:29 -06002569 * @ch Channel for retry packet
2570 * @retry Retry packet to remove
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002571 *
2572 * @returns 1 if flow control updated; 0 otherwise
2573 *
2574 * Must be called with state_lock_lhb1 locked.
Eric Holmbergb8435c82012-06-05 14:51:29 -06002575 */
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002576int smux_remove_rx_retry(struct smux_lch_t *ch,
Eric Holmbergb8435c82012-06-05 14:51:29 -06002577 struct smux_rx_pkt_retry *retry)
2578{
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002579 int tx_ready = 0;
2580
Eric Holmbergb8435c82012-06-05 14:51:29 -06002581 list_del(&retry->rx_retry_list);
2582 --ch->rx_retry_queue_cnt;
2583 smux_free_pkt(retry->pkt);
2584 kfree(retry);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002585
2586 if ((ch->options & SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP) &&
2587 (ch->rx_retry_queue_cnt <= SMUX_RX_WM_LOW) &&
2588 ch->rx_flow_control_auto) {
2589 ch->rx_flow_control_auto = 0;
2590 smux_rx_flow_control_updated(ch);
2591 schedule_notify(ch->lcid, SMUX_RX_RETRY_LOW_WM_HIT, NULL);
2592 tx_ready = 1;
2593 }
2594 return tx_ready;
Eric Holmbergb8435c82012-06-05 14:51:29 -06002595}
2596
/**
 * RX worker handles all receive operations.
 *
 * @work Work structure contained in struct smux_rx_worker_data
 *
 * Drives the RX state machine over the buffer handed off by
 * smux_rx_state_machine(); loops until every byte is consumed and the
 * state has stabilized, then signals the waiting caller via the
 * on-stack completion.
 */
static void smux_rx_worker(struct work_struct *work)
{
	unsigned long flags;
	int used;
	int initial_rx_state;
	struct smux_rx_worker_data *w;
	const unsigned char *data;
	int len;
	int flag;

	w = container_of(work, struct smux_rx_worker_data, work);
	data = w->data;
	len = w->len;
	flag = w->flag;

	/* mark RX activity so the inactivity worker keeps the link up */
	spin_lock_irqsave(&smux.rx_lock_lha1, flags);
	smux.rx_activity_flag = 1;
	spin_unlock_irqrestore(&smux.rx_lock_lha1, flags);

	SMUX_DBG("smux: %s: %p, len=%d, flag=%d\n", __func__, data, len, flag);
	used = 0;
	do {
		if (smux.in_reset) {
			SMUX_DBG("smux: %s: abort RX due to reset\n", __func__);
			smux.rx_state = SMUX_RX_IDLE;
			break;
		}

		SMUX_DBG("smux: %s: state %d; %d of %d\n",
				__func__, smux.rx_state, used, len);
		initial_rx_state = smux.rx_state;

		switch (smux.rx_state) {
		case SMUX_RX_IDLE:
			smux_rx_handle_idle(data, len, &used, flag);
			break;
		case SMUX_RX_MAGIC:
			smux_rx_handle_magic(data, len, &used, flag);
			break;
		case SMUX_RX_HDR:
			smux_rx_handle_hdr(data, len, &used, flag);
			break;
		case SMUX_RX_PAYLOAD:
			smux_rx_handle_pkt_payload(data, len, &used, flag);
			break;
		default:
			SMUX_DBG("smux: %s: invalid state %d\n",
					__func__, smux.rx_state);
			smux.rx_state = SMUX_RX_IDLE;
			break;
		}
	/* continue while bytes remain or a handler changed the state */
	} while (used < len || smux.rx_state != initial_rx_state);

	complete(&w->work_complete);
}
2657
/**
 * RX Retry worker handles retrying get_rx_buffer calls that previously failed
 * because the client was not ready (-EAGAIN).
 *
 * Processes the head of ch->rx_retry_queue: on success copies the payload to
 * the client buffer and notifies SMUX_READ_DONE; on repeated -EAGAIN doubles
 * the retry timeout until SMUX_RX_RETRY_MAX_MS, then fails the read; any
 * other client error drops the packet with SMUX_READ_FAIL.
 *
 * @work Work structure contained in smux_lch_t structure
 */
static void smux_rx_retry_worker(struct work_struct *work)
{
	struct smux_lch_t *ch;
	struct smux_rx_pkt_retry *retry;
	union notifier_metadata metadata;
	int tmp;			/* get_rx_buffer() return code */
	unsigned long flags;
	int immediate_retry = 0;	/* reschedule with zero delay */
	int tx_ready = 0;		/* flow-control update queued a pkt */

	ch = container_of(work, struct smux_lch_t, rx_retry_work.work);

	/* get next retry packet */
	spin_lock_irqsave(&ch->state_lock_lhb1, flags);
	if ((ch->local_state != SMUX_LCH_LOCAL_OPENED) || smux.in_reset) {
		/* port has been closed - remove all retries */
		while (!list_empty(&ch->rx_retry_queue)) {
			retry = list_first_entry(&ch->rx_retry_queue,
						struct smux_rx_pkt_retry,
						rx_retry_list);
			(void)smux_remove_rx_retry(ch, retry);
		}
	}

	if (list_empty(&ch->rx_retry_queue)) {
		SMUX_DBG("smux: %s: retry list empty for channel %d\n",
				__func__, ch->lcid);
		spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
		return;
	}
	retry = list_first_entry(&ch->rx_retry_queue,
					struct smux_rx_pkt_retry,
					rx_retry_list);
	/* lock dropped before calling into the client's get_rx_buffer */
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);

	SMUX_DBG("smux: %s: ch %d retrying rx pkt %p\n",
			__func__, ch->lcid, retry);
	metadata.read.pkt_priv = 0;
	metadata.read.buffer = 0;
	tmp = ch->get_rx_buffer(ch->priv,
			(void **)&metadata.read.pkt_priv,
			(void **)&metadata.read.buffer,
			retry->pkt->hdr.payload_len);
	if (tmp == 0 && metadata.read.buffer) {
		/* have valid RX buffer - deliver the payload */

		memcpy(metadata.read.buffer, retry->pkt->payload,
						retry->pkt->hdr.payload_len);
		metadata.read.len = retry->pkt->hdr.payload_len;

		spin_lock_irqsave(&ch->state_lock_lhb1, flags);
		tx_ready = smux_remove_rx_retry(ch, retry);
		spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
		schedule_notify(ch->lcid, SMUX_READ_DONE, &metadata);
		if (tx_ready)
			list_channel(ch);

		/* client is responsive again - drain the queue quickly */
		immediate_retry = 1;
	} else if (tmp == -EAGAIN ||
			(tmp == 0 && !metadata.read.buffer)) {
		/* retry again with exponential backoff */
		retry->timeout_in_ms <<= 1;
		if (retry->timeout_in_ms > SMUX_RX_RETRY_MAX_MS) {
			/* timed out - give up on this packet */
			SMUX_ERR("%s: ch %d RX retry client timeout\n",
					__func__, ch->lcid);
			spin_lock_irqsave(&ch->state_lock_lhb1, flags);
			tx_ready = smux_remove_rx_retry(ch, retry);
			spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
			schedule_notify(ch->lcid, SMUX_READ_FAIL, NULL);
			if (tx_ready)
				list_channel(ch);
		}
	} else {
		/* client error - drop packet */
		SMUX_ERR("%s: ch %d RX retry client failed (%d)\n",
				__func__, ch->lcid, tmp);
		spin_lock_irqsave(&ch->state_lock_lhb1, flags);
		tx_ready = smux_remove_rx_retry(ch, retry);
		spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
		schedule_notify(ch->lcid, SMUX_READ_FAIL, NULL);
		if (tx_ready)
			list_channel(ch);
	}

	/* schedule next retry using the (possibly new) head entry's timeout */
	spin_lock_irqsave(&ch->state_lock_lhb1, flags);
	if (!list_empty(&ch->rx_retry_queue)) {
		retry = list_first_entry(&ch->rx_retry_queue,
						struct smux_rx_pkt_retry,
						rx_retry_list);

		if (immediate_retry)
			queue_delayed_work(smux_rx_wq, &ch->rx_retry_work, 0);
		else
			queue_delayed_work(smux_rx_wq, &ch->rx_retry_work,
					msecs_to_jiffies(retry->timeout_in_ms));
	}
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
}
2764
/**
 * Transmit worker handles serializing and transmitting packets onto the
 * underlying transport.
 *
 * @work Work structure (not used)
 */
static void smux_tx_worker(struct work_struct *work)
{
	struct smux_pkt_t *pkt;
	struct smux_lch_t *ch;
	unsigned low_wm_notif;	/* notify SMUX_LOW_WM_HIT after unlocking */
	unsigned lcid;		/* channel id captured while locked */
	unsigned long flags;


	/*
	 * Transmit packets in round-robin fashion based upon ready
	 * channels.
	 *
	 * To eliminate the need to hold a lock for the entire
	 * iteration through the channel ready list, the head of the
	 * ready-channel list is always the next channel to be
	 * processed. To send a packet, the first valid packet in
	 * the head channel is removed and the head channel is then
	 * rescheduled at the end of the queue by removing it and
	 * inserting after the tail. The locks can then be released
	 * while the packet is processed.
	 */
	while (!smux.in_reset) {
		pkt = NULL;
		low_wm_notif = 0;

		spin_lock_irqsave(&smux.tx_lock_lha2, flags);

		/* handle wakeup if needed */
		if (smux.power_state == SMUX_PWR_OFF) {
			if (!list_empty(&smux.lch_tx_ready_list) ||
			    !list_empty(&smux.power_queue)) {
				/* data to transmit, do wakeup */
				SMUX_PWR("smux: %s: Power %d->%d\n", __func__,
						smux.power_state,
						SMUX_PWR_TURNING_ON);
				smux.local_initiated_wakeup_count++;
				smux.power_state = SMUX_PWR_TURNING_ON;
				spin_unlock_irqrestore(&smux.tx_lock_lha2,
						flags);
				queue_work(smux_tx_wq, &smux_wakeup_work);
			} else {
				/* no activity -- stay asleep */
				spin_unlock_irqrestore(&smux.tx_lock_lha2,
						flags);
			}
			/* wakeup work (or inactivity) will re-run this worker */
			break;
		}

		/* process any pending power packets */
		if (!list_empty(&smux.power_queue)) {
			pkt = list_first_entry(&smux.power_queue,
					struct smux_pkt_t, list);
			list_del(&pkt->list);
			spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);

			/* Adjust power state if this is a flush command */
			spin_lock_irqsave(&smux.tx_lock_lha2, flags);
			if (smux.power_state == SMUX_PWR_TURNING_OFF_FLUSH &&
				pkt->hdr.cmd == SMUX_CMD_PWR_CTL) {
				if (pkt->hdr.flags & SMUX_CMD_PWR_CTL_ACK ||
					smux.power_ctl_remote_req_received) {
					/*
					 * Sending remote power-down request ACK
					 * or sending local power-down request
					 * and we already received a remote
					 * power-down request.
					 */
					SMUX_PWR(
					"smux: %s: Power %d->%d\n", __func__,
							smux.power_state,
							SMUX_PWR_OFF_FLUSH);
					smux.power_state = SMUX_PWR_OFF_FLUSH;
					smux.power_ctl_remote_req_received = 0;
					queue_work(smux_tx_wq,
							&smux_inactivity_work);
				} else {
					/* sending local power-down request */
					SMUX_PWR(
					"smux: %s: Power %d->%d\n", __func__,
							smux.power_state,
							SMUX_PWR_TURNING_OFF);
					smux.power_state = SMUX_PWR_TURNING_OFF;
				}
			}
			spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);

			/* send the packet (outside the TX lock) */
			smux_uart_power_on();
			smux.tx_activity_flag = 1;
			SMUX_PWR_PKT_TX(pkt);
			if (!smux_byte_loopback) {
				smux_tx_tty(pkt);
				smux_flush_tty();
			} else {
				smux_tx_loopback(pkt);
			}

			smux_free_pkt(pkt);
			continue;
		}

		/* get the next ready channel */
		if (list_empty(&smux.lch_tx_ready_list)) {
			/* no ready channels */
			SMUX_DBG("smux: %s: no more ready channels, exiting\n",
					__func__);
			spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
			break;
		}
		smux.tx_activity_flag = 1;

		if (smux.power_state != SMUX_PWR_ON) {
			/* channel not ready to transmit */
			SMUX_DBG("smux: %s: waiting for link up (state %d)\n",
					__func__,
					smux.power_state);
			spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
			break;
		}

		/* get the next packet to send and rotate channel list */
		ch = list_first_entry(&smux.lch_tx_ready_list,
					struct smux_lch_t,
					tx_ready_list);

		/* lock order: tx_lock_lha2 -> state_lock_lhb1 -> tx_lock_lhb2 */
		spin_lock(&ch->state_lock_lhb1);
		spin_lock(&ch->tx_lock_lhb2);
		if (!list_empty(&ch->tx_queue)) {
			/*
			 * If remote TX flow control is enabled or
			 * the channel is not fully opened, then only
			 * send command packets.
			 */
			if (ch->tx_flow_control || !IS_FULLY_OPENED(ch)) {
				struct smux_pkt_t *curr;
				list_for_each_entry(curr, &ch->tx_queue, list) {
					if (curr->hdr.cmd != SMUX_CMD_DATA) {
						pkt = curr;
						break;
					}
				}
			} else {
				/* get next cmd/data packet to send */
				pkt = list_first_entry(&ch->tx_queue,
						struct smux_pkt_t, list);
			}
		}

		if (pkt) {
			list_del(&pkt->list);

			/* update packet stats */
			if (pkt->hdr.cmd == SMUX_CMD_DATA) {
				--ch->tx_pending_data_cnt;
				if (ch->notify_lwm &&
					ch->tx_pending_data_cnt
						<= SMUX_TX_WM_LOW) {
					ch->notify_lwm = 0;
					low_wm_notif = 1;
				}
			}

			/* advance to the next ready channel */
			list_rotate_left(&smux.lch_tx_ready_list);
		} else {
			/* no data in channel to send, remove from ready list */
			list_del(&ch->tx_ready_list);
			INIT_LIST_HEAD(&ch->tx_ready_list);
		}
		lcid = ch->lcid;
		spin_unlock(&ch->tx_lock_lhb2);
		spin_unlock(&ch->state_lock_lhb1);
		spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);

		if (low_wm_notif)
			schedule_notify(lcid, SMUX_LOW_WM_HIT, NULL);

		/*
		 * send the packet
		 * NOTE(review): pkt can still be NULL here (channel had no
		 * sendable packet) - assumes smux_tx_pkt()/smux_free_pkt()
		 * tolerate a NULL packet; confirm against their definitions.
		 */
		smux_tx_pkt(ch, pkt);
		smux_free_pkt(pkt);
	}
}
2954
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002955/**
2956 * Update the RX flow control (sent in the TIOCM Status command).
2957 *
2958 * @ch Channel for update
2959 *
2960 * @returns 1 for updated, 0 for not updated
2961 *
2962 * Must be called with ch->state_lock_lhb1 locked.
2963 */
2964static int smux_rx_flow_control_updated(struct smux_lch_t *ch)
2965{
2966 int updated = 0;
2967 int prev_state;
2968
2969 prev_state = ch->local_tiocm & SMUX_CMD_STATUS_FLOW_CNTL;
2970
2971 if (ch->rx_flow_control_client || ch->rx_flow_control_auto)
2972 ch->local_tiocm |= SMUX_CMD_STATUS_FLOW_CNTL;
2973 else
2974 ch->local_tiocm &= ~SMUX_CMD_STATUS_FLOW_CNTL;
2975
2976 if (prev_state != (ch->local_tiocm & SMUX_CMD_STATUS_FLOW_CNTL)) {
2977 smux_send_status_cmd(ch);
2978 updated = 1;
2979 }
2980
2981 return updated;
2982}
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002983
/**
 * Flush all SMUX workqueues.
 *
 * This sets the reset bit to abort any processing loops and then
 * flushes the workqueues to ensure that no new pending work is
 * running.  Do not call with any locks used by workers held as
 * this will result in a deadlock.
 */
static void smux_flush_workqueues(void)
{
	/* tell in-flight workers (TX/RX loops check this flag) to bail out */
	smux.in_reset = 1;

	SMUX_DBG("smux: %s: flushing tx wq\n", __func__);
	flush_workqueue(smux_tx_wq);
	SMUX_DBG("smux: %s: flushing rx wq\n", __func__);
	flush_workqueue(smux_rx_wq);
	SMUX_DBG("smux: %s: flushing notify wq\n", __func__);
	flush_workqueue(smux_notify_wq);
}
3003
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003004/**********************************************************************/
3005/* Kernel API */
3006/**********************************************************************/
3007
3008/**
3009 * Set or clear channel option using the SMUX_CH_OPTION_* channel
3010 * flags.
3011 *
3012 * @lcid Logical channel ID
3013 * @set Options to set
3014 * @clear Options to clear
3015 *
3016 * @returns 0 for success, < 0 for failure
3017 */
3018int msm_smux_set_ch_option(uint8_t lcid, uint32_t set, uint32_t clear)
3019{
3020 unsigned long flags;
3021 struct smux_lch_t *ch;
3022 int tx_ready = 0;
3023 int ret = 0;
3024
3025 if (smux_assert_lch_id(lcid))
3026 return -ENXIO;
3027
3028 ch = &smux_lch[lcid];
3029 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
3030
3031 /* Local loopback mode */
3032 if (set & SMUX_CH_OPTION_LOCAL_LOOPBACK)
3033 ch->local_mode = SMUX_LCH_MODE_LOCAL_LOOPBACK;
3034
3035 if (clear & SMUX_CH_OPTION_LOCAL_LOOPBACK)
3036 ch->local_mode = SMUX_LCH_MODE_NORMAL;
3037
3038 /* Remote loopback mode */
3039 if (set & SMUX_CH_OPTION_REMOTE_LOOPBACK)
3040 ch->local_mode = SMUX_LCH_MODE_REMOTE_LOOPBACK;
3041
3042 if (clear & SMUX_CH_OPTION_REMOTE_LOOPBACK)
3043 ch->local_mode = SMUX_LCH_MODE_NORMAL;
3044
Eric Holmberg2e0906f2012-06-26 13:29:14 -06003045 /* RX Flow control */
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003046 if (set & SMUX_CH_OPTION_REMOTE_TX_STOP) {
Eric Holmberg2e0906f2012-06-26 13:29:14 -06003047 ch->rx_flow_control_client = 1;
3048 tx_ready |= smux_rx_flow_control_updated(ch);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003049 }
3050
3051 if (clear & SMUX_CH_OPTION_REMOTE_TX_STOP) {
Eric Holmberg2e0906f2012-06-26 13:29:14 -06003052 ch->rx_flow_control_client = 0;
3053 tx_ready |= smux_rx_flow_control_updated(ch);
3054 }
3055
3056 /* Auto RX Flow Control */
3057 if (set & SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP) {
Angshuman Sarkar98f78122012-07-24 14:50:42 +05303058 SMUX_DBG("smux: %s: auto rx flow control option enabled\n",
Eric Holmberg2e0906f2012-06-26 13:29:14 -06003059 __func__);
3060 ch->options |= SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP;
3061 }
3062
3063 if (clear & SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP) {
Angshuman Sarkar98f78122012-07-24 14:50:42 +05303064 SMUX_DBG("smux: %s: auto rx flow control option disabled\n",
Eric Holmberg2e0906f2012-06-26 13:29:14 -06003065 __func__);
3066 ch->options &= ~SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP;
3067 ch->rx_flow_control_auto = 0;
3068 tx_ready |= smux_rx_flow_control_updated(ch);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003069 }
3070
3071 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
3072
3073 if (tx_ready)
3074 list_channel(ch);
3075
3076 return ret;
3077}
3078
3079/**
3080 * Starts the opening sequence for a logical channel.
3081 *
3082 * @lcid Logical channel ID
3083 * @priv Free for client usage
3084 * @notify Event notification function
3085 * @get_rx_buffer Function used to provide a receive buffer to SMUX
3086 *
3087 * @returns 0 for success, <0 otherwise
3088 *
3089 * A channel must be fully closed (either not previously opened or
3090 * msm_smux_close() has been called and the SMUX_DISCONNECTED has been
3091 * received.
3092 *
3093 * One the remote side is opened, the client will receive a SMUX_CONNECTED
3094 * event.
3095 */
3096int msm_smux_open(uint8_t lcid, void *priv,
3097 void (*notify)(void *priv, int event_type, const void *metadata),
3098 int (*get_rx_buffer)(void *priv, void **pkt_priv, void **buffer,
3099 int size))
3100{
3101 int ret;
3102 struct smux_lch_t *ch;
3103 struct smux_pkt_t *pkt;
3104 int tx_ready = 0;
3105 unsigned long flags;
3106
3107 if (smux_assert_lch_id(lcid))
3108 return -ENXIO;
3109
3110 ch = &smux_lch[lcid];
3111 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
3112
3113 if (ch->local_state == SMUX_LCH_LOCAL_CLOSING) {
3114 ret = -EAGAIN;
3115 goto out;
3116 }
3117
3118 if (ch->local_state != SMUX_LCH_LOCAL_CLOSED) {
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06003119 SMUX_ERR("%s: open lcid %d local state %x invalid\n",
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003120 __func__, lcid, ch->local_state);
3121 ret = -EINVAL;
3122 goto out;
3123 }
3124
Angshuman Sarkar98f78122012-07-24 14:50:42 +05303125 SMUX_DBG("smux: lcid %d local state 0x%x -> 0x%x\n", lcid,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003126 ch->local_state,
3127 SMUX_LCH_LOCAL_OPENING);
3128
Eric Holmberg06011322012-07-06 18:17:03 -06003129 ch->rx_flow_control_auto = 0;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003130 ch->local_state = SMUX_LCH_LOCAL_OPENING;
3131
3132 ch->priv = priv;
3133 ch->notify = notify;
3134 ch->get_rx_buffer = get_rx_buffer;
3135 ret = 0;
3136
3137 /* Send Open Command */
3138 pkt = smux_alloc_pkt();
3139 if (!pkt) {
3140 ret = -ENOMEM;
3141 goto out;
3142 }
3143 pkt->hdr.magic = SMUX_MAGIC;
3144 pkt->hdr.cmd = SMUX_CMD_OPEN_LCH;
3145 pkt->hdr.flags = SMUX_CMD_OPEN_POWER_COLLAPSE;
3146 if (ch->local_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK)
3147 pkt->hdr.flags |= SMUX_CMD_OPEN_REMOTE_LOOPBACK;
3148 pkt->hdr.lcid = lcid;
3149 pkt->hdr.payload_len = 0;
3150 pkt->hdr.pad_len = 0;
3151 smux_tx_queue(pkt, ch, 0);
3152 tx_ready = 1;
3153
3154out:
3155 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
Eric Holmberg06011322012-07-06 18:17:03 -06003156 smux_rx_flow_control_updated(ch);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003157 if (tx_ready)
3158 list_channel(ch);
3159 return ret;
3160}
3161
/**
 * Starts the closing sequence for a logical channel.
 *
 * @lcid Logical channel ID
 *
 * @returns 0 for success, <0 otherwise
 *
 * Once the close event has been acknowledge by the remote side, the client
 * will receive a SMUX_DISCONNECTED notification.
 */
int msm_smux_close(uint8_t lcid)
{
	int ret = 0;
	struct smux_lch_t *ch;
	struct smux_pkt_t *pkt;
	int tx_ready = 0;
	unsigned long flags;

	if (smux_assert_lch_id(lcid))
		return -ENXIO;

	ch = &smux_lch[lcid];
	spin_lock_irqsave(&ch->state_lock_lhb1, flags);
	/* reset modem-status and TX bookkeeping for this channel */
	ch->local_tiocm = 0x0;
	ch->remote_tiocm = 0x0;
	ch->tx_pending_data_cnt = 0;
	ch->notify_lwm = 0;
	ch->tx_flow_control = 0;

	/* Purge TX queue (nested lock: state_lock_lhb1 -> tx_lock_lhb2) */
	spin_lock(&ch->tx_lock_lhb2);
	smux_purge_ch_tx_queue(ch, 0);
	spin_unlock(&ch->tx_lock_lhb2);

	/* Send Close Command */
	if (ch->local_state == SMUX_LCH_LOCAL_OPENED ||
		ch->local_state == SMUX_LCH_LOCAL_OPENING) {
		SMUX_DBG("smux: lcid %d local state 0x%x -> 0x%x\n", lcid,
				ch->local_state,
				SMUX_LCH_LOCAL_CLOSING);

		ch->local_state = SMUX_LCH_LOCAL_CLOSING;
		pkt = smux_alloc_pkt();
		if (pkt) {
			pkt->hdr.cmd = SMUX_CMD_CLOSE_LCH;
			pkt->hdr.flags = 0;
			pkt->hdr.lcid = lcid;
			pkt->hdr.payload_len = 0;
			pkt->hdr.pad_len = 0;
			smux_tx_queue(pkt, ch, 0);
			tx_ready = 1;
		} else {
			/* channel stays in CLOSING state despite the failure */
			SMUX_ERR("%s: pkt allocation failed\n", __func__);
			ret = -ENOMEM;
		}

		/* Purge RX retry queue (retry worker drops all entries
		 * once the channel is no longer LOCAL_OPENED) */
		if (ch->rx_retry_queue_cnt)
			queue_delayed_work(smux_rx_wq, &ch->rx_retry_work, 0);
	}
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);

	if (tx_ready)
		list_channel(ch);

	return ret;
}
3229
3230/**
3231 * Write data to a logical channel.
3232 *
3233 * @lcid Logical channel ID
3234 * @pkt_priv Client data that will be returned with the SMUX_WRITE_DONE or
3235 * SMUX_WRITE_FAIL notification.
3236 * @data Data to write
3237 * @len Length of @data
3238 *
3239 * @returns 0 for success, <0 otherwise
3240 *
3241 * Data may be written immediately after msm_smux_open() is called,
3242 * but the data will wait in the transmit queue until the channel has
3243 * been fully opened.
3244 *
3245 * Once the data has been written, the client will receive either a completion
3246 * (SMUX_WRITE_DONE) or a failure notice (SMUX_WRITE_FAIL).
3247 */
3248int msm_smux_write(uint8_t lcid, void *pkt_priv, const void *data, int len)
3249{
3250 struct smux_lch_t *ch;
3251 struct smux_pkt_t *pkt;
3252 int tx_ready = 0;
3253 unsigned long flags;
3254 int ret;
3255
3256 if (smux_assert_lch_id(lcid))
3257 return -ENXIO;
3258
3259 ch = &smux_lch[lcid];
3260 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
3261
3262 if (ch->local_state != SMUX_LCH_LOCAL_OPENED &&
3263 ch->local_state != SMUX_LCH_LOCAL_OPENING) {
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06003264 SMUX_ERR("%s: hdr.invalid local state %d channel %d\n",
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003265 __func__, ch->local_state, lcid);
3266 ret = -EINVAL;
3267 goto out;
3268 }
3269
3270 if (len > SMUX_MAX_PKT_SIZE - sizeof(struct smux_hdr_t)) {
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06003271 SMUX_ERR("%s: payload %d too large\n",
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003272 __func__, len);
3273 ret = -E2BIG;
3274 goto out;
3275 }
3276
3277 pkt = smux_alloc_pkt();
3278 if (!pkt) {
3279 ret = -ENOMEM;
3280 goto out;
3281 }
3282
3283 pkt->hdr.cmd = SMUX_CMD_DATA;
3284 pkt->hdr.lcid = lcid;
3285 pkt->hdr.flags = 0;
3286 pkt->hdr.payload_len = len;
3287 pkt->payload = (void *)data;
3288 pkt->priv = pkt_priv;
3289 pkt->hdr.pad_len = 0;
3290
3291 spin_lock(&ch->tx_lock_lhb2);
3292 /* verify high watermark */
Angshuman Sarkar98f78122012-07-24 14:50:42 +05303293 SMUX_DBG("smux: %s: pending %d", __func__, ch->tx_pending_data_cnt);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003294
Eric Holmberg2e0906f2012-06-26 13:29:14 -06003295 if (ch->tx_pending_data_cnt >= SMUX_TX_WM_HIGH) {
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06003296 SMUX_ERR("%s: ch %d high watermark %d exceeded %d\n",
Eric Holmberg2e0906f2012-06-26 13:29:14 -06003297 __func__, lcid, SMUX_TX_WM_HIGH,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003298 ch->tx_pending_data_cnt);
3299 ret = -EAGAIN;
3300 goto out_inner;
3301 }
3302
3303 /* queue packet for transmit */
Eric Holmberg2e0906f2012-06-26 13:29:14 -06003304 if (++ch->tx_pending_data_cnt == SMUX_TX_WM_HIGH) {
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003305 ch->notify_lwm = 1;
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06003306 SMUX_ERR("%s: high watermark hit\n", __func__);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003307 schedule_notify(lcid, SMUX_HIGH_WM_HIT, NULL);
3308 }
3309 list_add_tail(&pkt->list, &ch->tx_queue);
3310
3311 /* add to ready list */
3312 if (IS_FULLY_OPENED(ch))
3313 tx_ready = 1;
3314
3315 ret = 0;
3316
3317out_inner:
3318 spin_unlock(&ch->tx_lock_lhb2);
3319
3320out:
3321 if (ret)
3322 smux_free_pkt(pkt);
3323 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
3324
3325 if (tx_ready)
3326 list_channel(ch);
3327
3328 return ret;
3329}
3330
3331/**
3332 * Returns true if the TX queue is currently full (high water mark).
3333 *
3334 * @lcid Logical channel ID
3335 * @returns 0 if channel is not full
3336 * 1 if it is full
3337 * < 0 for error
3338 */
3339int msm_smux_is_ch_full(uint8_t lcid)
3340{
3341 struct smux_lch_t *ch;
3342 unsigned long flags;
3343 int is_full = 0;
3344
3345 if (smux_assert_lch_id(lcid))
3346 return -ENXIO;
3347
3348 ch = &smux_lch[lcid];
3349
3350 spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06003351 if (ch->tx_pending_data_cnt >= SMUX_TX_WM_HIGH)
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003352 is_full = 1;
3353 spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);
3354
3355 return is_full;
3356}
3357
3358/**
3359 * Returns true if the TX queue has space for more packets it is at or
3360 * below the low water mark).
3361 *
3362 * @lcid Logical channel ID
3363 * @returns 0 if channel is above low watermark
3364 * 1 if it's at or below the low watermark
3365 * < 0 for error
3366 */
3367int msm_smux_is_ch_low(uint8_t lcid)
3368{
3369 struct smux_lch_t *ch;
3370 unsigned long flags;
3371 int is_low = 0;
3372
3373 if (smux_assert_lch_id(lcid))
3374 return -ENXIO;
3375
3376 ch = &smux_lch[lcid];
3377
3378 spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06003379 if (ch->tx_pending_data_cnt <= SMUX_TX_WM_LOW)
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003380 is_low = 1;
3381 spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);
3382
3383 return is_low;
3384}
3385
3386/**
3387 * Send TIOCM status update.
3388 *
3389 * @ch Channel for update
3390 *
3391 * @returns 0 for success, <0 for failure
3392 *
3393 * Channel lock must be held before calling.
3394 */
3395static int smux_send_status_cmd(struct smux_lch_t *ch)
3396{
3397 struct smux_pkt_t *pkt;
3398
3399 if (!ch)
3400 return -EINVAL;
3401
3402 pkt = smux_alloc_pkt();
3403 if (!pkt)
3404 return -ENOMEM;
3405
3406 pkt->hdr.lcid = ch->lcid;
3407 pkt->hdr.cmd = SMUX_CMD_STATUS;
3408 pkt->hdr.flags = ch->local_tiocm;
3409 pkt->hdr.payload_len = 0;
3410 pkt->hdr.pad_len = 0;
3411 smux_tx_queue(pkt, ch, 0);
3412
3413 return 0;
3414}
3415
3416/**
3417 * Internal helper function for getting the TIOCM status with
3418 * state_lock_lhb1 already locked.
3419 *
3420 * @ch Channel pointer
3421 *
3422 * @returns TIOCM status
3423 */
Eric Holmbergf44a8cc2012-06-13 17:58:13 -06003424long msm_smux_tiocm_get_atomic(struct smux_lch_t *ch)
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003425{
3426 long status = 0x0;
3427
3428 status |= (ch->remote_tiocm & SMUX_CMD_STATUS_RTC) ? TIOCM_DSR : 0;
3429 status |= (ch->remote_tiocm & SMUX_CMD_STATUS_RTR) ? TIOCM_CTS : 0;
3430 status |= (ch->remote_tiocm & SMUX_CMD_STATUS_RI) ? TIOCM_RI : 0;
3431 status |= (ch->remote_tiocm & SMUX_CMD_STATUS_DCD) ? TIOCM_CD : 0;
3432
3433 status |= (ch->local_tiocm & SMUX_CMD_STATUS_RTC) ? TIOCM_DTR : 0;
3434 status |= (ch->local_tiocm & SMUX_CMD_STATUS_RTR) ? TIOCM_RTS : 0;
3435
3436 return status;
3437}
3438
3439/**
3440 * Get the TIOCM status bits.
3441 *
3442 * @lcid Logical channel ID
3443 *
3444 * @returns >= 0 TIOCM status bits
3445 * < 0 Error condition
3446 */
3447long msm_smux_tiocm_get(uint8_t lcid)
3448{
3449 struct smux_lch_t *ch;
3450 unsigned long flags;
3451 long status = 0x0;
3452
3453 if (smux_assert_lch_id(lcid))
3454 return -ENXIO;
3455
3456 ch = &smux_lch[lcid];
3457 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
3458 status = msm_smux_tiocm_get_atomic(ch);
3459 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
3460
3461 return status;
3462}
3463
3464/**
3465 * Set/clear the TIOCM status bits.
3466 *
3467 * @lcid Logical channel ID
3468 * @set Bits to set
3469 * @clear Bits to clear
3470 *
3471 * @returns 0 for success; < 0 for failure
3472 *
3473 * If a bit is specified in both the @set and @clear masks, then the clear bit
3474 * definition will dominate and the bit will be cleared.
3475 */
3476int msm_smux_tiocm_set(uint8_t lcid, uint32_t set, uint32_t clear)
3477{
3478 struct smux_lch_t *ch;
3479 unsigned long flags;
3480 uint8_t old_status;
3481 uint8_t status_set = 0x0;
3482 uint8_t status_clear = 0x0;
3483 int tx_ready = 0;
3484 int ret = 0;
3485
3486 if (smux_assert_lch_id(lcid))
3487 return -ENXIO;
3488
3489 ch = &smux_lch[lcid];
3490 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
3491
3492 status_set |= (set & TIOCM_DTR) ? SMUX_CMD_STATUS_RTC : 0;
3493 status_set |= (set & TIOCM_RTS) ? SMUX_CMD_STATUS_RTR : 0;
3494 status_set |= (set & TIOCM_RI) ? SMUX_CMD_STATUS_RI : 0;
3495 status_set |= (set & TIOCM_CD) ? SMUX_CMD_STATUS_DCD : 0;
3496
3497 status_clear |= (clear & TIOCM_DTR) ? SMUX_CMD_STATUS_RTC : 0;
3498 status_clear |= (clear & TIOCM_RTS) ? SMUX_CMD_STATUS_RTR : 0;
3499 status_clear |= (clear & TIOCM_RI) ? SMUX_CMD_STATUS_RI : 0;
3500 status_clear |= (clear & TIOCM_CD) ? SMUX_CMD_STATUS_DCD : 0;
3501
3502 old_status = ch->local_tiocm;
3503 ch->local_tiocm |= status_set;
3504 ch->local_tiocm &= ~status_clear;
3505
3506 if (ch->local_tiocm != old_status) {
3507 ret = smux_send_status_cmd(ch);
3508 tx_ready = 1;
3509 }
3510 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
3511
3512 if (tx_ready)
3513 list_channel(ch);
3514
3515 return ret;
3516}
3517
3518/**********************************************************************/
Eric Holmberged1f00c2012-06-07 09:45:18 -06003519/* Subsystem Restart */
3520/**********************************************************************/
/* Notifier block for Subsystem Restart (SSR) events from the external modem */
static struct notifier_block ssr_notifier = {
	.notifier_call = ssr_notifier_cb,
};
3524
3525/**
3526 * Handle Subsystem Restart (SSR) notifications.
3527 *
3528 * @this Pointer to ssr_notifier
3529 * @code SSR Code
3530 * @data Data pointer (not used)
3531 */
3532static int ssr_notifier_cb(struct notifier_block *this,
3533 unsigned long code,
3534 void *data)
3535{
3536 unsigned long flags;
Eric Holmbergf6a364e2012-08-07 18:41:44 -06003537 int i;
3538 int tmp;
Eric Holmberged1f00c2012-06-07 09:45:18 -06003539 int power_off_uart = 0;
3540
Eric Holmbergd2697902012-06-15 09:58:46 -06003541 if (code == SUBSYS_BEFORE_SHUTDOWN) {
Angshuman Sarkar98f78122012-07-24 14:50:42 +05303542 SMUX_DBG("smux: %s: ssr - before shutdown\n", __func__);
Eric Holmbergd2697902012-06-15 09:58:46 -06003543 mutex_lock(&smux.mutex_lha0);
3544 smux.in_reset = 1;
Eric Holmbergf3f34592012-08-28 13:51:14 -06003545 smux.remote_is_alive = 0;
Eric Holmbergd2697902012-06-15 09:58:46 -06003546 mutex_unlock(&smux.mutex_lha0);
Eric Holmberged1f00c2012-06-07 09:45:18 -06003547 return NOTIFY_DONE;
Eric Holmbergf6a364e2012-08-07 18:41:44 -06003548 } else if (code == SUBSYS_AFTER_POWERUP) {
3549 /* re-register platform devices */
Angshuman Sarkar98f78122012-07-24 14:50:42 +05303550 SMUX_DBG("smux: %s: ssr - after power-up\n", __func__);
Eric Holmbergf6a364e2012-08-07 18:41:44 -06003551 mutex_lock(&smux.mutex_lha0);
Eric Holmberg2bf9c522012-08-09 13:23:21 -06003552 if (smux.ld_open_count > 0
3553 && !smux.platform_devs_registered) {
3554 for (i = 0; i < ARRAY_SIZE(smux_devs); ++i) {
Angshuman Sarkar98f78122012-07-24 14:50:42 +05303555 SMUX_DBG("smux: %s: register pdev '%s'\n",
Eric Holmbergf6a364e2012-08-07 18:41:44 -06003556 __func__, smux_devs[i].name);
Eric Holmberg2bf9c522012-08-09 13:23:21 -06003557 smux_devs[i].dev.release = smux_pdev_release;
3558 tmp = platform_device_register(&smux_devs[i]);
3559 if (tmp)
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06003560 SMUX_ERR(
3561 "%s: error %d registering device %s\n",
Eric Holmbergf6a364e2012-08-07 18:41:44 -06003562 __func__, tmp, smux_devs[i].name);
Eric Holmberg2bf9c522012-08-09 13:23:21 -06003563 }
3564 smux.platform_devs_registered = 1;
Eric Holmbergf6a364e2012-08-07 18:41:44 -06003565 }
3566 mutex_unlock(&smux.mutex_lha0);
3567 return NOTIFY_DONE;
Eric Holmbergd2697902012-06-15 09:58:46 -06003568 } else if (code != SUBSYS_AFTER_SHUTDOWN) {
3569 return NOTIFY_DONE;
3570 }
Angshuman Sarkar98f78122012-07-24 14:50:42 +05303571 SMUX_DBG("smux: %s: ssr - after shutdown\n", __func__);
Eric Holmberged1f00c2012-06-07 09:45:18 -06003572
3573 /* Cleanup channels */
Eric Holmberg06011322012-07-06 18:17:03 -06003574 smux_flush_workqueues();
Eric Holmbergd2697902012-06-15 09:58:46 -06003575 mutex_lock(&smux.mutex_lha0);
Eric Holmberg2bf9c522012-08-09 13:23:21 -06003576 if (smux.ld_open_count > 0) {
3577 smux_lch_purge();
3578 if (smux.tty)
3579 tty_driver_flush_buffer(smux.tty);
Eric Holmberged1f00c2012-06-07 09:45:18 -06003580
Eric Holmberg2bf9c522012-08-09 13:23:21 -06003581 /* Unregister platform devices */
3582 if (smux.platform_devs_registered) {
3583 for (i = 0; i < ARRAY_SIZE(smux_devs); ++i) {
Angshuman Sarkar98f78122012-07-24 14:50:42 +05303584 SMUX_DBG("smux: %s: unregister pdev '%s'\n",
Eric Holmberg2bf9c522012-08-09 13:23:21 -06003585 __func__, smux_devs[i].name);
3586 platform_device_unregister(&smux_devs[i]);
3587 }
3588 smux.platform_devs_registered = 0;
3589 }
3590
3591 /* Power-down UART */
3592 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
3593 if (smux.power_state != SMUX_PWR_OFF) {
Angshuman Sarkar98f78122012-07-24 14:50:42 +05303594 SMUX_PWR("smux: %s: SSR - turning off UART\n",
3595 __func__);
Eric Holmberg2bf9c522012-08-09 13:23:21 -06003596 smux.power_state = SMUX_PWR_OFF;
3597 power_off_uart = 1;
3598 }
3599 smux.powerdown_enabled = 0;
3600 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
3601
3602 if (power_off_uart)
3603 smux_uart_power_off_atomic();
Eric Holmbergf6a364e2012-08-07 18:41:44 -06003604 }
Eric Holmberg06011322012-07-06 18:17:03 -06003605 smux.tx_activity_flag = 0;
3606 smux.rx_activity_flag = 0;
3607 smux.rx_state = SMUX_RX_IDLE;
Eric Holmbergd2697902012-06-15 09:58:46 -06003608 smux.in_reset = 0;
Eric Holmbergf3f34592012-08-28 13:51:14 -06003609 smux.remote_is_alive = 0;
Eric Holmbergd2697902012-06-15 09:58:46 -06003610 mutex_unlock(&smux.mutex_lha0);
3611
Eric Holmberged1f00c2012-06-07 09:45:18 -06003612 return NOTIFY_DONE;
3613}
3614
3615/**********************************************************************/
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003616/* Line Discipline Interface */
3617/**********************************************************************/
Eric Holmberged1f00c2012-06-07 09:45:18 -06003618static void smux_pdev_release(struct device *dev)
3619{
3620 struct platform_device *pdev;
3621
3622 pdev = container_of(dev, struct platform_device, dev);
Angshuman Sarkar98f78122012-07-24 14:50:42 +05303623 SMUX_DBG("smux: %s: releasing pdev %p '%s'\n",
3624 __func__, pdev, pdev->name);
Eric Holmberged1f00c2012-06-07 09:45:18 -06003625 memset(&pdev->dev, 0x0, sizeof(pdev->dev));
3626}
3627
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003628static int smuxld_open(struct tty_struct *tty)
3629{
3630 int i;
3631 int tmp;
3632 unsigned long flags;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003633
3634 if (!smux.is_initialized)
3635 return -ENODEV;
3636
Eric Holmberged1f00c2012-06-07 09:45:18 -06003637 mutex_lock(&smux.mutex_lha0);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003638 if (smux.ld_open_count) {
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06003639 SMUX_ERR("%s: %p multiple instances not supported\n",
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003640 __func__, tty);
Eric Holmberged1f00c2012-06-07 09:45:18 -06003641 mutex_unlock(&smux.mutex_lha0);
Eric Holmberg902c51e2012-05-29 12:12:16 -06003642 return -EEXIST;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003643 }
3644
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003645 if (tty->ops->write == NULL) {
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06003646 SMUX_ERR("%s: tty->ops->write already NULL\n", __func__);
Eric Holmberged1f00c2012-06-07 09:45:18 -06003647 mutex_unlock(&smux.mutex_lha0);
Eric Holmberg902c51e2012-05-29 12:12:16 -06003648 return -EINVAL;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003649 }
3650
3651 /* connect to TTY */
Eric Holmberged1f00c2012-06-07 09:45:18 -06003652 ++smux.ld_open_count;
3653 smux.in_reset = 0;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003654 smux.tty = tty;
3655 tty->disc_data = &smux;
3656 tty->receive_room = TTY_RECEIVE_ROOM;
3657 tty_driver_flush_buffer(tty);
3658
3659 /* power-down the UART if we are idle */
Eric Holmberged1f00c2012-06-07 09:45:18 -06003660 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003661 if (smux.power_state == SMUX_PWR_OFF) {
Angshuman Sarkar98f78122012-07-24 14:50:42 +05303662 SMUX_PWR("smux: %s: powering off uart\n", __func__);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003663 smux.power_state = SMUX_PWR_OFF_FLUSH;
Eric Holmberged1f00c2012-06-07 09:45:18 -06003664 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003665 queue_work(smux_tx_wq, &smux_inactivity_work);
3666 } else {
Eric Holmberged1f00c2012-06-07 09:45:18 -06003667 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003668 }
3669
3670 /* register platform devices */
3671 for (i = 0; i < ARRAY_SIZE(smux_devs); ++i) {
Angshuman Sarkar98f78122012-07-24 14:50:42 +05303672 SMUX_DBG("smux: %s: register pdev '%s'\n",
Eric Holmberged1f00c2012-06-07 09:45:18 -06003673 __func__, smux_devs[i].name);
3674 smux_devs[i].dev.release = smux_pdev_release;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003675 tmp = platform_device_register(&smux_devs[i]);
3676 if (tmp)
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06003677 SMUX_ERR("%s: error %d registering device %s\n",
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003678 __func__, tmp, smux_devs[i].name);
3679 }
Eric Holmberg2bf9c522012-08-09 13:23:21 -06003680 smux.platform_devs_registered = 1;
Eric Holmberged1f00c2012-06-07 09:45:18 -06003681 mutex_unlock(&smux.mutex_lha0);
Eric Holmberg902c51e2012-05-29 12:12:16 -06003682 return 0;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003683}
3684
/**
 * Close the SMUX line discipline.
 *
 * @tty  TTY being detached (unused; state lives in the global smux struct)
 *
 * Purges all logical channels, unregisters the platform devices, powers
 * the UART back on if it was off, and disconnects from the TTY.
 */
static void smuxld_close(struct tty_struct *tty)
{
	unsigned long flags;
	int power_up_uart = 0;
	int i;

	SMUX_DBG("smux: %s: ldisc unload\n", __func__);
	smux_flush_workqueues();

	mutex_lock(&smux.mutex_lha0);
	if (smux.ld_open_count <= 0) {
		SMUX_ERR("%s: invalid ld count %d\n", __func__,
			smux.ld_open_count);
		mutex_unlock(&smux.mutex_lha0);
		return;
	}
	--smux.ld_open_count;

	/* Cleanup channels */
	smux_lch_purge();

	/* Unregister platform devices */
	if (smux.platform_devs_registered) {
		for (i = 0; i < ARRAY_SIZE(smux_devs); ++i) {
			SMUX_DBG("smux: %s: unregister pdev '%s'\n",
					__func__, smux_devs[i].name);
			platform_device_unregister(&smux_devs[i]);
		}
		smux.platform_devs_registered = 0;
	}

	/* Schedule UART power-up if it's down */
	spin_lock_irqsave(&smux.tx_lock_lha2, flags);
	if (smux.power_state == SMUX_PWR_OFF)
		power_up_uart = 1;
	smux.power_state = SMUX_PWR_OFF;
	smux.powerdown_enabled = 0;
	smux.tx_activity_flag = 0;
	smux.rx_activity_flag = 0;
	spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);

	/* power on outside the spinlock */
	if (power_up_uart)
		smux_uart_power_on_atomic();

	smux.rx_state = SMUX_RX_IDLE;

	/* Disconnect from TTY */
	smux.tty = NULL;
	smux.remote_is_alive = 0;
	mutex_unlock(&smux.mutex_lha0);
	SMUX_DBG("smux: %s: ldisc complete\n", __func__);
}
3737
3738/**
3739 * Receive data from TTY Line Discipline.
3740 *
3741 * @tty TTY structure
3742 * @cp Character data
3743 * @fp Flag data
3744 * @count Size of character and flag data
3745 */
3746void smuxld_receive_buf(struct tty_struct *tty, const unsigned char *cp,
3747 char *fp, int count)
3748{
3749 int i;
3750 int last_idx = 0;
3751 const char *tty_name = NULL;
3752 char *f;
3753
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003754 /* verify error flags */
3755 for (i = 0, f = fp; i < count; ++i, ++f) {
3756 if (*f != TTY_NORMAL) {
3757 if (tty)
3758 tty_name = tty->name;
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06003759 SMUX_ERR("%s: TTY %s Error %d (%s)\n", __func__,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003760 tty_name, *f, tty_flag_to_str(*f));
3761
3762 /* feed all previous valid data to the parser */
3763 smux_rx_state_machine(cp + last_idx, i - last_idx,
3764 TTY_NORMAL);
3765
3766 /* feed bad data to parser */
3767 smux_rx_state_machine(cp + i, 1, *f);
3768 last_idx = i + 1;
3769 }
3770 }
3771
3772 /* feed data to RX state machine */
3773 smux_rx_state_machine(cp + last_idx, count - last_idx, TTY_NORMAL);
3774}
3775
/* Flushing is not supported by the SMUX line discipline. */
static void smuxld_flush_buffer(struct tty_struct *tty)
{
	SMUX_ERR("%s: not supported\n", __func__);
}
3780
/* Buffer occupancy queries are not supported; always fails with -ENODEV. */
static ssize_t smuxld_chars_in_buffer(struct tty_struct *tty)
{
	SMUX_ERR("%s: not supported\n", __func__);
	return -ENODEV;
}
3786
/* Direct reads are not supported; clients use the SMUX channel API instead. */
static ssize_t smuxld_read(struct tty_struct *tty, struct file *file,
		unsigned char __user *buf, size_t nr)
{
	SMUX_ERR("%s: not supported\n", __func__);
	return -ENODEV;
}
3793
/* Direct writes are not supported; clients use the SMUX channel API instead. */
static ssize_t smuxld_write(struct tty_struct *tty, struct file *file,
		 const unsigned char *buf, size_t nr)
{
	SMUX_ERR("%s: not supported\n", __func__);
	return -ENODEV;
}
3800
/* No ioctls are supported on the line discipline; always fails with -ENODEV. */
static int smuxld_ioctl(struct tty_struct *tty, struct file *file,
		unsigned int cmd, unsigned long arg)
{
	SMUX_ERR("%s: not supported\n", __func__);
	return -ENODEV;
}
3807
/* Polling is not supported; always fails with -ENODEV. */
static unsigned int smuxld_poll(struct tty_struct *tty, struct file *file,
		struct poll_table_struct *tbl)
{
	SMUX_ERR("%s: not supported\n", __func__);
	return -ENODEV;
}
3814
/* Write wakeup is not used; SMUX drives TX from its own workqueue. */
static void smuxld_write_wakeup(struct tty_struct *tty)
{
	SMUX_ERR("%s: not supported\n", __func__);
}
3819
/*
 * Line discipline operations.  Only open/close/receive_buf carry real
 * functionality; the remaining entry points log an error and fail.
 */
static struct tty_ldisc_ops smux_ldisc_ops = {
	.owner		= THIS_MODULE,
	.magic		= TTY_LDISC_MAGIC,
	.name		= "n_smux",
	.open		= smuxld_open,
	.close		= smuxld_close,
	.flush_buffer	= smuxld_flush_buffer,
	.chars_in_buffer = smuxld_chars_in_buffer,
	.read		= smuxld_read,
	.write		= smuxld_write,
	.ioctl		= smuxld_ioctl,
	.poll		= smuxld_poll,
	.receive_buf	= smuxld_receive_buf,
	.write_wakeup	= smuxld_write_wakeup
};
3835
3836static int __init smux_init(void)
3837{
3838 int ret;
3839
Eric Holmberged1f00c2012-06-07 09:45:18 -06003840 mutex_init(&smux.mutex_lha0);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003841
3842 spin_lock_init(&smux.rx_lock_lha1);
3843 smux.rx_state = SMUX_RX_IDLE;
3844 smux.power_state = SMUX_PWR_OFF;
3845 smux.pwr_wakeup_delay_us = 1;
3846 smux.powerdown_enabled = 0;
Eric Holmberga9b06472012-06-22 09:46:34 -06003847 smux.power_ctl_remote_req_received = 0;
Eric Holmbergffddd4c2012-06-08 12:37:51 -06003848 INIT_LIST_HEAD(&smux.power_queue);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003849 smux.rx_activity_flag = 0;
3850 smux.tx_activity_flag = 0;
3851 smux.recv_len = 0;
3852 smux.tty = NULL;
3853 smux.ld_open_count = 0;
3854 smux.in_reset = 0;
Eric Holmbergf3f34592012-08-28 13:51:14 -06003855 smux.remote_is_alive = 0;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003856 smux.is_initialized = 1;
Eric Holmberg2bf9c522012-08-09 13:23:21 -06003857 smux.platform_devs_registered = 0;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003858 smux_byte_loopback = 0;
3859
3860 spin_lock_init(&smux.tx_lock_lha2);
3861 INIT_LIST_HEAD(&smux.lch_tx_ready_list);
3862
3863 ret = tty_register_ldisc(N_SMUX, &smux_ldisc_ops);
3864 if (ret != 0) {
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06003865 SMUX_ERR("%s: error %d registering line discipline\n",
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003866 __func__, ret);
3867 return ret;
3868 }
3869
Eric Holmberg6c9f2a52012-06-14 10:49:04 -06003870 subsys_notif_register_notifier("external_modem", &ssr_notifier);
Eric Holmberged1f00c2012-06-07 09:45:18 -06003871
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003872 ret = lch_init();
3873 if (ret != 0) {
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06003874 SMUX_ERR("%s: lch_init failed\n", __func__);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003875 return ret;
3876 }
3877
Angshuman Sarkar98f78122012-07-24 14:50:42 +05303878 log_ctx = ipc_log_context_create(1, "smux");
3879 if (!log_ctx) {
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06003880 SMUX_ERR("%s: unable to create log context\n", __func__);
Angshuman Sarkar98f78122012-07-24 14:50:42 +05303881 disable_ipc_logging = 1;
3882 }
3883
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003884 return 0;
3885}
3886
3887static void __exit smux_exit(void)
3888{
3889 int ret;
3890
3891 ret = tty_unregister_ldisc(N_SMUX);
3892 if (ret != 0) {
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06003893 SMUX_ERR("%s error %d unregistering line discipline\n",
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003894 __func__, ret);
3895 return;
3896 }
3897}
3898
/* Module entry/exit points and metadata */
module_init(smux_init);
module_exit(smux_exit);

MODULE_DESCRIPTION("Serial Mux TTY Line Discipline");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_LDISC(N_SMUX);