/* drivers/tty/n_smux.c
 *
 * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/tty_driver.h>
#include <linux/smux.h>
#include <linux/list.h>
#include <linux/kfifo.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <mach/subsystem_notif.h>
#include <mach/subsystem_restart.h>
#include <mach/msm_serial_hs.h>
#include <mach/msm_ipc_logging.h>
#include "smux_private.h"
#include "smux_loopback.h"

#define SMUX_NOTIFY_FIFO_SIZE	128
#define SMUX_TX_QUEUE_SIZE	256
#define SMUX_PKT_LOG_SIZE	128

/* Maximum size we can accept in a single RX buffer */
#define TTY_RECEIVE_ROOM	65536
#define TTY_BUFFER_FULL_WAIT_MS	50

/* maximum sleep time between wakeup attempts */
#define SMUX_WAKEUP_DELAY_MAX	(1 << 20)

/* minimum delay for scheduling delayed work */
#define SMUX_WAKEUP_DELAY_MIN	(1 << 15)

/* inactivity timeout for no rx/tx activity */
#define SMUX_INACTIVITY_TIMEOUT_MS	1000000

/* RX get_rx_buffer retry timeout values */
#define SMUX_RX_RETRY_MIN_MS	(1 << 0)	/* 1 ms */
#define SMUX_RX_RETRY_MAX_MS	(1 << 10)	/* 1024 ms */

enum {
	MSM_SMUX_DEBUG = 1U << 0,
	MSM_SMUX_INFO = 1U << 1,
	MSM_SMUX_POWER_INFO = 1U << 2,
	MSM_SMUX_PKT = 1U << 3,
};

static int smux_debug_mask = MSM_SMUX_DEBUG | MSM_SMUX_POWER_INFO;
module_param_named(debug_mask, smux_debug_mask,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);

static int disable_ipc_logging;

/* Simulated wakeup used for testing */
int smux_byte_loopback;
module_param_named(byte_loopback, smux_byte_loopback,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);
int smux_simulate_wakeup_delay = 1;
module_param_named(simulate_wakeup_delay, smux_simulate_wakeup_delay,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);

#define IPC_LOG_STR(x...) do { \
	if (!disable_ipc_logging && log_ctx) \
		ipc_log_string(log_ctx, x); \
} while (0)

#define SMUX_DBG(x...) do { \
	if (smux_debug_mask & MSM_SMUX_DEBUG) \
		IPC_LOG_STR(x); \
} while (0)

#define SMUX_ERR(x...) do { \
	pr_err(x); \
	IPC_LOG_STR(x); \
} while (0)

#define SMUX_PWR(x...) do { \
	if (smux_debug_mask & MSM_SMUX_POWER_INFO) \
		IPC_LOG_STR(x); \
} while (0)

#define SMUX_PWR_PKT_RX(pkt) do { \
	if (smux_debug_mask & MSM_SMUX_POWER_INFO) \
		smux_log_pkt(pkt, 1); \
} while (0)

#define SMUX_PWR_PKT_TX(pkt) do { \
	if (smux_debug_mask & MSM_SMUX_POWER_INFO) { \
		if (pkt->hdr.cmd == SMUX_CMD_BYTE && \
			pkt->hdr.flags == SMUX_WAKEUP_ACK) \
			IPC_LOG_STR("smux: TX Wakeup ACK\n"); \
		else if (pkt->hdr.cmd == SMUX_CMD_BYTE && \
			pkt->hdr.flags == SMUX_WAKEUP_REQ) \
			IPC_LOG_STR("smux: TX Wakeup REQ\n"); \
		else \
			smux_log_pkt(pkt, 0); \
	} \
} while (0)

#define SMUX_PWR_BYTE_TX(pkt) do { \
	if (smux_debug_mask & MSM_SMUX_POWER_INFO) { \
		smux_log_pkt(pkt, 0); \
	} \
} while (0)

#define SMUX_LOG_PKT_RX(pkt) do { \
	if (smux_debug_mask & MSM_SMUX_PKT) \
		smux_log_pkt(pkt, 1); \
} while (0)

#define SMUX_LOG_PKT_TX(pkt) do { \
	if (smux_debug_mask & MSM_SMUX_PKT) \
		smux_log_pkt(pkt, 0); \
} while (0)

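/*
 * Example (illustrative): the categories above can be toggled at runtime
 * through the debug_mask module parameter, e.g. from a shell (path assumes
 * the driver is built as n_smux):
 *
 *	echo 0xF > /sys/module/n_smux/parameters/debug_mask
 *
 * enables DEBUG, INFO, POWER_INFO, and PKT logging; the output lands in
 * the IPC log referenced by log_ctx rather than the kernel log.
 */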
/**
 * Return true if channel is fully opened (both
 * local and remote sides are in the OPENED state).
 */
#define IS_FULLY_OPENED(ch) \
	(ch && (ch)->local_state == SMUX_LCH_LOCAL_OPENED \
	 && (ch)->remote_state == SMUX_LCH_REMOTE_OPENED)

static struct platform_device smux_devs[] = {
	{.name = "SMUX_CTL", .id = -1},
	{.name = "SMUX_RMNET", .id = -1},
	{.name = "SMUX_DUN_DATA_HSUART", .id = 0},
	{.name = "SMUX_RMNET_DATA_HSUART", .id = 1},
	{.name = "SMUX_RMNET_CTL_HSUART", .id = 0},
	{.name = "SMUX_DIAG", .id = -1},
};

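/*
 * Client drivers bind to the devices above by registering a
 * platform_driver whose name matches; a minimal sketch of a hypothetical
 * client (the probe function and driver name owner are illustrative,
 * not part of this file):
 *
 *	static struct platform_driver smux_rmnet_driver = {
 *		.probe	= smux_rmnet_probe,
 *		.driver	= {
 *			.name	= "SMUX_RMNET",
 *			.owner	= THIS_MODULE,
 *		},
 *	};
 *
 *	platform_driver_register(&smux_rmnet_driver);
 */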
enum {
	SMUX_CMD_STATUS_RTC = 1 << 0,
	SMUX_CMD_STATUS_RTR = 1 << 1,
	SMUX_CMD_STATUS_RI = 1 << 2,
	SMUX_CMD_STATUS_DCD = 1 << 3,
	SMUX_CMD_STATUS_FLOW_CNTL = 1 << 4,
};

/* Channel mode */
enum {
	SMUX_LCH_MODE_NORMAL,
	SMUX_LCH_MODE_LOCAL_LOOPBACK,
	SMUX_LCH_MODE_REMOTE_LOOPBACK,
};

enum {
	SMUX_RX_IDLE,
	SMUX_RX_MAGIC,
	SMUX_RX_HDR,
	SMUX_RX_PAYLOAD,
	SMUX_RX_FAILURE,
};

/**
 * Power states.
 *
 * The _FLUSH states are internal transitional states and are not part of the
 * official state machine.
 */
enum {
	SMUX_PWR_OFF,
	SMUX_PWR_TURNING_ON,
	SMUX_PWR_ON,
	SMUX_PWR_TURNING_OFF_FLUSH, /* power-off req/ack in TX queue */
	SMUX_PWR_TURNING_OFF,
	SMUX_PWR_OFF_FLUSH,
};

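/*
 * Rough lifecycle suggested by the states above (editorial sketch; the
 * exact handshake is implemented by the wakeup and inactivity workers):
 * the link idles in SMUX_PWR_OFF; a wakeup REQ/ACK byte exchange moves it
 * through SMUX_PWR_TURNING_ON to SMUX_PWR_ON; after the inactivity
 * timeout a power-off request sits in the TX queue
 * (SMUX_PWR_TURNING_OFF_FLUSH), is acknowledged (SMUX_PWR_TURNING_OFF),
 * and the final flush (SMUX_PWR_OFF_FLUSH) drains the UART before the
 * state returns to SMUX_PWR_OFF.
 */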
union notifier_metadata {
	struct smux_meta_disconnected disconnected;
	struct smux_meta_read read;
	struct smux_meta_write write;
	struct smux_meta_tiocm tiocm;
};

struct smux_notify_handle {
	void (*notify)(void *priv, int event_type, const void *metadata);
	void *priv;
	int event_type;
	union notifier_metadata *metadata;
};

/**
 * Get RX Buffer Retry structure.
 *
 * This is used for clients that are unable to provide an RX buffer
 * immediately. This structure temporarily holds the packet data while the
 * buffer request is retried.
 */
struct smux_rx_pkt_retry {
	struct smux_pkt_t *pkt;
	struct list_head rx_retry_list;
	unsigned timeout_in_ms;
};

/**
 * Receive worker data structure.
 *
 * One instance is created for every call to smux_rx_state_machine.
 */
struct smux_rx_worker_data {
	const unsigned char *data;
	int len;
	int flag;

	struct work_struct work;
	struct completion work_complete;
};

/**
 * Line discipline and module structure.
 *
 * Only one instance since multiple instances of the line discipline are not
 * allowed.
 */
struct smux_ldisc_t {
	struct mutex mutex_lha0;

	int is_initialized;
	int platform_devs_registered;
	int in_reset;
	int ld_open_count;
	struct tty_struct *tty;

	/* RX State Machine (single-threaded access by smux_rx_wq) */
	unsigned char recv_buf[SMUX_MAX_PKT_SIZE];
	unsigned int recv_len;
	unsigned int pkt_remain;
	unsigned rx_state;

	/* RX Activity - accessed by multiple threads */
	spinlock_t rx_lock_lha1;
	unsigned rx_activity_flag;

	/* TX / Power */
	spinlock_t tx_lock_lha2;
	struct list_head lch_tx_ready_list;
	unsigned power_state;
	unsigned pwr_wakeup_delay_us;
	unsigned tx_activity_flag;
	unsigned powerdown_enabled;
	unsigned power_ctl_remote_req_received;
	struct list_head power_queue;
};

/* data structures */
struct smux_lch_t smux_lch[SMUX_NUM_LOGICAL_CHANNELS];
static struct smux_ldisc_t smux;
static const char *tty_error_type[] = {
	[TTY_NORMAL] = "normal",
	[TTY_OVERRUN] = "overrun",
	[TTY_BREAK] = "break",
	[TTY_PARITY] = "parity",
	[TTY_FRAME] = "framing",
};

static const char * const smux_cmds[] = {
	[SMUX_CMD_DATA] = "DATA",
	[SMUX_CMD_OPEN_LCH] = "OPEN",
	[SMUX_CMD_CLOSE_LCH] = "CLOSE",
	[SMUX_CMD_STATUS] = "STATUS",
	[SMUX_CMD_PWR_CTL] = "PWR",
	[SMUX_CMD_BYTE] = "Raw Byte",
};

static const char * const smux_events[] = {
	[SMUX_CONNECTED] = "CONNECTED",
	[SMUX_DISCONNECTED] = "DISCONNECTED",
	[SMUX_READ_DONE] = "READ_DONE",
	[SMUX_READ_FAIL] = "READ_FAIL",
	[SMUX_WRITE_DONE] = "WRITE_DONE",
	[SMUX_WRITE_FAIL] = "WRITE_FAIL",
	[SMUX_TIOCM_UPDATE] = "TIOCM_UPDATE",
	[SMUX_LOW_WM_HIT] = "LOW_WM_HIT",
	[SMUX_HIGH_WM_HIT] = "HIGH_WM_HIT",
	[SMUX_RX_RETRY_HIGH_WM_HIT] = "RX_RETRY_HIGH_WM_HIT",
	[SMUX_RX_RETRY_LOW_WM_HIT] = "RX_RETRY_LOW_WM_HIT",
};

static const char * const smux_local_state[] = {
	[SMUX_LCH_LOCAL_CLOSED] = "CLOSED",
	[SMUX_LCH_LOCAL_OPENING] = "OPENING",
	[SMUX_LCH_LOCAL_OPENED] = "OPENED",
	[SMUX_LCH_LOCAL_CLOSING] = "CLOSING",
};

static const char * const smux_remote_state[] = {
	[SMUX_LCH_REMOTE_CLOSED] = "CLOSED",
	[SMUX_LCH_REMOTE_OPENED] = "OPENED",
};

static const char * const smux_mode[] = {
	[SMUX_LCH_MODE_NORMAL] = "N",
	[SMUX_LCH_MODE_LOCAL_LOOPBACK] = "L",
	[SMUX_LCH_MODE_REMOTE_LOOPBACK] = "R",
};

static const char * const smux_undef[] = {
	[SMUX_UNDEF_LONG] = "UNDEF",
	[SMUX_UNDEF_SHORT] = "U",
};

static void *log_ctx;
static void smux_notify_local_fn(struct work_struct *work);
static DECLARE_WORK(smux_notify_local, smux_notify_local_fn);

static struct workqueue_struct *smux_notify_wq;
static size_t handle_size;
static struct kfifo smux_notify_fifo;
static int queued_fifo_notifications;
static DEFINE_SPINLOCK(notify_lock_lhc1);

static struct workqueue_struct *smux_tx_wq;
static struct workqueue_struct *smux_rx_wq;
static void smux_tx_worker(struct work_struct *work);
static DECLARE_WORK(smux_tx_work, smux_tx_worker);

static void smux_wakeup_worker(struct work_struct *work);
static void smux_rx_retry_worker(struct work_struct *work);
static void smux_rx_worker(struct work_struct *work);
static DECLARE_WORK(smux_wakeup_work, smux_wakeup_worker);
static DECLARE_DELAYED_WORK(smux_wakeup_delayed_work, smux_wakeup_worker);

static void smux_inactivity_worker(struct work_struct *work);
static DECLARE_WORK(smux_inactivity_work, smux_inactivity_worker);
static DECLARE_DELAYED_WORK(smux_delayed_inactivity_work,
		smux_inactivity_worker);

static void list_channel(struct smux_lch_t *ch);
static int smux_send_status_cmd(struct smux_lch_t *ch);
static int smux_dispatch_rx_pkt(struct smux_pkt_t *pkt);
static void smux_flush_tty(void);
static void smux_purge_ch_tx_queue(struct smux_lch_t *ch, int is_ssr);
static int schedule_notify(uint8_t lcid, int event,
			const union notifier_metadata *metadata);
static int ssr_notifier_cb(struct notifier_block *this,
				unsigned long code,
				void *data);
static void smux_uart_power_on_atomic(void);
static int smux_rx_flow_control_updated(struct smux_lch_t *ch);
static void smux_flush_workqueues(void);
static void smux_pdev_release(struct device *dev);

/**
 * local_lch_state() - Return human readable form of local logical state.
 * @state:  Local logical channel state enum.
 *
 */
const char *local_lch_state(unsigned state)
{
	if (state < ARRAY_SIZE(smux_local_state))
		return smux_local_state[state];
	else
		return smux_undef[SMUX_UNDEF_LONG];
}

/**
 * remote_lch_state() - Return human readable form of remote logical state.
 * @state:  Remote logical channel state enum.
 *
 */
const char *remote_lch_state(unsigned state)
{
	if (state < ARRAY_SIZE(smux_remote_state))
		return smux_remote_state[state];
	else
		return smux_undef[SMUX_UNDEF_LONG];
}

/**
 * lch_mode() - Return human readable form of mode.
 * @mode:  Mode of the logical channel.
 *
 */
const char *lch_mode(unsigned mode)
{
	if (mode < ARRAY_SIZE(smux_mode))
		return smux_mode[mode];
	else
		return smux_undef[SMUX_UNDEF_SHORT];
}

/**
 * Convert TTY Error Flags to string for logging purposes.
 *
 * @flag    TTY_* flag
 * @returns String description or NULL if unknown
 */
static const char *tty_flag_to_str(unsigned flag)
{
	if (flag < ARRAY_SIZE(tty_error_type))
		return tty_error_type[flag];
	return NULL;
}

/**
 * Convert SMUX Command to string for logging purposes.
 *
 * @cmd     SMUX command
 * @returns String description or NULL if unknown
 */
static const char *cmd_to_str(unsigned cmd)
{
	if (cmd < ARRAY_SIZE(smux_cmds))
		return smux_cmds[cmd];
	return NULL;
}

/**
 * Convert SMUX event to string for logging purposes.
 *
 * @event   SMUX event
 * @returns String description or NULL if unknown
 */
static const char *event_to_str(unsigned event)
{
	if (event < ARRAY_SIZE(smux_events))
		return smux_events[event];
	return NULL;
}

/**
 * Set the reset state due to an unrecoverable failure.
 */
static void smux_enter_reset(void)
{
	SMUX_ERR("%s: unrecoverable failure, waiting for ssr\n", __func__);
	smux.in_reset = 1;
}

/**
 * Initialize the lch_structs.
 */
static int lch_init(void)
{
	unsigned int id;
	struct smux_lch_t *ch;
	int i = 0;

	handle_size = sizeof(struct smux_notify_handle *);

	smux_notify_wq = create_singlethread_workqueue("smux_notify_wq");
	smux_tx_wq = create_singlethread_workqueue("smux_tx_wq");
	smux_rx_wq = create_singlethread_workqueue("smux_rx_wq");

	if (!smux_notify_wq || !smux_tx_wq || !smux_rx_wq) {
		SMUX_DBG("smux: %s: create_singlethread_workqueue ENOMEM\n",
							__func__);
		return -ENOMEM;
	}

	i |= kfifo_alloc(&smux_notify_fifo,
			SMUX_NOTIFY_FIFO_SIZE * handle_size,
			GFP_KERNEL);
	i |= smux_loopback_init();

	if (i) {
		SMUX_ERR("%s: out of memory error\n", __func__);
		return -ENOMEM;
	}

	for (id = 0 ; id < SMUX_NUM_LOGICAL_CHANNELS; id++) {
		ch = &smux_lch[id];

		spin_lock_init(&ch->state_lock_lhb1);
		ch->lcid = id;
		ch->local_state = SMUX_LCH_LOCAL_CLOSED;
		ch->local_mode = SMUX_LCH_MODE_NORMAL;
		ch->local_tiocm = 0x0;
		ch->options = SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP;
		ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
		ch->remote_mode = SMUX_LCH_MODE_NORMAL;
		ch->remote_tiocm = 0x0;
		ch->tx_flow_control = 0;
		ch->rx_flow_control_auto = 0;
		ch->rx_flow_control_client = 0;
		ch->priv = 0;
		ch->notify = 0;
		ch->get_rx_buffer = 0;

		INIT_LIST_HEAD(&ch->rx_retry_queue);
		ch->rx_retry_queue_cnt = 0;
		INIT_DELAYED_WORK(&ch->rx_retry_work, smux_rx_retry_worker);

		spin_lock_init(&ch->tx_lock_lhb2);
		INIT_LIST_HEAD(&ch->tx_queue);
		INIT_LIST_HEAD(&ch->tx_ready_list);
		ch->tx_pending_data_cnt = 0;
		ch->notify_lwm = 0;
	}

	return 0;
}

/**
 * Empty and cleanup all SMUX logical channels for subsystem restart or line
 * discipline disconnect.
 */
static void smux_lch_purge(void)
{
	struct smux_lch_t *ch;
	unsigned long flags;
	int i;

	/* Empty TX ready list */
	spin_lock_irqsave(&smux.tx_lock_lha2, flags);
	while (!list_empty(&smux.lch_tx_ready_list)) {
		SMUX_DBG("smux: %s: emptying ready list %p\n",
				__func__, smux.lch_tx_ready_list.next);
		ch = list_first_entry(&smux.lch_tx_ready_list,
						struct smux_lch_t,
						tx_ready_list);
		list_del(&ch->tx_ready_list);
		INIT_LIST_HEAD(&ch->tx_ready_list);
	}

	/* Purge Power Queue */
	while (!list_empty(&smux.power_queue)) {
		struct smux_pkt_t *pkt;

		pkt = list_first_entry(&smux.power_queue,
						struct smux_pkt_t,
						list);
		list_del(&pkt->list);
		SMUX_DBG("smux: %s: emptying power queue pkt=%p\n",
				__func__, pkt);
		smux_free_pkt(pkt);
	}
	spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);

	/* Close all ports */
	for (i = 0 ; i < SMUX_NUM_LOGICAL_CHANNELS; i++) {
		ch = &smux_lch[i];
		SMUX_DBG("smux: %s: cleaning up lcid %d\n", __func__, i);

		spin_lock_irqsave(&ch->state_lock_lhb1, flags);

		/* Purge TX queue */
		spin_lock(&ch->tx_lock_lhb2);
		smux_purge_ch_tx_queue(ch, 1);
		spin_unlock(&ch->tx_lock_lhb2);

		/* Notify user of disconnect and reset channel state */
		if (ch->local_state == SMUX_LCH_LOCAL_OPENED ||
			ch->local_state == SMUX_LCH_LOCAL_CLOSING) {
			union notifier_metadata meta;

			meta.disconnected.is_ssr = smux.in_reset;
			schedule_notify(ch->lcid, SMUX_DISCONNECTED, &meta);
		}

		ch->local_state = SMUX_LCH_LOCAL_CLOSED;
		ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
		ch->remote_mode = SMUX_LCH_MODE_NORMAL;
		ch->tx_flow_control = 0;
		ch->rx_flow_control_auto = 0;
		ch->rx_flow_control_client = 0;

		/* Purge RX retry queue */
		if (ch->rx_retry_queue_cnt)
			queue_delayed_work(smux_rx_wq, &ch->rx_retry_work, 0);

		spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
	}
}

int smux_assert_lch_id(uint32_t lcid)
{
	if (lcid >= SMUX_NUM_LOGICAL_CHANNELS)
		return -ENXIO;
	else
		return 0;
}

/**
 * Log packet information for debug purposes.
 *
 * @pkt      Packet to log
 * @is_recv  1 = RX packet; 0 = TX Packet
 *
 * [DIR][LCID] [LOCAL_STATE][LOCAL_MODE]:[REMOTE_STATE][REMOTE_MODE] PKT Info
 *
 * PKT Info:
 *   [CMD] flags [flags] len [PAYLOAD_LEN]:[PAD_LEN] [Payload hex bytes]
 *
 * Direction:    R = Receive, S = Send
 * Local State:  C = Closed; c = closing; o = opening; O = Opened
 * Local Mode:   L = Local loopback; R = Remote loopback; N = Normal
 * Remote State: C = Closed; O = Opened
 * Remote Mode:  R = Remote loopback; N = Normal
 */
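/*
 * For example, a DATA packet received on lcid 8 while the local side is
 * OPENED in normal mode and the remote side is still CLOSED would log as
 * (illustrative):
 *
 *	smux: R8 ON:CN DATA flags 0 len 15:1 de ad be ef ...
 */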
static void smux_log_pkt(struct smux_pkt_t *pkt, int is_recv)
{
	char logbuf[SMUX_PKT_LOG_SIZE];
	char cmd_extra[16];
	int i = 0;
	int count;
	int len;
	char local_state;
	char local_mode;
	char remote_state;
	char remote_mode;
	struct smux_lch_t *ch = NULL;
	unsigned char *data;

	if (!smux_assert_lch_id(pkt->hdr.lcid))
		ch = &smux_lch[pkt->hdr.lcid];

	if (ch) {
		switch (ch->local_state) {
		case SMUX_LCH_LOCAL_CLOSED:
			local_state = 'C';
			break;
		case SMUX_LCH_LOCAL_OPENING:
			local_state = 'o';
			break;
		case SMUX_LCH_LOCAL_OPENED:
			local_state = 'O';
			break;
		case SMUX_LCH_LOCAL_CLOSING:
			local_state = 'c';
			break;
		default:
			local_state = 'U';
			break;
		}

		switch (ch->local_mode) {
		case SMUX_LCH_MODE_LOCAL_LOOPBACK:
			local_mode = 'L';
			break;
		case SMUX_LCH_MODE_REMOTE_LOOPBACK:
			local_mode = 'R';
			break;
		case SMUX_LCH_MODE_NORMAL:
			local_mode = 'N';
			break;
		default:
			local_mode = 'U';
			break;
		}

		switch (ch->remote_state) {
		case SMUX_LCH_REMOTE_CLOSED:
			remote_state = 'C';
			break;
		case SMUX_LCH_REMOTE_OPENED:
			remote_state = 'O';
			break;

		default:
			remote_state = 'U';
			break;
		}

		switch (ch->remote_mode) {
		case SMUX_LCH_MODE_REMOTE_LOOPBACK:
			remote_mode = 'R';
			break;
		case SMUX_LCH_MODE_NORMAL:
			remote_mode = 'N';
			break;
		default:
			remote_mode = 'U';
			break;
		}
	} else {
		/* broadcast channel */
		local_state = '-';
		local_mode = '-';
		remote_state = '-';
		remote_mode = '-';
	}

	/* determine command type (ACK, etc) */
	cmd_extra[0] = '\0';
	switch (pkt->hdr.cmd) {
	case SMUX_CMD_OPEN_LCH:
		if (pkt->hdr.flags & SMUX_CMD_OPEN_ACK)
			snprintf(cmd_extra, sizeof(cmd_extra), " ACK");
		break;
	case SMUX_CMD_CLOSE_LCH:
		if (pkt->hdr.flags & SMUX_CMD_CLOSE_ACK)
			snprintf(cmd_extra, sizeof(cmd_extra), " ACK");
		break;

	case SMUX_CMD_PWR_CTL:
		if (pkt->hdr.flags & SMUX_CMD_PWR_CTL_ACK)
			snprintf(cmd_extra, sizeof(cmd_extra), " ACK");
		break;
	}

	i += snprintf(logbuf + i, SMUX_PKT_LOG_SIZE - i,
		"smux: %c%d %c%c:%c%c %s%s flags %x len %d:%d ",
		is_recv ? 'R' : 'S', pkt->hdr.lcid,
		local_state, local_mode,
		remote_state, remote_mode,
		cmd_to_str(pkt->hdr.cmd), cmd_extra, pkt->hdr.flags,
		pkt->hdr.payload_len, pkt->hdr.pad_len);

	len = (pkt->hdr.payload_len > 16) ? 16 : pkt->hdr.payload_len;
	data = (unsigned char *)pkt->payload;
	for (count = 0; count < len; count++)
		i += snprintf(logbuf + i, SMUX_PKT_LOG_SIZE - i,
			"%02x ", (unsigned)data[count]);

	IPC_LOG_STR(logbuf);
}

static void smux_notify_local_fn(struct work_struct *work)
{
	struct smux_notify_handle *notify_handle = NULL;
	union notifier_metadata *metadata = NULL;
	unsigned long flags;
	int i;

	for (;;) {
		/* retrieve notification */
		spin_lock_irqsave(&notify_lock_lhc1, flags);
		if (kfifo_len(&smux_notify_fifo) >= handle_size) {
			i = kfifo_out(&smux_notify_fifo,
					&notify_handle,
					handle_size);
			if (i != handle_size) {
				SMUX_ERR(
					"%s: unable to retrieve handle %d expected %d\n",
					__func__, i, handle_size);
				spin_unlock_irqrestore(&notify_lock_lhc1,
						flags);
				break;
			}
		} else {
			spin_unlock_irqrestore(&notify_lock_lhc1, flags);
			break;
		}
		--queued_fifo_notifications;
		spin_unlock_irqrestore(&notify_lock_lhc1, flags);

		/* notify client */
		metadata = notify_handle->metadata;
		notify_handle->notify(notify_handle->priv,
					notify_handle->event_type,
					metadata);

		kfree(metadata);
		kfree(notify_handle);
	}
}

/**
 * Initialize existing packet.
 */
void smux_init_pkt(struct smux_pkt_t *pkt)
{
	memset(pkt, 0x0, sizeof(*pkt));
	pkt->hdr.magic = SMUX_MAGIC;
	INIT_LIST_HEAD(&pkt->list);
}

/**
 * Allocate and initialize packet.
 *
 * If a payload is needed, either set it directly and ensure that it's freed,
 * or use smux_alloc_pkt_payload() to allocate a payload that will be freed
 * automatically when smux_free_pkt() is called.
 */
struct smux_pkt_t *smux_alloc_pkt(void)
{
	struct smux_pkt_t *pkt;

	/* Consider a free list implementation instead of kmalloc */
	pkt = kmalloc(sizeof(struct smux_pkt_t), GFP_ATOMIC);
	if (!pkt) {
		SMUX_ERR("%s: out of memory\n", __func__);
		return NULL;
	}
	smux_init_pkt(pkt);
	pkt->allocated = 1;

	return pkt;
}

/**
 * Free packet.
 *
 * @pkt  Packet to free (may be NULL)
 *
 * If payload was allocated using smux_alloc_pkt_payload(), then it is freed as
 * well. Otherwise, the caller is responsible for freeing the payload.
 */
void smux_free_pkt(struct smux_pkt_t *pkt)
{
	if (pkt) {
		if (pkt->free_payload)
			kfree(pkt->payload);
		if (pkt->allocated)
			kfree(pkt);
	}
}

/**
 * Allocate packet payload.
 *
 * @pkt  Packet to add payload to
 *
 * @returns 0 on success, <0 upon error
 *
 * A flag is set to signal smux_free_pkt() to free the payload.
 */
int smux_alloc_pkt_payload(struct smux_pkt_t *pkt)
{
	if (!pkt)
		return -EINVAL;

	pkt->payload = kmalloc(pkt->hdr.payload_len, GFP_ATOMIC);
	pkt->free_payload = 1;
	if (!pkt->payload) {
		SMUX_ERR("%s: unable to malloc %d bytes for payload\n",
				__func__, pkt->hdr.payload_len);
		return -ENOMEM;
	}

	return 0;
}

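/*
 * Typical TX allocation pattern using the helpers above (illustrative
 * sketch; lcid, ch, buf, and len are assumed caller context, and error
 * handling is trimmed):
 *
 *	struct smux_pkt_t *pkt = smux_alloc_pkt();
 *
 *	pkt->hdr.cmd = SMUX_CMD_DATA;
 *	pkt->hdr.lcid = lcid;
 *	pkt->hdr.payload_len = len;
 *	if (smux_alloc_pkt_payload(pkt)) {
 *		smux_free_pkt(pkt);
 *		return -ENOMEM;
 *	}
 *	memcpy(pkt->payload, buf, len);
 *	smux_tx_queue(pkt, ch, 1);
 *
 * smux_free_pkt() then releases both the packet and its payload once the
 * packet has been consumed.
 */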
static int schedule_notify(uint8_t lcid, int event,
			const union notifier_metadata *metadata)
{
	struct smux_notify_handle *notify_handle = 0;
	union notifier_metadata *meta_copy = 0;
	struct smux_lch_t *ch;
	int i;
	unsigned long flags;
	int ret = 0;

	IPC_LOG_STR("smux: %s ch:%d\n", event_to_str(event), lcid);
	ch = &smux_lch[lcid];
	notify_handle = kzalloc(sizeof(struct smux_notify_handle),
						GFP_ATOMIC);
	if (!notify_handle) {
		SMUX_ERR("%s: out of memory\n", __func__);
		ret = -ENOMEM;
		goto free_out;
	}

	notify_handle->notify = ch->notify;
	notify_handle->priv = ch->priv;
	notify_handle->event_type = event;
	if (metadata) {
		meta_copy = kzalloc(sizeof(union notifier_metadata),
						GFP_ATOMIC);
		if (!meta_copy) {
			SMUX_ERR("%s: out of memory\n", __func__);
			ret = -ENOMEM;
			goto free_out;
		}
		*meta_copy = *metadata;
		notify_handle->metadata = meta_copy;
	} else {
		notify_handle->metadata = NULL;
	}

	spin_lock_irqsave(&notify_lock_lhc1, flags);
	i = kfifo_avail(&smux_notify_fifo);
	if (i < handle_size) {
		SMUX_ERR("%s: fifo full error %d expected %d\n",
					__func__, i, handle_size);
		ret = -ENOMEM;
		goto unlock_out;
	}

	i = kfifo_in(&smux_notify_fifo, &notify_handle, handle_size);
	if (i < 0 || i != handle_size) {
		SMUX_ERR("%s: fifo not available error %d (expected %d)\n",
				__func__, i, handle_size);
		ret = -ENOSPC;
		goto unlock_out;
	}
	++queued_fifo_notifications;

unlock_out:
	spin_unlock_irqrestore(&notify_lock_lhc1, flags);

free_out:
	queue_work(smux_notify_wq, &smux_notify_local);
	if (ret < 0 && notify_handle) {
		kfree(notify_handle->metadata);
		kfree(notify_handle);
	}
	return ret;
}

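/*
 * Notification flow: schedule_notify() can run in atomic context, so it
 * only snapshots the client callback, copies the metadata with GFP_ATOMIC,
 * and pushes the handle into smux_notify_fifo; smux_notify_local_fn()
 * later drains the FIFO on smux_notify_wq and invokes each callback in
 * process context, freeing the handle and metadata afterwards.
 */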
/**
 * Returns the serialized size of a packet.
 *
 * @pkt  Packet to serialize
 *
 * @returns Serialized length of packet
 */
static unsigned int smux_serialize_size(struct smux_pkt_t *pkt)
{
	unsigned int size;

	size = sizeof(struct smux_hdr_t);
	size += pkt->hdr.payload_len;
	size += pkt->hdr.pad_len;

	return size;
}

/**
 * Serialize packet @pkt into output buffer @data.
 *
 * @pkt      Packet to serialize
 * @out      Destination buffer pointer
 * @out_len  Size of serialized packet
 *
 * @returns 0 for success
 */
int smux_serialize(struct smux_pkt_t *pkt, char *out,
					unsigned int *out_len)
{
	char *data_start = out;

	if (smux_serialize_size(pkt) > SMUX_MAX_PKT_SIZE) {
		SMUX_ERR("%s: packet size %d too big\n",
				__func__, smux_serialize_size(pkt));
		return -E2BIG;
	}

	memcpy(out, &pkt->hdr, sizeof(struct smux_hdr_t));
	out += sizeof(struct smux_hdr_t);
	if (pkt->payload) {
		memcpy(out, pkt->payload, pkt->hdr.payload_len);
		out += pkt->hdr.payload_len;
	}
	if (pkt->hdr.pad_len) {
		memset(out, 0x0, pkt->hdr.pad_len);
		out += pkt->hdr.pad_len;
	}
	*out_len = out - data_start;
	return 0;
}

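/*
 * Resulting wire format (from the serialization order above):
 *
 *	+------------------------+-----------------------+-------------------+
 *	| struct smux_hdr_t      | payload               | pad               |
 *	| (includes SMUX_MAGIC)  | hdr.payload_len bytes | hdr.pad_len bytes |
 *	+------------------------+-----------------------+-------------------+
 *
 * smux_serialize() zero-fills the pad bytes, but their value is otherwise
 * undefined (see smux_serialize_padding() below).
 */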
/**
 * Serialize header and provide pointer to the data.
 *
 * @pkt           Packet
 * @out[out]      Pointer to the serialized header data
 * @out_len[out]  Pointer to the serialized header length
 */
static void smux_serialize_hdr(struct smux_pkt_t *pkt, char **out,
					unsigned int *out_len)
{
	*out = (char *)&pkt->hdr;
	*out_len = sizeof(struct smux_hdr_t);
}

/**
 * Serialize payload and provide pointer to the data.
 *
 * @pkt           Packet
 * @out[out]      Pointer to the serialized payload data
 * @out_len[out]  Pointer to the serialized payload length
 */
static void smux_serialize_payload(struct smux_pkt_t *pkt, char **out,
					unsigned int *out_len)
{
	*out = pkt->payload;
	*out_len = pkt->hdr.payload_len;
}

/**
 * Serialize padding and provide pointer to the data.
 *
 * @pkt           Packet
 * @out[out]      Pointer to the serialized padding (always NULL)
 * @out_len[out]  Pointer to the serialized padding length
 *
 * Since the padding field value is undefined, only the size of the padding
 * (@out_len) is set and the buffer pointer (@out) will always be NULL.
 */
static void smux_serialize_padding(struct smux_pkt_t *pkt, char **out,
					unsigned int *out_len)
{
	*out = NULL;
	*out_len = pkt->hdr.pad_len;
}

/**
 * Write data to TTY framework and handle breaking the writes up if needed.
 *
 * @data  Data to write
 * @len   Length of data
 *
 * @returns 0 for success, < 0 for failure
 */
static int write_to_tty(char *data, unsigned len)
{
	int data_written;

	if (!data)
		return 0;

	while (len > 0 && !smux.in_reset) {
		data_written = smux.tty->ops->write(smux.tty, data, len);
		if (data_written >= 0) {
			len -= data_written;
			data += data_written;
		} else {
			SMUX_ERR("%s: TTY write returned error %d\n",
					__func__, data_written);
			return data_written;
		}

		if (len)
			tty_wait_until_sent(smux.tty,
				msecs_to_jiffies(TTY_BUFFER_FULL_WAIT_MS));
	}
	return 0;
}

/**
 * Write packet to TTY.
 *
 * @pkt  packet to write
 *
 * @returns 0 on success
 */
static int smux_tx_tty(struct smux_pkt_t *pkt)
{
	char *data;
	unsigned int len;
	int ret;

	if (!smux.tty) {
		SMUX_ERR("%s: TTY not initialized", __func__);
		return -ENOTTY;
	}

	if (pkt->hdr.cmd == SMUX_CMD_BYTE) {
		SMUX_DBG("smux: %s: tty send single byte\n", __func__);
		ret = write_to_tty(&pkt->hdr.flags, 1);
		return ret;
	}

	smux_serialize_hdr(pkt, &data, &len);
	ret = write_to_tty(data, len);
	if (ret) {
		SMUX_ERR("%s: failed %d to write header %d\n",
				__func__, ret, len);
		return ret;
	}

	smux_serialize_payload(pkt, &data, &len);
	ret = write_to_tty(data, len);
	if (ret) {
		SMUX_ERR("%s: failed %d to write payload %d\n",
				__func__, ret, len);
		return ret;
	}

	smux_serialize_padding(pkt, &data, &len);
	while (len > 0) {
		char zero = 0x0;

		ret = write_to_tty(&zero, 1);
		if (ret) {
			SMUX_ERR("%s: failed %d to write padding %d\n",
					__func__, ret, len);
			return ret;
		}
		--len;
	}
	return 0;
}

/**
 * Send a single character.
 *
 * @ch  Character to send
 */
static void smux_send_byte(char ch)
{
	struct smux_pkt_t *pkt;

	pkt = smux_alloc_pkt();
	if (!pkt) {
		SMUX_ERR("%s: alloc failure for byte %x\n", __func__, ch);
		return;
	}
	pkt->hdr.cmd = SMUX_CMD_BYTE;
	pkt->hdr.flags = ch;
	pkt->hdr.lcid = SMUX_BROADCAST_LCID;

	list_add_tail(&pkt->list, &smux.power_queue);
	queue_work(smux_tx_wq, &smux_tx_work);
}

/**
 * Receive a single-character packet (used for internal testing).
 *
 * @ch    Character to receive
 * @lcid  Logical channel ID for packet
 *
 * @returns 0 for success
 */
static int smux_receive_byte(char ch, int lcid)
{
	struct smux_pkt_t pkt;

	smux_init_pkt(&pkt);
	pkt.hdr.lcid = lcid;
	pkt.hdr.cmd = SMUX_CMD_BYTE;
	pkt.hdr.flags = ch;

	return smux_dispatch_rx_pkt(&pkt);
}

/**
 * Queue packet for transmit.
 *
 * @pkt_ptr  Packet to queue
 * @ch       Channel to queue packet on
 * @queue    Queue channel on ready list
 */
static void smux_tx_queue(struct smux_pkt_t *pkt_ptr, struct smux_lch_t *ch,
		int queue)
{
	unsigned long flags;

	SMUX_DBG("smux: %s: queuing pkt %p\n", __func__, pkt_ptr);

	spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
	list_add_tail(&pkt_ptr->list, &ch->tx_queue);
	spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);

	if (queue)
		list_channel(ch);
}

/**
 * Handle receive OPEN ACK command.
 *
 * @pkt  Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_open_ack(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret;
	struct smux_lch_t *ch;
	int enable_powerdown = 0;

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];

	spin_lock(&ch->state_lock_lhb1);
	if (ch->local_state == SMUX_LCH_LOCAL_OPENING) {
		SMUX_DBG("smux: lcid %d local state 0x%x -> 0x%x\n", lcid,
				ch->local_state,
				SMUX_LCH_LOCAL_OPENED);

		if (pkt->hdr.flags & SMUX_CMD_OPEN_POWER_COLLAPSE)
			enable_powerdown = 1;

		ch->local_state = SMUX_LCH_LOCAL_OPENED;
		if (ch->remote_state == SMUX_LCH_REMOTE_OPENED)
			schedule_notify(lcid, SMUX_CONNECTED, NULL);
		ret = 0;
	} else if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
		SMUX_DBG("smux: Remote loopback OPEN ACK received\n");
		ret = 0;
	} else {
		SMUX_ERR("%s: lcid %d state 0x%x open ack invalid\n",
				__func__, lcid, ch->local_state);
		ret = -EINVAL;
	}
	spin_unlock(&ch->state_lock_lhb1);

	if (enable_powerdown) {
		spin_lock(&smux.tx_lock_lha2);
		if (!smux.powerdown_enabled) {
			smux.powerdown_enabled = 1;
			SMUX_DBG("smux: %s: enabling power-collapse support\n",
					__func__);
		}
		spin_unlock(&smux.tx_lock_lha2);
	}

	return ret;
}

static int smux_handle_close_ack(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret;
	struct smux_lch_t *ch;
	union notifier_metadata meta_disconnected;
	unsigned long flags;

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];
	meta_disconnected.disconnected.is_ssr = 0;

	spin_lock_irqsave(&ch->state_lock_lhb1, flags);

	if (ch->local_state == SMUX_LCH_LOCAL_CLOSING) {
		SMUX_DBG("smux: lcid %d local state 0x%x -> 0x%x\n", lcid,
				SMUX_LCH_LOCAL_CLOSING,
				SMUX_LCH_LOCAL_CLOSED);
		ch->local_state = SMUX_LCH_LOCAL_CLOSED;
		if (ch->remote_state == SMUX_LCH_REMOTE_CLOSED)
			schedule_notify(lcid, SMUX_DISCONNECTED,
				&meta_disconnected);
		ret = 0;
	} else if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
		SMUX_DBG("smux: Remote loopback CLOSE ACK received\n");
		ret = 0;
	} else {
		SMUX_ERR("%s: lcid %d state 0x%x close ack invalid\n",
				__func__, lcid, ch->local_state);
		ret = -EINVAL;
	}
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
	return ret;
}

/**
 * Handle receive OPEN command.
 *
 * @pkt  Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_open_cmd(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret;
	struct smux_lch_t *ch;
	struct smux_pkt_t *ack_pkt;
	unsigned long flags;
	int tx_ready = 0;
	int enable_powerdown = 0;

	if (pkt->hdr.flags & SMUX_CMD_OPEN_ACK)
		return smux_handle_rx_open_ack(pkt);

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];

	spin_lock_irqsave(&ch->state_lock_lhb1, flags);

	if (ch->remote_state == SMUX_LCH_REMOTE_CLOSED) {
		SMUX_DBG("smux: lcid %d remote state 0x%x -> 0x%x\n", lcid,
				SMUX_LCH_REMOTE_CLOSED,
				SMUX_LCH_REMOTE_OPENED);

		ch->remote_state = SMUX_LCH_REMOTE_OPENED;
		if (pkt->hdr.flags & SMUX_CMD_OPEN_POWER_COLLAPSE)
			enable_powerdown = 1;

		/* Send Open ACK */
		ack_pkt = smux_alloc_pkt();
		if (!ack_pkt) {
			/* exit out to allow retrying this later */
			ret = -ENOMEM;
			goto out;
		}
		ack_pkt->hdr.cmd = SMUX_CMD_OPEN_LCH;
		ack_pkt->hdr.flags = SMUX_CMD_OPEN_ACK
			| SMUX_CMD_OPEN_POWER_COLLAPSE;
		ack_pkt->hdr.lcid = lcid;
		ack_pkt->hdr.payload_len = 0;
		ack_pkt->hdr.pad_len = 0;
		if (pkt->hdr.flags & SMUX_CMD_OPEN_REMOTE_LOOPBACK) {
			ch->remote_mode = SMUX_LCH_MODE_REMOTE_LOOPBACK;
			ack_pkt->hdr.flags |= SMUX_CMD_OPEN_REMOTE_LOOPBACK;
		}
		smux_tx_queue(ack_pkt, ch, 0);
		tx_ready = 1;

		if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
			/*
			 * Send an Open command to the remote side to
			 * simulate our local client doing it.
			 */
			ack_pkt = smux_alloc_pkt();
			if (ack_pkt) {
				ack_pkt->hdr.lcid = lcid;
				ack_pkt->hdr.cmd = SMUX_CMD_OPEN_LCH;
				ack_pkt->hdr.flags =
					SMUX_CMD_OPEN_POWER_COLLAPSE;
				ack_pkt->hdr.payload_len = 0;
				ack_pkt->hdr.pad_len = 0;
				smux_tx_queue(ack_pkt, ch, 0);
				tx_ready = 1;
			} else {
				SMUX_ERR(
					"%s: Remote loopback allocation failure\n",
					__func__);
			}
		} else if (ch->local_state == SMUX_LCH_LOCAL_OPENED) {
			schedule_notify(lcid, SMUX_CONNECTED, NULL);
		}
		ret = 0;
	} else {
		SMUX_ERR("%s: lcid %d remote state 0x%x open invalid\n",
				__func__, lcid, ch->remote_state);
		ret = -EINVAL;
	}

out:
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);

	if (enable_powerdown) {
		spin_lock_irqsave(&smux.tx_lock_lha2, flags);
		if (!smux.powerdown_enabled) {
			smux.powerdown_enabled = 1;
			SMUX_DBG("smux: %s: enabling power-collapse support\n",
					__func__);
		}
		spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
	}

	if (tx_ready)
		list_channel(ch);

	return ret;
}

/**
 * Handle receive CLOSE command.
 *
 * @pkt  Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_close_cmd(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret;
	struct smux_lch_t *ch;
	struct smux_pkt_t *ack_pkt;
	union notifier_metadata meta_disconnected;
	unsigned long flags;
	int tx_ready = 0;

	if (pkt->hdr.flags & SMUX_CMD_CLOSE_ACK)
		return smux_handle_close_ack(pkt);

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];
	meta_disconnected.disconnected.is_ssr = 0;

	spin_lock_irqsave(&ch->state_lock_lhb1, flags);
	if (ch->remote_state == SMUX_LCH_REMOTE_OPENED) {
		SMUX_DBG("smux: lcid %d remote state 0x%x -> 0x%x\n", lcid,
				SMUX_LCH_REMOTE_OPENED,
				SMUX_LCH_REMOTE_CLOSED);

		ack_pkt = smux_alloc_pkt();
		if (!ack_pkt) {
			/* exit out to allow retrying this later */
			ret = -ENOMEM;
			goto out;
		}
		ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
		ack_pkt->hdr.cmd = SMUX_CMD_CLOSE_LCH;
		ack_pkt->hdr.flags = SMUX_CMD_CLOSE_ACK;
		ack_pkt->hdr.lcid = lcid;
		ack_pkt->hdr.payload_len = 0;
		ack_pkt->hdr.pad_len = 0;
		smux_tx_queue(ack_pkt, ch, 0);
		tx_ready = 1;

		if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
			/*
			 * Send a Close command to the remote side to simulate
			 * our local client doing it.
			 */
			ack_pkt = smux_alloc_pkt();
			if (ack_pkt) {
				ack_pkt->hdr.lcid = lcid;
				ack_pkt->hdr.cmd = SMUX_CMD_CLOSE_LCH;
				ack_pkt->hdr.flags = 0;
				ack_pkt->hdr.payload_len = 0;
				ack_pkt->hdr.pad_len = 0;
				smux_tx_queue(ack_pkt, ch, 0);
				tx_ready = 1;
			} else {
				SMUX_ERR(
					"%s: Remote loopback allocation failure\n",
					__func__);
			}
		}

		if (ch->local_state == SMUX_LCH_LOCAL_CLOSED)
			schedule_notify(lcid, SMUX_DISCONNECTED,
				&meta_disconnected);
		ret = 0;
	} else {
		SMUX_ERR("%s: lcid %d remote state 0x%x close invalid\n",
				__func__, lcid, ch->remote_state);
		ret = -EINVAL;
	}
out:
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
	if (tx_ready)
		list_channel(ch);

	return ret;
}

/**
 * Handle receive DATA command.
 *
 * @pkt  Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_data_cmd(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret = 0;
	int do_retry = 0;
	int tx_ready = 0;
	int tmp;
	int rx_len;
	struct smux_lch_t *ch;
	union notifier_metadata metadata;
	int remote_loopback;
	struct smux_pkt_t *ack_pkt;
	unsigned long flags;

	if (!pkt || smux_assert_lch_id(pkt->hdr.lcid)) {
		ret = -ENXIO;
		goto out;
	}

	rx_len = pkt->hdr.payload_len;
	if (rx_len == 0) {
		ret = -EINVAL;
		goto out;
	}

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];
	spin_lock_irqsave(&ch->state_lock_lhb1, flags);
	remote_loopback = ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK;

	if (ch->local_state != SMUX_LCH_LOCAL_OPENED
		&& !remote_loopback) {
		SMUX_ERR("smux: ch %d error data on local state 0x%x",
					lcid, ch->local_state);
		ret = -EIO;
		spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
		goto out;
	}

	if (ch->remote_state != SMUX_LCH_REMOTE_OPENED) {
		SMUX_ERR("smux: ch %d error data on remote state 0x%x",
					lcid, ch->remote_state);
		ret = -EIO;
		spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
		goto out;
	}

	if (!list_empty(&ch->rx_retry_queue)) {
		do_retry = 1;

		if ((ch->options & SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP) &&
			!ch->rx_flow_control_auto &&
			((ch->rx_retry_queue_cnt + 1) >= SMUX_RX_WM_HIGH)) {
			/* need to flow control RX */
			ch->rx_flow_control_auto = 1;
			tx_ready |= smux_rx_flow_control_updated(ch);
			schedule_notify(ch->lcid, SMUX_RX_RETRY_HIGH_WM_HIT,
					NULL);
		}
		if ((ch->rx_retry_queue_cnt + 1) > SMUX_RX_RETRY_MAX_PKTS) {
			/* retry queue full */
			SMUX_ERR(
				"%s: ch %d RX retry queue full; rx flow=%d\n",
				__func__, lcid, ch->rx_flow_control_auto);
			schedule_notify(lcid, SMUX_READ_FAIL, NULL);
			ret = -ENOMEM;
			spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
			goto out;
		}
	}
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);

	if (remote_loopback) {
		/* Echo the data back to the remote client. */
		ack_pkt = smux_alloc_pkt();
		if (ack_pkt) {
			ack_pkt->hdr.lcid = lcid;
			ack_pkt->hdr.cmd = SMUX_CMD_DATA;
			ack_pkt->hdr.flags = 0;
			ack_pkt->hdr.payload_len = pkt->hdr.payload_len;
			if (ack_pkt->hdr.payload_len) {
				smux_alloc_pkt_payload(ack_pkt);
				memcpy(ack_pkt->payload, pkt->payload,
						ack_pkt->hdr.payload_len);
			}
			ack_pkt->hdr.pad_len = pkt->hdr.pad_len;
			smux_tx_queue(ack_pkt, ch, 0);
			tx_ready = 1;
		} else {
			SMUX_ERR("%s: Remote loopback allocation failure\n",
					__func__);
		}
	} else if (!do_retry) {
		/* request buffer from client */
		metadata.read.pkt_priv = 0;
		metadata.read.buffer = 0;
		tmp = ch->get_rx_buffer(ch->priv,
				(void **)&metadata.read.pkt_priv,
				(void **)&metadata.read.buffer,
				rx_len);

		if (tmp == 0 && metadata.read.buffer) {
			/* place data into RX buffer */
			memcpy(metadata.read.buffer, pkt->payload,
					rx_len);
			metadata.read.len = rx_len;
			schedule_notify(lcid, SMUX_READ_DONE,
							&metadata);
		} else if (tmp == -EAGAIN ||
				(tmp == 0 && !metadata.read.buffer)) {
			/* buffer allocation failed - add to retry queue */
			do_retry = 1;
		} else if (tmp < 0) {
			SMUX_ERR("%s: ch %d Client RX buffer alloc failed %d\n",
					__func__, lcid, tmp);
			schedule_notify(lcid, SMUX_READ_FAIL, NULL);
			ret = -ENOMEM;
		}
	}

	if (do_retry) {
		struct smux_rx_pkt_retry *retry;

		retry = kmalloc(sizeof(struct smux_rx_pkt_retry), GFP_KERNEL);
		if (!retry) {
			SMUX_ERR("%s: retry alloc failure\n", __func__);
			ret = -ENOMEM;
			schedule_notify(lcid, SMUX_READ_FAIL, NULL);
			goto out;
		}
		INIT_LIST_HEAD(&retry->rx_retry_list);
		retry->timeout_in_ms = SMUX_RX_RETRY_MIN_MS;

		/* copy packet */
		retry->pkt = smux_alloc_pkt();
		if (!retry->pkt) {
			kfree(retry);
			SMUX_ERR("%s: pkt alloc failure\n", __func__);
			ret = -ENOMEM;
			schedule_notify(lcid, SMUX_READ_FAIL, NULL);
			goto out;
		}
		retry->pkt->hdr.lcid = lcid;
		retry->pkt->hdr.payload_len = pkt->hdr.payload_len;
		retry->pkt->hdr.pad_len = pkt->hdr.pad_len;
		if (retry->pkt->hdr.payload_len) {
			smux_alloc_pkt_payload(retry->pkt);
			memcpy(retry->pkt->payload, pkt->payload,
					retry->pkt->hdr.payload_len);
		}

		/* add to retry queue */
		spin_lock_irqsave(&ch->state_lock_lhb1, flags);
		list_add_tail(&retry->rx_retry_list, &ch->rx_retry_queue);
		++ch->rx_retry_queue_cnt;
		if (ch->rx_retry_queue_cnt == 1)
			queue_delayed_work(smux_rx_wq, &ch->rx_retry_work,
				msecs_to_jiffies(retry->timeout_in_ms));
		spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
	}

	if (tx_ready)
		list_channel(ch);
out:
	return ret;
}

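/*
 * Note on the retry path above: retries are appended with list_add_tail()
 * and the delayed work is only scheduled when the queue transitions from
 * empty, so buffered packets are re-delivered to the client in their
 * original arrival order by smux_rx_retry_worker().
 */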
1606/**
1607 * Handle receive byte command for testing purposes.
1608 *
1609 * @pkt Received packet
1610 *
1611 * @returns 0 for success
1612 */
1613static int smux_handle_rx_byte_cmd(struct smux_pkt_t *pkt)
1614{
1615 uint8_t lcid;
1616 int ret;
1617 struct smux_lch_t *ch;
1618 union notifier_metadata metadata;
1619 unsigned long flags;
1620
Eric Holmbergffddd4c2012-06-08 12:37:51 -06001621 if (!pkt || smux_assert_lch_id(pkt->hdr.lcid)) {
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06001622 SMUX_ERR("%s: invalid packet or channel id\n", __func__);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001623 return -ENXIO;
Eric Holmbergffddd4c2012-06-08 12:37:51 -06001624 }
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001625
1626 lcid = pkt->hdr.lcid;
1627 ch = &smux_lch[lcid];
1628 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
1629
1630 if (ch->local_state != SMUX_LCH_LOCAL_OPENED) {
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06001631 SMUX_ERR("smux: ch %d error data on local state 0x%x",
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001632 lcid, ch->local_state);
1633 ret = -EIO;
1634 goto out;
1635 }
1636
1637 if (ch->remote_state != SMUX_LCH_REMOTE_OPENED) {
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06001638 SMUX_ERR("smux: ch %d error data on remote state 0x%x\n",
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001639 lcid, ch->remote_state);
1640 ret = -EIO;
1641 goto out;
1642 }
1643
1644 metadata.read.pkt_priv = (void *)(int)pkt->hdr.flags;
1645 metadata.read.buffer = 0;
1646 schedule_notify(lcid, SMUX_READ_DONE, &metadata);
1647 ret = 0;
1648
1649out:
1650 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
1651 return ret;
1652}
1653
1654/**
1655 * Handle receive status command.
1656 *
1657 * @pkt Received packet
1658 *
1659 * @returns 0 for success
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001660 */
1661static int smux_handle_rx_status_cmd(struct smux_pkt_t *pkt)
1662{
1663 uint8_t lcid;
1664 int ret = 0;
1665 struct smux_lch_t *ch;
1666 union notifier_metadata meta;
1667 unsigned long flags;
1668 int tx_ready = 0;
1669
1670 lcid = pkt->hdr.lcid;
1671 ch = &smux_lch[lcid];
1672
1673 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
1674 meta.tiocm.tiocm_old = ch->remote_tiocm;
1675 meta.tiocm.tiocm_new = pkt->hdr.flags;
1676
1677 /* update logical channel flow control */
1678 if ((meta.tiocm.tiocm_old & SMUX_CMD_STATUS_FLOW_CNTL) ^
1679 (meta.tiocm.tiocm_new & SMUX_CMD_STATUS_FLOW_CNTL)) {
1680 /* logical channel flow control changed */
1681 if (pkt->hdr.flags & SMUX_CMD_STATUS_FLOW_CNTL) {
1682 /* disabled TX */
Angshuman Sarkar98f78122012-07-24 14:50:42 +05301683 SMUX_DBG("smux: TX Flow control enabled\n");
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001684 ch->tx_flow_control = 1;
1685 } else {
1686 /* re-enable channel */
Angshuman Sarkar98f78122012-07-24 14:50:42 +05301687 SMUX_DBG("smux: TX Flow control disabled\n");
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001688 ch->tx_flow_control = 0;
1689 tx_ready = 1;
1690 }
1691 }
1692 meta.tiocm.tiocm_old = msm_smux_tiocm_get_atomic(ch);
1693 ch->remote_tiocm = pkt->hdr.flags;
1694 meta.tiocm.tiocm_new = msm_smux_tiocm_get_atomic(ch);
1695
1696 /* client notification for status change */
1697 if (IS_FULLY_OPENED(ch)) {
1698 if (meta.tiocm.tiocm_old != meta.tiocm.tiocm_new)
1699 schedule_notify(lcid, SMUX_TIOCM_UPDATE, &meta);
1700 ret = 0;
1701 }
1702 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
1703 if (tx_ready)
1704 list_channel(ch);
1705
1706 return ret;
1707}
1708
1709/**
1710 * Handle receive power command.
1711 *
1712 * @pkt Received packet
1713 *
1714 * @returns 0 for success
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001715 */
1716static int smux_handle_rx_power_cmd(struct smux_pkt_t *pkt)
1717{
Steve Mucklef132c6c2012-06-06 18:30:57 -07001718 struct smux_pkt_t *ack_pkt = NULL;
Eric Holmberga9b06472012-06-22 09:46:34 -06001719 int power_down = 0;
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001720 unsigned long flags;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001721
Eric Holmberg4dd6e1a2012-06-26 13:46:29 -06001722 SMUX_PWR_PKT_RX(pkt);
1723
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001724 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001725 if (pkt->hdr.flags & SMUX_CMD_PWR_CTL_ACK) {
1726 /* local sleep request ack */
Eric Holmberga9b06472012-06-22 09:46:34 -06001727 if (smux.power_state == SMUX_PWR_TURNING_OFF)
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001728 /* Power-down complete, turn off UART */
Eric Holmberga9b06472012-06-22 09:46:34 -06001729 power_down = 1;
1730 else
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06001731 SMUX_ERR("%s: sleep request ack invalid in state %d\n",
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001732 __func__, smux.power_state);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001733 } else {
Eric Holmbergffddd4c2012-06-08 12:37:51 -06001734 /*
1735 * Remote sleep request
1736 *
1737 * Even if we have data pending, we need to transition to the
1738 * POWER_OFF state and then perform a wakeup since the remote
1739 * side has requested a power-down.
1740 *
1741 * The state here is set to SMUX_PWR_TURNING_OFF_FLUSH and
1742 * the TX thread will set the state to SMUX_PWR_TURNING_OFF
1743 * when it sends the packet.
Eric Holmberga9b06472012-06-22 09:46:34 -06001744 *
1745 * If we are already powering down, then no ACK is sent.
Eric Holmbergffddd4c2012-06-08 12:37:51 -06001746 */
Eric Holmberga9b06472012-06-22 09:46:34 -06001747 if (smux.power_state == SMUX_PWR_ON) {
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001748 ack_pkt = smux_alloc_pkt();
1749 if (ack_pkt) {
Angshuman Sarkar98f78122012-07-24 14:50:42 +05301750 SMUX_PWR("smux: %s: Power %d->%d\n", __func__,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001751 smux.power_state,
1752 SMUX_PWR_TURNING_OFF_FLUSH);
1753
Eric Holmbergffddd4c2012-06-08 12:37:51 -06001754 smux.power_state = SMUX_PWR_TURNING_OFF_FLUSH;
1755
1756 /* send power-down ack */
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001757 ack_pkt->hdr.cmd = SMUX_CMD_PWR_CTL;
1758 ack_pkt->hdr.flags = SMUX_CMD_PWR_CTL_ACK;
Eric Holmbergffddd4c2012-06-08 12:37:51 -06001759 ack_pkt->hdr.lcid = SMUX_BROADCAST_LCID;
1760 list_add_tail(&ack_pkt->list,
1761 &smux.power_queue);
1762 queue_work(smux_tx_wq, &smux_tx_work);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001763 }
Eric Holmberga9b06472012-06-22 09:46:34 -06001764 } else if (smux.power_state == SMUX_PWR_TURNING_OFF_FLUSH) {
1765 /* Local power-down request still in TX queue */
Angshuman Sarkar98f78122012-07-24 14:50:42 +05301766 SMUX_PWR("smux: %s: Power-down shortcut - no ack\n",
Eric Holmberga9b06472012-06-22 09:46:34 -06001767 __func__);
1768 smux.power_ctl_remote_req_received = 1;
1769 } else if (smux.power_state == SMUX_PWR_TURNING_OFF) {
1770 /*
1771 * Local power-down request already sent to remote
1772 * side, so this request gets treated as an ACK.
1773 */
Angshuman Sarkar98f78122012-07-24 14:50:42 +05301774 SMUX_PWR("smux: %s: Power-down shortcut - no ack\n",
Eric Holmberga9b06472012-06-22 09:46:34 -06001775 __func__);
1776 power_down = 1;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001777 } else {
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06001778 SMUX_ERR("%s: sleep request invalid in state %d\n",
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001779 __func__, smux.power_state);
1780 }
1781 }
Eric Holmberga9b06472012-06-22 09:46:34 -06001782
1783 if (power_down) {
Angshuman Sarkar98f78122012-07-24 14:50:42 +05301784 SMUX_PWR("smux: %s: Power %d->%d\n", __func__,
Eric Holmberga9b06472012-06-22 09:46:34 -06001785 smux.power_state, SMUX_PWR_OFF_FLUSH);
1786 smux.power_state = SMUX_PWR_OFF_FLUSH;
1787 queue_work(smux_tx_wq, &smux_inactivity_work);
1788 }
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001789 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001790
1791 return 0;
1792}
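/*
 * Transition summary for this handler (a sketch derived from the code
 * above, not an exhaustive state table):
 *
 *   Remote sleep request received in:
 *	SMUX_PWR_ON                -> SMUX_PWR_TURNING_OFF_FLUSH, ACK queued
 *	SMUX_PWR_TURNING_OFF_FLUSH -> no ACK; request flagged so the TX
 *	                              worker can take the shortcut
 *	SMUX_PWR_TURNING_OFF       -> treated as an ACK; power down
 *
 *   Local sleep request ACK received in:
 *	SMUX_PWR_TURNING_OFF       -> power down
 *
 * where "power down" means entering SMUX_PWR_OFF_FLUSH and scheduling
 * the inactivity worker to flush the TTY and gate the UART clock.
 */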
1793
1794/**
1795 * Handle dispatching a completed packet for receive processing.
1796 *
1797 * @pkt Packet to process
1798 *
1799 * @returns 0 for success
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001800 */
1801static int smux_dispatch_rx_pkt(struct smux_pkt_t *pkt)
1802{
Eric Holmbergf9622662012-06-13 15:55:45 -06001803 int ret = -ENXIO;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001804
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001805 switch (pkt->hdr.cmd) {
1806 case SMUX_CMD_OPEN_LCH:
Eric Holmberg4dd6e1a2012-06-26 13:46:29 -06001807 SMUX_LOG_PKT_RX(pkt);
Eric Holmbergf9622662012-06-13 15:55:45 -06001808 if (smux_assert_lch_id(pkt->hdr.lcid)) {
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06001809 SMUX_ERR("%s: invalid channel id %d\n",
Eric Holmbergf9622662012-06-13 15:55:45 -06001810 __func__, pkt->hdr.lcid);
1811 break;
1812 }
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001813 ret = smux_handle_rx_open_cmd(pkt);
1814 break;
1815
1816 case SMUX_CMD_DATA:
Eric Holmberg4dd6e1a2012-06-26 13:46:29 -06001817 SMUX_LOG_PKT_RX(pkt);
Eric Holmbergf9622662012-06-13 15:55:45 -06001818 if (smux_assert_lch_id(pkt->hdr.lcid)) {
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06001819 SMUX_ERR("%s: invalid channel id %d\n",
Eric Holmbergf9622662012-06-13 15:55:45 -06001820 __func__, pkt->hdr.lcid);
1821 break;
1822 }
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001823 ret = smux_handle_rx_data_cmd(pkt);
1824 break;
1825
1826 case SMUX_CMD_CLOSE_LCH:
Eric Holmberg4dd6e1a2012-06-26 13:46:29 -06001827 SMUX_LOG_PKT_RX(pkt);
Eric Holmbergf9622662012-06-13 15:55:45 -06001828 if (smux_assert_lch_id(pkt->hdr.lcid)) {
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06001829 SMUX_ERR("%s: invalid channel id %d\n",
Eric Holmbergf9622662012-06-13 15:55:45 -06001830 __func__, pkt->hdr.lcid);
1831 break;
1832 }
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001833 ret = smux_handle_rx_close_cmd(pkt);
1834 break;
1835
1836 case SMUX_CMD_STATUS:
Eric Holmberg4dd6e1a2012-06-26 13:46:29 -06001837 SMUX_LOG_PKT_RX(pkt);
Eric Holmbergf9622662012-06-13 15:55:45 -06001838 if (smux_assert_lch_id(pkt->hdr.lcid)) {
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06001839 SMUX_ERR("%s: invalid channel id %d\n",
Eric Holmbergf9622662012-06-13 15:55:45 -06001840 __func__, pkt->hdr.lcid);
1841 break;
1842 }
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001843 ret = smux_handle_rx_status_cmd(pkt);
1844 break;
1845
1846 case SMUX_CMD_PWR_CTL:
1847 ret = smux_handle_rx_power_cmd(pkt);
1848 break;
1849
1850 case SMUX_CMD_BYTE:
Eric Holmberg4dd6e1a2012-06-26 13:46:29 -06001851 SMUX_LOG_PKT_RX(pkt);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001852 ret = smux_handle_rx_byte_cmd(pkt);
1853 break;
1854
1855 default:
Eric Holmberg4dd6e1a2012-06-26 13:46:29 -06001856 SMUX_LOG_PKT_RX(pkt);
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06001857 SMUX_ERR("%s: command %d unknown\n", __func__, pkt->hdr.cmd);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001858 ret = -EINVAL;
1859 }
1860 return ret;
1861}
1862
1863/**
1864 * Deserializes a packet and dispatches it to the packet receive logic.
1865 *
1866 * @data Raw data for one packet
1867 * @len Length of the data
1868 *
1869 * @returns 0 for success
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001870 */
1871static int smux_deserialize(unsigned char *data, int len)
1872{
1873 struct smux_pkt_t recv;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001874
1875 smux_init_pkt(&recv);
1876
1877 /*
1878 * It may be possible to optimize this to not use the
1879 * temporary buffer.
1880 */
1881 memcpy(&recv.hdr, data, sizeof(struct smux_hdr_t));
1882
1883 if (recv.hdr.magic != SMUX_MAGIC) {
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06001884 SMUX_ERR("%s: invalid header magic\n", __func__);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001885 return -EINVAL;
1886 }
1887
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001888 if (recv.hdr.payload_len)
1889 recv.payload = data + sizeof(struct smux_hdr_t);
1890
1891 return smux_dispatch_rx_pkt(&recv);
1892}
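/*
 * On-the-wire frame assumed by this deserializer (the authoritative
 * field layout lives in smux_private.h; this is just a sketch of what
 * the RX state machine accumulates into smux.recv_buf):
 *
 *	struct smux_hdr_t  - begins with SMUX_MAGIC_WORD1/WORD2 and
 *	                     carries cmd, flags, lcid, payload_len, pad_len
 *	payload            - hdr.payload_len bytes, handed to the client
 *	padding            - hdr.pad_len bytes, received and discarded
 *
 * A complete frame therefore occupies sizeof(struct smux_hdr_t) +
 * payload_len + pad_len bytes, matching the smux.pkt_remain value
 * programmed by smux_rx_handle_hdr().
 */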
1893
1894/**
1895 * Handle wakeup request byte.
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001896 */
1897static void smux_handle_wakeup_req(void)
1898{
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001899 unsigned long flags;
1900
1901 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001902 if (smux.power_state == SMUX_PWR_OFF
1903 || smux.power_state == SMUX_PWR_TURNING_ON) {
1904 /* wakeup system */
Angshuman Sarkar98f78122012-07-24 14:50:42 +05301905 SMUX_PWR("smux: %s: Power %d->%d\n", __func__,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001906 smux.power_state, SMUX_PWR_ON);
1907 smux.power_state = SMUX_PWR_ON;
1908 queue_work(smux_tx_wq, &smux_wakeup_work);
1909 queue_work(smux_tx_wq, &smux_tx_work);
1910 queue_delayed_work(smux_tx_wq, &smux_delayed_inactivity_work,
1911 msecs_to_jiffies(SMUX_INACTIVITY_TIMEOUT_MS));
1912 smux_send_byte(SMUX_WAKEUP_ACK);
Eric Holmberga9b06472012-06-22 09:46:34 -06001913 } else if (smux.power_state == SMUX_PWR_ON) {
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001914 smux_send_byte(SMUX_WAKEUP_ACK);
Eric Holmberga9b06472012-06-22 09:46:34 -06001915 } else {
1916 /* stale wakeup request from previous wakeup */
Angshuman Sarkar98f78122012-07-24 14:50:42 +05301917 SMUX_PWR("smux: %s: stale Wakeup REQ in state %d\n",
Eric Holmberga9b06472012-06-22 09:46:34 -06001918 __func__, smux.power_state);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001919 }
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001920 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001921}
1922
1923/**
1924 * Handle wakeup request ack.
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001925 */
1926static void smux_handle_wakeup_ack(void)
1927{
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001928 unsigned long flags;
1929
1930 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001931 if (smux.power_state == SMUX_PWR_TURNING_ON) {
1932 /* received response to wakeup request */
Angshuman Sarkar98f78122012-07-24 14:50:42 +05301933 SMUX_PWR("smux: %s: Power %d->%d\n", __func__,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001934 smux.power_state, SMUX_PWR_ON);
1935 smux.power_state = SMUX_PWR_ON;
1936 queue_work(smux_tx_wq, &smux_tx_work);
1937 queue_delayed_work(smux_tx_wq, &smux_delayed_inactivity_work,
1938 msecs_to_jiffies(SMUX_INACTIVITY_TIMEOUT_MS));
1939
1940 } else if (smux.power_state != SMUX_PWR_ON) {
1941 /* invalid message */
Angshuman Sarkar98f78122012-07-24 14:50:42 +05301942 SMUX_PWR("smux: %s: stale Wakeup REQ ACK in state %d\n",
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001943 __func__, smux.power_state);
1944 }
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001945 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001946}
1947
1948/**
1949 * RX State machine - IDLE state processing.
1950 *
1951 * @data New RX data to process
1952 * @len Length of the data
1953 * @used In/out: index of the next unprocessed byte in @data
1954 * @flag Error flag - TTY_NORMAL (0) for no failure
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001955 */
1956static void smux_rx_handle_idle(const unsigned char *data,
1957 int len, int *used, int flag)
1958{
1959 int i;
1960
1961 if (flag) {
1962 if (smux_byte_loopback)
1963 smux_receive_byte(SMUX_UT_ECHO_ACK_FAIL,
1964 smux_byte_loopback);
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06001965 SMUX_ERR("%s: TTY error 0x%x - ignoring\n", __func__, flag);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001966 ++*used;
1967 return;
1968 }
1969
1970 for (i = *used; i < len && smux.rx_state == SMUX_RX_IDLE; i++) {
1971 switch (data[i]) {
1972 case SMUX_MAGIC_WORD1:
1973 smux.rx_state = SMUX_RX_MAGIC;
1974 break;
1975 case SMUX_WAKEUP_REQ:
Angshuman Sarkar98f78122012-07-24 14:50:42 +05301976 SMUX_PWR("smux: RX Wakeup REQ\n");
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001977 smux_handle_wakeup_req();
1978 break;
1979 case SMUX_WAKEUP_ACK:
Angshuman Sarkar98f78122012-07-24 14:50:42 +05301980 SMUX_PWR("smux: RX Wakeup ACK\n");
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001981 smux_handle_wakeup_ack();
1982 break;
1983 default:
1984 /* unexpected character */
1985 if (smux_byte_loopback && data[i] == SMUX_UT_ECHO_REQ)
1986 smux_receive_byte(SMUX_UT_ECHO_ACK_OK,
1987 smux_byte_loopback);
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06001988 SMUX_ERR("%s: parse error 0x%02x - ignoring\n",
1989 __func__, (unsigned)data[i]);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001990 break;
1991 }
1992 }
1993
1994 *used = i;
1995}
1996
1997/**
1998 * RX State machine - Header Magic state processing.
1999 *
2000 * @data New RX data to process
2001 * @len Length of the data
2002 * @used In/out: index of the next unprocessed byte in @data
2003 * @flag Error flag - TTY_NORMAL (0) for no failure
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002004 */
2005static void smux_rx_handle_magic(const unsigned char *data,
2006 int len, int *used, int flag)
2007{
2008 int i;
2009
2010 if (flag) {
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06002011 SMUX_ERR("%s: TTY RX error %d\n", __func__, flag);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002012 smux_enter_reset();
2013 smux.rx_state = SMUX_RX_FAILURE;
2014 ++*used;
2015 return;
2016 }
2017
2018 for (i = *used; i < len && smux.rx_state == SMUX_RX_MAGIC; i++) {
2019 /* wait for completion of the magic */
2020 if (data[i] == SMUX_MAGIC_WORD2) {
2021 smux.recv_len = 0;
2022 smux.recv_buf[smux.recv_len++] = SMUX_MAGIC_WORD1;
2023 smux.recv_buf[smux.recv_len++] = SMUX_MAGIC_WORD2;
2024 smux.rx_state = SMUX_RX_HDR;
2025 } else {
2026 /* unexpected / trash character */
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06002027 SMUX_ERR(
2028 "%s: rx parse error for char %c; *used=%d, len=%d\n",
2029 __func__, data[i], *used, len);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002030 smux.rx_state = SMUX_RX_IDLE;
2031 }
2032 }
2033
2034 *used = i;
2035}
2036
2037/**
2038 * RX State machine - Packet Header state processing.
2039 *
2040 * @data New RX data to process
2041 * @len Length of the data
2042 * @used In/out: index of the next unprocessed byte in @data
2043 * @flag Error flag - TTY_NORMAL (0) for no failure
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002044 */
2045static void smux_rx_handle_hdr(const unsigned char *data,
2046 int len, int *used, int flag)
2047{
2048 int i;
2049 struct smux_hdr_t *hdr;
2050
2051 if (flag) {
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06002052 SMUX_ERR("%s: TTY RX error %d\n", __func__, flag);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002053 smux_enter_reset();
2054 smux.rx_state = SMUX_RX_FAILURE;
2055 ++*used;
2056 return;
2057 }
2058
2059 for (i = *used; i < len && smux.rx_state == SMUX_RX_HDR; i++) {
2060 smux.recv_buf[smux.recv_len++] = data[i];
2061
2062 if (smux.recv_len == sizeof(struct smux_hdr_t)) {
2063 /* complete header received */
2064 hdr = (struct smux_hdr_t *)smux.recv_buf;
2065 smux.pkt_remain = hdr->payload_len + hdr->pad_len;
2066 smux.rx_state = SMUX_RX_PAYLOAD;
2067 }
2068 }
2069 *used = i;
2070}
2071
2072/**
2073 * RX State machine - Packet Payload state processing.
2074 *
2075 * @data New RX data to process
2076 * @len Length of the data
2077 * @used In/out: index of the next unprocessed byte in @data
2078 * @flag Error flag - TTY_NORMAL (0) for no failure
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002079 */
2080static void smux_rx_handle_pkt_payload(const unsigned char *data,
2081 int len, int *used, int flag)
2082{
2083 int remaining;
2084
2085 if (flag) {
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06002086 SMUX_ERR("%s: TTY RX error %d\n", __func__, flag);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002087 smux_enter_reset();
2088 smux.rx_state = SMUX_RX_FAILURE;
2089 ++*used;
2090 return;
2091 }
2092
2093 /* copy data into rx buffer */
2094 if (smux.pkt_remain < (len - *used))
2095 remaining = smux.pkt_remain;
2096 else
2097 remaining = len - *used;
2098
2099 memcpy(&smux.recv_buf[smux.recv_len], &data[*used], remaining);
2100 smux.recv_len += remaining;
2101 smux.pkt_remain -= remaining;
2102 *used += remaining;
2103
2104 if (smux.pkt_remain == 0) {
2105 /* complete packet received */
2106 smux_deserialize(smux.recv_buf, smux.recv_len);
2107 smux.rx_state = SMUX_RX_IDLE;
2108 }
2109}
2110
2111/**
2112 * Feed data to the receive state machine.
2113 *
2114 * @data Pointer to data block
2115 * @len Length of data
2116 * @flag TTY_NORMAL (0) for no error, otherwise TTY Error Flag
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002117 */
2118void smux_rx_state_machine(const unsigned char *data,
2119 int len, int flag)
2120{
Eric Holmberg0560f7a2012-05-31 15:50:26 -06002121 struct smux_rx_worker_data work;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002122
Eric Holmberg0560f7a2012-05-31 15:50:26 -06002123 work.data = data;
2124 work.len = len;
2125 work.flag = flag;
2126 INIT_WORK_ONSTACK(&work.work, smux_rx_worker);
2127 work.work_complete = COMPLETION_INITIALIZER_ONSTACK(work.work_complete);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002128
Eric Holmberg0560f7a2012-05-31 15:50:26 -06002129 queue_work(smux_rx_wq, &work.work);
2130 wait_for_completion(&work.work_complete);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002131}
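/*
 * Design note (a sketch of the rationale, inferred from the code
 * above): the caller's data buffer is only guaranteed valid for the
 * duration of this call, so the work item and completion live on the
 * stack and this function blocks until smux_rx_worker() has consumed
 * the bytes.  Since smux_rx_wq is single-threaded, this also
 * serializes all RX parsing without holding a lock across the state
 * machine.
 */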
2132
2133/**
2134 * Add channel to transmit-ready list and trigger transmit worker.
2135 *
2136 * @ch Channel to add
2137 */
2138static void list_channel(struct smux_lch_t *ch)
2139{
2140 unsigned long flags;
2141
Angshuman Sarkar98f78122012-07-24 14:50:42 +05302142 SMUX_DBG("smux: %s: listing channel %d\n",
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002143 __func__, ch->lcid);
2144
2145 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
2146 spin_lock(&ch->tx_lock_lhb2);
2147 smux.tx_activity_flag = 1;
2148 if (list_empty(&ch->tx_ready_list))
2149 list_add_tail(&ch->tx_ready_list, &smux.lch_tx_ready_list);
2150 spin_unlock(&ch->tx_lock_lhb2);
2151 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2152
2153 queue_work(smux_tx_wq, &smux_tx_work);
2154}
2155
2156/**
2157 * Transmit packet on correct transport and then perform client
2158 * notification.
2159 *
2160 * @ch Channel to transmit on
2161 * @pkt Packet to transmit
2162 */
2163static void smux_tx_pkt(struct smux_lch_t *ch, struct smux_pkt_t *pkt)
2164{
2165 union notifier_metadata meta_write;
2166 int ret;
2167
2168 if (ch && pkt) {
2169 SMUX_LOG_PKT_TX(pkt);
2170 if (ch->local_mode == SMUX_LCH_MODE_LOCAL_LOOPBACK)
2171 ret = smux_tx_loopback(pkt);
2172 else
2173 ret = smux_tx_tty(pkt);
2174
2175 if (pkt->hdr.cmd == SMUX_CMD_DATA) {
2176 /* notify write-done */
2177 meta_write.write.pkt_priv = pkt->priv;
2178 meta_write.write.buffer = pkt->payload;
2179 meta_write.write.len = pkt->hdr.payload_len;
2180 if (ret >= 0) {
Angshuman Sarkar98f78122012-07-24 14:50:42 +05302181 SMUX_DBG("smux: %s: PKT write done\n", __func__);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002182 schedule_notify(ch->lcid, SMUX_WRITE_DONE,
2183 &meta_write);
2184 } else {
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06002185 SMUX_ERR("%s: failed to write pkt %d\n",
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002186 __func__, ret);
2187 schedule_notify(ch->lcid, SMUX_WRITE_FAIL,
2188 &meta_write);
2189 }
2190 }
2191 }
2192}
2193
2194/**
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002195 * Flush pending TTY TX data.
2196 */
2197static void smux_flush_tty(void)
2198{
Eric Holmberg92a67df2012-06-25 13:56:24 -06002199 mutex_lock(&smux.mutex_lha0);
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002200 if (!smux.tty) {
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06002201 SMUX_ERR("%s: ldisc not loaded\n", __func__);
Eric Holmberg92a67df2012-06-25 13:56:24 -06002202 mutex_unlock(&smux.mutex_lha0);
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002203 return;
2204 }
2205
2206 tty_wait_until_sent(smux.tty,
2207 msecs_to_jiffies(TTY_BUFFER_FULL_WAIT_MS));
2208
2209 if (tty_chars_in_buffer(smux.tty) > 0)
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06002210 SMUX_ERR("%s: unable to flush UART queue\n", __func__);
Eric Holmberg92a67df2012-06-25 13:56:24 -06002211
2212 mutex_unlock(&smux.mutex_lha0);
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002213}
2214
2215/**
Eric Holmberged1f00c2012-06-07 09:45:18 -06002216 * Purge TX queue for logical channel.
2217 *
2218 * @ch Logical channel pointer
Eric Holmberg6fcf5322012-07-11 11:46:28 -06002219 * @is_ssr 1 = this is a subsystem restart purge
Eric Holmberged1f00c2012-06-07 09:45:18 -06002220 *
2221 * Must be called with the following spinlocks locked:
2222 * state_lock_lhb1
2223 * tx_lock_lhb2
2224 */
Eric Holmberg6fcf5322012-07-11 11:46:28 -06002225static void smux_purge_ch_tx_queue(struct smux_lch_t *ch, int is_ssr)
Eric Holmberged1f00c2012-06-07 09:45:18 -06002226{
2227 struct smux_pkt_t *pkt;
2228 int send_disconnect = 0;
Eric Holmberg6fcf5322012-07-11 11:46:28 -06002229 struct smux_pkt_t *pkt_tmp;
2230 int is_state_pkt;
Eric Holmberged1f00c2012-06-07 09:45:18 -06002231
Eric Holmberg6fcf5322012-07-11 11:46:28 -06002232 list_for_each_entry_safe(pkt, pkt_tmp, &ch->tx_queue, list) {
2233 is_state_pkt = 0;
Eric Holmberged1f00c2012-06-07 09:45:18 -06002234 if (pkt->hdr.cmd == SMUX_CMD_OPEN_LCH) {
Eric Holmberg6fcf5322012-07-11 11:46:28 -06002235 if (pkt->hdr.flags & SMUX_CMD_OPEN_ACK) {
2236 /* Open ACK must still be sent */
2237 is_state_pkt = 1;
2238 } else {
2239 /* Open never sent -- force to closed state */
2240 ch->local_state = SMUX_LCH_LOCAL_CLOSED;
2241 send_disconnect = 1;
2242 }
2243 } else if (pkt->hdr.cmd == SMUX_CMD_CLOSE_LCH) {
2244 if (pkt->hdr.flags & SMUX_CMD_CLOSE_ACK)
2245 is_state_pkt = 1;
2246 if (!send_disconnect)
2247 is_state_pkt = 1;
Eric Holmberged1f00c2012-06-07 09:45:18 -06002248 } else if (pkt->hdr.cmd == SMUX_CMD_DATA) {
2249 /* Notify client of failed write */
2250 union notifier_metadata meta_write;
2251
2252 meta_write.write.pkt_priv = pkt->priv;
2253 meta_write.write.buffer = pkt->payload;
2254 meta_write.write.len = pkt->hdr.payload_len;
2255 schedule_notify(ch->lcid, SMUX_WRITE_FAIL, &meta_write);
2256 }
Eric Holmberg6fcf5322012-07-11 11:46:28 -06002257
2258 if (!is_state_pkt || is_ssr) {
2259 list_del(&pkt->list);
2260 smux_free_pkt(pkt);
2261 }
Eric Holmberged1f00c2012-06-07 09:45:18 -06002262 }
2263
2264 if (send_disconnect) {
2265 union notifier_metadata meta_disconnected;
2266
2267 meta_disconnected.disconnected.is_ssr = smux.in_reset;
2268 schedule_notify(ch->lcid, SMUX_DISCONNECTED,
2269 &meta_disconnected);
2270 }
2271}
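/*
 * Purge policy sketch (derived from the loop above): data packets are
 * always dropped after a SMUX_WRITE_FAIL notification; OPEN/CLOSE ACKs
 * and a CLOSE whose matching OPEN was already transmitted count as
 * "state" packets and survive a normal purge so the remote state
 * machine stays consistent.  When is_ssr is set, even state packets
 * are freed since the remote processor is restarting.
 */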
2272
2273/**
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002274 * Power-up the UART.
Eric Holmberg92a67df2012-06-25 13:56:24 -06002275 *
2276 * Must be called with smux.mutex_lha0 already locked.
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002277 */
Eric Holmberg92a67df2012-06-25 13:56:24 -06002278static void smux_uart_power_on_atomic(void)
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002279{
2280 struct uart_state *state;
2281
2282 if (!smux.tty || !smux.tty->driver_data) {
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06002283 SMUX_ERR("%s: unable to find UART port for tty %p\n",
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002284 __func__, smux.tty);
2285 return;
2286 }
2287 state = smux.tty->driver_data;
2288 msm_hs_request_clock_on(state->uart_port);
2289}
2290
2291/**
Eric Holmberg92a67df2012-06-25 13:56:24 -06002292 * Power-up the UART.
2293 */
2294static void smux_uart_power_on(void)
2295{
2296 mutex_lock(&smux.mutex_lha0);
2297 smux_uart_power_on_atomic();
2298 mutex_unlock(&smux.mutex_lha0);
2299}
2300
2301/**
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002302 * Power down the UART.
Eric Holmberg06011322012-07-06 18:17:03 -06002303 *
2304 * Must be called with mutex_lha0 locked.
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002305 */
Eric Holmberg06011322012-07-06 18:17:03 -06002306static void smux_uart_power_off_atomic(void)
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002307{
2308 struct uart_state *state;
2309
2310 if (!smux.tty || !smux.tty->driver_data) {
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06002311 SMUX_ERR("%s: unable to find UART port for tty %p\n",
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002312 __func__, smux.tty);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002314 return;
2315 }
2316 state = smux.tty->driver_data;
2317 msm_hs_request_clock_off(state->uart_port);
Eric Holmberg06011322012-07-06 18:17:03 -06002318}
2319
2320/**
2321 * Power down the UART.
2322 */
2323static void smux_uart_power_off(void)
2324{
2325 mutex_lock(&smux.mutex_lha0);
2326 smux_uart_power_off_atomic();
Eric Holmberg92a67df2012-06-25 13:56:24 -06002327 mutex_unlock(&smux.mutex_lha0);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002328}
2329
2330/**
2331 * TX Wakeup Worker
2332 *
2333 * @work Not used
2334 *
2335 * Do an exponential back-off wakeup sequence with a maximum period
2336 * of approximately 1 second (1 << 20 microseconds).
2337 */
2338static void smux_wakeup_worker(struct work_struct *work)
2339{
2340 unsigned long flags;
2341 unsigned wakeup_delay;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002342
Eric Holmberg2d4f9e82012-06-21 13:12:39 -06002343 if (smux.in_reset)
2344 return;
2345
2346 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
2347 if (smux.power_state == SMUX_PWR_ON) {
2348 /* wakeup complete */
Eric Holmberga9b06472012-06-22 09:46:34 -06002349 smux.pwr_wakeup_delay_us = 1;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002350 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
Angshuman Sarkar98f78122012-07-24 14:50:42 +05302351 SMUX_DBG("smux: %s: wakeup complete\n", __func__);
Eric Holmberg2d4f9e82012-06-21 13:12:39 -06002352
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002353 /*
2354 * Cancel any pending retry. This avoids a race condition with
2355 * a new power-up request because:
2356 * 1) this worker doesn't modify the state
2357 * 2) this worker is processed on the same single-threaded
2358 * workqueue as new TX wakeup requests
2359 */
2360 cancel_delayed_work(&smux_wakeup_delayed_work);
Eric Holmbergd032f5b2012-06-29 19:02:00 -06002361 queue_work(smux_tx_wq, &smux_tx_work);
Eric Holmberga9b06472012-06-22 09:46:34 -06002362 } else if (smux.power_state == SMUX_PWR_TURNING_ON) {
Eric Holmberg2d4f9e82012-06-21 13:12:39 -06002363 /* retry wakeup */
2364 wakeup_delay = smux.pwr_wakeup_delay_us;
2365 smux.pwr_wakeup_delay_us <<= 1;
2366 if (smux.pwr_wakeup_delay_us > SMUX_WAKEUP_DELAY_MAX)
2367 smux.pwr_wakeup_delay_us =
2368 SMUX_WAKEUP_DELAY_MAX;
2369
2370 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
Angshuman Sarkar98f78122012-07-24 14:50:42 +05302371 SMUX_PWR("smux: %s: triggering wakeup\n", __func__);
Eric Holmberg2d4f9e82012-06-21 13:12:39 -06002372 smux_send_byte(SMUX_WAKEUP_REQ);
2373
2374 if (wakeup_delay < SMUX_WAKEUP_DELAY_MIN) {
Angshuman Sarkar98f78122012-07-24 14:50:42 +05302375 SMUX_DBG("smux: %s: sleeping for %u us\n", __func__,
Eric Holmberg2d4f9e82012-06-21 13:12:39 -06002376 wakeup_delay);
2377 usleep_range(wakeup_delay, 2*wakeup_delay);
2378 queue_work(smux_tx_wq, &smux_wakeup_work);
2379 } else {
2380 /* schedule delayed work */
Angshuman Sarkar98f78122012-07-24 14:50:42 +05302381 SMUX_DBG(
2382 "smux: %s: scheduling delayed wakeup in %u ms\n",
Eric Holmberg2d4f9e82012-06-21 13:12:39 -06002383 __func__, wakeup_delay / 1000);
2384 queue_delayed_work(smux_tx_wq,
2385 &smux_wakeup_delayed_work,
2386 msecs_to_jiffies(wakeup_delay / 1000));
2387 }
Eric Holmberga9b06472012-06-22 09:46:34 -06002388 } else {
2389 /* wakeup aborted */
2390 smux.pwr_wakeup_delay_us = 1;
2391 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
Angshuman Sarkar98f78122012-07-24 14:50:42 +05302392 SMUX_PWR("smux: %s: wakeup aborted\n", __func__);
Eric Holmberga9b06472012-06-22 09:46:34 -06002393 cancel_delayed_work(&smux_wakeup_delayed_work);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002394 }
2395}
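/*
 * Worked back-off example: pwr_wakeup_delay_us starts at 1 and doubles
 * per retry, so wakeup bytes go out roughly 1, 2, 4, ... microseconds
 * apart.  Delays below SMUX_WAKEUP_DELAY_MIN (1 << 15 us, ~33 ms)
 * busy-wait via usleep_range() and requeue immediately; longer delays
 * go through the delayed work item, capped at SMUX_WAKEUP_DELAY_MAX
 * (1 << 20 us), which is the ~1 second maximum noted in the function
 * header.
 */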
2396
2397
2398/**
2399 * Inactivity timeout worker. Periodically scheduled when link is active.
2400 * When it detects inactivity, it will power down the UART link.
2401 *
2402 * @work Work structure (not used)
2403 */
2404static void smux_inactivity_worker(struct work_struct *work)
2405{
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002406 struct smux_pkt_t *pkt;
2407 unsigned long flags;
2408
Eric Holmberg06011322012-07-06 18:17:03 -06002409 if (smux.in_reset)
2410 return;
2411
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002412 spin_lock_irqsave(&smux.rx_lock_lha1, flags);
2413 spin_lock(&smux.tx_lock_lha2);
2414
2415 if (!smux.tx_activity_flag && !smux.rx_activity_flag) {
2416 /* no activity */
2417 if (smux.powerdown_enabled) {
2418 if (smux.power_state == SMUX_PWR_ON) {
2419 /* start power-down sequence */
2420 pkt = smux_alloc_pkt();
2421 if (pkt) {
Angshuman Sarkar98f78122012-07-24 14:50:42 +05302422 SMUX_PWR(
2423 "smux: %s: Power %d->%d\n", __func__,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002424 smux.power_state,
Eric Holmberga9b06472012-06-22 09:46:34 -06002425 SMUX_PWR_TURNING_OFF_FLUSH);
2426 smux.power_state =
2427 SMUX_PWR_TURNING_OFF_FLUSH;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002428
2429 /* send power-down request */
2430 pkt->hdr.cmd = SMUX_CMD_PWR_CTL;
2431 pkt->hdr.flags = 0;
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002432 pkt->hdr.lcid = SMUX_BROADCAST_LCID;
2433 list_add_tail(&pkt->list,
2434 &smux.power_queue);
2435 queue_work(smux_tx_wq, &smux_tx_work);
2436 } else {
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06002437 SMUX_ERR("%s: packet alloc failed\n",
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002438 __func__);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002439 }
2440 }
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002441 }
2442 }
2443 smux.tx_activity_flag = 0;
2444 smux.rx_activity_flag = 0;
2445
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002446 if (smux.power_state == SMUX_PWR_OFF_FLUSH) {
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002447 /* ready to power-down the UART */
Angshuman Sarkar98f78122012-07-24 14:50:42 +05302448 SMUX_PWR("smux: %s: Power %d->%d\n", __func__,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002449 smux.power_state, SMUX_PWR_OFF);
Eric Holmbergff0b0112012-06-08 15:06:57 -06002450 smux.power_state = SMUX_PWR_OFF;
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002451
2452 /* if data is pending, schedule a new wakeup */
2453 if (!list_empty(&smux.lch_tx_ready_list) ||
2454 !list_empty(&smux.power_queue))
2455 queue_work(smux_tx_wq, &smux_tx_work);
2456
2457 spin_unlock(&smux.tx_lock_lha2);
2458 spin_unlock_irqrestore(&smux.rx_lock_lha1, flags);
2459
2460 /* flush UART output queue and power down */
2461 smux_flush_tty();
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002462 smux_uart_power_off();
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002463 } else {
2464 spin_unlock(&smux.tx_lock_lha2);
2465 spin_unlock_irqrestore(&smux.rx_lock_lha1, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002466 }
2467
2468 /* reschedule inactivity worker */
2469 if (smux.power_state != SMUX_PWR_OFF)
2470 queue_delayed_work(smux_tx_wq, &smux_delayed_inactivity_work,
2471 msecs_to_jiffies(SMUX_INACTIVITY_TIMEOUT_MS));
2472}
2473
2474/**
Eric Holmbergb8435c82012-06-05 14:51:29 -06002475 * Remove RX retry packet from channel and free it.
2476 *
Eric Holmbergb8435c82012-06-05 14:51:29 -06002477 * @ch Channel for retry packet
2478 * @retry Retry packet to remove
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002479 *
2480 * @returns 1 if flow control updated; 0 otherwise
2481 *
2482 * Must be called with state_lock_lhb1 locked.
Eric Holmbergb8435c82012-06-05 14:51:29 -06002483 */
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002484static int smux_remove_rx_retry(struct smux_lch_t *ch,
Eric Holmbergb8435c82012-06-05 14:51:29 -06002485 struct smux_rx_pkt_retry *retry)
2486{
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002487 int tx_ready = 0;
2488
Eric Holmbergb8435c82012-06-05 14:51:29 -06002489 list_del(&retry->rx_retry_list);
2490 --ch->rx_retry_queue_cnt;
2491 smux_free_pkt(retry->pkt);
2492 kfree(retry);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002493
2494 if ((ch->options & SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP) &&
2495 (ch->rx_retry_queue_cnt <= SMUX_RX_WM_LOW) &&
2496 ch->rx_flow_control_auto) {
2497 ch->rx_flow_control_auto = 0;
2498 smux_rx_flow_control_updated(ch);
2499 schedule_notify(ch->lcid, SMUX_RX_RETRY_LOW_WM_HIT, NULL);
2500 tx_ready = 1;
2501 }
2502 return tx_ready;
Eric Holmbergb8435c82012-06-05 14:51:29 -06002503}
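/*
 * Auto RX flow-control interplay (sketch): when
 * SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP is set, the RX path raises flow
 * control as retries accumulate; this helper is the release side --
 * once the retry queue drains to SMUX_RX_WM_LOW it clears
 * rx_flow_control_auto, pushes the updated TIOCM status to the remote
 * end, and notifies the client with SMUX_RX_RETRY_LOW_WM_HIT.
 */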
2504
2505/**
Eric Holmberg0560f7a2012-05-31 15:50:26 -06002506 * RX worker handles all receive operations.
2507 *
2508 * @work Work structure contained in struct smux_rx_worker_data
2509 */
2510static void smux_rx_worker(struct work_struct *work)
2511{
2512 unsigned long flags;
2513 int used;
2514 int initial_rx_state;
2515 struct smux_rx_worker_data *w;
2516 const unsigned char *data;
2517 int len;
2518 int flag;
2519
2520 w = container_of(work, struct smux_rx_worker_data, work);
2521 data = w->data;
2522 len = w->len;
2523 flag = w->flag;
2524
2525 spin_lock_irqsave(&smux.rx_lock_lha1, flags);
2526 smux.rx_activity_flag = 1;
2527 spin_unlock_irqrestore(&smux.rx_lock_lha1, flags);
2528
Angshuman Sarkar98f78122012-07-24 14:50:42 +05302529 SMUX_DBG("smux: %s: %p, len=%d, flag=%d\n", __func__, data, len, flag);
Eric Holmberg0560f7a2012-05-31 15:50:26 -06002530 used = 0;
2531 do {
Eric Holmberg06011322012-07-06 18:17:03 -06002532 if (smux.in_reset) {
Angshuman Sarkar98f78122012-07-24 14:50:42 +05302533 SMUX_DBG("smux: %s: abort RX due to reset\n", __func__);
Eric Holmberg06011322012-07-06 18:17:03 -06002534 smux.rx_state = SMUX_RX_IDLE;
2535 break;
2536 }
2537
Angshuman Sarkar98f78122012-07-24 14:50:42 +05302538 SMUX_DBG("smux: %s: state %d; %d of %d\n",
Eric Holmberg0560f7a2012-05-31 15:50:26 -06002539 __func__, smux.rx_state, used, len);
2540 initial_rx_state = smux.rx_state;
2541
2542 switch (smux.rx_state) {
2543 case SMUX_RX_IDLE:
2544 smux_rx_handle_idle(data, len, &used, flag);
2545 break;
2546 case SMUX_RX_MAGIC:
2547 smux_rx_handle_magic(data, len, &used, flag);
2548 break;
2549 case SMUX_RX_HDR:
2550 smux_rx_handle_hdr(data, len, &used, flag);
2551 break;
2552 case SMUX_RX_PAYLOAD:
2553 smux_rx_handle_pkt_payload(data, len, &used, flag);
2554 break;
2555 default:
Angshuman Sarkar98f78122012-07-24 14:50:42 +05302556 SMUX_DBG("smux: %s: invalid state %d\n",
Eric Holmberg0560f7a2012-05-31 15:50:26 -06002557 __func__, smux.rx_state);
2558 smux.rx_state = SMUX_RX_IDLE;
2559 break;
2560 }
2561 } while (used < len || smux.rx_state != initial_rx_state);
2562
2563 complete(&w->work_complete);
2564}
2565
2566/**
Eric Holmbergb8435c82012-06-05 14:51:29 -06002567 * RX Retry worker handles retrying get_rx_buffer calls that previously failed
2568 * because the client was not ready (-EAGAIN).
2569 *
2570 * @work Work structure contained in smux_lch_t structure
2571 */
2572static void smux_rx_retry_worker(struct work_struct *work)
2573{
2574 struct smux_lch_t *ch;
2575 struct smux_rx_pkt_retry *retry;
2576 union notifier_metadata metadata;
2577 int tmp;
2578 unsigned long flags;
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002579 int immediate_retry = 0;
2580 int tx_ready = 0;
Eric Holmbergb8435c82012-06-05 14:51:29 -06002581
2582 ch = container_of(work, struct smux_lch_t, rx_retry_work.work);
2583
2584 /* get next retry packet */
2585 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
Eric Holmberg06011322012-07-06 18:17:03 -06002586 if ((ch->local_state != SMUX_LCH_LOCAL_OPENED) || smux.in_reset) {
Eric Holmbergb8435c82012-06-05 14:51:29 -06002587 /* port has been closed - remove all retries */
2588 while (!list_empty(&ch->rx_retry_queue)) {
2589 retry = list_first_entry(&ch->rx_retry_queue,
2590 struct smux_rx_pkt_retry,
2591 rx_retry_list);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002592 (void)smux_remove_rx_retry(ch, retry);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002593 }
2594 }
2595
2596 if (list_empty(&ch->rx_retry_queue)) {
Angshuman Sarkar98f78122012-07-24 14:50:42 +05302597 SMUX_DBG("smux: %s: retry list empty for channel %d\n",
Eric Holmbergb8435c82012-06-05 14:51:29 -06002598 __func__, ch->lcid);
2599 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2600 return;
2601 }
2602 retry = list_first_entry(&ch->rx_retry_queue,
2603 struct smux_rx_pkt_retry,
2604 rx_retry_list);
2605 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2606
Angshuman Sarkar98f78122012-07-24 14:50:42 +05302607 SMUX_DBG("smux: %s: ch %d retrying rx pkt %p\n",
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002608 __func__, ch->lcid, retry);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002609 metadata.read.pkt_priv = 0;
2610 metadata.read.buffer = 0;
2611 tmp = ch->get_rx_buffer(ch->priv,
2612 (void **)&metadata.read.pkt_priv,
2613 (void **)&metadata.read.buffer,
2614 retry->pkt->hdr.payload_len);
2615 if (tmp == 0 && metadata.read.buffer) {
2616 /* have valid RX buffer */
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002617
Eric Holmbergb8435c82012-06-05 14:51:29 -06002618 memcpy(metadata.read.buffer, retry->pkt->payload,
2619 retry->pkt->hdr.payload_len);
2620 metadata.read.len = retry->pkt->hdr.payload_len;
2621
2622 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002623 tx_ready = smux_remove_rx_retry(ch, retry);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002624 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002625 schedule_notify(ch->lcid, SMUX_READ_DONE, &metadata);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002626 if (tx_ready)
2627 list_channel(ch);
2628
2629 immediate_retry = 1;
Eric Holmbergb8435c82012-06-05 14:51:29 -06002630 } else if (tmp == -EAGAIN ||
2631 (tmp == 0 && !metadata.read.buffer)) {
2632 /* retry again */
2633 retry->timeout_in_ms <<= 1;
2634 if (retry->timeout_in_ms > SMUX_RX_RETRY_MAX_MS) {
2635 /* timed out */
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06002636 SMUX_ERR("%s: ch %d RX retry client timeout\n",
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002637 __func__, ch->lcid);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002638 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002639 tx_ready = smux_remove_rx_retry(ch, retry);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002640 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002641 schedule_notify(ch->lcid, SMUX_READ_FAIL, NULL);
2642 if (tx_ready)
2643 list_channel(ch);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002644 }
2645 } else {
2646 /* client error - drop packet */
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06002647 SMUX_ERR("%s: ch %d RX retry client failed (%d)\n",
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002648 __func__, ch->lcid, tmp);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002649 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002650 tx_ready = smux_remove_rx_retry(ch, retry);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002651 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002652 schedule_notify(ch->lcid, SMUX_READ_FAIL, NULL);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002653 if (tx_ready)
2654 list_channel(ch);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002655 }
2656
2657 /* schedule next retry */
2658 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2659 if (!list_empty(&ch->rx_retry_queue)) {
2660 retry = list_first_entry(&ch->rx_retry_queue,
2661 struct smux_rx_pkt_retry,
2662 rx_retry_list);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002663
2664 if (immediate_retry)
2665 queue_delayed_work(smux_rx_wq, &ch->rx_retry_work, 0);
2666 else
2667 queue_delayed_work(smux_rx_wq, &ch->rx_retry_work,
2668 msecs_to_jiffies(retry->timeout_in_ms));
Eric Holmbergb8435c82012-06-05 14:51:29 -06002669 }
2670 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2671}
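/*
 * Retry timing sketch: timeout_in_ms starts at SMUX_RX_RETRY_MIN_MS
 * (1 ms) and is doubled after each failed get_rx_buffer attempt, so a
 * busy client is retried after roughly 1, 2, 4, ..., 1024 ms.  Once
 * the delay would exceed SMUX_RX_RETRY_MAX_MS the packet is dropped
 * with SMUX_READ_FAIL -- about two seconds of cumulative retrying.  A
 * successful delivery sets immediate_retry so the next queued packet
 * is attempted without additional delay.
 */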
2672
2673/**
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002674 * Transmit worker handles serializing and transmitting packets onto the
2675 * underlying transport.
2676 *
2677 * @work Work structure (not used)
2678 */
2679static void smux_tx_worker(struct work_struct *work)
2680{
2681 struct smux_pkt_t *pkt;
2682 struct smux_lch_t *ch;
2683 unsigned low_wm_notif;
2684 unsigned lcid;
2685 unsigned long flags;
2686
2687
2688 /*
2689 * Transmit packets in round-robin fashion based upon ready
2690 * channels.
2691 *
2692 * To eliminate the need to hold a lock for the entire
2693 * iteration through the channel ready list, the head of the
2694 * ready-channel list is always the next channel to be
2695 * processed. To send a packet, the first valid packet in
2696 * the head channel is removed and the head channel is then
2697 * rescheduled at the end of the queue by removing it and
2698 * inserting after the tail. The locks can then be released
2699 * while the packet is processed.
2700 */
Eric Holmberged1f00c2012-06-07 09:45:18 -06002701 while (!smux.in_reset) {
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002702 pkt = NULL;
2703 low_wm_notif = 0;
2704
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002705 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002706
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002707 /* handle wakeup if needed */
2708 if (smux.power_state == SMUX_PWR_OFF) {
2709 if (!list_empty(&smux.lch_tx_ready_list) ||
2710 !list_empty(&smux.power_queue)) {
2711 /* data to transmit, do wakeup */
Angshuman Sarkar98f78122012-07-24 14:50:42 +05302712 SMUX_PWR("smux: %s: Power %d->%d\n", __func__,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002713 smux.power_state,
2714 SMUX_PWR_TURNING_ON);
2715 smux.power_state = SMUX_PWR_TURNING_ON;
2716 spin_unlock_irqrestore(&smux.tx_lock_lha2,
2717 flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002718 queue_work(smux_tx_wq, &smux_wakeup_work);
2719 } else {
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002720 /* no activity -- stay asleep */
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002721 spin_unlock_irqrestore(&smux.tx_lock_lha2,
2722 flags);
2723 }
2724 break;
2725 }
2726
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002727 /* process any pending power packets */
2728 if (!list_empty(&smux.power_queue)) {
2729 pkt = list_first_entry(&smux.power_queue,
2730 struct smux_pkt_t, list);
2731 list_del(&pkt->list);
2732 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2733
Eric Holmberga9b06472012-06-22 09:46:34 -06002734 /* Adjust power state if this is a flush command */
2735 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
2736 if (smux.power_state == SMUX_PWR_TURNING_OFF_FLUSH &&
2737 pkt->hdr.cmd == SMUX_CMD_PWR_CTL) {
2738 if (pkt->hdr.flags & SMUX_CMD_PWR_CTL_ACK ||
2739 smux.power_ctl_remote_req_received) {
2740 /*
2741 * Sending remote power-down request ACK
2742 * or sending local power-down request
2743 * and we already received a remote
2744 * power-down request.
2745 */
Angshuman Sarkar98f78122012-07-24 14:50:42 +05302746 SMUX_PWR(
2747 "smux: %s: Power %d->%d\n", __func__,
Eric Holmberga9b06472012-06-22 09:46:34 -06002748 smux.power_state,
2749 SMUX_PWR_OFF_FLUSH);
2750 smux.power_state = SMUX_PWR_OFF_FLUSH;
2751 smux.power_ctl_remote_req_received = 0;
2752 queue_work(smux_tx_wq,
2753 &smux_inactivity_work);
2754 } else {
2755 /* sending local power-down request */
Angshuman Sarkar98f78122012-07-24 14:50:42 +05302756 SMUX_PWR(
2757 "smux: %s: Power %d->%d\n", __func__,
Eric Holmberga9b06472012-06-22 09:46:34 -06002758 smux.power_state,
2759 SMUX_PWR_TURNING_OFF);
2760 smux.power_state = SMUX_PWR_TURNING_OFF;
2761 }
2762 }
2763 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2764
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002765 /* send the packet */
Eric Holmberga9b06472012-06-22 09:46:34 -06002766 smux_uart_power_on();
2767 smux.tx_activity_flag = 1;
Eric Holmberg4dd6e1a2012-06-26 13:46:29 -06002768 SMUX_PWR_PKT_TX(pkt);
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002769 if (!smux_byte_loopback) {
2770 smux_tx_tty(pkt);
2771 smux_flush_tty();
2772 } else {
2773 smux_tx_loopback(pkt);
2774 }
2775
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002776 smux_free_pkt(pkt);
2777 continue;
2778 }
2779
2780 /* get the next ready channel */
2781 if (list_empty(&smux.lch_tx_ready_list)) {
2782 /* no ready channels */
Angshuman Sarkar98f78122012-07-24 14:50:42 +05302783 SMUX_DBG("smux: %s: no more ready channels, exiting\n",
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002784 __func__);
2785 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2786 break;
2787 }
2788 smux.tx_activity_flag = 1;
2789
2790 if (smux.power_state != SMUX_PWR_ON) {
2791 /* channel not ready to transmit */
Angshuman Sarkar98f78122012-07-24 14:50:42 +05302792 SMUX_DBG("smux: %s: waiting for link up (state %d)\n",
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002793 __func__,
2794 smux.power_state);
2795 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2796 break;
2797 }
2798
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002799 /* get the next packet to send and rotate channel list */
2800 ch = list_first_entry(&smux.lch_tx_ready_list,
2801 struct smux_lch_t,
2802 tx_ready_list);
2803
2804 spin_lock(&ch->state_lock_lhb1);
2805 spin_lock(&ch->tx_lock_lhb2);
2806 if (!list_empty(&ch->tx_queue)) {
2807 /*
2808 * If remote TX flow control is enabled or
2809 * the channel is not fully opened, then only
2810 * send command packets.
2811 */
2812 if (ch->tx_flow_control || !IS_FULLY_OPENED(ch)) {
2813 struct smux_pkt_t *curr;
2814 list_for_each_entry(curr, &ch->tx_queue, list) {
2815 if (curr->hdr.cmd != SMUX_CMD_DATA) {
2816 pkt = curr;
2817 break;
2818 }
2819 }
2820 } else {
2821 /* get next cmd/data packet to send */
2822 pkt = list_first_entry(&ch->tx_queue,
2823 struct smux_pkt_t, list);
2824 }
2825 }
2826
2827 if (pkt) {
2828 list_del(&pkt->list);
2829
2830 /* update packet stats */
2831 if (pkt->hdr.cmd == SMUX_CMD_DATA) {
2832 --ch->tx_pending_data_cnt;
2833 if (ch->notify_lwm &&
2834 ch->tx_pending_data_cnt
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002835 <= SMUX_TX_WM_LOW) {
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002836 ch->notify_lwm = 0;
2837 low_wm_notif = 1;
2838 }
2839 }
2840
2841 /* advance to the next ready channel */
2842 list_rotate_left(&smux.lch_tx_ready_list);
2843 } else {
2844 /* no data in channel to send, remove from ready list */
2845 list_del(&ch->tx_ready_list);
2846 INIT_LIST_HEAD(&ch->tx_ready_list);
2847 }
2848 lcid = ch->lcid;
2849 spin_unlock(&ch->tx_lock_lhb2);
2850 spin_unlock(&ch->state_lock_lhb1);
2851 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2852
2853 if (low_wm_notif)
2854 schedule_notify(lcid, SMUX_LOW_WM_HIT, NULL);
2855
2856 /* send the packet */
2857 smux_tx_pkt(ch, pkt);
2858 smux_free_pkt(pkt);
2859 }
2860}
2861
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002862/**
2863 * Update the RX flow control (sent in the TIOCM Status command).
2864 *
2865 * @ch Channel for update
2866 *
2867 * @returns 1 for updated, 0 for not updated
2868 *
2869 * Must be called with ch->state_lock_lhb1 locked.
2870 */
2871static int smux_rx_flow_control_updated(struct smux_lch_t *ch)
2872{
2873 int updated = 0;
2874 int prev_state;
2875
2876 prev_state = ch->local_tiocm & SMUX_CMD_STATUS_FLOW_CNTL;
2877
2878 if (ch->rx_flow_control_client || ch->rx_flow_control_auto)
2879 ch->local_tiocm |= SMUX_CMD_STATUS_FLOW_CNTL;
2880 else
2881 ch->local_tiocm &= ~SMUX_CMD_STATUS_FLOW_CNTL;
2882
2883 if (prev_state != (ch->local_tiocm & SMUX_CMD_STATUS_FLOW_CNTL)) {
2884 smux_send_status_cmd(ch);
2885 updated = 1;
2886 }
2887
2888 return updated;
2889}
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002890
Eric Holmberg06011322012-07-06 18:17:03 -06002891/**
2892 * Flush all SMUX workqueues.
2893 *
2894 * This sets the reset bit to abort any processing loops and then
2895 * flushes the workqueues to ensure that no new pending work is
2896 * running. Do not call with any locks used by workers held as
2897 * this will result in a deadlock.
2898 */
2899static void smux_flush_workqueues(void)
2900{
2901 smux.in_reset = 1;
2902
Angshuman Sarkar98f78122012-07-24 14:50:42 +05302903 SMUX_DBG("smux: %s: flushing tx wq\n", __func__);
Eric Holmberg06011322012-07-06 18:17:03 -06002904 flush_workqueue(smux_tx_wq);
Angshuman Sarkar98f78122012-07-24 14:50:42 +05302905 SMUX_DBG("smux: %s: flushing rx wq\n", __func__);
Eric Holmberg06011322012-07-06 18:17:03 -06002906 flush_workqueue(smux_rx_wq);
Angshuman Sarkar98f78122012-07-24 14:50:42 +05302907 SMUX_DBG("smux: %s: flushing notify wq\n", __func__);
Eric Holmberg06011322012-07-06 18:17:03 -06002908 flush_workqueue(smux_notify_wq);
2909}
2910
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002911/**********************************************************************/
2912/* Kernel API */
2913/**********************************************************************/
2914
2915/**
2916 * Set or clear channel option using the SMUX_CH_OPTION_* channel
2917 * flags.
2918 *
2919 * @lcid Logical channel ID
2920 * @set Options to set
2921 * @clear Options to clear
2922 *
2923 * @returns 0 for success, < 0 for failure
2924 */
2925int msm_smux_set_ch_option(uint8_t lcid, uint32_t set, uint32_t clear)
2926{
2927 unsigned long flags;
2928 struct smux_lch_t *ch;
2929 int tx_ready = 0;
2930 int ret = 0;
2931
2932 if (smux_assert_lch_id(lcid))
2933 return -ENXIO;
2934
2935 ch = &smux_lch[lcid];
2936 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2937
2938 /* Local loopback mode */
2939 if (set & SMUX_CH_OPTION_LOCAL_LOOPBACK)
2940 ch->local_mode = SMUX_LCH_MODE_LOCAL_LOOPBACK;
2941
2942 if (clear & SMUX_CH_OPTION_LOCAL_LOOPBACK)
2943 ch->local_mode = SMUX_LCH_MODE_NORMAL;
2944
2945 /* Remote loopback mode */
2946 if (set & SMUX_CH_OPTION_REMOTE_LOOPBACK)
2947 ch->local_mode = SMUX_LCH_MODE_REMOTE_LOOPBACK;
2948
2949 if (clear & SMUX_CH_OPTION_REMOTE_LOOPBACK)
2950 ch->local_mode = SMUX_LCH_MODE_NORMAL;
2951
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002952 /* RX Flow control */
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002953 if (set & SMUX_CH_OPTION_REMOTE_TX_STOP) {
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002954 ch->rx_flow_control_client = 1;
2955 tx_ready |= smux_rx_flow_control_updated(ch);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002956 }
2957
2958 if (clear & SMUX_CH_OPTION_REMOTE_TX_STOP) {
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002959 ch->rx_flow_control_client = 0;
2960 tx_ready |= smux_rx_flow_control_updated(ch);
2961 }
2962
2963 /* Auto RX Flow Control */
2964 if (set & SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP) {
Angshuman Sarkar98f78122012-07-24 14:50:42 +05302965 SMUX_DBG("smux: %s: auto rx flow control option enabled\n",
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002966 __func__);
2967 ch->options |= SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP;
2968 }
2969
2970 if (clear & SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP) {
Angshuman Sarkar98f78122012-07-24 14:50:42 +05302971 SMUX_DBG("smux: %s: auto rx flow control option disabled\n",
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002972 __func__);
2973 ch->options &= ~SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP;
2974 ch->rx_flow_control_auto = 0;
2975 tx_ready |= smux_rx_flow_control_updated(ch);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002976 }
2977
2978 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2979
2980 if (tx_ready)
2981 list_channel(ch);
2982
2983 return ret;
2984}
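/*
 * Usage sketch (hypothetical client code, not part of this driver):
 * enable local loopback plus automatic RX flow control on a channel,
 * then later restore normal mode:
 *
 *	ret = msm_smux_set_ch_option(lcid,
 *			SMUX_CH_OPTION_LOCAL_LOOPBACK |
 *			SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP,
 *			0);
 *	...
 *	ret = msm_smux_set_ch_option(lcid, 0,
 *			SMUX_CH_OPTION_LOCAL_LOOPBACK |
 *			SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP);
 */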
2985
2986/**
2987 * Starts the opening sequence for a logical channel.
2988 *
2989 * @lcid Logical channel ID
2990 * @priv Free for client usage
2991 * @notify Event notification function
2992 * @get_rx_buffer Function used to provide a receive buffer to SMUX
2993 *
2994 * @returns 0 for success, <0 otherwise
2995 *
2996 * A channel must be fully closed (either not previously opened, or
2997 * msm_smux_close() has been called and the SMUX_DISCONNECTED notification
2998 * has been received).
2999 *
3000 * Once the remote side is opened, the client will receive a SMUX_CONNECTED
3001 * event.
3002 */
3003int msm_smux_open(uint8_t lcid, void *priv,
3004 void (*notify)(void *priv, int event_type, const void *metadata),
3005 int (*get_rx_buffer)(void *priv, void **pkt_priv, void **buffer,
3006 int size))
3007{
3008 int ret;
3009 struct smux_lch_t *ch;
3010 struct smux_pkt_t *pkt;
3011 int tx_ready = 0;
3012 unsigned long flags;
3013
3014 if (smux_assert_lch_id(lcid))
3015 return -ENXIO;
3016
3017 ch = &smux_lch[lcid];
3018 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
3019
3020 if (ch->local_state == SMUX_LCH_LOCAL_CLOSING) {
3021 ret = -EAGAIN;
3022 goto out;
3023 }
3024
3025 if (ch->local_state != SMUX_LCH_LOCAL_CLOSED) {
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06003026 SMUX_ERR("%s: open lcid %d local state %x invalid\n",
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003027 __func__, lcid, ch->local_state);
3028 ret = -EINVAL;
3029 goto out;
3030 }
3031
Angshuman Sarkar98f78122012-07-24 14:50:42 +05303032 SMUX_DBG("smux: lcid %d local state 0x%x -> 0x%x\n", lcid,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003033 ch->local_state,
3034 SMUX_LCH_LOCAL_OPENING);
3035
Eric Holmberg06011322012-07-06 18:17:03 -06003036 ch->rx_flow_control_auto = 0;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003037 ch->local_state = SMUX_LCH_LOCAL_OPENING;
3038
3039 ch->priv = priv;
3040 ch->notify = notify;
3041 ch->get_rx_buffer = get_rx_buffer;
3042 ret = 0;
3043
3044 /* Send Open Command */
3045 pkt = smux_alloc_pkt();
3046 if (!pkt) {
3047 ret = -ENOMEM;
3048 goto out;
3049 }
3050 pkt->hdr.magic = SMUX_MAGIC;
3051 pkt->hdr.cmd = SMUX_CMD_OPEN_LCH;
3052 pkt->hdr.flags = SMUX_CMD_OPEN_POWER_COLLAPSE;
3053 if (ch->local_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK)
3054 pkt->hdr.flags |= SMUX_CMD_OPEN_REMOTE_LOOPBACK;
3055 pkt->hdr.lcid = lcid;
3056 pkt->hdr.payload_len = 0;
3057 pkt->hdr.pad_len = 0;
3058 smux_tx_queue(pkt, ch, 0);
3059 tx_ready = 1;
3060
3061out:
3062 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
Eric Holmberg06011322012-07-06 18:17:03 -06003063 smux_rx_flow_control_updated(ch);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003064 if (tx_ready)
3065 list_channel(ch);
3066 return ret;
3067}
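/*
 * Minimal client sketch for the open sequence (my_notify,
 * my_get_rx_buffer, and my_ctx are hypothetical and not part of this
 * driver):
 *
 *	static void my_notify(void *priv, int event, const void *metadata)
 *	{
 *		switch (event) {
 *		case SMUX_CONNECTED:
 *		case SMUX_READ_DONE:
 *		case SMUX_WRITE_DONE:
 *		case SMUX_DISCONNECTED:
 *		default:
 *			break;
 *		}
 *	}
 *
 *	ret = msm_smux_open(lcid, &my_ctx, my_notify, my_get_rx_buffer);
 *
 * A SMUX_CONNECTED notification arrives once both the local and remote
 * sides are open; data queued earlier with msm_smux_write() is held in
 * the TX queue until then.
 */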
3068
3069/**
3070 * Starts the closing sequence for a logical channel.
3071 *
3072 * @lcid Logical channel ID
3073 *
3074 * @returns 0 for success, <0 otherwise
3075 *
3076 * Once the close event has been acknowledged by the remote side, the client
3077 * will receive a SMUX_DISCONNECTED notification.
3078 */
3079int msm_smux_close(uint8_t lcid)
3080{
3081 int ret = 0;
3082 struct smux_lch_t *ch;
3083 struct smux_pkt_t *pkt;
3084 int tx_ready = 0;
3085 unsigned long flags;
3086
3087 if (smux_assert_lch_id(lcid))
3088 return -ENXIO;
3089
3090 ch = &smux_lch[lcid];
3091 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
3092 ch->local_tiocm = 0x0;
3093 ch->remote_tiocm = 0x0;
3094 ch->tx_pending_data_cnt = 0;
3095 ch->notify_lwm = 0;
Eric Holmbergf61339e2012-08-13 14:45:27 -06003096 ch->tx_flow_control = 0;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003097
3098 /* Purge TX queue */
3099 spin_lock(&ch->tx_lock_lhb2);
Eric Holmberg6fcf5322012-07-11 11:46:28 -06003100 smux_purge_ch_tx_queue(ch, 0);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003101 spin_unlock(&ch->tx_lock_lhb2);
3102
3103 /* Send Close Command */
3104 if (ch->local_state == SMUX_LCH_LOCAL_OPENED ||
3105 ch->local_state == SMUX_LCH_LOCAL_OPENING) {
Angshuman Sarkar98f78122012-07-24 14:50:42 +05303106 SMUX_DBG("smux: lcid %d local state 0x%x -> 0x%x\n", lcid,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003107 ch->local_state,
3108 SMUX_LCH_LOCAL_CLOSING);
3109
3110 ch->local_state = SMUX_LCH_LOCAL_CLOSING;
3111 pkt = smux_alloc_pkt();
3112 if (pkt) {
3113 pkt->hdr.cmd = SMUX_CMD_CLOSE_LCH;
3114 pkt->hdr.flags = 0;
3115 pkt->hdr.lcid = lcid;
3116 pkt->hdr.payload_len = 0;
3117 pkt->hdr.pad_len = 0;
3118 smux_tx_queue(pkt, ch, 0);
3119 tx_ready = 1;
3120 } else {
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06003121 SMUX_ERR("%s: pkt allocation failed\n", __func__);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003122 ret = -ENOMEM;
3123 }
Eric Holmbergb8435c82012-06-05 14:51:29 -06003124
3125 /* Purge RX retry queue */
3126 if (ch->rx_retry_queue_cnt)
3127 queue_delayed_work(smux_rx_wq, &ch->rx_retry_work, 0);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003128 }
3129 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
3130
3131 if (tx_ready)
3132 list_channel(ch);
3133
3134 return ret;
3135}
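/*
 * Example (illustrative sketch): initiating a close. Any queued TX packets
 * are purged; the client should keep its context intact until the
 * SMUX_DISCONNECTED notification arrives. my_shutdown is a hypothetical
 * name.
 */
#if 0
static void my_shutdown(uint8_t lcid)
{
	if (msm_smux_close(lcid) < 0)
		return;
	/* wait for SMUX_DISCONNECTED in the notify callback before
	 * tearing down client state */
}
#endif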
3136
3137/**
3138 * Write data to a logical channel.
3139 *
3140 * @lcid Logical channel ID
3141 * @pkt_priv Client data that will be returned with the SMUX_WRITE_DONE or
3142 * SMUX_WRITE_FAIL notification.
3143 * @data Data to write
3144 * @len Length of @data
3145 *
3146 * @returns 0 for success, <0 otherwise
3147 *
3148 * Data may be written immediately after msm_smux_open() is called,
3149 * but the data will wait in the transmit queue until the channel has
3150 * been fully opened.
3151 *
3152 * Once the data has been written, the client will receive either a completion
3153 * (SMUX_WRITE_DONE) or a failure notice (SMUX_WRITE_FAIL).
3154 */
3155int msm_smux_write(uint8_t lcid, void *pkt_priv, const void *data, int len)
3156{
3157 struct smux_lch_t *ch;
3158 struct smux_pkt_t *pkt;
3159 int tx_ready = 0;
3160 unsigned long flags;
3161 int ret;
3162
3163 if (smux_assert_lch_id(lcid))
3164 return -ENXIO;
3165
3166 ch = &smux_lch[lcid];
3167 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
3168
3169 if (ch->local_state != SMUX_LCH_LOCAL_OPENED &&
3170 ch->local_state != SMUX_LCH_LOCAL_OPENING) {
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06003171 SMUX_ERR("%s: invalid local state %d channel %d\n",
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003172 __func__, ch->local_state, lcid);
3173 ret = -EINVAL;
3174 goto out;
3175 }
3176
3177 if (len > SMUX_MAX_PKT_SIZE - sizeof(struct smux_hdr_t)) {
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06003178 SMUX_ERR("%s: payload %d too large\n",
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003179 __func__, len);
3180 ret = -E2BIG;
3181 goto out;
3182 }
3183
3184 pkt = smux_alloc_pkt();
3185 if (!pkt) {
3186 ret = -ENOMEM;
3187 goto out;
3188 }
3189
3190 pkt->hdr.cmd = SMUX_CMD_DATA;
3191 pkt->hdr.lcid = lcid;
3192 pkt->hdr.flags = 0;
3193 pkt->hdr.payload_len = len;
3194 pkt->payload = (void *)data;
3195 pkt->priv = pkt_priv;
3196 pkt->hdr.pad_len = 0;
3197
3198 spin_lock(&ch->tx_lock_lhb2);
3199 /* verify high watermark */
Angshuman Sarkar98f78122012-07-24 14:50:42 +05303200 SMUX_DBG("smux: %s: pending %d\n", __func__, ch->tx_pending_data_cnt);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003201
Eric Holmberg2e0906f2012-06-26 13:29:14 -06003202 if (ch->tx_pending_data_cnt >= SMUX_TX_WM_HIGH) {
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06003203 SMUX_ERR("%s: ch %d high watermark %d exceeded %d\n",
Eric Holmberg2e0906f2012-06-26 13:29:14 -06003204 __func__, lcid, SMUX_TX_WM_HIGH,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003205 ch->tx_pending_data_cnt);
3206 ret = -EAGAIN;
3207 goto out_inner;
3208 }
3209
3210 /* queue packet for transmit */
Eric Holmberg2e0906f2012-06-26 13:29:14 -06003211 if (++ch->tx_pending_data_cnt == SMUX_TX_WM_HIGH) {
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003212 ch->notify_lwm = 1;
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06003213 SMUX_ERR("%s: high watermark hit\n", __func__);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003214 schedule_notify(lcid, SMUX_HIGH_WM_HIT, NULL);
3215 }
3216 list_add_tail(&pkt->list, &ch->tx_queue);
3217
3218 /* add to ready list */
3219 if (IS_FULLY_OPENED(ch))
3220 tx_ready = 1;
3221
3222 ret = 0;
3223
3224out_inner:
3225 spin_unlock(&ch->tx_lock_lhb2);
3226
3227out:
3228 if (ret)
3229 smux_free_pkt(pkt);
3230 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
3231
3232 if (tx_ready)
3233 list_channel(ch);
3234
3235 return ret;
3236}
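/*
 * Example (illustrative sketch): queueing a write and backing off at the
 * high watermark. The data buffer remains owned by the caller until the
 * SMUX_WRITE_DONE or SMUX_WRITE_FAIL notification returns it; my_send is
 * a hypothetical name.
 */
#if 0
static int my_send(uint8_t lcid, void *buf, int len)
{
	/* pkt_priv (second argument) is echoed back in the completion */
	int ret = msm_smux_write(lcid, buf, buf, len);

	if (ret == -EAGAIN) {
		/* TX queue reached SMUX_TX_WM_HIGH; retry after the queue
		 * drains (see msm_smux_is_ch_low()) */
	}
	return ret;
}
#endif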
3237
3238/**
 3239 * Returns true if the TX queue is currently full (at or above the high watermark).
3240 *
3241 * @lcid Logical channel ID
3242 * @returns 0 if channel is not full
3243 * 1 if it is full
3244 * < 0 for error
3245 */
3246int msm_smux_is_ch_full(uint8_t lcid)
3247{
3248 struct smux_lch_t *ch;
3249 unsigned long flags;
3250 int is_full = 0;
3251
3252 if (smux_assert_lch_id(lcid))
3253 return -ENXIO;
3254
3255 ch = &smux_lch[lcid];
3256
3257 spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06003258 if (ch->tx_pending_data_cnt >= SMUX_TX_WM_HIGH)
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003259 is_full = 1;
3260 spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);
3261
3262 return is_full;
3263}
3264
3265/**
 3266 * Returns true if the TX queue has space for more packets (it is at or
 3267 * below the low watermark).
3268 *
3269 * @lcid Logical channel ID
3270 * @returns 0 if channel is above low watermark
3271 * 1 if it's at or below the low watermark
3272 * < 0 for error
3273 */
3274int msm_smux_is_ch_low(uint8_t lcid)
3275{
3276 struct smux_lch_t *ch;
3277 unsigned long flags;
3278 int is_low = 0;
3279
3280 if (smux_assert_lch_id(lcid))
3281 return -ENXIO;
3282
3283 ch = &smux_lch[lcid];
3284
3285 spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06003286 if (ch->tx_pending_data_cnt <= SMUX_TX_WM_LOW)
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003287 is_low = 1;
3288 spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);
3289
3290 return is_low;
3291}
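/*
 * Example (illustrative sketch): using the two watermark queries above for
 * simple client-side flow control. A real client would normally react to
 * the watermark notifications rather than poll; my_can_queue_more is a
 * hypothetical name.
 */
#if 0
static bool my_can_queue_more(uint8_t lcid)
{
	int full = msm_smux_is_ch_full(lcid);
	int low = msm_smux_is_ch_low(lcid);

	if (full < 0 || low < 0)
		return false;		/* invalid channel */
	if (full)
		return false;		/* at or above SMUX_TX_WM_HIGH */
	return low == 1;		/* resume once at/below SMUX_TX_WM_LOW */
}
#endif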
3292
3293/**
3294 * Send TIOCM status update.
3295 *
3296 * @ch Channel for update
3297 *
3298 * @returns 0 for success, <0 for failure
3299 *
3300 * Channel lock must be held before calling.
3301 */
3302static int smux_send_status_cmd(struct smux_lch_t *ch)
3303{
3304 struct smux_pkt_t *pkt;
3305
3306 if (!ch)
3307 return -EINVAL;
3308
3309 pkt = smux_alloc_pkt();
3310 if (!pkt)
3311 return -ENOMEM;
3312
3313 pkt->hdr.lcid = ch->lcid;
3314 pkt->hdr.cmd = SMUX_CMD_STATUS;
3315 pkt->hdr.flags = ch->local_tiocm;
3316 pkt->hdr.payload_len = 0;
3317 pkt->hdr.pad_len = 0;
3318 smux_tx_queue(pkt, ch, 0);
3319
3320 return 0;
3321}
3322
3323/**
3324 * Internal helper function for getting the TIOCM status with
3325 * state_lock_lhb1 already locked.
3326 *
3327 * @ch Channel pointer
3328 *
3329 * @returns TIOCM status
3330 */
Eric Holmbergf44a8cc2012-06-13 17:58:13 -06003331long msm_smux_tiocm_get_atomic(struct smux_lch_t *ch)
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003332{
3333 long status = 0x0;
3334
3335 status |= (ch->remote_tiocm & SMUX_CMD_STATUS_RTC) ? TIOCM_DSR : 0;
3336 status |= (ch->remote_tiocm & SMUX_CMD_STATUS_RTR) ? TIOCM_CTS : 0;
3337 status |= (ch->remote_tiocm & SMUX_CMD_STATUS_RI) ? TIOCM_RI : 0;
3338 status |= (ch->remote_tiocm & SMUX_CMD_STATUS_DCD) ? TIOCM_CD : 0;
3339
3340 status |= (ch->local_tiocm & SMUX_CMD_STATUS_RTC) ? TIOCM_DTR : 0;
3341 status |= (ch->local_tiocm & SMUX_CMD_STATUS_RTR) ? TIOCM_RTS : 0;
3342
3343 return status;
3344}
3345
3346/**
3347 * Get the TIOCM status bits.
3348 *
3349 * @lcid Logical channel ID
3350 *
3351 * @returns >= 0 TIOCM status bits
3352 * < 0 Error condition
3353 */
3354long msm_smux_tiocm_get(uint8_t lcid)
3355{
3356 struct smux_lch_t *ch;
3357 unsigned long flags;
3358 long status = 0x0;
3359
3360 if (smux_assert_lch_id(lcid))
3361 return -ENXIO;
3362
3363 ch = &smux_lch[lcid];
3364 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
3365 status = msm_smux_tiocm_get_atomic(ch);
3366 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
3367
3368 return status;
3369}
3370
3371/**
3372 * Set/clear the TIOCM status bits.
3373 *
3374 * @lcid Logical channel ID
3375 * @set Bits to set
3376 * @clear Bits to clear
3377 *
3378 * @returns 0 for success; < 0 for failure
3379 *
3380 * If a bit is specified in both the @set and @clear masks, then the clear bit
3381 * definition will dominate and the bit will be cleared.
3382 */
3383int msm_smux_tiocm_set(uint8_t lcid, uint32_t set, uint32_t clear)
3384{
3385 struct smux_lch_t *ch;
3386 unsigned long flags;
3387 uint8_t old_status;
3388 uint8_t status_set = 0x0;
3389 uint8_t status_clear = 0x0;
3390 int tx_ready = 0;
3391 int ret = 0;
3392
3393 if (smux_assert_lch_id(lcid))
3394 return -ENXIO;
3395
3396 ch = &smux_lch[lcid];
3397 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
3398
3399 status_set |= (set & TIOCM_DTR) ? SMUX_CMD_STATUS_RTC : 0;
3400 status_set |= (set & TIOCM_RTS) ? SMUX_CMD_STATUS_RTR : 0;
3401 status_set |= (set & TIOCM_RI) ? SMUX_CMD_STATUS_RI : 0;
3402 status_set |= (set & TIOCM_CD) ? SMUX_CMD_STATUS_DCD : 0;
3403
3404 status_clear |= (clear & TIOCM_DTR) ? SMUX_CMD_STATUS_RTC : 0;
3405 status_clear |= (clear & TIOCM_RTS) ? SMUX_CMD_STATUS_RTR : 0;
3406 status_clear |= (clear & TIOCM_RI) ? SMUX_CMD_STATUS_RI : 0;
3407 status_clear |= (clear & TIOCM_CD) ? SMUX_CMD_STATUS_DCD : 0;
3408
3409 old_status = ch->local_tiocm;
3410 ch->local_tiocm |= status_set;
3411 ch->local_tiocm &= ~status_clear;
3412
3413 if (ch->local_tiocm != old_status) {
3414 ret = smux_send_status_cmd(ch);
3415 tx_ready = 1;
3416 }
3417 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
3418
3419 if (tx_ready)
3420 list_channel(ch);
3421
3422 return ret;
3423}
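/*
 * Example (illustrative sketch): asserting DTR/RTS on a channel and then
 * checking whether the remote side reports ready-to-receive. Standard
 * TIOCM_* bits are used; my_assert_lines is a hypothetical name.
 */
#if 0
static int my_assert_lines(uint8_t lcid)
{
	long bits;
	int ret;

	/* a bit present in both the set and clear masks ends up cleared */
	ret = msm_smux_tiocm_set(lcid, TIOCM_DTR | TIOCM_RTS, 0);
	if (ret < 0)
		return ret;

	bits = msm_smux_tiocm_get(lcid);
	if (bits < 0)
		return (int)bits;
	return (bits & TIOCM_CTS) ? 0 : -EAGAIN;	/* remote not ready yet */
}
#endif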
3424
3425/**********************************************************************/
Eric Holmberged1f00c2012-06-07 09:45:18 -06003426/* Subsystem Restart */
3427/**********************************************************************/
3428static struct notifier_block ssr_notifier = {
3429 .notifier_call = ssr_notifier_cb,
3430};
3431
3432/**
3433 * Handle Subsystem Restart (SSR) notifications.
3434 *
3435 * @this Pointer to ssr_notifier
3436 * @code SSR Code
3437 * @data Data pointer (not used)
3438 */
3439static int ssr_notifier_cb(struct notifier_block *this,
3440 unsigned long code,
3441 void *data)
3442{
3443 unsigned long flags;
Eric Holmbergf6a364e2012-08-07 18:41:44 -06003444 int i;
3445 int tmp;
Eric Holmberged1f00c2012-06-07 09:45:18 -06003446 int power_off_uart = 0;
3447
Eric Holmbergd2697902012-06-15 09:58:46 -06003448 if (code == SUBSYS_BEFORE_SHUTDOWN) {
Angshuman Sarkar98f78122012-07-24 14:50:42 +05303449 SMUX_DBG("smux: %s: ssr - before shutdown\n", __func__);
Eric Holmbergd2697902012-06-15 09:58:46 -06003450 mutex_lock(&smux.mutex_lha0);
3451 smux.in_reset = 1;
3452 mutex_unlock(&smux.mutex_lha0);
Eric Holmberged1f00c2012-06-07 09:45:18 -06003453 return NOTIFY_DONE;
Eric Holmbergf6a364e2012-08-07 18:41:44 -06003454 } else if (code == SUBSYS_AFTER_POWERUP) {
3455 /* re-register platform devices */
Angshuman Sarkar98f78122012-07-24 14:50:42 +05303456 SMUX_DBG("smux: %s: ssr - after power-up\n", __func__);
Eric Holmbergf6a364e2012-08-07 18:41:44 -06003457 mutex_lock(&smux.mutex_lha0);
Eric Holmberg2bf9c522012-08-09 13:23:21 -06003458 if (smux.ld_open_count > 0
3459 && !smux.platform_devs_registered) {
3460 for (i = 0; i < ARRAY_SIZE(smux_devs); ++i) {
Angshuman Sarkar98f78122012-07-24 14:50:42 +05303461 SMUX_DBG("smux: %s: register pdev '%s'\n",
Eric Holmbergf6a364e2012-08-07 18:41:44 -06003462 __func__, smux_devs[i].name);
Eric Holmberg2bf9c522012-08-09 13:23:21 -06003463 smux_devs[i].dev.release = smux_pdev_release;
3464 tmp = platform_device_register(&smux_devs[i]);
3465 if (tmp)
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06003466 SMUX_ERR(
3467 "%s: error %d registering device %s\n",
Eric Holmbergf6a364e2012-08-07 18:41:44 -06003468 __func__, tmp, smux_devs[i].name);
Eric Holmberg2bf9c522012-08-09 13:23:21 -06003469 }
3470 smux.platform_devs_registered = 1;
Eric Holmbergf6a364e2012-08-07 18:41:44 -06003471 }
3472 mutex_unlock(&smux.mutex_lha0);
3473 return NOTIFY_DONE;
Eric Holmbergd2697902012-06-15 09:58:46 -06003474 } else if (code != SUBSYS_AFTER_SHUTDOWN) {
3475 return NOTIFY_DONE;
3476 }
Angshuman Sarkar98f78122012-07-24 14:50:42 +05303477 SMUX_DBG("smux: %s: ssr - after shutdown\n", __func__);
Eric Holmberged1f00c2012-06-07 09:45:18 -06003478
3479 /* Cleanup channels */
Eric Holmberg06011322012-07-06 18:17:03 -06003480 smux_flush_workqueues();
Eric Holmbergd2697902012-06-15 09:58:46 -06003481 mutex_lock(&smux.mutex_lha0);
Eric Holmberg2bf9c522012-08-09 13:23:21 -06003482 if (smux.ld_open_count > 0) {
3483 smux_lch_purge();
3484 if (smux.tty)
3485 tty_driver_flush_buffer(smux.tty);
Eric Holmberged1f00c2012-06-07 09:45:18 -06003486
Eric Holmberg2bf9c522012-08-09 13:23:21 -06003487 /* Unregister platform devices */
3488 if (smux.platform_devs_registered) {
3489 for (i = 0; i < ARRAY_SIZE(smux_devs); ++i) {
Angshuman Sarkar98f78122012-07-24 14:50:42 +05303490 SMUX_DBG("smux: %s: unregister pdev '%s'\n",
Eric Holmberg2bf9c522012-08-09 13:23:21 -06003491 __func__, smux_devs[i].name);
3492 platform_device_unregister(&smux_devs[i]);
3493 }
3494 smux.platform_devs_registered = 0;
3495 }
3496
3497 /* Power-down UART */
3498 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
3499 if (smux.power_state != SMUX_PWR_OFF) {
Angshuman Sarkar98f78122012-07-24 14:50:42 +05303500 SMUX_PWR("smux: %s: SSR - turning off UART\n",
3501 __func__);
Eric Holmberg2bf9c522012-08-09 13:23:21 -06003502 smux.power_state = SMUX_PWR_OFF;
3503 power_off_uart = 1;
3504 }
3505 smux.powerdown_enabled = 0;
3506 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
3507
3508 if (power_off_uart)
3509 smux_uart_power_off_atomic();
Eric Holmbergf6a364e2012-08-07 18:41:44 -06003510 }
Eric Holmberg06011322012-07-06 18:17:03 -06003511 smux.tx_activity_flag = 0;
3512 smux.rx_activity_flag = 0;
3513 smux.rx_state = SMUX_RX_IDLE;
Eric Holmbergd2697902012-06-15 09:58:46 -06003514 smux.in_reset = 0;
3515 mutex_unlock(&smux.mutex_lha0);
3516
Eric Holmberged1f00c2012-06-07 09:45:18 -06003517 return NOTIFY_DONE;
3518}
3519
3520/**********************************************************************/
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003521/* Line Discipline Interface */
3522/**********************************************************************/
Eric Holmberged1f00c2012-06-07 09:45:18 -06003523static void smux_pdev_release(struct device *dev)
3524{
3525 struct platform_device *pdev;
3526
3527 pdev = container_of(dev, struct platform_device, dev);
Angshuman Sarkar98f78122012-07-24 14:50:42 +05303528 SMUX_DBG("smux: %s: releasing pdev %p '%s'\n",
3529 __func__, pdev, pdev->name);
Eric Holmberged1f00c2012-06-07 09:45:18 -06003530 memset(&pdev->dev, 0x0, sizeof(pdev->dev));
3531}
3532
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003533static int smuxld_open(struct tty_struct *tty)
3534{
3535 int i;
3536 int tmp;
3537 unsigned long flags;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003538
3539 if (!smux.is_initialized)
3540 return -ENODEV;
3541
Eric Holmberged1f00c2012-06-07 09:45:18 -06003542 mutex_lock(&smux.mutex_lha0);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003543 if (smux.ld_open_count) {
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06003544 SMUX_ERR("%s: %p multiple instances not supported\n",
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003545 __func__, tty);
Eric Holmberged1f00c2012-06-07 09:45:18 -06003546 mutex_unlock(&smux.mutex_lha0);
Eric Holmberg902c51e2012-05-29 12:12:16 -06003547 return -EEXIST;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003548 }
3549
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003550 if (tty->ops->write == NULL) {
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06003551 SMUX_ERR("%s: tty->ops->write is NULL\n", __func__);
Eric Holmberged1f00c2012-06-07 09:45:18 -06003552 mutex_unlock(&smux.mutex_lha0);
Eric Holmberg902c51e2012-05-29 12:12:16 -06003553 return -EINVAL;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003554 }
3555
3556 /* connect to TTY */
Eric Holmberged1f00c2012-06-07 09:45:18 -06003557 ++smux.ld_open_count;
3558 smux.in_reset = 0;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003559 smux.tty = tty;
3560 tty->disc_data = &smux;
3561 tty->receive_room = TTY_RECEIVE_ROOM;
3562 tty_driver_flush_buffer(tty);
3563
3564 /* power-down the UART if we are idle */
Eric Holmberged1f00c2012-06-07 09:45:18 -06003565 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003566 if (smux.power_state == SMUX_PWR_OFF) {
Angshuman Sarkar98f78122012-07-24 14:50:42 +05303567 SMUX_PWR("smux: %s: powering off uart\n", __func__);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003568 smux.power_state = SMUX_PWR_OFF_FLUSH;
Eric Holmberged1f00c2012-06-07 09:45:18 -06003569 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003570 queue_work(smux_tx_wq, &smux_inactivity_work);
3571 } else {
Eric Holmberged1f00c2012-06-07 09:45:18 -06003572 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003573 }
3574
3575 /* register platform devices */
3576 for (i = 0; i < ARRAY_SIZE(smux_devs); ++i) {
Angshuman Sarkar98f78122012-07-24 14:50:42 +05303577 SMUX_DBG("smux: %s: register pdev '%s'\n",
Eric Holmberged1f00c2012-06-07 09:45:18 -06003578 __func__, smux_devs[i].name);
3579 smux_devs[i].dev.release = smux_pdev_release;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003580 tmp = platform_device_register(&smux_devs[i]);
3581 if (tmp)
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06003582 SMUX_ERR("%s: error %d registering device %s\n",
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003583 __func__, tmp, smux_devs[i].name);
3584 }
Eric Holmberg2bf9c522012-08-09 13:23:21 -06003585 smux.platform_devs_registered = 1;
Eric Holmberged1f00c2012-06-07 09:45:18 -06003586 mutex_unlock(&smux.mutex_lha0);
Eric Holmberg902c51e2012-05-29 12:12:16 -06003587 return 0;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003588}
3589
3590static void smuxld_close(struct tty_struct *tty)
3591{
3592 unsigned long flags;
Eric Holmberged1f00c2012-06-07 09:45:18 -06003593 int power_up_uart = 0;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003594 int i;
3595
Angshuman Sarkar98f78122012-07-24 14:50:42 +05303596 SMUX_DBG("smux: %s: ldisc unload\n", __func__);
Eric Holmberg06011322012-07-06 18:17:03 -06003597 smux_flush_workqueues();
3598
Eric Holmberged1f00c2012-06-07 09:45:18 -06003599 mutex_lock(&smux.mutex_lha0);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003600 if (smux.ld_open_count <= 0) {
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06003601 SMUX_ERR("%s: invalid ld count %d\n", __func__,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003602 smux.ld_open_count);
Eric Holmberged1f00c2012-06-07 09:45:18 -06003603 mutex_unlock(&smux.mutex_lha0);
Eric Holmberg902c51e2012-05-29 12:12:16 -06003604 return;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003605 }
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003606 --smux.ld_open_count;
Eric Holmberged1f00c2012-06-07 09:45:18 -06003607
3608 /* Cleanup channels */
3609 smux_lch_purge();
3610
3611 /* Unregister platform devices */
Eric Holmberg2bf9c522012-08-09 13:23:21 -06003612 if (smux.platform_devs_registered) {
3613 for (i = 0; i < ARRAY_SIZE(smux_devs); ++i) {
Angshuman Sarkar98f78122012-07-24 14:50:42 +05303614 SMUX_DBG("smux: %s: unregister pdev '%s'\n",
Eric Holmberg2bf9c522012-08-09 13:23:21 -06003615 __func__, smux_devs[i].name);
3616 platform_device_unregister(&smux_devs[i]);
3617 }
3618 smux.platform_devs_registered = 0;
Eric Holmberged1f00c2012-06-07 09:45:18 -06003619 }
3620
3621 /* Schedule UART power-up if it's down */
3622 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
Eric Holmbergffddd4c2012-06-08 12:37:51 -06003623 if (smux.power_state == SMUX_PWR_OFF)
Eric Holmberged1f00c2012-06-07 09:45:18 -06003624 power_up_uart = 1;
Eric Holmbergffddd4c2012-06-08 12:37:51 -06003625 smux.power_state = SMUX_PWR_OFF;
Eric Holmbergd2697902012-06-15 09:58:46 -06003626 smux.powerdown_enabled = 0;
Eric Holmberg06011322012-07-06 18:17:03 -06003627 smux.tx_activity_flag = 0;
3628 smux.rx_activity_flag = 0;
Eric Holmberged1f00c2012-06-07 09:45:18 -06003629 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
3630
3631 if (power_up_uart)
Eric Holmberg92a67df2012-06-25 13:56:24 -06003632 smux_uart_power_on_atomic();
Eric Holmberged1f00c2012-06-07 09:45:18 -06003633
Eric Holmberg06011322012-07-06 18:17:03 -06003634 smux.rx_state = SMUX_RX_IDLE;
3635
Eric Holmberged1f00c2012-06-07 09:45:18 -06003636 /* Disconnect from TTY */
3637 smux.tty = NULL;
3638 mutex_unlock(&smux.mutex_lha0);
Angshuman Sarkar98f78122012-07-24 14:50:42 +05303639 SMUX_DBG("smux: %s: ldisc complete\n", __func__);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003640}
3641
3642/**
 3643 * Receive data from the TTY driver (line discipline receive_buf hook).
3644 *
3645 * @tty TTY structure
3646 * @cp Character data
3647 * @fp Flag data
3648 * @count Size of character and flag data
3649 */
3650void smuxld_receive_buf(struct tty_struct *tty, const unsigned char *cp,
3651 char *fp, int count)
3652{
3653 int i;
3654 int last_idx = 0;
3655 const char *tty_name = NULL;
3656 char *f;
3657
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003658 /* verify error flags */
3659 for (i = 0, f = fp; i < count; ++i, ++f) {
3660 if (*f != TTY_NORMAL) {
3661 if (tty)
3662 tty_name = tty->name;
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06003663 SMUX_ERR("%s: TTY %s Error %d (%s)\n", __func__,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003664 tty_name, *f, tty_flag_to_str(*f));
3665
3666 /* feed all previous valid data to the parser */
3667 smux_rx_state_machine(cp + last_idx, i - last_idx,
3668 TTY_NORMAL);
3669
3670 /* feed bad data to parser */
3671 smux_rx_state_machine(cp + i, 1, *f);
3672 last_idx = i + 1;
3673 }
3674 }
3675
3676 /* feed data to RX state machine */
3677 smux_rx_state_machine(cp + last_idx, count - last_idx, TTY_NORMAL);
3678}
3679
3680static void smuxld_flush_buffer(struct tty_struct *tty)
3681{
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06003682 SMUX_ERR("%s: not supported\n", __func__);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003683}
3684
3685static ssize_t smuxld_chars_in_buffer(struct tty_struct *tty)
3686{
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06003687 SMUX_ERR("%s: not supported\n", __func__);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003688 return -ENODEV;
3689}
3690
3691static ssize_t smuxld_read(struct tty_struct *tty, struct file *file,
3692 unsigned char __user *buf, size_t nr)
3693{
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06003694 SMUX_ERR("%s: not supported\n", __func__);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003695 return -ENODEV;
3696}
3697
3698static ssize_t smuxld_write(struct tty_struct *tty, struct file *file,
3699 const unsigned char *buf, size_t nr)
3700{
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06003701 SMUX_ERR("%s: not supported\n", __func__);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003702 return -ENODEV;
3703}
3704
3705static int smuxld_ioctl(struct tty_struct *tty, struct file *file,
3706 unsigned int cmd, unsigned long arg)
3707{
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06003708 SMUX_ERR("%s: not supported\n", __func__);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003709 return -ENODEV;
3710}
3711
3712static unsigned int smuxld_poll(struct tty_struct *tty, struct file *file,
3713 struct poll_table_struct *tbl)
3714{
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06003715 SMUX_ERR("%s: not supported\n", __func__);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003716 return -ENODEV;
3717}
3718
3719static void smuxld_write_wakeup(struct tty_struct *tty)
3720{
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06003721 SMUX_ERR("%s: not supported\n", __func__);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003722}
3723
3724static struct tty_ldisc_ops smux_ldisc_ops = {
3725 .owner = THIS_MODULE,
3726 .magic = TTY_LDISC_MAGIC,
3727 .name = "n_smux",
3728 .open = smuxld_open,
3729 .close = smuxld_close,
3730 .flush_buffer = smuxld_flush_buffer,
3731 .chars_in_buffer = smuxld_chars_in_buffer,
3732 .read = smuxld_read,
3733 .write = smuxld_write,
3734 .ioctl = smuxld_ioctl,
3735 .poll = smuxld_poll,
3736 .receive_buf = smuxld_receive_buf,
3737 .write_wakeup = smuxld_write_wakeup
3738};
3739
3740static int __init smux_init(void)
3741{
3742 int ret;
3743
Eric Holmberged1f00c2012-06-07 09:45:18 -06003744 mutex_init(&smux.mutex_lha0);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003745
3746 spin_lock_init(&smux.rx_lock_lha1);
3747 smux.rx_state = SMUX_RX_IDLE;
3748 smux.power_state = SMUX_PWR_OFF;
3749 smux.pwr_wakeup_delay_us = 1;
3750 smux.powerdown_enabled = 0;
Eric Holmberga9b06472012-06-22 09:46:34 -06003751 smux.power_ctl_remote_req_received = 0;
Eric Holmbergffddd4c2012-06-08 12:37:51 -06003752 INIT_LIST_HEAD(&smux.power_queue);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003753 smux.rx_activity_flag = 0;
3754 smux.tx_activity_flag = 0;
3755 smux.recv_len = 0;
3756 smux.tty = NULL;
3757 smux.ld_open_count = 0;
3758 smux.in_reset = 0;
3759 smux.is_initialized = 1;
Eric Holmberg2bf9c522012-08-09 13:23:21 -06003760 smux.platform_devs_registered = 0;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003761 smux_byte_loopback = 0;
3762
3763 spin_lock_init(&smux.tx_lock_lha2);
3764 INIT_LIST_HEAD(&smux.lch_tx_ready_list);
3765
3766 ret = tty_register_ldisc(N_SMUX, &smux_ldisc_ops);
3767 if (ret != 0) {
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06003768 SMUX_ERR("%s: error %d registering line discipline\n",
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003769 __func__, ret);
3770 return ret;
3771 }
3772
Eric Holmberg6c9f2a52012-06-14 10:49:04 -06003773 subsys_notif_register_notifier("external_modem", &ssr_notifier);
Eric Holmberged1f00c2012-06-07 09:45:18 -06003774
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003775 ret = lch_init();
3776 if (ret != 0) {
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06003777 SMUX_ERR("%s: lch_init failed\n", __func__);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003778 return ret;
3779 }
3780
Angshuman Sarkar98f78122012-07-24 14:50:42 +05303781 log_ctx = ipc_log_context_create(1, "smux");
3782 if (!log_ctx) {
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06003783 SMUX_ERR("%s: unable to create log context\n", __func__);
Angshuman Sarkar98f78122012-07-24 14:50:42 +05303784 disable_ipc_logging = 1;
3785 }
3786
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003787 return 0;
3788}
3789
3790static void __exit smux_exit(void)
3791{
3792 int ret;
3793
3794 ret = tty_unregister_ldisc(N_SMUX);
3795 if (ret != 0) {
Eric Holmbergb9c3cf72012-08-21 16:43:39 -06003796 SMUX_ERR("%s: error %d unregistering line discipline\n",
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003797 __func__, ret);
3798 return;
3799 }
3800}
3801
3802module_init(smux_init);
3803module_exit(smux_exit);
3804
3805MODULE_DESCRIPTION("Serial Mux TTY Line Discipline");
3806MODULE_LICENSE("GPL v2");
3807MODULE_ALIAS_LDISC(N_SMUX);
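/*
 * Example (illustrative, userspace side): attaching the n_smux line
 * discipline to an open serial port with the standard TIOCSETD ioctl,
 * which invokes smuxld_open() above. Assumes the N_SMUX number is visible
 * to userspace; the device path is up to the platform.
 */
#if 0
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>

static int attach_smux(const char *tty_path)
{
	int ldisc = N_SMUX;
	int fd = open(tty_path, O_RDWR | O_NOCTTY);

	if (fd < 0)
		return -1;
	if (ioctl(fd, TIOCSETD, &ldisc) < 0) {
		close(fd);
		return -1;
	}
	return fd;	/* keep fd open; closing it detaches the ldisc */
}
#endif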