/* drivers/tty/n_smux.c
 *
 * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/tty_driver.h>
#include <linux/smux.h>
#include <linux/list.h>
#include <linux/kfifo.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <mach/subsystem_notif.h>
#include <mach/subsystem_restart.h>
#include <mach/msm_serial_hs.h>
#include <mach/msm_ipc_logging.h>
#include "smux_private.h"
#include "smux_loopback.h"

#define SMUX_NOTIFY_FIFO_SIZE	128
#define SMUX_TX_QUEUE_SIZE	256
#define SMUX_PKT_LOG_SIZE	128

/* Maximum size we can accept in a single RX buffer */
#define TTY_RECEIVE_ROOM	65536
#define TTY_BUFFER_FULL_WAIT_MS	50

/* maximum sleep time between wakeup attempts */
#define SMUX_WAKEUP_DELAY_MAX	(1 << 20)

/* minimum delay for scheduling delayed work */
#define SMUX_WAKEUP_DELAY_MIN	(1 << 15)

/* inactivity timeout for no rx/tx activity */
#define SMUX_INACTIVITY_TIMEOUT_MS	1000000

/* RX get_rx_buffer retry timeout values */
#define SMUX_RX_RETRY_MIN_MS	(1 << 0)	/* 1 ms */
#define SMUX_RX_RETRY_MAX_MS	(1 << 10)	/* 1024 ms */

enum {
	MSM_SMUX_DEBUG = 1U << 0,
	MSM_SMUX_INFO = 1U << 1,
	MSM_SMUX_POWER_INFO = 1U << 2,
	MSM_SMUX_PKT = 1U << 3,
};

static int smux_debug_mask = MSM_SMUX_DEBUG | MSM_SMUX_POWER_INFO;
module_param_named(debug_mask, smux_debug_mask,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);

static int disable_ipc_logging;

/* Simulated wakeup used for testing */
int smux_byte_loopback;
module_param_named(byte_loopback, smux_byte_loopback,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);
int smux_simulate_wakeup_delay = 1;
module_param_named(simulate_wakeup_delay, smux_simulate_wakeup_delay,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);

#define IPC_LOG_STR(x...) do { \
	if (!disable_ipc_logging && log_ctx) \
		ipc_log_string(log_ctx, x); \
} while (0)

#define SMUX_DBG(x...) do { \
	if (smux_debug_mask & MSM_SMUX_DEBUG) \
		IPC_LOG_STR(x); \
} while (0)

#define SMUX_PWR(x...) do { \
	if (smux_debug_mask & MSM_SMUX_POWER_INFO) \
		IPC_LOG_STR(x); \
} while (0)

#define SMUX_PWR_PKT_RX(pkt) do { \
	if (smux_debug_mask & MSM_SMUX_POWER_INFO) \
		smux_log_pkt(pkt, 1); \
} while (0)

#define SMUX_PWR_PKT_TX(pkt) do { \
	if (smux_debug_mask & MSM_SMUX_POWER_INFO) { \
		if (pkt->hdr.cmd == SMUX_CMD_BYTE && \
			pkt->hdr.flags == SMUX_WAKEUP_ACK) \
			IPC_LOG_STR("smux: TX Wakeup ACK\n"); \
		else if (pkt->hdr.cmd == SMUX_CMD_BYTE && \
			pkt->hdr.flags == SMUX_WAKEUP_REQ) \
			IPC_LOG_STR("smux: TX Wakeup REQ\n"); \
		else \
			smux_log_pkt(pkt, 0); \
	} \
} while (0)

#define SMUX_PWR_BYTE_TX(pkt) do { \
	if (smux_debug_mask & MSM_SMUX_POWER_INFO) { \
		smux_log_pkt(pkt, 0); \
	} \
} while (0)

#define SMUX_LOG_PKT_RX(pkt) do { \
	if (smux_debug_mask & MSM_SMUX_PKT) \
		smux_log_pkt(pkt, 1); \
} while (0)

#define SMUX_LOG_PKT_TX(pkt) do { \
	if (smux_debug_mask & MSM_SMUX_PKT) \
		smux_log_pkt(pkt, 0); \
} while (0)

/**
 * Return true if channel is fully opened (both
 * local and remote sides are in the OPENED state).
 */
#define IS_FULLY_OPENED(ch) \
	(ch && (ch)->local_state == SMUX_LCH_LOCAL_OPENED \
	 && (ch)->remote_state == SMUX_LCH_REMOTE_OPENED)

static struct platform_device smux_devs[] = {
	{.name = "SMUX_CTL", .id = -1},
	{.name = "SMUX_RMNET", .id = -1},
	{.name = "SMUX_DUN_DATA_HSUART", .id = 0},
	{.name = "SMUX_RMNET_DATA_HSUART", .id = 1},
	{.name = "SMUX_RMNET_CTL_HSUART", .id = 0},
	{.name = "SMUX_DIAG", .id = -1},
};

enum {
	SMUX_CMD_STATUS_RTC = 1 << 0,
	SMUX_CMD_STATUS_RTR = 1 << 1,
	SMUX_CMD_STATUS_RI = 1 << 2,
	SMUX_CMD_STATUS_DCD = 1 << 3,
	SMUX_CMD_STATUS_FLOW_CNTL = 1 << 4,
};

/* Channel mode */
enum {
	SMUX_LCH_MODE_NORMAL,
	SMUX_LCH_MODE_LOCAL_LOOPBACK,
	SMUX_LCH_MODE_REMOTE_LOOPBACK,
};

enum {
	SMUX_RX_IDLE,
	SMUX_RX_MAGIC,
	SMUX_RX_HDR,
	SMUX_RX_PAYLOAD,
	SMUX_RX_FAILURE,
};

/**
 * Power states.
 *
 * The _FLUSH states are internal transitional states and are not part of the
 * official state machine.
 */
enum {
	SMUX_PWR_OFF,
	SMUX_PWR_TURNING_ON,
	SMUX_PWR_ON,
	SMUX_PWR_TURNING_OFF_FLUSH, /* power-off req/ack in TX queue */
	SMUX_PWR_TURNING_OFF,
	SMUX_PWR_OFF_FLUSH,
};
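
/*
 * Sketch of the power-down handshake as implemented in
 * smux_handle_rx_power_cmd() and the TX worker (the wakeup path runs in
 * reverse through SMUX_PWR_TURNING_ON):
 *
 *	SMUX_PWR_ON
 *	-> SMUX_PWR_TURNING_OFF_FLUSH (power-off req/ack queued)
 *	-> SMUX_PWR_TURNING_OFF       (req/ack written to the TTY)
 *	-> SMUX_PWR_OFF_FLUSH         (final flush via inactivity worker)
 *	-> SMUX_PWR_OFF
 */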

union notifier_metadata {
	struct smux_meta_disconnected disconnected;
	struct smux_meta_read read;
	struct smux_meta_write write;
	struct smux_meta_tiocm tiocm;
};

struct smux_notify_handle {
	void (*notify)(void *priv, int event_type, const void *metadata);
	void *priv;
	int event_type;
	union notifier_metadata *metadata;
};

/**
 * Get RX Buffer Retry structure.
 *
 * This is used for clients that are unable to provide an RX buffer
 * immediately. The packet is held here until the get_rx_buffer request
 * can be retried.
 */
struct smux_rx_pkt_retry {
	struct smux_pkt_t *pkt;
	struct list_head rx_retry_list;
	unsigned timeout_in_ms;
};
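
/*
 * Retry flow sketch (see smux_handle_rx_data_cmd() and
 * smux_rx_retry_worker()): when a client's get_rx_buffer() callback
 * returns -EAGAIN, the packet is copied into a smux_rx_pkt_retry and
 * queued on the channel's rx_retry_queue; delivery is then re-attempted
 * from the delayed worker with timeouts bounded by SMUX_RX_RETRY_MIN_MS
 * and SMUX_RX_RETRY_MAX_MS.
 */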

/**
 * Receive worker data structure.
 *
 * One instance is created for every call to smux_rx_state_machine.
 */
struct smux_rx_worker_data {
	const unsigned char *data;
	int len;
	int flag;

	struct work_struct work;
	struct completion work_complete;
};

/**
 * Line discipline and module structure.
 *
 * Only one instance since multiple instances of line discipline are not
 * allowed.
 */
struct smux_ldisc_t {
	struct mutex mutex_lha0;

	int is_initialized;
	int platform_devs_registered;
	int in_reset;
	int ld_open_count;
	struct tty_struct *tty;

	/* RX State Machine (single-threaded access by smux_rx_wq) */
	unsigned char recv_buf[SMUX_MAX_PKT_SIZE];
	unsigned int recv_len;
	unsigned int pkt_remain;
	unsigned rx_state;

	/* RX Activity - accessed by multiple threads */
	spinlock_t rx_lock_lha1;
	unsigned rx_activity_flag;

	/* TX / Power */
	spinlock_t tx_lock_lha2;
	struct list_head lch_tx_ready_list;
	unsigned power_state;
	unsigned pwr_wakeup_delay_us;
	unsigned tx_activity_flag;
	unsigned powerdown_enabled;
	unsigned power_ctl_remote_req_received;
	struct list_head power_queue;
};


/* data structures */
struct smux_lch_t smux_lch[SMUX_NUM_LOGICAL_CHANNELS];
static struct smux_ldisc_t smux;
static const char *tty_error_type[] = {
	[TTY_NORMAL] = "normal",
	[TTY_OVERRUN] = "overrun",
	[TTY_BREAK] = "break",
	[TTY_PARITY] = "parity",
	[TTY_FRAME] = "framing",
};

static const char * const smux_cmds[] = {
	[SMUX_CMD_DATA] = "DATA",
	[SMUX_CMD_OPEN_LCH] = "OPEN",
	[SMUX_CMD_CLOSE_LCH] = "CLOSE",
	[SMUX_CMD_STATUS] = "STATUS",
	[SMUX_CMD_PWR_CTL] = "PWR",
	[SMUX_CMD_BYTE] = "Raw Byte",
};

static const char * const smux_events[] = {
	[SMUX_CONNECTED] = "CONNECTED",
	[SMUX_DISCONNECTED] = "DISCONNECTED",
	[SMUX_READ_DONE] = "READ_DONE",
	[SMUX_READ_FAIL] = "READ_FAIL",
	[SMUX_WRITE_DONE] = "WRITE_DONE",
	[SMUX_WRITE_FAIL] = "WRITE_FAIL",
	[SMUX_TIOCM_UPDATE] = "TIOCM_UPDATE",
	[SMUX_LOW_WM_HIT] = "LOW_WM_HIT",
	[SMUX_HIGH_WM_HIT] = "HIGH_WM_HIT",
	[SMUX_RX_RETRY_HIGH_WM_HIT] = "RX_RETRY_HIGH_WM_HIT",
	[SMUX_RX_RETRY_LOW_WM_HIT] = "RX_RETRY_LOW_WM_HIT",
};

static const char * const smux_local_state[] = {
	[SMUX_LCH_LOCAL_CLOSED] = "CLOSED",
	[SMUX_LCH_LOCAL_OPENING] = "OPENING",
	[SMUX_LCH_LOCAL_OPENED] = "OPENED",
	[SMUX_LCH_LOCAL_CLOSING] = "CLOSING",
};

static const char * const smux_remote_state[] = {
	[SMUX_LCH_REMOTE_CLOSED] = "CLOSED",
	[SMUX_LCH_REMOTE_OPENED] = "OPENED",
};

static const char * const smux_mode[] = {
	[SMUX_LCH_MODE_NORMAL] = "N",
	[SMUX_LCH_MODE_LOCAL_LOOPBACK] = "L",
	[SMUX_LCH_MODE_REMOTE_LOOPBACK] = "R",
};

static const char * const smux_undef[] = {
	[SMUX_UNDEF_LONG] = "UNDEF",
	[SMUX_UNDEF_SHORT] = "U",
};

static void *log_ctx;
static void smux_notify_local_fn(struct work_struct *work);
static DECLARE_WORK(smux_notify_local, smux_notify_local_fn);

static struct workqueue_struct *smux_notify_wq;
static size_t handle_size;
static struct kfifo smux_notify_fifo;
static int queued_fifo_notifications;
static DEFINE_SPINLOCK(notify_lock_lhc1);

static struct workqueue_struct *smux_tx_wq;
static struct workqueue_struct *smux_rx_wq;
static void smux_tx_worker(struct work_struct *work);
static DECLARE_WORK(smux_tx_work, smux_tx_worker);

static void smux_wakeup_worker(struct work_struct *work);
static void smux_rx_retry_worker(struct work_struct *work);
static void smux_rx_worker(struct work_struct *work);
static DECLARE_WORK(smux_wakeup_work, smux_wakeup_worker);
static DECLARE_DELAYED_WORK(smux_wakeup_delayed_work, smux_wakeup_worker);

static void smux_inactivity_worker(struct work_struct *work);
static DECLARE_WORK(smux_inactivity_work, smux_inactivity_worker);
static DECLARE_DELAYED_WORK(smux_delayed_inactivity_work,
		smux_inactivity_worker);

static void list_channel(struct smux_lch_t *ch);
static int smux_send_status_cmd(struct smux_lch_t *ch);
static int smux_dispatch_rx_pkt(struct smux_pkt_t *pkt);
static void smux_flush_tty(void);
static void smux_purge_ch_tx_queue(struct smux_lch_t *ch, int is_ssr);
static int schedule_notify(uint8_t lcid, int event,
			const union notifier_metadata *metadata);
static int ssr_notifier_cb(struct notifier_block *this,
				unsigned long code,
				void *data);
static void smux_uart_power_on_atomic(void);
static int smux_rx_flow_control_updated(struct smux_lch_t *ch);
static void smux_flush_workqueues(void);
static void smux_pdev_release(struct device *dev);

/**
 * local_lch_state() - Return human readable form of local logical state.
 * @state: Local logical channel state enum.
 *
 */
const char *local_lch_state(unsigned state)
{
	if (state < ARRAY_SIZE(smux_local_state))
		return smux_local_state[state];
	else
		return smux_undef[SMUX_UNDEF_LONG];
}

/**
 * remote_lch_state() - Return human readable form of remote logical state.
 * @state: Remote logical channel state enum.
 *
 */
const char *remote_lch_state(unsigned state)
{
	if (state < ARRAY_SIZE(smux_remote_state))
		return smux_remote_state[state];
	else
		return smux_undef[SMUX_UNDEF_LONG];
}

/**
 * lch_mode() - Return human readable form of mode.
 * @mode: Mode of the logical channel.
 *
 */
const char *lch_mode(unsigned mode)
{
	if (mode < ARRAY_SIZE(smux_mode))
		return smux_mode[mode];
	else
		return smux_undef[SMUX_UNDEF_SHORT];
}

/**
 * Convert TTY Error Flags to string for logging purposes.
 *
 * @flag    TTY_* flag
 * @returns String description or NULL if unknown
 */
static const char *tty_flag_to_str(unsigned flag)
{
	if (flag < ARRAY_SIZE(tty_error_type))
		return tty_error_type[flag];
	return NULL;
}

/**
 * Convert SMUX Command to string for logging purposes.
 *
 * @cmd     SMUX command
 * @returns String description or NULL if unknown
 */
static const char *cmd_to_str(unsigned cmd)
{
	if (cmd < ARRAY_SIZE(smux_cmds))
		return smux_cmds[cmd];
	return NULL;
}

/**
 * Convert SMUX event to string for logging purposes.
 *
 * @event   SMUX event
 * @returns String description or NULL if unknown
 */
static const char *event_to_str(unsigned event)
{
	if (event < ARRAY_SIZE(smux_events))
		return smux_events[event];
	return NULL;
}

/**
 * Set the reset state due to an unrecoverable failure.
 */
static void smux_enter_reset(void)
{
	pr_err("%s: unrecoverable failure, waiting for ssr\n", __func__);
	smux.in_reset = 1;
}

/**
 * Initialize the lch_structs.
 */
static int lch_init(void)
{
	unsigned int id;
	struct smux_lch_t *ch;
	int i = 0;

	handle_size = sizeof(struct smux_notify_handle *);

	smux_notify_wq = create_singlethread_workqueue("smux_notify_wq");
	smux_tx_wq = create_singlethread_workqueue("smux_tx_wq");
	smux_rx_wq = create_singlethread_workqueue("smux_rx_wq");

	/* create_singlethread_workqueue() returns NULL on failure */
	if (!smux_notify_wq || !smux_tx_wq || !smux_rx_wq) {
		SMUX_DBG("smux: %s: create_singlethread_workqueue ENOMEM\n",
				__func__);
		return -ENOMEM;
	}

	i |= kfifo_alloc(&smux_notify_fifo,
			SMUX_NOTIFY_FIFO_SIZE * handle_size,
			GFP_KERNEL);
	i |= smux_loopback_init();

	if (i) {
		pr_err("%s: out of memory error\n", __func__);
		return -ENOMEM;
	}

	for (id = 0; id < SMUX_NUM_LOGICAL_CHANNELS; id++) {
		ch = &smux_lch[id];

		spin_lock_init(&ch->state_lock_lhb1);
		ch->lcid = id;
		ch->local_state = SMUX_LCH_LOCAL_CLOSED;
		ch->local_mode = SMUX_LCH_MODE_NORMAL;
		ch->local_tiocm = 0x0;
		ch->options = SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP;
		ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
		ch->remote_mode = SMUX_LCH_MODE_NORMAL;
		ch->remote_tiocm = 0x0;
		ch->tx_flow_control = 0;
		ch->rx_flow_control_auto = 0;
		ch->rx_flow_control_client = 0;
		ch->priv = 0;
		ch->notify = 0;
		ch->get_rx_buffer = 0;

		INIT_LIST_HEAD(&ch->rx_retry_queue);
		ch->rx_retry_queue_cnt = 0;
		INIT_DELAYED_WORK(&ch->rx_retry_work, smux_rx_retry_worker);

		spin_lock_init(&ch->tx_lock_lhb2);
		INIT_LIST_HEAD(&ch->tx_queue);
		INIT_LIST_HEAD(&ch->tx_ready_list);
		ch->tx_pending_data_cnt = 0;
		ch->notify_lwm = 0;
	}

	return 0;
}

/**
 * Empty and cleanup all SMUX logical channels for subsystem restart or line
 * discipline disconnect.
 */
static void smux_lch_purge(void)
{
	struct smux_lch_t *ch;
	unsigned long flags;
	int i;

	/* Empty TX ready list */
	spin_lock_irqsave(&smux.tx_lock_lha2, flags);
	while (!list_empty(&smux.lch_tx_ready_list)) {
		SMUX_DBG("smux: %s: emptying ready list %p\n",
				__func__, smux.lch_tx_ready_list.next);
		ch = list_first_entry(&smux.lch_tx_ready_list,
						struct smux_lch_t,
						tx_ready_list);
		list_del(&ch->tx_ready_list);
		INIT_LIST_HEAD(&ch->tx_ready_list);
	}

	/* Purge Power Queue */
	while (!list_empty(&smux.power_queue)) {
		struct smux_pkt_t *pkt;

		pkt = list_first_entry(&smux.power_queue,
						struct smux_pkt_t,
						list);
		list_del(&pkt->list);
		SMUX_DBG("smux: %s: emptying power queue pkt=%p\n",
				__func__, pkt);
		smux_free_pkt(pkt);
	}
	spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);

	/* Close all ports */
	for (i = 0; i < SMUX_NUM_LOGICAL_CHANNELS; i++) {
		ch = &smux_lch[i];
		SMUX_DBG("smux: %s: cleaning up lcid %d\n", __func__, i);

		spin_lock_irqsave(&ch->state_lock_lhb1, flags);

		/* Purge TX queue */
		spin_lock(&ch->tx_lock_lhb2);
		smux_purge_ch_tx_queue(ch, 1);
		spin_unlock(&ch->tx_lock_lhb2);

		/* Notify user of disconnect and reset channel state */
		if (ch->local_state == SMUX_LCH_LOCAL_OPENED ||
			ch->local_state == SMUX_LCH_LOCAL_CLOSING) {
			union notifier_metadata meta;

			meta.disconnected.is_ssr = smux.in_reset;
			schedule_notify(ch->lcid, SMUX_DISCONNECTED, &meta);
		}

		ch->local_state = SMUX_LCH_LOCAL_CLOSED;
		ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
		ch->remote_mode = SMUX_LCH_MODE_NORMAL;
		ch->tx_flow_control = 0;
		ch->rx_flow_control_auto = 0;
		ch->rx_flow_control_client = 0;

		/* Purge RX retry queue */
		if (ch->rx_retry_queue_cnt)
			queue_delayed_work(smux_rx_wq, &ch->rx_retry_work, 0);

		spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
	}
}

int smux_assert_lch_id(uint32_t lcid)
{
	if (lcid >= SMUX_NUM_LOGICAL_CHANNELS)
		return -ENXIO;
	else
		return 0;
}

/**
 * Log packet information for debug purposes.
 *
 * @pkt     Packet to log
 * @is_recv 1 = RX packet; 0 = TX Packet
 *
 * [DIR][LCID] [LOCAL_STATE][LOCAL_MODE]:[REMOTE_STATE][REMOTE_MODE] PKT Info
 *
 * PKT Info:
 *   [CMD] flags [flags] len [PAYLOAD_LEN]:[PAD_LEN] [Payload hex bytes]
 *
 * Direction:    R = Receive, S = Send
 * Local State:  C = Closed; c = closing; o = opening; O = Opened
 * Local Mode:   L = Local loopback; R = Remote loopback; N = Normal
 * Remote State: C = Closed; O = Opened
 * Remote Mode:  R = Remote loopback; N = Normal
 */
static void smux_log_pkt(struct smux_pkt_t *pkt, int is_recv)
{
	char logbuf[SMUX_PKT_LOG_SIZE];
	char cmd_extra[16];
	int i = 0;
	int count;
	int len;
	char local_state;
	char local_mode;
	char remote_state;
	char remote_mode;
	struct smux_lch_t *ch = NULL;
	unsigned char *data;

	if (!smux_assert_lch_id(pkt->hdr.lcid))
		ch = &smux_lch[pkt->hdr.lcid];

	if (ch) {
		switch (ch->local_state) {
		case SMUX_LCH_LOCAL_CLOSED:
			local_state = 'C';
			break;
		case SMUX_LCH_LOCAL_OPENING:
			local_state = 'o';
			break;
		case SMUX_LCH_LOCAL_OPENED:
			local_state = 'O';
			break;
		case SMUX_LCH_LOCAL_CLOSING:
			local_state = 'c';
			break;
		default:
			local_state = 'U';
			break;
		}

		switch (ch->local_mode) {
		case SMUX_LCH_MODE_LOCAL_LOOPBACK:
			local_mode = 'L';
			break;
		case SMUX_LCH_MODE_REMOTE_LOOPBACK:
			local_mode = 'R';
			break;
		case SMUX_LCH_MODE_NORMAL:
			local_mode = 'N';
			break;
		default:
			local_mode = 'U';
			break;
		}

		switch (ch->remote_state) {
		case SMUX_LCH_REMOTE_CLOSED:
			remote_state = 'C';
			break;
		case SMUX_LCH_REMOTE_OPENED:
			remote_state = 'O';
			break;

		default:
			remote_state = 'U';
			break;
		}

		switch (ch->remote_mode) {
		case SMUX_LCH_MODE_REMOTE_LOOPBACK:
			remote_mode = 'R';
			break;
		case SMUX_LCH_MODE_NORMAL:
			remote_mode = 'N';
			break;
		default:
			remote_mode = 'U';
			break;
		}
	} else {
		/* broadcast channel */
		local_state = '-';
		local_mode = '-';
		remote_state = '-';
		remote_mode = '-';
	}

	/* determine command type (ACK, etc) */
	cmd_extra[0] = '\0';
	switch (pkt->hdr.cmd) {
	case SMUX_CMD_OPEN_LCH:
		if (pkt->hdr.flags & SMUX_CMD_OPEN_ACK)
			snprintf(cmd_extra, sizeof(cmd_extra), " ACK");
		break;
	case SMUX_CMD_CLOSE_LCH:
		if (pkt->hdr.flags & SMUX_CMD_CLOSE_ACK)
			snprintf(cmd_extra, sizeof(cmd_extra), " ACK");
		break;

	case SMUX_CMD_PWR_CTL:
		if (pkt->hdr.flags & SMUX_CMD_PWR_CTL_ACK)
			snprintf(cmd_extra, sizeof(cmd_extra), " ACK");
		break;
	}

	i += snprintf(logbuf + i, SMUX_PKT_LOG_SIZE - i,
			"smux: %c%d %c%c:%c%c %s%s flags %x len %d:%d ",
			is_recv ? 'R' : 'S', pkt->hdr.lcid,
			local_state, local_mode,
			remote_state, remote_mode,
			cmd_to_str(pkt->hdr.cmd), cmd_extra, pkt->hdr.flags,
			pkt->hdr.payload_len, pkt->hdr.pad_len);

	len = (pkt->hdr.payload_len > 16) ? 16 : pkt->hdr.payload_len;
	data = (unsigned char *)pkt->payload;
	for (count = 0; count < len; count++)
		i += snprintf(logbuf + i, SMUX_PKT_LOG_SIZE - i,
				"%02x ", (unsigned)data[count]);

	IPC_LOG_STR(logbuf);
}
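
/*
 * Example output (hypothetical packet): an RX DATA packet with a 4-byte
 * payload on a fully-opened, normal-mode channel 3 is logged by
 * smux_log_pkt() as:
 *
 *	smux: R3 ON:ON DATA flags 0 len 4:0 de ad be ef
 */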

static void smux_notify_local_fn(struct work_struct *work)
{
	struct smux_notify_handle *notify_handle = NULL;
	union notifier_metadata *metadata = NULL;
	unsigned long flags;
	int i;

	for (;;) {
		/* retrieve notification */
		spin_lock_irqsave(&notify_lock_lhc1, flags);
		if (kfifo_len(&smux_notify_fifo) >= handle_size) {
			i = kfifo_out(&smux_notify_fifo,
					&notify_handle,
					handle_size);
			if (i != handle_size) {
				pr_err("%s: unable to retrieve handle %d expected %d\n",
					__func__, i, handle_size);
				spin_unlock_irqrestore(&notify_lock_lhc1,
						flags);
				break;
			}
		} else {
			spin_unlock_irqrestore(&notify_lock_lhc1, flags);
			break;
		}
		--queued_fifo_notifications;
		spin_unlock_irqrestore(&notify_lock_lhc1, flags);

		/* notify client */
		metadata = notify_handle->metadata;
		notify_handle->notify(notify_handle->priv,
			notify_handle->event_type,
			metadata);

		kfree(metadata);
		kfree(notify_handle);
	}
}

/**
 * Initialize existing packet.
 */
void smux_init_pkt(struct smux_pkt_t *pkt)
{
	memset(pkt, 0x0, sizeof(*pkt));
	pkt->hdr.magic = SMUX_MAGIC;
	INIT_LIST_HEAD(&pkt->list);
}

/**
 * Allocate and initialize packet.
 *
 * If a payload is needed, either set it directly and ensure that it's freed or
 * use smux_alloc_pkt_payload() to allocate the payload and it will be freed
 * automatically when smux_free_pkt() is called.
 */
struct smux_pkt_t *smux_alloc_pkt(void)
{
	struct smux_pkt_t *pkt;

	/* Consider a free list implementation instead of kmalloc */
	pkt = kmalloc(sizeof(struct smux_pkt_t), GFP_ATOMIC);
	if (!pkt) {
		pr_err("%s: out of memory\n", __func__);
		return NULL;
	}
	smux_init_pkt(pkt);
	pkt->allocated = 1;

	return pkt;
}

/**
 * Free packet.
 *
 * @pkt Packet to free (may be NULL)
 *
 * If payload was allocated using smux_alloc_pkt_payload(), then it is freed as
 * well. Otherwise, the caller is responsible for freeing the payload.
 */
void smux_free_pkt(struct smux_pkt_t *pkt)
{
	if (pkt) {
		if (pkt->free_payload)
			kfree(pkt->payload);
		if (pkt->allocated)
			kfree(pkt);
	}
}

/**
 * Allocate packet payload.
 *
 * @pkt Packet to add payload to
 *
 * @returns 0 on success, <0 upon error
 *
 * A flag is set to signal smux_free_pkt() to free the payload.
 */
int smux_alloc_pkt_payload(struct smux_pkt_t *pkt)
{
	if (!pkt)
		return -EINVAL;

	pkt->payload = kmalloc(pkt->hdr.payload_len, GFP_ATOMIC);
	pkt->free_payload = 1;
	if (!pkt->payload) {
		pr_err("%s: unable to malloc %d bytes for payload\n",
				__func__, pkt->hdr.payload_len);
		return -ENOMEM;
	}

	return 0;
}
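
/*
 * Minimal usage sketch for the packet helpers above (hypothetical
 * caller; error handling trimmed). smux_free_pkt() also frees the
 * payload because smux_alloc_pkt_payload() sets free_payload:
 *
 *	struct smux_pkt_t *pkt = smux_alloc_pkt();
 *
 *	pkt->hdr.cmd = SMUX_CMD_DATA;
 *	pkt->hdr.lcid = lcid;
 *	pkt->hdr.payload_len = len;
 *	if (smux_alloc_pkt_payload(pkt)) {
 *		smux_free_pkt(pkt);
 *		return -ENOMEM;
 *	}
 *	memcpy(pkt->payload, data, len);
 *	smux_tx_queue(pkt, ch, 1);
 */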

static int schedule_notify(uint8_t lcid, int event,
			const union notifier_metadata *metadata)
{
	struct smux_notify_handle *notify_handle = 0;
	union notifier_metadata *meta_copy = 0;
	struct smux_lch_t *ch;
	int i;
	unsigned long flags;
	int ret = 0;

	IPC_LOG_STR("smux: %s ch:%d\n", event_to_str(event), lcid);
	ch = &smux_lch[lcid];
	notify_handle = kzalloc(sizeof(struct smux_notify_handle),
						GFP_ATOMIC);
	if (!notify_handle) {
		pr_err("%s: out of memory\n", __func__);
		ret = -ENOMEM;
		goto free_out;
	}

	notify_handle->notify = ch->notify;
	notify_handle->priv = ch->priv;
	notify_handle->event_type = event;
	if (metadata) {
		meta_copy = kzalloc(sizeof(union notifier_metadata),
						GFP_ATOMIC);
		if (!meta_copy) {
			pr_err("%s: out of memory\n", __func__);
			ret = -ENOMEM;
			goto free_out;
		}
		*meta_copy = *metadata;
		notify_handle->metadata = meta_copy;
	} else {
		notify_handle->metadata = NULL;
	}

	spin_lock_irqsave(&notify_lock_lhc1, flags);
	i = kfifo_avail(&smux_notify_fifo);
	if (i < handle_size) {
		pr_err("%s: fifo full error %d expected %d\n",
					__func__, i, handle_size);
		ret = -ENOMEM;
		goto unlock_out;
	}

	i = kfifo_in(&smux_notify_fifo, &notify_handle, handle_size);
	if (i < 0 || i != handle_size) {
		pr_err("%s: fifo not available error %d (expected %d)\n",
				__func__, i, handle_size);
		ret = -ENOSPC;
		goto unlock_out;
	}
	++queued_fifo_notifications;

unlock_out:
	spin_unlock_irqrestore(&notify_lock_lhc1, flags);

free_out:
	queue_work(smux_notify_wq, &smux_notify_local);
	if (ret < 0 && notify_handle) {
		kfree(notify_handle->metadata);
		kfree(notify_handle);
	}
	return ret;
}

/**
 * Returns the serialized size of a packet.
 *
 * @pkt Packet to serialize
 *
 * @returns Serialized length of packet
 */
static unsigned int smux_serialize_size(struct smux_pkt_t *pkt)
{
	unsigned int size;

	size = sizeof(struct smux_hdr_t);
	size += pkt->hdr.payload_len;
	size += pkt->hdr.pad_len;

	return size;
}
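
/*
 * Example (hypothetical values): payload_len = 10 and pad_len = 2
 * serialize to sizeof(struct smux_hdr_t) + 12 bytes on the wire:
 * the header, then the payload, then zero-filled padding.
 */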

/**
 * Serialize packet @pkt into output buffer @out.
 *
 * @pkt     Packet to serialize
 * @out     Destination buffer pointer
 * @out_len Size of serialized packet
 *
 * @returns 0 for success
 */
int smux_serialize(struct smux_pkt_t *pkt, char *out,
					unsigned int *out_len)
{
	char *data_start = out;

	if (smux_serialize_size(pkt) > SMUX_MAX_PKT_SIZE) {
		pr_err("%s: packet size %d too big\n",
				__func__, smux_serialize_size(pkt));
		return -E2BIG;
	}

	memcpy(out, &pkt->hdr, sizeof(struct smux_hdr_t));
	out += sizeof(struct smux_hdr_t);
	if (pkt->payload) {
		memcpy(out, pkt->payload, pkt->hdr.payload_len);
		out += pkt->hdr.payload_len;
	}
	if (pkt->hdr.pad_len) {
		memset(out, 0x0, pkt->hdr.pad_len);
		out += pkt->hdr.pad_len;
	}
	*out_len = out - data_start;
	return 0;
}

/**
 * Serialize header and provide pointer to the data.
 *
 * @pkt          Packet
 * @out[out]     Pointer to the serialized header data
 * @out_len[out] Pointer to the serialized header length
 */
static void smux_serialize_hdr(struct smux_pkt_t *pkt, char **out,
					unsigned int *out_len)
{
	*out = (char *)&pkt->hdr;
	*out_len = sizeof(struct smux_hdr_t);
}

/**
 * Serialize payload and provide pointer to the data.
 *
 * @pkt          Packet
 * @out[out]     Pointer to the serialized payload data
 * @out_len[out] Pointer to the serialized payload length
 */
static void smux_serialize_payload(struct smux_pkt_t *pkt, char **out,
					unsigned int *out_len)
{
	*out = pkt->payload;
	*out_len = pkt->hdr.payload_len;
}

/**
 * Serialize padding and provide pointer to the data.
 *
 * @pkt          Packet
 * @out[out]     Pointer to the serialized padding (always NULL)
 * @out_len[out] Pointer to the serialized padding length
 *
 * Since the padding field value is undefined, only the size of the padding
 * (@out_len) is set and the buffer pointer (@out) will always be NULL.
 */
static void smux_serialize_padding(struct smux_pkt_t *pkt, char **out,
					unsigned int *out_len)
{
	*out = NULL;
	*out_len = pkt->hdr.pad_len;
}

/**
 * Write data to TTY framework and handle breaking the writes up if needed.
 *
 * @data Data to write
 * @len  Length of data
 *
 * @returns 0 for success, < 0 for failure
 */
static int write_to_tty(char *data, unsigned len)
{
	int data_written;

	if (!data)
		return 0;

	while (len > 0 && !smux.in_reset) {
		data_written = smux.tty->ops->write(smux.tty, data, len);
		if (data_written >= 0) {
			len -= data_written;
			data += data_written;
		} else {
			pr_err("%s: TTY write returned error %d\n",
					__func__, data_written);
			return data_written;
		}

		if (len)
			tty_wait_until_sent(smux.tty,
				msecs_to_jiffies(TTY_BUFFER_FULL_WAIT_MS));
	}
	return 0;
}

/**
 * Write packet to TTY.
 *
 * @pkt packet to write
 *
 * @returns 0 on success
 */
static int smux_tx_tty(struct smux_pkt_t *pkt)
{
	char *data;
	unsigned int len;
	int ret;

	if (!smux.tty) {
		pr_err("%s: TTY not initialized", __func__);
		return -ENOTTY;
	}

	if (pkt->hdr.cmd == SMUX_CMD_BYTE) {
		SMUX_DBG("smux: %s: tty send single byte\n", __func__);
		ret = write_to_tty(&pkt->hdr.flags, 1);
		return ret;
	}

	smux_serialize_hdr(pkt, &data, &len);
	ret = write_to_tty(data, len);
	if (ret) {
		pr_err("%s: failed %d to write header %d\n",
				__func__, ret, len);
		return ret;
	}

	smux_serialize_payload(pkt, &data, &len);
	ret = write_to_tty(data, len);
	if (ret) {
		pr_err("%s: failed %d to write payload %d\n",
				__func__, ret, len);
		return ret;
	}

	smux_serialize_padding(pkt, &data, &len);
	while (len > 0) {
		char zero = 0x0;
		ret = write_to_tty(&zero, 1);
		if (ret) {
			pr_err("%s: failed %d to write padding %d\n",
					__func__, ret, len);
			return ret;
		}
		--len;
	}
	return 0;
}

/**
 * Send a single character.
 *
 * @ch Character to send
 */
static void smux_send_byte(char ch)
{
	struct smux_pkt_t *pkt;

	pkt = smux_alloc_pkt();
	if (!pkt) {
		pr_err("%s: alloc failure for byte %x\n", __func__, ch);
		return;
	}
	pkt->hdr.cmd = SMUX_CMD_BYTE;
	pkt->hdr.flags = ch;
	pkt->hdr.lcid = SMUX_BROADCAST_LCID;

	list_add_tail(&pkt->list, &smux.power_queue);
	queue_work(smux_tx_wq, &smux_tx_work);
}

/**
 * Receive a single-character packet (used for internal testing).
 *
 * @ch   Character to receive
 * @lcid Logical channel ID for packet
 *
 * @returns 0 for success
 */
static int smux_receive_byte(char ch, int lcid)
{
	struct smux_pkt_t pkt;

	smux_init_pkt(&pkt);
	pkt.hdr.lcid = lcid;
	pkt.hdr.cmd = SMUX_CMD_BYTE;
	pkt.hdr.flags = ch;

	return smux_dispatch_rx_pkt(&pkt);
}

/**
 * Queue packet for transmit.
 *
 * @pkt_ptr Packet to queue
 * @ch      Channel to queue packet on
 * @queue   Queue channel on ready list
 */
static void smux_tx_queue(struct smux_pkt_t *pkt_ptr, struct smux_lch_t *ch,
		int queue)
{
	unsigned long flags;

	SMUX_DBG("smux: %s: queuing pkt %p\n", __func__, pkt_ptr);

	spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
	list_add_tail(&pkt_ptr->list, &ch->tx_queue);
	spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);

	if (queue)
		list_channel(ch);
}

/**
 * Handle receive OPEN ACK command.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_open_ack(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret;
	struct smux_lch_t *ch;
	int enable_powerdown = 0;

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];

	spin_lock(&ch->state_lock_lhb1);
	if (ch->local_state == SMUX_LCH_LOCAL_OPENING) {
		SMUX_DBG("smux: lcid %d local state 0x%x -> 0x%x\n", lcid,
				ch->local_state,
				SMUX_LCH_LOCAL_OPENED);

		if (pkt->hdr.flags & SMUX_CMD_OPEN_POWER_COLLAPSE)
			enable_powerdown = 1;

		ch->local_state = SMUX_LCH_LOCAL_OPENED;
		if (ch->remote_state == SMUX_LCH_REMOTE_OPENED)
			schedule_notify(lcid, SMUX_CONNECTED, NULL);
		ret = 0;
	} else if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
		SMUX_DBG("smux: Remote loopback OPEN ACK received\n");
		ret = 0;
	} else {
		pr_err("%s: lcid %d state 0x%x open ack invalid\n",
				__func__, lcid, ch->local_state);
		ret = -EINVAL;
	}
	spin_unlock(&ch->state_lock_lhb1);

	if (enable_powerdown) {
		spin_lock(&smux.tx_lock_lha2);
		if (!smux.powerdown_enabled) {
			smux.powerdown_enabled = 1;
			SMUX_DBG("smux: %s: enabling power-collapse support\n",
					__func__);
		}
		spin_unlock(&smux.tx_lock_lha2);
	}

	return ret;
}

static int smux_handle_close_ack(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret;
	struct smux_lch_t *ch;
	union notifier_metadata meta_disconnected;
	unsigned long flags;

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];
	meta_disconnected.disconnected.is_ssr = 0;

	spin_lock_irqsave(&ch->state_lock_lhb1, flags);

	if (ch->local_state == SMUX_LCH_LOCAL_CLOSING) {
		SMUX_DBG("smux: lcid %d local state 0x%x -> 0x%x\n", lcid,
				SMUX_LCH_LOCAL_CLOSING,
				SMUX_LCH_LOCAL_CLOSED);
		ch->local_state = SMUX_LCH_LOCAL_CLOSED;
		if (ch->remote_state == SMUX_LCH_REMOTE_CLOSED)
			schedule_notify(lcid, SMUX_DISCONNECTED,
				&meta_disconnected);
		ret = 0;
	} else if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
		SMUX_DBG("smux: Remote loopback CLOSE ACK received\n");
		ret = 0;
	} else {
		pr_err("%s: lcid %d state 0x%x close ack invalid\n",
				__func__, lcid, ch->local_state);
		ret = -EINVAL;
	}
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
	return ret;
}

/**
 * Handle receive OPEN command.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_open_cmd(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret;
	struct smux_lch_t *ch;
	struct smux_pkt_t *ack_pkt;
	unsigned long flags;
	int tx_ready = 0;
	int enable_powerdown = 0;

	if (pkt->hdr.flags & SMUX_CMD_OPEN_ACK)
		return smux_handle_rx_open_ack(pkt);

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];

	spin_lock_irqsave(&ch->state_lock_lhb1, flags);

	if (ch->remote_state == SMUX_LCH_REMOTE_CLOSED) {
		SMUX_DBG("smux: lcid %d remote state 0x%x -> 0x%x\n", lcid,
				SMUX_LCH_REMOTE_CLOSED,
				SMUX_LCH_REMOTE_OPENED);

		ch->remote_state = SMUX_LCH_REMOTE_OPENED;
		if (pkt->hdr.flags & SMUX_CMD_OPEN_POWER_COLLAPSE)
			enable_powerdown = 1;

		/* Send Open ACK */
		ack_pkt = smux_alloc_pkt();
		if (!ack_pkt) {
			/* exit out to allow retrying this later */
			ret = -ENOMEM;
			goto out;
		}
		ack_pkt->hdr.cmd = SMUX_CMD_OPEN_LCH;
		ack_pkt->hdr.flags = SMUX_CMD_OPEN_ACK
			| SMUX_CMD_OPEN_POWER_COLLAPSE;
		ack_pkt->hdr.lcid = lcid;
		ack_pkt->hdr.payload_len = 0;
		ack_pkt->hdr.pad_len = 0;
		if (pkt->hdr.flags & SMUX_CMD_OPEN_REMOTE_LOOPBACK) {
			ch->remote_mode = SMUX_LCH_MODE_REMOTE_LOOPBACK;
			ack_pkt->hdr.flags |= SMUX_CMD_OPEN_REMOTE_LOOPBACK;
		}
		smux_tx_queue(ack_pkt, ch, 0);
		tx_ready = 1;

		if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
			/*
			 * Send an Open command to the remote side to
			 * simulate our local client doing it.
			 */
			ack_pkt = smux_alloc_pkt();
			if (ack_pkt) {
				ack_pkt->hdr.lcid = lcid;
				ack_pkt->hdr.cmd = SMUX_CMD_OPEN_LCH;
				ack_pkt->hdr.flags =
					SMUX_CMD_OPEN_POWER_COLLAPSE;
				ack_pkt->hdr.payload_len = 0;
				ack_pkt->hdr.pad_len = 0;
				smux_tx_queue(ack_pkt, ch, 0);
				tx_ready = 1;
			} else {
				pr_err("%s: Remote loopback allocation failure\n",
						__func__);
			}
		} else if (ch->local_state == SMUX_LCH_LOCAL_OPENED) {
			schedule_notify(lcid, SMUX_CONNECTED, NULL);
		}
		ret = 0;
	} else {
		pr_err("%s: lcid %d remote state 0x%x open invalid\n",
				__func__, lcid, ch->remote_state);
		ret = -EINVAL;
	}

out:
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);

	if (enable_powerdown) {
		spin_lock_irqsave(&smux.tx_lock_lha2, flags);
		if (!smux.powerdown_enabled) {
			smux.powerdown_enabled = 1;
			SMUX_DBG("smux: %s: enabling power-collapse support\n",
					__func__);
		}
		spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
	}

	if (tx_ready)
		list_channel(ch);

	return ret;
}

/**
 * Handle receive CLOSE command.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_close_cmd(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret;
	struct smux_lch_t *ch;
	struct smux_pkt_t *ack_pkt;
	union notifier_metadata meta_disconnected;
	unsigned long flags;
	int tx_ready = 0;

	if (pkt->hdr.flags & SMUX_CMD_CLOSE_ACK)
		return smux_handle_close_ack(pkt);

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];
	meta_disconnected.disconnected.is_ssr = 0;

	spin_lock_irqsave(&ch->state_lock_lhb1, flags);
	if (ch->remote_state == SMUX_LCH_REMOTE_OPENED) {
		SMUX_DBG("smux: lcid %d remote state 0x%x -> 0x%x\n", lcid,
				SMUX_LCH_REMOTE_OPENED,
				SMUX_LCH_REMOTE_CLOSED);

		ack_pkt = smux_alloc_pkt();
		if (!ack_pkt) {
			/* exit out to allow retrying this later */
			ret = -ENOMEM;
			goto out;
		}
		ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
		ack_pkt->hdr.cmd = SMUX_CMD_CLOSE_LCH;
		ack_pkt->hdr.flags = SMUX_CMD_CLOSE_ACK;
		ack_pkt->hdr.lcid = lcid;
		ack_pkt->hdr.payload_len = 0;
		ack_pkt->hdr.pad_len = 0;
		smux_tx_queue(ack_pkt, ch, 0);
		tx_ready = 1;

		if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
			/*
			 * Send a Close command to the remote side to simulate
			 * our local client doing it.
			 */
			ack_pkt = smux_alloc_pkt();
			if (ack_pkt) {
				ack_pkt->hdr.lcid = lcid;
				ack_pkt->hdr.cmd = SMUX_CMD_CLOSE_LCH;
				ack_pkt->hdr.flags = 0;
				ack_pkt->hdr.payload_len = 0;
				ack_pkt->hdr.pad_len = 0;
				smux_tx_queue(ack_pkt, ch, 0);
				tx_ready = 1;
			} else {
				pr_err("%s: Remote loopback allocation failure\n",
						__func__);
			}
		}

		if (ch->local_state == SMUX_LCH_LOCAL_CLOSED)
			schedule_notify(lcid, SMUX_DISCONNECTED,
				&meta_disconnected);
		ret = 0;
	} else {
		pr_err("%s: lcid %d remote state 0x%x close invalid\n",
				__func__, lcid, ch->remote_state);
		ret = -EINVAL;
	}
out:
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
	if (tx_ready)
		list_channel(ch);

	return ret;
}

/**
 * Handle receive DATA command.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_data_cmd(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret = 0;
	int do_retry = 0;
	int tx_ready = 0;
	int tmp;
	int rx_len;
	struct smux_lch_t *ch;
	union notifier_metadata metadata;
	int remote_loopback;
	struct smux_pkt_t *ack_pkt;
	unsigned long flags;

	if (!pkt || smux_assert_lch_id(pkt->hdr.lcid)) {
		ret = -ENXIO;
		goto out;
	}

	rx_len = pkt->hdr.payload_len;
	if (rx_len == 0) {
		ret = -EINVAL;
		goto out;
	}

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];
	spin_lock_irqsave(&ch->state_lock_lhb1, flags);
	remote_loopback = ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK;

	if (ch->local_state != SMUX_LCH_LOCAL_OPENED
		&& !remote_loopback) {
		pr_err("smux: ch %d error data on local state 0x%x",
					lcid, ch->local_state);
		ret = -EIO;
		spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
		goto out;
	}

	if (ch->remote_state != SMUX_LCH_REMOTE_OPENED) {
		pr_err("smux: ch %d error data on remote state 0x%x",
					lcid, ch->remote_state);
		ret = -EIO;
		spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
		goto out;
	}

	if (!list_empty(&ch->rx_retry_queue)) {
		do_retry = 1;

		if ((ch->options & SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP) &&
			!ch->rx_flow_control_auto &&
			((ch->rx_retry_queue_cnt + 1) >= SMUX_RX_WM_HIGH)) {
			/* need to flow control RX */
			ch->rx_flow_control_auto = 1;
			tx_ready |= smux_rx_flow_control_updated(ch);
			schedule_notify(ch->lcid, SMUX_RX_RETRY_HIGH_WM_HIT,
					NULL);
		}
		if ((ch->rx_retry_queue_cnt + 1) > SMUX_RX_RETRY_MAX_PKTS) {
			/* retry queue full */
			pr_err("%s: ch %d RX retry queue full\n",
					__func__, lcid);
			schedule_notify(lcid, SMUX_READ_FAIL, NULL);
			ret = -ENOMEM;
			spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
			goto out;
		}
	}
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);

	if (remote_loopback) {
		/* Echo the data back to the remote client. */
		ack_pkt = smux_alloc_pkt();
		if (ack_pkt) {
			ack_pkt->hdr.lcid = lcid;
			ack_pkt->hdr.cmd = SMUX_CMD_DATA;
			ack_pkt->hdr.flags = 0;
			ack_pkt->hdr.payload_len = pkt->hdr.payload_len;
			if (ack_pkt->hdr.payload_len) {
				smux_alloc_pkt_payload(ack_pkt);
				memcpy(ack_pkt->payload, pkt->payload,
						ack_pkt->hdr.payload_len);
			}
			ack_pkt->hdr.pad_len = pkt->hdr.pad_len;
			smux_tx_queue(ack_pkt, ch, 0);
			tx_ready = 1;
		} else {
			pr_err("%s: Remote loopback allocation failure\n",
					__func__);
		}
	} else if (!do_retry) {
		/* request buffer from client */
		metadata.read.pkt_priv = 0;
		metadata.read.buffer = 0;
		tmp = ch->get_rx_buffer(ch->priv,
				(void **)&metadata.read.pkt_priv,
				(void **)&metadata.read.buffer,
				rx_len);

		if (tmp == 0 && metadata.read.buffer) {
			/* place data into RX buffer */
			memcpy(metadata.read.buffer, pkt->payload,
					rx_len);
			metadata.read.len = rx_len;
			schedule_notify(lcid, SMUX_READ_DONE,
							&metadata);
		} else if (tmp == -EAGAIN ||
				(tmp == 0 && !metadata.read.buffer)) {
			/* buffer allocation failed - add to retry queue */
			do_retry = 1;
		} else if (tmp < 0) {
			pr_err("%s: ch %d Client RX buffer alloc failed %d\n",
					__func__, lcid, tmp);
			schedule_notify(lcid, SMUX_READ_FAIL, NULL);
			ret = -ENOMEM;
		}
	}

	if (do_retry) {
		struct smux_rx_pkt_retry *retry;

		retry = kmalloc(sizeof(struct smux_rx_pkt_retry), GFP_KERNEL);
		if (!retry) {
			pr_err("%s: retry alloc failure\n", __func__);
			ret = -ENOMEM;
			schedule_notify(lcid, SMUX_READ_FAIL, NULL);
			goto out;
		}
		INIT_LIST_HEAD(&retry->rx_retry_list);
		retry->timeout_in_ms = SMUX_RX_RETRY_MIN_MS;

		/* copy packet */
		retry->pkt = smux_alloc_pkt();
		if (!retry->pkt) {
			kfree(retry);
			pr_err("%s: pkt alloc failure\n", __func__);
			ret = -ENOMEM;
			schedule_notify(lcid, SMUX_READ_FAIL, NULL);
			goto out;
		}
		retry->pkt->hdr.lcid = lcid;
		retry->pkt->hdr.payload_len = pkt->hdr.payload_len;
		retry->pkt->hdr.pad_len = pkt->hdr.pad_len;
		if (retry->pkt->hdr.payload_len) {
			smux_alloc_pkt_payload(retry->pkt);
			memcpy(retry->pkt->payload, pkt->payload,
					retry->pkt->hdr.payload_len);
		}

		/* add to retry queue */
		spin_lock_irqsave(&ch->state_lock_lhb1, flags);
		list_add_tail(&retry->rx_retry_list, &ch->rx_retry_queue);
		++ch->rx_retry_queue_cnt;
		if (ch->rx_retry_queue_cnt == 1)
			queue_delayed_work(smux_rx_wq, &ch->rx_retry_work,
				msecs_to_jiffies(retry->timeout_in_ms));
		spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
	}

	if (tx_ready)
		list_channel(ch);
out:
	return ret;
}
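
/*
 * Summary of the get_rx_buffer() contract as handled above: returning 0
 * with a valid buffer delivers the payload via SMUX_READ_DONE; -EAGAIN
 * (or 0 with a NULL buffer) queues the packet on the RX retry queue;
 * any other negative return fails the read with SMUX_READ_FAIL.
 */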

/**
 * Handle receive byte command for testing purposes.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_byte_cmd(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret;
	struct smux_lch_t *ch;
	union notifier_metadata metadata;
	unsigned long flags;

	if (!pkt || smux_assert_lch_id(pkt->hdr.lcid)) {
		pr_err("%s: invalid packet or channel id\n", __func__);
		return -ENXIO;
	}

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];
	spin_lock_irqsave(&ch->state_lock_lhb1, flags);

	if (ch->local_state != SMUX_LCH_LOCAL_OPENED) {
		pr_err("smux: ch %d error data on local state 0x%x",
				lcid, ch->local_state);
		ret = -EIO;
		goto out;
	}

	if (ch->remote_state != SMUX_LCH_REMOTE_OPENED) {
		pr_err("smux: ch %d error data on remote state 0x%x",
				lcid, ch->remote_state);
		ret = -EIO;
		goto out;
	}

	metadata.read.pkt_priv = (void *)(int)pkt->hdr.flags;
	metadata.read.buffer = 0;
	schedule_notify(lcid, SMUX_READ_DONE, &metadata);
	ret = 0;

out:
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
	return ret;
}

/**
 * Handle receive status command.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_status_cmd(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret = 0;
	struct smux_lch_t *ch;
	union notifier_metadata meta;
	unsigned long flags;
	int tx_ready = 0;

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];

	spin_lock_irqsave(&ch->state_lock_lhb1, flags);
	meta.tiocm.tiocm_old = ch->remote_tiocm;
	meta.tiocm.tiocm_new = pkt->hdr.flags;

	/* update logical channel flow control */
	if ((meta.tiocm.tiocm_old & SMUX_CMD_STATUS_FLOW_CNTL) ^
		(meta.tiocm.tiocm_new & SMUX_CMD_STATUS_FLOW_CNTL)) {
		/* logical channel flow control changed */
		if (pkt->hdr.flags & SMUX_CMD_STATUS_FLOW_CNTL) {
			/* disabled TX */
			SMUX_DBG("smux: TX Flow control enabled\n");
			ch->tx_flow_control = 1;
		} else {
			/* re-enable channel */
			SMUX_DBG("smux: TX Flow control disabled\n");
			ch->tx_flow_control = 0;
			tx_ready = 1;
		}
	}
	meta.tiocm.tiocm_old = msm_smux_tiocm_get_atomic(ch);
	ch->remote_tiocm = pkt->hdr.flags;
	meta.tiocm.tiocm_new = msm_smux_tiocm_get_atomic(ch);

	/* client notification for status change */
	if (IS_FULLY_OPENED(ch)) {
		if (meta.tiocm.tiocm_old != meta.tiocm.tiocm_new)
			schedule_notify(lcid, SMUX_TIOCM_UPDATE, &meta);
		ret = 0;
	}
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
	if (tx_ready)
		list_channel(ch);

	return ret;
}

/**
 * Handle receive power command.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_power_cmd(struct smux_pkt_t *pkt)
{
	struct smux_pkt_t *ack_pkt = NULL;
	int power_down = 0;
	unsigned long flags;

	SMUX_PWR_PKT_RX(pkt);

	spin_lock_irqsave(&smux.tx_lock_lha2, flags);
	if (pkt->hdr.flags & SMUX_CMD_PWR_CTL_ACK) {
		/* local sleep request ack */
		if (smux.power_state == SMUX_PWR_TURNING_OFF)
			/* Power-down complete, turn off UART */
			power_down = 1;
		else
			pr_err("%s: sleep request ack invalid in state %d\n",
					__func__, smux.power_state);
	} else {
		/*
		 * Remote sleep request
		 *
		 * Even if we have data pending, we need to transition to the
		 * POWER_OFF state and then perform a wakeup since the remote
		 * side has requested a power-down.
		 *
		 * The state here is set to SMUX_PWR_TURNING_OFF_FLUSH and
		 * the TX thread will set the state to SMUX_PWR_TURNING_OFF
		 * when it sends the packet.
		 *
		 * If we are already powering down, then no ACK is sent.
		 */
		if (smux.power_state == SMUX_PWR_ON) {
			ack_pkt = smux_alloc_pkt();
			if (ack_pkt) {
				SMUX_PWR("smux: %s: Power %d->%d\n", __func__,
						smux.power_state,
						SMUX_PWR_TURNING_OFF_FLUSH);

				smux.power_state = SMUX_PWR_TURNING_OFF_FLUSH;

				/* send power-down ack */
				ack_pkt->hdr.cmd = SMUX_CMD_PWR_CTL;
				ack_pkt->hdr.flags = SMUX_CMD_PWR_CTL_ACK;
				ack_pkt->hdr.lcid = SMUX_BROADCAST_LCID;
				list_add_tail(&ack_pkt->list,
						&smux.power_queue);
				queue_work(smux_tx_wq, &smux_tx_work);
			}
		} else if (smux.power_state == SMUX_PWR_TURNING_OFF_FLUSH) {
			/* Local power-down request still in TX queue */
			SMUX_PWR("smux: %s: Power-down shortcut - no ack\n",
					__func__);
			smux.power_ctl_remote_req_received = 1;
		} else if (smux.power_state == SMUX_PWR_TURNING_OFF) {
			/*
			 * Local power-down request already sent to remote
			 * side, so this request gets treated as an ACK.
			 */
			SMUX_PWR("smux: %s: Power-down shortcut - no ack\n",
					__func__);
			power_down = 1;
		} else {
			pr_err("%s: sleep request invalid in state %d\n",
					__func__, smux.power_state);
		}
	}

	if (power_down) {
		SMUX_PWR("smux: %s: Power %d->%d\n", __func__,
				smux.power_state, SMUX_PWR_OFF_FLUSH);
		smux.power_state = SMUX_PWR_OFF_FLUSH;
		queue_work(smux_tx_wq, &smux_inactivity_work);
	}
	spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001781
1782 return 0;
1783}
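/*
 * Worked example of the power handshake around this handler, local
 * inactivity case (a sketch derived from the code above and the TX
 * worker, not from a protocol spec):
 *
 *	SMUX_PWR_ON
 *	  -> SMUX_PWR_TURNING_OFF_FLUSH  (inactivity: sleep request queued)
 *	  -> SMUX_PWR_TURNING_OFF        (TX worker transmitted the request)
 *	  -> SMUX_PWR_OFF_FLUSH          (sleep request ACK received here)
 *	  -> SMUX_PWR_OFF                (inactivity worker flushes the TTY
 *					  and powers the UART off)
 *
 * The two "shortcut" branches cover the race where both sides request
 * sleep at nearly the same time: the peer's request is treated as the
 * ACK (or flagged via power_ctl_remote_req_received) and no duplicate
 * ACK is transmitted, so the link still settles in the powered-off
 * state.
 */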
1784
1785/**
1786 * Handle dispatching a completed packet for receive processing.
1787 *
1788 * @pkt Packet to process
1789 *
1790 * @returns 0 for success
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001791 */
1792static int smux_dispatch_rx_pkt(struct smux_pkt_t *pkt)
1793{
Eric Holmbergf9622662012-06-13 15:55:45 -06001794 int ret = -ENXIO;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001795
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001796 switch (pkt->hdr.cmd) {
1797 case SMUX_CMD_OPEN_LCH:
Eric Holmberg4dd6e1a2012-06-26 13:46:29 -06001798 SMUX_LOG_PKT_RX(pkt);
Eric Holmbergf9622662012-06-13 15:55:45 -06001799 if (smux_assert_lch_id(pkt->hdr.lcid)) {
1800 pr_err("%s: invalid channel id %d\n",
1801 __func__, pkt->hdr.lcid);
1802 break;
1803 }
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001804 ret = smux_handle_rx_open_cmd(pkt);
1805 break;
1806
1807 case SMUX_CMD_DATA:
Eric Holmberg4dd6e1a2012-06-26 13:46:29 -06001808 SMUX_LOG_PKT_RX(pkt);
Eric Holmbergf9622662012-06-13 15:55:45 -06001809 if (smux_assert_lch_id(pkt->hdr.lcid)) {
1810 pr_err("%s: invalid channel id %d\n",
1811 __func__, pkt->hdr.lcid);
1812 break;
1813 }
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001814 ret = smux_handle_rx_data_cmd(pkt);
1815 break;
1816
1817 case SMUX_CMD_CLOSE_LCH:
Eric Holmberg4dd6e1a2012-06-26 13:46:29 -06001818 SMUX_LOG_PKT_RX(pkt);
Eric Holmbergf9622662012-06-13 15:55:45 -06001819 if (smux_assert_lch_id(pkt->hdr.lcid)) {
1820 pr_err("%s: invalid channel id %d\n",
1821 __func__, pkt->hdr.lcid);
1822 break;
1823 }
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001824 ret = smux_handle_rx_close_cmd(pkt);
1825 break;
1826
1827 case SMUX_CMD_STATUS:
Eric Holmberg4dd6e1a2012-06-26 13:46:29 -06001828 SMUX_LOG_PKT_RX(pkt);
Eric Holmbergf9622662012-06-13 15:55:45 -06001829 if (smux_assert_lch_id(pkt->hdr.lcid)) {
1830 pr_err("%s: invalid channel id %d\n",
1831 __func__, pkt->hdr.lcid);
1832 break;
1833 }
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001834 ret = smux_handle_rx_status_cmd(pkt);
1835 break;
1836
1837 case SMUX_CMD_PWR_CTL:
1838 ret = smux_handle_rx_power_cmd(pkt);
1839 break;
1840
1841 case SMUX_CMD_BYTE:
Eric Holmberg4dd6e1a2012-06-26 13:46:29 -06001842 SMUX_LOG_PKT_RX(pkt);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001843 ret = smux_handle_rx_byte_cmd(pkt);
1844 break;
1845
1846 default:
Eric Holmberg4dd6e1a2012-06-26 13:46:29 -06001847 SMUX_LOG_PKT_RX(pkt);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001848 pr_err("%s: command %d unknown\n", __func__, pkt->hdr.cmd);
1849 ret = -EINVAL;
1850 }
1851 return ret;
1852}
1853
1854/**
1855 * Deserializes a packet and dispatches it to the packet receive logic.
1856 *
1857 * @data Raw data for one packet
1858 * @len Length of the data
1859 *
1860 * @returns 0 for success
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001861 */
1862static int smux_deserialize(unsigned char *data, int len)
1863{
1864 struct smux_pkt_t recv;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001865
1866 smux_init_pkt(&recv);
1867
1868 /*
1869 * It may be possible to optimize this to not use the
1870 * temporary buffer.
1871 */
1872 memcpy(&recv.hdr, data, sizeof(struct smux_hdr_t));
1873
1874 if (recv.hdr.magic != SMUX_MAGIC) {
1875 pr_err("%s: invalid header magic\n", __func__);
1876 return -EINVAL;
1877 }
1878
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001879 if (recv.hdr.payload_len)
1880 recv.payload = data + sizeof(struct smux_hdr_t);
1881
1882 return smux_dispatch_rx_pkt(&recv);
1883}
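/*
 * Frame layout assumed here, as accumulated by the RX state machine
 * below (a sketch derived from the code rather than a spec document):
 *
 *	[SMUX_MAGIC_WORD1][SMUX_MAGIC_WORD2][remainder of smux_hdr_t]
 *	[payload_len bytes of payload][pad_len bytes of padding]
 *
 * A complete frame therefore spans
 *	sizeof(struct smux_hdr_t) + hdr.payload_len + hdr.pad_len
 * bytes, which is why recv.payload can point just past the header in
 * the caller's buffer instead of copying the payload out.
 */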
1884
1885/**
1886 * Handle wakeup request byte.
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001887 */
1888static void smux_handle_wakeup_req(void)
1889{
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001890 unsigned long flags;
1891
1892 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001893 if (smux.power_state == SMUX_PWR_OFF
1894 || smux.power_state == SMUX_PWR_TURNING_ON) {
1895 /* wakeup system */
Angshuman Sarkar98f78122012-07-24 14:50:42 +05301896 SMUX_PWR("smux: %s: Power %d->%d\n", __func__,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001897 smux.power_state, SMUX_PWR_ON);
1898 smux.power_state = SMUX_PWR_ON;
1899 queue_work(smux_tx_wq, &smux_wakeup_work);
1900 queue_work(smux_tx_wq, &smux_tx_work);
1901 queue_delayed_work(smux_tx_wq, &smux_delayed_inactivity_work,
1902 msecs_to_jiffies(SMUX_INACTIVITY_TIMEOUT_MS));
1903 smux_send_byte(SMUX_WAKEUP_ACK);
Eric Holmberga9b06472012-06-22 09:46:34 -06001904 } else if (smux.power_state == SMUX_PWR_ON) {
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001905 smux_send_byte(SMUX_WAKEUP_ACK);
Eric Holmberga9b06472012-06-22 09:46:34 -06001906 } else {
1907 /* stale wakeup request from previous wakeup */
Angshuman Sarkar98f78122012-07-24 14:50:42 +05301908 SMUX_PWR("smux: %s: stale Wakeup REQ in state %d\n",
Eric Holmberga9b06472012-06-22 09:46:34 -06001909 __func__, smux.power_state);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001910 }
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001911 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001912}
1913
1914/**
1915 * Handle wakeup request ack.
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001916 */
1917static void smux_handle_wakeup_ack(void)
1918{
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001919 unsigned long flags;
1920
1921 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001922 if (smux.power_state == SMUX_PWR_TURNING_ON) {
1923 /* received response to wakeup request */
Angshuman Sarkar98f78122012-07-24 14:50:42 +05301924 SMUX_PWR("smux: %s: Power %d->%d\n", __func__,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001925 smux.power_state, SMUX_PWR_ON);
1926 smux.power_state = SMUX_PWR_ON;
1927 queue_work(smux_tx_wq, &smux_tx_work);
1928 queue_delayed_work(smux_tx_wq, &smux_delayed_inactivity_work,
1929 msecs_to_jiffies(SMUX_INACTIVITY_TIMEOUT_MS));
1930
1931 } else if (smux.power_state != SMUX_PWR_ON) {
1932 /* invalid message */
Angshuman Sarkar98f78122012-07-24 14:50:42 +05301933 SMUX_PWR("smux: %s: stale Wakeup REQ ACK in state %d\n",
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001934 __func__, smux.power_state);
1935 }
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001936 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001937}
1938
1939/**
1940 * RX State machine - IDLE state processing.
1941 *
1942 * @data New RX data to process
1943 * @len Length of the data
1944 * @used Return value of length processed
1945 * @flag Error flag - TTY_NORMAL 0 for no failure
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001946 */
1947static void smux_rx_handle_idle(const unsigned char *data,
1948 int len, int *used, int flag)
1949{
1950 int i;
1951
1952 if (flag) {
1953 if (smux_byte_loopback)
1954 smux_receive_byte(SMUX_UT_ECHO_ACK_FAIL,
1955 smux_byte_loopback);
1956 pr_err("%s: TTY error 0x%x - ignoring\n", __func__, flag);
1957 ++*used;
1958 return;
1959 }
1960
1961 for (i = *used; i < len && smux.rx_state == SMUX_RX_IDLE; i++) {
1962 switch (data[i]) {
1963 case SMUX_MAGIC_WORD1:
1964 smux.rx_state = SMUX_RX_MAGIC;
1965 break;
1966 case SMUX_WAKEUP_REQ:
Angshuman Sarkar98f78122012-07-24 14:50:42 +05301967			SMUX_PWR("smux: RX Wakeup REQ\n");
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001968 smux_handle_wakeup_req();
1969 break;
1970 case SMUX_WAKEUP_ACK:
Angshuman Sarkar98f78122012-07-24 14:50:42 +05301971			SMUX_PWR("smux: RX Wakeup ACK\n");
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001972 smux_handle_wakeup_ack();
1973 break;
1974 default:
1975 /* unexpected character */
1976 if (smux_byte_loopback && data[i] == SMUX_UT_ECHO_REQ)
1977 smux_receive_byte(SMUX_UT_ECHO_ACK_OK,
1978 smux_byte_loopback);
1979 pr_err("%s: parse error 0x%02x - ignoring\n", __func__,
1980 (unsigned)data[i]);
1981 break;
1982 }
1983 }
1984
1985 *used = i;
1986}
1987
1988/**
1989 * RX State machine - Header Magic state processing.
1990 *
1991 * @data New RX data to process
1992 * @len Length of the data
1993 * @used Return value of length processed
1994 * @flag Error flag - TTY_NORMAL 0 for no failure
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001995 */
1996static void smux_rx_handle_magic(const unsigned char *data,
1997 int len, int *used, int flag)
1998{
1999 int i;
2000
2001 if (flag) {
2002 pr_err("%s: TTY RX error %d\n", __func__, flag);
2003 smux_enter_reset();
2004 smux.rx_state = SMUX_RX_FAILURE;
2005 ++*used;
2006 return;
2007 }
2008
2009 for (i = *used; i < len && smux.rx_state == SMUX_RX_MAGIC; i++) {
2010 /* wait for completion of the magic */
2011 if (data[i] == SMUX_MAGIC_WORD2) {
2012 smux.recv_len = 0;
2013 smux.recv_buf[smux.recv_len++] = SMUX_MAGIC_WORD1;
2014 smux.recv_buf[smux.recv_len++] = SMUX_MAGIC_WORD2;
2015 smux.rx_state = SMUX_RX_HDR;
2016 } else {
2017 /* unexpected / trash character */
2018 pr_err("%s: rx parse error for char %c; *used=%d, len=%d\n",
2019 __func__, data[i], *used, len);
2020 smux.rx_state = SMUX_RX_IDLE;
2021 }
2022 }
2023
2024 *used = i;
2025}
2026
2027/**
2028 * RX State machine - Packet Header state processing.
2029 *
2030 * @data New RX data to process
2031 * @len Length of the data
2032 * @used Return value of length processed
2033 * @flag Error flag - TTY_NORMAL 0 for no failure
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002034 */
2035static void smux_rx_handle_hdr(const unsigned char *data,
2036 int len, int *used, int flag)
2037{
2038 int i;
2039 struct smux_hdr_t *hdr;
2040
2041 if (flag) {
2042 pr_err("%s: TTY RX error %d\n", __func__, flag);
2043 smux_enter_reset();
2044 smux.rx_state = SMUX_RX_FAILURE;
2045 ++*used;
2046 return;
2047 }
2048
2049 for (i = *used; i < len && smux.rx_state == SMUX_RX_HDR; i++) {
2050 smux.recv_buf[smux.recv_len++] = data[i];
2051
2052 if (smux.recv_len == sizeof(struct smux_hdr_t)) {
2053 /* complete header received */
2054 hdr = (struct smux_hdr_t *)smux.recv_buf;
2055 smux.pkt_remain = hdr->payload_len + hdr->pad_len;
2056 smux.rx_state = SMUX_RX_PAYLOAD;
2057 }
2058 }
2059 *used = i;
2060}
2061
2062/**
2063 * RX State machine - Packet Payload state processing.
2064 *
2065 * @data New RX data to process
2066 * @len Length of the data
2067 * @used Return value of length processed
2068 * @flag Error flag - TTY_NORMAL 0 for no failure
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002069 */
2070static void smux_rx_handle_pkt_payload(const unsigned char *data,
2071 int len, int *used, int flag)
2072{
2073 int remaining;
2074
2075 if (flag) {
2076 pr_err("%s: TTY RX error %d\n", __func__, flag);
2077 smux_enter_reset();
2078 smux.rx_state = SMUX_RX_FAILURE;
2079 ++*used;
2080 return;
2081 }
2082
2083 /* copy data into rx buffer */
2084 if (smux.pkt_remain < (len - *used))
2085 remaining = smux.pkt_remain;
2086 else
2087 remaining = len - *used;
2088
2089 memcpy(&smux.recv_buf[smux.recv_len], &data[*used], remaining);
2090 smux.recv_len += remaining;
2091 smux.pkt_remain -= remaining;
2092 *used += remaining;
2093
2094 if (smux.pkt_remain == 0) {
2095 /* complete packet received */
2096 smux_deserialize(smux.recv_buf, smux.recv_len);
2097 smux.rx_state = SMUX_RX_IDLE;
2098 }
2099}
2100
2101/**
2102 * Feed data to the receive state machine.
2103 *
2104 * @data Pointer to data block
2105 * @len Length of data
2106 * @flag TTY_NORMAL (0) for no error, otherwise TTY Error Flag
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002107 */
2108void smux_rx_state_machine(const unsigned char *data,
2109 int len, int flag)
2110{
Eric Holmberg0560f7a2012-05-31 15:50:26 -06002111 struct smux_rx_worker_data work;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002112
Eric Holmberg0560f7a2012-05-31 15:50:26 -06002113 work.data = data;
2114 work.len = len;
2115 work.flag = flag;
2116 INIT_WORK_ONSTACK(&work.work, smux_rx_worker);
2117 work.work_complete = COMPLETION_INITIALIZER_ONSTACK(work.work_complete);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002118
Eric Holmberg0560f7a2012-05-31 15:50:26 -06002119 queue_work(smux_rx_wq, &work.work);
2120 wait_for_completion(&work.work_complete);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002121}
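/*
 * Illustrative caller sketch (hypothetical, assuming the standard TTY
 * line-discipline receive contract): a receive_buf() hook would pass
 * each run of bytes in along with its TTY error flag, e.g.
 *
 *	smux_rx_state_machine(cp, count, fp ? fp[0] : TTY_NORMAL);
 *
 * Note that this function blocks on work_complete until the RX worker
 * has consumed the buffer, so the caller must be able to sleep.
 */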
2122
2123/**
2124 * Add channel to transmit-ready list and trigger transmit worker.
2125 *
2126 * @ch Channel to add
2127 */
2128static void list_channel(struct smux_lch_t *ch)
2129{
2130 unsigned long flags;
2131
Angshuman Sarkar98f78122012-07-24 14:50:42 +05302132 SMUX_DBG("smux: %s: listing channel %d\n",
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002133 __func__, ch->lcid);
2134
2135 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
2136 spin_lock(&ch->tx_lock_lhb2);
2137 smux.tx_activity_flag = 1;
2138 if (list_empty(&ch->tx_ready_list))
2139 list_add_tail(&ch->tx_ready_list, &smux.lch_tx_ready_list);
2140 spin_unlock(&ch->tx_lock_lhb2);
2141 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2142
2143 queue_work(smux_tx_wq, &smux_tx_work);
2144}
2145
2146/**
2147 * Transmit packet on correct transport and then perform client
2148 * notification.
2149 *
2150 * @ch Channel to transmit on
2151 * @pkt Packet to transmit
2152 */
2153static void smux_tx_pkt(struct smux_lch_t *ch, struct smux_pkt_t *pkt)
2154{
2155 union notifier_metadata meta_write;
2156 int ret;
2157
2158 if (ch && pkt) {
2159 SMUX_LOG_PKT_TX(pkt);
2160 if (ch->local_mode == SMUX_LCH_MODE_LOCAL_LOOPBACK)
2161 ret = smux_tx_loopback(pkt);
2162 else
2163 ret = smux_tx_tty(pkt);
2164
2165 if (pkt->hdr.cmd == SMUX_CMD_DATA) {
2166 /* notify write-done */
2167 meta_write.write.pkt_priv = pkt->priv;
2168 meta_write.write.buffer = pkt->payload;
2169 meta_write.write.len = pkt->hdr.payload_len;
2170 if (ret >= 0) {
Angshuman Sarkar98f78122012-07-24 14:50:42 +05302171				SMUX_DBG("smux: %s: PKT write done\n", __func__);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002172 schedule_notify(ch->lcid, SMUX_WRITE_DONE,
2173 &meta_write);
2174 } else {
2175 pr_err("%s: failed to write pkt %d\n",
2176 __func__, ret);
2177 schedule_notify(ch->lcid, SMUX_WRITE_FAIL,
2178 &meta_write);
2179 }
2180 }
2181 }
2182}
2183
2184/**
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002185 * Flush pending TTY TX data.
2186 */
2187static void smux_flush_tty(void)
2188{
Eric Holmberg92a67df2012-06-25 13:56:24 -06002189 mutex_lock(&smux.mutex_lha0);
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002190 if (!smux.tty) {
2191 pr_err("%s: ldisc not loaded\n", __func__);
Eric Holmberg92a67df2012-06-25 13:56:24 -06002192 mutex_unlock(&smux.mutex_lha0);
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002193 return;
2194 }
2195
2196 tty_wait_until_sent(smux.tty,
2197 msecs_to_jiffies(TTY_BUFFER_FULL_WAIT_MS));
2198
2199 if (tty_chars_in_buffer(smux.tty) > 0)
2200 pr_err("%s: unable to flush UART queue\n", __func__);
Eric Holmberg92a67df2012-06-25 13:56:24 -06002201
2202 mutex_unlock(&smux.mutex_lha0);
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002203}
2204
2205/**
Eric Holmberged1f00c2012-06-07 09:45:18 -06002206 * Purge TX queue for logical channel.
2207 *
2208 * @ch Logical channel pointer
Eric Holmberg6fcf5322012-07-11 11:46:28 -06002209 * @is_ssr 1 = this is a subsystem restart purge
Eric Holmberged1f00c2012-06-07 09:45:18 -06002210 *
2211 * Must be called with the following spinlocks locked:
2212 * state_lock_lhb1
2213 * tx_lock_lhb2
2214 */
Eric Holmberg6fcf5322012-07-11 11:46:28 -06002215static void smux_purge_ch_tx_queue(struct smux_lch_t *ch, int is_ssr)
Eric Holmberged1f00c2012-06-07 09:45:18 -06002216{
2217 struct smux_pkt_t *pkt;
2218 int send_disconnect = 0;
Eric Holmberg6fcf5322012-07-11 11:46:28 -06002219 struct smux_pkt_t *pkt_tmp;
2220 int is_state_pkt;
Eric Holmberged1f00c2012-06-07 09:45:18 -06002221
Eric Holmberg6fcf5322012-07-11 11:46:28 -06002222 list_for_each_entry_safe(pkt, pkt_tmp, &ch->tx_queue, list) {
2223 is_state_pkt = 0;
Eric Holmberged1f00c2012-06-07 09:45:18 -06002224 if (pkt->hdr.cmd == SMUX_CMD_OPEN_LCH) {
Eric Holmberg6fcf5322012-07-11 11:46:28 -06002225 if (pkt->hdr.flags & SMUX_CMD_OPEN_ACK) {
2226 /* Open ACK must still be sent */
2227 is_state_pkt = 1;
2228 } else {
2229 /* Open never sent -- force to closed state */
2230 ch->local_state = SMUX_LCH_LOCAL_CLOSED;
2231 send_disconnect = 1;
2232 }
2233 } else if (pkt->hdr.cmd == SMUX_CMD_CLOSE_LCH) {
2234 if (pkt->hdr.flags & SMUX_CMD_CLOSE_ACK)
2235 is_state_pkt = 1;
2236 if (!send_disconnect)
2237 is_state_pkt = 1;
Eric Holmberged1f00c2012-06-07 09:45:18 -06002238 } else if (pkt->hdr.cmd == SMUX_CMD_DATA) {
2239 /* Notify client of failed write */
2240 union notifier_metadata meta_write;
2241
2242 meta_write.write.pkt_priv = pkt->priv;
2243 meta_write.write.buffer = pkt->payload;
2244 meta_write.write.len = pkt->hdr.payload_len;
2245 schedule_notify(ch->lcid, SMUX_WRITE_FAIL, &meta_write);
2246 }
Eric Holmberg6fcf5322012-07-11 11:46:28 -06002247
2248 if (!is_state_pkt || is_ssr) {
2249 list_del(&pkt->list);
2250 smux_free_pkt(pkt);
2251 }
Eric Holmberged1f00c2012-06-07 09:45:18 -06002252 }
2253
2254 if (send_disconnect) {
2255 union notifier_metadata meta_disconnected;
2256
2257 meta_disconnected.disconnected.is_ssr = smux.in_reset;
2258 schedule_notify(ch->lcid, SMUX_DISCONNECTED,
2259 &meta_disconnected);
2260 }
2261}
2262
2263/**
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002264 * Power-up the UART.
Eric Holmberg92a67df2012-06-25 13:56:24 -06002265 *
2266 * Must be called with smux.mutex_lha0 already locked.
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002267 */
Eric Holmberg92a67df2012-06-25 13:56:24 -06002268static void smux_uart_power_on_atomic(void)
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002269{
2270 struct uart_state *state;
2271
2272 if (!smux.tty || !smux.tty->driver_data) {
2273 pr_err("%s: unable to find UART port for tty %p\n",
2274 __func__, smux.tty);
2275 return;
2276 }
2277 state = smux.tty->driver_data;
2278 msm_hs_request_clock_on(state->uart_port);
2279}
2280
2281/**
Eric Holmberg92a67df2012-06-25 13:56:24 -06002282 * Power-up the UART.
2283 */
2284static void smux_uart_power_on(void)
2285{
2286 mutex_lock(&smux.mutex_lha0);
2287 smux_uart_power_on_atomic();
2288 mutex_unlock(&smux.mutex_lha0);
2289}
2290
2291/**
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002292 * Power down the UART.
Eric Holmberg06011322012-07-06 18:17:03 -06002293 *
2294 * Must be called with mutex_lha0 locked.
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002295 */
Eric Holmberg06011322012-07-06 18:17:03 -06002296static void smux_uart_power_off_atomic(void)
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002297{
2298 struct uart_state *state;
2299
2300 if (!smux.tty || !smux.tty->driver_data) {
2301 pr_err("%s: unable to find UART port for tty %p\n",
2302 __func__, smux.tty);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002304 return;
2305 }
2306 state = smux.tty->driver_data;
2307 msm_hs_request_clock_off(state->uart_port);
Eric Holmberg06011322012-07-06 18:17:03 -06002308}
2309
2310/**
2311 * Power down the UART.
2312 */
2313static void smux_uart_power_off(void)
2314{
2315 mutex_lock(&smux.mutex_lha0);
2316 smux_uart_power_off_atomic();
Eric Holmberg92a67df2012-06-25 13:56:24 -06002317 mutex_unlock(&smux.mutex_lha0);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002318}
2319
2320/**
2321 * TX Wakeup Worker
2322 *
2323 * @work Not used
2324 *
2325 * Do an exponential back-off wakeup sequence with a maximum period
2326 * of approximately 1 second (1 << 20 microseconds).
2327 */
2328static void smux_wakeup_worker(struct work_struct *work)
2329{
2330 unsigned long flags;
2331 unsigned wakeup_delay;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002332
Eric Holmberg2d4f9e82012-06-21 13:12:39 -06002333 if (smux.in_reset)
2334 return;
2335
2336 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
2337 if (smux.power_state == SMUX_PWR_ON) {
2338 /* wakeup complete */
Eric Holmberga9b06472012-06-22 09:46:34 -06002339 smux.pwr_wakeup_delay_us = 1;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002340 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
Angshuman Sarkar98f78122012-07-24 14:50:42 +05302341 SMUX_DBG("smux: %s: wakeup complete\n", __func__);
Eric Holmberg2d4f9e82012-06-21 13:12:39 -06002342
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002343 /*
2344 * Cancel any pending retry. This avoids a race condition with
2345 * a new power-up request because:
2346 * 1) this worker doesn't modify the state
2347 * 2) this worker is processed on the same single-threaded
2348 * workqueue as new TX wakeup requests
2349 */
2350 cancel_delayed_work(&smux_wakeup_delayed_work);
Eric Holmbergd032f5b2012-06-29 19:02:00 -06002351 queue_work(smux_tx_wq, &smux_tx_work);
Eric Holmberga9b06472012-06-22 09:46:34 -06002352 } else if (smux.power_state == SMUX_PWR_TURNING_ON) {
Eric Holmberg2d4f9e82012-06-21 13:12:39 -06002353 /* retry wakeup */
2354 wakeup_delay = smux.pwr_wakeup_delay_us;
2355 smux.pwr_wakeup_delay_us <<= 1;
2356 if (smux.pwr_wakeup_delay_us > SMUX_WAKEUP_DELAY_MAX)
2357 smux.pwr_wakeup_delay_us =
2358 SMUX_WAKEUP_DELAY_MAX;
2359
2360 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
Angshuman Sarkar98f78122012-07-24 14:50:42 +05302361 SMUX_PWR("smux: %s: triggering wakeup\n", __func__);
Eric Holmberg2d4f9e82012-06-21 13:12:39 -06002362 smux_send_byte(SMUX_WAKEUP_REQ);
2363
2364 if (wakeup_delay < SMUX_WAKEUP_DELAY_MIN) {
Angshuman Sarkar98f78122012-07-24 14:50:42 +05302365 SMUX_DBG("smux: %s: sleeping for %u us\n", __func__,
Eric Holmberg2d4f9e82012-06-21 13:12:39 -06002366 wakeup_delay);
2367 usleep_range(wakeup_delay, 2*wakeup_delay);
2368 queue_work(smux_tx_wq, &smux_wakeup_work);
2369 } else {
2370 /* schedule delayed work */
Angshuman Sarkar98f78122012-07-24 14:50:42 +05302371 SMUX_DBG(
2372 "smux: %s: scheduling delayed wakeup in %u ms\n",
Eric Holmberg2d4f9e82012-06-21 13:12:39 -06002373 __func__, wakeup_delay / 1000);
2374 queue_delayed_work(smux_tx_wq,
2375 &smux_wakeup_delayed_work,
2376 msecs_to_jiffies(wakeup_delay / 1000));
2377 }
Eric Holmberga9b06472012-06-22 09:46:34 -06002378 } else {
2379 /* wakeup aborted */
2380 smux.pwr_wakeup_delay_us = 1;
2381 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
Angshuman Sarkar98f78122012-07-24 14:50:42 +05302382 SMUX_PWR("smux: %s: wakeup aborted\n", __func__);
Eric Holmberga9b06472012-06-22 09:46:34 -06002383 cancel_delayed_work(&smux_wakeup_delayed_work);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002384 }
2385}
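/*
 * Worked example of the back-off above: pwr_wakeup_delay_us doubles
 * on each pass (1, 2, 4, ... us).  Below SMUX_WAKEUP_DELAY_MIN
 * (1 << 15 us, about 32 ms) the worker sleeps inline with
 * usleep_range() and requeues itself immediately; beyond that it
 * converts the delay to milliseconds and uses delayed work, and the
 * delay saturates at SMUX_WAKEUP_DELAY_MAX (1 << 20 us, about 1 s)
 * between attempts.
 */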
2386
2388/**
2389 * Inactivity timeout worker. Periodically scheduled when link is active.
2390 * When it detects inactivity, it will power-down the UART link.
2391 *
2392 * @work Work structure (not used)
2393 */
2394static void smux_inactivity_worker(struct work_struct *work)
2395{
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002396 struct smux_pkt_t *pkt;
2397 unsigned long flags;
2398
Eric Holmberg06011322012-07-06 18:17:03 -06002399 if (smux.in_reset)
2400 return;
2401
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002402 spin_lock_irqsave(&smux.rx_lock_lha1, flags);
2403 spin_lock(&smux.tx_lock_lha2);
2404
2405 if (!smux.tx_activity_flag && !smux.rx_activity_flag) {
2406 /* no activity */
2407 if (smux.powerdown_enabled) {
2408 if (smux.power_state == SMUX_PWR_ON) {
2409 /* start power-down sequence */
2410 pkt = smux_alloc_pkt();
2411 if (pkt) {
Angshuman Sarkar98f78122012-07-24 14:50:42 +05302412 SMUX_PWR(
2413 "smux: %s: Power %d->%d\n", __func__,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002414 smux.power_state,
Eric Holmberga9b06472012-06-22 09:46:34 -06002415 SMUX_PWR_TURNING_OFF_FLUSH);
2416 smux.power_state =
2417 SMUX_PWR_TURNING_OFF_FLUSH;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002418
2419 /* send power-down request */
2420 pkt->hdr.cmd = SMUX_CMD_PWR_CTL;
2421 pkt->hdr.flags = 0;
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002422 pkt->hdr.lcid = SMUX_BROADCAST_LCID;
2423 list_add_tail(&pkt->list,
2424 &smux.power_queue);
2425 queue_work(smux_tx_wq, &smux_tx_work);
2426 } else {
2427 pr_err("%s: packet alloc failed\n",
2428 __func__);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002429 }
2430 }
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002431 }
2432 }
2433 smux.tx_activity_flag = 0;
2434 smux.rx_activity_flag = 0;
2435
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002436 if (smux.power_state == SMUX_PWR_OFF_FLUSH) {
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002437 /* ready to power-down the UART */
Angshuman Sarkar98f78122012-07-24 14:50:42 +05302438 SMUX_PWR("smux: %s: Power %d->%d\n", __func__,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002439 smux.power_state, SMUX_PWR_OFF);
Eric Holmbergff0b0112012-06-08 15:06:57 -06002440 smux.power_state = SMUX_PWR_OFF;
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002441
2442 /* if data is pending, schedule a new wakeup */
2443 if (!list_empty(&smux.lch_tx_ready_list) ||
2444 !list_empty(&smux.power_queue))
2445 queue_work(smux_tx_wq, &smux_tx_work);
2446
2447 spin_unlock(&smux.tx_lock_lha2);
2448 spin_unlock_irqrestore(&smux.rx_lock_lha1, flags);
2449
2450 /* flush UART output queue and power down */
2451 smux_flush_tty();
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002452 smux_uart_power_off();
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002453 } else {
2454 spin_unlock(&smux.tx_lock_lha2);
2455 spin_unlock_irqrestore(&smux.rx_lock_lha1, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002456 }
2457
2458 /* reschedule inactivity worker */
2459 if (smux.power_state != SMUX_PWR_OFF)
2460 queue_delayed_work(smux_tx_wq, &smux_delayed_inactivity_work,
2461 msecs_to_jiffies(SMUX_INACTIVITY_TIMEOUT_MS));
2462}
2463
2464/**
Eric Holmbergb8435c82012-06-05 14:51:29 -06002465 * Remove RX retry packet from channel and free it.
2466 *
Eric Holmbergb8435c82012-06-05 14:51:29 -06002467 * @ch Channel for retry packet
2468 * @retry Retry packet to remove
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002469 *
2470 * @returns 1 if flow control updated; 0 otherwise
2471 *
2472 * Must be called with state_lock_lhb1 locked.
Eric Holmbergb8435c82012-06-05 14:51:29 -06002473 */
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002474int smux_remove_rx_retry(struct smux_lch_t *ch,
Eric Holmbergb8435c82012-06-05 14:51:29 -06002475 struct smux_rx_pkt_retry *retry)
2476{
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002477 int tx_ready = 0;
2478
Eric Holmbergb8435c82012-06-05 14:51:29 -06002479 list_del(&retry->rx_retry_list);
2480 --ch->rx_retry_queue_cnt;
2481 smux_free_pkt(retry->pkt);
2482 kfree(retry);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002483
2484 if ((ch->options & SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP) &&
2485 (ch->rx_retry_queue_cnt <= SMUX_RX_WM_LOW) &&
2486 ch->rx_flow_control_auto) {
2487 ch->rx_flow_control_auto = 0;
2488 smux_rx_flow_control_updated(ch);
2489 schedule_notify(ch->lcid, SMUX_RX_RETRY_LOW_WM_HIT, NULL);
2490 tx_ready = 1;
2491 }
2492 return tx_ready;
Eric Holmbergb8435c82012-06-05 14:51:29 -06002493}
2494
2495/**
Eric Holmberg0560f7a2012-05-31 15:50:26 -06002496 * RX worker handles all receive operations.
2497 *
2498 * @work Work structure contained in the smux_rx_worker_data structure
2499 */
2500static void smux_rx_worker(struct work_struct *work)
2501{
2502 unsigned long flags;
2503 int used;
2504 int initial_rx_state;
2505 struct smux_rx_worker_data *w;
2506 const unsigned char *data;
2507 int len;
2508 int flag;
2509
2510 w = container_of(work, struct smux_rx_worker_data, work);
2511 data = w->data;
2512 len = w->len;
2513 flag = w->flag;
2514
2515 spin_lock_irqsave(&smux.rx_lock_lha1, flags);
2516 smux.rx_activity_flag = 1;
2517 spin_unlock_irqrestore(&smux.rx_lock_lha1, flags);
2518
Angshuman Sarkar98f78122012-07-24 14:50:42 +05302519 SMUX_DBG("smux: %s: %p, len=%d, flag=%d\n", __func__, data, len, flag);
Eric Holmberg0560f7a2012-05-31 15:50:26 -06002520 used = 0;
2521 do {
Eric Holmberg06011322012-07-06 18:17:03 -06002522 if (smux.in_reset) {
Angshuman Sarkar98f78122012-07-24 14:50:42 +05302523 SMUX_DBG("smux: %s: abort RX due to reset\n", __func__);
Eric Holmberg06011322012-07-06 18:17:03 -06002524 smux.rx_state = SMUX_RX_IDLE;
2525 break;
2526 }
2527
Angshuman Sarkar98f78122012-07-24 14:50:42 +05302528 SMUX_DBG("smux: %s: state %d; %d of %d\n",
Eric Holmberg0560f7a2012-05-31 15:50:26 -06002529 __func__, smux.rx_state, used, len);
2530 initial_rx_state = smux.rx_state;
2531
2532 switch (smux.rx_state) {
2533 case SMUX_RX_IDLE:
2534 smux_rx_handle_idle(data, len, &used, flag);
2535 break;
2536 case SMUX_RX_MAGIC:
2537 smux_rx_handle_magic(data, len, &used, flag);
2538 break;
2539 case SMUX_RX_HDR:
2540 smux_rx_handle_hdr(data, len, &used, flag);
2541 break;
2542 case SMUX_RX_PAYLOAD:
2543 smux_rx_handle_pkt_payload(data, len, &used, flag);
2544 break;
2545 default:
Angshuman Sarkar98f78122012-07-24 14:50:42 +05302546 SMUX_DBG("smux: %s: invalid state %d\n",
Eric Holmberg0560f7a2012-05-31 15:50:26 -06002547 __func__, smux.rx_state);
2548 smux.rx_state = SMUX_RX_IDLE;
2549 break;
2550 }
2551 } while (used < len || smux.rx_state != initial_rx_state);
2552
2553 complete(&w->work_complete);
2554}
2555
2556/**
Eric Holmbergb8435c82012-06-05 14:51:29 -06002557 * RX Retry worker handles retrying get_rx_buffer calls that previously failed
2558 * because the client was not ready (-EAGAIN).
2559 *
2560 * @work Work structure contained in smux_lch_t structure
2561 */
2562static void smux_rx_retry_worker(struct work_struct *work)
2563{
2564 struct smux_lch_t *ch;
2565 struct smux_rx_pkt_retry *retry;
2566 union notifier_metadata metadata;
2567 int tmp;
2568 unsigned long flags;
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002569 int immediate_retry = 0;
2570 int tx_ready = 0;
Eric Holmbergb8435c82012-06-05 14:51:29 -06002571
2572 ch = container_of(work, struct smux_lch_t, rx_retry_work.work);
2573
2574 /* get next retry packet */
2575 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
Eric Holmberg06011322012-07-06 18:17:03 -06002576 if ((ch->local_state != SMUX_LCH_LOCAL_OPENED) || smux.in_reset) {
Eric Holmbergb8435c82012-06-05 14:51:29 -06002577 /* port has been closed - remove all retries */
2578 while (!list_empty(&ch->rx_retry_queue)) {
2579 retry = list_first_entry(&ch->rx_retry_queue,
2580 struct smux_rx_pkt_retry,
2581 rx_retry_list);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002582 (void)smux_remove_rx_retry(ch, retry);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002583 }
2584 }
2585
2586 if (list_empty(&ch->rx_retry_queue)) {
Angshuman Sarkar98f78122012-07-24 14:50:42 +05302587 SMUX_DBG("smux: %s: retry list empty for channel %d\n",
Eric Holmbergb8435c82012-06-05 14:51:29 -06002588 __func__, ch->lcid);
2589 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2590 return;
2591 }
2592 retry = list_first_entry(&ch->rx_retry_queue,
2593 struct smux_rx_pkt_retry,
2594 rx_retry_list);
2595 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2596
Angshuman Sarkar98f78122012-07-24 14:50:42 +05302597 SMUX_DBG("smux: %s: ch %d retrying rx pkt %p\n",
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002598 __func__, ch->lcid, retry);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002599 metadata.read.pkt_priv = 0;
2600 metadata.read.buffer = 0;
2601 tmp = ch->get_rx_buffer(ch->priv,
2602 (void **)&metadata.read.pkt_priv,
2603 (void **)&metadata.read.buffer,
2604 retry->pkt->hdr.payload_len);
2605 if (tmp == 0 && metadata.read.buffer) {
2606 /* have valid RX buffer */
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002607
Eric Holmbergb8435c82012-06-05 14:51:29 -06002608 memcpy(metadata.read.buffer, retry->pkt->payload,
2609 retry->pkt->hdr.payload_len);
2610 metadata.read.len = retry->pkt->hdr.payload_len;
2611
2612 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002613 tx_ready = smux_remove_rx_retry(ch, retry);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002614 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002615 schedule_notify(ch->lcid, SMUX_READ_DONE, &metadata);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002616 if (tx_ready)
2617 list_channel(ch);
2618
2619 immediate_retry = 1;
Eric Holmbergb8435c82012-06-05 14:51:29 -06002620 } else if (tmp == -EAGAIN ||
2621 (tmp == 0 && !metadata.read.buffer)) {
2622 /* retry again */
2623 retry->timeout_in_ms <<= 1;
2624 if (retry->timeout_in_ms > SMUX_RX_RETRY_MAX_MS) {
2625 /* timed out */
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002626 pr_err("%s: ch %d RX retry client timeout\n",
2627 __func__, ch->lcid);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002628 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002629 tx_ready = smux_remove_rx_retry(ch, retry);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002630 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002631 schedule_notify(ch->lcid, SMUX_READ_FAIL, NULL);
2632 if (tx_ready)
2633 list_channel(ch);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002634 }
2635 } else {
2636 /* client error - drop packet */
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002637 pr_err("%s: ch %d RX retry client failed (%d)\n",
2638 __func__, ch->lcid, tmp);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002639 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002640 tx_ready = smux_remove_rx_retry(ch, retry);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002641 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002642 schedule_notify(ch->lcid, SMUX_READ_FAIL, NULL);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002643 if (tx_ready)
2644 list_channel(ch);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002645 }
2646
2647 /* schedule next retry */
2648 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2649 if (!list_empty(&ch->rx_retry_queue)) {
2650 retry = list_first_entry(&ch->rx_retry_queue,
2651 struct smux_rx_pkt_retry,
2652 rx_retry_list);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002653
2654 if (immediate_retry)
2655 queue_delayed_work(smux_rx_wq, &ch->rx_retry_work, 0);
2656 else
2657 queue_delayed_work(smux_rx_wq, &ch->rx_retry_work,
2658 msecs_to_jiffies(retry->timeout_in_ms));
Eric Holmbergb8435c82012-06-05 14:51:29 -06002659 }
2660 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2661}
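/*
 * Worked example of the retry pacing above, assuming the retry entry
 * is queued with timeout_in_ms starting at SMUX_RX_RETRY_MIN_MS
 * (1 ms): each -EAGAIN doubles the delay, so the client is re-asked
 * for a buffer after roughly 1, 2, 4, ... ms.  Once the doubled delay
 * exceeds SMUX_RX_RETRY_MAX_MS (1024 ms), the packet is dropped and
 * SMUX_READ_FAIL is delivered instead.
 */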
2662
2663/**
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002664 * Transmit worker handles serializing and transmitting packets onto the
2665 * underlying transport.
2666 *
2667 * @work Work structure (not used)
2668 */
2669static void smux_tx_worker(struct work_struct *work)
2670{
2671 struct smux_pkt_t *pkt;
2672 struct smux_lch_t *ch;
2673 unsigned low_wm_notif;
2674 unsigned lcid;
2675 unsigned long flags;
2676
2678 /*
2679 * Transmit packets in round-robin fashion based upon ready
2680 * channels.
2681 *
2682 * To eliminate the need to hold a lock for the entire
2683 * iteration through the channel ready list, the head of the
2684 * ready-channel list is always the next channel to be
2685 * processed. To send a packet, the first valid packet in
2686 * the head channel is removed and the head channel is then
2687 * rescheduled at the end of the queue by removing it and
2688 * inserting after the tail. The locks can then be released
2689 * while the packet is processed.
2690 */
Eric Holmberged1f00c2012-06-07 09:45:18 -06002691 while (!smux.in_reset) {
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002692 pkt = NULL;
2693 low_wm_notif = 0;
2694
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002695 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002696
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002697 /* handle wakeup if needed */
2698 if (smux.power_state == SMUX_PWR_OFF) {
2699 if (!list_empty(&smux.lch_tx_ready_list) ||
2700 !list_empty(&smux.power_queue)) {
2701 /* data to transmit, do wakeup */
Angshuman Sarkar98f78122012-07-24 14:50:42 +05302702 SMUX_PWR("smux: %s: Power %d->%d\n", __func__,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002703 smux.power_state,
2704 SMUX_PWR_TURNING_ON);
2705 smux.power_state = SMUX_PWR_TURNING_ON;
2706 spin_unlock_irqrestore(&smux.tx_lock_lha2,
2707 flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002708 queue_work(smux_tx_wq, &smux_wakeup_work);
2709 } else {
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002710 /* no activity -- stay asleep */
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002711 spin_unlock_irqrestore(&smux.tx_lock_lha2,
2712 flags);
2713 }
2714 break;
2715 }
2716
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002717 /* process any pending power packets */
2718 if (!list_empty(&smux.power_queue)) {
2719 pkt = list_first_entry(&smux.power_queue,
2720 struct smux_pkt_t, list);
2721 list_del(&pkt->list);
2722 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2723
Eric Holmberga9b06472012-06-22 09:46:34 -06002724 /* Adjust power state if this is a flush command */
2725 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
2726 if (smux.power_state == SMUX_PWR_TURNING_OFF_FLUSH &&
2727 pkt->hdr.cmd == SMUX_CMD_PWR_CTL) {
2728 if (pkt->hdr.flags & SMUX_CMD_PWR_CTL_ACK ||
2729 smux.power_ctl_remote_req_received) {
2730 /*
2731 * Sending remote power-down request ACK
2732 * or sending local power-down request
2733 * and we already received a remote
2734 * power-down request.
2735 */
Angshuman Sarkar98f78122012-07-24 14:50:42 +05302736 SMUX_PWR(
2737 "smux: %s: Power %d->%d\n", __func__,
Eric Holmberga9b06472012-06-22 09:46:34 -06002738 smux.power_state,
2739 SMUX_PWR_OFF_FLUSH);
2740 smux.power_state = SMUX_PWR_OFF_FLUSH;
2741 smux.power_ctl_remote_req_received = 0;
2742 queue_work(smux_tx_wq,
2743 &smux_inactivity_work);
2744 } else {
2745 /* sending local power-down request */
Angshuman Sarkar98f78122012-07-24 14:50:42 +05302746 SMUX_PWR(
2747 "smux: %s: Power %d->%d\n", __func__,
Eric Holmberga9b06472012-06-22 09:46:34 -06002748 smux.power_state,
2749 SMUX_PWR_TURNING_OFF);
2750 smux.power_state = SMUX_PWR_TURNING_OFF;
2751 }
2752 }
2753 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2754
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002755 /* send the packet */
Eric Holmberga9b06472012-06-22 09:46:34 -06002756 smux_uart_power_on();
2757 smux.tx_activity_flag = 1;
Eric Holmberg4dd6e1a2012-06-26 13:46:29 -06002758 SMUX_PWR_PKT_TX(pkt);
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002759 if (!smux_byte_loopback) {
2760 smux_tx_tty(pkt);
2761 smux_flush_tty();
2762 } else {
2763 smux_tx_loopback(pkt);
2764 }
2765
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002766 smux_free_pkt(pkt);
2767 continue;
2768 }
2769
2770 /* get the next ready channel */
2771 if (list_empty(&smux.lch_tx_ready_list)) {
2772 /* no ready channels */
Angshuman Sarkar98f78122012-07-24 14:50:42 +05302773 SMUX_DBG("smux: %s: no more ready channels, exiting\n",
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002774 __func__);
2775 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2776 break;
2777 }
2778 smux.tx_activity_flag = 1;
2779
2780 if (smux.power_state != SMUX_PWR_ON) {
2781 /* channel not ready to transmit */
Angshuman Sarkar98f78122012-07-24 14:50:42 +05302782 SMUX_DBG("smux: %s: waiting for link up (state %d)\n",
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002783 __func__,
2784 smux.power_state);
2785 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2786 break;
2787 }
2788
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002789 /* get the next packet to send and rotate channel list */
2790 ch = list_first_entry(&smux.lch_tx_ready_list,
2791 struct smux_lch_t,
2792 tx_ready_list);
2793
2794 spin_lock(&ch->state_lock_lhb1);
2795 spin_lock(&ch->tx_lock_lhb2);
2796 if (!list_empty(&ch->tx_queue)) {
2797 /*
2798 * If remote TX flow control is enabled or
2799 * the channel is not fully opened, then only
2800 * send command packets.
2801 */
2802 if (ch->tx_flow_control || !IS_FULLY_OPENED(ch)) {
2803 struct smux_pkt_t *curr;
2804 list_for_each_entry(curr, &ch->tx_queue, list) {
2805 if (curr->hdr.cmd != SMUX_CMD_DATA) {
2806 pkt = curr;
2807 break;
2808 }
2809 }
2810 } else {
2811 /* get next cmd/data packet to send */
2812 pkt = list_first_entry(&ch->tx_queue,
2813 struct smux_pkt_t, list);
2814 }
2815 }
2816
2817 if (pkt) {
2818 list_del(&pkt->list);
2819
2820 /* update packet stats */
2821 if (pkt->hdr.cmd == SMUX_CMD_DATA) {
2822 --ch->tx_pending_data_cnt;
2823 if (ch->notify_lwm &&
2824 ch->tx_pending_data_cnt
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002825 <= SMUX_TX_WM_LOW) {
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002826 ch->notify_lwm = 0;
2827 low_wm_notif = 1;
2828 }
2829 }
2830
2831 /* advance to the next ready channel */
2832 list_rotate_left(&smux.lch_tx_ready_list);
2833 } else {
2834 /* no data in channel to send, remove from ready list */
2835 list_del(&ch->tx_ready_list);
2836 INIT_LIST_HEAD(&ch->tx_ready_list);
2837 }
2838 lcid = ch->lcid;
2839 spin_unlock(&ch->tx_lock_lhb2);
2840 spin_unlock(&ch->state_lock_lhb1);
2841 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2842
2843 if (low_wm_notif)
2844 schedule_notify(lcid, SMUX_LOW_WM_HIT, NULL);
2845
2846 /* send the packet */
2847 smux_tx_pkt(ch, pkt);
2848 smux_free_pkt(pkt);
2849 }
2850}
2851
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002852/**
2853 * Update the RX flow control (sent in the TIOCM Status command).
2854 *
2855 * @ch Channel for update
2856 *
2857 * @returns 1 for updated, 0 for not updated
2858 *
2859 * Must be called with ch->state_lock_lhb1 locked.
2860 */
2861static int smux_rx_flow_control_updated(struct smux_lch_t *ch)
2862{
2863 int updated = 0;
2864 int prev_state;
2865
2866 prev_state = ch->local_tiocm & SMUX_CMD_STATUS_FLOW_CNTL;
2867
2868 if (ch->rx_flow_control_client || ch->rx_flow_control_auto)
2869 ch->local_tiocm |= SMUX_CMD_STATUS_FLOW_CNTL;
2870 else
2871 ch->local_tiocm &= ~SMUX_CMD_STATUS_FLOW_CNTL;
2872
2873 if (prev_state != (ch->local_tiocm & SMUX_CMD_STATUS_FLOW_CNTL)) {
2874 smux_send_status_cmd(ch);
2875 updated = 1;
2876 }
2877
2878 return updated;
2879}
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002880
Eric Holmberg06011322012-07-06 18:17:03 -06002881/**
2882 * Flush all SMUX workqueues.
2883 *
2884 * This sets the reset bit to abort any processing loops and then
2885 * flushes the workqueues to ensure that no new pending work is
2886 * running. Do not call with any locks used by workers held as
2887 * this will result in a deadlock.
2888 */
2889static void smux_flush_workqueues(void)
2890{
2891 smux.in_reset = 1;
2892
Angshuman Sarkar98f78122012-07-24 14:50:42 +05302893 SMUX_DBG("smux: %s: flushing tx wq\n", __func__);
Eric Holmberg06011322012-07-06 18:17:03 -06002894 flush_workqueue(smux_tx_wq);
Angshuman Sarkar98f78122012-07-24 14:50:42 +05302895 SMUX_DBG("smux: %s: flushing rx wq\n", __func__);
Eric Holmberg06011322012-07-06 18:17:03 -06002896 flush_workqueue(smux_rx_wq);
Angshuman Sarkar98f78122012-07-24 14:50:42 +05302897 SMUX_DBG("smux: %s: flushing notify wq\n", __func__);
Eric Holmberg06011322012-07-06 18:17:03 -06002898 flush_workqueue(smux_notify_wq);
2899}
2900
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002901/**********************************************************************/
2902/* Kernel API */
2903/**********************************************************************/
2904
2905/**
2906 * Set or clear channel option using the SMUX_CH_OPTION_* channel
2907 * flags.
2908 *
2909 * @lcid Logical channel ID
2910 * @set Options to set
2911 * @clear Options to clear
2912 *
2913 * @returns 0 for success, < 0 for failure
2914 */
2915int msm_smux_set_ch_option(uint8_t lcid, uint32_t set, uint32_t clear)
2916{
2917 unsigned long flags;
2918 struct smux_lch_t *ch;
2919 int tx_ready = 0;
2920 int ret = 0;
2921
2922 if (smux_assert_lch_id(lcid))
2923 return -ENXIO;
2924
2925 ch = &smux_lch[lcid];
2926 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2927
2928 /* Local loopback mode */
2929 if (set & SMUX_CH_OPTION_LOCAL_LOOPBACK)
2930 ch->local_mode = SMUX_LCH_MODE_LOCAL_LOOPBACK;
2931
2932 if (clear & SMUX_CH_OPTION_LOCAL_LOOPBACK)
2933 ch->local_mode = SMUX_LCH_MODE_NORMAL;
2934
2935 /* Remote loopback mode */
2936 if (set & SMUX_CH_OPTION_REMOTE_LOOPBACK)
2937 ch->local_mode = SMUX_LCH_MODE_REMOTE_LOOPBACK;
2938
2939 if (clear & SMUX_CH_OPTION_REMOTE_LOOPBACK)
2940 ch->local_mode = SMUX_LCH_MODE_NORMAL;
2941
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002942 /* RX Flow control */
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002943 if (set & SMUX_CH_OPTION_REMOTE_TX_STOP) {
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002944 ch->rx_flow_control_client = 1;
2945 tx_ready |= smux_rx_flow_control_updated(ch);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002946 }
2947
2948 if (clear & SMUX_CH_OPTION_REMOTE_TX_STOP) {
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002949 ch->rx_flow_control_client = 0;
2950 tx_ready |= smux_rx_flow_control_updated(ch);
2951 }
2952
2953 /* Auto RX Flow Control */
2954 if (set & SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP) {
Angshuman Sarkar98f78122012-07-24 14:50:42 +05302955 SMUX_DBG("smux: %s: auto rx flow control option enabled\n",
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002956 __func__);
2957 ch->options |= SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP;
2958 }
2959
2960 if (clear & SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP) {
Angshuman Sarkar98f78122012-07-24 14:50:42 +05302961 SMUX_DBG("smux: %s: auto rx flow control option disabled\n",
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002962 __func__);
2963 ch->options &= ~SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP;
2964 ch->rx_flow_control_auto = 0;
2965 tx_ready |= smux_rx_flow_control_updated(ch);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002966 }
2967
2968 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2969
2970 if (tx_ready)
2971 list_channel(ch);
2972
2973 return ret;
2974}
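/*
 * Illustrative usage (a sketch, not taken from a real client): enable
 * automatic RX flow control so SMUX throttles the remote sender when
 * this channel's RX retry queue backs up:
 *
 *	ret = msm_smux_set_ch_option(lcid,
 *			SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP, 0);
 *	if (ret < 0)
 *		pr_err("%s: option update failed %d\n", __func__, ret);
 */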
2975
2976/**
2977 * Starts the opening sequence for a logical channel.
2978 *
2979 * @lcid Logical channel ID
2980 * @priv Free for client usage
2981 * @notify Event notification function
2982 * @get_rx_buffer Function used to provide a receive buffer to SMUX
2983 *
2984 * @returns 0 for success, <0 otherwise
2985 *
2986 * A channel must be fully closed (either not previously opened, or
2987 * msm_smux_close() has been called and the SMUX_DISCONNECTED notification
2988 * has been received).
2989 *
2990 * Once the remote side is opened, the client will receive a SMUX_CONNECTED
2991 * event.
2992 */
2993int msm_smux_open(uint8_t lcid, void *priv,
2994 void (*notify)(void *priv, int event_type, const void *metadata),
2995 int (*get_rx_buffer)(void *priv, void **pkt_priv, void **buffer,
2996 int size))
2997{
2998 int ret;
2999 struct smux_lch_t *ch;
3000 struct smux_pkt_t *pkt;
3001 int tx_ready = 0;
3002 unsigned long flags;
3003
3004 if (smux_assert_lch_id(lcid))
3005 return -ENXIO;
3006
3007 ch = &smux_lch[lcid];
3008 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
3009
3010 if (ch->local_state == SMUX_LCH_LOCAL_CLOSING) {
3011 ret = -EAGAIN;
3012 goto out;
3013 }
3014
3015 if (ch->local_state != SMUX_LCH_LOCAL_CLOSED) {
3016 pr_err("%s: open lcid %d local state %x invalid\n",
3017 __func__, lcid, ch->local_state);
3018 ret = -EINVAL;
3019 goto out;
3020 }
3021
Angshuman Sarkar98f78122012-07-24 14:50:42 +05303022 SMUX_DBG("smux: lcid %d local state 0x%x -> 0x%x\n", lcid,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003023 ch->local_state,
3024 SMUX_LCH_LOCAL_OPENING);
3025
Eric Holmberg06011322012-07-06 18:17:03 -06003026 ch->rx_flow_control_auto = 0;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003027 ch->local_state = SMUX_LCH_LOCAL_OPENING;
3028
3029 ch->priv = priv;
3030 ch->notify = notify;
3031 ch->get_rx_buffer = get_rx_buffer;
3032 ret = 0;
3033
3034 /* Send Open Command */
3035 pkt = smux_alloc_pkt();
3036 if (!pkt) {
3037 ret = -ENOMEM;
3038 goto out;
3039 }
3040 pkt->hdr.magic = SMUX_MAGIC;
3041 pkt->hdr.cmd = SMUX_CMD_OPEN_LCH;
3042 pkt->hdr.flags = SMUX_CMD_OPEN_POWER_COLLAPSE;
3043 if (ch->local_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK)
3044 pkt->hdr.flags |= SMUX_CMD_OPEN_REMOTE_LOOPBACK;
3045 pkt->hdr.lcid = lcid;
3046 pkt->hdr.payload_len = 0;
3047 pkt->hdr.pad_len = 0;
3048 smux_tx_queue(pkt, ch, 0);
3049 tx_ready = 1;
3050
3051out:
3052 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
Eric Holmberg06011322012-07-06 18:17:03 -06003053 smux_rx_flow_control_updated(ch);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003054 if (tx_ready)
3055 list_channel(ch);
3056 return ret;
3057}
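/*
 * Minimal client sketch for msm_smux_open() (function names and GFP
 * flags are illustrative assumptions, not part of this driver).
 * get_rx_buffer() may return -EAGAIN to have the RX retry worker ask
 * again later:
 *
 *	static int my_get_rx_buffer(void *priv, void **pkt_priv,
 *				void **buffer, int size)
 *	{
 *		void *buf = kmalloc(size, GFP_ATOMIC);
 *
 *		if (!buf)
 *			return -EAGAIN;    (SMUX retries via retry worker)
 *		*pkt_priv = buf;           (handed back in SMUX_READ_DONE)
 *		*buffer = buf;
 *		return 0;
 *	}
 *
 *	ret = msm_smux_open(lcid, NULL, my_notify, my_get_rx_buffer);
 *
 * The client should wait for SMUX_CONNECTED in its notify callback
 * before expecting data transfer; writes queued earlier are held in
 * the TX queue until the channel is fully opened.
 */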
3058
3059/**
3060 * Starts the closing sequence for a logical channel.
3061 *
3062 * @lcid Logical channel ID
3063 *
3064 * @returns 0 for success, <0 otherwise
3065 *
3066 * Once the close event has been acknowledged by the remote side, the client
3067 * will receive a SMUX_DISCONNECTED notification.
3068 */
3069int msm_smux_close(uint8_t lcid)
3070{
3071 int ret = 0;
3072 struct smux_lch_t *ch;
3073 struct smux_pkt_t *pkt;
3074 int tx_ready = 0;
3075 unsigned long flags;
3076
3077 if (smux_assert_lch_id(lcid))
3078 return -ENXIO;
3079
3080 ch = &smux_lch[lcid];
3081 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
3082 ch->local_tiocm = 0x0;
3083 ch->remote_tiocm = 0x0;
3084 ch->tx_pending_data_cnt = 0;
3085 ch->notify_lwm = 0;
Eric Holmbergf61339e2012-08-13 14:45:27 -06003086 ch->tx_flow_control = 0;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003087
3088 /* Purge TX queue */
3089 spin_lock(&ch->tx_lock_lhb2);
Eric Holmberg6fcf5322012-07-11 11:46:28 -06003090 smux_purge_ch_tx_queue(ch, 0);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003091 spin_unlock(&ch->tx_lock_lhb2);
3092
3093 /* Send Close Command */
3094 if (ch->local_state == SMUX_LCH_LOCAL_OPENED ||
3095 ch->local_state == SMUX_LCH_LOCAL_OPENING) {
Angshuman Sarkar98f78122012-07-24 14:50:42 +05303096 SMUX_DBG("smux: lcid %d local state 0x%x -> 0x%x\n", lcid,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003097 ch->local_state,
3098 SMUX_LCH_LOCAL_CLOSING);
3099
3100 ch->local_state = SMUX_LCH_LOCAL_CLOSING;
3101 pkt = smux_alloc_pkt();
3102 if (pkt) {
3103 pkt->hdr.cmd = SMUX_CMD_CLOSE_LCH;
3104 pkt->hdr.flags = 0;
3105 pkt->hdr.lcid = lcid;
3106 pkt->hdr.payload_len = 0;
3107 pkt->hdr.pad_len = 0;
3108 smux_tx_queue(pkt, ch, 0);
3109 tx_ready = 1;
3110 } else {
3111 pr_err("%s: pkt allocation failed\n", __func__);
3112 ret = -ENOMEM;
3113 }
Eric Holmbergb8435c82012-06-05 14:51:29 -06003114
3115 /* Purge RX retry queue */
3116 if (ch->rx_retry_queue_cnt)
3117 queue_delayed_work(smux_rx_wq, &ch->rx_retry_work, 0);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003118 }
3119 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
3120
3121 if (tx_ready)
3122 list_channel(ch);
3123
3124 return ret;
3125}
3126
3127/**
3128 * Write data to a logical channel.
3129 *
3130 * @lcid Logical channel ID
3131 * @pkt_priv Client data that will be returned with the SMUX_WRITE_DONE or
3132 * SMUX_WRITE_FAIL notification.
3133 * @data Data to write
3134 * @len Length of @data
3135 *
3136 * @returns 0 for success, <0 otherwise
3137 *
3138 * Data may be written immediately after msm_smux_open() is called,
3139 * but the data will wait in the transmit queue until the channel has
3140 * been fully opened.
3141 *
3142 * Once the data has been written, the client will receive either a completion
3143 * (SMUX_WRITE_DONE) or a failure notice (SMUX_WRITE_FAIL).
3144 */
3145int msm_smux_write(uint8_t lcid, void *pkt_priv, const void *data, int len)
3146{
3147 struct smux_lch_t *ch;
3148 struct smux_pkt_t *pkt;
3149 int tx_ready = 0;
3150 unsigned long flags;
3151 int ret;
3152
3153 if (smux_assert_lch_id(lcid))
3154 return -ENXIO;
3155
3156 ch = &smux_lch[lcid];
3157 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
3158
3159 if (ch->local_state != SMUX_LCH_LOCAL_OPENED &&
3160 ch->local_state != SMUX_LCH_LOCAL_OPENING) {
3161		pr_err("%s: invalid local state %d channel %d\n",
3162 __func__, ch->local_state, lcid);
3163 ret = -EINVAL;
3164 goto out;
3165 }
3166
3167 if (len > SMUX_MAX_PKT_SIZE - sizeof(struct smux_hdr_t)) {
3168 pr_err("%s: payload %d too large\n",
3169 __func__, len);
3170 ret = -E2BIG;
3171 goto out;
3172 }
3173
3174 pkt = smux_alloc_pkt();
3175 if (!pkt) {
3176 ret = -ENOMEM;
3177 goto out;
3178 }
3179
3180 pkt->hdr.cmd = SMUX_CMD_DATA;
3181 pkt->hdr.lcid = lcid;
3182 pkt->hdr.flags = 0;
3183 pkt->hdr.payload_len = len;
3184 pkt->payload = (void *)data;
3185 pkt->priv = pkt_priv;
3186 pkt->hdr.pad_len = 0;
3187
3188 spin_lock(&ch->tx_lock_lhb2);
3189 /* verify high watermark */
Angshuman Sarkar98f78122012-07-24 14:50:42 +05303190	SMUX_DBG("smux: %s: pending %d\n", __func__, ch->tx_pending_data_cnt);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003191
Eric Holmberg2e0906f2012-06-26 13:29:14 -06003192 if (ch->tx_pending_data_cnt >= SMUX_TX_WM_HIGH) {
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003193 pr_err("%s: ch %d high watermark %d exceeded %d\n",
Eric Holmberg2e0906f2012-06-26 13:29:14 -06003194 __func__, lcid, SMUX_TX_WM_HIGH,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003195 ch->tx_pending_data_cnt);
3196 ret = -EAGAIN;
3197 goto out_inner;
3198 }
3199
3200 /* queue packet for transmit */
Eric Holmberg2e0906f2012-06-26 13:29:14 -06003201 if (++ch->tx_pending_data_cnt == SMUX_TX_WM_HIGH) {
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003202 ch->notify_lwm = 1;
3203 pr_err("%s: high watermark hit\n", __func__);
3204 schedule_notify(lcid, SMUX_HIGH_WM_HIT, NULL);
3205 }
3206 list_add_tail(&pkt->list, &ch->tx_queue);
3207
3208 /* add to ready list */
3209 if (IS_FULLY_OPENED(ch))
3210 tx_ready = 1;
3211
3212 ret = 0;
3213
3214out_inner:
3215 spin_unlock(&ch->tx_lock_lhb2);
3216
3217out:
3218 if (ret)
3219 smux_free_pkt(pkt);
3220 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
3221
3222 if (tx_ready)
3223 list_channel(ch);
3224
3225 return ret;
3226}
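
/*
 * Illustrative sketch (not part of the driver): a minimal client TX path
 * built on msm_smux_write().  MY_LCID and my_wait_for_low_wm() are
 * hypothetical client names.  Note that the queued packet stores a pointer
 * to @data rather than a copy, so the buffer must remain valid until the
 * SMUX_WRITE_DONE or SMUX_WRITE_FAIL notification arrives.
 *
 *	static int my_send(const void *buf, int len)
 *	{
 *		int rc = msm_smux_write(MY_LCID, (void *)buf, buf, len);
 *
 *		if (rc == -EAGAIN)
 *			rc = my_wait_for_low_wm();
 *		return rc;
 *	}
 */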

/**
 * Returns true if the TX queue is currently full (high water mark).
 *
 * @lcid Logical channel ID
 * @returns 0 if channel is not full
 *          1 if it is full
 *          < 0 for error
 */
int msm_smux_is_ch_full(uint8_t lcid)
{
	struct smux_lch_t *ch;
	unsigned long flags;
	int is_full = 0;

	if (smux_assert_lch_id(lcid))
		return -ENXIO;

	ch = &smux_lch[lcid];

	spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
	if (ch->tx_pending_data_cnt >= SMUX_TX_WM_HIGH)
		is_full = 1;
	spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);

	return is_full;
}

/**
 * Returns true if the TX queue has space for more packets (it is at or
 * below the low water mark).
 *
 * @lcid Logical channel ID
 * @returns 0 if channel is above low watermark
 *          1 if it's at or below the low watermark
 *          < 0 for error
 */
int msm_smux_is_ch_low(uint8_t lcid)
{
	struct smux_lch_t *ch;
	unsigned long flags;
	int is_low = 0;

	if (smux_assert_lch_id(lcid))
		return -ENXIO;

	ch = &smux_lch[lcid];

	spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
	if (ch->tx_pending_data_cnt <= SMUX_TX_WM_LOW)
		is_low = 1;
	spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);

	return is_low;
}
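
/*
 * Illustrative sketch (assumed client code): using the watermark queries
 * for simple flow control.  MY_LCID is a hypothetical channel ID.  The
 * check is advisory only -- msm_smux_write() re-checks the high watermark
 * under the TX lock and returns -EAGAIN if the queue filled in between.
 *
 *	static int my_try_send(const void *buf, int len)
 *	{
 *		if (msm_smux_is_ch_full(MY_LCID) == 1)
 *			return -EAGAIN;
 *		return msm_smux_write(MY_LCID, (void *)buf, buf, len);
 *	}
 */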

/**
 * Send TIOCM status update.
 *
 * @ch Channel for update
 *
 * @returns 0 for success, <0 for failure
 *
 * Channel lock must be held before calling.
 */
static int smux_send_status_cmd(struct smux_lch_t *ch)
{
	struct smux_pkt_t *pkt;

	if (!ch)
		return -EINVAL;

	pkt = smux_alloc_pkt();
	if (!pkt)
		return -ENOMEM;

	pkt->hdr.lcid = ch->lcid;
	pkt->hdr.cmd = SMUX_CMD_STATUS;
	pkt->hdr.flags = ch->local_tiocm;
	pkt->hdr.payload_len = 0;
	pkt->hdr.pad_len = 0;
	smux_tx_queue(pkt, ch, 0);

	return 0;
}

/**
 * Internal helper function for getting the TIOCM status with
 * state_lock_lhb1 already locked.
 *
 * @ch Channel pointer
 *
 * @returns TIOCM status
 */
long msm_smux_tiocm_get_atomic(struct smux_lch_t *ch)
{
	long status = 0x0;

	status |= (ch->remote_tiocm & SMUX_CMD_STATUS_RTC) ? TIOCM_DSR : 0;
	status |= (ch->remote_tiocm & SMUX_CMD_STATUS_RTR) ? TIOCM_CTS : 0;
	status |= (ch->remote_tiocm & SMUX_CMD_STATUS_RI) ? TIOCM_RI : 0;
	status |= (ch->remote_tiocm & SMUX_CMD_STATUS_DCD) ? TIOCM_CD : 0;

	status |= (ch->local_tiocm & SMUX_CMD_STATUS_RTC) ? TIOCM_DTR : 0;
	status |= (ch->local_tiocm & SMUX_CMD_STATUS_RTR) ? TIOCM_RTS : 0;

	return status;
}

/**
 * Get the TIOCM status bits.
 *
 * @lcid Logical channel ID
 *
 * @returns >= 0 TIOCM status bits
 *          < 0  Error condition
 */
long msm_smux_tiocm_get(uint8_t lcid)
{
	struct smux_lch_t *ch;
	unsigned long flags;
	long status = 0x0;

	if (smux_assert_lch_id(lcid))
		return -ENXIO;

	ch = &smux_lch[lcid];
	spin_lock_irqsave(&ch->state_lock_lhb1, flags);
	status = msm_smux_tiocm_get_atomic(ch);
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);

	return status;
}

/**
 * Set/clear the TIOCM status bits.
 *
 * @lcid Logical channel ID
 * @set Bits to set
 * @clear Bits to clear
 *
 * @returns 0 for success; < 0 for failure
 *
 * If a bit is specified in both the @set and @clear masks, then the clear bit
 * definition will dominate and the bit will be cleared.
 */
int msm_smux_tiocm_set(uint8_t lcid, uint32_t set, uint32_t clear)
{
	struct smux_lch_t *ch;
	unsigned long flags;
	uint8_t old_status;
	uint8_t status_set = 0x0;
	uint8_t status_clear = 0x0;
	int tx_ready = 0;
	int ret = 0;

	if (smux_assert_lch_id(lcid))
		return -ENXIO;

	ch = &smux_lch[lcid];
	spin_lock_irqsave(&ch->state_lock_lhb1, flags);

	status_set |= (set & TIOCM_DTR) ? SMUX_CMD_STATUS_RTC : 0;
	status_set |= (set & TIOCM_RTS) ? SMUX_CMD_STATUS_RTR : 0;
	status_set |= (set & TIOCM_RI) ? SMUX_CMD_STATUS_RI : 0;
	status_set |= (set & TIOCM_CD) ? SMUX_CMD_STATUS_DCD : 0;

	status_clear |= (clear & TIOCM_DTR) ? SMUX_CMD_STATUS_RTC : 0;
	status_clear |= (clear & TIOCM_RTS) ? SMUX_CMD_STATUS_RTR : 0;
	status_clear |= (clear & TIOCM_RI) ? SMUX_CMD_STATUS_RI : 0;
	status_clear |= (clear & TIOCM_CD) ? SMUX_CMD_STATUS_DCD : 0;

	old_status = ch->local_tiocm;
	ch->local_tiocm |= status_set;
	ch->local_tiocm &= ~status_clear;

	if (ch->local_tiocm != old_status) {
		ret = smux_send_status_cmd(ch);
		tx_ready = 1;
	}
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);

	if (tx_ready)
		list_channel(ch);

	return ret;
}
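
/*
 * Illustrative sketch (assumed client code): asserting DTR/RTS on a
 * channel and reading back the combined local/remote status.  MY_LCID and
 * my_start_streaming() are hypothetical client names; a set TIOCM_CTS bit
 * indicates the remote side has signaled ready-to-receive.
 *
 *	long bits;
 *
 *	msm_smux_tiocm_set(MY_LCID, TIOCM_DTR | TIOCM_RTS, 0);
 *	bits = msm_smux_tiocm_get(MY_LCID);
 *	if (bits >= 0 && (bits & TIOCM_CTS))
 *		my_start_streaming();
 */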

/**********************************************************************/
/* Subsystem Restart                                                  */
/**********************************************************************/
static struct notifier_block ssr_notifier = {
	.notifier_call = ssr_notifier_cb,
};

/**
 * Handle Subsystem Restart (SSR) notifications.
 *
 * @this Pointer to ssr_notifier
 * @code SSR code (SUBSYS_BEFORE_SHUTDOWN, SUBSYS_AFTER_SHUTDOWN, and
 *       SUBSYS_AFTER_POWERUP are handled; other codes are ignored)
 * @data Data pointer (not used)
 *
 * @returns NOTIFY_DONE
 */
static int ssr_notifier_cb(struct notifier_block *this,
				unsigned long code,
				void *data)
{
	unsigned long flags;
	int i;
	int tmp;
	int power_off_uart = 0;

	if (code == SUBSYS_BEFORE_SHUTDOWN) {
		SMUX_DBG("smux: %s: ssr - before shutdown\n", __func__);
		mutex_lock(&smux.mutex_lha0);
		smux.in_reset = 1;
		mutex_unlock(&smux.mutex_lha0);
		return NOTIFY_DONE;
	} else if (code == SUBSYS_AFTER_POWERUP) {
		/* re-register platform devices */
		SMUX_DBG("smux: %s: ssr - after power-up\n", __func__);
		mutex_lock(&smux.mutex_lha0);
		if (smux.ld_open_count > 0
				&& !smux.platform_devs_registered) {
			for (i = 0; i < ARRAY_SIZE(smux_devs); ++i) {
				SMUX_DBG("smux: %s: register pdev '%s'\n",
					__func__, smux_devs[i].name);
				smux_devs[i].dev.release = smux_pdev_release;
				tmp = platform_device_register(&smux_devs[i]);
				if (tmp)
					pr_err("%s: error %d registering device %s\n",
						__func__, tmp, smux_devs[i].name);
			}
			smux.platform_devs_registered = 1;
		}
		mutex_unlock(&smux.mutex_lha0);
		return NOTIFY_DONE;
	} else if (code != SUBSYS_AFTER_SHUTDOWN) {
		return NOTIFY_DONE;
	}
	SMUX_DBG("smux: %s: ssr - after shutdown\n", __func__);

	/* Cleanup channels */
	smux_flush_workqueues();
	mutex_lock(&smux.mutex_lha0);
	if (smux.ld_open_count > 0) {
		smux_lch_purge();
		if (smux.tty)
			tty_driver_flush_buffer(smux.tty);

		/* Unregister platform devices */
		if (smux.platform_devs_registered) {
			for (i = 0; i < ARRAY_SIZE(smux_devs); ++i) {
				SMUX_DBG("smux: %s: unregister pdev '%s'\n",
					__func__, smux_devs[i].name);
				platform_device_unregister(&smux_devs[i]);
			}
			smux.platform_devs_registered = 0;
		}

		/* Power-down UART */
		spin_lock_irqsave(&smux.tx_lock_lha2, flags);
		if (smux.power_state != SMUX_PWR_OFF) {
			SMUX_PWR("smux: %s: SSR - turning off UART\n",
				__func__);
			smux.power_state = SMUX_PWR_OFF;
			power_off_uart = 1;
		}
		smux.powerdown_enabled = 0;
		spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);

		if (power_off_uart)
			smux_uart_power_off_atomic();
	}
	smux.tx_activity_flag = 0;
	smux.rx_activity_flag = 0;
	smux.rx_state = SMUX_RX_IDLE;
	smux.in_reset = 0;
	mutex_unlock(&smux.mutex_lha0);

	return NOTIFY_DONE;
}

/**********************************************************************/
/* Line Discipline Interface                                          */
/**********************************************************************/
static void smux_pdev_release(struct device *dev)
{
	struct platform_device *pdev;

	pdev = container_of(dev, struct platform_device, dev);
	SMUX_DBG("smux: %s: releasing pdev %p '%s'\n",
		__func__, pdev, pdev->name);
	memset(&pdev->dev, 0x0, sizeof(pdev->dev));
}

static int smuxld_open(struct tty_struct *tty)
{
	int i;
	int tmp;
	unsigned long flags;

	if (!smux.is_initialized)
		return -ENODEV;

	mutex_lock(&smux.mutex_lha0);
	if (smux.ld_open_count) {
		pr_err("%s: %p multiple instances not supported\n",
			__func__, tty);
		mutex_unlock(&smux.mutex_lha0);
		return -EEXIST;
	}
	if (tty->ops->write == NULL) {
		pr_err("%s: tty->ops->write is NULL\n", __func__);
		mutex_unlock(&smux.mutex_lha0);
		return -EINVAL;
	}

	/* connect to TTY */
	++smux.ld_open_count;
	smux.in_reset = 0;
	smux.tty = tty;
	tty->disc_data = &smux;
	tty->receive_room = TTY_RECEIVE_ROOM;
	tty_driver_flush_buffer(tty);

	/* power-down the UART if we are idle */
	spin_lock_irqsave(&smux.tx_lock_lha2, flags);
	if (smux.power_state == SMUX_PWR_OFF) {
		SMUX_PWR("smux: %s: powering off uart\n", __func__);
		smux.power_state = SMUX_PWR_OFF_FLUSH;
		spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
		queue_work(smux_tx_wq, &smux_inactivity_work);
	} else {
		spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
	}

	/* register platform devices */
	for (i = 0; i < ARRAY_SIZE(smux_devs); ++i) {
		SMUX_DBG("smux: %s: register pdev '%s'\n",
			__func__, smux_devs[i].name);
		smux_devs[i].dev.release = smux_pdev_release;
		tmp = platform_device_register(&smux_devs[i]);
		if (tmp)
			pr_err("%s: error %d registering device %s\n",
				__func__, tmp, smux_devs[i].name);
	}
	smux.platform_devs_registered = 1;
	mutex_unlock(&smux.mutex_lha0);
	return 0;
}
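
/*
 * Illustrative sketch (assumed userspace code, not part of the driver):
 * smuxld_open() runs when a process attaches this line discipline to the
 * HSUART tty.  The device node name is an assumption for the example, and
 * the ldisc number passed to TIOCSETD must match the kernel's N_SMUX value.
 *
 *	int fd = open("/dev/ttyHS0", O_RDWR);
 *	int ldisc = N_SMUX;
 *
 *	if (fd >= 0 && ioctl(fd, TIOCSETD, &ldisc) < 0)
 *		perror("TIOCSETD");
 */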

static void smuxld_close(struct tty_struct *tty)
{
	unsigned long flags;
	int power_up_uart = 0;
	int i;

	SMUX_DBG("smux: %s: ldisc unload\n", __func__);
	smux_flush_workqueues();

	mutex_lock(&smux.mutex_lha0);
	if (smux.ld_open_count <= 0) {
		pr_err("%s: invalid ld count %d\n", __func__,
			smux.ld_open_count);
		mutex_unlock(&smux.mutex_lha0);
		return;
	}
	--smux.ld_open_count;

	/* Cleanup channels */
	smux_lch_purge();

	/* Unregister platform devices */
	if (smux.platform_devs_registered) {
		for (i = 0; i < ARRAY_SIZE(smux_devs); ++i) {
			SMUX_DBG("smux: %s: unregister pdev '%s'\n",
				__func__, smux_devs[i].name);
			platform_device_unregister(&smux_devs[i]);
		}
		smux.platform_devs_registered = 0;
	}

	/* Schedule UART power-up if it's down */
	spin_lock_irqsave(&smux.tx_lock_lha2, flags);
	if (smux.power_state == SMUX_PWR_OFF)
		power_up_uart = 1;
	smux.power_state = SMUX_PWR_OFF;
	smux.powerdown_enabled = 0;
	smux.tx_activity_flag = 0;
	smux.rx_activity_flag = 0;
	spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);

	if (power_up_uart)
		smux_uart_power_on_atomic();

	smux.rx_state = SMUX_RX_IDLE;

	/* Disconnect from TTY */
	smux.tty = NULL;
	mutex_unlock(&smux.mutex_lha0);
	SMUX_DBG("smux: %s: ldisc complete\n", __func__);
}

/**
 * Receive data from TTY Line Discipline.
 *
 * @tty TTY structure
 * @cp Character data
 * @fp Flag data
 * @count Size of character and flag data
 */
void smuxld_receive_buf(struct tty_struct *tty, const unsigned char *cp,
			   char *fp, int count)
{
	int i;
	int last_idx = 0;
	const char *tty_name = NULL;
	char *f;

	/* verify error flags */
	for (i = 0, f = fp; i < count; ++i, ++f) {
		if (*f != TTY_NORMAL) {
			if (tty)
				tty_name = tty->name;
			pr_err("%s: TTY %s Error %d (%s)\n", __func__,
				tty_name, *f, tty_flag_to_str(*f));

			/* feed all previous valid data to the parser */
			smux_rx_state_machine(cp + last_idx, i - last_idx,
					TTY_NORMAL);

			/* feed bad data to parser */
			smux_rx_state_machine(cp + i, 1, *f);
			last_idx = i + 1;
		}
	}

	/* feed data to RX state machine */
	smux_rx_state_machine(cp + last_idx, count - last_idx, TTY_NORMAL);
}
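
/*
 * Worked example of the splitting above: for count == 5 with an error
 * flag at index 2, the RX state machine is fed three times -- cp[0..1]
 * as TTY_NORMAL, cp[2] with its error flag, then cp[3..4] as TTY_NORMAL
 * by the final call after the loop.
 */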

static void smuxld_flush_buffer(struct tty_struct *tty)
{
	pr_err("%s: not supported\n", __func__);
}

static ssize_t smuxld_chars_in_buffer(struct tty_struct *tty)
{
	pr_err("%s: not supported\n", __func__);
	return -ENODEV;
}

static ssize_t smuxld_read(struct tty_struct *tty, struct file *file,
		unsigned char __user *buf, size_t nr)
{
	pr_err("%s: not supported\n", __func__);
	return -ENODEV;
}

static ssize_t smuxld_write(struct tty_struct *tty, struct file *file,
		const unsigned char *buf, size_t nr)
{
	pr_err("%s: not supported\n", __func__);
	return -ENODEV;
}

static int smuxld_ioctl(struct tty_struct *tty, struct file *file,
		unsigned int cmd, unsigned long arg)
{
	pr_err("%s: not supported\n", __func__);
	return -ENODEV;
}

static unsigned int smuxld_poll(struct tty_struct *tty, struct file *file,
		struct poll_table_struct *tbl)
{
	pr_err("%s: not supported\n", __func__);
	return -ENODEV;
}

static void smuxld_write_wakeup(struct tty_struct *tty)
{
	pr_err("%s: not supported\n", __func__);
}

static struct tty_ldisc_ops smux_ldisc_ops = {
	.owner = THIS_MODULE,
	.magic = TTY_LDISC_MAGIC,
	.name = "n_smux",
	.open = smuxld_open,
	.close = smuxld_close,
	.flush_buffer = smuxld_flush_buffer,
	.chars_in_buffer = smuxld_chars_in_buffer,
	.read = smuxld_read,
	.write = smuxld_write,
	.ioctl = smuxld_ioctl,
	.poll = smuxld_poll,
	.receive_buf = smuxld_receive_buf,
	.write_wakeup = smuxld_write_wakeup
};

static int __init smux_init(void)
{
	int ret;

	mutex_init(&smux.mutex_lha0);

	spin_lock_init(&smux.rx_lock_lha1);
	smux.rx_state = SMUX_RX_IDLE;
	smux.power_state = SMUX_PWR_OFF;
	smux.pwr_wakeup_delay_us = 1;
	smux.powerdown_enabled = 0;
	smux.power_ctl_remote_req_received = 0;
	INIT_LIST_HEAD(&smux.power_queue);
	smux.rx_activity_flag = 0;
	smux.tx_activity_flag = 0;
	smux.recv_len = 0;
	smux.tty = NULL;
	smux.ld_open_count = 0;
	smux.in_reset = 0;
	smux.is_initialized = 1;
	smux.platform_devs_registered = 0;
	smux_byte_loopback = 0;

	spin_lock_init(&smux.tx_lock_lha2);
	INIT_LIST_HEAD(&smux.lch_tx_ready_list);

	ret = tty_register_ldisc(N_SMUX, &smux_ldisc_ops);
	if (ret != 0) {
		pr_err("%s: error %d registering line discipline\n",
			__func__, ret);
		return ret;
	}

	subsys_notif_register_notifier("external_modem", &ssr_notifier);

	ret = lch_init();
	if (ret != 0) {
		pr_err("%s: lch_init failed\n", __func__);
		return ret;
	}

	log_ctx = ipc_log_context_create(1, "smux");
	if (!log_ctx) {
		pr_err("%s: unable to create log context\n", __func__);
		disable_ipc_logging = 1;
	}

	return 0;
}

static void __exit smux_exit(void)
{
	int ret;

	ret = tty_unregister_ldisc(N_SMUX);
	if (ret != 0) {
		pr_err("%s: error %d unregistering line discipline\n",
			__func__, ret);
		return;
	}
}

module_init(smux_init);
module_exit(smux_exit);

MODULE_DESCRIPTION("Serial Mux TTY Line Discipline");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_LDISC(N_SMUX);