Eric Holmberg8ed30f22012-05-10 19:16:51 -06001/* drivers/tty/n_smux.c
2 *
3 * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
4 *
5 * This software is licensed under the terms of the GNU General Public
6 * License version 2, as published by the Free Software Foundation, and
7 * may be copied, distributed, and modified under those terms.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 */
15#include <linux/module.h>
16#include <linux/types.h>
17#include <linux/errno.h>
18#include <linux/tty.h>
19#include <linux/tty_flip.h>
20#include <linux/tty_driver.h>
21#include <linux/smux.h>
22#include <linux/list.h>
23#include <linux/kfifo.h>
24#include <linux/slab.h>
26#include <linux/platform_device.h>
27#include <linux/delay.h>
Eric Holmberged1f00c2012-06-07 09:45:18 -060028#include <mach/subsystem_notif.h>
29#include <mach/subsystem_restart.h>
Eric Holmberg8ed30f22012-05-10 19:16:51 -060030#include <mach/msm_serial_hs.h>
31#include "smux_private.h"
32#include "smux_loopback.h"
33
34#define SMUX_NOTIFY_FIFO_SIZE 128
35#define SMUX_TX_QUEUE_SIZE 256
Eric Holmberg8ed30f22012-05-10 19:16:51 -060036#define SMUX_PKT_LOG_SIZE 80
37
38/* Maximum size we can accept in a single RX buffer */
39#define TTY_RECEIVE_ROOM 65536
40#define TTY_BUFFER_FULL_WAIT_MS 50
41
42/* maximum sleep time between wakeup attempts */
43#define SMUX_WAKEUP_DELAY_MAX (1 << 20)
44
45/* minimum delay for scheduling delayed work */
46#define SMUX_WAKEUP_DELAY_MIN (1 << 15)
47
48/* inactivity timeout for no rx/tx activity */
Eric Holmberg05620172012-07-03 11:13:18 -060049#define SMUX_INACTIVITY_TIMEOUT_MS 1000000
Eric Holmberg8ed30f22012-05-10 19:16:51 -060050
Eric Holmbergb8435c82012-06-05 14:51:29 -060051/* RX get_rx_buffer retry timeout values */
52#define SMUX_RX_RETRY_MIN_MS (1 << 0) /* 1 ms */
53#define SMUX_RX_RETRY_MAX_MS (1 << 10) /* 1024 ms */
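/*
 * The retry delay presumably doubles from the minimum toward the maximum
 * on successive get_rx_buffer() failures (an assumption for illustration;
 * the retry worker body is outside this excerpt).
 */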
54
Eric Holmberg8ed30f22012-05-10 19:16:51 -060055enum {
56 MSM_SMUX_DEBUG = 1U << 0,
57 MSM_SMUX_INFO = 1U << 1,
58 MSM_SMUX_POWER_INFO = 1U << 2,
59 MSM_SMUX_PKT = 1U << 3,
60};
61
62static int smux_debug_mask;
63module_param_named(debug_mask, smux_debug_mask,
64 int, S_IRUGO | S_IWUSR | S_IWGRP);
65
66/* Simulated wakeup used for testing */
67int smux_byte_loopback;
68module_param_named(byte_loopback, smux_byte_loopback,
69 int, S_IRUGO | S_IWUSR | S_IWGRP);
70int smux_simulate_wakeup_delay = 1;
71module_param_named(simulate_wakeup_delay, smux_simulate_wakeup_delay,
72 int, S_IRUGO | S_IWUSR | S_IWGRP);
73
74#define SMUX_DBG(x...) do { \
75 if (smux_debug_mask & MSM_SMUX_DEBUG) \
76 pr_info(x); \
77} while (0)
78
Eric Holmbergff0b0112012-06-08 15:06:57 -060079#define SMUX_PWR(x...) do { \
80 if (smux_debug_mask & MSM_SMUX_POWER_INFO) \
81 pr_info(x); \
82} while (0)
83
Eric Holmberg4dd6e1a2012-06-26 13:46:29 -060084#define SMUX_PWR_PKT_RX(pkt) do { \
85 if (smux_debug_mask & MSM_SMUX_POWER_INFO) \
86 smux_log_pkt(pkt, 1); \
87} while (0)
88
89#define SMUX_PWR_PKT_TX(pkt) do { \
90 if (smux_debug_mask & MSM_SMUX_POWER_INFO) { \
91 if (pkt->hdr.cmd == SMUX_CMD_BYTE && \
92 pkt->hdr.flags == SMUX_WAKEUP_ACK) \
93 pr_info("smux: TX Wakeup ACK\n"); \
94 else if (pkt->hdr.cmd == SMUX_CMD_BYTE && \
95 pkt->hdr.flags == SMUX_WAKEUP_REQ) \
96 pr_info("smux: TX Wakeup REQ\n"); \
97 else \
98 smux_log_pkt(pkt, 0); \
99 } \
100} while (0)
101
102#define SMUX_PWR_BYTE_TX(pkt) do { \
103 if (smux_debug_mask & MSM_SMUX_POWER_INFO) { \
104 smux_log_pkt(pkt, 0); \
105 } \
106} while (0)
107
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600108#define SMUX_LOG_PKT_RX(pkt) do { \
109 if (smux_debug_mask & MSM_SMUX_PKT) \
110 smux_log_pkt(pkt, 1); \
111} while (0)
112
113#define SMUX_LOG_PKT_TX(pkt) do { \
114 if (smux_debug_mask & MSM_SMUX_PKT) \
115 smux_log_pkt(pkt, 0); \
116} while (0)
117
118/**
119 * Return true if channel is fully opened (both
120 * local and remote sides are in the OPENED state).
121 */
122#define IS_FULLY_OPENED(ch) \
123 (ch && (ch)->local_state == SMUX_LCH_LOCAL_OPENED \
124 && (ch)->remote_state == SMUX_LCH_REMOTE_OPENED)
125
126static struct platform_device smux_devs[] = {
127 {.name = "SMUX_CTL", .id = -1},
128 {.name = "SMUX_RMNET", .id = -1},
129 {.name = "SMUX_DUN_DATA_HSUART", .id = 0},
130 {.name = "SMUX_RMNET_DATA_HSUART", .id = 1},
131 {.name = "SMUX_RMNET_CTL_HSUART", .id = 0},
132 {.name = "SMUX_DIAG", .id = -1},
133};
134
135enum {
136 SMUX_CMD_STATUS_RTC = 1 << 0,
137 SMUX_CMD_STATUS_RTR = 1 << 1,
138 SMUX_CMD_STATUS_RI = 1 << 2,
139 SMUX_CMD_STATUS_DCD = 1 << 3,
140 SMUX_CMD_STATUS_FLOW_CNTL = 1 << 4,
141};
142
143/* Channel mode */
144enum {
145 SMUX_LCH_MODE_NORMAL,
146 SMUX_LCH_MODE_LOCAL_LOOPBACK,
147 SMUX_LCH_MODE_REMOTE_LOOPBACK,
148};
149
150enum {
151 SMUX_RX_IDLE,
152 SMUX_RX_MAGIC,
153 SMUX_RX_HDR,
154 SMUX_RX_PAYLOAD,
155 SMUX_RX_FAILURE,
156};
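/*
 * Presumed RX parser progression (a sketch inferred from the state names;
 * the parser loop itself is outside this excerpt): SMUX_RX_IDLE ->
 * SMUX_RX_MAGIC -> SMUX_RX_HDR -> SMUX_RX_PAYLOAD for each packet, with
 * SMUX_RX_FAILURE entered on framing errors until resynchronization.
 */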
157
158/**
159 * Power states.
160 *
161 * The _FLUSH states are internal transitional states and are not part of the
162 * official state machine.
163 */
164enum {
165 SMUX_PWR_OFF,
166 SMUX_PWR_TURNING_ON,
167 SMUX_PWR_ON,
Eric Holmberga9b06472012-06-22 09:46:34 -0600168 SMUX_PWR_TURNING_OFF_FLUSH, /* power-off req/ack in TX queue */
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600169 SMUX_PWR_TURNING_OFF,
170 SMUX_PWR_OFF_FLUSH,
171};
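/*
 * Typical power-down sequence (a sketch pieced together from the RX power
 * handler and TX comments below; the exact path depends on which side
 * initiates the power-down):
 *
 *	SMUX_PWR_ON -> SMUX_PWR_TURNING_OFF_FLUSH -> SMUX_PWR_TURNING_OFF
 *		-> SMUX_PWR_OFF_FLUSH -> SMUX_PWR_OFF
 */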
172
173/**
174 * Logical Channel Structure. One instance per channel.
175 *
176 * Locking Hierarchy
177 * Each lock has a postfix that describes the locking level. If multiple locks
178 * are required, only increasing lock hierarchy numbers may be locked which
179 * ensures avoiding a deadlock.
180 *
181 * Locking Example
182 * If state_lock_lhb1 is currently held and the TX list needs to be
183 * manipulated, then tx_lock_lhb2 may be locked since its locking hierarchy
184 * is greater. However, if tx_lock_lhb2 is held, then state_lock_lhb1 may
185 * not be acquired since it would result in a deadlock.
186 *
187 * Note that the Line Discipline locks (*_lha) should always be acquired
188 * before the logical channel locks.
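 *
 * Illustrative nesting (matching the pattern used in smux_lch_purge()):
 *
 *	spin_lock_irqsave(&ch->state_lock_lhb1, flags);	<- lhb1 first
 *	spin_lock(&ch->tx_lock_lhb2);			<- then lhb2
 *	...
 *	spin_unlock(&ch->tx_lock_lhb2);
 *	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);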
189 */
190struct smux_lch_t {
191 /* channel state */
192 spinlock_t state_lock_lhb1;
193 uint8_t lcid;
194 unsigned local_state;
195 unsigned local_mode;
196 uint8_t local_tiocm;
Eric Holmberg2e0906f2012-06-26 13:29:14 -0600197 unsigned options;
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600198
199 unsigned remote_state;
200 unsigned remote_mode;
201 uint8_t remote_tiocm;
202
203 int tx_flow_control;
Eric Holmberg2e0906f2012-06-26 13:29:14 -0600204 int rx_flow_control_auto;
205 int rx_flow_control_client;
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600206
207 /* client callbacks and private data */
208 void *priv;
209 void (*notify)(void *priv, int event_type, const void *metadata);
210 int (*get_rx_buffer)(void *priv, void **pkt_priv, void **buffer,
211 int size);
212
Eric Holmbergb8435c82012-06-05 14:51:29 -0600213 /* RX Info */
214 struct list_head rx_retry_queue;
215 unsigned rx_retry_queue_cnt;
216 struct delayed_work rx_retry_work;
217
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600218 /* TX Info */
219 spinlock_t tx_lock_lhb2;
220 struct list_head tx_queue;
221 struct list_head tx_ready_list;
222 unsigned tx_pending_data_cnt;
223 unsigned notify_lwm;
224};
225
226union notifier_metadata {
227 struct smux_meta_disconnected disconnected;
228 struct smux_meta_read read;
229 struct smux_meta_write write;
230 struct smux_meta_tiocm tiocm;
231};
232
233struct smux_notify_handle {
234 void (*notify)(void *priv, int event_type, const void *metadata);
235 void *priv;
236 int event_type;
237 union notifier_metadata *metadata;
238};
239
240/**
Eric Holmbergb8435c82012-06-05 14:51:29 -0600241 * Get RX Buffer Retry structure.
242 *
243 * This is used for clients that are unable to provide an RX buffer
244 * immediately. This structure temporarily holds the packet data while a
245 * retry is performed.
246 */
247struct smux_rx_pkt_retry {
248 struct smux_pkt_t *pkt;
249 struct list_head rx_retry_list;
250 unsigned timeout_in_ms;
251};
252
253/**
Eric Holmberg0560f7a2012-05-31 15:50:26 -0600254 * Receive worker data structure.
255 *
256 * One instance is created for every call to smux_rx_state_machine.
257 */
258struct smux_rx_worker_data {
259 const unsigned char *data;
260 int len;
261 int flag;
262
263 struct work_struct work;
264 struct completion work_complete;
265};
266
267/**
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600268 * Line discipline and module structure.
269 *
270 * Only one instance exists, since multiple instances of the line discipline
271 * are not allowed.
272 */
273struct smux_ldisc_t {
Eric Holmberged1f00c2012-06-07 09:45:18 -0600274 struct mutex mutex_lha0;
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600275
276 int is_initialized;
277 int in_reset;
278 int ld_open_count;
279 struct tty_struct *tty;
280
Eric Holmberg0560f7a2012-05-31 15:50:26 -0600281 /* RX State Machine (single-threaded access by smux_rx_wq) */
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600282 unsigned char recv_buf[SMUX_MAX_PKT_SIZE];
283 unsigned int recv_len;
284 unsigned int pkt_remain;
285 unsigned rx_state;
Eric Holmberg0560f7a2012-05-31 15:50:26 -0600286
287 /* RX Activity - accessed by multiple threads */
288 spinlock_t rx_lock_lha1;
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600289 unsigned rx_activity_flag;
290
291 /* TX / Power */
292 spinlock_t tx_lock_lha2;
293 struct list_head lch_tx_ready_list;
294 unsigned power_state;
295 unsigned pwr_wakeup_delay_us;
296 unsigned tx_activity_flag;
297 unsigned powerdown_enabled;
Eric Holmberga9b06472012-06-22 09:46:34 -0600298 unsigned power_ctl_remote_req_received;
Eric Holmbergffddd4c2012-06-08 12:37:51 -0600299 struct list_head power_queue;
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600300};
301
302
303/* data structures */
304static struct smux_lch_t smux_lch[SMUX_NUM_LOGICAL_CHANNELS];
305static struct smux_ldisc_t smux;
306static const char *tty_error_type[] = {
307 [TTY_NORMAL] = "normal",
308 [TTY_OVERRUN] = "overrun",
309 [TTY_BREAK] = "break",
310 [TTY_PARITY] = "parity",
311 [TTY_FRAME] = "framing",
312};
313
314static const char *smux_cmds[] = {
315 [SMUX_CMD_DATA] = "DATA",
316 [SMUX_CMD_OPEN_LCH] = "OPEN",
317 [SMUX_CMD_CLOSE_LCH] = "CLOSE",
318 [SMUX_CMD_STATUS] = "STATUS",
319 [SMUX_CMD_PWR_CTL] = "PWR",
320 [SMUX_CMD_BYTE] = "Raw Byte",
321};
322
323static void smux_notify_local_fn(struct work_struct *work);
324static DECLARE_WORK(smux_notify_local, smux_notify_local_fn);
325
326static struct workqueue_struct *smux_notify_wq;
327static size_t handle_size;
328static struct kfifo smux_notify_fifo;
329static int queued_fifo_notifications;
330static DEFINE_SPINLOCK(notify_lock_lhc1);
331
332static struct workqueue_struct *smux_tx_wq;
Eric Holmberg0560f7a2012-05-31 15:50:26 -0600333static struct workqueue_struct *smux_rx_wq;
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600334static void smux_tx_worker(struct work_struct *work);
335static DECLARE_WORK(smux_tx_work, smux_tx_worker);
336
337static void smux_wakeup_worker(struct work_struct *work);
Eric Holmbergb8435c82012-06-05 14:51:29 -0600338static void smux_rx_retry_worker(struct work_struct *work);
Eric Holmberg0560f7a2012-05-31 15:50:26 -0600339static void smux_rx_worker(struct work_struct *work);
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600340static DECLARE_WORK(smux_wakeup_work, smux_wakeup_worker);
341static DECLARE_DELAYED_WORK(smux_wakeup_delayed_work, smux_wakeup_worker);
342
343static void smux_inactivity_worker(struct work_struct *work);
344static DECLARE_WORK(smux_inactivity_work, smux_inactivity_worker);
345static DECLARE_DELAYED_WORK(smux_delayed_inactivity_work,
346 smux_inactivity_worker);
347
348static long msm_smux_tiocm_get_atomic(struct smux_lch_t *ch);
349static void list_channel(struct smux_lch_t *ch);
350static int smux_send_status_cmd(struct smux_lch_t *ch);
351static int smux_dispatch_rx_pkt(struct smux_pkt_t *pkt);
Eric Holmbergffddd4c2012-06-08 12:37:51 -0600352static void smux_flush_tty(void);
Eric Holmberged1f00c2012-06-07 09:45:18 -0600353static void smux_purge_ch_tx_queue(struct smux_lch_t *ch);
354static int schedule_notify(uint8_t lcid, int event,
355 const union notifier_metadata *metadata);
356static int ssr_notifier_cb(struct notifier_block *this,
357 unsigned long code,
358 void *data);
Eric Holmberg92a67df2012-06-25 13:56:24 -0600359static void smux_uart_power_on_atomic(void);
Eric Holmberg2e0906f2012-06-26 13:29:14 -0600360static int smux_rx_flow_control_updated(struct smux_lch_t *ch);
Eric Holmberg06011322012-07-06 18:17:03 -0600361static void smux_flush_workqueues(void);
Eric Holmbergf6a364e2012-08-07 18:41:44 -0600362static void smux_pdev_release(struct device *dev);
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600363
364/**
365 * Convert TTY Error Flags to string for logging purposes.
366 *
367 * @flag TTY_* flag
368 * @returns String description or NULL if unknown
369 */
370static const char *tty_flag_to_str(unsigned flag)
371{
372 if (flag < ARRAY_SIZE(tty_error_type))
373 return tty_error_type[flag];
374 return NULL;
375}
376
377/**
378 * Convert SMUX Command to string for logging purposes.
379 *
380 * @cmd SMUX command
381 * @returns String description or NULL if unknown
382 */
383static const char *cmd_to_str(unsigned cmd)
384{
385 if (cmd < ARRAY_SIZE(smux_cmds))
386 return smux_cmds[cmd];
387 return NULL;
388}
389
390/**
391 * Set the reset state due to an unrecoverable failure.
392 */
393static void smux_enter_reset(void)
394{
395 pr_err("%s: unrecoverable failure, waiting for ssr\n", __func__);
396 smux.in_reset = 1;
397}
398
399static int lch_init(void)
400{
401 unsigned int id;
402 struct smux_lch_t *ch;
403 int i = 0;
404
405 handle_size = sizeof(struct smux_notify_handle *);
406
407 smux_notify_wq = create_singlethread_workqueue("smux_notify_wq");
408 smux_tx_wq = create_singlethread_workqueue("smux_tx_wq");
Eric Holmberg0560f7a2012-05-31 15:50:26 -0600409 smux_rx_wq = create_singlethread_workqueue("smux_rx_wq");
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600410
411 if (!smux_notify_wq || !smux_tx_wq || !smux_rx_wq) {
412 SMUX_DBG("%s: create_singlethread_workqueue ENOMEM\n",
413 __func__);
414 return -ENOMEM;
415 }
416
417 i |= kfifo_alloc(&smux_notify_fifo,
418 SMUX_NOTIFY_FIFO_SIZE * handle_size,
419 GFP_KERNEL);
420 i |= smux_loopback_init();
421
422 if (i) {
423 pr_err("%s: out of memory error\n", __func__);
424 return -ENOMEM;
425 }
426
427 for (id = 0 ; id < SMUX_NUM_LOGICAL_CHANNELS; id++) {
428 ch = &smux_lch[id];
429
430 spin_lock_init(&ch->state_lock_lhb1);
431 ch->lcid = id;
432 ch->local_state = SMUX_LCH_LOCAL_CLOSED;
433 ch->local_mode = SMUX_LCH_MODE_NORMAL;
434 ch->local_tiocm = 0x0;
Eric Holmberg2e0906f2012-06-26 13:29:14 -0600435 ch->options = SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP;
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600436 ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
437 ch->remote_mode = SMUX_LCH_MODE_NORMAL;
438 ch->remote_tiocm = 0x0;
439 ch->tx_flow_control = 0;
Eric Holmberg2e0906f2012-06-26 13:29:14 -0600440 ch->rx_flow_control_auto = 0;
441 ch->rx_flow_control_client = 0;
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600442 ch->priv = 0;
443 ch->notify = 0;
444 ch->get_rx_buffer = 0;
445
Eric Holmbergb8435c82012-06-05 14:51:29 -0600446 INIT_LIST_HEAD(&ch->rx_retry_queue);
447 ch->rx_retry_queue_cnt = 0;
448 INIT_DELAYED_WORK(&ch->rx_retry_work, smux_rx_retry_worker);
449
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600450 spin_lock_init(&ch->tx_lock_lhb2);
451 INIT_LIST_HEAD(&ch->tx_queue);
452 INIT_LIST_HEAD(&ch->tx_ready_list);
453 ch->tx_pending_data_cnt = 0;
454 ch->notify_lwm = 0;
455 }
456
457 return 0;
458}
459
Eric Holmberged1f00c2012-06-07 09:45:18 -0600460/**
461 * Empty and cleanup all SMUX logical channels for subsystem restart or line
462 * discipline disconnect.
463 */
464static void smux_lch_purge(void)
465{
466 struct smux_lch_t *ch;
467 unsigned long flags;
468 int i;
469
470 /* Empty TX ready list */
471 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
472 while (!list_empty(&smux.lch_tx_ready_list)) {
473 SMUX_DBG("%s: emptying ready list %p\n",
474 __func__, smux.lch_tx_ready_list.next);
475 ch = list_first_entry(&smux.lch_tx_ready_list,
476 struct smux_lch_t,
477 tx_ready_list);
478 list_del(&ch->tx_ready_list);
479 INIT_LIST_HEAD(&ch->tx_ready_list);
480 }
Eric Holmbergffddd4c2012-06-08 12:37:51 -0600481
482 /* Purge Power Queue */
483 while (!list_empty(&smux.power_queue)) {
484 struct smux_pkt_t *pkt;
485
486 pkt = list_first_entry(&smux.power_queue,
487 struct smux_pkt_t,
488 list);
Eric Holmberg6b19f7f2012-06-15 09:53:52 -0600489 list_del(&pkt->list);
Eric Holmbergffddd4c2012-06-08 12:37:51 -0600490 SMUX_DBG("%s: emptying power queue pkt=%p\n",
491 __func__, pkt);
492 smux_free_pkt(pkt);
493 }
Eric Holmberged1f00c2012-06-07 09:45:18 -0600494 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
495
496 /* Close all ports */
497 for (i = 0 ; i < SMUX_NUM_LOGICAL_CHANNELS; i++) {
498 ch = &smux_lch[i];
499 SMUX_DBG("%s: cleaning up lcid %d\n", __func__, i);
500
501 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
502
503 /* Purge TX queue */
504 spin_lock(&ch->tx_lock_lhb2);
505 smux_purge_ch_tx_queue(ch);
506 spin_unlock(&ch->tx_lock_lhb2);
507
508 /* Notify user of disconnect and reset channel state */
509 if (ch->local_state == SMUX_LCH_LOCAL_OPENED ||
510 ch->local_state == SMUX_LCH_LOCAL_CLOSING) {
511 union notifier_metadata meta;
512
513 meta.disconnected.is_ssr = smux.in_reset;
514 schedule_notify(ch->lcid, SMUX_DISCONNECTED, &meta);
515 }
516
517 ch->local_state = SMUX_LCH_LOCAL_CLOSED;
Eric Holmberged1f00c2012-06-07 09:45:18 -0600518 ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
519 ch->remote_mode = SMUX_LCH_MODE_NORMAL;
520 ch->tx_flow_control = 0;
Eric Holmberg2e0906f2012-06-26 13:29:14 -0600521 ch->rx_flow_control_auto = 0;
522 ch->rx_flow_control_client = 0;
Eric Holmberged1f00c2012-06-07 09:45:18 -0600523
524 /* Purge RX retry queue */
525 if (ch->rx_retry_queue_cnt)
526 queue_delayed_work(smux_rx_wq, &ch->rx_retry_work, 0);
527
528 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
529 }
Eric Holmberged1f00c2012-06-07 09:45:18 -0600530}
531
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600532int smux_assert_lch_id(uint32_t lcid)
533{
534 if (lcid >= SMUX_NUM_LOGICAL_CHANNELS)
535 return -ENXIO;
536 else
537 return 0;
538}
539
540/**
541 * Log packet information for debug purposes.
542 *
543 * @pkt Packet to log
544 * @is_recv 1 = RX packet; 0 = TX Packet
545 *
546 * [DIR][LCID] [LOCAL_STATE][LOCAL_MODE]:[REMOTE_STATE][REMOTE_MODE] PKT Info
547 *
548 * PKT Info:
549 * [CMD] flags [flags] len [PAYLOAD_LEN]:[PAD_LEN] [Payload hex bytes]
550 *
551 * Direction: R = Receive, S = Send
552 * Local State: C = Closed; c = closing; o = opening; O = Opened
553 * Local Mode: L = Local loopback; R = Remote loopback; N = Normal
554 * Remote State: C = Closed; O = Opened
555 * Remote Mode: R = Remote loopback; N = Normal
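 *
 * Example (illustrative): a received 4-byte DATA packet on a fully open,
 * normal-mode channel 3 would log as:
 *   smux: R3 ON:ON DATA flags 0 len 4:0 01 02 03 04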
556 */
557static void smux_log_pkt(struct smux_pkt_t *pkt, int is_recv)
558{
559 char logbuf[SMUX_PKT_LOG_SIZE];
560 char cmd_extra[16];
561 int i = 0;
562 int count;
563 int len;
564 char local_state;
565 char local_mode;
566 char remote_state;
567 char remote_mode;
Eric Holmbergbb72c6c2012-07-02 14:51:34 -0600568 struct smux_lch_t *ch = NULL;
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600569 unsigned char *data;
570
Eric Holmbergbb72c6c2012-07-02 14:51:34 -0600571 if (!smux_assert_lch_id(pkt->hdr.lcid))
572 ch = &smux_lch[pkt->hdr.lcid];
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600573
Eric Holmbergbb72c6c2012-07-02 14:51:34 -0600574 if (ch) {
575 switch (ch->local_state) {
576 case SMUX_LCH_LOCAL_CLOSED:
577 local_state = 'C';
578 break;
579 case SMUX_LCH_LOCAL_OPENING:
580 local_state = 'o';
581 break;
582 case SMUX_LCH_LOCAL_OPENED:
583 local_state = 'O';
584 break;
585 case SMUX_LCH_LOCAL_CLOSING:
586 local_state = 'c';
587 break;
588 default:
589 local_state = 'U';
590 break;
591 }
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600592
Eric Holmbergbb72c6c2012-07-02 14:51:34 -0600593 switch (ch->local_mode) {
594 case SMUX_LCH_MODE_LOCAL_LOOPBACK:
595 local_mode = 'L';
596 break;
597 case SMUX_LCH_MODE_REMOTE_LOOPBACK:
598 local_mode = 'R';
599 break;
600 case SMUX_LCH_MODE_NORMAL:
601 local_mode = 'N';
602 break;
603 default:
604 local_mode = 'U';
605 break;
606 }
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600607
Eric Holmbergbb72c6c2012-07-02 14:51:34 -0600608 switch (ch->remote_state) {
609 case SMUX_LCH_REMOTE_CLOSED:
610 remote_state = 'C';
611 break;
612 case SMUX_LCH_REMOTE_OPENED:
613 remote_state = 'O';
614 break;
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600615
Eric Holmbergbb72c6c2012-07-02 14:51:34 -0600616 default:
617 remote_state = 'U';
618 break;
619 }
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600620
Eric Holmbergbb72c6c2012-07-02 14:51:34 -0600621 switch (ch->remote_mode) {
622 case SMUX_LCH_MODE_REMOTE_LOOPBACK:
623 remote_mode = 'R';
624 break;
625 case SMUX_LCH_MODE_NORMAL:
626 remote_mode = 'N';
627 break;
628 default:
629 remote_mode = 'U';
630 break;
631 }
632 } else {
633 /* broadcast channel */
634 local_state = '-';
635 local_mode = '-';
636 remote_state = '-';
637 remote_mode = '-';
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600638 }
639
640 /* determine command type (ACK, etc) */
641 cmd_extra[0] = '\0';
642 switch (pkt->hdr.cmd) {
643 case SMUX_CMD_OPEN_LCH:
644 if (pkt->hdr.flags & SMUX_CMD_OPEN_ACK)
645 snprintf(cmd_extra, sizeof(cmd_extra), " ACK");
646 break;
647 case SMUX_CMD_CLOSE_LCH:
648 if (pkt->hdr.flags & SMUX_CMD_CLOSE_ACK)
649 snprintf(cmd_extra, sizeof(cmd_extra), " ACK");
650 break;
Eric Holmberg4dd6e1a2012-06-26 13:46:29 -0600651
652 case SMUX_CMD_PWR_CTL:
653 if (pkt->hdr.flags & SMUX_CMD_PWR_CTL_ACK)
654 snprintf(cmd_extra, sizeof(cmd_extra), " ACK");
655 break;
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600656 }
657
658 i += snprintf(logbuf + i, SMUX_PKT_LOG_SIZE - i,
659 "smux: %c%d %c%c:%c%c %s%s flags %x len %d:%d ",
660 is_recv ? 'R' : 'S', pkt->hdr.lcid,
661 local_state, local_mode,
662 remote_state, remote_mode,
663 cmd_to_str(pkt->hdr.cmd), cmd_extra, pkt->hdr.flags,
664 pkt->hdr.payload_len, pkt->hdr.pad_len);
665
666 len = (pkt->hdr.payload_len > 16) ? 16 : pkt->hdr.payload_len;
667 data = (unsigned char *)pkt->payload;
668 for (count = 0; count < len; count++)
669 i += snprintf(logbuf + i, SMUX_PKT_LOG_SIZE - i,
670 "%02x ", (unsigned)data[count]);
671
672 pr_info("%s\n", logbuf);
673}
674
675static void smux_notify_local_fn(struct work_struct *work)
676{
677 struct smux_notify_handle *notify_handle = NULL;
678 union notifier_metadata *metadata = NULL;
679 unsigned long flags;
680 int i;
681
682 for (;;) {
683 /* retrieve notification */
684 spin_lock_irqsave(&notify_lock_lhc1, flags);
685 if (kfifo_len(&smux_notify_fifo) >= handle_size) {
686 i = kfifo_out(&smux_notify_fifo,
687 &notify_handle,
688 handle_size);
689 if (i != handle_size) {
690 pr_err("%s: unable to retrieve handle %d expected %d\n",
691 __func__, i, handle_size);
692 spin_unlock_irqrestore(&notify_lock_lhc1, flags);
693 break;
694 }
695 } else {
696 spin_unlock_irqrestore(&notify_lock_lhc1, flags);
697 break;
698 }
699 --queued_fifo_notifications;
700 spin_unlock_irqrestore(&notify_lock_lhc1, flags);
701
702 /* notify client */
703 metadata = notify_handle->metadata;
704 notify_handle->notify(notify_handle->priv,
705 notify_handle->event_type,
706 metadata);
707
708 kfree(metadata);
709 kfree(notify_handle);
710 }
711}
712
713/**
714 * Initialize existing packet.
715 */
716void smux_init_pkt(struct smux_pkt_t *pkt)
717{
718 memset(pkt, 0x0, sizeof(*pkt));
719 pkt->hdr.magic = SMUX_MAGIC;
720 INIT_LIST_HEAD(&pkt->list);
721}
722
723/**
724 * Allocate and initialize packet.
725 *
726 * If a payload is needed, either set it directly and ensure that it's freed or
727 * use smux_alloc_pkt_payload() to allocate the payload, in which case it is
728 * freed automatically when smux_free_pkt() is called.
729 */
730struct smux_pkt_t *smux_alloc_pkt(void)
731{
732 struct smux_pkt_t *pkt;
733
734 /* Consider a free list implementation instead of kmalloc */
735 pkt = kmalloc(sizeof(struct smux_pkt_t), GFP_ATOMIC);
736 if (!pkt) {
737 pr_err("%s: out of memory\n", __func__);
738 return NULL;
739 }
740 smux_init_pkt(pkt);
741 pkt->allocated = 1;
742
743 return pkt;
744}
745
746/**
747 * Free packet.
748 *
749 * @pkt Packet to free (may be NULL)
750 *
751 * If payload was allocated using smux_alloc_pkt_payload(), then it is freed as
752 * well. Otherwise, the caller is responsible for freeing the payload.
753 */
754void smux_free_pkt(struct smux_pkt_t *pkt)
755{
756 if (pkt) {
757 if (pkt->free_payload)
758 kfree(pkt->payload);
759 if (pkt->allocated)
760 kfree(pkt);
761 }
762}
763
764/**
765 * Allocate packet payload.
766 *
767 * @pkt Packet to add payload to
768 *
769 * @returns 0 on success, <0 upon error
770 *
771 * A flag is set to signal smux_free_pkt() to free the payload.
772 */
773int smux_alloc_pkt_payload(struct smux_pkt_t *pkt)
774{
775 if (!pkt)
776 return -EINVAL;
777
778 pkt->payload = kmalloc(pkt->hdr.payload_len, GFP_ATOMIC);
779 pkt->free_payload = 1;
780 if (!pkt->payload) {
781 pr_err("%s: unable to malloc %d bytes for payload\n",
782 __func__, pkt->hdr.payload_len);
783 return -ENOMEM;
784 }
785
786 return 0;
787}
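
/*
 * Usage sketch for the packet helpers above (illustrative only; error
 * handling abbreviated):
 *
 *	struct smux_pkt_t *pkt = smux_alloc_pkt();
 *	if (pkt) {
 *		pkt->hdr.cmd = SMUX_CMD_DATA;
 *		pkt->hdr.payload_len = len;
 *		if (smux_alloc_pkt_payload(pkt) == 0)
 *			memcpy(pkt->payload, data, len);
 *		...
 *		smux_free_pkt(pkt);	-- also frees the payload
 *	}
 */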
788
789static int schedule_notify(uint8_t lcid, int event,
790 const union notifier_metadata *metadata)
791{
792 struct smux_notify_handle *notify_handle = 0;
793 union notifier_metadata *meta_copy = 0;
794 struct smux_lch_t *ch;
795 int i;
796 unsigned long flags;
797 int ret = 0;
798
799 ch = &smux_lch[lcid];
800 notify_handle = kzalloc(sizeof(struct smux_notify_handle),
801 GFP_ATOMIC);
802 if (!notify_handle) {
803 pr_err("%s: out of memory\n", __func__);
804 ret = -ENOMEM;
805 goto free_out;
806 }
807
808 notify_handle->notify = ch->notify;
809 notify_handle->priv = ch->priv;
810 notify_handle->event_type = event;
811 if (metadata) {
812 meta_copy = kzalloc(sizeof(union notifier_metadata),
813 GFP_ATOMIC);
814 if (!meta_copy) {
815 pr_err("%s: out of memory\n", __func__);
816 ret = -ENOMEM;
817 goto free_out;
818 }
819 *meta_copy = *metadata;
820 notify_handle->metadata = meta_copy;
821 } else {
822 notify_handle->metadata = NULL;
823 }
824
825 spin_lock_irqsave(&notify_lock_lhc1, flags);
826 i = kfifo_avail(&smux_notify_fifo);
827 if (i < handle_size) {
828 pr_err("%s: fifo full error %d expected %d\n",
829 __func__, i, handle_size);
830 ret = -ENOMEM;
831 goto unlock_out;
832 }
833
834 i = kfifo_in(&smux_notify_fifo, &notify_handle, handle_size);
835 if (i < 0 || i != handle_size) {
836 pr_err("%s: fifo not available error %d (expected %d)\n",
837 __func__, i, handle_size);
838 ret = -ENOSPC;
839 goto unlock_out;
840 }
841 ++queued_fifo_notifications;
842
843unlock_out:
844 spin_unlock_irqrestore(&notify_lock_lhc1, flags);
845
846free_out:
847 queue_work(smux_notify_wq, &smux_notify_local);
848 if (ret < 0 && notify_handle) {
849 kfree(notify_handle->metadata);
850 kfree(notify_handle);
851 }
852 return ret;
853}
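
/*
 * Note: schedule_notify() stores the *pointer* to the handle in the kfifo
 * (handle_size is sizeof(struct smux_notify_handle *)); smux_notify_local_fn()
 * pops each pointer on the notify workqueue, invokes the client callback, and
 * frees both the handle and its metadata copy.
 */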
854
855/**
856 * Returns the serialized size of a packet.
857 *
858 * @pkt Packet to serialize
859 *
860 * @returns Serialized length of packet
861 */
862static unsigned int smux_serialize_size(struct smux_pkt_t *pkt)
863{
864 unsigned int size;
865
866 size = sizeof(struct smux_hdr_t);
867 size += pkt->hdr.payload_len;
868 size += pkt->hdr.pad_len;
869
870 return size;
871}
872
873/**
874 * Serialize packet @pkt into output buffer @data.
875 *
876 * @pkt Packet to serialize
877 * @out Destination buffer pointer
878 * @out_len Size of serialized packet
879 *
880 * @returns 0 for success
881 */
882int smux_serialize(struct smux_pkt_t *pkt, char *out,
883 unsigned int *out_len)
884{
885 char *data_start = out;
886
887 if (smux_serialize_size(pkt) > SMUX_MAX_PKT_SIZE) {
888 pr_err("%s: packet size %d too big\n",
889 __func__, smux_serialize_size(pkt));
890 return -E2BIG;
891 }
892
893 memcpy(out, &pkt->hdr, sizeof(struct smux_hdr_t));
894 out += sizeof(struct smux_hdr_t);
895 if (pkt->payload) {
896 memcpy(out, pkt->payload, pkt->hdr.payload_len);
897 out += pkt->hdr.payload_len;
898 }
899 if (pkt->hdr.pad_len) {
900 memset(out, 0x0, pkt->hdr.pad_len);
901 out += pkt->hdr.pad_len;
902 }
903 *out_len = out - data_start;
904 return 0;
905}
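
/*
 * Resulting wire layout (sketch):
 *
 *	+-------------------+--------------------+---------------------+
 *	| struct smux_hdr_t | payload_len bytes  | pad_len bytes of    |
 *	| (magic first)     | of payload         | zero padding        |
 *	+-------------------+--------------------+---------------------+
 */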
906
907/**
908 * Serialize header and provide pointer to the data.
909 *
910 * @pkt Packet
911 * @out[out] Pointer to the serialized header data
912 * @out_len[out] Pointer to the serialized header length
913 */
914static void smux_serialize_hdr(struct smux_pkt_t *pkt, char **out,
915 unsigned int *out_len)
916{
917 *out = (char *)&pkt->hdr;
918 *out_len = sizeof(struct smux_hdr_t);
919}
920
921/**
922 * Serialize payload and provide pointer to the data.
923 *
924 * @pkt Packet
925 * @out[out] Pointer to the serialized payload data
926 * @out_len[out] Pointer to the serialized payload length
927 */
928static void smux_serialize_payload(struct smux_pkt_t *pkt, char **out,
929 unsigned int *out_len)
930{
931 *out = pkt->payload;
932 *out_len = pkt->hdr.payload_len;
933}
934
935/**
936 * Serialize padding and provide pointer to the data.
937 *
938 * @pkt Packet
939 * @out[out] Pointer to the serialized padding (always NULL)
940 * @out_len[out] Pointer to the serialized payload length
941 *
942 * Since the padding field value is undefined, only the size of the padding
943 * (@out_len) is set and the buffer pointer (@out) will always be NULL.
944 */
945static void smux_serialize_padding(struct smux_pkt_t *pkt, char **out,
946 unsigned int *out_len)
947{
948 *out = NULL;
949 *out_len = pkt->hdr.pad_len;
950}
951
952/**
953 * Write data to TTY framework and handle breaking the writes up if needed.
954 *
955 * @data Data to write
956 * @len Length of data
957 *
958 * @returns 0 for success, < 0 for failure
959 */
960static int write_to_tty(char *data, unsigned len)
961{
962 int data_written;
963
964 if (!data)
965 return 0;
966
Eric Holmberged1f00c2012-06-07 09:45:18 -0600967 while (len > 0 && !smux.in_reset) {
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600968 data_written = smux.tty->ops->write(smux.tty, data, len);
969 if (data_written >= 0) {
970 len -= data_written;
971 data += data_written;
972 } else {
973 pr_err("%s: TTY write returned error %d\n",
974 __func__, data_written);
975 return data_written;
976 }
977
978 if (len)
979 tty_wait_until_sent(smux.tty,
980 msecs_to_jiffies(TTY_BUFFER_FULL_WAIT_MS));
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600981 }
982 return 0;
983}
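
/*
 * Note: on a short write the loop above waits up to TTY_BUFFER_FULL_WAIT_MS
 * for the TTY to drain before retrying, and bails out early if an SSR reset
 * is in progress (smux.in_reset).
 */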
984
985/**
986 * Write packet to TTY.
987 *
988 * @pkt packet to write
989 *
990 * @returns 0 on success
991 */
992static int smux_tx_tty(struct smux_pkt_t *pkt)
993{
994 char *data;
995 unsigned int len;
996 int ret;
997
998 if (!smux.tty) {
999 pr_err("%s: TTY not initialized", __func__);
1000 return -ENOTTY;
1001 }
1002
1003 if (pkt->hdr.cmd == SMUX_CMD_BYTE) {
1004 SMUX_DBG("%s: tty send single byte\n", __func__);
1005 ret = write_to_tty(&pkt->hdr.flags, 1);
1006 return ret;
1007 }
1008
1009 smux_serialize_hdr(pkt, &data, &len);
1010 ret = write_to_tty(data, len);
1011 if (ret) {
1012 pr_err("%s: failed %d to write header %d\n",
1013 __func__, ret, len);
1014 return ret;
1015 }
1016
1017 smux_serialize_payload(pkt, &data, &len);
1018 ret = write_to_tty(data, len);
1019 if (ret) {
1020 pr_err("%s: failed %d to write payload %d\n",
1021 __func__, ret, len);
1022 return ret;
1023 }
1024
1025 smux_serialize_padding(pkt, &data, &len);
1026 while (len > 0) {
1027 char zero = 0x0;
1028 ret = write_to_tty(&zero, 1);
1029 if (ret) {
1030 pr_err("%s: failed %d to write padding %d\n",
1031 __func__, ret, len);
1032 return ret;
1033 }
1034 --len;
1035 }
1036 return 0;
1037}
1038
1039/**
1040 * Send a single character.
1041 *
1042 * @ch Character to send
1043 */
1044static void smux_send_byte(char ch)
1045{
Eric Holmbergffddd4c2012-06-08 12:37:51 -06001046 struct smux_pkt_t *pkt;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001047
Eric Holmbergffddd4c2012-06-08 12:37:51 -06001048 pkt = smux_alloc_pkt();
1049 if (!pkt) {
1050 pr_err("%s: alloc failure for byte %x\n", __func__, ch);
1051 return;
1052 }
1053 pkt->hdr.cmd = SMUX_CMD_BYTE;
1054 pkt->hdr.flags = ch;
1055 pkt->hdr.lcid = SMUX_BROADCAST_LCID;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001056
Eric Holmbergffddd4c2012-06-08 12:37:51 -06001057 list_add_tail(&pkt->list, &smux.power_queue);
1058 queue_work(smux_tx_wq, &smux_tx_work);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001059}
1060
1061/**
1062 * Receive a single-character packet (used for internal testing).
1063 *
1064 * @ch Character to receive
1065 * @lcid Logical channel ID for packet
1066 *
1067 * @returns 0 for success
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001068 */
1069static int smux_receive_byte(char ch, int lcid)
1070{
1071 struct smux_pkt_t pkt;
1072
1073 smux_init_pkt(&pkt);
1074 pkt.hdr.lcid = lcid;
1075 pkt.hdr.cmd = SMUX_CMD_BYTE;
1076 pkt.hdr.flags = ch;
1077
1078 return smux_dispatch_rx_pkt(&pkt);
1079}
1080
1081/**
1082 * Queue packet for transmit.
1083 *
1084 * @pkt_ptr Packet to queue
1085 * @ch Channel to queue packet on
1086 * @queue Queue channel on ready list
1087 */
1088static void smux_tx_queue(struct smux_pkt_t *pkt_ptr, struct smux_lch_t *ch,
1089 int queue)
1090{
1091 unsigned long flags;
1092
1093 SMUX_DBG("%s: queuing pkt %p\n", __func__, pkt_ptr);
1094
1095 spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
1096 list_add_tail(&pkt_ptr->list, &ch->tx_queue);
1097 spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);
1098
1099 if (queue)
1100 list_channel(ch);
1101}
1102
1103/**
1104 * Handle receive OPEN ACK command.
1105 *
1106 * @pkt Received packet
1107 *
1108 * @returns 0 for success
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001109 */
1110static int smux_handle_rx_open_ack(struct smux_pkt_t *pkt)
1111{
1112 uint8_t lcid;
1113 int ret;
1114 struct smux_lch_t *ch;
1115 int enable_powerdown = 0;
1116
1117 lcid = pkt->hdr.lcid;
1118 ch = &smux_lch[lcid];
1119
1120 spin_lock(&ch->state_lock_lhb1);
1121 if (ch->local_state == SMUX_LCH_LOCAL_OPENING) {
1122 SMUX_DBG("lcid %d local state 0x%x -> 0x%x\n", lcid,
1123 ch->local_state,
1124 SMUX_LCH_LOCAL_OPENED);
1125
1126 if (pkt->hdr.flags & SMUX_CMD_OPEN_POWER_COLLAPSE)
1127 enable_powerdown = 1;
1128
1129 ch->local_state = SMUX_LCH_LOCAL_OPENED;
1130 if (ch->remote_state == SMUX_LCH_REMOTE_OPENED)
1131 schedule_notify(lcid, SMUX_CONNECTED, NULL);
1132 ret = 0;
1133 } else if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
1134 SMUX_DBG("Remote loopback OPEN ACK received\n");
1135 ret = 0;
1136 } else {
1137 pr_err("%s: lcid %d state 0x%x open ack invalid\n",
1138 __func__, lcid, ch->local_state);
1139 ret = -EINVAL;
1140 }
1141 spin_unlock(&ch->state_lock_lhb1);
1142
1143 if (enable_powerdown) {
1144 spin_lock(&smux.tx_lock_lha2);
1145 if (!smux.powerdown_enabled) {
1146 smux.powerdown_enabled = 1;
1147 SMUX_DBG("%s: enabling power-collapse support\n",
1148 __func__);
1149 }
1150 spin_unlock(&smux.tx_lock_lha2);
1151 }
1152
1153 return ret;
1154}
1155
1156static int smux_handle_close_ack(struct smux_pkt_t *pkt)
1157{
1158 uint8_t lcid;
1159 int ret;
1160 struct smux_lch_t *ch;
1161 union notifier_metadata meta_disconnected;
1162 unsigned long flags;
1163
1164 lcid = pkt->hdr.lcid;
1165 ch = &smux_lch[lcid];
1166 meta_disconnected.disconnected.is_ssr = 0;
1167
1168 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
1169
1170 if (ch->local_state == SMUX_LCH_LOCAL_CLOSING) {
1171 SMUX_DBG("lcid %d local state 0x%x -> 0x%x\n", lcid,
1172 SMUX_LCH_LOCAL_CLOSING,
1173 SMUX_LCH_LOCAL_CLOSED);
1174 ch->local_state = SMUX_LCH_LOCAL_CLOSED;
1175 if (ch->remote_state == SMUX_LCH_REMOTE_CLOSED)
1176 schedule_notify(lcid, SMUX_DISCONNECTED,
1177 &meta_disconnected);
1178 ret = 0;
1179 } else if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
1180 SMUX_DBG("Remote loopback CLOSE ACK received\n");
1181 ret = 0;
1182 } else {
1183 pr_err("%s: lcid %d state 0x%x close ack invalid\n",
1184 __func__, lcid, ch->local_state);
1185 ret = -EINVAL;
1186 }
1187 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
1188 return ret;
1189}
1190
1191/**
1192 * Handle receive OPEN command.
1193 *
1194 * @pkt Received packet
1195 *
1196 * @returns 0 for success
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001197 */
1198static int smux_handle_rx_open_cmd(struct smux_pkt_t *pkt)
1199{
1200 uint8_t lcid;
1201 int ret;
1202 struct smux_lch_t *ch;
1203 struct smux_pkt_t *ack_pkt;
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001204 unsigned long flags;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001205 int tx_ready = 0;
1206 int enable_powerdown = 0;
1207
1208 if (pkt->hdr.flags & SMUX_CMD_OPEN_ACK)
1209 return smux_handle_rx_open_ack(pkt);
1210
1211 lcid = pkt->hdr.lcid;
1212 ch = &smux_lch[lcid];
1213
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001214 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001215
1216 if (ch->remote_state == SMUX_LCH_REMOTE_CLOSED) {
1217 SMUX_DBG("lcid %d remote state 0x%x -> 0x%x\n", lcid,
1218 SMUX_LCH_REMOTE_CLOSED,
1219 SMUX_LCH_REMOTE_OPENED);
1220
1221 ch->remote_state = SMUX_LCH_REMOTE_OPENED;
1222 if (pkt->hdr.flags & SMUX_CMD_OPEN_POWER_COLLAPSE)
1223 enable_powerdown = 1;
1224
1225 /* Send Open ACK */
1226 ack_pkt = smux_alloc_pkt();
1227 if (!ack_pkt) {
1228 /* exit out to allow retrying this later */
1229 ret = -ENOMEM;
1230 goto out;
1231 }
1232 ack_pkt->hdr.cmd = SMUX_CMD_OPEN_LCH;
1233 ack_pkt->hdr.flags = SMUX_CMD_OPEN_ACK
1234 | SMUX_CMD_OPEN_POWER_COLLAPSE;
1235 ack_pkt->hdr.lcid = lcid;
1236 ack_pkt->hdr.payload_len = 0;
1237 ack_pkt->hdr.pad_len = 0;
1238 if (pkt->hdr.flags & SMUX_CMD_OPEN_REMOTE_LOOPBACK) {
1239 ch->remote_mode = SMUX_LCH_MODE_REMOTE_LOOPBACK;
1240 ack_pkt->hdr.flags |= SMUX_CMD_OPEN_REMOTE_LOOPBACK;
1241 }
1242 smux_tx_queue(ack_pkt, ch, 0);
1243 tx_ready = 1;
1244
1245 if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
1246 /*
1247 * Send an Open command to the remote side to
1248 * simulate our local client doing it.
1249 */
1250 ack_pkt = smux_alloc_pkt();
1251 if (ack_pkt) {
1252 ack_pkt->hdr.lcid = lcid;
1253 ack_pkt->hdr.cmd = SMUX_CMD_OPEN_LCH;
1254 ack_pkt->hdr.flags =
1255 SMUX_CMD_OPEN_POWER_COLLAPSE;
1256 ack_pkt->hdr.payload_len = 0;
1257 ack_pkt->hdr.pad_len = 0;
1258 smux_tx_queue(ack_pkt, ch, 0);
1259 tx_ready = 1;
1260 } else {
1261 pr_err("%s: Remote loopback allocation failure\n",
1262 __func__);
1263 }
1264 } else if (ch->local_state == SMUX_LCH_LOCAL_OPENED) {
1265 schedule_notify(lcid, SMUX_CONNECTED, NULL);
1266 }
1267 ret = 0;
1268 } else {
1269 pr_err("%s: lcid %d remote state 0x%x open invalid\n",
1270 __func__, lcid, ch->remote_state);
1271 ret = -EINVAL;
1272 }
1273
1274out:
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001275 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001276
1277 if (enable_powerdown) {
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001278 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
Eric Holmberg8b9a6402012-06-05 13:32:57 -06001279 if (!smux.powerdown_enabled) {
1280 smux.powerdown_enabled = 1;
1281 SMUX_DBG("%s: enabling power-collapse support\n",
1282 __func__);
1283 }
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001284 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001285 }
1286
1287 if (tx_ready)
1288 list_channel(ch);
1289
1290 return ret;
1291}
1292
1293/**
1294 * Handle receive CLOSE command.
1295 *
1296 * @pkt Received packet
1297 *
1298 * @returns 0 for success
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001299 */
1300static int smux_handle_rx_close_cmd(struct smux_pkt_t *pkt)
1301{
1302 uint8_t lcid;
1303 int ret;
1304 struct smux_lch_t *ch;
1305 struct smux_pkt_t *ack_pkt;
1306 union notifier_metadata meta_disconnected;
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001307 unsigned long flags;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001308 int tx_ready = 0;
1309
1310 if (pkt->hdr.flags & SMUX_CMD_CLOSE_ACK)
1311 return smux_handle_close_ack(pkt);
1312
1313 lcid = pkt->hdr.lcid;
1314 ch = &smux_lch[lcid];
1315 meta_disconnected.disconnected.is_ssr = 0;
1316
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001317 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001318 if (ch->remote_state == SMUX_LCH_REMOTE_OPENED) {
1319 SMUX_DBG("lcid %d remote state 0x%x -> 0x%x\n", lcid,
1320 SMUX_LCH_REMOTE_OPENED,
1321 SMUX_LCH_REMOTE_CLOSED);
1322
1323 ack_pkt = smux_alloc_pkt();
1324 if (!ack_pkt) {
1325 /* exit out to allow retrying this later */
1326 ret = -ENOMEM;
1327 goto out;
1328 }
1329 ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
1330 ack_pkt->hdr.cmd = SMUX_CMD_CLOSE_LCH;
1331 ack_pkt->hdr.flags = SMUX_CMD_CLOSE_ACK;
1332 ack_pkt->hdr.lcid = lcid;
1333 ack_pkt->hdr.payload_len = 0;
1334 ack_pkt->hdr.pad_len = 0;
1335 smux_tx_queue(ack_pkt, ch, 0);
1336 tx_ready = 1;
1337
1338 if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
1339 /*
1340 * Send a Close command to the remote side to simulate
1341 * our local client doing it.
1342 */
1343 ack_pkt = smux_alloc_pkt();
1344 if (ack_pkt) {
1345 ack_pkt->hdr.lcid = lcid;
1346 ack_pkt->hdr.cmd = SMUX_CMD_CLOSE_LCH;
1347 ack_pkt->hdr.flags = 0;
1348 ack_pkt->hdr.payload_len = 0;
1349 ack_pkt->hdr.pad_len = 0;
1350 smux_tx_queue(ack_pkt, ch, 0);
1351 tx_ready = 1;
1352 } else {
1353 pr_err("%s: Remote loopback allocation failure\n",
1354 __func__);
1355 }
1356 }
1357
1358 if (ch->local_state == SMUX_LCH_LOCAL_CLOSED)
1359 schedule_notify(lcid, SMUX_DISCONNECTED,
1360 &meta_disconnected);
1361 ret = 0;
1362 } else {
1363 pr_err("%s: lcid %d remote state 0x%x close invalid\n",
1364 __func__, lcid, ch->remote_state);
1365 ret = -EINVAL;
1366 }
1367out:
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001368 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001369 if (tx_ready)
1370 list_channel(ch);
1371
1372 return ret;
1373}
1374
1375/**
1376 * Handle receive DATA command.
1377 *
1378 * @pkt Received packet
1379 *
1380 * @returns 0 for success
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001381 */
1382static int smux_handle_rx_data_cmd(struct smux_pkt_t *pkt)
1383{
1384 uint8_t lcid;
Eric Holmbergb8435c82012-06-05 14:51:29 -06001385 int ret = 0;
1386 int do_retry = 0;
Eric Holmberg2e0906f2012-06-26 13:29:14 -06001387 int tx_ready = 0;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001388 int tmp;
1389 int rx_len;
1390 struct smux_lch_t *ch;
1391 union notifier_metadata metadata;
1392 int remote_loopback;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001393 struct smux_pkt_t *ack_pkt;
1394 unsigned long flags;
1395
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001396 if (!pkt || smux_assert_lch_id(pkt->hdr.lcid)) {
1397 ret = -ENXIO;
1398 goto out;
1399 }
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001400
Eric Holmbergb8435c82012-06-05 14:51:29 -06001401 rx_len = pkt->hdr.payload_len;
1402 if (rx_len == 0) {
1403 ret = -EINVAL;
1404 goto out;
1405 }
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001406
1407 lcid = pkt->hdr.lcid;
1408 ch = &smux_lch[lcid];
1409 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
1410 remote_loopback = ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK;
1411
1412 if (ch->local_state != SMUX_LCH_LOCAL_OPENED
1413 && !remote_loopback) {
1414 pr_err("smux: ch %d error data on local state 0x%x",
1415 lcid, ch->local_state);
1416 ret = -EIO;
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001417 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001418 goto out;
1419 }
1420
1421 if (ch->remote_state != SMUX_LCH_REMOTE_OPENED) {
1422 pr_err("smux: ch %d error data on remote state 0x%x",
1423 lcid, ch->remote_state);
1424 ret = -EIO;
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001425 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001426 goto out;
1427 }
1428
Eric Holmbergb8435c82012-06-05 14:51:29 -06001429 if (!list_empty(&ch->rx_retry_queue)) {
1430 do_retry = 1;
Eric Holmberg2e0906f2012-06-26 13:29:14 -06001431
1432 if ((ch->options & SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP) &&
1433 !ch->rx_flow_control_auto &&
1434 ((ch->rx_retry_queue_cnt + 1) >= SMUX_RX_WM_HIGH)) {
1435 /* need to flow control RX */
1436 ch->rx_flow_control_auto = 1;
1437 tx_ready |= smux_rx_flow_control_updated(ch);
1438 schedule_notify(ch->lcid, SMUX_RX_RETRY_HIGH_WM_HIT,
1439 NULL);
1440 }
Eric Holmbergb8435c82012-06-05 14:51:29 -06001441 if ((ch->rx_retry_queue_cnt + 1) > SMUX_RX_RETRY_MAX_PKTS) {
1442 /* retry queue full */
Eric Holmberg2e0906f2012-06-26 13:29:14 -06001443 pr_err("%s: ch %d RX retry queue full\n",
1444 __func__, lcid);
Eric Holmbergb8435c82012-06-05 14:51:29 -06001445 schedule_notify(lcid, SMUX_READ_FAIL, NULL);
1446 ret = -ENOMEM;
1447 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
1448 goto out;
1449 }
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001450 }
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001451 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001452
Eric Holmbergb8435c82012-06-05 14:51:29 -06001453 if (remote_loopback) {
1454 /* Echo the data back to the remote client. */
1455 ack_pkt = smux_alloc_pkt();
1456 if (ack_pkt) {
1457 ack_pkt->hdr.lcid = lcid;
1458 ack_pkt->hdr.cmd = SMUX_CMD_DATA;
1459 ack_pkt->hdr.flags = 0;
1460 ack_pkt->hdr.payload_len = pkt->hdr.payload_len;
1461 if (ack_pkt->hdr.payload_len) {
1462 smux_alloc_pkt_payload(ack_pkt);
1463 memcpy(ack_pkt->payload, pkt->payload,
1464 ack_pkt->hdr.payload_len);
1465 }
1466 ack_pkt->hdr.pad_len = pkt->hdr.pad_len;
1467 smux_tx_queue(ack_pkt, ch, 0);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06001468 tx_ready = 1;
Eric Holmbergb8435c82012-06-05 14:51:29 -06001469 } else {
1470 pr_err("%s: Remote loopback allocation failure\n",
1471 __func__);
1472 }
1473 } else if (!do_retry) {
1474 /* request buffer from client */
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001475 metadata.read.pkt_priv = 0;
1476 metadata.read.buffer = 0;
Eric Holmbergb8435c82012-06-05 14:51:29 -06001477 tmp = ch->get_rx_buffer(ch->priv,
1478 (void **)&metadata.read.pkt_priv,
1479 (void **)&metadata.read.buffer,
1480 rx_len);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001481
Eric Holmbergb8435c82012-06-05 14:51:29 -06001482 if (tmp == 0 && metadata.read.buffer) {
1483 /* place data into RX buffer */
1484 memcpy(metadata.read.buffer, pkt->payload,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001485 rx_len);
Eric Holmbergb8435c82012-06-05 14:51:29 -06001486 metadata.read.len = rx_len;
1487 schedule_notify(lcid, SMUX_READ_DONE,
1488 &metadata);
1489 } else if (tmp == -EAGAIN ||
1490 (tmp == 0 && !metadata.read.buffer)) {
1491 /* buffer allocation failed - add to retry queue */
1492 do_retry = 1;
1493 } else if (tmp < 0) {
Eric Holmberg2e0906f2012-06-26 13:29:14 -06001494 pr_err("%s: ch %d Client RX buffer alloc failed %d\n",
1495 __func__, lcid, tmp);
Eric Holmbergb8435c82012-06-05 14:51:29 -06001496 schedule_notify(lcid, SMUX_READ_FAIL, NULL);
1497 ret = -ENOMEM;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001498 }
1499 }
1500
Eric Holmbergb8435c82012-06-05 14:51:29 -06001501 if (do_retry) {
1502 struct smux_rx_pkt_retry *retry;
1503
1504 retry = kmalloc(sizeof(struct smux_rx_pkt_retry), GFP_KERNEL);
1505 if (!retry) {
1506 pr_err("%s: retry alloc failure\n", __func__);
1507 ret = -ENOMEM;
1508 schedule_notify(lcid, SMUX_READ_FAIL, NULL);
1509 goto out;
1510 }
1511 INIT_LIST_HEAD(&retry->rx_retry_list);
1512 retry->timeout_in_ms = SMUX_RX_RETRY_MIN_MS;
1513
1514 /* copy packet */
1515 retry->pkt = smux_alloc_pkt();
1516 if (!retry->pkt) {
1517 kfree(retry);
1518 pr_err("%s: pkt alloc failure\n", __func__);
1519 ret = -ENOMEM;
1520 schedule_notify(lcid, SMUX_READ_FAIL, NULL);
1521 goto out;
1522 }
1523 retry->pkt->hdr.lcid = lcid;
1524 retry->pkt->hdr.payload_len = pkt->hdr.payload_len;
1525 retry->pkt->hdr.pad_len = pkt->hdr.pad_len;
1526 if (retry->pkt->hdr.payload_len) {
1527 smux_alloc_pkt_payload(retry->pkt);
1528 memcpy(retry->pkt->payload, pkt->payload,
1529 retry->pkt->hdr.payload_len);
1530 }
1531
1532 /* add to retry queue */
1533 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
1534 list_add_tail(&retry->rx_retry_list, &ch->rx_retry_queue);
1535 ++ch->rx_retry_queue_cnt;
1536 if (ch->rx_retry_queue_cnt == 1)
1537 queue_delayed_work(smux_rx_wq, &ch->rx_retry_work,
1538 msecs_to_jiffies(retry->timeout_in_ms));
1539 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
1540 }
1541
Eric Holmberg2e0906f2012-06-26 13:29:14 -06001542 if (tx_ready)
1543 list_channel(ch);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001544out:
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001545 return ret;
1546}
1547
1548/**
1549 * Handle receive byte command for testing purposes.
1550 *
1551 * @pkt Received packet
1552 *
1553 * @returns 0 for success
1554 */
1555static int smux_handle_rx_byte_cmd(struct smux_pkt_t *pkt)
1556{
1557 uint8_t lcid;
1558 int ret;
1559 struct smux_lch_t *ch;
1560 union notifier_metadata metadata;
1561 unsigned long flags;
1562
Eric Holmbergffddd4c2012-06-08 12:37:51 -06001563 if (!pkt || smux_assert_lch_id(pkt->hdr.lcid)) {
1564 pr_err("%s: invalid packet or channel id\n", __func__);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001565 return -ENXIO;
Eric Holmbergffddd4c2012-06-08 12:37:51 -06001566 }
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001567
1568 lcid = pkt->hdr.lcid;
1569 ch = &smux_lch[lcid];
1570 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
1571
1572 if (ch->local_state != SMUX_LCH_LOCAL_OPENED) {
1573 pr_err("smux: ch %d error data on local state 0x%x",
1574 lcid, ch->local_state);
1575 ret = -EIO;
1576 goto out;
1577 }
1578
1579 if (ch->remote_state != SMUX_LCH_REMOTE_OPENED) {
1580 pr_err("smux: ch %d error data on remote state 0x%x",
1581 lcid, ch->remote_state);
1582 ret = -EIO;
1583 goto out;
1584 }
1585
1586 metadata.read.pkt_priv = (void *)(int)pkt->hdr.flags;
1587 metadata.read.buffer = 0;
1588 schedule_notify(lcid, SMUX_READ_DONE, &metadata);
1589 ret = 0;
1590
1591out:
1592 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
1593 return ret;
1594}
1595
1596/**
1597 * Handle receive status command.
1598 *
1599 * @pkt Received packet
1600 *
1601 * @returns 0 for success
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001602 */
1603static int smux_handle_rx_status_cmd(struct smux_pkt_t *pkt)
1604{
1605 uint8_t lcid;
1606 int ret = 0;
1607 struct smux_lch_t *ch;
1608 union notifier_metadata meta;
1609 unsigned long flags;
1610 int tx_ready = 0;
1611
1612 lcid = pkt->hdr.lcid;
1613 ch = &smux_lch[lcid];
1614
1615 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
1616 meta.tiocm.tiocm_old = ch->remote_tiocm;
1617 meta.tiocm.tiocm_new = pkt->hdr.flags;
1618
1619 /* update logical channel flow control */
1620 if ((meta.tiocm.tiocm_old & SMUX_CMD_STATUS_FLOW_CNTL) ^
1621 (meta.tiocm.tiocm_new & SMUX_CMD_STATUS_FLOW_CNTL)) {
1622 /* logical channel flow control changed */
1623 if (pkt->hdr.flags & SMUX_CMD_STATUS_FLOW_CNTL) {
1624 /* disabled TX */
1625 SMUX_DBG("TX Flow control enabled\n");
1626 ch->tx_flow_control = 1;
1627 } else {
1628 /* re-enable channel */
1629 SMUX_DBG("TX Flow control disabled\n");
1630 ch->tx_flow_control = 0;
1631 tx_ready = 1;
1632 }
1633 }
1634 meta.tiocm.tiocm_old = msm_smux_tiocm_get_atomic(ch);
1635 ch->remote_tiocm = pkt->hdr.flags;
1636 meta.tiocm.tiocm_new = msm_smux_tiocm_get_atomic(ch);
1637
1638 /* client notification for status change */
1639 if (IS_FULLY_OPENED(ch)) {
1640 if (meta.tiocm.tiocm_old != meta.tiocm.tiocm_new)
1641 schedule_notify(lcid, SMUX_TIOCM_UPDATE, &meta);
1642 ret = 0;
1643 }
1644 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
1645 if (tx_ready)
1646 list_channel(ch);
1647
1648 return ret;
1649}
1650
1651/**
1652 * Handle receive power command.
1653 *
1654 * @pkt Received packet
1655 *
1656 * @returns 0 for success
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001657 */
1658static int smux_handle_rx_power_cmd(struct smux_pkt_t *pkt)
1659{
Steve Mucklef132c6c2012-06-06 18:30:57 -07001660 struct smux_pkt_t *ack_pkt = NULL;
Eric Holmberga9b06472012-06-22 09:46:34 -06001661 int power_down = 0;
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001662 unsigned long flags;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001663
Eric Holmberg4dd6e1a2012-06-26 13:46:29 -06001664 SMUX_PWR_PKT_RX(pkt);
1665
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001666 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001667 if (pkt->hdr.flags & SMUX_CMD_PWR_CTL_ACK) {
1668 /* local sleep request ack */
Eric Holmberga9b06472012-06-22 09:46:34 -06001669 if (smux.power_state == SMUX_PWR_TURNING_OFF)
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001670 /* Power-down complete, turn off UART */
Eric Holmberga9b06472012-06-22 09:46:34 -06001671 power_down = 1;
1672 else
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001673 pr_err("%s: sleep request ack invalid in state %d\n",
1674 __func__, smux.power_state);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001675 } else {
Eric Holmbergffddd4c2012-06-08 12:37:51 -06001676 /*
1677 * Remote sleep request
1678 *
1679 * Even if we have data pending, we need to transition to the
1680 * POWER_OFF state and then perform a wakeup since the remote
1681 * side has requested a power-down.
1682 *
1683 * The state here is set to SMUX_PWR_TURNING_OFF_FLUSH and
1684 * the TX thread will set the state to SMUX_PWR_TURNING_OFF
1685 * when it sends the packet.
Eric Holmberga9b06472012-06-22 09:46:34 -06001686 *
1687 * If we are already powering down, then no ACK is sent.
Eric Holmbergffddd4c2012-06-08 12:37:51 -06001688 */
Eric Holmberga9b06472012-06-22 09:46:34 -06001689 if (smux.power_state == SMUX_PWR_ON) {
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001690 ack_pkt = smux_alloc_pkt();
1691 if (ack_pkt) {
Eric Holmbergff0b0112012-06-08 15:06:57 -06001692 SMUX_PWR("%s: Power %d->%d\n", __func__,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001693 smux.power_state,
1694 SMUX_PWR_TURNING_OFF_FLUSH);
1695
Eric Holmbergffddd4c2012-06-08 12:37:51 -06001696 smux.power_state = SMUX_PWR_TURNING_OFF_FLUSH;
1697
1698 /* send power-down ack */
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001699 ack_pkt->hdr.cmd = SMUX_CMD_PWR_CTL;
1700 ack_pkt->hdr.flags = SMUX_CMD_PWR_CTL_ACK;
Eric Holmbergffddd4c2012-06-08 12:37:51 -06001701 ack_pkt->hdr.lcid = SMUX_BROADCAST_LCID;
1702 list_add_tail(&ack_pkt->list,
1703 &smux.power_queue);
1704 queue_work(smux_tx_wq, &smux_tx_work);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001705 }
Eric Holmberga9b06472012-06-22 09:46:34 -06001706 } else if (smux.power_state == SMUX_PWR_TURNING_OFF_FLUSH) {
1707 /* Local power-down request still in TX queue */
1708 SMUX_PWR("%s: Power-down shortcut - no ack\n",
1709 __func__);
1710 smux.power_ctl_remote_req_received = 1;
1711 } else if (smux.power_state == SMUX_PWR_TURNING_OFF) {
1712 /*
1713 * Local power-down request already sent to remote
1714 * side, so this request gets treated as an ACK.
1715 */
1716 SMUX_PWR("%s: Power-down shortcut - no ack\n",
1717 __func__);
1718 power_down = 1;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001719 } else {
1720 pr_err("%s: sleep request invalid in state %d\n",
1721 __func__, smux.power_state);
1722 }
1723 }
Eric Holmberga9b06472012-06-22 09:46:34 -06001724
1725 if (power_down) {
1726 SMUX_PWR("%s: Power %d->%d\n", __func__,
1727 smux.power_state, SMUX_PWR_OFF_FLUSH);
1728 smux.power_state = SMUX_PWR_OFF_FLUSH;
1729 queue_work(smux_tx_wq, &smux_inactivity_work);
1730 }
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001731 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001732
1733 return 0;
1734}
1735
1736/**
1737 * Handle dispatching a completed packet for receive processing.
1738 *
1739 * @pkt Packet to process
1740 *
1741 * @returns 0 for success
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001742 */
1743static int smux_dispatch_rx_pkt(struct smux_pkt_t *pkt)
1744{
Eric Holmbergf9622662012-06-13 15:55:45 -06001745 int ret = -ENXIO;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001746
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001747 switch (pkt->hdr.cmd) {
1748 case SMUX_CMD_OPEN_LCH:
Eric Holmberg4dd6e1a2012-06-26 13:46:29 -06001749 SMUX_LOG_PKT_RX(pkt);
Eric Holmbergf9622662012-06-13 15:55:45 -06001750 if (smux_assert_lch_id(pkt->hdr.lcid)) {
1751 pr_err("%s: invalid channel id %d\n",
1752 __func__, pkt->hdr.lcid);
1753 break;
1754 }
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001755 ret = smux_handle_rx_open_cmd(pkt);
1756 break;
1757
1758 case SMUX_CMD_DATA:
Eric Holmberg4dd6e1a2012-06-26 13:46:29 -06001759 SMUX_LOG_PKT_RX(pkt);
Eric Holmbergf9622662012-06-13 15:55:45 -06001760 if (smux_assert_lch_id(pkt->hdr.lcid)) {
1761 pr_err("%s: invalid channel id %d\n",
1762 __func__, pkt->hdr.lcid);
1763 break;
1764 }
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001765 ret = smux_handle_rx_data_cmd(pkt);
1766 break;
1767
1768 case SMUX_CMD_CLOSE_LCH:
Eric Holmberg4dd6e1a2012-06-26 13:46:29 -06001769 SMUX_LOG_PKT_RX(pkt);
Eric Holmbergf9622662012-06-13 15:55:45 -06001770 if (smux_assert_lch_id(pkt->hdr.lcid)) {
1771 pr_err("%s: invalid channel id %d\n",
1772 __func__, pkt->hdr.lcid);
1773 break;
1774 }
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001775 ret = smux_handle_rx_close_cmd(pkt);
1776 break;
1777
1778 case SMUX_CMD_STATUS:
Eric Holmberg4dd6e1a2012-06-26 13:46:29 -06001779 SMUX_LOG_PKT_RX(pkt);
Eric Holmbergf9622662012-06-13 15:55:45 -06001780 if (smux_assert_lch_id(pkt->hdr.lcid)) {
1781 pr_err("%s: invalid channel id %d\n",
1782 __func__, pkt->hdr.lcid);
1783 break;
1784 }
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001785 ret = smux_handle_rx_status_cmd(pkt);
1786 break;
1787
1788 case SMUX_CMD_PWR_CTL:
1789 ret = smux_handle_rx_power_cmd(pkt);
1790 break;
1791
1792 case SMUX_CMD_BYTE:
Eric Holmberg4dd6e1a2012-06-26 13:46:29 -06001793 SMUX_LOG_PKT_RX(pkt);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001794 ret = smux_handle_rx_byte_cmd(pkt);
1795 break;
1796
1797 default:
Eric Holmberg4dd6e1a2012-06-26 13:46:29 -06001798 SMUX_LOG_PKT_RX(pkt);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001799 pr_err("%s: command %d unknown\n", __func__, pkt->hdr.cmd);
1800 ret = -EINVAL;
1801 }
1802 return ret;
1803}
1804
1805/**
1806 * Deserializes a packet and dispatches it to the packet receive logic.
1807 *
1808 * @data Raw data for one packet
1809 * @len Length of the data
1810 *
1811 * @returns 0 for success
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001812 */
1813static int smux_deserialize(unsigned char *data, int len)
1814{
1815 struct smux_pkt_t recv;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001816
1817 smux_init_pkt(&recv);
1818
1819 /*
1820 * It may be possible to optimize this to not use the
1821 * temporary buffer.
1822 */
1823 memcpy(&recv.hdr, data, sizeof(struct smux_hdr_t));
1824
1825 if (recv.hdr.magic != SMUX_MAGIC) {
1826 pr_err("%s: invalid header magic\n", __func__);
1827 return -EINVAL;
1828 }
1829
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001830 if (recv.hdr.payload_len)
1831 recv.payload = data + sizeof(struct smux_hdr_t);
1832
1833 return smux_dispatch_rx_pkt(&recv);
1834}
1835
1836/**
1837 * Handle wakeup request byte.
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001838 */
1839static void smux_handle_wakeup_req(void)
1840{
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001841 unsigned long flags;
1842
1843 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001844 if (smux.power_state == SMUX_PWR_OFF
1845 || smux.power_state == SMUX_PWR_TURNING_ON) {
1846 /* wakeup system */
Eric Holmbergff0b0112012-06-08 15:06:57 -06001847 SMUX_PWR("%s: Power %d->%d\n", __func__,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001848 smux.power_state, SMUX_PWR_ON);
1849 smux.power_state = SMUX_PWR_ON;
1850 queue_work(smux_tx_wq, &smux_wakeup_work);
1851 queue_work(smux_tx_wq, &smux_tx_work);
1852 queue_delayed_work(smux_tx_wq, &smux_delayed_inactivity_work,
1853 msecs_to_jiffies(SMUX_INACTIVITY_TIMEOUT_MS));
1854 smux_send_byte(SMUX_WAKEUP_ACK);
Eric Holmberga9b06472012-06-22 09:46:34 -06001855 } else if (smux.power_state == SMUX_PWR_ON) {
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001856 smux_send_byte(SMUX_WAKEUP_ACK);
Eric Holmberga9b06472012-06-22 09:46:34 -06001857 } else {
1858 /* stale wakeup request from previous wakeup */
1859 SMUX_PWR("%s: stale Wakeup REQ in state %d\n",
1860 __func__, smux.power_state);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001861 }
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001862 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001863}
1864
1865/**
1866 * Handle wakeup request ack.
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001867 */
1868static void smux_handle_wakeup_ack(void)
1869{
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001870 unsigned long flags;
1871
1872 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001873 if (smux.power_state == SMUX_PWR_TURNING_ON) {
1874 /* received response to wakeup request */
Eric Holmbergff0b0112012-06-08 15:06:57 -06001875 SMUX_PWR("%s: Power %d->%d\n", __func__,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001876 smux.power_state, SMUX_PWR_ON);
1877 smux.power_state = SMUX_PWR_ON;
1878 queue_work(smux_tx_wq, &smux_tx_work);
1879 queue_delayed_work(smux_tx_wq, &smux_delayed_inactivity_work,
1880 msecs_to_jiffies(SMUX_INACTIVITY_TIMEOUT_MS));
1881
1882 } else if (smux.power_state != SMUX_PWR_ON) {
1883 /* invalid message */
Eric Holmberga9b06472012-06-22 09:46:34 -06001884 SMUX_PWR("%s: stale Wakeup REQ ACK in state %d\n",
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001885 __func__, smux.power_state);
1886 }
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001887 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001888}
1889
1890/**
1891 * RX State machine - IDLE state processing.
1892 *
1893 * @data New RX data to process
1894 * @len Length of the data
1895 * @used Return value of length processed
 1896 * @flag Error flag - TTY_NORMAL (0) for no error
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001897 */
1898static void smux_rx_handle_idle(const unsigned char *data,
1899 int len, int *used, int flag)
1900{
1901 int i;
1902
1903 if (flag) {
1904 if (smux_byte_loopback)
1905 smux_receive_byte(SMUX_UT_ECHO_ACK_FAIL,
1906 smux_byte_loopback);
1907 pr_err("%s: TTY error 0x%x - ignoring\n", __func__, flag);
1908 ++*used;
1909 return;
1910 }
1911
1912 for (i = *used; i < len && smux.rx_state == SMUX_RX_IDLE; i++) {
1913 switch (data[i]) {
1914 case SMUX_MAGIC_WORD1:
1915 smux.rx_state = SMUX_RX_MAGIC;
1916 break;
1917 case SMUX_WAKEUP_REQ:
Eric Holmberg4dd6e1a2012-06-26 13:46:29 -06001918 SMUX_PWR("smux: RX Wakeup REQ\n");
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001919 smux_handle_wakeup_req();
1920 break;
1921 case SMUX_WAKEUP_ACK:
Eric Holmberg4dd6e1a2012-06-26 13:46:29 -06001922 SMUX_PWR("smux: RX Wakeup ACK\n");
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001923 smux_handle_wakeup_ack();
1924 break;
1925 default:
1926 /* unexpected character */
1927 if (smux_byte_loopback && data[i] == SMUX_UT_ECHO_REQ)
1928 smux_receive_byte(SMUX_UT_ECHO_ACK_OK,
1929 smux_byte_loopback);
1930 pr_err("%s: parse error 0x%02x - ignoring\n", __func__,
1931 (unsigned)data[i]);
1932 break;
1933 }
1934 }
1935
1936 *used = i;
1937}
1938
1939/**
1940 * RX State machine - Header Magic state processing.
1941 *
1942 * @data New RX data to process
1943 * @len Length of the data
1944 * @used Return value of length processed
 1945 * @flag Error flag - TTY_NORMAL (0) for no error
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001946 */
1947static void smux_rx_handle_magic(const unsigned char *data,
1948 int len, int *used, int flag)
1949{
1950 int i;
1951
1952 if (flag) {
1953 pr_err("%s: TTY RX error %d\n", __func__, flag);
1954 smux_enter_reset();
1955 smux.rx_state = SMUX_RX_FAILURE;
1956 ++*used;
1957 return;
1958 }
1959
1960 for (i = *used; i < len && smux.rx_state == SMUX_RX_MAGIC; i++) {
1961 /* wait for completion of the magic */
1962 if (data[i] == SMUX_MAGIC_WORD2) {
1963 smux.recv_len = 0;
1964 smux.recv_buf[smux.recv_len++] = SMUX_MAGIC_WORD1;
1965 smux.recv_buf[smux.recv_len++] = SMUX_MAGIC_WORD2;
1966 smux.rx_state = SMUX_RX_HDR;
1967 } else {
1968 /* unexpected / trash character */
1969 pr_err("%s: rx parse error for char %c; *used=%d, len=%d\n",
1970 __func__, data[i], *used, len);
1971 smux.rx_state = SMUX_RX_IDLE;
1972 }
1973 }
1974
1975 *used = i;
1976}
1977
1978/**
1979 * RX State machine - Packet Header state processing.
1980 *
1981 * @data New RX data to process
1982 * @len Length of the data
1983 * @used Return value of length processed
 1984 * @flag Error flag - TTY_NORMAL (0) for no error
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001985 */
1986static void smux_rx_handle_hdr(const unsigned char *data,
1987 int len, int *used, int flag)
1988{
1989 int i;
1990 struct smux_hdr_t *hdr;
1991
1992 if (flag) {
1993 pr_err("%s: TTY RX error %d\n", __func__, flag);
1994 smux_enter_reset();
1995 smux.rx_state = SMUX_RX_FAILURE;
1996 ++*used;
1997 return;
1998 }
1999
2000 for (i = *used; i < len && smux.rx_state == SMUX_RX_HDR; i++) {
2001 smux.recv_buf[smux.recv_len++] = data[i];
2002
2003 if (smux.recv_len == sizeof(struct smux_hdr_t)) {
2004 /* complete header received */
2005 hdr = (struct smux_hdr_t *)smux.recv_buf;
2006 smux.pkt_remain = hdr->payload_len + hdr->pad_len;
2007 smux.rx_state = SMUX_RX_PAYLOAD;
2008 }
2009 }
2010 *used = i;
2011}
2012
2013/**
2014 * RX State machine - Packet Payload state processing.
2015 *
2016 * @data New RX data to process
2017 * @len Length of the data
2018 * @used Return value of length processed
 2019 * @flag Error flag - TTY_NORMAL (0) for no error
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002020 */
2021static void smux_rx_handle_pkt_payload(const unsigned char *data,
2022 int len, int *used, int flag)
2023{
2024 int remaining;
2025
2026 if (flag) {
2027 pr_err("%s: TTY RX error %d\n", __func__, flag);
2028 smux_enter_reset();
2029 smux.rx_state = SMUX_RX_FAILURE;
2030 ++*used;
2031 return;
2032 }
2033
2034 /* copy data into rx buffer */
2035 if (smux.pkt_remain < (len - *used))
2036 remaining = smux.pkt_remain;
2037 else
2038 remaining = len - *used;
2039
2040 memcpy(&smux.recv_buf[smux.recv_len], &data[*used], remaining);
2041 smux.recv_len += remaining;
2042 smux.pkt_remain -= remaining;
2043 *used += remaining;
2044
2045 if (smux.pkt_remain == 0) {
2046 /* complete packet received */
2047 smux_deserialize(smux.recv_buf, smux.recv_len);
2048 smux.rx_state = SMUX_RX_IDLE;
2049 }
2050}
2051
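/*
 * RX parser state overview (a summary of the four handlers above):
 *
 *   SMUX_RX_IDLE ----- SMUX_MAGIC_WORD1 ------> SMUX_RX_MAGIC
 *   SMUX_RX_MAGIC ---- SMUX_MAGIC_WORD2 ------> SMUX_RX_HDR
 *   SMUX_RX_MAGIC ---- any other byte --------> SMUX_RX_IDLE
 *   SMUX_RX_HDR ------ full header received --> SMUX_RX_PAYLOAD
 *   SMUX_RX_PAYLOAD -- pkt_remain reaches 0 --> SMUX_RX_IDLE
 *                      (packet deserialized and dispatched)
 *
 * Wakeup REQ/ACK bytes are consumed directly in SMUX_RX_IDLE; a TTY
 * error flag in any state other than idle resets the link and moves
 * the parser to SMUX_RX_FAILURE.
 */
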
2052/**
2053 * Feed data to the receive state machine.
2054 *
2055 * @data Pointer to data block
2056 * @len Length of data
2057 * @flag TTY_NORMAL (0) for no error, otherwise TTY Error Flag
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002058 */
2059void smux_rx_state_machine(const unsigned char *data,
2060 int len, int flag)
2061{
Eric Holmberg0560f7a2012-05-31 15:50:26 -06002062 struct smux_rx_worker_data work;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002063
Eric Holmberg0560f7a2012-05-31 15:50:26 -06002064 work.data = data;
2065 work.len = len;
2066 work.flag = flag;
2067 INIT_WORK_ONSTACK(&work.work, smux_rx_worker);
2068 work.work_complete = COMPLETION_INITIALIZER_ONSTACK(work.work_complete);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002069
Eric Holmberg0560f7a2012-05-31 15:50:26 -06002070 queue_work(smux_rx_wq, &work.work);
2071 wait_for_completion(&work.work_complete);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002072}
2073
2074/**
2075 * Add channel to transmit-ready list and trigger transmit worker.
2076 *
2077 * @ch Channel to add
2078 */
2079static void list_channel(struct smux_lch_t *ch)
2080{
2081 unsigned long flags;
2082
2083 SMUX_DBG("%s: listing channel %d\n",
2084 __func__, ch->lcid);
2085
2086 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
2087 spin_lock(&ch->tx_lock_lhb2);
2088 smux.tx_activity_flag = 1;
2089 if (list_empty(&ch->tx_ready_list))
2090 list_add_tail(&ch->tx_ready_list, &smux.lch_tx_ready_list);
2091 spin_unlock(&ch->tx_lock_lhb2);
2092 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2093
2094 queue_work(smux_tx_wq, &smux_tx_work);
2095}
2096
2097/**
2098 * Transmit packet on correct transport and then perform client
2099 * notification.
2100 *
2101 * @ch Channel to transmit on
2102 * @pkt Packet to transmit
2103 */
2104static void smux_tx_pkt(struct smux_lch_t *ch, struct smux_pkt_t *pkt)
2105{
2106 union notifier_metadata meta_write;
2107 int ret;
2108
2109 if (ch && pkt) {
2110 SMUX_LOG_PKT_TX(pkt);
2111 if (ch->local_mode == SMUX_LCH_MODE_LOCAL_LOOPBACK)
2112 ret = smux_tx_loopback(pkt);
2113 else
2114 ret = smux_tx_tty(pkt);
2115
2116 if (pkt->hdr.cmd == SMUX_CMD_DATA) {
2117 /* notify write-done */
2118 meta_write.write.pkt_priv = pkt->priv;
2119 meta_write.write.buffer = pkt->payload;
2120 meta_write.write.len = pkt->hdr.payload_len;
2121 if (ret >= 0) {
 2122				SMUX_DBG("%s: PKT write done\n", __func__);
2123 schedule_notify(ch->lcid, SMUX_WRITE_DONE,
2124 &meta_write);
2125 } else {
2126 pr_err("%s: failed to write pkt %d\n",
2127 __func__, ret);
2128 schedule_notify(ch->lcid, SMUX_WRITE_FAIL,
2129 &meta_write);
2130 }
2131 }
2132 }
2133}
2134
2135/**
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002136 * Flush pending TTY TX data.
2137 */
2138static void smux_flush_tty(void)
2139{
Eric Holmberg92a67df2012-06-25 13:56:24 -06002140 mutex_lock(&smux.mutex_lha0);
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002141 if (!smux.tty) {
2142 pr_err("%s: ldisc not loaded\n", __func__);
Eric Holmberg92a67df2012-06-25 13:56:24 -06002143 mutex_unlock(&smux.mutex_lha0);
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002144 return;
2145 }
2146
2147 tty_wait_until_sent(smux.tty,
2148 msecs_to_jiffies(TTY_BUFFER_FULL_WAIT_MS));
2149
2150 if (tty_chars_in_buffer(smux.tty) > 0)
2151 pr_err("%s: unable to flush UART queue\n", __func__);
Eric Holmberg92a67df2012-06-25 13:56:24 -06002152
2153 mutex_unlock(&smux.mutex_lha0);
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002154}
2155
2156/**
Eric Holmberged1f00c2012-06-07 09:45:18 -06002157 * Purge TX queue for logical channel.
2158 *
2159 * @ch Logical channel pointer
2160 *
2161 * Must be called with the following spinlocks locked:
2162 * state_lock_lhb1
2163 * tx_lock_lhb2
2164 */
2165static void smux_purge_ch_tx_queue(struct smux_lch_t *ch)
2166{
2167 struct smux_pkt_t *pkt;
2168 int send_disconnect = 0;
2169
2170 while (!list_empty(&ch->tx_queue)) {
2171 pkt = list_first_entry(&ch->tx_queue, struct smux_pkt_t,
2172 list);
2173 list_del(&pkt->list);
2174
2175 if (pkt->hdr.cmd == SMUX_CMD_OPEN_LCH) {
2176 /* Open was never sent, just force to closed state */
2177 ch->local_state = SMUX_LCH_LOCAL_CLOSED;
2178 send_disconnect = 1;
2179 } else if (pkt->hdr.cmd == SMUX_CMD_DATA) {
2180 /* Notify client of failed write */
2181 union notifier_metadata meta_write;
2182
2183 meta_write.write.pkt_priv = pkt->priv;
2184 meta_write.write.buffer = pkt->payload;
2185 meta_write.write.len = pkt->hdr.payload_len;
2186 schedule_notify(ch->lcid, SMUX_WRITE_FAIL, &meta_write);
2187 }
2188 smux_free_pkt(pkt);
2189 }
2190
2191 if (send_disconnect) {
2192 union notifier_metadata meta_disconnected;
2193
2194 meta_disconnected.disconnected.is_ssr = smux.in_reset;
2195 schedule_notify(ch->lcid, SMUX_DISCONNECTED,
2196 &meta_disconnected);
2197 }
2198}
2199
2200/**
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002201 * Power-up the UART.
Eric Holmberg92a67df2012-06-25 13:56:24 -06002202 *
2203 * Must be called with smux.mutex_lha0 already locked.
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002204 */
Eric Holmberg92a67df2012-06-25 13:56:24 -06002205static void smux_uart_power_on_atomic(void)
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002206{
2207 struct uart_state *state;
2208
2209 if (!smux.tty || !smux.tty->driver_data) {
2210 pr_err("%s: unable to find UART port for tty %p\n",
2211 __func__, smux.tty);
2212 return;
2213 }
2214 state = smux.tty->driver_data;
2215 msm_hs_request_clock_on(state->uart_port);
2216}
2217
2218/**
Eric Holmberg92a67df2012-06-25 13:56:24 -06002219 * Power-up the UART.
2220 */
2221static void smux_uart_power_on(void)
2222{
2223 mutex_lock(&smux.mutex_lha0);
2224 smux_uart_power_on_atomic();
2225 mutex_unlock(&smux.mutex_lha0);
2226}
2227
2228/**
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002229 * Power down the UART.
Eric Holmberg06011322012-07-06 18:17:03 -06002230 *
2231 * Must be called with mutex_lha0 locked.
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002232 */
Eric Holmberg06011322012-07-06 18:17:03 -06002233static void smux_uart_power_off_atomic(void)
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002234{
2235 struct uart_state *state;
2236
2237 if (!smux.tty || !smux.tty->driver_data) {
2238 pr_err("%s: unable to find UART port for tty %p\n",
2239 __func__, smux.tty);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002241 return;
2242 }
2243 state = smux.tty->driver_data;
2244 msm_hs_request_clock_off(state->uart_port);
Eric Holmberg06011322012-07-06 18:17:03 -06002245}
2246
2247/**
2248 * Power down the UART.
2249 */
2250static void smux_uart_power_off(void)
2251{
2252 mutex_lock(&smux.mutex_lha0);
2253 smux_uart_power_off_atomic();
Eric Holmberg92a67df2012-06-25 13:56:24 -06002254 mutex_unlock(&smux.mutex_lha0);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002255}
2256
2257/**
2258 * TX Wakeup Worker
2259 *
2260 * @work Not used
2261 *
2262 * Do an exponential back-off wakeup sequence with a maximum period
2263 * of approximately 1 second (1 << 20 microseconds).
2264 */
2265static void smux_wakeup_worker(struct work_struct *work)
2266{
2267 unsigned long flags;
2268 unsigned wakeup_delay;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002269
Eric Holmberg2d4f9e82012-06-21 13:12:39 -06002270 if (smux.in_reset)
2271 return;
2272
2273 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
2274 if (smux.power_state == SMUX_PWR_ON) {
2275 /* wakeup complete */
Eric Holmberga9b06472012-06-22 09:46:34 -06002276 smux.pwr_wakeup_delay_us = 1;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002277 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002278 SMUX_DBG("%s: wakeup complete\n", __func__);
Eric Holmberg2d4f9e82012-06-21 13:12:39 -06002279
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002280 /*
2281 * Cancel any pending retry. This avoids a race condition with
2282 * a new power-up request because:
2283 * 1) this worker doesn't modify the state
2284 * 2) this worker is processed on the same single-threaded
2285 * workqueue as new TX wakeup requests
2286 */
2287 cancel_delayed_work(&smux_wakeup_delayed_work);
Eric Holmbergd032f5b2012-06-29 19:02:00 -06002288 queue_work(smux_tx_wq, &smux_tx_work);
Eric Holmberga9b06472012-06-22 09:46:34 -06002289 } else if (smux.power_state == SMUX_PWR_TURNING_ON) {
Eric Holmberg2d4f9e82012-06-21 13:12:39 -06002290 /* retry wakeup */
2291 wakeup_delay = smux.pwr_wakeup_delay_us;
2292 smux.pwr_wakeup_delay_us <<= 1;
2293 if (smux.pwr_wakeup_delay_us > SMUX_WAKEUP_DELAY_MAX)
2294 smux.pwr_wakeup_delay_us =
2295 SMUX_WAKEUP_DELAY_MAX;
2296
2297 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
Eric Holmberga9b06472012-06-22 09:46:34 -06002298 SMUX_PWR("%s: triggering wakeup\n", __func__);
Eric Holmberg2d4f9e82012-06-21 13:12:39 -06002299 smux_send_byte(SMUX_WAKEUP_REQ);
2300
2301 if (wakeup_delay < SMUX_WAKEUP_DELAY_MIN) {
2302 SMUX_DBG("%s: sleeping for %u us\n", __func__,
2303 wakeup_delay);
2304 usleep_range(wakeup_delay, 2*wakeup_delay);
2305 queue_work(smux_tx_wq, &smux_wakeup_work);
2306 } else {
2307 /* schedule delayed work */
2308 SMUX_DBG("%s: scheduling delayed wakeup in %u ms\n",
2309 __func__, wakeup_delay / 1000);
2310 queue_delayed_work(smux_tx_wq,
2311 &smux_wakeup_delayed_work,
2312 msecs_to_jiffies(wakeup_delay / 1000));
2313 }
Eric Holmberga9b06472012-06-22 09:46:34 -06002314 } else {
2315 /* wakeup aborted */
2316 smux.pwr_wakeup_delay_us = 1;
2317 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2318 SMUX_PWR("%s: wakeup aborted\n", __func__);
2319 cancel_delayed_work(&smux_wakeup_delayed_work);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002320 }
2321}
2322
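/*
 * Back-off behavior of the worker above: pwr_wakeup_delay_us starts at
 * 1 us and doubles on every retry, capped at SMUX_WAKEUP_DELAY_MAX
 * (1 << 20 us, roughly one second). Delays shorter than
 * SMUX_WAKEUP_DELAY_MIN (1 << 15 us) are slept inline with
 * usleep_range() and the worker is requeued immediately; longer delays
 * are deferred to smux_wakeup_delayed_work so the workqueue is not
 * tied up, e.g.:
 *
 *   retry #:     1   2   3  ...  15     16     ...  21 and later
 *   delay (us):  1   2   4  ...  16384  32768  ...  1048576 (capped)
 */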
2323
2324/**
2325 * Inactivity timeout worker. Periodically scheduled when link is active.
 2326 * When it detects inactivity, it powers down the UART link.
2327 *
2328 * @work Work structure (not used)
2329 */
2330static void smux_inactivity_worker(struct work_struct *work)
2331{
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002332 struct smux_pkt_t *pkt;
2333 unsigned long flags;
2334
Eric Holmberg06011322012-07-06 18:17:03 -06002335 if (smux.in_reset)
2336 return;
2337
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002338 spin_lock_irqsave(&smux.rx_lock_lha1, flags);
2339 spin_lock(&smux.tx_lock_lha2);
2340
2341 if (!smux.tx_activity_flag && !smux.rx_activity_flag) {
2342 /* no activity */
2343 if (smux.powerdown_enabled) {
2344 if (smux.power_state == SMUX_PWR_ON) {
2345 /* start power-down sequence */
2346 pkt = smux_alloc_pkt();
2347 if (pkt) {
Eric Holmbergff0b0112012-06-08 15:06:57 -06002348 SMUX_PWR("%s: Power %d->%d\n", __func__,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002349 smux.power_state,
Eric Holmberga9b06472012-06-22 09:46:34 -06002350 SMUX_PWR_TURNING_OFF_FLUSH);
2351 smux.power_state =
2352 SMUX_PWR_TURNING_OFF_FLUSH;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002353
2354 /* send power-down request */
2355 pkt->hdr.cmd = SMUX_CMD_PWR_CTL;
2356 pkt->hdr.flags = 0;
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002357 pkt->hdr.lcid = SMUX_BROADCAST_LCID;
2358 list_add_tail(&pkt->list,
2359 &smux.power_queue);
2360 queue_work(smux_tx_wq, &smux_tx_work);
2361 } else {
2362 pr_err("%s: packet alloc failed\n",
2363 __func__);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002364 }
2365 }
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002366 }
2367 }
2368 smux.tx_activity_flag = 0;
2369 smux.rx_activity_flag = 0;
2370
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002371 if (smux.power_state == SMUX_PWR_OFF_FLUSH) {
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002372 /* ready to power-down the UART */
Eric Holmbergff0b0112012-06-08 15:06:57 -06002373 SMUX_PWR("%s: Power %d->%d\n", __func__,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002374 smux.power_state, SMUX_PWR_OFF);
Eric Holmbergff0b0112012-06-08 15:06:57 -06002375 smux.power_state = SMUX_PWR_OFF;
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002376
2377 /* if data is pending, schedule a new wakeup */
2378 if (!list_empty(&smux.lch_tx_ready_list) ||
2379 !list_empty(&smux.power_queue))
2380 queue_work(smux_tx_wq, &smux_tx_work);
2381
2382 spin_unlock(&smux.tx_lock_lha2);
2383 spin_unlock_irqrestore(&smux.rx_lock_lha1, flags);
2384
2385 /* flush UART output queue and power down */
2386 smux_flush_tty();
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002387 smux_uart_power_off();
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002388 } else {
2389 spin_unlock(&smux.tx_lock_lha2);
2390 spin_unlock_irqrestore(&smux.rx_lock_lha1, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002391 }
2392
2393 /* reschedule inactivity worker */
2394 if (smux.power_state != SMUX_PWR_OFF)
2395 queue_delayed_work(smux_tx_wq, &smux_delayed_inactivity_work,
2396 msecs_to_jiffies(SMUX_INACTIVITY_TIMEOUT_MS));
2397}
2398
2399/**
Eric Holmbergb8435c82012-06-05 14:51:29 -06002400 * Remove RX retry packet from channel and free it.
2401 *
Eric Holmbergb8435c82012-06-05 14:51:29 -06002402 * @ch Channel for retry packet
2403 * @retry Retry packet to remove
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002404 *
2405 * @returns 1 if flow control updated; 0 otherwise
2406 *
2407 * Must be called with state_lock_lhb1 locked.
Eric Holmbergb8435c82012-06-05 14:51:29 -06002408 */
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002409static int smux_remove_rx_retry(struct smux_lch_t *ch,
Eric Holmbergb8435c82012-06-05 14:51:29 -06002410 struct smux_rx_pkt_retry *retry)
2411{
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002412 int tx_ready = 0;
2413
Eric Holmbergb8435c82012-06-05 14:51:29 -06002414 list_del(&retry->rx_retry_list);
2415 --ch->rx_retry_queue_cnt;
2416 smux_free_pkt(retry->pkt);
2417 kfree(retry);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002418
2419 if ((ch->options & SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP) &&
2420 (ch->rx_retry_queue_cnt <= SMUX_RX_WM_LOW) &&
2421 ch->rx_flow_control_auto) {
2422 ch->rx_flow_control_auto = 0;
2423 smux_rx_flow_control_updated(ch);
2424 schedule_notify(ch->lcid, SMUX_RX_RETRY_LOW_WM_HIT, NULL);
2425 tx_ready = 1;
2426 }
2427 return tx_ready;
Eric Holmbergb8435c82012-06-05 14:51:29 -06002428}
2429
2430/**
Eric Holmberg0560f7a2012-05-31 15:50:26 -06002431 * RX worker handles all receive operations.
2432 *
 2433 * @work Work structure contained in struct smux_rx_worker_data
2434 */
2435static void smux_rx_worker(struct work_struct *work)
2436{
2437 unsigned long flags;
2438 int used;
2439 int initial_rx_state;
2440 struct smux_rx_worker_data *w;
2441 const unsigned char *data;
2442 int len;
2443 int flag;
2444
2445 w = container_of(work, struct smux_rx_worker_data, work);
2446 data = w->data;
2447 len = w->len;
2448 flag = w->flag;
2449
2450 spin_lock_irqsave(&smux.rx_lock_lha1, flags);
2451 smux.rx_activity_flag = 1;
2452 spin_unlock_irqrestore(&smux.rx_lock_lha1, flags);
2453
2454 SMUX_DBG("%s: %p, len=%d, flag=%d\n", __func__, data, len, flag);
2455 used = 0;
2456 do {
Eric Holmberg06011322012-07-06 18:17:03 -06002457 if (smux.in_reset) {
2458 SMUX_DBG("%s: abort RX due to reset\n", __func__);
2459 smux.rx_state = SMUX_RX_IDLE;
2460 break;
2461 }
2462
Eric Holmberg0560f7a2012-05-31 15:50:26 -06002463 SMUX_DBG("%s: state %d; %d of %d\n",
2464 __func__, smux.rx_state, used, len);
2465 initial_rx_state = smux.rx_state;
2466
2467 switch (smux.rx_state) {
2468 case SMUX_RX_IDLE:
2469 smux_rx_handle_idle(data, len, &used, flag);
2470 break;
2471 case SMUX_RX_MAGIC:
2472 smux_rx_handle_magic(data, len, &used, flag);
2473 break;
2474 case SMUX_RX_HDR:
2475 smux_rx_handle_hdr(data, len, &used, flag);
2476 break;
2477 case SMUX_RX_PAYLOAD:
2478 smux_rx_handle_pkt_payload(data, len, &used, flag);
2479 break;
2480 default:
2481 SMUX_DBG("%s: invalid state %d\n",
2482 __func__, smux.rx_state);
2483 smux.rx_state = SMUX_RX_IDLE;
2484 break;
2485 }
2486 } while (used < len || smux.rx_state != initial_rx_state);
2487
2488 complete(&w->work_complete);
2489}
2490
2491/**
Eric Holmbergb8435c82012-06-05 14:51:29 -06002492 * RX Retry worker handles retrying get_rx_buffer calls that previously failed
2493 * because the client was not ready (-EAGAIN).
2494 *
2495 * @work Work structure contained in smux_lch_t structure
2496 */
2497static void smux_rx_retry_worker(struct work_struct *work)
2498{
2499 struct smux_lch_t *ch;
2500 struct smux_rx_pkt_retry *retry;
2501 union notifier_metadata metadata;
2502 int tmp;
2503 unsigned long flags;
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002504 int immediate_retry = 0;
2505 int tx_ready = 0;
Eric Holmbergb8435c82012-06-05 14:51:29 -06002506
2507 ch = container_of(work, struct smux_lch_t, rx_retry_work.work);
2508
2509 /* get next retry packet */
2510 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
Eric Holmberg06011322012-07-06 18:17:03 -06002511 if ((ch->local_state != SMUX_LCH_LOCAL_OPENED) || smux.in_reset) {
Eric Holmbergb8435c82012-06-05 14:51:29 -06002512 /* port has been closed - remove all retries */
2513 while (!list_empty(&ch->rx_retry_queue)) {
2514 retry = list_first_entry(&ch->rx_retry_queue,
2515 struct smux_rx_pkt_retry,
2516 rx_retry_list);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002517 (void)smux_remove_rx_retry(ch, retry);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002518 }
2519 }
2520
2521 if (list_empty(&ch->rx_retry_queue)) {
2522 SMUX_DBG("%s: retry list empty for channel %d\n",
2523 __func__, ch->lcid);
2524 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2525 return;
2526 }
2527 retry = list_first_entry(&ch->rx_retry_queue,
2528 struct smux_rx_pkt_retry,
2529 rx_retry_list);
2530 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2531
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002532 SMUX_DBG("%s: ch %d retrying rx pkt %p\n",
2533 __func__, ch->lcid, retry);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002534 metadata.read.pkt_priv = 0;
2535 metadata.read.buffer = 0;
2536 tmp = ch->get_rx_buffer(ch->priv,
2537 (void **)&metadata.read.pkt_priv,
2538 (void **)&metadata.read.buffer,
2539 retry->pkt->hdr.payload_len);
2540 if (tmp == 0 && metadata.read.buffer) {
2541 /* have valid RX buffer */
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002542
Eric Holmbergb8435c82012-06-05 14:51:29 -06002543 memcpy(metadata.read.buffer, retry->pkt->payload,
2544 retry->pkt->hdr.payload_len);
2545 metadata.read.len = retry->pkt->hdr.payload_len;
2546
2547 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002548 tx_ready = smux_remove_rx_retry(ch, retry);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002549 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002550 schedule_notify(ch->lcid, SMUX_READ_DONE, &metadata);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002551 if (tx_ready)
2552 list_channel(ch);
2553
2554 immediate_retry = 1;
Eric Holmbergb8435c82012-06-05 14:51:29 -06002555 } else if (tmp == -EAGAIN ||
2556 (tmp == 0 && !metadata.read.buffer)) {
2557 /* retry again */
2558 retry->timeout_in_ms <<= 1;
2559 if (retry->timeout_in_ms > SMUX_RX_RETRY_MAX_MS) {
2560 /* timed out */
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002561 pr_err("%s: ch %d RX retry client timeout\n",
2562 __func__, ch->lcid);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002563 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002564 tx_ready = smux_remove_rx_retry(ch, retry);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002565 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002566 schedule_notify(ch->lcid, SMUX_READ_FAIL, NULL);
2567 if (tx_ready)
2568 list_channel(ch);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002569 }
2570 } else {
2571 /* client error - drop packet */
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002572 pr_err("%s: ch %d RX retry client failed (%d)\n",
2573 __func__, ch->lcid, tmp);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002574 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002575 tx_ready = smux_remove_rx_retry(ch, retry);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002576 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002577 schedule_notify(ch->lcid, SMUX_READ_FAIL, NULL);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002578 if (tx_ready)
2579 list_channel(ch);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002580 }
2581
2582 /* schedule next retry */
2583 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2584 if (!list_empty(&ch->rx_retry_queue)) {
2585 retry = list_first_entry(&ch->rx_retry_queue,
2586 struct smux_rx_pkt_retry,
2587 rx_retry_list);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002588
2589 if (immediate_retry)
2590 queue_delayed_work(smux_rx_wq, &ch->rx_retry_work, 0);
2591 else
2592 queue_delayed_work(smux_rx_wq, &ch->rx_retry_work,
2593 msecs_to_jiffies(retry->timeout_in_ms));
Eric Holmbergb8435c82012-06-05 14:51:29 -06002594 }
2595 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2596}
2597
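/*
 * Note on the retry cadence above: timeout_in_ms doubles after every
 * -EAGAIN (it presumably starts at SMUX_RX_RETRY_MIN_MS, per the
 * definitions at the top of this file), and the packet is dropped with
 * SMUX_READ_FAIL once the timeout exceeds SMUX_RX_RETRY_MAX_MS.
 */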
2598/**
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002599 * Transmit worker handles serializing and transmitting packets onto the
2600 * underlying transport.
2601 *
2602 * @work Work structure (not used)
2603 */
2604static void smux_tx_worker(struct work_struct *work)
2605{
2606 struct smux_pkt_t *pkt;
2607 struct smux_lch_t *ch;
2608 unsigned low_wm_notif;
2609 unsigned lcid;
2610 unsigned long flags;
2611
2612
2613 /*
2614 * Transmit packets in round-robin fashion based upon ready
2615 * channels.
2616 *
2617 * To eliminate the need to hold a lock for the entire
2618 * iteration through the channel ready list, the head of the
2619 * ready-channel list is always the next channel to be
2620 * processed. To send a packet, the first valid packet in
2621 * the head channel is removed and the head channel is then
2622 * rescheduled at the end of the queue by removing it and
2623 * inserting after the tail. The locks can then be released
2624 * while the packet is processed.
2625 */
Eric Holmberged1f00c2012-06-07 09:45:18 -06002626 while (!smux.in_reset) {
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002627 pkt = NULL;
2628 low_wm_notif = 0;
2629
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002630 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002631
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002632 /* handle wakeup if needed */
2633 if (smux.power_state == SMUX_PWR_OFF) {
2634 if (!list_empty(&smux.lch_tx_ready_list) ||
2635 !list_empty(&smux.power_queue)) {
2636 /* data to transmit, do wakeup */
Eric Holmbergff0b0112012-06-08 15:06:57 -06002637 SMUX_PWR("%s: Power %d->%d\n", __func__,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002638 smux.power_state,
2639 SMUX_PWR_TURNING_ON);
2640 smux.power_state = SMUX_PWR_TURNING_ON;
2641 spin_unlock_irqrestore(&smux.tx_lock_lha2,
2642 flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002643 queue_work(smux_tx_wq, &smux_wakeup_work);
2644 } else {
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002645 /* no activity -- stay asleep */
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002646 spin_unlock_irqrestore(&smux.tx_lock_lha2,
2647 flags);
2648 }
2649 break;
2650 }
2651
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002652 /* process any pending power packets */
2653 if (!list_empty(&smux.power_queue)) {
2654 pkt = list_first_entry(&smux.power_queue,
2655 struct smux_pkt_t, list);
2656 list_del(&pkt->list);
2657 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2658
Eric Holmberga9b06472012-06-22 09:46:34 -06002659 /* Adjust power state if this is a flush command */
2660 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
2661 if (smux.power_state == SMUX_PWR_TURNING_OFF_FLUSH &&
2662 pkt->hdr.cmd == SMUX_CMD_PWR_CTL) {
2663 if (pkt->hdr.flags & SMUX_CMD_PWR_CTL_ACK ||
2664 smux.power_ctl_remote_req_received) {
2665 /*
2666 * Sending remote power-down request ACK
2667 * or sending local power-down request
2668 * and we already received a remote
2669 * power-down request.
2670 */
2671 SMUX_PWR("%s: Power %d->%d\n", __func__,
2672 smux.power_state,
2673 SMUX_PWR_OFF_FLUSH);
2674 smux.power_state = SMUX_PWR_OFF_FLUSH;
2675 smux.power_ctl_remote_req_received = 0;
2676 queue_work(smux_tx_wq,
2677 &smux_inactivity_work);
2678 } else {
2679 /* sending local power-down request */
2680 SMUX_PWR("%s: Power %d->%d\n", __func__,
2681 smux.power_state,
2682 SMUX_PWR_TURNING_OFF);
2683 smux.power_state = SMUX_PWR_TURNING_OFF;
2684 }
2685 }
2686 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2687
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002688 /* send the packet */
Eric Holmberga9b06472012-06-22 09:46:34 -06002689 smux_uart_power_on();
2690 smux.tx_activity_flag = 1;
Eric Holmberg4dd6e1a2012-06-26 13:46:29 -06002691 SMUX_PWR_PKT_TX(pkt);
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002692 if (!smux_byte_loopback) {
2693 smux_tx_tty(pkt);
2694 smux_flush_tty();
2695 } else {
2696 smux_tx_loopback(pkt);
2697 }
2698
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002699 smux_free_pkt(pkt);
2700 continue;
2701 }
2702
2703 /* get the next ready channel */
2704 if (list_empty(&smux.lch_tx_ready_list)) {
2705 /* no ready channels */
2706 SMUX_DBG("%s: no more ready channels, exiting\n",
2707 __func__);
2708 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2709 break;
2710 }
2711 smux.tx_activity_flag = 1;
2712
2713 if (smux.power_state != SMUX_PWR_ON) {
2714 /* channel not ready to transmit */
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002715 SMUX_DBG("%s: waiting for link up (state %d)\n",
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002716 __func__,
2717 smux.power_state);
2718 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2719 break;
2720 }
2721
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002722 /* get the next packet to send and rotate channel list */
2723 ch = list_first_entry(&smux.lch_tx_ready_list,
2724 struct smux_lch_t,
2725 tx_ready_list);
2726
2727 spin_lock(&ch->state_lock_lhb1);
2728 spin_lock(&ch->tx_lock_lhb2);
2729 if (!list_empty(&ch->tx_queue)) {
2730 /*
2731 * If remote TX flow control is enabled or
2732 * the channel is not fully opened, then only
2733 * send command packets.
2734 */
2735 if (ch->tx_flow_control || !IS_FULLY_OPENED(ch)) {
2736 struct smux_pkt_t *curr;
2737 list_for_each_entry(curr, &ch->tx_queue, list) {
2738 if (curr->hdr.cmd != SMUX_CMD_DATA) {
2739 pkt = curr;
2740 break;
2741 }
2742 }
2743 } else {
2744 /* get next cmd/data packet to send */
2745 pkt = list_first_entry(&ch->tx_queue,
2746 struct smux_pkt_t, list);
2747 }
2748 }
2749
2750 if (pkt) {
2751 list_del(&pkt->list);
2752
2753 /* update packet stats */
2754 if (pkt->hdr.cmd == SMUX_CMD_DATA) {
2755 --ch->tx_pending_data_cnt;
2756 if (ch->notify_lwm &&
2757 ch->tx_pending_data_cnt
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002758 <= SMUX_TX_WM_LOW) {
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002759 ch->notify_lwm = 0;
2760 low_wm_notif = 1;
2761 }
2762 }
2763
2764 /* advance to the next ready channel */
2765 list_rotate_left(&smux.lch_tx_ready_list);
2766 } else {
2767 /* no data in channel to send, remove from ready list */
2768 list_del(&ch->tx_ready_list);
2769 INIT_LIST_HEAD(&ch->tx_ready_list);
2770 }
2771 lcid = ch->lcid;
2772 spin_unlock(&ch->tx_lock_lhb2);
2773 spin_unlock(&ch->state_lock_lhb1);
2774 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2775
2776 if (low_wm_notif)
2777 schedule_notify(lcid, SMUX_LOW_WM_HIT, NULL);
2778
2779 /* send the packet */
2780 smux_tx_pkt(ch, pkt);
2781 smux_free_pkt(pkt);
2782 }
2783}
2784
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002785/**
2786 * Update the RX flow control (sent in the TIOCM Status command).
2787 *
2788 * @ch Channel for update
2789 *
2790 * @returns 1 for updated, 0 for not updated
2791 *
2792 * Must be called with ch->state_lock_lhb1 locked.
2793 */
2794static int smux_rx_flow_control_updated(struct smux_lch_t *ch)
2795{
2796 int updated = 0;
2797 int prev_state;
2798
2799 prev_state = ch->local_tiocm & SMUX_CMD_STATUS_FLOW_CNTL;
2800
2801 if (ch->rx_flow_control_client || ch->rx_flow_control_auto)
2802 ch->local_tiocm |= SMUX_CMD_STATUS_FLOW_CNTL;
2803 else
2804 ch->local_tiocm &= ~SMUX_CMD_STATUS_FLOW_CNTL;
2805
2806 if (prev_state != (ch->local_tiocm & SMUX_CMD_STATUS_FLOW_CNTL)) {
2807 smux_send_status_cmd(ch);
2808 updated = 1;
2809 }
2810
2811 return updated;
2812}
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002813
Eric Holmberg06011322012-07-06 18:17:03 -06002814/**
2815 * Flush all SMUX workqueues.
2816 *
2817 * This sets the reset bit to abort any processing loops and then
2818 * flushes the workqueues to ensure that no new pending work is
2819 * running. Do not call with any locks used by workers held as
2820 * this will result in a deadlock.
2821 */
2822static void smux_flush_workqueues(void)
2823{
2824 smux.in_reset = 1;
2825
2826 SMUX_DBG("%s: flushing tx wq\n", __func__);
2827 flush_workqueue(smux_tx_wq);
2828 SMUX_DBG("%s: flushing rx wq\n", __func__);
2829 flush_workqueue(smux_rx_wq);
2830 SMUX_DBG("%s: flushing notify wq\n", __func__);
2831 flush_workqueue(smux_notify_wq);
2832}
2833
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002834/**********************************************************************/
2835/* Kernel API */
2836/**********************************************************************/
2837
2838/**
2839 * Set or clear channel option using the SMUX_CH_OPTION_* channel
2840 * flags.
2841 *
2842 * @lcid Logical channel ID
2843 * @set Options to set
2844 * @clear Options to clear
2845 *
2846 * @returns 0 for success, < 0 for failure
2847 */
2848int msm_smux_set_ch_option(uint8_t lcid, uint32_t set, uint32_t clear)
2849{
2850 unsigned long flags;
2851 struct smux_lch_t *ch;
2852 int tx_ready = 0;
2853 int ret = 0;
2854
2855 if (smux_assert_lch_id(lcid))
2856 return -ENXIO;
2857
2858 ch = &smux_lch[lcid];
2859 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2860
2861 /* Local loopback mode */
2862 if (set & SMUX_CH_OPTION_LOCAL_LOOPBACK)
2863 ch->local_mode = SMUX_LCH_MODE_LOCAL_LOOPBACK;
2864
2865 if (clear & SMUX_CH_OPTION_LOCAL_LOOPBACK)
2866 ch->local_mode = SMUX_LCH_MODE_NORMAL;
2867
2868 /* Remote loopback mode */
2869 if (set & SMUX_CH_OPTION_REMOTE_LOOPBACK)
2870 ch->local_mode = SMUX_LCH_MODE_REMOTE_LOOPBACK;
2871
2872 if (clear & SMUX_CH_OPTION_REMOTE_LOOPBACK)
2873 ch->local_mode = SMUX_LCH_MODE_NORMAL;
2874
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002875 /* RX Flow control */
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002876 if (set & SMUX_CH_OPTION_REMOTE_TX_STOP) {
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002877 ch->rx_flow_control_client = 1;
2878 tx_ready |= smux_rx_flow_control_updated(ch);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002879 }
2880
2881 if (clear & SMUX_CH_OPTION_REMOTE_TX_STOP) {
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002882 ch->rx_flow_control_client = 0;
2883 tx_ready |= smux_rx_flow_control_updated(ch);
2884 }
2885
2886 /* Auto RX Flow Control */
2887 if (set & SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP) {
2888 SMUX_DBG("%s: auto rx flow control option enabled\n",
2889 __func__);
2890 ch->options |= SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP;
2891 }
2892
2893 if (clear & SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP) {
2894 SMUX_DBG("%s: auto rx flow control option disabled\n",
2895 __func__);
2896 ch->options &= ~SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP;
2897 ch->rx_flow_control_auto = 0;
2898 tx_ready |= smux_rx_flow_control_updated(ch);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002899 }
2900
2901 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2902
2903 if (tx_ready)
2904 list_channel(ch);
2905
2906 return ret;
2907}
2908
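/*
 * Illustrative usage sketch (hypothetical client code, not part of
 * this driver): toggle local loopback around a self-test.
 */
#if 0
static int example_toggle_loopback(uint8_t lcid, int enable)
{
	if (enable)
		return msm_smux_set_ch_option(lcid,
				SMUX_CH_OPTION_LOCAL_LOOPBACK, 0);
	else
		return msm_smux_set_ch_option(lcid,
				0, SMUX_CH_OPTION_LOCAL_LOOPBACK);
}
#endif
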
2909/**
2910 * Starts the opening sequence for a logical channel.
2911 *
2912 * @lcid Logical channel ID
2913 * @priv Free for client usage
2914 * @notify Event notification function
2915 * @get_rx_buffer Function used to provide a receive buffer to SMUX
2916 *
2917 * @returns 0 for success, <0 otherwise
2918 *
 2919 * A channel must be fully closed (either not previously opened, or
 2920 * msm_smux_close() has been called and the SMUX_DISCONNECTED
 2921 * notification has been received).
2922 *
 2923 * Once the remote side is opened, the client will receive a SMUX_CONNECTED
2924 * event.
2925 */
2926int msm_smux_open(uint8_t lcid, void *priv,
2927 void (*notify)(void *priv, int event_type, const void *metadata),
2928 int (*get_rx_buffer)(void *priv, void **pkt_priv, void **buffer,
2929 int size))
2930{
2931 int ret;
2932 struct smux_lch_t *ch;
2933 struct smux_pkt_t *pkt;
2934 int tx_ready = 0;
2935 unsigned long flags;
2936
2937 if (smux_assert_lch_id(lcid))
2938 return -ENXIO;
2939
2940 ch = &smux_lch[lcid];
2941 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2942
2943 if (ch->local_state == SMUX_LCH_LOCAL_CLOSING) {
2944 ret = -EAGAIN;
2945 goto out;
2946 }
2947
2948 if (ch->local_state != SMUX_LCH_LOCAL_CLOSED) {
2949 pr_err("%s: open lcid %d local state %x invalid\n",
2950 __func__, lcid, ch->local_state);
2951 ret = -EINVAL;
2952 goto out;
2953 }
2954
2955 SMUX_DBG("lcid %d local state 0x%x -> 0x%x\n", lcid,
2956 ch->local_state,
2957 SMUX_LCH_LOCAL_OPENING);
2958
Eric Holmberg06011322012-07-06 18:17:03 -06002959 ch->rx_flow_control_auto = 0;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002960 ch->local_state = SMUX_LCH_LOCAL_OPENING;
2961
2962 ch->priv = priv;
2963 ch->notify = notify;
2964 ch->get_rx_buffer = get_rx_buffer;
2965 ret = 0;
2966
2967 /* Send Open Command */
2968 pkt = smux_alloc_pkt();
2969 if (!pkt) {
2970 ret = -ENOMEM;
2971 goto out;
2972 }
2973 pkt->hdr.magic = SMUX_MAGIC;
2974 pkt->hdr.cmd = SMUX_CMD_OPEN_LCH;
2975 pkt->hdr.flags = SMUX_CMD_OPEN_POWER_COLLAPSE;
2976 if (ch->local_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK)
2977 pkt->hdr.flags |= SMUX_CMD_OPEN_REMOTE_LOOPBACK;
2978 pkt->hdr.lcid = lcid;
2979 pkt->hdr.payload_len = 0;
2980 pkt->hdr.pad_len = 0;
2981 smux_tx_queue(pkt, ch, 0);
2982 tx_ready = 1;
2983
2984out:
2985 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
Eric Holmberg06011322012-07-06 18:17:03 -06002986 smux_rx_flow_control_updated(ch);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002987 if (tx_ready)
2988 list_channel(ch);
2989 return ret;
2990}
2991
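/*
 * Illustrative open sequence (hypothetical client code, not part of
 * this driver; the callback names and the GFP flag are assumptions).
 * get_rx_buffer() may return -EAGAIN to make SMUX retry the buffer
 * request later via the RX retry worker.
 */
#if 0
static void example_notify(void *priv, int event_type,
			   const void *metadata)
{
	if (event_type == SMUX_CONNECTED)
		pr_info("example: channel connected\n");
}

static int example_get_rx_buffer(void *priv, void **pkt_priv,
				 void **buffer, int size)
{
	*pkt_priv = NULL;
	*buffer = kmalloc(size, GFP_KERNEL);
	return *buffer ? 0 : -EAGAIN;
}

static int example_open(uint8_t lcid)
{
	return msm_smux_open(lcid, NULL, example_notify,
			     example_get_rx_buffer);
}
#endif
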
2992/**
2993 * Starts the closing sequence for a logical channel.
2994 *
2995 * @lcid Logical channel ID
2996 *
2997 * @returns 0 for success, <0 otherwise
2998 *
2999 * Once the close event has been acknowledge by the remote side, the client
3000 * will receive a SMUX_DISCONNECTED notification.
3001 */
3002int msm_smux_close(uint8_t lcid)
3003{
3004 int ret = 0;
3005 struct smux_lch_t *ch;
3006 struct smux_pkt_t *pkt;
3007 int tx_ready = 0;
3008 unsigned long flags;
3009
3010 if (smux_assert_lch_id(lcid))
3011 return -ENXIO;
3012
3013 ch = &smux_lch[lcid];
3014 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
3015 ch->local_tiocm = 0x0;
3016 ch->remote_tiocm = 0x0;
3017 ch->tx_pending_data_cnt = 0;
3018 ch->notify_lwm = 0;
3019
3020 /* Purge TX queue */
3021 spin_lock(&ch->tx_lock_lhb2);
Eric Holmberged1f00c2012-06-07 09:45:18 -06003022 smux_purge_ch_tx_queue(ch);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003023 spin_unlock(&ch->tx_lock_lhb2);
3024
3025 /* Send Close Command */
3026 if (ch->local_state == SMUX_LCH_LOCAL_OPENED ||
3027 ch->local_state == SMUX_LCH_LOCAL_OPENING) {
3028 SMUX_DBG("lcid %d local state 0x%x -> 0x%x\n", lcid,
3029 ch->local_state,
3030 SMUX_LCH_LOCAL_CLOSING);
3031
3032 ch->local_state = SMUX_LCH_LOCAL_CLOSING;
3033 pkt = smux_alloc_pkt();
3034 if (pkt) {
3035 pkt->hdr.cmd = SMUX_CMD_CLOSE_LCH;
3036 pkt->hdr.flags = 0;
3037 pkt->hdr.lcid = lcid;
3038 pkt->hdr.payload_len = 0;
3039 pkt->hdr.pad_len = 0;
3040 smux_tx_queue(pkt, ch, 0);
3041 tx_ready = 1;
3042 } else {
3043 pr_err("%s: pkt allocation failed\n", __func__);
3044 ret = -ENOMEM;
3045 }
Eric Holmbergb8435c82012-06-05 14:51:29 -06003046
3047 /* Purge RX retry queue */
3048 if (ch->rx_retry_queue_cnt)
3049 queue_delayed_work(smux_rx_wq, &ch->rx_retry_work, 0);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003050 }
3051 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
3052
3053 if (tx_ready)
3054 list_channel(ch);
3055
3056 return ret;
3057}
3058
3059/**
3060 * Write data to a logical channel.
3061 *
3062 * @lcid Logical channel ID
3063 * @pkt_priv Client data that will be returned with the SMUX_WRITE_DONE or
3064 * SMUX_WRITE_FAIL notification.
3065 * @data Data to write
3066 * @len Length of @data
3067 *
3068 * @returns 0 for success, <0 otherwise
3069 *
3070 * Data may be written immediately after msm_smux_open() is called,
3071 * but the data will wait in the transmit queue until the channel has
3072 * been fully opened.
3073 *
3074 * Once the data has been written, the client will receive either a completion
3075 * (SMUX_WRITE_DONE) or a failure notice (SMUX_WRITE_FAIL).
3076 */
3077int msm_smux_write(uint8_t lcid, void *pkt_priv, const void *data, int len)
3078{
3079 struct smux_lch_t *ch;
3080 struct smux_pkt_t *pkt;
3081 int tx_ready = 0;
3082 unsigned long flags;
3083 int ret;
3084
3085 if (smux_assert_lch_id(lcid))
3086 return -ENXIO;
3087
3088 ch = &smux_lch[lcid];
3089 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
3090
3091 if (ch->local_state != SMUX_LCH_LOCAL_OPENED &&
3092 ch->local_state != SMUX_LCH_LOCAL_OPENING) {
 3093		pr_err("%s: invalid local state %d channel %d\n",
3094 __func__, ch->local_state, lcid);
3095 ret = -EINVAL;
3096 goto out;
3097 }
3098
3099 if (len > SMUX_MAX_PKT_SIZE - sizeof(struct smux_hdr_t)) {
3100 pr_err("%s: payload %d too large\n",
3101 __func__, len);
3102 ret = -E2BIG;
3103 goto out;
3104 }
3105
3106 pkt = smux_alloc_pkt();
3107 if (!pkt) {
3108 ret = -ENOMEM;
3109 goto out;
3110 }
3111
3112 pkt->hdr.cmd = SMUX_CMD_DATA;
3113 pkt->hdr.lcid = lcid;
3114 pkt->hdr.flags = 0;
3115 pkt->hdr.payload_len = len;
3116 pkt->payload = (void *)data;
3117 pkt->priv = pkt_priv;
3118 pkt->hdr.pad_len = 0;
3119
3120 spin_lock(&ch->tx_lock_lhb2);
3121 /* verify high watermark */
 3122	SMUX_DBG("%s: pending %d\n", __func__, ch->tx_pending_data_cnt);
3123
Eric Holmberg2e0906f2012-06-26 13:29:14 -06003124 if (ch->tx_pending_data_cnt >= SMUX_TX_WM_HIGH) {
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003125 pr_err("%s: ch %d high watermark %d exceeded %d\n",
Eric Holmberg2e0906f2012-06-26 13:29:14 -06003126 __func__, lcid, SMUX_TX_WM_HIGH,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003127 ch->tx_pending_data_cnt);
3128 ret = -EAGAIN;
3129 goto out_inner;
3130 }
3131
3132 /* queue packet for transmit */
Eric Holmberg2e0906f2012-06-26 13:29:14 -06003133 if (++ch->tx_pending_data_cnt == SMUX_TX_WM_HIGH) {
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003134 ch->notify_lwm = 1;
3135 pr_err("%s: high watermark hit\n", __func__);
3136 schedule_notify(lcid, SMUX_HIGH_WM_HIT, NULL);
3137 }
3138 list_add_tail(&pkt->list, &ch->tx_queue);
3139
3140 /* add to ready list */
3141 if (IS_FULLY_OPENED(ch))
3142 tx_ready = 1;
3143
3144 ret = 0;
3145
3146out_inner:
3147 spin_unlock(&ch->tx_lock_lhb2);
3148
3149out:
3150 if (ret)
3151 smux_free_pkt(pkt);
3152 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
3153
3154 if (tx_ready)
3155 list_channel(ch);
3156
3157 return ret;
3158}
3159
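/*
 * Illustrative write call (hypothetical client code). @buf must stay
 * valid until SMUX_WRITE_DONE or SMUX_WRITE_FAIL is delivered, and
 * -EAGAIN means the high watermark was hit; back off until
 * SMUX_LOW_WM_HIT arrives.
 */
#if 0
static int example_send(uint8_t lcid, const void *buf, int len)
{
	int ret;

	ret = msm_smux_write(lcid, (void *)buf, buf, len);
	if (ret == -EAGAIN)
		pr_info("example: lcid %d full; wait for SMUX_LOW_WM_HIT\n",
			lcid);
	return ret;
}
#endif
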
3160/**
3161 * Returns true if the TX queue is currently full (high water mark).
3162 *
3163 * @lcid Logical channel ID
3164 * @returns 0 if channel is not full
3165 * 1 if it is full
3166 * < 0 for error
3167 */
3168int msm_smux_is_ch_full(uint8_t lcid)
3169{
3170 struct smux_lch_t *ch;
3171 unsigned long flags;
3172 int is_full = 0;
3173
3174 if (smux_assert_lch_id(lcid))
3175 return -ENXIO;
3176
3177 ch = &smux_lch[lcid];
3178
3179 spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06003180 if (ch->tx_pending_data_cnt >= SMUX_TX_WM_HIGH)
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003181 is_full = 1;
3182 spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);
3183
3184 return is_full;
3185}
3186
3187/**
 3188 * Returns true if the TX queue has space for more packets (it is at or
 3189 * below the low water mark).
3190 *
3191 * @lcid Logical channel ID
3192 * @returns 0 if channel is above low watermark
3193 * 1 if it's at or below the low watermark
3194 * < 0 for error
3195 */
3196int msm_smux_is_ch_low(uint8_t lcid)
3197{
3198 struct smux_lch_t *ch;
3199 unsigned long flags;
3200 int is_low = 0;
3201
3202 if (smux_assert_lch_id(lcid))
3203 return -ENXIO;
3204
3205 ch = &smux_lch[lcid];
3206
3207 spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06003208 if (ch->tx_pending_data_cnt <= SMUX_TX_WM_LOW)
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003209 is_low = 1;
3210 spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);
3211
3212 return is_low;
3213}
3214
3215/**
3216 * Send TIOCM status update.
3217 *
3218 * @ch Channel for update
3219 *
3220 * @returns 0 for success, <0 for failure
3221 *
3222 * Channel lock must be held before calling.
3223 */
3224static int smux_send_status_cmd(struct smux_lch_t *ch)
3225{
3226 struct smux_pkt_t *pkt;
3227
3228 if (!ch)
3229 return -EINVAL;
3230
3231 pkt = smux_alloc_pkt();
3232 if (!pkt)
3233 return -ENOMEM;
3234
3235 pkt->hdr.lcid = ch->lcid;
3236 pkt->hdr.cmd = SMUX_CMD_STATUS;
3237 pkt->hdr.flags = ch->local_tiocm;
3238 pkt->hdr.payload_len = 0;
3239 pkt->hdr.pad_len = 0;
3240 smux_tx_queue(pkt, ch, 0);
3241
3242 return 0;
3243}
3244
3245/**
3246 * Internal helper function for getting the TIOCM status with
3247 * state_lock_lhb1 already locked.
3248 *
3249 * @ch Channel pointer
3250 *
3251 * @returns TIOCM status
3252 */
3253static long msm_smux_tiocm_get_atomic(struct smux_lch_t *ch)
3254{
3255 long status = 0x0;
3256
3257 status |= (ch->remote_tiocm & SMUX_CMD_STATUS_RTC) ? TIOCM_DSR : 0;
3258 status |= (ch->remote_tiocm & SMUX_CMD_STATUS_RTR) ? TIOCM_CTS : 0;
3259 status |= (ch->remote_tiocm & SMUX_CMD_STATUS_RI) ? TIOCM_RI : 0;
3260 status |= (ch->remote_tiocm & SMUX_CMD_STATUS_DCD) ? TIOCM_CD : 0;
3261
3262 status |= (ch->local_tiocm & SMUX_CMD_STATUS_RTC) ? TIOCM_DTR : 0;
3263 status |= (ch->local_tiocm & SMUX_CMD_STATUS_RTR) ? TIOCM_RTS : 0;
3264
3265 return status;
3266}
3267
3268/**
3269 * Get the TIOCM status bits.
3270 *
3271 * @lcid Logical channel ID
3272 *
3273 * @returns >= 0 TIOCM status bits
3274 * < 0 Error condition
3275 */
3276long msm_smux_tiocm_get(uint8_t lcid)
3277{
3278 struct smux_lch_t *ch;
3279 unsigned long flags;
3280 long status = 0x0;
3281
3282 if (smux_assert_lch_id(lcid))
3283 return -ENXIO;
3284
3285 ch = &smux_lch[lcid];
3286 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
3287 status = msm_smux_tiocm_get_atomic(ch);
3288 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
3289
3290 return status;
3291}
3292
3293/**
3294 * Set/clear the TIOCM status bits.
3295 *
3296 * @lcid Logical channel ID
3297 * @set Bits to set
3298 * @clear Bits to clear
3299 *
3300 * @returns 0 for success; < 0 for failure
3301 *
3302 * If a bit is specified in both the @set and @clear masks, then the clear bit
3303 * definition will dominate and the bit will be cleared.
3304 */
3305int msm_smux_tiocm_set(uint8_t lcid, uint32_t set, uint32_t clear)
3306{
3307 struct smux_lch_t *ch;
3308 unsigned long flags;
3309 uint8_t old_status;
3310 uint8_t status_set = 0x0;
3311 uint8_t status_clear = 0x0;
3312 int tx_ready = 0;
3313 int ret = 0;
3314
3315 if (smux_assert_lch_id(lcid))
3316 return -ENXIO;
3317
3318 ch = &smux_lch[lcid];
3319 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
3320
3321 status_set |= (set & TIOCM_DTR) ? SMUX_CMD_STATUS_RTC : 0;
3322 status_set |= (set & TIOCM_RTS) ? SMUX_CMD_STATUS_RTR : 0;
3323 status_set |= (set & TIOCM_RI) ? SMUX_CMD_STATUS_RI : 0;
3324 status_set |= (set & TIOCM_CD) ? SMUX_CMD_STATUS_DCD : 0;
3325
3326 status_clear |= (clear & TIOCM_DTR) ? SMUX_CMD_STATUS_RTC : 0;
3327 status_clear |= (clear & TIOCM_RTS) ? SMUX_CMD_STATUS_RTR : 0;
3328 status_clear |= (clear & TIOCM_RI) ? SMUX_CMD_STATUS_RI : 0;
3329 status_clear |= (clear & TIOCM_CD) ? SMUX_CMD_STATUS_DCD : 0;
3330
3331 old_status = ch->local_tiocm;
3332 ch->local_tiocm |= status_set;
3333 ch->local_tiocm &= ~status_clear;
3334
3335 if (ch->local_tiocm != old_status) {
3336 ret = smux_send_status_cmd(ch);
3337 tx_ready = 1;
3338 }
3339 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
3340
3341 if (tx_ready)
3342 list_channel(ch);
3343
3344 return ret;
3345}
3346
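/*
 * Illustrative TIOCM usage (hypothetical): assert DTR while dropping
 * RTS in a single call; a bit present in both masks ends up cleared.
 *
 *	ret = msm_smux_tiocm_set(lcid, TIOCM_DTR, TIOCM_RTS);
 */
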
3347/**********************************************************************/
Eric Holmberged1f00c2012-06-07 09:45:18 -06003348/* Subsystem Restart */
3349/**********************************************************************/
3350static struct notifier_block ssr_notifier = {
3351 .notifier_call = ssr_notifier_cb,
3352};
3353
3354/**
3355 * Handle Subsystem Restart (SSR) notifications.
3356 *
3357 * @this Pointer to ssr_notifier
3358 * @code SSR Code
3359 * @data Data pointer (not used)
3360 */
static int ssr_notifier_cb(struct notifier_block *this,
				unsigned long code,
				void *data)
{
	unsigned long flags;
	int i;
	int tmp;
	int power_off_uart = 0;

	if (code == SUBSYS_BEFORE_SHUTDOWN) {
		SMUX_DBG("%s: ssr - before shutdown\n", __func__);
		mutex_lock(&smux.mutex_lha0);
		smux.in_reset = 1;
		mutex_unlock(&smux.mutex_lha0);
		return NOTIFY_DONE;
	} else if (code == SUBSYS_AFTER_POWERUP) {
		/* re-register platform devices */
		SMUX_DBG("%s: ssr - after power-up\n", __func__);
		mutex_lock(&smux.mutex_lha0);
		for (i = 0; i < ARRAY_SIZE(smux_devs); ++i) {
			SMUX_DBG("%s: register pdev '%s'\n",
					__func__, smux_devs[i].name);
			smux_devs[i].dev.release = smux_pdev_release;
			tmp = platform_device_register(&smux_devs[i]);
			if (tmp)
				pr_err("%s: error %d registering device %s\n",
					__func__, tmp, smux_devs[i].name);
		}
		mutex_unlock(&smux.mutex_lha0);
		return NOTIFY_DONE;
	} else if (code != SUBSYS_AFTER_SHUTDOWN) {
		return NOTIFY_DONE;
	}
	SMUX_DBG("%s: ssr - after shutdown\n", __func__);

	/* Cleanup channels */
	smux_flush_workqueues();
	mutex_lock(&smux.mutex_lha0);
	smux_lch_purge();
	if (smux.tty)
		tty_driver_flush_buffer(smux.tty);

	/* Unregister platform devices */
	for (i = 0; i < ARRAY_SIZE(smux_devs); ++i) {
		SMUX_DBG("%s: unregister pdev '%s'\n",
				__func__, smux_devs[i].name);
		platform_device_unregister(&smux_devs[i]);
	}

	/* Power-down UART */
	spin_lock_irqsave(&smux.tx_lock_lha2, flags);
	if (smux.power_state != SMUX_PWR_OFF) {
		SMUX_PWR("%s: SSR - turning off UART\n", __func__);
		smux.power_state = SMUX_PWR_OFF;
		power_off_uart = 1;
	}
	smux.powerdown_enabled = 0;
	spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);

	if (power_off_uart)
		smux_uart_power_off_atomic();

	smux.tx_activity_flag = 0;
	smux.rx_activity_flag = 0;
	smux.rx_state = SMUX_RX_IDLE;
	smux.in_reset = 0;
	mutex_unlock(&smux.mutex_lha0);

	return NOTIFY_DONE;
}

/**********************************************************************/
/* Line Discipline Interface                                          */
/**********************************************************************/
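/**
 * Release function for the SMUX platform devices.
 *
 * @dev Device structure
 *
 * Clears the embedded device structure so that the statically-allocated
 * platform device can be registered again after a subsystem restart.
 */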
static void smux_pdev_release(struct device *dev)
{
	struct platform_device *pdev;

	pdev = container_of(dev, struct platform_device, dev);
	SMUX_DBG("%s: releasing pdev %p '%s'\n", __func__, pdev, pdev->name);
	memset(&pdev->dev, 0x0, sizeof(pdev->dev));
}

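/**
 * Open the SMUX line discipline on a TTY.
 *
 * @tty TTY to attach to
 *
 * @returns 0 for success, < 0 otherwise
 */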
static int smuxld_open(struct tty_struct *tty)
{
	int i;
	int tmp;
	unsigned long flags;

	if (!smux.is_initialized)
		return -ENODEV;

	mutex_lock(&smux.mutex_lha0);
	if (smux.ld_open_count) {
		pr_err("%s: tty %p - multiple instances not supported\n",
			__func__, tty);
		mutex_unlock(&smux.mutex_lha0);
		return -EEXIST;
	}

	if (tty->ops->write == NULL) {
		pr_err("%s: tty->ops->write is NULL\n", __func__);
		mutex_unlock(&smux.mutex_lha0);
		return -EINVAL;
	}

	/* connect to TTY */
	++smux.ld_open_count;
	smux.in_reset = 0;
	smux.tty = tty;
	tty->disc_data = &smux;
	tty->receive_room = TTY_RECEIVE_ROOM;
	tty_driver_flush_buffer(tty);

	/* power-down the UART if we are idle */
	spin_lock_irqsave(&smux.tx_lock_lha2, flags);
	if (smux.power_state == SMUX_PWR_OFF) {
		SMUX_PWR("%s: powering off uart\n", __func__);
		smux.power_state = SMUX_PWR_OFF_FLUSH;
		spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
		queue_work(smux_tx_wq, &smux_inactivity_work);
	} else {
		spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
	}

	/* register platform devices */
	for (i = 0; i < ARRAY_SIZE(smux_devs); ++i) {
		SMUX_DBG("%s: register pdev '%s'\n",
				__func__, smux_devs[i].name);
		smux_devs[i].dev.release = smux_pdev_release;
		tmp = platform_device_register(&smux_devs[i]);
		if (tmp)
			pr_err("%s: error %d registering device %s\n",
				__func__, tmp, smux_devs[i].name);
	}
	mutex_unlock(&smux.mutex_lha0);
	return 0;
}

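/**
 * Close the SMUX line discipline and detach from the TTY.
 *
 * @tty TTY the line discipline is attached to
 */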
static void smuxld_close(struct tty_struct *tty)
{
	unsigned long flags;
	int power_up_uart = 0;
	int i;

	SMUX_DBG("%s: ldisc unload\n", __func__);
	smux_flush_workqueues();

	mutex_lock(&smux.mutex_lha0);
	if (smux.ld_open_count <= 0) {
		pr_err("%s: invalid ld count %d\n", __func__,
			smux.ld_open_count);
		mutex_unlock(&smux.mutex_lha0);
		return;
	}
	--smux.ld_open_count;

	/* Cleanup channels */
	smux_lch_purge();

	/* Unregister platform devices */
	for (i = 0; i < ARRAY_SIZE(smux_devs); ++i) {
		SMUX_DBG("%s: unregister pdev '%s'\n",
				__func__, smux_devs[i].name);
		platform_device_unregister(&smux_devs[i]);
	}

	/* Schedule UART power-up if it's down */
	spin_lock_irqsave(&smux.tx_lock_lha2, flags);
	if (smux.power_state == SMUX_PWR_OFF)
		power_up_uart = 1;
	smux.power_state = SMUX_PWR_OFF;
	smux.powerdown_enabled = 0;
	smux.tx_activity_flag = 0;
	smux.rx_activity_flag = 0;
	spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);

	if (power_up_uart)
		smux_uart_power_on_atomic();

	smux.rx_state = SMUX_RX_IDLE;

	/* Disconnect from TTY */
	smux.tty = NULL;
	mutex_unlock(&smux.mutex_lha0);
	SMUX_DBG("%s: ldisc complete\n", __func__);
}

/**
 * Receive data from TTY Line Discipline.
 *
 * @tty TTY structure
 * @cp Character data
 * @fp Flag data
 * @count Size of character and flag data
 */
void smuxld_receive_buf(struct tty_struct *tty, const unsigned char *cp,
			   char *fp, int count)
{
	int i;
	int last_idx = 0;
	const char *tty_name = NULL;
	char *f;

	if (smux_debug_mask & MSM_SMUX_DEBUG)
		print_hex_dump(KERN_INFO, "smux tty rx: ", DUMP_PREFIX_OFFSET,
				16, 1, cp, count, true);

	/* verify error flags */
	for (i = 0, f = fp; i < count; ++i, ++f) {
		if (*f != TTY_NORMAL) {
			if (tty)
				tty_name = tty->name;
			pr_err("%s: TTY %s Error %d (%s)\n", __func__,
				tty_name, *f, tty_flag_to_str(*f));

			/* feed all previous valid data to the parser */
			smux_rx_state_machine(cp + last_idx, i - last_idx,
					TTY_NORMAL);

			/* feed bad data to parser */
			smux_rx_state_machine(cp + i, 1, *f);
			last_idx = i + 1;
		}
	}

	/* feed data to RX state machine */
	smux_rx_state_machine(cp + last_idx, count - last_idx, TTY_NORMAL);
}

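/*
 * The remaining line-discipline entry points are not meaningful for
 * SMUX, since clients exchange data through the SMUX channel API rather
 * than by reading or writing the TTY directly.  Each stub below logs an
 * error, and those that return a value fail with -ENODEV.
 */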
static void smuxld_flush_buffer(struct tty_struct *tty)
{
	pr_err("%s: not supported\n", __func__);
}

static ssize_t smuxld_chars_in_buffer(struct tty_struct *tty)
{
	pr_err("%s: not supported\n", __func__);
	return -ENODEV;
}

static ssize_t smuxld_read(struct tty_struct *tty, struct file *file,
		unsigned char __user *buf, size_t nr)
{
	pr_err("%s: not supported\n", __func__);
	return -ENODEV;
}

static ssize_t smuxld_write(struct tty_struct *tty, struct file *file,
		const unsigned char *buf, size_t nr)
{
	pr_err("%s: not supported\n", __func__);
	return -ENODEV;
}

static int smuxld_ioctl(struct tty_struct *tty, struct file *file,
		unsigned int cmd, unsigned long arg)
{
	pr_err("%s: not supported\n", __func__);
	return -ENODEV;
}

static unsigned int smuxld_poll(struct tty_struct *tty, struct file *file,
		struct poll_table_struct *tbl)
{
	pr_err("%s: not supported\n", __func__);
	return -ENODEV;
}

static void smuxld_write_wakeup(struct tty_struct *tty)
{
	pr_err("%s: not supported\n", __func__);
}

static struct tty_ldisc_ops smux_ldisc_ops = {
	.owner = THIS_MODULE,
	.magic = TTY_LDISC_MAGIC,
	.name = "n_smux",
	.open = smuxld_open,
	.close = smuxld_close,
	.flush_buffer = smuxld_flush_buffer,
	.chars_in_buffer = smuxld_chars_in_buffer,
	.read = smuxld_read,
	.write = smuxld_write,
	.ioctl = smuxld_ioctl,
	.poll = smuxld_poll,
	.receive_buf = smuxld_receive_buf,
	.write_wakeup = smuxld_write_wakeup
};
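
/*
 * Usage note: a userspace process would typically attach this line
 * discipline to an open serial port with the TIOCSETD ioctl.  A minimal
 * sketch (illustrative only; the device node name is an assumption and
 * error handling is omitted):
 *
 *	int fd = open("/dev/ttyHS0", O_RDWR | O_NOCTTY);
 *	int ldisc = N_SMUX;
 *	ioctl(fd, TIOCSETD, &ldisc);
 *
 * Setting the discipline invokes smuxld_open(); clients then communicate
 * through the SMUX channel API rather than through this TTY.
 */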
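
/**
 * Initialize the SMUX module state and register the line discipline,
 * SSR notifier, and logical channels.
 *
 * @returns 0 for success, < 0 otherwise
 */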
static int __init smux_init(void)
{
	int ret;

	mutex_init(&smux.mutex_lha0);

	spin_lock_init(&smux.rx_lock_lha1);
	smux.rx_state = SMUX_RX_IDLE;
	smux.power_state = SMUX_PWR_OFF;
	smux.pwr_wakeup_delay_us = 1;
	smux.powerdown_enabled = 0;
	smux.power_ctl_remote_req_received = 0;
	INIT_LIST_HEAD(&smux.power_queue);
	smux.rx_activity_flag = 0;
	smux.tx_activity_flag = 0;
	smux.recv_len = 0;
	smux.tty = NULL;
	smux.ld_open_count = 0;
	smux.in_reset = 0;
	smux.is_initialized = 1;
	smux_byte_loopback = 0;

	spin_lock_init(&smux.tx_lock_lha2);
	INIT_LIST_HEAD(&smux.lch_tx_ready_list);

	ret = tty_register_ldisc(N_SMUX, &smux_ldisc_ops);
	if (ret != 0) {
		pr_err("%s: error %d registering line discipline\n",
				__func__, ret);
		return ret;
	}

	subsys_notif_register_notifier("external_modem", &ssr_notifier);

	ret = lch_init();
	if (ret != 0) {
		pr_err("%s: lch_init failed\n", __func__);
		/* don't leave the line discipline registered on failure */
		tty_unregister_ldisc(N_SMUX);
		return ret;
	}

	return 0;
}

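/**
 * Unregister the SMUX line discipline on module unload.
 */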
static void __exit smux_exit(void)
{
	int ret;

	ret = tty_unregister_ldisc(N_SMUX);
	if (ret != 0) {
		pr_err("%s: error %d unregistering line discipline\n",
				__func__, ret);
		return;
	}
}
3705
3706module_init(smux_init);
3707module_exit(smux_exit);
3708
3709MODULE_DESCRIPTION("Serial Mux TTY Line Discipline");
3710MODULE_LICENSE("GPL v2");
3711MODULE_ALIAS_LDISC(N_SMUX);