/* drivers/tty/n_smux.c
 *
 * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/tty_driver.h>
#include <linux/smux.h>
#include <linux/list.h>
#include <linux/kfifo.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <mach/subsystem_notif.h>
#include <mach/subsystem_restart.h>
#include <mach/msm_serial_hs.h>
#include "smux_private.h"
#include "smux_loopback.h"

#define SMUX_NOTIFY_FIFO_SIZE	128
#define SMUX_TX_QUEUE_SIZE	256
#define SMUX_PKT_LOG_SIZE	80

/* Maximum size we can accept in a single RX buffer */
#define TTY_RECEIVE_ROOM	65536
#define TTY_BUFFER_FULL_WAIT_MS	50

/* maximum sleep time between wakeup attempts */
#define SMUX_WAKEUP_DELAY_MAX	(1 << 20)

/* minimum delay for scheduling delayed work */
#define SMUX_WAKEUP_DELAY_MIN	(1 << 15)

/* inactivity timeout for no rx/tx activity */
#define SMUX_INACTIVITY_TIMEOUT_MS	1000000

/* RX get_rx_buffer retry timeout values */
#define SMUX_RX_RETRY_MIN_MS	(1 << 0)  /* 1 ms */
#define SMUX_RX_RETRY_MAX_MS	(1 << 10) /* 1024 ms */

enum {
	MSM_SMUX_DEBUG = 1U << 0,
	MSM_SMUX_INFO = 1U << 1,
	MSM_SMUX_POWER_INFO = 1U << 2,
	MSM_SMUX_PKT = 1U << 3,
};

static int smux_debug_mask;
module_param_named(debug_mask, smux_debug_mask,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);

/* Simulated wakeup used for testing */
int smux_byte_loopback;
module_param_named(byte_loopback, smux_byte_loopback,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);
int smux_simulate_wakeup_delay = 1;
module_param_named(simulate_wakeup_delay, smux_simulate_wakeup_delay,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);

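/*
 * Illustrative note only: the log categories above are selected at runtime
 * through the debug_mask module parameter.  Assuming the module is named
 * n_smux and sysfs is mounted at /sys, enabling general debug plus packet
 * logging (0x1 | 0x8) would look something like:
 *
 *	echo 0x9 > /sys/module/n_smux/parameters/debug_mask
 *
 * The module name and sysfs path are assumptions here and depend on how the
 * driver is built into the kernel.
 */
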
#define SMUX_DBG(x...) do {                              \
	if (smux_debug_mask & MSM_SMUX_DEBUG)            \
		pr_info(x);                              \
} while (0)

#define SMUX_PWR(x...) do {                              \
	if (smux_debug_mask & MSM_SMUX_POWER_INFO)       \
		pr_info(x);                              \
} while (0)

#define SMUX_PWR_PKT_RX(pkt) do {                        \
	if (smux_debug_mask & MSM_SMUX_POWER_INFO)       \
		smux_log_pkt(pkt, 1);                    \
} while (0)

#define SMUX_PWR_PKT_TX(pkt) do {                        \
	if (smux_debug_mask & MSM_SMUX_POWER_INFO) {     \
		if (pkt->hdr.cmd == SMUX_CMD_BYTE &&     \
			pkt->hdr.flags == SMUX_WAKEUP_ACK) \
			pr_info("smux: TX Wakeup ACK\n"); \
		else if (pkt->hdr.cmd == SMUX_CMD_BYTE && \
			pkt->hdr.flags == SMUX_WAKEUP_REQ) \
			pr_info("smux: TX Wakeup REQ\n"); \
		else                                     \
			smux_log_pkt(pkt, 0);            \
	}                                                \
} while (0)

#define SMUX_PWR_BYTE_TX(pkt) do {                       \
	if (smux_debug_mask & MSM_SMUX_POWER_INFO) {     \
		smux_log_pkt(pkt, 0);                    \
	}                                                \
} while (0)

#define SMUX_LOG_PKT_RX(pkt) do {                        \
	if (smux_debug_mask & MSM_SMUX_PKT)              \
		smux_log_pkt(pkt, 1);                    \
} while (0)

#define SMUX_LOG_PKT_TX(pkt) do {                        \
	if (smux_debug_mask & MSM_SMUX_PKT)              \
		smux_log_pkt(pkt, 0);                    \
} while (0)

/**
 * Return true if channel is fully opened (both
 * local and remote sides are in the OPENED state).
 */
#define IS_FULLY_OPENED(ch) \
	(ch && (ch)->local_state == SMUX_LCH_LOCAL_OPENED \
	 && (ch)->remote_state == SMUX_LCH_REMOTE_OPENED)

static struct platform_device smux_devs[] = {
	{.name = "SMUX_CTL", .id = -1},
	{.name = "SMUX_RMNET", .id = -1},
	{.name = "SMUX_DUN_DATA_HSUART", .id = 0},
	{.name = "SMUX_RMNET_DATA_HSUART", .id = 1},
	{.name = "SMUX_RMNET_CTL_HSUART", .id = 0},
	{.name = "SMUX_DIAG", .id = -1},
};

enum {
	SMUX_CMD_STATUS_RTC = 1 << 0,
	SMUX_CMD_STATUS_RTR = 1 << 1,
	SMUX_CMD_STATUS_RI = 1 << 2,
	SMUX_CMD_STATUS_DCD = 1 << 3,
	SMUX_CMD_STATUS_FLOW_CNTL = 1 << 4,
};

/* Channel mode */
enum {
	SMUX_LCH_MODE_NORMAL,
	SMUX_LCH_MODE_LOCAL_LOOPBACK,
	SMUX_LCH_MODE_REMOTE_LOOPBACK,
};

enum {
	SMUX_RX_IDLE,
	SMUX_RX_MAGIC,
	SMUX_RX_HDR,
	SMUX_RX_PAYLOAD,
	SMUX_RX_FAILURE,
};

/**
 * Power states.
 *
 * The _FLUSH states are internal transitional states and are not part of the
 * official state machine.
 */
enum {
	SMUX_PWR_OFF,
	SMUX_PWR_TURNING_ON,
	SMUX_PWR_ON,
	SMUX_PWR_TURNING_OFF_FLUSH, /* power-off req/ack in TX queue */
	SMUX_PWR_TURNING_OFF,
	SMUX_PWR_OFF_FLUSH,
};

/**
 * Logical Channel Structure.  One instance per channel.
 *
 * Locking Hierarchy
 * Each lock name carries a suffix that describes its locking level.  When
 * multiple locks are required, they must be acquired in order of increasing
 * hierarchy number, which prevents deadlock.
 *
 * Locking Example
 * If state_lock_lhb1 is currently held and the TX list needs to be
 * manipulated, then tx_lock_lhb2 may be locked since its locking hierarchy
 * is greater.  However, if tx_lock_lhb2 is held, then state_lock_lhb1 may
 * not be acquired since that would risk a deadlock.
 *
 * Note that the Line Discipline locks (*_lha) must always be acquired
 * before the logical channel locks.
 * (A short illustrative locking sketch follows the structure definition
 * below.)
 */
struct smux_lch_t {
	/* channel state */
	spinlock_t state_lock_lhb1;
	uint8_t lcid;
	unsigned local_state;
	unsigned local_mode;
	uint8_t local_tiocm;
	unsigned options;

	unsigned remote_state;
	unsigned remote_mode;
	uint8_t remote_tiocm;

	int tx_flow_control;
	int rx_flow_control_auto;
	int rx_flow_control_client;

	/* client callbacks and private data */
	void *priv;
	void (*notify)(void *priv, int event_type, const void *metadata);
	int (*get_rx_buffer)(void *priv, void **pkt_priv, void **buffer,
								int size);

	/* RX Info */
	struct list_head rx_retry_queue;
	unsigned rx_retry_queue_cnt;
	struct delayed_work rx_retry_work;

	/* TX Info */
	spinlock_t tx_lock_lhb2;
	struct list_head tx_queue;
	struct list_head tx_ready_list;
	unsigned tx_pending_data_cnt;
	unsigned notify_lwm;
};
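
/*
 * Illustrative sketch only (not called anywhere): how the locking hierarchy
 * described above is intended to be followed when touching a channel's TX
 * queue while its state lock is already held.  The helper name is
 * hypothetical.
 *
 *	static void example_lock_order(struct smux_lch_t *ch)
 *	{
 *		unsigned long flags;
 *
 *		spin_lock_irqsave(&ch->state_lock_lhb1, flags);	// lhb1 first
 *		spin_lock(&ch->tx_lock_lhb2);			// then lhb2
 *		// ... manipulate ch->tx_queue ...
 *		spin_unlock(&ch->tx_lock_lhb2);
 *		spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
 *	}
 */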

union notifier_metadata {
	struct smux_meta_disconnected disconnected;
	struct smux_meta_read read;
	struct smux_meta_write write;
	struct smux_meta_tiocm tiocm;
};

struct smux_notify_handle {
	void (*notify)(void *priv, int event_type, const void *metadata);
	void *priv;
	int event_type;
	union notifier_metadata *metadata;
};

/**
 * Get RX Buffer Retry structure.
 *
 * This is used for clients that are unable to provide an RX buffer
 * immediately.  The structure temporarily holds the packet data while the
 * get_rx_buffer() call is retried.
 */
struct smux_rx_pkt_retry {
	struct smux_pkt_t *pkt;
	struct list_head rx_retry_list;
	unsigned timeout_in_ms;
};

/**
 * Receive worker data structure.
 *
 * One instance is created for every call to smux_rx_state_machine.
 */
struct smux_rx_worker_data {
	const unsigned char *data;
	int len;
	int flag;

	struct work_struct work;
	struct completion work_complete;
};

/**
 * Line discipline and module structure.
 *
 * Only one instance since multiple instances of line discipline are not
 * allowed.
 */
struct smux_ldisc_t {
	struct mutex mutex_lha0;

	int is_initialized;
	int in_reset;
	int ld_open_count;
	struct tty_struct *tty;

	/* RX State Machine (single-threaded access by smux_rx_wq) */
	unsigned char recv_buf[SMUX_MAX_PKT_SIZE];
	unsigned int recv_len;
	unsigned int pkt_remain;
	unsigned rx_state;

	/* RX Activity - accessed by multiple threads */
	spinlock_t rx_lock_lha1;
	unsigned rx_activity_flag;

	/* TX / Power */
	spinlock_t tx_lock_lha2;
	struct list_head lch_tx_ready_list;
	unsigned power_state;
	unsigned pwr_wakeup_delay_us;
	unsigned tx_activity_flag;
	unsigned powerdown_enabled;
	unsigned power_ctl_remote_req_received;
	struct list_head power_queue;
};


/* data structures */
static struct smux_lch_t smux_lch[SMUX_NUM_LOGICAL_CHANNELS];
static struct smux_ldisc_t smux;
static const char *tty_error_type[] = {
	[TTY_NORMAL] = "normal",
	[TTY_OVERRUN] = "overrun",
	[TTY_BREAK] = "break",
	[TTY_PARITY] = "parity",
	[TTY_FRAME] = "framing",
};

static const char *smux_cmds[] = {
	[SMUX_CMD_DATA] = "DATA",
	[SMUX_CMD_OPEN_LCH] = "OPEN",
	[SMUX_CMD_CLOSE_LCH] = "CLOSE",
	[SMUX_CMD_STATUS] = "STATUS",
	[SMUX_CMD_PWR_CTL] = "PWR",
	[SMUX_CMD_BYTE] = "Raw Byte",
};

static void smux_notify_local_fn(struct work_struct *work);
static DECLARE_WORK(smux_notify_local, smux_notify_local_fn);

static struct workqueue_struct *smux_notify_wq;
static size_t handle_size;
static struct kfifo smux_notify_fifo;
static int queued_fifo_notifications;
static DEFINE_SPINLOCK(notify_lock_lhc1);

static struct workqueue_struct *smux_tx_wq;
static struct workqueue_struct *smux_rx_wq;
static void smux_tx_worker(struct work_struct *work);
static DECLARE_WORK(smux_tx_work, smux_tx_worker);

static void smux_wakeup_worker(struct work_struct *work);
static void smux_rx_retry_worker(struct work_struct *work);
static void smux_rx_worker(struct work_struct *work);
static DECLARE_WORK(smux_wakeup_work, smux_wakeup_worker);
static DECLARE_DELAYED_WORK(smux_wakeup_delayed_work, smux_wakeup_worker);

static void smux_inactivity_worker(struct work_struct *work);
static DECLARE_WORK(smux_inactivity_work, smux_inactivity_worker);
static DECLARE_DELAYED_WORK(smux_delayed_inactivity_work,
		smux_inactivity_worker);

static long msm_smux_tiocm_get_atomic(struct smux_lch_t *ch);
static void list_channel(struct smux_lch_t *ch);
static int smux_send_status_cmd(struct smux_lch_t *ch);
static int smux_dispatch_rx_pkt(struct smux_pkt_t *pkt);
static void smux_flush_tty(void);
static void smux_purge_ch_tx_queue(struct smux_lch_t *ch);
static int schedule_notify(uint8_t lcid, int event,
			const union notifier_metadata *metadata);
static int ssr_notifier_cb(struct notifier_block *this,
				unsigned long code,
				void *data);
static void smux_uart_power_on_atomic(void);
static int smux_rx_flow_control_updated(struct smux_lch_t *ch);
static void smux_flush_workqueues(void);

/**
 * Convert TTY Error Flags to string for logging purposes.
 *
 * @flag TTY_* flag
 * @returns String description or NULL if unknown
 */
static const char *tty_flag_to_str(unsigned flag)
{
	if (flag < ARRAY_SIZE(tty_error_type))
		return tty_error_type[flag];
	return NULL;
}

/**
 * Convert SMUX Command to string for logging purposes.
 *
 * @cmd SMUX command
 * @returns String description or NULL if unknown
 */
static const char *cmd_to_str(unsigned cmd)
{
	if (cmd < ARRAY_SIZE(smux_cmds))
		return smux_cmds[cmd];
	return NULL;
}

/**
 * Set the reset state due to an unrecoverable failure.
 */
static void smux_enter_reset(void)
{
	pr_err("%s: unrecoverable failure, waiting for ssr\n", __func__);
	smux.in_reset = 1;
}

static int lch_init(void)
{
	unsigned int id;
	struct smux_lch_t *ch;
	int i = 0;

	handle_size = sizeof(struct smux_notify_handle *);

	smux_notify_wq = create_singlethread_workqueue("smux_notify_wq");
	smux_tx_wq = create_singlethread_workqueue("smux_tx_wq");
	smux_rx_wq = create_singlethread_workqueue("smux_rx_wq");

	if (!smux_notify_wq || !smux_tx_wq || !smux_rx_wq) {
		SMUX_DBG("%s: create_singlethread_workqueue ENOMEM\n",
							__func__);
		return -ENOMEM;
	}

	i |= kfifo_alloc(&smux_notify_fifo,
			SMUX_NOTIFY_FIFO_SIZE * handle_size,
			GFP_KERNEL);
	i |= smux_loopback_init();

	if (i) {
		pr_err("%s: out of memory error\n", __func__);
		return -ENOMEM;
	}

	for (id = 0 ; id < SMUX_NUM_LOGICAL_CHANNELS; id++) {
		ch = &smux_lch[id];

		spin_lock_init(&ch->state_lock_lhb1);
		ch->lcid = id;
		ch->local_state = SMUX_LCH_LOCAL_CLOSED;
		ch->local_mode = SMUX_LCH_MODE_NORMAL;
		ch->local_tiocm = 0x0;
		ch->options = SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP;
		ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
		ch->remote_mode = SMUX_LCH_MODE_NORMAL;
		ch->remote_tiocm = 0x0;
		ch->tx_flow_control = 0;
		ch->rx_flow_control_auto = 0;
		ch->rx_flow_control_client = 0;
		ch->priv = 0;
		ch->notify = 0;
		ch->get_rx_buffer = 0;

		INIT_LIST_HEAD(&ch->rx_retry_queue);
		ch->rx_retry_queue_cnt = 0;
		INIT_DELAYED_WORK(&ch->rx_retry_work, smux_rx_retry_worker);

		spin_lock_init(&ch->tx_lock_lhb2);
		INIT_LIST_HEAD(&ch->tx_queue);
		INIT_LIST_HEAD(&ch->tx_ready_list);
		ch->tx_pending_data_cnt = 0;
		ch->notify_lwm = 0;
	}

	return 0;
}

/**
 * Empty and cleanup all SMUX logical channels for subsystem restart or line
 * discipline disconnect.
 */
static void smux_lch_purge(void)
{
	struct smux_lch_t *ch;
	unsigned long flags;
	int i;

	/* Empty TX ready list */
	spin_lock_irqsave(&smux.tx_lock_lha2, flags);
	while (!list_empty(&smux.lch_tx_ready_list)) {
		SMUX_DBG("%s: emptying ready list %p\n",
				__func__, smux.lch_tx_ready_list.next);
		ch = list_first_entry(&smux.lch_tx_ready_list,
						struct smux_lch_t,
						tx_ready_list);
		list_del(&ch->tx_ready_list);
		INIT_LIST_HEAD(&ch->tx_ready_list);
	}

	/* Purge Power Queue */
	while (!list_empty(&smux.power_queue)) {
		struct smux_pkt_t *pkt;

		pkt = list_first_entry(&smux.power_queue,
						struct smux_pkt_t,
						list);
		list_del(&pkt->list);
		SMUX_DBG("%s: emptying power queue pkt=%p\n",
				__func__, pkt);
		smux_free_pkt(pkt);
	}
	spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);

	/* Close all ports */
	for (i = 0 ; i < SMUX_NUM_LOGICAL_CHANNELS; i++) {
		ch = &smux_lch[i];
		SMUX_DBG("%s: cleaning up lcid %d\n", __func__, i);

		spin_lock_irqsave(&ch->state_lock_lhb1, flags);

		/* Purge TX queue */
		spin_lock(&ch->tx_lock_lhb2);
		smux_purge_ch_tx_queue(ch);
		spin_unlock(&ch->tx_lock_lhb2);

		/* Notify user of disconnect and reset channel state */
		if (ch->local_state == SMUX_LCH_LOCAL_OPENED ||
			ch->local_state == SMUX_LCH_LOCAL_CLOSING) {
			union notifier_metadata meta;

			meta.disconnected.is_ssr = smux.in_reset;
			schedule_notify(ch->lcid, SMUX_DISCONNECTED, &meta);
		}

		ch->local_state = SMUX_LCH_LOCAL_CLOSED;
		ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
		ch->remote_mode = SMUX_LCH_MODE_NORMAL;
		ch->tx_flow_control = 0;
		ch->rx_flow_control_auto = 0;
		ch->rx_flow_control_client = 0;

		/* Purge RX retry queue */
		if (ch->rx_retry_queue_cnt)
			queue_delayed_work(smux_rx_wq, &ch->rx_retry_work, 0);

		spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
	}
}

int smux_assert_lch_id(uint32_t lcid)
{
	if (lcid >= SMUX_NUM_LOGICAL_CHANNELS)
		return -ENXIO;
	else
		return 0;
}

/**
 * Log packet information for debug purposes.
 *
 * @pkt Packet to log
 * @is_recv 1 = RX packet; 0 = TX Packet
 *
 * [DIR][LCID] [LOCAL_STATE][LOCAL_MODE]:[REMOTE_STATE][REMOTE_MODE] PKT Info
 *
 * PKT Info:
 *   [CMD] flags [flags] len [PAYLOAD_LEN]:[PAD_LEN] [Payload hex bytes]
 *
 * Direction:    R = Receive, S = Send
 * Local State:  C = Closed; c = closing; o = opening; O = Opened
 * Local Mode:   L = Local loopback; R = Remote loopback; N = Normal
 * Remote State: C = Closed; O = Opened
 * Remote Mode:  R = Remote loopback; N = Normal
 */
static void smux_log_pkt(struct smux_pkt_t *pkt, int is_recv)
{
	char logbuf[SMUX_PKT_LOG_SIZE];
	char cmd_extra[16];
	int i = 0;
	int count;
	int len;
	char local_state;
	char local_mode;
	char remote_state;
	char remote_mode;
	struct smux_lch_t *ch = NULL;
	unsigned char *data;

	if (!smux_assert_lch_id(pkt->hdr.lcid))
		ch = &smux_lch[pkt->hdr.lcid];

	if (ch) {
		switch (ch->local_state) {
		case SMUX_LCH_LOCAL_CLOSED:
			local_state = 'C';
			break;
		case SMUX_LCH_LOCAL_OPENING:
			local_state = 'o';
			break;
		case SMUX_LCH_LOCAL_OPENED:
			local_state = 'O';
			break;
		case SMUX_LCH_LOCAL_CLOSING:
			local_state = 'c';
			break;
		default:
			local_state = 'U';
			break;
		}

		switch (ch->local_mode) {
		case SMUX_LCH_MODE_LOCAL_LOOPBACK:
			local_mode = 'L';
			break;
		case SMUX_LCH_MODE_REMOTE_LOOPBACK:
			local_mode = 'R';
			break;
		case SMUX_LCH_MODE_NORMAL:
			local_mode = 'N';
			break;
		default:
			local_mode = 'U';
			break;
		}

		switch (ch->remote_state) {
		case SMUX_LCH_REMOTE_CLOSED:
			remote_state = 'C';
			break;
		case SMUX_LCH_REMOTE_OPENED:
			remote_state = 'O';
			break;

		default:
			remote_state = 'U';
			break;
		}

		switch (ch->remote_mode) {
		case SMUX_LCH_MODE_REMOTE_LOOPBACK:
			remote_mode = 'R';
			break;
		case SMUX_LCH_MODE_NORMAL:
			remote_mode = 'N';
			break;
		default:
			remote_mode = 'U';
			break;
		}
	} else {
		/* broadcast channel */
		local_state = '-';
		local_mode = '-';
		remote_state = '-';
		remote_mode = '-';
	}

	/* determine command type (ACK, etc) */
	cmd_extra[0] = '\0';
	switch (pkt->hdr.cmd) {
	case SMUX_CMD_OPEN_LCH:
		if (pkt->hdr.flags & SMUX_CMD_OPEN_ACK)
			snprintf(cmd_extra, sizeof(cmd_extra), " ACK");
		break;
	case SMUX_CMD_CLOSE_LCH:
		if (pkt->hdr.flags & SMUX_CMD_CLOSE_ACK)
			snprintf(cmd_extra, sizeof(cmd_extra), " ACK");
		break;

	case SMUX_CMD_PWR_CTL:
		if (pkt->hdr.flags & SMUX_CMD_PWR_CTL_ACK)
			snprintf(cmd_extra, sizeof(cmd_extra), " ACK");
		break;
	};

	i += snprintf(logbuf + i, SMUX_PKT_LOG_SIZE - i,
		"smux: %c%d %c%c:%c%c %s%s flags %x len %d:%d ",
		is_recv ? 'R' : 'S', pkt->hdr.lcid,
		local_state, local_mode,
		remote_state, remote_mode,
		cmd_to_str(pkt->hdr.cmd), cmd_extra, pkt->hdr.flags,
		pkt->hdr.payload_len, pkt->hdr.pad_len);

	len = (pkt->hdr.payload_len > 16) ? 16 : pkt->hdr.payload_len;
	data = (unsigned char *)pkt->payload;
	for (count = 0; count < len; count++)
		i += snprintf(logbuf + i, SMUX_PKT_LOG_SIZE - i,
				"%02x ", (unsigned)data[count]);

	pr_info("%s\n", logbuf);
}

static void smux_notify_local_fn(struct work_struct *work)
{
	struct smux_notify_handle *notify_handle = NULL;
	union notifier_metadata *metadata = NULL;
	unsigned long flags;
	int i;

	for (;;) {
		/* retrieve notification */
		spin_lock_irqsave(&notify_lock_lhc1, flags);
		if (kfifo_len(&smux_notify_fifo) >= handle_size) {
			i = kfifo_out(&smux_notify_fifo,
				&notify_handle,
				handle_size);
			if (i != handle_size) {
				pr_err("%s: unable to retrieve handle %d expected %d\n",
					__func__, i, handle_size);
				spin_unlock_irqrestore(&notify_lock_lhc1, flags);
				break;
			}
		} else {
			spin_unlock_irqrestore(&notify_lock_lhc1, flags);
			break;
		}
		--queued_fifo_notifications;
		spin_unlock_irqrestore(&notify_lock_lhc1, flags);

		/* notify client */
		metadata = notify_handle->metadata;
		notify_handle->notify(notify_handle->priv,
			notify_handle->event_type,
			metadata);

		kfree(metadata);
		kfree(notify_handle);
	}
}

/**
 * Initialize existing packet.
 */
void smux_init_pkt(struct smux_pkt_t *pkt)
{
	memset(pkt, 0x0, sizeof(*pkt));
	pkt->hdr.magic = SMUX_MAGIC;
	INIT_LIST_HEAD(&pkt->list);
}

/**
 * Allocate and initialize a packet.
 *
 * If a payload is needed, either set it directly (and ensure that it is
 * freed) or use smux_alloc_pkt_payload() to allocate the payload, in which
 * case it is freed automatically when smux_free_pkt() is called.
 */
struct smux_pkt_t *smux_alloc_pkt(void)
{
	struct smux_pkt_t *pkt;

	/* Consider a free list implementation instead of kmalloc */
	pkt = kmalloc(sizeof(struct smux_pkt_t), GFP_ATOMIC);
	if (!pkt) {
		pr_err("%s: out of memory\n", __func__);
		return NULL;
	}
	smux_init_pkt(pkt);
	pkt->allocated = 1;

	return pkt;
}

/**
 * Free packet.
 *
 * @pkt Packet to free (may be NULL)
 *
 * If payload was allocated using smux_alloc_pkt_payload(), then it is freed as
 * well.  Otherwise, the caller is responsible for freeing the payload.
 */
void smux_free_pkt(struct smux_pkt_t *pkt)
{
	if (pkt) {
		if (pkt->free_payload)
			kfree(pkt->payload);
		if (pkt->allocated)
			kfree(pkt);
	}
}

/**
 * Allocate packet payload.
 *
 * @pkt Packet to add payload to
 *
 * @returns 0 on success, <0 upon error
 *
 * A flag is set to signal smux_free_pkt() to free the payload.
 */
int smux_alloc_pkt_payload(struct smux_pkt_t *pkt)
{
	if (!pkt)
		return -EINVAL;

	pkt->payload = kmalloc(pkt->hdr.payload_len, GFP_ATOMIC);
	pkt->free_payload = 1;
	if (!pkt->payload) {
		pr_err("%s: unable to malloc %d bytes for payload\n",
			__func__, pkt->hdr.payload_len);
		return -ENOMEM;
	}

	return 0;
}
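
/*
 * Illustrative sketch only: a typical allocate/fill/free cycle for a data
 * packet using the helpers above.  The 'lcid', 'buf', and 'len' names stand
 * in for values prepared by the caller and are not defined in this file.
 *
 *	struct smux_pkt_t *pkt = smux_alloc_pkt();
 *	if (pkt) {
 *		pkt->hdr.cmd = SMUX_CMD_DATA;
 *		pkt->hdr.lcid = lcid;
 *		pkt->hdr.payload_len = len;
 *		if (smux_alloc_pkt_payload(pkt) == 0) {
 *			memcpy(pkt->payload, buf, len);
 *			// ... hand the packet to the TX path ...
 *		}
 *		smux_free_pkt(pkt);	// also frees the payload
 *	}
 */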

static int schedule_notify(uint8_t lcid, int event,
			const union notifier_metadata *metadata)
{
	struct smux_notify_handle *notify_handle = 0;
	union notifier_metadata *meta_copy = 0;
	struct smux_lch_t *ch;
	int i;
	unsigned long flags;
	int ret = 0;

	ch = &smux_lch[lcid];
	notify_handle = kzalloc(sizeof(struct smux_notify_handle),
						GFP_ATOMIC);
	if (!notify_handle) {
		pr_err("%s: out of memory\n", __func__);
		ret = -ENOMEM;
		goto free_out;
	}

	notify_handle->notify = ch->notify;
	notify_handle->priv = ch->priv;
	notify_handle->event_type = event;
	if (metadata) {
		meta_copy = kzalloc(sizeof(union notifier_metadata),
						GFP_ATOMIC);
		if (!meta_copy) {
			pr_err("%s: out of memory\n", __func__);
			ret = -ENOMEM;
			goto free_out;
		}
		*meta_copy = *metadata;
		notify_handle->metadata = meta_copy;
	} else {
		notify_handle->metadata = NULL;
	}

	spin_lock_irqsave(&notify_lock_lhc1, flags);
	i = kfifo_avail(&smux_notify_fifo);
	if (i < handle_size) {
		pr_err("%s: fifo full error %d expected %d\n",
					__func__, i, handle_size);
		ret = -ENOMEM;
		goto unlock_out;
	}

	i = kfifo_in(&smux_notify_fifo, &notify_handle, handle_size);
	if (i < 0 || i != handle_size) {
		pr_err("%s: fifo not available error %d (expected %d)\n",
				__func__, i, handle_size);
		ret = -ENOSPC;
		goto unlock_out;
	}
	++queued_fifo_notifications;

unlock_out:
	spin_unlock_irqrestore(&notify_lock_lhc1, flags);

free_out:
	queue_work(smux_notify_wq, &smux_notify_local);
	if (ret < 0 && notify_handle) {
		kfree(notify_handle->metadata);
		kfree(notify_handle);
	}
	return ret;
}

/**
 * Returns the serialized size of a packet.
 *
 * @pkt Packet to serialize
 *
 * @returns Serialized length of packet
 */
static unsigned int smux_serialize_size(struct smux_pkt_t *pkt)
{
	unsigned int size;

	size = sizeof(struct smux_hdr_t);
	size += pkt->hdr.payload_len;
	size += pkt->hdr.pad_len;

	return size;
}

/**
 * Serialize packet @pkt into output buffer @data.
 *
 * @pkt     Packet to serialize
 * @out     Destination buffer pointer
 * @out_len Size of serialized packet
 *
 * @returns 0 for success
 */
int smux_serialize(struct smux_pkt_t *pkt, char *out,
					unsigned int *out_len)
{
	char *data_start = out;

	if (smux_serialize_size(pkt) > SMUX_MAX_PKT_SIZE) {
		pr_err("%s: packet size %d too big\n",
				__func__, smux_serialize_size(pkt));
		return -E2BIG;
	}

	memcpy(out, &pkt->hdr, sizeof(struct smux_hdr_t));
	out += sizeof(struct smux_hdr_t);
	if (pkt->payload) {
		memcpy(out, pkt->payload, pkt->hdr.payload_len);
		out += pkt->hdr.payload_len;
	}
	if (pkt->hdr.pad_len) {
		memset(out, 0x0, pkt->hdr.pad_len);
		out += pkt->hdr.pad_len;
	}
	*out_len = out - data_start;
	return 0;
}
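
/*
 * Illustrative sketch only: serializing a packet into a local buffer sized
 * with smux_serialize_size().  The buffer and length names are hypothetical
 * and do not exist elsewhere in this file.
 *
 *	char buf[SMUX_MAX_PKT_SIZE];
 *	unsigned int written;
 *
 *	if (smux_serialize_size(pkt) <= sizeof(buf) &&
 *			smux_serialize(pkt, buf, &written) == 0) {
 *		// 'written' now holds header + payload + padding length
 *	}
 */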

/**
 * Serialize header and provide pointer to the data.
 *
 * @pkt Packet
 * @out[out] Pointer to the serialized header data
 * @out_len[out] Pointer to the serialized header length
 */
static void smux_serialize_hdr(struct smux_pkt_t *pkt, char **out,
					unsigned int *out_len)
{
	*out = (char *)&pkt->hdr;
	*out_len = sizeof(struct smux_hdr_t);
}

/**
 * Serialize payload and provide pointer to the data.
 *
 * @pkt Packet
 * @out[out] Pointer to the serialized payload data
 * @out_len[out] Pointer to the serialized payload length
 */
static void smux_serialize_payload(struct smux_pkt_t *pkt, char **out,
					unsigned int *out_len)
{
	*out = pkt->payload;
	*out_len = pkt->hdr.payload_len;
}

/**
 * Serialize padding and provide pointer to the data.
 *
 * @pkt Packet
 * @out[out] Pointer to the serialized padding (always NULL)
 * @out_len[out] Pointer to the serialized padding length
 *
 * Since the padding field value is undefined, only the size of the padding
 * (@out_len) is set and the buffer pointer (@out) will always be NULL.
 */
static void smux_serialize_padding(struct smux_pkt_t *pkt, char **out,
					unsigned int *out_len)
{
	*out = NULL;
	*out_len = pkt->hdr.pad_len;
}

/**
 * Write data to TTY framework and handle breaking the writes up if needed.
 *
 * @data Data to write
 * @len  Length of data
 *
 * @returns 0 for success, < 0 for failure
 */
static int write_to_tty(char *data, unsigned len)
{
	int data_written;

	if (!data)
		return 0;

	while (len > 0 && !smux.in_reset) {
		data_written = smux.tty->ops->write(smux.tty, data, len);
		if (data_written >= 0) {
			len -= data_written;
			data += data_written;
		} else {
			pr_err("%s: TTY write returned error %d\n",
					__func__, data_written);
			return data_written;
		}

		if (len)
			tty_wait_until_sent(smux.tty,
				msecs_to_jiffies(TTY_BUFFER_FULL_WAIT_MS));
	}
	return 0;
}

/**
 * Write packet to TTY.
 *
 * @pkt packet to write
 *
 * @returns 0 on success
 */
static int smux_tx_tty(struct smux_pkt_t *pkt)
{
	char *data;
	unsigned int len;
	int ret;

	if (!smux.tty) {
		pr_err("%s: TTY not initialized", __func__);
		return -ENOTTY;
	}

	if (pkt->hdr.cmd == SMUX_CMD_BYTE) {
		SMUX_DBG("%s: tty send single byte\n", __func__);
		ret = write_to_tty(&pkt->hdr.flags, 1);
		return ret;
	}

	smux_serialize_hdr(pkt, &data, &len);
	ret = write_to_tty(data, len);
	if (ret) {
		pr_err("%s: failed %d to write header %d\n",
				__func__, ret, len);
		return ret;
	}

	smux_serialize_payload(pkt, &data, &len);
	ret = write_to_tty(data, len);
	if (ret) {
		pr_err("%s: failed %d to write payload %d\n",
				__func__, ret, len);
		return ret;
	}

	smux_serialize_padding(pkt, &data, &len);
	while (len > 0) {
		char zero = 0x0;
		ret = write_to_tty(&zero, 1);
		if (ret) {
			pr_err("%s: failed %d to write padding %d\n",
					__func__, ret, len);
			return ret;
		}
		--len;
	}
	return 0;
}

/**
 * Send a single character.
 *
 * @ch Character to send
 */
static void smux_send_byte(char ch)
{
	struct smux_pkt_t *pkt;

	pkt = smux_alloc_pkt();
	if (!pkt) {
		pr_err("%s: alloc failure for byte %x\n", __func__, ch);
		return;
	}
	pkt->hdr.cmd = SMUX_CMD_BYTE;
	pkt->hdr.flags = ch;
	pkt->hdr.lcid = SMUX_BROADCAST_LCID;

	list_add_tail(&pkt->list, &smux.power_queue);
	queue_work(smux_tx_wq, &smux_tx_work);
}

/**
 * Receive a single-character packet (used for internal testing).
 *
 * @ch   Character to receive
 * @lcid Logical channel ID for packet
 *
 * @returns 0 for success
 */
static int smux_receive_byte(char ch, int lcid)
{
	struct smux_pkt_t pkt;

	smux_init_pkt(&pkt);
	pkt.hdr.lcid = lcid;
	pkt.hdr.cmd = SMUX_CMD_BYTE;
	pkt.hdr.flags = ch;

	return smux_dispatch_rx_pkt(&pkt);
}

/**
 * Queue packet for transmit.
 *
 * @pkt_ptr Packet to queue
 * @ch      Channel to queue packet on
 * @queue   Queue channel on ready list
 */
static void smux_tx_queue(struct smux_pkt_t *pkt_ptr, struct smux_lch_t *ch,
		int queue)
{
	unsigned long flags;

	SMUX_DBG("%s: queuing pkt %p\n", __func__, pkt_ptr);

	spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
	list_add_tail(&pkt_ptr->list, &ch->tx_queue);
	spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);

	if (queue)
		list_channel(ch);
}
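
/*
 * Illustrative sketch only: queuing an already-built packet on a channel.
 * 'pkt' and 'ch' stand in for a packet and channel prepared by the caller.
 *
 *	smux_tx_queue(pkt, ch, 1);	// queue and mark channel TX-ready
 *
 *	// or, to queue several packets and schedule the channel once:
 *	smux_tx_queue(pkt, ch, 0);
 *	// ... queue more packets ...
 *	list_channel(ch);
 */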

/**
 * Handle receive OPEN ACK command.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_open_ack(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret;
	struct smux_lch_t *ch;
	int enable_powerdown = 0;

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];

	spin_lock(&ch->state_lock_lhb1);
	if (ch->local_state == SMUX_LCH_LOCAL_OPENING) {
		SMUX_DBG("lcid %d local state 0x%x -> 0x%x\n", lcid,
				ch->local_state,
				SMUX_LCH_LOCAL_OPENED);

		if (pkt->hdr.flags & SMUX_CMD_OPEN_POWER_COLLAPSE)
			enable_powerdown = 1;

		ch->local_state = SMUX_LCH_LOCAL_OPENED;
		if (ch->remote_state == SMUX_LCH_REMOTE_OPENED)
			schedule_notify(lcid, SMUX_CONNECTED, NULL);
		ret = 0;
	} else if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
		SMUX_DBG("Remote loopback OPEN ACK received\n");
		ret = 0;
	} else {
		pr_err("%s: lcid %d state 0x%x open ack invalid\n",
				__func__, lcid, ch->local_state);
		ret = -EINVAL;
	}
	spin_unlock(&ch->state_lock_lhb1);

	if (enable_powerdown) {
		spin_lock(&smux.tx_lock_lha2);
		if (!smux.powerdown_enabled) {
			smux.powerdown_enabled = 1;
			SMUX_DBG("%s: enabling power-collapse support\n",
					__func__);
		}
		spin_unlock(&smux.tx_lock_lha2);
	}

	return ret;
}

static int smux_handle_close_ack(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret;
	struct smux_lch_t *ch;
	union notifier_metadata meta_disconnected;
	unsigned long flags;

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];
	meta_disconnected.disconnected.is_ssr = 0;

	spin_lock_irqsave(&ch->state_lock_lhb1, flags);

	if (ch->local_state == SMUX_LCH_LOCAL_CLOSING) {
		SMUX_DBG("lcid %d local state 0x%x -> 0x%x\n", lcid,
				SMUX_LCH_LOCAL_CLOSING,
				SMUX_LCH_LOCAL_CLOSED);
		ch->local_state = SMUX_LCH_LOCAL_CLOSED;
		if (ch->remote_state == SMUX_LCH_REMOTE_CLOSED)
			schedule_notify(lcid, SMUX_DISCONNECTED,
				&meta_disconnected);
		ret = 0;
	} else if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
		SMUX_DBG("Remote loopback CLOSE ACK received\n");
		ret = 0;
	} else {
		pr_err("%s: lcid %d state 0x%x close ack invalid\n",
				__func__, lcid, ch->local_state);
		ret = -EINVAL;
	}
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
	return ret;
}

/**
 * Handle receive OPEN command.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_open_cmd(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret;
	struct smux_lch_t *ch;
	struct smux_pkt_t *ack_pkt;
	unsigned long flags;
	int tx_ready = 0;
	int enable_powerdown = 0;

	if (pkt->hdr.flags & SMUX_CMD_OPEN_ACK)
		return smux_handle_rx_open_ack(pkt);

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];

	spin_lock_irqsave(&ch->state_lock_lhb1, flags);

	if (ch->remote_state == SMUX_LCH_REMOTE_CLOSED) {
		SMUX_DBG("lcid %d remote state 0x%x -> 0x%x\n", lcid,
				SMUX_LCH_REMOTE_CLOSED,
				SMUX_LCH_REMOTE_OPENED);

		ch->remote_state = SMUX_LCH_REMOTE_OPENED;
		if (pkt->hdr.flags & SMUX_CMD_OPEN_POWER_COLLAPSE)
			enable_powerdown = 1;

		/* Send Open ACK */
		ack_pkt = smux_alloc_pkt();
		if (!ack_pkt) {
			/* exit out to allow retrying this later */
			ret = -ENOMEM;
			goto out;
		}
		ack_pkt->hdr.cmd = SMUX_CMD_OPEN_LCH;
		ack_pkt->hdr.flags = SMUX_CMD_OPEN_ACK
			| SMUX_CMD_OPEN_POWER_COLLAPSE;
		ack_pkt->hdr.lcid = lcid;
		ack_pkt->hdr.payload_len = 0;
		ack_pkt->hdr.pad_len = 0;
		if (pkt->hdr.flags & SMUX_CMD_OPEN_REMOTE_LOOPBACK) {
			ch->remote_mode = SMUX_LCH_MODE_REMOTE_LOOPBACK;
			ack_pkt->hdr.flags |= SMUX_CMD_OPEN_REMOTE_LOOPBACK;
		}
		smux_tx_queue(ack_pkt, ch, 0);
		tx_ready = 1;

		if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
			/*
			 * Send an Open command to the remote side to
			 * simulate our local client doing it.
			 */
			ack_pkt = smux_alloc_pkt();
			if (ack_pkt) {
				ack_pkt->hdr.lcid = lcid;
				ack_pkt->hdr.cmd = SMUX_CMD_OPEN_LCH;
				ack_pkt->hdr.flags =
					SMUX_CMD_OPEN_POWER_COLLAPSE;
				ack_pkt->hdr.payload_len = 0;
				ack_pkt->hdr.pad_len = 0;
				smux_tx_queue(ack_pkt, ch, 0);
				tx_ready = 1;
			} else {
				pr_err("%s: Remote loopback allocation failure\n",
					__func__);
			}
		} else if (ch->local_state == SMUX_LCH_LOCAL_OPENED) {
			schedule_notify(lcid, SMUX_CONNECTED, NULL);
		}
		ret = 0;
	} else {
		pr_err("%s: lcid %d remote state 0x%x open invalid\n",
				__func__, lcid, ch->remote_state);
		ret = -EINVAL;
	}

out:
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);

	if (enable_powerdown) {
		spin_lock_irqsave(&smux.tx_lock_lha2, flags);
		if (!smux.powerdown_enabled) {
			smux.powerdown_enabled = 1;
			SMUX_DBG("%s: enabling power-collapse support\n",
					__func__);
		}
		spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
	}

	if (tx_ready)
		list_channel(ch);

	return ret;
}

/**
 * Handle receive CLOSE command.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_close_cmd(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret;
	struct smux_lch_t *ch;
	struct smux_pkt_t *ack_pkt;
	union notifier_metadata meta_disconnected;
	unsigned long flags;
	int tx_ready = 0;

	if (pkt->hdr.flags & SMUX_CMD_CLOSE_ACK)
		return smux_handle_close_ack(pkt);

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];
	meta_disconnected.disconnected.is_ssr = 0;

	spin_lock_irqsave(&ch->state_lock_lhb1, flags);
	if (ch->remote_state == SMUX_LCH_REMOTE_OPENED) {
		SMUX_DBG("lcid %d remote state 0x%x -> 0x%x\n", lcid,
				SMUX_LCH_REMOTE_OPENED,
				SMUX_LCH_REMOTE_CLOSED);

		ack_pkt = smux_alloc_pkt();
		if (!ack_pkt) {
			/* exit out to allow retrying this later */
			ret = -ENOMEM;
			goto out;
		}
		ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
		ack_pkt->hdr.cmd = SMUX_CMD_CLOSE_LCH;
		ack_pkt->hdr.flags = SMUX_CMD_CLOSE_ACK;
		ack_pkt->hdr.lcid = lcid;
		ack_pkt->hdr.payload_len = 0;
		ack_pkt->hdr.pad_len = 0;
		smux_tx_queue(ack_pkt, ch, 0);
		tx_ready = 1;

		if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
			/*
			 * Send a Close command to the remote side to simulate
			 * our local client doing it.
			 */
			ack_pkt = smux_alloc_pkt();
			if (ack_pkt) {
				ack_pkt->hdr.lcid = lcid;
				ack_pkt->hdr.cmd = SMUX_CMD_CLOSE_LCH;
				ack_pkt->hdr.flags = 0;
				ack_pkt->hdr.payload_len = 0;
				ack_pkt->hdr.pad_len = 0;
				smux_tx_queue(ack_pkt, ch, 0);
				tx_ready = 1;
			} else {
				pr_err("%s: Remote loopback allocation failure\n",
					__func__);
			}
		}

		if (ch->local_state == SMUX_LCH_LOCAL_CLOSED)
			schedule_notify(lcid, SMUX_DISCONNECTED,
				&meta_disconnected);
		ret = 0;
	} else {
		pr_err("%s: lcid %d remote state 0x%x close invalid\n",
				__func__, lcid, ch->remote_state);
		ret = -EINVAL;
	}
out:
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
	if (tx_ready)
		list_channel(ch);

	return ret;
}

/*
 * Handle receive DATA command.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_data_cmd(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret = 0;
	int do_retry = 0;
	int tx_ready = 0;
	int tmp;
	int rx_len;
	struct smux_lch_t *ch;
	union notifier_metadata metadata;
	int remote_loopback;
	struct smux_pkt_t *ack_pkt;
	unsigned long flags;

	if (!pkt || smux_assert_lch_id(pkt->hdr.lcid)) {
		ret = -ENXIO;
		goto out;
	}

	rx_len = pkt->hdr.payload_len;
	if (rx_len == 0) {
		ret = -EINVAL;
		goto out;
	}

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];
	spin_lock_irqsave(&ch->state_lock_lhb1, flags);
	remote_loopback = ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK;

	if (ch->local_state != SMUX_LCH_LOCAL_OPENED
		&& !remote_loopback) {
		pr_err("smux: ch %d error data on local state 0x%x",
					lcid, ch->local_state);
		ret = -EIO;
		spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
		goto out;
	}

	if (ch->remote_state != SMUX_LCH_REMOTE_OPENED) {
		pr_err("smux: ch %d error data on remote state 0x%x",
					lcid, ch->remote_state);
		ret = -EIO;
		spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
		goto out;
	}

	if (!list_empty(&ch->rx_retry_queue)) {
		do_retry = 1;

		if ((ch->options & SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP) &&
			!ch->rx_flow_control_auto &&
			((ch->rx_retry_queue_cnt + 1) >= SMUX_RX_WM_HIGH)) {
			/* need to flow control RX */
			ch->rx_flow_control_auto = 1;
			tx_ready |= smux_rx_flow_control_updated(ch);
			schedule_notify(ch->lcid, SMUX_RX_RETRY_HIGH_WM_HIT,
					NULL);
		}
		if ((ch->rx_retry_queue_cnt + 1) > SMUX_RX_RETRY_MAX_PKTS) {
			/* retry queue full */
			pr_err("%s: ch %d RX retry queue full\n",
					__func__, lcid);
			schedule_notify(lcid, SMUX_READ_FAIL, NULL);
			ret = -ENOMEM;
			spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
			goto out;
		}
	}
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);

	if (remote_loopback) {
		/* Echo the data back to the remote client. */
		ack_pkt = smux_alloc_pkt();
		if (ack_pkt) {
			ack_pkt->hdr.lcid = lcid;
			ack_pkt->hdr.cmd = SMUX_CMD_DATA;
			ack_pkt->hdr.flags = 0;
			ack_pkt->hdr.payload_len = pkt->hdr.payload_len;
			if (ack_pkt->hdr.payload_len) {
				smux_alloc_pkt_payload(ack_pkt);
				memcpy(ack_pkt->payload, pkt->payload,
						ack_pkt->hdr.payload_len);
			}
			ack_pkt->hdr.pad_len = pkt->hdr.pad_len;
			smux_tx_queue(ack_pkt, ch, 0);
			tx_ready = 1;
		} else {
			pr_err("%s: Remote loopback allocation failure\n",
					__func__);
		}
	} else if (!do_retry) {
		/* request buffer from client */
		metadata.read.pkt_priv = 0;
		metadata.read.buffer = 0;
		tmp = ch->get_rx_buffer(ch->priv,
				(void **)&metadata.read.pkt_priv,
				(void **)&metadata.read.buffer,
				rx_len);

		if (tmp == 0 && metadata.read.buffer) {
			/* place data into RX buffer */
			memcpy(metadata.read.buffer, pkt->payload,
					rx_len);
			metadata.read.len = rx_len;
			schedule_notify(lcid, SMUX_READ_DONE,
							&metadata);
		} else if (tmp == -EAGAIN ||
				(tmp == 0 && !metadata.read.buffer)) {
			/* buffer allocation failed - add to retry queue */
			do_retry = 1;
		} else if (tmp < 0) {
			pr_err("%s: ch %d Client RX buffer alloc failed %d\n",
					__func__, lcid, tmp);
			schedule_notify(lcid, SMUX_READ_FAIL, NULL);
			ret = -ENOMEM;
		}
	}

	if (do_retry) {
		struct smux_rx_pkt_retry *retry;

		retry = kmalloc(sizeof(struct smux_rx_pkt_retry), GFP_KERNEL);
		if (!retry) {
			pr_err("%s: retry alloc failure\n", __func__);
			ret = -ENOMEM;
			schedule_notify(lcid, SMUX_READ_FAIL, NULL);
			goto out;
		}
		INIT_LIST_HEAD(&retry->rx_retry_list);
		retry->timeout_in_ms = SMUX_RX_RETRY_MIN_MS;

		/* copy packet */
		retry->pkt = smux_alloc_pkt();
		if (!retry->pkt) {
			kfree(retry);
			pr_err("%s: pkt alloc failure\n", __func__);
			ret = -ENOMEM;
			schedule_notify(lcid, SMUX_READ_FAIL, NULL);
			goto out;
		}
		retry->pkt->hdr.lcid = lcid;
		retry->pkt->hdr.payload_len = pkt->hdr.payload_len;
		retry->pkt->hdr.pad_len = pkt->hdr.pad_len;
		if (retry->pkt->hdr.payload_len) {
			smux_alloc_pkt_payload(retry->pkt);
			memcpy(retry->pkt->payload, pkt->payload,
					retry->pkt->hdr.payload_len);
		}

		/* add to retry queue */
		spin_lock_irqsave(&ch->state_lock_lhb1, flags);
		list_add_tail(&retry->rx_retry_list, &ch->rx_retry_queue);
		++ch->rx_retry_queue_cnt;
		if (ch->rx_retry_queue_cnt == 1)
			queue_delayed_work(smux_rx_wq, &ch->rx_retry_work,
				msecs_to_jiffies(retry->timeout_in_ms));
		spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
	}

	if (tx_ready)
		list_channel(ch);
out:
	return ret;
}

/**
 * Handle receive byte command for testing purposes.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_byte_cmd(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret;
	struct smux_lch_t *ch;
	union notifier_metadata metadata;
	unsigned long flags;

	if (!pkt || smux_assert_lch_id(pkt->hdr.lcid)) {
		pr_err("%s: invalid packet or channel id\n", __func__);
		return -ENXIO;
	}

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];
	spin_lock_irqsave(&ch->state_lock_lhb1, flags);

	if (ch->local_state != SMUX_LCH_LOCAL_OPENED) {
		pr_err("smux: ch %d error data on local state 0x%x",
				lcid, ch->local_state);
		ret = -EIO;
		goto out;
	}

	if (ch->remote_state != SMUX_LCH_REMOTE_OPENED) {
		pr_err("smux: ch %d error data on remote state 0x%x",
				lcid, ch->remote_state);
		ret = -EIO;
		goto out;
	}

	metadata.read.pkt_priv = (void *)(int)pkt->hdr.flags;
	metadata.read.buffer = 0;
	schedule_notify(lcid, SMUX_READ_DONE, &metadata);
	ret = 0;

out:
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
	return ret;
}

/**
 * Handle receive status command.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_status_cmd(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret = 0;
1606 struct smux_lch_t *ch;
1607 union notifier_metadata meta;
1608 unsigned long flags;
1609 int tx_ready = 0;
1610
1611 lcid = pkt->hdr.lcid;
1612 ch = &smux_lch[lcid];
1613
1614 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
1615 meta.tiocm.tiocm_old = ch->remote_tiocm;
1616 meta.tiocm.tiocm_new = pkt->hdr.flags;
1617
1618 /* update logical channel flow control */
1619 if ((meta.tiocm.tiocm_old & SMUX_CMD_STATUS_FLOW_CNTL) ^
1620 (meta.tiocm.tiocm_new & SMUX_CMD_STATUS_FLOW_CNTL)) {
1621 /* logical channel flow control changed */
1622 if (pkt->hdr.flags & SMUX_CMD_STATUS_FLOW_CNTL) {
1623 /* disabled TX */
1624 SMUX_DBG("TX Flow control enabled\n");
1625 ch->tx_flow_control = 1;
1626 } else {
1627 /* re-enable channel */
1628 SMUX_DBG("TX Flow control disabled\n");
1629 ch->tx_flow_control = 0;
1630 tx_ready = 1;
1631 }
1632 }
1633 meta.tiocm.tiocm_old = msm_smux_tiocm_get_atomic(ch);
1634 ch->remote_tiocm = pkt->hdr.flags;
1635 meta.tiocm.tiocm_new = msm_smux_tiocm_get_atomic(ch);
1636
1637 /* client notification for status change */
1638 if (IS_FULLY_OPENED(ch)) {
1639 if (meta.tiocm.tiocm_old != meta.tiocm.tiocm_new)
1640 schedule_notify(lcid, SMUX_TIOCM_UPDATE, &meta);
1641 ret = 0;
1642 }
1643 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
1644 if (tx_ready)
1645 list_channel(ch);
1646
1647 return ret;
1648}
1649
1650/**
1651 * Handle receive power command.
1652 *
1653 * @pkt Received packet
1654 *
1655 * @returns 0 for success
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001656 */
1657static int smux_handle_rx_power_cmd(struct smux_pkt_t *pkt)
1658{
Steve Mucklef132c6c2012-06-06 18:30:57 -07001659 struct smux_pkt_t *ack_pkt = NULL;
Eric Holmberga9b06472012-06-22 09:46:34 -06001660 int power_down = 0;
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001661 unsigned long flags;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001662
Eric Holmberg4dd6e1a2012-06-26 13:46:29 -06001663 SMUX_PWR_PKT_RX(pkt);
1664
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001665 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001666 if (pkt->hdr.flags & SMUX_CMD_PWR_CTL_ACK) {
1667 /* local sleep request ack */
Eric Holmberga9b06472012-06-22 09:46:34 -06001668 if (smux.power_state == SMUX_PWR_TURNING_OFF)
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001669 /* Power-down complete, turn off UART */
Eric Holmberga9b06472012-06-22 09:46:34 -06001670 power_down = 1;
1671 else
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001672 pr_err("%s: sleep request ack invalid in state %d\n",
1673 __func__, smux.power_state);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001674 } else {
Eric Holmbergffddd4c2012-06-08 12:37:51 -06001675 /*
1676 * Remote sleep request
1677 *
1678 * Even if we have data pending, we need to transition to the
1679 * POWER_OFF state and then perform a wakeup since the remote
1680 * side has requested a power-down.
1681 *
1682 * The state here is set to SMUX_PWR_TURNING_OFF_FLUSH and
1683 * the TX thread will set the state to SMUX_PWR_TURNING_OFF
1684 * when it sends the packet.
Eric Holmberga9b06472012-06-22 09:46:34 -06001685 *
1686 * If we are already powering down, then no ACK is sent.
Eric Holmbergffddd4c2012-06-08 12:37:51 -06001687 */
Eric Holmberga9b06472012-06-22 09:46:34 -06001688 if (smux.power_state == SMUX_PWR_ON) {
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001689 ack_pkt = smux_alloc_pkt();
1690 if (ack_pkt) {
Eric Holmbergff0b0112012-06-08 15:06:57 -06001691 SMUX_PWR("%s: Power %d->%d\n", __func__,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001692 smux.power_state,
1693 SMUX_PWR_TURNING_OFF_FLUSH);
1694
Eric Holmbergffddd4c2012-06-08 12:37:51 -06001695 smux.power_state = SMUX_PWR_TURNING_OFF_FLUSH;
1696
1697 /* send power-down ack */
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001698 ack_pkt->hdr.cmd = SMUX_CMD_PWR_CTL;
1699 ack_pkt->hdr.flags = SMUX_CMD_PWR_CTL_ACK;
Eric Holmbergffddd4c2012-06-08 12:37:51 -06001700 ack_pkt->hdr.lcid = SMUX_BROADCAST_LCID;
1701 list_add_tail(&ack_pkt->list,
1702 &smux.power_queue);
1703 queue_work(smux_tx_wq, &smux_tx_work);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001704 }
Eric Holmberga9b06472012-06-22 09:46:34 -06001705 } else if (smux.power_state == SMUX_PWR_TURNING_OFF_FLUSH) {
1706 /* Local power-down request still in TX queue */
1707 SMUX_PWR("%s: Power-down shortcut - no ack\n",
1708 __func__);
1709 smux.power_ctl_remote_req_received = 1;
1710 } else if (smux.power_state == SMUX_PWR_TURNING_OFF) {
1711 /*
1712 * Local power-down request already sent to remote
1713 * side, so this request gets treated as an ACK.
1714 */
1715 SMUX_PWR("%s: Power-down shortcut - no ack\n",
1716 __func__);
1717 power_down = 1;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001718 } else {
1719 pr_err("%s: sleep request invalid in state %d\n",
1720 __func__, smux.power_state);
1721 }
1722 }
Eric Holmberga9b06472012-06-22 09:46:34 -06001723
1724 if (power_down) {
1725 SMUX_PWR("%s: Power %d->%d\n", __func__,
1726 smux.power_state, SMUX_PWR_OFF_FLUSH);
1727 smux.power_state = SMUX_PWR_OFF_FLUSH;
1728 queue_work(smux_tx_wq, &smux_inactivity_work);
1729 }
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001730 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001731
1732 return 0;
1733}
1734
1735/**
1736 * Handle dispatching a completed packet for receive processing.
1737 *
1738 * @pkt Packet to process
1739 *
1740 * @returns 0 for success
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001741 */
1742static int smux_dispatch_rx_pkt(struct smux_pkt_t *pkt)
1743{
Eric Holmbergf9622662012-06-13 15:55:45 -06001744 int ret = -ENXIO;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001745
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001746 switch (pkt->hdr.cmd) {
1747 case SMUX_CMD_OPEN_LCH:
Eric Holmberg4dd6e1a2012-06-26 13:46:29 -06001748 SMUX_LOG_PKT_RX(pkt);
Eric Holmbergf9622662012-06-13 15:55:45 -06001749 if (smux_assert_lch_id(pkt->hdr.lcid)) {
1750 pr_err("%s: invalid channel id %d\n",
1751 __func__, pkt->hdr.lcid);
1752 break;
1753 }
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001754 ret = smux_handle_rx_open_cmd(pkt);
1755 break;
1756
1757 case SMUX_CMD_DATA:
Eric Holmberg4dd6e1a2012-06-26 13:46:29 -06001758 SMUX_LOG_PKT_RX(pkt);
Eric Holmbergf9622662012-06-13 15:55:45 -06001759 if (smux_assert_lch_id(pkt->hdr.lcid)) {
1760 pr_err("%s: invalid channel id %d\n",
1761 __func__, pkt->hdr.lcid);
1762 break;
1763 }
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001764 ret = smux_handle_rx_data_cmd(pkt);
1765 break;
1766
1767 case SMUX_CMD_CLOSE_LCH:
Eric Holmberg4dd6e1a2012-06-26 13:46:29 -06001768 SMUX_LOG_PKT_RX(pkt);
Eric Holmbergf9622662012-06-13 15:55:45 -06001769 if (smux_assert_lch_id(pkt->hdr.lcid)) {
1770 pr_err("%s: invalid channel id %d\n",
1771 __func__, pkt->hdr.lcid);
1772 break;
1773 }
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001774 ret = smux_handle_rx_close_cmd(pkt);
1775 break;
1776
1777 case SMUX_CMD_STATUS:
Eric Holmberg4dd6e1a2012-06-26 13:46:29 -06001778 SMUX_LOG_PKT_RX(pkt);
Eric Holmbergf9622662012-06-13 15:55:45 -06001779 if (smux_assert_lch_id(pkt->hdr.lcid)) {
1780 pr_err("%s: invalid channel id %d\n",
1781 __func__, pkt->hdr.lcid);
1782 break;
1783 }
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001784 ret = smux_handle_rx_status_cmd(pkt);
1785 break;
1786
1787 case SMUX_CMD_PWR_CTL:
1788 ret = smux_handle_rx_power_cmd(pkt);
1789 break;
1790
1791 case SMUX_CMD_BYTE:
Eric Holmberg4dd6e1a2012-06-26 13:46:29 -06001792 SMUX_LOG_PKT_RX(pkt);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001793 ret = smux_handle_rx_byte_cmd(pkt);
1794 break;
1795
1796 default:
Eric Holmberg4dd6e1a2012-06-26 13:46:29 -06001797 SMUX_LOG_PKT_RX(pkt);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001798 pr_err("%s: command %d unknown\n", __func__, pkt->hdr.cmd);
1799 ret = -EINVAL;
1800 }
1801 return ret;
1802}
1803
1804/**
1805 * Deserializes a packet and dispatches it to the packet receive logic.
1806 *
1807 * @data Raw data for one packet
1808 * @len Length of the data
1809 *
1810 * @returns 0 for success
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001811 */
1812static int smux_deserialize(unsigned char *data, int len)
1813{
1814 struct smux_pkt_t recv;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001815
1816 smux_init_pkt(&recv);
1817
1818 /*
1819 * It may be possible to optimize this to not use the
1820 * temporary buffer.
1821 */
1822 memcpy(&recv.hdr, data, sizeof(struct smux_hdr_t));
1823
1824 if (recv.hdr.magic != SMUX_MAGIC) {
1825 pr_err("%s: invalid header magic\n", __func__);
1826 return -EINVAL;
1827 }
1828
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001829 if (recv.hdr.payload_len)
1830 recv.payload = data + sizeof(struct smux_hdr_t);
1831
1832 return smux_dispatch_rx_pkt(&recv);
1833}
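
/*
 * Illustrative note (not part of the original driver): the RX state
 * machine handlers below and smux_deserialize() together imply the
 * following on-wire layout for one SMUX packet.  The header starts with
 * the two magic bytes hunted for in the SMUX_RX_IDLE/SMUX_RX_MAGIC
 * states; the exact field ordering is defined by struct smux_hdr_t in
 * smux_private.h and is only summarized here:
 *
 *	[SMUX_MAGIC_WORD1][SMUX_MAGIC_WORD2][remainder of struct smux_hdr_t]
 *	[payload_len bytes of payload][pad_len bytes of padding]
 *
 * smux_deserialize() is handed exactly one complete packet (header,
 * payload and padding) once smux_rx_handle_pkt_payload() sees
 * pkt_remain reach zero.
 */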
1834
1835/**
1836 * Handle wakeup request byte.
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001837 */
1838static void smux_handle_wakeup_req(void)
1839{
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001840 unsigned long flags;
1841
1842 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001843 if (smux.power_state == SMUX_PWR_OFF
1844 || smux.power_state == SMUX_PWR_TURNING_ON) {
1845 /* wakeup system */
Eric Holmbergff0b0112012-06-08 15:06:57 -06001846 SMUX_PWR("%s: Power %d->%d\n", __func__,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001847 smux.power_state, SMUX_PWR_ON);
1848 smux.power_state = SMUX_PWR_ON;
1849 queue_work(smux_tx_wq, &smux_wakeup_work);
1850 queue_work(smux_tx_wq, &smux_tx_work);
1851 queue_delayed_work(smux_tx_wq, &smux_delayed_inactivity_work,
1852 msecs_to_jiffies(SMUX_INACTIVITY_TIMEOUT_MS));
1853 smux_send_byte(SMUX_WAKEUP_ACK);
Eric Holmberga9b06472012-06-22 09:46:34 -06001854 } else if (smux.power_state == SMUX_PWR_ON) {
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001855 smux_send_byte(SMUX_WAKEUP_ACK);
Eric Holmberga9b06472012-06-22 09:46:34 -06001856 } else {
1857 /* stale wakeup request from previous wakeup */
1858 SMUX_PWR("%s: stale Wakeup REQ in state %d\n",
1859 __func__, smux.power_state);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001860 }
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001861 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001862}
1863
1864/**
1865 * Handle wakeup request ack.
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001866 */
1867static void smux_handle_wakeup_ack(void)
1868{
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001869 unsigned long flags;
1870
1871 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001872 if (smux.power_state == SMUX_PWR_TURNING_ON) {
1873 /* received response to wakeup request */
Eric Holmbergff0b0112012-06-08 15:06:57 -06001874 SMUX_PWR("%s: Power %d->%d\n", __func__,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001875 smux.power_state, SMUX_PWR_ON);
1876 smux.power_state = SMUX_PWR_ON;
1877 queue_work(smux_tx_wq, &smux_tx_work);
1878 queue_delayed_work(smux_tx_wq, &smux_delayed_inactivity_work,
1879 msecs_to_jiffies(SMUX_INACTIVITY_TIMEOUT_MS));
1880
1881 } else if (smux.power_state != SMUX_PWR_ON) {
1882 /* invalid message */
Eric Holmberga9b06472012-06-22 09:46:34 -06001883 SMUX_PWR("%s: stale Wakeup REQ ACK in state %d\n",
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001884 __func__, smux.power_state);
1885 }
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001886 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001887}
1888
1889/**
1890 * RX State machine - IDLE state processing.
1891 *
1892 * @data New RX data to process
1893 * @len Length of the data
1894 * @used Return value of length processed
1895 * @flag Error flag - TTY_NORMAL 0 for no failure
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001896 */
1897static void smux_rx_handle_idle(const unsigned char *data,
1898 int len, int *used, int flag)
1899{
1900 int i;
1901
1902 if (flag) {
1903 if (smux_byte_loopback)
1904 smux_receive_byte(SMUX_UT_ECHO_ACK_FAIL,
1905 smux_byte_loopback);
1906 pr_err("%s: TTY error 0x%x - ignoring\n", __func__, flag);
1907 ++*used;
1908 return;
1909 }
1910
1911 for (i = *used; i < len && smux.rx_state == SMUX_RX_IDLE; i++) {
1912 switch (data[i]) {
1913 case SMUX_MAGIC_WORD1:
1914 smux.rx_state = SMUX_RX_MAGIC;
1915 break;
1916 case SMUX_WAKEUP_REQ:
Eric Holmberg4dd6e1a2012-06-26 13:46:29 -06001917 SMUX_PWR("smux: RX Wakeup REQ\n");
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001918 smux_handle_wakeup_req();
1919 break;
1920 case SMUX_WAKEUP_ACK:
Eric Holmberg4dd6e1a2012-06-26 13:46:29 -06001921 SMUX_PWR("smux: RX Wakeup ACK\n");
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001922 smux_handle_wakeup_ack();
1923 break;
1924 default:
1925 /* unexpected character */
1926 if (smux_byte_loopback && data[i] == SMUX_UT_ECHO_REQ)
1927 smux_receive_byte(SMUX_UT_ECHO_ACK_OK,
1928 smux_byte_loopback);
1929 pr_err("%s: parse error 0x%02x - ignoring\n", __func__,
1930 (unsigned)data[i]);
1931 break;
1932 }
1933 }
1934
1935 *used = i;
1936}
1937
1938/**
1939 * RX State machine - Header Magic state processing.
1940 *
1941 * @data New RX data to process
1942 * @len Length of the data
1943 * @used Return value of length processed
1944 * @flag Error flag - TTY_NORMAL 0 for no failure
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001945 */
1946static void smux_rx_handle_magic(const unsigned char *data,
1947 int len, int *used, int flag)
1948{
1949 int i;
1950
1951 if (flag) {
1952 pr_err("%s: TTY RX error %d\n", __func__, flag);
1953 smux_enter_reset();
1954 smux.rx_state = SMUX_RX_FAILURE;
1955 ++*used;
1956 return;
1957 }
1958
1959 for (i = *used; i < len && smux.rx_state == SMUX_RX_MAGIC; i++) {
1960 /* wait for completion of the magic */
1961 if (data[i] == SMUX_MAGIC_WORD2) {
1962 smux.recv_len = 0;
1963 smux.recv_buf[smux.recv_len++] = SMUX_MAGIC_WORD1;
1964 smux.recv_buf[smux.recv_len++] = SMUX_MAGIC_WORD2;
1965 smux.rx_state = SMUX_RX_HDR;
1966 } else {
1967 /* unexpected / trash character */
1968 pr_err("%s: rx parse error for char %c; *used=%d, len=%d\n",
1969 __func__, data[i], *used, len);
1970 smux.rx_state = SMUX_RX_IDLE;
1971 }
1972 }
1973
1974 *used = i;
1975}
1976
1977/**
1978 * RX State machine - Packet Header state processing.
1979 *
1980 * @data New RX data to process
1981 * @len Length of the data
1982 * @used Return value of length processed
1983 * @flag Error flag - TTY_NORMAL 0 for no failure
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001984 */
1985static void smux_rx_handle_hdr(const unsigned char *data,
1986 int len, int *used, int flag)
1987{
1988 int i;
1989 struct smux_hdr_t *hdr;
1990
1991 if (flag) {
1992 pr_err("%s: TTY RX error %d\n", __func__, flag);
1993 smux_enter_reset();
1994 smux.rx_state = SMUX_RX_FAILURE;
1995 ++*used;
1996 return;
1997 }
1998
1999 for (i = *used; i < len && smux.rx_state == SMUX_RX_HDR; i++) {
2000 smux.recv_buf[smux.recv_len++] = data[i];
2001
2002 if (smux.recv_len == sizeof(struct smux_hdr_t)) {
2003 /* complete header received */
2004 hdr = (struct smux_hdr_t *)smux.recv_buf;
2005 smux.pkt_remain = hdr->payload_len + hdr->pad_len;
2006 smux.rx_state = SMUX_RX_PAYLOAD;
2007 }
2008 }
2009 *used = i;
2010}
2011
2012/**
2013 * RX State machine - Packet Payload state processing.
2014 *
2015 * @data New RX data to process
2016 * @len Length of the data
2017 * @used Return value of length processed
2018 * @flag Error flag - TTY_NORMAL 0 for no failure
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002019 */
2020static void smux_rx_handle_pkt_payload(const unsigned char *data,
2021 int len, int *used, int flag)
2022{
2023 int remaining;
2024
2025 if (flag) {
2026 pr_err("%s: TTY RX error %d\n", __func__, flag);
2027 smux_enter_reset();
2028 smux.rx_state = SMUX_RX_FAILURE;
2029 ++*used;
2030 return;
2031 }
2032
2033 /* copy data into rx buffer */
2034 if (smux.pkt_remain < (len - *used))
2035 remaining = smux.pkt_remain;
2036 else
2037 remaining = len - *used;
2038
2039 memcpy(&smux.recv_buf[smux.recv_len], &data[*used], remaining);
2040 smux.recv_len += remaining;
2041 smux.pkt_remain -= remaining;
2042 *used += remaining;
2043
2044 if (smux.pkt_remain == 0) {
2045 /* complete packet received */
2046 smux_deserialize(smux.recv_buf, smux.recv_len);
2047 smux.rx_state = SMUX_RX_IDLE;
2048 }
2049}
2050
2051/**
2052 * Feed data to the receive state machine.
2053 *
2054 * @data Pointer to data block
2055 * @len Length of data
2056 * @flag TTY_NORMAL (0) for no error, otherwise TTY Error Flag
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002057 */
2058void smux_rx_state_machine(const unsigned char *data,
2059 int len, int flag)
2060{
Eric Holmberg0560f7a2012-05-31 15:50:26 -06002061 struct smux_rx_worker_data work;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002062
Eric Holmberg0560f7a2012-05-31 15:50:26 -06002063 work.data = data;
2064 work.len = len;
2065 work.flag = flag;
2066 INIT_WORK_ONSTACK(&work.work, smux_rx_worker);
2067 work.work_complete = COMPLETION_INITIALIZER_ONSTACK(work.work_complete);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002068
Eric Holmberg0560f7a2012-05-31 15:50:26 -06002069 queue_work(smux_rx_wq, &work.work);
2070 wait_for_completion(&work.work_complete);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002071}
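
/*
 * Illustrative sketch (hypothetical caller, not part of this driver):
 * smux_rx_state_machine() takes raw TTY bytes plus a single TTY error
 * flag, so a line-discipline receive handler could simply forward what
 * the TTY layer hands it, for example:
 *
 *	static void example_receive_buf(struct tty_struct *tty,
 *					const unsigned char *cp,
 *					char *fp, int count)
 *	{
 *		// TTY_NORMAL (0) means no error flag for this buffer
 *		smux_rx_state_machine(cp, count, fp ? fp[0] : TTY_NORMAL);
 *	}
 *
 * Note that the call waits on work_complete, so it must not be made
 * from atomic context.
 */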
2072
2073/**
2074 * Add channel to transmit-ready list and trigger transmit worker.
2075 *
2076 * @ch Channel to add
2077 */
2078static void list_channel(struct smux_lch_t *ch)
2079{
2080 unsigned long flags;
2081
2082 SMUX_DBG("%s: listing channel %d\n",
2083 __func__, ch->lcid);
2084
2085 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
2086 spin_lock(&ch->tx_lock_lhb2);
2087 smux.tx_activity_flag = 1;
2088 if (list_empty(&ch->tx_ready_list))
2089 list_add_tail(&ch->tx_ready_list, &smux.lch_tx_ready_list);
2090 spin_unlock(&ch->tx_lock_lhb2);
2091 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2092
2093 queue_work(smux_tx_wq, &smux_tx_work);
2094}
2095
2096/**
2097 * Transmit packet on correct transport and then perform client
2098 * notification.
2099 *
2100 * @ch Channel to transmit on
2101 * @pkt Packet to transmit
2102 */
2103static void smux_tx_pkt(struct smux_lch_t *ch, struct smux_pkt_t *pkt)
2104{
2105 union notifier_metadata meta_write;
2106 int ret;
2107
2108 if (ch && pkt) {
2109 SMUX_LOG_PKT_TX(pkt);
2110 if (ch->local_mode == SMUX_LCH_MODE_LOCAL_LOOPBACK)
2111 ret = smux_tx_loopback(pkt);
2112 else
2113 ret = smux_tx_tty(pkt);
2114
2115 if (pkt->hdr.cmd == SMUX_CMD_DATA) {
2116 /* notify write-done */
2117 meta_write.write.pkt_priv = pkt->priv;
2118 meta_write.write.buffer = pkt->payload;
2119 meta_write.write.len = pkt->hdr.payload_len;
2120 if (ret >= 0) {
2121 SMUX_DBG("%s: PKT write done\n", __func__);
2122 schedule_notify(ch->lcid, SMUX_WRITE_DONE,
2123 &meta_write);
2124 } else {
2125 pr_err("%s: failed to write pkt %d\n",
2126 __func__, ret);
2127 schedule_notify(ch->lcid, SMUX_WRITE_FAIL,
2128 &meta_write);
2129 }
2130 }
2131 }
2132}
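
/*
 * Illustrative sketch (hypothetical names): how a client notify()
 * callback might consume the write-done metadata assembled above.  Only
 * the union notifier_metadata fields used in smux_tx_pkt() are assumed.
 *
 *	// inside a client's notify(priv, event_type, metadata) callback:
 *	const union notifier_metadata *meta = metadata;
 *
 *	if (event_type == SMUX_WRITE_DONE)
 *		example_free_tx_buffer(meta->write.pkt_priv,
 *				       meta->write.buffer,
 *				       meta->write.len);
 *	else if (event_type == SMUX_WRITE_FAIL)
 *		example_requeue_tx_buffer(meta->write.pkt_priv);
 */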
2133
2134/**
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002135 * Flush pending TTY TX data.
2136 */
2137static void smux_flush_tty(void)
2138{
Eric Holmberg92a67df2012-06-25 13:56:24 -06002139 mutex_lock(&smux.mutex_lha0);
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002140 if (!smux.tty) {
2141 pr_err("%s: ldisc not loaded\n", __func__);
Eric Holmberg92a67df2012-06-25 13:56:24 -06002142 mutex_unlock(&smux.mutex_lha0);
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002143 return;
2144 }
2145
2146 tty_wait_until_sent(smux.tty,
2147 msecs_to_jiffies(TTY_BUFFER_FULL_WAIT_MS));
2148
2149 if (tty_chars_in_buffer(smux.tty) > 0)
2150 pr_err("%s: unable to flush UART queue\n", __func__);
Eric Holmberg92a67df2012-06-25 13:56:24 -06002151
2152 mutex_unlock(&smux.mutex_lha0);
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002153}
2154
2155/**
Eric Holmberged1f00c2012-06-07 09:45:18 -06002156 * Purge TX queue for logical channel.
2157 *
2158 * @ch Logical channel pointer
2159 *
2160 * Must be called with the following spinlocks locked:
2161 * state_lock_lhb1
2162 * tx_lock_lhb2
2163 */
2164static void smux_purge_ch_tx_queue(struct smux_lch_t *ch)
2165{
2166 struct smux_pkt_t *pkt;
2167 int send_disconnect = 0;
2168
2169 while (!list_empty(&ch->tx_queue)) {
2170 pkt = list_first_entry(&ch->tx_queue, struct smux_pkt_t,
2171 list);
2172 list_del(&pkt->list);
2173
2174 if (pkt->hdr.cmd == SMUX_CMD_OPEN_LCH) {
2175 /* Open was never sent, just force to closed state */
2176 ch->local_state = SMUX_LCH_LOCAL_CLOSED;
2177 send_disconnect = 1;
2178 } else if (pkt->hdr.cmd == SMUX_CMD_DATA) {
2179 /* Notify client of failed write */
2180 union notifier_metadata meta_write;
2181
2182 meta_write.write.pkt_priv = pkt->priv;
2183 meta_write.write.buffer = pkt->payload;
2184 meta_write.write.len = pkt->hdr.payload_len;
2185 schedule_notify(ch->lcid, SMUX_WRITE_FAIL, &meta_write);
2186 }
2187 smux_free_pkt(pkt);
2188 }
2189
2190 if (send_disconnect) {
2191 union notifier_metadata meta_disconnected;
2192
2193 meta_disconnected.disconnected.is_ssr = smux.in_reset;
2194 schedule_notify(ch->lcid, SMUX_DISCONNECTED,
2195 &meta_disconnected);
2196 }
2197}
2198
2199/**
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002200 * Power-up the UART.
Eric Holmberg92a67df2012-06-25 13:56:24 -06002201 *
2202 * Must be called with smux.mutex_lha0 already locked.
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002203 */
Eric Holmberg92a67df2012-06-25 13:56:24 -06002204static void smux_uart_power_on_atomic(void)
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002205{
2206 struct uart_state *state;
2207
2208 if (!smux.tty || !smux.tty->driver_data) {
2209 pr_err("%s: unable to find UART port for tty %p\n",
2210 __func__, smux.tty);
2211 return;
2212 }
2213 state = smux.tty->driver_data;
2214 msm_hs_request_clock_on(state->uart_port);
2215}
2216
2217/**
Eric Holmberg92a67df2012-06-25 13:56:24 -06002218 * Power-up the UART.
2219 */
2220static void smux_uart_power_on(void)
2221{
2222 mutex_lock(&smux.mutex_lha0);
2223 smux_uart_power_on_atomic();
2224 mutex_unlock(&smux.mutex_lha0);
2225}
2226
2227/**
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002228 * Power down the UART.
Eric Holmberg06011322012-07-06 18:17:03 -06002229 *
2230 * Must be called with mutex_lha0 locked.
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002231 */
Eric Holmberg06011322012-07-06 18:17:03 -06002232static void smux_uart_power_off_atomic(void)
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002233{
2234 struct uart_state *state;
2235
2236 if (!smux.tty || !smux.tty->driver_data) {
2237 pr_err("%s: unable to find UART port for tty %p\n",
2238 __func__, smux.tty);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002240 return;
2241 }
2242 state = smux.tty->driver_data;
2243 msm_hs_request_clock_off(state->uart_port);
Eric Holmberg06011322012-07-06 18:17:03 -06002244}
2245
2246/**
2247 * Power down the UART.
2248 */
2249static void smux_uart_power_off(void)
2250{
2251 mutex_lock(&smux.mutex_lha0);
2252 smux_uart_power_off_atomic();
Eric Holmberg92a67df2012-06-25 13:56:24 -06002253 mutex_unlock(&smux.mutex_lha0);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002254}
2255
2256/**
2257 * TX Wakeup Worker
2258 *
2259 * @work Not used
2260 *
2261 * Do an exponential back-off wakeup sequence with a maximum period
2262 * of approximately 1 second (1 << 20 microseconds).
2263 */
2264static void smux_wakeup_worker(struct work_struct *work)
2265{
2266 unsigned long flags;
2267 unsigned wakeup_delay;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002268
Eric Holmberg2d4f9e82012-06-21 13:12:39 -06002269 if (smux.in_reset)
2270 return;
2271
2272 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
2273 if (smux.power_state == SMUX_PWR_ON) {
2274 /* wakeup complete */
Eric Holmberga9b06472012-06-22 09:46:34 -06002275 smux.pwr_wakeup_delay_us = 1;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002276 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002277 SMUX_DBG("%s: wakeup complete\n", __func__);
Eric Holmberg2d4f9e82012-06-21 13:12:39 -06002278
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002279 /*
2280 * Cancel any pending retry. This avoids a race condition with
2281 * a new power-up request because:
2282 * 1) this worker doesn't modify the state
2283 * 2) this worker is processed on the same single-threaded
2284 * workqueue as new TX wakeup requests
2285 */
2286 cancel_delayed_work(&smux_wakeup_delayed_work);
Eric Holmbergd032f5b2012-06-29 19:02:00 -06002287 queue_work(smux_tx_wq, &smux_tx_work);
Eric Holmberga9b06472012-06-22 09:46:34 -06002288 } else if (smux.power_state == SMUX_PWR_TURNING_ON) {
Eric Holmberg2d4f9e82012-06-21 13:12:39 -06002289 /* retry wakeup */
2290 wakeup_delay = smux.pwr_wakeup_delay_us;
2291 smux.pwr_wakeup_delay_us <<= 1;
2292 if (smux.pwr_wakeup_delay_us > SMUX_WAKEUP_DELAY_MAX)
2293 smux.pwr_wakeup_delay_us =
2294 SMUX_WAKEUP_DELAY_MAX;
2295
2296 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
Eric Holmberga9b06472012-06-22 09:46:34 -06002297 SMUX_PWR("%s: triggering wakeup\n", __func__);
Eric Holmberg2d4f9e82012-06-21 13:12:39 -06002298 smux_send_byte(SMUX_WAKEUP_REQ);
2299
2300 if (wakeup_delay < SMUX_WAKEUP_DELAY_MIN) {
2301 SMUX_DBG("%s: sleeping for %u us\n", __func__,
2302 wakeup_delay);
2303 usleep_range(wakeup_delay, 2*wakeup_delay);
2304 queue_work(smux_tx_wq, &smux_wakeup_work);
2305 } else {
2306 /* schedule delayed work */
2307 SMUX_DBG("%s: scheduling delayed wakeup in %u ms\n",
2308 __func__, wakeup_delay / 1000);
2309 queue_delayed_work(smux_tx_wq,
2310 &smux_wakeup_delayed_work,
2311 msecs_to_jiffies(wakeup_delay / 1000));
2312 }
Eric Holmberga9b06472012-06-22 09:46:34 -06002313 } else {
2314 /* wakeup aborted */
2315 smux.pwr_wakeup_delay_us = 1;
2316 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2317 SMUX_PWR("%s: wakeup aborted\n", __func__);
2318 cancel_delayed_work(&smux_wakeup_delayed_work);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002319 }
2320}
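
/*
 * Worked example of the back-off above (illustrative): the retry delay
 * starts at 1 us (pwr_wakeup_delay_us is reset to 1 on completion or
 * abort) and doubles on every retry, so successive wakeup bytes go out
 * roughly 1, 2, 4, 8, ... microseconds apart.  While the delay is below
 * SMUX_WAKEUP_DELAY_MIN the worker sleeps inline with usleep_range()
 * and requeues itself immediately; at or above that threshold it
 * converts the delay to milliseconds and uses the delayed workqueue
 * instead.  The delay is capped at SMUX_WAKEUP_DELAY_MAX, giving the
 * roughly one second maximum period noted in the function header.
 */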
2321
2322
2323/**
2324 * Inactivity timeout worker. Periodically scheduled when link is active.
2325 * When it detects inactivity, it will power-down the UART link.
2326 *
2327 * @work Work structure (not used)
2328 */
2329static void smux_inactivity_worker(struct work_struct *work)
2330{
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002331 struct smux_pkt_t *pkt;
2332 unsigned long flags;
2333
Eric Holmberg06011322012-07-06 18:17:03 -06002334 if (smux.in_reset)
2335 return;
2336
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002337 spin_lock_irqsave(&smux.rx_lock_lha1, flags);
2338 spin_lock(&smux.tx_lock_lha2);
2339
2340 if (!smux.tx_activity_flag && !smux.rx_activity_flag) {
2341 /* no activity */
2342 if (smux.powerdown_enabled) {
2343 if (smux.power_state == SMUX_PWR_ON) {
2344 /* start power-down sequence */
2345 pkt = smux_alloc_pkt();
2346 if (pkt) {
Eric Holmbergff0b0112012-06-08 15:06:57 -06002347 SMUX_PWR("%s: Power %d->%d\n", __func__,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002348 smux.power_state,
Eric Holmberga9b06472012-06-22 09:46:34 -06002349 SMUX_PWR_TURNING_OFF_FLUSH);
2350 smux.power_state =
2351 SMUX_PWR_TURNING_OFF_FLUSH;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002352
2353 /* send power-down request */
2354 pkt->hdr.cmd = SMUX_CMD_PWR_CTL;
2355 pkt->hdr.flags = 0;
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002356 pkt->hdr.lcid = SMUX_BROADCAST_LCID;
2357 list_add_tail(&pkt->list,
2358 &smux.power_queue);
2359 queue_work(smux_tx_wq, &smux_tx_work);
2360 } else {
2361 pr_err("%s: packet alloc failed\n",
2362 __func__);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002363 }
2364 }
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002365 }
2366 }
2367 smux.tx_activity_flag = 0;
2368 smux.rx_activity_flag = 0;
2369
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002370 if (smux.power_state == SMUX_PWR_OFF_FLUSH) {
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002371 /* ready to power-down the UART */
Eric Holmbergff0b0112012-06-08 15:06:57 -06002372 SMUX_PWR("%s: Power %d->%d\n", __func__,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002373 smux.power_state, SMUX_PWR_OFF);
Eric Holmbergff0b0112012-06-08 15:06:57 -06002374 smux.power_state = SMUX_PWR_OFF;
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002375
2376 /* if data is pending, schedule a new wakeup */
2377 if (!list_empty(&smux.lch_tx_ready_list) ||
2378 !list_empty(&smux.power_queue))
2379 queue_work(smux_tx_wq, &smux_tx_work);
2380
2381 spin_unlock(&smux.tx_lock_lha2);
2382 spin_unlock_irqrestore(&smux.rx_lock_lha1, flags);
2383
2384 /* flush UART output queue and power down */
2385 smux_flush_tty();
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002386 smux_uart_power_off();
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002387 } else {
2388 spin_unlock(&smux.tx_lock_lha2);
2389 spin_unlock_irqrestore(&smux.rx_lock_lha1, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002390 }
2391
2392 /* reschedule inactivity worker */
2393 if (smux.power_state != SMUX_PWR_OFF)
2394 queue_delayed_work(smux_tx_wq, &smux_delayed_inactivity_work,
2395 msecs_to_jiffies(SMUX_INACTIVITY_TIMEOUT_MS));
2396}
2397
2398/**
Eric Holmbergb8435c82012-06-05 14:51:29 -06002399 * Remove RX retry packet from channel and free it.
2400 *
Eric Holmbergb8435c82012-06-05 14:51:29 -06002401 * @ch Channel for retry packet
2402 * @retry Retry packet to remove
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002403 *
2404 * @returns 1 if flow control updated; 0 otherwise
2405 *
2406 * Must be called with state_lock_lhb1 locked.
Eric Holmbergb8435c82012-06-05 14:51:29 -06002407 */
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002408int smux_remove_rx_retry(struct smux_lch_t *ch,
Eric Holmbergb8435c82012-06-05 14:51:29 -06002409 struct smux_rx_pkt_retry *retry)
2410{
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002411 int tx_ready = 0;
2412
Eric Holmbergb8435c82012-06-05 14:51:29 -06002413 list_del(&retry->rx_retry_list);
2414 --ch->rx_retry_queue_cnt;
2415 smux_free_pkt(retry->pkt);
2416 kfree(retry);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002417
2418 if ((ch->options & SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP) &&
2419 (ch->rx_retry_queue_cnt <= SMUX_RX_WM_LOW) &&
2420 ch->rx_flow_control_auto) {
2421 ch->rx_flow_control_auto = 0;
2422 smux_rx_flow_control_updated(ch);
2423 schedule_notify(ch->lcid, SMUX_RX_RETRY_LOW_WM_HIT, NULL);
2424 tx_ready = 1;
2425 }
2426 return tx_ready;
Eric Holmbergb8435c82012-06-05 14:51:29 -06002427}
2428
2429/**
Eric Holmberg0560f7a2012-05-31 15:50:26 -06002430 * RX worker handles all receive operations.
2431 *
2432 * @work Work structure contained in struct smux_rx_worker_data
2433 */
2434static void smux_rx_worker(struct work_struct *work)
2435{
2436 unsigned long flags;
2437 int used;
2438 int initial_rx_state;
2439 struct smux_rx_worker_data *w;
2440 const unsigned char *data;
2441 int len;
2442 int flag;
2443
2444 w = container_of(work, struct smux_rx_worker_data, work);
2445 data = w->data;
2446 len = w->len;
2447 flag = w->flag;
2448
2449 spin_lock_irqsave(&smux.rx_lock_lha1, flags);
2450 smux.rx_activity_flag = 1;
2451 spin_unlock_irqrestore(&smux.rx_lock_lha1, flags);
2452
2453 SMUX_DBG("%s: %p, len=%d, flag=%d\n", __func__, data, len, flag);
2454 used = 0;
2455 do {
Eric Holmberg06011322012-07-06 18:17:03 -06002456 if (smux.in_reset) {
2457 SMUX_DBG("%s: abort RX due to reset\n", __func__);
2458 smux.rx_state = SMUX_RX_IDLE;
2459 break;
2460 }
2461
Eric Holmberg0560f7a2012-05-31 15:50:26 -06002462 SMUX_DBG("%s: state %d; %d of %d\n",
2463 __func__, smux.rx_state, used, len);
2464 initial_rx_state = smux.rx_state;
2465
2466 switch (smux.rx_state) {
2467 case SMUX_RX_IDLE:
2468 smux_rx_handle_idle(data, len, &used, flag);
2469 break;
2470 case SMUX_RX_MAGIC:
2471 smux_rx_handle_magic(data, len, &used, flag);
2472 break;
2473 case SMUX_RX_HDR:
2474 smux_rx_handle_hdr(data, len, &used, flag);
2475 break;
2476 case SMUX_RX_PAYLOAD:
2477 smux_rx_handle_pkt_payload(data, len, &used, flag);
2478 break;
2479 default:
2480 SMUX_DBG("%s: invalid state %d\n",
2481 __func__, smux.rx_state);
2482 smux.rx_state = SMUX_RX_IDLE;
2483 break;
2484 }
2485 } while (used < len || smux.rx_state != initial_rx_state);
2486
2487 complete(&w->work_complete);
2488}
2489
2490/**
Eric Holmbergb8435c82012-06-05 14:51:29 -06002491 * RX Retry worker handles retrying get_rx_buffer calls that previously failed
2492 * because the client was not ready (-EAGAIN).
2493 *
2494 * @work Work structure contained in smux_lch_t structure
2495 */
2496static void smux_rx_retry_worker(struct work_struct *work)
2497{
2498 struct smux_lch_t *ch;
2499 struct smux_rx_pkt_retry *retry;
2500 union notifier_metadata metadata;
2501 int tmp;
2502 unsigned long flags;
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002503 int immediate_retry = 0;
2504 int tx_ready = 0;
Eric Holmbergb8435c82012-06-05 14:51:29 -06002505
2506 ch = container_of(work, struct smux_lch_t, rx_retry_work.work);
2507
2508 /* get next retry packet */
2509 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
Eric Holmberg06011322012-07-06 18:17:03 -06002510 if ((ch->local_state != SMUX_LCH_LOCAL_OPENED) || smux.in_reset) {
Eric Holmbergb8435c82012-06-05 14:51:29 -06002511 /* port has been closed - remove all retries */
2512 while (!list_empty(&ch->rx_retry_queue)) {
2513 retry = list_first_entry(&ch->rx_retry_queue,
2514 struct smux_rx_pkt_retry,
2515 rx_retry_list);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002516 (void)smux_remove_rx_retry(ch, retry);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002517 }
2518 }
2519
2520 if (list_empty(&ch->rx_retry_queue)) {
2521 SMUX_DBG("%s: retry list empty for channel %d\n",
2522 __func__, ch->lcid);
2523 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2524 return;
2525 }
2526 retry = list_first_entry(&ch->rx_retry_queue,
2527 struct smux_rx_pkt_retry,
2528 rx_retry_list);
2529 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2530
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002531 SMUX_DBG("%s: ch %d retrying rx pkt %p\n",
2532 __func__, ch->lcid, retry);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002533 metadata.read.pkt_priv = 0;
2534 metadata.read.buffer = 0;
2535 tmp = ch->get_rx_buffer(ch->priv,
2536 (void **)&metadata.read.pkt_priv,
2537 (void **)&metadata.read.buffer,
2538 retry->pkt->hdr.payload_len);
2539 if (tmp == 0 && metadata.read.buffer) {
2540 /* have valid RX buffer */
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002541
Eric Holmbergb8435c82012-06-05 14:51:29 -06002542 memcpy(metadata.read.buffer, retry->pkt->payload,
2543 retry->pkt->hdr.payload_len);
2544 metadata.read.len = retry->pkt->hdr.payload_len;
2545
2546 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002547 tx_ready = smux_remove_rx_retry(ch, retry);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002548 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002549 schedule_notify(ch->lcid, SMUX_READ_DONE, &metadata);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002550 if (tx_ready)
2551 list_channel(ch);
2552
2553 immediate_retry = 1;
Eric Holmbergb8435c82012-06-05 14:51:29 -06002554 } else if (tmp == -EAGAIN ||
2555 (tmp == 0 && !metadata.read.buffer)) {
2556 /* retry again */
2557 retry->timeout_in_ms <<= 1;
2558 if (retry->timeout_in_ms > SMUX_RX_RETRY_MAX_MS) {
2559 /* timed out */
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002560 pr_err("%s: ch %d RX retry client timeout\n",
2561 __func__, ch->lcid);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002562 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002563 tx_ready = smux_remove_rx_retry(ch, retry);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002564 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002565 schedule_notify(ch->lcid, SMUX_READ_FAIL, NULL);
2566 if (tx_ready)
2567 list_channel(ch);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002568 }
2569 } else {
2570 /* client error - drop packet */
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002571 pr_err("%s: ch %d RX retry client failed (%d)\n",
2572 __func__, ch->lcid, tmp);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002573 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002574 tx_ready = smux_remove_rx_retry(ch, retry);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002575 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002576 schedule_notify(ch->lcid, SMUX_READ_FAIL, NULL);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002577 if (tx_ready)
2578 list_channel(ch);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002579 }
2580
2581 /* schedule next retry */
2582 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2583 if (!list_empty(&ch->rx_retry_queue)) {
2584 retry = list_first_entry(&ch->rx_retry_queue,
2585 struct smux_rx_pkt_retry,
2586 rx_retry_list);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002587
2588 if (immediate_retry)
2589 queue_delayed_work(smux_rx_wq, &ch->rx_retry_work, 0);
2590 else
2591 queue_delayed_work(smux_rx_wq, &ch->rx_retry_work,
2592 msecs_to_jiffies(retry->timeout_in_ms));
Eric Holmbergb8435c82012-06-05 14:51:29 -06002593 }
2594 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2595}
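
/*
 * Illustrative sketch of a client get_rx_buffer() callback (hypothetical
 * names; only the callback signature comes from msm_smux_open() below).
 * The worker above keeps retrying while the callback returns -EAGAIN, or
 * 0 with a NULL buffer, and drops the packet with SMUX_READ_FAIL once
 * the retry timeout doubles past SMUX_RX_RETRY_MAX_MS or the callback
 * returns any other error.
 *
 *	static int example_get_rx_buffer(void *priv, void **pkt_priv,
 *					 void **buffer, int size)
 *	{
 *		struct example_client *client = priv;
 *		void *buf = kmalloc(size, GFP_ATOMIC);
 *
 *		if (!buf)
 *			return -EAGAIN;		// ask SMUX to retry later
 *
 *		*pkt_priv = client;
 *		*buffer = buf;
 *		return 0;			// buffer handed to SMUX
 *	}
 *
 * GFP_ATOMIC is used only because this sketch makes no assumption about
 * the calling context.
 */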
2596
2597/**
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002598 * Transmit worker handles serializing and transmitting packets onto the
2599 * underlying transport.
2600 *
2601 * @work Work structure (not used)
2602 */
2603static void smux_tx_worker(struct work_struct *work)
2604{
2605 struct smux_pkt_t *pkt;
2606 struct smux_lch_t *ch;
2607 unsigned low_wm_notif;
2608 unsigned lcid;
2609 unsigned long flags;
2610
2611
2612 /*
2613 * Transmit packets in round-robin fashion based upon ready
2614 * channels.
2615 *
2616 * To eliminate the need to hold a lock for the entire
2617 * iteration through the channel ready list, the head of the
2618 * ready-channel list is always the next channel to be
2619 * processed. To send a packet, the first valid packet in
2620 * the head channel is removed and the head channel is then
2621 * rescheduled at the end of the queue by removing it and
2622 * inserting after the tail. The locks can then be released
2623 * while the packet is processed.
2624 */
Eric Holmberged1f00c2012-06-07 09:45:18 -06002625 while (!smux.in_reset) {
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002626 pkt = NULL;
2627 low_wm_notif = 0;
2628
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002629 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002630
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002631 /* handle wakeup if needed */
2632 if (smux.power_state == SMUX_PWR_OFF) {
2633 if (!list_empty(&smux.lch_tx_ready_list) ||
2634 !list_empty(&smux.power_queue)) {
2635 /* data to transmit, do wakeup */
Eric Holmbergff0b0112012-06-08 15:06:57 -06002636 SMUX_PWR("%s: Power %d->%d\n", __func__,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002637 smux.power_state,
2638 SMUX_PWR_TURNING_ON);
2639 smux.power_state = SMUX_PWR_TURNING_ON;
2640 spin_unlock_irqrestore(&smux.tx_lock_lha2,
2641 flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002642 queue_work(smux_tx_wq, &smux_wakeup_work);
2643 } else {
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002644 /* no activity -- stay asleep */
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002645 spin_unlock_irqrestore(&smux.tx_lock_lha2,
2646 flags);
2647 }
2648 break;
2649 }
2650
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002651 /* process any pending power packets */
2652 if (!list_empty(&smux.power_queue)) {
2653 pkt = list_first_entry(&smux.power_queue,
2654 struct smux_pkt_t, list);
2655 list_del(&pkt->list);
2656 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2657
Eric Holmberga9b06472012-06-22 09:46:34 -06002658 /* Adjust power state if this is a flush command */
2659 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
2660 if (smux.power_state == SMUX_PWR_TURNING_OFF_FLUSH &&
2661 pkt->hdr.cmd == SMUX_CMD_PWR_CTL) {
2662 if (pkt->hdr.flags & SMUX_CMD_PWR_CTL_ACK ||
2663 smux.power_ctl_remote_req_received) {
2664 /*
2665 * Sending remote power-down request ACK
2666 * or sending local power-down request
2667 * and we already received a remote
2668 * power-down request.
2669 */
2670 SMUX_PWR("%s: Power %d->%d\n", __func__,
2671 smux.power_state,
2672 SMUX_PWR_OFF_FLUSH);
2673 smux.power_state = SMUX_PWR_OFF_FLUSH;
2674 smux.power_ctl_remote_req_received = 0;
2675 queue_work(smux_tx_wq,
2676 &smux_inactivity_work);
2677 } else {
2678 /* sending local power-down request */
2679 SMUX_PWR("%s: Power %d->%d\n", __func__,
2680 smux.power_state,
2681 SMUX_PWR_TURNING_OFF);
2682 smux.power_state = SMUX_PWR_TURNING_OFF;
2683 }
2684 }
2685 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2686
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002687 /* send the packet */
Eric Holmberga9b06472012-06-22 09:46:34 -06002688 smux_uart_power_on();
2689 smux.tx_activity_flag = 1;
Eric Holmberg4dd6e1a2012-06-26 13:46:29 -06002690 SMUX_PWR_PKT_TX(pkt);
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002691 if (!smux_byte_loopback) {
2692 smux_tx_tty(pkt);
2693 smux_flush_tty();
2694 } else {
2695 smux_tx_loopback(pkt);
2696 }
2697
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002698 smux_free_pkt(pkt);
2699 continue;
2700 }
2701
2702 /* get the next ready channel */
2703 if (list_empty(&smux.lch_tx_ready_list)) {
2704 /* no ready channels */
2705 SMUX_DBG("%s: no more ready channels, exiting\n",
2706 __func__);
2707 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2708 break;
2709 }
2710 smux.tx_activity_flag = 1;
2711
2712 if (smux.power_state != SMUX_PWR_ON) {
2713 /* channel not ready to transmit */
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002714 SMUX_DBG("%s: waiting for link up (state %d)\n",
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002715 __func__,
2716 smux.power_state);
2717 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2718 break;
2719 }
2720
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002721 /* get the next packet to send and rotate channel list */
2722 ch = list_first_entry(&smux.lch_tx_ready_list,
2723 struct smux_lch_t,
2724 tx_ready_list);
2725
2726 spin_lock(&ch->state_lock_lhb1);
2727 spin_lock(&ch->tx_lock_lhb2);
2728 if (!list_empty(&ch->tx_queue)) {
2729 /*
2730 * If remote TX flow control is enabled or
2731 * the channel is not fully opened, then only
2732 * send command packets.
2733 */
2734 if (ch->tx_flow_control || !IS_FULLY_OPENED(ch)) {
2735 struct smux_pkt_t *curr;
2736 list_for_each_entry(curr, &ch->tx_queue, list) {
2737 if (curr->hdr.cmd != SMUX_CMD_DATA) {
2738 pkt = curr;
2739 break;
2740 }
2741 }
2742 } else {
2743 /* get next cmd/data packet to send */
2744 pkt = list_first_entry(&ch->tx_queue,
2745 struct smux_pkt_t, list);
2746 }
2747 }
2748
2749 if (pkt) {
2750 list_del(&pkt->list);
2751
2752 /* update packet stats */
2753 if (pkt->hdr.cmd == SMUX_CMD_DATA) {
2754 --ch->tx_pending_data_cnt;
2755 if (ch->notify_lwm &&
2756 ch->tx_pending_data_cnt
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002757 <= SMUX_TX_WM_LOW) {
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002758 ch->notify_lwm = 0;
2759 low_wm_notif = 1;
2760 }
2761 }
2762
2763 /* advance to the next ready channel */
2764 list_rotate_left(&smux.lch_tx_ready_list);
2765 } else {
2766 /* no data in channel to send, remove from ready list */
2767 list_del(&ch->tx_ready_list);
2768 INIT_LIST_HEAD(&ch->tx_ready_list);
2769 }
2770 lcid = ch->lcid;
2771 spin_unlock(&ch->tx_lock_lhb2);
2772 spin_unlock(&ch->state_lock_lhb1);
2773 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2774
2775 if (low_wm_notif)
2776 schedule_notify(lcid, SMUX_LOW_WM_HIT, NULL);
2777
2778 /* send the packet */
2779 smux_tx_pkt(ch, pkt);
2780 smux_free_pkt(pkt);
2781 }
2782}
2783
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002784/**
2785 * Update the RX flow control (sent in the TIOCM Status command).
2786 *
2787 * @ch Channel for update
2788 *
2789 * @returns 1 for updated, 0 for not updated
2790 *
2791 * Must be called with ch->state_lock_lhb1 locked.
2792 */
2793static int smux_rx_flow_control_updated(struct smux_lch_t *ch)
2794{
2795 int updated = 0;
2796 int prev_state;
2797
2798 prev_state = ch->local_tiocm & SMUX_CMD_STATUS_FLOW_CNTL;
2799
2800 if (ch->rx_flow_control_client || ch->rx_flow_control_auto)
2801 ch->local_tiocm |= SMUX_CMD_STATUS_FLOW_CNTL;
2802 else
2803 ch->local_tiocm &= ~SMUX_CMD_STATUS_FLOW_CNTL;
2804
2805 if (prev_state != (ch->local_tiocm & SMUX_CMD_STATUS_FLOW_CNTL)) {
2806 smux_send_status_cmd(ch);
2807 updated = 1;
2808 }
2809
2810 return updated;
2811}
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002812
Eric Holmberg06011322012-07-06 18:17:03 -06002813/**
2814 * Flush all SMUX workqueues.
2815 *
2816 * This sets the reset bit to abort any processing loops and then
2817 * flushes the workqueues to ensure that no pending work is still
2818 * running.  Do not call while holding any locks used by the workers,
2819 * as that would result in a deadlock.
2820 */
2821static void smux_flush_workqueues(void)
2822{
2823 smux.in_reset = 1;
2824
2825 SMUX_DBG("%s: flushing tx wq\n", __func__);
2826 flush_workqueue(smux_tx_wq);
2827 SMUX_DBG("%s: flushing rx wq\n", __func__);
2828 flush_workqueue(smux_rx_wq);
2829 SMUX_DBG("%s: flushing notify wq\n", __func__);
2830 flush_workqueue(smux_notify_wq);
2831}
2832
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002833/**********************************************************************/
2834/* Kernel API */
2835/**********************************************************************/
2836
2837/**
2838 * Set or clear channel option using the SMUX_CH_OPTION_* channel
2839 * flags.
2840 *
2841 * @lcid Logical channel ID
2842 * @set Options to set
2843 * @clear Options to clear
2844 *
2845 * @returns 0 for success, < 0 for failure
2846 */
2847int msm_smux_set_ch_option(uint8_t lcid, uint32_t set, uint32_t clear)
2848{
2849 unsigned long flags;
2850 struct smux_lch_t *ch;
2851 int tx_ready = 0;
2852 int ret = 0;
2853
2854 if (smux_assert_lch_id(lcid))
2855 return -ENXIO;
2856
2857 ch = &smux_lch[lcid];
2858 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2859
2860 /* Local loopback mode */
2861 if (set & SMUX_CH_OPTION_LOCAL_LOOPBACK)
2862 ch->local_mode = SMUX_LCH_MODE_LOCAL_LOOPBACK;
2863
2864 if (clear & SMUX_CH_OPTION_LOCAL_LOOPBACK)
2865 ch->local_mode = SMUX_LCH_MODE_NORMAL;
2866
2867 /* Remote loopback mode */
2868 if (set & SMUX_CH_OPTION_REMOTE_LOOPBACK)
2869 ch->local_mode = SMUX_LCH_MODE_REMOTE_LOOPBACK;
2870
2871 if (clear & SMUX_CH_OPTION_REMOTE_LOOPBACK)
2872 ch->local_mode = SMUX_LCH_MODE_NORMAL;
2873
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002874 /* RX Flow control */
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002875 if (set & SMUX_CH_OPTION_REMOTE_TX_STOP) {
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002876 ch->rx_flow_control_client = 1;
2877 tx_ready |= smux_rx_flow_control_updated(ch);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002878 }
2879
2880 if (clear & SMUX_CH_OPTION_REMOTE_TX_STOP) {
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002881 ch->rx_flow_control_client = 0;
2882 tx_ready |= smux_rx_flow_control_updated(ch);
2883 }
2884
2885 /* Auto RX Flow Control */
2886 if (set & SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP) {
2887 SMUX_DBG("%s: auto rx flow control option enabled\n",
2888 __func__);
2889 ch->options |= SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP;
2890 }
2891
2892 if (clear & SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP) {
2893 SMUX_DBG("%s: auto rx flow control option disabled\n",
2894 __func__);
2895 ch->options &= ~SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP;
2896 ch->rx_flow_control_auto = 0;
2897 tx_ready |= smux_rx_flow_control_updated(ch);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002898 }
2899
2900 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2901
2902 if (tx_ready)
2903 list_channel(ch);
2904
2905 return ret;
2906}
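
/*
 * Illustrative usage (hypothetical channel ID): put a channel into local
 * loopback with automatic remote flow control for testing, then clear
 * the loopback option again afterwards.
 *
 *	ret = msm_smux_set_ch_option(EXAMPLE_LCID,
 *			SMUX_CH_OPTION_LOCAL_LOOPBACK |
 *			SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP,
 *			0);
 *	...
 *	ret = msm_smux_set_ch_option(EXAMPLE_LCID, 0,
 *			SMUX_CH_OPTION_LOCAL_LOOPBACK);
 */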
2907
2908/**
2909 * Starts the opening sequence for a logical channel.
2910 *
2911 * @lcid Logical channel ID
2912 * @priv Free for client usage
2913 * @notify Event notification function
2914 * @get_rx_buffer Function used to provide a receive buffer to SMUX
2915 *
2916 * @returns 0 for success, <0 otherwise
2917 *
2918 * A channel must be fully closed before it can be opened (either never
2919 * previously opened, or msm_smux_close() has been called and the
2920 * SMUX_DISCONNECTED notification has been received).
2921 *
2922 * Once the remote side is opened, the client will receive a SMUX_CONNECTED
2923 * event.
2924 */
2925int msm_smux_open(uint8_t lcid, void *priv,
2926 void (*notify)(void *priv, int event_type, const void *metadata),
2927 int (*get_rx_buffer)(void *priv, void **pkt_priv, void **buffer,
2928 int size))
2929{
2930 int ret;
2931 struct smux_lch_t *ch;
2932 struct smux_pkt_t *pkt;
2933 int tx_ready = 0;
2934 unsigned long flags;
2935
2936 if (smux_assert_lch_id(lcid))
2937 return -ENXIO;
2938
2939 ch = &smux_lch[lcid];
2940 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2941
2942 if (ch->local_state == SMUX_LCH_LOCAL_CLOSING) {
2943 ret = -EAGAIN;
2944 goto out;
2945 }
2946
2947 if (ch->local_state != SMUX_LCH_LOCAL_CLOSED) {
2948 pr_err("%s: open lcid %d local state %x invalid\n",
2949 __func__, lcid, ch->local_state);
2950 ret = -EINVAL;
2951 goto out;
2952 }
2953
2954 SMUX_DBG("lcid %d local state 0x%x -> 0x%x\n", lcid,
2955 ch->local_state,
2956 SMUX_LCH_LOCAL_OPENING);
2957
Eric Holmberg06011322012-07-06 18:17:03 -06002958 ch->rx_flow_control_auto = 0;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002959 ch->local_state = SMUX_LCH_LOCAL_OPENING;
2960
2961 ch->priv = priv;
2962 ch->notify = notify;
2963 ch->get_rx_buffer = get_rx_buffer;
2964 ret = 0;
2965
2966 /* Send Open Command */
2967 pkt = smux_alloc_pkt();
2968 if (!pkt) {
2969 ret = -ENOMEM;
2970 goto out;
2971 }
2972 pkt->hdr.magic = SMUX_MAGIC;
2973 pkt->hdr.cmd = SMUX_CMD_OPEN_LCH;
2974 pkt->hdr.flags = SMUX_CMD_OPEN_POWER_COLLAPSE;
2975 if (ch->local_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK)
2976 pkt->hdr.flags |= SMUX_CMD_OPEN_REMOTE_LOOPBACK;
2977 pkt->hdr.lcid = lcid;
2978 pkt->hdr.payload_len = 0;
2979 pkt->hdr.pad_len = 0;
2980 smux_tx_queue(pkt, ch, 0);
2981 tx_ready = 1;
2982
2983out:
2984 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
Eric Holmberg06011322012-07-06 18:17:03 -06002985 smux_rx_flow_control_updated(ch);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002986 if (tx_ready)
2987 list_channel(ch);
2988 return ret;
2989}
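
/*
 * Illustrative sketch of a minimal open sequence (all names are
 * hypothetical; the callback signatures come from the prototype above).
 *
 *	static void example_notify(void *priv, int event_type,
 *				   const void *metadata)
 *	{
 *		switch (event_type) {
 *		case SMUX_CONNECTED:
 *			// remote side open - queued data will now be sent
 *			break;
 *		case SMUX_DISCONNECTED:
 *			// fully closed - channel may be reopened
 *			break;
 *		default:
 *			break;
 *		}
 *	}
 *
 *	ret = msm_smux_open(EXAMPLE_LCID, example_priv,
 *			    example_notify, example_get_rx_buffer);
 *	if (ret < 0)
 *		pr_err("example: smux open failed %d\n", ret);
 */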
2990
2991/**
2992 * Starts the closing sequence for a logical channel.
2993 *
2994 * @lcid Logical channel ID
2995 *
2996 * @returns 0 for success, <0 otherwise
2997 *
2998 * Once the close event has been acknowledged by the remote side, the client
2999 * will receive a SMUX_DISCONNECTED notification.
3000 */
3001int msm_smux_close(uint8_t lcid)
3002{
3003 int ret = 0;
3004 struct smux_lch_t *ch;
3005 struct smux_pkt_t *pkt;
3006 int tx_ready = 0;
3007 unsigned long flags;
3008
3009 if (smux_assert_lch_id(lcid))
3010 return -ENXIO;
3011
3012 ch = &smux_lch[lcid];
3013 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
3014 ch->local_tiocm = 0x0;
3015 ch->remote_tiocm = 0x0;
3016 ch->tx_pending_data_cnt = 0;
3017 ch->notify_lwm = 0;
3018
3019 /* Purge TX queue */
3020 spin_lock(&ch->tx_lock_lhb2);
Eric Holmberged1f00c2012-06-07 09:45:18 -06003021 smux_purge_ch_tx_queue(ch);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003022 spin_unlock(&ch->tx_lock_lhb2);
3023
3024 /* Send Close Command */
3025 if (ch->local_state == SMUX_LCH_LOCAL_OPENED ||
3026 ch->local_state == SMUX_LCH_LOCAL_OPENING) {
3027 SMUX_DBG("lcid %d local state 0x%x -> 0x%x\n", lcid,
3028 ch->local_state,
3029 SMUX_LCH_LOCAL_CLOSING);
3030
3031 ch->local_state = SMUX_LCH_LOCAL_CLOSING;
3032 pkt = smux_alloc_pkt();
3033 if (pkt) {
3034 pkt->hdr.cmd = SMUX_CMD_CLOSE_LCH;
3035 pkt->hdr.flags = 0;
3036 pkt->hdr.lcid = lcid;
3037 pkt->hdr.payload_len = 0;
3038 pkt->hdr.pad_len = 0;
3039 smux_tx_queue(pkt, ch, 0);
3040 tx_ready = 1;
3041 } else {
3042 pr_err("%s: pkt allocation failed\n", __func__);
3043 ret = -ENOMEM;
3044 }
Eric Holmbergb8435c82012-06-05 14:51:29 -06003045
3046 /* Purge RX retry queue */
3047 if (ch->rx_retry_queue_cnt)
3048 queue_delayed_work(smux_rx_wq, &ch->rx_retry_work, 0);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003049 }
3050 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
3051
3052 if (tx_ready)
3053 list_channel(ch);
3054
3055 return ret;
3056}
3057
3058/**
3059 * Write data to a logical channel.
3060 *
3061 * @lcid Logical channel ID
3062 * @pkt_priv Client data that will be returned with the SMUX_WRITE_DONE or
3063 * SMUX_WRITE_FAIL notification.
3064 * @data Data to write
3065 * @len Length of @data
3066 *
3067 * @returns 0 for success, <0 otherwise
3068 *
3069 * Data may be written immediately after msm_smux_open() is called,
3070 * but the data will wait in the transmit queue until the channel has
3071 * been fully opened.
3072 *
3073 * Once the data has been written, the client will receive either a completion
3074 * (SMUX_WRITE_DONE) or a failure notice (SMUX_WRITE_FAIL).
3075 */
3076int msm_smux_write(uint8_t lcid, void *pkt_priv, const void *data, int len)
3077{
3078 struct smux_lch_t *ch;
3079 struct smux_pkt_t *pkt;
3080 int tx_ready = 0;
3081 unsigned long flags;
3082 int ret;
3083
3084 if (smux_assert_lch_id(lcid))
3085 return -ENXIO;
3086
3087 ch = &smux_lch[lcid];
3088 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
3089
3090 if (ch->local_state != SMUX_LCH_LOCAL_OPENED &&
3091 ch->local_state != SMUX_LCH_LOCAL_OPENING) {
3092 pr_err("%s: invalid local state %d channel %d\n",
3093 __func__, ch->local_state, lcid);
3094 ret = -EINVAL;
3095 goto out;
3096 }
3097
3098 if (len > SMUX_MAX_PKT_SIZE - sizeof(struct smux_hdr_t)) {
3099 pr_err("%s: payload %d too large\n",
3100 __func__, len);
3101 ret = -E2BIG;
3102 goto out;
3103 }
3104
3105 pkt = smux_alloc_pkt();
3106 if (!pkt) {
3107 ret = -ENOMEM;
3108 goto out;
3109 }
3110
3111 pkt->hdr.cmd = SMUX_CMD_DATA;
3112 pkt->hdr.lcid = lcid;
3113 pkt->hdr.flags = 0;
3114 pkt->hdr.payload_len = len;
3115 pkt->payload = (void *)data;
3116 pkt->priv = pkt_priv;
3117 pkt->hdr.pad_len = 0;
3118
3119 spin_lock(&ch->tx_lock_lhb2);
3120 /* verify high watermark */
3121 SMUX_DBG("%s: pending %d\n", __func__, ch->tx_pending_data_cnt);
3122
Eric Holmberg2e0906f2012-06-26 13:29:14 -06003123 if (ch->tx_pending_data_cnt >= SMUX_TX_WM_HIGH) {
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003124 pr_err("%s: ch %d high watermark %d exceeded %d\n",
Eric Holmberg2e0906f2012-06-26 13:29:14 -06003125 __func__, lcid, SMUX_TX_WM_HIGH,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003126 ch->tx_pending_data_cnt);
3127 ret = -EAGAIN;
3128 goto out_inner;
3129 }
3130
3131 /* queue packet for transmit */
Eric Holmberg2e0906f2012-06-26 13:29:14 -06003132 if (++ch->tx_pending_data_cnt == SMUX_TX_WM_HIGH) {
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003133 ch->notify_lwm = 1;
3134 pr_err("%s: high watermark hit\n", __func__);
3135 schedule_notify(lcid, SMUX_HIGH_WM_HIT, NULL);
3136 }
3137 list_add_tail(&pkt->list, &ch->tx_queue);
3138
3139 /* add to ready list */
3140 if (IS_FULLY_OPENED(ch))
3141 tx_ready = 1;
3142
3143 ret = 0;
3144
3145out_inner:
3146 spin_unlock(&ch->tx_lock_lhb2);
3147
3148out:
3149 if (ret)
3150 smux_free_pkt(pkt);
3151 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
3152
3153 if (tx_ready)
3154 list_channel(ch);
3155
3156 return ret;
3157}
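
/*
 * Illustrative usage (hypothetical names).  Note that only the data
 * pointer is queued, so the buffer must remain valid until the matching
 * SMUX_WRITE_DONE or SMUX_WRITE_FAIL notification arrives.
 *
 *	ret = msm_smux_write(EXAMPLE_LCID, req, req->data, req->len);
 *	if (ret == -EAGAIN) {
 *		// high watermark reached - wait for SMUX_LOW_WM_HIT
 *		// (or poll msm_smux_is_ch_low()) before writing more
 *	} else if (ret < 0) {
 *		pr_err("example: smux write failed %d\n", ret);
 *	}
 */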
3158
3159/**
3160 * Returns true if the TX queue is currently full (high water mark).
3161 *
3162 * @lcid Logical channel ID
3163 * @returns 0 if channel is not full
3164 * 1 if it is full
3165 * < 0 for error
3166 */
3167int msm_smux_is_ch_full(uint8_t lcid)
3168{
3169 struct smux_lch_t *ch;
3170 unsigned long flags;
3171 int is_full = 0;
3172
3173 if (smux_assert_lch_id(lcid))
3174 return -ENXIO;
3175
3176 ch = &smux_lch[lcid];
3177
3178 spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06003179 if (ch->tx_pending_data_cnt >= SMUX_TX_WM_HIGH)
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003180 is_full = 1;
3181 spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);
3182
3183 return is_full;
3184}
3185
3186/**
3187 * Returns true if the TX queue has space for more packets (it is at or
3188 * below the low water mark).
3189 *
3190 * @lcid Logical channel ID
3191 * @returns 0 if channel is above low watermark
3192 * 1 if it's at or below the low watermark
3193 * < 0 for error
3194 */
3195int msm_smux_is_ch_low(uint8_t lcid)
3196{
3197 struct smux_lch_t *ch;
3198 unsigned long flags;
3199 int is_low = 0;
3200
3201 if (smux_assert_lch_id(lcid))
3202 return -ENXIO;
3203
3204 ch = &smux_lch[lcid];
3205
3206 spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06003207 if (ch->tx_pending_data_cnt <= SMUX_TX_WM_LOW)
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003208 is_low = 1;
3209 spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);
3210
3211 return is_low;
3212}
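
/*
 * Illustrative usage (hypothetical helpers): a simple TX throttling loop
 * built on the two watermark queries above.
 *
 *	while (example_have_tx_data(client)) {
 *		if (msm_smux_is_ch_full(EXAMPLE_LCID))
 *			break;	// resume on SMUX_LOW_WM_HIT notification
 *		example_send_next_chunk(client);
 *	}
 */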
3213
3214/**
3215 * Send TIOCM status update.
3216 *
3217 * @ch Channel for update
3218 *
3219 * @returns 0 for success, <0 for failure
3220 *
3221 * Channel lock must be held before calling.
3222 */
3223static int smux_send_status_cmd(struct smux_lch_t *ch)
3224{
3225 struct smux_pkt_t *pkt;
3226
3227 if (!ch)
3228 return -EINVAL;
3229
3230 pkt = smux_alloc_pkt();
3231 if (!pkt)
3232 return -ENOMEM;
3233
3234 pkt->hdr.lcid = ch->lcid;
3235 pkt->hdr.cmd = SMUX_CMD_STATUS;
3236 pkt->hdr.flags = ch->local_tiocm;
3237 pkt->hdr.payload_len = 0;
3238 pkt->hdr.pad_len = 0;
3239 smux_tx_queue(pkt, ch, 0);
3240
3241 return 0;
3242}
3243
3244/**
3245 * Internal helper function for getting the TIOCM status with
3246 * state_lock_lhb1 already locked.
3247 *
3248 * @ch Channel pointer
3249 *
3250 * @returns TIOCM status
3251 */
3252static long msm_smux_tiocm_get_atomic(struct smux_lch_t *ch)
3253{
3254 long status = 0x0;
3255
3256 status |= (ch->remote_tiocm & SMUX_CMD_STATUS_RTC) ? TIOCM_DSR : 0;
3257 status |= (ch->remote_tiocm & SMUX_CMD_STATUS_RTR) ? TIOCM_CTS : 0;
3258 status |= (ch->remote_tiocm & SMUX_CMD_STATUS_RI) ? TIOCM_RI : 0;
3259 status |= (ch->remote_tiocm & SMUX_CMD_STATUS_DCD) ? TIOCM_CD : 0;
3260
3261 status |= (ch->local_tiocm & SMUX_CMD_STATUS_RTC) ? TIOCM_DTR : 0;
3262 status |= (ch->local_tiocm & SMUX_CMD_STATUS_RTR) ? TIOCM_RTS : 0;
3263
3264 return status;
3265}
3266
3267/**
3268 * Get the TIOCM status bits.
3269 *
3270 * @lcid Logical channel ID
3271 *
3272 * @returns >= 0 TIOCM status bits
3273 * < 0 Error condition
3274 */
3275long msm_smux_tiocm_get(uint8_t lcid)
3276{
3277 struct smux_lch_t *ch;
3278 unsigned long flags;
3279 long status = 0x0;
3280
3281 if (smux_assert_lch_id(lcid))
3282 return -ENXIO;
3283
3284 ch = &smux_lch[lcid];
3285 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
3286 status = msm_smux_tiocm_get_atomic(ch);
3287 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
3288
3289 return status;
3290}
3291
3292/**
3293 * Set/clear the TIOCM status bits.
3294 *
3295 * @lcid Logical channel ID
3296 * @set Bits to set
3297 * @clear Bits to clear
3298 *
3299 * @returns 0 for success; < 0 for failure
3300 *
3301 * If a bit is specified in both the @set and @clear masks, then the clear bit
3302 * definition will dominate and the bit will be cleared.
3303 */
3304int msm_smux_tiocm_set(uint8_t lcid, uint32_t set, uint32_t clear)
3305{
3306 struct smux_lch_t *ch;
3307 unsigned long flags;
3308 uint8_t old_status;
3309 uint8_t status_set = 0x0;
3310 uint8_t status_clear = 0x0;
3311 int tx_ready = 0;
3312 int ret = 0;
3313
3314 if (smux_assert_lch_id(lcid))
3315 return -ENXIO;
3316
3317 ch = &smux_lch[lcid];
3318 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
3319
3320 status_set |= (set & TIOCM_DTR) ? SMUX_CMD_STATUS_RTC : 0;
3321 status_set |= (set & TIOCM_RTS) ? SMUX_CMD_STATUS_RTR : 0;
3322 status_set |= (set & TIOCM_RI) ? SMUX_CMD_STATUS_RI : 0;
3323 status_set |= (set & TIOCM_CD) ? SMUX_CMD_STATUS_DCD : 0;
3324
3325 status_clear |= (clear & TIOCM_DTR) ? SMUX_CMD_STATUS_RTC : 0;
3326 status_clear |= (clear & TIOCM_RTS) ? SMUX_CMD_STATUS_RTR : 0;
3327 status_clear |= (clear & TIOCM_RI) ? SMUX_CMD_STATUS_RI : 0;
3328 status_clear |= (clear & TIOCM_CD) ? SMUX_CMD_STATUS_DCD : 0;
3329
3330 old_status = ch->local_tiocm;
3331 ch->local_tiocm |= status_set;
3332 ch->local_tiocm &= ~status_clear;
3333
3334 if (ch->local_tiocm != old_status) {
3335 ret = smux_send_status_cmd(ch);
3336 tx_ready = 1;
3337 }
3338 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
3339
3340 if (tx_ready)
3341 list_channel(ch);
3342
3343 return ret;
3344}
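
/*
 * Illustrative usage (hypothetical channel ID): assert DTR and RTS, then
 * drop RTS later; a bit present in both masks would be cleared.
 *
 *	ret = msm_smux_tiocm_set(EXAMPLE_LCID, TIOCM_DTR | TIOCM_RTS, 0);
 *	...
 *	ret = msm_smux_tiocm_set(EXAMPLE_LCID, 0, TIOCM_RTS);
 */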
3345
3346/**********************************************************************/
Eric Holmberged1f00c2012-06-07 09:45:18 -06003347/* Subsystem Restart */
3348/**********************************************************************/
3349static struct notifier_block ssr_notifier = {
3350 .notifier_call = ssr_notifier_cb,
3351};
3352
3353/**
3354 * Handle Subsystem Restart (SSR) notifications.
3355 *
3356 * @this Pointer to ssr_notifier
3357 * @code SSR Code
3358 * @data Data pointer (not used)
3359 */
3360static int ssr_notifier_cb(struct notifier_block *this,
3361 unsigned long code,
3362 void *data)
3363{
3364 unsigned long flags;
3365 int power_off_uart = 0;
3366
Eric Holmbergd2697902012-06-15 09:58:46 -06003367 if (code == SUBSYS_BEFORE_SHUTDOWN) {
3368 SMUX_DBG("%s: ssr - before shutdown\n", __func__);
3369 mutex_lock(&smux.mutex_lha0);
3370 smux.in_reset = 1;
3371 mutex_unlock(&smux.mutex_lha0);
Eric Holmberged1f00c2012-06-07 09:45:18 -06003372 return NOTIFY_DONE;
Eric Holmbergd2697902012-06-15 09:58:46 -06003373 } else if (code != SUBSYS_AFTER_SHUTDOWN) {
3374 return NOTIFY_DONE;
3375 }
3376 SMUX_DBG("%s: ssr - after shutdown\n", __func__);
Eric Holmberged1f00c2012-06-07 09:45:18 -06003377
3378 /* Cleanup channels */
Eric Holmberg06011322012-07-06 18:17:03 -06003379 smux_flush_workqueues();
Eric Holmbergd2697902012-06-15 09:58:46 -06003380 mutex_lock(&smux.mutex_lha0);
Eric Holmberged1f00c2012-06-07 09:45:18 -06003381 smux_lch_purge();
Eric Holmbergd2697902012-06-15 09:58:46 -06003382 if (smux.tty)
3383 tty_driver_flush_buffer(smux.tty);
Eric Holmberged1f00c2012-06-07 09:45:18 -06003384
3385 /* Power-down UART */
3386 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
3387 if (smux.power_state != SMUX_PWR_OFF) {
Eric Holmbergff0b0112012-06-08 15:06:57 -06003388 SMUX_PWR("%s: SSR - turning off UART\n", __func__);
Eric Holmberged1f00c2012-06-07 09:45:18 -06003389 smux.power_state = SMUX_PWR_OFF;
3390 power_off_uart = 1;
3391 }
Eric Holmbergd2697902012-06-15 09:58:46 -06003392 smux.powerdown_enabled = 0;
Eric Holmberged1f00c2012-06-07 09:45:18 -06003393 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
3394
3395 if (power_off_uart)
Eric Holmberg06011322012-07-06 18:17:03 -06003396 smux_uart_power_off_atomic();
Eric Holmberged1f00c2012-06-07 09:45:18 -06003397
Eric Holmberg06011322012-07-06 18:17:03 -06003398 smux.tx_activity_flag = 0;
3399 smux.rx_activity_flag = 0;
3400 smux.rx_state = SMUX_RX_IDLE;
Eric Holmbergd2697902012-06-15 09:58:46 -06003401 smux.in_reset = 0;
3402 mutex_unlock(&smux.mutex_lha0);
3403
Eric Holmberged1f00c2012-06-07 09:45:18 -06003404 return NOTIFY_DONE;
3405}
3406
3407/**********************************************************************/
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003408/* Line Discipline Interface */
3409/**********************************************************************/
static void smux_pdev_release(struct device *dev)
{
	struct platform_device *pdev;

	pdev = container_of(dev, struct platform_device, dev);
	SMUX_DBG("%s: releasing pdev %p '%s'\n", __func__, pdev, pdev->name);
	memset(&pdev->dev, 0x0, sizeof(pdev->dev));
}

static int smuxld_open(struct tty_struct *tty)
{
	int i;
	int tmp;
	unsigned long flags;

	if (!smux.is_initialized)
		return -ENODEV;

	mutex_lock(&smux.mutex_lha0);
	if (smux.ld_open_count) {
		pr_err("%s: %p multiple instances not supported\n",
			__func__, tty);
		mutex_unlock(&smux.mutex_lha0);
		return -EEXIST;
	}

	if (tty->ops->write == NULL) {
		pr_err("%s: tty->ops->write is NULL\n", __func__);
		mutex_unlock(&smux.mutex_lha0);
		return -EINVAL;
	}

	/* connect to TTY */
	++smux.ld_open_count;
	smux.in_reset = 0;
	smux.tty = tty;
	tty->disc_data = &smux;
	tty->receive_room = TTY_RECEIVE_ROOM;
	tty_driver_flush_buffer(tty);

	/* power-down the UART if we are idle */
	spin_lock_irqsave(&smux.tx_lock_lha2, flags);
	if (smux.power_state == SMUX_PWR_OFF) {
		SMUX_PWR("%s: powering off uart\n", __func__);
		smux.power_state = SMUX_PWR_OFF_FLUSH;
		spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
		queue_work(smux_tx_wq, &smux_inactivity_work);
	} else {
		spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
	}

	/* register platform devices */
	for (i = 0; i < ARRAY_SIZE(smux_devs); ++i) {
		SMUX_DBG("%s: register pdev '%s'\n",
				__func__, smux_devs[i].name);
		smux_devs[i].dev.release = smux_pdev_release;
		tmp = platform_device_register(&smux_devs[i]);
		if (tmp)
			pr_err("%s: error %d registering device %s\n",
				__func__, tmp, smux_devs[i].name);
	}
	mutex_unlock(&smux.mutex_lha0);
	return 0;
}

static void smuxld_close(struct tty_struct *tty)
{
	unsigned long flags;
	int power_up_uart = 0;
	int i;

	SMUX_DBG("%s: ldisc unload\n", __func__);
	smux_flush_workqueues();

	mutex_lock(&smux.mutex_lha0);
	if (smux.ld_open_count <= 0) {
		pr_err("%s: invalid ld count %d\n", __func__,
			smux.ld_open_count);
		mutex_unlock(&smux.mutex_lha0);
		return;
	}
	--smux.ld_open_count;

	/* Cleanup channels */
	smux_lch_purge();

	/* Unregister platform devices */
	for (i = 0; i < ARRAY_SIZE(smux_devs); ++i) {
		SMUX_DBG("%s: unregister pdev '%s'\n",
				__func__, smux_devs[i].name);
		platform_device_unregister(&smux_devs[i]);
	}

	/* Schedule UART power-up if it's down */
	spin_lock_irqsave(&smux.tx_lock_lha2, flags);
	if (smux.power_state == SMUX_PWR_OFF)
		power_up_uart = 1;
	smux.power_state = SMUX_PWR_OFF;
	smux.powerdown_enabled = 0;
	smux.tx_activity_flag = 0;
	smux.rx_activity_flag = 0;
	spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);

	if (power_up_uart)
		smux_uart_power_on_atomic();

	smux.rx_state = SMUX_RX_IDLE;

	/* Disconnect from TTY */
	smux.tty = NULL;
	mutex_unlock(&smux.mutex_lha0);
	SMUX_DBG("%s: ldisc complete\n", __func__);
}

/**
 * Receive data from TTY Line Discipline.
 *
 * @tty TTY structure
 * @cp Character data
 * @fp Flag data
 * @count Size of character and flag data
 */
void smuxld_receive_buf(struct tty_struct *tty, const unsigned char *cp,
			   char *fp, int count)
{
	int i;
	int last_idx = 0;
	const char *tty_name = NULL;
	char *f;

	if (smux_debug_mask & MSM_SMUX_DEBUG)
		print_hex_dump(KERN_INFO, "smux tty rx: ", DUMP_PREFIX_OFFSET,
			       16, 1, cp, count, true);

	/* verify error flags */
	for (i = 0, f = fp; i < count; ++i, ++f) {
		if (*f != TTY_NORMAL) {
			if (tty)
				tty_name = tty->name;
			pr_err("%s: TTY %s Error %d (%s)\n", __func__,
				tty_name, *f, tty_flag_to_str(*f));

			/* feed all previous valid data to the parser */
			smux_rx_state_machine(cp + last_idx, i - last_idx,
					TTY_NORMAL);

			/* feed bad data to parser */
			smux_rx_state_machine(cp + i, 1, *f);
			last_idx = i + 1;
		}
	}

	/* feed data to RX state machine */
	smux_rx_state_machine(cp + last_idx, count - last_idx, TTY_NORMAL);
}
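/*
 * Worked example of the error-flag handling above (illustrative only):
 * for count == 5 with a TTY_FRAME error flagged at index 2, the buffer is
 * fed to smux_rx_state_machine() in three pieces: bytes [0..1] with
 * TTY_NORMAL, byte [2] with TTY_FRAME, and bytes [3..4] with TTY_NORMAL,
 * so the parser sees every byte exactly once, each with its own flag.
 */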

static void smuxld_flush_buffer(struct tty_struct *tty)
{
	pr_err("%s: not supported\n", __func__);
}

static ssize_t smuxld_chars_in_buffer(struct tty_struct *tty)
{
	pr_err("%s: not supported\n", __func__);
	return -ENODEV;
}

static ssize_t smuxld_read(struct tty_struct *tty, struct file *file,
		unsigned char __user *buf, size_t nr)
{
	pr_err("%s: not supported\n", __func__);
	return -ENODEV;
}

static ssize_t smuxld_write(struct tty_struct *tty, struct file *file,
		const unsigned char *buf, size_t nr)
{
	pr_err("%s: not supported\n", __func__);
	return -ENODEV;
}

static int smuxld_ioctl(struct tty_struct *tty, struct file *file,
		unsigned int cmd, unsigned long arg)
{
	pr_err("%s: not supported\n", __func__);
	return -ENODEV;
}

static unsigned int smuxld_poll(struct tty_struct *tty, struct file *file,
		struct poll_table_struct *tbl)
{
	pr_err("%s: not supported\n", __func__);
	return -ENODEV;
}

static void smuxld_write_wakeup(struct tty_struct *tty)
{
	pr_err("%s: not supported\n", __func__);
}

static struct tty_ldisc_ops smux_ldisc_ops = {
	.owner = THIS_MODULE,
	.magic = TTY_LDISC_MAGIC,
	.name = "n_smux",
	.open = smuxld_open,
	.close = smuxld_close,
	.flush_buffer = smuxld_flush_buffer,
	.chars_in_buffer = smuxld_chars_in_buffer,
	.read = smuxld_read,
	.write = smuxld_write,
	.ioctl = smuxld_ioctl,
	.poll = smuxld_poll,
	.receive_buf = smuxld_receive_buf,
	.write_wakeup = smuxld_write_wakeup,
};
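/*
 * Illustrative user-space sketch (not part of this driver): a control
 * process attaches this line discipline to the HS-UART port with the
 * TIOCSETD ioctl.  The device node name and error handling below are
 * assumptions for the example only, and it assumes the N_SMUX ldisc
 * number is visible to user space.
 *
 *	int fd = open("/dev/ttyHS0", O_RDWR | O_NOCTTY);
 *	int ldisc = N_SMUX;
 *
 *	if (fd < 0 || ioctl(fd, TIOCSETD, &ldisc) < 0)
 *		perror("attach n_smux");
 */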

static int __init smux_init(void)
{
	int ret;

	mutex_init(&smux.mutex_lha0);

	spin_lock_init(&smux.rx_lock_lha1);
	smux.rx_state = SMUX_RX_IDLE;
	smux.power_state = SMUX_PWR_OFF;
	smux.pwr_wakeup_delay_us = 1;
	smux.powerdown_enabled = 0;
	smux.power_ctl_remote_req_received = 0;
	INIT_LIST_HEAD(&smux.power_queue);
	smux.rx_activity_flag = 0;
	smux.tx_activity_flag = 0;
	smux.recv_len = 0;
	smux.tty = NULL;
	smux.ld_open_count = 0;
	smux.in_reset = 0;
	smux.is_initialized = 1;
	smux_byte_loopback = 0;

	spin_lock_init(&smux.tx_lock_lha2);
	INIT_LIST_HEAD(&smux.lch_tx_ready_list);

	ret = tty_register_ldisc(N_SMUX, &smux_ldisc_ops);
	if (ret != 0) {
		pr_err("%s: error %d registering line discipline\n",
			__func__, ret);
		return ret;
	}

	subsys_notif_register_notifier("external_modem", &ssr_notifier);

	ret = lch_init();
	if (ret != 0) {
		pr_err("%s: lch_init failed\n", __func__);
		return ret;
	}

	return 0;
}

static void __exit smux_exit(void)
{
	int ret;

	ret = tty_unregister_ldisc(N_SMUX);
	if (ret != 0) {
		pr_err("%s: error %d unregistering line discipline\n",
			__func__, ret);
		return;
	}
}

module_init(smux_init);
module_exit(smux_exit);

MODULE_DESCRIPTION("Serial Mux TTY Line Discipline");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_LDISC(N_SMUX);