/* drivers/tty/n_smux.c
 *
 * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/tty_driver.h>
#include <linux/smux.h>
#include <linux/list.h>
#include <linux/kfifo.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <mach/subsystem_notif.h>
#include <mach/subsystem_restart.h>
#include <mach/msm_serial_hs.h>
#include "smux_private.h"
#include "smux_loopback.h"

#define SMUX_NOTIFY_FIFO_SIZE	128
#define SMUX_TX_QUEUE_SIZE	256
#define SMUX_PKT_LOG_SIZE	80

/* Maximum size we can accept in a single RX buffer */
#define TTY_RECEIVE_ROOM	65536
#define TTY_BUFFER_FULL_WAIT_MS	50

/* maximum sleep time between wakeup attempts */
#define SMUX_WAKEUP_DELAY_MAX	(1 << 20)

/* minimum delay for scheduling delayed work */
#define SMUX_WAKEUP_DELAY_MIN	(1 << 15)

/* inactivity timeout for no rx/tx activity */
#define SMUX_INACTIVITY_TIMEOUT_MS	1000000

/* RX get_rx_buffer retry timeout values */
#define SMUX_RX_RETRY_MIN_MS	(1 << 0)  /* 1 ms */
#define SMUX_RX_RETRY_MAX_MS	(1 << 10) /* 1024 ms */

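/* Debug mask bit values for the debug_mask module parameter */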
enum {
	MSM_SMUX_DEBUG = 1U << 0,
	MSM_SMUX_INFO = 1U << 1,
	MSM_SMUX_POWER_INFO = 1U << 2,
	MSM_SMUX_PKT = 1U << 3,
};

static int smux_debug_mask;
module_param_named(debug_mask, smux_debug_mask,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);

/* Simulated wakeup used for testing */
int smux_byte_loopback;
module_param_named(byte_loopback, smux_byte_loopback,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);
int smux_simulate_wakeup_delay = 1;
module_param_named(simulate_wakeup_delay, smux_simulate_wakeup_delay,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);

#define SMUX_DBG(x...) do {				\
	if (smux_debug_mask & MSM_SMUX_DEBUG)		\
		pr_info(x);				\
} while (0)

#define SMUX_PWR(x...) do {				\
	if (smux_debug_mask & MSM_SMUX_POWER_INFO)	\
		pr_info(x);				\
} while (0)

#define SMUX_PWR_PKT_RX(pkt) do {			\
	if (smux_debug_mask & MSM_SMUX_POWER_INFO)	\
		smux_log_pkt(pkt, 1);			\
} while (0)

#define SMUX_PWR_PKT_TX(pkt) do {			\
	if (smux_debug_mask & MSM_SMUX_POWER_INFO) {	\
		if (pkt->hdr.cmd == SMUX_CMD_BYTE &&	\
			pkt->hdr.flags == SMUX_WAKEUP_ACK)	\
			pr_info("smux: TX Wakeup ACK\n");	\
		else if (pkt->hdr.cmd == SMUX_CMD_BYTE &&	\
			pkt->hdr.flags == SMUX_WAKEUP_REQ)	\
			pr_info("smux: TX Wakeup REQ\n");	\
		else					\
			smux_log_pkt(pkt, 0);		\
	}						\
} while (0)

#define SMUX_PWR_BYTE_TX(pkt) do {			\
	if (smux_debug_mask & MSM_SMUX_POWER_INFO) {	\
		smux_log_pkt(pkt, 0);			\
	}						\
} while (0)

#define SMUX_LOG_PKT_RX(pkt) do {			\
	if (smux_debug_mask & MSM_SMUX_PKT)		\
		smux_log_pkt(pkt, 1);			\
} while (0)

#define SMUX_LOG_PKT_TX(pkt) do {			\
	if (smux_debug_mask & MSM_SMUX_PKT)		\
		smux_log_pkt(pkt, 0);			\
} while (0)

/**
 * Return true if channel is fully opened (both
 * local and remote sides are in the OPENED state).
 */
#define IS_FULLY_OPENED(ch) \
	(ch && (ch)->local_state == SMUX_LCH_LOCAL_OPENED \
	 && (ch)->remote_state == SMUX_LCH_REMOTE_OPENED)

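/* Child platform devices registered for SMUX client drivers */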
static struct platform_device smux_devs[] = {
	{.name = "SMUX_CTL", .id = -1},
	{.name = "SMUX_RMNET", .id = -1},
	{.name = "SMUX_DUN_DATA_HSUART", .id = 0},
	{.name = "SMUX_RMNET_DATA_HSUART", .id = 1},
	{.name = "SMUX_RMNET_CTL_HSUART", .id = 0},
	{.name = "SMUX_DIAG", .id = -1},
};

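/* SMUX_CMD_STATUS flag bits (modem status lines and logical flow control) */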
enum {
	SMUX_CMD_STATUS_RTC = 1 << 0,
	SMUX_CMD_STATUS_RTR = 1 << 1,
	SMUX_CMD_STATUS_RI = 1 << 2,
	SMUX_CMD_STATUS_DCD = 1 << 3,
	SMUX_CMD_STATUS_FLOW_CNTL = 1 << 4,
};

/* Channel mode */
enum {
	SMUX_LCH_MODE_NORMAL,
	SMUX_LCH_MODE_LOCAL_LOOPBACK,
	SMUX_LCH_MODE_REMOTE_LOOPBACK,
};

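/* RX state machine states (see smux.rx_state) */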
enum {
	SMUX_RX_IDLE,
	SMUX_RX_MAGIC,
	SMUX_RX_HDR,
	SMUX_RX_PAYLOAD,
	SMUX_RX_FAILURE,
};

/**
 * Power states.
 *
 * The _FLUSH states are internal transitional states and are not part of the
 * official state machine.
 */
enum {
	SMUX_PWR_OFF,
	SMUX_PWR_TURNING_ON,
	SMUX_PWR_ON,
	SMUX_PWR_TURNING_OFF_FLUSH, /* power-off req/ack in TX queue */
	SMUX_PWR_TURNING_OFF,
	SMUX_PWR_OFF_FLUSH,
};

/**
 * Logical Channel Structure.  One instance per channel.
 *
 * Locking Hierarchy
 * Each lock has a suffix that describes the locking level.  If multiple
 * locks are required, only locks with increasing hierarchy numbers may be
 * acquired, which prevents deadlock.
 *
 * Locking Example
 * If state_lock_lhb1 is currently held and the TX list needs to be
 * manipulated, then tx_lock_lhb2 may be locked since its locking hierarchy
 * is greater.  However, if tx_lock_lhb2 is held, then state_lock_lhb1 may
 * not be acquired since it would result in a deadlock.
 *
 * Note that the Line Discipline locks (*_lha) should always be acquired
 * before the logical channel locks.
 */
struct smux_lch_t {
	/* channel state */
	spinlock_t state_lock_lhb1;
	uint8_t lcid;
	unsigned local_state;
	unsigned local_mode;
	uint8_t local_tiocm;
	unsigned options;

	unsigned remote_state;
	unsigned remote_mode;
	uint8_t remote_tiocm;

	int tx_flow_control;
	int rx_flow_control_auto;
	int rx_flow_control_client;

	/* client callbacks and private data */
	void *priv;
	void (*notify)(void *priv, int event_type, const void *metadata);
	int (*get_rx_buffer)(void *priv, void **pkt_priv, void **buffer,
							int size);

	/* RX Info */
	struct list_head rx_retry_queue;
	unsigned rx_retry_queue_cnt;
	struct delayed_work rx_retry_work;

	/* TX Info */
	spinlock_t tx_lock_lhb2;
	struct list_head tx_queue;
	struct list_head tx_ready_list;
	unsigned tx_pending_data_cnt;
	unsigned notify_lwm;
};

union notifier_metadata {
	struct smux_meta_disconnected disconnected;
	struct smux_meta_read read;
	struct smux_meta_write write;
	struct smux_meta_tiocm tiocm;
};

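/*
 * Deferred client notification; pointers to these handles are queued
 * through smux_notify_fifo and delivered by smux_notify_local_fn().
 */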
struct smux_notify_handle {
	void (*notify)(void *priv, int event_type, const void *metadata);
	void *priv;
	int event_type;
	union notifier_metadata *metadata;
};

/**
 * Get RX Buffer Retry structure.
 *
 * This is used for clients that are unable to provide an RX buffer
 * immediately.  This structure temporarily holds the packet data while
 * the buffer request is retried.
 */
struct smux_rx_pkt_retry {
	struct smux_pkt_t *pkt;
	struct list_head rx_retry_list;
	unsigned timeout_in_ms;
};

/**
 * Receive worker data structure.
 *
 * One instance is created for every call to smux_rx_state_machine.
 */
struct smux_rx_worker_data {
	const unsigned char *data;
	int len;
	int flag;

	struct work_struct work;
	struct completion work_complete;
};

/**
 * Line discipline and module structure.
 *
 * Only one instance since multiple instances of line discipline are not
 * allowed.
 */
struct smux_ldisc_t {
	struct mutex mutex_lha0;

	int is_initialized;
	int in_reset;
	int ld_open_count;
	struct tty_struct *tty;

	/* RX State Machine (single-threaded access by smux_rx_wq) */
	unsigned char recv_buf[SMUX_MAX_PKT_SIZE];
	unsigned int recv_len;
	unsigned int pkt_remain;
	unsigned rx_state;

	/* RX Activity - accessed by multiple threads */
	spinlock_t rx_lock_lha1;
	unsigned rx_activity_flag;

	/* TX / Power */
	spinlock_t tx_lock_lha2;
	struct list_head lch_tx_ready_list;
	unsigned power_state;
	unsigned pwr_wakeup_delay_us;
	unsigned tx_activity_flag;
	unsigned powerdown_enabled;
	unsigned power_ctl_remote_req_received;
	struct list_head power_queue;
};


/* data structures */
static struct smux_lch_t smux_lch[SMUX_NUM_LOGICAL_CHANNELS];
static struct smux_ldisc_t smux;
static const char *tty_error_type[] = {
	[TTY_NORMAL] = "normal",
	[TTY_OVERRUN] = "overrun",
	[TTY_BREAK] = "break",
	[TTY_PARITY] = "parity",
	[TTY_FRAME] = "framing",
};

static const char *smux_cmds[] = {
	[SMUX_CMD_DATA] = "DATA",
	[SMUX_CMD_OPEN_LCH] = "OPEN",
	[SMUX_CMD_CLOSE_LCH] = "CLOSE",
	[SMUX_CMD_STATUS] = "STATUS",
	[SMUX_CMD_PWR_CTL] = "PWR",
	[SMUX_CMD_BYTE] = "Raw Byte",
};

static void smux_notify_local_fn(struct work_struct *work);
static DECLARE_WORK(smux_notify_local, smux_notify_local_fn);

static struct workqueue_struct *smux_notify_wq;
static size_t handle_size;
static struct kfifo smux_notify_fifo;
static int queued_fifo_notifications;
static DEFINE_SPINLOCK(notify_lock_lhc1);

static struct workqueue_struct *smux_tx_wq;
static struct workqueue_struct *smux_rx_wq;
static void smux_tx_worker(struct work_struct *work);
static DECLARE_WORK(smux_tx_work, smux_tx_worker);

static void smux_wakeup_worker(struct work_struct *work);
static void smux_rx_retry_worker(struct work_struct *work);
static void smux_rx_worker(struct work_struct *work);
static DECLARE_WORK(smux_wakeup_work, smux_wakeup_worker);
static DECLARE_DELAYED_WORK(smux_wakeup_delayed_work, smux_wakeup_worker);

static void smux_inactivity_worker(struct work_struct *work);
static DECLARE_WORK(smux_inactivity_work, smux_inactivity_worker);
static DECLARE_DELAYED_WORK(smux_delayed_inactivity_work,
		smux_inactivity_worker);

static long msm_smux_tiocm_get_atomic(struct smux_lch_t *ch);
static void list_channel(struct smux_lch_t *ch);
static int smux_send_status_cmd(struct smux_lch_t *ch);
static int smux_dispatch_rx_pkt(struct smux_pkt_t *pkt);
static void smux_flush_tty(void);
static void smux_purge_ch_tx_queue(struct smux_lch_t *ch);
static int schedule_notify(uint8_t lcid, int event,
			const union notifier_metadata *metadata);
static int ssr_notifier_cb(struct notifier_block *this,
				unsigned long code,
				void *data);
static void smux_uart_power_on_atomic(void);
static int smux_rx_flow_control_updated(struct smux_lch_t *ch);

/**
 * Convert TTY Error Flags to string for logging purposes.
 *
 * @flag    TTY_* flag
 * @returns String description or NULL if unknown
 */
static const char *tty_flag_to_str(unsigned flag)
{
	if (flag < ARRAY_SIZE(tty_error_type))
		return tty_error_type[flag];
	return NULL;
}

/**
 * Convert SMUX Command to string for logging purposes.
 *
 * @cmd     SMUX command
 * @returns String description or NULL if unknown
 */
static const char *cmd_to_str(unsigned cmd)
{
	if (cmd < ARRAY_SIZE(smux_cmds))
		return smux_cmds[cmd];
	return NULL;
}

/**
 * Set the reset state due to an unrecoverable failure.
 */
static void smux_enter_reset(void)
{
	pr_err("%s: unrecoverable failure, waiting for ssr\n", __func__);
	smux.in_reset = 1;
}

static int lch_init(void)
{
	unsigned int id;
	struct smux_lch_t *ch;
	int i = 0;

	handle_size = sizeof(struct smux_notify_handle *);

	smux_notify_wq = create_singlethread_workqueue("smux_notify_wq");
	smux_tx_wq = create_singlethread_workqueue("smux_tx_wq");
	smux_rx_wq = create_singlethread_workqueue("smux_rx_wq");

	/* create_singlethread_workqueue() returns NULL on failure */
	if (!smux_notify_wq || !smux_tx_wq || !smux_rx_wq) {
		SMUX_DBG("%s: create_singlethread_workqueue ENOMEM\n",
							__func__);
		return -ENOMEM;
	}

	i |= kfifo_alloc(&smux_notify_fifo,
			SMUX_NOTIFY_FIFO_SIZE * handle_size,
			GFP_KERNEL);
	i |= smux_loopback_init();

	if (i) {
		pr_err("%s: out of memory error\n", __func__);
		return -ENOMEM;
	}

	for (id = 0; id < SMUX_NUM_LOGICAL_CHANNELS; id++) {
		ch = &smux_lch[id];

		spin_lock_init(&ch->state_lock_lhb1);
		ch->lcid = id;
		ch->local_state = SMUX_LCH_LOCAL_CLOSED;
		ch->local_mode = SMUX_LCH_MODE_NORMAL;
		ch->local_tiocm = 0x0;
		ch->options = SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP;
		ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
		ch->remote_mode = SMUX_LCH_MODE_NORMAL;
		ch->remote_tiocm = 0x0;
		ch->tx_flow_control = 0;
		ch->rx_flow_control_auto = 0;
		ch->rx_flow_control_client = 0;
		ch->priv = 0;
		ch->notify = 0;
		ch->get_rx_buffer = 0;

		INIT_LIST_HEAD(&ch->rx_retry_queue);
		ch->rx_retry_queue_cnt = 0;
		INIT_DELAYED_WORK(&ch->rx_retry_work, smux_rx_retry_worker);

		spin_lock_init(&ch->tx_lock_lhb2);
		INIT_LIST_HEAD(&ch->tx_queue);
		INIT_LIST_HEAD(&ch->tx_ready_list);
		ch->tx_pending_data_cnt = 0;
		ch->notify_lwm = 0;
	}

	return 0;
}

/**
 * Empty and cleanup all SMUX logical channels for subsystem restart or line
 * discipline disconnect.
 */
static void smux_lch_purge(void)
{
	struct smux_lch_t *ch;
	unsigned long flags;
	int i;

	/* Empty TX ready list */
	spin_lock_irqsave(&smux.tx_lock_lha2, flags);
	while (!list_empty(&smux.lch_tx_ready_list)) {
		SMUX_DBG("%s: emptying ready list %p\n",
				__func__, smux.lch_tx_ready_list.next);
		ch = list_first_entry(&smux.lch_tx_ready_list,
						struct smux_lch_t,
						tx_ready_list);
		list_del(&ch->tx_ready_list);
		INIT_LIST_HEAD(&ch->tx_ready_list);
	}

	/* Purge Power Queue */
	while (!list_empty(&smux.power_queue)) {
		struct smux_pkt_t *pkt;

		pkt = list_first_entry(&smux.power_queue,
						struct smux_pkt_t,
						list);
		list_del(&pkt->list);
		SMUX_DBG("%s: emptying power queue pkt=%p\n",
				__func__, pkt);
		smux_free_pkt(pkt);
	}
	spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);

	/* Close all ports */
	for (i = 0; i < SMUX_NUM_LOGICAL_CHANNELS; i++) {
		ch = &smux_lch[i];
		SMUX_DBG("%s: cleaning up lcid %d\n", __func__, i);

		spin_lock_irqsave(&ch->state_lock_lhb1, flags);

		/* Purge TX queue */
		spin_lock(&ch->tx_lock_lhb2);
		smux_purge_ch_tx_queue(ch);
		spin_unlock(&ch->tx_lock_lhb2);

		/* Notify user of disconnect and reset channel state */
		if (ch->local_state == SMUX_LCH_LOCAL_OPENED ||
			ch->local_state == SMUX_LCH_LOCAL_CLOSING) {
			union notifier_metadata meta;

			meta.disconnected.is_ssr = smux.in_reset;
			schedule_notify(ch->lcid, SMUX_DISCONNECTED, &meta);
		}

		ch->local_state = SMUX_LCH_LOCAL_CLOSED;
		ch->local_mode = SMUX_LCH_MODE_NORMAL;
		ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
		ch->remote_mode = SMUX_LCH_MODE_NORMAL;
		ch->tx_flow_control = 0;
		ch->rx_flow_control_auto = 0;
		ch->rx_flow_control_client = 0;

		/* Purge RX retry queue */
		if (ch->rx_retry_queue_cnt)
			queue_delayed_work(smux_rx_wq, &ch->rx_retry_work, 0);

		spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
	}

	/* Flush TX/RX workqueues */
	SMUX_DBG("%s: flushing tx wq\n", __func__);
	flush_workqueue(smux_tx_wq);
	SMUX_DBG("%s: flushing rx wq\n", __func__);
	flush_workqueue(smux_rx_wq);
}

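/**
 * Verify that a logical channel ID is valid.
 *
 * @lcid Logical channel ID
 *
 * @returns 0 if the ID is valid, -ENXIO otherwise
 */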
int smux_assert_lch_id(uint32_t lcid)
{
	if (lcid >= SMUX_NUM_LOGICAL_CHANNELS)
		return -ENXIO;
	else
		return 0;
}

/**
 * Log packet information for debug purposes.
 *
 * @pkt     Packet to log
 * @is_recv 1 = RX packet; 0 = TX Packet
 *
 * [DIR][LCID] [LOCAL_STATE][LOCAL_MODE]:[REMOTE_STATE][REMOTE_MODE] PKT Info
 *
 * PKT Info:
 *   [CMD] flags [flags] len [PAYLOAD_LEN]:[PAD_LEN] [Payload hex bytes]
 *
 * Direction:    R = Receive, S = Send
 * Local State:  C = Closed; c = closing; o = opening; O = Opened
 * Local Mode:   L = Local loopback; R = Remote loopback; N = Normal
 * Remote State: C = Closed; O = Opened
 * Remote Mode:  R = Remote loopback; N = Normal
 */
static void smux_log_pkt(struct smux_pkt_t *pkt, int is_recv)
{
	char logbuf[SMUX_PKT_LOG_SIZE];
	char cmd_extra[16];
	int i = 0;
	int count;
	int len;
	char local_state;
	char local_mode;
	char remote_state;
	char remote_mode;
	struct smux_lch_t *ch = NULL;
	unsigned char *data;

	if (!smux_assert_lch_id(pkt->hdr.lcid))
		ch = &smux_lch[pkt->hdr.lcid];

	if (ch) {
		switch (ch->local_state) {
		case SMUX_LCH_LOCAL_CLOSED:
			local_state = 'C';
			break;
		case SMUX_LCH_LOCAL_OPENING:
			local_state = 'o';
			break;
		case SMUX_LCH_LOCAL_OPENED:
			local_state = 'O';
			break;
		case SMUX_LCH_LOCAL_CLOSING:
			local_state = 'c';
			break;
		default:
			local_state = 'U';
			break;
		}

		switch (ch->local_mode) {
		case SMUX_LCH_MODE_LOCAL_LOOPBACK:
			local_mode = 'L';
			break;
		case SMUX_LCH_MODE_REMOTE_LOOPBACK:
			local_mode = 'R';
			break;
		case SMUX_LCH_MODE_NORMAL:
			local_mode = 'N';
			break;
		default:
			local_mode = 'U';
			break;
		}

		switch (ch->remote_state) {
		case SMUX_LCH_REMOTE_CLOSED:
			remote_state = 'C';
			break;
		case SMUX_LCH_REMOTE_OPENED:
			remote_state = 'O';
			break;
		default:
			remote_state = 'U';
			break;
		}

		switch (ch->remote_mode) {
		case SMUX_LCH_MODE_REMOTE_LOOPBACK:
			remote_mode = 'R';
			break;
		case SMUX_LCH_MODE_NORMAL:
			remote_mode = 'N';
			break;
		default:
			remote_mode = 'U';
			break;
		}
	} else {
		/* broadcast channel */
		local_state = '-';
		local_mode = '-';
		remote_state = '-';
		remote_mode = '-';
	}

	/* determine command type (ACK, etc) */
	cmd_extra[0] = '\0';
	switch (pkt->hdr.cmd) {
	case SMUX_CMD_OPEN_LCH:
		if (pkt->hdr.flags & SMUX_CMD_OPEN_ACK)
			snprintf(cmd_extra, sizeof(cmd_extra), " ACK");
		break;
	case SMUX_CMD_CLOSE_LCH:
		if (pkt->hdr.flags & SMUX_CMD_CLOSE_ACK)
			snprintf(cmd_extra, sizeof(cmd_extra), " ACK");
		break;
	case SMUX_CMD_PWR_CTL:
		if (pkt->hdr.flags & SMUX_CMD_PWR_CTL_ACK)
			snprintf(cmd_extra, sizeof(cmd_extra), " ACK");
		break;
	}

	i += snprintf(logbuf + i, SMUX_PKT_LOG_SIZE - i,
		"smux: %c%d %c%c:%c%c %s%s flags %x len %d:%d ",
		is_recv ? 'R' : 'S', pkt->hdr.lcid,
		local_state, local_mode,
		remote_state, remote_mode,
		cmd_to_str(pkt->hdr.cmd), cmd_extra, pkt->hdr.flags,
		pkt->hdr.payload_len, pkt->hdr.pad_len);

	len = (pkt->hdr.payload_len > 16) ? 16 : pkt->hdr.payload_len;
	data = (unsigned char *)pkt->payload;
	for (count = 0; count < len; count++)
		i += snprintf(logbuf + i, SMUX_PKT_LOG_SIZE - i,
				"%02x ", (unsigned)data[count]);

	pr_info("%s\n", logbuf);
}

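/**
 * Workqueue handler that drains the notification FIFO and delivers the
 * queued event callbacks to clients outside of atomic context.
 */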
static void smux_notify_local_fn(struct work_struct *work)
{
	struct smux_notify_handle *notify_handle = NULL;
	union notifier_metadata *metadata = NULL;
	unsigned long flags;
	int i;

	for (;;) {
		/* retrieve notification */
		spin_lock_irqsave(&notify_lock_lhc1, flags);
		if (kfifo_len(&smux_notify_fifo) >= handle_size) {
			i = kfifo_out(&smux_notify_fifo,
				&notify_handle,
				handle_size);
			if (i != handle_size) {
				pr_err("%s: unable to retrieve handle %d expected %d\n",
					__func__, i, handle_size);
				spin_unlock_irqrestore(&notify_lock_lhc1, flags);
				break;
			}
		} else {
			spin_unlock_irqrestore(&notify_lock_lhc1, flags);
			break;
		}
		--queued_fifo_notifications;
		spin_unlock_irqrestore(&notify_lock_lhc1, flags);

		/* notify client */
		metadata = notify_handle->metadata;
		notify_handle->notify(notify_handle->priv,
			notify_handle->event_type,
			metadata);

		kfree(metadata);
		kfree(notify_handle);
	}
}

/**
 * Initialize existing packet.
 */
void smux_init_pkt(struct smux_pkt_t *pkt)
{
	memset(pkt, 0x0, sizeof(*pkt));
	pkt->hdr.magic = SMUX_MAGIC;
	INIT_LIST_HEAD(&pkt->list);
}

/**
 * Allocate and initialize packet.
 *
 * If a payload is needed, either set it directly and ensure that it's freed
 * or use smux_alloc_pkt_payload() to allocate a payload that will be freed
 * automatically when smux_free_pkt() is called.
 */
struct smux_pkt_t *smux_alloc_pkt(void)
{
	struct smux_pkt_t *pkt;

	/* Consider a free list implementation instead of kmalloc */
	pkt = kmalloc(sizeof(struct smux_pkt_t), GFP_ATOMIC);
	if (!pkt) {
		pr_err("%s: out of memory\n", __func__);
		return NULL;
	}
	smux_init_pkt(pkt);
	pkt->allocated = 1;

	return pkt;
}

/**
 * Free packet.
 *
 * @pkt Packet to free (may be NULL)
 *
 * If the payload was allocated using smux_alloc_pkt_payload(), then it is
 * freed as well.  Otherwise, the caller is responsible for freeing the
 * payload.
 */
void smux_free_pkt(struct smux_pkt_t *pkt)
{
	if (pkt) {
		if (pkt->free_payload)
			kfree(pkt->payload);
		if (pkt->allocated)
			kfree(pkt);
	}
}

/**
 * Allocate packet payload.
 *
 * @pkt Packet to add payload to
 *
 * @returns 0 on success, <0 upon error
 *
 * A flag is set to signal smux_free_pkt() to free the payload.
 */
int smux_alloc_pkt_payload(struct smux_pkt_t *pkt)
{
	if (!pkt)
		return -EINVAL;

	pkt->payload = kmalloc(pkt->hdr.payload_len, GFP_ATOMIC);
	pkt->free_payload = 1;
	if (!pkt->payload) {
		pr_err("%s: unable to malloc %d bytes for payload\n",
				__func__, pkt->hdr.payload_len);
		return -ENOMEM;
	}

	return 0;
}

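/**
 * Schedule a client notification for delivery by the notify workqueue.
 *
 * @lcid     Logical channel ID
 * @event    Event type for the client notify callback
 * @metadata Event metadata (copied here; may be NULL)
 *
 * @returns 0 for success, < 0 upon error
 */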
static int schedule_notify(uint8_t lcid, int event,
			const union notifier_metadata *metadata)
{
	struct smux_notify_handle *notify_handle = 0;
	union notifier_metadata *meta_copy = 0;
	struct smux_lch_t *ch;
	int i;
	unsigned long flags;
	int ret = 0;

	ch = &smux_lch[lcid];
	notify_handle = kzalloc(sizeof(struct smux_notify_handle),
						GFP_ATOMIC);
	if (!notify_handle) {
		pr_err("%s: out of memory\n", __func__);
		ret = -ENOMEM;
		goto free_out;
	}

	notify_handle->notify = ch->notify;
	notify_handle->priv = ch->priv;
	notify_handle->event_type = event;
	if (metadata) {
		meta_copy = kzalloc(sizeof(union notifier_metadata),
							GFP_ATOMIC);
		if (!meta_copy) {
			pr_err("%s: out of memory\n", __func__);
			ret = -ENOMEM;
			goto free_out;
		}
		*meta_copy = *metadata;
		notify_handle->metadata = meta_copy;
	} else {
		notify_handle->metadata = NULL;
	}

	spin_lock_irqsave(&notify_lock_lhc1, flags);
	i = kfifo_avail(&smux_notify_fifo);
	if (i < handle_size) {
		pr_err("%s: fifo full error %d expected %d\n",
					__func__, i, handle_size);
		ret = -ENOMEM;
		goto unlock_out;
	}

	i = kfifo_in(&smux_notify_fifo, &notify_handle, handle_size);
	if (i < 0 || i != handle_size) {
		pr_err("%s: fifo not available error %d (expected %d)\n",
				__func__, i, handle_size);
		ret = -ENOSPC;
		goto unlock_out;
	}
	++queued_fifo_notifications;

unlock_out:
	spin_unlock_irqrestore(&notify_lock_lhc1, flags);

free_out:
	queue_work(smux_notify_wq, &smux_notify_local);
	if (ret < 0 && notify_handle) {
		kfree(notify_handle->metadata);
		kfree(notify_handle);
	}
	return ret;
}

/**
 * Returns the serialized size of a packet.
 *
 * @pkt Packet to serialize
 *
 * @returns Serialized length of packet
 */
static unsigned int smux_serialize_size(struct smux_pkt_t *pkt)
{
	unsigned int size;

	size = sizeof(struct smux_hdr_t);
	size += pkt->hdr.payload_len;
	size += pkt->hdr.pad_len;

	return size;
}

/**
 * Serialize packet @pkt into output buffer @data.
 *
 * @pkt     Packet to serialize
 * @out     Destination buffer pointer
 * @out_len Size of serialized packet
 *
 * @returns 0 for success
 */
int smux_serialize(struct smux_pkt_t *pkt, char *out,
					unsigned int *out_len)
{
	char *data_start = out;

	if (smux_serialize_size(pkt) > SMUX_MAX_PKT_SIZE) {
		pr_err("%s: packet size %d too big\n",
				__func__, smux_serialize_size(pkt));
		return -E2BIG;
	}

	memcpy(out, &pkt->hdr, sizeof(struct smux_hdr_t));
	out += sizeof(struct smux_hdr_t);
	if (pkt->payload) {
		memcpy(out, pkt->payload, pkt->hdr.payload_len);
		out += pkt->hdr.payload_len;
	}
	if (pkt->hdr.pad_len) {
		memset(out, 0x0, pkt->hdr.pad_len);
		out += pkt->hdr.pad_len;
	}
	*out_len = out - data_start;
	return 0;
}

/**
 * Serialize header and provide pointer to the data.
 *
 * @pkt          Packet
 * @out[out]     Pointer to the serialized header data
 * @out_len[out] Pointer to the serialized header length
 */
static void smux_serialize_hdr(struct smux_pkt_t *pkt, char **out,
					unsigned int *out_len)
{
	*out = (char *)&pkt->hdr;
	*out_len = sizeof(struct smux_hdr_t);
}

/**
 * Serialize payload and provide pointer to the data.
 *
 * @pkt          Packet
 * @out[out]     Pointer to the serialized payload data
 * @out_len[out] Pointer to the serialized payload length
 */
static void smux_serialize_payload(struct smux_pkt_t *pkt, char **out,
					unsigned int *out_len)
{
	*out = pkt->payload;
	*out_len = pkt->hdr.payload_len;
}

/**
 * Serialize padding and provide pointer to the data.
 *
 * @pkt          Packet
 * @out[out]     Pointer to the serialized padding (always NULL)
 * @out_len[out] Pointer to the serialized padding length
 *
 * Since the padding field value is undefined, only the size of the padding
 * (@out_len) is set and the buffer pointer (@out) will always be NULL.
 */
static void smux_serialize_padding(struct smux_pkt_t *pkt, char **out,
					unsigned int *out_len)
{
	*out = NULL;
	*out_len = pkt->hdr.pad_len;
}

/**
 * Write data to TTY framework and handle breaking the writes up if needed.
 *
 * @data Data to write
 * @len  Length of data
 *
 * @returns 0 for success, < 0 for failure
 */
static int write_to_tty(char *data, unsigned len)
{
	int data_written;

	if (!data)
		return 0;

	while (len > 0 && !smux.in_reset) {
		data_written = smux.tty->ops->write(smux.tty, data, len);
		if (data_written >= 0) {
			len -= data_written;
			data += data_written;
		} else {
			pr_err("%s: TTY write returned error %d\n",
					__func__, data_written);
			return data_written;
		}

		if (len)
			tty_wait_until_sent(smux.tty,
				msecs_to_jiffies(TTY_BUFFER_FULL_WAIT_MS));
	}
	return 0;
}

/**
 * Write packet to TTY.
 *
 * @pkt packet to write
 *
 * @returns 0 on success
 */
static int smux_tx_tty(struct smux_pkt_t *pkt)
{
	char *data;
	unsigned int len;
	int ret;

	if (!smux.tty) {
		pr_err("%s: TTY not initialized\n", __func__);
		return -ENOTTY;
	}

	if (pkt->hdr.cmd == SMUX_CMD_BYTE) {
		SMUX_DBG("%s: tty send single byte\n", __func__);
		ret = write_to_tty(&pkt->hdr.flags, 1);
		return ret;
	}

	smux_serialize_hdr(pkt, &data, &len);
	ret = write_to_tty(data, len);
	if (ret) {
		pr_err("%s: failed %d to write header %d\n",
				__func__, ret, len);
		return ret;
	}

	smux_serialize_payload(pkt, &data, &len);
	ret = write_to_tty(data, len);
	if (ret) {
		pr_err("%s: failed %d to write payload %d\n",
				__func__, ret, len);
		return ret;
	}

	smux_serialize_padding(pkt, &data, &len);
	while (len > 0) {
		char zero = 0x0;
		ret = write_to_tty(&zero, 1);
		if (ret) {
			pr_err("%s: failed %d to write padding %d\n",
					__func__, ret, len);
			return ret;
		}
		--len;
	}
	return 0;
}

/**
 * Send a single character.
 *
 * @ch Character to send
 */
static void smux_send_byte(char ch)
{
	struct smux_pkt_t *pkt;

	pkt = smux_alloc_pkt();
	if (!pkt) {
		pr_err("%s: alloc failure for byte %x\n", __func__, ch);
		return;
	}
	pkt->hdr.cmd = SMUX_CMD_BYTE;
	pkt->hdr.flags = ch;
	pkt->hdr.lcid = SMUX_BROADCAST_LCID;

	list_add_tail(&pkt->list, &smux.power_queue);
	queue_work(smux_tx_wq, &smux_tx_work);
}

/**
 * Receive a single-character packet (used for internal testing).
 *
 * @ch   Character to receive
 * @lcid Logical channel ID for packet
 *
 * @returns 0 for success
 */
static int smux_receive_byte(char ch, int lcid)
{
	struct smux_pkt_t pkt;

	smux_init_pkt(&pkt);
	pkt.hdr.lcid = lcid;
	pkt.hdr.cmd = SMUX_CMD_BYTE;
	pkt.hdr.flags = ch;

	return smux_dispatch_rx_pkt(&pkt);
}

/**
 * Queue packet for transmit.
 *
 * @pkt_ptr Packet to queue
 * @ch      Channel to queue packet on
 * @queue   Queue channel on ready list
 */
static void smux_tx_queue(struct smux_pkt_t *pkt_ptr, struct smux_lch_t *ch,
		int queue)
{
	unsigned long flags;

	SMUX_DBG("%s: queuing pkt %p\n", __func__, pkt_ptr);

	spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
	list_add_tail(&pkt_ptr->list, &ch->tx_queue);
	spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);

	if (queue)
		list_channel(ch);
}

/**
 * Handle receive OPEN ACK command.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_open_ack(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret;
	struct smux_lch_t *ch;
	int enable_powerdown = 0;

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];

	spin_lock(&ch->state_lock_lhb1);
	if (ch->local_state == SMUX_LCH_LOCAL_OPENING) {
		SMUX_DBG("lcid %d local state 0x%x -> 0x%x\n", lcid,
				ch->local_state,
				SMUX_LCH_LOCAL_OPENED);

		if (pkt->hdr.flags & SMUX_CMD_OPEN_POWER_COLLAPSE)
			enable_powerdown = 1;

		ch->local_state = SMUX_LCH_LOCAL_OPENED;
		if (ch->remote_state == SMUX_LCH_REMOTE_OPENED)
			schedule_notify(lcid, SMUX_CONNECTED, NULL);
		ret = 0;
	} else if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
		SMUX_DBG("Remote loopback OPEN ACK received\n");
		ret = 0;
	} else {
		pr_err("%s: lcid %d state 0x%x open ack invalid\n",
				__func__, lcid, ch->local_state);
		ret = -EINVAL;
	}
	spin_unlock(&ch->state_lock_lhb1);

	if (enable_powerdown) {
		spin_lock(&smux.tx_lock_lha2);
		if (!smux.powerdown_enabled) {
			smux.powerdown_enabled = 1;
			SMUX_DBG("%s: enabling power-collapse support\n",
					__func__);
		}
		spin_unlock(&smux.tx_lock_lha2);
	}

	return ret;
}

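/**
 * Handle receive CLOSE ACK command.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */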
static int smux_handle_close_ack(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret;
	struct smux_lch_t *ch;
	union notifier_metadata meta_disconnected;
	unsigned long flags;

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];
	meta_disconnected.disconnected.is_ssr = 0;

	spin_lock_irqsave(&ch->state_lock_lhb1, flags);

	if (ch->local_state == SMUX_LCH_LOCAL_CLOSING) {
		SMUX_DBG("lcid %d local state 0x%x -> 0x%x\n", lcid,
				SMUX_LCH_LOCAL_CLOSING,
				SMUX_LCH_LOCAL_CLOSED);
		ch->local_state = SMUX_LCH_LOCAL_CLOSED;
		if (ch->remote_state == SMUX_LCH_REMOTE_CLOSED)
			schedule_notify(lcid, SMUX_DISCONNECTED,
				&meta_disconnected);
		ret = 0;
	} else if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
		SMUX_DBG("Remote loopback CLOSE ACK received\n");
		ret = 0;
	} else {
		pr_err("%s: lcid %d state 0x%x close ack invalid\n",
				__func__, lcid, ch->local_state);
		ret = -EINVAL;
	}
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
	return ret;
}

/**
 * Handle receive OPEN command.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_open_cmd(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret;
	struct smux_lch_t *ch;
	struct smux_pkt_t *ack_pkt;
	unsigned long flags;
	int tx_ready = 0;
	int enable_powerdown = 0;

	if (pkt->hdr.flags & SMUX_CMD_OPEN_ACK)
		return smux_handle_rx_open_ack(pkt);

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];

	spin_lock_irqsave(&ch->state_lock_lhb1, flags);

	if (ch->remote_state == SMUX_LCH_REMOTE_CLOSED) {
		SMUX_DBG("lcid %d remote state 0x%x -> 0x%x\n", lcid,
				SMUX_LCH_REMOTE_CLOSED,
				SMUX_LCH_REMOTE_OPENED);

		ch->remote_state = SMUX_LCH_REMOTE_OPENED;
		if (pkt->hdr.flags & SMUX_CMD_OPEN_POWER_COLLAPSE)
			enable_powerdown = 1;

		/* Send Open ACK */
		ack_pkt = smux_alloc_pkt();
		if (!ack_pkt) {
			/* exit out to allow retrying this later */
			ret = -ENOMEM;
			goto out;
		}
		ack_pkt->hdr.cmd = SMUX_CMD_OPEN_LCH;
		ack_pkt->hdr.flags = SMUX_CMD_OPEN_ACK
			| SMUX_CMD_OPEN_POWER_COLLAPSE;
		ack_pkt->hdr.lcid = lcid;
		ack_pkt->hdr.payload_len = 0;
		ack_pkt->hdr.pad_len = 0;
		if (pkt->hdr.flags & SMUX_CMD_OPEN_REMOTE_LOOPBACK) {
			ch->remote_mode = SMUX_LCH_MODE_REMOTE_LOOPBACK;
			ack_pkt->hdr.flags |= SMUX_CMD_OPEN_REMOTE_LOOPBACK;
		}
		smux_tx_queue(ack_pkt, ch, 0);
		tx_ready = 1;

		if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
			/*
			 * Send an Open command to the remote side to
			 * simulate our local client doing it.
			 */
			ack_pkt = smux_alloc_pkt();
			if (ack_pkt) {
				ack_pkt->hdr.lcid = lcid;
				ack_pkt->hdr.cmd = SMUX_CMD_OPEN_LCH;
				ack_pkt->hdr.flags =
					SMUX_CMD_OPEN_POWER_COLLAPSE;
				ack_pkt->hdr.payload_len = 0;
				ack_pkt->hdr.pad_len = 0;
				smux_tx_queue(ack_pkt, ch, 0);
				tx_ready = 1;
			} else {
				pr_err("%s: Remote loopback allocation failure\n",
						__func__);
			}
		} else if (ch->local_state == SMUX_LCH_LOCAL_OPENED) {
			schedule_notify(lcid, SMUX_CONNECTED, NULL);
		}
		ret = 0;
	} else {
		pr_err("%s: lcid %d remote state 0x%x open invalid\n",
				__func__, lcid, ch->remote_state);
		ret = -EINVAL;
	}

out:
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);

	if (enable_powerdown) {
		spin_lock_irqsave(&smux.tx_lock_lha2, flags);
		if (!smux.powerdown_enabled) {
			smux.powerdown_enabled = 1;
			SMUX_DBG("%s: enabling power-collapse support\n",
					__func__);
		}
		spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
	}

	if (tx_ready)
		list_channel(ch);

	return ret;
}

/**
 * Handle receive CLOSE command.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_close_cmd(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret;
	struct smux_lch_t *ch;
	struct smux_pkt_t *ack_pkt;
	union notifier_metadata meta_disconnected;
	unsigned long flags;
	int tx_ready = 0;

	if (pkt->hdr.flags & SMUX_CMD_CLOSE_ACK)
		return smux_handle_close_ack(pkt);

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];
	meta_disconnected.disconnected.is_ssr = 0;

	spin_lock_irqsave(&ch->state_lock_lhb1, flags);
	if (ch->remote_state == SMUX_LCH_REMOTE_OPENED) {
		SMUX_DBG("lcid %d remote state 0x%x -> 0x%x\n", lcid,
				SMUX_LCH_REMOTE_OPENED,
				SMUX_LCH_REMOTE_CLOSED);

		ack_pkt = smux_alloc_pkt();
		if (!ack_pkt) {
			/* exit out to allow retrying this later */
			ret = -ENOMEM;
			goto out;
		}
		ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
		ack_pkt->hdr.cmd = SMUX_CMD_CLOSE_LCH;
		ack_pkt->hdr.flags = SMUX_CMD_CLOSE_ACK;
		ack_pkt->hdr.lcid = lcid;
		ack_pkt->hdr.payload_len = 0;
		ack_pkt->hdr.pad_len = 0;
		smux_tx_queue(ack_pkt, ch, 0);
		tx_ready = 1;

		if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
			/*
			 * Send a Close command to the remote side to simulate
			 * our local client doing it.
			 */
			ack_pkt = smux_alloc_pkt();
			if (ack_pkt) {
				ack_pkt->hdr.lcid = lcid;
				ack_pkt->hdr.cmd = SMUX_CMD_CLOSE_LCH;
				ack_pkt->hdr.flags = 0;
				ack_pkt->hdr.payload_len = 0;
				ack_pkt->hdr.pad_len = 0;
				smux_tx_queue(ack_pkt, ch, 0);
				tx_ready = 1;
			} else {
				pr_err("%s: Remote loopback allocation failure\n",
						__func__);
			}
		}

		if (ch->local_state == SMUX_LCH_LOCAL_CLOSED)
			schedule_notify(lcid, SMUX_DISCONNECTED,
				&meta_disconnected);
		ret = 0;
	} else {
		pr_err("%s: lcid %d remote state 0x%x close invalid\n",
				__func__, lcid, ch->remote_state);
		ret = -EINVAL;
	}
out:
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
	if (tx_ready)
		list_channel(ch);

	return ret;
}

/*
 * Handle receive DATA command.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_data_cmd(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret = 0;
	int do_retry = 0;
	int tx_ready = 0;
	int tmp;
	int rx_len;
	struct smux_lch_t *ch;
	union notifier_metadata metadata;
	int remote_loopback;
	struct smux_pkt_t *ack_pkt;
	unsigned long flags;

	if (!pkt || smux_assert_lch_id(pkt->hdr.lcid)) {
		ret = -ENXIO;
		goto out;
	}

	rx_len = pkt->hdr.payload_len;
	if (rx_len == 0) {
		ret = -EINVAL;
		goto out;
	}

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];
	spin_lock_irqsave(&ch->state_lock_lhb1, flags);
	remote_loopback = ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK;

	if (ch->local_state != SMUX_LCH_LOCAL_OPENED
		&& !remote_loopback) {
		pr_err("smux: ch %d error data on local state 0x%x\n",
					lcid, ch->local_state);
		ret = -EIO;
		spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
		goto out;
	}

	if (ch->remote_state != SMUX_LCH_REMOTE_OPENED) {
		pr_err("smux: ch %d error data on remote state 0x%x\n",
					lcid, ch->remote_state);
		ret = -EIO;
		spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
		goto out;
	}

	if (!list_empty(&ch->rx_retry_queue)) {
		do_retry = 1;

		if ((ch->options & SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP) &&
			!ch->rx_flow_control_auto &&
			((ch->rx_retry_queue_cnt + 1) >= SMUX_RX_WM_HIGH)) {
			/* need to flow control RX */
			ch->rx_flow_control_auto = 1;
			tx_ready |= smux_rx_flow_control_updated(ch);
			schedule_notify(ch->lcid, SMUX_RX_RETRY_HIGH_WM_HIT,
					NULL);
		}
		if ((ch->rx_retry_queue_cnt + 1) > SMUX_RX_RETRY_MAX_PKTS) {
			/* retry queue full */
			pr_err("%s: ch %d RX retry queue full\n",
					__func__, lcid);
			schedule_notify(lcid, SMUX_READ_FAIL, NULL);
			ret = -ENOMEM;
			spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
			goto out;
		}
	}
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);

	if (remote_loopback) {
		/* Echo the data back to the remote client. */
		ack_pkt = smux_alloc_pkt();
		if (ack_pkt) {
			ack_pkt->hdr.lcid = lcid;
			ack_pkt->hdr.cmd = SMUX_CMD_DATA;
			ack_pkt->hdr.flags = 0;
			ack_pkt->hdr.payload_len = pkt->hdr.payload_len;
			if (ack_pkt->hdr.payload_len) {
				smux_alloc_pkt_payload(ack_pkt);
				memcpy(ack_pkt->payload, pkt->payload,
						ack_pkt->hdr.payload_len);
			}
			ack_pkt->hdr.pad_len = pkt->hdr.pad_len;
			smux_tx_queue(ack_pkt, ch, 0);
			tx_ready = 1;
		} else {
			pr_err("%s: Remote loopback allocation failure\n",
					__func__);
		}
	} else if (!do_retry) {
		/* request buffer from client */
		metadata.read.pkt_priv = 0;
		metadata.read.buffer = 0;
		tmp = ch->get_rx_buffer(ch->priv,
				(void **)&metadata.read.pkt_priv,
				(void **)&metadata.read.buffer,
				rx_len);

		if (tmp == 0 && metadata.read.buffer) {
			/* place data into RX buffer */
			memcpy(metadata.read.buffer, pkt->payload,
					rx_len);
			metadata.read.len = rx_len;
			schedule_notify(lcid, SMUX_READ_DONE,
							&metadata);
		} else if (tmp == -EAGAIN ||
				(tmp == 0 && !metadata.read.buffer)) {
			/* buffer allocation failed - add to retry queue */
			do_retry = 1;
		} else if (tmp < 0) {
			pr_err("%s: ch %d Client RX buffer alloc failed %d\n",
					__func__, lcid, tmp);
			schedule_notify(lcid, SMUX_READ_FAIL, NULL);
			ret = -ENOMEM;
		}
	}

	if (do_retry) {
		struct smux_rx_pkt_retry *retry;

		retry = kmalloc(sizeof(struct smux_rx_pkt_retry), GFP_KERNEL);
		if (!retry) {
			pr_err("%s: retry alloc failure\n", __func__);
			ret = -ENOMEM;
			schedule_notify(lcid, SMUX_READ_FAIL, NULL);
			goto out;
		}
		INIT_LIST_HEAD(&retry->rx_retry_list);
		retry->timeout_in_ms = SMUX_RX_RETRY_MIN_MS;

		/* copy packet */
		retry->pkt = smux_alloc_pkt();
		if (!retry->pkt) {
			kfree(retry);
			pr_err("%s: pkt alloc failure\n", __func__);
			ret = -ENOMEM;
			schedule_notify(lcid, SMUX_READ_FAIL, NULL);
			goto out;
		}
		retry->pkt->hdr.lcid = lcid;
		retry->pkt->hdr.payload_len = pkt->hdr.payload_len;
		retry->pkt->hdr.pad_len = pkt->hdr.pad_len;
		if (retry->pkt->hdr.payload_len) {
			smux_alloc_pkt_payload(retry->pkt);
			memcpy(retry->pkt->payload, pkt->payload,
					retry->pkt->hdr.payload_len);
		}

		/* add to retry queue */
		spin_lock_irqsave(&ch->state_lock_lhb1, flags);
		list_add_tail(&retry->rx_retry_list, &ch->rx_retry_queue);
		++ch->rx_retry_queue_cnt;
		if (ch->rx_retry_queue_cnt == 1)
			queue_delayed_work(smux_rx_wq, &ch->rx_retry_work,
				msecs_to_jiffies(retry->timeout_in_ms));
		spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
	}

	if (tx_ready)
		list_channel(ch);
out:
	return ret;
}

/**
 * Handle receive byte command for testing purposes.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_byte_cmd(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret;
	struct smux_lch_t *ch;
	union notifier_metadata metadata;
	unsigned long flags;

	if (!pkt || smux_assert_lch_id(pkt->hdr.lcid)) {
		pr_err("%s: invalid packet or channel id\n", __func__);
		return -ENXIO;
	}

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];
	spin_lock_irqsave(&ch->state_lock_lhb1, flags);

	if (ch->local_state != SMUX_LCH_LOCAL_OPENED) {
		pr_err("smux: ch %d error data on local state 0x%x\n",
				lcid, ch->local_state);
		ret = -EIO;
		goto out;
	}

	if (ch->remote_state != SMUX_LCH_REMOTE_OPENED) {
		pr_err("smux: ch %d error data on remote state 0x%x\n",
				lcid, ch->remote_state);
		ret = -EIO;
		goto out;
	}

	metadata.read.pkt_priv = (void *)(int)pkt->hdr.flags;
	metadata.read.buffer = 0;
	schedule_notify(lcid, SMUX_READ_DONE, &metadata);
	ret = 0;

out:
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
	return ret;
}

/**
 * Handle receive status command.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_status_cmd(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret = 0;
	struct smux_lch_t *ch;
	union notifier_metadata meta;
	unsigned long flags;
	int tx_ready = 0;

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];

	spin_lock_irqsave(&ch->state_lock_lhb1, flags);
	meta.tiocm.tiocm_old = ch->remote_tiocm;
	meta.tiocm.tiocm_new = pkt->hdr.flags;

	/* update logical channel flow control */
	if ((meta.tiocm.tiocm_old & SMUX_CMD_STATUS_FLOW_CNTL) ^
		(meta.tiocm.tiocm_new & SMUX_CMD_STATUS_FLOW_CNTL)) {
		/* logical channel flow control changed */
		if (pkt->hdr.flags & SMUX_CMD_STATUS_FLOW_CNTL) {
			/* disabled TX */
			SMUX_DBG("TX Flow control enabled\n");
			ch->tx_flow_control = 1;
		} else {
			/* re-enable channel */
			SMUX_DBG("TX Flow control disabled\n");
			ch->tx_flow_control = 0;
			tx_ready = 1;
		}
	}
	meta.tiocm.tiocm_old = msm_smux_tiocm_get_atomic(ch);
	ch->remote_tiocm = pkt->hdr.flags;
	meta.tiocm.tiocm_new = msm_smux_tiocm_get_atomic(ch);

	/* client notification for status change */
	if (IS_FULLY_OPENED(ch)) {
		if (meta.tiocm.tiocm_old != meta.tiocm.tiocm_new)
			schedule_notify(lcid, SMUX_TIOCM_UPDATE, &meta);
		ret = 0;
	}
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
	if (tx_ready)
		list_channel(ch);

	return ret;
}

/**
 * Handle receive power command.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_power_cmd(struct smux_pkt_t *pkt)
{
	struct smux_pkt_t *ack_pkt = NULL;
	int power_down = 0;
	unsigned long flags;

	SMUX_PWR_PKT_RX(pkt);

	spin_lock_irqsave(&smux.tx_lock_lha2, flags);
	if (pkt->hdr.flags & SMUX_CMD_PWR_CTL_ACK) {
		/* local sleep request ack */
		if (smux.power_state == SMUX_PWR_TURNING_OFF)
			/* Power-down complete, turn off UART */
			power_down = 1;
		else
			pr_err("%s: sleep request ack invalid in state %d\n",
					__func__, smux.power_state);
	} else {
		/*
		 * Remote sleep request
		 *
		 * Even if we have data pending, we need to transition to the
		 * POWER_OFF state and then perform a wakeup since the remote
		 * side has requested a power-down.
		 *
		 * The state here is set to SMUX_PWR_TURNING_OFF_FLUSH and
		 * the TX thread will set the state to SMUX_PWR_TURNING_OFF
		 * when it sends the packet.
		 *
		 * If we are already powering down, then no ACK is sent.
		 */
		if (smux.power_state == SMUX_PWR_ON) {
			ack_pkt = smux_alloc_pkt();
			if (ack_pkt) {
				SMUX_PWR("%s: Power %d->%d\n", __func__,
						smux.power_state,
						SMUX_PWR_TURNING_OFF_FLUSH);

				smux.power_state = SMUX_PWR_TURNING_OFF_FLUSH;

				/* send power-down ack */
				ack_pkt->hdr.cmd = SMUX_CMD_PWR_CTL;
				ack_pkt->hdr.flags = SMUX_CMD_PWR_CTL_ACK;
				ack_pkt->hdr.lcid = SMUX_BROADCAST_LCID;
				list_add_tail(&ack_pkt->list,
						&smux.power_queue);
				queue_work(smux_tx_wq, &smux_tx_work);
			}
		} else if (smux.power_state == SMUX_PWR_TURNING_OFF_FLUSH) {
			/* Local power-down request still in TX queue */
			SMUX_PWR("%s: Power-down shortcut - no ack\n",
					__func__);
			smux.power_ctl_remote_req_received = 1;
		} else if (smux.power_state == SMUX_PWR_TURNING_OFF) {
			/*
			 * Local power-down request already sent to remote
			 * side, so this request gets treated as an ACK.
			 */
			SMUX_PWR("%s: Power-down shortcut - no ack\n",
					__func__);
			power_down = 1;
		} else {
			pr_err("%s: sleep request invalid in state %d\n",
					__func__, smux.power_state);
		}
	}

	if (power_down) {
		SMUX_PWR("%s: Power %d->%d\n", __func__,
				smux.power_state, SMUX_PWR_OFF_FLUSH);
		smux.power_state = SMUX_PWR_OFF_FLUSH;
		queue_work(smux_tx_wq, &smux_inactivity_work);
	}
	spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);

	return 0;
}

/**
 * Handle dispatching a completed packet for receive processing.
 *
 * @pkt Packet to process
 *
 * @returns 0 for success
 */
static int smux_dispatch_rx_pkt(struct smux_pkt_t *pkt)
{
	int ret = -ENXIO;

	switch (pkt->hdr.cmd) {
	case SMUX_CMD_OPEN_LCH:
		SMUX_LOG_PKT_RX(pkt);
		if (smux_assert_lch_id(pkt->hdr.lcid)) {
			pr_err("%s: invalid channel id %d\n",
					__func__, pkt->hdr.lcid);
			break;
		}
		ret = smux_handle_rx_open_cmd(pkt);
		break;

	case SMUX_CMD_DATA:
		SMUX_LOG_PKT_RX(pkt);
		if (smux_assert_lch_id(pkt->hdr.lcid)) {
			pr_err("%s: invalid channel id %d\n",
					__func__, pkt->hdr.lcid);
			break;
		}
		ret = smux_handle_rx_data_cmd(pkt);
		break;

	case SMUX_CMD_CLOSE_LCH:
		SMUX_LOG_PKT_RX(pkt);
		if (smux_assert_lch_id(pkt->hdr.lcid)) {
			pr_err("%s: invalid channel id %d\n",
					__func__, pkt->hdr.lcid);
			break;
		}
		ret = smux_handle_rx_close_cmd(pkt);
		break;

	case SMUX_CMD_STATUS:
		SMUX_LOG_PKT_RX(pkt);
		if (smux_assert_lch_id(pkt->hdr.lcid)) {
			pr_err("%s: invalid channel id %d\n",
					__func__, pkt->hdr.lcid);
			break;
		}
		ret = smux_handle_rx_status_cmd(pkt);
		break;

	case SMUX_CMD_PWR_CTL:
		ret = smux_handle_rx_power_cmd(pkt);
		break;

	case SMUX_CMD_BYTE:
		SMUX_LOG_PKT_RX(pkt);
		ret = smux_handle_rx_byte_cmd(pkt);
		break;

	default:
		SMUX_LOG_PKT_RX(pkt);
		pr_err("%s: command %d unknown\n", __func__, pkt->hdr.cmd);
		ret = -EINVAL;
	}
	return ret;
}

1810/**
1811 * Deserializes a packet and dispatches it to the packet receive logic.
1812 *
1813 * @data Raw data for one packet
1814 * @len Length of the data
1815 *
1816 * @returns 0 for success
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001817 */
1818static int smux_deserialize(unsigned char *data, int len)
1819{
1820 struct smux_pkt_t recv;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001821
1822 smux_init_pkt(&recv);
1823
1824 /*
1825 * It may be possible to optimize this to not use the
1826 * temporary buffer.
1827 */
1828 memcpy(&recv.hdr, data, sizeof(struct smux_hdr_t));
1829
1830 if (recv.hdr.magic != SMUX_MAGIC) {
1831 pr_err("%s: invalid header magic\n", __func__);
1832 return -EINVAL;
1833 }
1834
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001835 if (recv.hdr.payload_len)
1836 recv.payload = data + sizeof(struct smux_hdr_t);
1837
1838 return smux_dispatch_rx_pkt(&recv);
1839}
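
/*
 * Note on framing (as reconstructed by the RX state machine below): each
 * packet on the wire starts with a struct smux_hdr_t whose first two bytes
 * are SMUX_MAGIC_WORD1 and SMUX_MAGIC_WORD2, followed by hdr.payload_len
 * payload bytes and hdr.pad_len padding bytes.
 */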
1840
1841/**
1842 * Handle wakeup request byte.
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001843 */
1844static void smux_handle_wakeup_req(void)
1845{
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001846 unsigned long flags;
1847
1848 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001849 if (smux.power_state == SMUX_PWR_OFF
1850 || smux.power_state == SMUX_PWR_TURNING_ON) {
1851 /* wakeup system */
Eric Holmbergff0b0112012-06-08 15:06:57 -06001852 SMUX_PWR("%s: Power %d->%d\n", __func__,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001853 smux.power_state, SMUX_PWR_ON);
1854 smux.power_state = SMUX_PWR_ON;
1855 queue_work(smux_tx_wq, &smux_wakeup_work);
1856 queue_work(smux_tx_wq, &smux_tx_work);
1857 queue_delayed_work(smux_tx_wq, &smux_delayed_inactivity_work,
1858 msecs_to_jiffies(SMUX_INACTIVITY_TIMEOUT_MS));
1859 smux_send_byte(SMUX_WAKEUP_ACK);
Eric Holmberga9b06472012-06-22 09:46:34 -06001860 } else if (smux.power_state == SMUX_PWR_ON) {
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001861 smux_send_byte(SMUX_WAKEUP_ACK);
Eric Holmberga9b06472012-06-22 09:46:34 -06001862 } else {
1863 /* stale wakeup request from previous wakeup */
1864 SMUX_PWR("%s: stale Wakeup REQ in state %d\n",
1865 __func__, smux.power_state);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001866 }
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001867 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001868}
1869
1870/**
1871 * Handle wakeup request ack.
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001872 */
1873static void smux_handle_wakeup_ack(void)
1874{
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001875 unsigned long flags;
1876
1877 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001878 if (smux.power_state == SMUX_PWR_TURNING_ON) {
1879 /* received response to wakeup request */
Eric Holmbergff0b0112012-06-08 15:06:57 -06001880 SMUX_PWR("%s: Power %d->%d\n", __func__,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001881 smux.power_state, SMUX_PWR_ON);
1882 smux.power_state = SMUX_PWR_ON;
1883 queue_work(smux_tx_wq, &smux_tx_work);
1884 queue_delayed_work(smux_tx_wq, &smux_delayed_inactivity_work,
1885 msecs_to_jiffies(SMUX_INACTIVITY_TIMEOUT_MS));
1886
1887 } else if (smux.power_state != SMUX_PWR_ON) {
1888 /* invalid message */
Eric Holmberga9b06472012-06-22 09:46:34 -06001889 SMUX_PWR("%s: stale Wakeup REQ ACK in state %d\n",
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001890 __func__, smux.power_state);
1891 }
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001892 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001893}
1894
1895/**
1896 * RX State machine - IDLE state processing.
1897 *
1898 * @data New RX data to process
1899 * @len Length of the data
1900 * @used Return value of length processed
1901 * @flag Error flag - TTY_NORMAL (0) for no failure
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001902 */
1903static void smux_rx_handle_idle(const unsigned char *data,
1904 int len, int *used, int flag)
1905{
1906 int i;
1907
1908 if (flag) {
1909 if (smux_byte_loopback)
1910 smux_receive_byte(SMUX_UT_ECHO_ACK_FAIL,
1911 smux_byte_loopback);
1912 pr_err("%s: TTY error 0x%x - ignoring\n", __func__, flag);
1913 ++*used;
1914 return;
1915 }
1916
1917 for (i = *used; i < len && smux.rx_state == SMUX_RX_IDLE; i++) {
1918 switch (data[i]) {
1919 case SMUX_MAGIC_WORD1:
1920 smux.rx_state = SMUX_RX_MAGIC;
1921 break;
1922 case SMUX_WAKEUP_REQ:
Eric Holmberg4dd6e1a2012-06-26 13:46:29 -06001923 SMUX_PWR("smux: RX Wakeup REQ\n");
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001924 smux_handle_wakeup_req();
1925 break;
1926 case SMUX_WAKEUP_ACK:
Eric Holmberg4dd6e1a2012-06-26 13:46:29 -06001927 SMUX_PWR("smux: RX Wakeup ACK\n");
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001928 smux_handle_wakeup_ack();
1929 break;
1930 default:
1931 /* unexpected character */
1932 if (smux_byte_loopback && data[i] == SMUX_UT_ECHO_REQ)
1933 smux_receive_byte(SMUX_UT_ECHO_ACK_OK,
1934 smux_byte_loopback);
1935 pr_err("%s: parse error 0x%02x - ignoring\n", __func__,
1936 (unsigned)data[i]);
1937 break;
1938 }
1939 }
1940
1941 *used = i;
1942}
1943
1944/**
1945 * RX State machine - Header Magic state processing.
1946 *
1947 * @data New RX data to process
1948 * @len Length of the data
1949 * @used Return value of length processed
1950 * @flag Error flag - TTY_NORMAL (0) for no failure
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001951 */
1952static void smux_rx_handle_magic(const unsigned char *data,
1953 int len, int *used, int flag)
1954{
1955 int i;
1956
1957 if (flag) {
1958 pr_err("%s: TTY RX error %d\n", __func__, flag);
1959 smux_enter_reset();
1960 smux.rx_state = SMUX_RX_FAILURE;
1961 ++*used;
1962 return;
1963 }
1964
1965 for (i = *used; i < len && smux.rx_state == SMUX_RX_MAGIC; i++) {
1966 /* wait for completion of the magic */
1967 if (data[i] == SMUX_MAGIC_WORD2) {
1968 smux.recv_len = 0;
1969 smux.recv_buf[smux.recv_len++] = SMUX_MAGIC_WORD1;
1970 smux.recv_buf[smux.recv_len++] = SMUX_MAGIC_WORD2;
1971 smux.rx_state = SMUX_RX_HDR;
1972 } else {
1973 /* unexpected / trash character */
1974 pr_err("%s: rx parse error for char %c; *used=%d, len=%d\n",
1975 __func__, data[i], *used, len);
1976 smux.rx_state = SMUX_RX_IDLE;
1977 }
1978 }
1979
1980 *used = i;
1981}
1982
1983/**
1984 * RX State machine - Packet Header state processing.
1985 *
1986 * @data New RX data to process
1987 * @len Length of the data
1988 * @used Return value of length processed
1989 * @flag Error flag - TTY_NORMAL (0) for no failure
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001990 */
1991static void smux_rx_handle_hdr(const unsigned char *data,
1992 int len, int *used, int flag)
1993{
1994 int i;
1995 struct smux_hdr_t *hdr;
1996
1997 if (flag) {
1998 pr_err("%s: TTY RX error %d\n", __func__, flag);
1999 smux_enter_reset();
2000 smux.rx_state = SMUX_RX_FAILURE;
2001 ++*used;
2002 return;
2003 }
2004
2005 for (i = *used; i < len && smux.rx_state == SMUX_RX_HDR; i++) {
2006 smux.recv_buf[smux.recv_len++] = data[i];
2007
2008 if (smux.recv_len == sizeof(struct smux_hdr_t)) {
2009 /* complete header received */
2010 hdr = (struct smux_hdr_t *)smux.recv_buf;
2011 smux.pkt_remain = hdr->payload_len + hdr->pad_len;
2012 smux.rx_state = SMUX_RX_PAYLOAD;
2013 }
2014 }
2015 *used = i;
2016}
2017
2018/**
2019 * RX State machine - Packet Payload state processing.
2020 *
2021 * @data New RX data to process
2022 * @len Length of the data
2023 * @used Return value of length processed
2024 * @flag Error flag - TTY_NORMAL (0) for no failure
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002025 */
2026static void smux_rx_handle_pkt_payload(const unsigned char *data,
2027 int len, int *used, int flag)
2028{
2029 int remaining;
2030
2031 if (flag) {
2032 pr_err("%s: TTY RX error %d\n", __func__, flag);
2033 smux_enter_reset();
2034 smux.rx_state = SMUX_RX_FAILURE;
2035 ++*used;
2036 return;
2037 }
2038
2039 /* copy data into rx buffer */
2040 if (smux.pkt_remain < (len - *used))
2041 remaining = smux.pkt_remain;
2042 else
2043 remaining = len - *used;
2044
2045 memcpy(&smux.recv_buf[smux.recv_len], &data[*used], remaining);
2046 smux.recv_len += remaining;
2047 smux.pkt_remain -= remaining;
2048 *used += remaining;
2049
2050 if (smux.pkt_remain == 0) {
2051 /* complete packet received */
2052 smux_deserialize(smux.recv_buf, smux.recv_len);
2053 smux.rx_state = SMUX_RX_IDLE;
2054 }
2055}
2056
2057/**
2058 * Feed data to the receive state machine.
2059 *
2060 * @data Pointer to data block
2061 * @len Length of data
2062 * @flag TTY_NORMAL (0) for no error, otherwise TTY Error Flag
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002063 */
2064void smux_rx_state_machine(const unsigned char *data,
2065 int len, int flag)
2066{
Eric Holmberg0560f7a2012-05-31 15:50:26 -06002067 struct smux_rx_worker_data work;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002068
Eric Holmberg0560f7a2012-05-31 15:50:26 -06002069 work.data = data;
2070 work.len = len;
2071 work.flag = flag;
2072 INIT_WORK_ONSTACK(&work.work, smux_rx_worker);
2073 work.work_complete = COMPLETION_INITIALIZER_ONSTACK(work.work_complete);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002074
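	/*
	 * Run the parser on the single-threaded RX workqueue and block until
	 * it completes so that all packet parsing is serialized, regardless
	 * of the calling context.
	 */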
Eric Holmberg0560f7a2012-05-31 15:50:26 -06002075 queue_work(smux_rx_wq, &work.work);
2076 wait_for_completion(&work.work_complete);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002077}
2078
2079/**
2080 * Add channel to transmit-ready list and trigger transmit worker.
2081 *
2082 * @ch Channel to add
2083 */
2084static void list_channel(struct smux_lch_t *ch)
2085{
2086 unsigned long flags;
2087
2088 SMUX_DBG("%s: listing channel %d\n",
2089 __func__, ch->lcid);
2090
2091 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
2092 spin_lock(&ch->tx_lock_lhb2);
2093 smux.tx_activity_flag = 1;
2094 if (list_empty(&ch->tx_ready_list))
2095 list_add_tail(&ch->tx_ready_list, &smux.lch_tx_ready_list);
2096 spin_unlock(&ch->tx_lock_lhb2);
2097 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2098
2099 queue_work(smux_tx_wq, &smux_tx_work);
2100}
2101
2102/**
2103 * Transmit packet on correct transport and then perform client
2104 * notification.
2105 *
2106 * @ch Channel to transmit on
2107 * @pkt Packet to transmit
2108 */
2109static void smux_tx_pkt(struct smux_lch_t *ch, struct smux_pkt_t *pkt)
2110{
2111 union notifier_metadata meta_write;
2112 int ret;
2113
2114 if (ch && pkt) {
2115 SMUX_LOG_PKT_TX(pkt);
2116 if (ch->local_mode == SMUX_LCH_MODE_LOCAL_LOOPBACK)
2117 ret = smux_tx_loopback(pkt);
2118 else
2119 ret = smux_tx_tty(pkt);
2120
2121 if (pkt->hdr.cmd == SMUX_CMD_DATA) {
2122 /* notify write-done */
2123 meta_write.write.pkt_priv = pkt->priv;
2124 meta_write.write.buffer = pkt->payload;
2125 meta_write.write.len = pkt->hdr.payload_len;
2126 if (ret >= 0) {
2127				SMUX_DBG("%s: PKT write done\n", __func__);
2128 schedule_notify(ch->lcid, SMUX_WRITE_DONE,
2129 &meta_write);
2130 } else {
2131 pr_err("%s: failed to write pkt %d\n",
2132 __func__, ret);
2133 schedule_notify(ch->lcid, SMUX_WRITE_FAIL,
2134 &meta_write);
2135 }
2136 }
2137 }
2138}
2139
2140/**
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002141 * Flush pending TTY TX data.
2142 */
2143static void smux_flush_tty(void)
2144{
Eric Holmberg92a67df2012-06-25 13:56:24 -06002145 mutex_lock(&smux.mutex_lha0);
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002146 if (!smux.tty) {
2147 pr_err("%s: ldisc not loaded\n", __func__);
Eric Holmberg92a67df2012-06-25 13:56:24 -06002148 mutex_unlock(&smux.mutex_lha0);
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002149 return;
2150 }
2151
2152 tty_wait_until_sent(smux.tty,
2153 msecs_to_jiffies(TTY_BUFFER_FULL_WAIT_MS));
2154
2155 if (tty_chars_in_buffer(smux.tty) > 0)
2156 pr_err("%s: unable to flush UART queue\n", __func__);
Eric Holmberg92a67df2012-06-25 13:56:24 -06002157
2158 mutex_unlock(&smux.mutex_lha0);
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002159}
2160
2161/**
Eric Holmberged1f00c2012-06-07 09:45:18 -06002162 * Purge TX queue for logical channel.
2163 *
2164 * @ch Logical channel pointer
2165 *
2166 * Must be called with the following spinlocks locked:
2167 * state_lock_lhb1
2168 * tx_lock_lhb2
2169 */
2170static void smux_purge_ch_tx_queue(struct smux_lch_t *ch)
2171{
2172 struct smux_pkt_t *pkt;
2173 int send_disconnect = 0;
2174
2175 while (!list_empty(&ch->tx_queue)) {
2176 pkt = list_first_entry(&ch->tx_queue, struct smux_pkt_t,
2177 list);
2178 list_del(&pkt->list);
2179
2180 if (pkt->hdr.cmd == SMUX_CMD_OPEN_LCH) {
2181 /* Open was never sent, just force to closed state */
2182 ch->local_state = SMUX_LCH_LOCAL_CLOSED;
2183 send_disconnect = 1;
2184 } else if (pkt->hdr.cmd == SMUX_CMD_DATA) {
2185 /* Notify client of failed write */
2186 union notifier_metadata meta_write;
2187
2188 meta_write.write.pkt_priv = pkt->priv;
2189 meta_write.write.buffer = pkt->payload;
2190 meta_write.write.len = pkt->hdr.payload_len;
2191 schedule_notify(ch->lcid, SMUX_WRITE_FAIL, &meta_write);
2192 }
2193 smux_free_pkt(pkt);
2194 }
2195
2196 if (send_disconnect) {
2197 union notifier_metadata meta_disconnected;
2198
2199 meta_disconnected.disconnected.is_ssr = smux.in_reset;
2200 schedule_notify(ch->lcid, SMUX_DISCONNECTED,
2201 &meta_disconnected);
2202 }
2203}
2204
2205/**
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002206 * Power-up the UART.
Eric Holmberg92a67df2012-06-25 13:56:24 -06002207 *
2208 * Must be called with smux.mutex_lha0 already locked.
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002209 */
Eric Holmberg92a67df2012-06-25 13:56:24 -06002210static void smux_uart_power_on_atomic(void)
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002211{
2212 struct uart_state *state;
2213
2214 if (!smux.tty || !smux.tty->driver_data) {
2215 pr_err("%s: unable to find UART port for tty %p\n",
2216 __func__, smux.tty);
2217 return;
2218 }
2219 state = smux.tty->driver_data;
2220 msm_hs_request_clock_on(state->uart_port);
2221}
2222
2223/**
Eric Holmberg92a67df2012-06-25 13:56:24 -06002224 * Power-up the UART.
2225 */
2226static void smux_uart_power_on(void)
2227{
2228 mutex_lock(&smux.mutex_lha0);
2229 smux_uart_power_on_atomic();
2230 mutex_unlock(&smux.mutex_lha0);
2231}
2232
2233/**
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002234 * Power down the UART.
2235 */
2236static void smux_uart_power_off(void)
2237{
2238 struct uart_state *state;
2239
Eric Holmberg92a67df2012-06-25 13:56:24 -06002240 mutex_lock(&smux.mutex_lha0);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002241 if (!smux.tty || !smux.tty->driver_data) {
2242 pr_err("%s: unable to find UART port for tty %p\n",
2243 __func__, smux.tty);
Eric Holmberg92a67df2012-06-25 13:56:24 -06002244 mutex_unlock(&smux.mutex_lha0);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002245 return;
2246 }
2247 state = smux.tty->driver_data;
2248 msm_hs_request_clock_off(state->uart_port);
Eric Holmberg92a67df2012-06-25 13:56:24 -06002249 mutex_unlock(&smux.mutex_lha0);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002250}
2251
2252/**
2253 * TX Wakeup Worker
2254 *
2255 * @work Not used
2256 *
2257 * Do an exponential back-off wakeup sequence with a maximum period
2258 * of approximately 1 second (1 << 20 microseconds).
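 *
 * The delay starts at 1 us for each wakeup sequence and doubles on every
 * retry, capped at SMUX_WAKEUP_DELAY_MAX. Delays below
 * SMUX_WAKEUP_DELAY_MIN are slept inline with usleep_range(); longer
 * delays are rescheduled as delayed work.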
2259 */
2260static void smux_wakeup_worker(struct work_struct *work)
2261{
2262 unsigned long flags;
2263 unsigned wakeup_delay;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002264
Eric Holmberg2d4f9e82012-06-21 13:12:39 -06002265 if (smux.in_reset)
2266 return;
2267
2268 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
2269 if (smux.power_state == SMUX_PWR_ON) {
2270 /* wakeup complete */
Eric Holmberga9b06472012-06-22 09:46:34 -06002271 smux.pwr_wakeup_delay_us = 1;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002272 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002273 SMUX_DBG("%s: wakeup complete\n", __func__);
Eric Holmberg2d4f9e82012-06-21 13:12:39 -06002274
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002275 /*
2276 * Cancel any pending retry. This avoids a race condition with
2277 * a new power-up request because:
2278 * 1) this worker doesn't modify the state
2279 * 2) this worker is processed on the same single-threaded
2280 * workqueue as new TX wakeup requests
2281 */
2282 cancel_delayed_work(&smux_wakeup_delayed_work);
Eric Holmberga9b06472012-06-22 09:46:34 -06002283 } else if (smux.power_state == SMUX_PWR_TURNING_ON) {
Eric Holmberg2d4f9e82012-06-21 13:12:39 -06002284 /* retry wakeup */
2285 wakeup_delay = smux.pwr_wakeup_delay_us;
2286 smux.pwr_wakeup_delay_us <<= 1;
2287 if (smux.pwr_wakeup_delay_us > SMUX_WAKEUP_DELAY_MAX)
2288 smux.pwr_wakeup_delay_us =
2289 SMUX_WAKEUP_DELAY_MAX;
2290
2291 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
Eric Holmberga9b06472012-06-22 09:46:34 -06002292 SMUX_PWR("%s: triggering wakeup\n", __func__);
Eric Holmberg2d4f9e82012-06-21 13:12:39 -06002293 smux_send_byte(SMUX_WAKEUP_REQ);
2294
2295 if (wakeup_delay < SMUX_WAKEUP_DELAY_MIN) {
2296 SMUX_DBG("%s: sleeping for %u us\n", __func__,
2297 wakeup_delay);
2298 usleep_range(wakeup_delay, 2*wakeup_delay);
2299 queue_work(smux_tx_wq, &smux_wakeup_work);
2300 } else {
2301 /* schedule delayed work */
2302 SMUX_DBG("%s: scheduling delayed wakeup in %u ms\n",
2303 __func__, wakeup_delay / 1000);
2304 queue_delayed_work(smux_tx_wq,
2305 &smux_wakeup_delayed_work,
2306 msecs_to_jiffies(wakeup_delay / 1000));
2307 }
Eric Holmberga9b06472012-06-22 09:46:34 -06002308 } else {
2309 /* wakeup aborted */
2310 smux.pwr_wakeup_delay_us = 1;
2311 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2312 SMUX_PWR("%s: wakeup aborted\n", __func__);
2313 cancel_delayed_work(&smux_wakeup_delayed_work);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002314 }
2315}
2316
2317
2318/**
2319 * Inactivity timeout worker. Periodically scheduled when link is active.
2320 * When it detects inactivity, it powers down the UART link.
2321 *
2322 * @work Work structure (not used)
2323 */
2324static void smux_inactivity_worker(struct work_struct *work)
2325{
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002326 struct smux_pkt_t *pkt;
2327 unsigned long flags;
2328
2329 spin_lock_irqsave(&smux.rx_lock_lha1, flags);
2330 spin_lock(&smux.tx_lock_lha2);
2331
2332 if (!smux.tx_activity_flag && !smux.rx_activity_flag) {
2333 /* no activity */
2334 if (smux.powerdown_enabled) {
2335 if (smux.power_state == SMUX_PWR_ON) {
2336 /* start power-down sequence */
2337 pkt = smux_alloc_pkt();
2338 if (pkt) {
Eric Holmbergff0b0112012-06-08 15:06:57 -06002339 SMUX_PWR("%s: Power %d->%d\n", __func__,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002340 smux.power_state,
Eric Holmberga9b06472012-06-22 09:46:34 -06002341 SMUX_PWR_TURNING_OFF_FLUSH);
2342 smux.power_state =
2343 SMUX_PWR_TURNING_OFF_FLUSH;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002344
2345 /* send power-down request */
2346 pkt->hdr.cmd = SMUX_CMD_PWR_CTL;
2347 pkt->hdr.flags = 0;
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002348 pkt->hdr.lcid = SMUX_BROADCAST_LCID;
2349 list_add_tail(&pkt->list,
2350 &smux.power_queue);
2351 queue_work(smux_tx_wq, &smux_tx_work);
2352 } else {
2353 pr_err("%s: packet alloc failed\n",
2354 __func__);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002355 }
2356 }
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002357 }
2358 }
2359 smux.tx_activity_flag = 0;
2360 smux.rx_activity_flag = 0;
2361
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002362 if (smux.power_state == SMUX_PWR_OFF_FLUSH) {
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002363 /* ready to power-down the UART */
Eric Holmbergff0b0112012-06-08 15:06:57 -06002364 SMUX_PWR("%s: Power %d->%d\n", __func__,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002365 smux.power_state, SMUX_PWR_OFF);
Eric Holmbergff0b0112012-06-08 15:06:57 -06002366 smux.power_state = SMUX_PWR_OFF;
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002367
2368 /* if data is pending, schedule a new wakeup */
2369 if (!list_empty(&smux.lch_tx_ready_list) ||
2370 !list_empty(&smux.power_queue))
2371 queue_work(smux_tx_wq, &smux_tx_work);
2372
2373 spin_unlock(&smux.tx_lock_lha2);
2374 spin_unlock_irqrestore(&smux.rx_lock_lha1, flags);
2375
2376 /* flush UART output queue and power down */
2377 smux_flush_tty();
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002378 smux_uart_power_off();
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002379 } else {
2380 spin_unlock(&smux.tx_lock_lha2);
2381 spin_unlock_irqrestore(&smux.rx_lock_lha1, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002382 }
2383
2384 /* reschedule inactivity worker */
2385 if (smux.power_state != SMUX_PWR_OFF)
2386 queue_delayed_work(smux_tx_wq, &smux_delayed_inactivity_work,
2387 msecs_to_jiffies(SMUX_INACTIVITY_TIMEOUT_MS));
2388}
2389
2390/**
Eric Holmbergb8435c82012-06-05 14:51:29 -06002391 * Remove RX retry packet from channel and free it.
2392 *
Eric Holmbergb8435c82012-06-05 14:51:29 -06002393 * @ch Channel for retry packet
2394 * @retry Retry packet to remove
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002395 *
2396 * @returns 1 if flow control updated; 0 otherwise
2397 *
2398 * Must be called with state_lock_lhb1 locked.
Eric Holmbergb8435c82012-06-05 14:51:29 -06002399 */
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002400int smux_remove_rx_retry(struct smux_lch_t *ch,
Eric Holmbergb8435c82012-06-05 14:51:29 -06002401 struct smux_rx_pkt_retry *retry)
2402{
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002403 int tx_ready = 0;
2404
Eric Holmbergb8435c82012-06-05 14:51:29 -06002405 list_del(&retry->rx_retry_list);
2406 --ch->rx_retry_queue_cnt;
2407 smux_free_pkt(retry->pkt);
2408 kfree(retry);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002409
2410 if ((ch->options & SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP) &&
2411 (ch->rx_retry_queue_cnt <= SMUX_RX_WM_LOW) &&
2412 ch->rx_flow_control_auto) {
2413 ch->rx_flow_control_auto = 0;
2414 smux_rx_flow_control_updated(ch);
2415 schedule_notify(ch->lcid, SMUX_RX_RETRY_LOW_WM_HIT, NULL);
2416 tx_ready = 1;
2417 }
2418 return tx_ready;
Eric Holmbergb8435c82012-06-05 14:51:29 -06002419}
2420
2421/**
Eric Holmberg0560f7a2012-05-31 15:50:26 -06002422 * RX worker handles all receive operations.
2423 *
2424 * @work Work structure contained in struct smux_rx_worker_data
2425 */
2426static void smux_rx_worker(struct work_struct *work)
2427{
2428 unsigned long flags;
2429 int used;
2430 int initial_rx_state;
2431 struct smux_rx_worker_data *w;
2432 const unsigned char *data;
2433 int len;
2434 int flag;
2435
2436 w = container_of(work, struct smux_rx_worker_data, work);
2437 data = w->data;
2438 len = w->len;
2439 flag = w->flag;
2440
2441 spin_lock_irqsave(&smux.rx_lock_lha1, flags);
2442 smux.rx_activity_flag = 1;
2443 spin_unlock_irqrestore(&smux.rx_lock_lha1, flags);
2444
2445 SMUX_DBG("%s: %p, len=%d, flag=%d\n", __func__, data, len, flag);
2446 used = 0;
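	/*
	 * Feed the state machine until all bytes are consumed and the state
	 * stops changing; a handler may return before consuming everything,
	 * leaving the remainder for the next state's handler.
	 */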
2447 do {
2448 SMUX_DBG("%s: state %d; %d of %d\n",
2449 __func__, smux.rx_state, used, len);
2450 initial_rx_state = smux.rx_state;
2451
2452 switch (smux.rx_state) {
2453 case SMUX_RX_IDLE:
2454 smux_rx_handle_idle(data, len, &used, flag);
2455 break;
2456 case SMUX_RX_MAGIC:
2457 smux_rx_handle_magic(data, len, &used, flag);
2458 break;
2459 case SMUX_RX_HDR:
2460 smux_rx_handle_hdr(data, len, &used, flag);
2461 break;
2462 case SMUX_RX_PAYLOAD:
2463 smux_rx_handle_pkt_payload(data, len, &used, flag);
2464 break;
2465 default:
2466 SMUX_DBG("%s: invalid state %d\n",
2467 __func__, smux.rx_state);
2468 smux.rx_state = SMUX_RX_IDLE;
2469 break;
2470 }
2471 } while (used < len || smux.rx_state != initial_rx_state);
2472
2473 complete(&w->work_complete);
2474}
2475
2476/**
Eric Holmbergb8435c82012-06-05 14:51:29 -06002477 * RX Retry worker handles retrying get_rx_buffer calls that previously failed
2478 * because the client was not ready (-EAGAIN).
2479 *
2480 * @work Work structure contained in smux_lch_t structure
2481 */
2482static void smux_rx_retry_worker(struct work_struct *work)
2483{
2484 struct smux_lch_t *ch;
2485 struct smux_rx_pkt_retry *retry;
2486 union notifier_metadata metadata;
2487 int tmp;
2488 unsigned long flags;
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002489 int immediate_retry = 0;
2490 int tx_ready = 0;
Eric Holmbergb8435c82012-06-05 14:51:29 -06002491
2492 ch = container_of(work, struct smux_lch_t, rx_retry_work.work);
2493
2494 /* get next retry packet */
2495 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2496 if (ch->local_state != SMUX_LCH_LOCAL_OPENED) {
2497 /* port has been closed - remove all retries */
2498 while (!list_empty(&ch->rx_retry_queue)) {
2499 retry = list_first_entry(&ch->rx_retry_queue,
2500 struct smux_rx_pkt_retry,
2501 rx_retry_list);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002502 (void)smux_remove_rx_retry(ch, retry);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002503 }
2504 }
2505
2506 if (list_empty(&ch->rx_retry_queue)) {
2507 SMUX_DBG("%s: retry list empty for channel %d\n",
2508 __func__, ch->lcid);
2509 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2510 return;
2511 }
2512 retry = list_first_entry(&ch->rx_retry_queue,
2513 struct smux_rx_pkt_retry,
2514 rx_retry_list);
2515 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2516
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002517 SMUX_DBG("%s: ch %d retrying rx pkt %p\n",
2518 __func__, ch->lcid, retry);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002519 metadata.read.pkt_priv = 0;
2520 metadata.read.buffer = 0;
2521 tmp = ch->get_rx_buffer(ch->priv,
2522 (void **)&metadata.read.pkt_priv,
2523 (void **)&metadata.read.buffer,
2524 retry->pkt->hdr.payload_len);
2525 if (tmp == 0 && metadata.read.buffer) {
2526 /* have valid RX buffer */
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002527
Eric Holmbergb8435c82012-06-05 14:51:29 -06002528 memcpy(metadata.read.buffer, retry->pkt->payload,
2529 retry->pkt->hdr.payload_len);
2530 metadata.read.len = retry->pkt->hdr.payload_len;
2531
2532 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002533 tx_ready = smux_remove_rx_retry(ch, retry);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002534 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002535 schedule_notify(ch->lcid, SMUX_READ_DONE, &metadata);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002536 if (tx_ready)
2537 list_channel(ch);
2538
2539 immediate_retry = 1;
Eric Holmbergb8435c82012-06-05 14:51:29 -06002540 } else if (tmp == -EAGAIN ||
2541 (tmp == 0 && !metadata.read.buffer)) {
2542 /* retry again */
2543 retry->timeout_in_ms <<= 1;
2544 if (retry->timeout_in_ms > SMUX_RX_RETRY_MAX_MS) {
2545 /* timed out */
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002546 pr_err("%s: ch %d RX retry client timeout\n",
2547 __func__, ch->lcid);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002548 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002549 tx_ready = smux_remove_rx_retry(ch, retry);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002550 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002551 schedule_notify(ch->lcid, SMUX_READ_FAIL, NULL);
2552 if (tx_ready)
2553 list_channel(ch);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002554 }
2555 } else {
2556 /* client error - drop packet */
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002557 pr_err("%s: ch %d RX retry client failed (%d)\n",
2558 __func__, ch->lcid, tmp);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002559 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002560 tx_ready = smux_remove_rx_retry(ch, retry);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002561 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002562 schedule_notify(ch->lcid, SMUX_READ_FAIL, NULL);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002563 if (tx_ready)
2564 list_channel(ch);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002565 }
2566
2567 /* schedule next retry */
2568 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2569 if (!list_empty(&ch->rx_retry_queue)) {
2570 retry = list_first_entry(&ch->rx_retry_queue,
2571 struct smux_rx_pkt_retry,
2572 rx_retry_list);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002573
2574 if (immediate_retry)
2575 queue_delayed_work(smux_rx_wq, &ch->rx_retry_work, 0);
2576 else
2577 queue_delayed_work(smux_rx_wq, &ch->rx_retry_work,
2578 msecs_to_jiffies(retry->timeout_in_ms));
Eric Holmbergb8435c82012-06-05 14:51:29 -06002579 }
2580 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2581}
2582
2583/**
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002584 * Transmit worker handles serializing and transmitting packets onto the
2585 * underlying transport.
2586 *
2587 * @work Work structure (not used)
2588 */
2589static void smux_tx_worker(struct work_struct *work)
2590{
2591 struct smux_pkt_t *pkt;
2592 struct smux_lch_t *ch;
2593 unsigned low_wm_notif;
2594 unsigned lcid;
2595 unsigned long flags;
2596
2597
2598 /*
2599 * Transmit packets in round-robin fashion based upon ready
2600 * channels.
2601 *
2602 * To eliminate the need to hold a lock for the entire
2603 * iteration through the channel ready list, the head of the
2604 * ready-channel list is always the next channel to be
2605 * processed. To send a packet, the first valid packet in
2606 * the head channel is removed and the head channel is then
2607 * rescheduled at the end of the queue by removing it and
2608 * inserting after the tail. The locks can then be released
2609 * while the packet is processed.
2610 */
Eric Holmberged1f00c2012-06-07 09:45:18 -06002611 while (!smux.in_reset) {
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002612 pkt = NULL;
2613 low_wm_notif = 0;
2614
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002615 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002616
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002617 /* handle wakeup if needed */
2618 if (smux.power_state == SMUX_PWR_OFF) {
2619 if (!list_empty(&smux.lch_tx_ready_list) ||
2620 !list_empty(&smux.power_queue)) {
2621 /* data to transmit, do wakeup */
Eric Holmbergff0b0112012-06-08 15:06:57 -06002622 SMUX_PWR("%s: Power %d->%d\n", __func__,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002623 smux.power_state,
2624 SMUX_PWR_TURNING_ON);
2625 smux.power_state = SMUX_PWR_TURNING_ON;
2626 spin_unlock_irqrestore(&smux.tx_lock_lha2,
2627 flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002628 queue_work(smux_tx_wq, &smux_wakeup_work);
2629 } else {
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002630 /* no activity -- stay asleep */
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002631 spin_unlock_irqrestore(&smux.tx_lock_lha2,
2632 flags);
2633 }
2634 break;
2635 }
2636
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002637 /* process any pending power packets */
2638 if (!list_empty(&smux.power_queue)) {
2639 pkt = list_first_entry(&smux.power_queue,
2640 struct smux_pkt_t, list);
2641 list_del(&pkt->list);
2642 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2643
Eric Holmberga9b06472012-06-22 09:46:34 -06002644 /* Adjust power state if this is a flush command */
2645 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
2646 if (smux.power_state == SMUX_PWR_TURNING_OFF_FLUSH &&
2647 pkt->hdr.cmd == SMUX_CMD_PWR_CTL) {
2648 if (pkt->hdr.flags & SMUX_CMD_PWR_CTL_ACK ||
2649 smux.power_ctl_remote_req_received) {
2650 /*
2651 * Sending remote power-down request ACK
2652 * or sending local power-down request
2653 * and we already received a remote
2654 * power-down request.
2655 */
2656 SMUX_PWR("%s: Power %d->%d\n", __func__,
2657 smux.power_state,
2658 SMUX_PWR_OFF_FLUSH);
2659 smux.power_state = SMUX_PWR_OFF_FLUSH;
2660 smux.power_ctl_remote_req_received = 0;
2661 queue_work(smux_tx_wq,
2662 &smux_inactivity_work);
2663 } else {
2664 /* sending local power-down request */
2665 SMUX_PWR("%s: Power %d->%d\n", __func__,
2666 smux.power_state,
2667 SMUX_PWR_TURNING_OFF);
2668 smux.power_state = SMUX_PWR_TURNING_OFF;
2669 }
2670 }
2671 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2672
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002673 /* send the packet */
Eric Holmberga9b06472012-06-22 09:46:34 -06002674 smux_uart_power_on();
2675 smux.tx_activity_flag = 1;
Eric Holmberg4dd6e1a2012-06-26 13:46:29 -06002676 SMUX_PWR_PKT_TX(pkt);
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002677 if (!smux_byte_loopback) {
2678 smux_tx_tty(pkt);
2679 smux_flush_tty();
2680 } else {
2681 smux_tx_loopback(pkt);
2682 }
2683
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002684 smux_free_pkt(pkt);
2685 continue;
2686 }
2687
2688 /* get the next ready channel */
2689 if (list_empty(&smux.lch_tx_ready_list)) {
2690 /* no ready channels */
2691 SMUX_DBG("%s: no more ready channels, exiting\n",
2692 __func__);
2693 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2694 break;
2695 }
2696 smux.tx_activity_flag = 1;
2697
2698 if (smux.power_state != SMUX_PWR_ON) {
2699 /* channel not ready to transmit */
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002700 SMUX_DBG("%s: waiting for link up (state %d)\n",
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002701 __func__,
2702 smux.power_state);
2703 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2704 break;
2705 }
2706
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002707 /* get the next packet to send and rotate channel list */
2708 ch = list_first_entry(&smux.lch_tx_ready_list,
2709 struct smux_lch_t,
2710 tx_ready_list);
2711
2712 spin_lock(&ch->state_lock_lhb1);
2713 spin_lock(&ch->tx_lock_lhb2);
2714 if (!list_empty(&ch->tx_queue)) {
2715 /*
2716 * If remote TX flow control is enabled or
2717 * the channel is not fully opened, then only
2718 * send command packets.
2719 */
2720 if (ch->tx_flow_control || !IS_FULLY_OPENED(ch)) {
2721 struct smux_pkt_t *curr;
2722 list_for_each_entry(curr, &ch->tx_queue, list) {
2723 if (curr->hdr.cmd != SMUX_CMD_DATA) {
2724 pkt = curr;
2725 break;
2726 }
2727 }
2728 } else {
2729 /* get next cmd/data packet to send */
2730 pkt = list_first_entry(&ch->tx_queue,
2731 struct smux_pkt_t, list);
2732 }
2733 }
2734
2735 if (pkt) {
2736 list_del(&pkt->list);
2737
2738 /* update packet stats */
2739 if (pkt->hdr.cmd == SMUX_CMD_DATA) {
2740 --ch->tx_pending_data_cnt;
2741 if (ch->notify_lwm &&
2742 ch->tx_pending_data_cnt
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002743 <= SMUX_TX_WM_LOW) {
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002744 ch->notify_lwm = 0;
2745 low_wm_notif = 1;
2746 }
2747 }
2748
2749 /* advance to the next ready channel */
2750 list_rotate_left(&smux.lch_tx_ready_list);
2751 } else {
2752 /* no data in channel to send, remove from ready list */
2753 list_del(&ch->tx_ready_list);
2754 INIT_LIST_HEAD(&ch->tx_ready_list);
2755 }
2756 lcid = ch->lcid;
2757 spin_unlock(&ch->tx_lock_lhb2);
2758 spin_unlock(&ch->state_lock_lhb1);
2759 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2760
2761 if (low_wm_notif)
2762 schedule_notify(lcid, SMUX_LOW_WM_HIT, NULL);
2763
2764 /* send the packet */
2765 smux_tx_pkt(ch, pkt);
2766 smux_free_pkt(pkt);
2767 }
2768}
2769
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002770/**
2771 * Update the RX flow control (sent in the TIOCM Status command).
2772 *
2773 * @ch Channel for update
2774 *
2775 * @returns 1 for updated, 0 for not updated
2776 *
2777 * Must be called with ch->state_lock_lhb1 locked.
2778 */
2779static int smux_rx_flow_control_updated(struct smux_lch_t *ch)
2780{
2781 int updated = 0;
2782 int prev_state;
2783
2784 prev_state = ch->local_tiocm & SMUX_CMD_STATUS_FLOW_CNTL;
2785
2786 if (ch->rx_flow_control_client || ch->rx_flow_control_auto)
2787 ch->local_tiocm |= SMUX_CMD_STATUS_FLOW_CNTL;
2788 else
2789 ch->local_tiocm &= ~SMUX_CMD_STATUS_FLOW_CNTL;
2790
2791 if (prev_state != (ch->local_tiocm & SMUX_CMD_STATUS_FLOW_CNTL)) {
2792 smux_send_status_cmd(ch);
2793 updated = 1;
2794 }
2795
2796 return updated;
2797}
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002798
2799/**********************************************************************/
2800/* Kernel API */
2801/**********************************************************************/
2802
2803/**
2804 * Set or clear channel option using the SMUX_CH_OPTION_* channel
2805 * flags.
2806 *
2807 * @lcid Logical channel ID
2808 * @set Options to set
2809 * @clear Options to clear
2810 *
2811 * @returns 0 for success, < 0 for failure
2812 */
2813int msm_smux_set_ch_option(uint8_t lcid, uint32_t set, uint32_t clear)
2814{
2815 unsigned long flags;
2816 struct smux_lch_t *ch;
2817 int tx_ready = 0;
2818 int ret = 0;
2819
2820 if (smux_assert_lch_id(lcid))
2821 return -ENXIO;
2822
2823 ch = &smux_lch[lcid];
2824 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2825
2826 /* Local loopback mode */
2827 if (set & SMUX_CH_OPTION_LOCAL_LOOPBACK)
2828 ch->local_mode = SMUX_LCH_MODE_LOCAL_LOOPBACK;
2829
2830 if (clear & SMUX_CH_OPTION_LOCAL_LOOPBACK)
2831 ch->local_mode = SMUX_LCH_MODE_NORMAL;
2832
2833 /* Remote loopback mode */
2834 if (set & SMUX_CH_OPTION_REMOTE_LOOPBACK)
2835 ch->local_mode = SMUX_LCH_MODE_REMOTE_LOOPBACK;
2836
2837 if (clear & SMUX_CH_OPTION_REMOTE_LOOPBACK)
2838 ch->local_mode = SMUX_LCH_MODE_NORMAL;
2839
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002840 /* RX Flow control */
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002841 if (set & SMUX_CH_OPTION_REMOTE_TX_STOP) {
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002842 ch->rx_flow_control_client = 1;
2843 tx_ready |= smux_rx_flow_control_updated(ch);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002844 }
2845
2846 if (clear & SMUX_CH_OPTION_REMOTE_TX_STOP) {
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002847 ch->rx_flow_control_client = 0;
2848 tx_ready |= smux_rx_flow_control_updated(ch);
2849 }
2850
2851 /* Auto RX Flow Control */
2852 if (set & SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP) {
2853 SMUX_DBG("%s: auto rx flow control option enabled\n",
2854 __func__);
2855 ch->options |= SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP;
2856 }
2857
2858 if (clear & SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP) {
2859 SMUX_DBG("%s: auto rx flow control option disabled\n",
2860 __func__);
2861 ch->options &= ~SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP;
2862 ch->rx_flow_control_auto = 0;
2863 tx_ready |= smux_rx_flow_control_updated(ch);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002864 }
2865
2866 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2867
2868 if (tx_ready)
2869 list_channel(ch);
2870
2871 return ret;
2872}
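
/*
 * Illustrative usage (a sketch, not part of the driver; lcid is assumed to
 * be a valid open channel): place a channel into local loopback for testing
 * and later restore normal mode:
 *
 *	msm_smux_set_ch_option(lcid, SMUX_CH_OPTION_LOCAL_LOOPBACK, 0);
 *	...
 *	msm_smux_set_ch_option(lcid, 0, SMUX_CH_OPTION_LOCAL_LOOPBACK);
 */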
2873
2874/**
2875 * Starts the opening sequence for a logical channel.
2876 *
2877 * @lcid Logical channel ID
2878 * @priv Free for client usage
2879 * @notify Event notification function
2880 * @get_rx_buffer Function used to provide a receive buffer to SMUX
2881 *
2882 * @returns 0 for success, <0 otherwise
2883 *
2884 * A channel must be fully closed (either not previously opened or
2885 * msm_smux_close() has been called and the SMUX_DISCONNECTED notification
2886 * has been received).
2887 *
2888 * Once the remote side is opened, the client will receive a SMUX_CONNECTED
2889 * event.
2890 */
2891int msm_smux_open(uint8_t lcid, void *priv,
2892 void (*notify)(void *priv, int event_type, const void *metadata),
2893 int (*get_rx_buffer)(void *priv, void **pkt_priv, void **buffer,
2894 int size))
2895{
2896 int ret;
2897 struct smux_lch_t *ch;
2898 struct smux_pkt_t *pkt;
2899 int tx_ready = 0;
2900 unsigned long flags;
2901
2902 if (smux_assert_lch_id(lcid))
2903 return -ENXIO;
2904
2905 ch = &smux_lch[lcid];
2906 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2907
2908 if (ch->local_state == SMUX_LCH_LOCAL_CLOSING) {
2909 ret = -EAGAIN;
2910 goto out;
2911 }
2912
2913 if (ch->local_state != SMUX_LCH_LOCAL_CLOSED) {
2914 pr_err("%s: open lcid %d local state %x invalid\n",
2915 __func__, lcid, ch->local_state);
2916 ret = -EINVAL;
2917 goto out;
2918 }
2919
2920 SMUX_DBG("lcid %d local state 0x%x -> 0x%x\n", lcid,
2921 ch->local_state,
2922 SMUX_LCH_LOCAL_OPENING);
2923
2924 ch->local_state = SMUX_LCH_LOCAL_OPENING;
2925
2926 ch->priv = priv;
2927 ch->notify = notify;
2928 ch->get_rx_buffer = get_rx_buffer;
2929 ret = 0;
2930
2931 /* Send Open Command */
2932 pkt = smux_alloc_pkt();
2933 if (!pkt) {
2934 ret = -ENOMEM;
2935 goto out;
2936 }
2937 pkt->hdr.magic = SMUX_MAGIC;
2938 pkt->hdr.cmd = SMUX_CMD_OPEN_LCH;
2939 pkt->hdr.flags = SMUX_CMD_OPEN_POWER_COLLAPSE;
2940 if (ch->local_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK)
2941 pkt->hdr.flags |= SMUX_CMD_OPEN_REMOTE_LOOPBACK;
2942 pkt->hdr.lcid = lcid;
2943 pkt->hdr.payload_len = 0;
2944 pkt->hdr.pad_len = 0;
2945 smux_tx_queue(pkt, ch, 0);
2946 tx_ready = 1;
2947
2948out:
2949 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2950 if (tx_ready)
2951 list_channel(ch);
2952 return ret;
2953}
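
/*
 * Illustrative open sequence (a sketch, not part of the driver; my_lcid,
 * my_priv, my_notify(), and my_get_rx_buffer() are hypothetical client
 * definitions). The channel is only fully usable once the notify callback
 * delivers SMUX_CONNECTED:
 *
 *	ret = msm_smux_open(my_lcid, my_priv, my_notify, my_get_rx_buffer);
 *	if (ret < 0)
 *		pr_err("smux open failed %d\n", ret);
 */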
2954
2955/**
2956 * Starts the closing sequence for a logical channel.
2957 *
2958 * @lcid Logical channel ID
2959 *
2960 * @returns 0 for success, <0 otherwise
2961 *
2962 * Once the close event has been acknowledged by the remote side, the client
2963 * will receive a SMUX_DISCONNECTED notification.
2964 */
2965int msm_smux_close(uint8_t lcid)
2966{
2967 int ret = 0;
2968 struct smux_lch_t *ch;
2969 struct smux_pkt_t *pkt;
2970 int tx_ready = 0;
2971 unsigned long flags;
2972
2973 if (smux_assert_lch_id(lcid))
2974 return -ENXIO;
2975
2976 ch = &smux_lch[lcid];
2977 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2978 ch->local_tiocm = 0x0;
2979 ch->remote_tiocm = 0x0;
2980 ch->tx_pending_data_cnt = 0;
2981 ch->notify_lwm = 0;
2982
2983 /* Purge TX queue */
2984 spin_lock(&ch->tx_lock_lhb2);
Eric Holmberged1f00c2012-06-07 09:45:18 -06002985 smux_purge_ch_tx_queue(ch);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002986 spin_unlock(&ch->tx_lock_lhb2);
2987
2988 /* Send Close Command */
2989 if (ch->local_state == SMUX_LCH_LOCAL_OPENED ||
2990 ch->local_state == SMUX_LCH_LOCAL_OPENING) {
2991 SMUX_DBG("lcid %d local state 0x%x -> 0x%x\n", lcid,
2992 ch->local_state,
2993 SMUX_LCH_LOCAL_CLOSING);
2994
2995 ch->local_state = SMUX_LCH_LOCAL_CLOSING;
2996 pkt = smux_alloc_pkt();
2997 if (pkt) {
2998 pkt->hdr.cmd = SMUX_CMD_CLOSE_LCH;
2999 pkt->hdr.flags = 0;
3000 pkt->hdr.lcid = lcid;
3001 pkt->hdr.payload_len = 0;
3002 pkt->hdr.pad_len = 0;
3003 smux_tx_queue(pkt, ch, 0);
3004 tx_ready = 1;
3005 } else {
3006 pr_err("%s: pkt allocation failed\n", __func__);
3007 ret = -ENOMEM;
3008 }
Eric Holmbergb8435c82012-06-05 14:51:29 -06003009
3010 /* Purge RX retry queue */
3011 if (ch->rx_retry_queue_cnt)
3012 queue_delayed_work(smux_rx_wq, &ch->rx_retry_work, 0);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003013 }
3014 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
3015
3016 if (tx_ready)
3017 list_channel(ch);
3018
3019 return ret;
3020}
3021
3022/**
3023 * Write data to a logical channel.
3024 *
3025 * @lcid Logical channel ID
3026 * @pkt_priv Client data that will be returned with the SMUX_WRITE_DONE or
3027 * SMUX_WRITE_FAIL notification.
3028 * @data Data to write
3029 * @len Length of @data
3030 *
3031 * @returns 0 for success, <0 otherwise
3032 *
3033 * Data may be written immediately after msm_smux_open() is called,
3034 * but the data will wait in the transmit queue until the channel has
3035 * been fully opened.
3036 *
3037 * Once the data has been written, the client will receive either a completion
3038 * (SMUX_WRITE_DONE) or a failure notice (SMUX_WRITE_FAIL).
3039 */
3040int msm_smux_write(uint8_t lcid, void *pkt_priv, const void *data, int len)
3041{
3042 struct smux_lch_t *ch;
3043 struct smux_pkt_t *pkt;
3044 int tx_ready = 0;
3045 unsigned long flags;
3046 int ret;
3047
3048 if (smux_assert_lch_id(lcid))
3049 return -ENXIO;
3050
3051 ch = &smux_lch[lcid];
3052 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
3053
3054 if (ch->local_state != SMUX_LCH_LOCAL_OPENED &&
3055 ch->local_state != SMUX_LCH_LOCAL_OPENING) {
3056		pr_err("%s: invalid local state %d channel %d\n",
3057 __func__, ch->local_state, lcid);
3058 ret = -EINVAL;
3059 goto out;
3060 }
3061
3062 if (len > SMUX_MAX_PKT_SIZE - sizeof(struct smux_hdr_t)) {
3063 pr_err("%s: payload %d too large\n",
3064 __func__, len);
3065 ret = -E2BIG;
3066 goto out;
3067 }
3068
3069 pkt = smux_alloc_pkt();
3070 if (!pkt) {
3071 ret = -ENOMEM;
3072 goto out;
3073 }
3074
3075 pkt->hdr.cmd = SMUX_CMD_DATA;
3076 pkt->hdr.lcid = lcid;
3077 pkt->hdr.flags = 0;
3078 pkt->hdr.payload_len = len;
3079 pkt->payload = (void *)data;
3080 pkt->priv = pkt_priv;
3081 pkt->hdr.pad_len = 0;
3082
3083 spin_lock(&ch->tx_lock_lhb2);
3084 /* verify high watermark */
3085	SMUX_DBG("%s: pending %d\n", __func__, ch->tx_pending_data_cnt);
3086
Eric Holmberg2e0906f2012-06-26 13:29:14 -06003087 if (ch->tx_pending_data_cnt >= SMUX_TX_WM_HIGH) {
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003088 pr_err("%s: ch %d high watermark %d exceeded %d\n",
Eric Holmberg2e0906f2012-06-26 13:29:14 -06003089 __func__, lcid, SMUX_TX_WM_HIGH,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003090 ch->tx_pending_data_cnt);
3091 ret = -EAGAIN;
3092 goto out_inner;
3093 }
3094
3095 /* queue packet for transmit */
Eric Holmberg2e0906f2012-06-26 13:29:14 -06003096 if (++ch->tx_pending_data_cnt == SMUX_TX_WM_HIGH) {
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003097 ch->notify_lwm = 1;
3098 pr_err("%s: high watermark hit\n", __func__);
3099 schedule_notify(lcid, SMUX_HIGH_WM_HIT, NULL);
3100 }
3101 list_add_tail(&pkt->list, &ch->tx_queue);
3102
3103 /* add to ready list */
3104 if (IS_FULLY_OPENED(ch))
3105 tx_ready = 1;
3106
3107 ret = 0;
3108
3109out_inner:
3110 spin_unlock(&ch->tx_lock_lhb2);
3111
3112out:
3113 if (ret)
3114 smux_free_pkt(pkt);
3115 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
3116
3117 if (tx_ready)
3118 list_channel(ch);
3119
3120 return ret;
3121}
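
/*
 * Illustrative write sequence (a sketch, not part of the driver; my_lcid,
 * my_priv, my_buf, and my_len are hypothetical). The buffer must remain
 * valid until SMUX_WRITE_DONE or SMUX_WRITE_FAIL returns it; -EAGAIN means
 * the TX high watermark was hit, so retry after SMUX_LOW_WM_HIT:
 *
 *	ret = msm_smux_write(my_lcid, my_priv, my_buf, my_len);
 *	if (ret == -EAGAIN)
 *		pr_info("smux channel full, retry later\n");
 */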
3122
3123/**
3124 * Returns true if the TX queue is currently full (high water mark).
3125 *
3126 * @lcid Logical channel ID
3127 * @returns 0 if channel is not full
3128 * 1 if it is full
3129 * < 0 for error
3130 */
3131int msm_smux_is_ch_full(uint8_t lcid)
3132{
3133 struct smux_lch_t *ch;
3134 unsigned long flags;
3135 int is_full = 0;
3136
3137 if (smux_assert_lch_id(lcid))
3138 return -ENXIO;
3139
3140 ch = &smux_lch[lcid];
3141
3142 spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06003143 if (ch->tx_pending_data_cnt >= SMUX_TX_WM_HIGH)
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003144 is_full = 1;
3145 spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);
3146
3147 return is_full;
3148}
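
/*
 * Illustrative back-pressure check (a sketch; my_lcid, my_priv, my_buf,
 * and my_len are hypothetical):
 *
 *	if (msm_smux_is_ch_full(my_lcid) == 0)
 *		ret = msm_smux_write(my_lcid, my_priv, my_buf, my_len);
 */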
3149
3150/**
3151 * Returns true if the TX queue has space for more packets (it is at or
3152 * below the low water mark).
3153 *
3154 * @lcid Logical channel ID
3155 * @returns 0 if channel is above low watermark
3156 * 1 if it's at or below the low watermark
3157 * < 0 for error
3158 */
3159int msm_smux_is_ch_low(uint8_t lcid)
3160{
3161 struct smux_lch_t *ch;
3162 unsigned long flags;
3163 int is_low = 0;
3164
3165 if (smux_assert_lch_id(lcid))
3166 return -ENXIO;
3167
3168 ch = &smux_lch[lcid];
3169
3170 spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06003171 if (ch->tx_pending_data_cnt <= SMUX_TX_WM_LOW)
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003172 is_low = 1;
3173 spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);
3174
3175 return is_low;
3176}
3177
3178/**
3179 * Send TIOCM status update.
3180 *
3181 * @ch Channel for update
3182 *
3183 * @returns 0 for success, <0 for failure
3184 *
3185 * Channel lock must be held before calling.
3186 */
3187static int smux_send_status_cmd(struct smux_lch_t *ch)
3188{
3189 struct smux_pkt_t *pkt;
3190
3191 if (!ch)
3192 return -EINVAL;
3193
3194 pkt = smux_alloc_pkt();
3195 if (!pkt)
3196 return -ENOMEM;
3197
3198 pkt->hdr.lcid = ch->lcid;
3199 pkt->hdr.cmd = SMUX_CMD_STATUS;
3200 pkt->hdr.flags = ch->local_tiocm;
3201 pkt->hdr.payload_len = 0;
3202 pkt->hdr.pad_len = 0;
3203 smux_tx_queue(pkt, ch, 0);
3204
3205 return 0;
3206}
3207
3208/**
3209 * Internal helper function for getting the TIOCM status with
3210 * state_lock_lhb1 already locked.
3211 *
3212 * @ch Channel pointer
3213 *
3214 * @returns TIOCM status
3215 */
3216static long msm_smux_tiocm_get_atomic(struct smux_lch_t *ch)
3217{
3218 long status = 0x0;
3219
3220 status |= (ch->remote_tiocm & SMUX_CMD_STATUS_RTC) ? TIOCM_DSR : 0;
3221 status |= (ch->remote_tiocm & SMUX_CMD_STATUS_RTR) ? TIOCM_CTS : 0;
3222 status |= (ch->remote_tiocm & SMUX_CMD_STATUS_RI) ? TIOCM_RI : 0;
3223 status |= (ch->remote_tiocm & SMUX_CMD_STATUS_DCD) ? TIOCM_CD : 0;
3224
3225 status |= (ch->local_tiocm & SMUX_CMD_STATUS_RTC) ? TIOCM_DTR : 0;
3226 status |= (ch->local_tiocm & SMUX_CMD_STATUS_RTR) ? TIOCM_RTS : 0;
3227
3228 return status;
3229}
3230
3231/**
3232 * Get the TIOCM status bits.
3233 *
3234 * @lcid Logical channel ID
3235 *
3236 * @returns >= 0 TIOCM status bits
3237 * < 0 Error condition
3238 */
3239long msm_smux_tiocm_get(uint8_t lcid)
3240{
3241 struct smux_lch_t *ch;
3242 unsigned long flags;
3243 long status = 0x0;
3244
3245 if (smux_assert_lch_id(lcid))
3246 return -ENXIO;
3247
3248 ch = &smux_lch[lcid];
3249 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
3250 status = msm_smux_tiocm_get_atomic(ch);
3251 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
3252
3253 return status;
3254}
3255
3256/**
3257 * Set/clear the TIOCM status bits.
3258 *
3259 * @lcid Logical channel ID
3260 * @set Bits to set
3261 * @clear Bits to clear
3262 *
3263 * @returns 0 for success; < 0 for failure
3264 *
3265 * If a bit is specified in both the @set and @clear masks, then the clear bit
3266 * definition will dominate and the bit will be cleared.
3267 */
3268int msm_smux_tiocm_set(uint8_t lcid, uint32_t set, uint32_t clear)
3269{
3270 struct smux_lch_t *ch;
3271 unsigned long flags;
3272 uint8_t old_status;
3273 uint8_t status_set = 0x0;
3274 uint8_t status_clear = 0x0;
3275 int tx_ready = 0;
3276 int ret = 0;
3277
3278 if (smux_assert_lch_id(lcid))
3279 return -ENXIO;
3280
3281 ch = &smux_lch[lcid];
3282 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
3283
3284 status_set |= (set & TIOCM_DTR) ? SMUX_CMD_STATUS_RTC : 0;
3285 status_set |= (set & TIOCM_RTS) ? SMUX_CMD_STATUS_RTR : 0;
3286 status_set |= (set & TIOCM_RI) ? SMUX_CMD_STATUS_RI : 0;
3287 status_set |= (set & TIOCM_CD) ? SMUX_CMD_STATUS_DCD : 0;
3288
3289 status_clear |= (clear & TIOCM_DTR) ? SMUX_CMD_STATUS_RTC : 0;
3290 status_clear |= (clear & TIOCM_RTS) ? SMUX_CMD_STATUS_RTR : 0;
3291 status_clear |= (clear & TIOCM_RI) ? SMUX_CMD_STATUS_RI : 0;
3292 status_clear |= (clear & TIOCM_CD) ? SMUX_CMD_STATUS_DCD : 0;
3293
3294 old_status = ch->local_tiocm;
3295 ch->local_tiocm |= status_set;
3296 ch->local_tiocm &= ~status_clear;
3297
3298 if (ch->local_tiocm != old_status) {
3299 ret = smux_send_status_cmd(ch);
3300 tx_ready = 1;
3301 }
3302 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
3303
3304 if (tx_ready)
3305 list_channel(ch);
3306
3307 return ret;
3308}
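
/*
 * Illustrative usage (a sketch; lcid is assumed to be a valid open
 * channel): assert DTR and drop RTS in one call - the set and clear masks
 * may be combined, with clear winning on any overlap:
 *
 *	msm_smux_tiocm_set(lcid, TIOCM_DTR, TIOCM_RTS);
 */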
3309
3310/**********************************************************************/
Eric Holmberged1f00c2012-06-07 09:45:18 -06003311/* Subsystem Restart */
3312/**********************************************************************/
3313static struct notifier_block ssr_notifier = {
3314 .notifier_call = ssr_notifier_cb,
3315};
3316
3317/**
3318 * Handle Subsystem Restart (SSR) notifications.
3319 *
3320 * @this Pointer to ssr_notifier
3321 * @code SSR Code
3322 * @data Data pointer (not used)
3323 */
3324static int ssr_notifier_cb(struct notifier_block *this,
3325 unsigned long code,
3326 void *data)
3327{
3328 unsigned long flags;
3329 int power_off_uart = 0;
3330
Eric Holmbergd2697902012-06-15 09:58:46 -06003331 if (code == SUBSYS_BEFORE_SHUTDOWN) {
3332 SMUX_DBG("%s: ssr - before shutdown\n", __func__);
3333 mutex_lock(&smux.mutex_lha0);
3334 smux.in_reset = 1;
3335 mutex_unlock(&smux.mutex_lha0);
Eric Holmberged1f00c2012-06-07 09:45:18 -06003336 return NOTIFY_DONE;
Eric Holmbergd2697902012-06-15 09:58:46 -06003337 } else if (code != SUBSYS_AFTER_SHUTDOWN) {
3338 return NOTIFY_DONE;
3339 }
3340 SMUX_DBG("%s: ssr - after shutdown\n", __func__);
Eric Holmberged1f00c2012-06-07 09:45:18 -06003341
3342 /* Cleanup channels */
Eric Holmbergd2697902012-06-15 09:58:46 -06003343 mutex_lock(&smux.mutex_lha0);
Eric Holmberged1f00c2012-06-07 09:45:18 -06003344 smux_lch_purge();
Eric Holmbergd2697902012-06-15 09:58:46 -06003345 if (smux.tty)
3346 tty_driver_flush_buffer(smux.tty);
Eric Holmberged1f00c2012-06-07 09:45:18 -06003347
3348 /* Power-down UART */
3349 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
3350 if (smux.power_state != SMUX_PWR_OFF) {
Eric Holmbergff0b0112012-06-08 15:06:57 -06003351 SMUX_PWR("%s: SSR - turning off UART\n", __func__);
Eric Holmberged1f00c2012-06-07 09:45:18 -06003352 smux.power_state = SMUX_PWR_OFF;
3353 power_off_uart = 1;
3354 }
Eric Holmbergd2697902012-06-15 09:58:46 -06003355 smux.powerdown_enabled = 0;
Eric Holmberged1f00c2012-06-07 09:45:18 -06003356 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
3357
3358 if (power_off_uart)
3359 smux_uart_power_off();
3360
Eric Holmbergd2697902012-06-15 09:58:46 -06003361 smux.in_reset = 0;
3362 mutex_unlock(&smux.mutex_lha0);
3363
Eric Holmberged1f00c2012-06-07 09:45:18 -06003364 return NOTIFY_DONE;
3365}
3366
3367/**********************************************************************/
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003368/* Line Discipline Interface */
3369/**********************************************************************/
Eric Holmberged1f00c2012-06-07 09:45:18 -06003370static void smux_pdev_release(struct device *dev)
3371{
3372 struct platform_device *pdev;
3373
3374 pdev = container_of(dev, struct platform_device, dev);
3375 SMUX_DBG("%s: releasing pdev %p '%s'\n", __func__, pdev, pdev->name);
3376 memset(&pdev->dev, 0x0, sizeof(pdev->dev));
3377}
3378
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003379static int smuxld_open(struct tty_struct *tty)
3380{
3381 int i;
3382 int tmp;
3383 unsigned long flags;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003384
3385 if (!smux.is_initialized)
3386 return -ENODEV;
3387
Eric Holmberged1f00c2012-06-07 09:45:18 -06003388 mutex_lock(&smux.mutex_lha0);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003389 if (smux.ld_open_count) {
3390 pr_err("%s: %p multiple instances not supported\n",
3391 __func__, tty);
Eric Holmberged1f00c2012-06-07 09:45:18 -06003392 mutex_unlock(&smux.mutex_lha0);
Eric Holmberg902c51e2012-05-29 12:12:16 -06003393 return -EEXIST;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003394 }
3395
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003396 if (tty->ops->write == NULL) {
Eric Holmberged1f00c2012-06-07 09:45:18 -06003397		pr_err("%s: tty->ops->write is NULL\n", __func__);
3398 mutex_unlock(&smux.mutex_lha0);
Eric Holmberg902c51e2012-05-29 12:12:16 -06003399 return -EINVAL;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003400 }
3401
3402 /* connect to TTY */
Eric Holmberged1f00c2012-06-07 09:45:18 -06003403 ++smux.ld_open_count;
3404 smux.in_reset = 0;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003405 smux.tty = tty;
3406 tty->disc_data = &smux;
3407 tty->receive_room = TTY_RECEIVE_ROOM;
	tty_driver_flush_buffer(tty);

	/* power-down the UART if we are idle */
	spin_lock_irqsave(&smux.tx_lock_lha2, flags);
	if (smux.power_state == SMUX_PWR_OFF) {
		SMUX_PWR("%s: powering off uart\n", __func__);
		smux.power_state = SMUX_PWR_OFF_FLUSH;
		spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
		queue_work(smux_tx_wq, &smux_inactivity_work);
	} else {
		spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
	}

	/* register platform devices */
	for (i = 0; i < ARRAY_SIZE(smux_devs); ++i) {
		SMUX_DBG("%s: register pdev '%s'\n",
				__func__, smux_devs[i].name);
		smux_devs[i].dev.release = smux_pdev_release;
		tmp = platform_device_register(&smux_devs[i]);
		if (tmp)
			pr_err("%s: error %d registering device %s\n",
					__func__, tmp, smux_devs[i].name);
	}
	mutex_unlock(&smux.mutex_lha0);
	return 0;
}

static void smuxld_close(struct tty_struct *tty)
{
	unsigned long flags;
	int power_up_uart = 0;
	int i;

	SMUX_DBG("%s: ldisc unload\n", __func__);
	mutex_lock(&smux.mutex_lha0);
	if (smux.ld_open_count <= 0) {
		pr_err("%s: invalid ld count %d\n", __func__,
			smux.ld_open_count);
		mutex_unlock(&smux.mutex_lha0);
		return;
	}
	smux.in_reset = 1;
	--smux.ld_open_count;

	/* Cleanup channels */
	smux_lch_purge();

	/* Unregister platform devices */
	for (i = 0; i < ARRAY_SIZE(smux_devs); ++i) {
		SMUX_DBG("%s: unregister pdev '%s'\n",
				__func__, smux_devs[i].name);
		platform_device_unregister(&smux_devs[i]);
	}

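	/*
	 * Assumption from the power-vote design: smux may have voted the
	 * UART clocks off while idle, so the UART is powered back on here
	 * to leave the TTY usable for whatever attaches next.
	 */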
	/* Schedule UART power-up if it's down */
	spin_lock_irqsave(&smux.tx_lock_lha2, flags);
	if (smux.power_state == SMUX_PWR_OFF)
		power_up_uart = 1;
	smux.power_state = SMUX_PWR_OFF;
	smux.powerdown_enabled = 0;
	spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);

	if (power_up_uart)
		smux_uart_power_on_atomic();

	/* Disconnect from TTY */
	smux.tty = NULL;
	mutex_unlock(&smux.mutex_lha0);
	SMUX_DBG("%s: ldisc complete\n", __func__);
}

/**
 * Receive data from the TTY driver.
 *
 * @tty   TTY structure
 * @cp    Character data
 * @fp    Flag data (one flag per character; may be NULL)
 * @count Size of character and flag data
 */
void smuxld_receive_buf(struct tty_struct *tty, const unsigned char *cp,
			char *fp, int count)
{
	int i;
	int last_idx = 0;
	const char *tty_name = NULL;
	char *f;

	if (smux_debug_mask & MSM_SMUX_DEBUG)
		print_hex_dump(KERN_INFO, "smux tty rx: ", DUMP_PREFIX_OFFSET,
				16, 1, cp, count, true);

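	/*
	 * Worked example: for cp = {A, B, C, D} and fp = {N, N, E, N}
	 * (N = TTY_NORMAL, E = an error flag), the loop below feeds the
	 * parser three runs: {A, B} as TTY_NORMAL, {C} with flag E, and
	 * finally {D} as TTY_NORMAL via the call after the loop.
	 */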
	/* verify error flags (fp may be NULL if no flags were supplied) */
	for (i = 0, f = fp; fp && i < count; ++i, ++f) {
		if (*f != TTY_NORMAL) {
			if (tty)
				tty_name = tty->name;
			pr_err("%s: TTY %s Error %d (%s)\n", __func__,
				tty_name, *f, tty_flag_to_str(*f));

			/* feed all previous valid data to the parser */
			smux_rx_state_machine(cp + last_idx, i - last_idx,
					TTY_NORMAL);

			/* feed bad data to parser */
			smux_rx_state_machine(cp + i, 1, *f);
			last_idx = i + 1;
		}
	}

	/* feed data to RX state machine */
	smux_rx_state_machine(cp + last_idx, count - last_idx, TTY_NORMAL);
}

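/*
 * The remaining ldisc entry points are unsupported by design: clients
 * use the kernel smux API instead of the TTY file interface, so these
 * hooks only log an error.
 */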
static void smuxld_flush_buffer(struct tty_struct *tty)
{
	pr_err("%s: not supported\n", __func__);
}

static ssize_t smuxld_chars_in_buffer(struct tty_struct *tty)
{
	pr_err("%s: not supported\n", __func__);
	return -ENODEV;
}

static ssize_t smuxld_read(struct tty_struct *tty, struct file *file,
		unsigned char __user *buf, size_t nr)
{
	pr_err("%s: not supported\n", __func__);
	return -ENODEV;
}

static ssize_t smuxld_write(struct tty_struct *tty, struct file *file,
		const unsigned char *buf, size_t nr)
{
	pr_err("%s: not supported\n", __func__);
	return -ENODEV;
}

static int smuxld_ioctl(struct tty_struct *tty, struct file *file,
		unsigned int cmd, unsigned long arg)
{
	pr_err("%s: not supported\n", __func__);
	return -ENODEV;
}

static unsigned int smuxld_poll(struct tty_struct *tty, struct file *file,
		struct poll_table_struct *tbl)
{
	pr_err("%s: not supported\n", __func__);
	return -ENODEV;
}

static void smuxld_write_wakeup(struct tty_struct *tty)
{
	pr_err("%s: not supported\n", __func__);
}

static struct tty_ldisc_ops smux_ldisc_ops = {
	.owner = THIS_MODULE,
	.magic = TTY_LDISC_MAGIC,
	.name = "n_smux",
	.open = smuxld_open,
	.close = smuxld_close,
	.flush_buffer = smuxld_flush_buffer,
	.chars_in_buffer = smuxld_chars_in_buffer,
	.read = smuxld_read,
	.write = smuxld_write,
	.ioctl = smuxld_ioctl,
	.poll = smuxld_poll,
	.receive_buf = smuxld_receive_buf,
	.write_wakeup = smuxld_write_wakeup
};

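/*
 * Client-side sketch (illustrative only): once the line discipline is
 * attached, kernel clients talk to smux through the msm_smux_* API from
 * <linux/smux.h> rather than through the ldisc ops above. MY_LCID and
 * the callbacks are hypothetical names supplied by the client:
 *
 *	static void my_notify(void *priv, int event, const void *metadata)
 *	{
 *		if (event == SMUX_CONNECTED)
 *			pr_info("smux client: channel connected\n");
 *	}
 *
 *	ret = msm_smux_open(MY_LCID, priv, my_notify, my_get_rx_buffer);
 *	if (!ret)
 *		ret = msm_smux_write(MY_LCID, pkt_priv, buf, len);
 */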
static int __init smux_init(void)
{
	int ret;

	mutex_init(&smux.mutex_lha0);

	spin_lock_init(&smux.rx_lock_lha1);
	smux.rx_state = SMUX_RX_IDLE;
	smux.power_state = SMUX_PWR_OFF;
	smux.pwr_wakeup_delay_us = 1;
	smux.powerdown_enabled = 0;
	smux.power_ctl_remote_req_received = 0;
	INIT_LIST_HEAD(&smux.power_queue);
	smux.rx_activity_flag = 0;
	smux.tx_activity_flag = 0;
	smux.recv_len = 0;
	smux.tty = NULL;
	smux.ld_open_count = 0;
	smux.in_reset = 0;
	smux.is_initialized = 1;
	smux_byte_loopback = 0;

	spin_lock_init(&smux.tx_lock_lha2);
	INIT_LIST_HEAD(&smux.lch_tx_ready_list);

	ret = tty_register_ldisc(N_SMUX, &smux_ldisc_ops);
	if (ret != 0) {
		pr_err("%s: error %d registering line discipline\n",
				__func__, ret);
		return ret;
	}

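	/*
	 * Register for restart notifications from the external modem. The
	 * returned handle is not kept, so the notifier stays registered
	 * for the life of the kernel (smux_exit() does not unregister it).
	 */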
	subsys_notif_register_notifier("external_modem", &ssr_notifier);

	ret = lch_init();
	if (ret != 0) {
		pr_err("%s: lch_init failed\n", __func__);
		/* unwind the ldisc registration on failure */
		tty_unregister_ldisc(N_SMUX);
		return ret;
	}

	return 0;
}

static void __exit smux_exit(void)
{
	int ret;

	ret = tty_unregister_ldisc(N_SMUX);
	if (ret != 0) {
		pr_err("%s: error %d unregistering line discipline\n",
			__func__, ret);
		return;
	}
}

module_init(smux_init);
module_exit(smux_exit);

MODULE_DESCRIPTION("Serial Mux TTY Line Discipline");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_LDISC(N_SMUX);