/* drivers/tty/n_smux.c
 *
 * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/tty_driver.h>
#include <linux/smux.h>
#include <linux/list.h>
#include <linux/kfifo.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <mach/subsystem_notif.h>
#include <mach/subsystem_restart.h>
#include <mach/msm_serial_hs.h>
#include "smux_private.h"
#include "smux_loopback.h"

#define SMUX_NOTIFY_FIFO_SIZE	128
#define SMUX_TX_QUEUE_SIZE	256
#define SMUX_PKT_LOG_SIZE	80

/* Maximum size we can accept in a single RX buffer */
#define TTY_RECEIVE_ROOM	65536
#define TTY_BUFFER_FULL_WAIT_MS	50

/* maximum sleep time between wakeup attempts */
#define SMUX_WAKEUP_DELAY_MAX	(1 << 20)

/* minimum delay for scheduling delayed work */
#define SMUX_WAKEUP_DELAY_MIN	(1 << 15)

/* inactivity timeout for no rx/tx activity */
#define SMUX_INACTIVITY_TIMEOUT_MS	1000000

/* RX get_rx_buffer retry timeout values */
#define SMUX_RX_RETRY_MIN_MS	(1 << 0)	/* 1 ms */
#define SMUX_RX_RETRY_MAX_MS	(1 << 10)	/* 1024 ms */

enum {
	MSM_SMUX_DEBUG = 1U << 0,
	MSM_SMUX_INFO = 1U << 1,
	MSM_SMUX_POWER_INFO = 1U << 2,
	MSM_SMUX_PKT = 1U << 3,
};

static int smux_debug_mask;
module_param_named(debug_mask, smux_debug_mask,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);

/* Simulated wakeup used for testing */
int smux_byte_loopback;
module_param_named(byte_loopback, smux_byte_loopback,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);
int smux_simulate_wakeup_delay = 1;
module_param_named(simulate_wakeup_delay, smux_simulate_wakeup_delay,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);
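
/*
 * The debug masks above can be combined and changed at runtime through
 * the module parameter (path is an illustrative sketch that assumes the
 * driver is built as module "n_smux" and sysfs is mounted at /sys):
 *
 *	echo 0x5 > /sys/module/n_smux/parameters/debug_mask
 *
 * 0x5 enables MSM_SMUX_DEBUG and MSM_SMUX_POWER_INFO logging.
 */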

#define SMUX_DBG(x...) do {                     \
	if (smux_debug_mask & MSM_SMUX_DEBUG)   \
		pr_info(x);                     \
} while (0)

#define SMUX_PWR(x...) do {                          \
	if (smux_debug_mask & MSM_SMUX_POWER_INFO)   \
		pr_info(x);                          \
} while (0)

#define SMUX_PWR_PKT_RX(pkt) do {                    \
	if (smux_debug_mask & MSM_SMUX_POWER_INFO)   \
		smux_log_pkt(pkt, 1);                \
} while (0)

#define SMUX_PWR_PKT_TX(pkt) do {                            \
	if (smux_debug_mask & MSM_SMUX_POWER_INFO) {         \
		if (pkt->hdr.cmd == SMUX_CMD_BYTE &&         \
			pkt->hdr.flags == SMUX_WAKEUP_ACK)   \
			pr_info("smux: TX Wakeup ACK\n");    \
		else if (pkt->hdr.cmd == SMUX_CMD_BYTE &&    \
			pkt->hdr.flags == SMUX_WAKEUP_REQ)   \
			pr_info("smux: TX Wakeup REQ\n");    \
		else                                         \
			smux_log_pkt(pkt, 0);                \
	}                                                    \
} while (0)

#define SMUX_PWR_BYTE_TX(pkt) do {                   \
	if (smux_debug_mask & MSM_SMUX_POWER_INFO) { \
		smux_log_pkt(pkt, 0);                \
	}                                            \
} while (0)

#define SMUX_LOG_PKT_RX(pkt) do {           \
	if (smux_debug_mask & MSM_SMUX_PKT) \
		smux_log_pkt(pkt, 1);       \
} while (0)

#define SMUX_LOG_PKT_TX(pkt) do {           \
	if (smux_debug_mask & MSM_SMUX_PKT) \
		smux_log_pkt(pkt, 0);       \
} while (0)

/**
 * Return true if channel is fully opened (both
 * local and remote sides are in the OPENED state).
 */
#define IS_FULLY_OPENED(ch) \
	(ch && (ch)->local_state == SMUX_LCH_LOCAL_OPENED \
	 && (ch)->remote_state == SMUX_LCH_REMOTE_OPENED)

static struct platform_device smux_devs[] = {
	{.name = "SMUX_CTL", .id = -1},
	{.name = "SMUX_RMNET", .id = -1},
	{.name = "SMUX_DUN_DATA_HSUART", .id = 0},
	{.name = "SMUX_RMNET_DATA_HSUART", .id = 1},
	{.name = "SMUX_RMNET_CTL_HSUART", .id = 0},
	{.name = "SMUX_DIAG", .id = -1},
};

enum {
	SMUX_CMD_STATUS_RTC = 1 << 0,
	SMUX_CMD_STATUS_RTR = 1 << 1,
	SMUX_CMD_STATUS_RI = 1 << 2,
	SMUX_CMD_STATUS_DCD = 1 << 3,
	SMUX_CMD_STATUS_FLOW_CNTL = 1 << 4,
};

/* Channel mode */
enum {
	SMUX_LCH_MODE_NORMAL,
	SMUX_LCH_MODE_LOCAL_LOOPBACK,
	SMUX_LCH_MODE_REMOTE_LOOPBACK,
};

enum {
	SMUX_RX_IDLE,
	SMUX_RX_MAGIC,
	SMUX_RX_HDR,
	SMUX_RX_PAYLOAD,
	SMUX_RX_FAILURE,
};
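
/*
 * Rough sketch of the RX framing flow implied by the states above (the
 * state machine itself runs in smux_rx_worker, outside this excerpt):
 *
 *	IDLE -> MAGIC -> HDR -> PAYLOAD -> back to IDLE
 *
 * with FAILURE entered on a framing error until the receiver can
 * resynchronize on the next magic sequence.
 */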

/**
 * Power states.
 *
 * The _FLUSH states are internal transitional states and are not part of the
 * official state machine.
 */
enum {
	SMUX_PWR_OFF,
	SMUX_PWR_TURNING_ON,
	SMUX_PWR_ON,
	SMUX_PWR_TURNING_OFF_FLUSH, /* power-off req/ack in TX queue */
	SMUX_PWR_TURNING_OFF,
	SMUX_PWR_OFF_FLUSH,
};
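
/*
 * Typical transition sequence, pieced together from the handlers below
 * (illustrative only; the TX and inactivity workers drive the rest):
 *
 *	OFF -> TURNING_ON -> ON
 *	ON -> TURNING_OFF_FLUSH -> TURNING_OFF -> OFF_FLUSH -> OFF
 */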

/**
 * Logical Channel Structure. One instance per channel.
 *
 * Locking Hierarchy
 * Each lock has a suffix that describes its locking level. If multiple locks
 * are required, they must be acquired in increasing lock-hierarchy order,
 * which avoids deadlock.
 *
 * Locking Example
 * If state_lock_lhb1 is currently held and the TX list needs to be
 * manipulated, then tx_lock_lhb2 may be locked since its locking hierarchy
 * is greater. However, if tx_lock_lhb2 is held, then state_lock_lhb1 may
 * not be acquired since it would result in a deadlock.
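 *
 * For instance, the correct nesting (an illustrative sketch that mirrors
 * the pattern used in smux_lch_purge below) is:
 *
 *	spin_lock_irqsave(&ch->state_lock_lhb1, flags);
 *	spin_lock(&ch->tx_lock_lhb2);
 *	...
 *	spin_unlock(&ch->tx_lock_lhb2);
 *	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);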
 *
 * Note that the Line Discipline locks (*_lha) should always be acquired
 * before the logical channel locks.
 */
struct smux_lch_t {
	/* channel state */
	spinlock_t state_lock_lhb1;
	uint8_t lcid;
	unsigned local_state;
	unsigned local_mode;
	uint8_t local_tiocm;
	unsigned options;

	unsigned remote_state;
	unsigned remote_mode;
	uint8_t remote_tiocm;

	int tx_flow_control;
	int rx_flow_control_auto;
	int rx_flow_control_client;

	/* client callbacks and private data */
	void *priv;
	void (*notify)(void *priv, int event_type, const void *metadata);
	int (*get_rx_buffer)(void *priv, void **pkt_priv, void **buffer,
							int size);

	/* RX Info */
	struct list_head rx_retry_queue;
	unsigned rx_retry_queue_cnt;
	struct delayed_work rx_retry_work;

	/* TX Info */
	spinlock_t tx_lock_lhb2;
	struct list_head tx_queue;
	struct list_head tx_ready_list;
	unsigned tx_pending_data_cnt;
	unsigned notify_lwm;
};

union notifier_metadata {
	struct smux_meta_disconnected disconnected;
	struct smux_meta_read read;
	struct smux_meta_write write;
	struct smux_meta_tiocm tiocm;
};

struct smux_notify_handle {
	void (*notify)(void *priv, int event_type, const void *metadata);
	void *priv;
	int event_type;
	union notifier_metadata *metadata;
};

/**
 * Get RX Buffer Retry structure.
 *
 * Used for clients that cannot provide an RX buffer immediately. This
 * structure temporarily holds the packet data while retries are
 * scheduled.
 */
struct smux_rx_pkt_retry {
	struct smux_pkt_t *pkt;
	struct list_head rx_retry_list;
	unsigned timeout_in_ms;
};

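/*
 * The retry delay starts at SMUX_RX_RETRY_MIN_MS and is presumably
 * scaled up toward SMUX_RX_RETRY_MAX_MS by smux_rx_retry_worker (not
 * shown in this excerpt) while the client keeps failing to supply a
 * buffer.
 */
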
/**
 * Receive worker data structure.
 *
 * One instance is created for every call to smux_rx_state_machine.
 */
struct smux_rx_worker_data {
	const unsigned char *data;
	int len;
	int flag;

	struct work_struct work;
	struct completion work_complete;
};

/**
 * Line discipline and module structure.
 *
 * Only one instance since multiple instances of the line discipline are not
 * allowed.
 */
struct smux_ldisc_t {
	struct mutex mutex_lha0;

	int is_initialized;
	int in_reset;
	int ld_open_count;
	struct tty_struct *tty;

	/* RX State Machine (single-threaded access by smux_rx_wq) */
	unsigned char recv_buf[SMUX_MAX_PKT_SIZE];
	unsigned int recv_len;
	unsigned int pkt_remain;
	unsigned rx_state;

	/* RX Activity - accessed by multiple threads */
	spinlock_t rx_lock_lha1;
	unsigned rx_activity_flag;

	/* TX / Power */
	spinlock_t tx_lock_lha2;
	struct list_head lch_tx_ready_list;
	unsigned power_state;
	unsigned pwr_wakeup_delay_us;
	unsigned tx_activity_flag;
	unsigned powerdown_enabled;
	unsigned power_ctl_remote_req_received;
	struct list_head power_queue;
};


/* data structures */
static struct smux_lch_t smux_lch[SMUX_NUM_LOGICAL_CHANNELS];
static struct smux_ldisc_t smux;
static const char *tty_error_type[] = {
	[TTY_NORMAL] = "normal",
	[TTY_OVERRUN] = "overrun",
	[TTY_BREAK] = "break",
	[TTY_PARITY] = "parity",
	[TTY_FRAME] = "framing",
};

static const char *smux_cmds[] = {
	[SMUX_CMD_DATA] = "DATA",
	[SMUX_CMD_OPEN_LCH] = "OPEN",
	[SMUX_CMD_CLOSE_LCH] = "CLOSE",
	[SMUX_CMD_STATUS] = "STATUS",
	[SMUX_CMD_PWR_CTL] = "PWR",
	[SMUX_CMD_BYTE] = "Raw Byte",
};

static void smux_notify_local_fn(struct work_struct *work);
static DECLARE_WORK(smux_notify_local, smux_notify_local_fn);

static struct workqueue_struct *smux_notify_wq;
static size_t handle_size;
static struct kfifo smux_notify_fifo;
static int queued_fifo_notifications;
static DEFINE_SPINLOCK(notify_lock_lhc1);

static struct workqueue_struct *smux_tx_wq;
static struct workqueue_struct *smux_rx_wq;
static void smux_tx_worker(struct work_struct *work);
static DECLARE_WORK(smux_tx_work, smux_tx_worker);

static void smux_wakeup_worker(struct work_struct *work);
static void smux_rx_retry_worker(struct work_struct *work);
static void smux_rx_worker(struct work_struct *work);
static DECLARE_WORK(smux_wakeup_work, smux_wakeup_worker);
static DECLARE_DELAYED_WORK(smux_wakeup_delayed_work, smux_wakeup_worker);

static void smux_inactivity_worker(struct work_struct *work);
static DECLARE_WORK(smux_inactivity_work, smux_inactivity_worker);
static DECLARE_DELAYED_WORK(smux_delayed_inactivity_work,
		smux_inactivity_worker);

static long msm_smux_tiocm_get_atomic(struct smux_lch_t *ch);
static void list_channel(struct smux_lch_t *ch);
static int smux_send_status_cmd(struct smux_lch_t *ch);
static int smux_dispatch_rx_pkt(struct smux_pkt_t *pkt);
static void smux_flush_tty(void);
static void smux_purge_ch_tx_queue(struct smux_lch_t *ch);
static int schedule_notify(uint8_t lcid, int event,
			const union notifier_metadata *metadata);
static int ssr_notifier_cb(struct notifier_block *this,
				unsigned long code,
				void *data);
static void smux_uart_power_on_atomic(void);
static int smux_rx_flow_control_updated(struct smux_lch_t *ch);

/**
 * Convert TTY Error Flags to string for logging purposes.
 *
 * @flag TTY_* flag
 * @returns String description or NULL if unknown
 */
static const char *tty_flag_to_str(unsigned flag)
{
	if (flag < ARRAY_SIZE(tty_error_type))
		return tty_error_type[flag];
	return NULL;
}

/**
 * Convert SMUX Command to string for logging purposes.
 *
 * @cmd SMUX command
 * @returns String description or NULL if unknown
 */
static const char *cmd_to_str(unsigned cmd)
{
	if (cmd < ARRAY_SIZE(smux_cmds))
		return smux_cmds[cmd];
	return NULL;
}

/**
 * Set the reset state due to an unrecoverable failure.
 */
static void smux_enter_reset(void)
{
	pr_err("%s: unrecoverable failure, waiting for ssr\n", __func__);
	smux.in_reset = 1;
}

static int lch_init(void)
{
	unsigned int id;
	struct smux_lch_t *ch;
	int i = 0;

	handle_size = sizeof(struct smux_notify_handle *);

	smux_notify_wq = create_singlethread_workqueue("smux_notify_wq");
	smux_tx_wq = create_singlethread_workqueue("smux_tx_wq");
	smux_rx_wq = create_singlethread_workqueue("smux_rx_wq");

	/* create_singlethread_workqueue() returns NULL on failure */
	if (!smux_notify_wq || !smux_tx_wq || !smux_rx_wq) {
		SMUX_DBG("%s: create_singlethread_workqueue ENOMEM\n",
				__func__);
		return -ENOMEM;
	}

	i |= kfifo_alloc(&smux_notify_fifo,
			SMUX_NOTIFY_FIFO_SIZE * handle_size,
			GFP_KERNEL);
	i |= smux_loopback_init();

	if (i) {
		pr_err("%s: out of memory error\n", __func__);
		return -ENOMEM;
	}

	for (id = 0 ; id < SMUX_NUM_LOGICAL_CHANNELS; id++) {
		ch = &smux_lch[id];

		spin_lock_init(&ch->state_lock_lhb1);
		ch->lcid = id;
		ch->local_state = SMUX_LCH_LOCAL_CLOSED;
		ch->local_mode = SMUX_LCH_MODE_NORMAL;
		ch->local_tiocm = 0x0;
		ch->options = SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP;
		ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
		ch->remote_mode = SMUX_LCH_MODE_NORMAL;
		ch->remote_tiocm = 0x0;
		ch->tx_flow_control = 0;
		ch->rx_flow_control_auto = 0;
		ch->rx_flow_control_client = 0;
		ch->priv = 0;
		ch->notify = 0;
		ch->get_rx_buffer = 0;

		INIT_LIST_HEAD(&ch->rx_retry_queue);
		ch->rx_retry_queue_cnt = 0;
		INIT_DELAYED_WORK(&ch->rx_retry_work, smux_rx_retry_worker);

		spin_lock_init(&ch->tx_lock_lhb2);
		INIT_LIST_HEAD(&ch->tx_queue);
		INIT_LIST_HEAD(&ch->tx_ready_list);
		ch->tx_pending_data_cnt = 0;
		ch->notify_lwm = 0;
	}

	return 0;
}

/**
 * Empty and cleanup all SMUX logical channels for subsystem restart or line
 * discipline disconnect.
 */
static void smux_lch_purge(void)
{
	struct smux_lch_t *ch;
	unsigned long flags;
	int i;

	/* Empty TX ready list */
	spin_lock_irqsave(&smux.tx_lock_lha2, flags);
	while (!list_empty(&smux.lch_tx_ready_list)) {
		SMUX_DBG("%s: emptying ready list %p\n",
				__func__, smux.lch_tx_ready_list.next);
		ch = list_first_entry(&smux.lch_tx_ready_list,
						struct smux_lch_t,
						tx_ready_list);
		list_del(&ch->tx_ready_list);
		INIT_LIST_HEAD(&ch->tx_ready_list);
	}

	/* Purge Power Queue */
	while (!list_empty(&smux.power_queue)) {
		struct smux_pkt_t *pkt;

		pkt = list_first_entry(&smux.power_queue,
						struct smux_pkt_t,
						list);
		list_del(&pkt->list);
		SMUX_DBG("%s: emptying power queue pkt=%p\n",
				__func__, pkt);
		smux_free_pkt(pkt);
	}
	spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);

	/* Close all ports */
	for (i = 0 ; i < SMUX_NUM_LOGICAL_CHANNELS; i++) {
		ch = &smux_lch[i];
		SMUX_DBG("%s: cleaning up lcid %d\n", __func__, i);

		spin_lock_irqsave(&ch->state_lock_lhb1, flags);

		/* Purge TX queue */
		spin_lock(&ch->tx_lock_lhb2);
		smux_purge_ch_tx_queue(ch);
		spin_unlock(&ch->tx_lock_lhb2);

		/* Notify user of disconnect and reset channel state */
		if (ch->local_state == SMUX_LCH_LOCAL_OPENED ||
			ch->local_state == SMUX_LCH_LOCAL_CLOSING) {
			union notifier_metadata meta;

			meta.disconnected.is_ssr = smux.in_reset;
			schedule_notify(ch->lcid, SMUX_DISCONNECTED, &meta);
		}

		ch->local_state = SMUX_LCH_LOCAL_CLOSED;
		ch->local_mode = SMUX_LCH_MODE_NORMAL;
		ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
		ch->remote_mode = SMUX_LCH_MODE_NORMAL;
		ch->tx_flow_control = 0;
		ch->rx_flow_control_auto = 0;
		ch->rx_flow_control_client = 0;

		/* Purge RX retry queue */
		if (ch->rx_retry_queue_cnt)
			queue_delayed_work(smux_rx_wq, &ch->rx_retry_work, 0);

		spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
	}

	/* Flush TX/RX workqueues */
	SMUX_DBG("%s: flushing tx wq\n", __func__);
	flush_workqueue(smux_tx_wq);
	SMUX_DBG("%s: flushing rx wq\n", __func__);
	flush_workqueue(smux_rx_wq);
}

int smux_assert_lch_id(uint32_t lcid)
{
	if (lcid >= SMUX_NUM_LOGICAL_CHANNELS)
		return -ENXIO;
	else
		return 0;
}

/**
 * Log packet information for debug purposes.
 *
 * @pkt Packet to log
 * @is_recv 1 = RX packet; 0 = TX Packet
 *
 * [DIR][LCID] [LOCAL_STATE][LOCAL_MODE]:[REMOTE_STATE][REMOTE_MODE] PKT Info
 *
 * PKT Info:
 *   [CMD] flags [flags] len [PAYLOAD_LEN]:[PAD_LEN] [Payload hex bytes]
 *
 * Direction:    R = Receive, S = Send
 * Local State:  C = Closed; c = closing; o = opening; O = Opened
 * Local Mode:   L = Local loopback; R = Remote loopback; N = Normal
 * Remote State: C = Closed; O = Opened
 * Remote Mode:  R = Remote loopback; N = Normal
 */
static void smux_log_pkt(struct smux_pkt_t *pkt, int is_recv)
{
	char logbuf[SMUX_PKT_LOG_SIZE];
	char cmd_extra[16];
	int i = 0;
	int count;
	int len;
	char local_state;
	char local_mode;
	char remote_state;
	char remote_mode;
	struct smux_lch_t *ch = NULL;
	unsigned char *data;

	if (!smux_assert_lch_id(pkt->hdr.lcid))
		ch = &smux_lch[pkt->hdr.lcid];

	if (ch) {
		switch (ch->local_state) {
		case SMUX_LCH_LOCAL_CLOSED:
			local_state = 'C';
			break;
		case SMUX_LCH_LOCAL_OPENING:
			local_state = 'o';
			break;
		case SMUX_LCH_LOCAL_OPENED:
			local_state = 'O';
			break;
		case SMUX_LCH_LOCAL_CLOSING:
			local_state = 'c';
			break;
		default:
			local_state = 'U';
			break;
		}

		switch (ch->local_mode) {
		case SMUX_LCH_MODE_LOCAL_LOOPBACK:
			local_mode = 'L';
			break;
		case SMUX_LCH_MODE_REMOTE_LOOPBACK:
			local_mode = 'R';
			break;
		case SMUX_LCH_MODE_NORMAL:
			local_mode = 'N';
			break;
		default:
			local_mode = 'U';
			break;
		}

		switch (ch->remote_state) {
		case SMUX_LCH_REMOTE_CLOSED:
			remote_state = 'C';
			break;
		case SMUX_LCH_REMOTE_OPENED:
			remote_state = 'O';
			break;
		default:
			remote_state = 'U';
			break;
		}

		switch (ch->remote_mode) {
		case SMUX_LCH_MODE_REMOTE_LOOPBACK:
			remote_mode = 'R';
			break;
		case SMUX_LCH_MODE_NORMAL:
			remote_mode = 'N';
			break;
		default:
			remote_mode = 'U';
			break;
		}
	} else {
		/* broadcast channel */
		local_state = '-';
		local_mode = '-';
		remote_state = '-';
		remote_mode = '-';
	}

	/* determine command type (ACK, etc) */
	cmd_extra[0] = '\0';
	switch (pkt->hdr.cmd) {
	case SMUX_CMD_OPEN_LCH:
		if (pkt->hdr.flags & SMUX_CMD_OPEN_ACK)
			snprintf(cmd_extra, sizeof(cmd_extra), " ACK");
		break;
	case SMUX_CMD_CLOSE_LCH:
		if (pkt->hdr.flags & SMUX_CMD_CLOSE_ACK)
			snprintf(cmd_extra, sizeof(cmd_extra), " ACK");
		break;
	case SMUX_CMD_PWR_CTL:
		if (pkt->hdr.flags & SMUX_CMD_PWR_CTL_ACK)
			snprintf(cmd_extra, sizeof(cmd_extra), " ACK");
		break;
	}

	i += snprintf(logbuf + i, SMUX_PKT_LOG_SIZE - i,
			"smux: %c%d %c%c:%c%c %s%s flags %x len %d:%d ",
			is_recv ? 'R' : 'S', pkt->hdr.lcid,
			local_state, local_mode,
			remote_state, remote_mode,
			cmd_to_str(pkt->hdr.cmd), cmd_extra, pkt->hdr.flags,
			pkt->hdr.payload_len, pkt->hdr.pad_len);

	len = (pkt->hdr.payload_len > 16) ? 16 : pkt->hdr.payload_len;
	data = (unsigned char *)pkt->payload;
	for (count = 0; count < len; count++)
		i += snprintf(logbuf + i, SMUX_PKT_LOG_SIZE - i,
				"%02x ", (unsigned)data[count]);

	pr_info("%s\n", logbuf);
}

static void smux_notify_local_fn(struct work_struct *work)
{
	struct smux_notify_handle *notify_handle = NULL;
	union notifier_metadata *metadata = NULL;
	unsigned long flags;
	int i;

	for (;;) {
		/* retrieve notification */
		spin_lock_irqsave(&notify_lock_lhc1, flags);
		if (kfifo_len(&smux_notify_fifo) >= handle_size) {
			i = kfifo_out(&smux_notify_fifo,
					&notify_handle,
					handle_size);
			if (i != handle_size) {
				pr_err("%s: unable to retrieve handle %d expected %d\n",
						__func__, i, handle_size);
				spin_unlock_irqrestore(&notify_lock_lhc1,
						flags);
				break;
			}
		} else {
			spin_unlock_irqrestore(&notify_lock_lhc1, flags);
			break;
		}
		--queued_fifo_notifications;
		spin_unlock_irqrestore(&notify_lock_lhc1, flags);

		/* notify client */
		metadata = notify_handle->metadata;
		notify_handle->notify(notify_handle->priv,
				notify_handle->event_type,
				metadata);

		kfree(metadata);
		kfree(notify_handle);
	}
}

/**
 * Initialize existing packet.
 */
void smux_init_pkt(struct smux_pkt_t *pkt)
{
	memset(pkt, 0x0, sizeof(*pkt));
	pkt->hdr.magic = SMUX_MAGIC;
	INIT_LIST_HEAD(&pkt->list);
}

/**
 * Allocate and initialize packet.
 *
 * If a payload is needed, either set it directly and ensure that it's freed or
 * use smux_alloc_pkt_payload() to allocate a payload that will be freed
 * automatically when smux_free_pkt() is called.
 */
struct smux_pkt_t *smux_alloc_pkt(void)
{
	struct smux_pkt_t *pkt;

	/* Consider a free list implementation instead of kmalloc */
	pkt = kmalloc(sizeof(struct smux_pkt_t), GFP_ATOMIC);
	if (!pkt) {
		pr_err("%s: out of memory\n", __func__);
		return NULL;
	}
	smux_init_pkt(pkt);
	pkt->allocated = 1;

	return pkt;
}

/**
 * Free packet.
 *
 * @pkt Packet to free (may be NULL)
 *
 * If payload was allocated using smux_alloc_pkt_payload(), then it is freed as
 * well. Otherwise, the caller is responsible for freeing the payload.
 */
void smux_free_pkt(struct smux_pkt_t *pkt)
{
	if (pkt) {
		if (pkt->free_payload)
			kfree(pkt->payload);
		if (pkt->allocated)
			kfree(pkt);
	}
}

/**
 * Allocate packet payload.
 *
 * @pkt Packet to add payload to
 *
 * @returns 0 on success, <0 upon error
 *
 * A flag is set to signal smux_free_pkt() to free the payload.
 */
int smux_alloc_pkt_payload(struct smux_pkt_t *pkt)
{
	if (!pkt)
		return -EINVAL;

	pkt->payload = kmalloc(pkt->hdr.payload_len, GFP_ATOMIC);
	pkt->free_payload = 1;
	if (!pkt->payload) {
		pr_err("%s: unable to malloc %d bytes for payload\n",
				__func__, pkt->hdr.payload_len);
		return -ENOMEM;
	}

	return 0;
}

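/*
 * Typical allocation pattern (an illustrative sketch with error handling
 * trimmed; it mirrors the loopback echo path in smux_handle_rx_data_cmd
 * below):
 *
 *	struct smux_pkt_t *pkt = smux_alloc_pkt();
 *
 *	pkt->hdr.lcid = lcid;
 *	pkt->hdr.cmd = SMUX_CMD_DATA;
 *	pkt->hdr.payload_len = len;
 *	if (smux_alloc_pkt_payload(pkt) == 0) {
 *		memcpy(pkt->payload, data, len);
 *		smux_tx_queue(pkt, ch, 1);
 *	} else {
 *		smux_free_pkt(pkt);
 *	}
 */
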
static int schedule_notify(uint8_t lcid, int event,
			const union notifier_metadata *metadata)
{
	struct smux_notify_handle *notify_handle = 0;
	union notifier_metadata *meta_copy = 0;
	struct smux_lch_t *ch;
	int i;
	unsigned long flags;
	int ret = 0;

	ch = &smux_lch[lcid];
	notify_handle = kzalloc(sizeof(struct smux_notify_handle),
						GFP_ATOMIC);
	if (!notify_handle) {
		pr_err("%s: out of memory\n", __func__);
		ret = -ENOMEM;
		goto free_out;
	}

	notify_handle->notify = ch->notify;
	notify_handle->priv = ch->priv;
	notify_handle->event_type = event;
	if (metadata) {
		meta_copy = kzalloc(sizeof(union notifier_metadata),
						GFP_ATOMIC);
		if (!meta_copy) {
			pr_err("%s: out of memory\n", __func__);
			ret = -ENOMEM;
			goto free_out;
		}
		*meta_copy = *metadata;
		notify_handle->metadata = meta_copy;
	} else {
		notify_handle->metadata = NULL;
	}

	spin_lock_irqsave(&notify_lock_lhc1, flags);
	i = kfifo_avail(&smux_notify_fifo);
	if (i < handle_size) {
		pr_err("%s: fifo full error %d expected %d\n",
					__func__, i, handle_size);
		ret = -ENOMEM;
		goto unlock_out;
	}

	i = kfifo_in(&smux_notify_fifo, &notify_handle, handle_size);
	if (i < 0 || i != handle_size) {
		pr_err("%s: fifo not available error %d (expected %d)\n",
				__func__, i, handle_size);
		ret = -ENOSPC;
		goto unlock_out;
	}
	++queued_fifo_notifications;

unlock_out:
	spin_unlock_irqrestore(&notify_lock_lhc1, flags);

free_out:
	queue_work(smux_notify_wq, &smux_notify_local);
	if (ret < 0 && notify_handle) {
		kfree(notify_handle->metadata);
		kfree(notify_handle);
	}
	return ret;
}

/**
 * Returns the serialized size of a packet.
 *
 * @pkt Packet to serialize
 *
 * @returns Serialized length of packet
 */
static unsigned int smux_serialize_size(struct smux_pkt_t *pkt)
{
	unsigned int size;

	size = sizeof(struct smux_hdr_t);
	size += pkt->hdr.payload_len;
	size += pkt->hdr.pad_len;

	return size;
}

/**
 * Serialize packet @pkt into output buffer @data.
 *
 * @pkt Packet to serialize
 * @out Destination buffer pointer
 * @out_len Size of serialized packet
 *
 * @returns 0 for success
 */
int smux_serialize(struct smux_pkt_t *pkt, char *out,
					unsigned int *out_len)
{
	char *data_start = out;

	if (smux_serialize_size(pkt) > SMUX_MAX_PKT_SIZE) {
		pr_err("%s: packet size %d too big\n",
				__func__, smux_serialize_size(pkt));
		return -E2BIG;
	}

	memcpy(out, &pkt->hdr, sizeof(struct smux_hdr_t));
	out += sizeof(struct smux_hdr_t);
	if (pkt->payload) {
		memcpy(out, pkt->payload, pkt->hdr.payload_len);
		out += pkt->hdr.payload_len;
	}
	if (pkt->hdr.pad_len) {
		memset(out, 0x0, pkt->hdr.pad_len);
		out += pkt->hdr.pad_len;
	}
	*out_len = out - data_start;
	return 0;
}

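/*
 * Resulting wire layout (sketch; the pad bytes are zeroed here even
 * though the protocol leaves their value undefined):
 *
 *	+-------------------+-------------------+---------------+
 *	| struct smux_hdr_t | payload_len bytes | pad_len bytes |
 *	+-------------------+-------------------+---------------+
 */
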
/**
 * Serialize header and provide pointer to the data.
 *
 * @pkt Packet
 * @out[out] Pointer to the serialized header data
 * @out_len[out] Pointer to the serialized header length
 */
static void smux_serialize_hdr(struct smux_pkt_t *pkt, char **out,
					unsigned int *out_len)
{
	*out = (char *)&pkt->hdr;
	*out_len = sizeof(struct smux_hdr_t);
}

/**
 * Serialize payload and provide pointer to the data.
 *
 * @pkt Packet
 * @out[out] Pointer to the serialized payload data
 * @out_len[out] Pointer to the serialized payload length
 */
static void smux_serialize_payload(struct smux_pkt_t *pkt, char **out,
					unsigned int *out_len)
{
	*out = pkt->payload;
	*out_len = pkt->hdr.payload_len;
}

/**
 * Serialize padding and provide pointer to the data.
 *
 * @pkt Packet
 * @out[out] Pointer to the serialized padding (always NULL)
 * @out_len[out] Pointer to the serialized padding length
 *
 * Since the padding field value is undefined, only the size of the padding
 * (@out_len) is set and the buffer pointer (@out) will always be NULL.
 */
static void smux_serialize_padding(struct smux_pkt_t *pkt, char **out,
					unsigned int *out_len)
{
	*out = NULL;
	*out_len = pkt->hdr.pad_len;
}

/**
 * Write data to TTY framework and handle breaking the writes up if needed.
 *
 * @data Data to write
 * @len Length of data
 *
 * @returns 0 for success, < 0 for failure
 */
static int write_to_tty(char *data, unsigned len)
{
	int data_written;

	if (!data)
		return 0;

	while (len > 0 && !smux.in_reset) {
		data_written = smux.tty->ops->write(smux.tty, data, len);
		if (data_written >= 0) {
			len -= data_written;
			data += data_written;
		} else {
			pr_err("%s: TTY write returned error %d\n",
					__func__, data_written);
			return data_written;
		}

		if (len)
			tty_wait_until_sent(smux.tty,
				msecs_to_jiffies(TTY_BUFFER_FULL_WAIT_MS));
	}
	return 0;
}

/**
 * Write packet to TTY.
 *
 * @pkt packet to write
 *
 * @returns 0 on success
 */
static int smux_tx_tty(struct smux_pkt_t *pkt)
{
	char *data;
	unsigned int len;
	int ret;

	if (!smux.tty) {
		pr_err("%s: TTY not initialized", __func__);
		return -ENOTTY;
	}

	if (pkt->hdr.cmd == SMUX_CMD_BYTE) {
		SMUX_DBG("%s: tty send single byte\n", __func__);
		ret = write_to_tty(&pkt->hdr.flags, 1);
		return ret;
	}

	smux_serialize_hdr(pkt, &data, &len);
	ret = write_to_tty(data, len);
	if (ret) {
		pr_err("%s: failed %d to write header %d\n",
				__func__, ret, len);
		return ret;
	}

	smux_serialize_payload(pkt, &data, &len);
	ret = write_to_tty(data, len);
	if (ret) {
		pr_err("%s: failed %d to write payload %d\n",
				__func__, ret, len);
		return ret;
	}

	smux_serialize_padding(pkt, &data, &len);
	while (len > 0) {
		char zero = 0x0;

		ret = write_to_tty(&zero, 1);
		if (ret) {
			pr_err("%s: failed %d to write padding %d\n",
					__func__, ret, len);
			return ret;
		}
		--len;
	}
	return 0;
}

/**
 * Send a single character.
 *
 * @ch Character to send
 */
static void smux_send_byte(char ch)
{
	struct smux_pkt_t *pkt;

	pkt = smux_alloc_pkt();
	if (!pkt) {
		pr_err("%s: alloc failure for byte %x\n", __func__, ch);
		return;
	}
	pkt->hdr.cmd = SMUX_CMD_BYTE;
	pkt->hdr.flags = ch;
	pkt->hdr.lcid = SMUX_BROADCAST_LCID;

	list_add_tail(&pkt->list, &smux.power_queue);
	queue_work(smux_tx_wq, &smux_tx_work);
}

/**
 * Receive a single-character packet (used for internal testing).
 *
 * @ch Character to receive
 * @lcid Logical channel ID for packet
 *
 * @returns 0 for success
 */
static int smux_receive_byte(char ch, int lcid)
{
	struct smux_pkt_t pkt;

	smux_init_pkt(&pkt);
	pkt.hdr.lcid = lcid;
	pkt.hdr.cmd = SMUX_CMD_BYTE;
	pkt.hdr.flags = ch;

	return smux_dispatch_rx_pkt(&pkt);
}

/**
 * Queue packet for transmit.
 *
 * @pkt_ptr Packet to queue
 * @ch Channel to queue packet on
 * @queue Queue channel on ready list
 */
static void smux_tx_queue(struct smux_pkt_t *pkt_ptr, struct smux_lch_t *ch,
		int queue)
{
	unsigned long flags;

	SMUX_DBG("%s: queuing pkt %p\n", __func__, pkt_ptr);

	spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
	list_add_tail(&pkt_ptr->list, &ch->tx_queue);
	spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);

	if (queue)
		list_channel(ch);
}

/**
 * Handle receive OPEN ACK command.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_open_ack(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret;
	struct smux_lch_t *ch;
	int enable_powerdown = 0;

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];

	spin_lock(&ch->state_lock_lhb1);
	if (ch->local_state == SMUX_LCH_LOCAL_OPENING) {
		SMUX_DBG("lcid %d local state 0x%x -> 0x%x\n", lcid,
				ch->local_state,
				SMUX_LCH_LOCAL_OPENED);

		if (pkt->hdr.flags & SMUX_CMD_OPEN_POWER_COLLAPSE)
			enable_powerdown = 1;

		ch->local_state = SMUX_LCH_LOCAL_OPENED;
		if (ch->remote_state == SMUX_LCH_REMOTE_OPENED)
			schedule_notify(lcid, SMUX_CONNECTED, NULL);
		ret = 0;
	} else if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
		SMUX_DBG("Remote loopback OPEN ACK received\n");
		ret = 0;
	} else {
		pr_err("%s: lcid %d state 0x%x open ack invalid\n",
				__func__, lcid, ch->local_state);
		ret = -EINVAL;
	}
	spin_unlock(&ch->state_lock_lhb1);

	if (enable_powerdown) {
		spin_lock(&smux.tx_lock_lha2);
		if (!smux.powerdown_enabled) {
			smux.powerdown_enabled = 1;
			SMUX_DBG("%s: enabling power-collapse support\n",
					__func__);
		}
		spin_unlock(&smux.tx_lock_lha2);
	}

	return ret;
}

static int smux_handle_close_ack(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret;
	struct smux_lch_t *ch;
	union notifier_metadata meta_disconnected;
	unsigned long flags;

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];
	meta_disconnected.disconnected.is_ssr = 0;

	spin_lock_irqsave(&ch->state_lock_lhb1, flags);

	if (ch->local_state == SMUX_LCH_LOCAL_CLOSING) {
		SMUX_DBG("lcid %d local state 0x%x -> 0x%x\n", lcid,
				SMUX_LCH_LOCAL_CLOSING,
				SMUX_LCH_LOCAL_CLOSED);
		ch->local_state = SMUX_LCH_LOCAL_CLOSED;
		if (ch->remote_state == SMUX_LCH_REMOTE_CLOSED)
			schedule_notify(lcid, SMUX_DISCONNECTED,
					&meta_disconnected);
		ret = 0;
	} else if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
		SMUX_DBG("Remote loopback CLOSE ACK received\n");
		ret = 0;
	} else {
		pr_err("%s: lcid %d state 0x%x close ack invalid\n",
				__func__, lcid, ch->local_state);
		ret = -EINVAL;
	}
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
	return ret;
}

/**
 * Handle receive OPEN command.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_open_cmd(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret;
	struct smux_lch_t *ch;
	struct smux_pkt_t *ack_pkt;
	unsigned long flags;
	int tx_ready = 0;
	int enable_powerdown = 0;

	if (pkt->hdr.flags & SMUX_CMD_OPEN_ACK)
		return smux_handle_rx_open_ack(pkt);

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];

	spin_lock_irqsave(&ch->state_lock_lhb1, flags);

	if (ch->remote_state == SMUX_LCH_REMOTE_CLOSED) {
		SMUX_DBG("lcid %d remote state 0x%x -> 0x%x\n", lcid,
				SMUX_LCH_REMOTE_CLOSED,
				SMUX_LCH_REMOTE_OPENED);

		ch->remote_state = SMUX_LCH_REMOTE_OPENED;
		if (pkt->hdr.flags & SMUX_CMD_OPEN_POWER_COLLAPSE)
			enable_powerdown = 1;

		/* Send Open ACK */
		ack_pkt = smux_alloc_pkt();
		if (!ack_pkt) {
			/* exit out to allow retrying this later */
			ret = -ENOMEM;
			goto out;
		}
		ack_pkt->hdr.cmd = SMUX_CMD_OPEN_LCH;
		ack_pkt->hdr.flags = SMUX_CMD_OPEN_ACK
			| SMUX_CMD_OPEN_POWER_COLLAPSE;
		ack_pkt->hdr.lcid = lcid;
		ack_pkt->hdr.payload_len = 0;
		ack_pkt->hdr.pad_len = 0;
		if (pkt->hdr.flags & SMUX_CMD_OPEN_REMOTE_LOOPBACK) {
			ch->remote_mode = SMUX_LCH_MODE_REMOTE_LOOPBACK;
			ack_pkt->hdr.flags |= SMUX_CMD_OPEN_REMOTE_LOOPBACK;
		}
		smux_tx_queue(ack_pkt, ch, 0);
		tx_ready = 1;

		if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
			/*
			 * Send an Open command to the remote side to
			 * simulate our local client doing it.
			 */
			ack_pkt = smux_alloc_pkt();
			if (ack_pkt) {
				ack_pkt->hdr.lcid = lcid;
				ack_pkt->hdr.cmd = SMUX_CMD_OPEN_LCH;
				ack_pkt->hdr.flags =
					SMUX_CMD_OPEN_POWER_COLLAPSE;
				ack_pkt->hdr.payload_len = 0;
				ack_pkt->hdr.pad_len = 0;
				smux_tx_queue(ack_pkt, ch, 0);
				tx_ready = 1;
			} else {
				pr_err("%s: Remote loopback allocation failure\n",
						__func__);
			}
		} else if (ch->local_state == SMUX_LCH_LOCAL_OPENED) {
			schedule_notify(lcid, SMUX_CONNECTED, NULL);
		}
		ret = 0;
	} else {
		pr_err("%s: lcid %d remote state 0x%x open invalid\n",
				__func__, lcid, ch->remote_state);
		ret = -EINVAL;
	}

out:
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);

	if (enable_powerdown) {
		spin_lock_irqsave(&smux.tx_lock_lha2, flags);
		if (!smux.powerdown_enabled) {
			smux.powerdown_enabled = 1;
			SMUX_DBG("%s: enabling power-collapse support\n",
					__func__);
		}
		spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
	}

	if (tx_ready)
		list_channel(ch);

	return ret;
}

/**
 * Handle receive CLOSE command.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_close_cmd(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret;
	struct smux_lch_t *ch;
	struct smux_pkt_t *ack_pkt;
	union notifier_metadata meta_disconnected;
	unsigned long flags;
	int tx_ready = 0;

	if (pkt->hdr.flags & SMUX_CMD_CLOSE_ACK)
		return smux_handle_close_ack(pkt);

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];
	meta_disconnected.disconnected.is_ssr = 0;

	spin_lock_irqsave(&ch->state_lock_lhb1, flags);
	if (ch->remote_state == SMUX_LCH_REMOTE_OPENED) {
		SMUX_DBG("lcid %d remote state 0x%x -> 0x%x\n", lcid,
				SMUX_LCH_REMOTE_OPENED,
				SMUX_LCH_REMOTE_CLOSED);

		ack_pkt = smux_alloc_pkt();
		if (!ack_pkt) {
			/* exit out to allow retrying this later */
			ret = -ENOMEM;
			goto out;
		}
		ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
		ack_pkt->hdr.cmd = SMUX_CMD_CLOSE_LCH;
		ack_pkt->hdr.flags = SMUX_CMD_CLOSE_ACK;
		ack_pkt->hdr.lcid = lcid;
		ack_pkt->hdr.payload_len = 0;
		ack_pkt->hdr.pad_len = 0;
		smux_tx_queue(ack_pkt, ch, 0);
		tx_ready = 1;

		if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
			/*
			 * Send a Close command to the remote side to simulate
			 * our local client doing it.
			 */
			ack_pkt = smux_alloc_pkt();
			if (ack_pkt) {
				ack_pkt->hdr.lcid = lcid;
				ack_pkt->hdr.cmd = SMUX_CMD_CLOSE_LCH;
				ack_pkt->hdr.flags = 0;
				ack_pkt->hdr.payload_len = 0;
				ack_pkt->hdr.pad_len = 0;
				smux_tx_queue(ack_pkt, ch, 0);
				tx_ready = 1;
			} else {
				pr_err("%s: Remote loopback allocation failure\n",
						__func__);
			}
		}

		if (ch->local_state == SMUX_LCH_LOCAL_CLOSED)
			schedule_notify(lcid, SMUX_DISCONNECTED,
					&meta_disconnected);
		ret = 0;
	} else {
		pr_err("%s: lcid %d remote state 0x%x close invalid\n",
				__func__, lcid, ch->remote_state);
		ret = -EINVAL;
	}
out:
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
	if (tx_ready)
		list_channel(ch);

	return ret;
}

/**
 * Handle receive DATA command.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_data_cmd(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret = 0;
	int do_retry = 0;
	int tx_ready = 0;
	int tmp;
	int rx_len;
	struct smux_lch_t *ch;
	union notifier_metadata metadata;
	int remote_loopback;
	struct smux_pkt_t *ack_pkt;
	unsigned long flags;

	if (!pkt || smux_assert_lch_id(pkt->hdr.lcid)) {
		ret = -ENXIO;
		goto out;
	}

	rx_len = pkt->hdr.payload_len;
	if (rx_len == 0) {
		ret = -EINVAL;
		goto out;
	}

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];
	spin_lock_irqsave(&ch->state_lock_lhb1, flags);
	remote_loopback = ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK;

	if (ch->local_state != SMUX_LCH_LOCAL_OPENED
		&& !remote_loopback) {
		pr_err("smux: ch %d error data on local state 0x%x",
					lcid, ch->local_state);
		ret = -EIO;
		spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
		goto out;
	}

	if (ch->remote_state != SMUX_LCH_REMOTE_OPENED) {
		pr_err("smux: ch %d error data on remote state 0x%x",
					lcid, ch->remote_state);
		ret = -EIO;
		spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
		goto out;
	}

	if (!list_empty(&ch->rx_retry_queue)) {
		do_retry = 1;

		if ((ch->options & SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP) &&
			!ch->rx_flow_control_auto &&
			((ch->rx_retry_queue_cnt + 1) >= SMUX_RX_WM_HIGH)) {
			/* need to flow control RX */
			ch->rx_flow_control_auto = 1;
			tx_ready |= smux_rx_flow_control_updated(ch);
			schedule_notify(ch->lcid, SMUX_RX_RETRY_HIGH_WM_HIT,
					NULL);
		}
		if ((ch->rx_retry_queue_cnt + 1) > SMUX_RX_RETRY_MAX_PKTS) {
			/* retry queue full */
			pr_err("%s: ch %d RX retry queue full\n",
					__func__, lcid);
			schedule_notify(lcid, SMUX_READ_FAIL, NULL);
			ret = -ENOMEM;
			spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
			goto out;
		}
	}
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);

	if (remote_loopback) {
		/* Echo the data back to the remote client. */
		ack_pkt = smux_alloc_pkt();
		if (ack_pkt) {
			ack_pkt->hdr.lcid = lcid;
			ack_pkt->hdr.cmd = SMUX_CMD_DATA;
			ack_pkt->hdr.flags = 0;
			ack_pkt->hdr.payload_len = pkt->hdr.payload_len;
			if (ack_pkt->hdr.payload_len) {
				smux_alloc_pkt_payload(ack_pkt);
				memcpy(ack_pkt->payload, pkt->payload,
						ack_pkt->hdr.payload_len);
			}
			ack_pkt->hdr.pad_len = pkt->hdr.pad_len;
			smux_tx_queue(ack_pkt, ch, 0);
			tx_ready = 1;
		} else {
			pr_err("%s: Remote loopback allocation failure\n",
					__func__);
		}
	} else if (!do_retry) {
		/* request buffer from client */
		metadata.read.pkt_priv = 0;
		metadata.read.buffer = 0;
		tmp = ch->get_rx_buffer(ch->priv,
				(void **)&metadata.read.pkt_priv,
				(void **)&metadata.read.buffer,
				rx_len);

		if (tmp == 0 && metadata.read.buffer) {
			/* place data into RX buffer */
			memcpy(metadata.read.buffer, pkt->payload,
					rx_len);
			metadata.read.len = rx_len;
			schedule_notify(lcid, SMUX_READ_DONE,
					&metadata);
		} else if (tmp == -EAGAIN ||
				(tmp == 0 && !metadata.read.buffer)) {
			/* buffer allocation failed - add to retry queue */
			do_retry = 1;
		} else if (tmp < 0) {
			pr_err("%s: ch %d Client RX buffer alloc failed %d\n",
					__func__, lcid, tmp);
			schedule_notify(lcid, SMUX_READ_FAIL, NULL);
			ret = -ENOMEM;
		}
	}

	if (do_retry) {
		struct smux_rx_pkt_retry *retry;

		retry = kmalloc(sizeof(struct smux_rx_pkt_retry), GFP_KERNEL);
		if (!retry) {
			pr_err("%s: retry alloc failure\n", __func__);
			ret = -ENOMEM;
			schedule_notify(lcid, SMUX_READ_FAIL, NULL);
			goto out;
		}
		INIT_LIST_HEAD(&retry->rx_retry_list);
		retry->timeout_in_ms = SMUX_RX_RETRY_MIN_MS;

		/* copy packet */
		retry->pkt = smux_alloc_pkt();
		if (!retry->pkt) {
			kfree(retry);
			pr_err("%s: pkt alloc failure\n", __func__);
			ret = -ENOMEM;
			schedule_notify(lcid, SMUX_READ_FAIL, NULL);
			goto out;
		}
		retry->pkt->hdr.lcid = lcid;
		retry->pkt->hdr.payload_len = pkt->hdr.payload_len;
		retry->pkt->hdr.pad_len = pkt->hdr.pad_len;
		if (retry->pkt->hdr.payload_len) {
			smux_alloc_pkt_payload(retry->pkt);
			memcpy(retry->pkt->payload, pkt->payload,
					retry->pkt->hdr.payload_len);
		}

		/* add to retry queue */
		spin_lock_irqsave(&ch->state_lock_lhb1, flags);
		list_add_tail(&retry->rx_retry_list, &ch->rx_retry_queue);
		++ch->rx_retry_queue_cnt;
		if (ch->rx_retry_queue_cnt == 1)
			queue_delayed_work(smux_rx_wq, &ch->rx_retry_work,
				msecs_to_jiffies(retry->timeout_in_ms));
		spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
	}

	if (tx_ready)
		list_channel(ch);
out:
	return ret;
}

/**
 * Handle receive byte command for testing purposes.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_byte_cmd(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret;
	struct smux_lch_t *ch;
	union notifier_metadata metadata;
	unsigned long flags;

	if (!pkt || smux_assert_lch_id(pkt->hdr.lcid)) {
		pr_err("%s: invalid packet or channel id\n", __func__);
		return -ENXIO;
	}

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];
	spin_lock_irqsave(&ch->state_lock_lhb1, flags);

	if (ch->local_state != SMUX_LCH_LOCAL_OPENED) {
		pr_err("smux: ch %d error data on local state 0x%x",
				lcid, ch->local_state);
		ret = -EIO;
		goto out;
	}

	if (ch->remote_state != SMUX_LCH_REMOTE_OPENED) {
		pr_err("smux: ch %d error data on remote state 0x%x",
				lcid, ch->remote_state);
		ret = -EIO;
		goto out;
	}

	metadata.read.pkt_priv = (void *)(int)pkt->hdr.flags;
	metadata.read.buffer = 0;
	schedule_notify(lcid, SMUX_READ_DONE, &metadata);
	ret = 0;

out:
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
	return ret;
}

/**
 * Handle receive status command.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_status_cmd(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret = 0;
	struct smux_lch_t *ch;
	union notifier_metadata meta;
	unsigned long flags;
	int tx_ready = 0;

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];

	spin_lock_irqsave(&ch->state_lock_lhb1, flags);
	meta.tiocm.tiocm_old = ch->remote_tiocm;
	meta.tiocm.tiocm_new = pkt->hdr.flags;

	/* update logical channel flow control */
	if ((meta.tiocm.tiocm_old & SMUX_CMD_STATUS_FLOW_CNTL) ^
		(meta.tiocm.tiocm_new & SMUX_CMD_STATUS_FLOW_CNTL)) {
		/* logical channel flow control changed */
		if (pkt->hdr.flags & SMUX_CMD_STATUS_FLOW_CNTL) {
			/* disabled TX */
			SMUX_DBG("TX Flow control enabled\n");
			ch->tx_flow_control = 1;
		} else {
			/* re-enable channel */
			SMUX_DBG("TX Flow control disabled\n");
			ch->tx_flow_control = 0;
			tx_ready = 1;
		}
	}
	meta.tiocm.tiocm_old = msm_smux_tiocm_get_atomic(ch);
	ch->remote_tiocm = pkt->hdr.flags;
	meta.tiocm.tiocm_new = msm_smux_tiocm_get_atomic(ch);

	/* client notification for status change */
	if (IS_FULLY_OPENED(ch)) {
		if (meta.tiocm.tiocm_old != meta.tiocm.tiocm_new)
			schedule_notify(lcid, SMUX_TIOCM_UPDATE, &meta);
	}
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
	if (tx_ready)
		list_channel(ch);

	return ret;
}

/**
 * Handle receive power command.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_power_cmd(struct smux_pkt_t *pkt)
{
	struct smux_pkt_t *ack_pkt = NULL;
	int power_down = 0;
	unsigned long flags;

	SMUX_PWR_PKT_RX(pkt);

	spin_lock_irqsave(&smux.tx_lock_lha2, flags);
	if (pkt->hdr.flags & SMUX_CMD_PWR_CTL_ACK) {
		/* local sleep request ack */
		if (smux.power_state == SMUX_PWR_TURNING_OFF)
			/* Power-down complete, turn off UART */
			power_down = 1;
		else
			pr_err("%s: sleep request ack invalid in state %d\n",
					__func__, smux.power_state);
	} else {
		/*
		 * Remote sleep request
		 *
		 * Even if we have data pending, we need to transition to the
		 * POWER_OFF state and then perform a wakeup since the remote
		 * side has requested a power-down.
		 *
		 * The state here is set to SMUX_PWR_TURNING_OFF_FLUSH and
		 * the TX thread will set the state to SMUX_PWR_TURNING_OFF
		 * when it sends the packet.
		 *
		 * If we are already powering down, then no ACK is sent.
		 */
		if (smux.power_state == SMUX_PWR_ON) {
			ack_pkt = smux_alloc_pkt();
			if (ack_pkt) {
				SMUX_PWR("%s: Power %d->%d\n", __func__,
						smux.power_state,
						SMUX_PWR_TURNING_OFF_FLUSH);

				smux.power_state = SMUX_PWR_TURNING_OFF_FLUSH;

				/* send power-down ack */
				ack_pkt->hdr.cmd = SMUX_CMD_PWR_CTL;
				ack_pkt->hdr.flags = SMUX_CMD_PWR_CTL_ACK;
				ack_pkt->hdr.lcid = SMUX_BROADCAST_LCID;
				list_add_tail(&ack_pkt->list,
						&smux.power_queue);
				queue_work(smux_tx_wq, &smux_tx_work);
			}
		} else if (smux.power_state == SMUX_PWR_TURNING_OFF_FLUSH) {
			/* Local power-down request still in TX queue */
			SMUX_PWR("%s: Power-down shortcut - no ack\n",
					__func__);
			smux.power_ctl_remote_req_received = 1;
		} else if (smux.power_state == SMUX_PWR_TURNING_OFF) {
			/*
			 * Local power-down request already sent to remote
			 * side, so this request gets treated as an ACK.
			 */
			SMUX_PWR("%s: Power-down shortcut - no ack\n",
					__func__);
			power_down = 1;
		} else {
			pr_err("%s: sleep request invalid in state %d\n",
					__func__, smux.power_state);
		}
	}

	if (power_down) {
		SMUX_PWR("%s: Power %d->%d\n", __func__,
				smux.power_state, SMUX_PWR_OFF_FLUSH);
		smux.power_state = SMUX_PWR_OFF_FLUSH;
		queue_work(smux_tx_wq, &smux_inactivity_work);
	}
	spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);

	return 0;
}

/**
 * Handle dispatching a completed packet for receive processing.
 *
 * @pkt Packet to process
 *
 * @returns 0 for success
 */
static int smux_dispatch_rx_pkt(struct smux_pkt_t *pkt)
{
	int ret = -ENXIO;

	switch (pkt->hdr.cmd) {
	case SMUX_CMD_OPEN_LCH:
		SMUX_LOG_PKT_RX(pkt);
		if (smux_assert_lch_id(pkt->hdr.lcid)) {
			pr_err("%s: invalid channel id %d\n",
					__func__, pkt->hdr.lcid);
			break;
		}
		ret = smux_handle_rx_open_cmd(pkt);
		break;

	case SMUX_CMD_DATA:
		SMUX_LOG_PKT_RX(pkt);
		if (smux_assert_lch_id(pkt->hdr.lcid)) {
			pr_err("%s: invalid channel id %d\n",
					__func__, pkt->hdr.lcid);
			break;
		}
		ret = smux_handle_rx_data_cmd(pkt);
		break;

	case SMUX_CMD_CLOSE_LCH:
		SMUX_LOG_PKT_RX(pkt);
		if (smux_assert_lch_id(pkt->hdr.lcid)) {
			pr_err("%s: invalid channel id %d\n",
					__func__, pkt->hdr.lcid);
			break;
		}
		ret = smux_handle_rx_close_cmd(pkt);
		break;

	case SMUX_CMD_STATUS:
		SMUX_LOG_PKT_RX(pkt);
		if (smux_assert_lch_id(pkt->hdr.lcid)) {
			pr_err("%s: invalid channel id %d\n",
					__func__, pkt->hdr.lcid);
			break;
		}
		ret = smux_handle_rx_status_cmd(pkt);
		break;

	case SMUX_CMD_PWR_CTL:
		ret = smux_handle_rx_power_cmd(pkt);
		break;

	case SMUX_CMD_BYTE:
		SMUX_LOG_PKT_RX(pkt);
		ret = smux_handle_rx_byte_cmd(pkt);
		break;

	default:
		SMUX_LOG_PKT_RX(pkt);
		pr_err("%s: command %d unknown\n", __func__, pkt->hdr.cmd);
		ret = -EINVAL;
	}
	return ret;
}

1810/**
1811 * Deserializes a packet and dispatches it to the packet receive logic.
1812 *
1813 * @data Raw data for one packet
1814 * @len Length of the data
1815 *
1816 * @returns 0 for success
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001817 */
1818static int smux_deserialize(unsigned char *data, int len)
1819{
1820 struct smux_pkt_t recv;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001821
1822 smux_init_pkt(&recv);
1823
1824 /*
1825 * It may be possible to optimize this to not use the
1826 * temporary buffer.
1827 */
1828 memcpy(&recv.hdr, data, sizeof(struct smux_hdr_t));
1829
1830 if (recv.hdr.magic != SMUX_MAGIC) {
1831 pr_err("%s: invalid header magic\n", __func__);
1832 return -EINVAL;
1833 }
1834
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001835 if (recv.hdr.payload_len)
1836 recv.payload = data + sizeof(struct smux_hdr_t);
1837
1838 return smux_dispatch_rx_pkt(&recv);
1839}
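
/*
 * For reference, a sketch of the wire framing implied by the RX state
 * machine below (derived from this file, not a normative definition):
 *
 *	[SMUX_MAGIC_WORD1][SMUX_MAGIC_WORD2][rest of struct smux_hdr_t]
 *	[payload (hdr.payload_len bytes)][pad (hdr.pad_len bytes)]
 *
 * so a complete received frame occupies
 *
 *	sizeof(struct smux_hdr_t) + hdr.payload_len + hdr.pad_len
 *
 * bytes, matching the recv_len/pkt_remain accounting in
 * smux_rx_handle_hdr() and smux_rx_handle_pkt_payload().
 */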
1840
1841/**
1842 * Handle wakeup request byte.
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001843 */
1844static void smux_handle_wakeup_req(void)
1845{
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001846 unsigned long flags;
1847
1848 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001849 if (smux.power_state == SMUX_PWR_OFF
1850 || smux.power_state == SMUX_PWR_TURNING_ON) {
1851 /* wakeup system */
Eric Holmbergff0b0112012-06-08 15:06:57 -06001852 SMUX_PWR("%s: Power %d->%d\n", __func__,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001853 smux.power_state, SMUX_PWR_ON);
1854 smux.power_state = SMUX_PWR_ON;
1855 queue_work(smux_tx_wq, &smux_wakeup_work);
1856 queue_work(smux_tx_wq, &smux_tx_work);
1857 queue_delayed_work(smux_tx_wq, &smux_delayed_inactivity_work,
1858 msecs_to_jiffies(SMUX_INACTIVITY_TIMEOUT_MS));
1859 smux_send_byte(SMUX_WAKEUP_ACK);
Eric Holmberga9b06472012-06-22 09:46:34 -06001860 } else if (smux.power_state == SMUX_PWR_ON) {
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001861 smux_send_byte(SMUX_WAKEUP_ACK);
Eric Holmberga9b06472012-06-22 09:46:34 -06001862 } else {
1863 /* stale wakeup request from previous wakeup */
1864 SMUX_PWR("%s: stale Wakeup REQ in state %d\n",
1865 __func__, smux.power_state);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001866 }
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001867 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001868}
1869
1870/**
1871 * Handle wakeup request ack.
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001872 */
1873static void smux_handle_wakeup_ack(void)
1874{
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001875 unsigned long flags;
1876
1877 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001878 if (smux.power_state == SMUX_PWR_TURNING_ON) {
1879 /* received response to wakeup request */
Eric Holmbergff0b0112012-06-08 15:06:57 -06001880 SMUX_PWR("%s: Power %d->%d\n", __func__,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001881 smux.power_state, SMUX_PWR_ON);
1882 smux.power_state = SMUX_PWR_ON;
1883 queue_work(smux_tx_wq, &smux_tx_work);
1884 queue_delayed_work(smux_tx_wq, &smux_delayed_inactivity_work,
1885 msecs_to_jiffies(SMUX_INACTIVITY_TIMEOUT_MS));
1886
1887 } else if (smux.power_state != SMUX_PWR_ON) {
1888 /* invalid message */
Eric Holmberga9b06472012-06-22 09:46:34 -06001889 SMUX_PWR("%s: stale Wakeup REQ ACK in state %d\n",
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001890 __func__, smux.power_state);
1891 }
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001892 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001893}
1894
1895/**
1896 * RX State machine - IDLE state processing.
1897 *
1898 * @data New RX data to process
1899 * @len Length of the data
1900 * @used Pointer used to return the number of bytes processed
1901 * @flag Error flag - TTY_NORMAL (0) for no error
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001902 */
1903static void smux_rx_handle_idle(const unsigned char *data,
1904 int len, int *used, int flag)
1905{
1906 int i;
1907
1908 if (flag) {
1909 if (smux_byte_loopback)
1910 smux_receive_byte(SMUX_UT_ECHO_ACK_FAIL,
1911 smux_byte_loopback);
1912 pr_err("%s: TTY error 0x%x - ignoring\n", __func__, flag);
1913 ++*used;
1914 return;
1915 }
1916
1917 for (i = *used; i < len && smux.rx_state == SMUX_RX_IDLE; i++) {
1918 switch (data[i]) {
1919 case SMUX_MAGIC_WORD1:
1920 smux.rx_state = SMUX_RX_MAGIC;
1921 break;
1922 case SMUX_WAKEUP_REQ:
Eric Holmberg4dd6e1a2012-06-26 13:46:29 -06001923 SMUX_PWR("smux: RX Wakeup REQ\n");
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001924 smux_handle_wakeup_req();
1925 break;
1926 case SMUX_WAKEUP_ACK:
Eric Holmberg4dd6e1a2012-06-26 13:46:29 -06001927 SMUX_PWR("smux: RX Wakeup ACK\n");
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001928 smux_handle_wakeup_ack();
1929 break;
1930 default:
1931 /* unexpected character */
1932 if (smux_byte_loopback && data[i] == SMUX_UT_ECHO_REQ)
1933 smux_receive_byte(SMUX_UT_ECHO_ACK_OK,
1934 smux_byte_loopback);
1935 pr_err("%s: parse error 0x%02x - ignoring\n", __func__,
1936 (unsigned)data[i]);
1937 break;
1938 }
1939 }
1940
1941 *used = i;
1942}
1943
1944/**
1945 * RX State machine - Header Magic state processing.
1946 *
1947 * @data New RX data to process
1948 * @len Length of the data
1949 * @used Pointer used to return the number of bytes processed
1950 * @flag Error flag - TTY_NORMAL (0) for no error
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001951 */
1952static void smux_rx_handle_magic(const unsigned char *data,
1953 int len, int *used, int flag)
1954{
1955 int i;
1956
1957 if (flag) {
1958 pr_err("%s: TTY RX error %d\n", __func__, flag);
1959 smux_enter_reset();
1960 smux.rx_state = SMUX_RX_FAILURE;
1961 ++*used;
1962 return;
1963 }
1964
1965 for (i = *used; i < len && smux.rx_state == SMUX_RX_MAGIC; i++) {
1966 /* wait for completion of the magic */
1967 if (data[i] == SMUX_MAGIC_WORD2) {
1968 smux.recv_len = 0;
1969 smux.recv_buf[smux.recv_len++] = SMUX_MAGIC_WORD1;
1970 smux.recv_buf[smux.recv_len++] = SMUX_MAGIC_WORD2;
1971 smux.rx_state = SMUX_RX_HDR;
1972 } else {
1973 /* unexpected / trash character */
1974 pr_err("%s: rx parse error for char %c; *used=%d, len=%d\n",
1975 __func__, data[i], *used, len);
1976 smux.rx_state = SMUX_RX_IDLE;
1977 }
1978 }
1979
1980 *used = i;
1981}
1982
1983/**
1984 * RX State machine - Packet Header state processing.
1985 *
1986 * @data New RX data to process
1987 * @len Length of the data
1988 * @used Pointer used to return the number of bytes processed
1989 * @flag Error flag - TTY_NORMAL (0) for no error
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001990 */
1991static void smux_rx_handle_hdr(const unsigned char *data,
1992 int len, int *used, int flag)
1993{
1994 int i;
1995 struct smux_hdr_t *hdr;
1996
1997 if (flag) {
1998 pr_err("%s: TTY RX error %d\n", __func__, flag);
1999 smux_enter_reset();
2000 smux.rx_state = SMUX_RX_FAILURE;
2001 ++*used;
2002 return;
2003 }
2004
2005 for (i = *used; i < len && smux.rx_state == SMUX_RX_HDR; i++) {
2006 smux.recv_buf[smux.recv_len++] = data[i];
2007
2008 if (smux.recv_len == sizeof(struct smux_hdr_t)) {
2009 /* complete header received */
2010 hdr = (struct smux_hdr_t *)smux.recv_buf;
2011 smux.pkt_remain = hdr->payload_len + hdr->pad_len;
2012 smux.rx_state = SMUX_RX_PAYLOAD;
2013 }
2014 }
2015 *used = i;
2016}
2017
2018/**
2019 * RX State machine - Packet Payload state processing.
2020 *
2021 * @data New RX data to process
2022 * @len Length of the data
2023 * @used Pointer used to return the number of bytes processed
2024 * @flag Error flag - TTY_NORMAL (0) for no error
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002025 */
2026static void smux_rx_handle_pkt_payload(const unsigned char *data,
2027 int len, int *used, int flag)
2028{
2029 int remaining;
2030
2031 if (flag) {
2032 pr_err("%s: TTY RX error %d\n", __func__, flag);
2033 smux_enter_reset();
2034 smux.rx_state = SMUX_RX_FAILURE;
2035 ++*used;
2036 return;
2037 }
2038
2039 /* copy data into rx buffer */
2040 if (smux.pkt_remain < (len - *used))
2041 remaining = smux.pkt_remain;
2042 else
2043 remaining = len - *used;
2044
2045 memcpy(&smux.recv_buf[smux.recv_len], &data[*used], remaining);
2046 smux.recv_len += remaining;
2047 smux.pkt_remain -= remaining;
2048 *used += remaining;
2049
2050 if (smux.pkt_remain == 0) {
2051 /* complete packet received */
2052 smux_deserialize(smux.recv_buf, smux.recv_len);
2053 smux.rx_state = SMUX_RX_IDLE;
2054 }
2055}
2056
2057/**
2058 * Feed data to the receive state machine.
2059 *
2060 * @data Pointer to data block
2061 * @len Length of data
2062 * @flag TTY_NORMAL (0) for no error, otherwise TTY Error Flag
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002063 */
2064void smux_rx_state_machine(const unsigned char *data,
2065 int len, int flag)
2066{
Eric Holmberg0560f7a2012-05-31 15:50:26 -06002067 struct smux_rx_worker_data work;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002068
Eric Holmberg0560f7a2012-05-31 15:50:26 -06002069 work.data = data;
2070 work.len = len;
2071 work.flag = flag;
2072 INIT_WORK_ONSTACK(&work.work, smux_rx_worker);
2073 work.work_complete = COMPLETION_INITIALIZER_ONSTACK(work.work_complete);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002074
Eric Holmberg0560f7a2012-05-31 15:50:26 -06002075 queue_work(smux_rx_wq, &work.work);
2076 wait_for_completion(&work.work_complete);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002077}
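
/*
 * Note on the pattern above: the on-stack work item and on-stack
 * completion let the TTY caller block until the RX worker has consumed
 * its buffer, so @data may safely live on the caller's stack while all
 * parsing still happens on smux_rx_wq.
 */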
2078
2079/**
2080 * Add channel to transmit-ready list and trigger transmit worker.
2081 *
2082 * @ch Channel to add
2083 */
2084static void list_channel(struct smux_lch_t *ch)
2085{
2086 unsigned long flags;
2087
2088 SMUX_DBG("%s: listing channel %d\n",
2089 __func__, ch->lcid);
2090
2091 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
2092 spin_lock(&ch->tx_lock_lhb2);
2093 smux.tx_activity_flag = 1;
2094 if (list_empty(&ch->tx_ready_list))
2095 list_add_tail(&ch->tx_ready_list, &smux.lch_tx_ready_list);
2096 spin_unlock(&ch->tx_lock_lhb2);
2097 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2098
2099 queue_work(smux_tx_wq, &smux_tx_work);
2100}
2101
2102/**
2103 * Transmit a packet on the correct transport and then perform client
2104 * notification.
2105 *
2106 * @ch Channel to transmit on
2107 * @pkt Packet to transmit
2108 */
2109static void smux_tx_pkt(struct smux_lch_t *ch, struct smux_pkt_t *pkt)
2110{
2111 union notifier_metadata meta_write;
2112 int ret;
2113
2114 if (ch && pkt) {
2115 SMUX_LOG_PKT_TX(pkt);
2116 if (ch->local_mode == SMUX_LCH_MODE_LOCAL_LOOPBACK)
2117 ret = smux_tx_loopback(pkt);
2118 else
2119 ret = smux_tx_tty(pkt);
2120
2121 if (pkt->hdr.cmd == SMUX_CMD_DATA) {
2122 /* notify write-done */
2123 meta_write.write.pkt_priv = pkt->priv;
2124 meta_write.write.buffer = pkt->payload;
2125 meta_write.write.len = pkt->hdr.payload_len;
2126 if (ret >= 0) {
2127				SMUX_DBG("%s: PKT write done\n", __func__);
2128 schedule_notify(ch->lcid, SMUX_WRITE_DONE,
2129 &meta_write);
2130 } else {
2131 pr_err("%s: failed to write pkt %d\n",
2132 __func__, ret);
2133 schedule_notify(ch->lcid, SMUX_WRITE_FAIL,
2134 &meta_write);
2135 }
2136 }
2137 }
2138}
2139
2140/**
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002141 * Flush pending TTY TX data.
2142 */
2143static void smux_flush_tty(void)
2144{
Eric Holmberg92a67df2012-06-25 13:56:24 -06002145 mutex_lock(&smux.mutex_lha0);
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002146 if (!smux.tty) {
2147 pr_err("%s: ldisc not loaded\n", __func__);
Eric Holmberg92a67df2012-06-25 13:56:24 -06002148 mutex_unlock(&smux.mutex_lha0);
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002149 return;
2150 }
2151
2152 tty_wait_until_sent(smux.tty,
2153 msecs_to_jiffies(TTY_BUFFER_FULL_WAIT_MS));
2154
2155 if (tty_chars_in_buffer(smux.tty) > 0)
2156 pr_err("%s: unable to flush UART queue\n", __func__);
Eric Holmberg92a67df2012-06-25 13:56:24 -06002157
2158 mutex_unlock(&smux.mutex_lha0);
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002159}
2160
2161/**
Eric Holmberged1f00c2012-06-07 09:45:18 -06002162 * Purge TX queue for logical channel.
2163 *
2164 * @ch Logical channel pointer
2165 *
2166 * Must be called with the following spinlocks locked:
2167 * state_lock_lhb1
2168 * tx_lock_lhb2
2169 */
2170static void smux_purge_ch_tx_queue(struct smux_lch_t *ch)
2171{
2172 struct smux_pkt_t *pkt;
2173 int send_disconnect = 0;
2174
2175 while (!list_empty(&ch->tx_queue)) {
2176 pkt = list_first_entry(&ch->tx_queue, struct smux_pkt_t,
2177 list);
2178 list_del(&pkt->list);
2179
2180 if (pkt->hdr.cmd == SMUX_CMD_OPEN_LCH) {
2181 /* Open was never sent, just force to closed state */
2182 ch->local_state = SMUX_LCH_LOCAL_CLOSED;
2183 send_disconnect = 1;
2184 } else if (pkt->hdr.cmd == SMUX_CMD_DATA) {
2185 /* Notify client of failed write */
2186 union notifier_metadata meta_write;
2187
2188 meta_write.write.pkt_priv = pkt->priv;
2189 meta_write.write.buffer = pkt->payload;
2190 meta_write.write.len = pkt->hdr.payload_len;
2191 schedule_notify(ch->lcid, SMUX_WRITE_FAIL, &meta_write);
2192 }
2193 smux_free_pkt(pkt);
2194 }
2195
2196 if (send_disconnect) {
2197 union notifier_metadata meta_disconnected;
2198
2199 meta_disconnected.disconnected.is_ssr = smux.in_reset;
2200 schedule_notify(ch->lcid, SMUX_DISCONNECTED,
2201 &meta_disconnected);
2202 }
2203}
2204
2205/**
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002206 * Power-up the UART.
Eric Holmberg92a67df2012-06-25 13:56:24 -06002207 *
2208 * Must be called with smux.mutex_lha0 already locked.
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002209 */
Eric Holmberg92a67df2012-06-25 13:56:24 -06002210static void smux_uart_power_on_atomic(void)
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002211{
2212 struct uart_state *state;
2213
2214 if (!smux.tty || !smux.tty->driver_data) {
2215 pr_err("%s: unable to find UART port for tty %p\n",
2216 __func__, smux.tty);
2217 return;
2218 }
2219 state = smux.tty->driver_data;
2220 msm_hs_request_clock_on(state->uart_port);
2221}
2222
2223/**
Eric Holmberg92a67df2012-06-25 13:56:24 -06002224 * Power-up the UART.
2225 */
2226static void smux_uart_power_on(void)
2227{
2228 mutex_lock(&smux.mutex_lha0);
2229 smux_uart_power_on_atomic();
2230 mutex_unlock(&smux.mutex_lha0);
2231}
2232
2233/**
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002234 * Power down the UART.
2235 */
2236static void smux_uart_power_off(void)
2237{
2238 struct uart_state *state;
2239
Eric Holmberg92a67df2012-06-25 13:56:24 -06002240 mutex_lock(&smux.mutex_lha0);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002241 if (!smux.tty || !smux.tty->driver_data) {
2242 pr_err("%s: unable to find UART port for tty %p\n",
2243 __func__, smux.tty);
Eric Holmberg92a67df2012-06-25 13:56:24 -06002244 mutex_unlock(&smux.mutex_lha0);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002245 return;
2246 }
2247 state = smux.tty->driver_data;
2248 msm_hs_request_clock_off(state->uart_port);
Eric Holmberg92a67df2012-06-25 13:56:24 -06002249 mutex_unlock(&smux.mutex_lha0);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002250}
2251
2252/**
2253 * TX Wakeup Worker
2254 *
2255 * @work Not used
2256 *
2257 * Do an exponential back-off wakeup sequence with a maximum period
2258 * of approximately 1 second (1 << 20 microseconds).
2259 */
2260static void smux_wakeup_worker(struct work_struct *work)
2261{
2262 unsigned long flags;
2263 unsigned wakeup_delay;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002264
Eric Holmberg2d4f9e82012-06-21 13:12:39 -06002265 if (smux.in_reset)
2266 return;
2267
2268 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
2269 if (smux.power_state == SMUX_PWR_ON) {
2270 /* wakeup complete */
Eric Holmberga9b06472012-06-22 09:46:34 -06002271 smux.pwr_wakeup_delay_us = 1;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002272 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002273 SMUX_DBG("%s: wakeup complete\n", __func__);
Eric Holmberg2d4f9e82012-06-21 13:12:39 -06002274
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002275 /*
2276 * Cancel any pending retry. This avoids a race condition with
2277 * a new power-up request because:
2278 * 1) this worker doesn't modify the state
2279 * 2) this worker is processed on the same single-threaded
2280 * workqueue as new TX wakeup requests
2281 */
2282 cancel_delayed_work(&smux_wakeup_delayed_work);
Eric Holmbergd032f5b2012-06-29 19:02:00 -06002283 queue_work(smux_tx_wq, &smux_tx_work);
Eric Holmberga9b06472012-06-22 09:46:34 -06002284 } else if (smux.power_state == SMUX_PWR_TURNING_ON) {
Eric Holmberg2d4f9e82012-06-21 13:12:39 -06002285 /* retry wakeup */
2286 wakeup_delay = smux.pwr_wakeup_delay_us;
2287 smux.pwr_wakeup_delay_us <<= 1;
2288 if (smux.pwr_wakeup_delay_us > SMUX_WAKEUP_DELAY_MAX)
2289 smux.pwr_wakeup_delay_us =
2290 SMUX_WAKEUP_DELAY_MAX;
2291
2292 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
Eric Holmberga9b06472012-06-22 09:46:34 -06002293 SMUX_PWR("%s: triggering wakeup\n", __func__);
Eric Holmberg2d4f9e82012-06-21 13:12:39 -06002294 smux_send_byte(SMUX_WAKEUP_REQ);
2295
2296 if (wakeup_delay < SMUX_WAKEUP_DELAY_MIN) {
2297 SMUX_DBG("%s: sleeping for %u us\n", __func__,
2298 wakeup_delay);
2299 usleep_range(wakeup_delay, 2*wakeup_delay);
2300 queue_work(smux_tx_wq, &smux_wakeup_work);
2301 } else {
2302 /* schedule delayed work */
2303 SMUX_DBG("%s: scheduling delayed wakeup in %u ms\n",
2304 __func__, wakeup_delay / 1000);
2305 queue_delayed_work(smux_tx_wq,
2306 &smux_wakeup_delayed_work,
2307 msecs_to_jiffies(wakeup_delay / 1000));
2308 }
Eric Holmberga9b06472012-06-22 09:46:34 -06002309 } else {
2310 /* wakeup aborted */
2311 smux.pwr_wakeup_delay_us = 1;
2312 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2313 SMUX_PWR("%s: wakeup aborted\n", __func__);
2314 cancel_delayed_work(&smux_wakeup_delayed_work);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002315 }
2316}
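
/*
 * Illustrative back-off progression (derived from the shifts above, not a
 * separately tuned table): pwr_wakeup_delay_us doubles on each retry, so
 * delays of 1, 2, 4, ... 16384 us take the usleep_range() path (below
 * SMUX_WAKEUP_DELAY_MIN, 1 << 15), while 32768 us (32 ms) and longer are
 * scheduled as delayed work, capped at SMUX_WAKEUP_DELAY_MAX
 * (1 << 20 us, roughly one second).
 */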
2317
2318
2319/**
2320 * Inactivity timeout worker. Periodically scheduled when link is active.
2321 * When it detects inactivity, it will power down the UART link.
2322 *
2323 * @work Work structure (not used)
2324 */
2325static void smux_inactivity_worker(struct work_struct *work)
2326{
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002327 struct smux_pkt_t *pkt;
2328 unsigned long flags;
2329
2330 spin_lock_irqsave(&smux.rx_lock_lha1, flags);
2331 spin_lock(&smux.tx_lock_lha2);
2332
2333 if (!smux.tx_activity_flag && !smux.rx_activity_flag) {
2334 /* no activity */
2335 if (smux.powerdown_enabled) {
2336 if (smux.power_state == SMUX_PWR_ON) {
2337 /* start power-down sequence */
2338 pkt = smux_alloc_pkt();
2339 if (pkt) {
Eric Holmbergff0b0112012-06-08 15:06:57 -06002340 SMUX_PWR("%s: Power %d->%d\n", __func__,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002341 smux.power_state,
Eric Holmberga9b06472012-06-22 09:46:34 -06002342 SMUX_PWR_TURNING_OFF_FLUSH);
2343 smux.power_state =
2344 SMUX_PWR_TURNING_OFF_FLUSH;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002345
2346 /* send power-down request */
2347 pkt->hdr.cmd = SMUX_CMD_PWR_CTL;
2348 pkt->hdr.flags = 0;
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002349 pkt->hdr.lcid = SMUX_BROADCAST_LCID;
2350 list_add_tail(&pkt->list,
2351 &smux.power_queue);
2352 queue_work(smux_tx_wq, &smux_tx_work);
2353 } else {
2354 pr_err("%s: packet alloc failed\n",
2355 __func__);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002356 }
2357 }
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002358 }
2359 }
2360 smux.tx_activity_flag = 0;
2361 smux.rx_activity_flag = 0;
2362
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002363 if (smux.power_state == SMUX_PWR_OFF_FLUSH) {
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002364 /* ready to power-down the UART */
Eric Holmbergff0b0112012-06-08 15:06:57 -06002365 SMUX_PWR("%s: Power %d->%d\n", __func__,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002366 smux.power_state, SMUX_PWR_OFF);
Eric Holmbergff0b0112012-06-08 15:06:57 -06002367 smux.power_state = SMUX_PWR_OFF;
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002368
2369 /* if data is pending, schedule a new wakeup */
2370 if (!list_empty(&smux.lch_tx_ready_list) ||
2371 !list_empty(&smux.power_queue))
2372 queue_work(smux_tx_wq, &smux_tx_work);
2373
2374 spin_unlock(&smux.tx_lock_lha2);
2375 spin_unlock_irqrestore(&smux.rx_lock_lha1, flags);
2376
2377 /* flush UART output queue and power down */
2378 smux_flush_tty();
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002379 smux_uart_power_off();
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002380 } else {
2381 spin_unlock(&smux.tx_lock_lha2);
2382 spin_unlock_irqrestore(&smux.rx_lock_lha1, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002383 }
2384
2385 /* reschedule inactivity worker */
2386 if (smux.power_state != SMUX_PWR_OFF)
2387 queue_delayed_work(smux_tx_wq, &smux_delayed_inactivity_work,
2388 msecs_to_jiffies(SMUX_INACTIVITY_TIMEOUT_MS));
2389}
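
/*
 * Informational summary of the power-state transitions driven in this
 * file (see smux_handle_rx_power_cmd(), smux_tx_worker(), and the wakeup
 * handlers for the authoritative logic):
 *
 *	SMUX_PWR_OFF -> SMUX_PWR_TURNING_ON	(TX data pending, wakeup sent)
 *	SMUX_PWR_TURNING_ON -> SMUX_PWR_ON	(wakeup REQ or ACK received;
 *						 a REQ also wakes SMUX_PWR_OFF)
 *	SMUX_PWR_ON -> SMUX_PWR_TURNING_OFF_FLUSH (inactivity timeout or
 *						 remote power-down request)
 *	SMUX_PWR_TURNING_OFF_FLUSH -> SMUX_PWR_TURNING_OFF (local power-down
 *						 request transmitted)
 *	SMUX_PWR_TURNING_OFF[_FLUSH] -> SMUX_PWR_OFF_FLUSH (power-down ACK
 *						 sent or received)
 *	SMUX_PWR_OFF_FLUSH -> SMUX_PWR_OFF	(TTY flushed, UART clocked off)
 */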
2390
2391/**
Eric Holmbergb8435c82012-06-05 14:51:29 -06002392 * Remove RX retry packet from channel and free it.
2393 *
Eric Holmbergb8435c82012-06-05 14:51:29 -06002394 * @ch Channel for retry packet
2395 * @retry Retry packet to remove
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002396 *
2397 * @returns 1 if flow control updated; 0 otherwise
2398 *
2399 * Must be called with state_lock_lhb1 locked.
Eric Holmbergb8435c82012-06-05 14:51:29 -06002400 */
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002401int smux_remove_rx_retry(struct smux_lch_t *ch,
Eric Holmbergb8435c82012-06-05 14:51:29 -06002402 struct smux_rx_pkt_retry *retry)
2403{
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002404 int tx_ready = 0;
2405
Eric Holmbergb8435c82012-06-05 14:51:29 -06002406 list_del(&retry->rx_retry_list);
2407 --ch->rx_retry_queue_cnt;
2408 smux_free_pkt(retry->pkt);
2409 kfree(retry);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002410
2411 if ((ch->options & SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP) &&
2412 (ch->rx_retry_queue_cnt <= SMUX_RX_WM_LOW) &&
2413 ch->rx_flow_control_auto) {
2414 ch->rx_flow_control_auto = 0;
2415 smux_rx_flow_control_updated(ch);
2416 schedule_notify(ch->lcid, SMUX_RX_RETRY_LOW_WM_HIT, NULL);
2417 tx_ready = 1;
2418 }
2419 return tx_ready;
Eric Holmbergb8435c82012-06-05 14:51:29 -06002420}
2421
2422/**
Eric Holmberg0560f7a2012-05-31 15:50:26 -06002423 * RX worker handles all receive operations.
2424 *
2425 * @work Work structure contained in the smux_rx_worker_data structure
2426 */
2427static void smux_rx_worker(struct work_struct *work)
2428{
2429 unsigned long flags;
2430 int used;
2431 int initial_rx_state;
2432 struct smux_rx_worker_data *w;
2433 const unsigned char *data;
2434 int len;
2435 int flag;
2436
2437 w = container_of(work, struct smux_rx_worker_data, work);
2438 data = w->data;
2439 len = w->len;
2440 flag = w->flag;
2441
2442 spin_lock_irqsave(&smux.rx_lock_lha1, flags);
2443 smux.rx_activity_flag = 1;
2444 spin_unlock_irqrestore(&smux.rx_lock_lha1, flags);
2445
2446 SMUX_DBG("%s: %p, len=%d, flag=%d\n", __func__, data, len, flag);
2447 used = 0;
2448 do {
2449 SMUX_DBG("%s: state %d; %d of %d\n",
2450 __func__, smux.rx_state, used, len);
2451 initial_rx_state = smux.rx_state;
2452
2453 switch (smux.rx_state) {
2454 case SMUX_RX_IDLE:
2455 smux_rx_handle_idle(data, len, &used, flag);
2456 break;
2457 case SMUX_RX_MAGIC:
2458 smux_rx_handle_magic(data, len, &used, flag);
2459 break;
2460 case SMUX_RX_HDR:
2461 smux_rx_handle_hdr(data, len, &used, flag);
2462 break;
2463 case SMUX_RX_PAYLOAD:
2464 smux_rx_handle_pkt_payload(data, len, &used, flag);
2465 break;
2466 default:
2467 SMUX_DBG("%s: invalid state %d\n",
2468 __func__, smux.rx_state);
2469 smux.rx_state = SMUX_RX_IDLE;
2470 break;
2471 }
2472 } while (used < len || smux.rx_state != initial_rx_state);
2473
2474 complete(&w->work_complete);
2475}
2476
2477/**
Eric Holmbergb8435c82012-06-05 14:51:29 -06002478 * RX Retry worker handles retrying get_rx_buffer calls that previously failed
2479 * because the client was not ready (-EAGAIN).
2480 *
2481 * @work Work structure contained in smux_lch_t structure
2482 */
2483static void smux_rx_retry_worker(struct work_struct *work)
2484{
2485 struct smux_lch_t *ch;
2486 struct smux_rx_pkt_retry *retry;
2487 union notifier_metadata metadata;
2488 int tmp;
2489 unsigned long flags;
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002490 int immediate_retry = 0;
2491 int tx_ready = 0;
Eric Holmbergb8435c82012-06-05 14:51:29 -06002492
2493 ch = container_of(work, struct smux_lch_t, rx_retry_work.work);
2494
2495 /* get next retry packet */
2496 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2497 if (ch->local_state != SMUX_LCH_LOCAL_OPENED) {
2498 /* port has been closed - remove all retries */
2499 while (!list_empty(&ch->rx_retry_queue)) {
2500 retry = list_first_entry(&ch->rx_retry_queue,
2501 struct smux_rx_pkt_retry,
2502 rx_retry_list);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002503 (void)smux_remove_rx_retry(ch, retry);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002504 }
2505 }
2506
2507 if (list_empty(&ch->rx_retry_queue)) {
2508 SMUX_DBG("%s: retry list empty for channel %d\n",
2509 __func__, ch->lcid);
2510 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2511 return;
2512 }
2513 retry = list_first_entry(&ch->rx_retry_queue,
2514 struct smux_rx_pkt_retry,
2515 rx_retry_list);
2516 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2517
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002518 SMUX_DBG("%s: ch %d retrying rx pkt %p\n",
2519 __func__, ch->lcid, retry);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002520 metadata.read.pkt_priv = 0;
2521 metadata.read.buffer = 0;
2522 tmp = ch->get_rx_buffer(ch->priv,
2523 (void **)&metadata.read.pkt_priv,
2524 (void **)&metadata.read.buffer,
2525 retry->pkt->hdr.payload_len);
2526 if (tmp == 0 && metadata.read.buffer) {
2527 /* have valid RX buffer */
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002528
Eric Holmbergb8435c82012-06-05 14:51:29 -06002529 memcpy(metadata.read.buffer, retry->pkt->payload,
2530 retry->pkt->hdr.payload_len);
2531 metadata.read.len = retry->pkt->hdr.payload_len;
2532
2533 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002534 tx_ready = smux_remove_rx_retry(ch, retry);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002535 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002536 schedule_notify(ch->lcid, SMUX_READ_DONE, &metadata);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002537 if (tx_ready)
2538 list_channel(ch);
2539
2540 immediate_retry = 1;
Eric Holmbergb8435c82012-06-05 14:51:29 -06002541 } else if (tmp == -EAGAIN ||
2542 (tmp == 0 && !metadata.read.buffer)) {
2543 /* retry again */
2544 retry->timeout_in_ms <<= 1;
2545 if (retry->timeout_in_ms > SMUX_RX_RETRY_MAX_MS) {
2546 /* timed out */
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002547 pr_err("%s: ch %d RX retry client timeout\n",
2548 __func__, ch->lcid);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002549 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002550 tx_ready = smux_remove_rx_retry(ch, retry);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002551 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002552 schedule_notify(ch->lcid, SMUX_READ_FAIL, NULL);
2553 if (tx_ready)
2554 list_channel(ch);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002555 }
2556 } else {
2557 /* client error - drop packet */
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002558 pr_err("%s: ch %d RX retry client failed (%d)\n",
2559 __func__, ch->lcid, tmp);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002560 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002561 tx_ready = smux_remove_rx_retry(ch, retry);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002562 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002563 schedule_notify(ch->lcid, SMUX_READ_FAIL, NULL);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002564 if (tx_ready)
2565 list_channel(ch);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002566 }
2567
2568 /* schedule next retry */
2569 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2570 if (!list_empty(&ch->rx_retry_queue)) {
2571 retry = list_first_entry(&ch->rx_retry_queue,
2572 struct smux_rx_pkt_retry,
2573 rx_retry_list);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002574
2575 if (immediate_retry)
2576 queue_delayed_work(smux_rx_wq, &ch->rx_retry_work, 0);
2577 else
2578 queue_delayed_work(smux_rx_wq, &ch->rx_retry_work,
2579 msecs_to_jiffies(retry->timeout_in_ms));
Eric Holmbergb8435c82012-06-05 14:51:29 -06002580 }
2581 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2582}
2583
2584/**
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002585 * Transmit worker handles serializing and transmitting packets onto the
2586 * underlying transport.
2587 *
2588 * @work Work structure (not used)
2589 */
2590static void smux_tx_worker(struct work_struct *work)
2591{
2592 struct smux_pkt_t *pkt;
2593 struct smux_lch_t *ch;
2594 unsigned low_wm_notif;
2595 unsigned lcid;
2596 unsigned long flags;
2597
2598
2599 /*
2600 * Transmit packets in round-robin fashion based upon ready
2601 * channels.
2602 *
2603 * To eliminate the need to hold a lock for the entire
2604 * iteration through the channel ready list, the head of the
2605 * ready-channel list is always the next channel to be
2606 * processed. To send a packet, the first valid packet in
2607 * the head channel is removed and the head channel is then
2608 * rescheduled at the end of the queue by removing it and
2609 * inserting after the tail. The locks can then be released
2610 * while the packet is processed.
2611 */
Eric Holmberged1f00c2012-06-07 09:45:18 -06002612 while (!smux.in_reset) {
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002613 pkt = NULL;
2614 low_wm_notif = 0;
2615
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002616 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002617
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002618 /* handle wakeup if needed */
2619 if (smux.power_state == SMUX_PWR_OFF) {
2620 if (!list_empty(&smux.lch_tx_ready_list) ||
2621 !list_empty(&smux.power_queue)) {
2622 /* data to transmit, do wakeup */
Eric Holmbergff0b0112012-06-08 15:06:57 -06002623 SMUX_PWR("%s: Power %d->%d\n", __func__,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002624 smux.power_state,
2625 SMUX_PWR_TURNING_ON);
2626 smux.power_state = SMUX_PWR_TURNING_ON;
2627 spin_unlock_irqrestore(&smux.tx_lock_lha2,
2628 flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002629 queue_work(smux_tx_wq, &smux_wakeup_work);
2630 } else {
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002631 /* no activity -- stay asleep */
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002632 spin_unlock_irqrestore(&smux.tx_lock_lha2,
2633 flags);
2634 }
2635 break;
2636 }
2637
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002638 /* process any pending power packets */
2639 if (!list_empty(&smux.power_queue)) {
2640 pkt = list_first_entry(&smux.power_queue,
2641 struct smux_pkt_t, list);
2642 list_del(&pkt->list);
2643 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2644
Eric Holmberga9b06472012-06-22 09:46:34 -06002645 /* Adjust power state if this is a flush command */
2646 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
2647 if (smux.power_state == SMUX_PWR_TURNING_OFF_FLUSH &&
2648 pkt->hdr.cmd == SMUX_CMD_PWR_CTL) {
2649 if (pkt->hdr.flags & SMUX_CMD_PWR_CTL_ACK ||
2650 smux.power_ctl_remote_req_received) {
2651 /*
2652 * Sending remote power-down request ACK
2653 * or sending local power-down request
2654 * and we already received a remote
2655 * power-down request.
2656 */
2657 SMUX_PWR("%s: Power %d->%d\n", __func__,
2658 smux.power_state,
2659 SMUX_PWR_OFF_FLUSH);
2660 smux.power_state = SMUX_PWR_OFF_FLUSH;
2661 smux.power_ctl_remote_req_received = 0;
2662 queue_work(smux_tx_wq,
2663 &smux_inactivity_work);
2664 } else {
2665 /* sending local power-down request */
2666 SMUX_PWR("%s: Power %d->%d\n", __func__,
2667 smux.power_state,
2668 SMUX_PWR_TURNING_OFF);
2669 smux.power_state = SMUX_PWR_TURNING_OFF;
2670 }
2671 }
2672 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2673
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002674 /* send the packet */
Eric Holmberga9b06472012-06-22 09:46:34 -06002675 smux_uart_power_on();
2676 smux.tx_activity_flag = 1;
Eric Holmberg4dd6e1a2012-06-26 13:46:29 -06002677 SMUX_PWR_PKT_TX(pkt);
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002678 if (!smux_byte_loopback) {
2679 smux_tx_tty(pkt);
2680 smux_flush_tty();
2681 } else {
2682 smux_tx_loopback(pkt);
2683 }
2684
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002685 smux_free_pkt(pkt);
2686 continue;
2687 }
2688
2689 /* get the next ready channel */
2690 if (list_empty(&smux.lch_tx_ready_list)) {
2691 /* no ready channels */
2692 SMUX_DBG("%s: no more ready channels, exiting\n",
2693 __func__);
2694 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2695 break;
2696 }
2697 smux.tx_activity_flag = 1;
2698
2699 if (smux.power_state != SMUX_PWR_ON) {
2700 /* channel not ready to transmit */
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002701 SMUX_DBG("%s: waiting for link up (state %d)\n",
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002702 __func__,
2703 smux.power_state);
2704 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2705 break;
2706 }
2707
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002708 /* get the next packet to send and rotate channel list */
2709 ch = list_first_entry(&smux.lch_tx_ready_list,
2710 struct smux_lch_t,
2711 tx_ready_list);
2712
2713 spin_lock(&ch->state_lock_lhb1);
2714 spin_lock(&ch->tx_lock_lhb2);
2715 if (!list_empty(&ch->tx_queue)) {
2716 /*
2717 * If remote TX flow control is enabled or
2718 * the channel is not fully opened, then only
2719 * send command packets.
2720 */
2721 if (ch->tx_flow_control || !IS_FULLY_OPENED(ch)) {
2722 struct smux_pkt_t *curr;
2723 list_for_each_entry(curr, &ch->tx_queue, list) {
2724 if (curr->hdr.cmd != SMUX_CMD_DATA) {
2725 pkt = curr;
2726 break;
2727 }
2728 }
2729 } else {
2730 /* get next cmd/data packet to send */
2731 pkt = list_first_entry(&ch->tx_queue,
2732 struct smux_pkt_t, list);
2733 }
2734 }
2735
2736 if (pkt) {
2737 list_del(&pkt->list);
2738
2739 /* update packet stats */
2740 if (pkt->hdr.cmd == SMUX_CMD_DATA) {
2741 --ch->tx_pending_data_cnt;
2742 if (ch->notify_lwm &&
2743 ch->tx_pending_data_cnt
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002744 <= SMUX_TX_WM_LOW) {
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002745 ch->notify_lwm = 0;
2746 low_wm_notif = 1;
2747 }
2748 }
2749
2750 /* advance to the next ready channel */
2751 list_rotate_left(&smux.lch_tx_ready_list);
2752 } else {
2753 /* no data in channel to send, remove from ready list */
2754 list_del(&ch->tx_ready_list);
2755 INIT_LIST_HEAD(&ch->tx_ready_list);
2756 }
2757 lcid = ch->lcid;
2758 spin_unlock(&ch->tx_lock_lhb2);
2759 spin_unlock(&ch->state_lock_lhb1);
2760 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2761
2762 if (low_wm_notif)
2763 schedule_notify(lcid, SMUX_LOW_WM_HIT, NULL);
2764
2765 /* send the packet */
2766 smux_tx_pkt(ch, pkt);
2767 smux_free_pkt(pkt);
2768 }
2769}
2770
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002771/**
2772 * Update the RX flow control (sent in the TIOCM Status command).
2773 *
2774 * @ch Channel for update
2775 *
2776 * @returns 1 for updated, 0 for not updated
2777 *
2778 * Must be called with ch->state_lock_lhb1 locked.
2779 */
2780static int smux_rx_flow_control_updated(struct smux_lch_t *ch)
2781{
2782 int updated = 0;
2783 int prev_state;
2784
2785 prev_state = ch->local_tiocm & SMUX_CMD_STATUS_FLOW_CNTL;
2786
2787 if (ch->rx_flow_control_client || ch->rx_flow_control_auto)
2788 ch->local_tiocm |= SMUX_CMD_STATUS_FLOW_CNTL;
2789 else
2790 ch->local_tiocm &= ~SMUX_CMD_STATUS_FLOW_CNTL;
2791
2792 if (prev_state != (ch->local_tiocm & SMUX_CMD_STATUS_FLOW_CNTL)) {
2793 smux_send_status_cmd(ch);
2794 updated = 1;
2795 }
2796
2797 return updated;
2798}
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002799
2800/**********************************************************************/
2801/* Kernel API */
2802/**********************************************************************/
2803
2804/**
2805 * Set or clear channel option using the SMUX_CH_OPTION_* channel
2806 * flags.
2807 *
2808 * @lcid Logical channel ID
2809 * @set Options to set
2810 * @clear Options to clear
2811 *
2812 * @returns 0 for success, < 0 for failure
2813 */
2814int msm_smux_set_ch_option(uint8_t lcid, uint32_t set, uint32_t clear)
2815{
2816 unsigned long flags;
2817 struct smux_lch_t *ch;
2818 int tx_ready = 0;
2819 int ret = 0;
2820
2821 if (smux_assert_lch_id(lcid))
2822 return -ENXIO;
2823
2824 ch = &smux_lch[lcid];
2825 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2826
2827 /* Local loopback mode */
2828 if (set & SMUX_CH_OPTION_LOCAL_LOOPBACK)
2829 ch->local_mode = SMUX_LCH_MODE_LOCAL_LOOPBACK;
2830
2831 if (clear & SMUX_CH_OPTION_LOCAL_LOOPBACK)
2832 ch->local_mode = SMUX_LCH_MODE_NORMAL;
2833
2834 /* Remote loopback mode */
2835 if (set & SMUX_CH_OPTION_REMOTE_LOOPBACK)
2836 ch->local_mode = SMUX_LCH_MODE_REMOTE_LOOPBACK;
2837
2838 if (clear & SMUX_CH_OPTION_REMOTE_LOOPBACK)
2839 ch->local_mode = SMUX_LCH_MODE_NORMAL;
2840
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002841 /* RX Flow control */
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002842 if (set & SMUX_CH_OPTION_REMOTE_TX_STOP) {
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002843 ch->rx_flow_control_client = 1;
2844 tx_ready |= smux_rx_flow_control_updated(ch);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002845 }
2846
2847 if (clear & SMUX_CH_OPTION_REMOTE_TX_STOP) {
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002848 ch->rx_flow_control_client = 0;
2849 tx_ready |= smux_rx_flow_control_updated(ch);
2850 }
2851
2852 /* Auto RX Flow Control */
2853 if (set & SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP) {
2854 SMUX_DBG("%s: auto rx flow control option enabled\n",
2855 __func__);
2856 ch->options |= SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP;
2857 }
2858
2859 if (clear & SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP) {
2860 SMUX_DBG("%s: auto rx flow control option disabled\n",
2861 __func__);
2862 ch->options &= ~SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP;
2863 ch->rx_flow_control_auto = 0;
2864 tx_ready |= smux_rx_flow_control_updated(ch);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002865 }
2866
2867 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2868
2869 if (tx_ready)
2870 list_channel(ch);
2871
2872 return ret;
2873}
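
/*
 * Example (illustrative only): putting a channel into local loopback for
 * testing and then restoring normal mode:
 *
 *	ret = msm_smux_set_ch_option(lcid, SMUX_CH_OPTION_LOCAL_LOOPBACK, 0);
 *	...
 *	ret = msm_smux_set_ch_option(lcid, 0, SMUX_CH_OPTION_LOCAL_LOOPBACK);
 */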
2874
2875/**
2876 * Starts the opening sequence for a logical channel.
2877 *
2878 * @lcid Logical channel ID
2879 * @priv Free for client usage
2880 * @notify Event notification function
2881 * @get_rx_buffer Function used to provide a receive buffer to SMUX
2882 *
2883 * @returns 0 for success, <0 otherwise
2884 *
2885 * A channel must be fully closed (either not previously opened, or
2886 * msm_smux_close() has been called and the SMUX_DISCONNECTED notification
2887 * has been received).
2888 *
2889 * Once the remote side is opened, the client will receive a SMUX_CONNECTED
2890 * event.
2891 */
2892int msm_smux_open(uint8_t lcid, void *priv,
2893 void (*notify)(void *priv, int event_type, const void *metadata),
2894 int (*get_rx_buffer)(void *priv, void **pkt_priv, void **buffer,
2895 int size))
2896{
2897 int ret;
2898 struct smux_lch_t *ch;
2899 struct smux_pkt_t *pkt;
2900 int tx_ready = 0;
2901 unsigned long flags;
2902
2903 if (smux_assert_lch_id(lcid))
2904 return -ENXIO;
2905
2906 ch = &smux_lch[lcid];
2907 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2908
2909 if (ch->local_state == SMUX_LCH_LOCAL_CLOSING) {
2910 ret = -EAGAIN;
2911 goto out;
2912 }
2913
2914 if (ch->local_state != SMUX_LCH_LOCAL_CLOSED) {
2915 pr_err("%s: open lcid %d local state %x invalid\n",
2916 __func__, lcid, ch->local_state);
2917 ret = -EINVAL;
2918 goto out;
2919 }
2920
2921 SMUX_DBG("lcid %d local state 0x%x -> 0x%x\n", lcid,
2922 ch->local_state,
2923 SMUX_LCH_LOCAL_OPENING);
2924
2925 ch->local_state = SMUX_LCH_LOCAL_OPENING;
2926
2927 ch->priv = priv;
2928 ch->notify = notify;
2929 ch->get_rx_buffer = get_rx_buffer;
2930 ret = 0;
2931
2932 /* Send Open Command */
2933 pkt = smux_alloc_pkt();
2934 if (!pkt) {
2935 ret = -ENOMEM;
2936 goto out;
2937 }
2938 pkt->hdr.magic = SMUX_MAGIC;
2939 pkt->hdr.cmd = SMUX_CMD_OPEN_LCH;
2940 pkt->hdr.flags = SMUX_CMD_OPEN_POWER_COLLAPSE;
2941 if (ch->local_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK)
2942 pkt->hdr.flags |= SMUX_CMD_OPEN_REMOTE_LOOPBACK;
2943 pkt->hdr.lcid = lcid;
2944 pkt->hdr.payload_len = 0;
2945 pkt->hdr.pad_len = 0;
2946 smux_tx_queue(pkt, ch, 0);
2947 tx_ready = 1;
2948
2949out:
2950 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2951 if (tx_ready)
2952 list_channel(ch);
2953 return ret;
2954}
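
/*
 * Example client usage (an illustrative sketch, not part of this driver;
 * the smux_demo_* names are hypothetical). The notify callback receives
 * SMUX_CONNECTED once the remote side has acknowledged the open, and
 * get_rx_buffer may return -EAGAIN to arm the RX retry path:
 *
 *	static void smux_demo_notify(void *priv, int event_type,
 *				     const void *metadata)
 *	{
 *		switch (event_type) {
 *		case SMUX_CONNECTED:
 *			pr_info("smux demo: channel open\n");
 *			break;
 *		case SMUX_DISCONNECTED:
 *			pr_info("smux demo: channel closed\n");
 *			break;
 *		default:
 *			break;
 *		}
 *	}
 *
 *	static int smux_demo_get_rx_buffer(void *priv, void **pkt_priv,
 *					   void **buffer, int size)
 *	{
 *		*pkt_priv = NULL;
 *		*buffer = kmalloc(size, GFP_ATOMIC);
 *		return *buffer ? 0 : -EAGAIN;
 *	}
 *
 *	ret = msm_smux_open(lcid, NULL, smux_demo_notify,
 *			    smux_demo_get_rx_buffer);
 */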
2955
2956/**
2957 * Starts the closing sequence for a logical channel.
2958 *
2959 * @lcid Logical channel ID
2960 *
2961 * @returns 0 for success, <0 otherwise
2962 *
2963 * Once the close event has been acknowledged by the remote side, the client
2964 * will receive a SMUX_DISCONNECTED notification.
2965 */
2966int msm_smux_close(uint8_t lcid)
2967{
2968 int ret = 0;
2969 struct smux_lch_t *ch;
2970 struct smux_pkt_t *pkt;
2971 int tx_ready = 0;
2972 unsigned long flags;
2973
2974 if (smux_assert_lch_id(lcid))
2975 return -ENXIO;
2976
2977 ch = &smux_lch[lcid];
2978 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2979 ch->local_tiocm = 0x0;
2980 ch->remote_tiocm = 0x0;
2981 ch->tx_pending_data_cnt = 0;
2982 ch->notify_lwm = 0;
2983
2984 /* Purge TX queue */
2985 spin_lock(&ch->tx_lock_lhb2);
Eric Holmberged1f00c2012-06-07 09:45:18 -06002986 smux_purge_ch_tx_queue(ch);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002987 spin_unlock(&ch->tx_lock_lhb2);
2988
2989 /* Send Close Command */
2990 if (ch->local_state == SMUX_LCH_LOCAL_OPENED ||
2991 ch->local_state == SMUX_LCH_LOCAL_OPENING) {
2992 SMUX_DBG("lcid %d local state 0x%x -> 0x%x\n", lcid,
2993 ch->local_state,
2994 SMUX_LCH_LOCAL_CLOSING);
2995
2996 ch->local_state = SMUX_LCH_LOCAL_CLOSING;
2997 pkt = smux_alloc_pkt();
2998 if (pkt) {
2999 pkt->hdr.cmd = SMUX_CMD_CLOSE_LCH;
3000 pkt->hdr.flags = 0;
3001 pkt->hdr.lcid = lcid;
3002 pkt->hdr.payload_len = 0;
3003 pkt->hdr.pad_len = 0;
3004 smux_tx_queue(pkt, ch, 0);
3005 tx_ready = 1;
3006 } else {
3007 pr_err("%s: pkt allocation failed\n", __func__);
3008 ret = -ENOMEM;
3009 }
Eric Holmbergb8435c82012-06-05 14:51:29 -06003010
3011 /* Purge RX retry queue */
3012 if (ch->rx_retry_queue_cnt)
3013 queue_delayed_work(smux_rx_wq, &ch->rx_retry_work, 0);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003014 }
3015 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
3016
3017 if (tx_ready)
3018 list_channel(ch);
3019
3020 return ret;
3021}
3022
3023/**
3024 * Write data to a logical channel.
3025 *
3026 * @lcid Logical channel ID
3027 * @pkt_priv Client data that will be returned with the SMUX_WRITE_DONE or
3028 * SMUX_WRITE_FAIL notification.
3029 * @data Data to write
3030 * @len Length of @data
3031 *
3032 * @returns 0 for success, <0 otherwise
3033 *
3034 * Data may be written immediately after msm_smux_open() is called,
3035 * but the data will wait in the transmit queue until the channel has
3036 * been fully opened.
3037 *
3038 * Once the data has been written, the client will receive either a completion
3039 * (SMUX_WRITE_DONE) or a failure notice (SMUX_WRITE_FAIL).
3040 */
3041int msm_smux_write(uint8_t lcid, void *pkt_priv, const void *data, int len)
3042{
3043 struct smux_lch_t *ch;
3044 struct smux_pkt_t *pkt;
3045 int tx_ready = 0;
3046 unsigned long flags;
3047 int ret;
3048
3049 if (smux_assert_lch_id(lcid))
3050 return -ENXIO;
3051
3052 ch = &smux_lch[lcid];
3053 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
3054
3055 if (ch->local_state != SMUX_LCH_LOCAL_OPENED &&
3056 ch->local_state != SMUX_LCH_LOCAL_OPENING) {
3057		pr_err("%s: invalid local state %d channel %d\n",
3058 __func__, ch->local_state, lcid);
3059 ret = -EINVAL;
3060 goto out;
3061 }
3062
3063 if (len > SMUX_MAX_PKT_SIZE - sizeof(struct smux_hdr_t)) {
3064 pr_err("%s: payload %d too large\n",
3065 __func__, len);
3066 ret = -E2BIG;
3067 goto out;
3068 }
3069
3070 pkt = smux_alloc_pkt();
3071 if (!pkt) {
3072 ret = -ENOMEM;
3073 goto out;
3074 }
3075
3076 pkt->hdr.cmd = SMUX_CMD_DATA;
3077 pkt->hdr.lcid = lcid;
3078 pkt->hdr.flags = 0;
3079 pkt->hdr.payload_len = len;
3080 pkt->payload = (void *)data;
3081 pkt->priv = pkt_priv;
3082 pkt->hdr.pad_len = 0;
3083
3084 spin_lock(&ch->tx_lock_lhb2);
3085 /* verify high watermark */
3086	SMUX_DBG("%s: pending %d\n", __func__, ch->tx_pending_data_cnt);
3087
Eric Holmberg2e0906f2012-06-26 13:29:14 -06003088 if (ch->tx_pending_data_cnt >= SMUX_TX_WM_HIGH) {
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003089 pr_err("%s: ch %d high watermark %d exceeded %d\n",
Eric Holmberg2e0906f2012-06-26 13:29:14 -06003090 __func__, lcid, SMUX_TX_WM_HIGH,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003091 ch->tx_pending_data_cnt);
3092 ret = -EAGAIN;
3093 goto out_inner;
3094 }
3095
3096 /* queue packet for transmit */
Eric Holmberg2e0906f2012-06-26 13:29:14 -06003097 if (++ch->tx_pending_data_cnt == SMUX_TX_WM_HIGH) {
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003098 ch->notify_lwm = 1;
3099 pr_err("%s: high watermark hit\n", __func__);
3100 schedule_notify(lcid, SMUX_HIGH_WM_HIT, NULL);
3101 }
3102 list_add_tail(&pkt->list, &ch->tx_queue);
3103
3104 /* add to ready list */
3105 if (IS_FULLY_OPENED(ch))
3106 tx_ready = 1;
3107
3108 ret = 0;
3109
3110out_inner:
3111 spin_unlock(&ch->tx_lock_lhb2);
3112
3113out:
3114 if (ret)
3115 smux_free_pkt(pkt);
3116 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
3117
3118 if (tx_ready)
3119 list_channel(ch);
3120
3121 return ret;
3122}
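
/*
 * Example write with watermark handling (illustrative sketch; my_ctx and
 * my_buf are hypothetical). The payload pointer is stored rather than
 * copied, so the buffer must remain valid until SMUX_WRITE_DONE or
 * SMUX_WRITE_FAIL is delivered, and -EAGAIN means the high watermark was
 * hit and the client should wait for SMUX_LOW_WM_HIT before queuing more:
 *
 *	if (!msm_smux_is_ch_full(lcid))
 *		ret = msm_smux_write(lcid, my_ctx, my_buf, len);
 */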
3123
3124/**
3125 * Returns true if the TX queue is currently full (high water mark).
3126 *
3127 * @lcid Logical channel ID
3128 * @returns 0 if channel is not full
3129 * 1 if it is full
3130 * < 0 for error
3131 */
3132int msm_smux_is_ch_full(uint8_t lcid)
3133{
3134 struct smux_lch_t *ch;
3135 unsigned long flags;
3136 int is_full = 0;
3137
3138 if (smux_assert_lch_id(lcid))
3139 return -ENXIO;
3140
3141 ch = &smux_lch[lcid];
3142
3143 spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06003144 if (ch->tx_pending_data_cnt >= SMUX_TX_WM_HIGH)
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003145 is_full = 1;
3146 spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);
3147
3148 return is_full;
3149}
3150
3151/**
3152 * Returns true if the TX queue has space for more packets (i.e. it is at
3153 * or below the low water mark).
3154 *
3155 * @lcid Logical channel ID
3156 * @returns 0 if channel is above low watermark
3157 * 1 if it's at or below the low watermark
3158 * < 0 for error
3159 */
3160int msm_smux_is_ch_low(uint8_t lcid)
3161{
3162 struct smux_lch_t *ch;
3163 unsigned long flags;
3164 int is_low = 0;
3165
3166 if (smux_assert_lch_id(lcid))
3167 return -ENXIO;
3168
3169 ch = &smux_lch[lcid];
3170
3171 spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06003172 if (ch->tx_pending_data_cnt <= SMUX_TX_WM_LOW)
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003173 is_low = 1;
3174 spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);
3175
3176 return is_low;
3177}
3178
3179/**
3180 * Send TIOCM status update.
3181 *
3182 * @ch Channel for update
3183 *
3184 * @returns 0 for success, <0 for failure
3185 *
3186 * Channel lock must be held before calling.
3187 */
3188static int smux_send_status_cmd(struct smux_lch_t *ch)
3189{
3190 struct smux_pkt_t *pkt;
3191
3192 if (!ch)
3193 return -EINVAL;
3194
3195 pkt = smux_alloc_pkt();
3196 if (!pkt)
3197 return -ENOMEM;
3198
3199 pkt->hdr.lcid = ch->lcid;
3200 pkt->hdr.cmd = SMUX_CMD_STATUS;
3201 pkt->hdr.flags = ch->local_tiocm;
3202 pkt->hdr.payload_len = 0;
3203 pkt->hdr.pad_len = 0;
3204 smux_tx_queue(pkt, ch, 0);
3205
3206 return 0;
3207}
3208
3209/**
3210 * Internal helper function for getting the TIOCM status with
3211 * state_lock_lhb1 already locked.
3212 *
3213 * @ch Channel pointer
3214 *
3215 * @returns TIOCM status
3216 */
3217static long msm_smux_tiocm_get_atomic(struct smux_lch_t *ch)
3218{
3219 long status = 0x0;
3220
3221 status |= (ch->remote_tiocm & SMUX_CMD_STATUS_RTC) ? TIOCM_DSR : 0;
3222 status |= (ch->remote_tiocm & SMUX_CMD_STATUS_RTR) ? TIOCM_CTS : 0;
3223 status |= (ch->remote_tiocm & SMUX_CMD_STATUS_RI) ? TIOCM_RI : 0;
3224 status |= (ch->remote_tiocm & SMUX_CMD_STATUS_DCD) ? TIOCM_CD : 0;
3225
3226 status |= (ch->local_tiocm & SMUX_CMD_STATUS_RTC) ? TIOCM_DTR : 0;
3227 status |= (ch->local_tiocm & SMUX_CMD_STATUS_RTR) ? TIOCM_RTS : 0;
3228
3229 return status;
3230}
3231
3232/**
3233 * Get the TIOCM status bits.
3234 *
3235 * @lcid Logical channel ID
3236 *
3237 * @returns >= 0 TIOCM status bits
3238 * < 0 Error condition
3239 */
3240long msm_smux_tiocm_get(uint8_t lcid)
3241{
3242 struct smux_lch_t *ch;
3243 unsigned long flags;
3244 long status = 0x0;
3245
3246 if (smux_assert_lch_id(lcid))
3247 return -ENXIO;
3248
3249 ch = &smux_lch[lcid];
3250 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
3251 status = msm_smux_tiocm_get_atomic(ch);
3252 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
3253
3254 return status;
3255}
3256
3257/**
3258 * Set/clear the TIOCM status bits.
3259 *
3260 * @lcid Logical channel ID
3261 * @set Bits to set
3262 * @clear Bits to clear
3263 *
3264 * @returns 0 for success; < 0 for failure
3265 *
3266 * If a bit is specified in both the @set and @clear masks, then the clear bit
3267 * definition will dominate and the bit will be cleared.
3268 */
3269int msm_smux_tiocm_set(uint8_t lcid, uint32_t set, uint32_t clear)
3270{
3271 struct smux_lch_t *ch;
3272 unsigned long flags;
3273 uint8_t old_status;
3274 uint8_t status_set = 0x0;
3275 uint8_t status_clear = 0x0;
3276 int tx_ready = 0;
3277 int ret = 0;
3278
3279 if (smux_assert_lch_id(lcid))
3280 return -ENXIO;
3281
3282 ch = &smux_lch[lcid];
3283 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
3284
3285 status_set |= (set & TIOCM_DTR) ? SMUX_CMD_STATUS_RTC : 0;
3286 status_set |= (set & TIOCM_RTS) ? SMUX_CMD_STATUS_RTR : 0;
3287 status_set |= (set & TIOCM_RI) ? SMUX_CMD_STATUS_RI : 0;
3288 status_set |= (set & TIOCM_CD) ? SMUX_CMD_STATUS_DCD : 0;
3289
3290 status_clear |= (clear & TIOCM_DTR) ? SMUX_CMD_STATUS_RTC : 0;
3291 status_clear |= (clear & TIOCM_RTS) ? SMUX_CMD_STATUS_RTR : 0;
3292 status_clear |= (clear & TIOCM_RI) ? SMUX_CMD_STATUS_RI : 0;
3293 status_clear |= (clear & TIOCM_CD) ? SMUX_CMD_STATUS_DCD : 0;
3294
3295 old_status = ch->local_tiocm;
3296 ch->local_tiocm |= status_set;
3297 ch->local_tiocm &= ~status_clear;
3298
3299 if (ch->local_tiocm != old_status) {
3300 ret = smux_send_status_cmd(ch);
3301 tx_ready = 1;
3302 }
3303 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
3304
3305 if (tx_ready)
3306 list_channel(ch);
3307
3308 return ret;
3309}
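
/*
 * Example (illustrative): asserting and then dropping DTR and RTS on a
 * channel, mirroring the usual TIOCMBIS/TIOCMBIC semantics:
 *
 *	ret = msm_smux_tiocm_set(lcid, TIOCM_DTR | TIOCM_RTS, 0);
 *	...
 *	ret = msm_smux_tiocm_set(lcid, 0, TIOCM_DTR | TIOCM_RTS);
 */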
3310
3311/**********************************************************************/
Eric Holmberged1f00c2012-06-07 09:45:18 -06003312/* Subsystem Restart */
3313/**********************************************************************/
3314static struct notifier_block ssr_notifier = {
3315 .notifier_call = ssr_notifier_cb,
3316};
3317
3318/**
3319 * Handle Subsystem Restart (SSR) notifications.
3320 *
3321 * @this Pointer to ssr_notifier
3322 * @code SSR Code
3323 * @data Data pointer (not used)
3324 */
3325static int ssr_notifier_cb(struct notifier_block *this,
3326 unsigned long code,
3327 void *data)
3328{
3329 unsigned long flags;
3330 int power_off_uart = 0;
3331
Eric Holmbergd2697902012-06-15 09:58:46 -06003332 if (code == SUBSYS_BEFORE_SHUTDOWN) {
3333 SMUX_DBG("%s: ssr - before shutdown\n", __func__);
3334 mutex_lock(&smux.mutex_lha0);
3335 smux.in_reset = 1;
3336 mutex_unlock(&smux.mutex_lha0);
Eric Holmberged1f00c2012-06-07 09:45:18 -06003337 return NOTIFY_DONE;
Eric Holmbergd2697902012-06-15 09:58:46 -06003338 } else if (code != SUBSYS_AFTER_SHUTDOWN) {
3339 return NOTIFY_DONE;
3340 }
3341 SMUX_DBG("%s: ssr - after shutdown\n", __func__);
Eric Holmberged1f00c2012-06-07 09:45:18 -06003342
3343 /* Cleanup channels */
Eric Holmbergd2697902012-06-15 09:58:46 -06003344 mutex_lock(&smux.mutex_lha0);
Eric Holmberged1f00c2012-06-07 09:45:18 -06003345 smux_lch_purge();
Eric Holmbergd2697902012-06-15 09:58:46 -06003346 if (smux.tty)
3347 tty_driver_flush_buffer(smux.tty);
Eric Holmberged1f00c2012-06-07 09:45:18 -06003348
3349 /* Power-down UART */
3350 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
3351 if (smux.power_state != SMUX_PWR_OFF) {
Eric Holmbergff0b0112012-06-08 15:06:57 -06003352 SMUX_PWR("%s: SSR - turning off UART\n", __func__);
Eric Holmberged1f00c2012-06-07 09:45:18 -06003353 smux.power_state = SMUX_PWR_OFF;
3354 power_off_uart = 1;
3355 }
Eric Holmbergd2697902012-06-15 09:58:46 -06003356 smux.powerdown_enabled = 0;
Eric Holmberged1f00c2012-06-07 09:45:18 -06003357 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
3358
3359 if (power_off_uart)
3360 smux_uart_power_off();
3361
Eric Holmbergd2697902012-06-15 09:58:46 -06003362 smux.in_reset = 0;
3363 mutex_unlock(&smux.mutex_lha0);
3364
Eric Holmberged1f00c2012-06-07 09:45:18 -06003365 return NOTIFY_DONE;
3366}
3367
3368/**********************************************************************/
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003369/* Line Discipline Interface */
3370/**********************************************************************/
Eric Holmberged1f00c2012-06-07 09:45:18 -06003371static void smux_pdev_release(struct device *dev)
3372{
3373 struct platform_device *pdev;
3374
3375 pdev = container_of(dev, struct platform_device, dev);
3376 SMUX_DBG("%s: releasing pdev %p '%s'\n", __func__, pdev, pdev->name);
3377 memset(&pdev->dev, 0x0, sizeof(pdev->dev));
3378}
3379
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003380static int smuxld_open(struct tty_struct *tty)
3381{
3382 int i;
3383 int tmp;
3384 unsigned long flags;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003385
3386 if (!smux.is_initialized)
3387 return -ENODEV;
3388
Eric Holmberged1f00c2012-06-07 09:45:18 -06003389 mutex_lock(&smux.mutex_lha0);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003390 if (smux.ld_open_count) {
3391 pr_err("%s: %p multiple instances not supported\n",
3392 __func__, tty);
Eric Holmberged1f00c2012-06-07 09:45:18 -06003393 mutex_unlock(&smux.mutex_lha0);
Eric Holmberg902c51e2012-05-29 12:12:16 -06003394 return -EEXIST;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003395 }
3396
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003397 if (tty->ops->write == NULL) {
Eric Holmberged1f00c2012-06-07 09:45:18 -06003398		pr_err("%s: tty->ops->write is NULL\n", __func__);
3399 mutex_unlock(&smux.mutex_lha0);
Eric Holmberg902c51e2012-05-29 12:12:16 -06003400 return -EINVAL;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003401 }
3402
3403 /* connect to TTY */
Eric Holmberged1f00c2012-06-07 09:45:18 -06003404 ++smux.ld_open_count;
3405 smux.in_reset = 0;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003406 smux.tty = tty;
3407 tty->disc_data = &smux;
3408 tty->receive_room = TTY_RECEIVE_ROOM;
3409 tty_driver_flush_buffer(tty);
3410
3411 /* power-down the UART if we are idle */
Eric Holmberged1f00c2012-06-07 09:45:18 -06003412 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003413 if (smux.power_state == SMUX_PWR_OFF) {
Eric Holmbergff0b0112012-06-08 15:06:57 -06003414 SMUX_PWR("%s: powering off uart\n", __func__);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003415 smux.power_state = SMUX_PWR_OFF_FLUSH;
Eric Holmberged1f00c2012-06-07 09:45:18 -06003416 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003417 queue_work(smux_tx_wq, &smux_inactivity_work);
3418 } else {
Eric Holmberged1f00c2012-06-07 09:45:18 -06003419 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003420 }
3421
3422 /* register platform devices */
3423 for (i = 0; i < ARRAY_SIZE(smux_devs); ++i) {
Eric Holmberged1f00c2012-06-07 09:45:18 -06003424 SMUX_DBG("%s: register pdev '%s'\n",
3425 __func__, smux_devs[i].name);
3426 smux_devs[i].dev.release = smux_pdev_release;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003427 tmp = platform_device_register(&smux_devs[i]);
3428 if (tmp)
3429 pr_err("%s: error %d registering device %s\n",
3430 __func__, tmp, smux_devs[i].name);
3431 }
Eric Holmberged1f00c2012-06-07 09:45:18 -06003432 mutex_unlock(&smux.mutex_lha0);
Eric Holmberg902c51e2012-05-29 12:12:16 -06003433 return 0;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003434}
3435
static void smuxld_close(struct tty_struct *tty)
{
	unsigned long flags;
	int power_up_uart = 0;
	int i;

	SMUX_DBG("%s: ldisc unload\n", __func__);
	mutex_lock(&smux.mutex_lha0);
	if (smux.ld_open_count <= 0) {
		pr_err("%s: invalid ld count %d\n", __func__,
			smux.ld_open_count);
		mutex_unlock(&smux.mutex_lha0);
		return;
	}
	smux.in_reset = 1;
	--smux.ld_open_count;

	/* Cleanup channels */
	smux_lch_purge();

	/* Unregister platform devices */
	for (i = 0; i < ARRAY_SIZE(smux_devs); ++i) {
		SMUX_DBG("%s: unregister pdev '%s'\n",
				__func__, smux_devs[i].name);
		platform_device_unregister(&smux_devs[i]);
	}

	/* Schedule UART power-up if it's down */
	spin_lock_irqsave(&smux.tx_lock_lha2, flags);
	if (smux.power_state == SMUX_PWR_OFF)
		power_up_uart = 1;
	smux.power_state = SMUX_PWR_OFF;
	smux.powerdown_enabled = 0;
	spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);

	if (power_up_uart)
		smux_uart_power_on_atomic();

	/* Disconnect from TTY */
	smux.tty = NULL;
	mutex_unlock(&smux.mutex_lha0);
	SMUX_DBG("%s: ldisc complete\n", __func__);
}

/**
 * Receive data from the TTY driver (line discipline receive_buf hook).
 *
 * @tty TTY structure
 * @cp Character data
 * @fp Per-character TTY flag data (TTY_NORMAL, TTY_FRAME, etc.)
 * @count Size of the character and flag data
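 *
 * Worked example: for an 8-byte chunk in which only byte 3 carries an
 * error flag (e.g. TTY_FRAME), the loop below first feeds bytes 0-2 to
 * smux_rx_state_machine() as TTY_NORMAL, then feeds byte 3 by itself
 * with its error flag, and the final call after the loop delivers
 * bytes 4-7 as TTY_NORMAL.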
 */
void smuxld_receive_buf(struct tty_struct *tty, const unsigned char *cp,
			char *fp, int count)
{
	int i;
	int last_idx = 0;
	const char *tty_name = NULL;
	char *f;

	if (smux_debug_mask & MSM_SMUX_DEBUG)
		print_hex_dump(KERN_INFO, "smux tty rx: ", DUMP_PREFIX_OFFSET,
				16, 1, cp, count, true);

	/* verify error flags */
	for (i = 0, f = fp; i < count; ++i, ++f) {
		if (*f != TTY_NORMAL) {
			if (tty)
				tty_name = tty->name;
			pr_err("%s: TTY %s Error %d (%s)\n", __func__,
				tty_name, *f, tty_flag_to_str(*f));

			/* feed all previous valid data to the parser */
			smux_rx_state_machine(cp + last_idx, i - last_idx,
				TTY_NORMAL);

			/* feed the bad byte to the parser by itself */
			smux_rx_state_machine(cp + i, 1, *f);
			last_idx = i + 1;
		}
	}

	/* feed the remaining data to the RX state machine */
	smux_rx_state_machine(cp + last_idx, count - last_idx, TTY_NORMAL);
}

static void smuxld_flush_buffer(struct tty_struct *tty)
{
	pr_err("%s: not supported\n", __func__);
}

static ssize_t smuxld_chars_in_buffer(struct tty_struct *tty)
{
	pr_err("%s: not supported\n", __func__);
	return -ENODEV;
}

static ssize_t smuxld_read(struct tty_struct *tty, struct file *file,
		unsigned char __user *buf, size_t nr)
{
	pr_err("%s: not supported\n", __func__);
	return -ENODEV;
}

static ssize_t smuxld_write(struct tty_struct *tty, struct file *file,
		const unsigned char *buf, size_t nr)
{
	pr_err("%s: not supported\n", __func__);
	return -ENODEV;
}

static int smuxld_ioctl(struct tty_struct *tty, struct file *file,
		unsigned int cmd, unsigned long arg)
{
	pr_err("%s: not supported\n", __func__);
	return -ENODEV;
}

static unsigned int smuxld_poll(struct tty_struct *tty, struct file *file,
		struct poll_table_struct *tbl)
{
	pr_err("%s: not supported\n", __func__);
	return -ENODEV;
}

static void smuxld_write_wakeup(struct tty_struct *tty)
{
	pr_err("%s: not supported\n", __func__);
}

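/*
 * The file-style entry points above are intentionally stubbed out:
 * clients are expected to talk to SMUX through the in-kernel
 * msm_smux_* channel API declared in <linux/smux.h>, not through
 * read()/write()/ioctl() on the line discipline itself.
 */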
static struct tty_ldisc_ops smux_ldisc_ops = {
	.owner = THIS_MODULE,
	.magic = TTY_LDISC_MAGIC,
	.name = "n_smux",
	.open = smuxld_open,
	.close = smuxld_close,
	.flush_buffer = smuxld_flush_buffer,
	.chars_in_buffer = smuxld_chars_in_buffer,
	.read = smuxld_read,
	.write = smuxld_write,
	.ioctl = smuxld_ioctl,
	.poll = smuxld_poll,
	.receive_buf = smuxld_receive_buf,
	.write_wakeup = smuxld_write_wakeup
};
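
/*
 * Illustrative sketch (not part of this driver): userspace attaches
 * this line discipline to the underlying serial port with the standard
 * TIOCSETD ioctl. The device node below is an assumption; the actual
 * HS-UART port name is board specific.
 *
 *	int ldisc = N_SMUX;
 *	int fd = open("/dev/ttyHS0", O_RDWR | O_NOCTTY);
 *
 *	if (fd < 0 || ioctl(fd, TIOCSETD, &ldisc) < 0)
 *		perror("n_smux attach");
 */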
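/*
 * Locking note (an inference from the _lha* suffixes, which appear to
 * encode a lock-hierarchy level): mutex_lha0 is the outermost lock,
 * then rx_lock_lha1, then tx_lock_lha2. The paths in this file that
 * hold more than one of them (e.g. smuxld_open()/smuxld_close()) take
 * mutex_lha0 before tx_lock_lha2, never the reverse.
 */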
static int __init smux_init(void)
{
	int ret;

	mutex_init(&smux.mutex_lha0);

	spin_lock_init(&smux.rx_lock_lha1);
	smux.rx_state = SMUX_RX_IDLE;
	smux.power_state = SMUX_PWR_OFF;
	smux.pwr_wakeup_delay_us = 1;
	smux.powerdown_enabled = 0;
	smux.power_ctl_remote_req_received = 0;
	INIT_LIST_HEAD(&smux.power_queue);
	smux.rx_activity_flag = 0;
	smux.tx_activity_flag = 0;
	smux.recv_len = 0;
	smux.tty = NULL;
	smux.ld_open_count = 0;
	smux.in_reset = 0;
	smux.is_initialized = 1;
	smux_byte_loopback = 0;

	spin_lock_init(&smux.tx_lock_lha2);
	INIT_LIST_HEAD(&smux.lch_tx_ready_list);

	ret = tty_register_ldisc(N_SMUX, &smux_ldisc_ops);
	if (ret != 0) {
		pr_err("%s: error %d registering line discipline\n",
				__func__, ret);
		return ret;
	}

	subsys_notif_register_notifier("external_modem", &ssr_notifier);

	ret = lch_init();
	if (ret != 0) {
		pr_err("%s: lch_init failed\n", __func__);
		/* don't leave a half-initialized ldisc registered */
		tty_unregister_ldisc(N_SMUX);
		return ret;
	}

	return 0;
}

static void __exit smux_exit(void)
{
	int ret;

	ret = tty_unregister_ldisc(N_SMUX);
	if (ret != 0) {
		pr_err("%s: error %d unregistering line discipline\n",
				__func__, ret);
		return;
	}
}

module_init(smux_init);
module_exit(smux_exit);

MODULE_DESCRIPTION("Serial Mux TTY Line Discipline");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_LDISC(N_SMUX);