/* drivers/tty/n_smux.c
 *
 * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/tty_driver.h>
#include <linux/smux.h>
#include <linux/list.h>
#include <linux/kfifo.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <mach/subsystem_notif.h>
#include <mach/subsystem_restart.h>
#include <mach/msm_serial_hs.h>
#include "smux_private.h"
#include "smux_loopback.h"

#define SMUX_NOTIFY_FIFO_SIZE	128
#define SMUX_TX_QUEUE_SIZE	256
#define SMUX_PKT_LOG_SIZE	80

/* Maximum size we can accept in a single RX buffer */
#define TTY_RECEIVE_ROOM	65536
#define TTY_BUFFER_FULL_WAIT_MS	50

/* maximum sleep time between wakeup attempts */
#define SMUX_WAKEUP_DELAY_MAX	(1 << 20)

/* minimum delay for scheduling delayed work */
#define SMUX_WAKEUP_DELAY_MIN	(1 << 15)

/* inactivity timeout for no rx/tx activity */
#define SMUX_INACTIVITY_TIMEOUT_MS	1000000

/* RX get_rx_buffer retry timeout values */
#define SMUX_RX_RETRY_MIN_MS	(1 << 0)  /* 1 ms */
#define SMUX_RX_RETRY_MAX_MS	(1 << 10) /* 1024 ms */

enum {
	MSM_SMUX_DEBUG = 1U << 0,
	MSM_SMUX_INFO = 1U << 1,
	MSM_SMUX_POWER_INFO = 1U << 2,
	MSM_SMUX_PKT = 1U << 3,
};

static int smux_debug_mask;
module_param_named(debug_mask, smux_debug_mask,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);

/* Simulated wakeup used for testing */
int smux_byte_loopback;
module_param_named(byte_loopback, smux_byte_loopback,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);
int smux_simulate_wakeup_delay = 1;
module_param_named(simulate_wakeup_delay, smux_simulate_wakeup_delay,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);

#define SMUX_DBG(x...) do { \
	if (smux_debug_mask & MSM_SMUX_DEBUG) \
		pr_info(x); \
} while (0)

#define SMUX_PWR(x...) do { \
	if (smux_debug_mask & MSM_SMUX_POWER_INFO) \
		pr_info(x); \
} while (0)

#define SMUX_PWR_PKT_RX(pkt) do { \
	if (smux_debug_mask & MSM_SMUX_POWER_INFO) \
		smux_log_pkt(pkt, 1); \
} while (0)

#define SMUX_PWR_PKT_TX(pkt) do { \
	if (smux_debug_mask & MSM_SMUX_POWER_INFO) { \
		if (pkt->hdr.cmd == SMUX_CMD_BYTE && \
			pkt->hdr.flags == SMUX_WAKEUP_ACK) \
			pr_info("smux: TX Wakeup ACK\n"); \
		else if (pkt->hdr.cmd == SMUX_CMD_BYTE && \
			pkt->hdr.flags == SMUX_WAKEUP_REQ) \
			pr_info("smux: TX Wakeup REQ\n"); \
		else \
			smux_log_pkt(pkt, 0); \
	} \
} while (0)

#define SMUX_PWR_BYTE_TX(pkt) do { \
	if (smux_debug_mask & MSM_SMUX_POWER_INFO) { \
		smux_log_pkt(pkt, 0); \
	} \
} while (0)

#define SMUX_LOG_PKT_RX(pkt) do { \
	if (smux_debug_mask & MSM_SMUX_PKT) \
		smux_log_pkt(pkt, 1); \
} while (0)

#define SMUX_LOG_PKT_TX(pkt) do { \
	if (smux_debug_mask & MSM_SMUX_PKT) \
		smux_log_pkt(pkt, 0); \
} while (0)

/**
 * Return true if channel is fully opened (both
 * local and remote sides are in the OPENED state).
 */
#define IS_FULLY_OPENED(ch) \
	(ch && (ch)->local_state == SMUX_LCH_LOCAL_OPENED \
	 && (ch)->remote_state == SMUX_LCH_REMOTE_OPENED)

static struct platform_device smux_devs[] = {
	{.name = "SMUX_CTL", .id = -1},
	{.name = "SMUX_RMNET", .id = -1},
	{.name = "SMUX_DUN_DATA_HSUART", .id = 0},
	{.name = "SMUX_RMNET_DATA_HSUART", .id = 1},
	{.name = "SMUX_RMNET_CTL_HSUART", .id = 0},
	{.name = "SMUX_DIAG", .id = -1},
};

enum {
	SMUX_CMD_STATUS_RTC = 1 << 0,
	SMUX_CMD_STATUS_RTR = 1 << 1,
	SMUX_CMD_STATUS_RI = 1 << 2,
	SMUX_CMD_STATUS_DCD = 1 << 3,
	SMUX_CMD_STATUS_FLOW_CNTL = 1 << 4,
};

/* Channel mode */
enum {
	SMUX_LCH_MODE_NORMAL,
	SMUX_LCH_MODE_LOCAL_LOOPBACK,
	SMUX_LCH_MODE_REMOTE_LOOPBACK,
};

enum {
	SMUX_RX_IDLE,
	SMUX_RX_MAGIC,
	SMUX_RX_HDR,
	SMUX_RX_PAYLOAD,
	SMUX_RX_FAILURE,
};

/**
 * Power states.
 *
 * The _FLUSH states are internal transitional states and are not part of the
 * official state machine.
 */
enum {
	SMUX_PWR_OFF,
	SMUX_PWR_TURNING_ON,
	SMUX_PWR_ON,
	SMUX_PWR_TURNING_OFF_FLUSH, /* power-off req/ack in TX queue */
	SMUX_PWR_TURNING_OFF,
	SMUX_PWR_OFF_FLUSH,
};
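
/*
 * Typical transitions (sketch, inferred from the handlers below):
 * wakeup:     SMUX_PWR_OFF -> SMUX_PWR_TURNING_ON -> SMUX_PWR_ON
 * power-down: SMUX_PWR_ON -> SMUX_PWR_TURNING_OFF_FLUSH ->
 *             SMUX_PWR_TURNING_OFF -> SMUX_PWR_OFF_FLUSH -> SMUX_PWR_OFF
 */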

/**
 * Logical Channel Structure.  One instance per channel.
 *
 * Locking Hierarchy
 * Each lock has a postfix that describes the locking level.  If multiple locks
 * are required, only increasing lock hierarchy numbers may be locked which
 * ensures avoiding a deadlock.
 *
 * Locking Example
 * If state_lock_lhb1 is currently held and the TX list needs to be
 * manipulated, then tx_lock_lhb2 may be locked since its locking hierarchy
 * is greater.  However, if tx_lock_lhb2 is held, then state_lock_lhb1 may
 * not be acquired since it would result in a deadlock.
 *
 * Note that the Line Discipline locks (*_lha) should always be acquired
 * before the logical channel locks.
 */
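
/*
 * Illustrative ordering sketch (not part of the original code): with the
 * level-b1 state lock held, the level-b2 TX lock may be nested inside it:
 *
 *	spin_lock_irqsave(&ch->state_lock_lhb1, flags);
 *	spin_lock(&ch->tx_lock_lhb2);		higher level: allowed
 *	...
 *	spin_unlock(&ch->tx_lock_lhb2);
 *	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
 *
 * The reverse nesting (taking state_lock_lhb1 while holding tx_lock_lhb2)
 * would invert the hierarchy and risk deadlock.
 */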
struct smux_lch_t {
	/* channel state */
	spinlock_t state_lock_lhb1;
	uint8_t lcid;
	unsigned local_state;
	unsigned local_mode;
	uint8_t local_tiocm;
	unsigned options;

	unsigned remote_state;
	unsigned remote_mode;
	uint8_t remote_tiocm;

	int tx_flow_control;
	int rx_flow_control_auto;
	int rx_flow_control_client;

	/* client callbacks and private data */
	void *priv;
	void (*notify)(void *priv, int event_type, const void *metadata);
	int (*get_rx_buffer)(void *priv, void **pkt_priv, void **buffer,
							int size);

	/* RX Info */
	struct list_head rx_retry_queue;
	unsigned rx_retry_queue_cnt;
	struct delayed_work rx_retry_work;

	/* TX Info */
	spinlock_t tx_lock_lhb2;
	struct list_head tx_queue;
	struct list_head tx_ready_list;
	unsigned tx_pending_data_cnt;
	unsigned notify_lwm;
};

union notifier_metadata {
	struct smux_meta_disconnected disconnected;
	struct smux_meta_read read;
	struct smux_meta_write write;
	struct smux_meta_tiocm tiocm;
};

struct smux_notify_handle {
	void (*notify)(void *priv, int event_type, const void *metadata);
	void *priv;
	int event_type;
	union notifier_metadata *metadata;
};

/**
 * Get RX Buffer Retry structure.
 *
 * This is used for clients that are unable to provide an RX buffer
 * immediately.  The structure temporarily holds the packet data while
 * the retry is pending.
 */
struct smux_rx_pkt_retry {
	struct smux_pkt_t *pkt;
	struct list_head rx_retry_list;
	unsigned timeout_in_ms;
};

/**
 * Receive worker data structure.
 *
 * One instance is created for every call to smux_rx_state_machine.
 */
struct smux_rx_worker_data {
	const unsigned char *data;
	int len;
	int flag;

	struct work_struct work;
	struct completion work_complete;
};

/**
 * Line discipline and module structure.
 *
 * Only one instance since multiple instances of line discipline are not
 * allowed.
 */
struct smux_ldisc_t {
	struct mutex mutex_lha0;

	int is_initialized;
	int platform_devs_registered;
	int in_reset;
	int ld_open_count;
	struct tty_struct *tty;

	/* RX State Machine (single-threaded access by smux_rx_wq) */
	unsigned char recv_buf[SMUX_MAX_PKT_SIZE];
	unsigned int recv_len;
	unsigned int pkt_remain;
	unsigned rx_state;

	/* RX Activity - accessed by multiple threads */
	spinlock_t rx_lock_lha1;
	unsigned rx_activity_flag;

	/* TX / Power */
	spinlock_t tx_lock_lha2;
	struct list_head lch_tx_ready_list;
	unsigned power_state;
	unsigned pwr_wakeup_delay_us;
	unsigned tx_activity_flag;
	unsigned powerdown_enabled;
	unsigned power_ctl_remote_req_received;
	struct list_head power_queue;
};


/* data structures */
static struct smux_lch_t smux_lch[SMUX_NUM_LOGICAL_CHANNELS];
static struct smux_ldisc_t smux;
static const char *tty_error_type[] = {
	[TTY_NORMAL] = "normal",
	[TTY_OVERRUN] = "overrun",
	[TTY_BREAK] = "break",
	[TTY_PARITY] = "parity",
	[TTY_FRAME] = "framing",
};

static const char *smux_cmds[] = {
	[SMUX_CMD_DATA] = "DATA",
	[SMUX_CMD_OPEN_LCH] = "OPEN",
	[SMUX_CMD_CLOSE_LCH] = "CLOSE",
	[SMUX_CMD_STATUS] = "STATUS",
	[SMUX_CMD_PWR_CTL] = "PWR",
	[SMUX_CMD_BYTE] = "Raw Byte",
};

static void smux_notify_local_fn(struct work_struct *work);
static DECLARE_WORK(smux_notify_local, smux_notify_local_fn);

static struct workqueue_struct *smux_notify_wq;
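/*
 * One notify FIFO entry is a struct smux_notify_handle pointer;
 * handle_size is set accordingly in lch_init().
 */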
static size_t handle_size;
static struct kfifo smux_notify_fifo;
static int queued_fifo_notifications;
static DEFINE_SPINLOCK(notify_lock_lhc1);

static struct workqueue_struct *smux_tx_wq;
static struct workqueue_struct *smux_rx_wq;
static void smux_tx_worker(struct work_struct *work);
static DECLARE_WORK(smux_tx_work, smux_tx_worker);

static void smux_wakeup_worker(struct work_struct *work);
static void smux_rx_retry_worker(struct work_struct *work);
static void smux_rx_worker(struct work_struct *work);
static DECLARE_WORK(smux_wakeup_work, smux_wakeup_worker);
static DECLARE_DELAYED_WORK(smux_wakeup_delayed_work, smux_wakeup_worker);

static void smux_inactivity_worker(struct work_struct *work);
static DECLARE_WORK(smux_inactivity_work, smux_inactivity_worker);
static DECLARE_DELAYED_WORK(smux_delayed_inactivity_work,
		smux_inactivity_worker);

static long msm_smux_tiocm_get_atomic(struct smux_lch_t *ch);
static void list_channel(struct smux_lch_t *ch);
static int smux_send_status_cmd(struct smux_lch_t *ch);
static int smux_dispatch_rx_pkt(struct smux_pkt_t *pkt);
static void smux_flush_tty(void);
static void smux_purge_ch_tx_queue(struct smux_lch_t *ch);
static int schedule_notify(uint8_t lcid, int event,
			const union notifier_metadata *metadata);
static int ssr_notifier_cb(struct notifier_block *this,
				unsigned long code,
				void *data);
static void smux_uart_power_on_atomic(void);
static int smux_rx_flow_control_updated(struct smux_lch_t *ch);
static void smux_flush_workqueues(void);
static void smux_pdev_release(struct device *dev);

/**
 * Convert TTY Error Flags to string for logging purposes.
 *
 * @flag    TTY_* flag
 * @returns String description or NULL if unknown
 */
static const char *tty_flag_to_str(unsigned flag)
{
	if (flag < ARRAY_SIZE(tty_error_type))
		return tty_error_type[flag];
	return NULL;
}

/**
 * Convert SMUX Command to string for logging purposes.
 *
 * @cmd     SMUX command
 * @returns String description or NULL if unknown
 */
static const char *cmd_to_str(unsigned cmd)
{
	if (cmd < ARRAY_SIZE(smux_cmds))
		return smux_cmds[cmd];
	return NULL;
}

/**
 * Set the reset state due to an unrecoverable failure.
 */
static void smux_enter_reset(void)
{
	pr_err("%s: unrecoverable failure, waiting for ssr\n", __func__);
	smux.in_reset = 1;
}

static int lch_init(void)
{
	unsigned int id;
	struct smux_lch_t *ch;
	int i = 0;

	handle_size = sizeof(struct smux_notify_handle *);

	smux_notify_wq = create_singlethread_workqueue("smux_notify_wq");
	smux_tx_wq = create_singlethread_workqueue("smux_tx_wq");
	smux_rx_wq = create_singlethread_workqueue("smux_rx_wq");

	if (!smux_notify_wq || !smux_tx_wq || !smux_rx_wq) {
		SMUX_DBG("%s: create_singlethread_workqueue ENOMEM\n",
			__func__);
		return -ENOMEM;
	}

	i |= kfifo_alloc(&smux_notify_fifo,
			SMUX_NOTIFY_FIFO_SIZE * handle_size,
			GFP_KERNEL);
	i |= smux_loopback_init();

	if (i) {
		pr_err("%s: out of memory error\n", __func__);
		return -ENOMEM;
	}

	for (id = 0 ; id < SMUX_NUM_LOGICAL_CHANNELS; id++) {
		ch = &smux_lch[id];

		spin_lock_init(&ch->state_lock_lhb1);
		ch->lcid = id;
		ch->local_state = SMUX_LCH_LOCAL_CLOSED;
		ch->local_mode = SMUX_LCH_MODE_NORMAL;
		ch->local_tiocm = 0x0;
		ch->options = SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP;
		ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
		ch->remote_mode = SMUX_LCH_MODE_NORMAL;
		ch->remote_tiocm = 0x0;
		ch->tx_flow_control = 0;
		ch->rx_flow_control_auto = 0;
		ch->rx_flow_control_client = 0;
		ch->priv = 0;
		ch->notify = 0;
		ch->get_rx_buffer = 0;

		INIT_LIST_HEAD(&ch->rx_retry_queue);
		ch->rx_retry_queue_cnt = 0;
		INIT_DELAYED_WORK(&ch->rx_retry_work, smux_rx_retry_worker);

		spin_lock_init(&ch->tx_lock_lhb2);
		INIT_LIST_HEAD(&ch->tx_queue);
		INIT_LIST_HEAD(&ch->tx_ready_list);
		ch->tx_pending_data_cnt = 0;
		ch->notify_lwm = 0;
	}

	return 0;
}

/**
 * Empty and cleanup all SMUX logical channels for subsystem restart or line
 * discipline disconnect.
 */
static void smux_lch_purge(void)
{
	struct smux_lch_t *ch;
	unsigned long flags;
	int i;

	/* Empty TX ready list */
	spin_lock_irqsave(&smux.tx_lock_lha2, flags);
	while (!list_empty(&smux.lch_tx_ready_list)) {
		SMUX_DBG("%s: emptying ready list %p\n",
				__func__, smux.lch_tx_ready_list.next);
		ch = list_first_entry(&smux.lch_tx_ready_list,
						struct smux_lch_t,
						tx_ready_list);
		list_del(&ch->tx_ready_list);
		INIT_LIST_HEAD(&ch->tx_ready_list);
	}

	/* Purge Power Queue */
	while (!list_empty(&smux.power_queue)) {
		struct smux_pkt_t *pkt;

		pkt = list_first_entry(&smux.power_queue,
						struct smux_pkt_t,
						list);
		list_del(&pkt->list);
		SMUX_DBG("%s: emptying power queue pkt=%p\n",
				__func__, pkt);
		smux_free_pkt(pkt);
	}
	spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);

	/* Close all ports */
	for (i = 0 ; i < SMUX_NUM_LOGICAL_CHANNELS; i++) {
		ch = &smux_lch[i];
		SMUX_DBG("%s: cleaning up lcid %d\n", __func__, i);

		spin_lock_irqsave(&ch->state_lock_lhb1, flags);

		/* Purge TX queue */
		spin_lock(&ch->tx_lock_lhb2);
		smux_purge_ch_tx_queue(ch);
		spin_unlock(&ch->tx_lock_lhb2);

		/* Notify user of disconnect and reset channel state */
		if (ch->local_state == SMUX_LCH_LOCAL_OPENED ||
			ch->local_state == SMUX_LCH_LOCAL_CLOSING) {
			union notifier_metadata meta;

			meta.disconnected.is_ssr = smux.in_reset;
			schedule_notify(ch->lcid, SMUX_DISCONNECTED, &meta);
		}

		ch->local_state = SMUX_LCH_LOCAL_CLOSED;
		ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
		ch->remote_mode = SMUX_LCH_MODE_NORMAL;
		ch->tx_flow_control = 0;
		ch->rx_flow_control_auto = 0;
		ch->rx_flow_control_client = 0;

		/* Purge RX retry queue */
		if (ch->rx_retry_queue_cnt)
			queue_delayed_work(smux_rx_wq, &ch->rx_retry_work, 0);

		spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
	}
}

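/**
 * Validate a logical channel ID.
 *
 * @lcid Logical channel ID
 *
 * @returns 0 if the ID is valid, -ENXIO otherwise.  Note the inverted
 *          sense: callers test "if (smux_assert_lch_id(lcid))" to detect
 *          an invalid channel.
 */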
int smux_assert_lch_id(uint32_t lcid)
{
	if (lcid >= SMUX_NUM_LOGICAL_CHANNELS)
		return -ENXIO;
	else
		return 0;
}

/**
 * Log packet information for debug purposes.
 *
 * @pkt     Packet to log
 * @is_recv 1 = RX packet; 0 = TX Packet
 *
 * [DIR][LCID] [LOCAL_STATE][LOCAL_MODE]:[REMOTE_STATE][REMOTE_MODE] PKT Info
 *
 * PKT Info:
 *   [CMD] flags [flags] len [PAYLOAD_LEN]:[PAD_LEN] [Payload hex bytes]
 *
 * Direction:    R = Receive, S = Send
 * Local State:  C = Closed; c = closing; o = opening; O = Opened
 * Local Mode:   L = Local loopback; R = Remote loopback; N = Normal
 * Remote State: C = Closed; O = Opened
 * Remote Mode:  R = Remote loopback; N = Normal
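 *
 * Example line (illustrative, not from a real trace):
 *   smux: R2 ON:ON DATA flags 0 len 4:0 de ad be ef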
 */
static void smux_log_pkt(struct smux_pkt_t *pkt, int is_recv)
{
	char logbuf[SMUX_PKT_LOG_SIZE];
	char cmd_extra[16];
	int i = 0;
	int count;
	int len;
	char local_state;
	char local_mode;
	char remote_state;
	char remote_mode;
	struct smux_lch_t *ch = NULL;
	unsigned char *data;

	if (!smux_assert_lch_id(pkt->hdr.lcid))
		ch = &smux_lch[pkt->hdr.lcid];

	if (ch) {
		switch (ch->local_state) {
		case SMUX_LCH_LOCAL_CLOSED:
			local_state = 'C';
			break;
		case SMUX_LCH_LOCAL_OPENING:
			local_state = 'o';
			break;
		case SMUX_LCH_LOCAL_OPENED:
			local_state = 'O';
			break;
		case SMUX_LCH_LOCAL_CLOSING:
			local_state = 'c';
			break;
		default:
			local_state = 'U';
			break;
		}

		switch (ch->local_mode) {
		case SMUX_LCH_MODE_LOCAL_LOOPBACK:
			local_mode = 'L';
			break;
		case SMUX_LCH_MODE_REMOTE_LOOPBACK:
			local_mode = 'R';
			break;
		case SMUX_LCH_MODE_NORMAL:
			local_mode = 'N';
			break;
		default:
			local_mode = 'U';
			break;
		}

		switch (ch->remote_state) {
		case SMUX_LCH_REMOTE_CLOSED:
			remote_state = 'C';
			break;
		case SMUX_LCH_REMOTE_OPENED:
			remote_state = 'O';
			break;

		default:
			remote_state = 'U';
			break;
		}

		switch (ch->remote_mode) {
		case SMUX_LCH_MODE_REMOTE_LOOPBACK:
			remote_mode = 'R';
			break;
		case SMUX_LCH_MODE_NORMAL:
			remote_mode = 'N';
			break;
		default:
			remote_mode = 'U';
			break;
		}
	} else {
		/* broadcast channel */
		local_state = '-';
		local_mode = '-';
		remote_state = '-';
		remote_mode = '-';
	}

	/* determine command type (ACK, etc) */
	cmd_extra[0] = '\0';
	switch (pkt->hdr.cmd) {
	case SMUX_CMD_OPEN_LCH:
		if (pkt->hdr.flags & SMUX_CMD_OPEN_ACK)
			snprintf(cmd_extra, sizeof(cmd_extra), " ACK");
		break;
	case SMUX_CMD_CLOSE_LCH:
		if (pkt->hdr.flags & SMUX_CMD_CLOSE_ACK)
			snprintf(cmd_extra, sizeof(cmd_extra), " ACK");
		break;

	case SMUX_CMD_PWR_CTL:
		if (pkt->hdr.flags & SMUX_CMD_PWR_CTL_ACK)
			snprintf(cmd_extra, sizeof(cmd_extra), " ACK");
		break;
	};

	i += snprintf(logbuf + i, SMUX_PKT_LOG_SIZE - i,
		"smux: %c%d %c%c:%c%c %s%s flags %x len %d:%d ",
		is_recv ? 'R' : 'S', pkt->hdr.lcid,
		local_state, local_mode,
		remote_state, remote_mode,
		cmd_to_str(pkt->hdr.cmd), cmd_extra, pkt->hdr.flags,
		pkt->hdr.payload_len, pkt->hdr.pad_len);

	len = (pkt->hdr.payload_len > 16) ? 16 : pkt->hdr.payload_len;
	data = (unsigned char *)pkt->payload;
	for (count = 0; count < len; count++)
		i += snprintf(logbuf + i, SMUX_PKT_LOG_SIZE - i,
			"%02x ", (unsigned)data[count]);

	pr_info("%s\n", logbuf);
}

static void smux_notify_local_fn(struct work_struct *work)
{
	struct smux_notify_handle *notify_handle = NULL;
	union notifier_metadata *metadata = NULL;
	unsigned long flags;
	int i;

	for (;;) {
		/* retrieve notification */
		spin_lock_irqsave(&notify_lock_lhc1, flags);
		if (kfifo_len(&smux_notify_fifo) >= handle_size) {
			i = kfifo_out(&smux_notify_fifo,
				&notify_handle,
				handle_size);
			if (i != handle_size) {
				pr_err("%s: unable to retrieve handle %d expected %d\n",
					__func__, i, handle_size);
				spin_unlock_irqrestore(&notify_lock_lhc1, flags);
				break;
			}
		} else {
			spin_unlock_irqrestore(&notify_lock_lhc1, flags);
			break;
		}
		--queued_fifo_notifications;
		spin_unlock_irqrestore(&notify_lock_lhc1, flags);

		/* notify client */
		metadata = notify_handle->metadata;
		notify_handle->notify(notify_handle->priv,
			notify_handle->event_type,
			metadata);

		kfree(metadata);
		kfree(notify_handle);
	}
}

/**
 * Initialize existing packet.
 */
void smux_init_pkt(struct smux_pkt_t *pkt)
{
	memset(pkt, 0x0, sizeof(*pkt));
	pkt->hdr.magic = SMUX_MAGIC;
	INIT_LIST_HEAD(&pkt->list);
}

/**
 * Allocate and initialize packet.
 *
 * If a payload is needed, either set it directly and ensure that it's freed or
 * use smux_alloc_pkt_payload() to allocate a payload and it will be freed
 * automatically when smux_free_pkt() is called.
 */
struct smux_pkt_t *smux_alloc_pkt(void)
{
	struct smux_pkt_t *pkt;

	/* Consider a free list implementation instead of kmalloc */
	pkt = kmalloc(sizeof(struct smux_pkt_t), GFP_ATOMIC);
	if (!pkt) {
		pr_err("%s: out of memory\n", __func__);
		return NULL;
	}
	smux_init_pkt(pkt);
	pkt->allocated = 1;

	return pkt;
}
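
/*
 * Typical allocation/queue sketch (illustrative only; smux_tx_queue is
 * defined further below):
 *
 *	pkt = smux_alloc_pkt();
 *	pkt->hdr.cmd = SMUX_CMD_DATA;
 *	pkt->hdr.lcid = lcid;
 *	pkt->hdr.payload_len = len;
 *	smux_alloc_pkt_payload(pkt);	payload freed by smux_free_pkt()
 *	memcpy(pkt->payload, data, len);
 *	smux_tx_queue(pkt, ch, 1);
 */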

/**
 * Free packet.
 *
 * @pkt Packet to free (may be NULL)
 *
 * If payload was allocated using smux_alloc_pkt_payload(), then it is freed as
 * well.  Otherwise, the caller is responsible for freeing the payload.
 */
void smux_free_pkt(struct smux_pkt_t *pkt)
{
	if (pkt) {
		if (pkt->free_payload)
			kfree(pkt->payload);
		if (pkt->allocated)
			kfree(pkt);
	}
}

/**
 * Allocate packet payload.
 *
 * @pkt Packet to add payload to
 *
 * @returns 0 on success, <0 upon error
 *
 * A flag is set to signal smux_free_pkt() to free the payload.
 */
int smux_alloc_pkt_payload(struct smux_pkt_t *pkt)
{
	if (!pkt)
		return -EINVAL;

	pkt->payload = kmalloc(pkt->hdr.payload_len, GFP_ATOMIC);
	pkt->free_payload = 1;
	if (!pkt->payload) {
		pr_err("%s: unable to malloc %d bytes for payload\n",
			__func__, pkt->hdr.payload_len);
		return -ENOMEM;
	}

	return 0;
}

static int schedule_notify(uint8_t lcid, int event,
			const union notifier_metadata *metadata)
{
	struct smux_notify_handle *notify_handle = 0;
	union notifier_metadata *meta_copy = 0;
	struct smux_lch_t *ch;
	int i;
	unsigned long flags;
	int ret = 0;

	ch = &smux_lch[lcid];
	notify_handle = kzalloc(sizeof(struct smux_notify_handle),
				GFP_ATOMIC);
	if (!notify_handle) {
		pr_err("%s: out of memory\n", __func__);
		ret = -ENOMEM;
		goto free_out;
	}

	notify_handle->notify = ch->notify;
	notify_handle->priv = ch->priv;
	notify_handle->event_type = event;
	if (metadata) {
		meta_copy = kzalloc(sizeof(union notifier_metadata),
					GFP_ATOMIC);
		if (!meta_copy) {
			pr_err("%s: out of memory\n", __func__);
			ret = -ENOMEM;
			goto free_out;
		}
		*meta_copy = *metadata;
		notify_handle->metadata = meta_copy;
	} else {
		notify_handle->metadata = NULL;
	}

	spin_lock_irqsave(&notify_lock_lhc1, flags);
	i = kfifo_avail(&smux_notify_fifo);
	if (i < handle_size) {
		pr_err("%s: fifo full error %d expected %d\n",
			__func__, i, handle_size);
		ret = -ENOMEM;
		goto unlock_out;
	}

	i = kfifo_in(&smux_notify_fifo, &notify_handle, handle_size);
	if (i < 0 || i != handle_size) {
		pr_err("%s: fifo not available error %d (expected %d)\n",
			__func__, i, handle_size);
		ret = -ENOSPC;
		goto unlock_out;
	}
	++queued_fifo_notifications;

unlock_out:
	spin_unlock_irqrestore(&notify_lock_lhc1, flags);

free_out:
	queue_work(smux_notify_wq, &smux_notify_local);
	if (ret < 0 && notify_handle) {
		kfree(notify_handle->metadata);
		kfree(notify_handle);
	}
	return ret;
}

/**
 * Returns the serialized size of a packet.
 *
 * @pkt Packet to serialize
 *
 * @returns Serialized length of packet
 */
static unsigned int smux_serialize_size(struct smux_pkt_t *pkt)
{
	unsigned int size;

	size = sizeof(struct smux_hdr_t);
	size += pkt->hdr.payload_len;
	size += pkt->hdr.pad_len;

	return size;
}

/**
 * Serialize packet @pkt into output buffer @data.
 *
 * @pkt     Packet to serialize
 * @out     Destination buffer pointer
 * @out_len Size of serialized packet
 *
 * @returns 0 for success
 */
int smux_serialize(struct smux_pkt_t *pkt, char *out,
					unsigned int *out_len)
{
	char *data_start = out;

	if (smux_serialize_size(pkt) > SMUX_MAX_PKT_SIZE) {
		pr_err("%s: packet size %d too big\n",
				__func__, smux_serialize_size(pkt));
		return -E2BIG;
	}

	memcpy(out, &pkt->hdr, sizeof(struct smux_hdr_t));
	out += sizeof(struct smux_hdr_t);
	if (pkt->payload) {
		memcpy(out, pkt->payload, pkt->hdr.payload_len);
		out += pkt->hdr.payload_len;
	}
	if (pkt->hdr.pad_len) {
		memset(out, 0x0, pkt->hdr.pad_len);
		out += pkt->hdr.pad_len;
	}
	*out_len = out - data_start;
	return 0;
}
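
/*
 * Resulting wire layout (from the copy sequence above):
 *
 *	| struct smux_hdr_t | payload_len bytes | pad_len x 0x00 |
 */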

/**
 * Serialize header and provide pointer to the data.
 *
 * @pkt          Packet
 * @out[out]     Pointer to the serialized header data
 * @out_len[out] Pointer to the serialized header length
 */
static void smux_serialize_hdr(struct smux_pkt_t *pkt, char **out,
					unsigned int *out_len)
{
	*out = (char *)&pkt->hdr;
	*out_len = sizeof(struct smux_hdr_t);
}

/**
 * Serialize payload and provide pointer to the data.
 *
 * @pkt          Packet
 * @out[out]     Pointer to the serialized payload data
 * @out_len[out] Pointer to the serialized payload length
 */
static void smux_serialize_payload(struct smux_pkt_t *pkt, char **out,
					unsigned int *out_len)
{
	*out = pkt->payload;
	*out_len = pkt->hdr.payload_len;
}

/**
 * Serialize padding and provide pointer to the data.
 *
 * @pkt          Packet
 * @out[out]     Pointer to the serialized padding (always NULL)
 * @out_len[out] Pointer to the serialized padding length
 *
 * Since the padding field value is undefined, only the size of the padding
 * (@out_len) is set and the buffer pointer (@out) will always be NULL.
 */
static void smux_serialize_padding(struct smux_pkt_t *pkt, char **out,
					unsigned int *out_len)
{
	*out = NULL;
	*out_len = pkt->hdr.pad_len;
}

/**
 * Write data to TTY framework and handle breaking the writes up if needed.
 *
 * @data Data to write
 * @len  Length of data
 *
 * @returns 0 for success, < 0 for failure
 */
static int write_to_tty(char *data, unsigned len)
{
	int data_written;

	if (!data)
		return 0;

	while (len > 0 && !smux.in_reset) {
		data_written = smux.tty->ops->write(smux.tty, data, len);
		if (data_written >= 0) {
			len -= data_written;
			data += data_written;
		} else {
			pr_err("%s: TTY write returned error %d\n",
					__func__, data_written);
			return data_written;
		}

		if (len)
			tty_wait_until_sent(smux.tty,
				msecs_to_jiffies(TTY_BUFFER_FULL_WAIT_MS));
	}
	return 0;
}

/**
 * Write packet to TTY.
 *
 * @pkt packet to write
 *
 * @returns 0 on success
 */
static int smux_tx_tty(struct smux_pkt_t *pkt)
{
	char *data;
	unsigned int len;
	int ret;

	if (!smux.tty) {
		pr_err("%s: TTY not initialized\n", __func__);
		return -ENOTTY;
	}

	if (pkt->hdr.cmd == SMUX_CMD_BYTE) {
		SMUX_DBG("%s: tty send single byte\n", __func__);
		ret = write_to_tty(&pkt->hdr.flags, 1);
		return ret;
	}

	smux_serialize_hdr(pkt, &data, &len);
	ret = write_to_tty(data, len);
	if (ret) {
		pr_err("%s: failed %d to write header %d\n",
				__func__, ret, len);
		return ret;
	}

	smux_serialize_payload(pkt, &data, &len);
	ret = write_to_tty(data, len);
	if (ret) {
		pr_err("%s: failed %d to write payload %d\n",
				__func__, ret, len);
		return ret;
	}

	smux_serialize_padding(pkt, &data, &len);
	while (len > 0) {
		char zero = 0x0;
		ret = write_to_tty(&zero, 1);
		if (ret) {
			pr_err("%s: failed %d to write padding %d\n",
					__func__, ret, len);
			return ret;
		}
		--len;
	}
	return 0;
}

/**
 * Send a single character.
 *
 * @ch Character to send
 */
static void smux_send_byte(char ch)
{
	struct smux_pkt_t *pkt;

	pkt = smux_alloc_pkt();
	if (!pkt) {
		pr_err("%s: alloc failure for byte %x\n", __func__, ch);
		return;
	}
	pkt->hdr.cmd = SMUX_CMD_BYTE;
	pkt->hdr.flags = ch;
	pkt->hdr.lcid = SMUX_BROADCAST_LCID;

	list_add_tail(&pkt->list, &smux.power_queue);
	queue_work(smux_tx_wq, &smux_tx_work);
}

/**
 * Receive a single-character packet (used for internal testing).
 *
 * @ch   Character to receive
 * @lcid Logical channel ID for packet
 *
 * @returns 0 for success
 */
static int smux_receive_byte(char ch, int lcid)
{
	struct smux_pkt_t pkt;

	smux_init_pkt(&pkt);
	pkt.hdr.lcid = lcid;
	pkt.hdr.cmd = SMUX_CMD_BYTE;
	pkt.hdr.flags = ch;

	return smux_dispatch_rx_pkt(&pkt);
}

/**
 * Queue packet for transmit.
 *
 * @pkt_ptr Packet to queue
 * @ch      Channel to queue packet on
 * @queue   Queue channel on ready list
 */
static void smux_tx_queue(struct smux_pkt_t *pkt_ptr, struct smux_lch_t *ch,
		int queue)
{
	unsigned long flags;

	SMUX_DBG("%s: queuing pkt %p\n", __func__, pkt_ptr);

	spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
	list_add_tail(&pkt_ptr->list, &ch->tx_queue);
	spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);

	if (queue)
		list_channel(ch);
}

/**
 * Handle receive OPEN ACK command.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_open_ack(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret;
	struct smux_lch_t *ch;
	int enable_powerdown = 0;

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];

	spin_lock(&ch->state_lock_lhb1);
	if (ch->local_state == SMUX_LCH_LOCAL_OPENING) {
		SMUX_DBG("lcid %d local state 0x%x -> 0x%x\n", lcid,
				ch->local_state,
				SMUX_LCH_LOCAL_OPENED);

		if (pkt->hdr.flags & SMUX_CMD_OPEN_POWER_COLLAPSE)
			enable_powerdown = 1;

		ch->local_state = SMUX_LCH_LOCAL_OPENED;
		if (ch->remote_state == SMUX_LCH_REMOTE_OPENED)
			schedule_notify(lcid, SMUX_CONNECTED, NULL);
		ret = 0;
	} else if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
		SMUX_DBG("Remote loopback OPEN ACK received\n");
		ret = 0;
	} else {
		pr_err("%s: lcid %d state 0x%x open ack invalid\n",
				__func__, lcid, ch->local_state);
		ret = -EINVAL;
	}
	spin_unlock(&ch->state_lock_lhb1);

	if (enable_powerdown) {
		spin_lock(&smux.tx_lock_lha2);
		if (!smux.powerdown_enabled) {
			smux.powerdown_enabled = 1;
			SMUX_DBG("%s: enabling power-collapse support\n",
					__func__);
		}
		spin_unlock(&smux.tx_lock_lha2);
	}

	return ret;
}

static int smux_handle_close_ack(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret;
	struct smux_lch_t *ch;
	union notifier_metadata meta_disconnected;
	unsigned long flags;

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];
	meta_disconnected.disconnected.is_ssr = 0;

	spin_lock_irqsave(&ch->state_lock_lhb1, flags);

	if (ch->local_state == SMUX_LCH_LOCAL_CLOSING) {
		SMUX_DBG("lcid %d local state 0x%x -> 0x%x\n", lcid,
				SMUX_LCH_LOCAL_CLOSING,
				SMUX_LCH_LOCAL_CLOSED);
		ch->local_state = SMUX_LCH_LOCAL_CLOSED;
		if (ch->remote_state == SMUX_LCH_REMOTE_CLOSED)
			schedule_notify(lcid, SMUX_DISCONNECTED,
				&meta_disconnected);
		ret = 0;
	} else if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
		SMUX_DBG("Remote loopback CLOSE ACK received\n");
		ret = 0;
	} else {
		pr_err("%s: lcid %d state 0x%x close ack invalid\n",
				__func__, lcid, ch->local_state);
		ret = -EINVAL;
	}
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
	return ret;
}

/**
 * Handle receive OPEN command.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_open_cmd(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret;
	struct smux_lch_t *ch;
	struct smux_pkt_t *ack_pkt;
	unsigned long flags;
	int tx_ready = 0;
	int enable_powerdown = 0;

	if (pkt->hdr.flags & SMUX_CMD_OPEN_ACK)
		return smux_handle_rx_open_ack(pkt);

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];

	spin_lock_irqsave(&ch->state_lock_lhb1, flags);

	if (ch->remote_state == SMUX_LCH_REMOTE_CLOSED) {
		SMUX_DBG("lcid %d remote state 0x%x -> 0x%x\n", lcid,
				SMUX_LCH_REMOTE_CLOSED,
				SMUX_LCH_REMOTE_OPENED);

		ch->remote_state = SMUX_LCH_REMOTE_OPENED;
		if (pkt->hdr.flags & SMUX_CMD_OPEN_POWER_COLLAPSE)
			enable_powerdown = 1;

		/* Send Open ACK */
		ack_pkt = smux_alloc_pkt();
		if (!ack_pkt) {
			/* exit out to allow retrying this later */
			ret = -ENOMEM;
			goto out;
		}
		ack_pkt->hdr.cmd = SMUX_CMD_OPEN_LCH;
		ack_pkt->hdr.flags = SMUX_CMD_OPEN_ACK
			| SMUX_CMD_OPEN_POWER_COLLAPSE;
		ack_pkt->hdr.lcid = lcid;
		ack_pkt->hdr.payload_len = 0;
		ack_pkt->hdr.pad_len = 0;
		if (pkt->hdr.flags & SMUX_CMD_OPEN_REMOTE_LOOPBACK) {
			ch->remote_mode = SMUX_LCH_MODE_REMOTE_LOOPBACK;
			ack_pkt->hdr.flags |= SMUX_CMD_OPEN_REMOTE_LOOPBACK;
		}
		smux_tx_queue(ack_pkt, ch, 0);
		tx_ready = 1;

		if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
			/*
			 * Send an Open command to the remote side to
			 * simulate our local client doing it.
			 */
			ack_pkt = smux_alloc_pkt();
			if (ack_pkt) {
				ack_pkt->hdr.lcid = lcid;
				ack_pkt->hdr.cmd = SMUX_CMD_OPEN_LCH;
				ack_pkt->hdr.flags =
					SMUX_CMD_OPEN_POWER_COLLAPSE;
				ack_pkt->hdr.payload_len = 0;
				ack_pkt->hdr.pad_len = 0;
				smux_tx_queue(ack_pkt, ch, 0);
				tx_ready = 1;
			} else {
				pr_err("%s: Remote loopback allocation failure\n",
						__func__);
			}
		} else if (ch->local_state == SMUX_LCH_LOCAL_OPENED) {
			schedule_notify(lcid, SMUX_CONNECTED, NULL);
		}
		ret = 0;
	} else {
		pr_err("%s: lcid %d remote state 0x%x open invalid\n",
				__func__, lcid, ch->remote_state);
		ret = -EINVAL;
	}

out:
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);

	if (enable_powerdown) {
		spin_lock_irqsave(&smux.tx_lock_lha2, flags);
		if (!smux.powerdown_enabled) {
			smux.powerdown_enabled = 1;
			SMUX_DBG("%s: enabling power-collapse support\n",
					__func__);
		}
		spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
	}

	if (tx_ready)
		list_channel(ch);

	return ret;
}

/**
 * Handle receive CLOSE command.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_close_cmd(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret;
	struct smux_lch_t *ch;
	struct smux_pkt_t *ack_pkt;
	union notifier_metadata meta_disconnected;
	unsigned long flags;
	int tx_ready = 0;

	if (pkt->hdr.flags & SMUX_CMD_CLOSE_ACK)
		return smux_handle_close_ack(pkt);

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];
	meta_disconnected.disconnected.is_ssr = 0;

	spin_lock_irqsave(&ch->state_lock_lhb1, flags);
	if (ch->remote_state == SMUX_LCH_REMOTE_OPENED) {
		SMUX_DBG("lcid %d remote state 0x%x -> 0x%x\n", lcid,
				SMUX_LCH_REMOTE_OPENED,
				SMUX_LCH_REMOTE_CLOSED);

		ack_pkt = smux_alloc_pkt();
		if (!ack_pkt) {
			/* exit out to allow retrying this later */
			ret = -ENOMEM;
			goto out;
		}
		ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
		ack_pkt->hdr.cmd = SMUX_CMD_CLOSE_LCH;
		ack_pkt->hdr.flags = SMUX_CMD_CLOSE_ACK;
		ack_pkt->hdr.lcid = lcid;
		ack_pkt->hdr.payload_len = 0;
		ack_pkt->hdr.pad_len = 0;
		smux_tx_queue(ack_pkt, ch, 0);
		tx_ready = 1;

		if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
			/*
			 * Send a Close command to the remote side to simulate
			 * our local client doing it.
			 */
			ack_pkt = smux_alloc_pkt();
			if (ack_pkt) {
				ack_pkt->hdr.lcid = lcid;
				ack_pkt->hdr.cmd = SMUX_CMD_CLOSE_LCH;
				ack_pkt->hdr.flags = 0;
				ack_pkt->hdr.payload_len = 0;
				ack_pkt->hdr.pad_len = 0;
				smux_tx_queue(ack_pkt, ch, 0);
				tx_ready = 1;
			} else {
				pr_err("%s: Remote loopback allocation failure\n",
						__func__);
			}
		}

		if (ch->local_state == SMUX_LCH_LOCAL_CLOSED)
			schedule_notify(lcid, SMUX_DISCONNECTED,
				&meta_disconnected);
		ret = 0;
	} else {
		pr_err("%s: lcid %d remote state 0x%x close invalid\n",
				__func__, lcid, ch->remote_state);
		ret = -EINVAL;
	}
out:
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
	if (tx_ready)
		list_channel(ch);

	return ret;
}

/**
 * Handle receive DATA command.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_data_cmd(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret = 0;
	int do_retry = 0;
	int tx_ready = 0;
	int tmp;
	int rx_len;
	struct smux_lch_t *ch;
	union notifier_metadata metadata;
	int remote_loopback;
	struct smux_pkt_t *ack_pkt;
	unsigned long flags;

	if (!pkt || smux_assert_lch_id(pkt->hdr.lcid)) {
		ret = -ENXIO;
		goto out;
	}

	rx_len = pkt->hdr.payload_len;
	if (rx_len == 0) {
		ret = -EINVAL;
		goto out;
	}

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];
	spin_lock_irqsave(&ch->state_lock_lhb1, flags);
	remote_loopback = ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK;

	if (ch->local_state != SMUX_LCH_LOCAL_OPENED
		&& !remote_loopback) {
		pr_err("smux: ch %d error data on local state 0x%x\n",
					lcid, ch->local_state);
		ret = -EIO;
		spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
		goto out;
	}

	if (ch->remote_state != SMUX_LCH_REMOTE_OPENED) {
		pr_err("smux: ch %d error data on remote state 0x%x\n",
					lcid, ch->remote_state);
		ret = -EIO;
		spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
		goto out;
	}

	if (!list_empty(&ch->rx_retry_queue)) {
		do_retry = 1;

		if ((ch->options & SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP) &&
			!ch->rx_flow_control_auto &&
			((ch->rx_retry_queue_cnt + 1) >= SMUX_RX_WM_HIGH)) {
			/* need to flow control RX */
			ch->rx_flow_control_auto = 1;
			tx_ready |= smux_rx_flow_control_updated(ch);
			schedule_notify(ch->lcid, SMUX_RX_RETRY_HIGH_WM_HIT,
					NULL);
		}
		if ((ch->rx_retry_queue_cnt + 1) > SMUX_RX_RETRY_MAX_PKTS) {
			/* retry queue full */
			pr_err("%s: ch %d RX retry queue full\n",
					__func__, lcid);
			schedule_notify(lcid, SMUX_READ_FAIL, NULL);
			ret = -ENOMEM;
			spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
			goto out;
		}
	}
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);

	if (remote_loopback) {
		/* Echo the data back to the remote client. */
		ack_pkt = smux_alloc_pkt();
		if (ack_pkt) {
			ack_pkt->hdr.lcid = lcid;
			ack_pkt->hdr.cmd = SMUX_CMD_DATA;
			ack_pkt->hdr.flags = 0;
			ack_pkt->hdr.payload_len = pkt->hdr.payload_len;
			if (ack_pkt->hdr.payload_len) {
				smux_alloc_pkt_payload(ack_pkt);
				memcpy(ack_pkt->payload, pkt->payload,
						ack_pkt->hdr.payload_len);
			}
			ack_pkt->hdr.pad_len = pkt->hdr.pad_len;
			smux_tx_queue(ack_pkt, ch, 0);
			tx_ready = 1;
		} else {
			pr_err("%s: Remote loopback allocation failure\n",
					__func__);
		}
	} else if (!do_retry) {
		/* request buffer from client */
		metadata.read.pkt_priv = 0;
		metadata.read.buffer = 0;
		tmp = ch->get_rx_buffer(ch->priv,
				(void **)&metadata.read.pkt_priv,
				(void **)&metadata.read.buffer,
				rx_len);

		if (tmp == 0 && metadata.read.buffer) {
			/* place data into RX buffer */
			memcpy(metadata.read.buffer, pkt->payload,
					rx_len);
			metadata.read.len = rx_len;
			schedule_notify(lcid, SMUX_READ_DONE,
							&metadata);
		} else if (tmp == -EAGAIN ||
				(tmp == 0 && !metadata.read.buffer)) {
			/* buffer allocation failed - add to retry queue */
			do_retry = 1;
		} else if (tmp < 0) {
			pr_err("%s: ch %d Client RX buffer alloc failed %d\n",
					__func__, lcid, tmp);
			schedule_notify(lcid, SMUX_READ_FAIL, NULL);
			ret = -ENOMEM;
		}
	}

	if (do_retry) {
		struct smux_rx_pkt_retry *retry;

		retry = kmalloc(sizeof(struct smux_rx_pkt_retry), GFP_KERNEL);
		if (!retry) {
			pr_err("%s: retry alloc failure\n", __func__);
			ret = -ENOMEM;
			schedule_notify(lcid, SMUX_READ_FAIL, NULL);
			goto out;
		}
		INIT_LIST_HEAD(&retry->rx_retry_list);
		retry->timeout_in_ms = SMUX_RX_RETRY_MIN_MS;

		/* copy packet */
		retry->pkt = smux_alloc_pkt();
		if (!retry->pkt) {
			kfree(retry);
			pr_err("%s: pkt alloc failure\n", __func__);
			ret = -ENOMEM;
			schedule_notify(lcid, SMUX_READ_FAIL, NULL);
			goto out;
		}
		retry->pkt->hdr.lcid = lcid;
		retry->pkt->hdr.payload_len = pkt->hdr.payload_len;
		retry->pkt->hdr.pad_len = pkt->hdr.pad_len;
		if (retry->pkt->hdr.payload_len) {
			smux_alloc_pkt_payload(retry->pkt);
			memcpy(retry->pkt->payload, pkt->payload,
					retry->pkt->hdr.payload_len);
		}

		/* add to retry queue */
		spin_lock_irqsave(&ch->state_lock_lhb1, flags);
		list_add_tail(&retry->rx_retry_list, &ch->rx_retry_queue);
		++ch->rx_retry_queue_cnt;
		if (ch->rx_retry_queue_cnt == 1)
			queue_delayed_work(smux_rx_wq, &ch->rx_retry_work,
				msecs_to_jiffies(retry->timeout_in_ms));
		spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
	}

	if (tx_ready)
		list_channel(ch);
out:
	return ret;
}

/**
 * Handle receive byte command for testing purposes.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_byte_cmd(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret;
	struct smux_lch_t *ch;
	union notifier_metadata metadata;
	unsigned long flags;

	if (!pkt || smux_assert_lch_id(pkt->hdr.lcid)) {
		pr_err("%s: invalid packet or channel id\n", __func__);
		return -ENXIO;
	}

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];
	spin_lock_irqsave(&ch->state_lock_lhb1, flags);

	if (ch->local_state != SMUX_LCH_LOCAL_OPENED) {
		pr_err("smux: ch %d error data on local state 0x%x\n",
				lcid, ch->local_state);
		ret = -EIO;
		goto out;
	}

	if (ch->remote_state != SMUX_LCH_REMOTE_OPENED) {
		pr_err("smux: ch %d error data on remote state 0x%x\n",
				lcid, ch->remote_state);
		ret = -EIO;
		goto out;
	}

	metadata.read.pkt_priv = (void *)(int)pkt->hdr.flags;
	metadata.read.buffer = 0;
	schedule_notify(lcid, SMUX_READ_DONE, &metadata);
	ret = 0;

out:
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
	return ret;
}

/**
 * Handle receive status command.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_status_cmd(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret = 0;
	struct smux_lch_t *ch;
	union notifier_metadata meta;
	unsigned long flags;
	int tx_ready = 0;

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];

	spin_lock_irqsave(&ch->state_lock_lhb1, flags);
	meta.tiocm.tiocm_old = ch->remote_tiocm;
	meta.tiocm.tiocm_new = pkt->hdr.flags;

	/* update logical channel flow control */
	if ((meta.tiocm.tiocm_old & SMUX_CMD_STATUS_FLOW_CNTL) ^
		(meta.tiocm.tiocm_new & SMUX_CMD_STATUS_FLOW_CNTL)) {
		/* logical channel flow control changed */
		if (pkt->hdr.flags & SMUX_CMD_STATUS_FLOW_CNTL) {
			/* disabled TX */
			SMUX_DBG("TX Flow control enabled\n");
			ch->tx_flow_control = 1;
		} else {
			/* re-enable channel */
			SMUX_DBG("TX Flow control disabled\n");
			ch->tx_flow_control = 0;
			tx_ready = 1;
		}
	}
	meta.tiocm.tiocm_old = msm_smux_tiocm_get_atomic(ch);
	ch->remote_tiocm = pkt->hdr.flags;
	meta.tiocm.tiocm_new = msm_smux_tiocm_get_atomic(ch);

	/* client notification for status change */
	if (IS_FULLY_OPENED(ch)) {
		if (meta.tiocm.tiocm_old != meta.tiocm.tiocm_new)
			schedule_notify(lcid, SMUX_TIOCM_UPDATE, &meta);
		ret = 0;
	}
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
	if (tx_ready)
		list_channel(ch);

	return ret;
}

/**
 * Handle receive power command.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_power_cmd(struct smux_pkt_t *pkt)
{
	struct smux_pkt_t *ack_pkt = NULL;
	int power_down = 0;
	unsigned long flags;

	SMUX_PWR_PKT_RX(pkt);

	spin_lock_irqsave(&smux.tx_lock_lha2, flags);
	if (pkt->hdr.flags & SMUX_CMD_PWR_CTL_ACK) {
		/* local sleep request ack */
		if (smux.power_state == SMUX_PWR_TURNING_OFF)
			/* Power-down complete, turn off UART */
			power_down = 1;
		else
			pr_err("%s: sleep request ack invalid in state %d\n",
					__func__, smux.power_state);
	} else {
		/*
		 * Remote sleep request
		 *
		 * Even if we have data pending, we need to transition to the
		 * POWER_OFF state and then perform a wakeup since the remote
		 * side has requested a power-down.
		 *
		 * The state here is set to SMUX_PWR_TURNING_OFF_FLUSH and
		 * the TX thread will set the state to SMUX_PWR_TURNING_OFF
		 * when it sends the packet.
		 *
		 * If we are already powering down, then no ACK is sent.
		 */
		if (smux.power_state == SMUX_PWR_ON) {
			ack_pkt = smux_alloc_pkt();
			if (ack_pkt) {
				SMUX_PWR("%s: Power %d->%d\n", __func__,
						smux.power_state,
						SMUX_PWR_TURNING_OFF_FLUSH);

				smux.power_state = SMUX_PWR_TURNING_OFF_FLUSH;

				/* send power-down ack */
				ack_pkt->hdr.cmd = SMUX_CMD_PWR_CTL;
				ack_pkt->hdr.flags = SMUX_CMD_PWR_CTL_ACK;
				ack_pkt->hdr.lcid = SMUX_BROADCAST_LCID;
				list_add_tail(&ack_pkt->list,
						&smux.power_queue);
				queue_work(smux_tx_wq, &smux_tx_work);
			}
		} else if (smux.power_state == SMUX_PWR_TURNING_OFF_FLUSH) {
			/* Local power-down request still in TX queue */
			SMUX_PWR("%s: Power-down shortcut - no ack\n",
					__func__);
			smux.power_ctl_remote_req_received = 1;
		} else if (smux.power_state == SMUX_PWR_TURNING_OFF) {
			/*
			 * Local power-down request already sent to remote
			 * side, so this request gets treated as an ACK.
			 */
			SMUX_PWR("%s: Power-down shortcut - no ack\n",
					__func__);
			power_down = 1;
		} else {
			pr_err("%s: sleep request invalid in state %d\n",
					__func__, smux.power_state);
		}
	}

	if (power_down) {
		SMUX_PWR("%s: Power %d->%d\n", __func__,
				smux.power_state, SMUX_PWR_OFF_FLUSH);
		smux.power_state = SMUX_PWR_OFF_FLUSH;
		queue_work(smux_tx_wq, &smux_inactivity_work);
	}
	spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);

	return 0;
}

/**
 * Handle dispatching a completed packet for receive processing.
 *
 * @pkt Packet to process
 *
 * @returns 0 for success
 */
static int smux_dispatch_rx_pkt(struct smux_pkt_t *pkt)
{
	int ret = -ENXIO;

	switch (pkt->hdr.cmd) {
	case SMUX_CMD_OPEN_LCH:
		SMUX_LOG_PKT_RX(pkt);
		if (smux_assert_lch_id(pkt->hdr.lcid)) {
			pr_err("%s: invalid channel id %d\n",
					__func__, pkt->hdr.lcid);
			break;
		}
		ret = smux_handle_rx_open_cmd(pkt);
		break;

	case SMUX_CMD_DATA:
		SMUX_LOG_PKT_RX(pkt);
		if (smux_assert_lch_id(pkt->hdr.lcid)) {
			pr_err("%s: invalid channel id %d\n",
					__func__, pkt->hdr.lcid);
			break;
		}
		ret = smux_handle_rx_data_cmd(pkt);
		break;

	case SMUX_CMD_CLOSE_LCH:
		SMUX_LOG_PKT_RX(pkt);
		if (smux_assert_lch_id(pkt->hdr.lcid)) {
			pr_err("%s: invalid channel id %d\n",
					__func__, pkt->hdr.lcid);
			break;
		}
		ret = smux_handle_rx_close_cmd(pkt);
		break;

	case SMUX_CMD_STATUS:
		SMUX_LOG_PKT_RX(pkt);
		if (smux_assert_lch_id(pkt->hdr.lcid)) {
			pr_err("%s: invalid channel id %d\n",
					__func__, pkt->hdr.lcid);
			break;
		}
		ret = smux_handle_rx_status_cmd(pkt);
		break;

	case SMUX_CMD_PWR_CTL:
		ret = smux_handle_rx_power_cmd(pkt);
		break;

	case SMUX_CMD_BYTE:
		SMUX_LOG_PKT_RX(pkt);
		ret = smux_handle_rx_byte_cmd(pkt);
		break;

	default:
		SMUX_LOG_PKT_RX(pkt);
		pr_err("%s: command %d unknown\n", __func__, pkt->hdr.cmd);
		ret = -EINVAL;
	}
	return ret;
}
1805
1806/**
1807 * Deserializes a packet and dispatches it to the packet receive logic.
1808 *
1809 * @data Raw data for one packet
1810 * @len Length of the data
1811 *
1812 * @returns 0 for success
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001813 */
1814static int smux_deserialize(unsigned char *data, int len)
1815{
1816 struct smux_pkt_t recv;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001817
1818 smux_init_pkt(&recv);
1819
1820 /*
1821 * It may be possible to optimize this to not use the
1822 * temporary buffer.
1823 */
1824 memcpy(&recv.hdr, data, sizeof(struct smux_hdr_t));
1825
1826 if (recv.hdr.magic != SMUX_MAGIC) {
1827 pr_err("%s: invalid header magic\n", __func__);
1828 return -EINVAL;
1829 }
1830
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001831 if (recv.hdr.payload_len)
1832 recv.payload = data + sizeof(struct smux_hdr_t);
1833
1834 return smux_dispatch_rx_pkt(&recv);
1835}
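/*
 * On-the-wire frame layout, as implied by smux_deserialize() and the RX
 * state machine below. The authoritative definition of struct
 * smux_hdr_t lives in smux_private.h; the field order sketched after
 * the magic bytes is an assumption for illustration only:
 *
 *   [SMUX_MAGIC_WORD1][SMUX_MAGIC_WORD2]      16-bit SMUX_MAGIC
 *   [cmd][flags][lcid][payload_len][pad_len]  rest of struct smux_hdr_t
 *   [payload_len bytes]                       payload
 *   [pad_len bytes]                           padding (ignored on RX)
 */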
1836
1837/**
1838 * Handle wakeup request byte.
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001839 */
1840static void smux_handle_wakeup_req(void)
1841{
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001842 unsigned long flags;
1843
1844 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001845 if (smux.power_state == SMUX_PWR_OFF
1846 || smux.power_state == SMUX_PWR_TURNING_ON) {
1847 /* wakeup system */
Eric Holmbergff0b0112012-06-08 15:06:57 -06001848 SMUX_PWR("%s: Power %d->%d\n", __func__,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001849 smux.power_state, SMUX_PWR_ON);
1850 smux.power_state = SMUX_PWR_ON;
1851 queue_work(smux_tx_wq, &smux_wakeup_work);
1852 queue_work(smux_tx_wq, &smux_tx_work);
1853 queue_delayed_work(smux_tx_wq, &smux_delayed_inactivity_work,
1854 msecs_to_jiffies(SMUX_INACTIVITY_TIMEOUT_MS));
1855 smux_send_byte(SMUX_WAKEUP_ACK);
Eric Holmberga9b06472012-06-22 09:46:34 -06001856 } else if (smux.power_state == SMUX_PWR_ON) {
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001857 smux_send_byte(SMUX_WAKEUP_ACK);
Eric Holmberga9b06472012-06-22 09:46:34 -06001858 } else {
1859 /* stale wakeup request from previous wakeup */
1860 SMUX_PWR("%s: stale Wakeup REQ in state %d\n",
1861 __func__, smux.power_state);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001862 }
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001863 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001864}
1865
1866/**
1867 * Handle wakeup request ack.
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001868 */
1869static void smux_handle_wakeup_ack(void)
1870{
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001871 unsigned long flags;
1872
1873 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001874 if (smux.power_state == SMUX_PWR_TURNING_ON) {
1875 /* received response to wakeup request */
Eric Holmbergff0b0112012-06-08 15:06:57 -06001876 SMUX_PWR("%s: Power %d->%d\n", __func__,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001877 smux.power_state, SMUX_PWR_ON);
1878 smux.power_state = SMUX_PWR_ON;
1879 queue_work(smux_tx_wq, &smux_tx_work);
1880 queue_delayed_work(smux_tx_wq, &smux_delayed_inactivity_work,
1881 msecs_to_jiffies(SMUX_INACTIVITY_TIMEOUT_MS));
1882
1883 } else if (smux.power_state != SMUX_PWR_ON) {
1884 /* invalid message */
Eric Holmberga9b06472012-06-22 09:46:34 -06001885 SMUX_PWR("%s: stale Wakeup REQ ACK in state %d\n",
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001886 __func__, smux.power_state);
1887 }
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001888 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001889}
1890
1891/**
1892 * RX State machine - IDLE state processing.
1893 *
1894 * @data New RX data to process
1895 * @len Length of the data
1896 * @used Return value of length processed
1897 * @flag Error flag - TTY_NORMAL 0 for no failure
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001898 */
1899static void smux_rx_handle_idle(const unsigned char *data,
1900 int len, int *used, int flag)
1901{
1902 int i;
1903
1904 if (flag) {
1905 if (smux_byte_loopback)
1906 smux_receive_byte(SMUX_UT_ECHO_ACK_FAIL,
1907 smux_byte_loopback);
1908 pr_err("%s: TTY error 0x%x - ignoring\n", __func__, flag);
1909 ++*used;
1910 return;
1911 }
1912
1913 for (i = *used; i < len && smux.rx_state == SMUX_RX_IDLE; i++) {
1914 switch (data[i]) {
1915 case SMUX_MAGIC_WORD1:
1916 smux.rx_state = SMUX_RX_MAGIC;
1917 break;
1918 case SMUX_WAKEUP_REQ:
Eric Holmberg4dd6e1a2012-06-26 13:46:29 -06001919 SMUX_PWR("smux: RX Wakeup REQ\n");
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001920 smux_handle_wakeup_req();
1921 break;
1922 case SMUX_WAKEUP_ACK:
Eric Holmberg4dd6e1a2012-06-26 13:46:29 -06001923 SMUX_PWR("smux: RX Wakeup ACK\n");
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001924 smux_handle_wakeup_ack();
1925 break;
1926 default:
1927 /* unexpected character */
1928 if (smux_byte_loopback && data[i] == SMUX_UT_ECHO_REQ)
1929 smux_receive_byte(SMUX_UT_ECHO_ACK_OK,
1930 smux_byte_loopback);
1931 pr_err("%s: parse error 0x%02x - ignoring\n", __func__,
1932 (unsigned)data[i]);
1933 break;
1934 }
1935 }
1936
1937 *used = i;
1938}
1939
1940/**
1941 * RX State machine - Header Magic state processing.
1942 *
1943 * @data New RX data to process
1944 * @len Length of the data
1945 * @used Return value of length processed
1946 * @flag Error flag - TTY_NORMAL 0 for no failure
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001947 */
1948static void smux_rx_handle_magic(const unsigned char *data,
1949 int len, int *used, int flag)
1950{
1951 int i;
1952
1953 if (flag) {
1954 pr_err("%s: TTY RX error %d\n", __func__, flag);
1955 smux_enter_reset();
1956 smux.rx_state = SMUX_RX_FAILURE;
1957 ++*used;
1958 return;
1959 }
1960
1961 for (i = *used; i < len && smux.rx_state == SMUX_RX_MAGIC; i++) {
1962 /* wait for completion of the magic */
1963 if (data[i] == SMUX_MAGIC_WORD2) {
1964 smux.recv_len = 0;
1965 smux.recv_buf[smux.recv_len++] = SMUX_MAGIC_WORD1;
1966 smux.recv_buf[smux.recv_len++] = SMUX_MAGIC_WORD2;
1967 smux.rx_state = SMUX_RX_HDR;
1968 } else {
1969 /* unexpected / trash character */
1970 pr_err("%s: rx parse error for char %c; *used=%d, len=%d\n",
1971 __func__, data[i], *used, len);
1972 smux.rx_state = SMUX_RX_IDLE;
1973 }
1974 }
1975
1976 *used = i;
1977}
1978
1979/**
1980 * RX State machine - Packet Header state processing.
1981 *
1982 * @data New RX data to process
1983 * @len Length of the data
1984 * @used Return value of length processed
1985 * @flag Error flag - TTY_NORMAL 0 for no failure
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001986 */
1987static void smux_rx_handle_hdr(const unsigned char *data,
1988 int len, int *used, int flag)
1989{
1990 int i;
1991 struct smux_hdr_t *hdr;
1992
1993 if (flag) {
1994 pr_err("%s: TTY RX error %d\n", __func__, flag);
1995 smux_enter_reset();
1996 smux.rx_state = SMUX_RX_FAILURE;
1997 ++*used;
1998 return;
1999 }
2000
2001 for (i = *used; i < len && smux.rx_state == SMUX_RX_HDR; i++) {
2002 smux.recv_buf[smux.recv_len++] = data[i];
2003
2004 if (smux.recv_len == sizeof(struct smux_hdr_t)) {
2005 /* complete header received */
2006 hdr = (struct smux_hdr_t *)smux.recv_buf;
2007 smux.pkt_remain = hdr->payload_len + hdr->pad_len;
2008 smux.rx_state = SMUX_RX_PAYLOAD;
2009 }
2010 }
2011 *used = i;
2012}
2013
2014/**
2015 * RX State machine - Packet Payload state processing.
2016 *
2017 * @data New RX data to process
2018 * @len Length of the data
2019 * @used Return value of length processed
2020 * @flag Error flag - TTY_NORMAL 0 for no failure
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002021 */
2022static void smux_rx_handle_pkt_payload(const unsigned char *data,
2023 int len, int *used, int flag)
2024{
2025 int remaining;
2026
2027 if (flag) {
2028 pr_err("%s: TTY RX error %d\n", __func__, flag);
2029 smux_enter_reset();
2030 smux.rx_state = SMUX_RX_FAILURE;
2031 ++*used;
2032 return;
2033 }
2034
2035 /* copy data into rx buffer */
2036 if (smux.pkt_remain < (len - *used))
2037 remaining = smux.pkt_remain;
2038 else
2039 remaining = len - *used;
2040
2041 memcpy(&smux.recv_buf[smux.recv_len], &data[*used], remaining);
2042 smux.recv_len += remaining;
2043 smux.pkt_remain -= remaining;
2044 *used += remaining;
2045
2046 if (smux.pkt_remain == 0) {
2047 /* complete packet received */
2048 smux_deserialize(smux.recv_buf, smux.recv_len);
2049 smux.rx_state = SMUX_RX_IDLE;
2050 }
2051}
2052
2053/**
2054 * Feed data to the receive state machine.
2055 *
2056 * @data Pointer to data block
2057 * @len Length of data
2058 * @flag TTY_NORMAL (0) for no error, otherwise TTY Error Flag
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002059 */
2060void smux_rx_state_machine(const unsigned char *data,
2061 int len, int flag)
2062{
Eric Holmberg0560f7a2012-05-31 15:50:26 -06002063 struct smux_rx_worker_data work;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002064
Eric Holmberg0560f7a2012-05-31 15:50:26 -06002065 work.data = data;
2066 work.len = len;
2067 work.flag = flag;
2068 INIT_WORK_ONSTACK(&work.work, smux_rx_worker);
2069 work.work_complete = COMPLETION_INITIALIZER_ONSTACK(work.work_complete);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002070
Eric Holmberg0560f7a2012-05-31 15:50:26 -06002071 queue_work(smux_rx_wq, &work.work);
2072 wait_for_completion(&work.work_complete);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002073}
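/*
 * Example (illustrative only -- the helper below is hypothetical and
 * not part of this driver): the line-discipline receive path feeds raw
 * TTY bytes into the state machine one buffer at a time.
 *
 *	static void example_feed_rx(const unsigned char *cp, int count)
 *	{
 *		smux_rx_state_machine(cp, count, TTY_NORMAL);
 *	}
 *
 * Since smux_rx_state_machine() blocks on work_complete until the RX
 * worker has consumed the buffer, it must be called from a context
 * that is allowed to sleep.
 */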
2074
2075/**
2076 * Add channel to transmit-ready list and trigger transmit worker.
2077 *
2078 * @ch Channel to add
2079 */
2080static void list_channel(struct smux_lch_t *ch)
2081{
2082 unsigned long flags;
2083
2084 SMUX_DBG("%s: listing channel %d\n",
2085 __func__, ch->lcid);
2086
2087 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
2088 spin_lock(&ch->tx_lock_lhb2);
2089 smux.tx_activity_flag = 1;
2090 if (list_empty(&ch->tx_ready_list))
2091 list_add_tail(&ch->tx_ready_list, &smux.lch_tx_ready_list);
2092 spin_unlock(&ch->tx_lock_lhb2);
2093 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2094
2095 queue_work(smux_tx_wq, &smux_tx_work);
2096}
2097
2098/**
2099 * Transmit packet on correct transport and then perform client
2100 * notification.
2101 *
2102 * @ch Channel to transmit on
2103 * @pkt Packet to transmit
2104 */
2105static void smux_tx_pkt(struct smux_lch_t *ch, struct smux_pkt_t *pkt)
2106{
2107 union notifier_metadata meta_write;
2108 int ret;
2109
2110 if (ch && pkt) {
2111 SMUX_LOG_PKT_TX(pkt);
2112 if (ch->local_mode == SMUX_LCH_MODE_LOCAL_LOOPBACK)
2113 ret = smux_tx_loopback(pkt);
2114 else
2115 ret = smux_tx_tty(pkt);
2116
2117 if (pkt->hdr.cmd == SMUX_CMD_DATA) {
2118 /* notify write-done */
2119 meta_write.write.pkt_priv = pkt->priv;
2120 meta_write.write.buffer = pkt->payload;
2121 meta_write.write.len = pkt->hdr.payload_len;
2122 if (ret >= 0) {
2123 SMUX_DBG("%s: PKT write done", __func__);
2124 schedule_notify(ch->lcid, SMUX_WRITE_DONE,
2125 &meta_write);
2126 } else {
2127 pr_err("%s: failed to write pkt %d\n",
2128 __func__, ret);
2129 schedule_notify(ch->lcid, SMUX_WRITE_FAIL,
2130 &meta_write);
2131 }
2132 }
2133 }
2134}
2135
2136/**
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002137 * Flush pending TTY TX data.
2138 */
2139static void smux_flush_tty(void)
2140{
Eric Holmberg92a67df2012-06-25 13:56:24 -06002141 mutex_lock(&smux.mutex_lha0);
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002142 if (!smux.tty) {
2143 pr_err("%s: ldisc not loaded\n", __func__);
Eric Holmberg92a67df2012-06-25 13:56:24 -06002144 mutex_unlock(&smux.mutex_lha0);
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002145 return;
2146 }
2147
2148 tty_wait_until_sent(smux.tty,
2149 msecs_to_jiffies(TTY_BUFFER_FULL_WAIT_MS));
2150
2151 if (tty_chars_in_buffer(smux.tty) > 0)
2152 pr_err("%s: unable to flush UART queue\n", __func__);
Eric Holmberg92a67df2012-06-25 13:56:24 -06002153
2154 mutex_unlock(&smux.mutex_lha0);
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002155}
2156
2157/**
Eric Holmberged1f00c2012-06-07 09:45:18 -06002158 * Purge TX queue for logical channel.
2159 *
2160 * @ch Logical channel pointer
2161 *
2162 * Must be called with the following spinlocks locked:
2163 * state_lock_lhb1
2164 * tx_lock_lhb2
2165 */
2166static void smux_purge_ch_tx_queue(struct smux_lch_t *ch)
2167{
2168 struct smux_pkt_t *pkt;
2169 int send_disconnect = 0;
2170
2171 while (!list_empty(&ch->tx_queue)) {
2172 pkt = list_first_entry(&ch->tx_queue, struct smux_pkt_t,
2173 list);
2174 list_del(&pkt->list);
2175
2176 if (pkt->hdr.cmd == SMUX_CMD_OPEN_LCH) {
2177 /* Open was never sent, just force to closed state */
2178 ch->local_state = SMUX_LCH_LOCAL_CLOSED;
2179 send_disconnect = 1;
2180 } else if (pkt->hdr.cmd == SMUX_CMD_DATA) {
2181 /* Notify client of failed write */
2182 union notifier_metadata meta_write;
2183
2184 meta_write.write.pkt_priv = pkt->priv;
2185 meta_write.write.buffer = pkt->payload;
2186 meta_write.write.len = pkt->hdr.payload_len;
2187 schedule_notify(ch->lcid, SMUX_WRITE_FAIL, &meta_write);
2188 }
2189 smux_free_pkt(pkt);
2190 }
2191
2192 if (send_disconnect) {
2193 union notifier_metadata meta_disconnected;
2194
2195 meta_disconnected.disconnected.is_ssr = smux.in_reset;
2196 schedule_notify(ch->lcid, SMUX_DISCONNECTED,
2197 &meta_disconnected);
2198 }
2199}
2200
2201/**
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002202 * Power up the UART.
Eric Holmberg92a67df2012-06-25 13:56:24 -06002203 *
2204 * Must be called with smux.mutex_lha0 already locked.
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002205 */
Eric Holmberg92a67df2012-06-25 13:56:24 -06002206static void smux_uart_power_on_atomic(void)
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002207{
2208 struct uart_state *state;
2209
2210 if (!smux.tty || !smux.tty->driver_data) {
2211 pr_err("%s: unable to find UART port for tty %p\n",
2212 __func__, smux.tty);
2213 return;
2214 }
2215 state = smux.tty->driver_data;
2216 msm_hs_request_clock_on(state->uart_port);
2217}
2218
2219/**
Eric Holmberg92a67df2012-06-25 13:56:24 -06002220 * Power up the UART.
2221 */
2222static void smux_uart_power_on(void)
2223{
2224 mutex_lock(&smux.mutex_lha0);
2225 smux_uart_power_on_atomic();
2226 mutex_unlock(&smux.mutex_lha0);
2227}
2228
2229/**
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002230 * Power down the UART.
Eric Holmberg06011322012-07-06 18:17:03 -06002231 *
2232 * Must be called with mutex_lha0 locked.
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002233 */
Eric Holmberg06011322012-07-06 18:17:03 -06002234static void smux_uart_power_off_atomic(void)
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002235{
2236 struct uart_state *state;
2237
2238 if (!smux.tty || !smux.tty->driver_data) {
2239 pr_err("%s: unable to find UART port for tty %p\n",
2240 __func__, smux.tty);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002242 return;
2243 }
2244 state = smux.tty->driver_data;
2245 msm_hs_request_clock_off(state->uart_port);
Eric Holmberg06011322012-07-06 18:17:03 -06002246}
2247
2248/**
2249 * Power down the UART.
2250 */
2251static void smux_uart_power_off(void)
2252{
2253 mutex_lock(&smux.mutex_lha0);
2254 smux_uart_power_off_atomic();
Eric Holmberg92a67df2012-06-25 13:56:24 -06002255 mutex_unlock(&smux.mutex_lha0);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002256}
2257
2258/**
2259 * TX Wakeup Worker
2260 *
2261 * @work Not used
2262 *
2263 * Do an exponential back-off wakeup sequence with a maximum period
2264 * of approximately 1 second (1 << 20 microseconds).
2265 */
2266static void smux_wakeup_worker(struct work_struct *work)
2267{
2268 unsigned long flags;
2269 unsigned wakeup_delay;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002270
Eric Holmberg2d4f9e82012-06-21 13:12:39 -06002271 if (smux.in_reset)
2272 return;
2273
2274 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
2275 if (smux.power_state == SMUX_PWR_ON) {
2276 /* wakeup complete */
Eric Holmberga9b06472012-06-22 09:46:34 -06002277 smux.pwr_wakeup_delay_us = 1;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002278 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002279 SMUX_DBG("%s: wakeup complete\n", __func__);
Eric Holmberg2d4f9e82012-06-21 13:12:39 -06002280
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002281 /*
2282 * Cancel any pending retry. This avoids a race condition with
2283 * a new power-up request because:
2284 * 1) this worker doesn't modify the state
2285 * 2) this worker is processed on the same single-threaded
2286 * workqueue as new TX wakeup requests
2287 */
2288 cancel_delayed_work(&smux_wakeup_delayed_work);
Eric Holmbergd032f5b2012-06-29 19:02:00 -06002289 queue_work(smux_tx_wq, &smux_tx_work);
Eric Holmberga9b06472012-06-22 09:46:34 -06002290 } else if (smux.power_state == SMUX_PWR_TURNING_ON) {
Eric Holmberg2d4f9e82012-06-21 13:12:39 -06002291 /* retry wakeup */
2292 wakeup_delay = smux.pwr_wakeup_delay_us;
2293 smux.pwr_wakeup_delay_us <<= 1;
2294 if (smux.pwr_wakeup_delay_us > SMUX_WAKEUP_DELAY_MAX)
2295 smux.pwr_wakeup_delay_us =
2296 SMUX_WAKEUP_DELAY_MAX;
2297
2298 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
Eric Holmberga9b06472012-06-22 09:46:34 -06002299 SMUX_PWR("%s: triggering wakeup\n", __func__);
Eric Holmberg2d4f9e82012-06-21 13:12:39 -06002300 smux_send_byte(SMUX_WAKEUP_REQ);
2301
2302 if (wakeup_delay < SMUX_WAKEUP_DELAY_MIN) {
2303 SMUX_DBG("%s: sleeping for %u us\n", __func__,
2304 wakeup_delay);
2305 usleep_range(wakeup_delay, 2*wakeup_delay);
2306 queue_work(smux_tx_wq, &smux_wakeup_work);
2307 } else {
2308 /* schedule delayed work */
2309 SMUX_DBG("%s: scheduling delayed wakeup in %u ms\n",
2310 __func__, wakeup_delay / 1000);
2311 queue_delayed_work(smux_tx_wq,
2312 &smux_wakeup_delayed_work,
2313 msecs_to_jiffies(wakeup_delay / 1000));
2314 }
Eric Holmberga9b06472012-06-22 09:46:34 -06002315 } else {
2316 /* wakeup aborted */
2317 smux.pwr_wakeup_delay_us = 1;
2318 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2319 SMUX_PWR("%s: wakeup aborted\n", __func__);
2320 cancel_delayed_work(&smux_wakeup_delayed_work);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002321 }
2322}
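/*
 * Back-off illustration (values follow from SMUX_WAKEUP_DELAY_MIN/MAX
 * defined at the top of this file): the retry delay doubles as
 * 1, 2, 4, ... microseconds. Delays below SMUX_WAKEUP_DELAY_MIN
 * (1 << 15 us, roughly 33 ms) are slept inline with usleep_range() and
 * the worker is requeued immediately; longer delays are scheduled as
 * delayed work in milliseconds, capped at SMUX_WAKEUP_DELAY_MAX
 * (1 << 20 us, roughly 1 s).
 */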
2323
2324
2325/**
2326 * Inactivity timeout worker. Periodically scheduled when link is active.
2327 * When it detects inactivity, it will power down the UART link.
2328 *
2329 * @work Work structure (not used)
2330 */
2331static void smux_inactivity_worker(struct work_struct *work)
2332{
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002333 struct smux_pkt_t *pkt;
2334 unsigned long flags;
2335
Eric Holmberg06011322012-07-06 18:17:03 -06002336 if (smux.in_reset)
2337 return;
2338
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002339 spin_lock_irqsave(&smux.rx_lock_lha1, flags);
2340 spin_lock(&smux.tx_lock_lha2);
2341
2342 if (!smux.tx_activity_flag && !smux.rx_activity_flag) {
2343 /* no activity */
2344 if (smux.powerdown_enabled) {
2345 if (smux.power_state == SMUX_PWR_ON) {
2346 /* start power-down sequence */
2347 pkt = smux_alloc_pkt();
2348 if (pkt) {
Eric Holmbergff0b0112012-06-08 15:06:57 -06002349 SMUX_PWR("%s: Power %d->%d\n", __func__,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002350 smux.power_state,
Eric Holmberga9b06472012-06-22 09:46:34 -06002351 SMUX_PWR_TURNING_OFF_FLUSH);
2352 smux.power_state =
2353 SMUX_PWR_TURNING_OFF_FLUSH;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002354
2355 /* send power-down request */
2356 pkt->hdr.cmd = SMUX_CMD_PWR_CTL;
2357 pkt->hdr.flags = 0;
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002358 pkt->hdr.lcid = SMUX_BROADCAST_LCID;
2359 list_add_tail(&pkt->list,
2360 &smux.power_queue);
2361 queue_work(smux_tx_wq, &smux_tx_work);
2362 } else {
2363 pr_err("%s: packet alloc failed\n",
2364 __func__);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002365 }
2366 }
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002367 }
2368 }
2369 smux.tx_activity_flag = 0;
2370 smux.rx_activity_flag = 0;
2371
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002372 if (smux.power_state == SMUX_PWR_OFF_FLUSH) {
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002373 /* ready to power-down the UART */
Eric Holmbergff0b0112012-06-08 15:06:57 -06002374 SMUX_PWR("%s: Power %d->%d\n", __func__,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002375 smux.power_state, SMUX_PWR_OFF);
Eric Holmbergff0b0112012-06-08 15:06:57 -06002376 smux.power_state = SMUX_PWR_OFF;
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002377
2378 /* if data is pending, schedule a new wakeup */
2379 if (!list_empty(&smux.lch_tx_ready_list) ||
2380 !list_empty(&smux.power_queue))
2381 queue_work(smux_tx_wq, &smux_tx_work);
2382
2383 spin_unlock(&smux.tx_lock_lha2);
2384 spin_unlock_irqrestore(&smux.rx_lock_lha1, flags);
2385
2386 /* flush UART output queue and power down */
2387 smux_flush_tty();
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002388 smux_uart_power_off();
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002389 } else {
2390 spin_unlock(&smux.tx_lock_lha2);
2391 spin_unlock_irqrestore(&smux.rx_lock_lha1, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002392 }
2393
2394 /* reschedule inactivity worker */
2395 if (smux.power_state != SMUX_PWR_OFF)
2396 queue_delayed_work(smux_tx_wq, &smux_delayed_inactivity_work,
2397 msecs_to_jiffies(SMUX_INACTIVITY_TIMEOUT_MS));
2398}
2399
2400/**
Eric Holmbergb8435c82012-06-05 14:51:29 -06002401 * Remove RX retry packet from channel and free it.
2402 *
Eric Holmbergb8435c82012-06-05 14:51:29 -06002403 * @ch Channel for retry packet
2404 * @retry Retry packet to remove
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002405 *
2406 * @returns 1 if flow control updated; 0 otherwise
2407 *
2408 * Must be called with state_lock_lhb1 locked.
Eric Holmbergb8435c82012-06-05 14:51:29 -06002409 */
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002410int smux_remove_rx_retry(struct smux_lch_t *ch,
Eric Holmbergb8435c82012-06-05 14:51:29 -06002411 struct smux_rx_pkt_retry *retry)
2412{
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002413 int tx_ready = 0;
2414
Eric Holmbergb8435c82012-06-05 14:51:29 -06002415 list_del(&retry->rx_retry_list);
2416 --ch->rx_retry_queue_cnt;
2417 smux_free_pkt(retry->pkt);
2418 kfree(retry);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002419
2420 if ((ch->options & SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP) &&
2421 (ch->rx_retry_queue_cnt <= SMUX_RX_WM_LOW) &&
2422 ch->rx_flow_control_auto) {
2423 ch->rx_flow_control_auto = 0;
2424 smux_rx_flow_control_updated(ch);
2425 schedule_notify(ch->lcid, SMUX_RX_RETRY_LOW_WM_HIT, NULL);
2426 tx_ready = 1;
2427 }
2428 return tx_ready;
Eric Holmbergb8435c82012-06-05 14:51:29 -06002429}
2430
2431/**
Eric Holmberg0560f7a2012-05-31 15:50:26 -06002432 * RX worker handles all receive operations.
2433 *
2434 * @work Work structure contained in struct smux_rx_worker_data
2435 */
2436static void smux_rx_worker(struct work_struct *work)
2437{
2438 unsigned long flags;
2439 int used;
2440 int initial_rx_state;
2441 struct smux_rx_worker_data *w;
2442 const unsigned char *data;
2443 int len;
2444 int flag;
2445
2446 w = container_of(work, struct smux_rx_worker_data, work);
2447 data = w->data;
2448 len = w->len;
2449 flag = w->flag;
2450
2451 spin_lock_irqsave(&smux.rx_lock_lha1, flags);
2452 smux.rx_activity_flag = 1;
2453 spin_unlock_irqrestore(&smux.rx_lock_lha1, flags);
2454
2455 SMUX_DBG("%s: %p, len=%d, flag=%d\n", __func__, data, len, flag);
2456 used = 0;
2457 do {
Eric Holmberg06011322012-07-06 18:17:03 -06002458 if (smux.in_reset) {
2459 SMUX_DBG("%s: abort RX due to reset\n", __func__);
2460 smux.rx_state = SMUX_RX_IDLE;
2461 break;
2462 }
2463
Eric Holmberg0560f7a2012-05-31 15:50:26 -06002464 SMUX_DBG("%s: state %d; %d of %d\n",
2465 __func__, smux.rx_state, used, len);
2466 initial_rx_state = smux.rx_state;
2467
2468 switch (smux.rx_state) {
2469 case SMUX_RX_IDLE:
2470 smux_rx_handle_idle(data, len, &used, flag);
2471 break;
2472 case SMUX_RX_MAGIC:
2473 smux_rx_handle_magic(data, len, &used, flag);
2474 break;
2475 case SMUX_RX_HDR:
2476 smux_rx_handle_hdr(data, len, &used, flag);
2477 break;
2478 case SMUX_RX_PAYLOAD:
2479 smux_rx_handle_pkt_payload(data, len, &used, flag);
2480 break;
2481 default:
2482 SMUX_DBG("%s: invalid state %d\n",
2483 __func__, smux.rx_state);
2484 smux.rx_state = SMUX_RX_IDLE;
2485 break;
2486 }
2487 } while (used < len || smux.rx_state != initial_rx_state);
2488
2489 complete(&w->work_complete);
2490}
2491
2492/**
Eric Holmbergb8435c82012-06-05 14:51:29 -06002493 * RX Retry worker handles retrying get_rx_buffer calls that previously failed
2494 * because the client was not ready (-EAGAIN).
2495 *
2496 * @work Work structure contained in smux_lch_t structure
2497 */
2498static void smux_rx_retry_worker(struct work_struct *work)
2499{
2500 struct smux_lch_t *ch;
2501 struct smux_rx_pkt_retry *retry;
2502 union notifier_metadata metadata;
2503 int tmp;
2504 unsigned long flags;
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002505 int immediate_retry = 0;
2506 int tx_ready = 0;
Eric Holmbergb8435c82012-06-05 14:51:29 -06002507
2508 ch = container_of(work, struct smux_lch_t, rx_retry_work.work);
2509
2510 /* get next retry packet */
2511 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
Eric Holmberg06011322012-07-06 18:17:03 -06002512 if ((ch->local_state != SMUX_LCH_LOCAL_OPENED) || smux.in_reset) {
Eric Holmbergb8435c82012-06-05 14:51:29 -06002513 /* port has been closed - remove all retries */
2514 while (!list_empty(&ch->rx_retry_queue)) {
2515 retry = list_first_entry(&ch->rx_retry_queue,
2516 struct smux_rx_pkt_retry,
2517 rx_retry_list);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002518 (void)smux_remove_rx_retry(ch, retry);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002519 }
2520 }
2521
2522 if (list_empty(&ch->rx_retry_queue)) {
2523 SMUX_DBG("%s: retry list empty for channel %d\n",
2524 __func__, ch->lcid);
2525 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2526 return;
2527 }
2528 retry = list_first_entry(&ch->rx_retry_queue,
2529 struct smux_rx_pkt_retry,
2530 rx_retry_list);
2531 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2532
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002533 SMUX_DBG("%s: ch %d retrying rx pkt %p\n",
2534 __func__, ch->lcid, retry);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002535 metadata.read.pkt_priv = 0;
2536 metadata.read.buffer = 0;
2537 tmp = ch->get_rx_buffer(ch->priv,
2538 (void **)&metadata.read.pkt_priv,
2539 (void **)&metadata.read.buffer,
2540 retry->pkt->hdr.payload_len);
2541 if (tmp == 0 && metadata.read.buffer) {
2542 /* have valid RX buffer */
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002543
Eric Holmbergb8435c82012-06-05 14:51:29 -06002544 memcpy(metadata.read.buffer, retry->pkt->payload,
2545 retry->pkt->hdr.payload_len);
2546 metadata.read.len = retry->pkt->hdr.payload_len;
2547
2548 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002549 tx_ready = smux_remove_rx_retry(ch, retry);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002550 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002551 schedule_notify(ch->lcid, SMUX_READ_DONE, &metadata);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002552 if (tx_ready)
2553 list_channel(ch);
2554
2555 immediate_retry = 1;
Eric Holmbergb8435c82012-06-05 14:51:29 -06002556 } else if (tmp == -EAGAIN ||
2557 (tmp == 0 && !metadata.read.buffer)) {
2558 /* retry again */
2559 retry->timeout_in_ms <<= 1;
2560 if (retry->timeout_in_ms > SMUX_RX_RETRY_MAX_MS) {
2561 /* timed out */
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002562 pr_err("%s: ch %d RX retry client timeout\n",
2563 __func__, ch->lcid);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002564 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002565 tx_ready = smux_remove_rx_retry(ch, retry);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002566 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002567 schedule_notify(ch->lcid, SMUX_READ_FAIL, NULL);
2568 if (tx_ready)
2569 list_channel(ch);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002570 }
2571 } else {
2572 /* client error - drop packet */
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002573 pr_err("%s: ch %d RX retry client failed (%d)\n",
2574 __func__, ch->lcid, tmp);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002575 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002576 tx_ready = smux_remove_rx_retry(ch, retry);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002577 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002578 schedule_notify(ch->lcid, SMUX_READ_FAIL, NULL);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002579 if (tx_ready)
2580 list_channel(ch);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002581 }
2582
2583 /* schedule next retry */
2584 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2585 if (!list_empty(&ch->rx_retry_queue)) {
2586 retry = list_first_entry(&ch->rx_retry_queue,
2587 struct smux_rx_pkt_retry,
2588 rx_retry_list);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002589
2590 if (immediate_retry)
2591 queue_delayed_work(smux_rx_wq, &ch->rx_retry_work, 0);
2592 else
2593 queue_delayed_work(smux_rx_wq, &ch->rx_retry_work,
2594 msecs_to_jiffies(retry->timeout_in_ms));
Eric Holmbergb8435c82012-06-05 14:51:29 -06002595 }
2596 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2597}
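/*
 * Example get_rx_buffer() client callback (an illustrative sketch; the
 * function name and allocation policy are hypothetical, and GFP_ATOMIC
 * is a conservative assumption about calling context). Returning
 * -EAGAIN, or 0 with a NULL buffer, places the packet on the retry
 * queue above with a delay that doubles from SMUX_RX_RETRY_MIN_MS up
 * to SMUX_RX_RETRY_MAX_MS before SMUX_READ_FAIL is reported; any other
 * error drops the packet immediately.
 *
 *	static int example_get_rx_buffer(void *priv, void **pkt_priv,
 *					 void **buffer, int size)
 *	{
 *		void *buf = kmalloc(size, GFP_ATOMIC);
 *
 *		if (!buf)
 *			return -EAGAIN;
 *		*pkt_priv = NULL;
 *		*buffer = buf;
 *		return 0;
 *	}
 */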
2598
2599/**
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002600 * Transmit worker handles serializing and transmitting packets onto the
2601 * underlying transport.
2602 *
2603 * @work Work structure (not used)
2604 */
2605static void smux_tx_worker(struct work_struct *work)
2606{
2607 struct smux_pkt_t *pkt;
2608 struct smux_lch_t *ch;
2609 unsigned low_wm_notif;
2610 unsigned lcid;
2611 unsigned long flags;
2612
2614 /*
2615 * Transmit packets in round-robin fashion based upon ready
2616 * channels.
2617 *
2618 * To eliminate the need to hold a lock for the entire
2619 * iteration through the channel ready list, the head of the
2620 * ready-channel list is always the next channel to be
2621 * processed. To send a packet, the first valid packet in
2622 * the head channel is removed and the head channel is then
2623 * rescheduled at the end of the queue by removing it and
2624 * inserting after the tail. The locks can then be released
2625 * while the packet is processed.
2626 */
Eric Holmberged1f00c2012-06-07 09:45:18 -06002627 while (!smux.in_reset) {
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002628 pkt = NULL;
2629 low_wm_notif = 0;
2630
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002631 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002632
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002633 /* handle wakeup if needed */
2634 if (smux.power_state == SMUX_PWR_OFF) {
2635 if (!list_empty(&smux.lch_tx_ready_list) ||
2636 !list_empty(&smux.power_queue)) {
2637 /* data to transmit, do wakeup */
Eric Holmbergff0b0112012-06-08 15:06:57 -06002638 SMUX_PWR("%s: Power %d->%d\n", __func__,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002639 smux.power_state,
2640 SMUX_PWR_TURNING_ON);
2641 smux.power_state = SMUX_PWR_TURNING_ON;
2642 spin_unlock_irqrestore(&smux.tx_lock_lha2,
2643 flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002644 queue_work(smux_tx_wq, &smux_wakeup_work);
2645 } else {
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002646 /* no activity -- stay asleep */
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002647 spin_unlock_irqrestore(&smux.tx_lock_lha2,
2648 flags);
2649 }
2650 break;
2651 }
2652
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002653 /* process any pending power packets */
2654 if (!list_empty(&smux.power_queue)) {
2655 pkt = list_first_entry(&smux.power_queue,
2656 struct smux_pkt_t, list);
2657 list_del(&pkt->list);
2658 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2659
Eric Holmberga9b06472012-06-22 09:46:34 -06002660 /* Adjust power state if this is a flush command */
2661 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
2662 if (smux.power_state == SMUX_PWR_TURNING_OFF_FLUSH &&
2663 pkt->hdr.cmd == SMUX_CMD_PWR_CTL) {
2664 if (pkt->hdr.flags & SMUX_CMD_PWR_CTL_ACK ||
2665 smux.power_ctl_remote_req_received) {
2666 /*
2667 * Sending remote power-down request ACK
2668 * or sending local power-down request
2669 * and we already received a remote
2670 * power-down request.
2671 */
2672 SMUX_PWR("%s: Power %d->%d\n", __func__,
2673 smux.power_state,
2674 SMUX_PWR_OFF_FLUSH);
2675 smux.power_state = SMUX_PWR_OFF_FLUSH;
2676 smux.power_ctl_remote_req_received = 0;
2677 queue_work(smux_tx_wq,
2678 &smux_inactivity_work);
2679 } else {
2680 /* sending local power-down request */
2681 SMUX_PWR("%s: Power %d->%d\n", __func__,
2682 smux.power_state,
2683 SMUX_PWR_TURNING_OFF);
2684 smux.power_state = SMUX_PWR_TURNING_OFF;
2685 }
2686 }
2687 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2688
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002689 /* send the packet */
Eric Holmberga9b06472012-06-22 09:46:34 -06002690 smux_uart_power_on();
2691 smux.tx_activity_flag = 1;
Eric Holmberg4dd6e1a2012-06-26 13:46:29 -06002692 SMUX_PWR_PKT_TX(pkt);
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002693 if (!smux_byte_loopback) {
2694 smux_tx_tty(pkt);
2695 smux_flush_tty();
2696 } else {
2697 smux_tx_loopback(pkt);
2698 }
2699
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002700 smux_free_pkt(pkt);
2701 continue;
2702 }
2703
2704 /* get the next ready channel */
2705 if (list_empty(&smux.lch_tx_ready_list)) {
2706 /* no ready channels */
2707 SMUX_DBG("%s: no more ready channels, exiting\n",
2708 __func__);
2709 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2710 break;
2711 }
2712 smux.tx_activity_flag = 1;
2713
2714 if (smux.power_state != SMUX_PWR_ON) {
2715 /* channel not ready to transmit */
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002716 SMUX_DBG("%s: waiting for link up (state %d)\n",
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002717 __func__,
2718 smux.power_state);
2719 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2720 break;
2721 }
2722
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002723 /* get the next packet to send and rotate channel list */
2724 ch = list_first_entry(&smux.lch_tx_ready_list,
2725 struct smux_lch_t,
2726 tx_ready_list);
2727
2728 spin_lock(&ch->state_lock_lhb1);
2729 spin_lock(&ch->tx_lock_lhb2);
2730 if (!list_empty(&ch->tx_queue)) {
2731 /*
2732 * If remote TX flow control is enabled or
2733 * the channel is not fully opened, then only
2734 * send command packets.
2735 */
2736 if (ch->tx_flow_control || !IS_FULLY_OPENED(ch)) {
2737 struct smux_pkt_t *curr;
2738 list_for_each_entry(curr, &ch->tx_queue, list) {
2739 if (curr->hdr.cmd != SMUX_CMD_DATA) {
2740 pkt = curr;
2741 break;
2742 }
2743 }
2744 } else {
2745 /* get next cmd/data packet to send */
2746 pkt = list_first_entry(&ch->tx_queue,
2747 struct smux_pkt_t, list);
2748 }
2749 }
2750
2751 if (pkt) {
2752 list_del(&pkt->list);
2753
2754 /* update packet stats */
2755 if (pkt->hdr.cmd == SMUX_CMD_DATA) {
2756 --ch->tx_pending_data_cnt;
2757 if (ch->notify_lwm &&
2758 ch->tx_pending_data_cnt
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002759 <= SMUX_TX_WM_LOW) {
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002760 ch->notify_lwm = 0;
2761 low_wm_notif = 1;
2762 }
2763 }
2764
2765 /* advance to the next ready channel */
2766 list_rotate_left(&smux.lch_tx_ready_list);
2767 } else {
2768 /* no data in channel to send, remove from ready list */
2769 list_del(&ch->tx_ready_list);
2770 INIT_LIST_HEAD(&ch->tx_ready_list);
2771 }
2772 lcid = ch->lcid;
2773 spin_unlock(&ch->tx_lock_lhb2);
2774 spin_unlock(&ch->state_lock_lhb1);
2775 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2776
2777 if (low_wm_notif)
2778 schedule_notify(lcid, SMUX_LOW_WM_HIT, NULL);
2779
2780 /* send the packet */
2781 smux_tx_pkt(ch, pkt);
2782 smux_free_pkt(pkt);
2783 }
2784}
2785
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002786/**
2787 * Update the RX flow control (sent in the TIOCM Status command).
2788 *
2789 * @ch Channel for update
2790 *
2791 * @returns 1 for updated, 0 for not updated
2792 *
2793 * Must be called with ch->state_lock_lhb1 locked.
2794 */
2795static int smux_rx_flow_control_updated(struct smux_lch_t *ch)
2796{
2797 int updated = 0;
2798 int prev_state;
2799
2800 prev_state = ch->local_tiocm & SMUX_CMD_STATUS_FLOW_CNTL;
2801
2802 if (ch->rx_flow_control_client || ch->rx_flow_control_auto)
2803 ch->local_tiocm |= SMUX_CMD_STATUS_FLOW_CNTL;
2804 else
2805 ch->local_tiocm &= ~SMUX_CMD_STATUS_FLOW_CNTL;
2806
2807 if (prev_state != (ch->local_tiocm & SMUX_CMD_STATUS_FLOW_CNTL)) {
2808 smux_send_status_cmd(ch);
2809 updated = 1;
2810 }
2811
2812 return updated;
2813}
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002814
Eric Holmberg06011322012-07-06 18:17:03 -06002815/**
2816 * Flush all SMUX workqueues.
2817 *
2818 * This sets the reset bit to abort any processing loops and then
2819 * flushes the workqueues to ensure that no new pending work is
2820 * running. Do not call with any locks used by workers held as
2821 * this will result in a deadlock.
2822 */
2823static void smux_flush_workqueues(void)
2824{
2825 smux.in_reset = 1;
2826
2827 SMUX_DBG("%s: flushing tx wq\n", __func__);
2828 flush_workqueue(smux_tx_wq);
2829 SMUX_DBG("%s: flushing rx wq\n", __func__);
2830 flush_workqueue(smux_rx_wq);
2831 SMUX_DBG("%s: flushing notify wq\n", __func__);
2832 flush_workqueue(smux_notify_wq);
2833}
2834
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002835/**********************************************************************/
2836/* Kernel API */
2837/**********************************************************************/
2838
2839/**
2840 * Set or clear channel option using the SMUX_CH_OPTION_* channel
2841 * flags.
2842 *
2843 * @lcid Logical channel ID
2844 * @set Options to set
2845 * @clear Options to clear
2846 *
2847 * @returns 0 for success, < 0 for failure
2848 */
2849int msm_smux_set_ch_option(uint8_t lcid, uint32_t set, uint32_t clear)
2850{
2851 unsigned long flags;
2852 struct smux_lch_t *ch;
2853 int tx_ready = 0;
2854 int ret = 0;
2855
2856 if (smux_assert_lch_id(lcid))
2857 return -ENXIO;
2858
2859 ch = &smux_lch[lcid];
2860 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2861
2862 /* Local loopback mode */
2863 if (set & SMUX_CH_OPTION_LOCAL_LOOPBACK)
2864 ch->local_mode = SMUX_LCH_MODE_LOCAL_LOOPBACK;
2865
2866 if (clear & SMUX_CH_OPTION_LOCAL_LOOPBACK)
2867 ch->local_mode = SMUX_LCH_MODE_NORMAL;
2868
2869 /* Remote loopback mode */
2870 if (set & SMUX_CH_OPTION_REMOTE_LOOPBACK)
2871 ch->local_mode = SMUX_LCH_MODE_REMOTE_LOOPBACK;
2872
2873 if (clear & SMUX_CH_OPTION_REMOTE_LOOPBACK)
2874 ch->local_mode = SMUX_LCH_MODE_NORMAL;
2875
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002876 /* RX Flow control */
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002877 if (set & SMUX_CH_OPTION_REMOTE_TX_STOP) {
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002878 ch->rx_flow_control_client = 1;
2879 tx_ready |= smux_rx_flow_control_updated(ch);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002880 }
2881
2882 if (clear & SMUX_CH_OPTION_REMOTE_TX_STOP) {
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002883 ch->rx_flow_control_client = 0;
2884 tx_ready |= smux_rx_flow_control_updated(ch);
2885 }
2886
2887 /* Auto RX Flow Control */
2888 if (set & SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP) {
2889 SMUX_DBG("%s: auto rx flow control option enabled\n",
2890 __func__);
2891 ch->options |= SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP;
2892 }
2893
2894 if (clear & SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP) {
2895 SMUX_DBG("%s: auto rx flow control option disabled\n",
2896 __func__);
2897 ch->options &= ~SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP;
2898 ch->rx_flow_control_auto = 0;
2899 tx_ready |= smux_rx_flow_control_updated(ch);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002900 }
2901
2902 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2903
2904 if (tx_ready)
2905 list_channel(ch);
2906
2907 return ret;
2908}
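/*
 * Example usage (the channel number is hypothetical): enable automatic
 * RX flow control so the driver manages SMUX_CH_OPTION_REMOTE_TX_STOP
 * on its own as the RX retry queue fills and drains, and clear local
 * loopback in the same call.
 *
 *	ret = msm_smux_set_ch_option(0,
 *			SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP,
 *			SMUX_CH_OPTION_LOCAL_LOOPBACK);
 */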
2909
2910/**
2911 * Starts the opening sequence for a logical channel.
2912 *
2913 * @lcid Logical channel ID
2914 * @priv Free for client usage
2915 * @notify Event notification function
2916 * @get_rx_buffer Function used to provide a receive buffer to SMUX
2917 *
2918 * @returns 0 for success, <0 otherwise
2919 *
2920 * A channel must be fully closed (either not previously opened, or
2921 * msm_smux_close() has been called and the SMUX_DISCONNECTED notification
2922 * has been received).
2923 *
2924 * Once the remote side is opened, the client will receive a SMUX_CONNECTED
2925 * event.
2926 */
2927int msm_smux_open(uint8_t lcid, void *priv,
2928 void (*notify)(void *priv, int event_type, const void *metadata),
2929 int (*get_rx_buffer)(void *priv, void **pkt_priv, void **buffer,
2930 int size))
2931{
2932 int ret;
2933 struct smux_lch_t *ch;
2934 struct smux_pkt_t *pkt;
2935 int tx_ready = 0;
2936 unsigned long flags;
2937
2938 if (smux_assert_lch_id(lcid))
2939 return -ENXIO;
2940
2941 ch = &smux_lch[lcid];
2942 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2943
2944 if (ch->local_state == SMUX_LCH_LOCAL_CLOSING) {
2945 ret = -EAGAIN;
2946 goto out;
2947 }
2948
2949 if (ch->local_state != SMUX_LCH_LOCAL_CLOSED) {
2950 pr_err("%s: open lcid %d local state %x invalid\n",
2951 __func__, lcid, ch->local_state);
2952 ret = -EINVAL;
2953 goto out;
2954 }
2955
2956 SMUX_DBG("lcid %d local state 0x%x -> 0x%x\n", lcid,
2957 ch->local_state,
2958 SMUX_LCH_LOCAL_OPENING);
2959
Eric Holmberg06011322012-07-06 18:17:03 -06002960 ch->rx_flow_control_auto = 0;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002961 ch->local_state = SMUX_LCH_LOCAL_OPENING;
2962
2963 ch->priv = priv;
2964 ch->notify = notify;
2965 ch->get_rx_buffer = get_rx_buffer;
2966 ret = 0;
2967
2968 /* Send Open Command */
2969 pkt = smux_alloc_pkt();
2970 if (!pkt) {
2971 ret = -ENOMEM;
2972 goto out;
2973 }
2974 pkt->hdr.magic = SMUX_MAGIC;
2975 pkt->hdr.cmd = SMUX_CMD_OPEN_LCH;
2976 pkt->hdr.flags = SMUX_CMD_OPEN_POWER_COLLAPSE;
2977 if (ch->local_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK)
2978 pkt->hdr.flags |= SMUX_CMD_OPEN_REMOTE_LOOPBACK;
2979 pkt->hdr.lcid = lcid;
2980 pkt->hdr.payload_len = 0;
2981 pkt->hdr.pad_len = 0;
2982 smux_tx_queue(pkt, ch, 0);
2983 tx_ready = 1;
2984
2985out:
2986 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
Eric Holmberg06011322012-07-06 18:17:03 -06002987 smux_rx_flow_control_updated(ch);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002988 if (tx_ready)
2989 list_channel(ch);
2990 return ret;
2991}
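/*
 * Example open sequence (illustrative; lcid 0 and the callbacks are
 * hypothetical -- example_get_rx_buffer() is sketched above after
 * smux_rx_retry_worker()):
 *
 *	static void example_notify(void *priv, int event_type,
 *				   const void *metadata)
 *	{
 *		if (event_type == SMUX_CONNECTED)
 *			pr_info("example: channel fully open\n");
 *		else if (event_type == SMUX_DISCONNECTED)
 *			pr_info("example: channel closed\n");
 *	}
 *
 *	ret = msm_smux_open(0, NULL, example_notify,
 *			    example_get_rx_buffer);
 *
 * Writes may be queued as soon as msm_smux_open() returns; they are
 * transmitted once SMUX_CONNECTED has been delivered.
 */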
2992
2993/**
2994 * Starts the closing sequence for a logical channel.
2995 *
2996 * @lcid Logical channel ID
2997 *
2998 * @returns 0 for success, <0 otherwise
2999 *
3000 * Once the close event has been acknowledged by the remote side, the client
3001 * will receive a SMUX_DISCONNECTED notification.
3002 */
3003int msm_smux_close(uint8_t lcid)
3004{
3005 int ret = 0;
3006 struct smux_lch_t *ch;
3007 struct smux_pkt_t *pkt;
3008 int tx_ready = 0;
3009 unsigned long flags;
3010
3011 if (smux_assert_lch_id(lcid))
3012 return -ENXIO;
3013
3014 ch = &smux_lch[lcid];
3015 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
3016 ch->local_tiocm = 0x0;
3017 ch->remote_tiocm = 0x0;
3018 ch->tx_pending_data_cnt = 0;
3019 ch->notify_lwm = 0;
3020
3021 /* Purge TX queue */
3022 spin_lock(&ch->tx_lock_lhb2);
Eric Holmberged1f00c2012-06-07 09:45:18 -06003023 smux_purge_ch_tx_queue(ch);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003024 spin_unlock(&ch->tx_lock_lhb2);
3025
3026 /* Send Close Command */
3027 if (ch->local_state == SMUX_LCH_LOCAL_OPENED ||
3028 ch->local_state == SMUX_LCH_LOCAL_OPENING) {
3029 SMUX_DBG("lcid %d local state 0x%x -> 0x%x\n", lcid,
3030 ch->local_state,
3031 SMUX_LCH_LOCAL_CLOSING);
3032
3033 ch->local_state = SMUX_LCH_LOCAL_CLOSING;
3034 pkt = smux_alloc_pkt();
3035 if (pkt) {
3036 pkt->hdr.cmd = SMUX_CMD_CLOSE_LCH;
3037 pkt->hdr.flags = 0;
3038 pkt->hdr.lcid = lcid;
3039 pkt->hdr.payload_len = 0;
3040 pkt->hdr.pad_len = 0;
3041 smux_tx_queue(pkt, ch, 0);
3042 tx_ready = 1;
3043 } else {
3044 pr_err("%s: pkt allocation failed\n", __func__);
3045 ret = -ENOMEM;
3046 }
Eric Holmbergb8435c82012-06-05 14:51:29 -06003047
3048 /* Purge RX retry queue */
3049 if (ch->rx_retry_queue_cnt)
3050 queue_delayed_work(smux_rx_wq, &ch->rx_retry_work, 0);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003051 }
3052 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
3053
3054 if (tx_ready)
3055 list_channel(ch);
3056
3057 return ret;
3058}
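/*
 * Example close (hypothetical lcid): the handshake completes
 * asynchronously, so a clean shutdown waits for the SMUX_DISCONNECTED
 * event in the notify callback before reopening the channel.
 *
 *	ret = msm_smux_close(0);
 */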
3059
3060/**
3061 * Write data to a logical channel.
3062 *
3063 * @lcid Logical channel ID
3064 * @pkt_priv Client data that will be returned with the SMUX_WRITE_DONE or
3065 * SMUX_WRITE_FAIL notification.
3066 * @data Data to write
3067 * @len Length of @data
3068 *
3069 * @returns 0 for success, <0 otherwise
3070 *
3071 * Data may be written immediately after msm_smux_open() is called,
3072 * but the data will wait in the transmit queue until the channel has
3073 * been fully opened.
3074 *
3075 * Once the data has been written, the client will receive either a completion
3076 * (SMUX_WRITE_DONE) or a failure notice (SMUX_WRITE_FAIL).
3077 */
3078int msm_smux_write(uint8_t lcid, void *pkt_priv, const void *data, int len)
3079{
3080 struct smux_lch_t *ch;
3081 struct smux_pkt_t *pkt = NULL;
3082 int tx_ready = 0;
3083 unsigned long flags;
3084 int ret;
3085
3086 if (smux_assert_lch_id(lcid))
3087 return -ENXIO;
3088
3089 ch = &smux_lch[lcid];
3090 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
3091
3092 if (ch->local_state != SMUX_LCH_LOCAL_OPENED &&
3093 ch->local_state != SMUX_LCH_LOCAL_OPENING) {
3094 pr_err("%s: hdr.invalid local state %d channel %d\n",
3095 __func__, ch->local_state, lcid);
3096 ret = -EINVAL;
3097 goto out;
3098 }
3099
3100 if (len > SMUX_MAX_PKT_SIZE - sizeof(struct smux_hdr_t)) {
3101 pr_err("%s: payload %d too large\n",
3102 __func__, len);
3103 ret = -E2BIG;
3104 goto out;
3105 }
3106
3107 pkt = smux_alloc_pkt();
3108 if (!pkt) {
3109 ret = -ENOMEM;
3110 goto out;
3111 }
3112
3113 pkt->hdr.cmd = SMUX_CMD_DATA;
3114 pkt->hdr.lcid = lcid;
3115 pkt->hdr.flags = 0;
3116 pkt->hdr.payload_len = len;
3117 pkt->payload = (void *)data;
3118 pkt->priv = pkt_priv;
3119 pkt->hdr.pad_len = 0;
3120
3121 spin_lock(&ch->tx_lock_lhb2);
3122 /* verify high watermark */
3123 SMUX_DBG("%s: pending %d\n", __func__, ch->tx_pending_data_cnt);
3124
Eric Holmberg2e0906f2012-06-26 13:29:14 -06003125 if (ch->tx_pending_data_cnt >= SMUX_TX_WM_HIGH) {
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003126 pr_err("%s: ch %d high watermark %d exceeded %d\n",
Eric Holmberg2e0906f2012-06-26 13:29:14 -06003127 __func__, lcid, SMUX_TX_WM_HIGH,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003128 ch->tx_pending_data_cnt);
3129 ret = -EAGAIN;
3130 goto out_inner;
3131 }
3132
3133 /* queue packet for transmit */
Eric Holmberg2e0906f2012-06-26 13:29:14 -06003134 if (++ch->tx_pending_data_cnt == SMUX_TX_WM_HIGH) {
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003135 ch->notify_lwm = 1;
3136 pr_err("%s: high watermark hit\n", __func__);
3137 schedule_notify(lcid, SMUX_HIGH_WM_HIT, NULL);
3138 }
3139 list_add_tail(&pkt->list, &ch->tx_queue);
3140
3141 /* add to ready list */
3142 if (IS_FULLY_OPENED(ch))
3143 tx_ready = 1;
3144
3145 ret = 0;
3146
3147out_inner:
3148 spin_unlock(&ch->tx_lock_lhb2);
3149
3150out:
3151 if (ret && pkt)
3152 smux_free_pkt(pkt);
3153 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
3154
3155 if (tx_ready)
3156 list_channel(ch);
3157
3158 return ret;
3159}
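/*
 * Example write (illustrative sketch; buf, src, and len are
 * hypothetical). The data buffer is referenced by the packet rather
 * than copied, so it must stay valid until the SMUX_WRITE_DONE or
 * SMUX_WRITE_FAIL notification arrives; passing the buffer as pkt_priv
 * lets the notify callback kfree() it. On a non-zero return no
 * notification follows and the caller still owns the buffer.
 *
 *	buf = kmalloc(len, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	memcpy(buf, src, len);
 *	ret = msm_smux_write(0, buf, buf, len);
 *	if (ret < 0)
 *		kfree(buf);
 */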
3160
3161/**
3162 * Returns true if the TX queue is currently full (high water mark).
3163 *
3164 * @lcid Logical channel ID
3165 * @returns 0 if channel is not full
3166 * 1 if it is full
3167 * < 0 for error
3168 */
3169int msm_smux_is_ch_full(uint8_t lcid)
3170{
3171 struct smux_lch_t *ch;
3172 unsigned long flags;
3173 int is_full = 0;
3174
3175 if (smux_assert_lch_id(lcid))
3176 return -ENXIO;
3177
3178 ch = &smux_lch[lcid];
3179
3180 spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06003181 if (ch->tx_pending_data_cnt >= SMUX_TX_WM_HIGH)
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003182 is_full = 1;
3183 spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);
3184
3185 return is_full;
3186}
3187
3188/**
3189 * Returns true if the TX queue has space for more packets (it is at or
3190 * below the low water mark).
3191 *
3192 * @lcid Logical channel ID
3193 * @returns 0 if channel is above low watermark
3194 * 1 if it's at or below the low watermark
3195 * < 0 for error
3196 */
3197int msm_smux_is_ch_low(uint8_t lcid)
3198{
3199 struct smux_lch_t *ch;
3200 unsigned long flags;
3201 int is_low = 0;
3202
3203 if (smux_assert_lch_id(lcid))
3204 return -ENXIO;
3205
3206 ch = &smux_lch[lcid];
3207
3208 spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06003209 if (ch->tx_pending_data_cnt <= SMUX_TX_WM_LOW)
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003210 is_low = 1;
3211 spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);
3212
3213 return is_low;
3214}
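/*
 * Example throttling pattern (hypothetical lcid and wait helper): stop
 * submitting writes once the queue is full and resume after a
 * SMUX_LOW_WM_HIT notification or when msm_smux_is_ch_low() reports
 * the queue has drained.
 *
 *	while (msm_smux_is_ch_full(0))
 *		example_wait_for_low_wm();
 */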
3215
3216/**
3217 * Send TIOCM status update.
3218 *
3219 * @ch Channel for update
3220 *
3221 * @returns 0 for success, <0 for failure
3222 *
3223 * Channel lock must be held before calling.
3224 */
3225static int smux_send_status_cmd(struct smux_lch_t *ch)
3226{
3227 struct smux_pkt_t *pkt;
3228
3229 if (!ch)
3230 return -EINVAL;
3231
3232 pkt = smux_alloc_pkt();
3233 if (!pkt)
3234 return -ENOMEM;
3235
3236 pkt->hdr.lcid = ch->lcid;
3237 pkt->hdr.cmd = SMUX_CMD_STATUS;
3238 pkt->hdr.flags = ch->local_tiocm;
3239 pkt->hdr.payload_len = 0;
3240 pkt->hdr.pad_len = 0;
3241 smux_tx_queue(pkt, ch, 0);
3242
3243 return 0;
3244}
3245
3246/**
3247 * Internal helper function for getting the TIOCM status with
3248 * state_lock_lhb1 already locked.
3249 *
3250 * @ch Channel pointer
3251 *
3252 * @returns TIOCM status
3253 */
3254static long msm_smux_tiocm_get_atomic(struct smux_lch_t *ch)
3255{
3256 long status = 0x0;
3257
3258 status |= (ch->remote_tiocm & SMUX_CMD_STATUS_RTC) ? TIOCM_DSR : 0;
3259 status |= (ch->remote_tiocm & SMUX_CMD_STATUS_RTR) ? TIOCM_CTS : 0;
3260 status |= (ch->remote_tiocm & SMUX_CMD_STATUS_RI) ? TIOCM_RI : 0;
3261 status |= (ch->remote_tiocm & SMUX_CMD_STATUS_DCD) ? TIOCM_CD : 0;
3262
3263 status |= (ch->local_tiocm & SMUX_CMD_STATUS_RTC) ? TIOCM_DTR : 0;
3264 status |= (ch->local_tiocm & SMUX_CMD_STATUS_RTR) ? TIOCM_RTS : 0;
3265
3266 return status;
3267}
3268
3269/**
3270 * Get the TIOCM status bits.
3271 *
3272 * @lcid Logical channel ID
3273 *
3274 * @returns >= 0 TIOCM status bits
3275 * < 0 Error condition
3276 */
3277long msm_smux_tiocm_get(uint8_t lcid)
3278{
3279 struct smux_lch_t *ch;
3280 unsigned long flags;
3281 long status = 0x0;
3282
3283 if (smux_assert_lch_id(lcid))
3284 return -ENXIO;
3285
3286 ch = &smux_lch[lcid];
3287 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
3288 status = msm_smux_tiocm_get_atomic(ch);
3289 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
3290
3291 return status;
3292}
3293
3294/**
3295 * Set/clear the TIOCM status bits.
3296 *
3297 * @lcid Logical channel ID
3298 * @set Bits to set
3299 * @clear Bits to clear
3300 *
3301 * @returns 0 for success; < 0 for failure
3302 *
3303 * If a bit is specified in both the @set and @clear masks, then the clear bit
3304 * definition will dominate and the bit will be cleared.
3305 */
3306int msm_smux_tiocm_set(uint8_t lcid, uint32_t set, uint32_t clear)
3307{
3308 struct smux_lch_t *ch;
3309 unsigned long flags;
3310 uint8_t old_status;
3311 uint8_t status_set = 0x0;
3312 uint8_t status_clear = 0x0;
3313 int tx_ready = 0;
3314 int ret = 0;
3315
3316 if (smux_assert_lch_id(lcid))
3317 return -ENXIO;
3318
3319 ch = &smux_lch[lcid];
3320 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
3321
3322 status_set |= (set & TIOCM_DTR) ? SMUX_CMD_STATUS_RTC : 0;
3323 status_set |= (set & TIOCM_RTS) ? SMUX_CMD_STATUS_RTR : 0;
3324 status_set |= (set & TIOCM_RI) ? SMUX_CMD_STATUS_RI : 0;
3325 status_set |= (set & TIOCM_CD) ? SMUX_CMD_STATUS_DCD : 0;
3326
3327 status_clear |= (clear & TIOCM_DTR) ? SMUX_CMD_STATUS_RTC : 0;
3328 status_clear |= (clear & TIOCM_RTS) ? SMUX_CMD_STATUS_RTR : 0;
3329 status_clear |= (clear & TIOCM_RI) ? SMUX_CMD_STATUS_RI : 0;
3330 status_clear |= (clear & TIOCM_CD) ? SMUX_CMD_STATUS_DCD : 0;
3331
3332 old_status = ch->local_tiocm;
3333 ch->local_tiocm |= status_set;
3334 ch->local_tiocm &= ~status_clear;
3335
3336 if (ch->local_tiocm != old_status) {
3337 ret = smux_send_status_cmd(ch);
3338 tx_ready = 1;
3339 }
3340 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
3341
3342 if (tx_ready)
3343 list_channel(ch);
3344
3345 return ret;
3346}
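/*
 * Example (hypothetical lcid): assert DTR and RTS. The driver maps
 * these to SMUX_CMD_STATUS_RTC and SMUX_CMD_STATUS_RTR and only sends
 * a status command to the remote side if the resulting TIOCM state
 * actually changed.
 *
 *	ret = msm_smux_tiocm_set(0, TIOCM_DTR | TIOCM_RTS, 0);
 */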
3347
3348/**********************************************************************/
Eric Holmberged1f00c2012-06-07 09:45:18 -06003349/* Subsystem Restart */
3350/**********************************************************************/
3351static struct notifier_block ssr_notifier = {
3352 .notifier_call = ssr_notifier_cb,
3353};
3354
3355/**
3356 * Handle Subsystem Restart (SSR) notifications.
3357 *
3358 * @this Pointer to ssr_notifier
3359 * @code SSR Code
3360 * @data Data pointer (not used)
3361 */
static int ssr_notifier_cb(struct notifier_block *this,
				unsigned long code,
				void *data)
{
	unsigned long flags;
	int i;
	int tmp;
	int power_off_uart = 0;

	if (code == SUBSYS_BEFORE_SHUTDOWN) {
		SMUX_DBG("%s: ssr - before shutdown\n", __func__);
		mutex_lock(&smux.mutex_lha0);
		smux.in_reset = 1;
		mutex_unlock(&smux.mutex_lha0);
		return NOTIFY_DONE;
	} else if (code == SUBSYS_AFTER_POWERUP) {
		/* re-register platform devices */
		SMUX_DBG("%s: ssr - after power-up\n", __func__);
		mutex_lock(&smux.mutex_lha0);
		if (smux.ld_open_count > 0
				&& !smux.platform_devs_registered) {
			for (i = 0; i < ARRAY_SIZE(smux_devs); ++i) {
				SMUX_DBG("%s: register pdev '%s'\n",
						__func__, smux_devs[i].name);
				smux_devs[i].dev.release = smux_pdev_release;
				tmp = platform_device_register(&smux_devs[i]);
				if (tmp)
					pr_err("%s: error %d registering device %s\n",
						__func__, tmp, smux_devs[i].name);
			}
			smux.platform_devs_registered = 1;
		}
		mutex_unlock(&smux.mutex_lha0);
		return NOTIFY_DONE;
	} else if (code != SUBSYS_AFTER_SHUTDOWN) {
		return NOTIFY_DONE;
	}
	SMUX_DBG("%s: ssr - after shutdown\n", __func__);

	/* Cleanup channels */
	smux_flush_workqueues();
	mutex_lock(&smux.mutex_lha0);
	if (smux.ld_open_count > 0) {
		smux_lch_purge();
		if (smux.tty)
			tty_driver_flush_buffer(smux.tty);

		/* Unregister platform devices */
		if (smux.platform_devs_registered) {
			for (i = 0; i < ARRAY_SIZE(smux_devs); ++i) {
				SMUX_DBG("%s: unregister pdev '%s'\n",
						__func__, smux_devs[i].name);
				platform_device_unregister(&smux_devs[i]);
			}
			smux.platform_devs_registered = 0;
		}

		/* Power-down UART */
		spin_lock_irqsave(&smux.tx_lock_lha2, flags);
		if (smux.power_state != SMUX_PWR_OFF) {
			SMUX_PWR("%s: SSR - turning off UART\n", __func__);
			smux.power_state = SMUX_PWR_OFF;
			power_off_uart = 1;
		}
		smux.powerdown_enabled = 0;
		spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);

		if (power_off_uart)
			smux_uart_power_off_atomic();
	}
	smux.tx_activity_flag = 0;
	smux.rx_activity_flag = 0;
	smux.rx_state = SMUX_RX_IDLE;
	smux.in_reset = 0;
	mutex_unlock(&smux.mutex_lha0);

	return NOTIFY_DONE;
}
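
/*
 * Illustrative sequence (added commentary): for a full external-modem
 * restart, the callback above is expected to fire in this order:
 *
 *	SUBSYS_BEFORE_SHUTDOWN	- set smux.in_reset so TX/RX paths bail out
 *	SUBSYS_AFTER_SHUTDOWN	- purge channels, unregister the platform
 *				  devices, and force the UART off
 *	SUBSYS_AFTER_POWERUP	- re-register the platform devices so
 *				  clients can probe and re-open channels
 *
 * All other codes fall through and return NOTIFY_DONE untouched.
 */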

/**********************************************************************/
/* Line Discipline Interface */
/**********************************************************************/
static void smux_pdev_release(struct device *dev)
{
	struct platform_device *pdev;

	pdev = container_of(dev, struct platform_device, dev);
	SMUX_DBG("%s: releasing pdev %p '%s'\n", __func__, pdev, pdev->name);
	memset(&pdev->dev, 0x0, sizeof(pdev->dev));
}
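
/*
 * Note (added commentary): the smux_devs[] entries are statically
 * allocated, so after platform_device_unregister() the embedded struct
 * device still carries stale state. Clearing it in the release callback
 * above is what allows the same platform_device to be registered again
 * on the next ldisc open or SSR power-up.
 */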

static int smuxld_open(struct tty_struct *tty)
{
	int i;
	int tmp;
	unsigned long flags;

	if (!smux.is_initialized)
		return -ENODEV;

	mutex_lock(&smux.mutex_lha0);
	if (smux.ld_open_count) {
		pr_err("%s: %p multiple instances not supported\n",
			__func__, tty);
		mutex_unlock(&smux.mutex_lha0);
		return -EEXIST;
	}

	if (tty->ops->write == NULL) {
		pr_err("%s: tty->ops->write is NULL\n", __func__);
		mutex_unlock(&smux.mutex_lha0);
		return -EINVAL;
	}

	/* connect to TTY */
	++smux.ld_open_count;
	smux.in_reset = 0;
	smux.tty = tty;
	tty->disc_data = &smux;
	tty->receive_room = TTY_RECEIVE_ROOM;
	tty_driver_flush_buffer(tty);

	/* power-down the UART if we are idle */
	spin_lock_irqsave(&smux.tx_lock_lha2, flags);
	if (smux.power_state == SMUX_PWR_OFF) {
		SMUX_PWR("%s: powering off uart\n", __func__);
		smux.power_state = SMUX_PWR_OFF_FLUSH;
		spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
		queue_work(smux_tx_wq, &smux_inactivity_work);
	} else {
		spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
	}

	/* register platform devices */
	for (i = 0; i < ARRAY_SIZE(smux_devs); ++i) {
		SMUX_DBG("%s: register pdev '%s'\n",
				__func__, smux_devs[i].name);
		smux_devs[i].dev.release = smux_pdev_release;
		tmp = platform_device_register(&smux_devs[i]);
		if (tmp)
			pr_err("%s: error %d registering device %s\n",
				__func__, tmp, smux_devs[i].name);
	}
	smux.platform_devs_registered = 1;
	mutex_unlock(&smux.mutex_lha0);
	return 0;
}
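
/*
 * Example (illustrative sketch, not part of the driver): user space
 * attaches this line discipline to the serial port with TIOCSETD. The
 * device path is hypothetical, and N_SMUX is assumed to be visible to
 * user space through the kernel headers.
 *
 *	int fd = open("/dev/ttyHS0", O_RDWR | O_NOCTTY);
 *	int ldisc = N_SMUX;
 *
 *	if (fd >= 0 && ioctl(fd, TIOCSETD, &ldisc) < 0)
 *		perror("TIOCSETD N_SMUX");
 *
 * The ldisc stays attached until the fd is closed or the discipline is
 * changed, at which point smuxld_close() runs.
 */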

static void smuxld_close(struct tty_struct *tty)
{
	unsigned long flags;
	int power_up_uart = 0;
	int i;

	SMUX_DBG("%s: ldisc unload\n", __func__);
	smux_flush_workqueues();

	mutex_lock(&smux.mutex_lha0);
	if (smux.ld_open_count <= 0) {
		pr_err("%s: invalid ld count %d\n", __func__,
			smux.ld_open_count);
		mutex_unlock(&smux.mutex_lha0);
		return;
	}
	--smux.ld_open_count;

	/* Cleanup channels */
	smux_lch_purge();

	/* Unregister platform devices */
	if (smux.platform_devs_registered) {
		for (i = 0; i < ARRAY_SIZE(smux_devs); ++i) {
			SMUX_DBG("%s: unregister pdev '%s'\n",
					__func__, smux_devs[i].name);
			platform_device_unregister(&smux_devs[i]);
		}
		smux.platform_devs_registered = 0;
	}

	/* Schedule UART power-up if it's down */
	spin_lock_irqsave(&smux.tx_lock_lha2, flags);
	if (smux.power_state == SMUX_PWR_OFF)
		power_up_uart = 1;
	smux.power_state = SMUX_PWR_OFF;
	smux.powerdown_enabled = 0;
	smux.tx_activity_flag = 0;
	smux.rx_activity_flag = 0;
	spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);

	if (power_up_uart)
		smux_uart_power_on_atomic();

	smux.rx_state = SMUX_RX_IDLE;

	/* Disconnect from TTY */
	smux.tty = NULL;
	mutex_unlock(&smux.mutex_lha0);
	SMUX_DBG("%s: ldisc complete\n", __func__);
}
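
/*
 * Note (added commentary): close powers the UART back *on* when smux had
 * it off, handing the port to its next owner in a usable, clocked state;
 * powerdown_enabled is cleared so nothing keeps gating the UART clocks
 * once the line discipline is gone.
 */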

/**
 * Receive data from TTY Line Discipline.
 *
 * @tty	TTY structure
 * @cp	Character data
 * @fp	Flag data
 * @count	Size of character and flag data
 */
void smuxld_receive_buf(struct tty_struct *tty, const unsigned char *cp,
			char *fp, int count)
{
	int i;
	int last_idx = 0;
	const char *tty_name = NULL;
	char *f;

	if (smux_debug_mask & MSM_SMUX_DEBUG)
		print_hex_dump(KERN_INFO, "smux tty rx: ", DUMP_PREFIX_OFFSET,
			16, 1, cp, count, true);

	/* verify error flags */
	for (i = 0, f = fp; i < count; ++i, ++f) {
		if (*f != TTY_NORMAL) {
			if (tty)
				tty_name = tty->name;
			pr_err("%s: TTY %s Error %d (%s)\n", __func__,
				tty_name, *f, tty_flag_to_str(*f));

			/* feed all previous valid data to the parser */
			smux_rx_state_machine(cp + last_idx, i - last_idx,
					TTY_NORMAL);

			/* feed bad data to parser */
			smux_rx_state_machine(cp + i, 1, *f);
			last_idx = i + 1;
		}
	}

	/* feed data to RX state machine */
	smux_rx_state_machine(cp + last_idx, count - last_idx, TTY_NORMAL);
}
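
/*
 * Worked example (added for clarity): if count == 5 and fp marks byte 2
 * as TTY_FRAME, the loop above produces three parser calls:
 *
 *	smux_rx_state_machine(cp + 0, 2, TTY_NORMAL);	// bytes 0-1
 *	smux_rx_state_machine(cp + 2, 1, TTY_FRAME);	// the bad byte
 *	smux_rx_state_machine(cp + 3, 2, TTY_NORMAL);	// bytes 3-4
 *
 * so the RX state machine always sees the error flag attached to the
 * exact byte it applies to.
 */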

static void smuxld_flush_buffer(struct tty_struct *tty)
{
	pr_err("%s: not supported\n", __func__);
}

static ssize_t smuxld_chars_in_buffer(struct tty_struct *tty)
{
	pr_err("%s: not supported\n", __func__);
	return -ENODEV;
}

static ssize_t smuxld_read(struct tty_struct *tty, struct file *file,
		unsigned char __user *buf, size_t nr)
{
	pr_err("%s: not supported\n", __func__);
	return -ENODEV;
}

static ssize_t smuxld_write(struct tty_struct *tty, struct file *file,
		const unsigned char *buf, size_t nr)
{
	pr_err("%s: not supported\n", __func__);
	return -ENODEV;
}

static int smuxld_ioctl(struct tty_struct *tty, struct file *file,
		unsigned int cmd, unsigned long arg)
{
	pr_err("%s: not supported\n", __func__);
	return -ENODEV;
}

static unsigned int smuxld_poll(struct tty_struct *tty, struct file *file,
		struct poll_table_struct *tbl)
{
	pr_err("%s: not supported\n", __func__);
	return -ENODEV;
}

static void smuxld_write_wakeup(struct tty_struct *tty)
{
	pr_err("%s: not supported\n", __func__);
}

static struct tty_ldisc_ops smux_ldisc_ops = {
	.owner = THIS_MODULE,
	.magic = TTY_LDISC_MAGIC,
	.name = "n_smux",
	.open = smuxld_open,
	.close = smuxld_close,
	.flush_buffer = smuxld_flush_buffer,
	.chars_in_buffer = smuxld_chars_in_buffer,
	.read = smuxld_read,
	.write = smuxld_write,
	.ioctl = smuxld_ioctl,
	.poll = smuxld_poll,
	.receive_buf = smuxld_receive_buf,
	.write_wakeup = smuxld_write_wakeup,
};

static int __init smux_init(void)
{
	int ret;

	mutex_init(&smux.mutex_lha0);

	spin_lock_init(&smux.rx_lock_lha1);
	smux.rx_state = SMUX_RX_IDLE;
	smux.power_state = SMUX_PWR_OFF;
	smux.pwr_wakeup_delay_us = 1;
	smux.powerdown_enabled = 0;
	smux.power_ctl_remote_req_received = 0;
	INIT_LIST_HEAD(&smux.power_queue);
	smux.rx_activity_flag = 0;
	smux.tx_activity_flag = 0;
	smux.recv_len = 0;
	smux.tty = NULL;
	smux.ld_open_count = 0;
	smux.in_reset = 0;
	smux.is_initialized = 1;
	smux.platform_devs_registered = 0;
	smux_byte_loopback = 0;

	spin_lock_init(&smux.tx_lock_lha2);
	INIT_LIST_HEAD(&smux.lch_tx_ready_list);

	ret = tty_register_ldisc(N_SMUX, &smux_ldisc_ops);
	if (ret != 0) {
		pr_err("%s: error %d registering line discipline\n",
				__func__, ret);
		return ret;
	}

	subsys_notif_register_notifier("external_modem", &ssr_notifier);

	ret = lch_init();
	if (ret != 0) {
		pr_err("%s: lch_init failed\n", __func__);
		/* don't leave the line discipline registered on failure */
		tty_unregister_ldisc(N_SMUX);
		return ret;
	}

	return 0;
}

static void __exit smux_exit(void)
{
	int ret;

	ret = tty_unregister_ldisc(N_SMUX);
	if (ret != 0) {
		pr_err("%s: error %d unregistering line discipline\n",
			__func__, ret);
		return;
	}
}

module_init(smux_init);
module_exit(smux_exit);

MODULE_DESCRIPTION("Serial Mux TTY Line Discipline");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_LDISC(N_SMUX);