Eric Holmberg8ed30f22012-05-10 19:16:51 -06001/* drivers/tty/n_smux.c
2 *
3 * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
4 *
5 * This software is licensed under the terms of the GNU General Public
6 * License version 2, as published by the Free Software Foundation, and
7 * may be copied, distributed, and modified under those terms.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 */
15#include <linux/module.h>
16#include <linux/types.h>
17#include <linux/errno.h>
18#include <linux/tty.h>
19#include <linux/tty_flip.h>
20#include <linux/tty_driver.h>
21#include <linux/smux.h>
22#include <linux/list.h>
23#include <linux/kfifo.h>
24#include <linux/slab.h>
25#include <linux/types.h>
26#include <linux/platform_device.h>
27#include <linux/delay.h>
Eric Holmberged1f00c2012-06-07 09:45:18 -060028#include <mach/subsystem_notif.h>
29#include <mach/subsystem_restart.h>
Eric Holmberg8ed30f22012-05-10 19:16:51 -060030#include <mach/msm_serial_hs.h>
31#include "smux_private.h"
32#include "smux_loopback.h"
33
34#define SMUX_NOTIFY_FIFO_SIZE 128
35#define SMUX_TX_QUEUE_SIZE 256
Eric Holmberg8ed30f22012-05-10 19:16:51 -060036#define SMUX_WM_LOW 2
37#define SMUX_WM_HIGH 4
38#define SMUX_PKT_LOG_SIZE 80
39
40/* Maximum size we can accept in a single RX buffer */
41#define TTY_RECEIVE_ROOM 65536
42#define TTY_BUFFER_FULL_WAIT_MS 50
43
44/* maximum sleep time between wakeup attempts */
45#define SMUX_WAKEUP_DELAY_MAX (1 << 20)
46
47/* minimum delay for scheduling delayed work */
48#define SMUX_WAKEUP_DELAY_MIN (1 << 15)
49
50/* inactivity timeout for no rx/tx activity */
51#define SMUX_INACTIVITY_TIMEOUT_MS 1000
52
Eric Holmbergb8435c82012-06-05 14:51:29 -060053/* RX get_rx_buffer retry timeout values */
54#define SMUX_RX_RETRY_MIN_MS (1 << 0) /* 1 ms */
55#define SMUX_RX_RETRY_MAX_MS (1 << 10) /* 1024 ms */
56
Eric Holmberg8ed30f22012-05-10 19:16:51 -060057enum {
58 MSM_SMUX_DEBUG = 1U << 0,
59 MSM_SMUX_INFO = 1U << 1,
60 MSM_SMUX_POWER_INFO = 1U << 2,
61 MSM_SMUX_PKT = 1U << 3,
62};
63
64static int smux_debug_mask;
65module_param_named(debug_mask, smux_debug_mask,
66 int, S_IRUGO | S_IWUSR | S_IWGRP);
67
68/* Byte loopback and simulated wakeup delay used for testing */
69int smux_byte_loopback;
70module_param_named(byte_loopback, smux_byte_loopback,
71 int, S_IRUGO | S_IWUSR | S_IWGRP);
72int smux_simulate_wakeup_delay = 1;
73module_param_named(simulate_wakeup_delay, smux_simulate_wakeup_delay,
74 int, S_IRUGO | S_IWUSR | S_IWGRP);
75
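/*
 * The debug_mask bits above can be changed at runtime through the module
 * parameter interface. Illustrative example (the path assumes the module is
 * named n_smux; adjust for the actual module name):
 *
 *	echo 0xf > /sys/module/n_smux/parameters/debug_mask
 *
 * enables MSM_SMUX_DEBUG, MSM_SMUX_INFO, MSM_SMUX_POWER_INFO and
 * MSM_SMUX_PKT logging.
 */
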
76#define SMUX_DBG(x...) do { \
77 if (smux_debug_mask & MSM_SMUX_DEBUG) \
78 pr_info(x); \
79} while (0)
80
Eric Holmbergff0b0112012-06-08 15:06:57 -060081#define SMUX_PWR(x...) do { \
82 if (smux_debug_mask & MSM_SMUX_POWER_INFO) \
83 pr_info(x); \
84} while (0)
85
Eric Holmberg8ed30f22012-05-10 19:16:51 -060086#define SMUX_LOG_PKT_RX(pkt) do { \
87 if (smux_debug_mask & MSM_SMUX_PKT) \
88 smux_log_pkt(pkt, 1); \
89} while (0)
90
91#define SMUX_LOG_PKT_TX(pkt) do { \
92 if (smux_debug_mask & MSM_SMUX_PKT) \
93 smux_log_pkt(pkt, 0); \
94} while (0)
95
96/**
97 * Return true if channel is fully opened (both
98 * local and remote sides are in the OPENED state).
99 */
100#define IS_FULLY_OPENED(ch) \
101 (ch && (ch)->local_state == SMUX_LCH_LOCAL_OPENED \
102 && (ch)->remote_state == SMUX_LCH_REMOTE_OPENED)
103
104static struct platform_device smux_devs[] = {
105 {.name = "SMUX_CTL", .id = -1},
106 {.name = "SMUX_RMNET", .id = -1},
107 {.name = "SMUX_DUN_DATA_HSUART", .id = 0},
108 {.name = "SMUX_RMNET_DATA_HSUART", .id = 1},
109 {.name = "SMUX_RMNET_CTL_HSUART", .id = 0},
110 {.name = "SMUX_DIAG", .id = -1},
111};
112
113enum {
114 SMUX_CMD_STATUS_RTC = 1 << 0,
115 SMUX_CMD_STATUS_RTR = 1 << 1,
116 SMUX_CMD_STATUS_RI = 1 << 2,
117 SMUX_CMD_STATUS_DCD = 1 << 3,
118 SMUX_CMD_STATUS_FLOW_CNTL = 1 << 4,
119};
120
121/* Channel mode */
122enum {
123 SMUX_LCH_MODE_NORMAL,
124 SMUX_LCH_MODE_LOCAL_LOOPBACK,
125 SMUX_LCH_MODE_REMOTE_LOOPBACK,
126};
127
128enum {
129 SMUX_RX_IDLE,
130 SMUX_RX_MAGIC,
131 SMUX_RX_HDR,
132 SMUX_RX_PAYLOAD,
133 SMUX_RX_FAILURE,
134};
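
/*
 * Typical rx_state progression (driven by smux_rx_handle_*() below):
 *
 *	IDLE  --SMUX_MAGIC_WORD1-->  MAGIC  --SMUX_MAGIC_WORD2-->  HDR
 *	HDR   --complete smux_hdr_t received-->  PAYLOAD
 *	PAYLOAD  --pkt_remain == 0, packet dispatched-->  IDLE
 *
 * A TTY receive error while parsing a packet calls smux_enter_reset() and
 * moves the machine to SMUX_RX_FAILURE.
 */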
135
136/**
137 * Power states.
138 *
139 * The _FLUSH states are internal transitional states and are not part of the
140 * official state machine.
141 */
142enum {
143 SMUX_PWR_OFF,
144 SMUX_PWR_TURNING_ON,
145 SMUX_PWR_ON,
146 SMUX_PWR_TURNING_OFF_FLUSH,
147 SMUX_PWR_TURNING_OFF,
148 SMUX_PWR_OFF_FLUSH,
149};
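
/*
 * Typical power state flow (a sketch based on the wakeup and power-control
 * handlers in this file; the TX and inactivity workers complete the *_FLUSH
 * transitions):
 *
 *	wakeup:     SMUX_PWR_OFF -> SMUX_PWR_TURNING_ON -> SMUX_PWR_ON
 *	power-down: SMUX_PWR_ON -> SMUX_PWR_TURNING_OFF_FLUSH ->
 *	            SMUX_PWR_TURNING_OFF -> SMUX_PWR_OFF_FLUSH -> SMUX_PWR_OFF
 */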
150
151/**
152 * Logical Channel Structure. One instance per channel.
153 *
154 * Locking Hierarchy
155 * Each lock has a postfix that describes its locking level. If multiple locks
156 * are required, they must be acquired in order of increasing hierarchy number,
157 * which prevents deadlock.
158 *
159 * Locking Example
160 * If state_lock_lhb1 is currently held and the TX list needs to be
161 * manipulated, then tx_lock_lhb2 may be locked since it's locking hierarchy
162 * is greater. However, if tx_lock_lhb2 is held, then state_lock_lhb1 may
163 * not be acquired since it would result in a deadlock.
164 *
165 * Note that the Line Discipline locks (*_lha) should always be acquired
166 * before the logical channel locks.
167 */
168struct smux_lch_t {
169 /* channel state */
170 spinlock_t state_lock_lhb1;
171 uint8_t lcid;
172 unsigned local_state;
173 unsigned local_mode;
174 uint8_t local_tiocm;
175
176 unsigned remote_state;
177 unsigned remote_mode;
178 uint8_t remote_tiocm;
179
180 int tx_flow_control;
181
182 /* client callbacks and private data */
183 void *priv;
184 void (*notify)(void *priv, int event_type, const void *metadata);
185 int (*get_rx_buffer)(void *priv, void **pkt_priv, void **buffer,
186 int size);
187
Eric Holmbergb8435c82012-06-05 14:51:29 -0600188 /* RX Info */
189 struct list_head rx_retry_queue;
190 unsigned rx_retry_queue_cnt;
191 struct delayed_work rx_retry_work;
192
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600193 /* TX Info */
194 spinlock_t tx_lock_lhb2;
195 struct list_head tx_queue;
196 struct list_head tx_ready_list;
197 unsigned tx_pending_data_cnt;
198 unsigned notify_lwm;
199};
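
/*
 * Minimal nested-locking sketch following the hierarchy described above
 * (this is the pattern used in smux_lch_purge()):
 *
 *	spin_lock_irqsave(&ch->state_lock_lhb1, flags);    <- level b1
 *	spin_lock(&ch->tx_lock_lhb2);                       <- b2 > b1, allowed
 *	... manipulate ch->tx_queue ...
 *	spin_unlock(&ch->tx_lock_lhb2);
 *	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
 */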
200
201union notifier_metadata {
202 struct smux_meta_disconnected disconnected;
203 struct smux_meta_read read;
204 struct smux_meta_write write;
205 struct smux_meta_tiocm tiocm;
206};
207
208struct smux_notify_handle {
209 void (*notify)(void *priv, int event_type, const void *metadata);
210 void *priv;
211 int event_type;
212 union notifier_metadata *metadata;
213};
214
215/**
Eric Holmbergb8435c82012-06-05 14:51:29 -0600216 * Get RX Buffer Retry structure.
217 *
218 * This is used for clients that are unable to provide an RX buffer
219 * immediately. This structure temporarily holds the packet data while the
220 * buffer request is retried.
221 */
222struct smux_rx_pkt_retry {
223 struct smux_pkt_t *pkt;
224 struct list_head rx_retry_list;
225 unsigned timeout_in_ms;
226};
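
/*
 * Retry flow: if a client's get_rx_buffer() callback returns -EAGAIN (or
 * returns 0 with a NULL buffer), smux_handle_rx_data_cmd() copies the packet
 * into one of these entries on the channel's rx_retry_queue and schedules
 * rx_retry_work after timeout_in_ms, starting at SMUX_RX_RETRY_MIN_MS.
 */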
227
228/**
Eric Holmberg0560f7a2012-05-31 15:50:26 -0600229 * Receive worker data structure.
230 *
231 * One instance is created for every call to smux_rx_state_machine.
232 */
233struct smux_rx_worker_data {
234 const unsigned char *data;
235 int len;
236 int flag;
237
238 struct work_struct work;
239 struct completion work_complete;
240};
241
242/**
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600243 * Line discipline and module structure.
244 *
245 * Only one instance exists since multiple instances of the line discipline
246 * are not allowed.
247 */
248struct smux_ldisc_t {
Eric Holmberged1f00c2012-06-07 09:45:18 -0600249 struct mutex mutex_lha0;
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600250
251 int is_initialized;
252 int in_reset;
253 int ld_open_count;
254 struct tty_struct *tty;
255
Eric Holmberg0560f7a2012-05-31 15:50:26 -0600256 /* RX State Machine (single-threaded access by smux_rx_wq) */
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600257 unsigned char recv_buf[SMUX_MAX_PKT_SIZE];
258 unsigned int recv_len;
259 unsigned int pkt_remain;
260 unsigned rx_state;
Eric Holmberg0560f7a2012-05-31 15:50:26 -0600261
262 /* RX Activity - accessed by multiple threads */
263 spinlock_t rx_lock_lha1;
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600264 unsigned rx_activity_flag;
265
266 /* TX / Power */
267 spinlock_t tx_lock_lha2;
268 struct list_head lch_tx_ready_list;
269 unsigned power_state;
270 unsigned pwr_wakeup_delay_us;
271 unsigned tx_activity_flag;
272 unsigned powerdown_enabled;
Eric Holmbergffddd4c2012-06-08 12:37:51 -0600273 struct list_head power_queue;
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600274};
275
276
277/* data structures */
278static struct smux_lch_t smux_lch[SMUX_NUM_LOGICAL_CHANNELS];
279static struct smux_ldisc_t smux;
280static const char *tty_error_type[] = {
281 [TTY_NORMAL] = "normal",
282 [TTY_OVERRUN] = "overrun",
283 [TTY_BREAK] = "break",
284 [TTY_PARITY] = "parity",
285 [TTY_FRAME] = "framing",
286};
287
288static const char *smux_cmds[] = {
289 [SMUX_CMD_DATA] = "DATA",
290 [SMUX_CMD_OPEN_LCH] = "OPEN",
291 [SMUX_CMD_CLOSE_LCH] = "CLOSE",
292 [SMUX_CMD_STATUS] = "STATUS",
293 [SMUX_CMD_PWR_CTL] = "PWR",
294 [SMUX_CMD_BYTE] = "Raw Byte",
295};
296
297static void smux_notify_local_fn(struct work_struct *work);
298static DECLARE_WORK(smux_notify_local, smux_notify_local_fn);
299
300static struct workqueue_struct *smux_notify_wq;
301static size_t handle_size;
302static struct kfifo smux_notify_fifo;
303static int queued_fifo_notifications;
304static DEFINE_SPINLOCK(notify_lock_lhc1);
305
306static struct workqueue_struct *smux_tx_wq;
Eric Holmberg0560f7a2012-05-31 15:50:26 -0600307static struct workqueue_struct *smux_rx_wq;
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600308static void smux_tx_worker(struct work_struct *work);
309static DECLARE_WORK(smux_tx_work, smux_tx_worker);
310
311static void smux_wakeup_worker(struct work_struct *work);
Eric Holmbergb8435c82012-06-05 14:51:29 -0600312static void smux_rx_retry_worker(struct work_struct *work);
Eric Holmberg0560f7a2012-05-31 15:50:26 -0600313static void smux_rx_worker(struct work_struct *work);
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600314static DECLARE_WORK(smux_wakeup_work, smux_wakeup_worker);
315static DECLARE_DELAYED_WORK(smux_wakeup_delayed_work, smux_wakeup_worker);
316
317static void smux_inactivity_worker(struct work_struct *work);
318static DECLARE_WORK(smux_inactivity_work, smux_inactivity_worker);
319static DECLARE_DELAYED_WORK(smux_delayed_inactivity_work,
320 smux_inactivity_worker);
321
322static long msm_smux_tiocm_get_atomic(struct smux_lch_t *ch);
323static void list_channel(struct smux_lch_t *ch);
324static int smux_send_status_cmd(struct smux_lch_t *ch);
325static int smux_dispatch_rx_pkt(struct smux_pkt_t *pkt);
Eric Holmbergffddd4c2012-06-08 12:37:51 -0600326static void smux_flush_tty(void);
Eric Holmberged1f00c2012-06-07 09:45:18 -0600327static void smux_purge_ch_tx_queue(struct smux_lch_t *ch);
328static int schedule_notify(uint8_t lcid, int event,
329 const union notifier_metadata *metadata);
330static int ssr_notifier_cb(struct notifier_block *this,
331 unsigned long code,
332 void *data);
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600333
334/**
335 * Convert TTY Error Flags to string for logging purposes.
336 *
337 * @flag TTY_* flag
338 * @returns String description or NULL if unknown
339 */
340static const char *tty_flag_to_str(unsigned flag)
341{
342 if (flag < ARRAY_SIZE(tty_error_type))
343 return tty_error_type[flag];
344 return NULL;
345}
346
347/**
348 * Convert SMUX Command to string for logging purposes.
349 *
350 * @cmd SMUX command
351 * @returns String description or NULL if unknown
352 */
353static const char *cmd_to_str(unsigned cmd)
354{
355 if (cmd < ARRAY_SIZE(smux_cmds))
356 return smux_cmds[cmd];
357 return NULL;
358}
359
360/**
361 * Set the reset state due to an unrecoverable failure.
362 */
363static void smux_enter_reset(void)
364{
365 pr_err("%s: unrecoverable failure, waiting for ssr\n", __func__);
366 smux.in_reset = 1;
367}
368
369static int lch_init(void)
370{
371 unsigned int id;
372 struct smux_lch_t *ch;
373 int i = 0;
374
375 handle_size = sizeof(struct smux_notify_handle *);
376
377 smux_notify_wq = create_singlethread_workqueue("smux_notify_wq");
378 smux_tx_wq = create_singlethread_workqueue("smux_tx_wq");
Eric Holmberg0560f7a2012-05-31 15:50:26 -0600379 smux_rx_wq = create_singlethread_workqueue("smux_rx_wq");
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600380
381 if (!smux_notify_wq || !smux_tx_wq || !smux_rx_wq) {
382 SMUX_DBG("%s: create_singlethread_workqueue ENOMEM\n",
383 __func__);
384 return -ENOMEM;
385 }
386
387 i |= kfifo_alloc(&smux_notify_fifo,
388 SMUX_NOTIFY_FIFO_SIZE * handle_size,
389 GFP_KERNEL);
390 i |= smux_loopback_init();
391
392 if (i) {
393 pr_err("%s: out of memory error\n", __func__);
394 return -ENOMEM;
395 }
396
397 for (id = 0 ; id < SMUX_NUM_LOGICAL_CHANNELS; id++) {
398 ch = &smux_lch[id];
399
400 spin_lock_init(&ch->state_lock_lhb1);
401 ch->lcid = id;
402 ch->local_state = SMUX_LCH_LOCAL_CLOSED;
403 ch->local_mode = SMUX_LCH_MODE_NORMAL;
404 ch->local_tiocm = 0x0;
405 ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
406 ch->remote_mode = SMUX_LCH_MODE_NORMAL;
407 ch->remote_tiocm = 0x0;
408 ch->tx_flow_control = 0;
409 ch->priv = 0;
410 ch->notify = 0;
411 ch->get_rx_buffer = 0;
412
Eric Holmbergb8435c82012-06-05 14:51:29 -0600413 INIT_LIST_HEAD(&ch->rx_retry_queue);
414 ch->rx_retry_queue_cnt = 0;
415 INIT_DELAYED_WORK(&ch->rx_retry_work, smux_rx_retry_worker);
416
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600417 spin_lock_init(&ch->tx_lock_lhb2);
418 INIT_LIST_HEAD(&ch->tx_queue);
419 INIT_LIST_HEAD(&ch->tx_ready_list);
420 ch->tx_pending_data_cnt = 0;
421 ch->notify_lwm = 0;
422 }
423
424 return 0;
425}
426
Eric Holmberged1f00c2012-06-07 09:45:18 -0600427/**
428 * Empty and cleanup all SMUX logical channels for subsystem restart or line
429 * discipline disconnect.
430 */
431static void smux_lch_purge(void)
432{
433 struct smux_lch_t *ch;
434 unsigned long flags;
435 int i;
436
437 /* Empty TX ready list */
438 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
439 while (!list_empty(&smux.lch_tx_ready_list)) {
440 SMUX_DBG("%s: emptying ready list %p\n",
441 __func__, smux.lch_tx_ready_list.next);
442 ch = list_first_entry(&smux.lch_tx_ready_list,
443 struct smux_lch_t,
444 tx_ready_list);
445 list_del(&ch->tx_ready_list);
446 INIT_LIST_HEAD(&ch->tx_ready_list);
447 }
Eric Holmbergffddd4c2012-06-08 12:37:51 -0600448
449 /* Purge Power Queue */
450 while (!list_empty(&smux.power_queue)) {
451 struct smux_pkt_t *pkt;
452
453 pkt = list_first_entry(&smux.power_queue,
454 struct smux_pkt_t,
455 list);
456 SMUX_DBG("%s: emptying power queue pkt=%p\n",
457 __func__, pkt);
458 smux_free_pkt(pkt);
459 }
Eric Holmberged1f00c2012-06-07 09:45:18 -0600460 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
461
462 /* Close all ports */
463 for (i = 0 ; i < SMUX_NUM_LOGICAL_CHANNELS; i++) {
464 ch = &smux_lch[i];
465 SMUX_DBG("%s: cleaning up lcid %d\n", __func__, i);
466
467 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
468
469 /* Purge TX queue */
470 spin_lock(&ch->tx_lock_lhb2);
471 smux_purge_ch_tx_queue(ch);
472 spin_unlock(&ch->tx_lock_lhb2);
473
474 /* Notify user of disconnect and reset channel state */
475 if (ch->local_state == SMUX_LCH_LOCAL_OPENED ||
476 ch->local_state == SMUX_LCH_LOCAL_CLOSING) {
477 union notifier_metadata meta;
478
479 meta.disconnected.is_ssr = smux.in_reset;
480 schedule_notify(ch->lcid, SMUX_DISCONNECTED, &meta);
481 }
482
483 ch->local_state = SMUX_LCH_LOCAL_CLOSED;
484 ch->local_mode = SMUX_LCH_MODE_NORMAL;
485 ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
486 ch->remote_mode = SMUX_LCH_MODE_NORMAL;
487 ch->tx_flow_control = 0;
488
489 /* Purge RX retry queue */
490 if (ch->rx_retry_queue_cnt)
491 queue_delayed_work(smux_rx_wq, &ch->rx_retry_work, 0);
492
493 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
494 }
495
496 /* Flush TX/RX workqueues */
497 SMUX_DBG("%s: flushing tx wq\n", __func__);
498 flush_workqueue(smux_tx_wq);
499 SMUX_DBG("%s: flushing rx wq\n", __func__);
500 flush_workqueue(smux_rx_wq);
501}
502
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600503int smux_assert_lch_id(uint32_t lcid)
504{
505 if (lcid >= SMUX_NUM_LOGICAL_CHANNELS)
506 return -ENXIO;
507 else
508 return 0;
509}
510
511/**
512 * Log packet information for debug purposes.
513 *
514 * @pkt Packet to log
515 * @is_recv 1 = RX packet; 0 = TX Packet
516 *
517 * [DIR][LCID] [LOCAL_STATE][LOCAL_MODE]:[REMOTE_STATE][REMOTE_MODE] PKT Info
518 *
519 * PKT Info:
520 * [CMD] flags [flags] len [PAYLOAD_LEN]:[PAD_LEN] [Payload hex bytes]
521 *
522 * Direction: R = Receive, S = Send
523 * Local State: C = Closed; c = closing; o = opening; O = Opened
524 * Local Mode: L = Local loopback; R = Remote loopback; N = Normal
525 * Remote State: C = Closed; O = Opened
526 * Remote Mode: R = Remote loopback; N = Normal
527 */
528static void smux_log_pkt(struct smux_pkt_t *pkt, int is_recv)
529{
530 char logbuf[SMUX_PKT_LOG_SIZE];
531 char cmd_extra[16];
532 int i = 0;
533 int count;
534 int len;
535 char local_state;
536 char local_mode;
537 char remote_state;
538 char remote_mode;
539 struct smux_lch_t *ch;
540 unsigned char *data;
541
542 ch = &smux_lch[pkt->hdr.lcid];
543
544 switch (ch->local_state) {
545 case SMUX_LCH_LOCAL_CLOSED:
546 local_state = 'C';
547 break;
548 case SMUX_LCH_LOCAL_OPENING:
549 local_state = 'o';
550 break;
551 case SMUX_LCH_LOCAL_OPENED:
552 local_state = 'O';
553 break;
554 case SMUX_LCH_LOCAL_CLOSING:
555 local_state = 'c';
556 break;
557 default:
558 local_state = 'U';
559 break;
560 }
561
562 switch (ch->local_mode) {
563 case SMUX_LCH_MODE_LOCAL_LOOPBACK:
564 local_mode = 'L';
565 break;
566 case SMUX_LCH_MODE_REMOTE_LOOPBACK:
567 local_mode = 'R';
568 break;
569 case SMUX_LCH_MODE_NORMAL:
570 local_mode = 'N';
571 break;
572 default:
573 local_mode = 'U';
574 break;
575 }
576
577 switch (ch->remote_state) {
578 case SMUX_LCH_REMOTE_CLOSED:
579 remote_state = 'C';
580 break;
581 case SMUX_LCH_REMOTE_OPENED:
582 remote_state = 'O';
583 break;
584
585 default:
586 remote_state = 'U';
587 break;
588 }
589
590 switch (ch->remote_mode) {
591 case SMUX_LCH_MODE_REMOTE_LOOPBACK:
592 remote_mode = 'R';
593 break;
594 case SMUX_LCH_MODE_NORMAL:
595 remote_mode = 'N';
596 break;
597 default:
598 remote_mode = 'U';
599 break;
600 }
601
602 /* determine command type (ACK, etc) */
603 cmd_extra[0] = '\0';
604 switch (pkt->hdr.cmd) {
605 case SMUX_CMD_OPEN_LCH:
606 if (pkt->hdr.flags & SMUX_CMD_OPEN_ACK)
607 snprintf(cmd_extra, sizeof(cmd_extra), " ACK");
608 break;
609 case SMUX_CMD_CLOSE_LCH:
610 if (pkt->hdr.flags & SMUX_CMD_CLOSE_ACK)
611 snprintf(cmd_extra, sizeof(cmd_extra), " ACK");
612 break;
613 };
614
615 i += snprintf(logbuf + i, SMUX_PKT_LOG_SIZE - i,
616 "smux: %c%d %c%c:%c%c %s%s flags %x len %d:%d ",
617 is_recv ? 'R' : 'S', pkt->hdr.lcid,
618 local_state, local_mode,
619 remote_state, remote_mode,
620 cmd_to_str(pkt->hdr.cmd), cmd_extra, pkt->hdr.flags,
621 pkt->hdr.payload_len, pkt->hdr.pad_len);
622
623 len = (pkt->hdr.payload_len > 16) ? 16 : pkt->hdr.payload_len;
624 data = (unsigned char *)pkt->payload;
625 for (count = 0; count < len; count++)
626 i += snprintf(logbuf + i, SMUX_PKT_LOG_SIZE - i,
627 "%02x ", (unsigned)data[count]);
628
629 pr_info("%s\n", logbuf);
630}
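
/*
 * Example of the resulting log line (illustrative values) for a received
 * 4-byte DATA packet on a fully opened, normal-mode channel 5:
 *
 *	smux: R5 ON:ON DATA flags 0 len 4:0 de ad be ef
 */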
631
632static void smux_notify_local_fn(struct work_struct *work)
633{
634 struct smux_notify_handle *notify_handle = NULL;
635 union notifier_metadata *metadata = NULL;
636 unsigned long flags;
637 int i;
638
639 for (;;) {
640 /* retrieve notification */
641 spin_lock_irqsave(&notify_lock_lhc1, flags);
642 if (kfifo_len(&smux_notify_fifo) >= handle_size) {
643 i = kfifo_out(&smux_notify_fifo,
644 &notify_handle,
645 handle_size);
646 if (i != handle_size) {
647 pr_err("%s: unable to retrieve handle %d expected %d\n",
648 __func__, i, handle_size);
649 spin_unlock_irqrestore(&notify_lock_lhc1, flags);
650 break;
651 }
652 } else {
653 spin_unlock_irqrestore(&notify_lock_lhc1, flags);
654 break;
655 }
656 --queued_fifo_notifications;
657 spin_unlock_irqrestore(&notify_lock_lhc1, flags);
658
659 /* notify client */
660 metadata = notify_handle->metadata;
661 notify_handle->notify(notify_handle->priv,
662 notify_handle->event_type,
663 metadata);
664
665 kfree(metadata);
666 kfree(notify_handle);
667 }
668}
669
670/**
671 * Initialize existing packet.
672 */
673void smux_init_pkt(struct smux_pkt_t *pkt)
674{
675 memset(pkt, 0x0, sizeof(*pkt));
676 pkt->hdr.magic = SMUX_MAGIC;
677 INIT_LIST_HEAD(&pkt->list);
678}
679
680/**
681 * Allocate and initialize packet.
682 *
683 * If a payload is needed, either set it directly and ensure that it's freed or
684 * use smux_alloc_pkt_payload() to allocate a payload that will be freed
685 * automatically when smux_free_pkt() is called.
686 */
687struct smux_pkt_t *smux_alloc_pkt(void)
688{
689 struct smux_pkt_t *pkt;
690
691 /* Consider a free list implementation instead of kmalloc */
692 pkt = kmalloc(sizeof(struct smux_pkt_t), GFP_ATOMIC);
693 if (!pkt) {
694 pr_err("%s: out of memory\n", __func__);
695 return NULL;
696 }
697 smux_init_pkt(pkt);
698 pkt->allocated = 1;
699
700 return pkt;
701}
702
703/**
704 * Free packet.
705 *
706 * @pkt Packet to free (may be NULL)
707 *
708 * If payload was allocated using smux_alloc_pkt_payload(), then it is freed as
709 * well. Otherwise, the caller is responsible for freeing the payload.
710 */
711void smux_free_pkt(struct smux_pkt_t *pkt)
712{
713 if (pkt) {
714 if (pkt->free_payload)
715 kfree(pkt->payload);
716 if (pkt->allocated)
717 kfree(pkt);
718 }
719}
720
721/**
722 * Allocate packet payload.
723 *
724 * @pkt Packet to add payload to
725 *
726 * @returns 0 on success, <0 upon error
727 *
728 * A flag is set to signal smux_free_pkt() to free the payload.
729 */
730int smux_alloc_pkt_payload(struct smux_pkt_t *pkt)
731{
732 if (!pkt)
733 return -EINVAL;
734
735 pkt->payload = kmalloc(pkt->hdr.payload_len, GFP_ATOMIC);
736 pkt->free_payload = 1;
737 if (!pkt->payload) {
738 pr_err("%s: unable to malloc %d bytes for payload\n",
739 __func__, pkt->hdr.payload_len);
740 return -ENOMEM;
741 }
742
743 return 0;
744}
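
/*
 * Illustrative TX packet construction (a sketch of the pattern used by the
 * command handlers below; error handling abbreviated):
 *
 *	struct smux_pkt_t *pkt = smux_alloc_pkt();
 *	if (pkt) {
 *		pkt->hdr.cmd = SMUX_CMD_DATA;
 *		pkt->hdr.lcid = lcid;
 *		pkt->hdr.payload_len = len;
 *		if (!smux_alloc_pkt_payload(pkt)) {
 *			memcpy(pkt->payload, data, len);
 *			smux_tx_queue(pkt, &smux_lch[lcid], 1);
 *		} else {
 *			smux_free_pkt(pkt);
 *		}
 *	}
 */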
745
746static int schedule_notify(uint8_t lcid, int event,
747 const union notifier_metadata *metadata)
748{
749 struct smux_notify_handle *notify_handle = 0;
750 union notifier_metadata *meta_copy = 0;
751 struct smux_lch_t *ch;
752 int i;
753 unsigned long flags;
754 int ret = 0;
755
756 ch = &smux_lch[lcid];
757 notify_handle = kzalloc(sizeof(struct smux_notify_handle),
758 GFP_ATOMIC);
759 if (!notify_handle) {
760 pr_err("%s: out of memory\n", __func__);
761 ret = -ENOMEM;
762 goto free_out;
763 }
764
765 notify_handle->notify = ch->notify;
766 notify_handle->priv = ch->priv;
767 notify_handle->event_type = event;
768 if (metadata) {
769 meta_copy = kzalloc(sizeof(union notifier_metadata),
770 GFP_ATOMIC);
771 if (!meta_copy) {
772 pr_err("%s: out of memory\n", __func__);
773 ret = -ENOMEM;
774 goto free_out;
775 }
776 *meta_copy = *metadata;
777 notify_handle->metadata = meta_copy;
778 } else {
779 notify_handle->metadata = NULL;
780 }
781
782 spin_lock_irqsave(&notify_lock_lhc1, flags);
783 i = kfifo_avail(&smux_notify_fifo);
784 if (i < handle_size) {
785 pr_err("%s: fifo full error %d expected %d\n",
786 __func__, i, handle_size);
787 ret = -ENOMEM;
788 goto unlock_out;
789 }
790
791 i = kfifo_in(&smux_notify_fifo, &notify_handle, handle_size);
792 if (i < 0 || i != handle_size) {
793 pr_err("%s: fifo not available error %d (expected %d)\n",
794 __func__, i, handle_size);
795 ret = -ENOSPC;
796 goto unlock_out;
797 }
798 ++queued_fifo_notifications;
799
800unlock_out:
801 spin_unlock_irqrestore(&notify_lock_lhc1, flags);
802
803free_out:
804 queue_work(smux_notify_wq, &smux_notify_local);
805 if (ret < 0 && notify_handle) {
806 kfree(notify_handle->metadata);
807 kfree(notify_handle);
808 }
809 return ret;
810}
811
812/**
813 * Returns the serialized size of a packet.
814 *
815 * @pkt Packet to serialize
816 *
817 * @returns Serialized length of packet
818 */
819static unsigned int smux_serialize_size(struct smux_pkt_t *pkt)
820{
821 unsigned int size;
822
823 size = sizeof(struct smux_hdr_t);
824 size += pkt->hdr.payload_len;
825 size += pkt->hdr.pad_len;
826
827 return size;
828}
829
830/**
831 * Serialize packet @pkt into output buffer @data.
832 *
833 * @pkt Packet to serialize
834 * @out Destination buffer pointer
835 * @out_len Size of serialized packet
836 *
837 * @returns 0 for success
838 */
839int smux_serialize(struct smux_pkt_t *pkt, char *out,
840 unsigned int *out_len)
841{
842 char *data_start = out;
843
844 if (smux_serialize_size(pkt) > SMUX_MAX_PKT_SIZE) {
845 pr_err("%s: packet size %d too big\n",
846 __func__, smux_serialize_size(pkt));
847 return -E2BIG;
848 }
849
850 memcpy(out, &pkt->hdr, sizeof(struct smux_hdr_t));
851 out += sizeof(struct smux_hdr_t);
852 if (pkt->payload) {
853 memcpy(out, pkt->payload, pkt->hdr.payload_len);
854 out += pkt->hdr.payload_len;
855 }
856 if (pkt->hdr.pad_len) {
857 memset(out, 0x0, pkt->hdr.pad_len);
858 out += pkt->hdr.pad_len;
859 }
860 *out_len = out - data_start;
861 return 0;
862}
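
/*
 * Serialized wire layout produced by smux_serialize() (header fields are
 * defined in smux_private.h):
 *
 *	+---------------------+-------------------------+---------------------+
 *	|  struct smux_hdr_t  |  payload                |  zero padding       |
 *	|  sizeof(hdr) bytes  |  hdr.payload_len bytes  |  hdr.pad_len bytes  |
 *	+---------------------+-------------------------+---------------------+
 */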
863
864/**
865 * Serialize header and provide pointer to the data.
866 *
867 * @pkt Packet
868 * @out[out] Pointer to the serialized header data
869 * @out_len[out] Pointer to the serialized header length
870 */
871static void smux_serialize_hdr(struct smux_pkt_t *pkt, char **out,
872 unsigned int *out_len)
873{
874 *out = (char *)&pkt->hdr;
875 *out_len = sizeof(struct smux_hdr_t);
876}
877
878/**
879 * Serialize payload and provide pointer to the data.
880 *
881 * @pkt Packet
882 * @out[out] Pointer to the serialized payload data
883 * @out_len[out] Pointer to the serialized payload length
884 */
885static void smux_serialize_payload(struct smux_pkt_t *pkt, char **out,
886 unsigned int *out_len)
887{
888 *out = pkt->payload;
889 *out_len = pkt->hdr.payload_len;
890}
891
892/**
893 * Serialize padding and provide pointer to the data.
894 *
895 * @pkt Packet
896 * @out[out] Pointer to the serialized padding (always NULL)
897 * @out_len[out] Pointer to the serialized payload length
898 *
899 * Since the padding field value is undefined, only the size of the padding
900 * (@out_len) is set and the buffer pointer (@out) will always be NULL.
901 */
902static void smux_serialize_padding(struct smux_pkt_t *pkt, char **out,
903 unsigned int *out_len)
904{
905 *out = NULL;
906 *out_len = pkt->hdr.pad_len;
907}
908
909/**
910 * Write data to TTY framework and handle breaking the writes up if needed.
911 *
912 * @data Data to write
913 * @len Length of data
914 *
915 * @returns 0 for success, < 0 for failure
916 */
917static int write_to_tty(char *data, unsigned len)
918{
919 int data_written;
920
921 if (!data)
922 return 0;
923
Eric Holmberged1f00c2012-06-07 09:45:18 -0600924 while (len > 0 && !smux.in_reset) {
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600925 data_written = smux.tty->ops->write(smux.tty, data, len);
926 if (data_written >= 0) {
927 len -= data_written;
928 data += data_written;
929 } else {
930 pr_err("%s: TTY write returned error %d\n",
931 __func__, data_written);
932 return data_written;
933 }
934
935 if (len)
936 tty_wait_until_sent(smux.tty,
937 msecs_to_jiffies(TTY_BUFFER_FULL_WAIT_MS));
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600938 }
939 return 0;
940}
941
942/**
943 * Write packet to TTY.
944 *
945 * @pkt packet to write
946 *
947 * @returns 0 on success
948 */
949static int smux_tx_tty(struct smux_pkt_t *pkt)
950{
951 char *data;
952 unsigned int len;
953 int ret;
954
955 if (!smux.tty) {
956 pr_err("%s: TTY not initialized\n", __func__);
957 return -ENOTTY;
958 }
959
960 if (pkt->hdr.cmd == SMUX_CMD_BYTE) {
961 SMUX_DBG("%s: tty send single byte\n", __func__);
962 ret = write_to_tty(&pkt->hdr.flags, 1);
963 return ret;
964 }
965
966 smux_serialize_hdr(pkt, &data, &len);
967 ret = write_to_tty(data, len);
968 if (ret) {
969 pr_err("%s: failed %d to write header %d\n",
970 __func__, ret, len);
971 return ret;
972 }
973
974 smux_serialize_payload(pkt, &data, &len);
975 ret = write_to_tty(data, len);
976 if (ret) {
977 pr_err("%s: failed %d to write payload %d\n",
978 __func__, ret, len);
979 return ret;
980 }
981
982 smux_serialize_padding(pkt, &data, &len);
983 while (len > 0) {
984 char zero = 0x0;
985 ret = write_to_tty(&zero, 1);
986 if (ret) {
987 pr_err("%s: failed %d to write padding %d\n",
988 __func__, ret, len);
989 return ret;
990 }
991 --len;
992 }
993 return 0;
994}
995
996/**
997 * Send a single character.
998 *
999 * @ch Character to send
1000 */
1001static void smux_send_byte(char ch)
1002{
Eric Holmbergffddd4c2012-06-08 12:37:51 -06001003 struct smux_pkt_t *pkt;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001004
Eric Holmbergffddd4c2012-06-08 12:37:51 -06001005 pkt = smux_alloc_pkt();
1006 if (!pkt) {
1007 pr_err("%s: alloc failure for byte %x\n", __func__, ch);
1008 return;
1009 }
1010 pkt->hdr.cmd = SMUX_CMD_BYTE;
1011 pkt->hdr.flags = ch;
1012 pkt->hdr.lcid = SMUX_BROADCAST_LCID;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001013
Eric Holmbergffddd4c2012-06-08 12:37:51 -06001014 list_add_tail(&pkt->list, &smux.power_queue);
1015 queue_work(smux_tx_wq, &smux_tx_work);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001016}
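
/*
 * Note: single-byte commands are queued on smux.power_queue rather than a
 * logical channel's tx_queue and are picked up by the TX worker scheduled
 * above.
 */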
1017
1018/**
1019 * Receive a single-character packet (used for internal testing).
1020 *
1021 * @ch Character to receive
1022 * @lcid Logical channel ID for packet
1023 *
1024 * @returns 0 for success
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001025 */
1026static int smux_receive_byte(char ch, int lcid)
1027{
1028 struct smux_pkt_t pkt;
1029
1030 smux_init_pkt(&pkt);
1031 pkt.hdr.lcid = lcid;
1032 pkt.hdr.cmd = SMUX_CMD_BYTE;
1033 pkt.hdr.flags = ch;
1034
1035 return smux_dispatch_rx_pkt(&pkt);
1036}
1037
1038/**
1039 * Queue packet for transmit.
1040 *
1041 * @pkt_ptr Packet to queue
1042 * @ch Channel to queue packet on
1043 * @queue Queue channel on ready list
1044 */
1045static void smux_tx_queue(struct smux_pkt_t *pkt_ptr, struct smux_lch_t *ch,
1046 int queue)
1047{
1048 unsigned long flags;
1049
1050 SMUX_DBG("%s: queuing pkt %p\n", __func__, pkt_ptr);
1051
1052 spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
1053 list_add_tail(&pkt_ptr->list, &ch->tx_queue);
1054 spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);
1055
1056 if (queue)
1057 list_channel(ch);
1058}
1059
1060/**
1061 * Handle receive OPEN ACK command.
1062 *
1063 * @pkt Received packet
1064 *
1065 * @returns 0 for success
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001066 */
1067static int smux_handle_rx_open_ack(struct smux_pkt_t *pkt)
1068{
1069 uint8_t lcid;
1070 int ret;
1071 struct smux_lch_t *ch;
1072 int enable_powerdown = 0;
1073
1074 lcid = pkt->hdr.lcid;
1075 ch = &smux_lch[lcid];
1076
1077 spin_lock(&ch->state_lock_lhb1);
1078 if (ch->local_state == SMUX_LCH_LOCAL_OPENING) {
1079 SMUX_DBG("lcid %d local state 0x%x -> 0x%x\n", lcid,
1080 ch->local_state,
1081 SMUX_LCH_LOCAL_OPENED);
1082
1083 if (pkt->hdr.flags & SMUX_CMD_OPEN_POWER_COLLAPSE)
1084 enable_powerdown = 1;
1085
1086 ch->local_state = SMUX_LCH_LOCAL_OPENED;
1087 if (ch->remote_state == SMUX_LCH_REMOTE_OPENED)
1088 schedule_notify(lcid, SMUX_CONNECTED, NULL);
1089 ret = 0;
1090 } else if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
1091 SMUX_DBG("Remote loopback OPEN ACK received\n");
1092 ret = 0;
1093 } else {
1094 pr_err("%s: lcid %d state 0x%x open ack invalid\n",
1095 __func__, lcid, ch->local_state);
1096 ret = -EINVAL;
1097 }
1098 spin_unlock(&ch->state_lock_lhb1);
1099
1100 if (enable_powerdown) {
1101 spin_lock(&smux.tx_lock_lha2);
1102 if (!smux.powerdown_enabled) {
1103 smux.powerdown_enabled = 1;
1104 SMUX_DBG("%s: enabling power-collapse support\n",
1105 __func__);
1106 }
1107 spin_unlock(&smux.tx_lock_lha2);
1108 }
1109
1110 return ret;
1111}
1112
1113static int smux_handle_close_ack(struct smux_pkt_t *pkt)
1114{
1115 uint8_t lcid;
1116 int ret;
1117 struct smux_lch_t *ch;
1118 union notifier_metadata meta_disconnected;
1119 unsigned long flags;
1120
1121 lcid = pkt->hdr.lcid;
1122 ch = &smux_lch[lcid];
1123 meta_disconnected.disconnected.is_ssr = 0;
1124
1125 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
1126
1127 if (ch->local_state == SMUX_LCH_LOCAL_CLOSING) {
1128 SMUX_DBG("lcid %d local state 0x%x -> 0x%x\n", lcid,
1129 SMUX_LCH_LOCAL_CLOSING,
1130 SMUX_LCH_LOCAL_CLOSED);
1131 ch->local_state = SMUX_LCH_LOCAL_CLOSED;
1132 if (ch->remote_state == SMUX_LCH_REMOTE_CLOSED)
1133 schedule_notify(lcid, SMUX_DISCONNECTED,
1134 &meta_disconnected);
1135 ret = 0;
1136 } else if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
1137 SMUX_DBG("Remote loopback CLOSE ACK received\n");
1138 ret = 0;
1139 } else {
1140 pr_err("%s: lcid %d state 0x%x close ack invalid\n",
1141 __func__, lcid, ch->local_state);
1142 ret = -EINVAL;
1143 }
1144 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
1145 return ret;
1146}
1147
1148/**
1149 * Handle receive OPEN command.
1150 *
1151 * @pkt Received packet
1152 *
1153 * @returns 0 for success
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001154 */
1155static int smux_handle_rx_open_cmd(struct smux_pkt_t *pkt)
1156{
1157 uint8_t lcid;
1158 int ret;
1159 struct smux_lch_t *ch;
1160 struct smux_pkt_t *ack_pkt;
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001161 unsigned long flags;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001162 int tx_ready = 0;
1163 int enable_powerdown = 0;
1164
1165 if (pkt->hdr.flags & SMUX_CMD_OPEN_ACK)
1166 return smux_handle_rx_open_ack(pkt);
1167
1168 lcid = pkt->hdr.lcid;
1169 ch = &smux_lch[lcid];
1170
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001171 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001172
1173 if (ch->remote_state == SMUX_LCH_REMOTE_CLOSED) {
1174 SMUX_DBG("lcid %d remote state 0x%x -> 0x%x\n", lcid,
1175 SMUX_LCH_REMOTE_CLOSED,
1176 SMUX_LCH_REMOTE_OPENED);
1177
1178 ch->remote_state = SMUX_LCH_REMOTE_OPENED;
1179 if (pkt->hdr.flags & SMUX_CMD_OPEN_POWER_COLLAPSE)
1180 enable_powerdown = 1;
1181
1182 /* Send Open ACK */
1183 ack_pkt = smux_alloc_pkt();
1184 if (!ack_pkt) {
1185 /* exit out to allow retrying this later */
1186 ret = -ENOMEM;
1187 goto out;
1188 }
1189 ack_pkt->hdr.cmd = SMUX_CMD_OPEN_LCH;
1190 ack_pkt->hdr.flags = SMUX_CMD_OPEN_ACK
1191 | SMUX_CMD_OPEN_POWER_COLLAPSE;
1192 ack_pkt->hdr.lcid = lcid;
1193 ack_pkt->hdr.payload_len = 0;
1194 ack_pkt->hdr.pad_len = 0;
1195 if (pkt->hdr.flags & SMUX_CMD_OPEN_REMOTE_LOOPBACK) {
1196 ch->remote_mode = SMUX_LCH_MODE_REMOTE_LOOPBACK;
1197 ack_pkt->hdr.flags |= SMUX_CMD_OPEN_REMOTE_LOOPBACK;
1198 }
1199 smux_tx_queue(ack_pkt, ch, 0);
1200 tx_ready = 1;
1201
1202 if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
1203 /*
1204 * Send an Open command to the remote side to
1205 * simulate our local client doing it.
1206 */
1207 ack_pkt = smux_alloc_pkt();
1208 if (ack_pkt) {
1209 ack_pkt->hdr.lcid = lcid;
1210 ack_pkt->hdr.cmd = SMUX_CMD_OPEN_LCH;
1211 ack_pkt->hdr.flags =
1212 SMUX_CMD_OPEN_POWER_COLLAPSE;
1213 ack_pkt->hdr.payload_len = 0;
1214 ack_pkt->hdr.pad_len = 0;
1215 smux_tx_queue(ack_pkt, ch, 0);
1216 tx_ready = 1;
1217 } else {
1218 pr_err("%s: Remote loopback allocation failure\n",
1219 __func__);
1220 }
1221 } else if (ch->local_state == SMUX_LCH_LOCAL_OPENED) {
1222 schedule_notify(lcid, SMUX_CONNECTED, NULL);
1223 }
1224 ret = 0;
1225 } else {
1226 pr_err("%s: lcid %d remote state 0x%x open invalid\n",
1227 __func__, lcid, ch->remote_state);
1228 ret = -EINVAL;
1229 }
1230
1231out:
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001232 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001233
1234 if (enable_powerdown) {
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001235 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
Eric Holmberg8b9a6402012-06-05 13:32:57 -06001236 if (!smux.powerdown_enabled) {
1237 smux.powerdown_enabled = 1;
1238 SMUX_DBG("%s: enabling power-collapse support\n",
1239 __func__);
1240 }
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001241 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001242 }
1243
1244 if (tx_ready)
1245 list_channel(ch);
1246
1247 return ret;
1248}
1249
1250/**
1251 * Handle receive CLOSE command.
1252 *
1253 * @pkt Received packet
1254 *
1255 * @returns 0 for success
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001256 */
1257static int smux_handle_rx_close_cmd(struct smux_pkt_t *pkt)
1258{
1259 uint8_t lcid;
1260 int ret;
1261 struct smux_lch_t *ch;
1262 struct smux_pkt_t *ack_pkt;
1263 union notifier_metadata meta_disconnected;
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001264 unsigned long flags;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001265 int tx_ready = 0;
1266
1267 if (pkt->hdr.flags & SMUX_CMD_CLOSE_ACK)
1268 return smux_handle_close_ack(pkt);
1269
1270 lcid = pkt->hdr.lcid;
1271 ch = &smux_lch[lcid];
1272 meta_disconnected.disconnected.is_ssr = 0;
1273
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001274 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001275 if (ch->remote_state == SMUX_LCH_REMOTE_OPENED) {
1276 SMUX_DBG("lcid %d remote state 0x%x -> 0x%x\n", lcid,
1277 SMUX_LCH_REMOTE_OPENED,
1278 SMUX_LCH_REMOTE_CLOSED);
1279
1280 ack_pkt = smux_alloc_pkt();
1281 if (!ack_pkt) {
1282 /* exit out to allow retrying this later */
1283 ret = -ENOMEM;
1284 goto out;
1285 }
1286 ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
1287 ack_pkt->hdr.cmd = SMUX_CMD_CLOSE_LCH;
1288 ack_pkt->hdr.flags = SMUX_CMD_CLOSE_ACK;
1289 ack_pkt->hdr.lcid = lcid;
1290 ack_pkt->hdr.payload_len = 0;
1291 ack_pkt->hdr.pad_len = 0;
1292 smux_tx_queue(ack_pkt, ch, 0);
1293 tx_ready = 1;
1294
1295 if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
1296 /*
1297 * Send a Close command to the remote side to simulate
1298 * our local client doing it.
1299 */
1300 ack_pkt = smux_alloc_pkt();
1301 if (ack_pkt) {
1302 ack_pkt->hdr.lcid = lcid;
1303 ack_pkt->hdr.cmd = SMUX_CMD_CLOSE_LCH;
1304 ack_pkt->hdr.flags = 0;
1305 ack_pkt->hdr.payload_len = 0;
1306 ack_pkt->hdr.pad_len = 0;
1307 smux_tx_queue(ack_pkt, ch, 0);
1308 tx_ready = 1;
1309 } else {
1310 pr_err("%s: Remote loopback allocation failure\n",
1311 __func__);
1312 }
1313 }
1314
1315 if (ch->local_state == SMUX_LCH_LOCAL_CLOSED)
1316 schedule_notify(lcid, SMUX_DISCONNECTED,
1317 &meta_disconnected);
1318 ret = 0;
1319 } else {
1320 pr_err("%s: lcid %d remote state 0x%x close invalid\n",
1321 __func__, lcid, ch->remote_state);
1322 ret = -EINVAL;
1323 }
1324out:
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001325 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001326 if (tx_ready)
1327 list_channel(ch);
1328
1329 return ret;
1330}
1331
1332/*
1333 * Handle receive DATA command.
1334 *
1335 * @pkt Received packet
1336 *
1337 * @returns 0 for success
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001338 */
1339static int smux_handle_rx_data_cmd(struct smux_pkt_t *pkt)
1340{
1341 uint8_t lcid;
Eric Holmbergb8435c82012-06-05 14:51:29 -06001342 int ret = 0;
1343 int do_retry = 0;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001344 int tmp;
1345 int rx_len;
1346 struct smux_lch_t *ch;
1347 union notifier_metadata metadata;
1348 int remote_loopback;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001349 struct smux_pkt_t *ack_pkt;
1350 unsigned long flags;
1351
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001352 if (!pkt || smux_assert_lch_id(pkt->hdr.lcid)) {
1353 ret = -ENXIO;
1354 goto out;
1355 }
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001356
Eric Holmbergb8435c82012-06-05 14:51:29 -06001357 rx_len = pkt->hdr.payload_len;
1358 if (rx_len == 0) {
1359 ret = -EINVAL;
1360 goto out;
1361 }
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001362
1363 lcid = pkt->hdr.lcid;
1364 ch = &smux_lch[lcid];
1365 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
1366 remote_loopback = ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK;
1367
1368 if (ch->local_state != SMUX_LCH_LOCAL_OPENED
1369 && !remote_loopback) {
1370 pr_err("smux: ch %d error data on local state 0x%x\n",
1371 lcid, ch->local_state);
1372 ret = -EIO;
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001373 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001374 goto out;
1375 }
1376
1377 if (ch->remote_state != SMUX_LCH_REMOTE_OPENED) {
1378 pr_err("smux: ch %d error data on remote state 0x%x\n",
1379 lcid, ch->remote_state);
1380 ret = -EIO;
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001381 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001382 goto out;
1383 }
1384
Eric Holmbergb8435c82012-06-05 14:51:29 -06001385 if (!list_empty(&ch->rx_retry_queue)) {
1386 do_retry = 1;
1387 if ((ch->rx_retry_queue_cnt + 1) > SMUX_RX_RETRY_MAX_PKTS) {
1388 /* retry queue full */
1389 schedule_notify(lcid, SMUX_READ_FAIL, NULL);
1390 ret = -ENOMEM;
1391 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
1392 goto out;
1393 }
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001394 }
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001395 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001396
Eric Holmbergb8435c82012-06-05 14:51:29 -06001397 if (remote_loopback) {
1398 /* Echo the data back to the remote client. */
1399 ack_pkt = smux_alloc_pkt();
1400 if (ack_pkt) {
1401 ack_pkt->hdr.lcid = lcid;
1402 ack_pkt->hdr.cmd = SMUX_CMD_DATA;
1403 ack_pkt->hdr.flags = 0;
1404 ack_pkt->hdr.payload_len = pkt->hdr.payload_len;
1405 if (ack_pkt->hdr.payload_len) {
1406 smux_alloc_pkt_payload(ack_pkt);
1407 memcpy(ack_pkt->payload, pkt->payload,
1408 ack_pkt->hdr.payload_len);
1409 }
1410 ack_pkt->hdr.pad_len = pkt->hdr.pad_len;
1411 smux_tx_queue(ack_pkt, ch, 0);
1412 list_channel(ch);
1413 } else {
1414 pr_err("%s: Remote loopback allocation failure\n",
1415 __func__);
1416 }
1417 } else if (!do_retry) {
1418 /* request buffer from client */
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001419 metadata.read.pkt_priv = 0;
1420 metadata.read.buffer = 0;
Eric Holmbergb8435c82012-06-05 14:51:29 -06001421 tmp = ch->get_rx_buffer(ch->priv,
1422 (void **)&metadata.read.pkt_priv,
1423 (void **)&metadata.read.buffer,
1424 rx_len);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001425
Eric Holmbergb8435c82012-06-05 14:51:29 -06001426 if (tmp == 0 && metadata.read.buffer) {
1427 /* place data into RX buffer */
1428 memcpy(metadata.read.buffer, pkt->payload,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001429 rx_len);
Eric Holmbergb8435c82012-06-05 14:51:29 -06001430 metadata.read.len = rx_len;
1431 schedule_notify(lcid, SMUX_READ_DONE,
1432 &metadata);
1433 } else if (tmp == -EAGAIN ||
1434 (tmp == 0 && !metadata.read.buffer)) {
1435 /* buffer allocation failed - add to retry queue */
1436 do_retry = 1;
1437 } else if (tmp < 0) {
1438 schedule_notify(lcid, SMUX_READ_FAIL, NULL);
1439 ret = -ENOMEM;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001440 }
1441 }
1442
Eric Holmbergb8435c82012-06-05 14:51:29 -06001443 if (do_retry) {
1444 struct smux_rx_pkt_retry *retry;
1445
1446 retry = kmalloc(sizeof(struct smux_rx_pkt_retry), GFP_KERNEL);
1447 if (!retry) {
1448 pr_err("%s: retry alloc failure\n", __func__);
1449 ret = -ENOMEM;
1450 schedule_notify(lcid, SMUX_READ_FAIL, NULL);
1451 goto out;
1452 }
1453 INIT_LIST_HEAD(&retry->rx_retry_list);
1454 retry->timeout_in_ms = SMUX_RX_RETRY_MIN_MS;
1455
1456 /* copy packet */
1457 retry->pkt = smux_alloc_pkt();
1458 if (!retry->pkt) {
1459 kfree(retry);
1460 pr_err("%s: pkt alloc failure\n", __func__);
1461 ret = -ENOMEM;
1462 schedule_notify(lcid, SMUX_READ_FAIL, NULL);
1463 goto out;
1464 }
1465 retry->pkt->hdr.lcid = lcid;
1466 retry->pkt->hdr.payload_len = pkt->hdr.payload_len;
1467 retry->pkt->hdr.pad_len = pkt->hdr.pad_len;
1468 if (retry->pkt->hdr.payload_len) {
1469 smux_alloc_pkt_payload(retry->pkt);
1470 memcpy(retry->pkt->payload, pkt->payload,
1471 retry->pkt->hdr.payload_len);
1472 }
1473
1474 /* add to retry queue */
1475 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
1476 list_add_tail(&retry->rx_retry_list, &ch->rx_retry_queue);
1477 ++ch->rx_retry_queue_cnt;
1478 if (ch->rx_retry_queue_cnt == 1)
1479 queue_delayed_work(smux_rx_wq, &ch->rx_retry_work,
1480 msecs_to_jiffies(retry->timeout_in_ms));
1481 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
1482 }
1483
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001484out:
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001485 return ret;
1486}
1487
1488/**
1489 * Handle receive byte command for testing purposes.
1490 *
1491 * @pkt Received packet
1492 *
1493 * @returns 0 for success
1494 */
1495static int smux_handle_rx_byte_cmd(struct smux_pkt_t *pkt)
1496{
1497 uint8_t lcid;
1498 int ret;
1499 struct smux_lch_t *ch;
1500 union notifier_metadata metadata;
1501 unsigned long flags;
1502
Eric Holmbergffddd4c2012-06-08 12:37:51 -06001503 if (!pkt || smux_assert_lch_id(pkt->hdr.lcid)) {
1504 pr_err("%s: invalid packet or channel id\n", __func__);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001505 return -ENXIO;
Eric Holmbergffddd4c2012-06-08 12:37:51 -06001506 }
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001507
1508 lcid = pkt->hdr.lcid;
1509 ch = &smux_lch[lcid];
1510 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
1511
1512 if (ch->local_state != SMUX_LCH_LOCAL_OPENED) {
1513 pr_err("smux: ch %d error data on local state 0x%x\n",
1514 lcid, ch->local_state);
1515 ret = -EIO;
1516 goto out;
1517 }
1518
1519 if (ch->remote_state != SMUX_LCH_REMOTE_OPENED) {
1520 pr_err("smux: ch %d error data on remote state 0x%x\n",
1521 lcid, ch->remote_state);
1522 ret = -EIO;
1523 goto out;
1524 }
1525
1526 metadata.read.pkt_priv = (void *)(int)pkt->hdr.flags;
1527 metadata.read.buffer = 0;
1528 schedule_notify(lcid, SMUX_READ_DONE, &metadata);
1529 ret = 0;
1530
1531out:
1532 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
1533 return ret;
1534}
1535
1536/**
1537 * Handle receive status command.
1538 *
1539 * @pkt Received packet
1540 *
1541 * @returns 0 for success
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001542 */
1543static int smux_handle_rx_status_cmd(struct smux_pkt_t *pkt)
1544{
1545 uint8_t lcid;
1546 int ret = 0;
1547 struct smux_lch_t *ch;
1548 union notifier_metadata meta;
1549 unsigned long flags;
1550 int tx_ready = 0;
1551
1552 lcid = pkt->hdr.lcid;
1553 ch = &smux_lch[lcid];
1554
1555 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
1556 meta.tiocm.tiocm_old = ch->remote_tiocm;
1557 meta.tiocm.tiocm_new = pkt->hdr.flags;
1558
1559 /* update logical channel flow control */
1560 if ((meta.tiocm.tiocm_old & SMUX_CMD_STATUS_FLOW_CNTL) ^
1561 (meta.tiocm.tiocm_new & SMUX_CMD_STATUS_FLOW_CNTL)) {
1562 /* logical channel flow control changed */
1563 if (pkt->hdr.flags & SMUX_CMD_STATUS_FLOW_CNTL) {
1564 /* disabled TX */
1565 SMUX_DBG("TX Flow control enabled\n");
1566 ch->tx_flow_control = 1;
1567 } else {
1568 /* re-enable channel */
1569 SMUX_DBG("TX Flow control disabled\n");
1570 ch->tx_flow_control = 0;
1571 tx_ready = 1;
1572 }
1573 }
1574 meta.tiocm.tiocm_old = msm_smux_tiocm_get_atomic(ch);
1575 ch->remote_tiocm = pkt->hdr.flags;
1576 meta.tiocm.tiocm_new = msm_smux_tiocm_get_atomic(ch);
1577
1578 /* client notification for status change */
1579 if (IS_FULLY_OPENED(ch)) {
1580 if (meta.tiocm.tiocm_old != meta.tiocm.tiocm_new)
1581 schedule_notify(lcid, SMUX_TIOCM_UPDATE, &meta);
1582 ret = 0;
1583 }
1584 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
1585 if (tx_ready)
1586 list_channel(ch);
1587
1588 return ret;
1589}
1590
1591/**
1592 * Handle receive power command.
1593 *
1594 * @pkt Received packet
1595 *
1596 * @returns 0 for success
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001597 */
1598static int smux_handle_rx_power_cmd(struct smux_pkt_t *pkt)
1599{
Steve Mucklef132c6c2012-06-06 18:30:57 -07001600 struct smux_pkt_t *ack_pkt = NULL;
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001601 unsigned long flags;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001602
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001603 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001604 if (pkt->hdr.flags & SMUX_CMD_PWR_CTL_ACK) {
1605 /* local sleep request ack */
1606 if (smux.power_state == SMUX_PWR_TURNING_OFF) {
1607 /* Power-down complete, turn off UART */
Eric Holmbergff0b0112012-06-08 15:06:57 -06001608 SMUX_PWR("%s: Power %d->%d\n", __func__,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001609 smux.power_state, SMUX_PWR_OFF_FLUSH);
1610 smux.power_state = SMUX_PWR_OFF_FLUSH;
1611 queue_work(smux_tx_wq, &smux_inactivity_work);
1612 } else {
1613 pr_err("%s: sleep request ack invalid in state %d\n",
1614 __func__, smux.power_state);
1615 }
1616 } else {
Eric Holmbergffddd4c2012-06-08 12:37:51 -06001617 /*
1618 * Remote sleep request
1619 *
1620 * Even if we have data pending, we need to transition to the
1621 * POWER_OFF state and then perform a wakeup since the remote
1622 * side has requested a power-down.
1623 *
1624 * The state here is set to SMUX_PWR_TURNING_OFF_FLUSH and
1625 * the TX thread will set the state to SMUX_PWR_TURNING_OFF
1626 * when it sends the packet.
1627 */
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001628 if (smux.power_state == SMUX_PWR_ON
1629 || smux.power_state == SMUX_PWR_TURNING_OFF) {
1630 ack_pkt = smux_alloc_pkt();
1631 if (ack_pkt) {
Eric Holmbergff0b0112012-06-08 15:06:57 -06001632 SMUX_PWR("%s: Power %d->%d\n", __func__,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001633 smux.power_state,
1634 SMUX_PWR_TURNING_OFF_FLUSH);
1635
Eric Holmbergffddd4c2012-06-08 12:37:51 -06001636 smux.power_state = SMUX_PWR_TURNING_OFF_FLUSH;
1637
1638 /* send power-down ack */
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001639 ack_pkt->hdr.cmd = SMUX_CMD_PWR_CTL;
1640 ack_pkt->hdr.flags = SMUX_CMD_PWR_CTL_ACK;
Eric Holmbergffddd4c2012-06-08 12:37:51 -06001641 ack_pkt->hdr.lcid = SMUX_BROADCAST_LCID;
1642 list_add_tail(&ack_pkt->list,
1643 &smux.power_queue);
1644 queue_work(smux_tx_wq, &smux_tx_work);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001645 }
1646 } else {
1647 pr_err("%s: sleep request invalid in state %d\n",
1648 __func__, smux.power_state);
1649 }
1650 }
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001651 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001652
1653 return 0;
1654}
1655
1656/**
1657 * Handle dispatching a completed packet for receive processing.
1658 *
1659 * @pkt Packet to process
1660 *
1661 * @returns 0 for success
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001662 */
1663static int smux_dispatch_rx_pkt(struct smux_pkt_t *pkt)
1664{
Eric Holmbergf9622662012-06-13 15:55:45 -06001665 int ret = -ENXIO;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001666
1667 SMUX_LOG_PKT_RX(pkt);
1668
1669 switch (pkt->hdr.cmd) {
1670 case SMUX_CMD_OPEN_LCH:
Eric Holmbergf9622662012-06-13 15:55:45 -06001671 if (smux_assert_lch_id(pkt->hdr.lcid)) {
1672 pr_err("%s: invalid channel id %d\n",
1673 __func__, pkt->hdr.lcid);
1674 break;
1675 }
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001676 ret = smux_handle_rx_open_cmd(pkt);
1677 break;
1678
1679 case SMUX_CMD_DATA:
Eric Holmbergf9622662012-06-13 15:55:45 -06001680 if (smux_assert_lch_id(pkt->hdr.lcid)) {
1681 pr_err("%s: invalid channel id %d\n",
1682 __func__, pkt->hdr.lcid);
1683 break;
1684 }
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001685 ret = smux_handle_rx_data_cmd(pkt);
1686 break;
1687
1688 case SMUX_CMD_CLOSE_LCH:
Eric Holmbergf9622662012-06-13 15:55:45 -06001689 if (smux_assert_lch_id(pkt->hdr.lcid)) {
1690 pr_err("%s: invalid channel id %d\n",
1691 __func__, pkt->hdr.lcid);
1692 break;
1693 }
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001694 ret = smux_handle_rx_close_cmd(pkt);
1695 break;
1696
1697 case SMUX_CMD_STATUS:
Eric Holmbergf9622662012-06-13 15:55:45 -06001698 if (smux_assert_lch_id(pkt->hdr.lcid)) {
1699 pr_err("%s: invalid channel id %d\n",
1700 __func__, pkt->hdr.lcid);
1701 break;
1702 }
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001703 ret = smux_handle_rx_status_cmd(pkt);
1704 break;
1705
1706 case SMUX_CMD_PWR_CTL:
1707 ret = smux_handle_rx_power_cmd(pkt);
1708 break;
1709
1710 case SMUX_CMD_BYTE:
1711 ret = smux_handle_rx_byte_cmd(pkt);
1712 break;
1713
1714 default:
1715 pr_err("%s: command %d unknown\n", __func__, pkt->hdr.cmd);
1716 ret = -EINVAL;
1717 }
1718 return ret;
1719}
1720
1721/**
1722 * Deserializes a packet and dispatches it to the packet receive logic.
1723 *
1724 * @data Raw data for one packet
1725 * @len Length of the data
1726 *
1727 * @returns 0 for success
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001728 */
1729static int smux_deserialize(unsigned char *data, int len)
1730{
1731 struct smux_pkt_t recv;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001732
1733 smux_init_pkt(&recv);
1734
1735 /*
1736 * It may be possible to optimize this to not use the
1737 * temporary buffer.
1738 */
1739 memcpy(&recv.hdr, data, sizeof(struct smux_hdr_t));
1740
1741 if (recv.hdr.magic != SMUX_MAGIC) {
1742 pr_err("%s: invalid header magic\n", __func__);
1743 return -EINVAL;
1744 }
1745
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001746 if (recv.hdr.payload_len)
1747 recv.payload = data + sizeof(struct smux_hdr_t);
1748
1749 return smux_dispatch_rx_pkt(&recv);
1750}
1751
1752/**
1753 * Handle wakeup request byte.
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001754 */
1755static void smux_handle_wakeup_req(void)
1756{
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001757 unsigned long flags;
1758
1759 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001760 if (smux.power_state == SMUX_PWR_OFF
1761 || smux.power_state == SMUX_PWR_TURNING_ON) {
1762 /* wakeup system */
Eric Holmbergff0b0112012-06-08 15:06:57 -06001763 SMUX_PWR("%s: Power %d->%d\n", __func__,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001764 smux.power_state, SMUX_PWR_ON);
1765 smux.power_state = SMUX_PWR_ON;
1766 queue_work(smux_tx_wq, &smux_wakeup_work);
1767 queue_work(smux_tx_wq, &smux_tx_work);
1768 queue_delayed_work(smux_tx_wq, &smux_delayed_inactivity_work,
1769 msecs_to_jiffies(SMUX_INACTIVITY_TIMEOUT_MS));
1770 smux_send_byte(SMUX_WAKEUP_ACK);
1771 } else {
1772 smux_send_byte(SMUX_WAKEUP_ACK);
1773 }
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001774 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001775}
1776
1777/**
1778 * Handle wakeup request ack.
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001779 */
1780static void smux_handle_wakeup_ack(void)
1781{
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001782 unsigned long flags;
1783
1784 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001785 if (smux.power_state == SMUX_PWR_TURNING_ON) {
1786 /* received response to wakeup request */
Eric Holmbergff0b0112012-06-08 15:06:57 -06001787 SMUX_PWR("%s: Power %d->%d\n", __func__,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001788 smux.power_state, SMUX_PWR_ON);
1789 smux.power_state = SMUX_PWR_ON;
1790 queue_work(smux_tx_wq, &smux_tx_work);
1791 queue_delayed_work(smux_tx_wq, &smux_delayed_inactivity_work,
1792 msecs_to_jiffies(SMUX_INACTIVITY_TIMEOUT_MS));
1793
1794 } else if (smux.power_state != SMUX_PWR_ON) {
1795 /* invalid message */
1796 pr_err("%s: wakeup request ack invalid in state %d\n",
1797 __func__, smux.power_state);
1798 }
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001799 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001800}
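
/*
 * Receive-side view of the wakeup handshake: a SMUX_WAKEUP_REQ byte from the
 * remote moves power_state to SMUX_PWR_ON and is always answered with
 * SMUX_WAKEUP_ACK; a SMUX_WAKEUP_ACK received while in SMUX_PWR_TURNING_ON
 * completes a locally initiated wakeup and kicks the TX worker.
 */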
1801
1802/**
1803 * RX State machine - IDLE state processing.
1804 *
1805 * @data New RX data to process
1806 * @len Length of the data
1807 * @used Return value of length processed
1808 * @flag Error flag - TTY_NORMAL 0 for no failure
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001809 */
1810static void smux_rx_handle_idle(const unsigned char *data,
1811 int len, int *used, int flag)
1812{
1813 int i;
1814
1815 if (flag) {
1816 if (smux_byte_loopback)
1817 smux_receive_byte(SMUX_UT_ECHO_ACK_FAIL,
1818 smux_byte_loopback);
1819 pr_err("%s: TTY error 0x%x - ignoring\n", __func__, flag);
1820 ++*used;
1821 return;
1822 }
1823
1824 for (i = *used; i < len && smux.rx_state == SMUX_RX_IDLE; i++) {
1825 switch (data[i]) {
1826 case SMUX_MAGIC_WORD1:
1827 smux.rx_state = SMUX_RX_MAGIC;
1828 break;
1829 case SMUX_WAKEUP_REQ:
1830 smux_handle_wakeup_req();
1831 break;
1832 case SMUX_WAKEUP_ACK:
1833 smux_handle_wakeup_ack();
1834 break;
1835 default:
1836 /* unexpected character */
1837 if (smux_byte_loopback && data[i] == SMUX_UT_ECHO_REQ)
1838 smux_receive_byte(SMUX_UT_ECHO_ACK_OK,
1839 smux_byte_loopback);
1840 pr_err("%s: parse error 0x%02x - ignoring\n", __func__,
1841 (unsigned)data[i]);
1842 break;
1843 }
1844 }
1845
1846 *used = i;
1847}
1848
1849/**
1850 * RX State machine - Header Magic state processing.
1851 *
1852 * @data New RX data to process
1853 * @len Length of the data
1854 * @used Return value of length processed
1855 * @flag Error flag - TTY_NORMAL 0 for no failure
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001856 */
1857static void smux_rx_handle_magic(const unsigned char *data,
1858 int len, int *used, int flag)
1859{
1860 int i;
1861
1862 if (flag) {
1863 pr_err("%s: TTY RX error %d\n", __func__, flag);
1864 smux_enter_reset();
1865 smux.rx_state = SMUX_RX_FAILURE;
1866 ++*used;
1867 return;
1868 }
1869
1870 for (i = *used; i < len && smux.rx_state == SMUX_RX_MAGIC; i++) {
1871 /* wait for completion of the magic */
1872 if (data[i] == SMUX_MAGIC_WORD2) {
1873 smux.recv_len = 0;
1874 smux.recv_buf[smux.recv_len++] = SMUX_MAGIC_WORD1;
1875 smux.recv_buf[smux.recv_len++] = SMUX_MAGIC_WORD2;
1876 smux.rx_state = SMUX_RX_HDR;
1877 } else {
1878 /* unexpected / trash character */
1879 pr_err("%s: rx parse error for char %c; *used=%d, len=%d\n",
1880 __func__, data[i], *used, len);
1881 smux.rx_state = SMUX_RX_IDLE;
1882 }
1883 }
1884
1885 *used = i;
1886}
1887
1888/**
1889 * RX State machine - Packet Header state processing.
1890 *
1891 * @data New RX data to process
1892 * @len Length of the data
1893 * @used Return value of length processed
1894 * @flag Error flag - TTY_NORMAL 0 for no failure
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001895 */
1896static void smux_rx_handle_hdr(const unsigned char *data,
1897 int len, int *used, int flag)
1898{
1899 int i;
1900 struct smux_hdr_t *hdr;
1901
1902 if (flag) {
1903 pr_err("%s: TTY RX error %d\n", __func__, flag);
1904 smux_enter_reset();
1905 smux.rx_state = SMUX_RX_FAILURE;
1906 ++*used;
1907 return;
1908 }
1909
1910 for (i = *used; i < len && smux.rx_state == SMUX_RX_HDR; i++) {
1911 smux.recv_buf[smux.recv_len++] = data[i];
1912
1913 if (smux.recv_len == sizeof(struct smux_hdr_t)) {
1914 /* complete header received */
1915 hdr = (struct smux_hdr_t *)smux.recv_buf;
1916 smux.pkt_remain = hdr->payload_len + hdr->pad_len;
1917 smux.rx_state = SMUX_RX_PAYLOAD;
1918 }
1919 }
1920 *used = i;
1921}
1922
1923/**
1924 * RX State machine - Packet Payload state processing.
1925 *
1926 * @data New RX data to process
1927 * @len Length of the data
1928 * @used Return value of length processed
1929 * @flag Error flag - TTY_NORMAL 0 for no failure
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001930 */
1931static void smux_rx_handle_pkt_payload(const unsigned char *data,
1932 int len, int *used, int flag)
1933{
1934 int remaining;
1935
1936 if (flag) {
1937 pr_err("%s: TTY RX error %d\n", __func__, flag);
1938 smux_enter_reset();
1939 smux.rx_state = SMUX_RX_FAILURE;
1940 ++*used;
1941 return;
1942 }
1943
1944 /* copy data into rx buffer */
1945 if (smux.pkt_remain < (len - *used))
1946 remaining = smux.pkt_remain;
1947 else
1948 remaining = len - *used;
1949
1950 memcpy(&smux.recv_buf[smux.recv_len], &data[*used], remaining);
1951 smux.recv_len += remaining;
1952 smux.pkt_remain -= remaining;
1953 *used += remaining;
1954
1955 if (smux.pkt_remain == 0) {
1956 /* complete packet received */
1957 smux_deserialize(smux.recv_buf, smux.recv_len);
1958 smux.rx_state = SMUX_RX_IDLE;
1959 }
1960}
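
/*
 * Receive path summary: bytes walk the states SMUX_RX_IDLE (scan for
 * SMUX_MAGIC_WORD1 or wakeup bytes) -> SMUX_RX_MAGIC (expect
 * SMUX_MAGIC_WORD2) -> SMUX_RX_HDR (accumulate a full struct smux_hdr_t)
 * -> SMUX_RX_PAYLOAD (copy payload_len + pad_len bytes, then
 * smux_deserialize()) and back to SMUX_RX_IDLE. A TTY error flag in any
 * state other than IDLE forces SMUX_RX_FAILURE via smux_enter_reset().
 */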
1961
1962/**
1963 * Feed data to the receive state machine.
1964 *
1965 * @data Pointer to data block
1966 * @len Length of data
1967 * @flag TTY_NORMAL (0) for no error, otherwise TTY Error Flag
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001968 */
1969void smux_rx_state_machine(const unsigned char *data,
1970 int len, int flag)
1971{
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001972 struct smux_rx_worker_data work;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001973
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001974 work.data = data;
1975 work.len = len;
1976 work.flag = flag;
1977 INIT_WORK_ONSTACK(&work.work, smux_rx_worker);
1978 work.work_complete = COMPLETION_INITIALIZER_ONSTACK(work.work_complete);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001979
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001980 queue_work(smux_rx_wq, &work.work);
1981 wait_for_completion(&work.work_complete);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001982}
1983
1984/**
1985 * Add channel to transmit-ready list and trigger transmit worker.
1986 *
1987 * @ch Channel to add
1988 */
1989static void list_channel(struct smux_lch_t *ch)
1990{
1991 unsigned long flags;
1992
1993 SMUX_DBG("%s: listing channel %d\n",
1994 __func__, ch->lcid);
1995
1996 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
1997 spin_lock(&ch->tx_lock_lhb2);
1998 smux.tx_activity_flag = 1;
1999 if (list_empty(&ch->tx_ready_list))
2000 list_add_tail(&ch->tx_ready_list, &smux.lch_tx_ready_list);
2001 spin_unlock(&ch->tx_lock_lhb2);
2002 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2003
2004 queue_work(smux_tx_wq, &smux_tx_work);
2005}
2006
2007/**
2008 * Transmit packet on correct transport and then perform client
2009 * notification.
2010 *
2011 * @ch Channel to transmit on
2012 * @pkt Packet to transmit
2013 */
2014static void smux_tx_pkt(struct smux_lch_t *ch, struct smux_pkt_t *pkt)
2015{
2016 union notifier_metadata meta_write;
2017 int ret;
2018
2019 if (ch && pkt) {
2020 SMUX_LOG_PKT_TX(pkt);
2021 if (ch->local_mode == SMUX_LCH_MODE_LOCAL_LOOPBACK)
2022 ret = smux_tx_loopback(pkt);
2023 else
2024 ret = smux_tx_tty(pkt);
2025
2026 if (pkt->hdr.cmd == SMUX_CMD_DATA) {
2027 /* notify write-done */
2028 meta_write.write.pkt_priv = pkt->priv;
2029 meta_write.write.buffer = pkt->payload;
2030 meta_write.write.len = pkt->hdr.payload_len;
2031 if (ret >= 0) {
2032				SMUX_DBG("%s: PKT write done\n", __func__);
2033 schedule_notify(ch->lcid, SMUX_WRITE_DONE,
2034 &meta_write);
2035 } else {
2036 pr_err("%s: failed to write pkt %d\n",
2037 __func__, ret);
2038 schedule_notify(ch->lcid, SMUX_WRITE_FAIL,
2039 &meta_write);
2040 }
2041 }
2042 }
2043}
2044
2045/**
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002046 * Flush pending TTY TX data.
2047 */
2048static void smux_flush_tty(void)
2049{
2050 if (!smux.tty) {
2051 pr_err("%s: ldisc not loaded\n", __func__);
2052 return;
2053 }
2054
2055 tty_wait_until_sent(smux.tty,
2056 msecs_to_jiffies(TTY_BUFFER_FULL_WAIT_MS));
2057
2058 if (tty_chars_in_buffer(smux.tty) > 0)
2059 pr_err("%s: unable to flush UART queue\n", __func__);
2060}
2061
2062/**
Eric Holmberged1f00c2012-06-07 09:45:18 -06002063 * Purge TX queue for logical channel.
2064 *
2065 * @ch Logical channel pointer
2066 *
2067 * Must be called with the following spinlocks locked:
2068 * state_lock_lhb1
2069 * tx_lock_lhb2
2070 */
2071static void smux_purge_ch_tx_queue(struct smux_lch_t *ch)
2072{
2073 struct smux_pkt_t *pkt;
2074 int send_disconnect = 0;
2075
2076 while (!list_empty(&ch->tx_queue)) {
2077 pkt = list_first_entry(&ch->tx_queue, struct smux_pkt_t,
2078 list);
2079 list_del(&pkt->list);
2080
2081 if (pkt->hdr.cmd == SMUX_CMD_OPEN_LCH) {
2082 /* Open was never sent, just force to closed state */
2083 ch->local_state = SMUX_LCH_LOCAL_CLOSED;
2084 send_disconnect = 1;
2085 } else if (pkt->hdr.cmd == SMUX_CMD_DATA) {
2086 /* Notify client of failed write */
2087 union notifier_metadata meta_write;
2088
2089 meta_write.write.pkt_priv = pkt->priv;
2090 meta_write.write.buffer = pkt->payload;
2091 meta_write.write.len = pkt->hdr.payload_len;
2092 schedule_notify(ch->lcid, SMUX_WRITE_FAIL, &meta_write);
2093 }
2094 smux_free_pkt(pkt);
2095 }
2096
2097 if (send_disconnect) {
2098 union notifier_metadata meta_disconnected;
2099
2100 meta_disconnected.disconnected.is_ssr = smux.in_reset;
2101 schedule_notify(ch->lcid, SMUX_DISCONNECTED,
2102 &meta_disconnected);
2103 }
2104}
2105
2106/**
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002107 * Power-up the UART.
2108 */
2109static void smux_uart_power_on(void)
2110{
2111 struct uart_state *state;
2112
2113 if (!smux.tty || !smux.tty->driver_data) {
2114 pr_err("%s: unable to find UART port for tty %p\n",
2115 __func__, smux.tty);
2116 return;
2117 }
2118 state = smux.tty->driver_data;
2119 msm_hs_request_clock_on(state->uart_port);
2120}
2121
2122/**
2123 * Power down the UART.
2124 */
2125static void smux_uart_power_off(void)
2126{
2127 struct uart_state *state;
2128
2129 if (!smux.tty || !smux.tty->driver_data) {
2130 pr_err("%s: unable to find UART port for tty %p\n",
2131 __func__, smux.tty);
2132 return;
2133 }
2134 state = smux.tty->driver_data;
2135 msm_hs_request_clock_off(state->uart_port);
2136}
2137
2138/**
2139 * TX Wakeup Worker
2140 *
2141 * @work Not used
2142 *
2143 * Do an exponential back-off wakeup sequence with a maximum period
2144 * of approximately 1 second (1 << 20 microseconds).
2145 */
2146static void smux_wakeup_worker(struct work_struct *work)
2147{
2148 unsigned long flags;
2149 unsigned wakeup_delay;
2150 int complete = 0;
2151
Eric Holmberged1f00c2012-06-07 09:45:18 -06002152 while (!smux.in_reset) {
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002153 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
2154 if (smux.power_state == SMUX_PWR_ON) {
2155 /* wakeup complete */
2156 complete = 1;
2157 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2158 break;
2159 } else {
2160 /* retry */
2161 wakeup_delay = smux.pwr_wakeup_delay_us;
2162 smux.pwr_wakeup_delay_us <<= 1;
2163 if (smux.pwr_wakeup_delay_us > SMUX_WAKEUP_DELAY_MAX)
2164 smux.pwr_wakeup_delay_us =
2165 SMUX_WAKEUP_DELAY_MAX;
2166 }
2167 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2168 SMUX_DBG("%s: triggering wakeup\n", __func__);
2169 smux_send_byte(SMUX_WAKEUP_REQ);
2170
2171 if (wakeup_delay < SMUX_WAKEUP_DELAY_MIN) {
2172 SMUX_DBG("%s: sleeping for %u us\n", __func__,
2173 wakeup_delay);
2174 usleep_range(wakeup_delay, 2*wakeup_delay);
2175 } else {
2176 /* schedule delayed work */
2177 SMUX_DBG("%s: scheduling delayed wakeup in %u ms\n",
2178 __func__, wakeup_delay / 1000);
2179 queue_delayed_work(smux_tx_wq,
2180 &smux_wakeup_delayed_work,
2181 msecs_to_jiffies(wakeup_delay / 1000));
2182 break;
2183 }
2184 }
2185
2186 if (complete) {
2187 SMUX_DBG("%s: wakeup complete\n", __func__);
2188 /*
2189 * Cancel any pending retry. This avoids a race condition with
2190 * a new power-up request because:
2191 * 1) this worker doesn't modify the state
2192 * 2) this worker is processed on the same single-threaded
2193 * workqueue as new TX wakeup requests
2194 */
2195 cancel_delayed_work(&smux_wakeup_delayed_work);
2196 }
2197}
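
/*
 * With pwr_wakeup_delay_us starting at 1, the retry delay above doubles
 * (1 us, 2 us, 4 us, ...) and is capped at SMUX_WAKEUP_DELAY_MAX
 * (1 << 20 us, roughly 1 s). Delays below SMUX_WAKEUP_DELAY_MIN
 * (1 << 15 us, roughly 33 ms) are slept inline with usleep_range();
 * longer delays are handled by re-queueing smux_wakeup_delayed_work
 * instead of blocking the worker.
 */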
2198
2199
2200/**
2201 * Inactivity timeout worker. Periodically scheduled when link is active.
2202 * When it detects inactivity, it will power-down the UART link.
2203 *
2204 * @work Work structure (not used)
2205 */
2206static void smux_inactivity_worker(struct work_struct *work)
2207{
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002208 struct smux_pkt_t *pkt;
2209 unsigned long flags;
2210
2211 spin_lock_irqsave(&smux.rx_lock_lha1, flags);
2212 spin_lock(&smux.tx_lock_lha2);
2213
2214 if (!smux.tx_activity_flag && !smux.rx_activity_flag) {
2215 /* no activity */
2216 if (smux.powerdown_enabled) {
2217 if (smux.power_state == SMUX_PWR_ON) {
2218 /* start power-down sequence */
2219 pkt = smux_alloc_pkt();
2220 if (pkt) {
Eric Holmbergff0b0112012-06-08 15:06:57 -06002221 SMUX_PWR("%s: Power %d->%d\n", __func__,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002222 smux.power_state,
2223 SMUX_PWR_TURNING_OFF);
2224 smux.power_state = SMUX_PWR_TURNING_OFF;
2225
2226 /* send power-down request */
2227 pkt->hdr.cmd = SMUX_CMD_PWR_CTL;
2228 pkt->hdr.flags = 0;
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002229 pkt->hdr.lcid = SMUX_BROADCAST_LCID;
2230 list_add_tail(&pkt->list,
2231 &smux.power_queue);
2232 queue_work(smux_tx_wq, &smux_tx_work);
2233 } else {
2234 pr_err("%s: packet alloc failed\n",
2235 __func__);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002236 }
2237 }
2238 } else {
2239 SMUX_DBG("%s: link inactive, but powerdown disabled\n",
2240 __func__);
2241 }
2242 }
2243 smux.tx_activity_flag = 0;
2244 smux.rx_activity_flag = 0;
2245
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002246 if (smux.power_state == SMUX_PWR_OFF_FLUSH) {
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002247 /* ready to power-down the UART */
Eric Holmbergff0b0112012-06-08 15:06:57 -06002248 SMUX_PWR("%s: Power %d->%d\n", __func__,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002249 smux.power_state, SMUX_PWR_OFF);
Eric Holmbergff0b0112012-06-08 15:06:57 -06002250 smux.power_state = SMUX_PWR_OFF;
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002251
2252 /* if data is pending, schedule a new wakeup */
2253 if (!list_empty(&smux.lch_tx_ready_list) ||
2254 !list_empty(&smux.power_queue))
2255 queue_work(smux_tx_wq, &smux_tx_work);
2256
2257 spin_unlock(&smux.tx_lock_lha2);
2258 spin_unlock_irqrestore(&smux.rx_lock_lha1, flags);
2259
2260 /* flush UART output queue and power down */
2261 smux_flush_tty();
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002262 smux_uart_power_off();
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002263 } else {
2264 spin_unlock(&smux.tx_lock_lha2);
2265 spin_unlock_irqrestore(&smux.rx_lock_lha1, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002266 }
2267
2268 /* reschedule inactivity worker */
2269 if (smux.power_state != SMUX_PWR_OFF)
2270 queue_delayed_work(smux_tx_wq, &smux_delayed_inactivity_work,
2271 msecs_to_jiffies(SMUX_INACTIVITY_TIMEOUT_MS));
2272}
2273
2274/**
Eric Holmbergb8435c82012-06-05 14:51:29 -06002275 * Remove RX retry packet from channel and free it.
2276 *
2277 * Must be called with state_lock_lhb1 locked.
2278 *
2279 * @ch Channel for retry packet
2280 * @retry Retry packet to remove
2281 */
2282void smux_remove_rx_retry(struct smux_lch_t *ch,
2283 struct smux_rx_pkt_retry *retry)
2284{
2285 list_del(&retry->rx_retry_list);
2286 --ch->rx_retry_queue_cnt;
2287 smux_free_pkt(retry->pkt);
2288 kfree(retry);
2289}
2290
2291/**
Eric Holmberg0560f7a2012-05-31 15:50:26 -06002292 * RX worker handles all receive operations.
2293 *
2294 * @work Work structure contained in struct smux_rx_worker_data
2295 */
2296static void smux_rx_worker(struct work_struct *work)
2297{
2298 unsigned long flags;
2299 int used;
2300 int initial_rx_state;
2301 struct smux_rx_worker_data *w;
2302 const unsigned char *data;
2303 int len;
2304 int flag;
2305
2306 w = container_of(work, struct smux_rx_worker_data, work);
2307 data = w->data;
2308 len = w->len;
2309 flag = w->flag;
2310
2311 spin_lock_irqsave(&smux.rx_lock_lha1, flags);
2312 smux.rx_activity_flag = 1;
2313 spin_unlock_irqrestore(&smux.rx_lock_lha1, flags);
2314
2315 SMUX_DBG("%s: %p, len=%d, flag=%d\n", __func__, data, len, flag);
2316 used = 0;
2317 do {
2318 SMUX_DBG("%s: state %d; %d of %d\n",
2319 __func__, smux.rx_state, used, len);
2320 initial_rx_state = smux.rx_state;
2321
2322 switch (smux.rx_state) {
2323 case SMUX_RX_IDLE:
2324 smux_rx_handle_idle(data, len, &used, flag);
2325 break;
2326 case SMUX_RX_MAGIC:
2327 smux_rx_handle_magic(data, len, &used, flag);
2328 break;
2329 case SMUX_RX_HDR:
2330 smux_rx_handle_hdr(data, len, &used, flag);
2331 break;
2332 case SMUX_RX_PAYLOAD:
2333 smux_rx_handle_pkt_payload(data, len, &used, flag);
2334 break;
2335 default:
2336 SMUX_DBG("%s: invalid state %d\n",
2337 __func__, smux.rx_state);
2338 smux.rx_state = SMUX_RX_IDLE;
2339 break;
2340 }
2341 } while (used < len || smux.rx_state != initial_rx_state);
2342
2343 complete(&w->work_complete);
2344}
2345
2346/**
Eric Holmbergb8435c82012-06-05 14:51:29 -06002347 * RX Retry worker handles retrying get_rx_buffer calls that previously failed
2348 * because the client was not ready (-EAGAIN).
2349 *
2350 * @work Work structure contained in smux_lch_t structure
2351 */
2352static void smux_rx_retry_worker(struct work_struct *work)
2353{
2354 struct smux_lch_t *ch;
2355 struct smux_rx_pkt_retry *retry;
2356 union notifier_metadata metadata;
2357 int tmp;
2358 unsigned long flags;
2359
2360 ch = container_of(work, struct smux_lch_t, rx_retry_work.work);
2361
2362 /* get next retry packet */
2363 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2364 if (ch->local_state != SMUX_LCH_LOCAL_OPENED) {
2365 /* port has been closed - remove all retries */
2366 while (!list_empty(&ch->rx_retry_queue)) {
2367 retry = list_first_entry(&ch->rx_retry_queue,
2368 struct smux_rx_pkt_retry,
2369 rx_retry_list);
2370 smux_remove_rx_retry(ch, retry);
2371 }
2372 }
2373
2374 if (list_empty(&ch->rx_retry_queue)) {
2375 SMUX_DBG("%s: retry list empty for channel %d\n",
2376 __func__, ch->lcid);
2377 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2378 return;
2379 }
2380 retry = list_first_entry(&ch->rx_retry_queue,
2381 struct smux_rx_pkt_retry,
2382 rx_retry_list);
2383 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2384
2385 SMUX_DBG("%s: retrying rx pkt %p\n", __func__, retry);
2386 metadata.read.pkt_priv = 0;
2387 metadata.read.buffer = 0;
2388 tmp = ch->get_rx_buffer(ch->priv,
2389 (void **)&metadata.read.pkt_priv,
2390 (void **)&metadata.read.buffer,
2391 retry->pkt->hdr.payload_len);
2392 if (tmp == 0 && metadata.read.buffer) {
2393 /* have valid RX buffer */
2394 memcpy(metadata.read.buffer, retry->pkt->payload,
2395 retry->pkt->hdr.payload_len);
2396 metadata.read.len = retry->pkt->hdr.payload_len;
2397
2398 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2399 smux_remove_rx_retry(ch, retry);
2400 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2401
2402 schedule_notify(ch->lcid, SMUX_READ_DONE, &metadata);
2403 } else if (tmp == -EAGAIN ||
2404 (tmp == 0 && !metadata.read.buffer)) {
2405 /* retry again */
2406 retry->timeout_in_ms <<= 1;
2407 if (retry->timeout_in_ms > SMUX_RX_RETRY_MAX_MS) {
2408 /* timed out */
2409 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2410 smux_remove_rx_retry(ch, retry);
2411 schedule_notify(ch->lcid, SMUX_READ_FAIL, NULL);
2412 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2413 }
2414 } else {
2415 /* client error - drop packet */
2416 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2417 smux_remove_rx_retry(ch, retry);
2418 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2419
2420 schedule_notify(ch->lcid, SMUX_READ_FAIL, NULL);
2421 }
2422
2423 /* schedule next retry */
2424 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2425 if (!list_empty(&ch->rx_retry_queue)) {
2426 retry = list_first_entry(&ch->rx_retry_queue,
2427 struct smux_rx_pkt_retry,
2428 rx_retry_list);
2429 queue_delayed_work(smux_rx_wq, &ch->rx_retry_work,
2430 msecs_to_jiffies(retry->timeout_in_ms));
2431 }
2432 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2433}
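
/*
 * Each failed get_rx_buffer() attempt above doubles retry->timeout_in_ms;
 * assuming the retry starts at SMUX_RX_RETRY_MIN_MS (1 ms), the client is
 * polled again at roughly 1, 2, 4, ... ms and the packet is dropped with
 * SMUX_READ_FAIL once the doubled timeout exceeds SMUX_RX_RETRY_MAX_MS
 * (1024 ms).
 */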
2434
2435/**
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002436 * Transmit worker handles serializing and transmitting packets onto the
2437 * underlying transport.
2438 *
2439 * @work Work structure (not used)
2440 */
2441static void smux_tx_worker(struct work_struct *work)
2442{
2443 struct smux_pkt_t *pkt;
2444 struct smux_lch_t *ch;
2445 unsigned low_wm_notif;
2446 unsigned lcid;
2447 unsigned long flags;
2448
2449
2450 /*
2451 * Transmit packets in round-robin fashion based upon ready
2452 * channels.
2453 *
2454 * To eliminate the need to hold a lock for the entire
2455 * iteration through the channel ready list, the head of the
2456 * ready-channel list is always the next channel to be
2457 * processed. To send a packet, the first valid packet in
2458 * the head channel is removed and the head channel is then
2459 * rescheduled at the end of the queue by removing it and
2460 * inserting after the tail. The locks can then be released
2461 * while the packet is processed.
2462 */
Eric Holmberged1f00c2012-06-07 09:45:18 -06002463 while (!smux.in_reset) {
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002464 pkt = NULL;
2465 low_wm_notif = 0;
2466
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002467 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002468
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002469 /* handle wakeup if needed */
2470 if (smux.power_state == SMUX_PWR_OFF) {
2471 if (!list_empty(&smux.lch_tx_ready_list) ||
2472 !list_empty(&smux.power_queue)) {
2473 /* data to transmit, do wakeup */
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002474 smux.pwr_wakeup_delay_us = 1;
Eric Holmbergff0b0112012-06-08 15:06:57 -06002475 SMUX_PWR("%s: Power %d->%d\n", __func__,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002476 smux.power_state,
2477 SMUX_PWR_TURNING_ON);
2478 smux.power_state = SMUX_PWR_TURNING_ON;
2479 spin_unlock_irqrestore(&smux.tx_lock_lha2,
2480 flags);
2481 smux_uart_power_on();
2482 queue_work(smux_tx_wq, &smux_wakeup_work);
2483 } else {
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002484 /* no activity -- stay asleep */
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002485 spin_unlock_irqrestore(&smux.tx_lock_lha2,
2486 flags);
2487 }
2488 break;
2489 }
2490
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002491 /* process any pending power packets */
2492 if (!list_empty(&smux.power_queue)) {
2493 pkt = list_first_entry(&smux.power_queue,
2494 struct smux_pkt_t, list);
2495 list_del(&pkt->list);
2496 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2497
2498 /* send the packet */
2499 SMUX_LOG_PKT_TX(pkt);
2500 if (!smux_byte_loopback) {
2501 smux_tx_tty(pkt);
2502 smux_flush_tty();
2503 } else {
2504 smux_tx_loopback(pkt);
2505 }
2506
2507 /* Adjust power state if this is a flush command */
2508 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
2509 if (smux.power_state == SMUX_PWR_TURNING_OFF_FLUSH &&
2510 pkt->hdr.cmd == SMUX_CMD_PWR_CTL &&
2511 (pkt->hdr.flags & SMUX_CMD_PWR_CTL_ACK)) {
Eric Holmbergff0b0112012-06-08 15:06:57 -06002512 SMUX_PWR("%s: Power %d->%d\n", __func__,
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002513 smux.power_state,
2514 SMUX_PWR_OFF_FLUSH);
2515 smux.power_state = SMUX_PWR_OFF_FLUSH;
2516 queue_work(smux_tx_wq, &smux_inactivity_work);
2517 }
2518 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2519
2520 smux_free_pkt(pkt);
2521 continue;
2522 }
2523
2524 /* get the next ready channel */
2525 if (list_empty(&smux.lch_tx_ready_list)) {
2526 /* no ready channels */
2527 SMUX_DBG("%s: no more ready channels, exiting\n",
2528 __func__);
2529 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2530 break;
2531 }
2532 smux.tx_activity_flag = 1;
2533
2534 if (smux.power_state != SMUX_PWR_ON) {
2535 /* channel not ready to transmit */
2536			SMUX_DBG("%s: cannot tx with power state %d\n",
2537 __func__,
2538 smux.power_state);
2539 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2540 break;
2541 }
2542
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002543 /* get the next packet to send and rotate channel list */
2544 ch = list_first_entry(&smux.lch_tx_ready_list,
2545 struct smux_lch_t,
2546 tx_ready_list);
2547
2548 spin_lock(&ch->state_lock_lhb1);
2549 spin_lock(&ch->tx_lock_lhb2);
2550 if (!list_empty(&ch->tx_queue)) {
2551 /*
2552 * If remote TX flow control is enabled or
2553 * the channel is not fully opened, then only
2554 * send command packets.
2555 */
2556 if (ch->tx_flow_control || !IS_FULLY_OPENED(ch)) {
2557 struct smux_pkt_t *curr;
2558 list_for_each_entry(curr, &ch->tx_queue, list) {
2559 if (curr->hdr.cmd != SMUX_CMD_DATA) {
2560 pkt = curr;
2561 break;
2562 }
2563 }
2564 } else {
2565 /* get next cmd/data packet to send */
2566 pkt = list_first_entry(&ch->tx_queue,
2567 struct smux_pkt_t, list);
2568 }
2569 }
2570
2571 if (pkt) {
2572 list_del(&pkt->list);
2573
2574 /* update packet stats */
2575 if (pkt->hdr.cmd == SMUX_CMD_DATA) {
2576 --ch->tx_pending_data_cnt;
2577 if (ch->notify_lwm &&
2578 ch->tx_pending_data_cnt
2579 <= SMUX_WM_LOW) {
2580 ch->notify_lwm = 0;
2581 low_wm_notif = 1;
2582 }
2583 }
2584
2585 /* advance to the next ready channel */
2586 list_rotate_left(&smux.lch_tx_ready_list);
2587 } else {
2588 /* no data in channel to send, remove from ready list */
2589 list_del(&ch->tx_ready_list);
2590 INIT_LIST_HEAD(&ch->tx_ready_list);
2591 }
2592 lcid = ch->lcid;
2593 spin_unlock(&ch->tx_lock_lhb2);
2594 spin_unlock(&ch->state_lock_lhb1);
2595 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2596
2597 if (low_wm_notif)
2598 schedule_notify(lcid, SMUX_LOW_WM_HIT, NULL);
2599
2600 /* send the packet */
2601 smux_tx_pkt(ch, pkt);
2602 smux_free_pkt(pkt);
2603 }
2604}
2605
2606
2607/**********************************************************************/
2608/* Kernel API */
2609/**********************************************************************/
2610
2611/**
2612 * Set or clear channel option using the SMUX_CH_OPTION_* channel
2613 * flags.
2614 *
2615 * @lcid Logical channel ID
2616 * @set Options to set
2617 * @clear Options to clear
2618 *
2619 * @returns 0 for success, < 0 for failure
2620 */
2621int msm_smux_set_ch_option(uint8_t lcid, uint32_t set, uint32_t clear)
2622{
2623 unsigned long flags;
2624 struct smux_lch_t *ch;
2625 int tx_ready = 0;
2626 int ret = 0;
2627
2628 if (smux_assert_lch_id(lcid))
2629 return -ENXIO;
2630
2631 ch = &smux_lch[lcid];
2632 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2633
2634 /* Local loopback mode */
2635 if (set & SMUX_CH_OPTION_LOCAL_LOOPBACK)
2636 ch->local_mode = SMUX_LCH_MODE_LOCAL_LOOPBACK;
2637
2638 if (clear & SMUX_CH_OPTION_LOCAL_LOOPBACK)
2639 ch->local_mode = SMUX_LCH_MODE_NORMAL;
2640
2641 /* Remote loopback mode */
2642 if (set & SMUX_CH_OPTION_REMOTE_LOOPBACK)
2643 ch->local_mode = SMUX_LCH_MODE_REMOTE_LOOPBACK;
2644
2645 if (clear & SMUX_CH_OPTION_REMOTE_LOOPBACK)
2646 ch->local_mode = SMUX_LCH_MODE_NORMAL;
2647
2648 /* Flow control */
2649 if (set & SMUX_CH_OPTION_REMOTE_TX_STOP) {
2650 ch->local_tiocm |= SMUX_CMD_STATUS_FLOW_CNTL;
2651 ret = smux_send_status_cmd(ch);
2652 tx_ready = 1;
2653 }
2654
2655 if (clear & SMUX_CH_OPTION_REMOTE_TX_STOP) {
2656 ch->local_tiocm &= ~SMUX_CMD_STATUS_FLOW_CNTL;
2657 ret = smux_send_status_cmd(ch);
2658 tx_ready = 1;
2659 }
2660
2661 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2662
2663 if (tx_ready)
2664 list_channel(ch);
2665
2666 return ret;
2667}
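
/*
 * Illustrative sketch (not part of the driver): enabling local loopback
 * on a channel for testing via msm_smux_set_ch_option(). The helper name
 * is an assumption for the example only.
 */
static int example_enable_local_loopback(uint8_t lcid)
{
	/* data written on this channel is looped back to the local side */
	return msm_smux_set_ch_option(lcid, SMUX_CH_OPTION_LOCAL_LOOPBACK, 0);
}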
2668
2669/**
2670 * Starts the opening sequence for a logical channel.
2671 *
2672 * @lcid Logical channel ID
2673 * @priv Free for client usage
2674 * @notify Event notification function
2675 * @get_rx_buffer Function used to provide a receive buffer to SMUX
2676 *
2677 * @returns 0 for success, <0 otherwise
2678 *
2679 * A channel must be fully closed (either not previously opened, or
2680 * msm_smux_close() has been called and the SMUX_DISCONNECTED notification
2681 * has been received).
2682 *
2683 * Once the remote side is opened, the client will receive a SMUX_CONNECTED
2684 * event.
2685 */
2686int msm_smux_open(uint8_t lcid, void *priv,
2687 void (*notify)(void *priv, int event_type, const void *metadata),
2688 int (*get_rx_buffer)(void *priv, void **pkt_priv, void **buffer,
2689 int size))
2690{
2691 int ret;
2692 struct smux_lch_t *ch;
2693 struct smux_pkt_t *pkt;
2694 int tx_ready = 0;
2695 unsigned long flags;
2696
2697 if (smux_assert_lch_id(lcid))
2698 return -ENXIO;
2699
2700 ch = &smux_lch[lcid];
2701 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2702
2703 if (ch->local_state == SMUX_LCH_LOCAL_CLOSING) {
2704 ret = -EAGAIN;
2705 goto out;
2706 }
2707
2708 if (ch->local_state != SMUX_LCH_LOCAL_CLOSED) {
2709 pr_err("%s: open lcid %d local state %x invalid\n",
2710 __func__, lcid, ch->local_state);
2711 ret = -EINVAL;
2712 goto out;
2713 }
2714
2715 SMUX_DBG("lcid %d local state 0x%x -> 0x%x\n", lcid,
2716 ch->local_state,
2717 SMUX_LCH_LOCAL_OPENING);
2718
2719 ch->local_state = SMUX_LCH_LOCAL_OPENING;
2720
2721 ch->priv = priv;
2722 ch->notify = notify;
2723 ch->get_rx_buffer = get_rx_buffer;
2724 ret = 0;
2725
2726 /* Send Open Command */
2727 pkt = smux_alloc_pkt();
2728 if (!pkt) {
2729 ret = -ENOMEM;
2730 goto out;
2731 }
2732 pkt->hdr.magic = SMUX_MAGIC;
2733 pkt->hdr.cmd = SMUX_CMD_OPEN_LCH;
2734 pkt->hdr.flags = SMUX_CMD_OPEN_POWER_COLLAPSE;
2735 if (ch->local_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK)
2736 pkt->hdr.flags |= SMUX_CMD_OPEN_REMOTE_LOOPBACK;
2737 pkt->hdr.lcid = lcid;
2738 pkt->hdr.payload_len = 0;
2739 pkt->hdr.pad_len = 0;
2740 smux_tx_queue(pkt, ch, 0);
2741 tx_ready = 1;
2742
2743out:
2744 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2745 if (tx_ready)
2746 list_channel(ch);
2747 return ret;
2748}
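
/*
 * Illustrative sketch (not part of the driver): a minimal client of
 * msm_smux_open(). The example_* names are placeholders, and GFP_ATOMIC
 * is a conservative assumption since the calling context of
 * get_rx_buffer() is not shown in this section.
 */
static void example_notify(void *priv, int event_type, const void *metadata)
{
	const union notifier_metadata *meta = metadata;

	switch (event_type) {
	case SMUX_CONNECTED:
		/* both local and remote sides are open; writes will flow */
		break;
	case SMUX_READ_DONE:
		/* meta->read.buffer holds meta->read.len received bytes */
		kfree(meta->read.buffer);
		break;
	case SMUX_DISCONNECTED:
		/* channel fully closed (or subsystem restart) */
		break;
	default:
		break;
	}
}

static int example_get_rx_buffer(void *priv, void **pkt_priv, void **buffer,
				int size)
{
	*pkt_priv = NULL;
	*buffer = kmalloc(size, GFP_ATOMIC);

	/* returning -EAGAIN defers to the RX retry worker above */
	return *buffer ? 0 : -EAGAIN;
}

static int example_open(uint8_t lcid)
{
	return msm_smux_open(lcid, NULL, example_notify,
			     example_get_rx_buffer);
}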
2749
2750/**
2751 * Starts the closing sequence for a logical channel.
2752 *
2753 * @lcid Logical channel ID
2754 *
2755 * @returns 0 for success, <0 otherwise
2756 *
2757 * Once the close event has been acknowledged by the remote side, the client
2758 * will receive a SMUX_DISCONNECTED notification.
2759 */
2760int msm_smux_close(uint8_t lcid)
2761{
2762 int ret = 0;
2763 struct smux_lch_t *ch;
2764 struct smux_pkt_t *pkt;
2765 int tx_ready = 0;
2766 unsigned long flags;
2767
2768 if (smux_assert_lch_id(lcid))
2769 return -ENXIO;
2770
2771 ch = &smux_lch[lcid];
2772 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2773 ch->local_tiocm = 0x0;
2774 ch->remote_tiocm = 0x0;
2775 ch->tx_pending_data_cnt = 0;
2776 ch->notify_lwm = 0;
2777
2778 /* Purge TX queue */
2779 spin_lock(&ch->tx_lock_lhb2);
Eric Holmberged1f00c2012-06-07 09:45:18 -06002780 smux_purge_ch_tx_queue(ch);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002781 spin_unlock(&ch->tx_lock_lhb2);
2782
2783 /* Send Close Command */
2784 if (ch->local_state == SMUX_LCH_LOCAL_OPENED ||
2785 ch->local_state == SMUX_LCH_LOCAL_OPENING) {
2786 SMUX_DBG("lcid %d local state 0x%x -> 0x%x\n", lcid,
2787 ch->local_state,
2788 SMUX_LCH_LOCAL_CLOSING);
2789
2790 ch->local_state = SMUX_LCH_LOCAL_CLOSING;
2791 pkt = smux_alloc_pkt();
2792 if (pkt) {
2793 pkt->hdr.cmd = SMUX_CMD_CLOSE_LCH;
2794 pkt->hdr.flags = 0;
2795 pkt->hdr.lcid = lcid;
2796 pkt->hdr.payload_len = 0;
2797 pkt->hdr.pad_len = 0;
2798 smux_tx_queue(pkt, ch, 0);
2799 tx_ready = 1;
2800 } else {
2801 pr_err("%s: pkt allocation failed\n", __func__);
2802 ret = -ENOMEM;
2803 }
Eric Holmbergb8435c82012-06-05 14:51:29 -06002804
2805 /* Purge RX retry queue */
2806 if (ch->rx_retry_queue_cnt)
2807 queue_delayed_work(smux_rx_wq, &ch->rx_retry_work, 0);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002808 }
2809 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2810
2811 if (tx_ready)
2812 list_channel(ch);
2813
2814 return ret;
2815}
2816
2817/**
2818 * Write data to a logical channel.
2819 *
2820 * @lcid Logical channel ID
2821 * @pkt_priv Client data that will be returned with the SMUX_WRITE_DONE or
2822 * SMUX_WRITE_FAIL notification.
2823 * @data Data to write
2824 * @len Length of @data
2825 *
2826 * @returns 0 for success, <0 otherwise
2827 *
2828 * Data may be written immediately after msm_smux_open() is called,
2829 * but the data will wait in the transmit queue until the channel has
2830 * been fully opened.
2831 *
2832 * Once the data has been written, the client will receive either a completion
2833 * (SMUX_WRITE_DONE) or a failure notice (SMUX_WRITE_FAIL).
2834 */
2835int msm_smux_write(uint8_t lcid, void *pkt_priv, const void *data, int len)
2836{
2837 struct smux_lch_t *ch;
2838 struct smux_pkt_t *pkt;
2839 int tx_ready = 0;
2840 unsigned long flags;
2841 int ret;
2842
2843 if (smux_assert_lch_id(lcid))
2844 return -ENXIO;
2845
2846 ch = &smux_lch[lcid];
2847 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2848
2849 if (ch->local_state != SMUX_LCH_LOCAL_OPENED &&
2850 ch->local_state != SMUX_LCH_LOCAL_OPENING) {
2851		pr_err("%s: invalid local state %d channel %d\n",
2852 __func__, ch->local_state, lcid);
2853 ret = -EINVAL;
2854 goto out;
2855 }
2856
2857 if (len > SMUX_MAX_PKT_SIZE - sizeof(struct smux_hdr_t)) {
2858 pr_err("%s: payload %d too large\n",
2859 __func__, len);
2860 ret = -E2BIG;
2861 goto out;
2862 }
2863
2864 pkt = smux_alloc_pkt();
2865 if (!pkt) {
2866 ret = -ENOMEM;
2867 goto out;
2868 }
2869
2870 pkt->hdr.cmd = SMUX_CMD_DATA;
2871 pkt->hdr.lcid = lcid;
2872 pkt->hdr.flags = 0;
2873 pkt->hdr.payload_len = len;
2874 pkt->payload = (void *)data;
2875 pkt->priv = pkt_priv;
2876 pkt->hdr.pad_len = 0;
2877
2878 spin_lock(&ch->tx_lock_lhb2);
2879 /* verify high watermark */
2880	SMUX_DBG("%s: pending %d\n", __func__, ch->tx_pending_data_cnt);
2881
2882 if (ch->tx_pending_data_cnt >= SMUX_WM_HIGH) {
2883 pr_err("%s: ch %d high watermark %d exceeded %d\n",
2884 __func__, lcid, SMUX_WM_HIGH,
2885 ch->tx_pending_data_cnt);
2886 ret = -EAGAIN;
2887 goto out_inner;
2888 }
2889
2890 /* queue packet for transmit */
2891 if (++ch->tx_pending_data_cnt == SMUX_WM_HIGH) {
2892 ch->notify_lwm = 1;
2893 pr_err("%s: high watermark hit\n", __func__);
2894 schedule_notify(lcid, SMUX_HIGH_WM_HIT, NULL);
2895 }
2896 list_add_tail(&pkt->list, &ch->tx_queue);
2897
2898 /* add to ready list */
2899 if (IS_FULLY_OPENED(ch))
2900 tx_ready = 1;
2901
2902 ret = 0;
2903
2904out_inner:
2905 spin_unlock(&ch->tx_lock_lhb2);
2906
2907out:
2908 if (ret)
2909 smux_free_pkt(pkt);
2910 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2911
2912 if (tx_ready)
2913 list_channel(ch);
2914
2915 return ret;
2916}
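
/*
 * Illustrative sketch (not part of the driver): queueing a write with
 * msm_smux_write(). The data pointer is stored in the packet
 * (pkt->payload above) rather than copied, so the buffer must stay valid
 * until SMUX_WRITE_DONE or SMUX_WRITE_FAIL hands it back. The helper name
 * is an assumption.
 */
static int example_send(uint8_t lcid, void *buf, int len)
{
	int ret = msm_smux_write(lcid, buf /* pkt_priv */, buf, len);

	if (ret == -EAGAIN) {
		/*
		 * High watermark reached; wait for SMUX_LOW_WM_HIT in the
		 * notify callback before queueing more data.
		 */
	}
	return ret;
}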
2917
2918/**
2919 * Returns true if the TX queue is currently full (high water mark).
2920 *
2921 * @lcid Logical channel ID
2922 * @returns 0 if channel is not full
2923 * 1 if it is full
2924 * < 0 for error
2925 */
2926int msm_smux_is_ch_full(uint8_t lcid)
2927{
2928 struct smux_lch_t *ch;
2929 unsigned long flags;
2930 int is_full = 0;
2931
2932 if (smux_assert_lch_id(lcid))
2933 return -ENXIO;
2934
2935 ch = &smux_lch[lcid];
2936
2937 spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
2938 if (ch->tx_pending_data_cnt >= SMUX_WM_HIGH)
2939 is_full = 1;
2940 spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);
2941
2942 return is_full;
2943}
2944
2945/**
2946 * Returns true if the TX queue has space for more packets (it is at or
2947 * below the low water mark).
2948 *
2949 * @lcid Logical channel ID
2950 * @returns 0 if channel is above low watermark
2951 * 1 if it's at or below the low watermark
2952 * < 0 for error
2953 */
2954int msm_smux_is_ch_low(uint8_t lcid)
2955{
2956 struct smux_lch_t *ch;
2957 unsigned long flags;
2958 int is_low = 0;
2959
2960 if (smux_assert_lch_id(lcid))
2961 return -ENXIO;
2962
2963 ch = &smux_lch[lcid];
2964
2965 spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
2966 if (ch->tx_pending_data_cnt <= SMUX_WM_LOW)
2967 is_low = 1;
2968 spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);
2969
2970 return is_low;
2971}
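
/*
 * Illustrative sketch (not part of the driver): polling-style flow
 * control with the watermark queries above, as an alternative to reacting
 * to the SMUX_HIGH_WM_HIT / SMUX_LOW_WM_HIT notifications. The helper
 * name is an assumption.
 */
static int example_tx_queue_has_room(uint8_t lcid)
{
	int full = msm_smux_is_ch_full(lcid);

	return full == 0;	/* < 0 (error) and 1 (full) both mean no room */
}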
2972
2973/**
2974 * Send TIOCM status update.
2975 *
2976 * @ch Channel for update
2977 *
2978 * @returns 0 for success, <0 for failure
2979 *
2980 * Channel lock must be held before calling.
2981 */
2982static int smux_send_status_cmd(struct smux_lch_t *ch)
2983{
2984 struct smux_pkt_t *pkt;
2985
2986 if (!ch)
2987 return -EINVAL;
2988
2989 pkt = smux_alloc_pkt();
2990 if (!pkt)
2991 return -ENOMEM;
2992
2993 pkt->hdr.lcid = ch->lcid;
2994 pkt->hdr.cmd = SMUX_CMD_STATUS;
2995 pkt->hdr.flags = ch->local_tiocm;
2996 pkt->hdr.payload_len = 0;
2997 pkt->hdr.pad_len = 0;
2998 smux_tx_queue(pkt, ch, 0);
2999
3000 return 0;
3001}
3002
3003/**
3004 * Internal helper function for getting the TIOCM status with
3005 * state_lock_lhb1 already locked.
3006 *
3007 * @ch Channel pointer
3008 *
3009 * @returns TIOCM status
3010 */
3011static long msm_smux_tiocm_get_atomic(struct smux_lch_t *ch)
3012{
3013 long status = 0x0;
3014
3015 status |= (ch->remote_tiocm & SMUX_CMD_STATUS_RTC) ? TIOCM_DSR : 0;
3016 status |= (ch->remote_tiocm & SMUX_CMD_STATUS_RTR) ? TIOCM_CTS : 0;
3017 status |= (ch->remote_tiocm & SMUX_CMD_STATUS_RI) ? TIOCM_RI : 0;
3018 status |= (ch->remote_tiocm & SMUX_CMD_STATUS_DCD) ? TIOCM_CD : 0;
3019
3020 status |= (ch->local_tiocm & SMUX_CMD_STATUS_RTC) ? TIOCM_DTR : 0;
3021 status |= (ch->local_tiocm & SMUX_CMD_STATUS_RTR) ? TIOCM_RTS : 0;
3022
3023 return status;
3024}
3025
3026/**
3027 * Get the TIOCM status bits.
3028 *
3029 * @lcid Logical channel ID
3030 *
3031 * @returns >= 0 TIOCM status bits
3032 * < 0 Error condition
3033 */
3034long msm_smux_tiocm_get(uint8_t lcid)
3035{
3036 struct smux_lch_t *ch;
3037 unsigned long flags;
3038 long status = 0x0;
3039
3040 if (smux_assert_lch_id(lcid))
3041 return -ENXIO;
3042
3043 ch = &smux_lch[lcid];
3044 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
3045 status = msm_smux_tiocm_get_atomic(ch);
3046 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
3047
3048 return status;
3049}
3050
3051/**
3052 * Set/clear the TIOCM status bits.
3053 *
3054 * @lcid Logical channel ID
3055 * @set Bits to set
3056 * @clear Bits to clear
3057 *
3058 * @returns 0 for success; < 0 for failure
3059 *
3060 * If a bit is specified in both the @set and @clear masks, then the clear bit
3061 * definition will dominate and the bit will be cleared.
3062 */
3063int msm_smux_tiocm_set(uint8_t lcid, uint32_t set, uint32_t clear)
3064{
3065 struct smux_lch_t *ch;
3066 unsigned long flags;
3067 uint8_t old_status;
3068 uint8_t status_set = 0x0;
3069 uint8_t status_clear = 0x0;
3070 int tx_ready = 0;
3071 int ret = 0;
3072
3073 if (smux_assert_lch_id(lcid))
3074 return -ENXIO;
3075
3076 ch = &smux_lch[lcid];
3077 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
3078
3079 status_set |= (set & TIOCM_DTR) ? SMUX_CMD_STATUS_RTC : 0;
3080 status_set |= (set & TIOCM_RTS) ? SMUX_CMD_STATUS_RTR : 0;
3081 status_set |= (set & TIOCM_RI) ? SMUX_CMD_STATUS_RI : 0;
3082 status_set |= (set & TIOCM_CD) ? SMUX_CMD_STATUS_DCD : 0;
3083
3084 status_clear |= (clear & TIOCM_DTR) ? SMUX_CMD_STATUS_RTC : 0;
3085 status_clear |= (clear & TIOCM_RTS) ? SMUX_CMD_STATUS_RTR : 0;
3086 status_clear |= (clear & TIOCM_RI) ? SMUX_CMD_STATUS_RI : 0;
3087 status_clear |= (clear & TIOCM_CD) ? SMUX_CMD_STATUS_DCD : 0;
3088
3089 old_status = ch->local_tiocm;
3090 ch->local_tiocm |= status_set;
3091 ch->local_tiocm &= ~status_clear;
3092
3093 if (ch->local_tiocm != old_status) {
3094 ret = smux_send_status_cmd(ch);
3095 tx_ready = 1;
3096 }
3097 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
3098
3099 if (tx_ready)
3100 list_channel(ch);
3101
3102 return ret;
3103}
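
/*
 * Illustrative sketch (not part of the driver): asserting DTR and RTS on
 * a channel with msm_smux_tiocm_set(); these map to SMUX_CMD_STATUS_RTC
 * and SMUX_CMD_STATUS_RTR above. The helper name is an assumption.
 */
static int example_assert_dtr_rts(uint8_t lcid)
{
	return msm_smux_tiocm_set(lcid, TIOCM_DTR | TIOCM_RTS, 0);
}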
3104
3105/**********************************************************************/
Eric Holmberged1f00c2012-06-07 09:45:18 -06003106/* Subsystem Restart */
3107/**********************************************************************/
3108static struct notifier_block ssr_notifier = {
3109 .notifier_call = ssr_notifier_cb,
3110};
3111
3112/**
3113 * Handle Subsystem Restart (SSR) notifications.
3114 *
3115 * @this Pointer to ssr_notifier
3116 * @code SSR Code
3117 * @data Data pointer (not used)
3118 */
3119static int ssr_notifier_cb(struct notifier_block *this,
3120 unsigned long code,
3121 void *data)
3122{
3123 unsigned long flags;
3124 int power_off_uart = 0;
3125
3126 if (code != SUBSYS_AFTER_SHUTDOWN)
3127 return NOTIFY_DONE;
3128
3129 /* Cleanup channels */
3130 smux_lch_purge();
3131
3132 /* Power-down UART */
3133 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
3134 if (smux.power_state != SMUX_PWR_OFF) {
Eric Holmbergff0b0112012-06-08 15:06:57 -06003135 SMUX_PWR("%s: SSR - turning off UART\n", __func__);
Eric Holmberged1f00c2012-06-07 09:45:18 -06003136 smux.power_state = SMUX_PWR_OFF;
3137 power_off_uart = 1;
3138 }
3139 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
3140
3141 if (power_off_uart)
3142 smux_uart_power_off();
3143
3144 return NOTIFY_DONE;
3145}
3146
3147/**********************************************************************/
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003148/* Line Discipline Interface */
3149/**********************************************************************/
Eric Holmberged1f00c2012-06-07 09:45:18 -06003150static void smux_pdev_release(struct device *dev)
3151{
3152 struct platform_device *pdev;
3153
3154 pdev = container_of(dev, struct platform_device, dev);
3155 SMUX_DBG("%s: releasing pdev %p '%s'\n", __func__, pdev, pdev->name);
3156 memset(&pdev->dev, 0x0, sizeof(pdev->dev));
3157}
3158
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003159static int smuxld_open(struct tty_struct *tty)
3160{
3161 int i;
3162 int tmp;
3163 unsigned long flags;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003164
3165 if (!smux.is_initialized)
3166 return -ENODEV;
3167
Eric Holmberged1f00c2012-06-07 09:45:18 -06003168 mutex_lock(&smux.mutex_lha0);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003169 if (smux.ld_open_count) {
3170 pr_err("%s: %p multiple instances not supported\n",
3171 __func__, tty);
Eric Holmberged1f00c2012-06-07 09:45:18 -06003172 mutex_unlock(&smux.mutex_lha0);
Eric Holmberg902c51e2012-05-29 12:12:16 -06003173 return -EEXIST;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003174 }
3175
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003176 if (tty->ops->write == NULL) {
Eric Holmberged1f00c2012-06-07 09:45:18 -06003177		pr_err("%s: tty->ops->write is NULL\n", __func__);
3178 mutex_unlock(&smux.mutex_lha0);
Eric Holmberg902c51e2012-05-29 12:12:16 -06003179 return -EINVAL;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003180 }
3181
3182 /* connect to TTY */
Eric Holmberged1f00c2012-06-07 09:45:18 -06003183 ++smux.ld_open_count;
3184 smux.in_reset = 0;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003185 smux.tty = tty;
3186 tty->disc_data = &smux;
3187 tty->receive_room = TTY_RECEIVE_ROOM;
3188 tty_driver_flush_buffer(tty);
3189
3190 /* power-down the UART if we are idle */
Eric Holmberged1f00c2012-06-07 09:45:18 -06003191 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003192 if (smux.power_state == SMUX_PWR_OFF) {
Eric Holmbergff0b0112012-06-08 15:06:57 -06003193 SMUX_PWR("%s: powering off uart\n", __func__);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003194 smux.power_state = SMUX_PWR_OFF_FLUSH;
Eric Holmberged1f00c2012-06-07 09:45:18 -06003195 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003196 queue_work(smux_tx_wq, &smux_inactivity_work);
3197 } else {
Eric Holmberged1f00c2012-06-07 09:45:18 -06003198 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003199 }
3200
3201 /* register platform devices */
3202 for (i = 0; i < ARRAY_SIZE(smux_devs); ++i) {
Eric Holmberged1f00c2012-06-07 09:45:18 -06003203 SMUX_DBG("%s: register pdev '%s'\n",
3204 __func__, smux_devs[i].name);
3205 smux_devs[i].dev.release = smux_pdev_release;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003206 tmp = platform_device_register(&smux_devs[i]);
3207 if (tmp)
3208 pr_err("%s: error %d registering device %s\n",
3209 __func__, tmp, smux_devs[i].name);
3210 }
Eric Holmberged1f00c2012-06-07 09:45:18 -06003211 mutex_unlock(&smux.mutex_lha0);
Eric Holmberg902c51e2012-05-29 12:12:16 -06003212 return 0;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003213}
3214
3215static void smuxld_close(struct tty_struct *tty)
3216{
3217 unsigned long flags;
Eric Holmberged1f00c2012-06-07 09:45:18 -06003218 int power_up_uart = 0;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003219 int i;
3220
Eric Holmberged1f00c2012-06-07 09:45:18 -06003221 SMUX_DBG("%s: ldisc unload\n", __func__);
3222 mutex_lock(&smux.mutex_lha0);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003223 if (smux.ld_open_count <= 0) {
3224 pr_err("%s: invalid ld count %d\n", __func__,
3225 smux.ld_open_count);
Eric Holmberged1f00c2012-06-07 09:45:18 -06003226 mutex_unlock(&smux.mutex_lha0);
Eric Holmberg902c51e2012-05-29 12:12:16 -06003227 return;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003228 }
Eric Holmberged1f00c2012-06-07 09:45:18 -06003229 smux.in_reset = 1;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003230 --smux.ld_open_count;
Eric Holmberged1f00c2012-06-07 09:45:18 -06003231
3232 /* Cleanup channels */
3233 smux_lch_purge();
3234
3235 /* Unregister platform devices */
3236 for (i = 0; i < ARRAY_SIZE(smux_devs); ++i) {
3237 SMUX_DBG("%s: unregister pdev '%s'\n",
3238 __func__, smux_devs[i].name);
3239 platform_device_unregister(&smux_devs[i]);
3240 }
3241
3242 /* Schedule UART power-up if it's down */
3243 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
Eric Holmbergffddd4c2012-06-08 12:37:51 -06003244 if (smux.power_state == SMUX_PWR_OFF)
Eric Holmberged1f00c2012-06-07 09:45:18 -06003245 power_up_uart = 1;
Eric Holmbergffddd4c2012-06-08 12:37:51 -06003246 smux.power_state = SMUX_PWR_OFF;
Eric Holmberged1f00c2012-06-07 09:45:18 -06003247 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
3248
3249 if (power_up_uart)
3250 smux_uart_power_on();
3251
3252 /* Disconnect from TTY */
3253 smux.tty = NULL;
3254 mutex_unlock(&smux.mutex_lha0);
3255 SMUX_DBG("%s: ldisc complete\n", __func__);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003256}
3257
3258/**
3259 * Receive data from TTY Line Discipline.
3260 *
3261 * @tty TTY structure
3262 * @cp Character data
3263 * @fp Flag data
3264 * @count Size of character and flag data
3265 */
3266void smuxld_receive_buf(struct tty_struct *tty, const unsigned char *cp,
3267 char *fp, int count)
3268{
3269 int i;
3270 int last_idx = 0;
3271 const char *tty_name = NULL;
3272 char *f;
3273
3274 if (smux_debug_mask & MSM_SMUX_DEBUG)
3275 print_hex_dump(KERN_INFO, "smux tty rx: ", DUMP_PREFIX_OFFSET,
3276 16, 1, cp, count, true);
3277
3278 /* verify error flags */
3279 for (i = 0, f = fp; i < count; ++i, ++f) {
3280 if (*f != TTY_NORMAL) {
3281 if (tty)
3282 tty_name = tty->name;
3283 pr_err("%s: TTY %s Error %d (%s)\n", __func__,
3284 tty_name, *f, tty_flag_to_str(*f));
3285
3286 /* feed all previous valid data to the parser */
3287 smux_rx_state_machine(cp + last_idx, i - last_idx,
3288 TTY_NORMAL);
3289
3290 /* feed bad data to parser */
3291 smux_rx_state_machine(cp + i, 1, *f);
3292 last_idx = i + 1;
3293 }
3294 }
3295
3296 /* feed data to RX state machine */
3297 smux_rx_state_machine(cp + last_idx, count - last_idx, TTY_NORMAL);
3298}
3299
3300static void smuxld_flush_buffer(struct tty_struct *tty)
3301{
3302 pr_err("%s: not supported\n", __func__);
3303}
3304
3305static ssize_t smuxld_chars_in_buffer(struct tty_struct *tty)
3306{
3307 pr_err("%s: not supported\n", __func__);
3308 return -ENODEV;
3309}
3310
3311static ssize_t smuxld_read(struct tty_struct *tty, struct file *file,
3312 unsigned char __user *buf, size_t nr)
3313{
3314 pr_err("%s: not supported\n", __func__);
3315 return -ENODEV;
3316}
3317
3318static ssize_t smuxld_write(struct tty_struct *tty, struct file *file,
3319 const unsigned char *buf, size_t nr)
3320{
3321 pr_err("%s: not supported\n", __func__);
3322 return -ENODEV;
3323}
3324
3325static int smuxld_ioctl(struct tty_struct *tty, struct file *file,
3326 unsigned int cmd, unsigned long arg)
3327{
3328 pr_err("%s: not supported\n", __func__);
3329 return -ENODEV;
3330}
3331
3332static unsigned int smuxld_poll(struct tty_struct *tty, struct file *file,
3333 struct poll_table_struct *tbl)
3334{
3335 pr_err("%s: not supported\n", __func__);
3336 return -ENODEV;
3337}
3338
3339static void smuxld_write_wakeup(struct tty_struct *tty)
3340{
3341 pr_err("%s: not supported\n", __func__);
3342}
3343
3344static struct tty_ldisc_ops smux_ldisc_ops = {
3345 .owner = THIS_MODULE,
3346 .magic = TTY_LDISC_MAGIC,
3347 .name = "n_smux",
3348 .open = smuxld_open,
3349 .close = smuxld_close,
3350 .flush_buffer = smuxld_flush_buffer,
3351 .chars_in_buffer = smuxld_chars_in_buffer,
3352 .read = smuxld_read,
3353 .write = smuxld_write,
3354 .ioctl = smuxld_ioctl,
3355 .poll = smuxld_poll,
3356 .receive_buf = smuxld_receive_buf,
3357 .write_wakeup = smuxld_write_wakeup
3358};
3359
3360static int __init smux_init(void)
3361{
3362 int ret;
3363
Eric Holmberged1f00c2012-06-07 09:45:18 -06003364 mutex_init(&smux.mutex_lha0);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003365
3366 spin_lock_init(&smux.rx_lock_lha1);
3367 smux.rx_state = SMUX_RX_IDLE;
3368 smux.power_state = SMUX_PWR_OFF;
3369 smux.pwr_wakeup_delay_us = 1;
3370 smux.powerdown_enabled = 0;
Eric Holmbergffddd4c2012-06-08 12:37:51 -06003371 INIT_LIST_HEAD(&smux.power_queue);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003372 smux.rx_activity_flag = 0;
3373 smux.tx_activity_flag = 0;
3374 smux.recv_len = 0;
3375 smux.tty = NULL;
3376 smux.ld_open_count = 0;
3377 smux.in_reset = 0;
3378 smux.is_initialized = 1;
3379 smux_byte_loopback = 0;
3380
3381 spin_lock_init(&smux.tx_lock_lha2);
3382 INIT_LIST_HEAD(&smux.lch_tx_ready_list);
3383
3384 ret = tty_register_ldisc(N_SMUX, &smux_ldisc_ops);
3385 if (ret != 0) {
3386 pr_err("%s: error %d registering line discipline\n",
3387 __func__, ret);
3388 return ret;
3389 }
3390
Eric Holmberged1f00c2012-06-07 09:45:18 -06003391 subsys_notif_register_notifier("qsc", &ssr_notifier);
3392
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003393 ret = lch_init();
3394 if (ret != 0) {
3395 pr_err("%s: lch_init failed\n", __func__);
3396 return ret;
3397 }
3398
3399 return 0;
3400}
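
/*
 * The line discipline registered in smux_init() above is attached to the
 * HS UART from userspace with the TIOCSETD ioctl. An illustrative
 * userspace sketch (the device path and the visibility of N_SMUX's
 * numeric value to userspace are assumptions):
 *
 *	int ldisc = N_SMUX;
 *	int fd = open("/dev/ttyHS0", O_RDWR | O_NOCTTY);
 *
 *	if (fd < 0 || ioctl(fd, TIOCSETD, &ldisc) < 0)
 *		perror("attach n_smux");
 *
 * A successful ioctl invokes smuxld_open(); the descriptor must remain
 * open while SMUX is in use, and closing it invokes smuxld_close().
 */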
3401
3402static void __exit smux_exit(void)
3403{
3404 int ret;
3405
3406 ret = tty_unregister_ldisc(N_SMUX);
3407 if (ret != 0) {
3408		pr_err("%s: error %d unregistering line discipline\n",
3409 __func__, ret);
3410 return;
3411 }
3412}
3413
3414module_init(smux_init);
3415module_exit(smux_exit);
3416
3417MODULE_DESCRIPTION("Serial Mux TTY Line Discipline");
3418MODULE_LICENSE("GPL v2");
3419MODULE_ALIAS_LDISC(N_SMUX);