/* drivers/tty/n_smux.c
 *
 * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/tty_driver.h>
#include <linux/smux.h>
#include <linux/list.h>
#include <linux/kfifo.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <mach/msm_serial_hs.h>
#include "smux_private.h"
#include "smux_loopback.h"

#define SMUX_NOTIFY_FIFO_SIZE 128
#define SMUX_TX_QUEUE_SIZE 256
#define SMUX_GET_RX_BUFF_MAX_RETRY_CNT 2
#define SMUX_WM_LOW 2
#define SMUX_WM_HIGH 4
#define SMUX_PKT_LOG_SIZE 80

/* Maximum size we can accept in a single RX buffer */
#define TTY_RECEIVE_ROOM 65536
#define TTY_BUFFER_FULL_WAIT_MS 50

/* maximum sleep time between wakeup attempts */
#define SMUX_WAKEUP_DELAY_MAX (1 << 20)

/* minimum delay for scheduling delayed work */
#define SMUX_WAKEUP_DELAY_MIN (1 << 15)
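
/*
 * The wakeup delays above are in microseconds: the retry interval doubles
 * from pwr_wakeup_delay_us up to SMUX_WAKEUP_DELAY_MAX (1 << 20 us, roughly
 * one second). Below SMUX_WAKEUP_DELAY_MIN (1 << 15 us, roughly 33 ms) the
 * wakeup worker busy-waits with usleep_range(); at or above it, the retry
 * is deferred to smux_wakeup_delayed_work instead (see smux_wakeup_worker()).
 */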

/* inactivity timeout for no rx/tx activity */
#define SMUX_INACTIVITY_TIMEOUT_MS 1000

enum {
	MSM_SMUX_DEBUG = 1U << 0,
	MSM_SMUX_INFO = 1U << 1,
	MSM_SMUX_POWER_INFO = 1U << 2,
	MSM_SMUX_PKT = 1U << 3,
};

static int smux_debug_mask;
module_param_named(debug_mask, smux_debug_mask,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);

/* Simulated wakeup used for testing */
int smux_byte_loopback;
module_param_named(byte_loopback, smux_byte_loopback,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);
int smux_simulate_wakeup_delay = 1;
module_param_named(simulate_wakeup_delay, smux_simulate_wakeup_delay,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);

#define SMUX_DBG(x...) do { \
	if (smux_debug_mask & MSM_SMUX_DEBUG) \
		pr_info(x); \
} while (0)

#define SMUX_LOG_PKT_RX(pkt) do { \
	if (smux_debug_mask & MSM_SMUX_PKT) \
		smux_log_pkt(pkt, 1); \
} while (0)

#define SMUX_LOG_PKT_TX(pkt) do { \
	if (smux_debug_mask & MSM_SMUX_PKT) \
		smux_log_pkt(pkt, 0); \
} while (0)

/**
 * Return true if channel is fully opened (both
 * local and remote sides are in the OPENED state).
 */
#define IS_FULLY_OPENED(ch) \
	(ch && (ch)->local_state == SMUX_LCH_LOCAL_OPENED \
	 && (ch)->remote_state == SMUX_LCH_REMOTE_OPENED)

static struct platform_device smux_devs[] = {
	{.name = "SMUX_CTL", .id = -1},
	{.name = "SMUX_RMNET", .id = -1},
	{.name = "SMUX_DUN_DATA_HSUART", .id = 0},
	{.name = "SMUX_RMNET_DATA_HSUART", .id = 1},
	{.name = "SMUX_RMNET_CTL_HSUART", .id = 0},
	{.name = "SMUX_DIAG", .id = -1},
};

enum {
	SMUX_CMD_STATUS_RTC = 1 << 0,
	SMUX_CMD_STATUS_RTR = 1 << 1,
	SMUX_CMD_STATUS_RI = 1 << 2,
	SMUX_CMD_STATUS_DCD = 1 << 3,
	SMUX_CMD_STATUS_FLOW_CNTL = 1 << 4,
};

/* Channel mode */
enum {
	SMUX_LCH_MODE_NORMAL,
	SMUX_LCH_MODE_LOCAL_LOOPBACK,
	SMUX_LCH_MODE_REMOTE_LOOPBACK,
};

enum {
	SMUX_RX_IDLE,
	SMUX_RX_MAGIC,
	SMUX_RX_HDR,
	SMUX_RX_PAYLOAD,
	SMUX_RX_FAILURE,
};
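
/*
 * RX byte-stream parsing flow (see smux_rx_worker() and the
 * smux_rx_handle_*() helpers below):
 *
 *	IDLE --(SMUX_MAGIC_WORD1)--> MAGIC --(SMUX_MAGIC_WORD2)--> HDR
 *	HDR --(full smux_hdr_t received)--> PAYLOAD
 *	PAYLOAD --(payload + padding received, packet dispatched)--> IDLE
 *
 * Single wakeup bytes (SMUX_WAKEUP_REQ/ACK) are consumed in IDLE without
 * leaving the state; a TTY error in any state past IDLE enters FAILURE.
 */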

/**
 * Power states.
 *
 * The _FLUSH states are internal transitional states and are not part of the
 * official state machine.
 */
enum {
	SMUX_PWR_OFF,
	SMUX_PWR_TURNING_ON,
	SMUX_PWR_ON,
	SMUX_PWR_TURNING_OFF_FLUSH,
	SMUX_PWR_TURNING_OFF,
	SMUX_PWR_OFF_FLUSH,
};
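
/*
 * Approximate power sequencing as implemented by the handlers and workers
 * below (the OFF -> TURNING_ON edge happens in the TX path):
 *
 *	OFF -> TURNING_ON -> ON                 (wakeup request/ack exchange)
 *	ON -> TURNING_OFF                       (local inactivity, PWR_CTL sent)
 *	TURNING_OFF -> OFF_FLUSH -> OFF         (PWR_CTL ACK received, UART off)
 *	ON/TURNING_OFF -> TURNING_OFF_FLUSH -> OFF
 *	                                        (remote sleep request ACKed)
 */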

/**
 * Logical Channel Structure. One instance per channel.
 *
 * Locking Hierarchy
 * Each lock has a suffix that describes its locking level. If multiple locks
 * are required, only increasing lock hierarchy numbers may be locked, which
 * ensures that deadlocks are avoided.
 *
 * Locking Example
 * If state_lock_lhb1 is currently held and the TX list needs to be
 * manipulated, then tx_lock_lhb2 may be locked since its locking hierarchy
 * is greater. However, if tx_lock_lhb2 is held, then state_lock_lhb1 may
 * not be acquired since it would result in a deadlock.
 *
 * Note that the Line Discipline locks (*_lha) should always be acquired
 * before the logical channel locks.
 */
struct smux_lch_t {
	/* channel state */
	spinlock_t state_lock_lhb1;
	uint8_t lcid;
	unsigned local_state;
	unsigned local_mode;
	uint8_t local_tiocm;

	unsigned remote_state;
	unsigned remote_mode;
	uint8_t remote_tiocm;

	int tx_flow_control;

	/* client callbacks and private data */
	void *priv;
	void (*notify)(void *priv, int event_type, const void *metadata);
	int (*get_rx_buffer)(void *priv, void **pkt_priv, void **buffer,
								int size);

	/* TX Info */
	spinlock_t tx_lock_lhb2;
	struct list_head tx_queue;
	struct list_head tx_ready_list;
	unsigned tx_pending_data_cnt;
	unsigned notify_lwm;
};
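
/*
 * Illustrative sketch of the lock ordering described above, for a
 * hypothetical caller that needs both the channel state and its TX queue:
 *
 *	spin_lock_irqsave(&ch->state_lock_lhb1, flags);	<-- lhb1 first
 *	spin_lock(&ch->tx_lock_lhb2);			<-- then lhb2
 *	...
 *	spin_unlock(&ch->tx_lock_lhb2);
 *	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
 *
 * Taking the locks in the reverse order would invert the hierarchy and can
 * deadlock against any path that follows it.
 */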

union notifier_metadata {
	struct smux_meta_disconnected disconnected;
	struct smux_meta_read read;
	struct smux_meta_write write;
	struct smux_meta_tiocm tiocm;
};

struct smux_notify_handle {
	void (*notify)(void *priv, int event_type, const void *metadata);
	void *priv;
	int event_type;
	union notifier_metadata *metadata;
};

/**
 * Receive worker data structure.
 *
 * One instance is created for every call to smux_rx_state_machine.
 */
struct smux_rx_worker_data {
	const unsigned char *data;
	int len;
	int flag;

	struct work_struct work;
	struct completion work_complete;
};

/**
 * Line discipline and module structure.
 *
 * Only one instance since multiple instances of line discipline are not
 * allowed.
 */
struct smux_ldisc_t {
	spinlock_t lock_lha0;

	int is_initialized;
	int in_reset;
	int ld_open_count;
	struct tty_struct *tty;

	/* RX State Machine (single-threaded access by smux_rx_wq) */
	unsigned char recv_buf[SMUX_MAX_PKT_SIZE];
	unsigned int recv_len;
	unsigned int pkt_remain;
	unsigned rx_state;

	/* RX Activity - accessed by multiple threads */
	spinlock_t rx_lock_lha1;
	unsigned rx_activity_flag;

	/* TX / Power */
	spinlock_t tx_lock_lha2;
	struct list_head lch_tx_ready_list;
	unsigned power_state;
	unsigned pwr_wakeup_delay_us;
	unsigned tx_activity_flag;
	unsigned powerdown_enabled;
};


/* data structures */
static struct smux_lch_t smux_lch[SMUX_NUM_LOGICAL_CHANNELS];
static struct smux_ldisc_t smux;
static const char *tty_error_type[] = {
	[TTY_NORMAL] = "normal",
	[TTY_OVERRUN] = "overrun",
	[TTY_BREAK] = "break",
	[TTY_PARITY] = "parity",
	[TTY_FRAME] = "framing",
};

static const char *smux_cmds[] = {
	[SMUX_CMD_DATA] = "DATA",
	[SMUX_CMD_OPEN_LCH] = "OPEN",
	[SMUX_CMD_CLOSE_LCH] = "CLOSE",
	[SMUX_CMD_STATUS] = "STATUS",
	[SMUX_CMD_PWR_CTL] = "PWR",
	[SMUX_CMD_BYTE] = "Raw Byte",
};

static void smux_notify_local_fn(struct work_struct *work);
static DECLARE_WORK(smux_notify_local, smux_notify_local_fn);

static struct workqueue_struct *smux_notify_wq;
static size_t handle_size;
static struct kfifo smux_notify_fifo;
static int queued_fifo_notifications;
static DEFINE_SPINLOCK(notify_lock_lhc1);

static struct workqueue_struct *smux_tx_wq;
static struct workqueue_struct *smux_rx_wq;
static void smux_tx_worker(struct work_struct *work);
static DECLARE_WORK(smux_tx_work, smux_tx_worker);

static void smux_wakeup_worker(struct work_struct *work);
static void smux_rx_worker(struct work_struct *work);
static DECLARE_WORK(smux_wakeup_work, smux_wakeup_worker);
static DECLARE_DELAYED_WORK(smux_wakeup_delayed_work, smux_wakeup_worker);

static void smux_inactivity_worker(struct work_struct *work);
static DECLARE_WORK(smux_inactivity_work, smux_inactivity_worker);
static DECLARE_DELAYED_WORK(smux_delayed_inactivity_work,
		smux_inactivity_worker);

static long msm_smux_tiocm_get_atomic(struct smux_lch_t *ch);
static void list_channel(struct smux_lch_t *ch);
static int smux_send_status_cmd(struct smux_lch_t *ch);
static int smux_dispatch_rx_pkt(struct smux_pkt_t *pkt);

/**
 * Convert TTY Error Flags to string for logging purposes.
 *
 * @flag    TTY_* flag
 * @returns String description or NULL if unknown
 */
static const char *tty_flag_to_str(unsigned flag)
{
	if (flag < ARRAY_SIZE(tty_error_type))
		return tty_error_type[flag];
	return NULL;
}

/**
 * Convert SMUX Command to string for logging purposes.
 *
 * @cmd     SMUX command
 * @returns String description or NULL if unknown
 */
static const char *cmd_to_str(unsigned cmd)
{
	if (cmd < ARRAY_SIZE(smux_cmds))
		return smux_cmds[cmd];
	return NULL;
}

/**
 * Set the reset state due to an unrecoverable failure.
 */
static void smux_enter_reset(void)
{
	pr_err("%s: unrecoverable failure, waiting for ssr\n", __func__);
	smux.in_reset = 1;
}

static int lch_init(void)
{
	unsigned int id;
	struct smux_lch_t *ch;
	int i = 0;

	handle_size = sizeof(struct smux_notify_handle *);

	smux_notify_wq = create_singlethread_workqueue("smux_notify_wq");
	smux_tx_wq = create_singlethread_workqueue("smux_tx_wq");
	smux_rx_wq = create_singlethread_workqueue("smux_rx_wq");

	/* create_singlethread_workqueue() returns NULL on failure */
	if (!smux_notify_wq || !smux_tx_wq || !smux_rx_wq) {
		SMUX_DBG("%s: create_singlethread_workqueue ENOMEM\n",
				__func__);
		return -ENOMEM;
	}

	i |= kfifo_alloc(&smux_notify_fifo,
			SMUX_NOTIFY_FIFO_SIZE * handle_size,
			GFP_KERNEL);
	i |= smux_loopback_init();

	if (i) {
		pr_err("%s: out of memory error\n", __func__);
		return -ENOMEM;
	}

	for (id = 0; id < SMUX_NUM_LOGICAL_CHANNELS; id++) {
		ch = &smux_lch[id];

		spin_lock_init(&ch->state_lock_lhb1);
		ch->lcid = id;
		ch->local_state = SMUX_LCH_LOCAL_CLOSED;
		ch->local_mode = SMUX_LCH_MODE_NORMAL;
		ch->local_tiocm = 0x0;
		ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
		ch->remote_mode = SMUX_LCH_MODE_NORMAL;
		ch->remote_tiocm = 0x0;
		ch->tx_flow_control = 0;
		ch->priv = NULL;
		ch->notify = NULL;
		ch->get_rx_buffer = NULL;

		spin_lock_init(&ch->tx_lock_lhb2);
		INIT_LIST_HEAD(&ch->tx_queue);
		INIT_LIST_HEAD(&ch->tx_ready_list);
		ch->tx_pending_data_cnt = 0;
		ch->notify_lwm = 0;
	}

	return 0;
}

int smux_assert_lch_id(uint32_t lcid)
{
	if (lcid >= SMUX_NUM_LOGICAL_CHANNELS)
		return -ENXIO;
	else
		return 0;
}

/**
 * Log packet information for debug purposes.
 *
 * @pkt     Packet to log
 * @is_recv 1 = RX packet; 0 = TX Packet
 *
 * [DIR][LCID] [LOCAL_STATE][LOCAL_MODE]:[REMOTE_STATE][REMOTE_MODE] PKT Info
 *
 * PKT Info:
 *   [CMD] flags [flags] len [PAYLOAD_LEN]:[PAD_LEN] [Payload hex bytes]
 *
 * Direction:    R = Receive, S = Send
 * Local State:  C = Closed; c = closing; o = opening; O = Opened
 * Local Mode:   L = Local loopback; R = Remote loopback; N = Normal
 * Remote State: C = Closed; O = Opened
 * Remote Mode:  R = Remote loopback; N = Normal
 */
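
/*
 * Example (derived from the format string below): a received 4-byte DATA
 * packet on a fully-opened, normal-mode channel 2 would log as
 *
 *	smux: R2 ON:ON DATA flags 0 len 4:0 de ad be ef
 */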
static void smux_log_pkt(struct smux_pkt_t *pkt, int is_recv)
{
	char logbuf[SMUX_PKT_LOG_SIZE];
	char cmd_extra[16];
	int i = 0;
	int count;
	int len;
	char local_state;
	char local_mode;
	char remote_state;
	char remote_mode;
	struct smux_lch_t *ch;
	unsigned char *data;

	ch = &smux_lch[pkt->hdr.lcid];

	switch (ch->local_state) {
	case SMUX_LCH_LOCAL_CLOSED:
		local_state = 'C';
		break;
	case SMUX_LCH_LOCAL_OPENING:
		local_state = 'o';
		break;
	case SMUX_LCH_LOCAL_OPENED:
		local_state = 'O';
		break;
	case SMUX_LCH_LOCAL_CLOSING:
		local_state = 'c';
		break;
	default:
		local_state = 'U';
		break;
	}

	switch (ch->local_mode) {
	case SMUX_LCH_MODE_LOCAL_LOOPBACK:
		local_mode = 'L';
		break;
	case SMUX_LCH_MODE_REMOTE_LOOPBACK:
		local_mode = 'R';
		break;
	case SMUX_LCH_MODE_NORMAL:
		local_mode = 'N';
		break;
	default:
		local_mode = 'U';
		break;
	}

	switch (ch->remote_state) {
	case SMUX_LCH_REMOTE_CLOSED:
		remote_state = 'C';
		break;
	case SMUX_LCH_REMOTE_OPENED:
		remote_state = 'O';
		break;
	default:
		remote_state = 'U';
		break;
	}

	switch (ch->remote_mode) {
	case SMUX_LCH_MODE_REMOTE_LOOPBACK:
		remote_mode = 'R';
		break;
	case SMUX_LCH_MODE_NORMAL:
		remote_mode = 'N';
		break;
	default:
		remote_mode = 'U';
		break;
	}

	/* determine command type (ACK, etc) */
	cmd_extra[0] = '\0';
	switch (pkt->hdr.cmd) {
	case SMUX_CMD_OPEN_LCH:
		if (pkt->hdr.flags & SMUX_CMD_OPEN_ACK)
			snprintf(cmd_extra, sizeof(cmd_extra), " ACK");
		break;
	case SMUX_CMD_CLOSE_LCH:
		if (pkt->hdr.flags & SMUX_CMD_CLOSE_ACK)
			snprintf(cmd_extra, sizeof(cmd_extra), " ACK");
		break;
	}

	i += snprintf(logbuf + i, SMUX_PKT_LOG_SIZE - i,
			"smux: %c%d %c%c:%c%c %s%s flags %x len %d:%d ",
			is_recv ? 'R' : 'S', pkt->hdr.lcid,
			local_state, local_mode,
			remote_state, remote_mode,
			cmd_to_str(pkt->hdr.cmd), cmd_extra, pkt->hdr.flags,
			pkt->hdr.payload_len, pkt->hdr.pad_len);

	len = (pkt->hdr.payload_len > 16) ? 16 : pkt->hdr.payload_len;
	data = (unsigned char *)pkt->payload;
	for (count = 0; count < len; count++)
		i += snprintf(logbuf + i, SMUX_PKT_LOG_SIZE - i,
				"%02x ", (unsigned)data[count]);

	pr_info("%s\n", logbuf);
}

static void smux_notify_local_fn(struct work_struct *work)
{
	struct smux_notify_handle *notify_handle = NULL;
	union notifier_metadata *metadata = NULL;
	unsigned long flags;
	int i;

	for (;;) {
		/* retrieve notification */
		spin_lock_irqsave(&notify_lock_lhc1, flags);
		if (kfifo_len(&smux_notify_fifo) >= handle_size) {
			i = kfifo_out(&smux_notify_fifo,
					&notify_handle,
					handle_size);
			if (i != handle_size) {
				pr_err("%s: unable to retrieve handle %d expected %zu\n",
						__func__, i, handle_size);
				spin_unlock_irqrestore(&notify_lock_lhc1,
						flags);
				break;
			}
		} else {
			spin_unlock_irqrestore(&notify_lock_lhc1, flags);
			break;
		}
		--queued_fifo_notifications;
		spin_unlock_irqrestore(&notify_lock_lhc1, flags);

		/* notify client */
		metadata = notify_handle->metadata;
		notify_handle->notify(notify_handle->priv,
				notify_handle->event_type,
				metadata);

		kfree(metadata);
		kfree(notify_handle);
	}
}

/**
 * Initialize an existing packet.
 */
void smux_init_pkt(struct smux_pkt_t *pkt)
{
	memset(pkt, 0x0, sizeof(*pkt));
	pkt->hdr.magic = SMUX_MAGIC;
	INIT_LIST_HEAD(&pkt->list);
}

/**
 * Allocate and initialize a packet.
 *
 * If a payload is needed, either set it directly and ensure that it's freed,
 * or use smux_alloc_pkt_payload() to allocate a payload that will be freed
 * automatically when smux_free_pkt() is called.
 */
struct smux_pkt_t *smux_alloc_pkt(void)
{
	struct smux_pkt_t *pkt;

	/* Consider a free list implementation instead of kmalloc */
	pkt = kmalloc(sizeof(struct smux_pkt_t), GFP_ATOMIC);
	if (!pkt) {
		pr_err("%s: out of memory\n", __func__);
		return NULL;
	}
	smux_init_pkt(pkt);
	pkt->allocated = 1;

	return pkt;
}

/**
 * Free packet.
 *
 * @pkt Packet to free (may be NULL)
 *
 * If the payload was allocated using smux_alloc_pkt_payload(), then it is
 * freed as well. Otherwise, the caller is responsible for freeing the
 * payload.
 */
void smux_free_pkt(struct smux_pkt_t *pkt)
{
	if (pkt) {
		if (pkt->free_payload)
			kfree(pkt->payload);
		if (pkt->allocated)
			kfree(pkt);
	}
}

/**
 * Allocate a packet payload.
 *
 * @pkt Packet to add the payload to
 *
 * @returns 0 on success, <0 upon error
 *
 * A flag is set to signal smux_free_pkt() to free the payload.
 */
int smux_alloc_pkt_payload(struct smux_pkt_t *pkt)
{
	if (!pkt)
		return -EINVAL;

	pkt->payload = kmalloc(pkt->hdr.payload_len, GFP_ATOMIC);
	pkt->free_payload = 1;
	if (!pkt->payload) {
		pr_err("%s: unable to malloc %d bytes for payload\n",
				__func__, pkt->hdr.payload_len);
		return -ENOMEM;
	}

	return 0;
}
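
/*
 * Typical packet lifecycle using the helpers above (illustrative sketch,
 * not a path taken verbatim elsewhere in this file):
 *
 *	struct smux_pkt_t *pkt = smux_alloc_pkt();
 *	if (!pkt)
 *		return -ENOMEM;
 *	pkt->hdr.cmd = SMUX_CMD_DATA;
 *	pkt->hdr.lcid = lcid;
 *	pkt->hdr.payload_len = len;
 *	if (smux_alloc_pkt_payload(pkt)) {
 *		smux_free_pkt(pkt);
 *		return -ENOMEM;
 *	}
 *	memcpy(pkt->payload, data, len);
 *	smux_tx_queue(pkt, &smux_lch[lcid], 1);
 *
 * A final smux_free_pkt(pkt) frees both the payload (free_payload was set
 * by smux_alloc_pkt_payload()) and the packet itself (allocated was set).
 */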

static int schedule_notify(uint8_t lcid, int event,
			const union notifier_metadata *metadata)
{
	struct smux_notify_handle *notify_handle = NULL;
	union notifier_metadata *meta_copy = NULL;
	struct smux_lch_t *ch;
	int i;
	unsigned long flags;
	int ret = 0;

	ch = &smux_lch[lcid];
	notify_handle = kzalloc(sizeof(struct smux_notify_handle),
			GFP_ATOMIC);
	if (!notify_handle) {
		pr_err("%s: out of memory\n", __func__);
		ret = -ENOMEM;
		goto free_out;
	}

	notify_handle->notify = ch->notify;
	notify_handle->priv = ch->priv;
	notify_handle->event_type = event;
	if (metadata) {
		meta_copy = kzalloc(sizeof(union notifier_metadata),
				GFP_ATOMIC);
		if (!meta_copy) {
			pr_err("%s: out of memory\n", __func__);
			ret = -ENOMEM;
			goto free_out;
		}
		*meta_copy = *metadata;
		notify_handle->metadata = meta_copy;
	} else {
		notify_handle->metadata = NULL;
	}

	spin_lock_irqsave(&notify_lock_lhc1, flags);
	i = kfifo_avail(&smux_notify_fifo);
	if (i < handle_size) {
		pr_err("%s: fifo full error %d expected %zu\n",
				__func__, i, handle_size);
		ret = -ENOMEM;
		goto unlock_out;
	}

	i = kfifo_in(&smux_notify_fifo, &notify_handle, handle_size);
	if (i != handle_size) {
		pr_err("%s: fifo not available error %d (expected %zu)\n",
				__func__, i, handle_size);
		ret = -ENOSPC;
		goto unlock_out;
	}
	++queued_fifo_notifications;

unlock_out:
	spin_unlock_irqrestore(&notify_lock_lhc1, flags);

free_out:
	queue_work(smux_notify_wq, &smux_notify_local);
	if (ret < 0 && notify_handle) {
		kfree(notify_handle->metadata);
		kfree(notify_handle);
	}
	return ret;
}

/**
 * Returns the serialized size of a packet.
 *
 * @pkt Packet to serialize
 *
 * @returns Serialized length of packet
 */
static unsigned int smux_serialize_size(struct smux_pkt_t *pkt)
{
	unsigned int size;

	size = sizeof(struct smux_hdr_t);
	size += pkt->hdr.payload_len;
	size += pkt->hdr.pad_len;

	return size;
}
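
/*
 * On-the-wire layout implied by the size calculation above and by
 * smux_serialize() below:
 *
 *	+---------------------+--------------------+------------------+
 *	| struct smux_hdr_t   | payload            | zero padding     |
 *	| (starts with magic) | (hdr.payload_len)  | (hdr.pad_len)    |
 *	+---------------------+--------------------+------------------+
 */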

/**
 * Serialize packet @pkt into output buffer @data.
 *
 * @pkt     Packet to serialize
 * @out     Destination buffer pointer
 * @out_len Size of serialized packet
 *
 * @returns 0 for success
 */
int smux_serialize(struct smux_pkt_t *pkt, char *out,
					unsigned int *out_len)
{
	char *data_start = out;

	if (smux_serialize_size(pkt) > SMUX_MAX_PKT_SIZE) {
		pr_err("%s: packet size %u too big\n",
				__func__, smux_serialize_size(pkt));
		return -E2BIG;
	}

	memcpy(out, &pkt->hdr, sizeof(struct smux_hdr_t));
	out += sizeof(struct smux_hdr_t);
	if (pkt->payload) {
		memcpy(out, pkt->payload, pkt->hdr.payload_len);
		out += pkt->hdr.payload_len;
	}
	if (pkt->hdr.pad_len) {
		memset(out, 0x0, pkt->hdr.pad_len);
		out += pkt->hdr.pad_len;
	}
	*out_len = out - data_start;
	return 0;
}

/**
 * Serialize the header and provide a pointer to the data.
 *
 * @pkt          Packet
 * @out[out]     Pointer to the serialized header data
 * @out_len[out] Pointer to the serialized header length
 */
static void smux_serialize_hdr(struct smux_pkt_t *pkt, char **out,
					unsigned int *out_len)
{
	*out = (char *)&pkt->hdr;
	*out_len = sizeof(struct smux_hdr_t);
}

/**
 * Serialize the payload and provide a pointer to the data.
 *
 * @pkt          Packet
 * @out[out]     Pointer to the serialized payload data
 * @out_len[out] Pointer to the serialized payload length
 */
static void smux_serialize_payload(struct smux_pkt_t *pkt, char **out,
					unsigned int *out_len)
{
	*out = pkt->payload;
	*out_len = pkt->hdr.payload_len;
}

/**
 * Serialize the padding and provide a pointer to the data.
 *
 * @pkt          Packet
 * @out[out]     Pointer to the serialized padding (always NULL)
 * @out_len[out] Pointer to the serialized padding length
 *
 * Since the padding field value is undefined, only the size of the padding
 * (@out_len) is set and the buffer pointer (@out) will always be NULL.
 */
static void smux_serialize_padding(struct smux_pkt_t *pkt, char **out,
					unsigned int *out_len)
{
	*out = NULL;
	*out_len = pkt->hdr.pad_len;
}

/**
 * Write data to the TTY framework and handle breaking the writes up if
 * needed.
 *
 * @data Data to write
 * @len  Length of data
 *
 * @returns 0 for success, < 0 for failure
 */
static int write_to_tty(char *data, unsigned len)
{
	int data_written;

	if (!data)
		return 0;

	while (len > 0) {
		data_written = smux.tty->ops->write(smux.tty, data, len);
		if (data_written >= 0) {
			len -= data_written;
			data += data_written;
		} else {
			pr_err("%s: TTY write returned error %d\n",
					__func__, data_written);
			return data_written;
		}

		if (len)
			tty_wait_until_sent(smux.tty,
				msecs_to_jiffies(TTY_BUFFER_FULL_WAIT_MS));

		/* FUTURE - add SSR logic */
	}
	return 0;
}

/**
 * Write a packet to the TTY.
 *
 * @pkt packet to write
 *
 * @returns 0 on success
 */
static int smux_tx_tty(struct smux_pkt_t *pkt)
{
	char *data;
	unsigned int len;
	int ret;

	if (!smux.tty) {
		pr_err("%s: TTY not initialized", __func__);
		return -ENOTTY;
	}

	if (pkt->hdr.cmd == SMUX_CMD_BYTE) {
		SMUX_DBG("%s: tty send single byte\n", __func__);
		ret = write_to_tty(&pkt->hdr.flags, 1);
		return ret;
	}

	smux_serialize_hdr(pkt, &data, &len);
	ret = write_to_tty(data, len);
	if (ret) {
		pr_err("%s: failed %d to write header %d\n",
				__func__, ret, len);
		return ret;
	}

	smux_serialize_payload(pkt, &data, &len);
	ret = write_to_tty(data, len);
	if (ret) {
		pr_err("%s: failed %d to write payload %d\n",
				__func__, ret, len);
		return ret;
	}

	smux_serialize_padding(pkt, &data, &len);
	while (len > 0) {
		char zero = 0x0;
		ret = write_to_tty(&zero, 1);
		if (ret) {
			pr_err("%s: failed %d to write padding %d\n",
					__func__, ret, len);
			return ret;
		}
		--len;
	}
	return 0;
}

/**
 * Send a single character.
 *
 * @ch Character to send
 */
static void smux_send_byte(char ch)
{
	struct smux_pkt_t pkt;

	smux_init_pkt(&pkt);

	pkt.hdr.cmd = SMUX_CMD_BYTE;
	pkt.hdr.flags = ch;
	pkt.hdr.lcid = 0;

	SMUX_LOG_PKT_TX(&pkt);
	if (!smux_byte_loopback)
		smux_tx_tty(&pkt);
	else
		smux_tx_loopback(&pkt);
}

/**
 * Receive a single-character packet (used for internal testing).
 *
 * @ch   Character to receive
 * @lcid Logical channel ID for packet
 *
 * @returns 0 for success
 */
static int smux_receive_byte(char ch, int lcid)
{
	struct smux_pkt_t pkt;

	smux_init_pkt(&pkt);
	pkt.hdr.lcid = lcid;
	pkt.hdr.cmd = SMUX_CMD_BYTE;
	pkt.hdr.flags = ch;

	return smux_dispatch_rx_pkt(&pkt);
}

/**
 * Queue a packet for transmit.
 *
 * @pkt_ptr Packet to queue
 * @ch      Channel to queue packet on
 * @queue   Queue channel on ready list
 */
static void smux_tx_queue(struct smux_pkt_t *pkt_ptr, struct smux_lch_t *ch,
		int queue)
{
	unsigned long flags;

	SMUX_DBG("%s: queuing pkt %p\n", __func__, pkt_ptr);

	spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
	list_add_tail(&pkt_ptr->list, &ch->tx_queue);
	spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);

	if (queue)
		list_channel(ch);
}

/**
 * Handle a received OPEN ACK command.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
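
/*
 * For reference, the OPEN handshake handled here and in
 * smux_handle_rx_open_cmd() below: each side sends an OPEN for its half of
 * the channel and replies to the peer's OPEN with an OPEN ACK. Receiving
 * the ACK moves the local state OPENING -> OPENED; receiving the peer's
 * OPEN moves the remote state CLOSED -> OPENED. SMUX_CONNECTED is
 * scheduled once both halves are OPENED.
 */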
static int smux_handle_rx_open_ack(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret;
	struct smux_lch_t *ch;
	int enable_powerdown = 0;

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];

	spin_lock(&ch->state_lock_lhb1);
	if (ch->local_state == SMUX_LCH_LOCAL_OPENING) {
		SMUX_DBG("lcid %d local state 0x%x -> 0x%x\n", lcid,
				ch->local_state,
				SMUX_LCH_LOCAL_OPENED);

		if (pkt->hdr.flags & SMUX_CMD_OPEN_POWER_COLLAPSE)
			enable_powerdown = 1;

		ch->local_state = SMUX_LCH_LOCAL_OPENED;
		if (ch->remote_state == SMUX_LCH_REMOTE_OPENED)
			schedule_notify(lcid, SMUX_CONNECTED, NULL);
		ret = 0;
	} else if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
		SMUX_DBG("Remote loopback OPEN ACK received\n");
		ret = 0;
	} else {
		pr_err("%s: lcid %d state 0x%x open ack invalid\n",
				__func__, lcid, ch->local_state);
		ret = -EINVAL;
	}
	spin_unlock(&ch->state_lock_lhb1);

	if (enable_powerdown) {
		spin_lock(&smux.tx_lock_lha2);
		if (!smux.powerdown_enabled) {
			smux.powerdown_enabled = 1;
			SMUX_DBG("%s: enabling power-collapse support\n",
					__func__);
		}
		spin_unlock(&smux.tx_lock_lha2);
	}

	return ret;
}

static int smux_handle_close_ack(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret;
	struct smux_lch_t *ch;
	union notifier_metadata meta_disconnected;
	unsigned long flags;

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];
	meta_disconnected.disconnected.is_ssr = 0;

	spin_lock_irqsave(&ch->state_lock_lhb1, flags);

	if (ch->local_state == SMUX_LCH_LOCAL_CLOSING) {
		SMUX_DBG("lcid %d local state 0x%x -> 0x%x\n", lcid,
				SMUX_LCH_LOCAL_CLOSING,
				SMUX_LCH_LOCAL_CLOSED);
		ch->local_state = SMUX_LCH_LOCAL_CLOSED;
		if (ch->remote_state == SMUX_LCH_REMOTE_CLOSED)
			schedule_notify(lcid, SMUX_DISCONNECTED,
					&meta_disconnected);
		ret = 0;
	} else if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
		SMUX_DBG("Remote loopback CLOSE ACK received\n");
		ret = 0;
	} else {
		pr_err("%s: lcid %d state 0x%x close ack invalid\n",
				__func__, lcid, ch->local_state);
		ret = -EINVAL;
	}
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
	return ret;
}

/**
 * Handle a received OPEN command.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_open_cmd(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret;
	struct smux_lch_t *ch;
	struct smux_pkt_t *ack_pkt;
	unsigned long flags;
	int tx_ready = 0;
	int enable_powerdown = 0;

	if (pkt->hdr.flags & SMUX_CMD_OPEN_ACK)
		return smux_handle_rx_open_ack(pkt);

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];

	spin_lock_irqsave(&ch->state_lock_lhb1, flags);

	if (ch->remote_state == SMUX_LCH_REMOTE_CLOSED) {
		SMUX_DBG("lcid %d remote state 0x%x -> 0x%x\n", lcid,
				SMUX_LCH_REMOTE_CLOSED,
				SMUX_LCH_REMOTE_OPENED);

		ch->remote_state = SMUX_LCH_REMOTE_OPENED;
		if (pkt->hdr.flags & SMUX_CMD_OPEN_POWER_COLLAPSE)
			enable_powerdown = 1;

		/* Send Open ACK */
		ack_pkt = smux_alloc_pkt();
		if (!ack_pkt) {
			/* exit out to allow retrying this later */
			ret = -ENOMEM;
			goto out;
		}
		ack_pkt->hdr.cmd = SMUX_CMD_OPEN_LCH;
		ack_pkt->hdr.flags = SMUX_CMD_OPEN_ACK
			| SMUX_CMD_OPEN_POWER_COLLAPSE;
		ack_pkt->hdr.lcid = lcid;
		ack_pkt->hdr.payload_len = 0;
		ack_pkt->hdr.pad_len = 0;
		if (pkt->hdr.flags & SMUX_CMD_OPEN_REMOTE_LOOPBACK) {
			ch->remote_mode = SMUX_LCH_MODE_REMOTE_LOOPBACK;
			ack_pkt->hdr.flags |= SMUX_CMD_OPEN_REMOTE_LOOPBACK;
		}
		smux_tx_queue(ack_pkt, ch, 0);
		tx_ready = 1;

		if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
			/*
			 * Send an Open command to the remote side to
			 * simulate our local client doing it.
			 */
			ack_pkt = smux_alloc_pkt();
			if (ack_pkt) {
				ack_pkt->hdr.lcid = lcid;
				ack_pkt->hdr.cmd = SMUX_CMD_OPEN_LCH;
				ack_pkt->hdr.flags =
					SMUX_CMD_OPEN_POWER_COLLAPSE;
				ack_pkt->hdr.payload_len = 0;
				ack_pkt->hdr.pad_len = 0;
				smux_tx_queue(ack_pkt, ch, 0);
				tx_ready = 1;
			} else {
				pr_err("%s: Remote loopback allocation failure\n",
						__func__);
			}
		} else if (ch->local_state == SMUX_LCH_LOCAL_OPENED) {
			schedule_notify(lcid, SMUX_CONNECTED, NULL);
		}
		ret = 0;
	} else {
		pr_err("%s: lcid %d remote state 0x%x open invalid\n",
				__func__, lcid, ch->remote_state);
		ret = -EINVAL;
	}

out:
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);

	if (enable_powerdown) {
		spin_lock_irqsave(&smux.tx_lock_lha2, flags);
		smux.powerdown_enabled = 1;
		SMUX_DBG("%s: enabling power-collapse support\n", __func__);
		spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
	}

	if (tx_ready)
		list_channel(ch);

	return ret;
}

/**
 * Handle a received CLOSE command.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_close_cmd(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret;
	struct smux_lch_t *ch;
	struct smux_pkt_t *ack_pkt;
	union notifier_metadata meta_disconnected;
	unsigned long flags;
	int tx_ready = 0;

	if (pkt->hdr.flags & SMUX_CMD_CLOSE_ACK)
		return smux_handle_close_ack(pkt);

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];
	meta_disconnected.disconnected.is_ssr = 0;

	spin_lock_irqsave(&ch->state_lock_lhb1, flags);
	if (ch->remote_state == SMUX_LCH_REMOTE_OPENED) {
		SMUX_DBG("lcid %d remote state 0x%x -> 0x%x\n", lcid,
				SMUX_LCH_REMOTE_OPENED,
				SMUX_LCH_REMOTE_CLOSED);

		ack_pkt = smux_alloc_pkt();
		if (!ack_pkt) {
			/* exit out to allow retrying this later */
			ret = -ENOMEM;
			goto out;
		}
		ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
		ack_pkt->hdr.cmd = SMUX_CMD_CLOSE_LCH;
		ack_pkt->hdr.flags = SMUX_CMD_CLOSE_ACK;
		ack_pkt->hdr.lcid = lcid;
		ack_pkt->hdr.payload_len = 0;
		ack_pkt->hdr.pad_len = 0;
		smux_tx_queue(ack_pkt, ch, 0);
		tx_ready = 1;

		if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
			/*
			 * Send a Close command to the remote side to simulate
			 * our local client doing it.
			 */
			ack_pkt = smux_alloc_pkt();
			if (ack_pkt) {
				ack_pkt->hdr.lcid = lcid;
				ack_pkt->hdr.cmd = SMUX_CMD_CLOSE_LCH;
				ack_pkt->hdr.flags = 0;
				ack_pkt->hdr.payload_len = 0;
				ack_pkt->hdr.pad_len = 0;
				smux_tx_queue(ack_pkt, ch, 0);
				tx_ready = 1;
			} else {
				pr_err("%s: Remote loopback allocation failure\n",
						__func__);
			}
		}

		if (ch->local_state == SMUX_LCH_LOCAL_CLOSED)
			schedule_notify(lcid, SMUX_DISCONNECTED,
					&meta_disconnected);
		ret = 0;
	} else {
		pr_err("%s: lcid %d remote state 0x%x close invalid\n",
				__func__, lcid, ch->remote_state);
		ret = -EINVAL;
	}
out:
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
	if (tx_ready)
		list_channel(ch);

	return ret;
}

/*
 * Handle a received DATA command.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_data_cmd(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret = 0;
	int i;
	int tmp;
	int rx_len;
	struct smux_lch_t *ch;
	union notifier_metadata metadata;
	int remote_loopback;
	struct smux_pkt_t *ack_pkt;
	unsigned long flags;

	if (!pkt || smux_assert_lch_id(pkt->hdr.lcid)) {
		ret = -ENXIO;
		goto out;
	}

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];
	spin_lock_irqsave(&ch->state_lock_lhb1, flags);
	remote_loopback = ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK;

	if (ch->local_state != SMUX_LCH_LOCAL_OPENED
		&& !remote_loopback) {
		pr_err("smux: ch %d error data on local state 0x%x",
					lcid, ch->local_state);
		ret = -EIO;
		spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
		goto out;
	}

	if (ch->remote_state != SMUX_LCH_REMOTE_OPENED) {
		pr_err("smux: ch %d error data on remote state 0x%x",
					lcid, ch->remote_state);
		ret = -EIO;
		spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
		goto out;
	}
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);

	rx_len = pkt->hdr.payload_len;
	if (rx_len == 0) {
		ret = -EINVAL;
		goto out;
	}

	for (i = 0; i < SMUX_GET_RX_BUFF_MAX_RETRY_CNT; ++i) {
		metadata.read.pkt_priv = 0;
		metadata.read.buffer = 0;

		if (!remote_loopback) {
			tmp = ch->get_rx_buffer(ch->priv,
					(void **)&metadata.read.pkt_priv,
					(void **)&metadata.read.buffer,
					rx_len);
			if (tmp == 0 && metadata.read.buffer) {
				/* place data into RX buffer */
				memcpy(metadata.read.buffer, pkt->payload,
								rx_len);
				metadata.read.len = rx_len;
				schedule_notify(lcid, SMUX_READ_DONE,
								&metadata);
				ret = 0;
				break;
			} else if (tmp == -EAGAIN) {
				ret = -ENOMEM;
			} else if (tmp < 0) {
				schedule_notify(lcid, SMUX_READ_FAIL, NULL);
				ret = -ENOMEM;
				break;
			} else if (!metadata.read.buffer) {
				pr_err("%s: get_rx_buffer() buffer is NULL\n",
					__func__);
				ret = -ENOMEM;
			}
		} else {
			/* Echo the data back to the remote client. */
			ack_pkt = smux_alloc_pkt();
			if (ack_pkt) {
				ack_pkt->hdr.lcid = lcid;
				ack_pkt->hdr.cmd = SMUX_CMD_DATA;
				ack_pkt->hdr.flags = 0;
				ack_pkt->hdr.payload_len = pkt->hdr.payload_len;
				ack_pkt->payload = pkt->payload;
				ack_pkt->hdr.pad_len = pkt->hdr.pad_len;
				smux_tx_queue(ack_pkt, ch, 0);
				list_channel(ch);
				ret = 0;
			} else {
				pr_err("%s: Remote loopback allocation failure\n",
						__func__);
				ret = -ENOMEM;
			}
			/* no retry needed for loopback echo */
			break;
		}
	}

out:
	return ret;
}

/**
 * Handle a received byte command (used for testing).
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_byte_cmd(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret;
	struct smux_lch_t *ch;
	union notifier_metadata metadata;
	unsigned long flags;

	if (!pkt || smux_assert_lch_id(pkt->hdr.lcid))
		return -ENXIO;

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];
	spin_lock_irqsave(&ch->state_lock_lhb1, flags);

	if (ch->local_state != SMUX_LCH_LOCAL_OPENED) {
		pr_err("smux: ch %d error data on local state 0x%x",
				lcid, ch->local_state);
		ret = -EIO;
		goto out;
	}

	if (ch->remote_state != SMUX_LCH_REMOTE_OPENED) {
		pr_err("smux: ch %d error data on remote state 0x%x",
				lcid, ch->remote_state);
		ret = -EIO;
		goto out;
	}

	metadata.read.pkt_priv = (void *)(int)pkt->hdr.flags;
	metadata.read.buffer = 0;
	schedule_notify(lcid, SMUX_READ_DONE, &metadata);
	ret = 0;

out:
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
	return ret;
}

/**
 * Handle a received status command.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_status_cmd(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret = 0;
	struct smux_lch_t *ch;
	union notifier_metadata meta;
	unsigned long flags;
	int tx_ready = 0;

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];

	spin_lock_irqsave(&ch->state_lock_lhb1, flags);
	meta.tiocm.tiocm_old = ch->remote_tiocm;
	meta.tiocm.tiocm_new = pkt->hdr.flags;

	/* update logical channel flow control */
	if ((meta.tiocm.tiocm_old & SMUX_CMD_STATUS_FLOW_CNTL) ^
		(meta.tiocm.tiocm_new & SMUX_CMD_STATUS_FLOW_CNTL)) {
		/* logical channel flow control changed */
		if (pkt->hdr.flags & SMUX_CMD_STATUS_FLOW_CNTL) {
			/* disable TX */
			SMUX_DBG("TX Flow control enabled\n");
			ch->tx_flow_control = 1;
		} else {
			/* re-enable channel */
			SMUX_DBG("TX Flow control disabled\n");
			ch->tx_flow_control = 0;
			tx_ready = 1;
		}
	}
	meta.tiocm.tiocm_old = msm_smux_tiocm_get_atomic(ch);
	ch->remote_tiocm = pkt->hdr.flags;
	meta.tiocm.tiocm_new = msm_smux_tiocm_get_atomic(ch);

	/* client notification for status change */
	if (IS_FULLY_OPENED(ch)) {
		if (meta.tiocm.tiocm_old != meta.tiocm.tiocm_new)
			schedule_notify(lcid, SMUX_TIOCM_UPDATE, &meta);
		ret = 0;
	}
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
	if (tx_ready)
		list_channel(ch);

	return ret;
}

/**
 * Handle a received power command.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_power_cmd(struct smux_pkt_t *pkt)
{
	int tx_ready = 0;
	struct smux_pkt_t *ack_pkt = NULL;
	unsigned long flags;

	spin_lock_irqsave(&smux.tx_lock_lha2, flags);
	if (pkt->hdr.flags & SMUX_CMD_PWR_CTL_ACK) {
		/* local sleep request ack */
		if (smux.power_state == SMUX_PWR_TURNING_OFF) {
			/* Power-down complete, turn off UART */
			SMUX_DBG("%s: Power %d->%d\n", __func__,
					smux.power_state, SMUX_PWR_OFF_FLUSH);
			smux.power_state = SMUX_PWR_OFF_FLUSH;
			queue_work(smux_tx_wq, &smux_inactivity_work);
		} else {
			pr_err("%s: sleep request ack invalid in state %d\n",
					__func__, smux.power_state);
		}
	} else {
		/* remote sleep request */
		if (smux.power_state == SMUX_PWR_ON
			|| smux.power_state == SMUX_PWR_TURNING_OFF) {
			ack_pkt = smux_alloc_pkt();
			if (ack_pkt) {
				SMUX_DBG("%s: Power %d->%d\n", __func__,
						smux.power_state,
						SMUX_PWR_TURNING_OFF_FLUSH);

				/* send power-down request */
				ack_pkt->hdr.cmd = SMUX_CMD_PWR_CTL;
				ack_pkt->hdr.flags = SMUX_CMD_PWR_CTL_ACK;
				ack_pkt->hdr.lcid = pkt->hdr.lcid;
				smux_tx_queue(ack_pkt,
						&smux_lch[ack_pkt->hdr.lcid],
						0);
				tx_ready = 1;
				smux.power_state = SMUX_PWR_TURNING_OFF_FLUSH;
				queue_delayed_work(smux_tx_wq,
					&smux_delayed_inactivity_work,
					msecs_to_jiffies(
						SMUX_INACTIVITY_TIMEOUT_MS));
			}
		} else {
			pr_err("%s: sleep request invalid in state %d\n",
					__func__, smux.power_state);
		}
	}
	spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);

	if (tx_ready)
		list_channel(&smux_lch[ack_pkt->hdr.lcid]);

	return 0;
}

/**
 * Handle dispatching a completed packet for receive processing.
 *
 * @pkt Packet to process
 *
 * @returns 0 for success
 */
static int smux_dispatch_rx_pkt(struct smux_pkt_t *pkt)
{
	int ret;

	SMUX_LOG_PKT_RX(pkt);

	switch (pkt->hdr.cmd) {
	case SMUX_CMD_OPEN_LCH:
		ret = smux_handle_rx_open_cmd(pkt);
		break;

	case SMUX_CMD_DATA:
		ret = smux_handle_rx_data_cmd(pkt);
		break;

	case SMUX_CMD_CLOSE_LCH:
		ret = smux_handle_rx_close_cmd(pkt);
		break;

	case SMUX_CMD_STATUS:
		ret = smux_handle_rx_status_cmd(pkt);
		break;

	case SMUX_CMD_PWR_CTL:
		ret = smux_handle_rx_power_cmd(pkt);
		break;

	case SMUX_CMD_BYTE:
		ret = smux_handle_rx_byte_cmd(pkt);
		break;

	default:
		pr_err("%s: command %d unknown\n", __func__, pkt->hdr.cmd);
		ret = -EINVAL;
	}
	return ret;
}

/**
 * Deserialize a packet and dispatch it to the packet receive logic.
 *
 * @data Raw data for one packet
 * @len  Length of the data
 *
 * @returns 0 for success
 */
static int smux_deserialize(unsigned char *data, int len)
{
	struct smux_pkt_t recv;
	uint8_t lcid;

	smux_init_pkt(&recv);

	/*
	 * It may be possible to optimize this to not use the
	 * temporary buffer.
	 */
	memcpy(&recv.hdr, data, sizeof(struct smux_hdr_t));

	if (recv.hdr.magic != SMUX_MAGIC) {
		pr_err("%s: invalid header magic\n", __func__);
		return -EINVAL;
	}

	lcid = recv.hdr.lcid;
	if (smux_assert_lch_id(lcid)) {
		pr_err("%s: invalid channel id %d\n", __func__, lcid);
		return -ENXIO;
	}

	if (recv.hdr.payload_len)
		recv.payload = data + sizeof(struct smux_hdr_t);

	return smux_dispatch_rx_pkt(&recv);
}

/**
 * Handle a wakeup request byte.
 */
static void smux_handle_wakeup_req(void)
{
	unsigned long flags;

	spin_lock_irqsave(&smux.tx_lock_lha2, flags);
	if (smux.power_state == SMUX_PWR_OFF
		|| smux.power_state == SMUX_PWR_TURNING_ON) {
		/* wakeup system */
		SMUX_DBG("%s: Power %d->%d\n", __func__,
				smux.power_state, SMUX_PWR_ON);
		smux.power_state = SMUX_PWR_ON;
		queue_work(smux_tx_wq, &smux_wakeup_work);
		queue_work(smux_tx_wq, &smux_tx_work);
		queue_delayed_work(smux_tx_wq, &smux_delayed_inactivity_work,
			msecs_to_jiffies(SMUX_INACTIVITY_TIMEOUT_MS));
		smux_send_byte(SMUX_WAKEUP_ACK);
	} else {
		smux_send_byte(SMUX_WAKEUP_ACK);
	}
	spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
}

/**
 * Handle a wakeup request ack.
 */
static void smux_handle_wakeup_ack(void)
{
	unsigned long flags;

	spin_lock_irqsave(&smux.tx_lock_lha2, flags);
	if (smux.power_state == SMUX_PWR_TURNING_ON) {
		/* received response to wakeup request */
		SMUX_DBG("%s: Power %d->%d\n", __func__,
				smux.power_state, SMUX_PWR_ON);
		smux.power_state = SMUX_PWR_ON;
		queue_work(smux_tx_wq, &smux_tx_work);
		queue_delayed_work(smux_tx_wq, &smux_delayed_inactivity_work,
			msecs_to_jiffies(SMUX_INACTIVITY_TIMEOUT_MS));

	} else if (smux.power_state != SMUX_PWR_ON) {
		/* invalid message */
		pr_err("%s: wakeup request ack invalid in state %d\n",
				__func__, smux.power_state);
	}
	spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
}

/**
 * RX State machine - IDLE state processing.
 *
 * @data New RX data to process
 * @len  Length of the data
 * @used Return value of length processed
 * @flag Error flag - TTY_NORMAL 0 for no failure
 */
static void smux_rx_handle_idle(const unsigned char *data,
		int len, int *used, int flag)
{
	int i;

	if (flag) {
		if (smux_byte_loopback)
			smux_receive_byte(SMUX_UT_ECHO_ACK_FAIL,
					smux_byte_loopback);
		pr_err("%s: TTY error 0x%x - ignoring\n", __func__, flag);
		++*used;
		return;
	}

	for (i = *used; i < len && smux.rx_state == SMUX_RX_IDLE; i++) {
		switch (data[i]) {
		case SMUX_MAGIC_WORD1:
			smux.rx_state = SMUX_RX_MAGIC;
			break;
		case SMUX_WAKEUP_REQ:
			smux_handle_wakeup_req();
			break;
		case SMUX_WAKEUP_ACK:
			smux_handle_wakeup_ack();
			break;
		default:
			/* unexpected character */
			if (smux_byte_loopback && data[i] == SMUX_UT_ECHO_REQ)
				smux_receive_byte(SMUX_UT_ECHO_ACK_OK,
						smux_byte_loopback);
			pr_err("%s: parse error 0x%02x - ignoring\n", __func__,
					(unsigned)data[i]);
			break;
		}
	}

	*used = i;
}

/**
 * RX State machine - Header Magic state processing.
 *
 * @data New RX data to process
 * @len  Length of the data
 * @used Return value of length processed
 * @flag Error flag - TTY_NORMAL 0 for no failure
 */
static void smux_rx_handle_magic(const unsigned char *data,
		int len, int *used, int flag)
{
	int i;

	if (flag) {
		pr_err("%s: TTY RX error %d\n", __func__, flag);
		smux_enter_reset();
		smux.rx_state = SMUX_RX_FAILURE;
		++*used;
		return;
	}

	for (i = *used; i < len && smux.rx_state == SMUX_RX_MAGIC; i++) {
		/* wait for completion of the magic */
		if (data[i] == SMUX_MAGIC_WORD2) {
			smux.recv_len = 0;
			smux.recv_buf[smux.recv_len++] = SMUX_MAGIC_WORD1;
			smux.recv_buf[smux.recv_len++] = SMUX_MAGIC_WORD2;
			smux.rx_state = SMUX_RX_HDR;
		} else {
			/* unexpected / trash character */
			pr_err("%s: rx parse error for char %c; *used=%d, len=%d\n",
					__func__, data[i], *used, len);
			smux.rx_state = SMUX_RX_IDLE;
		}
	}

	*used = i;
}

/**
 * RX State machine - Packet Header state processing.
 *
 * @data New RX data to process
 * @len  Length of the data
 * @used Return value of length processed
 * @flag Error flag - TTY_NORMAL 0 for no failure
 */
static void smux_rx_handle_hdr(const unsigned char *data,
		int len, int *used, int flag)
{
	int i;
	struct smux_hdr_t *hdr;

	if (flag) {
		pr_err("%s: TTY RX error %d\n", __func__, flag);
		smux_enter_reset();
		smux.rx_state = SMUX_RX_FAILURE;
		++*used;
		return;
	}

	for (i = *used; i < len && smux.rx_state == SMUX_RX_HDR; i++) {
		smux.recv_buf[smux.recv_len++] = data[i];

		if (smux.recv_len == sizeof(struct smux_hdr_t)) {
			/* complete header received */
			hdr = (struct smux_hdr_t *)smux.recv_buf;
			smux.pkt_remain = hdr->payload_len + hdr->pad_len;
			smux.rx_state = SMUX_RX_PAYLOAD;
		}
	}
	*used = i;
}

/**
 * RX State machine - Packet Payload state processing.
 *
 * @data New RX data to process
 * @len  Length of the data
 * @used Return value of length processed
 * @flag Error flag - TTY_NORMAL 0 for no failure
 */
static void smux_rx_handle_pkt_payload(const unsigned char *data,
		int len, int *used, int flag)
{
	int remaining;

	if (flag) {
		pr_err("%s: TTY RX error %d\n", __func__, flag);
		smux_enter_reset();
		smux.rx_state = SMUX_RX_FAILURE;
		++*used;
		return;
	}

	/* copy data into rx buffer */
	if (smux.pkt_remain < (len - *used))
		remaining = smux.pkt_remain;
	else
		remaining = len - *used;

	memcpy(&smux.recv_buf[smux.recv_len], &data[*used], remaining);
	smux.recv_len += remaining;
	smux.pkt_remain -= remaining;
	*used += remaining;

	if (smux.pkt_remain == 0) {
		/* complete packet received */
		smux_deserialize(smux.recv_buf, smux.recv_len);
		smux.rx_state = SMUX_RX_IDLE;
	}
}

/**
 * Feed data to the receive state machine.
 *
 * @data Pointer to data block
 * @len  Length of data
 * @flag TTY_NORMAL (0) for no error, otherwise TTY Error Flag
 */
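/*
 * Note on the design below: the work item and completion live on the
 * caller's stack, and the caller blocks until smux_rx_worker() signals
 * completion. This serializes all RX parsing on the single-threaded
 * smux_rx_wq while guaranteeing that @data (typically a buffer owned by
 * the TTY layer) remains valid for the worker's entire run.
 */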
void smux_rx_state_machine(const unsigned char *data,
		int len, int flag)
{
	struct smux_rx_worker_data work;

	work.data = data;
	work.len = len;
	work.flag = flag;
	INIT_WORK_ONSTACK(&work.work, smux_rx_worker);
	work.work_complete = COMPLETION_INITIALIZER_ONSTACK(work.work_complete);

	queue_work(smux_rx_wq, &work.work);
	wait_for_completion(&work.work_complete);
}

/**
 * Add a channel to the transmit-ready list and trigger the transmit worker.
 *
 * @ch Channel to add
 */
static void list_channel(struct smux_lch_t *ch)
{
	unsigned long flags;

	SMUX_DBG("%s: listing channel %d\n",
			__func__, ch->lcid);

	spin_lock_irqsave(&smux.tx_lock_lha2, flags);
	spin_lock(&ch->tx_lock_lhb2);
	smux.tx_activity_flag = 1;
	if (list_empty(&ch->tx_ready_list))
		list_add_tail(&ch->tx_ready_list, &smux.lch_tx_ready_list);
	spin_unlock(&ch->tx_lock_lhb2);
	spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);

	queue_work(smux_tx_wq, &smux_tx_work);
}

/**
 * Transmit a packet on the correct transport and then perform client
 * notification.
 *
 * @ch  Channel to transmit on
 * @pkt Packet to transmit
 */
static void smux_tx_pkt(struct smux_lch_t *ch, struct smux_pkt_t *pkt)
{
	union notifier_metadata meta_write;
	int ret;

	if (ch && pkt) {
		SMUX_LOG_PKT_TX(pkt);
		if (ch->local_mode == SMUX_LCH_MODE_LOCAL_LOOPBACK)
			ret = smux_tx_loopback(pkt);
		else
			ret = smux_tx_tty(pkt);

		if (pkt->hdr.cmd == SMUX_CMD_DATA) {
			/* notify write-done */
			meta_write.write.pkt_priv = pkt->priv;
			meta_write.write.buffer = pkt->payload;
			meta_write.write.len = pkt->hdr.payload_len;
			if (ret >= 0) {
				SMUX_DBG("%s: PKT write done\n", __func__);
				schedule_notify(ch->lcid, SMUX_WRITE_DONE,
						&meta_write);
			} else {
				pr_err("%s: failed to write pkt %d\n",
						__func__, ret);
				schedule_notify(ch->lcid, SMUX_WRITE_FAIL,
						&meta_write);
			}
		}
	}
}

/**
 * Power-up the UART.
 */
static void smux_uart_power_on(void)
{
	struct uart_state *state;

	if (!smux.tty || !smux.tty->driver_data) {
		pr_err("%s: unable to find UART port for tty %p\n",
				__func__, smux.tty);
		return;
	}
	state = smux.tty->driver_data;
	msm_hs_request_clock_on(state->uart_port);
}

/**
 * Power down the UART.
 */
static void smux_uart_power_off(void)
{
	struct uart_state *state;

	if (!smux.tty || !smux.tty->driver_data) {
		pr_err("%s: unable to find UART port for tty %p\n",
				__func__, smux.tty);
		return;
	}
	state = smux.tty->driver_data;
	msm_hs_request_clock_off(state->uart_port);
}

/**
 * TX Wakeup Worker
 *
 * @work Not used
 *
 * Do an exponential back-off wakeup sequence with a maximum period
 * of approximately 1 second (1 << 20 microseconds).
 */
static void smux_wakeup_worker(struct work_struct *work)
{
	unsigned long flags;
	unsigned wakeup_delay;
	int complete = 0;

	for (;;) {
		spin_lock_irqsave(&smux.tx_lock_lha2, flags);
		if (smux.power_state == SMUX_PWR_ON) {
			/* wakeup complete */
			complete = 1;
			spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
			break;
		} else {
			/* retry */
			wakeup_delay = smux.pwr_wakeup_delay_us;
			smux.pwr_wakeup_delay_us <<= 1;
			if (smux.pwr_wakeup_delay_us > SMUX_WAKEUP_DELAY_MAX)
				smux.pwr_wakeup_delay_us =
					SMUX_WAKEUP_DELAY_MAX;
		}
		spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
		SMUX_DBG("%s: triggering wakeup\n", __func__);
		smux_send_byte(SMUX_WAKEUP_REQ);

		if (wakeup_delay < SMUX_WAKEUP_DELAY_MIN) {
			SMUX_DBG("%s: sleeping for %u us\n", __func__,
					wakeup_delay);
			usleep_range(wakeup_delay, 2*wakeup_delay);
		} else {
			/* schedule delayed work */
			SMUX_DBG("%s: scheduling delayed wakeup in %u ms\n",
					__func__, wakeup_delay / 1000);
			queue_delayed_work(smux_tx_wq,
					&smux_wakeup_delayed_work,
					msecs_to_jiffies(wakeup_delay / 1000));
			break;
		}
	}

	if (complete) {
		SMUX_DBG("%s: wakeup complete\n", __func__);
		/*
		 * Cancel any pending retry. This avoids a race condition with
		 * a new power-up request because:
		 * 1) this worker doesn't modify the state
		 * 2) this worker is processed on the same single-threaded
		 *    workqueue as new TX wakeup requests
		 */
		cancel_delayed_work(&smux_wakeup_delayed_work);
	}
}
1951
1952
1953/**
1954 * Inactivity timeout worker. Periodically scheduled when link is active.
1955 * When it detects inactivity, it will power-down the UART link.
1956 *
1957 * @work Work structure (not used)
1958 */
1959static void smux_inactivity_worker(struct work_struct *work)
1960{
1961 int tx_ready = 0;
1962 struct smux_pkt_t *pkt;
1963 unsigned long flags;
1964
1965 spin_lock_irqsave(&smux.rx_lock_lha1, flags);
1966 spin_lock(&smux.tx_lock_lha2);
1967
1968 if (!smux.tx_activity_flag && !smux.rx_activity_flag) {
1969 /* no activity */
1970 if (smux.powerdown_enabled) {
1971 if (smux.power_state == SMUX_PWR_ON) {
1972 /* start power-down sequence */
1973 pkt = smux_alloc_pkt();
1974 if (pkt) {
1975 SMUX_DBG("%s: Power %d->%d\n", __func__,
1976 smux.power_state,
1977 SMUX_PWR_TURNING_OFF);
1978 smux.power_state = SMUX_PWR_TURNING_OFF;
1979
1980 /* send power-down request */
1981 pkt->hdr.cmd = SMUX_CMD_PWR_CTL;
1982 pkt->hdr.flags = 0;
1983 pkt->hdr.lcid = 0;
1984 smux_tx_queue(pkt,
1985 &smux_lch[SMUX_TEST_LCID],
1986 0);
1987 tx_ready = 1;
1988 }
1989 }
1990 } else {
1991 SMUX_DBG("%s: link inactive, but powerdown disabled\n",
1992 __func__);
1993 }
1994 }
1995 smux.tx_activity_flag = 0;
1996 smux.rx_activity_flag = 0;
1997
1998 spin_unlock(&smux.tx_lock_lha2);
1999 spin_unlock_irqrestore(&smux.rx_lock_lha1, flags);
2000
2001 if (tx_ready)
2002 list_channel(&smux_lch[SMUX_TEST_LCID]);
2003
2004 if ((smux.power_state == SMUX_PWR_OFF_FLUSH) ||
2005 (smux.power_state == SMUX_PWR_TURNING_OFF_FLUSH)) {
2006		/* ready to power down the UART */
2007 SMUX_DBG("%s: Power %d->%d\n", __func__,
2008 smux.power_state, SMUX_PWR_OFF);
2009 smux_uart_power_off();
2010 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
2011 smux.power_state = SMUX_PWR_OFF;
2012 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2013 }
2014
2015 /* reschedule inactivity worker */
2016 if (smux.power_state != SMUX_PWR_OFF)
2017 queue_delayed_work(smux_tx_wq, &smux_delayed_inactivity_work,
2018 msecs_to_jiffies(SMUX_INACTIVITY_TIMEOUT_MS));
2019}
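/*
 * Example (illustration only; the my_* names are hypothetical): the
 * inactivity logic above follows the standard self-rescheduling
 * delayed-work pattern, where the handler re-arms itself until a terminal
 * state is reached:
 */
#if 0
static void my_worker(struct work_struct *work);
static DECLARE_DELAYED_WORK(my_delayed_work, my_worker);

static void my_worker(struct work_struct *work)
{
	/* ... sample and clear the activity flags, act on them ... */

	/* re-arm unless the link has fully powered off */
	if (smux.power_state != SMUX_PWR_OFF)
		queue_delayed_work(smux_tx_wq, &my_delayed_work,
				msecs_to_jiffies(SMUX_INACTIVITY_TIMEOUT_MS));
}
#endif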
2020
2021/**
2022 * RX worker handles all receive operations.
2023 *
2024 * @work  Work structure contained in struct smux_rx_worker_data
2025 */
2026static void smux_rx_worker(struct work_struct *work)
2027{
2028 unsigned long flags;
2029 int used;
2030 int initial_rx_state;
2031 struct smux_rx_worker_data *w;
2032 const unsigned char *data;
2033 int len;
2034 int flag;
2035
2036 w = container_of(work, struct smux_rx_worker_data, work);
2037 data = w->data;
2038 len = w->len;
2039 flag = w->flag;
2040
2041 spin_lock_irqsave(&smux.rx_lock_lha1, flags);
2042 smux.rx_activity_flag = 1;
2043 spin_unlock_irqrestore(&smux.rx_lock_lha1, flags);
2044
2045 SMUX_DBG("%s: %p, len=%d, flag=%d\n", __func__, data, len, flag);
2046 used = 0;
2047 do {
2048 SMUX_DBG("%s: state %d; %d of %d\n",
2049 __func__, smux.rx_state, used, len);
2050 initial_rx_state = smux.rx_state;
2051
2052 switch (smux.rx_state) {
2053 case SMUX_RX_IDLE:
2054 smux_rx_handle_idle(data, len, &used, flag);
2055 break;
2056 case SMUX_RX_MAGIC:
2057 smux_rx_handle_magic(data, len, &used, flag);
2058 break;
2059 case SMUX_RX_HDR:
2060 smux_rx_handle_hdr(data, len, &used, flag);
2061 break;
2062 case SMUX_RX_PAYLOAD:
2063 smux_rx_handle_pkt_payload(data, len, &used, flag);
2064 break;
2065 default:
2066 SMUX_DBG("%s: invalid state %d\n",
2067 __func__, smux.rx_state);
2068 smux.rx_state = SMUX_RX_IDLE;
2069 break;
2070 }
2071 } while (used < len || smux.rx_state != initial_rx_state);
2072
2073 complete(&w->work_complete);
2074}
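/*
 * Example (illustration only): each smux_rx_handle_*() call consumes what
 * it can, advancing *used and/or smux.rx_state. Looping again when the
 * state changed, even with used == len, lets a handler that finished
 * exactly on a packet boundary hand off to the next state without waiting
 * for new TTY data. The same pattern in generic form (the handlers[] table
 * is hypothetical):
 */
#if 0
do {
	prev_state = state;
	handlers[state](data, len, &used, flag);	/* may change state */
} while (used < len || state != prev_state);
#endif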
2075
2076/**
2077 * Transmit worker handles serializing and transmitting packets onto the
2078 * underlying transport.
2079 *
2080 * @work Work structure (not used)
2081 */
2082static void smux_tx_worker(struct work_struct *work)
2083{
2084 struct smux_pkt_t *pkt;
2085 struct smux_lch_t *ch;
2086 unsigned low_wm_notif;
2087 unsigned lcid;
2088 unsigned long flags;
2089
2090
2091 /*
2092 * Transmit packets in round-robin fashion based upon ready
2093 * channels.
2094 *
2095 * To eliminate the need to hold a lock for the entire
2096 * iteration through the channel ready list, the head of the
2097 * ready-channel list is always the next channel to be
2098 * processed. To send a packet, the first valid packet in
2099 * the head channel is removed and the head channel is then
2100 * rescheduled at the end of the queue by removing it and
2101 * inserting after the tail. The locks can then be released
2102 * while the packet is processed.
2103 */
2104 for (;;) {
2105 pkt = NULL;
2106 low_wm_notif = 0;
2107
2108 /* get the next ready channel */
2109 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
2110 if (list_empty(&smux.lch_tx_ready_list)) {
2111 /* no ready channels */
2112 SMUX_DBG("%s: no more ready channels, exiting\n",
2113 __func__);
2114 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2115 break;
2116 }
2117 smux.tx_activity_flag = 1;
2118
2119 if (smux.power_state != SMUX_PWR_ON
2120 && smux.power_state != SMUX_PWR_TURNING_OFF
2121 && smux.power_state != SMUX_PWR_TURNING_OFF_FLUSH) {
2122 /* Link isn't ready to transmit */
2123 if (smux.power_state == SMUX_PWR_OFF) {
2124 /* link is off, trigger wakeup */
2125 smux.pwr_wakeup_delay_us = 1;
2126 SMUX_DBG("%s: Power %d->%d\n", __func__,
2127 smux.power_state,
2128 SMUX_PWR_TURNING_ON);
2129 smux.power_state = SMUX_PWR_TURNING_ON;
2130 spin_unlock_irqrestore(&smux.tx_lock_lha2,
2131 flags);
2132 smux_uart_power_on();
2133 queue_work(smux_tx_wq, &smux_wakeup_work);
2134 } else {
2135 SMUX_DBG("%s: can not tx with power state %d\n",
2136 __func__,
2137 smux.power_state);
2138 spin_unlock_irqrestore(&smux.tx_lock_lha2,
2139 flags);
2140 }
2141 break;
2142 }
2143
2144 /* get the next packet to send and rotate channel list */
2145 ch = list_first_entry(&smux.lch_tx_ready_list,
2146 struct smux_lch_t,
2147 tx_ready_list);
2148
2149 spin_lock(&ch->state_lock_lhb1);
2150 spin_lock(&ch->tx_lock_lhb2);
2151 if (!list_empty(&ch->tx_queue)) {
2152 /*
2153 * If remote TX flow control is enabled or
2154 * the channel is not fully opened, then only
2155 * send command packets.
2156 */
2157 if (ch->tx_flow_control || !IS_FULLY_OPENED(ch)) {
2158 struct smux_pkt_t *curr;
2159 list_for_each_entry(curr, &ch->tx_queue, list) {
2160 if (curr->hdr.cmd != SMUX_CMD_DATA) {
2161 pkt = curr;
2162 break;
2163 }
2164 }
2165 } else {
2166 /* get next cmd/data packet to send */
2167 pkt = list_first_entry(&ch->tx_queue,
2168 struct smux_pkt_t, list);
2169 }
2170 }
2171
2172 if (pkt) {
2173 list_del(&pkt->list);
2174
2175 /* update packet stats */
2176 if (pkt->hdr.cmd == SMUX_CMD_DATA) {
2177 --ch->tx_pending_data_cnt;
2178 if (ch->notify_lwm &&
2179 ch->tx_pending_data_cnt
2180 <= SMUX_WM_LOW) {
2181 ch->notify_lwm = 0;
2182 low_wm_notif = 1;
2183 }
2184 }
2185
2186 /* advance to the next ready channel */
2187 list_rotate_left(&smux.lch_tx_ready_list);
2188 } else {
2189 /* no data in channel to send, remove from ready list */
2190 list_del(&ch->tx_ready_list);
2191 INIT_LIST_HEAD(&ch->tx_ready_list);
2192 }
2193 lcid = ch->lcid;
2194 spin_unlock(&ch->tx_lock_lhb2);
2195 spin_unlock(&ch->state_lock_lhb1);
2196 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2197
2198 if (low_wm_notif)
2199 schedule_notify(lcid, SMUX_LOW_WM_HIT, NULL);
2200
2201 /* send the packet */
2202 smux_tx_pkt(ch, pkt);
2203 smux_free_pkt(pkt);
2204 }
2205}
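/*
 * Example (illustration only; dequeue_one_packet() is a hypothetical
 * helper): the round-robin above always takes the head channel of
 * lch_tx_ready_list and then rotates the list, so each ready channel sends
 * one packet per pass and no lock is held across the whole iteration:
 */
#if 0
ch = list_first_entry(&smux.lch_tx_ready_list, struct smux_lch_t,
			tx_ready_list);
pkt = dequeue_one_packet(ch);
if (pkt)
	list_rotate_left(&smux.lch_tx_ready_list);	/* head -> tail */
else
	list_del_init(&ch->tx_ready_list);	/* nothing to send */
#endif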
2206
2207
2208/**********************************************************************/
2209/* Kernel API */
2210/**********************************************************************/
2211
2212/**
2213 * Set or clear channel option using the SMUX_CH_OPTION_* channel
2214 * flags.
2215 *
2216 * @lcid Logical channel ID
2217 * @set Options to set
2218 * @clear Options to clear
2219 *
2220 * @returns 0 for success, < 0 for failure
2221 */
2222int msm_smux_set_ch_option(uint8_t lcid, uint32_t set, uint32_t clear)
2223{
2224 unsigned long flags;
2225 struct smux_lch_t *ch;
2226 int tx_ready = 0;
2227 int ret = 0;
2228
2229 if (smux_assert_lch_id(lcid))
2230 return -ENXIO;
2231
2232 ch = &smux_lch[lcid];
2233 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2234
2235 /* Local loopback mode */
2236 if (set & SMUX_CH_OPTION_LOCAL_LOOPBACK)
2237 ch->local_mode = SMUX_LCH_MODE_LOCAL_LOOPBACK;
2238
2239 if (clear & SMUX_CH_OPTION_LOCAL_LOOPBACK)
2240 ch->local_mode = SMUX_LCH_MODE_NORMAL;
2241
2242 /* Remote loopback mode */
2243 if (set & SMUX_CH_OPTION_REMOTE_LOOPBACK)
2244 ch->local_mode = SMUX_LCH_MODE_REMOTE_LOOPBACK;
2245
2246 if (clear & SMUX_CH_OPTION_REMOTE_LOOPBACK)
2247 ch->local_mode = SMUX_LCH_MODE_NORMAL;
2248
2249 /* Flow control */
2250 if (set & SMUX_CH_OPTION_REMOTE_TX_STOP) {
2251 ch->local_tiocm |= SMUX_CMD_STATUS_FLOW_CNTL;
2252 ret = smux_send_status_cmd(ch);
2253 tx_ready = 1;
2254 }
2255
2256 if (clear & SMUX_CH_OPTION_REMOTE_TX_STOP) {
2257 ch->local_tiocm &= ~SMUX_CMD_STATUS_FLOW_CNTL;
2258 ret = smux_send_status_cmd(ch);
2259 tx_ready = 1;
2260 }
2261
2262 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2263
2264 if (tx_ready)
2265 list_channel(ch);
2266
2267 return ret;
2268}
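/*
 * Usage sketch (illustration only; the channel ID is hypothetical): enable
 * local loopback on a channel for testing, then restore normal mode:
 */
#if 0
uint8_t lcid = SMUX_TEST_LCID;
int ret;

ret = msm_smux_set_ch_option(lcid, SMUX_CH_OPTION_LOCAL_LOOPBACK, 0);
/* ... run loopback traffic ... */
ret = msm_smux_set_ch_option(lcid, 0, SMUX_CH_OPTION_LOCAL_LOOPBACK);
#endif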
2269
2270/**
2271 * Starts the opening sequence for a logical channel.
2272 *
2273 * @lcid Logical channel ID
2274 * @priv Free for client usage
2275 * @notify Event notification function
2276 * @get_rx_buffer Function used to provide a receive buffer to SMUX
2277 *
2278 * @returns 0 for success, <0 otherwise
2279 *
2280 * A channel must be fully closed (either not previously opened or
2281 * msm_smux_close() has been called and the SMUX_DISCONNECTED notification
2282 * has been received).
2283 *
2284 * Once the remote side is opened, the client will receive a SMUX_CONNECTED
2285 * event.
2286 */
2287int msm_smux_open(uint8_t lcid, void *priv,
2288 void (*notify)(void *priv, int event_type, const void *metadata),
2289 int (*get_rx_buffer)(void *priv, void **pkt_priv, void **buffer,
2290 int size))
2291{
2292 int ret;
2293 struct smux_lch_t *ch;
2294 struct smux_pkt_t *pkt;
2295 int tx_ready = 0;
2296 unsigned long flags;
2297
2298 if (smux_assert_lch_id(lcid))
2299 return -ENXIO;
2300
2301 ch = &smux_lch[lcid];
2302 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2303
2304 if (ch->local_state == SMUX_LCH_LOCAL_CLOSING) {
2305 ret = -EAGAIN;
2306 goto out;
2307 }
2308
2309 if (ch->local_state != SMUX_LCH_LOCAL_CLOSED) {
2310 pr_err("%s: open lcid %d local state %x invalid\n",
2311 __func__, lcid, ch->local_state);
2312 ret = -EINVAL;
2313 goto out;
2314 }
2315
2316 SMUX_DBG("lcid %d local state 0x%x -> 0x%x\n", lcid,
2317 ch->local_state,
2318 SMUX_LCH_LOCAL_OPENING);
2319
2320 ch->local_state = SMUX_LCH_LOCAL_OPENING;
2321
2322 ch->priv = priv;
2323 ch->notify = notify;
2324 ch->get_rx_buffer = get_rx_buffer;
2325 ret = 0;
2326
2327 /* Send Open Command */
2328 pkt = smux_alloc_pkt();
2329 if (!pkt) {
2330 ret = -ENOMEM;
2331 goto out;
2332 }
2333 pkt->hdr.magic = SMUX_MAGIC;
2334 pkt->hdr.cmd = SMUX_CMD_OPEN_LCH;
2335 pkt->hdr.flags = SMUX_CMD_OPEN_POWER_COLLAPSE;
2336 if (ch->local_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK)
2337 pkt->hdr.flags |= SMUX_CMD_OPEN_REMOTE_LOOPBACK;
2338 pkt->hdr.lcid = lcid;
2339 pkt->hdr.payload_len = 0;
2340 pkt->hdr.pad_len = 0;
2341 smux_tx_queue(pkt, ch, 0);
2342 tx_ready = 1;
2343
2344out:
2345 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2346 if (tx_ready)
2347 list_channel(ch);
2348 return ret;
2349}
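/*
 * Client sketch (illustration only; the my_* names are hypothetical and the
 * GFP_KERNEL flag assumes get_rx_buffer may sleep): supply an event
 * callback and an RX-buffer allocator, then open the channel. The channel
 * is usable once notify() reports SMUX_CONNECTED; tear-down is
 * msm_smux_close() followed by the SMUX_DISCONNECTED event.
 */
#if 0
static void my_notify(void *priv, int event_type, const void *metadata)
{
	if (event_type == SMUX_CONNECTED)
		;	/* both sides open; queued data will now transmit */
}

static int my_get_rx_buffer(void *priv, void **pkt_priv, void **buffer,
				int size)
{
	*pkt_priv = NULL;
	*buffer = kmalloc(size, GFP_KERNEL);
	return *buffer ? 0 : -ENOMEM;
}

ret = msm_smux_open(lcid, NULL, my_notify, my_get_rx_buffer);
#endif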
2350
2351/**
2352 * Starts the closing sequence for a logical channel.
2353 *
2354 * @lcid Logical channel ID
2355 *
2356 * @returns 0 for success, <0 otherwise
2357 *
2358 * Once the close event has been acknowledged by the remote side, the client
2359 * will receive a SMUX_DISCONNECTED notification.
2360 */
2361int msm_smux_close(uint8_t lcid)
2362{
2363 int ret = 0;
2364 struct smux_lch_t *ch;
2365 struct smux_pkt_t *pkt;
2366 int tx_ready = 0;
2367 unsigned long flags;
2368
2369 if (smux_assert_lch_id(lcid))
2370 return -ENXIO;
2371
2372 ch = &smux_lch[lcid];
2373 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2374 ch->local_tiocm = 0x0;
2375 ch->remote_tiocm = 0x0;
2376 ch->tx_pending_data_cnt = 0;
2377 ch->notify_lwm = 0;
2378
2379 /* Purge TX queue */
2380 spin_lock(&ch->tx_lock_lhb2);
2381 while (!list_empty(&ch->tx_queue)) {
2382 pkt = list_first_entry(&ch->tx_queue, struct smux_pkt_t,
2383 list);
2384 list_del(&pkt->list);
2385
2386 if (pkt->hdr.cmd == SMUX_CMD_OPEN_LCH) {
2387 /* Open was never sent, just force to closed state */
2388 union notifier_metadata meta_disconnected;
2389
2390 ch->local_state = SMUX_LCH_LOCAL_CLOSED;
2391 meta_disconnected.disconnected.is_ssr = 0;
2392 schedule_notify(lcid, SMUX_DISCONNECTED,
2393 &meta_disconnected);
2394 } else if (pkt->hdr.cmd == SMUX_CMD_DATA) {
2395 /* Notify client of failed write */
2396 union notifier_metadata meta_write;
2397
2398 meta_write.write.pkt_priv = pkt->priv;
2399 meta_write.write.buffer = pkt->payload;
2400 meta_write.write.len = pkt->hdr.payload_len;
2401 schedule_notify(ch->lcid, SMUX_WRITE_FAIL, &meta_write);
2402 }
2403 smux_free_pkt(pkt);
2404 }
2405 spin_unlock(&ch->tx_lock_lhb2);
2406
2407 /* Send Close Command */
2408 if (ch->local_state == SMUX_LCH_LOCAL_OPENED ||
2409 ch->local_state == SMUX_LCH_LOCAL_OPENING) {
2410 SMUX_DBG("lcid %d local state 0x%x -> 0x%x\n", lcid,
2411 ch->local_state,
2412 SMUX_LCH_LOCAL_CLOSING);
2413
2414 ch->local_state = SMUX_LCH_LOCAL_CLOSING;
2415 pkt = smux_alloc_pkt();
2416 if (pkt) {
2417 pkt->hdr.cmd = SMUX_CMD_CLOSE_LCH;
2418 pkt->hdr.flags = 0;
2419 pkt->hdr.lcid = lcid;
2420 pkt->hdr.payload_len = 0;
2421 pkt->hdr.pad_len = 0;
2422 smux_tx_queue(pkt, ch, 0);
2423 tx_ready = 1;
2424 } else {
2425 pr_err("%s: pkt allocation failed\n", __func__);
2426 ret = -ENOMEM;
2427 }
2428 }
2429 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2430
2431 if (tx_ready)
2432 list_channel(ch);
2433
2434 return ret;
2435}
2436
2437/**
2438 * Write data to a logical channel.
2439 *
2440 * @lcid Logical channel ID
2441 * @pkt_priv Client data that will be returned with the SMUX_WRITE_DONE or
2442 * SMUX_WRITE_FAIL notification.
2443 * @data Data to write
2444 * @len Length of @data
2445 *
2446 * @returns 0 for success, <0 otherwise
2447 *
2448 * Data may be written immediately after msm_smux_open() is called,
2449 * but the data will wait in the transmit queue until the channel has
2450 * been fully opened.
2451 *
2452 * Once the data has been written, the client will receive either a completion
2453 * (SMUX_WRITE_DONE) or a failure notice (SMUX_WRITE_FAIL).
2454 */
2455int msm_smux_write(uint8_t lcid, void *pkt_priv, const void *data, int len)
2456{
2457 struct smux_lch_t *ch;
2458	struct smux_pkt_t *pkt = NULL;
2459 int tx_ready = 0;
2460 unsigned long flags;
2461 int ret;
2462
2463 if (smux_assert_lch_id(lcid))
2464 return -ENXIO;
2465
2466 ch = &smux_lch[lcid];
2467 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2468
2469 if (ch->local_state != SMUX_LCH_LOCAL_OPENED &&
2470 ch->local_state != SMUX_LCH_LOCAL_OPENING) {
2471		pr_err("%s: invalid local state %d channel %d\n",
2472 __func__, ch->local_state, lcid);
2473 ret = -EINVAL;
2474 goto out;
2475 }
2476
2477 if (len > SMUX_MAX_PKT_SIZE - sizeof(struct smux_hdr_t)) {
2478 pr_err("%s: payload %d too large\n",
2479 __func__, len);
2480 ret = -E2BIG;
2481 goto out;
2482 }
2483
2484 pkt = smux_alloc_pkt();
2485 if (!pkt) {
2486 ret = -ENOMEM;
2487 goto out;
2488 }
2489
2490 pkt->hdr.cmd = SMUX_CMD_DATA;
2491 pkt->hdr.lcid = lcid;
2492 pkt->hdr.flags = 0;
2493 pkt->hdr.payload_len = len;
2494 pkt->payload = (void *)data;
2495 pkt->priv = pkt_priv;
2496 pkt->hdr.pad_len = 0;
2497
2498 spin_lock(&ch->tx_lock_lhb2);
2499 /* verify high watermark */
2500	SMUX_DBG("%s: pending %d\n", __func__, ch->tx_pending_data_cnt);
2501
2502 if (ch->tx_pending_data_cnt >= SMUX_WM_HIGH) {
2503 pr_err("%s: ch %d high watermark %d exceeded %d\n",
2504 __func__, lcid, SMUX_WM_HIGH,
2505 ch->tx_pending_data_cnt);
2506 ret = -EAGAIN;
2507 goto out_inner;
2508 }
2509
2510 /* queue packet for transmit */
2511 if (++ch->tx_pending_data_cnt == SMUX_WM_HIGH) {
2512 ch->notify_lwm = 1;
2513 pr_err("%s: high watermark hit\n", __func__);
2514 schedule_notify(lcid, SMUX_HIGH_WM_HIT, NULL);
2515 }
2516 list_add_tail(&pkt->list, &ch->tx_queue);
2517
2518 /* add to ready list */
2519 if (IS_FULLY_OPENED(ch))
2520 tx_ready = 1;
2521
2522 ret = 0;
2523
2524out_inner:
2525 spin_unlock(&ch->tx_lock_lhb2);
2526
2527out:
2528	if (ret && pkt)
2529 smux_free_pkt(pkt);
2530 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2531
2532 if (tx_ready)
2533 list_channel(ch);
2534
2535 return ret;
2536}
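/*
 * Write sketch (illustration only; my_ctx/buf/len are hypothetical): @data
 * must remain valid until the SMUX_WRITE_DONE or SMUX_WRITE_FAIL
 * notification returns it, and -EAGAIN means the channel is at the high
 * watermark:
 */
#if 0
ret = msm_smux_write(lcid, my_ctx, buf, len);
if (ret == -EAGAIN)
	;	/* queue full: wait for SMUX_LOW_WM_HIT, then retry */
#endif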
2537
2538/**
2539 * Returns true if the TX queue is currently full (at or above the high watermark).
2540 *
2541 * @lcid Logical channel ID
2542 * @returns 0 if channel is not full
2543 * 1 if it is full
2544 * < 0 for error
2545 */
2546int msm_smux_is_ch_full(uint8_t lcid)
2547{
2548 struct smux_lch_t *ch;
2549 unsigned long flags;
2550 int is_full = 0;
2551
2552 if (smux_assert_lch_id(lcid))
2553 return -ENXIO;
2554
2555 ch = &smux_lch[lcid];
2556
2557 spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
2558 if (ch->tx_pending_data_cnt >= SMUX_WM_HIGH)
2559 is_full = 1;
2560 spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);
2561
2562 return is_full;
2563}
2564
2565/**
2566 * Returns true if the TX queue has space for more packets (it is at or
2567 * below the low watermark).
2568 *
2569 * @lcid Logical channel ID
2570 * @returns 0 if channel is above low watermark
2571 * 1 if it's at or below the low watermark
2572 * < 0 for error
2573 */
2574int msm_smux_is_ch_low(uint8_t lcid)
2575{
2576 struct smux_lch_t *ch;
2577 unsigned long flags;
2578 int is_low = 0;
2579
2580 if (smux_assert_lch_id(lcid))
2581 return -ENXIO;
2582
2583 ch = &smux_lch[lcid];
2584
2585 spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
2586 if (ch->tx_pending_data_cnt <= SMUX_WM_LOW)
2587 is_low = 1;
2588 spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);
2589
2590 return is_low;
2591}
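/*
 * Flow-control sketch (illustration only; have_data() and send_next() are
 * hypothetical): the watermark helpers let a client pace its writes instead
 * of reacting to -EAGAIN:
 */
#if 0
while (have_data() && msm_smux_is_ch_full(lcid) == 0)
	send_next(lcid);
#endif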
2592
2593/**
2594 * Send TIOCM status update.
2595 *
2596 * @ch Channel for update
2597 *
2598 * @returns 0 for success, <0 for failure
2599 *
2600 * Channel lock must be held before calling.
2601 */
2602static int smux_send_status_cmd(struct smux_lch_t *ch)
2603{
2604 struct smux_pkt_t *pkt;
2605
2606 if (!ch)
2607 return -EINVAL;
2608
2609 pkt = smux_alloc_pkt();
2610 if (!pkt)
2611 return -ENOMEM;
2612
2613 pkt->hdr.lcid = ch->lcid;
2614 pkt->hdr.cmd = SMUX_CMD_STATUS;
2615 pkt->hdr.flags = ch->local_tiocm;
2616 pkt->hdr.payload_len = 0;
2617 pkt->hdr.pad_len = 0;
2618 smux_tx_queue(pkt, ch, 0);
2619
2620 return 0;
2621}
2622
2623/**
2624 * Internal helper function for getting the TIOCM status with
2625 * state_lock_lhb1 already locked.
2626 *
2627 * @ch Channel pointer
2628 *
2629 * @returns TIOCM status
2630 */
2631static long msm_smux_tiocm_get_atomic(struct smux_lch_t *ch)
2632{
2633 long status = 0x0;
2634
2635 status |= (ch->remote_tiocm & SMUX_CMD_STATUS_RTC) ? TIOCM_DSR : 0;
2636 status |= (ch->remote_tiocm & SMUX_CMD_STATUS_RTR) ? TIOCM_CTS : 0;
2637 status |= (ch->remote_tiocm & SMUX_CMD_STATUS_RI) ? TIOCM_RI : 0;
2638 status |= (ch->remote_tiocm & SMUX_CMD_STATUS_DCD) ? TIOCM_CD : 0;
2639
2640 status |= (ch->local_tiocm & SMUX_CMD_STATUS_RTC) ? TIOCM_DTR : 0;
2641 status |= (ch->local_tiocm & SMUX_CMD_STATUS_RTR) ? TIOCM_RTS : 0;
2642
2643 return status;
2644}
2645
2646/**
2647 * Get the TIOCM status bits.
2648 *
2649 * @lcid Logical channel ID
2650 *
2651 * @returns >= 0 TIOCM status bits
2652 * < 0 Error condition
2653 */
2654long msm_smux_tiocm_get(uint8_t lcid)
2655{
2656 struct smux_lch_t *ch;
2657 unsigned long flags;
2658 long status = 0x0;
2659
2660 if (smux_assert_lch_id(lcid))
2661 return -ENXIO;
2662
2663 ch = &smux_lch[lcid];
2664 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2665 status = msm_smux_tiocm_get_atomic(ch);
2666 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2667
2668 return status;
2669}
2670
2671/**
2672 * Set/clear the TIOCM status bits.
2673 *
2674 * @lcid Logical channel ID
2675 * @set Bits to set
2676 * @clear Bits to clear
2677 *
2678 * @returns 0 for success; < 0 for failure
2679 *
2680 * If a bit is specified in both the @set and @clear masks, then the clear bit
2681 * definition will dominate and the bit will be cleared.
2682 */
2683int msm_smux_tiocm_set(uint8_t lcid, uint32_t set, uint32_t clear)
2684{
2685 struct smux_lch_t *ch;
2686 unsigned long flags;
2687 uint8_t old_status;
2688 uint8_t status_set = 0x0;
2689 uint8_t status_clear = 0x0;
2690 int tx_ready = 0;
2691 int ret = 0;
2692
2693 if (smux_assert_lch_id(lcid))
2694 return -ENXIO;
2695
2696 ch = &smux_lch[lcid];
2697 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2698
2699 status_set |= (set & TIOCM_DTR) ? SMUX_CMD_STATUS_RTC : 0;
2700 status_set |= (set & TIOCM_RTS) ? SMUX_CMD_STATUS_RTR : 0;
2701 status_set |= (set & TIOCM_RI) ? SMUX_CMD_STATUS_RI : 0;
2702 status_set |= (set & TIOCM_CD) ? SMUX_CMD_STATUS_DCD : 0;
2703
2704 status_clear |= (clear & TIOCM_DTR) ? SMUX_CMD_STATUS_RTC : 0;
2705 status_clear |= (clear & TIOCM_RTS) ? SMUX_CMD_STATUS_RTR : 0;
2706 status_clear |= (clear & TIOCM_RI) ? SMUX_CMD_STATUS_RI : 0;
2707 status_clear |= (clear & TIOCM_CD) ? SMUX_CMD_STATUS_DCD : 0;
2708
2709 old_status = ch->local_tiocm;
2710 ch->local_tiocm |= status_set;
2711 ch->local_tiocm &= ~status_clear;
2712
2713 if (ch->local_tiocm != old_status) {
2714 ret = smux_send_status_cmd(ch);
2715 tx_ready = 1;
2716 }
2717 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2718
2719 if (tx_ready)
2720 list_channel(ch);
2721
2722 return ret;
2723}
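/*
 * TIOCM sketch (illustration only): locally asserted DTR/RTS are sent to
 * the remote side as the RTC/RTR status bits, while the remote side's
 * RTC/RTR read back locally as DSR/CTS:
 */
#if 0
long status;

msm_smux_tiocm_set(lcid, TIOCM_DTR | TIOCM_RTS, 0);
status = msm_smux_tiocm_get(lcid);
if (status & TIOCM_CTS)
	;	/* remote side is ready to receive */
#endif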
2724
2725/**********************************************************************/
2726/* Line Discipline Interface */
2727/**********************************************************************/
2728static int smuxld_open(struct tty_struct *tty)
2729{
2730 int i;
2731 int tmp;
2732 unsigned long flags;
2733
2734 if (!smux.is_initialized)
2735 return -ENODEV;
2736
2737 spin_lock_irqsave(&smux.lock_lha0, flags);
2738 if (smux.ld_open_count) {
2739 pr_err("%s: %p multiple instances not supported\n",
2740 __func__, tty);
2741		spin_unlock_irqrestore(&smux.lock_lha0, flags);
2742		return -EEXIST;
2743	}
2744
2745	if (tty->ops->write == NULL) {
2746		spin_unlock_irqrestore(&smux.lock_lha0, flags);
2747		return -EINVAL;
2748	}
2749	++smux.ld_open_count;
2750
2751 /* connect to TTY */
2752 smux.tty = tty;
2753 tty->disc_data = &smux;
2754 tty->receive_room = TTY_RECEIVE_ROOM;
2755 tty_driver_flush_buffer(tty);
2756
2757 /* power-down the UART if we are idle */
2758 spin_lock(&smux.tx_lock_lha2);
2759 if (smux.power_state == SMUX_PWR_OFF) {
2760 SMUX_DBG("%s: powering off uart\n", __func__);
2761 smux.power_state = SMUX_PWR_OFF_FLUSH;
2762 spin_unlock(&smux.tx_lock_lha2);
2763 queue_work(smux_tx_wq, &smux_inactivity_work);
2764 } else {
2765 spin_unlock(&smux.tx_lock_lha2);
2766 }
2767	spin_unlock_irqrestore(&smux.lock_lha0, flags);
2768
2769 /* register platform devices */
2770 for (i = 0; i < ARRAY_SIZE(smux_devs); ++i) {
2771 tmp = platform_device_register(&smux_devs[i]);
2772 if (tmp)
2773 pr_err("%s: error %d registering device %s\n",
2774 __func__, tmp, smux_devs[i].name);
2775 }
2776	return 0;
2777}
2778
2779static void smuxld_close(struct tty_struct *tty)
2780{
2781 unsigned long flags;
2782 int i;
2783
2784 spin_lock_irqsave(&smux.lock_lha0, flags);
2785 if (smux.ld_open_count <= 0) {
2786 pr_err("%s: invalid ld count %d\n", __func__,
2787 smux.ld_open_count);
2788		spin_unlock_irqrestore(&smux.lock_lha0, flags);
2789		return;
2790	}
2791	--smux.ld_open_count;
2792	spin_unlock_irqrestore(&smux.lock_lha0, flags);
2793
2794	for (i = 0; i < ARRAY_SIZE(smux_devs); ++i)
2795		platform_device_unregister(&smux_devs[i]);
2796
2797}
2798
2799/**
2800 * Receive data from TTY Line Discipline.
2801 *
2802 * @tty TTY structure
2803 * @cp Character data
2804 * @fp Flag data
2805 * @count Size of character and flag data
2806 */
2807void smuxld_receive_buf(struct tty_struct *tty, const unsigned char *cp,
2808 char *fp, int count)
2809{
2810 int i;
2811 int last_idx = 0;
2812 const char *tty_name = NULL;
2813 char *f;
2814
2815 if (smux_debug_mask & MSM_SMUX_DEBUG)
2816 print_hex_dump(KERN_INFO, "smux tty rx: ", DUMP_PREFIX_OFFSET,
2817 16, 1, cp, count, true);
2818
2819 /* verify error flags */
2820 for (i = 0, f = fp; i < count; ++i, ++f) {
2821 if (*f != TTY_NORMAL) {
2822 if (tty)
2823 tty_name = tty->name;
2824 pr_err("%s: TTY %s Error %d (%s)\n", __func__,
2825 tty_name, *f, tty_flag_to_str(*f));
2826
2827 /* feed all previous valid data to the parser */
2828 smux_rx_state_machine(cp + last_idx, i - last_idx,
2829 TTY_NORMAL);
2830
2831 /* feed bad data to parser */
2832 smux_rx_state_machine(cp + i, 1, *f);
2833 last_idx = i + 1;
2834 }
2835 }
2836
2837 /* feed data to RX state machine */
2838 smux_rx_state_machine(cp + last_idx, count - last_idx, TTY_NORMAL);
2839}
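/*
 * Worked example (illustration only): with count == 5 and an error flag on
 * byte 2, the loop above feeds bytes 0..1 as TTY_NORMAL, byte 2 alone with
 * its error flag, and the tail bytes 3..4 as TTY_NORMAL, so the RX state
 * machine sees every byte exactly once and in order.
 */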
2840
2841static void smuxld_flush_buffer(struct tty_struct *tty)
2842{
2843 pr_err("%s: not supported\n", __func__);
2844}
2845
2846static ssize_t smuxld_chars_in_buffer(struct tty_struct *tty)
2847{
2848 pr_err("%s: not supported\n", __func__);
2849 return -ENODEV;
2850}
2851
2852static ssize_t smuxld_read(struct tty_struct *tty, struct file *file,
2853 unsigned char __user *buf, size_t nr)
2854{
2855 pr_err("%s: not supported\n", __func__);
2856 return -ENODEV;
2857}
2858
2859static ssize_t smuxld_write(struct tty_struct *tty, struct file *file,
2860 const unsigned char *buf, size_t nr)
2861{
2862 pr_err("%s: not supported\n", __func__);
2863 return -ENODEV;
2864}
2865
2866static int smuxld_ioctl(struct tty_struct *tty, struct file *file,
2867 unsigned int cmd, unsigned long arg)
2868{
2869 pr_err("%s: not supported\n", __func__);
2870 return -ENODEV;
2871}
2872
2873static unsigned int smuxld_poll(struct tty_struct *tty, struct file *file,
2874 struct poll_table_struct *tbl)
2875{
2876 pr_err("%s: not supported\n", __func__);
2877 return -ENODEV;
2878}
2879
2880static void smuxld_write_wakeup(struct tty_struct *tty)
2881{
2882 pr_err("%s: not supported\n", __func__);
2883}
2884
2885static struct tty_ldisc_ops smux_ldisc_ops = {
2886 .owner = THIS_MODULE,
2887 .magic = TTY_LDISC_MAGIC,
2888 .name = "n_smux",
2889 .open = smuxld_open,
2890 .close = smuxld_close,
2891 .flush_buffer = smuxld_flush_buffer,
2892 .chars_in_buffer = smuxld_chars_in_buffer,
2893 .read = smuxld_read,
2894 .write = smuxld_write,
2895 .ioctl = smuxld_ioctl,
2896 .poll = smuxld_poll,
2897 .receive_buf = smuxld_receive_buf,
2898 .write_wakeup = smuxld_write_wakeup
2899};
2900
2901static int __init smux_init(void)
2902{
2903 int ret;
2904
2905 spin_lock_init(&smux.lock_lha0);
2906
2907 spin_lock_init(&smux.rx_lock_lha1);
2908 smux.rx_state = SMUX_RX_IDLE;
2909 smux.power_state = SMUX_PWR_OFF;
2910 smux.pwr_wakeup_delay_us = 1;
2911 smux.powerdown_enabled = 0;
2912 smux.rx_activity_flag = 0;
2913 smux.tx_activity_flag = 0;
2914 smux.recv_len = 0;
2915 smux.tty = NULL;
2916 smux.ld_open_count = 0;
2917 smux.in_reset = 0;
2918 smux.is_initialized = 1;
2919 smux_byte_loopback = 0;
2920
2921 spin_lock_init(&smux.tx_lock_lha2);
2922 INIT_LIST_HEAD(&smux.lch_tx_ready_list);
2923
2924 ret = tty_register_ldisc(N_SMUX, &smux_ldisc_ops);
2925 if (ret != 0) {
2926 pr_err("%s: error %d registering line discipline\n",
2927 __func__, ret);
2928 return ret;
2929 }
2930
2931 ret = lch_init();
2932 if (ret != 0) {
2933 pr_err("%s: lch_init failed\n", __func__);
2934 return ret;
2935 }
2936
2937 return 0;
2938}
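/*
 * Userspace sketch (illustration only; the device node is an assumption):
 * once the line discipline is registered, a daemon attaches it to the HS
 * UART with the TIOCSETD ioctl, after which all traffic on that tty is
 * owned by SMUX:
 */
#if 0
#include <fcntl.h>
#include <sys/ioctl.h>

int ldisc = N_SMUX;
int fd = open("/dev/ttyHS0", O_RDWR);

if (fd >= 0 && ioctl(fd, TIOCSETD, &ldisc) < 0)
	perror("TIOCSETD");
#endif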
2939
2940static void __exit smux_exit(void)
2941{
2942 int ret;
2943
2944 ret = tty_unregister_ldisc(N_SMUX);
2945 if (ret != 0) {
2946 pr_err("%s error %d unregistering line discipline\n",
2947 __func__, ret);
2948 return;
2949 }
2950}
2951
2952module_init(smux_init);
2953module_exit(smux_exit);
2954
2955MODULE_DESCRIPTION("Serial Mux TTY Line Discipline");
2956MODULE_LICENSE("GPL v2");
2957MODULE_ALIAS_LDISC(N_SMUX);