/* drivers/tty/n_smux.c
2 *
3 * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
4 *
5 * This software is licensed under the terms of the GNU General Public
6 * License version 2, as published by the Free Software Foundation, and
7 * may be copied, distributed, and modified under those terms.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 */
15#include <linux/module.h>
16#include <linux/types.h>
17#include <linux/errno.h>
18#include <linux/tty.h>
19#include <linux/tty_flip.h>
20#include <linux/tty_driver.h>
21#include <linux/smux.h>
22#include <linux/list.h>
23#include <linux/kfifo.h>
24#include <linux/slab.h>
26#include <linux/platform_device.h>
27#include <linux/delay.h>
28#include <mach/msm_serial_hs.h>
29#include "smux_private.h"
30#include "smux_loopback.h"
31
32#define SMUX_NOTIFY_FIFO_SIZE 128
33#define SMUX_TX_QUEUE_SIZE 256
34#define SMUX_GET_RX_BUFF_MAX_RETRY_CNT 2
35#define SMUX_WM_LOW 2
36#define SMUX_WM_HIGH 4
37#define SMUX_PKT_LOG_SIZE 80
38
39/* Maximum size we can accept in a single RX buffer */
40#define TTY_RECEIVE_ROOM 65536
41#define TTY_BUFFER_FULL_WAIT_MS 50
42
43/* maximum sleep time between wakeup attempts */
44#define SMUX_WAKEUP_DELAY_MAX (1 << 20)
45
46/* minimum delay for scheduling delayed work */
47#define SMUX_WAKEUP_DELAY_MIN (1 << 15)
48
49/* inactivity timeout for no rx/tx activity */
50#define SMUX_INACTIVITY_TIMEOUT_MS 1000
51
52enum {
53 MSM_SMUX_DEBUG = 1U << 0,
54 MSM_SMUX_INFO = 1U << 1,
55 MSM_SMUX_POWER_INFO = 1U << 2,
56 MSM_SMUX_PKT = 1U << 3,
57};
58
59static int smux_debug_mask;
60module_param_named(debug_mask, smux_debug_mask,
61 int, S_IRUGO | S_IWUSR | S_IWGRP);
62
/* Byte loopback and simulated wakeup delay used for testing */
64int smux_byte_loopback;
65module_param_named(byte_loopback, smux_byte_loopback,
66 int, S_IRUGO | S_IWUSR | S_IWGRP);
67int smux_simulate_wakeup_delay = 1;
68module_param_named(simulate_wakeup_delay, smux_simulate_wakeup_delay,
69 int, S_IRUGO | S_IWUSR | S_IWGRP);
70
71#define SMUX_DBG(x...) do { \
72 if (smux_debug_mask & MSM_SMUX_DEBUG) \
73 pr_info(x); \
74} while (0)
75
76#define SMUX_LOG_PKT_RX(pkt) do { \
77 if (smux_debug_mask & MSM_SMUX_PKT) \
78 smux_log_pkt(pkt, 1); \
79} while (0)
80
81#define SMUX_LOG_PKT_TX(pkt) do { \
82 if (smux_debug_mask & MSM_SMUX_PKT) \
83 smux_log_pkt(pkt, 0); \
84} while (0)
85
86/**
87 * Return true if channel is fully opened (both
88 * local and remote sides are in the OPENED state).
89 */
90#define IS_FULLY_OPENED(ch) \
91 (ch && (ch)->local_state == SMUX_LCH_LOCAL_OPENED \
92 && (ch)->remote_state == SMUX_LCH_REMOTE_OPENED)
93
94static struct platform_device smux_devs[] = {
95 {.name = "SMUX_CTL", .id = -1},
96 {.name = "SMUX_RMNET", .id = -1},
97 {.name = "SMUX_DUN_DATA_HSUART", .id = 0},
98 {.name = "SMUX_RMNET_DATA_HSUART", .id = 1},
99 {.name = "SMUX_RMNET_CTL_HSUART", .id = 0},
100 {.name = "SMUX_DIAG", .id = -1},
101};
102
103enum {
104 SMUX_CMD_STATUS_RTC = 1 << 0,
105 SMUX_CMD_STATUS_RTR = 1 << 1,
106 SMUX_CMD_STATUS_RI = 1 << 2,
107 SMUX_CMD_STATUS_DCD = 1 << 3,
108 SMUX_CMD_STATUS_FLOW_CNTL = 1 << 4,
109};
110
111/* Channel mode */
112enum {
113 SMUX_LCH_MODE_NORMAL,
114 SMUX_LCH_MODE_LOCAL_LOOPBACK,
115 SMUX_LCH_MODE_REMOTE_LOOPBACK,
116};
117
118enum {
119 SMUX_RX_IDLE,
120 SMUX_RX_MAGIC,
121 SMUX_RX_HDR,
122 SMUX_RX_PAYLOAD,
123 SMUX_RX_FAILURE,
124};
125
126/**
127 * Power states.
128 *
129 * The _FLUSH states are internal transitional states and are not part of the
130 * official state machine.
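 *
 * Typical transitions, as implemented below (informational):
 *   wakeup:             SMUX_PWR_OFF -> SMUX_PWR_TURNING_ON -> SMUX_PWR_ON
 *   local-idle power-down:    SMUX_PWR_ON -> SMUX_PWR_TURNING_OFF ->
 *                             SMUX_PWR_OFF_FLUSH -> SMUX_PWR_OFF
 *   remote-requested power-down: SMUX_PWR_ON -> SMUX_PWR_TURNING_OFF_FLUSH ->
 *                                SMUX_PWR_OFF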
131 */
132enum {
133 SMUX_PWR_OFF,
134 SMUX_PWR_TURNING_ON,
135 SMUX_PWR_ON,
136 SMUX_PWR_TURNING_OFF_FLUSH,
137 SMUX_PWR_TURNING_OFF,
138 SMUX_PWR_OFF_FLUSH,
139};
140
141/**
142 * Logical Channel Structure. One instance per channel.
143 *
144 * Locking Hierarchy
145 * Each lock has a postfix that describes the locking level. If multiple locks
 * are required, they must be acquired in increasing hierarchy order, which
 * prevents deadlock.
148 *
149 * Locking Example
150 * If state_lock_lhb1 is currently held and the TX list needs to be
 * manipulated, then tx_lock_lhb2 may be locked since its locking hierarchy
152 * is greater. However, if tx_lock_lhb2 is held, then state_lock_lhb1 may
153 * not be acquired since it would result in a deadlock.
154 *
155 * Note that the Line Discipline locks (*_lha) should always be acquired
156 * before the logical channel locks.
157 */
158struct smux_lch_t {
159 /* channel state */
160 spinlock_t state_lock_lhb1;
161 uint8_t lcid;
162 unsigned local_state;
163 unsigned local_mode;
164 uint8_t local_tiocm;
165
166 unsigned remote_state;
167 unsigned remote_mode;
168 uint8_t remote_tiocm;
169
170 int tx_flow_control;
171
172 /* client callbacks and private data */
173 void *priv;
174 void (*notify)(void *priv, int event_type, const void *metadata);
175 int (*get_rx_buffer)(void *priv, void **pkt_priv, void **buffer,
176 int size);
177
178 /* TX Info */
179 spinlock_t tx_lock_lhb2;
180 struct list_head tx_queue;
181 struct list_head tx_ready_list;
182 unsigned tx_pending_data_cnt;
183 unsigned notify_lwm;
184};
185
186union notifier_metadata {
187 struct smux_meta_disconnected disconnected;
188 struct smux_meta_read read;
189 struct smux_meta_write write;
190 struct smux_meta_tiocm tiocm;
191};
192
193struct smux_notify_handle {
194 void (*notify)(void *priv, int event_type, const void *metadata);
195 void *priv;
196 int event_type;
197 union notifier_metadata *metadata;
198};
199
200/**
201 * Line discipline and module structure.
202 *
203 * Only one instance since multiple instances of line discipline are not
204 * allowed.
205 */
206struct smux_ldisc_t {
207 spinlock_t lock_lha0;
208
209 int is_initialized;
210 int in_reset;
211 int ld_open_count;
212 struct tty_struct *tty;
213
214 /* RX State Machine */
215 spinlock_t rx_lock_lha1;
216 unsigned char recv_buf[SMUX_MAX_PKT_SIZE];
217 unsigned int recv_len;
218 unsigned int pkt_remain;
219 unsigned rx_state;
220 unsigned rx_activity_flag;
221
222 /* TX / Power */
223 spinlock_t tx_lock_lha2;
224 struct list_head lch_tx_ready_list;
225 unsigned power_state;
226 unsigned pwr_wakeup_delay_us;
227 unsigned tx_activity_flag;
228 unsigned powerdown_enabled;
229};
230
231
232/* data structures */
233static struct smux_lch_t smux_lch[SMUX_NUM_LOGICAL_CHANNELS];
234static struct smux_ldisc_t smux;
235static const char *tty_error_type[] = {
236 [TTY_NORMAL] = "normal",
237 [TTY_OVERRUN] = "overrun",
238 [TTY_BREAK] = "break",
239 [TTY_PARITY] = "parity",
240 [TTY_FRAME] = "framing",
241};
242
243static const char *smux_cmds[] = {
244 [SMUX_CMD_DATA] = "DATA",
245 [SMUX_CMD_OPEN_LCH] = "OPEN",
246 [SMUX_CMD_CLOSE_LCH] = "CLOSE",
247 [SMUX_CMD_STATUS] = "STATUS",
248 [SMUX_CMD_PWR_CTL] = "PWR",
249 [SMUX_CMD_BYTE] = "Raw Byte",
250};
251
252static void smux_notify_local_fn(struct work_struct *work);
253static DECLARE_WORK(smux_notify_local, smux_notify_local_fn);
254
255static struct workqueue_struct *smux_notify_wq;
256static size_t handle_size;
257static struct kfifo smux_notify_fifo;
258static int queued_fifo_notifications;
259static DEFINE_SPINLOCK(notify_lock_lhc1);
260
261static struct workqueue_struct *smux_tx_wq;
262static void smux_tx_worker(struct work_struct *work);
263static DECLARE_WORK(smux_tx_work, smux_tx_worker);
264
265static void smux_wakeup_worker(struct work_struct *work);
266static DECLARE_WORK(smux_wakeup_work, smux_wakeup_worker);
267static DECLARE_DELAYED_WORK(smux_wakeup_delayed_work, smux_wakeup_worker);
268
269static void smux_inactivity_worker(struct work_struct *work);
270static DECLARE_WORK(smux_inactivity_work, smux_inactivity_worker);
271static DECLARE_DELAYED_WORK(smux_delayed_inactivity_work,
272 smux_inactivity_worker);
273
274static long msm_smux_tiocm_get_atomic(struct smux_lch_t *ch);
275static void list_channel(struct smux_lch_t *ch);
276static int smux_send_status_cmd(struct smux_lch_t *ch);
277static int smux_dispatch_rx_pkt(struct smux_pkt_t *pkt);
278
279/**
280 * Convert TTY Error Flags to string for logging purposes.
281 *
282 * @flag TTY_* flag
283 * @returns String description or NULL if unknown
284 */
285static const char *tty_flag_to_str(unsigned flag)
286{
287 if (flag < ARRAY_SIZE(tty_error_type))
288 return tty_error_type[flag];
289 return NULL;
290}
291
292/**
293 * Convert SMUX Command to string for logging purposes.
294 *
295 * @cmd SMUX command
296 * @returns String description or NULL if unknown
297 */
298static const char *cmd_to_str(unsigned cmd)
299{
300 if (cmd < ARRAY_SIZE(smux_cmds))
301 return smux_cmds[cmd];
302 return NULL;
303}
304
305/**
306 * Set the reset state due to an unrecoverable failure.
307 */
308static void smux_enter_reset(void)
309{
310 pr_err("%s: unrecoverable failure, waiting for ssr\n", __func__);
311 smux.in_reset = 1;
312}
313
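/**
 * Initialize the logical channel structures, notification FIFO, and
 * workqueues used by the SMUX core.
 *
 * @returns 0 for success, -ENOMEM on allocation failure
 */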
314static int lch_init(void)
315{
316 unsigned int id;
317 struct smux_lch_t *ch;
318 int i = 0;
319
320 handle_size = sizeof(struct smux_notify_handle *);
321
322 smux_notify_wq = create_singlethread_workqueue("smux_notify_wq");
323 smux_tx_wq = create_singlethread_workqueue("smux_tx_wq");
324
325 if (IS_ERR(smux_notify_wq) || IS_ERR(smux_tx_wq)) {
326 SMUX_DBG("%s: create_singlethread_workqueue ENOMEM\n",
327 __func__);
328 return -ENOMEM;
329 }
330
331 i |= kfifo_alloc(&smux_notify_fifo,
332 SMUX_NOTIFY_FIFO_SIZE * handle_size,
333 GFP_KERNEL);
334 i |= smux_loopback_init();
335
336 if (i) {
337 pr_err("%s: out of memory error\n", __func__);
338 return -ENOMEM;
339 }
340
341 for (id = 0 ; id < SMUX_NUM_LOGICAL_CHANNELS; id++) {
342 ch = &smux_lch[id];
343
344 spin_lock_init(&ch->state_lock_lhb1);
345 ch->lcid = id;
346 ch->local_state = SMUX_LCH_LOCAL_CLOSED;
347 ch->local_mode = SMUX_LCH_MODE_NORMAL;
348 ch->local_tiocm = 0x0;
349 ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
350 ch->remote_mode = SMUX_LCH_MODE_NORMAL;
351 ch->remote_tiocm = 0x0;
352 ch->tx_flow_control = 0;
353 ch->priv = 0;
354 ch->notify = 0;
355 ch->get_rx_buffer = 0;
356
357 spin_lock_init(&ch->tx_lock_lhb2);
358 INIT_LIST_HEAD(&ch->tx_queue);
359 INIT_LIST_HEAD(&ch->tx_ready_list);
360 ch->tx_pending_data_cnt = 0;
361 ch->notify_lwm = 0;
362 }
363
364 return 0;
365}
366
367int smux_assert_lch_id(uint32_t lcid)
368{
369 if (lcid >= SMUX_NUM_LOGICAL_CHANNELS)
370 return -ENXIO;
371 else
372 return 0;
373}
374
375/**
376 * Log packet information for debug purposes.
377 *
378 * @pkt Packet to log
379 * @is_recv 1 = RX packet; 0 = TX Packet
380 *
381 * [DIR][LCID] [LOCAL_STATE][LOCAL_MODE]:[REMOTE_STATE][REMOTE_MODE] PKT Info
382 *
383 * PKT Info:
384 * [CMD] flags [flags] len [PAYLOAD_LEN]:[PAD_LEN] [Payload hex bytes]
385 *
386 * Direction: R = Receive, S = Send
387 * Local State: C = Closed; c = closing; o = opening; O = Opened
388 * Local Mode: L = Local loopback; R = Remote loopback; N = Normal
389 * Remote State: C = Closed; O = Opened
390 * Remote Mode: R = Remote loopback; N = Normal
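 *
 * Example (hypothetical packet): "smux: R3 ON:ON DATA flags 0 len 10:2 ..."
 * is a received DATA packet on channel 3 with both ends opened in normal
 * mode, a 10-byte payload, and 2 bytes of padding.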
391 */
392static void smux_log_pkt(struct smux_pkt_t *pkt, int is_recv)
393{
394 char logbuf[SMUX_PKT_LOG_SIZE];
395 char cmd_extra[16];
396 int i = 0;
397 int count;
398 int len;
399 char local_state;
400 char local_mode;
401 char remote_state;
402 char remote_mode;
403 struct smux_lch_t *ch;
404 unsigned char *data;
405
406 ch = &smux_lch[pkt->hdr.lcid];
407
408 switch (ch->local_state) {
409 case SMUX_LCH_LOCAL_CLOSED:
410 local_state = 'C';
411 break;
412 case SMUX_LCH_LOCAL_OPENING:
413 local_state = 'o';
414 break;
415 case SMUX_LCH_LOCAL_OPENED:
416 local_state = 'O';
417 break;
418 case SMUX_LCH_LOCAL_CLOSING:
419 local_state = 'c';
420 break;
421 default:
422 local_state = 'U';
423 break;
424 }
425
426 switch (ch->local_mode) {
427 case SMUX_LCH_MODE_LOCAL_LOOPBACK:
428 local_mode = 'L';
429 break;
430 case SMUX_LCH_MODE_REMOTE_LOOPBACK:
431 local_mode = 'R';
432 break;
433 case SMUX_LCH_MODE_NORMAL:
434 local_mode = 'N';
435 break;
436 default:
437 local_mode = 'U';
438 break;
439 }
440
441 switch (ch->remote_state) {
442 case SMUX_LCH_REMOTE_CLOSED:
443 remote_state = 'C';
444 break;
445 case SMUX_LCH_REMOTE_OPENED:
446 remote_state = 'O';
447 break;
448
449 default:
450 remote_state = 'U';
451 break;
452 }
453
454 switch (ch->remote_mode) {
455 case SMUX_LCH_MODE_REMOTE_LOOPBACK:
456 remote_mode = 'R';
457 break;
458 case SMUX_LCH_MODE_NORMAL:
459 remote_mode = 'N';
460 break;
461 default:
462 remote_mode = 'U';
463 break;
464 }
465
466 /* determine command type (ACK, etc) */
467 cmd_extra[0] = '\0';
468 switch (pkt->hdr.cmd) {
469 case SMUX_CMD_OPEN_LCH:
470 if (pkt->hdr.flags & SMUX_CMD_OPEN_ACK)
471 snprintf(cmd_extra, sizeof(cmd_extra), " ACK");
472 break;
473 case SMUX_CMD_CLOSE_LCH:
474 if (pkt->hdr.flags & SMUX_CMD_CLOSE_ACK)
475 snprintf(cmd_extra, sizeof(cmd_extra), " ACK");
476 break;
	}
478
479 i += snprintf(logbuf + i, SMUX_PKT_LOG_SIZE - i,
480 "smux: %c%d %c%c:%c%c %s%s flags %x len %d:%d ",
481 is_recv ? 'R' : 'S', pkt->hdr.lcid,
482 local_state, local_mode,
483 remote_state, remote_mode,
484 cmd_to_str(pkt->hdr.cmd), cmd_extra, pkt->hdr.flags,
485 pkt->hdr.payload_len, pkt->hdr.pad_len);
486
487 len = (pkt->hdr.payload_len > 16) ? 16 : pkt->hdr.payload_len;
488 data = (unsigned char *)pkt->payload;
489 for (count = 0; count < len; count++)
490 i += snprintf(logbuf + i, SMUX_PKT_LOG_SIZE - i,
491 "%02x ", (unsigned)data[count]);
492
493 pr_info("%s\n", logbuf);
494}
495
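/**
 * Worker that drains the notification FIFO and delivers the queued events to
 * the client callbacks in process context.
 *
 * @work Work structure (not used)
 */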
496static void smux_notify_local_fn(struct work_struct *work)
497{
498 struct smux_notify_handle *notify_handle = NULL;
499 union notifier_metadata *metadata = NULL;
500 unsigned long flags;
501 int i;
502
503 for (;;) {
504 /* retrieve notification */
505 spin_lock_irqsave(&notify_lock_lhc1, flags);
506 if (kfifo_len(&smux_notify_fifo) >= handle_size) {
507 i = kfifo_out(&smux_notify_fifo,
508 &notify_handle,
509 handle_size);
510 if (i != handle_size) {
511 pr_err("%s: unable to retrieve handle %d expected %d\n",
512 __func__, i, handle_size);
513 spin_unlock_irqrestore(&notify_lock_lhc1, flags);
514 break;
515 }
516 } else {
517 spin_unlock_irqrestore(&notify_lock_lhc1, flags);
518 break;
519 }
520 --queued_fifo_notifications;
521 spin_unlock_irqrestore(&notify_lock_lhc1, flags);
522
523 /* notify client */
524 metadata = notify_handle->metadata;
525 notify_handle->notify(notify_handle->priv,
526 notify_handle->event_type,
527 metadata);
528
529 kfree(metadata);
530 kfree(notify_handle);
531 }
532}
533
534/**
535 * Initialize existing packet.
536 */
537void smux_init_pkt(struct smux_pkt_t *pkt)
538{
539 memset(pkt, 0x0, sizeof(*pkt));
540 pkt->hdr.magic = SMUX_MAGIC;
541 INIT_LIST_HEAD(&pkt->list);
542}
543
544/**
545 * Allocate and initialize packet.
546 *
547 * If a payload is needed, either set it directly and ensure that it's freed or
548 * use smd_alloc_pkt_payload() to allocate a packet and it will be freed
549 * automatically when smd_free_pkt() is called.
550 */
551struct smux_pkt_t *smux_alloc_pkt(void)
552{
553 struct smux_pkt_t *pkt;
554
555 /* Consider a free list implementation instead of kmalloc */
556 pkt = kmalloc(sizeof(struct smux_pkt_t), GFP_ATOMIC);
557 if (!pkt) {
558 pr_err("%s: out of memory\n", __func__);
559 return NULL;
560 }
561 smux_init_pkt(pkt);
562 pkt->allocated = 1;
563
564 return pkt;
565}
566
567/**
568 * Free packet.
569 *
570 * @pkt Packet to free (may be NULL)
571 *
572 * If payload was allocated using smux_alloc_pkt_payload(), then it is freed as
573 * well. Otherwise, the caller is responsible for freeing the payload.
574 */
575void smux_free_pkt(struct smux_pkt_t *pkt)
576{
577 if (pkt) {
578 if (pkt->free_payload)
579 kfree(pkt->payload);
580 if (pkt->allocated)
581 kfree(pkt);
582 }
583}
584
585/**
586 * Allocate packet payload.
587 *
588 * @pkt Packet to add payload to
589 *
590 * @returns 0 on success, <0 upon error
591 *
592 * A flag is set to signal smux_free_pkt() to free the payload.
593 */
594int smux_alloc_pkt_payload(struct smux_pkt_t *pkt)
595{
596 if (!pkt)
597 return -EINVAL;
598
599 pkt->payload = kmalloc(pkt->hdr.payload_len, GFP_ATOMIC);
600 pkt->free_payload = 1;
601 if (!pkt->payload) {
602 pr_err("%s: unable to malloc %d bytes for payload\n",
603 __func__, pkt->hdr.payload_len);
604 return -ENOMEM;
605 }
606
607 return 0;
608}
609
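/**
 * Queue an event notification for delivery to the client from the
 * notification workqueue.
 *
 * @lcid Logical channel ID
 * @event SMUX_* event to deliver
 * @metadata Event metadata (copied internally; may be NULL)
 *
 * @returns 0 for success, < 0 if the handle allocation or FIFO insert fails
 */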
610static int schedule_notify(uint8_t lcid, int event,
611 const union notifier_metadata *metadata)
612{
613 struct smux_notify_handle *notify_handle = 0;
614 union notifier_metadata *meta_copy = 0;
615 struct smux_lch_t *ch;
616 int i;
617 unsigned long flags;
618 int ret = 0;
619
620 ch = &smux_lch[lcid];
621 notify_handle = kzalloc(sizeof(struct smux_notify_handle),
622 GFP_ATOMIC);
623 if (!notify_handle) {
624 pr_err("%s: out of memory\n", __func__);
625 ret = -ENOMEM;
626 goto free_out;
627 }
628
629 notify_handle->notify = ch->notify;
630 notify_handle->priv = ch->priv;
631 notify_handle->event_type = event;
632 if (metadata) {
633 meta_copy = kzalloc(sizeof(union notifier_metadata),
634 GFP_ATOMIC);
635 if (!meta_copy) {
636 pr_err("%s: out of memory\n", __func__);
637 ret = -ENOMEM;
638 goto free_out;
639 }
640 *meta_copy = *metadata;
641 notify_handle->metadata = meta_copy;
642 } else {
643 notify_handle->metadata = NULL;
644 }
645
646 spin_lock_irqsave(&notify_lock_lhc1, flags);
647 i = kfifo_avail(&smux_notify_fifo);
648 if (i < handle_size) {
649 pr_err("%s: fifo full error %d expected %d\n",
650 __func__, i, handle_size);
651 ret = -ENOMEM;
652 goto unlock_out;
653 }
654
655 i = kfifo_in(&smux_notify_fifo, &notify_handle, handle_size);
656 if (i < 0 || i != handle_size) {
657 pr_err("%s: fifo not available error %d (expected %d)\n",
658 __func__, i, handle_size);
659 ret = -ENOSPC;
660 goto unlock_out;
661 }
662 ++queued_fifo_notifications;
663
664unlock_out:
665 spin_unlock_irqrestore(&notify_lock_lhc1, flags);
666
667free_out:
668 queue_work(smux_notify_wq, &smux_notify_local);
669 if (ret < 0 && notify_handle) {
670 kfree(notify_handle->metadata);
671 kfree(notify_handle);
672 }
673 return ret;
674}
675
676/**
677 * Returns the serialized size of a packet.
678 *
679 * @pkt Packet to serialize
680 *
681 * @returns Serialized length of packet
682 */
683static unsigned int smux_serialize_size(struct smux_pkt_t *pkt)
684{
685 unsigned int size;
686
687 size = sizeof(struct smux_hdr_t);
688 size += pkt->hdr.payload_len;
689 size += pkt->hdr.pad_len;
690
691 return size;
692}
693
694/**
695 * Serialize packet @pkt into output buffer @data.
696 *
697 * @pkt Packet to serialize
698 * @out Destination buffer pointer
699 * @out_len Size of serialized packet
700 *
701 * @returns 0 for success
702 */
703int smux_serialize(struct smux_pkt_t *pkt, char *out,
704 unsigned int *out_len)
705{
706 char *data_start = out;
707
708 if (smux_serialize_size(pkt) > SMUX_MAX_PKT_SIZE) {
709 pr_err("%s: packet size %d too big\n",
710 __func__, smux_serialize_size(pkt));
711 return -E2BIG;
712 }
713
714 memcpy(out, &pkt->hdr, sizeof(struct smux_hdr_t));
715 out += sizeof(struct smux_hdr_t);
716 if (pkt->payload) {
717 memcpy(out, pkt->payload, pkt->hdr.payload_len);
718 out += pkt->hdr.payload_len;
719 }
720 if (pkt->hdr.pad_len) {
721 memset(out, 0x0, pkt->hdr.pad_len);
722 out += pkt->hdr.pad_len;
723 }
724 *out_len = out - data_start;
725 return 0;
726}
727
728/**
729 * Serialize header and provide pointer to the data.
730 *
731 * @pkt Packet
732 * @out[out] Pointer to the serialized header data
733 * @out_len[out] Pointer to the serialized header length
734 */
735static void smux_serialize_hdr(struct smux_pkt_t *pkt, char **out,
736 unsigned int *out_len)
737{
738 *out = (char *)&pkt->hdr;
739 *out_len = sizeof(struct smux_hdr_t);
740}
741
742/**
743 * Serialize payload and provide pointer to the data.
744 *
745 * @pkt Packet
746 * @out[out] Pointer to the serialized payload data
747 * @out_len[out] Pointer to the serialized payload length
748 */
749static void smux_serialize_payload(struct smux_pkt_t *pkt, char **out,
750 unsigned int *out_len)
751{
752 *out = pkt->payload;
753 *out_len = pkt->hdr.payload_len;
754}
755
756/**
757 * Serialize padding and provide pointer to the data.
758 *
759 * @pkt Packet
760 * @out[out] Pointer to the serialized padding (always NULL)
761 * @out_len[out] Pointer to the serialized payload length
762 *
 * Since the padding field value is undefined, only the size of the padding
764 * (@out_len) is set and the buffer pointer (@out) will always be NULL.
765 */
766static void smux_serialize_padding(struct smux_pkt_t *pkt, char **out,
767 unsigned int *out_len)
768{
769 *out = NULL;
770 *out_len = pkt->hdr.pad_len;
771}
772
773/**
774 * Write data to TTY framework and handle breaking the writes up if needed.
775 *
776 * @data Data to write
777 * @len Length of data
778 *
779 * @returns 0 for success, < 0 for failure
780 */
781static int write_to_tty(char *data, unsigned len)
782{
783 int data_written;
784
785 if (!data)
786 return 0;
787
788 while (len > 0) {
789 data_written = smux.tty->ops->write(smux.tty, data, len);
790 if (data_written >= 0) {
791 len -= data_written;
792 data += data_written;
793 } else {
794 pr_err("%s: TTY write returned error %d\n",
795 __func__, data_written);
796 return data_written;
797 }
798
799 if (len)
800 tty_wait_until_sent(smux.tty,
801 msecs_to_jiffies(TTY_BUFFER_FULL_WAIT_MS));
802
803 /* FUTURE - add SSR logic */
804 }
805 return 0;
806}
807
808/**
809 * Write packet to TTY.
810 *
811 * @pkt packet to write
812 *
813 * @returns 0 on success
814 */
815static int smux_tx_tty(struct smux_pkt_t *pkt)
816{
817 char *data;
818 unsigned int len;
819 int ret;
820
821 if (!smux.tty) {
822 pr_err("%s: TTY not initialized", __func__);
823 return -ENOTTY;
824 }
825
826 if (pkt->hdr.cmd == SMUX_CMD_BYTE) {
827 SMUX_DBG("%s: tty send single byte\n", __func__);
828 ret = write_to_tty(&pkt->hdr.flags, 1);
829 return ret;
830 }
831
832 smux_serialize_hdr(pkt, &data, &len);
833 ret = write_to_tty(data, len);
834 if (ret) {
835 pr_err("%s: failed %d to write header %d\n",
836 __func__, ret, len);
837 return ret;
838 }
839
840 smux_serialize_payload(pkt, &data, &len);
841 ret = write_to_tty(data, len);
842 if (ret) {
843 pr_err("%s: failed %d to write payload %d\n",
844 __func__, ret, len);
845 return ret;
846 }
847
848 smux_serialize_padding(pkt, &data, &len);
849 while (len > 0) {
850 char zero = 0x0;
851 ret = write_to_tty(&zero, 1);
852 if (ret) {
853 pr_err("%s: failed %d to write padding %d\n",
854 __func__, ret, len);
855 return ret;
856 }
857 --len;
858 }
859 return 0;
860}
861
862/**
863 * Send a single character.
864 *
865 * @ch Character to send
866 */
867static void smux_send_byte(char ch)
868{
869 struct smux_pkt_t pkt;
870
871 smux_init_pkt(&pkt);
872
	pkt.hdr.cmd = SMUX_CMD_BYTE;
	pkt.hdr.flags = ch;
	pkt.hdr.lcid = 0;
877 SMUX_LOG_PKT_TX(&pkt);
878 if (!smux_byte_loopback)
879 smux_tx_tty(&pkt);
880 else
881 smux_tx_loopback(&pkt);
882}
883
884/**
885 * Receive a single-character packet (used for internal testing).
886 *
887 * @ch Character to receive
888 * @lcid Logical channel ID for packet
889 *
890 * @returns 0 for success
891 *
892 * Called with rx_lock_lha1 locked.
893 */
894static int smux_receive_byte(char ch, int lcid)
895{
896 struct smux_pkt_t pkt;
897
898 smux_init_pkt(&pkt);
899 pkt.hdr.lcid = lcid;
900 pkt.hdr.cmd = SMUX_CMD_BYTE;
901 pkt.hdr.flags = ch;
902
903 return smux_dispatch_rx_pkt(&pkt);
904}
905
906/**
907 * Queue packet for transmit.
908 *
909 * @pkt_ptr Packet to queue
910 * @ch Channel to queue packet on
911 * @queue Queue channel on ready list
912 */
913static void smux_tx_queue(struct smux_pkt_t *pkt_ptr, struct smux_lch_t *ch,
914 int queue)
915{
916 unsigned long flags;
917
918 SMUX_DBG("%s: queuing pkt %p\n", __func__, pkt_ptr);
919
920 spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
921 list_add_tail(&pkt_ptr->list, &ch->tx_queue);
922 spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);
923
924 if (queue)
925 list_channel(ch);
926}
927
928/**
929 * Handle receive OPEN ACK command.
930 *
931 * @pkt Received packet
932 *
933 * @returns 0 for success
934 *
935 * Called with rx_lock_lha1 already locked.
936 */
937static int smux_handle_rx_open_ack(struct smux_pkt_t *pkt)
938{
939 uint8_t lcid;
940 int ret;
941 struct smux_lch_t *ch;
942 int enable_powerdown = 0;
943
944 lcid = pkt->hdr.lcid;
945 ch = &smux_lch[lcid];
946
947 spin_lock(&ch->state_lock_lhb1);
948 if (ch->local_state == SMUX_LCH_LOCAL_OPENING) {
949 SMUX_DBG("lcid %d local state 0x%x -> 0x%x\n", lcid,
950 ch->local_state,
951 SMUX_LCH_LOCAL_OPENED);
952
953 if (pkt->hdr.flags & SMUX_CMD_OPEN_POWER_COLLAPSE)
954 enable_powerdown = 1;
955
956 ch->local_state = SMUX_LCH_LOCAL_OPENED;
957 if (ch->remote_state == SMUX_LCH_REMOTE_OPENED)
958 schedule_notify(lcid, SMUX_CONNECTED, NULL);
959 ret = 0;
960 } else if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
961 SMUX_DBG("Remote loopback OPEN ACK received\n");
962 ret = 0;
963 } else {
964 pr_err("%s: lcid %d state 0x%x open ack invalid\n",
965 __func__, lcid, ch->local_state);
966 ret = -EINVAL;
967 }
968 spin_unlock(&ch->state_lock_lhb1);
969
970 if (enable_powerdown) {
971 spin_lock(&smux.tx_lock_lha2);
972 if (!smux.powerdown_enabled) {
973 smux.powerdown_enabled = 1;
974 SMUX_DBG("%s: enabling power-collapse support\n",
975 __func__);
976 }
977 spin_unlock(&smux.tx_lock_lha2);
978 }
979
980 return ret;
981}
982
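/**
 * Handle receive CLOSE ACK command.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 *
 * Called with rx_lock_lha1 already locked.
 */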
983static int smux_handle_close_ack(struct smux_pkt_t *pkt)
984{
985 uint8_t lcid;
986 int ret;
987 struct smux_lch_t *ch;
988 union notifier_metadata meta_disconnected;
989 unsigned long flags;
990
991 lcid = pkt->hdr.lcid;
992 ch = &smux_lch[lcid];
993 meta_disconnected.disconnected.is_ssr = 0;
994
995 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
996
997 if (ch->local_state == SMUX_LCH_LOCAL_CLOSING) {
998 SMUX_DBG("lcid %d local state 0x%x -> 0x%x\n", lcid,
999 SMUX_LCH_LOCAL_CLOSING,
1000 SMUX_LCH_LOCAL_CLOSED);
1001 ch->local_state = SMUX_LCH_LOCAL_CLOSED;
1002 if (ch->remote_state == SMUX_LCH_REMOTE_CLOSED)
1003 schedule_notify(lcid, SMUX_DISCONNECTED,
1004 &meta_disconnected);
1005 ret = 0;
1006 } else if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
1007 SMUX_DBG("Remote loopback CLOSE ACK received\n");
1008 ret = 0;
1009 } else {
1010 pr_err("%s: lcid %d state 0x%x close ack invalid\n",
1011 __func__, lcid, ch->local_state);
1012 ret = -EINVAL;
1013 }
1014 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
1015 return ret;
1016}
1017
1018/**
1019 * Handle receive OPEN command.
1020 *
1021 * @pkt Received packet
1022 *
1023 * @returns 0 for success
1024 *
1025 * Called with rx_lock_lha1 already locked.
1026 */
1027static int smux_handle_rx_open_cmd(struct smux_pkt_t *pkt)
1028{
1029 uint8_t lcid;
1030 int ret;
1031 struct smux_lch_t *ch;
1032 struct smux_pkt_t *ack_pkt;
1033 int tx_ready = 0;
1034 int enable_powerdown = 0;
1035
1036 if (pkt->hdr.flags & SMUX_CMD_OPEN_ACK)
1037 return smux_handle_rx_open_ack(pkt);
1038
1039 lcid = pkt->hdr.lcid;
1040 ch = &smux_lch[lcid];
1041
1042 spin_lock(&ch->state_lock_lhb1);
1043
1044 if (ch->remote_state == SMUX_LCH_REMOTE_CLOSED) {
1045 SMUX_DBG("lcid %d remote state 0x%x -> 0x%x\n", lcid,
1046 SMUX_LCH_REMOTE_CLOSED,
1047 SMUX_LCH_REMOTE_OPENED);
1048
1049 ch->remote_state = SMUX_LCH_REMOTE_OPENED;
1050 if (pkt->hdr.flags & SMUX_CMD_OPEN_POWER_COLLAPSE)
1051 enable_powerdown = 1;
1052
1053 /* Send Open ACK */
1054 ack_pkt = smux_alloc_pkt();
1055 if (!ack_pkt) {
1056 /* exit out to allow retrying this later */
1057 ret = -ENOMEM;
1058 goto out;
1059 }
1060 ack_pkt->hdr.cmd = SMUX_CMD_OPEN_LCH;
1061 ack_pkt->hdr.flags = SMUX_CMD_OPEN_ACK
1062 | SMUX_CMD_OPEN_POWER_COLLAPSE;
1063 ack_pkt->hdr.lcid = lcid;
1064 ack_pkt->hdr.payload_len = 0;
1065 ack_pkt->hdr.pad_len = 0;
1066 if (pkt->hdr.flags & SMUX_CMD_OPEN_REMOTE_LOOPBACK) {
1067 ch->remote_mode = SMUX_LCH_MODE_REMOTE_LOOPBACK;
1068 ack_pkt->hdr.flags |= SMUX_CMD_OPEN_REMOTE_LOOPBACK;
1069 }
1070 smux_tx_queue(ack_pkt, ch, 0);
1071 tx_ready = 1;
1072
1073 if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
1074 /*
1075 * Send an Open command to the remote side to
1076 * simulate our local client doing it.
1077 */
1078 ack_pkt = smux_alloc_pkt();
1079 if (ack_pkt) {
1080 ack_pkt->hdr.lcid = lcid;
1081 ack_pkt->hdr.cmd = SMUX_CMD_OPEN_LCH;
1082 ack_pkt->hdr.flags =
1083 SMUX_CMD_OPEN_POWER_COLLAPSE;
1084 ack_pkt->hdr.payload_len = 0;
1085 ack_pkt->hdr.pad_len = 0;
1086 smux_tx_queue(ack_pkt, ch, 0);
1087 tx_ready = 1;
1088 } else {
1089 pr_err("%s: Remote loopack allocation failure\n",
1090 __func__);
1091 }
1092 } else if (ch->local_state == SMUX_LCH_LOCAL_OPENED) {
1093 schedule_notify(lcid, SMUX_CONNECTED, NULL);
1094 }
1095 ret = 0;
1096 } else {
1097 pr_err("%s: lcid %d remote state 0x%x open invalid\n",
1098 __func__, lcid, ch->remote_state);
1099 ret = -EINVAL;
1100 }
1101
1102out:
1103 spin_unlock(&ch->state_lock_lhb1);
1104
1105 if (enable_powerdown) {
1106 spin_lock(&smux.tx_lock_lha2);
1107 smux.powerdown_enabled = 1;
1108 SMUX_DBG("%s: enabling power-collapse support\n", __func__);
1109 spin_unlock(&smux.tx_lock_lha2);
1110 }
1111
1112 if (tx_ready)
1113 list_channel(ch);
1114
1115 return ret;
1116}
1117
1118/**
1119 * Handle receive CLOSE command.
1120 *
1121 * @pkt Received packet
1122 *
1123 * @returns 0 for success
1124 *
1125 * Called with rx_lock_lha1 already locked.
1126 */
1127static int smux_handle_rx_close_cmd(struct smux_pkt_t *pkt)
1128{
1129 uint8_t lcid;
1130 int ret;
1131 struct smux_lch_t *ch;
1132 struct smux_pkt_t *ack_pkt;
1133 union notifier_metadata meta_disconnected;
1134 int tx_ready = 0;
1135
1136 if (pkt->hdr.flags & SMUX_CMD_CLOSE_ACK)
1137 return smux_handle_close_ack(pkt);
1138
1139 lcid = pkt->hdr.lcid;
1140 ch = &smux_lch[lcid];
1141 meta_disconnected.disconnected.is_ssr = 0;
1142
1143 spin_lock(&ch->state_lock_lhb1);
1144 if (ch->remote_state == SMUX_LCH_REMOTE_OPENED) {
1145 SMUX_DBG("lcid %d remote state 0x%x -> 0x%x\n", lcid,
1146 SMUX_LCH_REMOTE_OPENED,
1147 SMUX_LCH_REMOTE_CLOSED);
1148
1149 ack_pkt = smux_alloc_pkt();
1150 if (!ack_pkt) {
1151 /* exit out to allow retrying this later */
1152 ret = -ENOMEM;
1153 goto out;
1154 }
1155 ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
1156 ack_pkt->hdr.cmd = SMUX_CMD_CLOSE_LCH;
1157 ack_pkt->hdr.flags = SMUX_CMD_CLOSE_ACK;
1158 ack_pkt->hdr.lcid = lcid;
1159 ack_pkt->hdr.payload_len = 0;
1160 ack_pkt->hdr.pad_len = 0;
1161 smux_tx_queue(ack_pkt, ch, 0);
1162 tx_ready = 1;
1163
1164 if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
1165 /*
1166 * Send a Close command to the remote side to simulate
1167 * our local client doing it.
1168 */
1169 ack_pkt = smux_alloc_pkt();
1170 if (ack_pkt) {
1171 ack_pkt->hdr.lcid = lcid;
1172 ack_pkt->hdr.cmd = SMUX_CMD_CLOSE_LCH;
1173 ack_pkt->hdr.flags = 0;
1174 ack_pkt->hdr.payload_len = 0;
1175 ack_pkt->hdr.pad_len = 0;
1176 smux_tx_queue(ack_pkt, ch, 0);
1177 tx_ready = 1;
1178 } else {
1179 pr_err("%s: Remote loopack allocation failure\n",
1180 __func__);
1181 }
1182 }
1183
1184 if (ch->local_state == SMUX_LCH_LOCAL_CLOSED)
1185 schedule_notify(lcid, SMUX_DISCONNECTED,
1186 &meta_disconnected);
1187 ret = 0;
1188 } else {
1189 pr_err("%s: lcid %d remote state 0x%x close invalid\n",
1190 __func__, lcid, ch->remote_state);
1191 ret = -EINVAL;
1192 }
1193out:
1194 spin_unlock(&ch->state_lock_lhb1);
1195 if (tx_ready)
1196 list_channel(ch);
1197
1198 return ret;
1199}
1200
1201/*
1202 * Handle receive DATA command.
1203 *
1204 * @pkt Received packet
1205 *
1206 * @returns 0 for success
1207 *
1208 * Called with rx_lock_lha1 already locked.
1209 */
1210static int smux_handle_rx_data_cmd(struct smux_pkt_t *pkt)
1211{
1212 uint8_t lcid;
1213 int ret;
1214 int i;
1215 int tmp;
1216 int rx_len;
1217 struct smux_lch_t *ch;
1218 union notifier_metadata metadata;
1219 int remote_loopback;
1220 int tx_ready = 0;
1221 struct smux_pkt_t *ack_pkt;
1222 unsigned long flags;
1223
1224 if (!pkt || smux_assert_lch_id(pkt->hdr.lcid))
1225 return -ENXIO;
1226
1227 lcid = pkt->hdr.lcid;
1228 ch = &smux_lch[lcid];
1229 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
1230 remote_loopback = ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK;
1231
1232 if (ch->local_state != SMUX_LCH_LOCAL_OPENED
1233 && !remote_loopback) {
1234 pr_err("smux: ch %d error data on local state 0x%x",
1235 lcid, ch->local_state);
1236 ret = -EIO;
1237 goto out;
1238 }
1239
1240 if (ch->remote_state != SMUX_LCH_REMOTE_OPENED) {
1241 pr_err("smux: ch %d error data on remote state 0x%x",
1242 lcid, ch->remote_state);
1243 ret = -EIO;
1244 goto out;
1245 }
1246
1247 rx_len = pkt->hdr.payload_len;
1248 if (rx_len == 0) {
1249 ret = -EINVAL;
1250 goto out;
1251 }
1252
1253 for (i = 0; i < SMUX_GET_RX_BUFF_MAX_RETRY_CNT; ++i) {
1254 metadata.read.pkt_priv = 0;
1255 metadata.read.buffer = 0;
1256
1257 if (!remote_loopback) {
1258 tmp = ch->get_rx_buffer(ch->priv,
1259 (void **)&metadata.read.pkt_priv,
1260 (void **)&metadata.read.buffer,
1261 rx_len);
1262 if (tmp == 0 && metadata.read.buffer) {
1263 /* place data into RX buffer */
1264 memcpy(metadata.read.buffer, pkt->payload,
1265 rx_len);
1266 metadata.read.len = rx_len;
1267 schedule_notify(lcid, SMUX_READ_DONE,
1268 &metadata);
1269 ret = 0;
1270 break;
1271 } else if (tmp == -EAGAIN) {
1272 ret = -ENOMEM;
1273 } else if (tmp < 0) {
1274 schedule_notify(lcid, SMUX_READ_FAIL, NULL);
1275 ret = -ENOMEM;
1276 break;
1277 } else if (!metadata.read.buffer) {
1278 pr_err("%s: get_rx_buffer() buffer is NULL\n",
1279 __func__);
1280 ret = -ENOMEM;
1281 }
1282 } else {
1283 /* Echo the data back to the remote client. */
1284 ack_pkt = smux_alloc_pkt();
1285 if (ack_pkt) {
1286 ack_pkt->hdr.lcid = lcid;
1287 ack_pkt->hdr.cmd = SMUX_CMD_DATA;
1288 ack_pkt->hdr.flags = 0;
1289 ack_pkt->hdr.payload_len = pkt->hdr.payload_len;
1290 ack_pkt->payload = pkt->payload;
1291 ack_pkt->hdr.pad_len = pkt->hdr.pad_len;
1292 smux_tx_queue(ack_pkt, ch, 0);
1293 tx_ready = 1;
1294 } else {
1295 pr_err("%s: Remote loopack allocation failure\n",
1296 __func__);
1297 }
1298 }
1299 }
1300
1301out:
1302 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
1303
1304 if (tx_ready)
1305 list_channel(ch);
1306
1307 return ret;
1308}
1309
1310/**
1311 * Handle receive byte command for testing purposes.
1312 *
1313 * @pkt Received packet
1314 *
1315 * @returns 0 for success
1316 */
1317static int smux_handle_rx_byte_cmd(struct smux_pkt_t *pkt)
1318{
1319 uint8_t lcid;
1320 int ret;
1321 struct smux_lch_t *ch;
1322 union notifier_metadata metadata;
1323 unsigned long flags;
1324
1325 if (!pkt || smux_assert_lch_id(pkt->hdr.lcid))
1326 return -ENXIO;
1327
1328 lcid = pkt->hdr.lcid;
1329 ch = &smux_lch[lcid];
1330 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
1331
1332 if (ch->local_state != SMUX_LCH_LOCAL_OPENED) {
1333 pr_err("smux: ch %d error data on local state 0x%x",
1334 lcid, ch->local_state);
1335 ret = -EIO;
1336 goto out;
1337 }
1338
1339 if (ch->remote_state != SMUX_LCH_REMOTE_OPENED) {
1340 pr_err("smux: ch %d error data on remote state 0x%x",
1341 lcid, ch->remote_state);
1342 ret = -EIO;
1343 goto out;
1344 }
1345
1346 metadata.read.pkt_priv = (void *)(int)pkt->hdr.flags;
1347 metadata.read.buffer = 0;
1348 schedule_notify(lcid, SMUX_READ_DONE, &metadata);
1349 ret = 0;
1350
1351out:
1352 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
1353 return ret;
1354}
1355
1356/**
1357 * Handle receive status command.
1358 *
1359 * @pkt Received packet
1360 *
1361 * @returns 0 for success
1362 *
1363 * Called with rx_lock_lha1 already locked.
1364 */
1365static int smux_handle_rx_status_cmd(struct smux_pkt_t *pkt)
1366{
1367 uint8_t lcid;
	int ret = 0;
1369 struct smux_lch_t *ch;
1370 union notifier_metadata meta;
1371 unsigned long flags;
1372 int tx_ready = 0;
1373
1374 lcid = pkt->hdr.lcid;
1375 ch = &smux_lch[lcid];
1376
1377 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
1378 meta.tiocm.tiocm_old = ch->remote_tiocm;
1379 meta.tiocm.tiocm_new = pkt->hdr.flags;
1380
1381 /* update logical channel flow control */
1382 if ((meta.tiocm.tiocm_old & SMUX_CMD_STATUS_FLOW_CNTL) ^
1383 (meta.tiocm.tiocm_new & SMUX_CMD_STATUS_FLOW_CNTL)) {
1384 /* logical channel flow control changed */
1385 if (pkt->hdr.flags & SMUX_CMD_STATUS_FLOW_CNTL) {
1386 /* disabled TX */
1387 SMUX_DBG("TX Flow control enabled\n");
1388 ch->tx_flow_control = 1;
1389 } else {
1390 /* re-enable channel */
1391 SMUX_DBG("TX Flow control disabled\n");
1392 ch->tx_flow_control = 0;
1393 tx_ready = 1;
1394 }
1395 }
1396 meta.tiocm.tiocm_old = msm_smux_tiocm_get_atomic(ch);
1397 ch->remote_tiocm = pkt->hdr.flags;
1398 meta.tiocm.tiocm_new = msm_smux_tiocm_get_atomic(ch);
1399
1400 /* client notification for status change */
1401 if (IS_FULLY_OPENED(ch)) {
1402 if (meta.tiocm.tiocm_old != meta.tiocm.tiocm_new)
1403 schedule_notify(lcid, SMUX_TIOCM_UPDATE, &meta);
1404 ret = 0;
1405 }
1406 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
1407 if (tx_ready)
1408 list_channel(ch);
1409
1410 return ret;
1411}
1412
1413/**
1414 * Handle receive power command.
1415 *
1416 * @pkt Received packet
1417 *
1418 * @returns 0 for success
1419 *
1420 * Called with rx_lock_lha1 already locked.
1421 */
1422static int smux_handle_rx_power_cmd(struct smux_pkt_t *pkt)
1423{
1424 int tx_ready = 0;
1425 struct smux_pkt_t *ack_pkt;
1426
1427 spin_lock(&smux.tx_lock_lha2);
1428 if (pkt->hdr.flags & SMUX_CMD_PWR_CTL_ACK) {
1429 /* local sleep request ack */
1430 if (smux.power_state == SMUX_PWR_TURNING_OFF) {
1431 /* Power-down complete, turn off UART */
1432 SMUX_DBG("%s: Power %d->%d\n", __func__,
1433 smux.power_state, SMUX_PWR_OFF_FLUSH);
1434 smux.power_state = SMUX_PWR_OFF_FLUSH;
1435 queue_work(smux_tx_wq, &smux_inactivity_work);
1436 } else {
1437 pr_err("%s: sleep request ack invalid in state %d\n",
1438 __func__, smux.power_state);
1439 }
1440 } else {
1441 /* remote sleep request */
1442 if (smux.power_state == SMUX_PWR_ON
1443 || smux.power_state == SMUX_PWR_TURNING_OFF) {
1444 ack_pkt = smux_alloc_pkt();
1445 if (ack_pkt) {
1446 SMUX_DBG("%s: Power %d->%d\n", __func__,
1447 smux.power_state,
1448 SMUX_PWR_TURNING_OFF_FLUSH);
1449
1450 /* send power-down request */
1451 ack_pkt->hdr.cmd = SMUX_CMD_PWR_CTL;
1452 ack_pkt->hdr.flags = SMUX_CMD_PWR_CTL_ACK;
1453 ack_pkt->hdr.lcid = pkt->hdr.lcid;
1454 smux_tx_queue(ack_pkt,
1455 &smux_lch[ack_pkt->hdr.lcid], 0);
1456 tx_ready = 1;
1457 smux.power_state = SMUX_PWR_TURNING_OFF_FLUSH;
1458 queue_delayed_work(smux_tx_wq,
1459 &smux_delayed_inactivity_work,
1460 msecs_to_jiffies(
1461 SMUX_INACTIVITY_TIMEOUT_MS));
1462 }
1463 } else {
1464 pr_err("%s: sleep request invalid in state %d\n",
1465 __func__, smux.power_state);
1466 }
1467 }
1468 spin_unlock(&smux.tx_lock_lha2);
1469
1470 if (tx_ready)
1471 list_channel(&smux_lch[ack_pkt->hdr.lcid]);
1472
1473 return 0;
1474}
1475
1476/**
1477 * Handle dispatching a completed packet for receive processing.
1478 *
1479 * @pkt Packet to process
1480 *
1481 * @returns 0 for success
1482 *
1483 * Called with rx_lock_lha1 already locked.
1484 */
1485static int smux_dispatch_rx_pkt(struct smux_pkt_t *pkt)
1486{
1487 int ret;
1488
1489 SMUX_LOG_PKT_RX(pkt);
1490
1491 switch (pkt->hdr.cmd) {
1492 case SMUX_CMD_OPEN_LCH:
1493 ret = smux_handle_rx_open_cmd(pkt);
1494 break;
1495
1496 case SMUX_CMD_DATA:
1497 ret = smux_handle_rx_data_cmd(pkt);
1498 break;
1499
1500 case SMUX_CMD_CLOSE_LCH:
1501 ret = smux_handle_rx_close_cmd(pkt);
1502 break;
1503
1504 case SMUX_CMD_STATUS:
1505 ret = smux_handle_rx_status_cmd(pkt);
1506 break;
1507
1508 case SMUX_CMD_PWR_CTL:
1509 ret = smux_handle_rx_power_cmd(pkt);
1510 break;
1511
1512 case SMUX_CMD_BYTE:
1513 ret = smux_handle_rx_byte_cmd(pkt);
1514 break;
1515
1516 default:
1517 pr_err("%s: command %d unknown\n", __func__, pkt->hdr.cmd);
1518 ret = -EINVAL;
1519 }
1520 return ret;
1521}
1522
1523/**
1524 * Deserializes a packet and dispatches it to the packet receive logic.
1525 *
1526 * @data Raw data for one packet
1527 * @len Length of the data
1528 *
1529 * @returns 0 for success
1530 *
1531 * Called with rx_lock_lha1 already locked.
1532 */
1533static int smux_deserialize(unsigned char *data, int len)
1534{
1535 struct smux_pkt_t recv;
1536 uint8_t lcid;
1537
1538 smux_init_pkt(&recv);
1539
1540 /*
1541 * It may be possible to optimize this to not use the
1542 * temporary buffer.
1543 */
1544 memcpy(&recv.hdr, data, sizeof(struct smux_hdr_t));
1545
1546 if (recv.hdr.magic != SMUX_MAGIC) {
1547 pr_err("%s: invalid header magic\n", __func__);
1548 return -EINVAL;
1549 }
1550
1551 lcid = recv.hdr.lcid;
1552 if (smux_assert_lch_id(lcid)) {
1553 pr_err("%s: invalid channel id %d\n", __func__, lcid);
1554 return -ENXIO;
1555 }
1556
1557 if (recv.hdr.payload_len)
1558 recv.payload = data + sizeof(struct smux_hdr_t);
1559
1560 return smux_dispatch_rx_pkt(&recv);
1561}
1562
1563/**
1564 * Handle wakeup request byte.
1565 *
1566 * Called with rx_lock_lha1 already locked.
1567 */
1568static void smux_handle_wakeup_req(void)
1569{
1570 spin_lock(&smux.tx_lock_lha2);
1571 if (smux.power_state == SMUX_PWR_OFF
1572 || smux.power_state == SMUX_PWR_TURNING_ON) {
1573 /* wakeup system */
1574 SMUX_DBG("%s: Power %d->%d\n", __func__,
1575 smux.power_state, SMUX_PWR_ON);
1576 smux.power_state = SMUX_PWR_ON;
1577 queue_work(smux_tx_wq, &smux_wakeup_work);
1578 queue_work(smux_tx_wq, &smux_tx_work);
1579 queue_delayed_work(smux_tx_wq, &smux_delayed_inactivity_work,
1580 msecs_to_jiffies(SMUX_INACTIVITY_TIMEOUT_MS));
1581 smux_send_byte(SMUX_WAKEUP_ACK);
1582 } else {
1583 smux_send_byte(SMUX_WAKEUP_ACK);
1584 }
1585 spin_unlock(&smux.tx_lock_lha2);
1586}
1587
1588/**
1589 * Handle wakeup request ack.
1590 *
1591 * Called with rx_lock_lha1 already locked.
1592 */
1593static void smux_handle_wakeup_ack(void)
1594{
1595 spin_lock(&smux.tx_lock_lha2);
1596 if (smux.power_state == SMUX_PWR_TURNING_ON) {
1597 /* received response to wakeup request */
1598 SMUX_DBG("%s: Power %d->%d\n", __func__,
1599 smux.power_state, SMUX_PWR_ON);
1600 smux.power_state = SMUX_PWR_ON;
1601 queue_work(smux_tx_wq, &smux_tx_work);
1602 queue_delayed_work(smux_tx_wq, &smux_delayed_inactivity_work,
1603 msecs_to_jiffies(SMUX_INACTIVITY_TIMEOUT_MS));
1604
1605 } else if (smux.power_state != SMUX_PWR_ON) {
1606 /* invalid message */
1607 pr_err("%s: wakeup request ack invalid in state %d\n",
1608 __func__, smux.power_state);
1609 }
1610 spin_unlock(&smux.tx_lock_lha2);
1611}
1612
1613/**
1614 * RX State machine - IDLE state processing.
1615 *
1616 * @data New RX data to process
1617 * @len Length of the data
1618 * @used Return value of length processed
1619 * @flag Error flag - TTY_NORMAL 0 for no failure
1620 *
1621 * Called with rx_lock_lha1 locked.
1622 */
1623static void smux_rx_handle_idle(const unsigned char *data,
1624 int len, int *used, int flag)
1625{
1626 int i;
1627
1628 if (flag) {
1629 if (smux_byte_loopback)
1630 smux_receive_byte(SMUX_UT_ECHO_ACK_FAIL,
1631 smux_byte_loopback);
1632 pr_err("%s: TTY error 0x%x - ignoring\n", __func__, flag);
1633 ++*used;
1634 return;
1635 }
1636
1637 for (i = *used; i < len && smux.rx_state == SMUX_RX_IDLE; i++) {
1638 switch (data[i]) {
1639 case SMUX_MAGIC_WORD1:
1640 smux.rx_state = SMUX_RX_MAGIC;
1641 break;
1642 case SMUX_WAKEUP_REQ:
1643 smux_handle_wakeup_req();
1644 break;
1645 case SMUX_WAKEUP_ACK:
1646 smux_handle_wakeup_ack();
1647 break;
1648 default:
1649 /* unexpected character */
1650 if (smux_byte_loopback && data[i] == SMUX_UT_ECHO_REQ)
1651 smux_receive_byte(SMUX_UT_ECHO_ACK_OK,
1652 smux_byte_loopback);
1653 pr_err("%s: parse error 0x%02x - ignoring\n", __func__,
1654 (unsigned)data[i]);
1655 break;
1656 }
1657 }
1658
1659 *used = i;
1660}
1661
1662/**
1663 * RX State machine - Header Magic state processing.
1664 *
1665 * @data New RX data to process
1666 * @len Length of the data
1667 * @used Return value of length processed
1668 * @flag Error flag - TTY_NORMAL 0 for no failure
1669 *
1670 * Called with rx_lock_lha1 locked.
1671 */
1672static void smux_rx_handle_magic(const unsigned char *data,
1673 int len, int *used, int flag)
1674{
1675 int i;
1676
1677 if (flag) {
1678 pr_err("%s: TTY RX error %d\n", __func__, flag);
1679 smux_enter_reset();
1680 smux.rx_state = SMUX_RX_FAILURE;
1681 ++*used;
1682 return;
1683 }
1684
1685 for (i = *used; i < len && smux.rx_state == SMUX_RX_MAGIC; i++) {
1686 /* wait for completion of the magic */
1687 if (data[i] == SMUX_MAGIC_WORD2) {
1688 smux.recv_len = 0;
1689 smux.recv_buf[smux.recv_len++] = SMUX_MAGIC_WORD1;
1690 smux.recv_buf[smux.recv_len++] = SMUX_MAGIC_WORD2;
1691 smux.rx_state = SMUX_RX_HDR;
1692 } else {
1693 /* unexpected / trash character */
1694 pr_err("%s: rx parse error for char %c; *used=%d, len=%d\n",
1695 __func__, data[i], *used, len);
1696 smux.rx_state = SMUX_RX_IDLE;
1697 }
1698 }
1699
1700 *used = i;
1701}
1702
1703/**
1704 * RX State machine - Packet Header state processing.
1705 *
1706 * @data New RX data to process
1707 * @len Length of the data
1708 * @used Return value of length processed
1709 * @flag Error flag - TTY_NORMAL 0 for no failure
1710 *
1711 * Called with rx_lock_lha1 locked.
1712 */
1713static void smux_rx_handle_hdr(const unsigned char *data,
1714 int len, int *used, int flag)
1715{
1716 int i;
1717 struct smux_hdr_t *hdr;
1718
1719 if (flag) {
1720 pr_err("%s: TTY RX error %d\n", __func__, flag);
1721 smux_enter_reset();
1722 smux.rx_state = SMUX_RX_FAILURE;
1723 ++*used;
1724 return;
1725 }
1726
1727 for (i = *used; i < len && smux.rx_state == SMUX_RX_HDR; i++) {
1728 smux.recv_buf[smux.recv_len++] = data[i];
1729
1730 if (smux.recv_len == sizeof(struct smux_hdr_t)) {
1731 /* complete header received */
1732 hdr = (struct smux_hdr_t *)smux.recv_buf;
1733 smux.pkt_remain = hdr->payload_len + hdr->pad_len;
1734 smux.rx_state = SMUX_RX_PAYLOAD;
1735 }
1736 }
1737 *used = i;
1738}
1739
1740/**
1741 * RX State machine - Packet Payload state processing.
1742 *
1743 * @data New RX data to process
1744 * @len Length of the data
1745 * @used Return value of length processed
1746 * @flag Error flag - TTY_NORMAL 0 for no failure
1747 *
1748 * Called with rx_lock_lha1 locked.
1749 */
1750static void smux_rx_handle_pkt_payload(const unsigned char *data,
1751 int len, int *used, int flag)
1752{
1753 int remaining;
1754
1755 if (flag) {
1756 pr_err("%s: TTY RX error %d\n", __func__, flag);
1757 smux_enter_reset();
1758 smux.rx_state = SMUX_RX_FAILURE;
1759 ++*used;
1760 return;
1761 }
1762
1763 /* copy data into rx buffer */
1764 if (smux.pkt_remain < (len - *used))
1765 remaining = smux.pkt_remain;
1766 else
1767 remaining = len - *used;
1768
1769 memcpy(&smux.recv_buf[smux.recv_len], &data[*used], remaining);
1770 smux.recv_len += remaining;
1771 smux.pkt_remain -= remaining;
1772 *used += remaining;
1773
1774 if (smux.pkt_remain == 0) {
1775 /* complete packet received */
1776 smux_deserialize(smux.recv_buf, smux.recv_len);
1777 smux.rx_state = SMUX_RX_IDLE;
1778 }
1779}
1780
1781/**
1782 * Feed data to the receive state machine.
1783 *
1784 * @data Pointer to data block
1785 * @len Length of data
1786 * @flag TTY_NORMAL (0) for no error, otherwise TTY Error Flag
1787 *
1788 * Called with rx_lock_lha1 locked.
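 *
 * A well-formed stream advances SMUX_RX_IDLE -> SMUX_RX_MAGIC -> SMUX_RX_HDR
 * -> SMUX_RX_PAYLOAD and returns to SMUX_RX_IDLE once a complete packet has
 * been deserialized and dispatched.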
1789 */
1790void smux_rx_state_machine(const unsigned char *data,
1791 int len, int flag)
1792{
1793 unsigned long flags;
1794 int used;
1795 int initial_rx_state;
1796
1797
1798 SMUX_DBG("%s: %p, len=%d, flag=%d\n", __func__, data, len, flag);
1799 spin_lock_irqsave(&smux.rx_lock_lha1, flags);
1800 used = 0;
1801 smux.rx_activity_flag = 1;
1802 do {
1803 SMUX_DBG("%s: state %d; %d of %d\n",
1804 __func__, smux.rx_state, used, len);
1805 initial_rx_state = smux.rx_state;
1806
1807 switch (smux.rx_state) {
1808 case SMUX_RX_IDLE:
1809 smux_rx_handle_idle(data, len, &used, flag);
1810 break;
1811 case SMUX_RX_MAGIC:
1812 smux_rx_handle_magic(data, len, &used, flag);
1813 break;
1814 case SMUX_RX_HDR:
1815 smux_rx_handle_hdr(data, len, &used, flag);
1816 break;
1817 case SMUX_RX_PAYLOAD:
1818 smux_rx_handle_pkt_payload(data, len, &used, flag);
1819 break;
1820 default:
1821 SMUX_DBG("%s: invalid state %d\n",
1822 __func__, smux.rx_state);
1823 smux.rx_state = SMUX_RX_IDLE;
1824 break;
1825 }
1826 } while (used < len || smux.rx_state != initial_rx_state);
1827 spin_unlock_irqrestore(&smux.rx_lock_lha1, flags);
1828}
1829
1830/**
1831 * Add channel to transmit-ready list and trigger transmit worker.
1832 *
1833 * @ch Channel to add
1834 */
1835static void list_channel(struct smux_lch_t *ch)
1836{
1837 unsigned long flags;
1838
1839 SMUX_DBG("%s: listing channel %d\n",
1840 __func__, ch->lcid);
1841
1842 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
1843 spin_lock(&ch->tx_lock_lhb2);
1844 smux.tx_activity_flag = 1;
1845 if (list_empty(&ch->tx_ready_list))
1846 list_add_tail(&ch->tx_ready_list, &smux.lch_tx_ready_list);
1847 spin_unlock(&ch->tx_lock_lhb2);
1848 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
1849
1850 queue_work(smux_tx_wq, &smux_tx_work);
1851}
1852
1853/**
1854 * Transmit packet on correct transport and then perform client
1855 * notification.
1856 *
1857 * @ch Channel to transmit on
1858 * @pkt Packet to transmit
1859 */
1860static void smux_tx_pkt(struct smux_lch_t *ch, struct smux_pkt_t *pkt)
1861{
1862 union notifier_metadata meta_write;
1863 int ret;
1864
1865 if (ch && pkt) {
1866 SMUX_LOG_PKT_TX(pkt);
1867 if (ch->local_mode == SMUX_LCH_MODE_LOCAL_LOOPBACK)
1868 ret = smux_tx_loopback(pkt);
1869 else
1870 ret = smux_tx_tty(pkt);
1871
1872 if (pkt->hdr.cmd == SMUX_CMD_DATA) {
1873 /* notify write-done */
1874 meta_write.write.pkt_priv = pkt->priv;
1875 meta_write.write.buffer = pkt->payload;
1876 meta_write.write.len = pkt->hdr.payload_len;
1877 if (ret >= 0) {
1878 SMUX_DBG("%s: PKT write done", __func__);
1879 schedule_notify(ch->lcid, SMUX_WRITE_DONE,
1880 &meta_write);
1881 } else {
1882 pr_err("%s: failed to write pkt %d\n",
1883 __func__, ret);
1884 schedule_notify(ch->lcid, SMUX_WRITE_FAIL,
1885 &meta_write);
1886 }
1887 }
1888 }
1889}
1890
1891/**
1892 * Power-up the UART.
1893 */
1894static void smux_uart_power_on(void)
1895{
1896 struct uart_state *state;
1897
1898 if (!smux.tty || !smux.tty->driver_data) {
1899 pr_err("%s: unable to find UART port for tty %p\n",
1900 __func__, smux.tty);
1901 return;
1902 }
1903 state = smux.tty->driver_data;
1904 msm_hs_request_clock_on(state->uart_port);
1905}
1906
1907/**
1908 * Power down the UART.
1909 */
1910static void smux_uart_power_off(void)
1911{
1912 struct uart_state *state;
1913
1914 if (!smux.tty || !smux.tty->driver_data) {
1915 pr_err("%s: unable to find UART port for tty %p\n",
1916 __func__, smux.tty);
1917 return;
1918 }
1919 state = smux.tty->driver_data;
1920 msm_hs_request_clock_off(state->uart_port);
1921}
1922
1923/**
1924 * TX Wakeup Worker
1925 *
1926 * @work Not used
1927 *
1928 * Do an exponential back-off wakeup sequence with a maximum period
1929 * of approximately 1 second (1 << 20 microseconds).
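 *
 * Delays below SMUX_WAKEUP_DELAY_MIN are performed inline with
 * usleep_range(); longer delays are rescheduled as delayed work so the
 * workqueue is not blocked.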
1930 */
1931static void smux_wakeup_worker(struct work_struct *work)
1932{
1933 unsigned long flags;
1934 unsigned wakeup_delay;
1935 int complete = 0;
1936
1937 for (;;) {
1938 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
1939 if (smux.power_state == SMUX_PWR_ON) {
1940 /* wakeup complete */
1941 complete = 1;
1942 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
1943 break;
1944 } else {
1945 /* retry */
1946 wakeup_delay = smux.pwr_wakeup_delay_us;
1947 smux.pwr_wakeup_delay_us <<= 1;
1948 if (smux.pwr_wakeup_delay_us > SMUX_WAKEUP_DELAY_MAX)
1949 smux.pwr_wakeup_delay_us =
1950 SMUX_WAKEUP_DELAY_MAX;
1951 }
1952 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
1953 SMUX_DBG("%s: triggering wakeup\n", __func__);
1954 smux_send_byte(SMUX_WAKEUP_REQ);
1955
1956 if (wakeup_delay < SMUX_WAKEUP_DELAY_MIN) {
1957 SMUX_DBG("%s: sleeping for %u us\n", __func__,
1958 wakeup_delay);
1959 usleep_range(wakeup_delay, 2*wakeup_delay);
1960 } else {
1961 /* schedule delayed work */
1962 SMUX_DBG("%s: scheduling delayed wakeup in %u ms\n",
1963 __func__, wakeup_delay / 1000);
1964 queue_delayed_work(smux_tx_wq,
1965 &smux_wakeup_delayed_work,
1966 msecs_to_jiffies(wakeup_delay / 1000));
1967 break;
1968 }
1969 }
1970
1971 if (complete) {
1972 SMUX_DBG("%s: wakeup complete\n", __func__);
1973 /*
1974 * Cancel any pending retry. This avoids a race condition with
1975 * a new power-up request because:
1976 * 1) this worker doesn't modify the state
1977 * 2) this worker is processed on the same single-threaded
1978 * workqueue as new TX wakeup requests
1979 */
1980 cancel_delayed_work(&smux_wakeup_delayed_work);
1981 }
1982}
1983
1984
1985/**
1986 * Inactivity timeout worker. Periodically scheduled when link is active.
1987 * When it detects inactivity, it will power-down the UART link.
1988 *
1989 * @work Work structure (not used)
1990 */
1991static void smux_inactivity_worker(struct work_struct *work)
1992{
1993 int tx_ready = 0;
1994 struct smux_pkt_t *pkt;
1995 unsigned long flags;
1996
1997 spin_lock_irqsave(&smux.rx_lock_lha1, flags);
1998 spin_lock(&smux.tx_lock_lha2);
1999
2000 if (!smux.tx_activity_flag && !smux.rx_activity_flag) {
2001 /* no activity */
2002 if (smux.powerdown_enabled) {
2003 if (smux.power_state == SMUX_PWR_ON) {
2004 /* start power-down sequence */
2005 pkt = smux_alloc_pkt();
2006 if (pkt) {
2007 SMUX_DBG("%s: Power %d->%d\n", __func__,
2008 smux.power_state,
2009 SMUX_PWR_TURNING_OFF);
2010 smux.power_state = SMUX_PWR_TURNING_OFF;
2011
2012 /* send power-down request */
2013 pkt->hdr.cmd = SMUX_CMD_PWR_CTL;
2014 pkt->hdr.flags = 0;
2015 pkt->hdr.lcid = 0;
2016 smux_tx_queue(pkt,
2017 &smux_lch[SMUX_TEST_LCID],
2018 0);
2019 tx_ready = 1;
2020 }
2021 }
2022 } else {
2023 SMUX_DBG("%s: link inactive, but powerdown disabled\n",
2024 __func__);
2025 }
2026 }
2027 smux.tx_activity_flag = 0;
2028 smux.rx_activity_flag = 0;
2029
2030 spin_unlock(&smux.tx_lock_lha2);
2031 spin_unlock_irqrestore(&smux.rx_lock_lha1, flags);
2032
2033 if (tx_ready)
2034 list_channel(&smux_lch[SMUX_TEST_LCID]);
2035
2036 if ((smux.power_state == SMUX_PWR_OFF_FLUSH) ||
2037 (smux.power_state == SMUX_PWR_TURNING_OFF_FLUSH)) {
2038 /* ready to power-down the UART */
2039 SMUX_DBG("%s: Power %d->%d\n", __func__,
2040 smux.power_state, SMUX_PWR_OFF);
2041 smux_uart_power_off();
2042 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
2043 smux.power_state = SMUX_PWR_OFF;
2044 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2045 }
2046
2047 /* reschedule inactivity worker */
2048 if (smux.power_state != SMUX_PWR_OFF)
2049 queue_delayed_work(smux_tx_wq, &smux_delayed_inactivity_work,
2050 msecs_to_jiffies(SMUX_INACTIVITY_TIMEOUT_MS));
2051}
2052
2053/**
2054 * Transmit worker handles serializing and transmitting packets onto the
2055 * underlying transport.
2056 *
2057 * @work Work structure (not used)
2058 */
2059static void smux_tx_worker(struct work_struct *work)
2060{
2061 struct smux_pkt_t *pkt;
2062 struct smux_lch_t *ch;
2063 unsigned low_wm_notif;
2064 unsigned lcid;
2065 unsigned long flags;
2066
2067
2068 /*
2069 * Transmit packets in round-robin fashion based upon ready
2070 * channels.
2071 *
2072 * To eliminate the need to hold a lock for the entire
2073 * iteration through the channel ready list, the head of the
2074 * ready-channel list is always the next channel to be
2075 * processed. To send a packet, the first valid packet in
2076 * the head channel is removed and the head channel is then
2077 * rescheduled at the end of the queue by removing it and
2078 * inserting after the tail. The locks can then be released
2079 * while the packet is processed.
2080 */
2081 for (;;) {
2082 pkt = NULL;
2083 low_wm_notif = 0;
2084
2085 /* get the next ready channel */
2086 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
2087 if (list_empty(&smux.lch_tx_ready_list)) {
2088 /* no ready channels */
2089 SMUX_DBG("%s: no more ready channels, exiting\n",
2090 __func__);
2091 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2092 break;
2093 }
2094 smux.tx_activity_flag = 1;
2095
2096 if (smux.power_state != SMUX_PWR_ON
2097 && smux.power_state != SMUX_PWR_TURNING_OFF
2098 && smux.power_state != SMUX_PWR_TURNING_OFF_FLUSH) {
2099 /* Link isn't ready to transmit */
2100 if (smux.power_state == SMUX_PWR_OFF) {
2101 /* link is off, trigger wakeup */
2102 smux.pwr_wakeup_delay_us = 1;
2103 SMUX_DBG("%s: Power %d->%d\n", __func__,
2104 smux.power_state,
2105 SMUX_PWR_TURNING_ON);
2106 smux.power_state = SMUX_PWR_TURNING_ON;
2107 spin_unlock_irqrestore(&smux.tx_lock_lha2,
2108 flags);
2109 smux_uart_power_on();
2110 queue_work(smux_tx_wq, &smux_wakeup_work);
2111 } else {
2112 SMUX_DBG("%s: can not tx with power state %d\n",
2113 __func__,
2114 smux.power_state);
2115 spin_unlock_irqrestore(&smux.tx_lock_lha2,
2116 flags);
2117 }
2118 break;
2119 }
2120
2121 /* get the next packet to send and rotate channel list */
2122 ch = list_first_entry(&smux.lch_tx_ready_list,
2123 struct smux_lch_t,
2124 tx_ready_list);
2125
2126 spin_lock(&ch->state_lock_lhb1);
2127 spin_lock(&ch->tx_lock_lhb2);
2128 if (!list_empty(&ch->tx_queue)) {
2129 /*
2130 * If remote TX flow control is enabled or
2131 * the channel is not fully opened, then only
2132 * send command packets.
2133 */
2134 if (ch->tx_flow_control || !IS_FULLY_OPENED(ch)) {
2135 struct smux_pkt_t *curr;
2136 list_for_each_entry(curr, &ch->tx_queue, list) {
2137 if (curr->hdr.cmd != SMUX_CMD_DATA) {
2138 pkt = curr;
2139 break;
2140 }
2141 }
2142 } else {
2143 /* get next cmd/data packet to send */
2144 pkt = list_first_entry(&ch->tx_queue,
2145 struct smux_pkt_t, list);
2146 }
2147 }
2148
2149 if (pkt) {
2150 list_del(&pkt->list);
2151
2152 /* update packet stats */
2153 if (pkt->hdr.cmd == SMUX_CMD_DATA) {
2154 --ch->tx_pending_data_cnt;
2155 if (ch->notify_lwm &&
2156 ch->tx_pending_data_cnt
2157 <= SMUX_WM_LOW) {
2158 ch->notify_lwm = 0;
2159 low_wm_notif = 1;
2160 }
2161 }
2162
2163 /* advance to the next ready channel */
2164 list_rotate_left(&smux.lch_tx_ready_list);
2165 } else {
2166 /* no data in channel to send, remove from ready list */
2167 list_del(&ch->tx_ready_list);
2168 INIT_LIST_HEAD(&ch->tx_ready_list);
2169 }
2170 lcid = ch->lcid;
2171 spin_unlock(&ch->tx_lock_lhb2);
2172 spin_unlock(&ch->state_lock_lhb1);
2173 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2174
2175 if (low_wm_notif)
2176 schedule_notify(lcid, SMUX_LOW_WM_HIT, NULL);
2177
2178 /* send the packet */
2179 smux_tx_pkt(ch, pkt);
2180 smux_free_pkt(pkt);
2181 }
2182}
2183
2184
2185/**********************************************************************/
2186/* Kernel API */
2187/**********************************************************************/
2188
2189/**
2190 * Set or clear channel option using the SMUX_CH_OPTION_* channel
2191 * flags.
2192 *
2193 * @lcid Logical channel ID
2194 * @set Options to set
2195 * @clear Options to clear
2196 *
2197 * @returns 0 for success, < 0 for failure
2198 */
2199int msm_smux_set_ch_option(uint8_t lcid, uint32_t set, uint32_t clear)
2200{
2201 unsigned long flags;
2202 struct smux_lch_t *ch;
2203 int tx_ready = 0;
2204 int ret = 0;
2205
2206 if (smux_assert_lch_id(lcid))
2207 return -ENXIO;
2208
2209 ch = &smux_lch[lcid];
2210 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2211
2212 /* Local loopback mode */
2213 if (set & SMUX_CH_OPTION_LOCAL_LOOPBACK)
2214 ch->local_mode = SMUX_LCH_MODE_LOCAL_LOOPBACK;
2215
2216 if (clear & SMUX_CH_OPTION_LOCAL_LOOPBACK)
2217 ch->local_mode = SMUX_LCH_MODE_NORMAL;
2218
2219 /* Remote loopback mode */
2220 if (set & SMUX_CH_OPTION_REMOTE_LOOPBACK)
2221 ch->local_mode = SMUX_LCH_MODE_REMOTE_LOOPBACK;
2222
2223 if (clear & SMUX_CH_OPTION_REMOTE_LOOPBACK)
2224 ch->local_mode = SMUX_LCH_MODE_NORMAL;
2225
2226 /* Flow control */
2227 if (set & SMUX_CH_OPTION_REMOTE_TX_STOP) {
2228 ch->local_tiocm |= SMUX_CMD_STATUS_FLOW_CNTL;
2229 ret = smux_send_status_cmd(ch);
2230 tx_ready = 1;
2231 }
2232
2233 if (clear & SMUX_CH_OPTION_REMOTE_TX_STOP) {
2234 ch->local_tiocm &= ~SMUX_CMD_STATUS_FLOW_CNTL;
2235 ret = smux_send_status_cmd(ch);
2236 tx_ready = 1;
2237 }
2238
2239 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2240
2241 if (tx_ready)
2242 list_channel(ch);
2243
2244 return ret;
2245}
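
/*
 * Usage sketch (illustrative only, not part of this driver): a client can
 * ask the remote side to stop sending data by asserting flow control on a
 * channel and clearing it again later.  SMUX_TEST_LCID is used here purely
 * as an example channel ID.
 *
 *	int rc;
 *
 *	rc = msm_smux_set_ch_option(SMUX_TEST_LCID,
 *				SMUX_CH_OPTION_REMOTE_TX_STOP, 0);
 *	...
 *	rc = msm_smux_set_ch_option(SMUX_TEST_LCID,
 *				0, SMUX_CH_OPTION_REMOTE_TX_STOP);
 */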
2246
2247/**
2248 * Starts the opening sequence for a logical channel.
2249 *
2250 * @lcid Logical channel ID
2251 * @priv Free for client usage
2252 * @notify Event notification function
2253 * @get_rx_buffer Function used to provide a receive buffer to SMUX
2254 *
2255 * @returns 0 for success, <0 otherwise
2256 *
2257 * A channel must be fully closed (either never opened, or msm_smux_close()
2258 * has been called and the SMUX_DISCONNECTED notification has been
2259 * received).
2260 *
2261 * Once the remote side is opened, the client will receive a SMUX_CONNECTED
2262 * event.
2263 */
2264int msm_smux_open(uint8_t lcid, void *priv,
2265 void (*notify)(void *priv, int event_type, const void *metadata),
2266 int (*get_rx_buffer)(void *priv, void **pkt_priv, void **buffer,
2267 int size))
2268{
2269 int ret;
2270 struct smux_lch_t *ch;
2271 struct smux_pkt_t *pkt;
2272 int tx_ready = 0;
2273 unsigned long flags;
2274
2275 if (smux_assert_lch_id(lcid))
2276 return -ENXIO;
2277
2278 ch = &smux_lch[lcid];
2279 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2280
2281 if (ch->local_state == SMUX_LCH_LOCAL_CLOSING) {
2282 ret = -EAGAIN;
2283 goto out;
2284 }
2285
2286 if (ch->local_state != SMUX_LCH_LOCAL_CLOSED) {
2287 pr_err("%s: open lcid %d local state %x invalid\n",
2288 __func__, lcid, ch->local_state);
2289 ret = -EINVAL;
2290 goto out;
2291 }
2292
2293 SMUX_DBG("lcid %d local state 0x%x -> 0x%x\n", lcid,
2294 ch->local_state,
2295 SMUX_LCH_LOCAL_OPENING);
2296
2297 ch->local_state = SMUX_LCH_LOCAL_OPENING;
2298
2299 ch->priv = priv;
2300 ch->notify = notify;
2301 ch->get_rx_buffer = get_rx_buffer;
2302 ret = 0;
2303
2304 /* Send Open Command */
2305 pkt = smux_alloc_pkt();
2306 if (!pkt) {
2307 ret = -ENOMEM;
2308 goto out;
2309 }
2310 pkt->hdr.magic = SMUX_MAGIC;
2311 pkt->hdr.cmd = SMUX_CMD_OPEN_LCH;
2312 pkt->hdr.flags = SMUX_CMD_OPEN_POWER_COLLAPSE;
2313 if (ch->local_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK)
2314 pkt->hdr.flags |= SMUX_CMD_OPEN_REMOTE_LOOPBACK;
2315 pkt->hdr.lcid = lcid;
2316 pkt->hdr.payload_len = 0;
2317 pkt->hdr.pad_len = 0;
2318 smux_tx_queue(pkt, ch, 0);
2319 tx_ready = 1;
2320
2321out:
2322 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2323 if (tx_ready)
2324 list_channel(ch);
2325 return ret;
2326}
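
/*
 * Usage sketch (illustrative only): a minimal client open sequence.  The
 * demo_* callbacks are hypothetical names; a real client should wait for the
 * SMUX_CONNECTED event before expecting data traffic and should pick an
 * allocation flag that suits the context get_rx_buffer is called from.
 *
 *	static void demo_notify(void *priv, int event_type, const void *metadata)
 *	{
 *		if (event_type == SMUX_CONNECTED)
 *			pr_info("demo: channel fully opened\n");
 *	}
 *
 *	static int demo_get_rx_buffer(void *priv, void **pkt_priv,
 *				void **buffer, int size)
 *	{
 *		*pkt_priv = NULL;
 *		*buffer = kmalloc(size, GFP_ATOMIC);
 *		return *buffer ? 0 : -ENOMEM;
 *	}
 *
 *	ret = msm_smux_open(SMUX_TEST_LCID, NULL, demo_notify,
 *				demo_get_rx_buffer);
 */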
2327
2328/**
2329 * Starts the closing sequence for a logical channel.
2330 *
2331 * @lcid Logical channel ID
2332 *
2333 * @returns 0 for success, <0 otherwise
2334 *
2335 * Once the close event has been acknowledged by the remote side, the client
2336 * will receive a SMUX_DISCONNECTED notification.
2337 */
2338int msm_smux_close(uint8_t lcid)
2339{
2340 int ret = 0;
2341 struct smux_lch_t *ch;
2342 struct smux_pkt_t *pkt;
2343 int tx_ready = 0;
2344 unsigned long flags;
2345
2346 if (smux_assert_lch_id(lcid))
2347 return -ENXIO;
2348
2349 ch = &smux_lch[lcid];
2350 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2351 ch->local_tiocm = 0x0;
2352 ch->remote_tiocm = 0x0;
2353 ch->tx_pending_data_cnt = 0;
2354 ch->notify_lwm = 0;
2355
2356 /* Purge TX queue */
2357 spin_lock(&ch->tx_lock_lhb2);
2358 while (!list_empty(&ch->tx_queue)) {
2359 pkt = list_first_entry(&ch->tx_queue, struct smux_pkt_t,
2360 list);
2361 list_del(&pkt->list);
2362
2363 if (pkt->hdr.cmd == SMUX_CMD_OPEN_LCH) {
2364 /* Open was never sent, just force to closed state */
2365 union notifier_metadata meta_disconnected;
2366
2367 ch->local_state = SMUX_LCH_LOCAL_CLOSED;
2368 meta_disconnected.disconnected.is_ssr = 0;
2369 schedule_notify(lcid, SMUX_DISCONNECTED,
2370 &meta_disconnected);
2371 } else if (pkt->hdr.cmd == SMUX_CMD_DATA) {
2372 /* Notify client of failed write */
2373 union notifier_metadata meta_write;
2374
2375 meta_write.write.pkt_priv = pkt->priv;
2376 meta_write.write.buffer = pkt->payload;
2377 meta_write.write.len = pkt->hdr.payload_len;
2378 schedule_notify(ch->lcid, SMUX_WRITE_FAIL, &meta_write);
2379 }
2380 smux_free_pkt(pkt);
2381 }
2382 spin_unlock(&ch->tx_lock_lhb2);
2383
2384 /* Send Close Command */
2385 if (ch->local_state == SMUX_LCH_LOCAL_OPENED ||
2386 ch->local_state == SMUX_LCH_LOCAL_OPENING) {
2387 SMUX_DBG("lcid %d local state 0x%x -> 0x%x\n", lcid,
2388 ch->local_state,
2389 SMUX_LCH_LOCAL_CLOSING);
2390
2391 ch->local_state = SMUX_LCH_LOCAL_CLOSING;
2392 pkt = smux_alloc_pkt();
2393 if (pkt) {
2394 pkt->hdr.cmd = SMUX_CMD_CLOSE_LCH;
2395 pkt->hdr.flags = 0;
2396 pkt->hdr.lcid = lcid;
2397 pkt->hdr.payload_len = 0;
2398 pkt->hdr.pad_len = 0;
2399 smux_tx_queue(pkt, ch, 0);
2400 tx_ready = 1;
2401 } else {
2402 pr_err("%s: pkt allocation failed\n", __func__);
2403 ret = -ENOMEM;
2404 }
2405 }
2406 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2407
2408 if (tx_ready)
2409 list_channel(ch);
2410
2411 return ret;
2412}
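
/*
 * Usage sketch (illustrative only): tearing down the channel opened above.
 * The channel should be treated as closed only after the SMUX_DISCONNECTED
 * event arrives in the client's notify callback.
 *
 *	ret = msm_smux_close(SMUX_TEST_LCID);
 *	if (ret < 0)
 *		pr_err("demo: close failed %d\n", ret);
 */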
2413
2414/**
2415 * Write data to a logical channel.
2416 *
2417 * @lcid Logical channel ID
2418 * @pkt_priv Client data that will be returned with the SMUX_WRITE_DONE or
2419 * SMUX_WRITE_FAIL notification.
2420 * @data Data to write
2421 * @len Length of @data
2422 *
2423 * @returns 0 for success, <0 otherwise
2424 *
2425 * Data may be written immediately after msm_smux_open() is called,
2426 * but the data will wait in the transmit queue until the channel has
2427 * been fully opened.
2428 *
2429 * Once the data has been written, the client will receive either a completion
2430 * (SMUX_WRITE_DONE) or a failure notice (SMUX_WRITE_FAIL).
2431 */
2432int msm_smux_write(uint8_t lcid, void *pkt_priv, const void *data, int len)
2433{
2434 struct smux_lch_t *ch;
2435	struct smux_pkt_t *pkt = NULL;
2436 int tx_ready = 0;
2437 unsigned long flags;
2438 int ret;
2439
2440 if (smux_assert_lch_id(lcid))
2441 return -ENXIO;
2442
2443 ch = &smux_lch[lcid];
2444 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2445
2446 if (ch->local_state != SMUX_LCH_LOCAL_OPENED &&
2447 ch->local_state != SMUX_LCH_LOCAL_OPENING) {
2448		pr_err("%s: invalid local state %d channel %d\n",
2449 __func__, ch->local_state, lcid);
2450 ret = -EINVAL;
2451 goto out;
2452 }
2453
2454 if (len > SMUX_MAX_PKT_SIZE - sizeof(struct smux_hdr_t)) {
2455 pr_err("%s: payload %d too large\n",
2456 __func__, len);
2457 ret = -E2BIG;
2458 goto out;
2459 }
2460
2461 pkt = smux_alloc_pkt();
2462 if (!pkt) {
2463 ret = -ENOMEM;
2464 goto out;
2465 }
2466
2467 pkt->hdr.cmd = SMUX_CMD_DATA;
2468 pkt->hdr.lcid = lcid;
2469 pkt->hdr.flags = 0;
2470 pkt->hdr.payload_len = len;
2471 pkt->payload = (void *)data;
2472 pkt->priv = pkt_priv;
2473 pkt->hdr.pad_len = 0;
2474
2475 spin_lock(&ch->tx_lock_lhb2);
2476 /* verify high watermark */
2477	SMUX_DBG("%s: pending %d\n", __func__, ch->tx_pending_data_cnt);
2478
2479 if (ch->tx_pending_data_cnt >= SMUX_WM_HIGH) {
2480 pr_err("%s: ch %d high watermark %d exceeded %d\n",
2481 __func__, lcid, SMUX_WM_HIGH,
2482 ch->tx_pending_data_cnt);
2483 ret = -EAGAIN;
2484 goto out_inner;
2485 }
2486
2487 /* queue packet for transmit */
2488 if (++ch->tx_pending_data_cnt == SMUX_WM_HIGH) {
2489 ch->notify_lwm = 1;
2490 pr_err("%s: high watermark hit\n", __func__);
2491 schedule_notify(lcid, SMUX_HIGH_WM_HIT, NULL);
2492 }
2493 list_add_tail(&pkt->list, &ch->tx_queue);
2494
2495 /* add to ready list */
2496 if (IS_FULLY_OPENED(ch))
2497 tx_ready = 1;
2498
2499 ret = 0;
2500
2501out_inner:
2502 spin_unlock(&ch->tx_lock_lhb2);
2503
2504out:
2505	if (ret && pkt)
2506		smux_free_pkt(pkt);
2507 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2508
2509 if (tx_ready)
2510 list_channel(ch);
2511
2512 return ret;
2513}
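
/*
 * Usage sketch (illustrative only): queue a buffer for transmit and back off
 * on -EAGAIN.  The buffer and the hypothetical my_request cookie must remain
 * valid until SMUX_WRITE_DONE or SMUX_WRITE_FAIL is reported with the same
 * pkt_priv value.
 *
 *	ret = msm_smux_write(SMUX_TEST_LCID, my_request, buf, len);
 *	if (ret == -EAGAIN) {
 *		// high watermark reached; retry after SMUX_LOW_WM_HIT
 *	}
 */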
2514
2515/**
2516 * Returns true if the TX queue is currently full (at or above the high water mark).
2517 *
2518 * @lcid Logical channel ID
2519 * @returns 0 if channel is not full
2520 * 1 if it is full
2521 * < 0 for error
2522 */
2523int msm_smux_is_ch_full(uint8_t lcid)
2524{
2525 struct smux_lch_t *ch;
2526 unsigned long flags;
2527 int is_full = 0;
2528
2529 if (smux_assert_lch_id(lcid))
2530 return -ENXIO;
2531
2532 ch = &smux_lch[lcid];
2533
2534 spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
2535 if (ch->tx_pending_data_cnt >= SMUX_WM_HIGH)
2536 is_full = 1;
2537 spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);
2538
2539 return is_full;
2540}
2541
2542/**
2543 * Returns true if the TX queue has space for more packets (it is at or
2544 * below the low water mark).
2545 *
2546 * @lcid Logical channel ID
2547 * @returns 0 if channel is above low watermark
2548 * 1 if it's at or below the low watermark
2549 * < 0 for error
2550 */
2551int msm_smux_is_ch_low(uint8_t lcid)
2552{
2553 struct smux_lch_t *ch;
2554 unsigned long flags;
2555 int is_low = 0;
2556
2557 if (smux_assert_lch_id(lcid))
2558 return -ENXIO;
2559
2560 ch = &smux_lch[lcid];
2561
2562 spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
2563 if (ch->tx_pending_data_cnt <= SMUX_WM_LOW)
2564 is_low = 1;
2565 spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);
2566
2567 return is_low;
2568}
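
/*
 * Usage sketch (illustrative only): a producer can poll the two watermark
 * helpers above to pace itself instead of relying solely on the
 * SMUX_HIGH_WM_HIT / SMUX_LOW_WM_HIT events.
 *
 *	if (msm_smux_is_ch_full(SMUX_TEST_LCID) > 0) {
 *		// stop producing until msm_smux_is_ch_low() returns 1
 *	}
 */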
2569
2570/**
2571 * Send TIOCM status update.
2572 *
2573 * @ch Channel for update
2574 *
2575 * @returns 0 for success, <0 for failure
2576 *
2577 * Channel lock must be held before calling.
2578 */
2579static int smux_send_status_cmd(struct smux_lch_t *ch)
2580{
2581 struct smux_pkt_t *pkt;
2582
2583 if (!ch)
2584 return -EINVAL;
2585
2586 pkt = smux_alloc_pkt();
2587 if (!pkt)
2588 return -ENOMEM;
2589
2590 pkt->hdr.lcid = ch->lcid;
2591 pkt->hdr.cmd = SMUX_CMD_STATUS;
2592 pkt->hdr.flags = ch->local_tiocm;
2593 pkt->hdr.payload_len = 0;
2594 pkt->hdr.pad_len = 0;
2595 smux_tx_queue(pkt, ch, 0);
2596
2597 return 0;
2598}
2599
2600/**
2601 * Internal helper function for getting the TIOCM status with
2602 * state_lock_lhb1 already locked.
2603 *
2604 * @ch Channel pointer
2605 *
2606 * @returns TIOCM status
2607 */
2608static long msm_smux_tiocm_get_atomic(struct smux_lch_t *ch)
2609{
2610 long status = 0x0;
2611
2612 status |= (ch->remote_tiocm & SMUX_CMD_STATUS_RTC) ? TIOCM_DSR : 0;
2613 status |= (ch->remote_tiocm & SMUX_CMD_STATUS_RTR) ? TIOCM_CTS : 0;
2614 status |= (ch->remote_tiocm & SMUX_CMD_STATUS_RI) ? TIOCM_RI : 0;
2615 status |= (ch->remote_tiocm & SMUX_CMD_STATUS_DCD) ? TIOCM_CD : 0;
2616
2617 status |= (ch->local_tiocm & SMUX_CMD_STATUS_RTC) ? TIOCM_DTR : 0;
2618 status |= (ch->local_tiocm & SMUX_CMD_STATUS_RTR) ? TIOCM_RTS : 0;
2619
2620 return status;
2621}
2622
2623/**
2624 * Get the TIOCM status bits.
2625 *
2626 * @lcid Logical channel ID
2627 *
2628 * @returns >= 0 TIOCM status bits
2629 * < 0 Error condition
2630 */
2631long msm_smux_tiocm_get(uint8_t lcid)
2632{
2633 struct smux_lch_t *ch;
2634 unsigned long flags;
2635 long status = 0x0;
2636
2637 if (smux_assert_lch_id(lcid))
2638 return -ENXIO;
2639
2640 ch = &smux_lch[lcid];
2641 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2642 status = msm_smux_tiocm_get_atomic(ch);
2643 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2644
2645 return status;
2646}
2647
2648/**
2649 * Set/clear the TIOCM status bits.
2650 *
2651 * @lcid Logical channel ID
2652 * @set Bits to set
2653 * @clear Bits to clear
2654 *
2655 * @returns 0 for success; < 0 for failure
2656 *
2657 * If a bit is specified in both the @set and @clear masks, then the clear bit
2658 * definition will dominate and the bit will be cleared.
2659 */
2660int msm_smux_tiocm_set(uint8_t lcid, uint32_t set, uint32_t clear)
2661{
2662 struct smux_lch_t *ch;
2663 unsigned long flags;
2664 uint8_t old_status;
2665 uint8_t status_set = 0x0;
2666 uint8_t status_clear = 0x0;
2667 int tx_ready = 0;
2668 int ret = 0;
2669
2670 if (smux_assert_lch_id(lcid))
2671 return -ENXIO;
2672
2673 ch = &smux_lch[lcid];
2674 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2675
2676 status_set |= (set & TIOCM_DTR) ? SMUX_CMD_STATUS_RTC : 0;
2677 status_set |= (set & TIOCM_RTS) ? SMUX_CMD_STATUS_RTR : 0;
2678 status_set |= (set & TIOCM_RI) ? SMUX_CMD_STATUS_RI : 0;
2679 status_set |= (set & TIOCM_CD) ? SMUX_CMD_STATUS_DCD : 0;
2680
2681 status_clear |= (clear & TIOCM_DTR) ? SMUX_CMD_STATUS_RTC : 0;
2682 status_clear |= (clear & TIOCM_RTS) ? SMUX_CMD_STATUS_RTR : 0;
2683 status_clear |= (clear & TIOCM_RI) ? SMUX_CMD_STATUS_RI : 0;
2684 status_clear |= (clear & TIOCM_CD) ? SMUX_CMD_STATUS_DCD : 0;
2685
2686 old_status = ch->local_tiocm;
2687 ch->local_tiocm |= status_set;
2688 ch->local_tiocm &= ~status_clear;
2689
2690 if (ch->local_tiocm != old_status) {
2691 ret = smux_send_status_cmd(ch);
2692 tx_ready = 1;
2693 }
2694 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2695
2696 if (tx_ready)
2697 list_channel(ch);
2698
2699 return ret;
2700}
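
/*
 * Usage sketch (illustrative only): read the modem-control bits for a channel
 * and assert DTR/RTS.  A negative return value indicates an invalid channel.
 *
 *	long bits = msm_smux_tiocm_get(SMUX_TEST_LCID);
 *
 *	if (bits >= 0 && (bits & TIOCM_CTS))
 *		pr_info("demo: remote is ready to receive\n");
 *
 *	ret = msm_smux_tiocm_set(SMUX_TEST_LCID, TIOCM_DTR | TIOCM_RTS, 0);
 */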
2701
2702/**********************************************************************/
2703/* Line Discipline Interface */
2704/**********************************************************************/
2705static int smuxld_open(struct tty_struct *tty)
2706{
2707 int i;
2708 int tmp;
2709 unsigned long flags;
2710
2711 if (!smux.is_initialized)
2712 return -ENODEV;
2713
2714 spin_lock_irqsave(&smux.lock_lha0, flags);
2715 if (smux.ld_open_count) {
2716 pr_err("%s: %p multiple instances not supported\n",
2717 __func__, tty);
2718		spin_unlock_irqrestore(&smux.lock_lha0, flags);
2719		return -EEXIST;
2720	}
2721
2722	if (tty->ops->write == NULL) {
2723		spin_unlock_irqrestore(&smux.lock_lha0, flags);
2724		return -EINVAL;
2725	}
2726	++smux.ld_open_count;
2727
2728 /* connect to TTY */
2729 smux.tty = tty;
2730 tty->disc_data = &smux;
2731 tty->receive_room = TTY_RECEIVE_ROOM;
2732 tty_driver_flush_buffer(tty);
2733
2734 /* power-down the UART if we are idle */
2735 spin_lock(&smux.tx_lock_lha2);
2736 if (smux.power_state == SMUX_PWR_OFF) {
2737 SMUX_DBG("%s: powering off uart\n", __func__);
2738 smux.power_state = SMUX_PWR_OFF_FLUSH;
2739 spin_unlock(&smux.tx_lock_lha2);
2740 queue_work(smux_tx_wq, &smux_inactivity_work);
2741 } else {
2742 spin_unlock(&smux.tx_lock_lha2);
2743 }
2744	spin_unlock_irqrestore(&smux.lock_lha0, flags);
2745
2746 /* register platform devices */
2747 for (i = 0; i < ARRAY_SIZE(smux_devs); ++i) {
2748 tmp = platform_device_register(&smux_devs[i]);
2749 if (tmp)
2750 pr_err("%s: error %d registering device %s\n",
2751 __func__, tmp, smux_devs[i].name);
2752 }
2753	return 0;
2754}
2755
2756static void smuxld_close(struct tty_struct *tty)
2757{
2758 unsigned long flags;
2759 int i;
2760
2761 spin_lock_irqsave(&smux.lock_lha0, flags);
2762 if (smux.ld_open_count <= 0) {
2763 pr_err("%s: invalid ld count %d\n", __func__,
2764 smux.ld_open_count);
2765		spin_unlock_irqrestore(&smux.lock_lha0, flags);
2766		return;
2767	}
2768	spin_unlock_irqrestore(&smux.lock_lha0, flags);
2769
2770 for (i = 0; i < ARRAY_SIZE(smux_devs); ++i)
2771 platform_device_unregister(&smux_devs[i]);
2772
2773 --smux.ld_open_count;
2774}
2775
2776/**
2777 * Receive data from TTY Line Discipline.
2778 *
2779 * @tty TTY structure
2780 * @cp Character data
2781 * @fp Flag data
2782 * @count Size of character and flag data
2783 */
2784void smuxld_receive_buf(struct tty_struct *tty, const unsigned char *cp,
2785 char *fp, int count)
2786{
2787 int i;
2788 int last_idx = 0;
2789 const char *tty_name = NULL;
2790 char *f;
2791
2792 if (smux_debug_mask & MSM_SMUX_DEBUG)
2793 print_hex_dump(KERN_INFO, "smux tty rx: ", DUMP_PREFIX_OFFSET,
2794 16, 1, cp, count, true);
2795
2796 /* verify error flags */
2797 for (i = 0, f = fp; i < count; ++i, ++f) {
2798 if (*f != TTY_NORMAL) {
2799 if (tty)
2800 tty_name = tty->name;
2801 pr_err("%s: TTY %s Error %d (%s)\n", __func__,
2802 tty_name, *f, tty_flag_to_str(*f));
2803
2804 /* feed all previous valid data to the parser */
2805 smux_rx_state_machine(cp + last_idx, i - last_idx,
2806 TTY_NORMAL);
2807
2808 /* feed bad data to parser */
2809 smux_rx_state_machine(cp + i, 1, *f);
2810 last_idx = i + 1;
2811 }
2812 }
2813
2814 /* feed data to RX state machine */
2815 smux_rx_state_machine(cp + last_idx, count - last_idx, TTY_NORMAL);
2816}
2817
2818static void smuxld_flush_buffer(struct tty_struct *tty)
2819{
2820 pr_err("%s: not supported\n", __func__);
2821}
2822
2823static ssize_t smuxld_chars_in_buffer(struct tty_struct *tty)
2824{
2825 pr_err("%s: not supported\n", __func__);
2826 return -ENODEV;
2827}
2828
2829static ssize_t smuxld_read(struct tty_struct *tty, struct file *file,
2830 unsigned char __user *buf, size_t nr)
2831{
2832 pr_err("%s: not supported\n", __func__);
2833 return -ENODEV;
2834}
2835
2836static ssize_t smuxld_write(struct tty_struct *tty, struct file *file,
2837 const unsigned char *buf, size_t nr)
2838{
2839 pr_err("%s: not supported\n", __func__);
2840 return -ENODEV;
2841}
2842
2843static int smuxld_ioctl(struct tty_struct *tty, struct file *file,
2844 unsigned int cmd, unsigned long arg)
2845{
2846 pr_err("%s: not supported\n", __func__);
2847 return -ENODEV;
2848}
2849
2850static unsigned int smuxld_poll(struct tty_struct *tty, struct file *file,
2851 struct poll_table_struct *tbl)
2852{
2853 pr_err("%s: not supported\n", __func__);
2854 return -ENODEV;
2855}
2856
2857static void smuxld_write_wakeup(struct tty_struct *tty)
2858{
2859 pr_err("%s: not supported\n", __func__);
2860}
2861
2862static struct tty_ldisc_ops smux_ldisc_ops = {
2863 .owner = THIS_MODULE,
2864 .magic = TTY_LDISC_MAGIC,
2865 .name = "n_smux",
2866 .open = smuxld_open,
2867 .close = smuxld_close,
2868 .flush_buffer = smuxld_flush_buffer,
2869 .chars_in_buffer = smuxld_chars_in_buffer,
2870 .read = smuxld_read,
2871 .write = smuxld_write,
2872 .ioctl = smuxld_ioctl,
2873 .poll = smuxld_poll,
2874 .receive_buf = smuxld_receive_buf,
2875 .write_wakeup = smuxld_write_wakeup
2876};
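
/*
 * Attaching the line discipline (illustrative only): once this module is
 * loaded, a user-space helper can bind N_SMUX to the HS-UART tty.  The
 * device node name below is an assumption and is platform dependent.
 *
 *	int ldisc = N_SMUX;
 *	int fd = open("/dev/ttyHS0", O_RDWR | O_NOCTTY);
 *
 *	if (fd >= 0 && ioctl(fd, TIOCSETD, &ldisc) < 0)
 *		perror("TIOCSETD");
 */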
2877
2878static int __init smux_init(void)
2879{
2880 int ret;
2881
2882 spin_lock_init(&smux.lock_lha0);
2883
2884 spin_lock_init(&smux.rx_lock_lha1);
2885 smux.rx_state = SMUX_RX_IDLE;
2886 smux.power_state = SMUX_PWR_OFF;
2887 smux.pwr_wakeup_delay_us = 1;
2888 smux.powerdown_enabled = 0;
2889 smux.rx_activity_flag = 0;
2890 smux.tx_activity_flag = 0;
2891 smux.recv_len = 0;
2892 smux.tty = NULL;
2893 smux.ld_open_count = 0;
2894 smux.in_reset = 0;
2895 smux.is_initialized = 1;
2896 smux_byte_loopback = 0;
2897
2898 spin_lock_init(&smux.tx_lock_lha2);
2899 INIT_LIST_HEAD(&smux.lch_tx_ready_list);
2900
2901 ret = tty_register_ldisc(N_SMUX, &smux_ldisc_ops);
2902 if (ret != 0) {
2903 pr_err("%s: error %d registering line discipline\n",
2904 __func__, ret);
2905 return ret;
2906 }
2907
2908 ret = lch_init();
2909 if (ret != 0) {
2910 pr_err("%s: lch_init failed\n", __func__);
2911 return ret;
2912 }
2913
2914 return 0;
2915}
2916
2917static void __exit smux_exit(void)
2918{
2919 int ret;
2920
2921 ret = tty_unregister_ldisc(N_SMUX);
2922 if (ret != 0) {
2923 pr_err("%s error %d unregistering line discipline\n",
2924 __func__, ret);
2925 return;
2926 }
2927}
2928
2929module_init(smux_init);
2930module_exit(smux_exit);
2931
2932MODULE_DESCRIPTION("Serial Mux TTY Line Discipline");
2933MODULE_LICENSE("GPL v2");
2934MODULE_ALIAS_LDISC(N_SMUX);