 1/* drivers/tty/n_smux.c
2 *
3 * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
4 *
5 * This software is licensed under the terms of the GNU General Public
6 * License version 2, as published by the Free Software Foundation, and
7 * may be copied, distributed, and modified under those terms.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 */
15#include <linux/module.h>
16#include <linux/types.h>
17#include <linux/errno.h>
18#include <linux/tty.h>
19#include <linux/tty_flip.h>
20#include <linux/tty_driver.h>
21#include <linux/smux.h>
22#include <linux/list.h>
23#include <linux/kfifo.h>
24#include <linux/slab.h>
25#include <linux/types.h>
26#include <linux/platform_device.h>
27#include <linux/delay.h>
 28#include <mach/subsystem_notif.h>
29#include <mach/subsystem_restart.h>
 30#include <mach/msm_serial_hs.h>
31#include "smux_private.h"
32#include "smux_loopback.h"
33
34#define SMUX_NOTIFY_FIFO_SIZE 128
35#define SMUX_TX_QUEUE_SIZE 256
 36#define SMUX_WM_LOW 2
37#define SMUX_WM_HIGH 4
38#define SMUX_PKT_LOG_SIZE 80
39
40/* Maximum size we can accept in a single RX buffer */
41#define TTY_RECEIVE_ROOM 65536
42#define TTY_BUFFER_FULL_WAIT_MS 50
43
44/* maximum sleep time between wakeup attempts */
45#define SMUX_WAKEUP_DELAY_MAX (1 << 20)
46
47/* minimum delay for scheduling delayed work */
48#define SMUX_WAKEUP_DELAY_MIN (1 << 15)
49
50/* inactivity timeout for no rx/tx activity */
51#define SMUX_INACTIVITY_TIMEOUT_MS 1000
52
 53/* RX get_rx_buffer retry timeout values */
54#define SMUX_RX_RETRY_MIN_MS (1 << 0) /* 1 ms */
55#define SMUX_RX_RETRY_MAX_MS (1 << 10) /* 1024 ms */
56
 57enum {
58 MSM_SMUX_DEBUG = 1U << 0,
59 MSM_SMUX_INFO = 1U << 1,
60 MSM_SMUX_POWER_INFO = 1U << 2,
61 MSM_SMUX_PKT = 1U << 3,
62};
63
64static int smux_debug_mask;
65module_param_named(debug_mask, smux_debug_mask,
66 int, S_IRUGO | S_IWUSR | S_IWGRP);
67
 68/* Byte-loopback and simulated wakeup delay controls used for testing */
69int smux_byte_loopback;
70module_param_named(byte_loopback, smux_byte_loopback,
71 int, S_IRUGO | S_IWUSR | S_IWGRP);
72int smux_simulate_wakeup_delay = 1;
73module_param_named(simulate_wakeup_delay, smux_simulate_wakeup_delay,
74 int, S_IRUGO | S_IWUSR | S_IWGRP);
75
76#define SMUX_DBG(x...) do { \
77 if (smux_debug_mask & MSM_SMUX_DEBUG) \
78 pr_info(x); \
79} while (0)
80
81#define SMUX_LOG_PKT_RX(pkt) do { \
82 if (smux_debug_mask & MSM_SMUX_PKT) \
83 smux_log_pkt(pkt, 1); \
84} while (0)
85
86#define SMUX_LOG_PKT_TX(pkt) do { \
87 if (smux_debug_mask & MSM_SMUX_PKT) \
88 smux_log_pkt(pkt, 0); \
89} while (0)
90
91/**
92 * Return true if channel is fully opened (both
93 * local and remote sides are in the OPENED state).
94 */
95#define IS_FULLY_OPENED(ch) \
96 (ch && (ch)->local_state == SMUX_LCH_LOCAL_OPENED \
97 && (ch)->remote_state == SMUX_LCH_REMOTE_OPENED)
98
99static struct platform_device smux_devs[] = {
100 {.name = "SMUX_CTL", .id = -1},
101 {.name = "SMUX_RMNET", .id = -1},
102 {.name = "SMUX_DUN_DATA_HSUART", .id = 0},
103 {.name = "SMUX_RMNET_DATA_HSUART", .id = 1},
104 {.name = "SMUX_RMNET_CTL_HSUART", .id = 0},
105 {.name = "SMUX_DIAG", .id = -1},
106};
107
108enum {
109 SMUX_CMD_STATUS_RTC = 1 << 0,
110 SMUX_CMD_STATUS_RTR = 1 << 1,
111 SMUX_CMD_STATUS_RI = 1 << 2,
112 SMUX_CMD_STATUS_DCD = 1 << 3,
113 SMUX_CMD_STATUS_FLOW_CNTL = 1 << 4,
114};
115
116/* Channel mode */
117enum {
118 SMUX_LCH_MODE_NORMAL,
119 SMUX_LCH_MODE_LOCAL_LOOPBACK,
120 SMUX_LCH_MODE_REMOTE_LOOPBACK,
121};
122
123enum {
124 SMUX_RX_IDLE,
125 SMUX_RX_MAGIC,
126 SMUX_RX_HDR,
127 SMUX_RX_PAYLOAD,
128 SMUX_RX_FAILURE,
129};
130
131/**
132 * Power states.
133 *
134 * The _FLUSH states are internal transitional states and are not part of the
135 * official state machine.
136 */
137enum {
138 SMUX_PWR_OFF,
139 SMUX_PWR_TURNING_ON,
140 SMUX_PWR_ON,
141 SMUX_PWR_TURNING_OFF_FLUSH,
142 SMUX_PWR_TURNING_OFF,
143 SMUX_PWR_OFF_FLUSH,
144};
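/*
 * Illustrative power sequence pieced together from the handlers in this
 * file; transitions marked (*) occur in the TX/inactivity workers outside
 * this excerpt and are assumptions:
 *
 *	SMUX_PWR_OFF -> SMUX_PWR_TURNING_ON (*) -> SMUX_PWR_ON
 *	SMUX_PWR_ON  -> SMUX_PWR_TURNING_OFF_FLUSH -> SMUX_PWR_TURNING_OFF (*)
 *	             -> SMUX_PWR_OFF_FLUSH -> SMUX_PWR_OFF (*)
 */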
145
146/**
147 * Logical Channel Structure. One instance per channel.
148 *
149 * Locking Hierarchy
 150 * Each lock has a postfix that describes its locking level. If multiple locks
 151 * are required, they must be acquired in order of increasing hierarchy number,
 152 * which prevents deadlock.
153 *
154 * Locking Example
155 * If state_lock_lhb1 is currently held and the TX list needs to be
 156 * manipulated, then tx_lock_lhb2 may be locked since its locking hierarchy
157 * is greater. However, if tx_lock_lhb2 is held, then state_lock_lhb1 may
158 * not be acquired since it would result in a deadlock.
159 *
160 * Note that the Line Discipline locks (*_lha) should always be acquired
161 * before the logical channel locks.
162 */
163struct smux_lch_t {
164 /* channel state */
165 spinlock_t state_lock_lhb1;
166 uint8_t lcid;
167 unsigned local_state;
168 unsigned local_mode;
169 uint8_t local_tiocm;
170
171 unsigned remote_state;
172 unsigned remote_mode;
173 uint8_t remote_tiocm;
174
175 int tx_flow_control;
176
177 /* client callbacks and private data */
178 void *priv;
179 void (*notify)(void *priv, int event_type, const void *metadata);
180 int (*get_rx_buffer)(void *priv, void **pkt_priv, void **buffer,
181 int size);
182
 183 /* RX Info */
184 struct list_head rx_retry_queue;
185 unsigned rx_retry_queue_cnt;
186 struct delayed_work rx_retry_work;
187
 188 /* TX Info */
189 spinlock_t tx_lock_lhb2;
190 struct list_head tx_queue;
191 struct list_head tx_ready_list;
192 unsigned tx_pending_data_cnt;
193 unsigned notify_lwm;
194};
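/*
 * Minimal sketch of the documented lock ordering: lhb1 may be taken before
 * lhb2, never the reverse (smux_lch_purge() below follows exactly this
 * pattern):
 *
 *	spin_lock_irqsave(&ch->state_lock_lhb1, flags);
 *	spin_lock(&ch->tx_lock_lhb2);
 *	... update channel state and TX queue ...
 *	spin_unlock(&ch->tx_lock_lhb2);
 *	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
 */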
195
196union notifier_metadata {
197 struct smux_meta_disconnected disconnected;
198 struct smux_meta_read read;
199 struct smux_meta_write write;
200 struct smux_meta_tiocm tiocm;
201};
202
203struct smux_notify_handle {
204 void (*notify)(void *priv, int event_type, const void *metadata);
205 void *priv;
206 int event_type;
207 union notifier_metadata *metadata;
208};
209
210/**
 211 * Get RX Buffer Retry structure.
212 *
213 * This is used for clients that are unable to provide an RX buffer
 214 * immediately. This temporary structure holds the packet data while the
 215 * buffer request is retried.
216 */
217struct smux_rx_pkt_retry {
218 struct smux_pkt_t *pkt;
219 struct list_head rx_retry_list;
220 unsigned timeout_in_ms;
221};
222
223/**
 224 * Receive worker data structure.
225 *
226 * One instance is created for every call to smux_rx_state_machine.
227 */
228struct smux_rx_worker_data {
229 const unsigned char *data;
230 int len;
231 int flag;
232
233 struct work_struct work;
234 struct completion work_complete;
235};
236
237/**
 238 * Line discipline and module structure.
239 *
 240 * Only one instance exists, since multiple instances of the line discipline
 241 * are not allowed.
242 */
243struct smux_ldisc_t {
 244 struct mutex mutex_lha0;
 245
246 int is_initialized;
247 int in_reset;
248 int ld_open_count;
249 struct tty_struct *tty;
250
 251 /* RX State Machine (single-threaded access by smux_rx_wq) */
 252 unsigned char recv_buf[SMUX_MAX_PKT_SIZE];
253 unsigned int recv_len;
254 unsigned int pkt_remain;
255 unsigned rx_state;
 256
257 /* RX Activity - accessed by multiple threads */
258 spinlock_t rx_lock_lha1;
 259 unsigned rx_activity_flag;
260
261 /* TX / Power */
262 spinlock_t tx_lock_lha2;
263 struct list_head lch_tx_ready_list;
264 unsigned power_state;
265 unsigned pwr_wakeup_delay_us;
266 unsigned tx_activity_flag;
267 unsigned powerdown_enabled;
268};
269
270
271/* data structures */
272static struct smux_lch_t smux_lch[SMUX_NUM_LOGICAL_CHANNELS];
273static struct smux_ldisc_t smux;
274static const char *tty_error_type[] = {
275 [TTY_NORMAL] = "normal",
276 [TTY_OVERRUN] = "overrun",
277 [TTY_BREAK] = "break",
278 [TTY_PARITY] = "parity",
279 [TTY_FRAME] = "framing",
280};
281
282static const char *smux_cmds[] = {
283 [SMUX_CMD_DATA] = "DATA",
284 [SMUX_CMD_OPEN_LCH] = "OPEN",
285 [SMUX_CMD_CLOSE_LCH] = "CLOSE",
286 [SMUX_CMD_STATUS] = "STATUS",
287 [SMUX_CMD_PWR_CTL] = "PWR",
288 [SMUX_CMD_BYTE] = "Raw Byte",
289};
290
291static void smux_notify_local_fn(struct work_struct *work);
292static DECLARE_WORK(smux_notify_local, smux_notify_local_fn);
293
294static struct workqueue_struct *smux_notify_wq;
295static size_t handle_size;
296static struct kfifo smux_notify_fifo;
297static int queued_fifo_notifications;
298static DEFINE_SPINLOCK(notify_lock_lhc1);
299
300static struct workqueue_struct *smux_tx_wq;
 301static struct workqueue_struct *smux_rx_wq;
 302static void smux_tx_worker(struct work_struct *work);
303static DECLARE_WORK(smux_tx_work, smux_tx_worker);
304
305static void smux_wakeup_worker(struct work_struct *work);
 306static void smux_rx_retry_worker(struct work_struct *work);
 307static void smux_rx_worker(struct work_struct *work);
 308static DECLARE_WORK(smux_wakeup_work, smux_wakeup_worker);
309static DECLARE_DELAYED_WORK(smux_wakeup_delayed_work, smux_wakeup_worker);
310
311static void smux_inactivity_worker(struct work_struct *work);
312static DECLARE_WORK(smux_inactivity_work, smux_inactivity_worker);
313static DECLARE_DELAYED_WORK(smux_delayed_inactivity_work,
314 smux_inactivity_worker);
315
316static long msm_smux_tiocm_get_atomic(struct smux_lch_t *ch);
317static void list_channel(struct smux_lch_t *ch);
318static int smux_send_status_cmd(struct smux_lch_t *ch);
319static int smux_dispatch_rx_pkt(struct smux_pkt_t *pkt);
 320static void smux_purge_ch_tx_queue(struct smux_lch_t *ch);
321static int schedule_notify(uint8_t lcid, int event,
322 const union notifier_metadata *metadata);
323static int ssr_notifier_cb(struct notifier_block *this,
324 unsigned long code,
325 void *data);
 326
327/**
328 * Convert TTY Error Flags to string for logging purposes.
329 *
330 * @flag TTY_* flag
331 * @returns String description or NULL if unknown
332 */
333static const char *tty_flag_to_str(unsigned flag)
334{
335 if (flag < ARRAY_SIZE(tty_error_type))
336 return tty_error_type[flag];
337 return NULL;
338}
339
340/**
341 * Convert SMUX Command to string for logging purposes.
342 *
343 * @cmd SMUX command
344 * @returns String description or NULL if unknown
345 */
346static const char *cmd_to_str(unsigned cmd)
347{
348 if (cmd < ARRAY_SIZE(smux_cmds))
349 return smux_cmds[cmd];
350 return NULL;
351}
352
353/**
354 * Set the reset state due to an unrecoverable failure.
355 */
356static void smux_enter_reset(void)
357{
358 pr_err("%s: unrecoverable failure, waiting for ssr\n", __func__);
359 smux.in_reset = 1;
360}
361
362static int lch_init(void)
363{
364 unsigned int id;
365 struct smux_lch_t *ch;
366 int i = 0;
367
368 handle_size = sizeof(struct smux_notify_handle *);
369
370 smux_notify_wq = create_singlethread_workqueue("smux_notify_wq");
371 smux_tx_wq = create_singlethread_workqueue("smux_tx_wq");
 372 smux_rx_wq = create_singlethread_workqueue("smux_rx_wq");
 373
 374 if (!smux_notify_wq || !smux_tx_wq || !smux_rx_wq) {
375 SMUX_DBG("%s: create_singlethread_workqueue ENOMEM\n",
376 __func__);
377 return -ENOMEM;
378 }
379
380 i |= kfifo_alloc(&smux_notify_fifo,
381 SMUX_NOTIFY_FIFO_SIZE * handle_size,
382 GFP_KERNEL);
383 i |= smux_loopback_init();
384
385 if (i) {
386 pr_err("%s: out of memory error\n", __func__);
387 return -ENOMEM;
388 }
389
390 for (id = 0 ; id < SMUX_NUM_LOGICAL_CHANNELS; id++) {
391 ch = &smux_lch[id];
392
393 spin_lock_init(&ch->state_lock_lhb1);
394 ch->lcid = id;
395 ch->local_state = SMUX_LCH_LOCAL_CLOSED;
396 ch->local_mode = SMUX_LCH_MODE_NORMAL;
397 ch->local_tiocm = 0x0;
398 ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
399 ch->remote_mode = SMUX_LCH_MODE_NORMAL;
400 ch->remote_tiocm = 0x0;
401 ch->tx_flow_control = 0;
402 ch->priv = 0;
403 ch->notify = 0;
404 ch->get_rx_buffer = 0;
405
 406 INIT_LIST_HEAD(&ch->rx_retry_queue);
407 ch->rx_retry_queue_cnt = 0;
408 INIT_DELAYED_WORK(&ch->rx_retry_work, smux_rx_retry_worker);
409
 410 spin_lock_init(&ch->tx_lock_lhb2);
411 INIT_LIST_HEAD(&ch->tx_queue);
412 INIT_LIST_HEAD(&ch->tx_ready_list);
413 ch->tx_pending_data_cnt = 0;
414 ch->notify_lwm = 0;
415 }
416
417 return 0;
418}
419
 420/**
421 * Empty and cleanup all SMUX logical channels for subsystem restart or line
422 * discipline disconnect.
423 */
424static void smux_lch_purge(void)
425{
426 struct smux_lch_t *ch;
427 unsigned long flags;
428 int i;
429
430 /* Empty TX ready list */
431 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
432 while (!list_empty(&smux.lch_tx_ready_list)) {
433 SMUX_DBG("%s: emptying ready list %p\n",
434 __func__, smux.lch_tx_ready_list.next);
435 ch = list_first_entry(&smux.lch_tx_ready_list,
436 struct smux_lch_t,
437 tx_ready_list);
438 list_del(&ch->tx_ready_list);
439 INIT_LIST_HEAD(&ch->tx_ready_list);
440 }
441 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
442
443 /* Close all ports */
444 for (i = 0 ; i < SMUX_NUM_LOGICAL_CHANNELS; i++) {
445 ch = &smux_lch[i];
446 SMUX_DBG("%s: cleaning up lcid %d\n", __func__, i);
447
448 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
449
450 /* Purge TX queue */
451 spin_lock(&ch->tx_lock_lhb2);
452 smux_purge_ch_tx_queue(ch);
453 spin_unlock(&ch->tx_lock_lhb2);
454
455 /* Notify user of disconnect and reset channel state */
456 if (ch->local_state == SMUX_LCH_LOCAL_OPENED ||
457 ch->local_state == SMUX_LCH_LOCAL_CLOSING) {
458 union notifier_metadata meta;
459
460 meta.disconnected.is_ssr = smux.in_reset;
461 schedule_notify(ch->lcid, SMUX_DISCONNECTED, &meta);
462 }
463
464 ch->local_state = SMUX_LCH_LOCAL_CLOSED;
465 ch->local_mode = SMUX_LCH_MODE_NORMAL;
466 ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
467 ch->remote_mode = SMUX_LCH_MODE_NORMAL;
468 ch->tx_flow_control = 0;
469
470 /* Purge RX retry queue */
471 if (ch->rx_retry_queue_cnt)
472 queue_delayed_work(smux_rx_wq, &ch->rx_retry_work, 0);
473
474 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
475 }
476
477 /* Flush TX/RX workqueues */
478 SMUX_DBG("%s: flushing tx wq\n", __func__);
479 flush_workqueue(smux_tx_wq);
480 SMUX_DBG("%s: flushing rx wq\n", __func__);
481 flush_workqueue(smux_rx_wq);
482}
483
 484int smux_assert_lch_id(uint32_t lcid)
485{
486 if (lcid >= SMUX_NUM_LOGICAL_CHANNELS)
487 return -ENXIO;
488 else
489 return 0;
490}
491
492/**
493 * Log packet information for debug purposes.
494 *
495 * @pkt Packet to log
496 * @is_recv 1 = RX packet; 0 = TX Packet
497 *
498 * [DIR][LCID] [LOCAL_STATE][LOCAL_MODE]:[REMOTE_STATE][REMOTE_MODE] PKT Info
499 *
500 * PKT Info:
501 * [CMD] flags [flags] len [PAYLOAD_LEN]:[PAD_LEN] [Payload hex bytes]
502 *
503 * Direction: R = Receive, S = Send
504 * Local State: C = Closed; c = closing; o = opening; O = Opened
505 * Local Mode: L = Local loopback; R = Remote loopback; N = Normal
506 * Remote State: C = Closed; O = Opened
507 * Remote Mode: R = Remote loopback; N = Normal
508 */
509static void smux_log_pkt(struct smux_pkt_t *pkt, int is_recv)
510{
511 char logbuf[SMUX_PKT_LOG_SIZE];
512 char cmd_extra[16];
513 int i = 0;
514 int count;
515 int len;
516 char local_state;
517 char local_mode;
518 char remote_state;
519 char remote_mode;
520 struct smux_lch_t *ch;
521 unsigned char *data;
522
523 ch = &smux_lch[pkt->hdr.lcid];
524
525 switch (ch->local_state) {
526 case SMUX_LCH_LOCAL_CLOSED:
527 local_state = 'C';
528 break;
529 case SMUX_LCH_LOCAL_OPENING:
530 local_state = 'o';
531 break;
532 case SMUX_LCH_LOCAL_OPENED:
533 local_state = 'O';
534 break;
535 case SMUX_LCH_LOCAL_CLOSING:
536 local_state = 'c';
537 break;
538 default:
539 local_state = 'U';
540 break;
541 }
542
543 switch (ch->local_mode) {
544 case SMUX_LCH_MODE_LOCAL_LOOPBACK:
545 local_mode = 'L';
546 break;
547 case SMUX_LCH_MODE_REMOTE_LOOPBACK:
548 local_mode = 'R';
549 break;
550 case SMUX_LCH_MODE_NORMAL:
551 local_mode = 'N';
552 break;
553 default:
554 local_mode = 'U';
555 break;
556 }
557
558 switch (ch->remote_state) {
559 case SMUX_LCH_REMOTE_CLOSED:
560 remote_state = 'C';
561 break;
562 case SMUX_LCH_REMOTE_OPENED:
563 remote_state = 'O';
564 break;
565
566 default:
567 remote_state = 'U';
568 break;
569 }
570
571 switch (ch->remote_mode) {
572 case SMUX_LCH_MODE_REMOTE_LOOPBACK:
573 remote_mode = 'R';
574 break;
575 case SMUX_LCH_MODE_NORMAL:
576 remote_mode = 'N';
577 break;
578 default:
579 remote_mode = 'U';
580 break;
581 }
582
583 /* determine command type (ACK, etc) */
584 cmd_extra[0] = '\0';
585 switch (pkt->hdr.cmd) {
586 case SMUX_CMD_OPEN_LCH:
587 if (pkt->hdr.flags & SMUX_CMD_OPEN_ACK)
588 snprintf(cmd_extra, sizeof(cmd_extra), " ACK");
589 break;
590 case SMUX_CMD_CLOSE_LCH:
591 if (pkt->hdr.flags & SMUX_CMD_CLOSE_ACK)
592 snprintf(cmd_extra, sizeof(cmd_extra), " ACK");
593 break;
594 };
595
596 i += snprintf(logbuf + i, SMUX_PKT_LOG_SIZE - i,
597 "smux: %c%d %c%c:%c%c %s%s flags %x len %d:%d ",
598 is_recv ? 'R' : 'S', pkt->hdr.lcid,
599 local_state, local_mode,
600 remote_state, remote_mode,
601 cmd_to_str(pkt->hdr.cmd), cmd_extra, pkt->hdr.flags,
602 pkt->hdr.payload_len, pkt->hdr.pad_len);
603
604 len = (pkt->hdr.payload_len > 16) ? 16 : pkt->hdr.payload_len;
605 data = (unsigned char *)pkt->payload;
606 for (count = 0; count < len; count++)
607 i += snprintf(logbuf + i, SMUX_PKT_LOG_SIZE - i,
608 "%02x ", (unsigned)data[count]);
609
610 pr_info("%s\n", logbuf);
611}
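/*
 * Example of the log format documented above, with hypothetical values: a
 * 4-byte DATA packet received on an open, normal-mode channel 2 would log
 * roughly as
 *
 *	smux: R2 ON:ON DATA flags 0 len 4:0 de ad be ef
 */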
612
613static void smux_notify_local_fn(struct work_struct *work)
614{
615 struct smux_notify_handle *notify_handle = NULL;
616 union notifier_metadata *metadata = NULL;
617 unsigned long flags;
618 int i;
619
620 for (;;) {
621 /* retrieve notification */
622 spin_lock_irqsave(&notify_lock_lhc1, flags);
623 if (kfifo_len(&smux_notify_fifo) >= handle_size) {
624 i = kfifo_out(&smux_notify_fifo,
625 &notify_handle,
626 handle_size);
627 if (i != handle_size) {
628 pr_err("%s: unable to retrieve handle %d expected %d\n",
629 __func__, i, handle_size);
630 spin_unlock_irqrestore(&notify_lock_lhc1, flags);
631 break;
632 }
633 } else {
634 spin_unlock_irqrestore(&notify_lock_lhc1, flags);
635 break;
636 }
637 --queued_fifo_notifications;
638 spin_unlock_irqrestore(&notify_lock_lhc1, flags);
639
640 /* notify client */
641 metadata = notify_handle->metadata;
642 notify_handle->notify(notify_handle->priv,
643 notify_handle->event_type,
644 metadata);
645
646 kfree(metadata);
647 kfree(notify_handle);
648 }
649}
650
651/**
652 * Initialize existing packet.
653 */
654void smux_init_pkt(struct smux_pkt_t *pkt)
655{
656 memset(pkt, 0x0, sizeof(*pkt));
657 pkt->hdr.magic = SMUX_MAGIC;
658 INIT_LIST_HEAD(&pkt->list);
659}
660
661/**
662 * Allocate and initialize packet.
663 *
664 * If a payload is needed, either set it directly and ensure that it's freed or
 665 * use smux_alloc_pkt_payload() to allocate the payload; it will then be freed
 666 * automatically when smux_free_pkt() is called.
667 */
668struct smux_pkt_t *smux_alloc_pkt(void)
669{
670 struct smux_pkt_t *pkt;
671
672 /* Consider a free list implementation instead of kmalloc */
673 pkt = kmalloc(sizeof(struct smux_pkt_t), GFP_ATOMIC);
674 if (!pkt) {
675 pr_err("%s: out of memory\n", __func__);
676 return NULL;
677 }
678 smux_init_pkt(pkt);
679 pkt->allocated = 1;
680
681 return pkt;
682}
683
684/**
685 * Free packet.
686 *
687 * @pkt Packet to free (may be NULL)
688 *
689 * If payload was allocated using smux_alloc_pkt_payload(), then it is freed as
690 * well. Otherwise, the caller is responsible for freeing the payload.
691 */
692void smux_free_pkt(struct smux_pkt_t *pkt)
693{
694 if (pkt) {
695 if (pkt->free_payload)
696 kfree(pkt->payload);
697 if (pkt->allocated)
698 kfree(pkt);
699 }
700}
701
702/**
703 * Allocate packet payload.
704 *
705 * @pkt Packet to add payload to
706 *
707 * @returns 0 on success, <0 upon error
708 *
709 * A flag is set to signal smux_free_pkt() to free the payload.
710 */
711int smux_alloc_pkt_payload(struct smux_pkt_t *pkt)
712{
713 if (!pkt)
714 return -EINVAL;
715
716 pkt->payload = kmalloc(pkt->hdr.payload_len, GFP_ATOMIC);
717 pkt->free_payload = 1;
718 if (!pkt->payload) {
719 pr_err("%s: unable to malloc %d bytes for payload\n",
720 __func__, pkt->hdr.payload_len);
721 return -ENOMEM;
722 }
723
724 return 0;
725}
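/*
 * Typical allocation sequence (illustrative sketch only; lcid, len and src
 * are placeholders). The remote-loopback echo path in
 * smux_handle_rx_data_cmd() below does essentially this, and smux_free_pkt()
 * also releases the payload allocated here:
 *
 *	struct smux_pkt_t *pkt = smux_alloc_pkt();
 *
 *	if (pkt) {
 *		pkt->hdr.cmd = SMUX_CMD_DATA;
 *		pkt->hdr.lcid = lcid;
 *		pkt->hdr.payload_len = len;
 *		if (!smux_alloc_pkt_payload(pkt))
 *			memcpy(pkt->payload, src, len);
 *		smux_tx_queue(pkt, &smux_lch[lcid], 1);
 *	}
 */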
726
727static int schedule_notify(uint8_t lcid, int event,
728 const union notifier_metadata *metadata)
729{
730 struct smux_notify_handle *notify_handle = 0;
731 union notifier_metadata *meta_copy = 0;
732 struct smux_lch_t *ch;
733 int i;
734 unsigned long flags;
735 int ret = 0;
736
737 ch = &smux_lch[lcid];
738 notify_handle = kzalloc(sizeof(struct smux_notify_handle),
739 GFP_ATOMIC);
740 if (!notify_handle) {
741 pr_err("%s: out of memory\n", __func__);
742 ret = -ENOMEM;
743 goto free_out;
744 }
745
746 notify_handle->notify = ch->notify;
747 notify_handle->priv = ch->priv;
748 notify_handle->event_type = event;
749 if (metadata) {
750 meta_copy = kzalloc(sizeof(union notifier_metadata),
751 GFP_ATOMIC);
752 if (!meta_copy) {
753 pr_err("%s: out of memory\n", __func__);
754 ret = -ENOMEM;
755 goto free_out;
756 }
757 *meta_copy = *metadata;
758 notify_handle->metadata = meta_copy;
759 } else {
760 notify_handle->metadata = NULL;
761 }
762
763 spin_lock_irqsave(&notify_lock_lhc1, flags);
764 i = kfifo_avail(&smux_notify_fifo);
765 if (i < handle_size) {
766 pr_err("%s: fifo full error %d expected %d\n",
767 __func__, i, handle_size);
768 ret = -ENOMEM;
769 goto unlock_out;
770 }
771
772 i = kfifo_in(&smux_notify_fifo, &notify_handle, handle_size);
773 if (i < 0 || i != handle_size) {
774 pr_err("%s: fifo not available error %d (expected %d)\n",
775 __func__, i, handle_size);
776 ret = -ENOSPC;
777 goto unlock_out;
778 }
779 ++queued_fifo_notifications;
780
781unlock_out:
782 spin_unlock_irqrestore(&notify_lock_lhc1, flags);
783
784free_out:
785 queue_work(smux_notify_wq, &smux_notify_local);
786 if (ret < 0 && notify_handle) {
787 kfree(notify_handle->metadata);
788 kfree(notify_handle);
789 }
790 return ret;
791}
792
793/**
794 * Returns the serialized size of a packet.
795 *
796 * @pkt Packet to serialize
797 *
798 * @returns Serialized length of packet
799 */
800static unsigned int smux_serialize_size(struct smux_pkt_t *pkt)
801{
802 unsigned int size;
803
804 size = sizeof(struct smux_hdr_t);
805 size += pkt->hdr.payload_len;
806 size += pkt->hdr.pad_len;
807
808 return size;
809}
810
811/**
812 * Serialize packet @pkt into output buffer @data.
813 *
814 * @pkt Packet to serialize
815 * @out Destination buffer pointer
816 * @out_len Size of serialized packet
817 *
818 * @returns 0 for success
819 */
820int smux_serialize(struct smux_pkt_t *pkt, char *out,
821 unsigned int *out_len)
822{
823 char *data_start = out;
824
825 if (smux_serialize_size(pkt) > SMUX_MAX_PKT_SIZE) {
826 pr_err("%s: packet size %d too big\n",
827 __func__, smux_serialize_size(pkt));
828 return -E2BIG;
829 }
830
831 memcpy(out, &pkt->hdr, sizeof(struct smux_hdr_t));
832 out += sizeof(struct smux_hdr_t);
833 if (pkt->payload) {
834 memcpy(out, pkt->payload, pkt->hdr.payload_len);
835 out += pkt->hdr.payload_len;
836 }
837 if (pkt->hdr.pad_len) {
838 memset(out, 0x0, pkt->hdr.pad_len);
839 out += pkt->hdr.pad_len;
840 }
841 *out_len = out - data_start;
842 return 0;
843}
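/*
 * Illustrative caller (not taken verbatim from this driver): serialize a
 * packet into a flat buffer before handing it to the transport.
 *
 *	char buf[SMUX_MAX_PKT_SIZE];
 *	unsigned int len;
 *
 *	if (smux_serialize(pkt, buf, &len) == 0)
 *		write_to_tty(buf, len);
 */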
844
845/**
846 * Serialize header and provide pointer to the data.
847 *
848 * @pkt Packet
849 * @out[out] Pointer to the serialized header data
850 * @out_len[out] Pointer to the serialized header length
851 */
852static void smux_serialize_hdr(struct smux_pkt_t *pkt, char **out,
853 unsigned int *out_len)
854{
855 *out = (char *)&pkt->hdr;
856 *out_len = sizeof(struct smux_hdr_t);
857}
858
859/**
860 * Serialize payload and provide pointer to the data.
861 *
862 * @pkt Packet
863 * @out[out] Pointer to the serialized payload data
864 * @out_len[out] Pointer to the serialized payload length
865 */
866static void smux_serialize_payload(struct smux_pkt_t *pkt, char **out,
867 unsigned int *out_len)
868{
869 *out = pkt->payload;
870 *out_len = pkt->hdr.payload_len;
871}
872
873/**
874 * Serialize padding and provide pointer to the data.
875 *
876 * @pkt Packet
877 * @out[out] Pointer to the serialized padding (always NULL)
878 * @out_len[out] Pointer to the serialized payload length
879 *
 880 * Since the padding field value is undefined, only the size of the padding
881 * (@out_len) is set and the buffer pointer (@out) will always be NULL.
882 */
883static void smux_serialize_padding(struct smux_pkt_t *pkt, char **out,
884 unsigned int *out_len)
885{
886 *out = NULL;
887 *out_len = pkt->hdr.pad_len;
888}
889
890/**
891 * Write data to TTY framework and handle breaking the writes up if needed.
892 *
893 * @data Data to write
894 * @len Length of data
895 *
896 * @returns 0 for success, < 0 for failure
897 */
898static int write_to_tty(char *data, unsigned len)
899{
900 int data_written;
901
902 if (!data)
903 return 0;
904
 905 while (len > 0 && !smux.in_reset) {
 906 data_written = smux.tty->ops->write(smux.tty, data, len);
907 if (data_written >= 0) {
908 len -= data_written;
909 data += data_written;
910 } else {
911 pr_err("%s: TTY write returned error %d\n",
912 __func__, data_written);
913 return data_written;
914 }
915
916 if (len)
917 tty_wait_until_sent(smux.tty,
918 msecs_to_jiffies(TTY_BUFFER_FULL_WAIT_MS));
 919 }
920 return 0;
921}
922
923/**
924 * Write packet to TTY.
925 *
926 * @pkt packet to write
927 *
928 * @returns 0 on success
929 */
930static int smux_tx_tty(struct smux_pkt_t *pkt)
931{
932 char *data;
933 unsigned int len;
934 int ret;
935
936 if (!smux.tty) {
937 pr_err("%s: TTY not initialized", __func__);
938 return -ENOTTY;
939 }
940
941 if (pkt->hdr.cmd == SMUX_CMD_BYTE) {
942 SMUX_DBG("%s: tty send single byte\n", __func__);
943 ret = write_to_tty(&pkt->hdr.flags, 1);
944 return ret;
945 }
946
947 smux_serialize_hdr(pkt, &data, &len);
948 ret = write_to_tty(data, len);
949 if (ret) {
950 pr_err("%s: failed %d to write header %d\n",
951 __func__, ret, len);
952 return ret;
953 }
954
955 smux_serialize_payload(pkt, &data, &len);
956 ret = write_to_tty(data, len);
957 if (ret) {
958 pr_err("%s: failed %d to write payload %d\n",
959 __func__, ret, len);
960 return ret;
961 }
962
963 smux_serialize_padding(pkt, &data, &len);
964 while (len > 0) {
965 char zero = 0x0;
966 ret = write_to_tty(&zero, 1);
967 if (ret) {
968 pr_err("%s: failed %d to write padding %d\n",
969 __func__, ret, len);
970 return ret;
971 }
972 --len;
973 }
974 return 0;
975}
976
977/**
978 * Send a single character.
979 *
980 * @ch Character to send
981 */
982static void smux_send_byte(char ch)
983{
984 struct smux_pkt_t pkt;
985
986 smux_init_pkt(&pkt);
987
988 pkt.hdr.cmd = SMUX_CMD_BYTE;
989 pkt.hdr.flags = ch;
990 pkt.hdr.lcid = 0;
991 pkt.hdr.flags = ch;
992 SMUX_LOG_PKT_TX(&pkt);
993 if (!smux_byte_loopback)
994 smux_tx_tty(&pkt);
995 else
996 smux_tx_loopback(&pkt);
997}
998
999/**
1000 * Receive a single-character packet (used for internal testing).
1001 *
1002 * @ch Character to receive
1003 * @lcid Logical channel ID for packet
1004 *
1005 * @returns 0 for success
 1006 */
1007static int smux_receive_byte(char ch, int lcid)
1008{
1009 struct smux_pkt_t pkt;
1010
1011 smux_init_pkt(&pkt);
1012 pkt.hdr.lcid = lcid;
1013 pkt.hdr.cmd = SMUX_CMD_BYTE;
1014 pkt.hdr.flags = ch;
1015
1016 return smux_dispatch_rx_pkt(&pkt);
1017}
1018
1019/**
1020 * Queue packet for transmit.
1021 *
1022 * @pkt_ptr Packet to queue
1023 * @ch Channel to queue packet on
1024 * @queue Queue channel on ready list
1025 */
1026static void smux_tx_queue(struct smux_pkt_t *pkt_ptr, struct smux_lch_t *ch,
1027 int queue)
1028{
1029 unsigned long flags;
1030
1031 SMUX_DBG("%s: queuing pkt %p\n", __func__, pkt_ptr);
1032
1033 spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
1034 list_add_tail(&pkt_ptr->list, &ch->tx_queue);
1035 spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);
1036
1037 if (queue)
1038 list_channel(ch);
1039}
1040
1041/**
1042 * Handle receive OPEN ACK command.
1043 *
1044 * @pkt Received packet
1045 *
1046 * @returns 0 for success
 1047 */
1048static int smux_handle_rx_open_ack(struct smux_pkt_t *pkt)
1049{
1050 uint8_t lcid;
1051 int ret;
1052 struct smux_lch_t *ch;
1053 int enable_powerdown = 0;
1054
1055 lcid = pkt->hdr.lcid;
1056 ch = &smux_lch[lcid];
1057
1058 spin_lock(&ch->state_lock_lhb1);
1059 if (ch->local_state == SMUX_LCH_LOCAL_OPENING) {
1060 SMUX_DBG("lcid %d local state 0x%x -> 0x%x\n", lcid,
1061 ch->local_state,
1062 SMUX_LCH_LOCAL_OPENED);
1063
1064 if (pkt->hdr.flags & SMUX_CMD_OPEN_POWER_COLLAPSE)
1065 enable_powerdown = 1;
1066
1067 ch->local_state = SMUX_LCH_LOCAL_OPENED;
1068 if (ch->remote_state == SMUX_LCH_REMOTE_OPENED)
1069 schedule_notify(lcid, SMUX_CONNECTED, NULL);
1070 ret = 0;
1071 } else if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
1072 SMUX_DBG("Remote loopback OPEN ACK received\n");
1073 ret = 0;
1074 } else {
1075 pr_err("%s: lcid %d state 0x%x open ack invalid\n",
1076 __func__, lcid, ch->local_state);
1077 ret = -EINVAL;
1078 }
1079 spin_unlock(&ch->state_lock_lhb1);
1080
1081 if (enable_powerdown) {
1082 spin_lock(&smux.tx_lock_lha2);
1083 if (!smux.powerdown_enabled) {
1084 smux.powerdown_enabled = 1;
1085 SMUX_DBG("%s: enabling power-collapse support\n",
1086 __func__);
1087 }
1088 spin_unlock(&smux.tx_lock_lha2);
1089 }
1090
1091 return ret;
1092}
1093
1094static int smux_handle_close_ack(struct smux_pkt_t *pkt)
1095{
1096 uint8_t lcid;
1097 int ret;
1098 struct smux_lch_t *ch;
1099 union notifier_metadata meta_disconnected;
1100 unsigned long flags;
1101
1102 lcid = pkt->hdr.lcid;
1103 ch = &smux_lch[lcid];
1104 meta_disconnected.disconnected.is_ssr = 0;
1105
1106 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
1107
1108 if (ch->local_state == SMUX_LCH_LOCAL_CLOSING) {
1109 SMUX_DBG("lcid %d local state 0x%x -> 0x%x\n", lcid,
1110 SMUX_LCH_LOCAL_CLOSING,
1111 SMUX_LCH_LOCAL_CLOSED);
1112 ch->local_state = SMUX_LCH_LOCAL_CLOSED;
1113 if (ch->remote_state == SMUX_LCH_REMOTE_CLOSED)
1114 schedule_notify(lcid, SMUX_DISCONNECTED,
1115 &meta_disconnected);
1116 ret = 0;
1117 } else if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
1118 SMUX_DBG("Remote loopback CLOSE ACK received\n");
1119 ret = 0;
1120 } else {
1121 pr_err("%s: lcid %d state 0x%x close ack invalid\n",
1122 __func__, lcid, ch->local_state);
1123 ret = -EINVAL;
1124 }
1125 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
1126 return ret;
1127}
1128
1129/**
1130 * Handle receive OPEN command.
1131 *
1132 * @pkt Received packet
1133 *
1134 * @returns 0 for success
 1135 */
1136static int smux_handle_rx_open_cmd(struct smux_pkt_t *pkt)
1137{
1138 uint8_t lcid;
1139 int ret;
1140 struct smux_lch_t *ch;
1141 struct smux_pkt_t *ack_pkt;
 1142 unsigned long flags;
 1143 int tx_ready = 0;
1144 int enable_powerdown = 0;
1145
1146 if (pkt->hdr.flags & SMUX_CMD_OPEN_ACK)
1147 return smux_handle_rx_open_ack(pkt);
1148
1149 lcid = pkt->hdr.lcid;
1150 ch = &smux_lch[lcid];
1151
 1152 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
 1153
1154 if (ch->remote_state == SMUX_LCH_REMOTE_CLOSED) {
1155 SMUX_DBG("lcid %d remote state 0x%x -> 0x%x\n", lcid,
1156 SMUX_LCH_REMOTE_CLOSED,
1157 SMUX_LCH_REMOTE_OPENED);
1158
1159 ch->remote_state = SMUX_LCH_REMOTE_OPENED;
1160 if (pkt->hdr.flags & SMUX_CMD_OPEN_POWER_COLLAPSE)
1161 enable_powerdown = 1;
1162
1163 /* Send Open ACK */
1164 ack_pkt = smux_alloc_pkt();
1165 if (!ack_pkt) {
1166 /* exit out to allow retrying this later */
1167 ret = -ENOMEM;
1168 goto out;
1169 }
1170 ack_pkt->hdr.cmd = SMUX_CMD_OPEN_LCH;
1171 ack_pkt->hdr.flags = SMUX_CMD_OPEN_ACK
1172 | SMUX_CMD_OPEN_POWER_COLLAPSE;
1173 ack_pkt->hdr.lcid = lcid;
1174 ack_pkt->hdr.payload_len = 0;
1175 ack_pkt->hdr.pad_len = 0;
1176 if (pkt->hdr.flags & SMUX_CMD_OPEN_REMOTE_LOOPBACK) {
1177 ch->remote_mode = SMUX_LCH_MODE_REMOTE_LOOPBACK;
1178 ack_pkt->hdr.flags |= SMUX_CMD_OPEN_REMOTE_LOOPBACK;
1179 }
1180 smux_tx_queue(ack_pkt, ch, 0);
1181 tx_ready = 1;
1182
1183 if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
1184 /*
1185 * Send an Open command to the remote side to
1186 * simulate our local client doing it.
1187 */
1188 ack_pkt = smux_alloc_pkt();
1189 if (ack_pkt) {
1190 ack_pkt->hdr.lcid = lcid;
1191 ack_pkt->hdr.cmd = SMUX_CMD_OPEN_LCH;
1192 ack_pkt->hdr.flags =
1193 SMUX_CMD_OPEN_POWER_COLLAPSE;
1194 ack_pkt->hdr.payload_len = 0;
1195 ack_pkt->hdr.pad_len = 0;
1196 smux_tx_queue(ack_pkt, ch, 0);
1197 tx_ready = 1;
1198 } else {
 1199 pr_err("%s: Remote loopback allocation failure\n",
1200 __func__);
1201 }
1202 } else if (ch->local_state == SMUX_LCH_LOCAL_OPENED) {
1203 schedule_notify(lcid, SMUX_CONNECTED, NULL);
1204 }
1205 ret = 0;
1206 } else {
1207 pr_err("%s: lcid %d remote state 0x%x open invalid\n",
1208 __func__, lcid, ch->remote_state);
1209 ret = -EINVAL;
1210 }
1211
1212out:
 1213 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
 1214
1215 if (enable_powerdown) {
 1216 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
 1217 if (!smux.powerdown_enabled) {
1218 smux.powerdown_enabled = 1;
1219 SMUX_DBG("%s: enabling power-collapse support\n",
1220 __func__);
1221 }
 1222 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
 1223 }
1224
1225 if (tx_ready)
1226 list_channel(ch);
1227
1228 return ret;
1229}
1230
1231/**
1232 * Handle receive CLOSE command.
1233 *
1234 * @pkt Received packet
1235 *
1236 * @returns 0 for success
 1237 */
1238static int smux_handle_rx_close_cmd(struct smux_pkt_t *pkt)
1239{
1240 uint8_t lcid;
1241 int ret;
1242 struct smux_lch_t *ch;
1243 struct smux_pkt_t *ack_pkt;
1244 union notifier_metadata meta_disconnected;
 1245 unsigned long flags;
 1246 int tx_ready = 0;
1247
1248 if (pkt->hdr.flags & SMUX_CMD_CLOSE_ACK)
1249 return smux_handle_close_ack(pkt);
1250
1251 lcid = pkt->hdr.lcid;
1252 ch = &smux_lch[lcid];
1253 meta_disconnected.disconnected.is_ssr = 0;
1254
 1255 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
 1256 if (ch->remote_state == SMUX_LCH_REMOTE_OPENED) {
1257 SMUX_DBG("lcid %d remote state 0x%x -> 0x%x\n", lcid,
1258 SMUX_LCH_REMOTE_OPENED,
1259 SMUX_LCH_REMOTE_CLOSED);
1260
1261 ack_pkt = smux_alloc_pkt();
1262 if (!ack_pkt) {
1263 /* exit out to allow retrying this later */
1264 ret = -ENOMEM;
1265 goto out;
1266 }
1267 ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
1268 ack_pkt->hdr.cmd = SMUX_CMD_CLOSE_LCH;
1269 ack_pkt->hdr.flags = SMUX_CMD_CLOSE_ACK;
1270 ack_pkt->hdr.lcid = lcid;
1271 ack_pkt->hdr.payload_len = 0;
1272 ack_pkt->hdr.pad_len = 0;
1273 smux_tx_queue(ack_pkt, ch, 0);
1274 tx_ready = 1;
1275
1276 if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
1277 /*
1278 * Send a Close command to the remote side to simulate
1279 * our local client doing it.
1280 */
1281 ack_pkt = smux_alloc_pkt();
1282 if (ack_pkt) {
1283 ack_pkt->hdr.lcid = lcid;
1284 ack_pkt->hdr.cmd = SMUX_CMD_CLOSE_LCH;
1285 ack_pkt->hdr.flags = 0;
1286 ack_pkt->hdr.payload_len = 0;
1287 ack_pkt->hdr.pad_len = 0;
1288 smux_tx_queue(ack_pkt, ch, 0);
1289 tx_ready = 1;
1290 } else {
 1291 pr_err("%s: Remote loopback allocation failure\n",
1292 __func__);
1293 }
1294 }
1295
1296 if (ch->local_state == SMUX_LCH_LOCAL_CLOSED)
1297 schedule_notify(lcid, SMUX_DISCONNECTED,
1298 &meta_disconnected);
1299 ret = 0;
1300 } else {
1301 pr_err("%s: lcid %d remote state 0x%x close invalid\n",
1302 __func__, lcid, ch->remote_state);
1303 ret = -EINVAL;
1304 }
1305out:
 1306 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
 1307 if (tx_ready)
1308 list_channel(ch);
1309
1310 return ret;
1311}
1312
 1313/**
1314 * Handle receive DATA command.
1315 *
1316 * @pkt Received packet
1317 *
1318 * @returns 0 for success
 1319 */
1320static int smux_handle_rx_data_cmd(struct smux_pkt_t *pkt)
1321{
1322 uint8_t lcid;
 1323 int ret = 0;
1324 int do_retry = 0;
 1325 int tmp;
1326 int rx_len;
1327 struct smux_lch_t *ch;
1328 union notifier_metadata metadata;
1329 int remote_loopback;
 1330 struct smux_pkt_t *ack_pkt;
1331 unsigned long flags;
1332
 1333 if (!pkt || smux_assert_lch_id(pkt->hdr.lcid)) {
1334 ret = -ENXIO;
1335 goto out;
1336 }
 1337
 1338 rx_len = pkt->hdr.payload_len;
1339 if (rx_len == 0) {
1340 ret = -EINVAL;
1341 goto out;
1342 }
1343
 1344 lcid = pkt->hdr.lcid;
1345 ch = &smux_lch[lcid];
1346 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
1347 remote_loopback = ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK;
1348
1349 if (ch->local_state != SMUX_LCH_LOCAL_OPENED
1350 && !remote_loopback) {
1351 pr_err("smux: ch %d error data on local state 0x%x",
1352 lcid, ch->local_state);
1353 ret = -EIO;
 1354 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
 1355 goto out;
1356 }
1357
1358 if (ch->remote_state != SMUX_LCH_REMOTE_OPENED) {
1359 pr_err("smux: ch %d error data on remote state 0x%x",
1360 lcid, ch->remote_state);
1361 ret = -EIO;
 1362 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
 1363 goto out;
1364 }
 1365
1366 if (!list_empty(&ch->rx_retry_queue)) {
1367 do_retry = 1;
1368 if ((ch->rx_retry_queue_cnt + 1) > SMUX_RX_RETRY_MAX_PKTS) {
1369 /* retry queue full */
1370 schedule_notify(lcid, SMUX_READ_FAIL, NULL);
1371 ret = -ENOMEM;
1372 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
1373 goto out;
1374 }
1375 }
 1376 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
 1377
 1378 if (remote_loopback) {
1379 /* Echo the data back to the remote client. */
1380 ack_pkt = smux_alloc_pkt();
1381 if (ack_pkt) {
1382 ack_pkt->hdr.lcid = lcid;
1383 ack_pkt->hdr.cmd = SMUX_CMD_DATA;
1384 ack_pkt->hdr.flags = 0;
1385 ack_pkt->hdr.payload_len = pkt->hdr.payload_len;
1386 if (ack_pkt->hdr.payload_len) {
1387 smux_alloc_pkt_payload(ack_pkt);
1388 memcpy(ack_pkt->payload, pkt->payload,
1389 ack_pkt->hdr.payload_len);
1390 }
1391 ack_pkt->hdr.pad_len = pkt->hdr.pad_len;
1392 smux_tx_queue(ack_pkt, ch, 0);
1393 list_channel(ch);
1394 } else {
 1395 pr_err("%s: Remote loopback allocation failure\n",
1396 __func__);
1397 }
1398 } else if (!do_retry) {
1399 /* request buffer from client */
 1400 metadata.read.pkt_priv = 0;
1401 metadata.read.buffer = 0;
 1402 tmp = ch->get_rx_buffer(ch->priv,
1403 (void **)&metadata.read.pkt_priv,
1404 (void **)&metadata.read.buffer,
1405 rx_len);
 1406
 1407 if (tmp == 0 && metadata.read.buffer) {
1408 /* place data into RX buffer */
1409 memcpy(metadata.read.buffer, pkt->payload,
 1410 rx_len);
 1411 metadata.read.len = rx_len;
1412 schedule_notify(lcid, SMUX_READ_DONE,
1413 &metadata);
1414 } else if (tmp == -EAGAIN ||
1415 (tmp == 0 && !metadata.read.buffer)) {
1416 /* buffer allocation failed - add to retry queue */
1417 do_retry = 1;
1418 } else if (tmp < 0) {
1419 schedule_notify(lcid, SMUX_READ_FAIL, NULL);
1420 ret = -ENOMEM;
 1421 }
1422 }
1423
 1424 if (do_retry) {
1425 struct smux_rx_pkt_retry *retry;
1426
1427 retry = kmalloc(sizeof(struct smux_rx_pkt_retry), GFP_KERNEL);
1428 if (!retry) {
1429 pr_err("%s: retry alloc failure\n", __func__);
1430 ret = -ENOMEM;
1431 schedule_notify(lcid, SMUX_READ_FAIL, NULL);
1432 goto out;
1433 }
1434 INIT_LIST_HEAD(&retry->rx_retry_list);
1435 retry->timeout_in_ms = SMUX_RX_RETRY_MIN_MS;
1436
1437 /* copy packet */
1438 retry->pkt = smux_alloc_pkt();
1439 if (!retry->pkt) {
1440 kfree(retry);
1441 pr_err("%s: pkt alloc failure\n", __func__);
1442 ret = -ENOMEM;
1443 schedule_notify(lcid, SMUX_READ_FAIL, NULL);
1444 goto out;
1445 }
1446 retry->pkt->hdr.lcid = lcid;
1447 retry->pkt->hdr.payload_len = pkt->hdr.payload_len;
1448 retry->pkt->hdr.pad_len = pkt->hdr.pad_len;
1449 if (retry->pkt->hdr.payload_len) {
1450 smux_alloc_pkt_payload(retry->pkt);
1451 memcpy(retry->pkt->payload, pkt->payload,
1452 retry->pkt->hdr.payload_len);
1453 }
1454
1455 /* add to retry queue */
1456 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
1457 list_add_tail(&retry->rx_retry_list, &ch->rx_retry_queue);
1458 ++ch->rx_retry_queue_cnt;
1459 if (ch->rx_retry_queue_cnt == 1)
1460 queue_delayed_work(smux_rx_wq, &ch->rx_retry_work,
1461 msecs_to_jiffies(retry->timeout_in_ms));
1462 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
1463 }
1464
 1465out:
 1466 return ret;
1467}
1468
1469/**
1470 * Handle receive byte command for testing purposes.
1471 *
1472 * @pkt Received packet
1473 *
1474 * @returns 0 for success
1475 */
1476static int smux_handle_rx_byte_cmd(struct smux_pkt_t *pkt)
1477{
1478 uint8_t lcid;
1479 int ret;
1480 struct smux_lch_t *ch;
1481 union notifier_metadata metadata;
1482 unsigned long flags;
1483
1484 if (!pkt || smux_assert_lch_id(pkt->hdr.lcid))
1485 return -ENXIO;
1486
1487 lcid = pkt->hdr.lcid;
1488 ch = &smux_lch[lcid];
1489 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
1490
1491 if (ch->local_state != SMUX_LCH_LOCAL_OPENED) {
1492 pr_err("smux: ch %d error data on local state 0x%x",
1493 lcid, ch->local_state);
1494 ret = -EIO;
1495 goto out;
1496 }
1497
1498 if (ch->remote_state != SMUX_LCH_REMOTE_OPENED) {
1499 pr_err("smux: ch %d error data on remote state 0x%x",
1500 lcid, ch->remote_state);
1501 ret = -EIO;
1502 goto out;
1503 }
1504
1505 metadata.read.pkt_priv = (void *)(int)pkt->hdr.flags;
1506 metadata.read.buffer = 0;
1507 schedule_notify(lcid, SMUX_READ_DONE, &metadata);
1508 ret = 0;
1509
1510out:
1511 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
1512 return ret;
1513}
1514
1515/**
1516 * Handle receive status command.
1517 *
1518 * @pkt Received packet
1519 *
1520 * @returns 0 for success
 1521 */
1522static int smux_handle_rx_status_cmd(struct smux_pkt_t *pkt)
1523{
1524 uint8_t lcid;
 1525 int ret = 0;
1526 struct smux_lch_t *ch;
1527 union notifier_metadata meta;
1528 unsigned long flags;
1529 int tx_ready = 0;
1530
1531 lcid = pkt->hdr.lcid;
1532 ch = &smux_lch[lcid];
1533
1534 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
1535 meta.tiocm.tiocm_old = ch->remote_tiocm;
1536 meta.tiocm.tiocm_new = pkt->hdr.flags;
1537
1538 /* update logical channel flow control */
1539 if ((meta.tiocm.tiocm_old & SMUX_CMD_STATUS_FLOW_CNTL) ^
1540 (meta.tiocm.tiocm_new & SMUX_CMD_STATUS_FLOW_CNTL)) {
1541 /* logical channel flow control changed */
1542 if (pkt->hdr.flags & SMUX_CMD_STATUS_FLOW_CNTL) {
1543 /* disabled TX */
1544 SMUX_DBG("TX Flow control enabled\n");
1545 ch->tx_flow_control = 1;
1546 } else {
1547 /* re-enable channel */
1548 SMUX_DBG("TX Flow control disabled\n");
1549 ch->tx_flow_control = 0;
1550 tx_ready = 1;
1551 }
1552 }
1553 meta.tiocm.tiocm_old = msm_smux_tiocm_get_atomic(ch);
1554 ch->remote_tiocm = pkt->hdr.flags;
1555 meta.tiocm.tiocm_new = msm_smux_tiocm_get_atomic(ch);
1556
1557 /* client notification for status change */
1558 if (IS_FULLY_OPENED(ch)) {
1559 if (meta.tiocm.tiocm_old != meta.tiocm.tiocm_new)
1560 schedule_notify(lcid, SMUX_TIOCM_UPDATE, &meta);
1561 ret = 0;
1562 }
1563 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
1564 if (tx_ready)
1565 list_channel(ch);
1566
1567 return ret;
1568}
1569
1570/**
1571 * Handle receive power command.
1572 *
1573 * @pkt Received packet
1574 *
1575 * @returns 0 for success
 1576 */
1577static int smux_handle_rx_power_cmd(struct smux_pkt_t *pkt)
1578{
1579 int tx_ready = 0;
1580 struct smux_pkt_t *ack_pkt;
 1581 unsigned long flags;
 1582
 1583 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
 1584 if (pkt->hdr.flags & SMUX_CMD_PWR_CTL_ACK) {
1585 /* local sleep request ack */
1586 if (smux.power_state == SMUX_PWR_TURNING_OFF) {
1587 /* Power-down complete, turn off UART */
1588 SMUX_DBG("%s: Power %d->%d\n", __func__,
1589 smux.power_state, SMUX_PWR_OFF_FLUSH);
1590 smux.power_state = SMUX_PWR_OFF_FLUSH;
1591 queue_work(smux_tx_wq, &smux_inactivity_work);
1592 } else {
1593 pr_err("%s: sleep request ack invalid in state %d\n",
1594 __func__, smux.power_state);
1595 }
1596 } else {
1597 /* remote sleep request */
1598 if (smux.power_state == SMUX_PWR_ON
1599 || smux.power_state == SMUX_PWR_TURNING_OFF) {
1600 ack_pkt = smux_alloc_pkt();
1601 if (ack_pkt) {
1602 SMUX_DBG("%s: Power %d->%d\n", __func__,
1603 smux.power_state,
1604 SMUX_PWR_TURNING_OFF_FLUSH);
1605
1606 /* send power-down request */
1607 ack_pkt->hdr.cmd = SMUX_CMD_PWR_CTL;
1608 ack_pkt->hdr.flags = SMUX_CMD_PWR_CTL_ACK;
1609 ack_pkt->hdr.lcid = pkt->hdr.lcid;
1610 smux_tx_queue(ack_pkt,
1611 &smux_lch[ack_pkt->hdr.lcid], 0);
1612 tx_ready = 1;
1613 smux.power_state = SMUX_PWR_TURNING_OFF_FLUSH;
1614 queue_delayed_work(smux_tx_wq,
1615 &smux_delayed_inactivity_work,
1616 msecs_to_jiffies(
1617 SMUX_INACTIVITY_TIMEOUT_MS));
1618 }
1619 } else {
1620 pr_err("%s: sleep request invalid in state %d\n",
1621 __func__, smux.power_state);
1622 }
1623 }
 1624 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
 1625
1626 if (tx_ready)
1627 list_channel(&smux_lch[ack_pkt->hdr.lcid]);
1628
1629 return 0;
1630}
1631
1632/**
1633 * Handle dispatching a completed packet for receive processing.
1634 *
1635 * @pkt Packet to process
1636 *
1637 * @returns 0 for success
 1638 */
1639static int smux_dispatch_rx_pkt(struct smux_pkt_t *pkt)
1640{
1641 int ret;
1642
1643 SMUX_LOG_PKT_RX(pkt);
1644
1645 switch (pkt->hdr.cmd) {
1646 case SMUX_CMD_OPEN_LCH:
1647 ret = smux_handle_rx_open_cmd(pkt);
1648 break;
1649
1650 case SMUX_CMD_DATA:
1651 ret = smux_handle_rx_data_cmd(pkt);
1652 break;
1653
1654 case SMUX_CMD_CLOSE_LCH:
1655 ret = smux_handle_rx_close_cmd(pkt);
1656 break;
1657
1658 case SMUX_CMD_STATUS:
1659 ret = smux_handle_rx_status_cmd(pkt);
1660 break;
1661
1662 case SMUX_CMD_PWR_CTL:
1663 ret = smux_handle_rx_power_cmd(pkt);
1664 break;
1665
1666 case SMUX_CMD_BYTE:
1667 ret = smux_handle_rx_byte_cmd(pkt);
1668 break;
1669
1670 default:
1671 pr_err("%s: command %d unknown\n", __func__, pkt->hdr.cmd);
1672 ret = -EINVAL;
1673 }
1674 return ret;
1675}
1676
1677/**
1678 * Deserializes a packet and dispatches it to the packet receive logic.
1679 *
1680 * @data Raw data for one packet
1681 * @len Length of the data
1682 *
1683 * @returns 0 for success
 1684 */
1685static int smux_deserialize(unsigned char *data, int len)
1686{
1687 struct smux_pkt_t recv;
1688 uint8_t lcid;
1689
1690 smux_init_pkt(&recv);
1691
1692 /*
1693 * It may be possible to optimize this to not use the
1694 * temporary buffer.
1695 */
1696 memcpy(&recv.hdr, data, sizeof(struct smux_hdr_t));
1697
1698 if (recv.hdr.magic != SMUX_MAGIC) {
1699 pr_err("%s: invalid header magic\n", __func__);
1700 return -EINVAL;
1701 }
1702
1703 lcid = recv.hdr.lcid;
1704 if (smux_assert_lch_id(lcid)) {
1705 pr_err("%s: invalid channel id %d\n", __func__, lcid);
1706 return -ENXIO;
1707 }
1708
1709 if (recv.hdr.payload_len)
1710 recv.payload = data + sizeof(struct smux_hdr_t);
1711
1712 return smux_dispatch_rx_pkt(&recv);
1713}
1714
1715/**
1716 * Handle wakeup request byte.
 1717 */
1718static void smux_handle_wakeup_req(void)
1719{
 1720 unsigned long flags;
1721
1722 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
 1723 if (smux.power_state == SMUX_PWR_OFF
1724 || smux.power_state == SMUX_PWR_TURNING_ON) {
1725 /* wakeup system */
1726 SMUX_DBG("%s: Power %d->%d\n", __func__,
1727 smux.power_state, SMUX_PWR_ON);
1728 smux.power_state = SMUX_PWR_ON;
1729 queue_work(smux_tx_wq, &smux_wakeup_work);
1730 queue_work(smux_tx_wq, &smux_tx_work);
1731 queue_delayed_work(smux_tx_wq, &smux_delayed_inactivity_work,
1732 msecs_to_jiffies(SMUX_INACTIVITY_TIMEOUT_MS));
1733 smux_send_byte(SMUX_WAKEUP_ACK);
1734 } else {
1735 smux_send_byte(SMUX_WAKEUP_ACK);
1736 }
 1737 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
 1738}
1739
1740/**
1741 * Handle wakeup request ack.
 1742 */
1743static void smux_handle_wakeup_ack(void)
1744{
 1745 unsigned long flags;
1746
1747 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001748 if (smux.power_state == SMUX_PWR_TURNING_ON) {
1749 /* received response to wakeup request */
1750 SMUX_DBG("%s: Power %d->%d\n", __func__,
1751 smux.power_state, SMUX_PWR_ON);
1752 smux.power_state = SMUX_PWR_ON;
1753 queue_work(smux_tx_wq, &smux_tx_work);
1754 queue_delayed_work(smux_tx_wq, &smux_delayed_inactivity_work,
1755 msecs_to_jiffies(SMUX_INACTIVITY_TIMEOUT_MS));
1756
1757 } else if (smux.power_state != SMUX_PWR_ON) {
1758 /* invalid message */
1759 pr_err("%s: wakeup request ack invalid in state %d\n",
1760 __func__, smux.power_state);
1761 }
 1762 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
 1763}
1764
1765/**
1766 * RX State machine - IDLE state processing.
1767 *
1768 * @data New RX data to process
1769 * @len Length of the data
1770 * @used Return value of length processed
1771 * @flag Error flag - TTY_NORMAL 0 for no failure
 1772 */
1773static void smux_rx_handle_idle(const unsigned char *data,
1774 int len, int *used, int flag)
1775{
1776 int i;
1777
1778 if (flag) {
1779 if (smux_byte_loopback)
1780 smux_receive_byte(SMUX_UT_ECHO_ACK_FAIL,
1781 smux_byte_loopback);
1782 pr_err("%s: TTY error 0x%x - ignoring\n", __func__, flag);
1783 ++*used;
1784 return;
1785 }
1786
1787 for (i = *used; i < len && smux.rx_state == SMUX_RX_IDLE; i++) {
1788 switch (data[i]) {
1789 case SMUX_MAGIC_WORD1:
1790 smux.rx_state = SMUX_RX_MAGIC;
1791 break;
1792 case SMUX_WAKEUP_REQ:
1793 smux_handle_wakeup_req();
1794 break;
1795 case SMUX_WAKEUP_ACK:
1796 smux_handle_wakeup_ack();
1797 break;
1798 default:
1799 /* unexpected character */
1800 if (smux_byte_loopback && data[i] == SMUX_UT_ECHO_REQ)
1801 smux_receive_byte(SMUX_UT_ECHO_ACK_OK,
1802 smux_byte_loopback);
1803 pr_err("%s: parse error 0x%02x - ignoring\n", __func__,
1804 (unsigned)data[i]);
1805 break;
1806 }
1807 }
1808
1809 *used = i;
1810}
1811
1812/**
1813 * RX State machine - Header Magic state processing.
1814 *
1815 * @data New RX data to process
1816 * @len Length of the data
1817 * @used Return value of length processed
1818 * @flag Error flag - TTY_NORMAL 0 for no failure
 1819 */
1820static void smux_rx_handle_magic(const unsigned char *data,
1821 int len, int *used, int flag)
1822{
1823 int i;
1824
1825 if (flag) {
1826 pr_err("%s: TTY RX error %d\n", __func__, flag);
1827 smux_enter_reset();
1828 smux.rx_state = SMUX_RX_FAILURE;
1829 ++*used;
1830 return;
1831 }
1832
1833 for (i = *used; i < len && smux.rx_state == SMUX_RX_MAGIC; i++) {
1834 /* wait for completion of the magic */
1835 if (data[i] == SMUX_MAGIC_WORD2) {
1836 smux.recv_len = 0;
1837 smux.recv_buf[smux.recv_len++] = SMUX_MAGIC_WORD1;
1838 smux.recv_buf[smux.recv_len++] = SMUX_MAGIC_WORD2;
1839 smux.rx_state = SMUX_RX_HDR;
1840 } else {
1841 /* unexpected / trash character */
1842 pr_err("%s: rx parse error for char %c; *used=%d, len=%d\n",
1843 __func__, data[i], *used, len);
1844 smux.rx_state = SMUX_RX_IDLE;
1845 }
1846 }
1847
1848 *used = i;
1849}
1850
1851/**
1852 * RX State machine - Packet Header state processing.
1853 *
1854 * @data New RX data to process
1855 * @len Length of the data
1856 * @used Return value of length processed
1857 * @flag Error flag - TTY_NORMAL 0 for no failure
 1858 */
1859static void smux_rx_handle_hdr(const unsigned char *data,
1860 int len, int *used, int flag)
1861{
1862 int i;
1863 struct smux_hdr_t *hdr;
1864
1865 if (flag) {
1866 pr_err("%s: TTY RX error %d\n", __func__, flag);
1867 smux_enter_reset();
1868 smux.rx_state = SMUX_RX_FAILURE;
1869 ++*used;
1870 return;
1871 }
1872
1873 for (i = *used; i < len && smux.rx_state == SMUX_RX_HDR; i++) {
1874 smux.recv_buf[smux.recv_len++] = data[i];
1875
1876 if (smux.recv_len == sizeof(struct smux_hdr_t)) {
1877 /* complete header received */
1878 hdr = (struct smux_hdr_t *)smux.recv_buf;
1879 smux.pkt_remain = hdr->payload_len + hdr->pad_len;
1880 smux.rx_state = SMUX_RX_PAYLOAD;
1881 }
1882 }
1883 *used = i;
1884}
1885
1886/**
1887 * RX State machine - Packet Payload state processing.
1888 *
1889 * @data New RX data to process
1890 * @len Length of the data
1891 * @used Return value of length processed
1892 * @flag Error flag - TTY_NORMAL 0 for no failure
 1893 */
1894static void smux_rx_handle_pkt_payload(const unsigned char *data,
1895 int len, int *used, int flag)
1896{
1897 int remaining;
1898
1899 if (flag) {
1900 pr_err("%s: TTY RX error %d\n", __func__, flag);
1901 smux_enter_reset();
1902 smux.rx_state = SMUX_RX_FAILURE;
1903 ++*used;
1904 return;
1905 }
1906
1907 /* copy data into rx buffer */
1908 if (smux.pkt_remain < (len - *used))
1909 remaining = smux.pkt_remain;
1910 else
1911 remaining = len - *used;
1912
1913 memcpy(&smux.recv_buf[smux.recv_len], &data[*used], remaining);
1914 smux.recv_len += remaining;
1915 smux.pkt_remain -= remaining;
1916 *used += remaining;
1917
1918 if (smux.pkt_remain == 0) {
1919 /* complete packet received */
1920 smux_deserialize(smux.recv_buf, smux.recv_len);
1921 smux.rx_state = SMUX_RX_IDLE;
1922 }
1923}
1924
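/*
 * Worked example of the RX parse above (annotation, not original source):
 * a frame arrives as SMUX_MAGIC_WORD1 (consumed by the idle-state handler
 * earlier in this file), then SMUX_MAGIC_WORD2 (smux_rx_handle_magic),
 * then sizeof(struct smux_hdr_t) header bytes collected by
 * smux_rx_handle_hdr, and finally payload_len + pad_len payload bytes
 * copied here.  When pkt_remain reaches zero, the assembled buffer is
 * handed to smux_deserialize() and the parser returns to SMUX_RX_IDLE for
 * the next frame.
 */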
1925/**
1926 * Feed data to the receive state machine.
1927 *
1928 * @data Pointer to data block
1929 * @len Length of data
1930 * @flag TTY_NORMAL (0) for no error, otherwise TTY Error Flag
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001931 */
1932void smux_rx_state_machine(const unsigned char *data,
1933 int len, int flag)
1934{
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001935 struct smux_rx_worker_data work;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001936
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001937 work.data = data;
1938 work.len = len;
1939 work.flag = flag;
1940 INIT_WORK_ONSTACK(&work.work, smux_rx_worker);
1941 work.work_complete = COMPLETION_INITIALIZER_ONSTACK(work.work_complete);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001942
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001943 queue_work(smux_rx_wq, &work.work);
1944 wait_for_completion(&work.work_complete);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001945}
1946
1947/**
1948 * Add channel to transmit-ready list and trigger transmit worker.
1949 *
1950 * @ch Channel to add
1951 */
1952static void list_channel(struct smux_lch_t *ch)
1953{
1954 unsigned long flags;
1955
1956 SMUX_DBG("%s: listing channel %d\n",
1957 __func__, ch->lcid);
1958
1959 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
1960 spin_lock(&ch->tx_lock_lhb2);
1961 smux.tx_activity_flag = 1;
1962 if (list_empty(&ch->tx_ready_list))
1963 list_add_tail(&ch->tx_ready_list, &smux.lch_tx_ready_list);
1964 spin_unlock(&ch->tx_lock_lhb2);
1965 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
1966
1967 queue_work(smux_tx_wq, &smux_tx_work);
1968}
1969
1970/**
1971 * Transmit packet on correct transport and then perform client
1972 * notification.
1973 *
1974 * @ch Channel to transmit on
1975 * @pkt Packet to transmit
1976 */
1977static void smux_tx_pkt(struct smux_lch_t *ch, struct smux_pkt_t *pkt)
1978{
1979 union notifier_metadata meta_write;
1980 int ret;
1981
1982 if (ch && pkt) {
1983 SMUX_LOG_PKT_TX(pkt);
1984 if (ch->local_mode == SMUX_LCH_MODE_LOCAL_LOOPBACK)
1985 ret = smux_tx_loopback(pkt);
1986 else
1987 ret = smux_tx_tty(pkt);
1988
1989 if (pkt->hdr.cmd == SMUX_CMD_DATA) {
1990 /* notify write-done */
1991 meta_write.write.pkt_priv = pkt->priv;
1992 meta_write.write.buffer = pkt->payload;
1993 meta_write.write.len = pkt->hdr.payload_len;
1994 if (ret >= 0) {
1995 SMUX_DBG("%s: PKT write done", __func__);
1996 schedule_notify(ch->lcid, SMUX_WRITE_DONE,
1997 &meta_write);
1998 } else {
1999 pr_err("%s: failed to write pkt %d\n",
2000 __func__, ret);
2001 schedule_notify(ch->lcid, SMUX_WRITE_FAIL,
2002 &meta_write);
2003 }
2004 }
2005 }
2006}
2007
2008/**
Eric Holmberged1f00c2012-06-07 09:45:18 -06002009 * Purge TX queue for logical channel.
2010 *
2011 * @ch Logical channel pointer
2012 *
2013 * Must be called with the following spinlocks locked:
2014 * state_lock_lhb1
2015 * tx_lock_lhb2
2016 */
2017static void smux_purge_ch_tx_queue(struct smux_lch_t *ch)
2018{
2019 struct smux_pkt_t *pkt;
2020 int send_disconnect = 0;
2021
2022 while (!list_empty(&ch->tx_queue)) {
2023 pkt = list_first_entry(&ch->tx_queue, struct smux_pkt_t,
2024 list);
2025 list_del(&pkt->list);
2026
2027 if (pkt->hdr.cmd == SMUX_CMD_OPEN_LCH) {
2028 /* Open was never sent, just force to closed state */
2029 ch->local_state = SMUX_LCH_LOCAL_CLOSED;
2030 send_disconnect = 1;
2031 } else if (pkt->hdr.cmd == SMUX_CMD_DATA) {
2032 /* Notify client of failed write */
2033 union notifier_metadata meta_write;
2034
2035 meta_write.write.pkt_priv = pkt->priv;
2036 meta_write.write.buffer = pkt->payload;
2037 meta_write.write.len = pkt->hdr.payload_len;
2038 schedule_notify(ch->lcid, SMUX_WRITE_FAIL, &meta_write);
2039 }
2040 smux_free_pkt(pkt);
2041 }
2042
2043 if (send_disconnect) {
2044 union notifier_metadata meta_disconnected;
2045
2046 meta_disconnected.disconnected.is_ssr = smux.in_reset;
2047 schedule_notify(ch->lcid, SMUX_DISCONNECTED,
2048 &meta_disconnected);
2049 }
2050}
2051
2052/**
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002053 * Power-up the UART.
2054 */
2055static void smux_uart_power_on(void)
2056{
2057 struct uart_state *state;
2058
2059 if (!smux.tty || !smux.tty->driver_data) {
2060 pr_err("%s: unable to find UART port for tty %p\n",
2061 __func__, smux.tty);
2062 return;
2063 }
2064 state = smux.tty->driver_data;
2065 msm_hs_request_clock_on(state->uart_port);
2066}
2067
2068/**
2069 * Power down the UART.
2070 */
2071static void smux_uart_power_off(void)
2072{
2073 struct uart_state *state;
2074
2075 if (!smux.tty || !smux.tty->driver_data) {
2076 pr_err("%s: unable to find UART port for tty %p\n",
2077 __func__, smux.tty);
2078 return;
2079 }
2080 state = smux.tty->driver_data;
2081 msm_hs_request_clock_off(state->uart_port);
2082}
2083
2084/**
2085 * TX Wakeup Worker
2086 *
2087 * @work Not used
2088 *
2089 * Do an exponential back-off wakeup sequence with a maximum period
2090 * of approximately 1 second (1 << 20 microseconds).
2091 */
2092static void smux_wakeup_worker(struct work_struct *work)
2093{
2094 unsigned long flags;
2095 unsigned wakeup_delay;
2096 int complete = 0;
2097
Eric Holmberged1f00c2012-06-07 09:45:18 -06002098 while (!smux.in_reset) {
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002099 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
2100 if (smux.power_state == SMUX_PWR_ON) {
2101 /* wakeup complete */
2102 complete = 1;
2103 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2104 break;
2105 } else {
2106 /* retry */
2107 wakeup_delay = smux.pwr_wakeup_delay_us;
2108 smux.pwr_wakeup_delay_us <<= 1;
2109 if (smux.pwr_wakeup_delay_us > SMUX_WAKEUP_DELAY_MAX)
2110 smux.pwr_wakeup_delay_us =
2111 SMUX_WAKEUP_DELAY_MAX;
2112 }
2113 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2114 SMUX_DBG("%s: triggering wakeup\n", __func__);
2115 smux_send_byte(SMUX_WAKEUP_REQ);
2116
2117 if (wakeup_delay < SMUX_WAKEUP_DELAY_MIN) {
2118 SMUX_DBG("%s: sleeping for %u us\n", __func__,
2119 wakeup_delay);
2120 usleep_range(wakeup_delay, 2*wakeup_delay);
2121 } else {
2122 /* schedule delayed work */
2123 SMUX_DBG("%s: scheduling delayed wakeup in %u ms\n",
2124 __func__, wakeup_delay / 1000);
2125 queue_delayed_work(smux_tx_wq,
2126 &smux_wakeup_delayed_work,
2127 msecs_to_jiffies(wakeup_delay / 1000));
2128 break;
2129 }
2130 }
2131
2132 if (complete) {
2133 SMUX_DBG("%s: wakeup complete\n", __func__);
2134 /*
2135 * Cancel any pending retry. This avoids a race condition with
2136 * a new power-up request because:
2137 * 1) this worker doesn't modify the state
2138 * 2) this worker is processed on the same single-threaded
2139 * workqueue as new TX wakeup requests
2140 */
2141 cancel_delayed_work(&smux_wakeup_delayed_work);
2142 }
2143}
2144
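/*
 * Worked example of the back-off above (annotation, not original source):
 * pwr_wakeup_delay_us is set to 1 when a wakeup is first triggered and is
 * doubled on every pass, so unanswered SMUX_WAKEUP_REQ bytes are retried
 * after 1, 2, 4, ... microseconds.  While the delay is below
 * SMUX_WAKEUP_DELAY_MIN (1 << 15 us, roughly 33 ms) the worker sleeps
 * inline with usleep_range(); at or above that threshold it reschedules
 * itself as delayed work in millisecond units, with the delay capped at
 * SMUX_WAKEUP_DELAY_MAX (1 << 20 us, roughly 1 s).
 */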
2145
2146/**
2147 * Inactivity timeout worker. Periodically scheduled when link is active.
2148 * When it detects inactivity, it will power-down the UART link.
2149 *
2150 * @work Work structure (not used)
2151 */
2152static void smux_inactivity_worker(struct work_struct *work)
2153{
2154 int tx_ready = 0;
2155 struct smux_pkt_t *pkt;
2156 unsigned long flags;
2157
2158 spin_lock_irqsave(&smux.rx_lock_lha1, flags);
2159 spin_lock(&smux.tx_lock_lha2);
2160
2161 if (!smux.tx_activity_flag && !smux.rx_activity_flag) {
2162 /* no activity */
2163 if (smux.powerdown_enabled) {
2164 if (smux.power_state == SMUX_PWR_ON) {
2165 /* start power-down sequence */
2166 pkt = smux_alloc_pkt();
2167 if (pkt) {
2168 SMUX_DBG("%s: Power %d->%d\n", __func__,
2169 smux.power_state,
2170 SMUX_PWR_TURNING_OFF);
2171 smux.power_state = SMUX_PWR_TURNING_OFF;
2172
2173 /* send power-down request */
2174 pkt->hdr.cmd = SMUX_CMD_PWR_CTL;
2175 pkt->hdr.flags = 0;
2176 pkt->hdr.lcid = 0;
2177 smux_tx_queue(pkt,
2178 &smux_lch[SMUX_TEST_LCID],
2179 0);
2180 tx_ready = 1;
2181 }
2182 }
2183 } else {
2184 SMUX_DBG("%s: link inactive, but powerdown disabled\n",
2185 __func__);
2186 }
2187 }
2188 smux.tx_activity_flag = 0;
2189 smux.rx_activity_flag = 0;
2190
2191 spin_unlock(&smux.tx_lock_lha2);
2192 spin_unlock_irqrestore(&smux.rx_lock_lha1, flags);
2193
2194 if (tx_ready)
2195 list_channel(&smux_lch[SMUX_TEST_LCID]);
2196
2197 if ((smux.power_state == SMUX_PWR_OFF_FLUSH) ||
2198 (smux.power_state == SMUX_PWR_TURNING_OFF_FLUSH)) {
2199 /* ready to power-down the UART */
2200 SMUX_DBG("%s: Power %d->%d\n", __func__,
2201 smux.power_state, SMUX_PWR_OFF);
2202 smux_uart_power_off();
2203 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
2204 smux.power_state = SMUX_PWR_OFF;
2205 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2206 }
2207
2208 /* reschedule inactivity worker */
2209 if (smux.power_state != SMUX_PWR_OFF)
2210 queue_delayed_work(smux_tx_wq, &smux_delayed_inactivity_work,
2211 msecs_to_jiffies(SMUX_INACTIVITY_TIMEOUT_MS));
2212}
2213
2214/**
Eric Holmbergb8435c82012-06-05 14:51:29 -06002215 * Remove RX retry packet from channel and free it.
2216 *
2217 * Must be called with state_lock_lhb1 locked.
2218 *
2219 * @ch Channel for retry packet
2220 * @retry Retry packet to remove
2221 */
2222void smux_remove_rx_retry(struct smux_lch_t *ch,
2223 struct smux_rx_pkt_retry *retry)
2224{
2225 list_del(&retry->rx_retry_list);
2226 --ch->rx_retry_queue_cnt;
2227 smux_free_pkt(retry->pkt);
2228 kfree(retry);
2229}
2230
2231/**
Eric Holmberg0560f7a2012-05-31 15:50:26 -06002232 * RX worker handles all receive operations.
2233 *
2234 * @work	Work structure contained in a struct smux_rx_worker_data
2235 */
2236static void smux_rx_worker(struct work_struct *work)
2237{
2238 unsigned long flags;
2239 int used;
2240 int initial_rx_state;
2241 struct smux_rx_worker_data *w;
2242 const unsigned char *data;
2243 int len;
2244 int flag;
2245
2246 w = container_of(work, struct smux_rx_worker_data, work);
2247 data = w->data;
2248 len = w->len;
2249 flag = w->flag;
2250
2251 spin_lock_irqsave(&smux.rx_lock_lha1, flags);
2252 smux.rx_activity_flag = 1;
2253 spin_unlock_irqrestore(&smux.rx_lock_lha1, flags);
2254
2255 SMUX_DBG("%s: %p, len=%d, flag=%d\n", __func__, data, len, flag);
2256 used = 0;
2257 do {
2258 SMUX_DBG("%s: state %d; %d of %d\n",
2259 __func__, smux.rx_state, used, len);
2260 initial_rx_state = smux.rx_state;
2261
2262 switch (smux.rx_state) {
2263 case SMUX_RX_IDLE:
2264 smux_rx_handle_idle(data, len, &used, flag);
2265 break;
2266 case SMUX_RX_MAGIC:
2267 smux_rx_handle_magic(data, len, &used, flag);
2268 break;
2269 case SMUX_RX_HDR:
2270 smux_rx_handle_hdr(data, len, &used, flag);
2271 break;
2272 case SMUX_RX_PAYLOAD:
2273 smux_rx_handle_pkt_payload(data, len, &used, flag);
2274 break;
2275 default:
2276 SMUX_DBG("%s: invalid state %d\n",
2277 __func__, smux.rx_state);
2278 smux.rx_state = SMUX_RX_IDLE;
2279 break;
2280 }
2281 } while (used < len || smux.rx_state != initial_rx_state);
2282
2283 complete(&w->work_complete);
2284}
2285
2286/**
Eric Holmbergb8435c82012-06-05 14:51:29 -06002287 * RX Retry worker handles retrying get_rx_buffer calls that previously failed
2288 * because the client was not ready (-EAGAIN).
2289 *
2290 * @work Work structure contained in smux_lch_t structure
2291 */
2292static void smux_rx_retry_worker(struct work_struct *work)
2293{
2294 struct smux_lch_t *ch;
2295 struct smux_rx_pkt_retry *retry;
2296 union notifier_metadata metadata;
2297 int tmp;
2298 unsigned long flags;
2299
2300 ch = container_of(work, struct smux_lch_t, rx_retry_work.work);
2301
2302 /* get next retry packet */
2303 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2304 if (ch->local_state != SMUX_LCH_LOCAL_OPENED) {
2305 /* port has been closed - remove all retries */
2306 while (!list_empty(&ch->rx_retry_queue)) {
2307 retry = list_first_entry(&ch->rx_retry_queue,
2308 struct smux_rx_pkt_retry,
2309 rx_retry_list);
2310 smux_remove_rx_retry(ch, retry);
2311 }
2312 }
2313
2314 if (list_empty(&ch->rx_retry_queue)) {
2315 SMUX_DBG("%s: retry list empty for channel %d\n",
2316 __func__, ch->lcid);
2317 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2318 return;
2319 }
2320 retry = list_first_entry(&ch->rx_retry_queue,
2321 struct smux_rx_pkt_retry,
2322 rx_retry_list);
2323 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2324
2325 SMUX_DBG("%s: retrying rx pkt %p\n", __func__, retry);
2326 metadata.read.pkt_priv = 0;
2327 metadata.read.buffer = 0;
2328 tmp = ch->get_rx_buffer(ch->priv,
2329 (void **)&metadata.read.pkt_priv,
2330 (void **)&metadata.read.buffer,
2331 retry->pkt->hdr.payload_len);
2332 if (tmp == 0 && metadata.read.buffer) {
2333 /* have valid RX buffer */
2334 memcpy(metadata.read.buffer, retry->pkt->payload,
2335 retry->pkt->hdr.payload_len);
2336 metadata.read.len = retry->pkt->hdr.payload_len;
2337
2338 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2339 smux_remove_rx_retry(ch, retry);
2340 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2341
2342 schedule_notify(ch->lcid, SMUX_READ_DONE, &metadata);
2343 } else if (tmp == -EAGAIN ||
2344 (tmp == 0 && !metadata.read.buffer)) {
2345 /* retry again */
2346 retry->timeout_in_ms <<= 1;
2347 if (retry->timeout_in_ms > SMUX_RX_RETRY_MAX_MS) {
2348 /* timed out */
2349 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2350 smux_remove_rx_retry(ch, retry);
2351 schedule_notify(ch->lcid, SMUX_READ_FAIL, NULL);
2352 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2353 }
2354 } else {
2355 /* client error - drop packet */
2356 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2357 smux_remove_rx_retry(ch, retry);
2358 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2359
2360 schedule_notify(ch->lcid, SMUX_READ_FAIL, NULL);
2361 }
2362
2363 /* schedule next retry */
2364 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2365 if (!list_empty(&ch->rx_retry_queue)) {
2366 retry = list_first_entry(&ch->rx_retry_queue,
2367 struct smux_rx_pkt_retry,
2368 rx_retry_list);
2369 queue_delayed_work(smux_rx_wq, &ch->rx_retry_work,
2370 msecs_to_jiffies(retry->timeout_in_ms));
2371 }
2372 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2373}
2374
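/*
 * Worked example of the retry back-off above (annotation, not original
 * source): while a client's get_rx_buffer() keeps returning -EAGAIN, each
 * pass through this worker doubles retry->timeout_in_ms before the retry
 * is rescheduled, so attempts are spaced further and further apart.  Once
 * the timeout exceeds SMUX_RX_RETRY_MAX_MS (1024 ms) the packet is dropped
 * and the client is notified with SMUX_READ_FAIL.  The initial timeout is
 * assigned where the retry is first queued (not shown in this section),
 * presumably SMUX_RX_RETRY_MIN_MS.
 */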
2375/**
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002376 * Transmit worker handles serializing and transmitting packets onto the
2377 * underlying transport.
2378 *
2379 * @work Work structure (not used)
2380 */
2381static void smux_tx_worker(struct work_struct *work)
2382{
2383 struct smux_pkt_t *pkt;
2384 struct smux_lch_t *ch;
2385 unsigned low_wm_notif;
2386 unsigned lcid;
2387 unsigned long flags;
2388
2389
2390 /*
2391 * Transmit packets in round-robin fashion based upon ready
2392 * channels.
2393 *
2394 * To eliminate the need to hold a lock for the entire
2395 * iteration through the channel ready list, the head of the
2396 * ready-channel list is always the next channel to be
2397 * processed. To send a packet, the first valid packet in
2398 * the head channel is removed and the head channel is then
2399 * rescheduled at the end of the queue by removing it and
2400 * inserting after the tail. The locks can then be released
2401 * while the packet is processed.
2402 */
Eric Holmberged1f00c2012-06-07 09:45:18 -06002403 while (!smux.in_reset) {
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002404 pkt = NULL;
2405 low_wm_notif = 0;
2406
2407 /* get the next ready channel */
2408 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
2409 if (list_empty(&smux.lch_tx_ready_list)) {
2410 /* no ready channels */
2411 SMUX_DBG("%s: no more ready channels, exiting\n",
2412 __func__);
2413 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2414 break;
2415 }
2416 smux.tx_activity_flag = 1;
2417
2418 if (smux.power_state != SMUX_PWR_ON
2419 && smux.power_state != SMUX_PWR_TURNING_OFF
2420 && smux.power_state != SMUX_PWR_TURNING_OFF_FLUSH) {
2421 /* Link isn't ready to transmit */
2422 if (smux.power_state == SMUX_PWR_OFF) {
2423 /* link is off, trigger wakeup */
2424 smux.pwr_wakeup_delay_us = 1;
2425 SMUX_DBG("%s: Power %d->%d\n", __func__,
2426 smux.power_state,
2427 SMUX_PWR_TURNING_ON);
2428 smux.power_state = SMUX_PWR_TURNING_ON;
2429 spin_unlock_irqrestore(&smux.tx_lock_lha2,
2430 flags);
2431 smux_uart_power_on();
2432 queue_work(smux_tx_wq, &smux_wakeup_work);
2433 } else {
2434 SMUX_DBG("%s: can not tx with power state %d\n",
2435 __func__,
2436 smux.power_state);
2437 spin_unlock_irqrestore(&smux.tx_lock_lha2,
2438 flags);
2439 }
2440 break;
2441 }
2442
2443 /* get the next packet to send and rotate channel list */
2444 ch = list_first_entry(&smux.lch_tx_ready_list,
2445 struct smux_lch_t,
2446 tx_ready_list);
2447
2448 spin_lock(&ch->state_lock_lhb1);
2449 spin_lock(&ch->tx_lock_lhb2);
2450 if (!list_empty(&ch->tx_queue)) {
2451 /*
2452 * If remote TX flow control is enabled or
2453 * the channel is not fully opened, then only
2454 * send command packets.
2455 */
2456 if (ch->tx_flow_control || !IS_FULLY_OPENED(ch)) {
2457 struct smux_pkt_t *curr;
2458 list_for_each_entry(curr, &ch->tx_queue, list) {
2459 if (curr->hdr.cmd != SMUX_CMD_DATA) {
2460 pkt = curr;
2461 break;
2462 }
2463 }
2464 } else {
2465 /* get next cmd/data packet to send */
2466 pkt = list_first_entry(&ch->tx_queue,
2467 struct smux_pkt_t, list);
2468 }
2469 }
2470
2471 if (pkt) {
2472 list_del(&pkt->list);
2473
2474 /* update packet stats */
2475 if (pkt->hdr.cmd == SMUX_CMD_DATA) {
2476 --ch->tx_pending_data_cnt;
2477 if (ch->notify_lwm &&
2478 ch->tx_pending_data_cnt
2479 <= SMUX_WM_LOW) {
2480 ch->notify_lwm = 0;
2481 low_wm_notif = 1;
2482 }
2483 }
2484
2485 /* advance to the next ready channel */
2486 list_rotate_left(&smux.lch_tx_ready_list);
2487 } else {
2488 /* no data in channel to send, remove from ready list */
2489 list_del(&ch->tx_ready_list);
2490 INIT_LIST_HEAD(&ch->tx_ready_list);
2491 }
2492 lcid = ch->lcid;
2493 spin_unlock(&ch->tx_lock_lhb2);
2494 spin_unlock(&ch->state_lock_lhb1);
2495 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2496
2497 if (low_wm_notif)
2498 schedule_notify(lcid, SMUX_LOW_WM_HIT, NULL);
2499
2500 /* send the packet */
2501 smux_tx_pkt(ch, pkt);
2502 smux_free_pkt(pkt);
2503 }
2504}
2505
2506
2507/**********************************************************************/
2508/* Kernel API */
2509/**********************************************************************/
2510
2511/**
2512 * Set or clear channel option using the SMUX_CH_OPTION_* channel
2513 * flags.
2514 *
2515 * @lcid Logical channel ID
2516 * @set Options to set
2517 * @clear Options to clear
2518 *
2519 * @returns 0 for success, < 0 for failure
2520 */
2521int msm_smux_set_ch_option(uint8_t lcid, uint32_t set, uint32_t clear)
2522{
2523 unsigned long flags;
2524 struct smux_lch_t *ch;
2525 int tx_ready = 0;
2526 int ret = 0;
2527
2528 if (smux_assert_lch_id(lcid))
2529 return -ENXIO;
2530
2531 ch = &smux_lch[lcid];
2532 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2533
2534 /* Local loopback mode */
2535 if (set & SMUX_CH_OPTION_LOCAL_LOOPBACK)
2536 ch->local_mode = SMUX_LCH_MODE_LOCAL_LOOPBACK;
2537
2538 if (clear & SMUX_CH_OPTION_LOCAL_LOOPBACK)
2539 ch->local_mode = SMUX_LCH_MODE_NORMAL;
2540
2541 /* Remote loopback mode */
2542 if (set & SMUX_CH_OPTION_REMOTE_LOOPBACK)
2543 ch->local_mode = SMUX_LCH_MODE_REMOTE_LOOPBACK;
2544
2545 if (clear & SMUX_CH_OPTION_REMOTE_LOOPBACK)
2546 ch->local_mode = SMUX_LCH_MODE_NORMAL;
2547
2548 /* Flow control */
2549 if (set & SMUX_CH_OPTION_REMOTE_TX_STOP) {
2550 ch->local_tiocm |= SMUX_CMD_STATUS_FLOW_CNTL;
2551 ret = smux_send_status_cmd(ch);
2552 tx_ready = 1;
2553 }
2554
2555 if (clear & SMUX_CH_OPTION_REMOTE_TX_STOP) {
2556 ch->local_tiocm &= ~SMUX_CMD_STATUS_FLOW_CNTL;
2557 ret = smux_send_status_cmd(ch);
2558 tx_ready = 1;
2559 }
2560
2561 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2562
2563 if (tx_ready)
2564 list_channel(ch);
2565
2566 return ret;
2567}
2568
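/*
 * Illustrative usage sketch (annotation, not part of the driver source):
 * a test client could place a channel into local loopback and later
 * restore normal mode.  MY_LCID is a hypothetical channel ID.
 *
 *	msm_smux_set_ch_option(MY_LCID, SMUX_CH_OPTION_LOCAL_LOOPBACK, 0);
 *	... run loopback traffic ...
 *	msm_smux_set_ch_option(MY_LCID, 0, SMUX_CH_OPTION_LOCAL_LOOPBACK);
 */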
2569/**
2570 * Starts the opening sequence for a logical channel.
2571 *
2572 * @lcid Logical channel ID
2573 * @priv Free for client usage
2574 * @notify Event notification function
2575 * @get_rx_buffer Function used to provide a receive buffer to SMUX
2576 *
2577 * @returns 0 for success, <0 otherwise
2578 *
2579 * A channel must be fully closed (either never opened, or msm_smux_close()
2580 * has been called and the SMUX_DISCONNECTED notification has been
2581 * received) before it can be opened.
2582 *
2583 * Once the remote side is opened, the client will receive a SMUX_CONNECTED
2584 * event.
2585 */
2586int msm_smux_open(uint8_t lcid, void *priv,
2587 void (*notify)(void *priv, int event_type, const void *metadata),
2588 int (*get_rx_buffer)(void *priv, void **pkt_priv, void **buffer,
2589 int size))
2590{
2591 int ret;
2592 struct smux_lch_t *ch;
2593 struct smux_pkt_t *pkt;
2594 int tx_ready = 0;
2595 unsigned long flags;
2596
2597 if (smux_assert_lch_id(lcid))
2598 return -ENXIO;
2599
2600 ch = &smux_lch[lcid];
2601 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2602
2603 if (ch->local_state == SMUX_LCH_LOCAL_CLOSING) {
2604 ret = -EAGAIN;
2605 goto out;
2606 }
2607
2608 if (ch->local_state != SMUX_LCH_LOCAL_CLOSED) {
2609 pr_err("%s: open lcid %d local state %x invalid\n",
2610 __func__, lcid, ch->local_state);
2611 ret = -EINVAL;
2612 goto out;
2613 }
2614
2615 SMUX_DBG("lcid %d local state 0x%x -> 0x%x\n", lcid,
2616 ch->local_state,
2617 SMUX_LCH_LOCAL_OPENING);
2618
2619 ch->local_state = SMUX_LCH_LOCAL_OPENING;
2620
2621 ch->priv = priv;
2622 ch->notify = notify;
2623 ch->get_rx_buffer = get_rx_buffer;
2624 ret = 0;
2625
2626 /* Send Open Command */
2627 pkt = smux_alloc_pkt();
2628 if (!pkt) {
2629 ret = -ENOMEM;
2630 goto out;
2631 }
2632 pkt->hdr.magic = SMUX_MAGIC;
2633 pkt->hdr.cmd = SMUX_CMD_OPEN_LCH;
2634 pkt->hdr.flags = SMUX_CMD_OPEN_POWER_COLLAPSE;
2635 if (ch->local_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK)
2636 pkt->hdr.flags |= SMUX_CMD_OPEN_REMOTE_LOOPBACK;
2637 pkt->hdr.lcid = lcid;
2638 pkt->hdr.payload_len = 0;
2639 pkt->hdr.pad_len = 0;
2640 smux_tx_queue(pkt, ch, 0);
2641 tx_ready = 1;
2642
2643out:
2644 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2645 if (tx_ready)
2646 list_channel(ch);
2647 return ret;
2648}
2649
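/*
 * Illustrative usage sketch (annotation, not part of the driver source):
 * a minimal client open sequence.  MY_LCID, my_notify(), and
 * my_get_rx_buffer() are hypothetical; only their signatures follow from
 * msm_smux_open() above.
 *
 *	static int my_get_rx_buffer(void *priv, void **pkt_priv,
 *				    void **buffer, int size)
 *	{
 *		*pkt_priv = NULL;
 *		*buffer = kmalloc(size, GFP_ATOMIC);
 *		return *buffer ? 0 : -EAGAIN;
 *	}
 *
 *	static void my_notify(void *priv, int event_type, const void *metadata)
 *	{
 *		if (event_type == SMUX_CONNECTED)
 *			pr_info("channel fully opened\n");
 *	}
 *
 *	ret = msm_smux_open(MY_LCID, NULL, my_notify, my_get_rx_buffer);
 *
 * Returning -EAGAIN from the get_rx_buffer callback defers the received
 * packet to the RX retry worker above.
 */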
2650/**
2651 * Starts the closing sequence for a logical channel.
2652 *
2653 * @lcid Logical channel ID
2654 *
2655 * @returns 0 for success, <0 otherwise
2656 *
2657 * Once the close event has been acknowledged by the remote side, the client
2658 * will receive a SMUX_DISCONNECTED notification.
2659 */
2660int msm_smux_close(uint8_t lcid)
2661{
2662 int ret = 0;
2663 struct smux_lch_t *ch;
2664 struct smux_pkt_t *pkt;
2665 int tx_ready = 0;
2666 unsigned long flags;
2667
2668 if (smux_assert_lch_id(lcid))
2669 return -ENXIO;
2670
2671 ch = &smux_lch[lcid];
2672 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2673 ch->local_tiocm = 0x0;
2674 ch->remote_tiocm = 0x0;
2675 ch->tx_pending_data_cnt = 0;
2676 ch->notify_lwm = 0;
2677
2678 /* Purge TX queue */
2679 spin_lock(&ch->tx_lock_lhb2);
Eric Holmberged1f00c2012-06-07 09:45:18 -06002680 smux_purge_ch_tx_queue(ch);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002681 spin_unlock(&ch->tx_lock_lhb2);
2682
2683 /* Send Close Command */
2684 if (ch->local_state == SMUX_LCH_LOCAL_OPENED ||
2685 ch->local_state == SMUX_LCH_LOCAL_OPENING) {
2686 SMUX_DBG("lcid %d local state 0x%x -> 0x%x\n", lcid,
2687 ch->local_state,
2688 SMUX_LCH_LOCAL_CLOSING);
2689
2690 ch->local_state = SMUX_LCH_LOCAL_CLOSING;
2691 pkt = smux_alloc_pkt();
2692 if (pkt) {
2693 pkt->hdr.cmd = SMUX_CMD_CLOSE_LCH;
2694 pkt->hdr.flags = 0;
2695 pkt->hdr.lcid = lcid;
2696 pkt->hdr.payload_len = 0;
2697 pkt->hdr.pad_len = 0;
2698 smux_tx_queue(pkt, ch, 0);
2699 tx_ready = 1;
2700 } else {
2701 pr_err("%s: pkt allocation failed\n", __func__);
2702 ret = -ENOMEM;
2703 }
Eric Holmbergb8435c82012-06-05 14:51:29 -06002704
2705 /* Purge RX retry queue */
2706 if (ch->rx_retry_queue_cnt)
2707 queue_delayed_work(smux_rx_wq, &ch->rx_retry_work, 0);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002708 }
2709 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2710
2711 if (tx_ready)
2712 list_channel(ch);
2713
2714 return ret;
2715}
2716
2717/**
2718 * Write data to a logical channel.
2719 *
2720 * @lcid Logical channel ID
2721 * @pkt_priv Client data that will be returned with the SMUX_WRITE_DONE or
2722 * SMUX_WRITE_FAIL notification.
2723 * @data Data to write
2724 * @len Length of @data
2725 *
2726 * @returns 0 for success, <0 otherwise
2727 *
2728 * Data may be written immediately after msm_smux_open() is called,
2729 * but the data will wait in the transmit queue until the channel has
2730 * been fully opened.
2731 *
2732 * Once the data has been written, the client will receive either a completion
2733 * (SMUX_WRITE_DONE) or a failure notice (SMUX_WRITE_FAIL).
2734 */
2735int msm_smux_write(uint8_t lcid, void *pkt_priv, const void *data, int len)
2736{
2737 struct smux_lch_t *ch;
2738	struct smux_pkt_t *pkt = NULL;	/* stays NULL on early error paths */
2739 int tx_ready = 0;
2740 unsigned long flags;
2741 int ret;
2742
2743 if (smux_assert_lch_id(lcid))
2744 return -ENXIO;
2745
2746 ch = &smux_lch[lcid];
2747 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2748
2749 if (ch->local_state != SMUX_LCH_LOCAL_OPENED &&
2750 ch->local_state != SMUX_LCH_LOCAL_OPENING) {
2751		pr_err("%s: invalid local state %d channel %d\n",
2752 __func__, ch->local_state, lcid);
2753 ret = -EINVAL;
2754 goto out;
2755 }
2756
2757 if (len > SMUX_MAX_PKT_SIZE - sizeof(struct smux_hdr_t)) {
2758 pr_err("%s: payload %d too large\n",
2759 __func__, len);
2760 ret = -E2BIG;
2761 goto out;
2762 }
2763
2764 pkt = smux_alloc_pkt();
2765 if (!pkt) {
2766 ret = -ENOMEM;
2767 goto out;
2768 }
2769
2770 pkt->hdr.cmd = SMUX_CMD_DATA;
2771 pkt->hdr.lcid = lcid;
2772 pkt->hdr.flags = 0;
2773 pkt->hdr.payload_len = len;
2774 pkt->payload = (void *)data;
2775 pkt->priv = pkt_priv;
2776 pkt->hdr.pad_len = 0;
2777
2778 spin_lock(&ch->tx_lock_lhb2);
2779 /* verify high watermark */
2780 SMUX_DBG("%s: pending %d", __func__, ch->tx_pending_data_cnt);
2781
2782 if (ch->tx_pending_data_cnt >= SMUX_WM_HIGH) {
2783 pr_err("%s: ch %d high watermark %d exceeded %d\n",
2784 __func__, lcid, SMUX_WM_HIGH,
2785 ch->tx_pending_data_cnt);
2786 ret = -EAGAIN;
2787 goto out_inner;
2788 }
2789
2790 /* queue packet for transmit */
2791 if (++ch->tx_pending_data_cnt == SMUX_WM_HIGH) {
2792 ch->notify_lwm = 1;
2793 pr_err("%s: high watermark hit\n", __func__);
2794 schedule_notify(lcid, SMUX_HIGH_WM_HIT, NULL);
2795 }
2796 list_add_tail(&pkt->list, &ch->tx_queue);
2797
2798 /* add to ready list */
2799 if (IS_FULLY_OPENED(ch))
2800 tx_ready = 1;
2801
2802 ret = 0;
2803
2804out_inner:
2805 spin_unlock(&ch->tx_lock_lhb2);
2806
2807out:
2808 if (ret)
2809 smux_free_pkt(pkt);
2810 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2811
2812 if (tx_ready)
2813 list_channel(ch);
2814
2815 return ret;
2816}
2817
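/*
 * Illustrative usage sketch (annotation, not part of the driver source):
 * queueing a write and handling its completion.  The queued packet stores
 * a pointer to @data (pkt->payload above) rather than a copy, so the
 * buffer must remain valid until SMUX_WRITE_DONE or SMUX_WRITE_FAIL is
 * delivered through the notify callback.  MY_LCID, buf, and len are
 * hypothetical.
 *
 *	ret = msm_smux_write(MY_LCID, buf, buf, len);
 *	if (ret == -EAGAIN)
 *		pr_info("high watermark hit; retry after SMUX_LOW_WM_HIT\n");
 *
 * On SMUX_WRITE_DONE / SMUX_WRITE_FAIL the notification metadata carries
 * back the pkt_priv, buffer, and len that were queued (see meta_write in
 * smux_tx_pkt() above), so the buffer can be freed or reused there.
 */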
2818/**
2819 * Returns true if the TX queue is currently full (high water mark).
2820 *
2821 * @lcid Logical channel ID
2822 * @returns 0 if channel is not full
2823 * 1 if it is full
2824 * < 0 for error
2825 */
2826int msm_smux_is_ch_full(uint8_t lcid)
2827{
2828 struct smux_lch_t *ch;
2829 unsigned long flags;
2830 int is_full = 0;
2831
2832 if (smux_assert_lch_id(lcid))
2833 return -ENXIO;
2834
2835 ch = &smux_lch[lcid];
2836
2837 spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
2838 if (ch->tx_pending_data_cnt >= SMUX_WM_HIGH)
2839 is_full = 1;
2840 spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);
2841
2842 return is_full;
2843}
2844
2845/**
2846 * Returns true if the TX queue has space for more packets (it is at or
2847 * below the low water mark).
2848 *
2849 * @lcid Logical channel ID
2850 * @returns 0 if channel is above low watermark
2851 * 1 if it's at or below the low watermark
2852 * < 0 for error
2853 */
2854int msm_smux_is_ch_low(uint8_t lcid)
2855{
2856 struct smux_lch_t *ch;
2857 unsigned long flags;
2858 int is_low = 0;
2859
2860 if (smux_assert_lch_id(lcid))
2861 return -ENXIO;
2862
2863 ch = &smux_lch[lcid];
2864
2865 spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
2866 if (ch->tx_pending_data_cnt <= SMUX_WM_LOW)
2867 is_low = 1;
2868 spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);
2869
2870 return is_low;
2871}
2872
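/*
 * Illustrative usage note (annotation, not part of the driver source):
 * a client that sees msm_smux_is_ch_full() return 1, or that receives a
 * SMUX_HIGH_WM_HIT notification, should stop submitting writes and resume
 * once a SMUX_LOW_WM_HIT notification arrives or msm_smux_is_ch_low()
 * returns 1.
 */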
2873/**
2874 * Send TIOCM status update.
2875 *
2876 * @ch Channel for update
2877 *
2878 * @returns 0 for success, <0 for failure
2879 *
2880 * Channel lock must be held before calling.
2881 */
2882static int smux_send_status_cmd(struct smux_lch_t *ch)
2883{
2884 struct smux_pkt_t *pkt;
2885
2886 if (!ch)
2887 return -EINVAL;
2888
2889 pkt = smux_alloc_pkt();
2890 if (!pkt)
2891 return -ENOMEM;
2892
2893 pkt->hdr.lcid = ch->lcid;
2894 pkt->hdr.cmd = SMUX_CMD_STATUS;
2895 pkt->hdr.flags = ch->local_tiocm;
2896 pkt->hdr.payload_len = 0;
2897 pkt->hdr.pad_len = 0;
2898 smux_tx_queue(pkt, ch, 0);
2899
2900 return 0;
2901}
2902
2903/**
2904 * Internal helper function for getting the TIOCM status with
2905 * state_lock_lhb1 already locked.
2906 *
2907 * @ch Channel pointer
2908 *
2909 * @returns TIOCM status
2910 */
2911static long msm_smux_tiocm_get_atomic(struct smux_lch_t *ch)
2912{
2913 long status = 0x0;
2914
2915 status |= (ch->remote_tiocm & SMUX_CMD_STATUS_RTC) ? TIOCM_DSR : 0;
2916 status |= (ch->remote_tiocm & SMUX_CMD_STATUS_RTR) ? TIOCM_CTS : 0;
2917 status |= (ch->remote_tiocm & SMUX_CMD_STATUS_RI) ? TIOCM_RI : 0;
2918 status |= (ch->remote_tiocm & SMUX_CMD_STATUS_DCD) ? TIOCM_CD : 0;
2919
2920 status |= (ch->local_tiocm & SMUX_CMD_STATUS_RTC) ? TIOCM_DTR : 0;
2921 status |= (ch->local_tiocm & SMUX_CMD_STATUS_RTR) ? TIOCM_RTS : 0;
2922
2923 return status;
2924}
2925
2926/**
2927 * Get the TIOCM status bits.
2928 *
2929 * @lcid Logical channel ID
2930 *
2931 * @returns >= 0 TIOCM status bits
2932 * < 0 Error condition
2933 */
2934long msm_smux_tiocm_get(uint8_t lcid)
2935{
2936 struct smux_lch_t *ch;
2937 unsigned long flags;
2938 long status = 0x0;
2939
2940 if (smux_assert_lch_id(lcid))
2941 return -ENXIO;
2942
2943 ch = &smux_lch[lcid];
2944 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2945 status = msm_smux_tiocm_get_atomic(ch);
2946 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2947
2948 return status;
2949}
2950
2951/**
2952 * Set/clear the TIOCM status bits.
2953 *
2954 * @lcid Logical channel ID
2955 * @set Bits to set
2956 * @clear Bits to clear
2957 *
2958 * @returns 0 for success; < 0 for failure
2959 *
2960 * If a bit is specified in both the @set and @clear masks, then the clear bit
2961 * definition will dominate and the bit will be cleared.
2962 */
2963int msm_smux_tiocm_set(uint8_t lcid, uint32_t set, uint32_t clear)
2964{
2965 struct smux_lch_t *ch;
2966 unsigned long flags;
2967 uint8_t old_status;
2968 uint8_t status_set = 0x0;
2969 uint8_t status_clear = 0x0;
2970 int tx_ready = 0;
2971 int ret = 0;
2972
2973 if (smux_assert_lch_id(lcid))
2974 return -ENXIO;
2975
2976 ch = &smux_lch[lcid];
2977 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2978
2979 status_set |= (set & TIOCM_DTR) ? SMUX_CMD_STATUS_RTC : 0;
2980 status_set |= (set & TIOCM_RTS) ? SMUX_CMD_STATUS_RTR : 0;
2981 status_set |= (set & TIOCM_RI) ? SMUX_CMD_STATUS_RI : 0;
2982 status_set |= (set & TIOCM_CD) ? SMUX_CMD_STATUS_DCD : 0;
2983
2984 status_clear |= (clear & TIOCM_DTR) ? SMUX_CMD_STATUS_RTC : 0;
2985 status_clear |= (clear & TIOCM_RTS) ? SMUX_CMD_STATUS_RTR : 0;
2986 status_clear |= (clear & TIOCM_RI) ? SMUX_CMD_STATUS_RI : 0;
2987 status_clear |= (clear & TIOCM_CD) ? SMUX_CMD_STATUS_DCD : 0;
2988
2989 old_status = ch->local_tiocm;
2990 ch->local_tiocm |= status_set;
2991 ch->local_tiocm &= ~status_clear;
2992
2993 if (ch->local_tiocm != old_status) {
2994 ret = smux_send_status_cmd(ch);
2995 tx_ready = 1;
2996 }
2997 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2998
2999 if (tx_ready)
3000 list_channel(ch);
3001
3002 return ret;
3003}
3004
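/*
 * Illustrative usage sketch (annotation, not part of the driver source):
 * asserting local DTR/RTS and checking the remote side's status bits.
 * MY_LCID and status are hypothetical.
 *
 *	msm_smux_tiocm_set(MY_LCID, TIOCM_DTR | TIOCM_RTS, 0);
 *	status = msm_smux_tiocm_get(MY_LCID);
 *	if (status & TIOCM_CTS)
 *		pr_info("remote side is ready to receive (RTR set)\n");
 */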
3005/**********************************************************************/
Eric Holmberged1f00c2012-06-07 09:45:18 -06003006/* Subsystem Restart */
3007/**********************************************************************/
3008static struct notifier_block ssr_notifier = {
3009 .notifier_call = ssr_notifier_cb,
3010};
3011
3012/**
3013 * Handle Subsystem Restart (SSR) notifications.
3014 *
3015 * @this Pointer to ssr_notifier
3016 * @code SSR Code
3017 * @data Data pointer (not used)
3018 */
3019static int ssr_notifier_cb(struct notifier_block *this,
3020 unsigned long code,
3021 void *data)
3022{
3023 unsigned long flags;
3024 int power_off_uart = 0;
3025
3026 if (code != SUBSYS_AFTER_SHUTDOWN)
3027 return NOTIFY_DONE;
3028
3029 /* Cleanup channels */
3030 smux_lch_purge();
3031
3032 /* Power-down UART */
3033 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
3034 if (smux.power_state != SMUX_PWR_OFF) {
3035 SMUX_DBG("%s: SSR - turning off UART\n", __func__);
3036 smux.power_state = SMUX_PWR_OFF;
3037 power_off_uart = 1;
3038 }
3039 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
3040
3041 if (power_off_uart)
3042 smux_uart_power_off();
3043
3044 return NOTIFY_DONE;
3045}
3046
3047/**********************************************************************/
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003048/* Line Discipline Interface */
3049/**********************************************************************/
Eric Holmberged1f00c2012-06-07 09:45:18 -06003050static void smux_pdev_release(struct device *dev)
3051{
3052 struct platform_device *pdev;
3053
3054 pdev = container_of(dev, struct platform_device, dev);
3055 SMUX_DBG("%s: releasing pdev %p '%s'\n", __func__, pdev, pdev->name);
3056 memset(&pdev->dev, 0x0, sizeof(pdev->dev));
3057}
3058
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003059static int smuxld_open(struct tty_struct *tty)
3060{
3061 int i;
3062 int tmp;
3063 unsigned long flags;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003064
3065 if (!smux.is_initialized)
3066 return -ENODEV;
3067
Eric Holmberged1f00c2012-06-07 09:45:18 -06003068 mutex_lock(&smux.mutex_lha0);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003069 if (smux.ld_open_count) {
3070 pr_err("%s: %p multiple instances not supported\n",
3071 __func__, tty);
Eric Holmberged1f00c2012-06-07 09:45:18 -06003072 mutex_unlock(&smux.mutex_lha0);
Eric Holmberg902c51e2012-05-29 12:12:16 -06003073 return -EEXIST;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003074 }
3075
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003076 if (tty->ops->write == NULL) {
Eric Holmberged1f00c2012-06-07 09:45:18 -06003077		pr_err("%s: tty->ops->write is NULL\n", __func__);
3078 mutex_unlock(&smux.mutex_lha0);
Eric Holmberg902c51e2012-05-29 12:12:16 -06003079 return -EINVAL;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003080 }
3081
3082 /* connect to TTY */
Eric Holmberged1f00c2012-06-07 09:45:18 -06003083 ++smux.ld_open_count;
3084 smux.in_reset = 0;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003085 smux.tty = tty;
3086 tty->disc_data = &smux;
3087 tty->receive_room = TTY_RECEIVE_ROOM;
3088 tty_driver_flush_buffer(tty);
3089
3090 /* power-down the UART if we are idle */
Eric Holmberged1f00c2012-06-07 09:45:18 -06003091 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003092 if (smux.power_state == SMUX_PWR_OFF) {
3093 SMUX_DBG("%s: powering off uart\n", __func__);
3094 smux.power_state = SMUX_PWR_OFF_FLUSH;
Eric Holmberged1f00c2012-06-07 09:45:18 -06003095 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003096 queue_work(smux_tx_wq, &smux_inactivity_work);
3097 } else {
Eric Holmberged1f00c2012-06-07 09:45:18 -06003098 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003099 }
3100
3101 /* register platform devices */
3102 for (i = 0; i < ARRAY_SIZE(smux_devs); ++i) {
Eric Holmberged1f00c2012-06-07 09:45:18 -06003103 SMUX_DBG("%s: register pdev '%s'\n",
3104 __func__, smux_devs[i].name);
3105 smux_devs[i].dev.release = smux_pdev_release;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003106 tmp = platform_device_register(&smux_devs[i]);
3107 if (tmp)
3108 pr_err("%s: error %d registering device %s\n",
3109 __func__, tmp, smux_devs[i].name);
3110 }
Eric Holmberged1f00c2012-06-07 09:45:18 -06003111 mutex_unlock(&smux.mutex_lha0);
Eric Holmberg902c51e2012-05-29 12:12:16 -06003112 return 0;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003113}
3114
3115static void smuxld_close(struct tty_struct *tty)
3116{
3117 unsigned long flags;
Eric Holmberged1f00c2012-06-07 09:45:18 -06003118 int power_up_uart = 0;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003119 int i;
3120
Eric Holmberged1f00c2012-06-07 09:45:18 -06003121 SMUX_DBG("%s: ldisc unload\n", __func__);
3122 mutex_lock(&smux.mutex_lha0);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003123 if (smux.ld_open_count <= 0) {
3124 pr_err("%s: invalid ld count %d\n", __func__,
3125 smux.ld_open_count);
Eric Holmberged1f00c2012-06-07 09:45:18 -06003126 mutex_unlock(&smux.mutex_lha0);
Eric Holmberg902c51e2012-05-29 12:12:16 -06003127 return;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003128 }
Eric Holmberged1f00c2012-06-07 09:45:18 -06003129 smux.in_reset = 1;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003130 --smux.ld_open_count;
Eric Holmberged1f00c2012-06-07 09:45:18 -06003131
3132 /* Cleanup channels */
3133 smux_lch_purge();
3134
3135 /* Unregister platform devices */
3136 for (i = 0; i < ARRAY_SIZE(smux_devs); ++i) {
3137 SMUX_DBG("%s: unregister pdev '%s'\n",
3138 __func__, smux_devs[i].name);
3139 platform_device_unregister(&smux_devs[i]);
3140 }
3141
3142 /* Schedule UART power-up if it's down */
3143 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
3144 if (smux.power_state == SMUX_PWR_OFF ||
3145 smux.power_state == SMUX_PWR_OFF_FLUSH) {
3146 smux.power_state = SMUX_PWR_OFF;
3147 power_up_uart = 1;
3148 } else {
3149 smux.power_state = SMUX_PWR_OFF;
3150 }
3151 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
3152
3153 if (power_up_uart)
3154 smux_uart_power_on();
3155
3156 /* Disconnect from TTY */
3157 smux.tty = NULL;
3158 mutex_unlock(&smux.mutex_lha0);
3159 SMUX_DBG("%s: ldisc complete\n", __func__);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003160}
3161
3162/**
3163 * Receive data from TTY Line Discipline.
3164 *
3165 * @tty TTY structure
3166 * @cp Character data
3167 * @fp Flag data
3168 * @count Size of character and flag data
3169 */
3170void smuxld_receive_buf(struct tty_struct *tty, const unsigned char *cp,
3171 char *fp, int count)
3172{
3173 int i;
3174 int last_idx = 0;
3175 const char *tty_name = NULL;
3176 char *f;
3177
3178 if (smux_debug_mask & MSM_SMUX_DEBUG)
3179 print_hex_dump(KERN_INFO, "smux tty rx: ", DUMP_PREFIX_OFFSET,
3180 16, 1, cp, count, true);
3181
3182 /* verify error flags */
3183 for (i = 0, f = fp; i < count; ++i, ++f) {
3184 if (*f != TTY_NORMAL) {
3185 if (tty)
3186 tty_name = tty->name;
3187 pr_err("%s: TTY %s Error %d (%s)\n", __func__,
3188 tty_name, *f, tty_flag_to_str(*f));
3189
3190 /* feed all previous valid data to the parser */
3191 smux_rx_state_machine(cp + last_idx, i - last_idx,
3192 TTY_NORMAL);
3193
3194 /* feed bad data to parser */
3195 smux_rx_state_machine(cp + i, 1, *f);
3196 last_idx = i + 1;
3197 }
3198 }
3199
3200 /* feed data to RX state machine */
3201 smux_rx_state_machine(cp + last_idx, count - last_idx, TTY_NORMAL);
3202}
3203
3204static void smuxld_flush_buffer(struct tty_struct *tty)
3205{
3206 pr_err("%s: not supported\n", __func__);
3207}
3208
3209static ssize_t smuxld_chars_in_buffer(struct tty_struct *tty)
3210{
3211 pr_err("%s: not supported\n", __func__);
3212 return -ENODEV;
3213}
3214
3215static ssize_t smuxld_read(struct tty_struct *tty, struct file *file,
3216 unsigned char __user *buf, size_t nr)
3217{
3218 pr_err("%s: not supported\n", __func__);
3219 return -ENODEV;
3220}
3221
3222static ssize_t smuxld_write(struct tty_struct *tty, struct file *file,
3223 const unsigned char *buf, size_t nr)
3224{
3225 pr_err("%s: not supported\n", __func__);
3226 return -ENODEV;
3227}
3228
3229static int smuxld_ioctl(struct tty_struct *tty, struct file *file,
3230 unsigned int cmd, unsigned long arg)
3231{
3232 pr_err("%s: not supported\n", __func__);
3233 return -ENODEV;
3234}
3235
3236static unsigned int smuxld_poll(struct tty_struct *tty, struct file *file,
3237 struct poll_table_struct *tbl)
3238{
3239 pr_err("%s: not supported\n", __func__);
3240 return -ENODEV;
3241}
3242
3243static void smuxld_write_wakeup(struct tty_struct *tty)
3244{
3245 pr_err("%s: not supported\n", __func__);
3246}
3247
3248static struct tty_ldisc_ops smux_ldisc_ops = {
3249 .owner = THIS_MODULE,
3250 .magic = TTY_LDISC_MAGIC,
3251 .name = "n_smux",
3252 .open = smuxld_open,
3253 .close = smuxld_close,
3254 .flush_buffer = smuxld_flush_buffer,
3255 .chars_in_buffer = smuxld_chars_in_buffer,
3256 .read = smuxld_read,
3257 .write = smuxld_write,
3258 .ioctl = smuxld_ioctl,
3259 .poll = smuxld_poll,
3260 .receive_buf = smuxld_receive_buf,
3261 .write_wakeup = smuxld_write_wakeup
3262};
3263
3264static int __init smux_init(void)
3265{
3266 int ret;
3267
Eric Holmberged1f00c2012-06-07 09:45:18 -06003268 mutex_init(&smux.mutex_lha0);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003269
3270 spin_lock_init(&smux.rx_lock_lha1);
3271 smux.rx_state = SMUX_RX_IDLE;
3272 smux.power_state = SMUX_PWR_OFF;
3273 smux.pwr_wakeup_delay_us = 1;
3274 smux.powerdown_enabled = 0;
3275 smux.rx_activity_flag = 0;
3276 smux.tx_activity_flag = 0;
3277 smux.recv_len = 0;
3278 smux.tty = NULL;
3279 smux.ld_open_count = 0;
3280 smux.in_reset = 0;
3281 smux.is_initialized = 1;
3282 smux_byte_loopback = 0;
3283
3284 spin_lock_init(&smux.tx_lock_lha2);
3285 INIT_LIST_HEAD(&smux.lch_tx_ready_list);
3286
3287 ret = tty_register_ldisc(N_SMUX, &smux_ldisc_ops);
3288 if (ret != 0) {
3289 pr_err("%s: error %d registering line discipline\n",
3290 __func__, ret);
3291 return ret;
3292 }
3293
Eric Holmberged1f00c2012-06-07 09:45:18 -06003294 subsys_notif_register_notifier("qsc", &ssr_notifier);
3295
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003296 ret = lch_init();
3297 if (ret != 0) {
3298 pr_err("%s: lch_init failed\n", __func__);
3299 return ret;
3300 }
3301
3302 return 0;
3303}
3304
3305static void __exit smux_exit(void)
3306{
3307 int ret;
3308
3309 ret = tty_unregister_ldisc(N_SMUX);
3310 if (ret != 0) {
3311 pr_err("%s error %d unregistering line discipline\n",
3312 __func__, ret);
3313 return;
3314 }
3315}
3316
3317module_init(smux_init);
3318module_exit(smux_exit);
3319
3320MODULE_DESCRIPTION("Serial Mux TTY Line Discipline");
3321MODULE_LICENSE("GPL v2");
3322MODULE_ALIAS_LDISC(N_SMUX);