/* drivers/tty/n_smux.c
 *
 * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/tty_driver.h>
#include <linux/smux.h>
#include <linux/list.h>
#include <linux/kfifo.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <mach/subsystem_notif.h>
#include <mach/subsystem_restart.h>
#include <mach/msm_serial_hs.h>
#include "smux_private.h"
#include "smux_loopback.h"

#define SMUX_NOTIFY_FIFO_SIZE	128
#define SMUX_TX_QUEUE_SIZE	256
#define SMUX_WM_LOW 2
#define SMUX_WM_HIGH 4
#define SMUX_PKT_LOG_SIZE 80

/* Maximum size we can accept in a single RX buffer */
#define TTY_RECEIVE_ROOM 65536
#define TTY_BUFFER_FULL_WAIT_MS 50

/* maximum sleep time between wakeup attempts */
#define SMUX_WAKEUP_DELAY_MAX (1 << 20)

/* minimum delay for scheduling delayed work */
#define SMUX_WAKEUP_DELAY_MIN (1 << 15)

/* inactivity timeout for no rx/tx activity */
#define SMUX_INACTIVITY_TIMEOUT_MS 1000

/* RX get_rx_buffer retry timeout values */
#define SMUX_RX_RETRY_MIN_MS (1 << 0)  /* 1 ms */
#define SMUX_RX_RETRY_MAX_MS (1 << 10) /* 1024 ms */

enum {
	MSM_SMUX_DEBUG = 1U << 0,
	MSM_SMUX_INFO = 1U << 1,
	MSM_SMUX_POWER_INFO = 1U << 2,
	MSM_SMUX_PKT = 1U << 3,
};

static int smux_debug_mask;
module_param_named(debug_mask, smux_debug_mask,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);

/* Simulated wakeup used for testing */
int smux_byte_loopback;
module_param_named(byte_loopback, smux_byte_loopback,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);
int smux_simulate_wakeup_delay = 1;
module_param_named(simulate_wakeup_delay, smux_simulate_wakeup_delay,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);

#define SMUX_DBG(x...) do { \
	if (smux_debug_mask & MSM_SMUX_DEBUG) \
		pr_info(x); \
} while (0)

#define SMUX_LOG_PKT_RX(pkt) do { \
	if (smux_debug_mask & MSM_SMUX_PKT) \
		smux_log_pkt(pkt, 1); \
} while (0)

#define SMUX_LOG_PKT_TX(pkt) do { \
	if (smux_debug_mask & MSM_SMUX_PKT) \
		smux_log_pkt(pkt, 0); \
} while (0)

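/*
 * Debugging example (illustrative only, not part of the driver logic):
 * because debug_mask is exported above as a writable module parameter,
 * logging can be enabled at runtime, e.g. (the exact path depends on the
 * final module name):
 *
 *   echo 0x9 > /sys/module/n_smux/parameters/debug_mask
 *
 * 0x9 sets MSM_SMUX_DEBUG (general debug via SMUX_DBG) and MSM_SMUX_PKT
 * (packet dumps via SMUX_LOG_PKT_RX/SMUX_LOG_PKT_TX).
 */
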
/**
 * Return true if channel is fully opened (both
 * local and remote sides are in the OPENED state).
 */
#define IS_FULLY_OPENED(ch) \
	(ch && (ch)->local_state == SMUX_LCH_LOCAL_OPENED \
	 && (ch)->remote_state == SMUX_LCH_REMOTE_OPENED)

static struct platform_device smux_devs[] = {
	{.name = "SMUX_CTL", .id = -1},
	{.name = "SMUX_RMNET", .id = -1},
	{.name = "SMUX_DUN_DATA_HSUART", .id = 0},
	{.name = "SMUX_RMNET_DATA_HSUART", .id = 1},
	{.name = "SMUX_RMNET_CTL_HSUART", .id = 0},
	{.name = "SMUX_DIAG", .id = -1},
};

enum {
	SMUX_CMD_STATUS_RTC = 1 << 0,
	SMUX_CMD_STATUS_RTR = 1 << 1,
	SMUX_CMD_STATUS_RI = 1 << 2,
	SMUX_CMD_STATUS_DCD = 1 << 3,
	SMUX_CMD_STATUS_FLOW_CNTL = 1 << 4,
};

/* Channel mode */
enum {
	SMUX_LCH_MODE_NORMAL,
	SMUX_LCH_MODE_LOCAL_LOOPBACK,
	SMUX_LCH_MODE_REMOTE_LOOPBACK,
};

enum {
	SMUX_RX_IDLE,
	SMUX_RX_MAGIC,
	SMUX_RX_HDR,
	SMUX_RX_PAYLOAD,
	SMUX_RX_FAILURE,
};

/**
 * Power states.
 *
 * The _FLUSH states are internal transitional states and are not part of the
 * official state machine.
 */
enum {
	SMUX_PWR_OFF,
	SMUX_PWR_TURNING_ON,
	SMUX_PWR_ON,
	SMUX_PWR_TURNING_OFF_FLUSH,
	SMUX_PWR_TURNING_OFF,
	SMUX_PWR_OFF_FLUSH,
};
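
/*
 * Typical sequence, as suggested by the wakeup and SMUX_CMD_PWR_CTL handlers
 * later in this file (a reading aid, not a normative state machine
 * definition):
 *
 *   SMUX_PWR_OFF -> SMUX_PWR_TURNING_ON -> SMUX_PWR_ON ->
 *   SMUX_PWR_TURNING_OFF_FLUSH -> SMUX_PWR_TURNING_OFF ->
 *   SMUX_PWR_OFF_FLUSH -> SMUX_PWR_OFF
 */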

/**
 * Logical Channel Structure.  One instance per channel.
 *
 * Locking Hierarchy
 * Each lock has a postfix that describes the locking level.  If multiple
 * locks are required, they must be acquired in order of increasing lock
 * hierarchy number, which avoids deadlock.
 *
 * Locking Example
 * If state_lock_lhb1 is currently held and the TX list needs to be
 * manipulated, then tx_lock_lhb2 may be locked since its locking hierarchy
 * is greater.  However, if tx_lock_lhb2 is held, then state_lock_lhb1 may
 * not be acquired since it would result in a deadlock.
 *
 * Note that the Line Discipline locks (*_lha) should always be acquired
 * before the logical channel locks.
 */
struct smux_lch_t {
	/* channel state */
	spinlock_t state_lock_lhb1;
	uint8_t lcid;
	unsigned local_state;
	unsigned local_mode;
	uint8_t local_tiocm;

	unsigned remote_state;
	unsigned remote_mode;
	uint8_t remote_tiocm;

	int tx_flow_control;

	/* client callbacks and private data */
	void *priv;
	void (*notify)(void *priv, int event_type, const void *metadata);
	int (*get_rx_buffer)(void *priv, void **pkt_priv, void **buffer,
								int size);

	/* RX Info */
	struct list_head rx_retry_queue;
	unsigned rx_retry_queue_cnt;
	struct delayed_work rx_retry_work;

	/* TX Info */
	spinlock_t tx_lock_lhb2;
	struct list_head tx_queue;
	struct list_head tx_ready_list;
	unsigned tx_pending_data_cnt;
	unsigned notify_lwm;
};

union notifier_metadata {
	struct smux_meta_disconnected disconnected;
	struct smux_meta_read read;
	struct smux_meta_write write;
	struct smux_meta_tiocm tiocm;
};

struct smux_notify_handle {
	void (*notify)(void *priv, int event_type, const void *metadata);
	void *priv;
	int event_type;
	union notifier_metadata *metadata;
};

/**
 * Get RX Buffer Retry structure.
 *
 * This is used for clients that are unable to provide an RX buffer
 * immediately.  The structure temporarily holds the packet data so that
 * delivery can be retried later.
 */
struct smux_rx_pkt_retry {
	struct smux_pkt_t *pkt;
	struct list_head rx_retry_list;
	unsigned timeout_in_ms;
};
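
/*
 * For reference, a minimal client-side get_rx_buffer() callback might look
 * like the sketch below (hypothetical client code, not part of this driver).
 * Returning -EAGAIN, or 0 with a NULL buffer, causes the incoming packet to
 * be queued on rx_retry_queue and delivered again later; the initial retry
 * delay is SMUX_RX_RETRY_MIN_MS and SMUX_RX_RETRY_MAX_MS appears to cap the
 * backoff (see smux_handle_rx_data_cmd and smux_rx_retry_worker):
 *
 *	static int client_get_rx_buffer(void *priv, void **pkt_priv,
 *					void **buffer, int size)
 *	{
 *		void *buf = kmalloc(size, GFP_ATOMIC);
 *
 *		if (!buf)
 *			return -EAGAIN;		// try again later
 *		*pkt_priv = NULL;
 *		*buffer = buf;
 *		return 0;
 *	}
 */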

/**
 * Receive worker data structure.
 *
 * One instance is created for every call to smux_rx_state_machine.
 */
struct smux_rx_worker_data {
	const unsigned char *data;
	int len;
	int flag;

	struct work_struct work;
	struct completion work_complete;
};

/**
 * Line discipline and module structure.
 *
 * Only one instance exists since multiple instances of the line discipline
 * are not allowed.
 */
struct smux_ldisc_t {
	struct mutex mutex_lha0;

	int is_initialized;
	int in_reset;
	int ld_open_count;
	struct tty_struct *tty;

	/* RX State Machine (single-threaded access by smux_rx_wq) */
	unsigned char recv_buf[SMUX_MAX_PKT_SIZE];
	unsigned int recv_len;
	unsigned int pkt_remain;
	unsigned rx_state;

	/* RX Activity - accessed by multiple threads */
	spinlock_t rx_lock_lha1;
	unsigned rx_activity_flag;

	/* TX / Power */
	spinlock_t tx_lock_lha2;
	struct list_head lch_tx_ready_list;
	unsigned power_state;
	unsigned pwr_wakeup_delay_us;
	unsigned tx_activity_flag;
	unsigned powerdown_enabled;
	struct list_head power_queue;
};


/* data structures */
static struct smux_lch_t smux_lch[SMUX_NUM_LOGICAL_CHANNELS];
static struct smux_ldisc_t smux;
static const char *tty_error_type[] = {
	[TTY_NORMAL] = "normal",
	[TTY_OVERRUN] = "overrun",
	[TTY_BREAK] = "break",
	[TTY_PARITY] = "parity",
	[TTY_FRAME] = "framing",
};

static const char *smux_cmds[] = {
	[SMUX_CMD_DATA] = "DATA",
	[SMUX_CMD_OPEN_LCH] = "OPEN",
	[SMUX_CMD_CLOSE_LCH] = "CLOSE",
	[SMUX_CMD_STATUS] = "STATUS",
	[SMUX_CMD_PWR_CTL] = "PWR",
	[SMUX_CMD_BYTE] = "Raw Byte",
};

static void smux_notify_local_fn(struct work_struct *work);
static DECLARE_WORK(smux_notify_local, smux_notify_local_fn);

static struct workqueue_struct *smux_notify_wq;
static size_t handle_size;
static struct kfifo smux_notify_fifo;
static int queued_fifo_notifications;
static DEFINE_SPINLOCK(notify_lock_lhc1);

static struct workqueue_struct *smux_tx_wq;
static struct workqueue_struct *smux_rx_wq;
static void smux_tx_worker(struct work_struct *work);
static DECLARE_WORK(smux_tx_work, smux_tx_worker);

static void smux_wakeup_worker(struct work_struct *work);
static void smux_rx_retry_worker(struct work_struct *work);
static void smux_rx_worker(struct work_struct *work);
static DECLARE_WORK(smux_wakeup_work, smux_wakeup_worker);
static DECLARE_DELAYED_WORK(smux_wakeup_delayed_work, smux_wakeup_worker);

static void smux_inactivity_worker(struct work_struct *work);
static DECLARE_WORK(smux_inactivity_work, smux_inactivity_worker);
static DECLARE_DELAYED_WORK(smux_delayed_inactivity_work,
		smux_inactivity_worker);

static long msm_smux_tiocm_get_atomic(struct smux_lch_t *ch);
static void list_channel(struct smux_lch_t *ch);
static int smux_send_status_cmd(struct smux_lch_t *ch);
static int smux_dispatch_rx_pkt(struct smux_pkt_t *pkt);
static void smux_flush_tty(void);
static void smux_purge_ch_tx_queue(struct smux_lch_t *ch);
static int schedule_notify(uint8_t lcid, int event,
			const union notifier_metadata *metadata);
static int ssr_notifier_cb(struct notifier_block *this,
				unsigned long code,
				void *data);

/**
 * Convert TTY Error Flags to string for logging purposes.
 *
 * @flag    TTY_* flag
 * @returns String description or NULL if unknown
 */
static const char *tty_flag_to_str(unsigned flag)
{
	if (flag < ARRAY_SIZE(tty_error_type))
		return tty_error_type[flag];
	return NULL;
}

/**
 * Convert SMUX Command to string for logging purposes.
 *
 * @cmd     SMUX command
 * @returns String description or NULL if unknown
 */
static const char *cmd_to_str(unsigned cmd)
{
	if (cmd < ARRAY_SIZE(smux_cmds))
		return smux_cmds[cmd];
	return NULL;
}

/**
 * Set the reset state due to an unrecoverable failure.
 */
static void smux_enter_reset(void)
{
	pr_err("%s: unrecoverable failure, waiting for ssr\n", __func__);
	smux.in_reset = 1;
}

static int lch_init(void)
{
	unsigned int id;
	struct smux_lch_t *ch;
	int i = 0;

	handle_size = sizeof(struct smux_notify_handle *);

	smux_notify_wq = create_singlethread_workqueue("smux_notify_wq");
	smux_tx_wq = create_singlethread_workqueue("smux_tx_wq");
	smux_rx_wq = create_singlethread_workqueue("smux_rx_wq");

	/* create_singlethread_workqueue() returns NULL on failure */
	if (!smux_notify_wq || !smux_tx_wq || !smux_rx_wq) {
		SMUX_DBG("%s: create_singlethread_workqueue ENOMEM\n",
				__func__);
		return -ENOMEM;
	}

	i |= kfifo_alloc(&smux_notify_fifo,
			SMUX_NOTIFY_FIFO_SIZE * handle_size,
			GFP_KERNEL);
	i |= smux_loopback_init();

	if (i) {
		pr_err("%s: out of memory error\n", __func__);
		return -ENOMEM;
	}

	for (id = 0 ; id < SMUX_NUM_LOGICAL_CHANNELS; id++) {
		ch = &smux_lch[id];

		spin_lock_init(&ch->state_lock_lhb1);
		ch->lcid = id;
		ch->local_state = SMUX_LCH_LOCAL_CLOSED;
		ch->local_mode = SMUX_LCH_MODE_NORMAL;
		ch->local_tiocm = 0x0;
		ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
		ch->remote_mode = SMUX_LCH_MODE_NORMAL;
		ch->remote_tiocm = 0x0;
		ch->tx_flow_control = 0;
		ch->priv = 0;
		ch->notify = 0;
		ch->get_rx_buffer = 0;

		INIT_LIST_HEAD(&ch->rx_retry_queue);
		ch->rx_retry_queue_cnt = 0;
		INIT_DELAYED_WORK(&ch->rx_retry_work, smux_rx_retry_worker);

		spin_lock_init(&ch->tx_lock_lhb2);
		INIT_LIST_HEAD(&ch->tx_queue);
		INIT_LIST_HEAD(&ch->tx_ready_list);
		ch->tx_pending_data_cnt = 0;
		ch->notify_lwm = 0;
	}

	return 0;
}

/**
 * Empty and cleanup all SMUX logical channels for subsystem restart or line
 * discipline disconnect.
 */
static void smux_lch_purge(void)
{
	struct smux_lch_t *ch;
	unsigned long flags;
	int i;

	/* Empty TX ready list */
	spin_lock_irqsave(&smux.tx_lock_lha2, flags);
	while (!list_empty(&smux.lch_tx_ready_list)) {
		SMUX_DBG("%s: emptying ready list %p\n",
				__func__, smux.lch_tx_ready_list.next);
		ch = list_first_entry(&smux.lch_tx_ready_list,
						struct smux_lch_t,
						tx_ready_list);
		list_del(&ch->tx_ready_list);
		INIT_LIST_HEAD(&ch->tx_ready_list);
	}

	/* Purge Power Queue */
	while (!list_empty(&smux.power_queue)) {
		struct smux_pkt_t *pkt;

		pkt = list_first_entry(&smux.power_queue,
						struct smux_pkt_t,
						list);
		SMUX_DBG("%s: emptying power queue pkt=%p\n",
				__func__, pkt);
		smux_free_pkt(pkt);
	}
	spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);

	/* Close all ports */
	for (i = 0 ; i < SMUX_NUM_LOGICAL_CHANNELS; i++) {
		ch = &smux_lch[i];
		SMUX_DBG("%s: cleaning up lcid %d\n", __func__, i);

		spin_lock_irqsave(&ch->state_lock_lhb1, flags);

		/* Purge TX queue */
		spin_lock(&ch->tx_lock_lhb2);
		smux_purge_ch_tx_queue(ch);
		spin_unlock(&ch->tx_lock_lhb2);

		/* Notify user of disconnect and reset channel state */
		if (ch->local_state == SMUX_LCH_LOCAL_OPENED ||
			ch->local_state == SMUX_LCH_LOCAL_CLOSING) {
			union notifier_metadata meta;

			meta.disconnected.is_ssr = smux.in_reset;
			schedule_notify(ch->lcid, SMUX_DISCONNECTED, &meta);
		}

		ch->local_state = SMUX_LCH_LOCAL_CLOSED;
		ch->local_mode = SMUX_LCH_MODE_NORMAL;
		ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
		ch->remote_mode = SMUX_LCH_MODE_NORMAL;
		ch->tx_flow_control = 0;

		/* Purge RX retry queue */
		if (ch->rx_retry_queue_cnt)
			queue_delayed_work(smux_rx_wq, &ch->rx_retry_work, 0);

		spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
	}

	/* Flush TX/RX workqueues */
	SMUX_DBG("%s: flushing tx wq\n", __func__);
	flush_workqueue(smux_tx_wq);
	SMUX_DBG("%s: flushing rx wq\n", __func__);
	flush_workqueue(smux_rx_wq);
}

int smux_assert_lch_id(uint32_t lcid)
{
	if (lcid >= SMUX_NUM_LOGICAL_CHANNELS)
		return -ENXIO;
	else
		return 0;
}

/**
 * Log packet information for debug purposes.
 *
 * @pkt     Packet to log
 * @is_recv 1 = RX packet; 0 = TX Packet
 *
 * [DIR][LCID] [LOCAL_STATE][LOCAL_MODE]:[REMOTE_STATE][REMOTE_MODE] PKT Info
 *
 * PKT Info:
 *   [CMD] flags [flags] len [PAYLOAD_LEN]:[PAD_LEN] [Payload hex bytes]
 *
 * Direction:    R = Receive, S = Send
 * Local State:  C = Closed; c = closing; o = opening; O = Opened
 * Local Mode:   L = Local loopback; R = Remote loopback; N = Normal
 * Remote State: C = Closed; O = Opened
 * Remote Mode:  R = Remote loopback; N = Normal
 */
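/*
 * Example log line produced by the snprintf format below (the values are
 * illustrative):
 *
 *   smux: R1 ON:ON DATA flags 0 len 4:0 01 02 03 04
 *
 * i.e. a received DATA packet on lcid 1, local state Opened/Normal mode,
 * remote state Opened/Normal mode, flags 0x0, 4 payload bytes, no padding.
 */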
static void smux_log_pkt(struct smux_pkt_t *pkt, int is_recv)
{
	char logbuf[SMUX_PKT_LOG_SIZE];
	char cmd_extra[16];
	int i = 0;
	int count;
	int len;
	char local_state;
	char local_mode;
	char remote_state;
	char remote_mode;
	struct smux_lch_t *ch;
	unsigned char *data;

	ch = &smux_lch[pkt->hdr.lcid];

	switch (ch->local_state) {
	case SMUX_LCH_LOCAL_CLOSED:
		local_state = 'C';
		break;
	case SMUX_LCH_LOCAL_OPENING:
		local_state = 'o';
		break;
	case SMUX_LCH_LOCAL_OPENED:
		local_state = 'O';
		break;
	case SMUX_LCH_LOCAL_CLOSING:
		local_state = 'c';
		break;
	default:
		local_state = 'U';
		break;
	}

	switch (ch->local_mode) {
	case SMUX_LCH_MODE_LOCAL_LOOPBACK:
		local_mode = 'L';
		break;
	case SMUX_LCH_MODE_REMOTE_LOOPBACK:
		local_mode = 'R';
		break;
	case SMUX_LCH_MODE_NORMAL:
		local_mode = 'N';
		break;
	default:
		local_mode = 'U';
		break;
	}

	switch (ch->remote_state) {
	case SMUX_LCH_REMOTE_CLOSED:
		remote_state = 'C';
		break;
	case SMUX_LCH_REMOTE_OPENED:
		remote_state = 'O';
		break;

	default:
		remote_state = 'U';
		break;
	}

	switch (ch->remote_mode) {
	case SMUX_LCH_MODE_REMOTE_LOOPBACK:
		remote_mode = 'R';
		break;
	case SMUX_LCH_MODE_NORMAL:
		remote_mode = 'N';
		break;
	default:
		remote_mode = 'U';
		break;
	}

	/* determine command type (ACK, etc) */
	cmd_extra[0] = '\0';
	switch (pkt->hdr.cmd) {
	case SMUX_CMD_OPEN_LCH:
		if (pkt->hdr.flags & SMUX_CMD_OPEN_ACK)
			snprintf(cmd_extra, sizeof(cmd_extra), " ACK");
		break;
	case SMUX_CMD_CLOSE_LCH:
		if (pkt->hdr.flags & SMUX_CMD_CLOSE_ACK)
			snprintf(cmd_extra, sizeof(cmd_extra), " ACK");
		break;
	};

	i += snprintf(logbuf + i, SMUX_PKT_LOG_SIZE - i,
			"smux: %c%d %c%c:%c%c %s%s flags %x len %d:%d ",
			is_recv ? 'R' : 'S', pkt->hdr.lcid,
			local_state, local_mode,
			remote_state, remote_mode,
			cmd_to_str(pkt->hdr.cmd), cmd_extra, pkt->hdr.flags,
			pkt->hdr.payload_len, pkt->hdr.pad_len);

	len = (pkt->hdr.payload_len > 16) ? 16 : pkt->hdr.payload_len;
	data = (unsigned char *)pkt->payload;
	for (count = 0; count < len; count++)
		i += snprintf(logbuf + i, SMUX_PKT_LOG_SIZE - i,
				"%02x ", (unsigned)data[count]);

	pr_info("%s\n", logbuf);
}

static void smux_notify_local_fn(struct work_struct *work)
{
	struct smux_notify_handle *notify_handle = NULL;
	union notifier_metadata *metadata = NULL;
	unsigned long flags;
	int i;

	for (;;) {
		/* retrieve notification */
		spin_lock_irqsave(&notify_lock_lhc1, flags);
		if (kfifo_len(&smux_notify_fifo) >= handle_size) {
			i = kfifo_out(&smux_notify_fifo,
					&notify_handle,
					handle_size);
			if (i != handle_size) {
				pr_err("%s: unable to retrieve handle %d expected %d\n",
						__func__, i, handle_size);
				spin_unlock_irqrestore(&notify_lock_lhc1, flags);
				break;
			}
		} else {
			spin_unlock_irqrestore(&notify_lock_lhc1, flags);
			break;
		}
		--queued_fifo_notifications;
		spin_unlock_irqrestore(&notify_lock_lhc1, flags);

		/* notify client */
		metadata = notify_handle->metadata;
		notify_handle->notify(notify_handle->priv,
				notify_handle->event_type,
				metadata);

		kfree(metadata);
		kfree(notify_handle);
	}
}

/**
 * Initialize existing packet.
 */
void smux_init_pkt(struct smux_pkt_t *pkt)
{
	memset(pkt, 0x0, sizeof(*pkt));
	pkt->hdr.magic = SMUX_MAGIC;
	INIT_LIST_HEAD(&pkt->list);
}

/**
 * Allocate and initialize packet.
 *
 * If a payload is needed, either set it directly and ensure that it's freed,
 * or use smux_alloc_pkt_payload() to allocate a payload that will be freed
 * automatically when smux_free_pkt() is called.
 */
struct smux_pkt_t *smux_alloc_pkt(void)
{
	struct smux_pkt_t *pkt;

	/* Consider a free list implementation instead of kmalloc */
	pkt = kmalloc(sizeof(struct smux_pkt_t), GFP_ATOMIC);
	if (!pkt) {
		pr_err("%s: out of memory\n", __func__);
		return NULL;
	}
	smux_init_pkt(pkt);
	pkt->allocated = 1;

	return pkt;
}

/**
 * Free packet.
 *
 * @pkt Packet to free (may be NULL)
 *
 * If payload was allocated using smux_alloc_pkt_payload(), then it is freed
 * as well.  Otherwise, the caller is responsible for freeing the payload.
 */
void smux_free_pkt(struct smux_pkt_t *pkt)
{
	if (pkt) {
		if (pkt->free_payload)
			kfree(pkt->payload);
		if (pkt->allocated)
			kfree(pkt);
	}
}

/**
 * Allocate packet payload.
 *
 * @pkt Packet to add payload to
 *
 * @returns 0 on success, <0 upon error
 *
 * A flag is set to signal smux_free_pkt() to free the payload.
 */
int smux_alloc_pkt_payload(struct smux_pkt_t *pkt)
{
	if (!pkt)
		return -EINVAL;

	pkt->payload = kmalloc(pkt->hdr.payload_len, GFP_ATOMIC);
	pkt->free_payload = 1;
	if (!pkt->payload) {
		pr_err("%s: unable to malloc %d bytes for payload\n",
				__func__, pkt->hdr.payload_len);
		return -ENOMEM;
	}

	return 0;
}

static int schedule_notify(uint8_t lcid, int event,
			const union notifier_metadata *metadata)
{
	struct smux_notify_handle *notify_handle = 0;
	union notifier_metadata *meta_copy = 0;
	struct smux_lch_t *ch;
	int i;
	unsigned long flags;
	int ret = 0;

	ch = &smux_lch[lcid];
	notify_handle = kzalloc(sizeof(struct smux_notify_handle),
						GFP_ATOMIC);
	if (!notify_handle) {
		pr_err("%s: out of memory\n", __func__);
		ret = -ENOMEM;
		goto free_out;
	}

	notify_handle->notify = ch->notify;
	notify_handle->priv = ch->priv;
	notify_handle->event_type = event;
	if (metadata) {
		meta_copy = kzalloc(sizeof(union notifier_metadata),
							GFP_ATOMIC);
		if (!meta_copy) {
			pr_err("%s: out of memory\n", __func__);
			ret = -ENOMEM;
			goto free_out;
		}
		*meta_copy = *metadata;
		notify_handle->metadata = meta_copy;
	} else {
		notify_handle->metadata = NULL;
	}

	spin_lock_irqsave(&notify_lock_lhc1, flags);
	i = kfifo_avail(&smux_notify_fifo);
	if (i < handle_size) {
		pr_err("%s: fifo full error %d expected %d\n",
				__func__, i, handle_size);
		ret = -ENOMEM;
		goto unlock_out;
	}

	i = kfifo_in(&smux_notify_fifo, &notify_handle, handle_size);
	if (i < 0 || i != handle_size) {
		pr_err("%s: fifo not available error %d (expected %d)\n",
				__func__, i, handle_size);
		ret = -ENOSPC;
		goto unlock_out;
	}
	++queued_fifo_notifications;

unlock_out:
	spin_unlock_irqrestore(&notify_lock_lhc1, flags);

free_out:
	queue_work(smux_notify_wq, &smux_notify_local);
	if (ret < 0 && notify_handle) {
		kfree(notify_handle->metadata);
		kfree(notify_handle);
	}
	return ret;
}

/**
 * Returns the serialized size of a packet.
 *
 * @pkt Packet to serialize
 *
 * @returns Serialized length of packet
 */
static unsigned int smux_serialize_size(struct smux_pkt_t *pkt)
{
	unsigned int size;

	size = sizeof(struct smux_hdr_t);
	size += pkt->hdr.payload_len;
	size += pkt->hdr.pad_len;

	return size;
}

/**
 * Serialize packet @pkt into output buffer @data.
 *
 * @pkt     Packet to serialize
 * @out     Destination buffer pointer
 * @out_len Size of serialized packet
 *
 * @returns 0 for success
 */
int smux_serialize(struct smux_pkt_t *pkt, char *out,
					unsigned int *out_len)
{
	char *data_start = out;

	if (smux_serialize_size(pkt) > SMUX_MAX_PKT_SIZE) {
		pr_err("%s: packet size %d too big\n",
				__func__, smux_serialize_size(pkt));
		return -E2BIG;
	}

	memcpy(out, &pkt->hdr, sizeof(struct smux_hdr_t));
	out += sizeof(struct smux_hdr_t);
	if (pkt->payload) {
		memcpy(out, pkt->payload, pkt->hdr.payload_len);
		out += pkt->hdr.payload_len;
	}
	if (pkt->hdr.pad_len) {
		memset(out, 0x0, pkt->hdr.pad_len);
		out += pkt->hdr.pad_len;
	}
	*out_len = out - data_start;
	return 0;
}

/**
 * Serialize header and provide pointer to the data.
 *
 * @pkt          Packet
 * @out[out]     Pointer to the serialized header data
 * @out_len[out] Pointer to the serialized header length
 */
static void smux_serialize_hdr(struct smux_pkt_t *pkt, char **out,
					unsigned int *out_len)
{
	*out = (char *)&pkt->hdr;
	*out_len = sizeof(struct smux_hdr_t);
}

/**
 * Serialize payload and provide pointer to the data.
 *
 * @pkt          Packet
 * @out[out]     Pointer to the serialized payload data
 * @out_len[out] Pointer to the serialized payload length
 */
static void smux_serialize_payload(struct smux_pkt_t *pkt, char **out,
					unsigned int *out_len)
{
	*out = pkt->payload;
	*out_len = pkt->hdr.payload_len;
}

/**
 * Serialize padding and provide pointer to the data.
 *
 * @pkt          Packet
 * @out[out]     Pointer to the serialized padding (always NULL)
 * @out_len[out] Pointer to the serialized padding length
 *
 * Since the padding field value is undefined, only the size of the padding
 * (@out_len) is set and the buffer pointer (@out) will always be NULL.
 */
static void smux_serialize_padding(struct smux_pkt_t *pkt, char **out,
					unsigned int *out_len)
{
	*out = NULL;
	*out_len = pkt->hdr.pad_len;
}

/**
 * Write data to TTY framework and handle breaking the writes up if needed.
 *
 * @data Data to write
 * @len  Length of data
 *
 * @returns 0 for success, < 0 for failure
 */
static int write_to_tty(char *data, unsigned len)
{
	int data_written;

	if (!data)
		return 0;

	while (len > 0 && !smux.in_reset) {
		data_written = smux.tty->ops->write(smux.tty, data, len);
		if (data_written >= 0) {
			len -= data_written;
			data += data_written;
		} else {
			pr_err("%s: TTY write returned error %d\n",
					__func__, data_written);
			return data_written;
		}

		if (len)
			tty_wait_until_sent(smux.tty,
				msecs_to_jiffies(TTY_BUFFER_FULL_WAIT_MS));
	}
	return 0;
}

/**
 * Write packet to TTY.
 *
 * @pkt packet to write
 *
 * @returns 0 on success
 */
static int smux_tx_tty(struct smux_pkt_t *pkt)
{
	char *data;
	unsigned int len;
	int ret;

	if (!smux.tty) {
		pr_err("%s: TTY not initialized", __func__);
		return -ENOTTY;
	}

	if (pkt->hdr.cmd == SMUX_CMD_BYTE) {
		SMUX_DBG("%s: tty send single byte\n", __func__);
		ret = write_to_tty(&pkt->hdr.flags, 1);
		return ret;
	}

	smux_serialize_hdr(pkt, &data, &len);
	ret = write_to_tty(data, len);
	if (ret) {
		pr_err("%s: failed %d to write header %d\n",
				__func__, ret, len);
		return ret;
	}

	smux_serialize_payload(pkt, &data, &len);
	ret = write_to_tty(data, len);
	if (ret) {
		pr_err("%s: failed %d to write payload %d\n",
				__func__, ret, len);
		return ret;
	}

	smux_serialize_padding(pkt, &data, &len);
	while (len > 0) {
		char zero = 0x0;
		ret = write_to_tty(&zero, 1);
		if (ret) {
			pr_err("%s: failed %d to write padding %d\n",
					__func__, ret, len);
			return ret;
		}
		--len;
	}
	return 0;
}

/**
 * Send a single character.
 *
 * @ch Character to send
 */
static void smux_send_byte(char ch)
{
	struct smux_pkt_t *pkt;

	pkt = smux_alloc_pkt();
	if (!pkt) {
		pr_err("%s: alloc failure for byte %x\n", __func__, ch);
		return;
	}
	pkt->hdr.cmd = SMUX_CMD_BYTE;
	pkt->hdr.flags = ch;
	pkt->hdr.lcid = SMUX_BROADCAST_LCID;

	list_add_tail(&pkt->list, &smux.power_queue);
	queue_work(smux_tx_wq, &smux_tx_work);
}
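
/*
 * Note: smux_send_byte() adds to smux.power_queue without taking
 * tx_lock_lha2 itself; its caller in this file (the wakeup request handler)
 * appears to already hold tx_lock_lha2, which is the lock that protects the
 * power queue.
 */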

/**
 * Receive a single-character packet (used for internal testing).
 *
 * @ch   Character to receive
 * @lcid Logical channel ID for packet
 *
 * @returns 0 for success
 */
static int smux_receive_byte(char ch, int lcid)
{
	struct smux_pkt_t pkt;

	smux_init_pkt(&pkt);
	pkt.hdr.lcid = lcid;
	pkt.hdr.cmd = SMUX_CMD_BYTE;
	pkt.hdr.flags = ch;

	return smux_dispatch_rx_pkt(&pkt);
}

/**
 * Queue packet for transmit.
 *
 * @pkt_ptr Packet to queue
 * @ch      Channel to queue packet on
 * @queue   Queue channel on ready list
 */
static void smux_tx_queue(struct smux_pkt_t *pkt_ptr, struct smux_lch_t *ch,
		int queue)
{
	unsigned long flags;

	SMUX_DBG("%s: queuing pkt %p\n", __func__, pkt_ptr);

	spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
	list_add_tail(&pkt_ptr->list, &ch->tx_queue);
	spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);

	if (queue)
		list_channel(ch);
}

/**
 * Handle receive OPEN ACK command.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_open_ack(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret;
	struct smux_lch_t *ch;
	int enable_powerdown = 0;

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];

	spin_lock(&ch->state_lock_lhb1);
	if (ch->local_state == SMUX_LCH_LOCAL_OPENING) {
		SMUX_DBG("lcid %d local state 0x%x -> 0x%x\n", lcid,
				ch->local_state,
				SMUX_LCH_LOCAL_OPENED);

		if (pkt->hdr.flags & SMUX_CMD_OPEN_POWER_COLLAPSE)
			enable_powerdown = 1;

		ch->local_state = SMUX_LCH_LOCAL_OPENED;
		if (ch->remote_state == SMUX_LCH_REMOTE_OPENED)
			schedule_notify(lcid, SMUX_CONNECTED, NULL);
		ret = 0;
	} else if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
		SMUX_DBG("Remote loopback OPEN ACK received\n");
		ret = 0;
	} else {
		pr_err("%s: lcid %d state 0x%x open ack invalid\n",
				__func__, lcid, ch->local_state);
		ret = -EINVAL;
	}
	spin_unlock(&ch->state_lock_lhb1);

	if (enable_powerdown) {
		spin_lock(&smux.tx_lock_lha2);
		if (!smux.powerdown_enabled) {
			smux.powerdown_enabled = 1;
			SMUX_DBG("%s: enabling power-collapse support\n",
					__func__);
		}
		spin_unlock(&smux.tx_lock_lha2);
	}

	return ret;
}

static int smux_handle_close_ack(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret;
	struct smux_lch_t *ch;
	union notifier_metadata meta_disconnected;
	unsigned long flags;

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];
	meta_disconnected.disconnected.is_ssr = 0;

	spin_lock_irqsave(&ch->state_lock_lhb1, flags);

	if (ch->local_state == SMUX_LCH_LOCAL_CLOSING) {
		SMUX_DBG("lcid %d local state 0x%x -> 0x%x\n", lcid,
				SMUX_LCH_LOCAL_CLOSING,
				SMUX_LCH_LOCAL_CLOSED);
		ch->local_state = SMUX_LCH_LOCAL_CLOSED;
		if (ch->remote_state == SMUX_LCH_REMOTE_CLOSED)
			schedule_notify(lcid, SMUX_DISCONNECTED,
					&meta_disconnected);
		ret = 0;
	} else if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
		SMUX_DBG("Remote loopback CLOSE ACK received\n");
		ret = 0;
	} else {
		pr_err("%s: lcid %d state 0x%x close ack invalid\n",
				__func__, lcid, ch->local_state);
		ret = -EINVAL;
	}
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
	return ret;
}

/**
 * Handle receive OPEN command.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_open_cmd(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret;
	struct smux_lch_t *ch;
	struct smux_pkt_t *ack_pkt;
	unsigned long flags;
	int tx_ready = 0;
	int enable_powerdown = 0;

	if (pkt->hdr.flags & SMUX_CMD_OPEN_ACK)
		return smux_handle_rx_open_ack(pkt);

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];

	spin_lock_irqsave(&ch->state_lock_lhb1, flags);

	if (ch->remote_state == SMUX_LCH_REMOTE_CLOSED) {
		SMUX_DBG("lcid %d remote state 0x%x -> 0x%x\n", lcid,
				SMUX_LCH_REMOTE_CLOSED,
				SMUX_LCH_REMOTE_OPENED);

		ch->remote_state = SMUX_LCH_REMOTE_OPENED;
		if (pkt->hdr.flags & SMUX_CMD_OPEN_POWER_COLLAPSE)
			enable_powerdown = 1;

		/* Send Open ACK */
		ack_pkt = smux_alloc_pkt();
		if (!ack_pkt) {
			/* exit out to allow retrying this later */
			ret = -ENOMEM;
			goto out;
		}
		ack_pkt->hdr.cmd = SMUX_CMD_OPEN_LCH;
		ack_pkt->hdr.flags = SMUX_CMD_OPEN_ACK
			| SMUX_CMD_OPEN_POWER_COLLAPSE;
		ack_pkt->hdr.lcid = lcid;
		ack_pkt->hdr.payload_len = 0;
		ack_pkt->hdr.pad_len = 0;
		if (pkt->hdr.flags & SMUX_CMD_OPEN_REMOTE_LOOPBACK) {
			ch->remote_mode = SMUX_LCH_MODE_REMOTE_LOOPBACK;
			ack_pkt->hdr.flags |= SMUX_CMD_OPEN_REMOTE_LOOPBACK;
		}
		smux_tx_queue(ack_pkt, ch, 0);
		tx_ready = 1;

		if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
			/*
			 * Send an Open command to the remote side to
			 * simulate our local client doing it.
			 */
			ack_pkt = smux_alloc_pkt();
			if (ack_pkt) {
				ack_pkt->hdr.lcid = lcid;
				ack_pkt->hdr.cmd = SMUX_CMD_OPEN_LCH;
				ack_pkt->hdr.flags =
					SMUX_CMD_OPEN_POWER_COLLAPSE;
				ack_pkt->hdr.payload_len = 0;
				ack_pkt->hdr.pad_len = 0;
				smux_tx_queue(ack_pkt, ch, 0);
				tx_ready = 1;
			} else {
				pr_err("%s: Remote loopback allocation failure\n",
						__func__);
			}
		} else if (ch->local_state == SMUX_LCH_LOCAL_OPENED) {
			schedule_notify(lcid, SMUX_CONNECTED, NULL);
		}
		ret = 0;
	} else {
		pr_err("%s: lcid %d remote state 0x%x open invalid\n",
				__func__, lcid, ch->remote_state);
		ret = -EINVAL;
	}

out:
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);

	if (enable_powerdown) {
		spin_lock_irqsave(&smux.tx_lock_lha2, flags);
		if (!smux.powerdown_enabled) {
			smux.powerdown_enabled = 1;
			SMUX_DBG("%s: enabling power-collapse support\n",
					__func__);
		}
		spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
	}

	if (tx_ready)
		list_channel(ch);

	return ret;
}

/**
 * Handle receive CLOSE command.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_close_cmd(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret;
	struct smux_lch_t *ch;
	struct smux_pkt_t *ack_pkt;
	union notifier_metadata meta_disconnected;
	unsigned long flags;
	int tx_ready = 0;

	if (pkt->hdr.flags & SMUX_CMD_CLOSE_ACK)
		return smux_handle_close_ack(pkt);

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];
	meta_disconnected.disconnected.is_ssr = 0;

	spin_lock_irqsave(&ch->state_lock_lhb1, flags);
	if (ch->remote_state == SMUX_LCH_REMOTE_OPENED) {
		SMUX_DBG("lcid %d remote state 0x%x -> 0x%x\n", lcid,
				SMUX_LCH_REMOTE_OPENED,
				SMUX_LCH_REMOTE_CLOSED);

		ack_pkt = smux_alloc_pkt();
		if (!ack_pkt) {
			/* exit out to allow retrying this later */
			ret = -ENOMEM;
			goto out;
		}
		ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
		ack_pkt->hdr.cmd = SMUX_CMD_CLOSE_LCH;
		ack_pkt->hdr.flags = SMUX_CMD_CLOSE_ACK;
		ack_pkt->hdr.lcid = lcid;
		ack_pkt->hdr.payload_len = 0;
		ack_pkt->hdr.pad_len = 0;
		smux_tx_queue(ack_pkt, ch, 0);
		tx_ready = 1;

		if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
			/*
			 * Send a Close command to the remote side to simulate
			 * our local client doing it.
			 */
			ack_pkt = smux_alloc_pkt();
			if (ack_pkt) {
				ack_pkt->hdr.lcid = lcid;
				ack_pkt->hdr.cmd = SMUX_CMD_CLOSE_LCH;
				ack_pkt->hdr.flags = 0;
				ack_pkt->hdr.payload_len = 0;
				ack_pkt->hdr.pad_len = 0;
				smux_tx_queue(ack_pkt, ch, 0);
				tx_ready = 1;
			} else {
				pr_err("%s: Remote loopback allocation failure\n",
						__func__);
			}
		}

		if (ch->local_state == SMUX_LCH_LOCAL_CLOSED)
			schedule_notify(lcid, SMUX_DISCONNECTED,
					&meta_disconnected);
		ret = 0;
	} else {
		pr_err("%s: lcid %d remote state 0x%x close invalid\n",
				__func__, lcid, ch->remote_state);
		ret = -EINVAL;
	}
out:
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
	if (tx_ready)
		list_channel(ch);

	return ret;
}

/*
 * Handle receive DATA command.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_data_cmd(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret = 0;
	int do_retry = 0;
	int tmp;
	int rx_len;
	struct smux_lch_t *ch;
	union notifier_metadata metadata;
	int remote_loopback;
	struct smux_pkt_t *ack_pkt;
	unsigned long flags;

	if (!pkt || smux_assert_lch_id(pkt->hdr.lcid)) {
		ret = -ENXIO;
		goto out;
	}

	rx_len = pkt->hdr.payload_len;
	if (rx_len == 0) {
		ret = -EINVAL;
		goto out;
	}

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];
	spin_lock_irqsave(&ch->state_lock_lhb1, flags);
	remote_loopback = ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK;

	if (ch->local_state != SMUX_LCH_LOCAL_OPENED
		&& !remote_loopback) {
		pr_err("smux: ch %d error data on local state 0x%x",
					lcid, ch->local_state);
		ret = -EIO;
		spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
		goto out;
	}

	if (ch->remote_state != SMUX_LCH_REMOTE_OPENED) {
		pr_err("smux: ch %d error data on remote state 0x%x",
					lcid, ch->remote_state);
		ret = -EIO;
		spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
		goto out;
	}

	if (!list_empty(&ch->rx_retry_queue)) {
		do_retry = 1;
		if ((ch->rx_retry_queue_cnt + 1) > SMUX_RX_RETRY_MAX_PKTS) {
			/* retry queue full */
			schedule_notify(lcid, SMUX_READ_FAIL, NULL);
			ret = -ENOMEM;
			spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
			goto out;
		}
	}
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);

	if (remote_loopback) {
		/* Echo the data back to the remote client. */
		ack_pkt = smux_alloc_pkt();
		if (ack_pkt) {
			ack_pkt->hdr.lcid = lcid;
			ack_pkt->hdr.cmd = SMUX_CMD_DATA;
			ack_pkt->hdr.flags = 0;
			ack_pkt->hdr.payload_len = pkt->hdr.payload_len;
			if (ack_pkt->hdr.payload_len) {
				smux_alloc_pkt_payload(ack_pkt);
				memcpy(ack_pkt->payload, pkt->payload,
						ack_pkt->hdr.payload_len);
			}
			ack_pkt->hdr.pad_len = pkt->hdr.pad_len;
			smux_tx_queue(ack_pkt, ch, 0);
			list_channel(ch);
		} else {
			pr_err("%s: Remote loopback allocation failure\n",
					__func__);
		}
	} else if (!do_retry) {
		/* request buffer from client */
		metadata.read.pkt_priv = 0;
		metadata.read.buffer = 0;
		tmp = ch->get_rx_buffer(ch->priv,
				(void **)&metadata.read.pkt_priv,
				(void **)&metadata.read.buffer,
				rx_len);

		if (tmp == 0 && metadata.read.buffer) {
			/* place data into RX buffer */
			memcpy(metadata.read.buffer, pkt->payload,
					rx_len);
			metadata.read.len = rx_len;
			schedule_notify(lcid, SMUX_READ_DONE,
							&metadata);
		} else if (tmp == -EAGAIN ||
				(tmp == 0 && !metadata.read.buffer)) {
			/* buffer allocation failed - add to retry queue */
			do_retry = 1;
		} else if (tmp < 0) {
			schedule_notify(lcid, SMUX_READ_FAIL, NULL);
			ret = -ENOMEM;
		}
	}

	if (do_retry) {
		struct smux_rx_pkt_retry *retry;

		retry = kmalloc(sizeof(struct smux_rx_pkt_retry), GFP_KERNEL);
		if (!retry) {
			pr_err("%s: retry alloc failure\n", __func__);
			ret = -ENOMEM;
			schedule_notify(lcid, SMUX_READ_FAIL, NULL);
			goto out;
		}
		INIT_LIST_HEAD(&retry->rx_retry_list);
		retry->timeout_in_ms = SMUX_RX_RETRY_MIN_MS;

		/* copy packet */
		retry->pkt = smux_alloc_pkt();
		if (!retry->pkt) {
			kfree(retry);
			pr_err("%s: pkt alloc failure\n", __func__);
			ret = -ENOMEM;
			schedule_notify(lcid, SMUX_READ_FAIL, NULL);
			goto out;
		}
		retry->pkt->hdr.lcid = lcid;
		retry->pkt->hdr.payload_len = pkt->hdr.payload_len;
		retry->pkt->hdr.pad_len = pkt->hdr.pad_len;
		if (retry->pkt->hdr.payload_len) {
			smux_alloc_pkt_payload(retry->pkt);
			memcpy(retry->pkt->payload, pkt->payload,
					retry->pkt->hdr.payload_len);
		}

		/* add to retry queue */
		spin_lock_irqsave(&ch->state_lock_lhb1, flags);
		list_add_tail(&retry->rx_retry_list, &ch->rx_retry_queue);
		++ch->rx_retry_queue_cnt;
		if (ch->rx_retry_queue_cnt == 1)
			queue_delayed_work(smux_rx_wq, &ch->rx_retry_work,
				msecs_to_jiffies(retry->timeout_in_ms));
		spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
	}

out:
	return ret;
}

/**
 * Handle receive byte command for testing purposes.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_byte_cmd(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret;
	struct smux_lch_t *ch;
	union notifier_metadata metadata;
	unsigned long flags;

	if (!pkt || smux_assert_lch_id(pkt->hdr.lcid)) {
		pr_err("%s: invalid packet or channel id\n", __func__);
		return -ENXIO;
	}

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];
	spin_lock_irqsave(&ch->state_lock_lhb1, flags);

	if (ch->local_state != SMUX_LCH_LOCAL_OPENED) {
		pr_err("smux: ch %d error data on local state 0x%x",
				lcid, ch->local_state);
		ret = -EIO;
		goto out;
	}

	if (ch->remote_state != SMUX_LCH_REMOTE_OPENED) {
		pr_err("smux: ch %d error data on remote state 0x%x",
				lcid, ch->remote_state);
		ret = -EIO;
		goto out;
	}

	metadata.read.pkt_priv = (void *)(int)pkt->hdr.flags;
	metadata.read.buffer = 0;
	schedule_notify(lcid, SMUX_READ_DONE, &metadata);
	ret = 0;

out:
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
	return ret;
}

/**
 * Handle receive status command.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_status_cmd(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret = 0;
	struct smux_lch_t *ch;
	union notifier_metadata meta;
	unsigned long flags;
	int tx_ready = 0;

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];

	spin_lock_irqsave(&ch->state_lock_lhb1, flags);
	meta.tiocm.tiocm_old = ch->remote_tiocm;
	meta.tiocm.tiocm_new = pkt->hdr.flags;

	/* update logical channel flow control */
	if ((meta.tiocm.tiocm_old & SMUX_CMD_STATUS_FLOW_CNTL) ^
		(meta.tiocm.tiocm_new & SMUX_CMD_STATUS_FLOW_CNTL)) {
		/* logical channel flow control changed */
		if (pkt->hdr.flags & SMUX_CMD_STATUS_FLOW_CNTL) {
			/* disable TX */
			SMUX_DBG("TX Flow control enabled\n");
			ch->tx_flow_control = 1;
		} else {
			/* re-enable channel */
			SMUX_DBG("TX Flow control disabled\n");
			ch->tx_flow_control = 0;
			tx_ready = 1;
		}
	}
	meta.tiocm.tiocm_old = msm_smux_tiocm_get_atomic(ch);
	ch->remote_tiocm = pkt->hdr.flags;
	meta.tiocm.tiocm_new = msm_smux_tiocm_get_atomic(ch);

	/* client notification for status change */
	if (IS_FULLY_OPENED(ch)) {
		if (meta.tiocm.tiocm_old != meta.tiocm.tiocm_new)
			schedule_notify(lcid, SMUX_TIOCM_UPDATE, &meta);
		ret = 0;
	}
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
	if (tx_ready)
		list_channel(ch);

	return ret;
}

/**
 * Handle receive power command.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_power_cmd(struct smux_pkt_t *pkt)
{
	struct smux_pkt_t *ack_pkt;
	unsigned long flags;

	spin_lock_irqsave(&smux.tx_lock_lha2, flags);
	if (pkt->hdr.flags & SMUX_CMD_PWR_CTL_ACK) {
		/* local sleep request ack */
		if (smux.power_state == SMUX_PWR_TURNING_OFF) {
			/* Power-down complete, turn off UART */
			SMUX_DBG("%s: Power %d->%d\n", __func__,
					smux.power_state, SMUX_PWR_OFF_FLUSH);
			smux.power_state = SMUX_PWR_OFF_FLUSH;
			queue_work(smux_tx_wq, &smux_inactivity_work);
		} else {
			pr_err("%s: sleep request ack invalid in state %d\n",
					__func__, smux.power_state);
		}
	} else {
		/*
		 * Remote sleep request
		 *
		 * Even if we have data pending, we need to transition to the
		 * POWER_OFF state and then perform a wakeup since the remote
		 * side has requested a power-down.
		 *
		 * The state here is set to SMUX_PWR_TURNING_OFF_FLUSH and
		 * the TX thread will set the state to SMUX_PWR_TURNING_OFF
		 * when it sends the packet.
		 */
		if (smux.power_state == SMUX_PWR_ON
			|| smux.power_state == SMUX_PWR_TURNING_OFF) {
			ack_pkt = smux_alloc_pkt();
			if (ack_pkt) {
				SMUX_DBG("%s: Power %d->%d\n", __func__,
						smux.power_state,
						SMUX_PWR_TURNING_OFF_FLUSH);

				smux.power_state = SMUX_PWR_TURNING_OFF_FLUSH;

				/* send power-down ack */
				ack_pkt->hdr.cmd = SMUX_CMD_PWR_CTL;
				ack_pkt->hdr.flags = SMUX_CMD_PWR_CTL_ACK;
				ack_pkt->hdr.lcid = SMUX_BROADCAST_LCID;
				list_add_tail(&ack_pkt->list,
						&smux.power_queue);
				queue_work(smux_tx_wq, &smux_tx_work);
			}
		} else {
			pr_err("%s: sleep request invalid in state %d\n",
					__func__, smux.power_state);
		}
	}
	spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);

	return 0;
}

/**
 * Handle dispatching a completed packet for receive processing.
 *
 * @pkt Packet to process
 *
 * @returns 0 for success
 */
static int smux_dispatch_rx_pkt(struct smux_pkt_t *pkt)
{
	int ret;

	SMUX_LOG_PKT_RX(pkt);

	switch (pkt->hdr.cmd) {
	case SMUX_CMD_OPEN_LCH:
		ret = smux_handle_rx_open_cmd(pkt);
		break;

	case SMUX_CMD_DATA:
		ret = smux_handle_rx_data_cmd(pkt);
		break;

	case SMUX_CMD_CLOSE_LCH:
		ret = smux_handle_rx_close_cmd(pkt);
		break;

	case SMUX_CMD_STATUS:
		ret = smux_handle_rx_status_cmd(pkt);
		break;

	case SMUX_CMD_PWR_CTL:
		ret = smux_handle_rx_power_cmd(pkt);
		break;

	case SMUX_CMD_BYTE:
		ret = smux_handle_rx_byte_cmd(pkt);
		break;

	default:
		pr_err("%s: command %d unknown\n", __func__, pkt->hdr.cmd);
		ret = -EINVAL;
	}
	return ret;
}

/**
 * Deserializes a packet and dispatches it to the packet receive logic.
 *
 * @data Raw data for one packet
 * @len  Length of the data
 *
 * @returns 0 for success
 */
static int smux_deserialize(unsigned char *data, int len)
{
	struct smux_pkt_t recv;
	uint8_t lcid;

	smux_init_pkt(&recv);

	/*
	 * It may be possible to optimize this to not use the
	 * temporary buffer.
	 */
	memcpy(&recv.hdr, data, sizeof(struct smux_hdr_t));

	if (recv.hdr.magic != SMUX_MAGIC) {
		pr_err("%s: invalid header magic\n", __func__);
		return -EINVAL;
	}

	lcid = recv.hdr.lcid;
	if (smux_assert_lch_id(lcid)) {
		pr_err("%s: invalid channel id %d\n", __func__, lcid);
		return -ENXIO;
	}

	if (recv.hdr.payload_len)
		recv.payload = data + sizeof(struct smux_hdr_t);

	return smux_dispatch_rx_pkt(&recv);
}

/**
 * Handle wakeup request byte.
 */
static void smux_handle_wakeup_req(void)
{
	unsigned long flags;

	spin_lock_irqsave(&smux.tx_lock_lha2, flags);
	if (smux.power_state == SMUX_PWR_OFF
		|| smux.power_state == SMUX_PWR_TURNING_ON) {
		/* wakeup system */
		SMUX_DBG("%s: Power %d->%d\n", __func__,
				smux.power_state, SMUX_PWR_ON);
		smux.power_state = SMUX_PWR_ON;
		queue_work(smux_tx_wq, &smux_wakeup_work);
		queue_work(smux_tx_wq, &smux_tx_work);
		queue_delayed_work(smux_tx_wq, &smux_delayed_inactivity_work,
			msecs_to_jiffies(SMUX_INACTIVITY_TIMEOUT_MS));
		smux_send_byte(SMUX_WAKEUP_ACK);
	} else {
		smux_send_byte(SMUX_WAKEUP_ACK);
	}
	spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
}

/**
 * Handle wakeup request ack.
 */
static void smux_handle_wakeup_ack(void)
{
	unsigned long flags;

	spin_lock_irqsave(&smux.tx_lock_lha2, flags);
	if (smux.power_state == SMUX_PWR_TURNING_ON) {
		/* received response to wakeup request */
		SMUX_DBG("%s: Power %d->%d\n", __func__,
				smux.power_state, SMUX_PWR_ON);
		smux.power_state = SMUX_PWR_ON;
		queue_work(smux_tx_wq, &smux_tx_work);
		queue_delayed_work(smux_tx_wq, &smux_delayed_inactivity_work,
			msecs_to_jiffies(SMUX_INACTIVITY_TIMEOUT_MS));

	} else if (smux.power_state != SMUX_PWR_ON) {
		/* invalid message */
		pr_err("%s: wakeup request ack invalid in state %d\n",
				__func__, smux.power_state);
	}
	spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
}

/**
 * RX State machine - IDLE state processing.
 *
 * @data New RX data to process
 * @len  Length of the data
 * @used Return value of length processed
 * @flag Error flag - TTY_NORMAL 0 for no failure
 */
static void smux_rx_handle_idle(const unsigned char *data,
		int len, int *used, int flag)
{
	int i;

	if (flag) {
		if (smux_byte_loopback)
			smux_receive_byte(SMUX_UT_ECHO_ACK_FAIL,
					smux_byte_loopback);
		pr_err("%s: TTY error 0x%x - ignoring\n", __func__, flag);
		++*used;
		return;
	}

	for (i = *used; i < len && smux.rx_state == SMUX_RX_IDLE; i++) {
		switch (data[i]) {
		case SMUX_MAGIC_WORD1:
			smux.rx_state = SMUX_RX_MAGIC;
			break;
		case SMUX_WAKEUP_REQ:
			smux_handle_wakeup_req();
			break;
		case SMUX_WAKEUP_ACK:
			smux_handle_wakeup_ack();
			break;
		default:
			/* unexpected character */
			if (smux_byte_loopback && data[i] == SMUX_UT_ECHO_REQ)
				smux_receive_byte(SMUX_UT_ECHO_ACK_OK,
						smux_byte_loopback);
			pr_err("%s: parse error 0x%02x - ignoring\n", __func__,
					(unsigned)data[i]);
			break;
		}
	}

	*used = i;
}

/**
 * RX State machine - Header Magic state processing.
 *
 * @data New RX data to process
 * @len  Length of the data
 * @used Return value of length processed
 * @flag Error flag - TTY_NORMAL 0 for no failure
 */
static void smux_rx_handle_magic(const unsigned char *data,
		int len, int *used, int flag)
{
	int i;

	if (flag) {
		pr_err("%s: TTY RX error %d\n", __func__, flag);
		smux_enter_reset();
		smux.rx_state = SMUX_RX_FAILURE;
		++*used;
		return;
	}

	for (i = *used; i < len && smux.rx_state == SMUX_RX_MAGIC; i++) {
		/* wait for completion of the magic */
		if (data[i] == SMUX_MAGIC_WORD2) {
			smux.recv_len = 0;
			smux.recv_buf[smux.recv_len++] = SMUX_MAGIC_WORD1;
			smux.recv_buf[smux.recv_len++] = SMUX_MAGIC_WORD2;
			smux.rx_state = SMUX_RX_HDR;
		} else {
			/* unexpected / trash character */
			pr_err("%s: rx parse error for char %c; *used=%d, len=%d\n",
					__func__, data[i], *used, len);
			smux.rx_state = SMUX_RX_IDLE;
		}
	}

	*used = i;
}

/**
 * RX State machine - Packet Header state processing.
 *
 * @data New RX data to process
 * @len  Length of the data
 * @used Return value of length processed
 * @flag Error flag - TTY_NORMAL 0 for no failure
 */
static void smux_rx_handle_hdr(const unsigned char *data,
		int len, int *used, int flag)
{
	int i;
	struct smux_hdr_t *hdr;

	if (flag) {
		pr_err("%s: TTY RX error %d\n", __func__, flag);
		smux_enter_reset();
		smux.rx_state = SMUX_RX_FAILURE;
		++*used;
		return;
	}

	for (i = *used; i < len && smux.rx_state == SMUX_RX_HDR; i++) {
		smux.recv_buf[smux.recv_len++] = data[i];

		if (smux.recv_len == sizeof(struct smux_hdr_t)) {
			/* complete header received */
			hdr = (struct smux_hdr_t *)smux.recv_buf;
			smux.pkt_remain = hdr->payload_len + hdr->pad_len;
			smux.rx_state = SMUX_RX_PAYLOAD;
		}
	}
	*used = i;
}
1905
1906/**
1907 * RX State machine - Packet Payload state processing.
1908 *
1909 * @data New RX data to process
1910 * @len Length of the data
1911 * @used Return value of length processed
 1912 * @flag Error flag - TTY_NORMAL (0) for no failure
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001913 */
1914static void smux_rx_handle_pkt_payload(const unsigned char *data,
1915 int len, int *used, int flag)
1916{
1917 int remaining;
1918
1919 if (flag) {
1920 pr_err("%s: TTY RX error %d\n", __func__, flag);
1921 smux_enter_reset();
1922 smux.rx_state = SMUX_RX_FAILURE;
1923 ++*used;
1924 return;
1925 }
1926
1927 /* copy data into rx buffer */
1928 if (smux.pkt_remain < (len - *used))
1929 remaining = smux.pkt_remain;
1930 else
1931 remaining = len - *used;
1932
1933 memcpy(&smux.recv_buf[smux.recv_len], &data[*used], remaining);
1934 smux.recv_len += remaining;
1935 smux.pkt_remain -= remaining;
1936 *used += remaining;
1937
1938 if (smux.pkt_remain == 0) {
1939 /* complete packet received */
1940 smux_deserialize(smux.recv_buf, smux.recv_len);
1941 smux.rx_state = SMUX_RX_IDLE;
1942 }
1943}
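
/*
 * Informational note (derived from the RX handlers above): a frame on the
 * wire is a struct smux_hdr_t (whose first two bytes are SMUX_MAGIC_WORD1
 * and SMUX_MAGIC_WORD2) followed by hdr.payload_len payload bytes and
 * hdr.pad_len padding bytes.  Single SMUX_WAKEUP_REQ/SMUX_WAKEUP_ACK bytes
 * may also appear between frames and are consumed in the idle state.
 */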
1944
1945/**
1946 * Feed data to the receive state machine.
1947 *
1948 * @data Pointer to data block
1949 * @len Length of data
1950 * @flag TTY_NORMAL (0) for no error, otherwise TTY Error Flag
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001951 */
1952void smux_rx_state_machine(const unsigned char *data,
1953 int len, int flag)
1954{
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001955 struct smux_rx_worker_data work;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001956
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001957 work.data = data;
1958 work.len = len;
1959 work.flag = flag;
1960 INIT_WORK_ONSTACK(&work.work, smux_rx_worker);
1961 work.work_complete = COMPLETION_INITIALIZER_ONSTACK(work.work_complete);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001962
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001963 queue_work(smux_rx_wq, &work.work);
1964 wait_for_completion(&work.work_complete);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001965}
1966
1967/**
1968 * Add channel to transmit-ready list and trigger transmit worker.
1969 *
1970 * @ch Channel to add
1971 */
1972static void list_channel(struct smux_lch_t *ch)
1973{
1974 unsigned long flags;
1975
1976 SMUX_DBG("%s: listing channel %d\n",
1977 __func__, ch->lcid);
1978
1979 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
1980 spin_lock(&ch->tx_lock_lhb2);
1981 smux.tx_activity_flag = 1;
1982 if (list_empty(&ch->tx_ready_list))
1983 list_add_tail(&ch->tx_ready_list, &smux.lch_tx_ready_list);
1984 spin_unlock(&ch->tx_lock_lhb2);
1985 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
1986
1987 queue_work(smux_tx_wq, &smux_tx_work);
1988}
1989
1990/**
1991 * Transmit packet on correct transport and then perform client
1992 * notification.
1993 *
1994 * @ch Channel to transmit on
1995 * @pkt Packet to transmit
1996 */
1997static void smux_tx_pkt(struct smux_lch_t *ch, struct smux_pkt_t *pkt)
1998{
1999 union notifier_metadata meta_write;
2000 int ret;
2001
2002 if (ch && pkt) {
2003 SMUX_LOG_PKT_TX(pkt);
2004 if (ch->local_mode == SMUX_LCH_MODE_LOCAL_LOOPBACK)
2005 ret = smux_tx_loopback(pkt);
2006 else
2007 ret = smux_tx_tty(pkt);
2008
2009 if (pkt->hdr.cmd == SMUX_CMD_DATA) {
2010 /* notify write-done */
2011 meta_write.write.pkt_priv = pkt->priv;
2012 meta_write.write.buffer = pkt->payload;
2013 meta_write.write.len = pkt->hdr.payload_len;
2014 if (ret >= 0) {
 2015 SMUX_DBG("%s: PKT write done\n", __func__);
2016 schedule_notify(ch->lcid, SMUX_WRITE_DONE,
2017 &meta_write);
2018 } else {
2019 pr_err("%s: failed to write pkt %d\n",
2020 __func__, ret);
2021 schedule_notify(ch->lcid, SMUX_WRITE_FAIL,
2022 &meta_write);
2023 }
2024 }
2025 }
2026}
2027
2028/**
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002029 * Flush pending TTY TX data.
2030 */
2031static void smux_flush_tty(void)
2032{
2033 if (!smux.tty) {
2034 pr_err("%s: ldisc not loaded\n", __func__);
2035 return;
2036 }
2037
2038 tty_wait_until_sent(smux.tty,
2039 msecs_to_jiffies(TTY_BUFFER_FULL_WAIT_MS));
2040
2041 if (tty_chars_in_buffer(smux.tty) > 0)
2042 pr_err("%s: unable to flush UART queue\n", __func__);
2043}
2044
2045/**
Eric Holmberged1f00c2012-06-07 09:45:18 -06002046 * Purge TX queue for logical channel.
2047 *
2048 * @ch Logical channel pointer
2049 *
2050 * Must be called with the following spinlocks locked:
2051 * state_lock_lhb1
2052 * tx_lock_lhb2
2053 */
2054static void smux_purge_ch_tx_queue(struct smux_lch_t *ch)
2055{
2056 struct smux_pkt_t *pkt;
2057 int send_disconnect = 0;
2058
2059 while (!list_empty(&ch->tx_queue)) {
2060 pkt = list_first_entry(&ch->tx_queue, struct smux_pkt_t,
2061 list);
2062 list_del(&pkt->list);
2063
2064 if (pkt->hdr.cmd == SMUX_CMD_OPEN_LCH) {
2065 /* Open was never sent, just force to closed state */
2066 ch->local_state = SMUX_LCH_LOCAL_CLOSED;
2067 send_disconnect = 1;
2068 } else if (pkt->hdr.cmd == SMUX_CMD_DATA) {
2069 /* Notify client of failed write */
2070 union notifier_metadata meta_write;
2071
2072 meta_write.write.pkt_priv = pkt->priv;
2073 meta_write.write.buffer = pkt->payload;
2074 meta_write.write.len = pkt->hdr.payload_len;
2075 schedule_notify(ch->lcid, SMUX_WRITE_FAIL, &meta_write);
2076 }
2077 smux_free_pkt(pkt);
2078 }
2079
2080 if (send_disconnect) {
2081 union notifier_metadata meta_disconnected;
2082
2083 meta_disconnected.disconnected.is_ssr = smux.in_reset;
2084 schedule_notify(ch->lcid, SMUX_DISCONNECTED,
2085 &meta_disconnected);
2086 }
2087}
2088
2089/**
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002090 * Power-up the UART.
2091 */
2092static void smux_uart_power_on(void)
2093{
2094 struct uart_state *state;
2095
2096 if (!smux.tty || !smux.tty->driver_data) {
2097 pr_err("%s: unable to find UART port for tty %p\n",
2098 __func__, smux.tty);
2099 return;
2100 }
2101 state = smux.tty->driver_data;
2102 msm_hs_request_clock_on(state->uart_port);
2103}
2104
2105/**
2106 * Power down the UART.
2107 */
2108static void smux_uart_power_off(void)
2109{
2110 struct uart_state *state;
2111
2112 if (!smux.tty || !smux.tty->driver_data) {
2113 pr_err("%s: unable to find UART port for tty %p\n",
2114 __func__, smux.tty);
2115 return;
2116 }
2117 state = smux.tty->driver_data;
2118 msm_hs_request_clock_off(state->uart_port);
2119}
2120
2121/**
2122 * TX Wakeup Worker
2123 *
2124 * @work Not used
2125 *
2126 * Do an exponential back-off wakeup sequence with a maximum period
2127 * of approximately 1 second (1 << 20 microseconds).
2128 */
2129static void smux_wakeup_worker(struct work_struct *work)
2130{
2131 unsigned long flags;
2132 unsigned wakeup_delay;
2133 int complete = 0;
2134
Eric Holmberged1f00c2012-06-07 09:45:18 -06002135 while (!smux.in_reset) {
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002136 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
2137 if (smux.power_state == SMUX_PWR_ON) {
2138 /* wakeup complete */
2139 complete = 1;
2140 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2141 break;
2142 } else {
2143 /* retry */
2144 wakeup_delay = smux.pwr_wakeup_delay_us;
2145 smux.pwr_wakeup_delay_us <<= 1;
2146 if (smux.pwr_wakeup_delay_us > SMUX_WAKEUP_DELAY_MAX)
2147 smux.pwr_wakeup_delay_us =
2148 SMUX_WAKEUP_DELAY_MAX;
2149 }
2150 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2151 SMUX_DBG("%s: triggering wakeup\n", __func__);
2152 smux_send_byte(SMUX_WAKEUP_REQ);
2153
2154 if (wakeup_delay < SMUX_WAKEUP_DELAY_MIN) {
2155 SMUX_DBG("%s: sleeping for %u us\n", __func__,
2156 wakeup_delay);
2157 usleep_range(wakeup_delay, 2*wakeup_delay);
2158 } else {
2159 /* schedule delayed work */
2160 SMUX_DBG("%s: scheduling delayed wakeup in %u ms\n",
2161 __func__, wakeup_delay / 1000);
2162 queue_delayed_work(smux_tx_wq,
2163 &smux_wakeup_delayed_work,
2164 msecs_to_jiffies(wakeup_delay / 1000));
2165 break;
2166 }
2167 }
2168
2169 if (complete) {
2170 SMUX_DBG("%s: wakeup complete\n", __func__);
2171 /*
2172 * Cancel any pending retry. This avoids a race condition with
2173 * a new power-up request because:
2174 * 1) this worker doesn't modify the state
2175 * 2) this worker is processed on the same single-threaded
2176 * workqueue as new TX wakeup requests
2177 */
2178 cancel_delayed_work(&smux_wakeup_delayed_work);
2179 }
2180}
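
/*
 * Worked example of the back-off above (a restatement of the constants, not
 * new behavior): retries start 1 us apart and double each attempt; once the
 * delay reaches SMUX_WAKEUP_DELAY_MIN (1 << 15 us, roughly 33 ms) the retry
 * is rescheduled as delayed work in whole milliseconds, and the delay
 * saturates at SMUX_WAKEUP_DELAY_MAX (1 << 20 us, roughly 1 second).
 */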
2181
2182
2183/**
2184 * Inactivity timeout worker. Periodically scheduled when link is active.
 2185 * When it detects inactivity, it will power down the UART link.
2186 *
2187 * @work Work structure (not used)
2188 */
2189static void smux_inactivity_worker(struct work_struct *work)
2190{
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002191 struct smux_pkt_t *pkt;
2192 unsigned long flags;
2193
2194 spin_lock_irqsave(&smux.rx_lock_lha1, flags);
2195 spin_lock(&smux.tx_lock_lha2);
2196
2197 if (!smux.tx_activity_flag && !smux.rx_activity_flag) {
2198 /* no activity */
2199 if (smux.powerdown_enabled) {
2200 if (smux.power_state == SMUX_PWR_ON) {
2201 /* start power-down sequence */
2202 pkt = smux_alloc_pkt();
2203 if (pkt) {
2204 SMUX_DBG("%s: Power %d->%d\n", __func__,
2205 smux.power_state,
2206 SMUX_PWR_TURNING_OFF);
2207 smux.power_state = SMUX_PWR_TURNING_OFF;
2208
2209 /* send power-down request */
2210 pkt->hdr.cmd = SMUX_CMD_PWR_CTL;
2211 pkt->hdr.flags = 0;
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002212 pkt->hdr.lcid = SMUX_BROADCAST_LCID;
2213 list_add_tail(&pkt->list,
2214 &smux.power_queue);
2215 queue_work(smux_tx_wq, &smux_tx_work);
2216 } else {
2217 pr_err("%s: packet alloc failed\n",
2218 __func__);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002219 }
2220 }
2221 } else {
2222 SMUX_DBG("%s: link inactive, but powerdown disabled\n",
2223 __func__);
2224 }
2225 }
2226 smux.tx_activity_flag = 0;
2227 smux.rx_activity_flag = 0;
2228
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002229 if (smux.power_state == SMUX_PWR_OFF_FLUSH) {
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002230 /* ready to power-down the UART */
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002231 SMUX_DBG("%s: Power %d->%d\n", __func__,
 2232 smux.power_state, SMUX_PWR_OFF);
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002233 smux.power_state = SMUX_PWR_OFF;
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002234
2235 /* if data is pending, schedule a new wakeup */
2236 if (!list_empty(&smux.lch_tx_ready_list) ||
2237 !list_empty(&smux.power_queue))
2238 queue_work(smux_tx_wq, &smux_tx_work);
2239
2240 spin_unlock(&smux.tx_lock_lha2);
2241 spin_unlock_irqrestore(&smux.rx_lock_lha1, flags);
2242
2243 /* flush UART output queue and power down */
2244 smux_flush_tty();
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002245 smux_uart_power_off();
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002246 } else {
2247 spin_unlock(&smux.tx_lock_lha2);
2248 spin_unlock_irqrestore(&smux.rx_lock_lha1, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002249 }
2250
2251 /* reschedule inactivity worker */
2252 if (smux.power_state != SMUX_PWR_OFF)
2253 queue_delayed_work(smux_tx_wq, &smux_delayed_inactivity_work,
2254 msecs_to_jiffies(SMUX_INACTIVITY_TIMEOUT_MS));
2255}
2256
2257/**
Eric Holmbergb8435c82012-06-05 14:51:29 -06002258 * Remove RX retry packet from channel and free it.
2259 *
2260 * Must be called with state_lock_lhb1 locked.
2261 *
2262 * @ch Channel for retry packet
2263 * @retry Retry packet to remove
2264 */
2265void smux_remove_rx_retry(struct smux_lch_t *ch,
2266 struct smux_rx_pkt_retry *retry)
2267{
2268 list_del(&retry->rx_retry_list);
2269 --ch->rx_retry_queue_cnt;
2270 smux_free_pkt(retry->pkt);
2271 kfree(retry);
2272}
2273
2274/**
Eric Holmberg0560f7a2012-05-31 15:50:26 -06002275 * RX worker handles all receive operations.
2276 *
 2277 * @work Work structure contained in struct smux_rx_worker_data
2278 */
2279static void smux_rx_worker(struct work_struct *work)
2280{
2281 unsigned long flags;
2282 int used;
2283 int initial_rx_state;
2284 struct smux_rx_worker_data *w;
2285 const unsigned char *data;
2286 int len;
2287 int flag;
2288
2289 w = container_of(work, struct smux_rx_worker_data, work);
2290 data = w->data;
2291 len = w->len;
2292 flag = w->flag;
2293
2294 spin_lock_irqsave(&smux.rx_lock_lha1, flags);
2295 smux.rx_activity_flag = 1;
2296 spin_unlock_irqrestore(&smux.rx_lock_lha1, flags);
2297
2298 SMUX_DBG("%s: %p, len=%d, flag=%d\n", __func__, data, len, flag);
2299 used = 0;
2300 do {
2301 SMUX_DBG("%s: state %d; %d of %d\n",
2302 __func__, smux.rx_state, used, len);
2303 initial_rx_state = smux.rx_state;
2304
2305 switch (smux.rx_state) {
2306 case SMUX_RX_IDLE:
2307 smux_rx_handle_idle(data, len, &used, flag);
2308 break;
2309 case SMUX_RX_MAGIC:
2310 smux_rx_handle_magic(data, len, &used, flag);
2311 break;
2312 case SMUX_RX_HDR:
2313 smux_rx_handle_hdr(data, len, &used, flag);
2314 break;
2315 case SMUX_RX_PAYLOAD:
2316 smux_rx_handle_pkt_payload(data, len, &used, flag);
2317 break;
2318 default:
2319 SMUX_DBG("%s: invalid state %d\n",
2320 __func__, smux.rx_state);
2321 smux.rx_state = SMUX_RX_IDLE;
2322 break;
2323 }
2324 } while (used < len || smux.rx_state != initial_rx_state);
2325
2326 complete(&w->work_complete);
2327}
2328
2329/**
Eric Holmbergb8435c82012-06-05 14:51:29 -06002330 * RX Retry worker handles retrying get_rx_buffer calls that previously failed
2331 * because the client was not ready (-EAGAIN).
2332 *
2333 * @work Work structure contained in smux_lch_t structure
2334 */
2335static void smux_rx_retry_worker(struct work_struct *work)
2336{
2337 struct smux_lch_t *ch;
2338 struct smux_rx_pkt_retry *retry;
2339 union notifier_metadata metadata;
2340 int tmp;
2341 unsigned long flags;
2342
2343 ch = container_of(work, struct smux_lch_t, rx_retry_work.work);
2344
2345 /* get next retry packet */
2346 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2347 if (ch->local_state != SMUX_LCH_LOCAL_OPENED) {
2348 /* port has been closed - remove all retries */
2349 while (!list_empty(&ch->rx_retry_queue)) {
2350 retry = list_first_entry(&ch->rx_retry_queue,
2351 struct smux_rx_pkt_retry,
2352 rx_retry_list);
2353 smux_remove_rx_retry(ch, retry);
2354 }
2355 }
2356
2357 if (list_empty(&ch->rx_retry_queue)) {
2358 SMUX_DBG("%s: retry list empty for channel %d\n",
2359 __func__, ch->lcid);
2360 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2361 return;
2362 }
2363 retry = list_first_entry(&ch->rx_retry_queue,
2364 struct smux_rx_pkt_retry,
2365 rx_retry_list);
2366 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2367
2368 SMUX_DBG("%s: retrying rx pkt %p\n", __func__, retry);
2369 metadata.read.pkt_priv = 0;
2370 metadata.read.buffer = 0;
2371 tmp = ch->get_rx_buffer(ch->priv,
2372 (void **)&metadata.read.pkt_priv,
2373 (void **)&metadata.read.buffer,
2374 retry->pkt->hdr.payload_len);
2375 if (tmp == 0 && metadata.read.buffer) {
2376 /* have valid RX buffer */
2377 memcpy(metadata.read.buffer, retry->pkt->payload,
2378 retry->pkt->hdr.payload_len);
2379 metadata.read.len = retry->pkt->hdr.payload_len;
2380
2381 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2382 smux_remove_rx_retry(ch, retry);
2383 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2384
2385 schedule_notify(ch->lcid, SMUX_READ_DONE, &metadata);
2386 } else if (tmp == -EAGAIN ||
2387 (tmp == 0 && !metadata.read.buffer)) {
2388 /* retry again */
2389 retry->timeout_in_ms <<= 1;
2390 if (retry->timeout_in_ms > SMUX_RX_RETRY_MAX_MS) {
2391 /* timed out */
2392 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2393 smux_remove_rx_retry(ch, retry);
2394 schedule_notify(ch->lcid, SMUX_READ_FAIL, NULL);
2395 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2396 }
2397 } else {
2398 /* client error - drop packet */
2399 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2400 smux_remove_rx_retry(ch, retry);
2401 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2402
2403 schedule_notify(ch->lcid, SMUX_READ_FAIL, NULL);
2404 }
2405
2406 /* schedule next retry */
2407 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2408 if (!list_empty(&ch->rx_retry_queue)) {
2409 retry = list_first_entry(&ch->rx_retry_queue,
2410 struct smux_rx_pkt_retry,
2411 rx_retry_list);
2412 queue_delayed_work(smux_rx_wq, &ch->rx_retry_work,
2413 msecs_to_jiffies(retry->timeout_in_ms));
2414 }
2415 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2416}
2417
2418/**
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002419 * Transmit worker handles serializing and transmitting packets onto the
2420 * underlying transport.
2421 *
2422 * @work Work structure (not used)
2423 */
2424static void smux_tx_worker(struct work_struct *work)
2425{
2426 struct smux_pkt_t *pkt;
2427 struct smux_lch_t *ch;
2428 unsigned low_wm_notif;
2429 unsigned lcid;
2430 unsigned long flags;
2431
2432
2433 /*
2434 * Transmit packets in round-robin fashion based upon ready
2435 * channels.
2436 *
2437 * To eliminate the need to hold a lock for the entire
2438 * iteration through the channel ready list, the head of the
2439 * ready-channel list is always the next channel to be
2440 * processed. To send a packet, the first valid packet in
2441 * the head channel is removed and the head channel is then
2442 * rescheduled at the end of the queue by removing it and
2443 * inserting after the tail. The locks can then be released
2444 * while the packet is processed.
2445 */
Eric Holmberged1f00c2012-06-07 09:45:18 -06002446 while (!smux.in_reset) {
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002447 pkt = NULL;
2448 low_wm_notif = 0;
2449
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002450 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002451
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002452 /* handle wakeup if needed */
2453 if (smux.power_state == SMUX_PWR_OFF) {
2454 if (!list_empty(&smux.lch_tx_ready_list) ||
2455 !list_empty(&smux.power_queue)) {
2456 /* data to transmit, do wakeup */
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002457 smux.pwr_wakeup_delay_us = 1;
2458 SMUX_DBG("%s: Power %d->%d\n", __func__,
2459 smux.power_state,
2460 SMUX_PWR_TURNING_ON);
2461 smux.power_state = SMUX_PWR_TURNING_ON;
2462 spin_unlock_irqrestore(&smux.tx_lock_lha2,
2463 flags);
2464 smux_uart_power_on();
2465 queue_work(smux_tx_wq, &smux_wakeup_work);
2466 } else {
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002467 /* no activity -- stay asleep */
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002468 spin_unlock_irqrestore(&smux.tx_lock_lha2,
2469 flags);
2470 }
2471 break;
2472 }
2473
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002474 /* process any pending power packets */
2475 if (!list_empty(&smux.power_queue)) {
2476 pkt = list_first_entry(&smux.power_queue,
2477 struct smux_pkt_t, list);
2478 list_del(&pkt->list);
2479 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2480
2481 /* send the packet */
2482 SMUX_LOG_PKT_TX(pkt);
2483 if (!smux_byte_loopback) {
2484 smux_tx_tty(pkt);
2485 smux_flush_tty();
2486 } else {
2487 smux_tx_loopback(pkt);
2488 }
2489
2490 /* Adjust power state if this is a flush command */
2491 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
2492 if (smux.power_state == SMUX_PWR_TURNING_OFF_FLUSH &&
2493 pkt->hdr.cmd == SMUX_CMD_PWR_CTL &&
2494 (pkt->hdr.flags & SMUX_CMD_PWR_CTL_ACK)) {
2495 SMUX_DBG("%s: Power %d->%d\n", __func__,
2496 smux.power_state,
2497 SMUX_PWR_OFF_FLUSH);
2498 smux.power_state = SMUX_PWR_OFF_FLUSH;
2499 queue_work(smux_tx_wq, &smux_inactivity_work);
2500 }
2501 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2502
2503 smux_free_pkt(pkt);
2504 continue;
2505 }
2506
2507 /* get the next ready channel */
2508 if (list_empty(&smux.lch_tx_ready_list)) {
2509 /* no ready channels */
2510 SMUX_DBG("%s: no more ready channels, exiting\n",
2511 __func__);
2512 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2513 break;
2514 }
2515 smux.tx_activity_flag = 1;
2516
2517 if (smux.power_state != SMUX_PWR_ON) {
2518 /* channel not ready to transmit */
2519 SMUX_DBG("%s: can not tx with power state %d\n",
2520 __func__,
2521 smux.power_state);
2522 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2523 break;
2524 }
2525
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002526 /* get the next packet to send and rotate channel list */
2527 ch = list_first_entry(&smux.lch_tx_ready_list,
2528 struct smux_lch_t,
2529 tx_ready_list);
2530
2531 spin_lock(&ch->state_lock_lhb1);
2532 spin_lock(&ch->tx_lock_lhb2);
2533 if (!list_empty(&ch->tx_queue)) {
2534 /*
2535 * If remote TX flow control is enabled or
2536 * the channel is not fully opened, then only
2537 * send command packets.
2538 */
2539 if (ch->tx_flow_control || !IS_FULLY_OPENED(ch)) {
2540 struct smux_pkt_t *curr;
2541 list_for_each_entry(curr, &ch->tx_queue, list) {
2542 if (curr->hdr.cmd != SMUX_CMD_DATA) {
2543 pkt = curr;
2544 break;
2545 }
2546 }
2547 } else {
2548 /* get next cmd/data packet to send */
2549 pkt = list_first_entry(&ch->tx_queue,
2550 struct smux_pkt_t, list);
2551 }
2552 }
2553
2554 if (pkt) {
2555 list_del(&pkt->list);
2556
2557 /* update packet stats */
2558 if (pkt->hdr.cmd == SMUX_CMD_DATA) {
2559 --ch->tx_pending_data_cnt;
2560 if (ch->notify_lwm &&
2561 ch->tx_pending_data_cnt
2562 <= SMUX_WM_LOW) {
2563 ch->notify_lwm = 0;
2564 low_wm_notif = 1;
2565 }
2566 }
2567
2568 /* advance to the next ready channel */
2569 list_rotate_left(&smux.lch_tx_ready_list);
2570 } else {
2571 /* no data in channel to send, remove from ready list */
2572 list_del(&ch->tx_ready_list);
2573 INIT_LIST_HEAD(&ch->tx_ready_list);
2574 }
2575 lcid = ch->lcid;
2576 spin_unlock(&ch->tx_lock_lhb2);
2577 spin_unlock(&ch->state_lock_lhb1);
2578 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2579
2580 if (low_wm_notif)
2581 schedule_notify(lcid, SMUX_LOW_WM_HIT, NULL);
2582
2583 /* send the packet */
2584 smux_tx_pkt(ch, pkt);
2585 smux_free_pkt(pkt);
2586 }
2587}
2588
2589
2590/**********************************************************************/
2591/* Kernel API */
2592/**********************************************************************/
2593
2594/**
2595 * Set or clear channel option using the SMUX_CH_OPTION_* channel
2596 * flags.
2597 *
2598 * @lcid Logical channel ID
2599 * @set Options to set
2600 * @clear Options to clear
2601 *
2602 * @returns 0 for success, < 0 for failure
2603 */
2604int msm_smux_set_ch_option(uint8_t lcid, uint32_t set, uint32_t clear)
2605{
2606 unsigned long flags;
2607 struct smux_lch_t *ch;
2608 int tx_ready = 0;
2609 int ret = 0;
2610
2611 if (smux_assert_lch_id(lcid))
2612 return -ENXIO;
2613
2614 ch = &smux_lch[lcid];
2615 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2616
2617 /* Local loopback mode */
2618 if (set & SMUX_CH_OPTION_LOCAL_LOOPBACK)
2619 ch->local_mode = SMUX_LCH_MODE_LOCAL_LOOPBACK;
2620
2621 if (clear & SMUX_CH_OPTION_LOCAL_LOOPBACK)
2622 ch->local_mode = SMUX_LCH_MODE_NORMAL;
2623
2624 /* Remote loopback mode */
2625 if (set & SMUX_CH_OPTION_REMOTE_LOOPBACK)
2626 ch->local_mode = SMUX_LCH_MODE_REMOTE_LOOPBACK;
2627
2628 if (clear & SMUX_CH_OPTION_REMOTE_LOOPBACK)
2629 ch->local_mode = SMUX_LCH_MODE_NORMAL;
2630
2631 /* Flow control */
2632 if (set & SMUX_CH_OPTION_REMOTE_TX_STOP) {
2633 ch->local_tiocm |= SMUX_CMD_STATUS_FLOW_CNTL;
2634 ret = smux_send_status_cmd(ch);
2635 tx_ready = 1;
2636 }
2637
2638 if (clear & SMUX_CH_OPTION_REMOTE_TX_STOP) {
2639 ch->local_tiocm &= ~SMUX_CMD_STATUS_FLOW_CNTL;
2640 ret = smux_send_status_cmd(ch);
2641 tx_ready = 1;
2642 }
2643
2644 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2645
2646 if (tx_ready)
2647 list_channel(ch);
2648
2649 return ret;
2650}
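
/*
 * Illustrative sketch only: a client might use the option flags to put a
 * channel into local loopback for self-test and later restore normal mode.
 * 'my_lcid' is a placeholder channel ID, not something defined by this
 * driver.
 *
 *	ret = msm_smux_set_ch_option(my_lcid,
 *				     SMUX_CH_OPTION_LOCAL_LOOPBACK, 0);
 *	...
 *	ret = msm_smux_set_ch_option(my_lcid,
 *				     0, SMUX_CH_OPTION_LOCAL_LOOPBACK);
 */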
2651
2652/**
2653 * Starts the opening sequence for a logical channel.
2654 *
2655 * @lcid Logical channel ID
2656 * @priv Free for client usage
2657 * @notify Event notification function
2658 * @get_rx_buffer Function used to provide a receive buffer to SMUX
2659 *
2660 * @returns 0 for success, <0 otherwise
2661 *
2662 * A channel must be fully closed (either not previously opened or
 2663 * msm_smux_close() has been called and the SMUX_DISCONNECTED notification
 2664 * has been received).
2665 *
 2666 * Once the remote side is opened, the client will receive a SMUX_CONNECTED
2667 * event.
2668 */
2669int msm_smux_open(uint8_t lcid, void *priv,
2670 void (*notify)(void *priv, int event_type, const void *metadata),
2671 int (*get_rx_buffer)(void *priv, void **pkt_priv, void **buffer,
2672 int size))
2673{
2674 int ret;
2675 struct smux_lch_t *ch;
2676 struct smux_pkt_t *pkt;
2677 int tx_ready = 0;
2678 unsigned long flags;
2679
2680 if (smux_assert_lch_id(lcid))
2681 return -ENXIO;
2682
2683 ch = &smux_lch[lcid];
2684 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2685
2686 if (ch->local_state == SMUX_LCH_LOCAL_CLOSING) {
2687 ret = -EAGAIN;
2688 goto out;
2689 }
2690
2691 if (ch->local_state != SMUX_LCH_LOCAL_CLOSED) {
2692 pr_err("%s: open lcid %d local state %x invalid\n",
2693 __func__, lcid, ch->local_state);
2694 ret = -EINVAL;
2695 goto out;
2696 }
2697
2698 SMUX_DBG("lcid %d local state 0x%x -> 0x%x\n", lcid,
2699 ch->local_state,
2700 SMUX_LCH_LOCAL_OPENING);
2701
2702 ch->local_state = SMUX_LCH_LOCAL_OPENING;
2703
2704 ch->priv = priv;
2705 ch->notify = notify;
2706 ch->get_rx_buffer = get_rx_buffer;
2707 ret = 0;
2708
2709 /* Send Open Command */
2710 pkt = smux_alloc_pkt();
2711 if (!pkt) {
2712 ret = -ENOMEM;
2713 goto out;
2714 }
2715 pkt->hdr.magic = SMUX_MAGIC;
2716 pkt->hdr.cmd = SMUX_CMD_OPEN_LCH;
2717 pkt->hdr.flags = SMUX_CMD_OPEN_POWER_COLLAPSE;
2718 if (ch->local_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK)
2719 pkt->hdr.flags |= SMUX_CMD_OPEN_REMOTE_LOOPBACK;
2720 pkt->hdr.lcid = lcid;
2721 pkt->hdr.payload_len = 0;
2722 pkt->hdr.pad_len = 0;
2723 smux_tx_queue(pkt, ch, 0);
2724 tx_ready = 1;
2725
2726out:
2727 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2728 if (tx_ready)
2729 list_channel(ch);
2730 return ret;
2731}
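
/*
 * Illustrative sketch only: a hypothetical client opening a channel.  The
 * names my_lcid, my_notify and my_get_rx_buffer are assumptions for this
 * example; the callback signatures match the parameters documented above.
 * Returning -EAGAIN (or 0 with a NULL buffer) from get_rx_buffer causes the
 * receive to be retried later by the RX retry worker.  The example assumes
 * the callback may sleep, hence GFP_KERNEL.
 *
 *	static void my_notify(void *priv, int event_type, const void *metadata)
 *	{
 *		if (event_type == SMUX_CONNECTED)
 *			pr_info("smux channel fully open\n");
 *	}
 *
 *	static int my_get_rx_buffer(void *priv, void **pkt_priv, void **buffer,
 *				    int size)
 *	{
 *		*pkt_priv = NULL;
 *		*buffer = kmalloc(size, GFP_KERNEL);
 *		return *buffer ? 0 : -EAGAIN;
 *	}
 *
 *	ret = msm_smux_open(my_lcid, NULL, my_notify, my_get_rx_buffer);
 */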
2732
2733/**
2734 * Starts the closing sequence for a logical channel.
2735 *
2736 * @lcid Logical channel ID
2737 *
2738 * @returns 0 for success, <0 otherwise
2739 *
 2740 * Once the close event has been acknowledged by the remote side, the client
2741 * will receive a SMUX_DISCONNECTED notification.
2742 */
2743int msm_smux_close(uint8_t lcid)
2744{
2745 int ret = 0;
2746 struct smux_lch_t *ch;
2747 struct smux_pkt_t *pkt;
2748 int tx_ready = 0;
2749 unsigned long flags;
2750
2751 if (smux_assert_lch_id(lcid))
2752 return -ENXIO;
2753
2754 ch = &smux_lch[lcid];
2755 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2756 ch->local_tiocm = 0x0;
2757 ch->remote_tiocm = 0x0;
2758 ch->tx_pending_data_cnt = 0;
2759 ch->notify_lwm = 0;
2760
2761 /* Purge TX queue */
2762 spin_lock(&ch->tx_lock_lhb2);
Eric Holmberged1f00c2012-06-07 09:45:18 -06002763 smux_purge_ch_tx_queue(ch);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002764 spin_unlock(&ch->tx_lock_lhb2);
2765
2766 /* Send Close Command */
2767 if (ch->local_state == SMUX_LCH_LOCAL_OPENED ||
2768 ch->local_state == SMUX_LCH_LOCAL_OPENING) {
2769 SMUX_DBG("lcid %d local state 0x%x -> 0x%x\n", lcid,
2770 ch->local_state,
2771 SMUX_LCH_LOCAL_CLOSING);
2772
2773 ch->local_state = SMUX_LCH_LOCAL_CLOSING;
2774 pkt = smux_alloc_pkt();
2775 if (pkt) {
2776 pkt->hdr.cmd = SMUX_CMD_CLOSE_LCH;
2777 pkt->hdr.flags = 0;
2778 pkt->hdr.lcid = lcid;
2779 pkt->hdr.payload_len = 0;
2780 pkt->hdr.pad_len = 0;
2781 smux_tx_queue(pkt, ch, 0);
2782 tx_ready = 1;
2783 } else {
2784 pr_err("%s: pkt allocation failed\n", __func__);
2785 ret = -ENOMEM;
2786 }
Eric Holmbergb8435c82012-06-05 14:51:29 -06002787
2788 /* Purge RX retry queue */
2789 if (ch->rx_retry_queue_cnt)
2790 queue_delayed_work(smux_rx_wq, &ch->rx_retry_work, 0);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002791 }
2792 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2793
2794 if (tx_ready)
2795 list_channel(ch);
2796
2797 return ret;
2798}
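
/*
 * Illustrative sketch only: tearing a channel down.  The channel is not
 * fully closed until SMUX_DISCONNECTED is delivered through the notify
 * callback; only then may msm_smux_open() be called again for the same
 * lcid.  'my_lcid' is a placeholder.
 *
 *	ret = msm_smux_close(my_lcid);
 *	if (ret < 0)
 *		pr_err("smux close failed: %d\n", ret);
 */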
2799
2800/**
2801 * Write data to a logical channel.
2802 *
2803 * @lcid Logical channel ID
2804 * @pkt_priv Client data that will be returned with the SMUX_WRITE_DONE or
2805 * SMUX_WRITE_FAIL notification.
2806 * @data Data to write
2807 * @len Length of @data
2808 *
2809 * @returns 0 for success, <0 otherwise
2810 *
2811 * Data may be written immediately after msm_smux_open() is called,
2812 * but the data will wait in the transmit queue until the channel has
2813 * been fully opened.
2814 *
2815 * Once the data has been written, the client will receive either a completion
2816 * (SMUX_WRITE_DONE) or a failure notice (SMUX_WRITE_FAIL).
2817 */
2818int msm_smux_write(uint8_t lcid, void *pkt_priv, const void *data, int len)
2819{
2820 struct smux_lch_t *ch;
 2821 struct smux_pkt_t *pkt = NULL;
2822 int tx_ready = 0;
2823 unsigned long flags;
2824 int ret;
2825
2826 if (smux_assert_lch_id(lcid))
2827 return -ENXIO;
2828
2829 ch = &smux_lch[lcid];
2830 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2831
2832 if (ch->local_state != SMUX_LCH_LOCAL_OPENED &&
2833 ch->local_state != SMUX_LCH_LOCAL_OPENING) {
 2834 pr_err("%s: invalid local state %d channel %d\n",
2835 __func__, ch->local_state, lcid);
2836 ret = -EINVAL;
2837 goto out;
2838 }
2839
2840 if (len > SMUX_MAX_PKT_SIZE - sizeof(struct smux_hdr_t)) {
2841 pr_err("%s: payload %d too large\n",
2842 __func__, len);
2843 ret = -E2BIG;
2844 goto out;
2845 }
2846
2847 pkt = smux_alloc_pkt();
2848 if (!pkt) {
2849 ret = -ENOMEM;
2850 goto out;
2851 }
2852
2853 pkt->hdr.cmd = SMUX_CMD_DATA;
2854 pkt->hdr.lcid = lcid;
2855 pkt->hdr.flags = 0;
2856 pkt->hdr.payload_len = len;
2857 pkt->payload = (void *)data;
2858 pkt->priv = pkt_priv;
2859 pkt->hdr.pad_len = 0;
2860
2861 spin_lock(&ch->tx_lock_lhb2);
2862 /* verify high watermark */
 2863 SMUX_DBG("%s: pending %d\n", __func__, ch->tx_pending_data_cnt);
2864
2865 if (ch->tx_pending_data_cnt >= SMUX_WM_HIGH) {
2866 pr_err("%s: ch %d high watermark %d exceeded %d\n",
2867 __func__, lcid, SMUX_WM_HIGH,
2868 ch->tx_pending_data_cnt);
2869 ret = -EAGAIN;
2870 goto out_inner;
2871 }
2872
2873 /* queue packet for transmit */
2874 if (++ch->tx_pending_data_cnt == SMUX_WM_HIGH) {
2875 ch->notify_lwm = 1;
2876 pr_err("%s: high watermark hit\n", __func__);
2877 schedule_notify(lcid, SMUX_HIGH_WM_HIT, NULL);
2878 }
2879 list_add_tail(&pkt->list, &ch->tx_queue);
2880
2881 /* add to ready list */
2882 if (IS_FULLY_OPENED(ch))
2883 tx_ready = 1;
2884
2885 ret = 0;
2886
2887out_inner:
2888 spin_unlock(&ch->tx_lock_lhb2);
2889
2890out:
 2891 if (ret && pkt)
 2892 smux_free_pkt(pkt);
2893 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2894
2895 if (tx_ready)
2896 list_channel(ch);
2897
2898 return ret;
2899}
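
/*
 * Illustrative sketch only: queueing a buffer for transmit.  The buffer is
 * not copied, so it must remain valid until SMUX_WRITE_DONE (or
 * SMUX_WRITE_FAIL) is reported through the notify callback.  -EAGAIN means
 * the per-channel high watermark was hit; the client can resume writing
 * after SMUX_LOW_WM_HIT.  'my_lcid', 'buf' and 'len' are placeholders; buf
 * is also passed as pkt_priv so the completion can identify it.
 *
 *	ret = msm_smux_write(my_lcid, buf, buf, len);
 *	if (ret == -EAGAIN)
 *		... wait for the SMUX_LOW_WM_HIT notification, then retry
 *	else if (ret < 0)
 *		pr_err("smux write failed: %d\n", ret);
 */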
2900
2901/**
2902 * Returns true if the TX queue is currently full (high water mark).
2903 *
2904 * @lcid Logical channel ID
2905 * @returns 0 if channel is not full
2906 * 1 if it is full
2907 * < 0 for error
2908 */
2909int msm_smux_is_ch_full(uint8_t lcid)
2910{
2911 struct smux_lch_t *ch;
2912 unsigned long flags;
2913 int is_full = 0;
2914
2915 if (smux_assert_lch_id(lcid))
2916 return -ENXIO;
2917
2918 ch = &smux_lch[lcid];
2919
2920 spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
2921 if (ch->tx_pending_data_cnt >= SMUX_WM_HIGH)
2922 is_full = 1;
2923 spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);
2924
2925 return is_full;
2926}
2927
2928/**
 2929 * Returns true if the TX queue has space for more packets (it is at or
 2930 * below the low water mark).
2931 *
2932 * @lcid Logical channel ID
2933 * @returns 0 if channel is above low watermark
2934 * 1 if it's at or below the low watermark
2935 * < 0 for error
2936 */
2937int msm_smux_is_ch_low(uint8_t lcid)
2938{
2939 struct smux_lch_t *ch;
2940 unsigned long flags;
2941 int is_low = 0;
2942
2943 if (smux_assert_lch_id(lcid))
2944 return -ENXIO;
2945
2946 ch = &smux_lch[lcid];
2947
2948 spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
2949 if (ch->tx_pending_data_cnt <= SMUX_WM_LOW)
2950 is_low = 1;
2951 spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);
2952
2953 return is_low;
2954}
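
/*
 * Illustrative sketch only: a single-threaded producer can poll the
 * watermark helpers to pace itself and avoid the -EAGAIN path in
 * msm_smux_write().  'my_lcid', 'priv', 'buf' and 'len' are placeholders.
 *
 *	if (!msm_smux_is_ch_full(my_lcid))
 *		ret = msm_smux_write(my_lcid, priv, buf, len);
 */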
2955
2956/**
2957 * Send TIOCM status update.
2958 *
2959 * @ch Channel for update
2960 *
2961 * @returns 0 for success, <0 for failure
2962 *
2963 * Channel lock must be held before calling.
2964 */
2965static int smux_send_status_cmd(struct smux_lch_t *ch)
2966{
2967 struct smux_pkt_t *pkt;
2968
2969 if (!ch)
2970 return -EINVAL;
2971
2972 pkt = smux_alloc_pkt();
2973 if (!pkt)
2974 return -ENOMEM;
2975
2976 pkt->hdr.lcid = ch->lcid;
2977 pkt->hdr.cmd = SMUX_CMD_STATUS;
2978 pkt->hdr.flags = ch->local_tiocm;
2979 pkt->hdr.payload_len = 0;
2980 pkt->hdr.pad_len = 0;
2981 smux_tx_queue(pkt, ch, 0);
2982
2983 return 0;
2984}
2985
2986/**
2987 * Internal helper function for getting the TIOCM status with
2988 * state_lock_lhb1 already locked.
2989 *
2990 * @ch Channel pointer
2991 *
2992 * @returns TIOCM status
2993 */
2994static long msm_smux_tiocm_get_atomic(struct smux_lch_t *ch)
2995{
2996 long status = 0x0;
2997
2998 status |= (ch->remote_tiocm & SMUX_CMD_STATUS_RTC) ? TIOCM_DSR : 0;
2999 status |= (ch->remote_tiocm & SMUX_CMD_STATUS_RTR) ? TIOCM_CTS : 0;
3000 status |= (ch->remote_tiocm & SMUX_CMD_STATUS_RI) ? TIOCM_RI : 0;
3001 status |= (ch->remote_tiocm & SMUX_CMD_STATUS_DCD) ? TIOCM_CD : 0;
3002
3003 status |= (ch->local_tiocm & SMUX_CMD_STATUS_RTC) ? TIOCM_DTR : 0;
3004 status |= (ch->local_tiocm & SMUX_CMD_STATUS_RTR) ? TIOCM_RTS : 0;
3005
3006 return status;
3007}
3008
3009/**
3010 * Get the TIOCM status bits.
3011 *
3012 * @lcid Logical channel ID
3013 *
3014 * @returns >= 0 TIOCM status bits
3015 * < 0 Error condition
3016 */
3017long msm_smux_tiocm_get(uint8_t lcid)
3018{
3019 struct smux_lch_t *ch;
3020 unsigned long flags;
3021 long status = 0x0;
3022
3023 if (smux_assert_lch_id(lcid))
3024 return -ENXIO;
3025
3026 ch = &smux_lch[lcid];
3027 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
3028 status = msm_smux_tiocm_get_atomic(ch);
3029 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
3030
3031 return status;
3032}
3033
3034/**
3035 * Set/clear the TIOCM status bits.
3036 *
3037 * @lcid Logical channel ID
3038 * @set Bits to set
3039 * @clear Bits to clear
3040 *
3041 * @returns 0 for success; < 0 for failure
3042 *
3043 * If a bit is specified in both the @set and @clear masks, then the clear bit
3044 * definition will dominate and the bit will be cleared.
3045 */
3046int msm_smux_tiocm_set(uint8_t lcid, uint32_t set, uint32_t clear)
3047{
3048 struct smux_lch_t *ch;
3049 unsigned long flags;
3050 uint8_t old_status;
3051 uint8_t status_set = 0x0;
3052 uint8_t status_clear = 0x0;
3053 int tx_ready = 0;
3054 int ret = 0;
3055
3056 if (smux_assert_lch_id(lcid))
3057 return -ENXIO;
3058
3059 ch = &smux_lch[lcid];
3060 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
3061
3062 status_set |= (set & TIOCM_DTR) ? SMUX_CMD_STATUS_RTC : 0;
3063 status_set |= (set & TIOCM_RTS) ? SMUX_CMD_STATUS_RTR : 0;
3064 status_set |= (set & TIOCM_RI) ? SMUX_CMD_STATUS_RI : 0;
3065 status_set |= (set & TIOCM_CD) ? SMUX_CMD_STATUS_DCD : 0;
3066
3067 status_clear |= (clear & TIOCM_DTR) ? SMUX_CMD_STATUS_RTC : 0;
3068 status_clear |= (clear & TIOCM_RTS) ? SMUX_CMD_STATUS_RTR : 0;
3069 status_clear |= (clear & TIOCM_RI) ? SMUX_CMD_STATUS_RI : 0;
3070 status_clear |= (clear & TIOCM_CD) ? SMUX_CMD_STATUS_DCD : 0;
3071
3072 old_status = ch->local_tiocm;
3073 ch->local_tiocm |= status_set;
3074 ch->local_tiocm &= ~status_clear;
3075
3076 if (ch->local_tiocm != old_status) {
3077 ret = smux_send_status_cmd(ch);
3078 tx_ready = 1;
3079 }
3080 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
3081
3082 if (tx_ready)
3083 list_channel(ch);
3084
3085 return ret;
3086}
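
/*
 * Illustrative sketch only: asserting DTR and RTS on a channel.  A change in
 * the local status triggers a SMUX_CMD_STATUS packet to the remote side, and
 * the remote side's bits can be read back with msm_smux_tiocm_get().
 * 'my_lcid' is a placeholder.
 *
 *	ret = msm_smux_tiocm_set(my_lcid, TIOCM_DTR | TIOCM_RTS, 0);
 *	status = msm_smux_tiocm_get(my_lcid);
 */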
3087
3088/**********************************************************************/
Eric Holmberged1f00c2012-06-07 09:45:18 -06003089/* Subsystem Restart */
3090/**********************************************************************/
3091static struct notifier_block ssr_notifier = {
3092 .notifier_call = ssr_notifier_cb,
3093};
3094
3095/**
3096 * Handle Subsystem Restart (SSR) notifications.
3097 *
3098 * @this Pointer to ssr_notifier
3099 * @code SSR Code
3100 * @data Data pointer (not used)
3101 */
3102static int ssr_notifier_cb(struct notifier_block *this,
3103 unsigned long code,
3104 void *data)
3105{
3106 unsigned long flags;
3107 int power_off_uart = 0;
3108
3109 if (code != SUBSYS_AFTER_SHUTDOWN)
3110 return NOTIFY_DONE;
3111
3112 /* Cleanup channels */
3113 smux_lch_purge();
3114
3115 /* Power-down UART */
3116 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
3117 if (smux.power_state != SMUX_PWR_OFF) {
3118 SMUX_DBG("%s: SSR - turning off UART\n", __func__);
3119 smux.power_state = SMUX_PWR_OFF;
3120 power_off_uart = 1;
3121 }
3122 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
3123
3124 if (power_off_uart)
3125 smux_uart_power_off();
3126
3127 return NOTIFY_DONE;
3128}
3129
3130/**********************************************************************/
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003131/* Line Discipline Interface */
3132/**********************************************************************/
Eric Holmberged1f00c2012-06-07 09:45:18 -06003133static void smux_pdev_release(struct device *dev)
3134{
3135 struct platform_device *pdev;
3136
3137 pdev = container_of(dev, struct platform_device, dev);
3138 SMUX_DBG("%s: releasing pdev %p '%s'\n", __func__, pdev, pdev->name);
3139 memset(&pdev->dev, 0x0, sizeof(pdev->dev));
3140}
3141
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003142static int smuxld_open(struct tty_struct *tty)
3143{
3144 int i;
3145 int tmp;
3146 unsigned long flags;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003147
3148 if (!smux.is_initialized)
3149 return -ENODEV;
3150
Eric Holmberged1f00c2012-06-07 09:45:18 -06003151 mutex_lock(&smux.mutex_lha0);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003152 if (smux.ld_open_count) {
3153 pr_err("%s: %p multiple instances not supported\n",
3154 __func__, tty);
Eric Holmberged1f00c2012-06-07 09:45:18 -06003155 mutex_unlock(&smux.mutex_lha0);
Eric Holmberg902c51e2012-05-29 12:12:16 -06003156 return -EEXIST;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003157 }
3158
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003159 if (tty->ops->write == NULL) {
Eric Holmberged1f00c2012-06-07 09:45:18 -06003160 pr_err("%s: tty->ops->write is NULL\n", __func__);
3161 mutex_unlock(&smux.mutex_lha0);
Eric Holmberg902c51e2012-05-29 12:12:16 -06003162 return -EINVAL;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003163 }
3164
3165 /* connect to TTY */
Eric Holmberged1f00c2012-06-07 09:45:18 -06003166 ++smux.ld_open_count;
3167 smux.in_reset = 0;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003168 smux.tty = tty;
3169 tty->disc_data = &smux;
3170 tty->receive_room = TTY_RECEIVE_ROOM;
3171 tty_driver_flush_buffer(tty);
3172
3173 /* power-down the UART if we are idle */
Eric Holmberged1f00c2012-06-07 09:45:18 -06003174 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003175 if (smux.power_state == SMUX_PWR_OFF) {
3176 SMUX_DBG("%s: powering off uart\n", __func__);
3177 smux.power_state = SMUX_PWR_OFF_FLUSH;
Eric Holmberged1f00c2012-06-07 09:45:18 -06003178 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003179 queue_work(smux_tx_wq, &smux_inactivity_work);
3180 } else {
Eric Holmberged1f00c2012-06-07 09:45:18 -06003181 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003182 }
3183
3184 /* register platform devices */
3185 for (i = 0; i < ARRAY_SIZE(smux_devs); ++i) {
Eric Holmberged1f00c2012-06-07 09:45:18 -06003186 SMUX_DBG("%s: register pdev '%s'\n",
3187 __func__, smux_devs[i].name);
3188 smux_devs[i].dev.release = smux_pdev_release;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003189 tmp = platform_device_register(&smux_devs[i]);
3190 if (tmp)
3191 pr_err("%s: error %d registering device %s\n",
3192 __func__, tmp, smux_devs[i].name);
3193 }
Eric Holmberged1f00c2012-06-07 09:45:18 -06003194 mutex_unlock(&smux.mutex_lha0);
Eric Holmberg902c51e2012-05-29 12:12:16 -06003195 return 0;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003196}
3197
3198static void smuxld_close(struct tty_struct *tty)
3199{
3200 unsigned long flags;
Eric Holmberged1f00c2012-06-07 09:45:18 -06003201 int power_up_uart = 0;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003202 int i;
3203
Eric Holmberged1f00c2012-06-07 09:45:18 -06003204 SMUX_DBG("%s: ldisc unload\n", __func__);
3205 mutex_lock(&smux.mutex_lha0);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003206 if (smux.ld_open_count <= 0) {
3207 pr_err("%s: invalid ld count %d\n", __func__,
3208 smux.ld_open_count);
Eric Holmberged1f00c2012-06-07 09:45:18 -06003209 mutex_unlock(&smux.mutex_lha0);
Eric Holmberg902c51e2012-05-29 12:12:16 -06003210 return;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003211 }
Eric Holmberged1f00c2012-06-07 09:45:18 -06003212 smux.in_reset = 1;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003213 --smux.ld_open_count;
Eric Holmberged1f00c2012-06-07 09:45:18 -06003214
3215 /* Cleanup channels */
3216 smux_lch_purge();
3217
3218 /* Unregister platform devices */
3219 for (i = 0; i < ARRAY_SIZE(smux_devs); ++i) {
3220 SMUX_DBG("%s: unregister pdev '%s'\n",
3221 __func__, smux_devs[i].name);
3222 platform_device_unregister(&smux_devs[i]);
3223 }
3224
3225 /* Schedule UART power-up if it's down */
3226 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
Eric Holmbergffddd4c2012-06-08 12:37:51 -06003227 if (smux.power_state == SMUX_PWR_OFF)
Eric Holmberged1f00c2012-06-07 09:45:18 -06003228 power_up_uart = 1;
Eric Holmbergffddd4c2012-06-08 12:37:51 -06003229 smux.power_state = SMUX_PWR_OFF;
Eric Holmberged1f00c2012-06-07 09:45:18 -06003230 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
3231
3232 if (power_up_uart)
3233 smux_uart_power_on();
3234
3235 /* Disconnect from TTY */
3236 smux.tty = NULL;
3237 mutex_unlock(&smux.mutex_lha0);
3238 SMUX_DBG("%s: ldisc complete\n", __func__);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003239}
3240
3241/**
3242 * Receive data from TTY Line Discipline.
3243 *
3244 * @tty TTY structure
3245 * @cp Character data
3246 * @fp Flag data
3247 * @count Size of character and flag data
3248 */
3249void smuxld_receive_buf(struct tty_struct *tty, const unsigned char *cp,
3250 char *fp, int count)
3251{
3252 int i;
3253 int last_idx = 0;
3254 const char *tty_name = NULL;
3255 char *f;
3256
3257 if (smux_debug_mask & MSM_SMUX_DEBUG)
3258 print_hex_dump(KERN_INFO, "smux tty rx: ", DUMP_PREFIX_OFFSET,
3259 16, 1, cp, count, true);
3260
3261 /* verify error flags */
3262 for (i = 0, f = fp; i < count; ++i, ++f) {
3263 if (*f != TTY_NORMAL) {
3264 if (tty)
3265 tty_name = tty->name;
3266 pr_err("%s: TTY %s Error %d (%s)\n", __func__,
3267 tty_name, *f, tty_flag_to_str(*f));
3268
3269 /* feed all previous valid data to the parser */
3270 smux_rx_state_machine(cp + last_idx, i - last_idx,
3271 TTY_NORMAL);
3272
3273 /* feed bad data to parser */
3274 smux_rx_state_machine(cp + i, 1, *f);
3275 last_idx = i + 1;
3276 }
3277 }
3278
3279 /* feed data to RX state machine */
3280 smux_rx_state_machine(cp + last_idx, count - last_idx, TTY_NORMAL);
3281}
3282
3283static void smuxld_flush_buffer(struct tty_struct *tty)
3284{
3285 pr_err("%s: not supported\n", __func__);
3286}
3287
3288static ssize_t smuxld_chars_in_buffer(struct tty_struct *tty)
3289{
3290 pr_err("%s: not supported\n", __func__);
3291 return -ENODEV;
3292}
3293
3294static ssize_t smuxld_read(struct tty_struct *tty, struct file *file,
3295 unsigned char __user *buf, size_t nr)
3296{
3297 pr_err("%s: not supported\n", __func__);
3298 return -ENODEV;
3299}
3300
3301static ssize_t smuxld_write(struct tty_struct *tty, struct file *file,
3302 const unsigned char *buf, size_t nr)
3303{
3304 pr_err("%s: not supported\n", __func__);
3305 return -ENODEV;
3306}
3307
3308static int smuxld_ioctl(struct tty_struct *tty, struct file *file,
3309 unsigned int cmd, unsigned long arg)
3310{
3311 pr_err("%s: not supported\n", __func__);
3312 return -ENODEV;
3313}
3314
3315static unsigned int smuxld_poll(struct tty_struct *tty, struct file *file,
3316 struct poll_table_struct *tbl)
3317{
3318 pr_err("%s: not supported\n", __func__);
3319 return -ENODEV;
3320}
3321
3322static void smuxld_write_wakeup(struct tty_struct *tty)
3323{
3324 pr_err("%s: not supported\n", __func__);
3325}
3326
3327static struct tty_ldisc_ops smux_ldisc_ops = {
3328 .owner = THIS_MODULE,
3329 .magic = TTY_LDISC_MAGIC,
3330 .name = "n_smux",
3331 .open = smuxld_open,
3332 .close = smuxld_close,
3333 .flush_buffer = smuxld_flush_buffer,
3334 .chars_in_buffer = smuxld_chars_in_buffer,
3335 .read = smuxld_read,
3336 .write = smuxld_write,
3337 .ioctl = smuxld_ioctl,
3338 .poll = smuxld_poll,
3339 .receive_buf = smuxld_receive_buf,
3340 .write_wakeup = smuxld_write_wakeup
3341};
3342
3343static int __init smux_init(void)
3344{
3345 int ret;
3346
Eric Holmberged1f00c2012-06-07 09:45:18 -06003347 mutex_init(&smux.mutex_lha0);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003348
3349 spin_lock_init(&smux.rx_lock_lha1);
3350 smux.rx_state = SMUX_RX_IDLE;
3351 smux.power_state = SMUX_PWR_OFF;
3352 smux.pwr_wakeup_delay_us = 1;
3353 smux.powerdown_enabled = 0;
Eric Holmbergffddd4c2012-06-08 12:37:51 -06003354 INIT_LIST_HEAD(&smux.power_queue);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003355 smux.rx_activity_flag = 0;
3356 smux.tx_activity_flag = 0;
3357 smux.recv_len = 0;
3358 smux.tty = NULL;
3359 smux.ld_open_count = 0;
3360 smux.in_reset = 0;
3361 smux.is_initialized = 1;
3362 smux_byte_loopback = 0;
3363
3364 spin_lock_init(&smux.tx_lock_lha2);
3365 INIT_LIST_HEAD(&smux.lch_tx_ready_list);
3366
3367 ret = tty_register_ldisc(N_SMUX, &smux_ldisc_ops);
3368 if (ret != 0) {
3369 pr_err("%s: error %d registering line discipline\n",
3370 __func__, ret);
3371 return ret;
3372 }
3373
Eric Holmberged1f00c2012-06-07 09:45:18 -06003374 subsys_notif_register_notifier("qsc", &ssr_notifier);
3375
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003376 ret = lch_init();
3377 if (ret != 0) {
3378 pr_err("%s: lch_init failed\n", __func__);
3379 return ret;
3380 }
3381
3382 return 0;
3383}
3384
3385static void __exit smux_exit(void)
3386{
3387 int ret;
3388
3389 ret = tty_unregister_ldisc(N_SMUX);
3390 if (ret != 0) {
 3391 pr_err("%s: error %d unregistering line discipline\n",
3392 __func__, ret);
3393 return;
3394 }
3395}
3396
3397module_init(smux_init);
3398module_exit(smux_exit);
3399
3400MODULE_DESCRIPTION("Serial Mux TTY Line Discipline");
3401MODULE_LICENSE("GPL v2");
3402MODULE_ALIAS_LDISC(N_SMUX);