/* drivers/tty/n_smux.c
 *
 * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/tty_driver.h>
#include <linux/smux.h>
#include <linux/list.h>
#include <linux/kfifo.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <mach/subsystem_notif.h>
#include <mach/subsystem_restart.h>
#include <mach/msm_serial_hs.h>
#include "smux_private.h"
#include "smux_loopback.h"

#define SMUX_NOTIFY_FIFO_SIZE	128
#define SMUX_TX_QUEUE_SIZE	256
#define SMUX_WM_LOW		2
#define SMUX_WM_HIGH		4
#define SMUX_PKT_LOG_SIZE	80

/* Maximum size we can accept in a single RX buffer */
#define TTY_RECEIVE_ROOM	65536
#define TTY_BUFFER_FULL_WAIT_MS	50

/* maximum sleep time between wakeup attempts */
#define SMUX_WAKEUP_DELAY_MAX	(1 << 20)

/* minimum delay for scheduling delayed work */
#define SMUX_WAKEUP_DELAY_MIN	(1 << 15)

/* inactivity timeout for no rx/tx activity */
#define SMUX_INACTIVITY_TIMEOUT_MS	1000

/* RX get_rx_buffer retry timeout values */
#define SMUX_RX_RETRY_MIN_MS	(1 << 0)	/* 1 ms */
#define SMUX_RX_RETRY_MAX_MS	(1 << 10)	/* 1024 ms */

enum {
	MSM_SMUX_DEBUG = 1U << 0,
	MSM_SMUX_INFO = 1U << 1,
	MSM_SMUX_POWER_INFO = 1U << 2,
	MSM_SMUX_PKT = 1U << 3,
};

static int smux_debug_mask;
module_param_named(debug_mask, smux_debug_mask,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);

/* Simulated wakeup used for testing */
int smux_byte_loopback;
module_param_named(byte_loopback, smux_byte_loopback,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);
int smux_simulate_wakeup_delay = 1;
module_param_named(simulate_wakeup_delay, smux_simulate_wakeup_delay,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);

#define SMUX_DBG(x...) do {				\
	if (smux_debug_mask & MSM_SMUX_DEBUG)		\
		pr_info(x);				\
} while (0)

#define SMUX_PWR(x...) do {				\
	if (smux_debug_mask & MSM_SMUX_POWER_INFO)	\
		pr_info(x);				\
} while (0)

#define SMUX_LOG_PKT_RX(pkt) do {			\
	if (smux_debug_mask & MSM_SMUX_PKT)		\
		smux_log_pkt(pkt, 1);			\
} while (0)

#define SMUX_LOG_PKT_TX(pkt) do {			\
	if (smux_debug_mask & MSM_SMUX_PKT)		\
		smux_log_pkt(pkt, 0);			\
} while (0)

/**
 * Return true if channel is fully opened (both
 * local and remote sides are in the OPENED state).
 */
#define IS_FULLY_OPENED(ch) \
	(ch && (ch)->local_state == SMUX_LCH_LOCAL_OPENED \
	 && (ch)->remote_state == SMUX_LCH_REMOTE_OPENED)
103
104static struct platform_device smux_devs[] = {
105 {.name = "SMUX_CTL", .id = -1},
106 {.name = "SMUX_RMNET", .id = -1},
107 {.name = "SMUX_DUN_DATA_HSUART", .id = 0},
108 {.name = "SMUX_RMNET_DATA_HSUART", .id = 1},
109 {.name = "SMUX_RMNET_CTL_HSUART", .id = 0},
110 {.name = "SMUX_DIAG", .id = -1},
111};
112
113enum {
114 SMUX_CMD_STATUS_RTC = 1 << 0,
115 SMUX_CMD_STATUS_RTR = 1 << 1,
116 SMUX_CMD_STATUS_RI = 1 << 2,
117 SMUX_CMD_STATUS_DCD = 1 << 3,
118 SMUX_CMD_STATUS_FLOW_CNTL = 1 << 4,
119};
120
121/* Channel mode */
122enum {
123 SMUX_LCH_MODE_NORMAL,
124 SMUX_LCH_MODE_LOCAL_LOOPBACK,
125 SMUX_LCH_MODE_REMOTE_LOOPBACK,
126};
127
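/*
 * RX state machine states.  Informal sketch of the flow implemented by the
 * smux_rx_handle_*() functions later in this file (an aid to the reader,
 * not a specification):
 *
 *   SMUX_RX_IDLE     scan incoming bytes for SMUX_MAGIC_WORD1 or wakeup bytes
 *   SMUX_RX_MAGIC    wait for SMUX_MAGIC_WORD2 to complete the magic number
 *   SMUX_RX_HDR      accumulate sizeof(struct smux_hdr_t) header bytes
 *   SMUX_RX_PAYLOAD  copy payload_len + pad_len bytes, then deserialize,
 *                    dispatch the packet, and return to SMUX_RX_IDLE
 *   SMUX_RX_FAILURE  unrecoverable TTY error; remain here until reset
 */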
128enum {
129 SMUX_RX_IDLE,
130 SMUX_RX_MAGIC,
131 SMUX_RX_HDR,
132 SMUX_RX_PAYLOAD,
133 SMUX_RX_FAILURE,
134};
135
136/**
137 * Power states.
138 *
139 * The _FLUSH states are internal transitional states and are not part of the
140 * official state machine.
141 */
142enum {
143 SMUX_PWR_OFF,
144 SMUX_PWR_TURNING_ON,
145 SMUX_PWR_ON,
146 SMUX_PWR_TURNING_OFF_FLUSH,
147 SMUX_PWR_TURNING_OFF,
148 SMUX_PWR_OFF_FLUSH,
149};
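
/*
 * Informal sketch of the power transitions exercised later in this file
 * (see smux_handle_wakeup_req(), smux_handle_wakeup_ack() and
 * smux_handle_rx_power_cmd(); this comment is an aid, not a specification):
 *
 *   SMUX_PWR_OFF / SMUX_PWR_TURNING_ON --(wakeup request/ack)--> SMUX_PWR_ON
 *   SMUX_PWR_ON / SMUX_PWR_TURNING_OFF --(remote sleep request)-->
 *                                            SMUX_PWR_TURNING_OFF_FLUSH
 *   SMUX_PWR_TURNING_OFF --(local sleep request ack)--> SMUX_PWR_OFF_FLUSH
 */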
150
/**
 * Logical Channel Structure.  One instance per channel.
 *
 * Locking Hierarchy
 * Each lock has a postfix that describes the locking level.  If multiple
 * locks are required, they must be acquired in increasing lock-hierarchy
 * order, which avoids deadlocks.
 *
 * Locking Example
 * If state_lock_lhb1 is currently held and the TX list needs to be
 * manipulated, then tx_lock_lhb2 may be locked since its locking hierarchy
 * is greater.  However, if tx_lock_lhb2 is held, then state_lock_lhb1 may
 * not be acquired since it would result in a deadlock.
 *
 * Note that the Line Discipline locks (*_lha) should always be acquired
 * before the logical channel locks.  See the illustrative snippet after
 * this structure definition.
 */
168struct smux_lch_t {
169 /* channel state */
170 spinlock_t state_lock_lhb1;
171 uint8_t lcid;
172 unsigned local_state;
173 unsigned local_mode;
174 uint8_t local_tiocm;
175
176 unsigned remote_state;
177 unsigned remote_mode;
178 uint8_t remote_tiocm;
179
180 int tx_flow_control;
181
182 /* client callbacks and private data */
183 void *priv;
184 void (*notify)(void *priv, int event_type, const void *metadata);
185 int (*get_rx_buffer)(void *priv, void **pkt_priv, void **buffer,
186 int size);
187
	/* RX Info */
189 struct list_head rx_retry_queue;
190 unsigned rx_retry_queue_cnt;
191 struct delayed_work rx_retry_work;
192
	/* TX Info */
194 spinlock_t tx_lock_lhb2;
195 struct list_head tx_queue;
196 struct list_head tx_ready_list;
197 unsigned tx_pending_data_cnt;
198 unsigned notify_lwm;
199};
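
/*
 * Illustrative lock-ordering snippet referenced by the locking notes above.
 * This is a hypothetical fragment, not part of the driver; it mirrors the
 * pattern used by smux_lch_purge() later in this file.  Locks are taken in
 * increasing hierarchy order, e.g. state_lock_lhb1 before tx_lock_lhb2:
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&ch->state_lock_lhb1, flags);	(hierarchy ...b1)
 *	spin_lock(&ch->tx_lock_lhb2);			(hierarchy ...b2 - OK)
 *	...
 *	spin_unlock(&ch->tx_lock_lhb2);
 *	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
 *
 * Acquiring state_lock_lhb1 while already holding tx_lock_lhb2 would invert
 * the hierarchy and risk deadlock.
 */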
200
201union notifier_metadata {
202 struct smux_meta_disconnected disconnected;
203 struct smux_meta_read read;
204 struct smux_meta_write write;
205 struct smux_meta_tiocm tiocm;
206};
207
208struct smux_notify_handle {
209 void (*notify)(void *priv, int event_type, const void *metadata);
210 void *priv;
211 int event_type;
212 union notifier_metadata *metadata;
213};
214
/**
 * Get RX Buffer Retry structure.
 *
 * This is used for clients that are unable to provide an RX buffer
 * immediately.  The structure temporarily holds the received data so that
 * the buffer request can be retried later.
 */
struct smux_rx_pkt_retry {
	struct smux_pkt_t *pkt;
	struct list_head rx_retry_list;
	unsigned timeout_in_ms;
};
227
228/**
 * Receive worker data structure.
230 *
231 * One instance is created for every call to smux_rx_state_machine.
232 */
233struct smux_rx_worker_data {
234 const unsigned char *data;
235 int len;
236 int flag;
237
238 struct work_struct work;
239 struct completion work_complete;
240};
241
/**
 * Line discipline and module structure.
 *
 * Only one instance exists since multiple instances of the line discipline
 * are not allowed.
 */
struct smux_ldisc_t {
	struct mutex mutex_lha0;

	int is_initialized;
	int in_reset;
	int ld_open_count;
	struct tty_struct *tty;

	/* RX State Machine (single-threaded access by smux_rx_wq) */
	unsigned char recv_buf[SMUX_MAX_PKT_SIZE];
	unsigned int recv_len;
	unsigned int pkt_remain;
	unsigned rx_state;

	/* RX Activity - accessed by multiple threads */
	spinlock_t rx_lock_lha1;
	unsigned rx_activity_flag;

	/* TX / Power */
	spinlock_t tx_lock_lha2;
	struct list_head lch_tx_ready_list;
	unsigned power_state;
	unsigned pwr_wakeup_delay_us;
	unsigned tx_activity_flag;
	unsigned powerdown_enabled;
	struct list_head power_queue;
};
275
276
277/* data structures */
278static struct smux_lch_t smux_lch[SMUX_NUM_LOGICAL_CHANNELS];
279static struct smux_ldisc_t smux;
280static const char *tty_error_type[] = {
281 [TTY_NORMAL] = "normal",
282 [TTY_OVERRUN] = "overrun",
283 [TTY_BREAK] = "break",
284 [TTY_PARITY] = "parity",
285 [TTY_FRAME] = "framing",
286};
287
288static const char *smux_cmds[] = {
289 [SMUX_CMD_DATA] = "DATA",
290 [SMUX_CMD_OPEN_LCH] = "OPEN",
291 [SMUX_CMD_CLOSE_LCH] = "CLOSE",
292 [SMUX_CMD_STATUS] = "STATUS",
293 [SMUX_CMD_PWR_CTL] = "PWR",
294 [SMUX_CMD_BYTE] = "Raw Byte",
295};
296
297static void smux_notify_local_fn(struct work_struct *work);
298static DECLARE_WORK(smux_notify_local, smux_notify_local_fn);
299
300static struct workqueue_struct *smux_notify_wq;
301static size_t handle_size;
302static struct kfifo smux_notify_fifo;
303static int queued_fifo_notifications;
304static DEFINE_SPINLOCK(notify_lock_lhc1);
305
306static struct workqueue_struct *smux_tx_wq;
Eric Holmberg0560f7a2012-05-31 15:50:26 -0600307static struct workqueue_struct *smux_rx_wq;
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600308static void smux_tx_worker(struct work_struct *work);
309static DECLARE_WORK(smux_tx_work, smux_tx_worker);
310
311static void smux_wakeup_worker(struct work_struct *work);
Eric Holmbergb8435c82012-06-05 14:51:29 -0600312static void smux_rx_retry_worker(struct work_struct *work);
Eric Holmberg0560f7a2012-05-31 15:50:26 -0600313static void smux_rx_worker(struct work_struct *work);
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600314static DECLARE_WORK(smux_wakeup_work, smux_wakeup_worker);
315static DECLARE_DELAYED_WORK(smux_wakeup_delayed_work, smux_wakeup_worker);
316
317static void smux_inactivity_worker(struct work_struct *work);
318static DECLARE_WORK(smux_inactivity_work, smux_inactivity_worker);
319static DECLARE_DELAYED_WORK(smux_delayed_inactivity_work,
320 smux_inactivity_worker);
321
322static long msm_smux_tiocm_get_atomic(struct smux_lch_t *ch);
323static void list_channel(struct smux_lch_t *ch);
324static int smux_send_status_cmd(struct smux_lch_t *ch);
325static int smux_dispatch_rx_pkt(struct smux_pkt_t *pkt);
Eric Holmbergffddd4c2012-06-08 12:37:51 -0600326static void smux_flush_tty(void);
Eric Holmberged1f00c2012-06-07 09:45:18 -0600327static void smux_purge_ch_tx_queue(struct smux_lch_t *ch);
328static int schedule_notify(uint8_t lcid, int event,
329 const union notifier_metadata *metadata);
330static int ssr_notifier_cb(struct notifier_block *this,
331 unsigned long code,
332 void *data);
Eric Holmberg92a67df2012-06-25 13:56:24 -0600333static void smux_uart_power_on_atomic(void);
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600334
335/**
336 * Convert TTY Error Flags to string for logging purposes.
337 *
338 * @flag TTY_* flag
339 * @returns String description or NULL if unknown
340 */
341static const char *tty_flag_to_str(unsigned flag)
342{
343 if (flag < ARRAY_SIZE(tty_error_type))
344 return tty_error_type[flag];
345 return NULL;
346}
347
348/**
349 * Convert SMUX Command to string for logging purposes.
350 *
351 * @cmd SMUX command
352 * @returns String description or NULL if unknown
353 */
354static const char *cmd_to_str(unsigned cmd)
355{
356 if (cmd < ARRAY_SIZE(smux_cmds))
357 return smux_cmds[cmd];
358 return NULL;
359}
360
361/**
362 * Set the reset state due to an unrecoverable failure.
363 */
364static void smux_enter_reset(void)
365{
366 pr_err("%s: unrecoverable failure, waiting for ssr\n", __func__);
367 smux.in_reset = 1;
368}
369
370static int lch_init(void)
371{
372 unsigned int id;
373 struct smux_lch_t *ch;
374 int i = 0;
375
376 handle_size = sizeof(struct smux_notify_handle *);
377
	smux_notify_wq = create_singlethread_workqueue("smux_notify_wq");
	smux_tx_wq = create_singlethread_workqueue("smux_tx_wq");
	smux_rx_wq = create_singlethread_workqueue("smux_rx_wq");

	if (!smux_notify_wq || !smux_tx_wq || !smux_rx_wq) {
		SMUX_DBG("%s: create_singlethread_workqueue ENOMEM\n",
							__func__);
		return -ENOMEM;
	}
387
388 i |= kfifo_alloc(&smux_notify_fifo,
389 SMUX_NOTIFY_FIFO_SIZE * handle_size,
390 GFP_KERNEL);
391 i |= smux_loopback_init();
392
393 if (i) {
394 pr_err("%s: out of memory error\n", __func__);
395 return -ENOMEM;
396 }
397
398 for (id = 0 ; id < SMUX_NUM_LOGICAL_CHANNELS; id++) {
399 ch = &smux_lch[id];
400
401 spin_lock_init(&ch->state_lock_lhb1);
402 ch->lcid = id;
403 ch->local_state = SMUX_LCH_LOCAL_CLOSED;
404 ch->local_mode = SMUX_LCH_MODE_NORMAL;
405 ch->local_tiocm = 0x0;
406 ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
407 ch->remote_mode = SMUX_LCH_MODE_NORMAL;
408 ch->remote_tiocm = 0x0;
409 ch->tx_flow_control = 0;
410 ch->priv = 0;
411 ch->notify = 0;
412 ch->get_rx_buffer = 0;
413
Eric Holmbergb8435c82012-06-05 14:51:29 -0600414 INIT_LIST_HEAD(&ch->rx_retry_queue);
415 ch->rx_retry_queue_cnt = 0;
416 INIT_DELAYED_WORK(&ch->rx_retry_work, smux_rx_retry_worker);
417
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600418 spin_lock_init(&ch->tx_lock_lhb2);
419 INIT_LIST_HEAD(&ch->tx_queue);
420 INIT_LIST_HEAD(&ch->tx_ready_list);
421 ch->tx_pending_data_cnt = 0;
422 ch->notify_lwm = 0;
423 }
424
425 return 0;
426}
427
/**
 * Empty and clean up all SMUX logical channels for subsystem restart or
 * line discipline disconnect.
 */
432static void smux_lch_purge(void)
433{
434 struct smux_lch_t *ch;
435 unsigned long flags;
436 int i;
437
438 /* Empty TX ready list */
439 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
440 while (!list_empty(&smux.lch_tx_ready_list)) {
441 SMUX_DBG("%s: emptying ready list %p\n",
442 __func__, smux.lch_tx_ready_list.next);
443 ch = list_first_entry(&smux.lch_tx_ready_list,
444 struct smux_lch_t,
445 tx_ready_list);
446 list_del(&ch->tx_ready_list);
447 INIT_LIST_HEAD(&ch->tx_ready_list);
448 }

	/* Purge Power Queue */
	while (!list_empty(&smux.power_queue)) {
		struct smux_pkt_t *pkt;

		pkt = list_first_entry(&smux.power_queue,
					struct smux_pkt_t,
					list);
		list_del(&pkt->list);
		SMUX_DBG("%s: emptying power queue pkt=%p\n",
				__func__, pkt);
		smux_free_pkt(pkt);
	}
	spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
463
464 /* Close all ports */
465 for (i = 0 ; i < SMUX_NUM_LOGICAL_CHANNELS; i++) {
466 ch = &smux_lch[i];
467 SMUX_DBG("%s: cleaning up lcid %d\n", __func__, i);
468
469 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
470
471 /* Purge TX queue */
472 spin_lock(&ch->tx_lock_lhb2);
473 smux_purge_ch_tx_queue(ch);
474 spin_unlock(&ch->tx_lock_lhb2);
475
476 /* Notify user of disconnect and reset channel state */
477 if (ch->local_state == SMUX_LCH_LOCAL_OPENED ||
478 ch->local_state == SMUX_LCH_LOCAL_CLOSING) {
479 union notifier_metadata meta;
480
481 meta.disconnected.is_ssr = smux.in_reset;
482 schedule_notify(ch->lcid, SMUX_DISCONNECTED, &meta);
483 }
484
485 ch->local_state = SMUX_LCH_LOCAL_CLOSED;
486 ch->local_mode = SMUX_LCH_MODE_NORMAL;
487 ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
488 ch->remote_mode = SMUX_LCH_MODE_NORMAL;
489 ch->tx_flow_control = 0;
490
491 /* Purge RX retry queue */
492 if (ch->rx_retry_queue_cnt)
493 queue_delayed_work(smux_rx_wq, &ch->rx_retry_work, 0);
494
495 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
496 }
497
498 /* Flush TX/RX workqueues */
499 SMUX_DBG("%s: flushing tx wq\n", __func__);
500 flush_workqueue(smux_tx_wq);
501 SMUX_DBG("%s: flushing rx wq\n", __func__);
502 flush_workqueue(smux_rx_wq);
503}
504
int smux_assert_lch_id(uint32_t lcid)
506{
507 if (lcid >= SMUX_NUM_LOGICAL_CHANNELS)
508 return -ENXIO;
509 else
510 return 0;
511}
512
513/**
514 * Log packet information for debug purposes.
515 *
516 * @pkt Packet to log
517 * @is_recv 1 = RX packet; 0 = TX Packet
518 *
519 * [DIR][LCID] [LOCAL_STATE][LOCAL_MODE]:[REMOTE_STATE][REMOTE_MODE] PKT Info
520 *
521 * PKT Info:
522 * [CMD] flags [flags] len [PAYLOAD_LEN]:[PAD_LEN] [Payload hex bytes]
523 *
524 * Direction: R = Receive, S = Send
525 * Local State: C = Closed; c = closing; o = opening; O = Opened
526 * Local Mode: L = Local loopback; R = Remote loopback; N = Normal
527 * Remote State: C = Closed; O = Opened
528 * Remote Mode: R = Remote loopback; N = Normal
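 *
 * Hypothetical example line (illustration only, not captured from a device):
 *   smux: R5 ON:ON DATA flags 0 len 4:0 de ad be ef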
529 */
530static void smux_log_pkt(struct smux_pkt_t *pkt, int is_recv)
531{
532 char logbuf[SMUX_PKT_LOG_SIZE];
533 char cmd_extra[16];
534 int i = 0;
535 int count;
536 int len;
537 char local_state;
538 char local_mode;
539 char remote_state;
540 char remote_mode;
541 struct smux_lch_t *ch;
542 unsigned char *data;
543
544 ch = &smux_lch[pkt->hdr.lcid];
545
546 switch (ch->local_state) {
547 case SMUX_LCH_LOCAL_CLOSED:
548 local_state = 'C';
549 break;
550 case SMUX_LCH_LOCAL_OPENING:
551 local_state = 'o';
552 break;
553 case SMUX_LCH_LOCAL_OPENED:
554 local_state = 'O';
555 break;
556 case SMUX_LCH_LOCAL_CLOSING:
557 local_state = 'c';
558 break;
559 default:
560 local_state = 'U';
561 break;
562 }
563
564 switch (ch->local_mode) {
565 case SMUX_LCH_MODE_LOCAL_LOOPBACK:
566 local_mode = 'L';
567 break;
568 case SMUX_LCH_MODE_REMOTE_LOOPBACK:
569 local_mode = 'R';
570 break;
571 case SMUX_LCH_MODE_NORMAL:
572 local_mode = 'N';
573 break;
574 default:
575 local_mode = 'U';
576 break;
577 }
578
579 switch (ch->remote_state) {
580 case SMUX_LCH_REMOTE_CLOSED:
581 remote_state = 'C';
582 break;
583 case SMUX_LCH_REMOTE_OPENED:
584 remote_state = 'O';
585 break;
586
587 default:
588 remote_state = 'U';
589 break;
590 }
591
592 switch (ch->remote_mode) {
593 case SMUX_LCH_MODE_REMOTE_LOOPBACK:
594 remote_mode = 'R';
595 break;
596 case SMUX_LCH_MODE_NORMAL:
597 remote_mode = 'N';
598 break;
599 default:
600 remote_mode = 'U';
601 break;
602 }
603
604 /* determine command type (ACK, etc) */
605 cmd_extra[0] = '\0';
606 switch (pkt->hdr.cmd) {
607 case SMUX_CMD_OPEN_LCH:
608 if (pkt->hdr.flags & SMUX_CMD_OPEN_ACK)
609 snprintf(cmd_extra, sizeof(cmd_extra), " ACK");
610 break;
611 case SMUX_CMD_CLOSE_LCH:
612 if (pkt->hdr.flags & SMUX_CMD_CLOSE_ACK)
613 snprintf(cmd_extra, sizeof(cmd_extra), " ACK");
614 break;
615 };
616
617 i += snprintf(logbuf + i, SMUX_PKT_LOG_SIZE - i,
618 "smux: %c%d %c%c:%c%c %s%s flags %x len %d:%d ",
619 is_recv ? 'R' : 'S', pkt->hdr.lcid,
620 local_state, local_mode,
621 remote_state, remote_mode,
622 cmd_to_str(pkt->hdr.cmd), cmd_extra, pkt->hdr.flags,
623 pkt->hdr.payload_len, pkt->hdr.pad_len);
624
625 len = (pkt->hdr.payload_len > 16) ? 16 : pkt->hdr.payload_len;
626 data = (unsigned char *)pkt->payload;
627 for (count = 0; count < len; count++)
628 i += snprintf(logbuf + i, SMUX_PKT_LOG_SIZE - i,
629 "%02x ", (unsigned)data[count]);
630
631 pr_info("%s\n", logbuf);
632}
633
634static void smux_notify_local_fn(struct work_struct *work)
635{
636 struct smux_notify_handle *notify_handle = NULL;
637 union notifier_metadata *metadata = NULL;
638 unsigned long flags;
639 int i;
640
641 for (;;) {
642 /* retrieve notification */
643 spin_lock_irqsave(&notify_lock_lhc1, flags);
644 if (kfifo_len(&smux_notify_fifo) >= handle_size) {
645 i = kfifo_out(&smux_notify_fifo,
646 &notify_handle,
647 handle_size);
648 if (i != handle_size) {
649 pr_err("%s: unable to retrieve handle %d expected %d\n",
650 __func__, i, handle_size);
651 spin_unlock_irqrestore(&notify_lock_lhc1, flags);
652 break;
653 }
654 } else {
655 spin_unlock_irqrestore(&notify_lock_lhc1, flags);
656 break;
657 }
658 --queued_fifo_notifications;
659 spin_unlock_irqrestore(&notify_lock_lhc1, flags);
660
661 /* notify client */
662 metadata = notify_handle->metadata;
663 notify_handle->notify(notify_handle->priv,
664 notify_handle->event_type,
665 metadata);
666
667 kfree(metadata);
668 kfree(notify_handle);
669 }
670}
671
672/**
673 * Initialize existing packet.
674 */
675void smux_init_pkt(struct smux_pkt_t *pkt)
676{
677 memset(pkt, 0x0, sizeof(*pkt));
678 pkt->hdr.magic = SMUX_MAGIC;
679 INIT_LIST_HEAD(&pkt->list);
680}
681
/**
 * Allocate and initialize packet.
 *
 * If a payload is needed, either set it directly and ensure that it's freed or
 * use smux_alloc_pkt_payload() to allocate a payload and it will be freed
 * automatically when smux_free_pkt() is called.
 */
689struct smux_pkt_t *smux_alloc_pkt(void)
690{
691 struct smux_pkt_t *pkt;
692
693 /* Consider a free list implementation instead of kmalloc */
694 pkt = kmalloc(sizeof(struct smux_pkt_t), GFP_ATOMIC);
695 if (!pkt) {
696 pr_err("%s: out of memory\n", __func__);
697 return NULL;
698 }
699 smux_init_pkt(pkt);
700 pkt->allocated = 1;
701
702 return pkt;
703}
704
705/**
706 * Free packet.
707 *
708 * @pkt Packet to free (may be NULL)
709 *
710 * If payload was allocated using smux_alloc_pkt_payload(), then it is freed as
711 * well. Otherwise, the caller is responsible for freeing the payload.
712 */
713void smux_free_pkt(struct smux_pkt_t *pkt)
714{
715 if (pkt) {
716 if (pkt->free_payload)
717 kfree(pkt->payload);
718 if (pkt->allocated)
719 kfree(pkt);
720 }
721}
722
723/**
724 * Allocate packet payload.
725 *
726 * @pkt Packet to add payload to
727 *
728 * @returns 0 on success, <0 upon error
729 *
730 * A flag is set to signal smux_free_pkt() to free the payload.
731 */
732int smux_alloc_pkt_payload(struct smux_pkt_t *pkt)
733{
734 if (!pkt)
735 return -EINVAL;
736
737 pkt->payload = kmalloc(pkt->hdr.payload_len, GFP_ATOMIC);
738 pkt->free_payload = 1;
739 if (!pkt->payload) {
740 pr_err("%s: unable to malloc %d bytes for payload\n",
741 __func__, pkt->hdr.payload_len);
742 return -ENOMEM;
743 }
744
745 return 0;
746}
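
/*
 * Illustrative usage of the packet helpers above (a sketch that mirrors the
 * remote-loopback echo path later in this file; lcid, ch, data and len are
 * hypothetical local variables):
 *
 *	struct smux_pkt_t *pkt = smux_alloc_pkt();
 *
 *	if (pkt) {
 *		pkt->hdr.cmd = SMUX_CMD_DATA;
 *		pkt->hdr.lcid = lcid;
 *		pkt->hdr.payload_len = len;
 *		if (!smux_alloc_pkt_payload(pkt)) {
 *			memcpy(pkt->payload, data, len);
 *			smux_tx_queue(pkt, ch, 1);
 *		} else {
 *			smux_free_pkt(pkt);
 *		}
 *	}
 */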
747
748static int schedule_notify(uint8_t lcid, int event,
749 const union notifier_metadata *metadata)
750{
751 struct smux_notify_handle *notify_handle = 0;
752 union notifier_metadata *meta_copy = 0;
753 struct smux_lch_t *ch;
754 int i;
755 unsigned long flags;
756 int ret = 0;
757
758 ch = &smux_lch[lcid];
759 notify_handle = kzalloc(sizeof(struct smux_notify_handle),
760 GFP_ATOMIC);
761 if (!notify_handle) {
762 pr_err("%s: out of memory\n", __func__);
763 ret = -ENOMEM;
764 goto free_out;
765 }
766
767 notify_handle->notify = ch->notify;
768 notify_handle->priv = ch->priv;
769 notify_handle->event_type = event;
770 if (metadata) {
771 meta_copy = kzalloc(sizeof(union notifier_metadata),
772 GFP_ATOMIC);
773 if (!meta_copy) {
774 pr_err("%s: out of memory\n", __func__);
775 ret = -ENOMEM;
776 goto free_out;
777 }
778 *meta_copy = *metadata;
779 notify_handle->metadata = meta_copy;
780 } else {
781 notify_handle->metadata = NULL;
782 }
783
784 spin_lock_irqsave(&notify_lock_lhc1, flags);
785 i = kfifo_avail(&smux_notify_fifo);
786 if (i < handle_size) {
787 pr_err("%s: fifo full error %d expected %d\n",
788 __func__, i, handle_size);
789 ret = -ENOMEM;
790 goto unlock_out;
791 }
792
793 i = kfifo_in(&smux_notify_fifo, &notify_handle, handle_size);
794 if (i < 0 || i != handle_size) {
795 pr_err("%s: fifo not available error %d (expected %d)\n",
796 __func__, i, handle_size);
797 ret = -ENOSPC;
798 goto unlock_out;
799 }
800 ++queued_fifo_notifications;
801
802unlock_out:
803 spin_unlock_irqrestore(&notify_lock_lhc1, flags);
804
805free_out:
806 queue_work(smux_notify_wq, &smux_notify_local);
807 if (ret < 0 && notify_handle) {
808 kfree(notify_handle->metadata);
809 kfree(notify_handle);
810 }
811 return ret;
812}
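
/*
 * Note on the notification path (summary of the code above):
 * schedule_notify() snapshots the client callback and optional metadata into
 * a smux_notify_handle, pushes the handle pointer into smux_notify_fifo under
 * notify_lock_lhc1, and queues smux_notify_local on smux_notify_wq.
 * smux_notify_local_fn() then drains the FIFO and invokes the client's
 * notify() callback with the lock released.
 */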
813
814/**
815 * Returns the serialized size of a packet.
816 *
817 * @pkt Packet to serialize
818 *
819 * @returns Serialized length of packet
820 */
821static unsigned int smux_serialize_size(struct smux_pkt_t *pkt)
822{
823 unsigned int size;
824
825 size = sizeof(struct smux_hdr_t);
826 size += pkt->hdr.payload_len;
827 size += pkt->hdr.pad_len;
828
829 return size;
830}
831
832/**
833 * Serialize packet @pkt into output buffer @data.
834 *
835 * @pkt Packet to serialize
836 * @out Destination buffer pointer
837 * @out_len Size of serialized packet
838 *
839 * @returns 0 for success
840 */
841int smux_serialize(struct smux_pkt_t *pkt, char *out,
842 unsigned int *out_len)
843{
844 char *data_start = out;
845
846 if (smux_serialize_size(pkt) > SMUX_MAX_PKT_SIZE) {
847 pr_err("%s: packet size %d too big\n",
848 __func__, smux_serialize_size(pkt));
849 return -E2BIG;
850 }
851
852 memcpy(out, &pkt->hdr, sizeof(struct smux_hdr_t));
853 out += sizeof(struct smux_hdr_t);
854 if (pkt->payload) {
855 memcpy(out, pkt->payload, pkt->hdr.payload_len);
856 out += pkt->hdr.payload_len;
857 }
858 if (pkt->hdr.pad_len) {
859 memset(out, 0x0, pkt->hdr.pad_len);
860 out += pkt->hdr.pad_len;
861 }
862 *out_len = out - data_start;
863 return 0;
864}
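
/*
 * Resulting wire layout produced by smux_serialize() (sketch):
 *
 *	+---------------------+---------------------+--------------------+
 *	| struct smux_hdr_t   | payload_len bytes   | pad_len bytes of   |
 *	| (starts with magic) | of payload          | zero padding       |
 *	+---------------------+---------------------+--------------------+
 */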
865
866/**
867 * Serialize header and provide pointer to the data.
868 *
869 * @pkt Packet
870 * @out[out] Pointer to the serialized header data
871 * @out_len[out] Pointer to the serialized header length
872 */
873static void smux_serialize_hdr(struct smux_pkt_t *pkt, char **out,
874 unsigned int *out_len)
875{
876 *out = (char *)&pkt->hdr;
877 *out_len = sizeof(struct smux_hdr_t);
878}
879
880/**
881 * Serialize payload and provide pointer to the data.
882 *
883 * @pkt Packet
884 * @out[out] Pointer to the serialized payload data
885 * @out_len[out] Pointer to the serialized payload length
886 */
887static void smux_serialize_payload(struct smux_pkt_t *pkt, char **out,
888 unsigned int *out_len)
889{
890 *out = pkt->payload;
891 *out_len = pkt->hdr.payload_len;
892}
893
/**
 * Serialize padding and provide pointer to the data.
 *
 * @pkt Packet
 * @out[out] Pointer to the serialized padding (always NULL)
 * @out_len[out] Pointer to the serialized padding length
 *
 * Since the padding field value is undefined, only the size of the padding
 * (@out_len) is set and the buffer pointer (@out) will always be NULL.
 */
904static void smux_serialize_padding(struct smux_pkt_t *pkt, char **out,
905 unsigned int *out_len)
906{
907 *out = NULL;
908 *out_len = pkt->hdr.pad_len;
909}
910
911/**
912 * Write data to TTY framework and handle breaking the writes up if needed.
913 *
914 * @data Data to write
915 * @len Length of data
916 *
917 * @returns 0 for success, < 0 for failure
918 */
919static int write_to_tty(char *data, unsigned len)
920{
921 int data_written;
922
923 if (!data)
924 return 0;
925
Eric Holmberged1f00c2012-06-07 09:45:18 -0600926 while (len > 0 && !smux.in_reset) {
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600927 data_written = smux.tty->ops->write(smux.tty, data, len);
928 if (data_written >= 0) {
929 len -= data_written;
930 data += data_written;
931 } else {
932 pr_err("%s: TTY write returned error %d\n",
933 __func__, data_written);
934 return data_written;
935 }
936
937 if (len)
938 tty_wait_until_sent(smux.tty,
939 msecs_to_jiffies(TTY_BUFFER_FULL_WAIT_MS));
Eric Holmberg8ed30f22012-05-10 19:16:51 -0600940 }
941 return 0;
942}
943
944/**
945 * Write packet to TTY.
946 *
947 * @pkt packet to write
948 *
949 * @returns 0 on success
950 */
951static int smux_tx_tty(struct smux_pkt_t *pkt)
952{
953 char *data;
954 unsigned int len;
955 int ret;
956
957 if (!smux.tty) {
		pr_err("%s: TTY not initialized\n", __func__);
959 return -ENOTTY;
960 }
961
962 if (pkt->hdr.cmd == SMUX_CMD_BYTE) {
963 SMUX_DBG("%s: tty send single byte\n", __func__);
964 ret = write_to_tty(&pkt->hdr.flags, 1);
965 return ret;
966 }
967
968 smux_serialize_hdr(pkt, &data, &len);
969 ret = write_to_tty(data, len);
970 if (ret) {
971 pr_err("%s: failed %d to write header %d\n",
972 __func__, ret, len);
973 return ret;
974 }
975
976 smux_serialize_payload(pkt, &data, &len);
977 ret = write_to_tty(data, len);
978 if (ret) {
979 pr_err("%s: failed %d to write payload %d\n",
980 __func__, ret, len);
981 return ret;
982 }
983
984 smux_serialize_padding(pkt, &data, &len);
985 while (len > 0) {
986 char zero = 0x0;
987 ret = write_to_tty(&zero, 1);
988 if (ret) {
989 pr_err("%s: failed %d to write padding %d\n",
990 __func__, ret, len);
991 return ret;
992 }
993 --len;
994 }
995 return 0;
996}
997
998/**
999 * Send a single character.
1000 *
1001 * @ch Character to send
1002 */
1003static void smux_send_byte(char ch)
1004{
Eric Holmbergffddd4c2012-06-08 12:37:51 -06001005 struct smux_pkt_t *pkt;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001006
Eric Holmbergffddd4c2012-06-08 12:37:51 -06001007 pkt = smux_alloc_pkt();
1008 if (!pkt) {
1009 pr_err("%s: alloc failure for byte %x\n", __func__, ch);
1010 return;
1011 }
1012 pkt->hdr.cmd = SMUX_CMD_BYTE;
1013 pkt->hdr.flags = ch;
1014 pkt->hdr.lcid = SMUX_BROADCAST_LCID;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001015
Eric Holmbergffddd4c2012-06-08 12:37:51 -06001016 list_add_tail(&pkt->list, &smux.power_queue);
1017 queue_work(smux_tx_wq, &smux_tx_work);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001018}
1019
1020/**
1021 * Receive a single-character packet (used for internal testing).
1022 *
1023 * @ch Character to receive
1024 * @lcid Logical channel ID for packet
1025 *
1026 * @returns 0 for success
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001027 */
1028static int smux_receive_byte(char ch, int lcid)
1029{
1030 struct smux_pkt_t pkt;
1031
1032 smux_init_pkt(&pkt);
1033 pkt.hdr.lcid = lcid;
1034 pkt.hdr.cmd = SMUX_CMD_BYTE;
1035 pkt.hdr.flags = ch;
1036
1037 return smux_dispatch_rx_pkt(&pkt);
1038}
1039
1040/**
1041 * Queue packet for transmit.
1042 *
1043 * @pkt_ptr Packet to queue
1044 * @ch Channel to queue packet on
1045 * @queue Queue channel on ready list
1046 */
1047static void smux_tx_queue(struct smux_pkt_t *pkt_ptr, struct smux_lch_t *ch,
1048 int queue)
1049{
1050 unsigned long flags;
1051
1052 SMUX_DBG("%s: queuing pkt %p\n", __func__, pkt_ptr);
1053
1054 spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
1055 list_add_tail(&pkt_ptr->list, &ch->tx_queue);
1056 spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);
1057
1058 if (queue)
1059 list_channel(ch);
1060}
1061
1062/**
1063 * Handle receive OPEN ACK command.
1064 *
1065 * @pkt Received packet
1066 *
1067 * @returns 0 for success
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001068 */
1069static int smux_handle_rx_open_ack(struct smux_pkt_t *pkt)
1070{
1071 uint8_t lcid;
1072 int ret;
1073 struct smux_lch_t *ch;
1074 int enable_powerdown = 0;
1075
1076 lcid = pkt->hdr.lcid;
1077 ch = &smux_lch[lcid];
1078
1079 spin_lock(&ch->state_lock_lhb1);
1080 if (ch->local_state == SMUX_LCH_LOCAL_OPENING) {
1081 SMUX_DBG("lcid %d local state 0x%x -> 0x%x\n", lcid,
1082 ch->local_state,
1083 SMUX_LCH_LOCAL_OPENED);
1084
1085 if (pkt->hdr.flags & SMUX_CMD_OPEN_POWER_COLLAPSE)
1086 enable_powerdown = 1;
1087
1088 ch->local_state = SMUX_LCH_LOCAL_OPENED;
1089 if (ch->remote_state == SMUX_LCH_REMOTE_OPENED)
1090 schedule_notify(lcid, SMUX_CONNECTED, NULL);
1091 ret = 0;
1092 } else if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
1093 SMUX_DBG("Remote loopback OPEN ACK received\n");
1094 ret = 0;
1095 } else {
1096 pr_err("%s: lcid %d state 0x%x open ack invalid\n",
1097 __func__, lcid, ch->local_state);
1098 ret = -EINVAL;
1099 }
1100 spin_unlock(&ch->state_lock_lhb1);
1101
1102 if (enable_powerdown) {
1103 spin_lock(&smux.tx_lock_lha2);
1104 if (!smux.powerdown_enabled) {
1105 smux.powerdown_enabled = 1;
1106 SMUX_DBG("%s: enabling power-collapse support\n",
1107 __func__);
1108 }
1109 spin_unlock(&smux.tx_lock_lha2);
1110 }
1111
1112 return ret;
1113}
1114
1115static int smux_handle_close_ack(struct smux_pkt_t *pkt)
1116{
1117 uint8_t lcid;
1118 int ret;
1119 struct smux_lch_t *ch;
1120 union notifier_metadata meta_disconnected;
1121 unsigned long flags;
1122
1123 lcid = pkt->hdr.lcid;
1124 ch = &smux_lch[lcid];
1125 meta_disconnected.disconnected.is_ssr = 0;
1126
1127 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
1128
1129 if (ch->local_state == SMUX_LCH_LOCAL_CLOSING) {
1130 SMUX_DBG("lcid %d local state 0x%x -> 0x%x\n", lcid,
1131 SMUX_LCH_LOCAL_CLOSING,
1132 SMUX_LCH_LOCAL_CLOSED);
1133 ch->local_state = SMUX_LCH_LOCAL_CLOSED;
1134 if (ch->remote_state == SMUX_LCH_REMOTE_CLOSED)
1135 schedule_notify(lcid, SMUX_DISCONNECTED,
1136 &meta_disconnected);
1137 ret = 0;
1138 } else if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
1139 SMUX_DBG("Remote loopback CLOSE ACK received\n");
1140 ret = 0;
1141 } else {
1142 pr_err("%s: lcid %d state 0x%x close ack invalid\n",
1143 __func__, lcid, ch->local_state);
1144 ret = -EINVAL;
1145 }
1146 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
1147 return ret;
1148}
1149
1150/**
1151 * Handle receive OPEN command.
1152 *
1153 * @pkt Received packet
1154 *
1155 * @returns 0 for success
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001156 */
1157static int smux_handle_rx_open_cmd(struct smux_pkt_t *pkt)
1158{
1159 uint8_t lcid;
1160 int ret;
1161 struct smux_lch_t *ch;
1162 struct smux_pkt_t *ack_pkt;
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001163 unsigned long flags;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001164 int tx_ready = 0;
1165 int enable_powerdown = 0;
1166
1167 if (pkt->hdr.flags & SMUX_CMD_OPEN_ACK)
1168 return smux_handle_rx_open_ack(pkt);
1169
1170 lcid = pkt->hdr.lcid;
1171 ch = &smux_lch[lcid];
1172
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001173 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001174
1175 if (ch->remote_state == SMUX_LCH_REMOTE_CLOSED) {
1176 SMUX_DBG("lcid %d remote state 0x%x -> 0x%x\n", lcid,
1177 SMUX_LCH_REMOTE_CLOSED,
1178 SMUX_LCH_REMOTE_OPENED);
1179
1180 ch->remote_state = SMUX_LCH_REMOTE_OPENED;
1181 if (pkt->hdr.flags & SMUX_CMD_OPEN_POWER_COLLAPSE)
1182 enable_powerdown = 1;
1183
1184 /* Send Open ACK */
1185 ack_pkt = smux_alloc_pkt();
1186 if (!ack_pkt) {
1187 /* exit out to allow retrying this later */
1188 ret = -ENOMEM;
1189 goto out;
1190 }
1191 ack_pkt->hdr.cmd = SMUX_CMD_OPEN_LCH;
1192 ack_pkt->hdr.flags = SMUX_CMD_OPEN_ACK
1193 | SMUX_CMD_OPEN_POWER_COLLAPSE;
1194 ack_pkt->hdr.lcid = lcid;
1195 ack_pkt->hdr.payload_len = 0;
1196 ack_pkt->hdr.pad_len = 0;
1197 if (pkt->hdr.flags & SMUX_CMD_OPEN_REMOTE_LOOPBACK) {
1198 ch->remote_mode = SMUX_LCH_MODE_REMOTE_LOOPBACK;
1199 ack_pkt->hdr.flags |= SMUX_CMD_OPEN_REMOTE_LOOPBACK;
1200 }
1201 smux_tx_queue(ack_pkt, ch, 0);
1202 tx_ready = 1;
1203
1204 if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
1205 /*
1206 * Send an Open command to the remote side to
1207 * simulate our local client doing it.
1208 */
1209 ack_pkt = smux_alloc_pkt();
1210 if (ack_pkt) {
1211 ack_pkt->hdr.lcid = lcid;
1212 ack_pkt->hdr.cmd = SMUX_CMD_OPEN_LCH;
1213 ack_pkt->hdr.flags =
1214 SMUX_CMD_OPEN_POWER_COLLAPSE;
1215 ack_pkt->hdr.payload_len = 0;
1216 ack_pkt->hdr.pad_len = 0;
1217 smux_tx_queue(ack_pkt, ch, 0);
1218 tx_ready = 1;
1219 } else {
				pr_err("%s: Remote loopback allocation failure\n",
1221 __func__);
1222 }
1223 } else if (ch->local_state == SMUX_LCH_LOCAL_OPENED) {
1224 schedule_notify(lcid, SMUX_CONNECTED, NULL);
1225 }
1226 ret = 0;
1227 } else {
1228 pr_err("%s: lcid %d remote state 0x%x open invalid\n",
1229 __func__, lcid, ch->remote_state);
1230 ret = -EINVAL;
1231 }
1232
1233out:
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001234 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001235
1236 if (enable_powerdown) {
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001237 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
Eric Holmberg8b9a6402012-06-05 13:32:57 -06001238 if (!smux.powerdown_enabled) {
1239 smux.powerdown_enabled = 1;
1240 SMUX_DBG("%s: enabling power-collapse support\n",
1241 __func__);
1242 }
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001243 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001244 }
1245
1246 if (tx_ready)
1247 list_channel(ch);
1248
1249 return ret;
1250}
1251
1252/**
1253 * Handle receive CLOSE command.
1254 *
1255 * @pkt Received packet
1256 *
1257 * @returns 0 for success
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001258 */
1259static int smux_handle_rx_close_cmd(struct smux_pkt_t *pkt)
1260{
1261 uint8_t lcid;
1262 int ret;
1263 struct smux_lch_t *ch;
1264 struct smux_pkt_t *ack_pkt;
1265 union notifier_metadata meta_disconnected;
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001266 unsigned long flags;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001267 int tx_ready = 0;
1268
1269 if (pkt->hdr.flags & SMUX_CMD_CLOSE_ACK)
1270 return smux_handle_close_ack(pkt);
1271
1272 lcid = pkt->hdr.lcid;
1273 ch = &smux_lch[lcid];
1274 meta_disconnected.disconnected.is_ssr = 0;
1275
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001276 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001277 if (ch->remote_state == SMUX_LCH_REMOTE_OPENED) {
1278 SMUX_DBG("lcid %d remote state 0x%x -> 0x%x\n", lcid,
1279 SMUX_LCH_REMOTE_OPENED,
1280 SMUX_LCH_REMOTE_CLOSED);
1281
1282 ack_pkt = smux_alloc_pkt();
1283 if (!ack_pkt) {
1284 /* exit out to allow retrying this later */
1285 ret = -ENOMEM;
1286 goto out;
1287 }
1288 ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
1289 ack_pkt->hdr.cmd = SMUX_CMD_CLOSE_LCH;
1290 ack_pkt->hdr.flags = SMUX_CMD_CLOSE_ACK;
1291 ack_pkt->hdr.lcid = lcid;
1292 ack_pkt->hdr.payload_len = 0;
1293 ack_pkt->hdr.pad_len = 0;
1294 smux_tx_queue(ack_pkt, ch, 0);
1295 tx_ready = 1;
1296
1297 if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
1298 /*
1299 * Send a Close command to the remote side to simulate
1300 * our local client doing it.
1301 */
1302 ack_pkt = smux_alloc_pkt();
1303 if (ack_pkt) {
1304 ack_pkt->hdr.lcid = lcid;
1305 ack_pkt->hdr.cmd = SMUX_CMD_CLOSE_LCH;
1306 ack_pkt->hdr.flags = 0;
1307 ack_pkt->hdr.payload_len = 0;
1308 ack_pkt->hdr.pad_len = 0;
1309 smux_tx_queue(ack_pkt, ch, 0);
1310 tx_ready = 1;
1311 } else {
				pr_err("%s: Remote loopback allocation failure\n",
1313 __func__);
1314 }
1315 }
1316
1317 if (ch->local_state == SMUX_LCH_LOCAL_CLOSED)
1318 schedule_notify(lcid, SMUX_DISCONNECTED,
1319 &meta_disconnected);
1320 ret = 0;
1321 } else {
1322 pr_err("%s: lcid %d remote state 0x%x close invalid\n",
1323 __func__, lcid, ch->remote_state);
1324 ret = -EINVAL;
1325 }
1326out:
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001327 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001328 if (tx_ready)
1329 list_channel(ch);
1330
1331 return ret;
1332}
1333
/**
1335 * Handle receive DATA command.
1336 *
1337 * @pkt Received packet
1338 *
1339 * @returns 0 for success
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001340 */
1341static int smux_handle_rx_data_cmd(struct smux_pkt_t *pkt)
1342{
1343 uint8_t lcid;
Eric Holmbergb8435c82012-06-05 14:51:29 -06001344 int ret = 0;
1345 int do_retry = 0;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001346 int tmp;
1347 int rx_len;
1348 struct smux_lch_t *ch;
1349 union notifier_metadata metadata;
1350 int remote_loopback;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001351 struct smux_pkt_t *ack_pkt;
1352 unsigned long flags;
1353
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001354 if (!pkt || smux_assert_lch_id(pkt->hdr.lcid)) {
1355 ret = -ENXIO;
1356 goto out;
1357 }
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001358
Eric Holmbergb8435c82012-06-05 14:51:29 -06001359 rx_len = pkt->hdr.payload_len;
1360 if (rx_len == 0) {
1361 ret = -EINVAL;
1362 goto out;
1363 }
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001364
1365 lcid = pkt->hdr.lcid;
1366 ch = &smux_lch[lcid];
1367 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
1368 remote_loopback = ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK;
1369
1370 if (ch->local_state != SMUX_LCH_LOCAL_OPENED
1371 && !remote_loopback) {
1372 pr_err("smux: ch %d error data on local state 0x%x",
1373 lcid, ch->local_state);
1374 ret = -EIO;
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001375 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001376 goto out;
1377 }
1378
1379 if (ch->remote_state != SMUX_LCH_REMOTE_OPENED) {
1380 pr_err("smux: ch %d error data on remote state 0x%x",
1381 lcid, ch->remote_state);
1382 ret = -EIO;
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001383 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001384 goto out;
1385 }
1386
Eric Holmbergb8435c82012-06-05 14:51:29 -06001387 if (!list_empty(&ch->rx_retry_queue)) {
1388 do_retry = 1;
1389 if ((ch->rx_retry_queue_cnt + 1) > SMUX_RX_RETRY_MAX_PKTS) {
1390 /* retry queue full */
1391 schedule_notify(lcid, SMUX_READ_FAIL, NULL);
1392 ret = -ENOMEM;
1393 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
1394 goto out;
1395 }
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001396 }
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001397 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001398
Eric Holmbergb8435c82012-06-05 14:51:29 -06001399 if (remote_loopback) {
1400 /* Echo the data back to the remote client. */
1401 ack_pkt = smux_alloc_pkt();
1402 if (ack_pkt) {
1403 ack_pkt->hdr.lcid = lcid;
1404 ack_pkt->hdr.cmd = SMUX_CMD_DATA;
1405 ack_pkt->hdr.flags = 0;
1406 ack_pkt->hdr.payload_len = pkt->hdr.payload_len;
1407 if (ack_pkt->hdr.payload_len) {
1408 smux_alloc_pkt_payload(ack_pkt);
1409 memcpy(ack_pkt->payload, pkt->payload,
1410 ack_pkt->hdr.payload_len);
1411 }
1412 ack_pkt->hdr.pad_len = pkt->hdr.pad_len;
1413 smux_tx_queue(ack_pkt, ch, 0);
1414 list_channel(ch);
1415 } else {
			pr_err("%s: Remote loopback allocation failure\n",
1417 __func__);
1418 }
1419 } else if (!do_retry) {
1420 /* request buffer from client */
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001421 metadata.read.pkt_priv = 0;
1422 metadata.read.buffer = 0;
Eric Holmbergb8435c82012-06-05 14:51:29 -06001423 tmp = ch->get_rx_buffer(ch->priv,
1424 (void **)&metadata.read.pkt_priv,
1425 (void **)&metadata.read.buffer,
1426 rx_len);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001427
Eric Holmbergb8435c82012-06-05 14:51:29 -06001428 if (tmp == 0 && metadata.read.buffer) {
1429 /* place data into RX buffer */
1430 memcpy(metadata.read.buffer, pkt->payload,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001431 rx_len);
Eric Holmbergb8435c82012-06-05 14:51:29 -06001432 metadata.read.len = rx_len;
1433 schedule_notify(lcid, SMUX_READ_DONE,
1434 &metadata);
1435 } else if (tmp == -EAGAIN ||
1436 (tmp == 0 && !metadata.read.buffer)) {
1437 /* buffer allocation failed - add to retry queue */
1438 do_retry = 1;
1439 } else if (tmp < 0) {
1440 schedule_notify(lcid, SMUX_READ_FAIL, NULL);
1441 ret = -ENOMEM;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001442 }
1443 }
1444
Eric Holmbergb8435c82012-06-05 14:51:29 -06001445 if (do_retry) {
1446 struct smux_rx_pkt_retry *retry;
1447
1448 retry = kmalloc(sizeof(struct smux_rx_pkt_retry), GFP_KERNEL);
1449 if (!retry) {
1450 pr_err("%s: retry alloc failure\n", __func__);
1451 ret = -ENOMEM;
1452 schedule_notify(lcid, SMUX_READ_FAIL, NULL);
1453 goto out;
1454 }
1455 INIT_LIST_HEAD(&retry->rx_retry_list);
1456 retry->timeout_in_ms = SMUX_RX_RETRY_MIN_MS;
1457
1458 /* copy packet */
1459 retry->pkt = smux_alloc_pkt();
1460 if (!retry->pkt) {
1461 kfree(retry);
1462 pr_err("%s: pkt alloc failure\n", __func__);
1463 ret = -ENOMEM;
1464 schedule_notify(lcid, SMUX_READ_FAIL, NULL);
1465 goto out;
1466 }
1467 retry->pkt->hdr.lcid = lcid;
1468 retry->pkt->hdr.payload_len = pkt->hdr.payload_len;
1469 retry->pkt->hdr.pad_len = pkt->hdr.pad_len;
1470 if (retry->pkt->hdr.payload_len) {
1471 smux_alloc_pkt_payload(retry->pkt);
1472 memcpy(retry->pkt->payload, pkt->payload,
1473 retry->pkt->hdr.payload_len);
1474 }
1475
1476 /* add to retry queue */
1477 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
1478 list_add_tail(&retry->rx_retry_list, &ch->rx_retry_queue);
1479 ++ch->rx_retry_queue_cnt;
1480 if (ch->rx_retry_queue_cnt == 1)
1481 queue_delayed_work(smux_rx_wq, &ch->rx_retry_work,
1482 msecs_to_jiffies(retry->timeout_in_ms));
1483 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
1484 }
1485
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001486out:
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001487 return ret;
1488}
1489
1490/**
1491 * Handle receive byte command for testing purposes.
1492 *
1493 * @pkt Received packet
1494 *
1495 * @returns 0 for success
1496 */
1497static int smux_handle_rx_byte_cmd(struct smux_pkt_t *pkt)
1498{
1499 uint8_t lcid;
1500 int ret;
1501 struct smux_lch_t *ch;
1502 union notifier_metadata metadata;
1503 unsigned long flags;
1504
Eric Holmbergffddd4c2012-06-08 12:37:51 -06001505 if (!pkt || smux_assert_lch_id(pkt->hdr.lcid)) {
1506 pr_err("%s: invalid packet or channel id\n", __func__);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001507 return -ENXIO;
Eric Holmbergffddd4c2012-06-08 12:37:51 -06001508 }
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001509
1510 lcid = pkt->hdr.lcid;
1511 ch = &smux_lch[lcid];
1512 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
1513
1514 if (ch->local_state != SMUX_LCH_LOCAL_OPENED) {
1515 pr_err("smux: ch %d error data on local state 0x%x",
1516 lcid, ch->local_state);
1517 ret = -EIO;
1518 goto out;
1519 }
1520
1521 if (ch->remote_state != SMUX_LCH_REMOTE_OPENED) {
1522 pr_err("smux: ch %d error data on remote state 0x%x",
1523 lcid, ch->remote_state);
1524 ret = -EIO;
1525 goto out;
1526 }
1527
1528 metadata.read.pkt_priv = (void *)(int)pkt->hdr.flags;
1529 metadata.read.buffer = 0;
1530 schedule_notify(lcid, SMUX_READ_DONE, &metadata);
1531 ret = 0;
1532
1533out:
1534 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
1535 return ret;
1536}
1537
1538/**
1539 * Handle receive status command.
1540 *
1541 * @pkt Received packet
1542 *
1543 * @returns 0 for success
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001544 */
1545static int smux_handle_rx_status_cmd(struct smux_pkt_t *pkt)
1546{
1547 uint8_t lcid;
	int ret = 0;
1549 struct smux_lch_t *ch;
1550 union notifier_metadata meta;
1551 unsigned long flags;
1552 int tx_ready = 0;
1553
1554 lcid = pkt->hdr.lcid;
1555 ch = &smux_lch[lcid];
1556
1557 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
1558 meta.tiocm.tiocm_old = ch->remote_tiocm;
1559 meta.tiocm.tiocm_new = pkt->hdr.flags;
1560
1561 /* update logical channel flow control */
1562 if ((meta.tiocm.tiocm_old & SMUX_CMD_STATUS_FLOW_CNTL) ^
1563 (meta.tiocm.tiocm_new & SMUX_CMD_STATUS_FLOW_CNTL)) {
1564 /* logical channel flow control changed */
1565 if (pkt->hdr.flags & SMUX_CMD_STATUS_FLOW_CNTL) {
1566 /* disabled TX */
1567 SMUX_DBG("TX Flow control enabled\n");
1568 ch->tx_flow_control = 1;
1569 } else {
1570 /* re-enable channel */
1571 SMUX_DBG("TX Flow control disabled\n");
1572 ch->tx_flow_control = 0;
1573 tx_ready = 1;
1574 }
1575 }
1576 meta.tiocm.tiocm_old = msm_smux_tiocm_get_atomic(ch);
1577 ch->remote_tiocm = pkt->hdr.flags;
1578 meta.tiocm.tiocm_new = msm_smux_tiocm_get_atomic(ch);
1579
1580 /* client notification for status change */
1581 if (IS_FULLY_OPENED(ch)) {
1582 if (meta.tiocm.tiocm_old != meta.tiocm.tiocm_new)
1583 schedule_notify(lcid, SMUX_TIOCM_UPDATE, &meta);
1584 ret = 0;
1585 }
1586 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
1587 if (tx_ready)
1588 list_channel(ch);
1589
1590 return ret;
1591}
1592
1593/**
1594 * Handle receive power command.
1595 *
1596 * @pkt Received packet
1597 *
1598 * @returns 0 for success
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001599 */
1600static int smux_handle_rx_power_cmd(struct smux_pkt_t *pkt)
1601{
	struct smux_pkt_t *ack_pkt = NULL;
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001603 unsigned long flags;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001604
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001605 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001606 if (pkt->hdr.flags & SMUX_CMD_PWR_CTL_ACK) {
1607 /* local sleep request ack */
1608 if (smux.power_state == SMUX_PWR_TURNING_OFF) {
1609 /* Power-down complete, turn off UART */
Eric Holmbergff0b0112012-06-08 15:06:57 -06001610 SMUX_PWR("%s: Power %d->%d\n", __func__,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001611 smux.power_state, SMUX_PWR_OFF_FLUSH);
1612 smux.power_state = SMUX_PWR_OFF_FLUSH;
1613 queue_work(smux_tx_wq, &smux_inactivity_work);
1614 } else {
1615 pr_err("%s: sleep request ack invalid in state %d\n",
1616 __func__, smux.power_state);
1617 }
1618 } else {
Eric Holmbergffddd4c2012-06-08 12:37:51 -06001619 /*
1620 * Remote sleep request
1621 *
1622 * Even if we have data pending, we need to transition to the
1623 * POWER_OFF state and then perform a wakeup since the remote
1624 * side has requested a power-down.
1625 *
1626 * The state here is set to SMUX_PWR_TURNING_OFF_FLUSH and
1627 * the TX thread will set the state to SMUX_PWR_TURNING_OFF
1628 * when it sends the packet.
1629 */
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001630 if (smux.power_state == SMUX_PWR_ON
1631 || smux.power_state == SMUX_PWR_TURNING_OFF) {
1632 ack_pkt = smux_alloc_pkt();
1633 if (ack_pkt) {
Eric Holmbergff0b0112012-06-08 15:06:57 -06001634 SMUX_PWR("%s: Power %d->%d\n", __func__,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001635 smux.power_state,
1636 SMUX_PWR_TURNING_OFF_FLUSH);
1637
Eric Holmbergffddd4c2012-06-08 12:37:51 -06001638 smux.power_state = SMUX_PWR_TURNING_OFF_FLUSH;
1639
1640 /* send power-down ack */
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001641 ack_pkt->hdr.cmd = SMUX_CMD_PWR_CTL;
1642 ack_pkt->hdr.flags = SMUX_CMD_PWR_CTL_ACK;
Eric Holmbergffddd4c2012-06-08 12:37:51 -06001643 ack_pkt->hdr.lcid = SMUX_BROADCAST_LCID;
1644 list_add_tail(&ack_pkt->list,
1645 &smux.power_queue);
1646 queue_work(smux_tx_wq, &smux_tx_work);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001647 }
1648 } else {
1649 pr_err("%s: sleep request invalid in state %d\n",
1650 __func__, smux.power_state);
1651 }
1652 }
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001653 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001654
1655 return 0;
1656}
1657
1658/**
1659 * Handle dispatching a completed packet for receive processing.
1660 *
1661 * @pkt Packet to process
1662 *
1663 * @returns 0 for success
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001664 */
1665static int smux_dispatch_rx_pkt(struct smux_pkt_t *pkt)
1666{
Eric Holmbergf9622662012-06-13 15:55:45 -06001667 int ret = -ENXIO;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001668
1669 SMUX_LOG_PKT_RX(pkt);
1670
1671 switch (pkt->hdr.cmd) {
1672 case SMUX_CMD_OPEN_LCH:
Eric Holmbergf9622662012-06-13 15:55:45 -06001673 if (smux_assert_lch_id(pkt->hdr.lcid)) {
1674 pr_err("%s: invalid channel id %d\n",
1675 __func__, pkt->hdr.lcid);
1676 break;
1677 }
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001678 ret = smux_handle_rx_open_cmd(pkt);
1679 break;
1680
1681 case SMUX_CMD_DATA:
Eric Holmbergf9622662012-06-13 15:55:45 -06001682 if (smux_assert_lch_id(pkt->hdr.lcid)) {
1683 pr_err("%s: invalid channel id %d\n",
1684 __func__, pkt->hdr.lcid);
1685 break;
1686 }
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001687 ret = smux_handle_rx_data_cmd(pkt);
1688 break;
1689
1690 case SMUX_CMD_CLOSE_LCH:
Eric Holmbergf9622662012-06-13 15:55:45 -06001691 if (smux_assert_lch_id(pkt->hdr.lcid)) {
1692 pr_err("%s: invalid channel id %d\n",
1693 __func__, pkt->hdr.lcid);
1694 break;
1695 }
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001696 ret = smux_handle_rx_close_cmd(pkt);
1697 break;
1698
1699 case SMUX_CMD_STATUS:
Eric Holmbergf9622662012-06-13 15:55:45 -06001700 if (smux_assert_lch_id(pkt->hdr.lcid)) {
1701 pr_err("%s: invalid channel id %d\n",
1702 __func__, pkt->hdr.lcid);
1703 break;
1704 }
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001705 ret = smux_handle_rx_status_cmd(pkt);
1706 break;
1707
1708 case SMUX_CMD_PWR_CTL:
1709 ret = smux_handle_rx_power_cmd(pkt);
1710 break;
1711
1712 case SMUX_CMD_BYTE:
1713 ret = smux_handle_rx_byte_cmd(pkt);
1714 break;
1715
1716 default:
1717 pr_err("%s: command %d unknown\n", __func__, pkt->hdr.cmd);
1718 ret = -EINVAL;
1719 }
1720 return ret;
1721}
1722
1723/**
1724 * Deserializes a packet and dispatches it to the packet receive logic.
1725 *
1726 * @data Raw data for one packet
1727 * @len Length of the data
1728 *
1729 * @returns 0 for success
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001730 */
1731static int smux_deserialize(unsigned char *data, int len)
1732{
1733 struct smux_pkt_t recv;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001734
1735 smux_init_pkt(&recv);
1736
1737 /*
1738 * It may be possible to optimize this to not use the
1739 * temporary buffer.
1740 */
1741 memcpy(&recv.hdr, data, sizeof(struct smux_hdr_t));
1742
1743 if (recv.hdr.magic != SMUX_MAGIC) {
1744 pr_err("%s: invalid header magic\n", __func__);
1745 return -EINVAL;
1746 }
1747
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001748 if (recv.hdr.payload_len)
1749 recv.payload = data + sizeof(struct smux_hdr_t);
1750
1751 return smux_dispatch_rx_pkt(&recv);
1752}
1753
1754/**
1755 * Handle wakeup request byte.
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001756 */
1757static void smux_handle_wakeup_req(void)
1758{
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001759 unsigned long flags;
1760
1761 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001762 if (smux.power_state == SMUX_PWR_OFF
1763 || smux.power_state == SMUX_PWR_TURNING_ON) {
1764 /* wakeup system */
Eric Holmbergff0b0112012-06-08 15:06:57 -06001765 SMUX_PWR("%s: Power %d->%d\n", __func__,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001766 smux.power_state, SMUX_PWR_ON);
1767 smux.power_state = SMUX_PWR_ON;
1768 queue_work(smux_tx_wq, &smux_wakeup_work);
1769 queue_work(smux_tx_wq, &smux_tx_work);
1770 queue_delayed_work(smux_tx_wq, &smux_delayed_inactivity_work,
1771 msecs_to_jiffies(SMUX_INACTIVITY_TIMEOUT_MS));
1772 smux_send_byte(SMUX_WAKEUP_ACK);
1773 } else {
1774 smux_send_byte(SMUX_WAKEUP_ACK);
1775 }
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001776 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001777}
1778
1779/**
1780 * Handle wakeup request ack.
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001781 */
1782static void smux_handle_wakeup_ack(void)
1783{
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001784 unsigned long flags;
1785
1786 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001787 if (smux.power_state == SMUX_PWR_TURNING_ON) {
1788 /* received response to wakeup request */
Eric Holmbergff0b0112012-06-08 15:06:57 -06001789 SMUX_PWR("%s: Power %d->%d\n", __func__,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001790 smux.power_state, SMUX_PWR_ON);
1791 smux.power_state = SMUX_PWR_ON;
1792 queue_work(smux_tx_wq, &smux_tx_work);
1793 queue_delayed_work(smux_tx_wq, &smux_delayed_inactivity_work,
1794 msecs_to_jiffies(SMUX_INACTIVITY_TIMEOUT_MS));
1795
1796 } else if (smux.power_state != SMUX_PWR_ON) {
1797 /* invalid message */
1798 pr_err("%s: wakeup request ack invalid in state %d\n",
1799 __func__, smux.power_state);
1800 }
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001801 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001802}
1803
1804/**
1805 * RX State machine - IDLE state processing.
1806 *
1807 * @data New RX data to process
1808 * @len Length of the data
1809 * @used Return value of length processed
1810 * @flag Error flag - TTY_NORMAL 0 for no failure
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001811 */
1812static void smux_rx_handle_idle(const unsigned char *data,
1813 int len, int *used, int flag)
1814{
1815 int i;
1816
1817 if (flag) {
1818 if (smux_byte_loopback)
1819 smux_receive_byte(SMUX_UT_ECHO_ACK_FAIL,
1820 smux_byte_loopback);
1821 pr_err("%s: TTY error 0x%x - ignoring\n", __func__, flag);
1822 ++*used;
1823 return;
1824 }
1825
1826 for (i = *used; i < len && smux.rx_state == SMUX_RX_IDLE; i++) {
1827 switch (data[i]) {
1828 case SMUX_MAGIC_WORD1:
1829 smux.rx_state = SMUX_RX_MAGIC;
1830 break;
1831 case SMUX_WAKEUP_REQ:
1832 smux_handle_wakeup_req();
1833 break;
1834 case SMUX_WAKEUP_ACK:
1835 smux_handle_wakeup_ack();
1836 break;
1837 default:
1838 /* unexpected character */
1839 if (smux_byte_loopback && data[i] == SMUX_UT_ECHO_REQ)
1840 smux_receive_byte(SMUX_UT_ECHO_ACK_OK,
1841 smux_byte_loopback);
1842 pr_err("%s: parse error 0x%02x - ignoring\n", __func__,
1843 (unsigned)data[i]);
1844 break;
1845 }
1846 }
1847
1848 *used = i;
1849}
1850
1851/**
1852 * RX State machine - Header Magic state processing.
1853 *
1854 * @data New RX data to process
1855 * @len Length of the data
1856 * @used Return value of length processed
1857 * @flag Error flag - TTY_NORMAL 0 for no failure
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001858 */
1859static void smux_rx_handle_magic(const unsigned char *data,
1860 int len, int *used, int flag)
1861{
1862 int i;
1863
1864 if (flag) {
1865 pr_err("%s: TTY RX error %d\n", __func__, flag);
1866 smux_enter_reset();
1867 smux.rx_state = SMUX_RX_FAILURE;
1868 ++*used;
1869 return;
1870 }
1871
1872 for (i = *used; i < len && smux.rx_state == SMUX_RX_MAGIC; i++) {
1873 /* wait for completion of the magic */
1874 if (data[i] == SMUX_MAGIC_WORD2) {
1875 smux.recv_len = 0;
1876 smux.recv_buf[smux.recv_len++] = SMUX_MAGIC_WORD1;
1877 smux.recv_buf[smux.recv_len++] = SMUX_MAGIC_WORD2;
1878 smux.rx_state = SMUX_RX_HDR;
1879 } else {
1880 /* unexpected / trash character */
1881 pr_err("%s: rx parse error for char %c; *used=%d, len=%d\n",
1882 __func__, data[i], *used, len);
1883 smux.rx_state = SMUX_RX_IDLE;
1884 }
1885 }
1886
1887 *used = i;
1888}
1889
1890/**
1891 * RX State machine - Packet Header state processing.
1892 *
1893 * @data New RX data to process
1894 * @len Length of the data
1895 * @used Return value of length processed
1896 * @flag Error flag - TTY_NORMAL 0 for no failure
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001897 */
1898static void smux_rx_handle_hdr(const unsigned char *data,
1899 int len, int *used, int flag)
1900{
1901 int i;
1902 struct smux_hdr_t *hdr;
1903
1904 if (flag) {
1905 pr_err("%s: TTY RX error %d\n", __func__, flag);
1906 smux_enter_reset();
1907 smux.rx_state = SMUX_RX_FAILURE;
1908 ++*used;
1909 return;
1910 }
1911
1912 for (i = *used; i < len && smux.rx_state == SMUX_RX_HDR; i++) {
1913 smux.recv_buf[smux.recv_len++] = data[i];
1914
1915 if (smux.recv_len == sizeof(struct smux_hdr_t)) {
1916 /* complete header received */
1917 hdr = (struct smux_hdr_t *)smux.recv_buf;
1918 smux.pkt_remain = hdr->payload_len + hdr->pad_len;
1919 smux.rx_state = SMUX_RX_PAYLOAD;
1920 }
1921 }
1922 *used = i;
1923}
1924
1925/**
1926 * RX State machine - Packet Payload state processing.
1927 *
1928 * @data New RX data to process
1929 * @len Length of the data
1930 * @used Return value of length processed
1931 * @flag Error flag - TTY_NORMAL 0 for no failure
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001932 */
1933static void smux_rx_handle_pkt_payload(const unsigned char *data,
1934 int len, int *used, int flag)
1935{
1936 int remaining;
1937
1938 if (flag) {
1939 pr_err("%s: TTY RX error %d\n", __func__, flag);
1940 smux_enter_reset();
1941 smux.rx_state = SMUX_RX_FAILURE;
1942 ++*used;
1943 return;
1944 }
1945
1946 /* copy data into rx buffer */
1947 if (smux.pkt_remain < (len - *used))
1948 remaining = smux.pkt_remain;
1949 else
1950 remaining = len - *used;
1951
1952 memcpy(&smux.recv_buf[smux.recv_len], &data[*used], remaining);
1953 smux.recv_len += remaining;
1954 smux.pkt_remain -= remaining;
1955 *used += remaining;
1956
1957 if (smux.pkt_remain == 0) {
1958 /* complete packet received */
1959 smux_deserialize(smux.recv_buf, smux.recv_len);
1960 smux.rx_state = SMUX_RX_IDLE;
1961 }
1962}
1963
1964/**
1965 * Feed data to the receive state machine.
1966 *
1967 * @data Pointer to data block
1968 * @len Length of data
1969 * @flag TTY_NORMAL (0) for no error, otherwise TTY Error Flag
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001970 */
1971void smux_rx_state_machine(const unsigned char *data,
1972 int len, int flag)
1973{
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001974 struct smux_rx_worker_data work;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001975
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001976 work.data = data;
1977 work.len = len;
1978 work.flag = flag;
1979 INIT_WORK_ONSTACK(&work.work, smux_rx_worker);
1980 work.work_complete = COMPLETION_INITIALIZER_ONSTACK(work.work_complete);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001981
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001982 queue_work(smux_rx_wq, &work.work);
1983 wait_for_completion(&work.work_complete);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001984}
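/*
 * Note: the RX path hands incoming data to smux_rx_worker() through an
 * on-stack work item and then blocks on work_complete.  Passing stack
 * data to a workqueue is safe here only because the caller waits for the
 * completion before returning, and the hand-off serializes all RX
 * parsing in worker context (smux_rx_wq is assumed to be a
 * single-threaded workqueue, so packets are parsed in arrival order).
 */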
1985
1986/**
1987 * Add channel to transmit-ready list and trigger transmit worker.
1988 *
1989 * @ch Channel to add
1990 */
1991static void list_channel(struct smux_lch_t *ch)
1992{
1993 unsigned long flags;
1994
1995 SMUX_DBG("%s: listing channel %d\n",
1996 __func__, ch->lcid);
1997
1998 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
1999 spin_lock(&ch->tx_lock_lhb2);
2000 smux.tx_activity_flag = 1;
2001 if (list_empty(&ch->tx_ready_list))
2002 list_add_tail(&ch->tx_ready_list, &smux.lch_tx_ready_list);
2003 spin_unlock(&ch->tx_lock_lhb2);
2004 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2005
2006 queue_work(smux_tx_wq, &smux_tx_work);
2007}
2008
2009/**
2010 * Transmit packet on correct transport and then perform client
2011 * notification.
2012 *
2013 * @ch Channel to transmit on
2014 * @pkt Packet to transmit
2015 */
2016static void smux_tx_pkt(struct smux_lch_t *ch, struct smux_pkt_t *pkt)
2017{
2018 union notifier_metadata meta_write;
2019 int ret;
2020
2021 if (ch && pkt) {
2022 SMUX_LOG_PKT_TX(pkt);
2023 if (ch->local_mode == SMUX_LCH_MODE_LOCAL_LOOPBACK)
2024 ret = smux_tx_loopback(pkt);
2025 else
2026 ret = smux_tx_tty(pkt);
2027
2028 if (pkt->hdr.cmd == SMUX_CMD_DATA) {
2029 /* notify write-done */
2030 meta_write.write.pkt_priv = pkt->priv;
2031 meta_write.write.buffer = pkt->payload;
2032 meta_write.write.len = pkt->hdr.payload_len;
2033 if (ret >= 0) {
2034 				SMUX_DBG("%s: PKT write done\n", __func__);
2035 schedule_notify(ch->lcid, SMUX_WRITE_DONE,
2036 &meta_write);
2037 } else {
2038 pr_err("%s: failed to write pkt %d\n",
2039 __func__, ret);
2040 schedule_notify(ch->lcid, SMUX_WRITE_FAIL,
2041 &meta_write);
2042 }
2043 }
2044 }
2045}
2046
2047/**
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002048 * Flush pending TTY TX data.
2049 */
2050static void smux_flush_tty(void)
2051{
Eric Holmberg92a67df2012-06-25 13:56:24 -06002052 mutex_lock(&smux.mutex_lha0);
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002053 if (!smux.tty) {
2054 pr_err("%s: ldisc not loaded\n", __func__);
Eric Holmberg92a67df2012-06-25 13:56:24 -06002055 mutex_unlock(&smux.mutex_lha0);
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002056 return;
2057 }
2058
2059 tty_wait_until_sent(smux.tty,
2060 msecs_to_jiffies(TTY_BUFFER_FULL_WAIT_MS));
2061
2062 if (tty_chars_in_buffer(smux.tty) > 0)
2063 pr_err("%s: unable to flush UART queue\n", __func__);
Eric Holmberg92a67df2012-06-25 13:56:24 -06002064
2065 mutex_unlock(&smux.mutex_lha0);
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002066}
2067
2068/**
Eric Holmberged1f00c2012-06-07 09:45:18 -06002069 * Purge TX queue for logical channel.
2070 *
2071 * @ch Logical channel pointer
2072 *
2073 * Must be called with the following spinlocks locked:
2074 * state_lock_lhb1
2075 * tx_lock_lhb2
2076 */
2077static void smux_purge_ch_tx_queue(struct smux_lch_t *ch)
2078{
2079 struct smux_pkt_t *pkt;
2080 int send_disconnect = 0;
2081
2082 while (!list_empty(&ch->tx_queue)) {
2083 pkt = list_first_entry(&ch->tx_queue, struct smux_pkt_t,
2084 list);
2085 list_del(&pkt->list);
2086
2087 if (pkt->hdr.cmd == SMUX_CMD_OPEN_LCH) {
2088 /* Open was never sent, just force to closed state */
2089 ch->local_state = SMUX_LCH_LOCAL_CLOSED;
2090 send_disconnect = 1;
2091 } else if (pkt->hdr.cmd == SMUX_CMD_DATA) {
2092 /* Notify client of failed write */
2093 union notifier_metadata meta_write;
2094
2095 meta_write.write.pkt_priv = pkt->priv;
2096 meta_write.write.buffer = pkt->payload;
2097 meta_write.write.len = pkt->hdr.payload_len;
2098 schedule_notify(ch->lcid, SMUX_WRITE_FAIL, &meta_write);
2099 }
2100 smux_free_pkt(pkt);
2101 }
2102
2103 if (send_disconnect) {
2104 union notifier_metadata meta_disconnected;
2105
2106 meta_disconnected.disconnected.is_ssr = smux.in_reset;
2107 schedule_notify(ch->lcid, SMUX_DISCONNECTED,
2108 &meta_disconnected);
2109 }
2110}
2111
2112/**
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002113 * Power-up the UART.
Eric Holmberg92a67df2012-06-25 13:56:24 -06002114 *
2115 * Must be called with smux.mutex_lha0 already locked.
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002116 */
Eric Holmberg92a67df2012-06-25 13:56:24 -06002117static void smux_uart_power_on_atomic(void)
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002118{
2119 struct uart_state *state;
2120
2121 if (!smux.tty || !smux.tty->driver_data) {
2122 pr_err("%s: unable to find UART port for tty %p\n",
2123 __func__, smux.tty);
2124 return;
2125 }
2126 state = smux.tty->driver_data;
2127 msm_hs_request_clock_on(state->uart_port);
2128}
2129
2130/**
Eric Holmberg92a67df2012-06-25 13:56:24 -06002131 * Power-up the UART.
2132 */
2133static void smux_uart_power_on(void)
2134{
2135 mutex_lock(&smux.mutex_lha0);
2136 smux_uart_power_on_atomic();
2137 mutex_unlock(&smux.mutex_lha0);
2138}
2139
2140/**
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002141 * Power down the UART.
2142 */
2143static void smux_uart_power_off(void)
2144{
2145 struct uart_state *state;
2146
Eric Holmberg92a67df2012-06-25 13:56:24 -06002147 mutex_lock(&smux.mutex_lha0);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002148 if (!smux.tty || !smux.tty->driver_data) {
2149 pr_err("%s: unable to find UART port for tty %p\n",
2150 __func__, smux.tty);
Eric Holmberg92a67df2012-06-25 13:56:24 -06002151 mutex_unlock(&smux.mutex_lha0);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002152 return;
2153 }
2154 state = smux.tty->driver_data;
2155 msm_hs_request_clock_off(state->uart_port);
Eric Holmberg92a67df2012-06-25 13:56:24 -06002156 mutex_unlock(&smux.mutex_lha0);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002157}
2158
2159/**
2160 * TX Wakeup Worker
2161 *
2162 * @work Not used
2163 *
2164 * Do an exponential back-off wakeup sequence with a maximum period
2165 * of approximately 1 second (1 << 20 microseconds).
2166 */
2167static void smux_wakeup_worker(struct work_struct *work)
2168{
2169 unsigned long flags;
2170 unsigned wakeup_delay;
2171 int complete = 0;
2172
Eric Holmberged1f00c2012-06-07 09:45:18 -06002173 while (!smux.in_reset) {
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002174 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
2175 if (smux.power_state == SMUX_PWR_ON) {
2176 /* wakeup complete */
2177 complete = 1;
2178 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2179 break;
2180 } else {
2181 /* retry */
2182 wakeup_delay = smux.pwr_wakeup_delay_us;
2183 smux.pwr_wakeup_delay_us <<= 1;
2184 if (smux.pwr_wakeup_delay_us > SMUX_WAKEUP_DELAY_MAX)
2185 smux.pwr_wakeup_delay_us =
2186 SMUX_WAKEUP_DELAY_MAX;
2187 }
2188 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2189 SMUX_DBG("%s: triggering wakeup\n", __func__);
2190 smux_send_byte(SMUX_WAKEUP_REQ);
2191
2192 if (wakeup_delay < SMUX_WAKEUP_DELAY_MIN) {
2193 SMUX_DBG("%s: sleeping for %u us\n", __func__,
2194 wakeup_delay);
2195 usleep_range(wakeup_delay, 2*wakeup_delay);
2196 } else {
2197 /* schedule delayed work */
2198 SMUX_DBG("%s: scheduling delayed wakeup in %u ms\n",
2199 __func__, wakeup_delay / 1000);
2200 queue_delayed_work(smux_tx_wq,
2201 &smux_wakeup_delayed_work,
2202 msecs_to_jiffies(wakeup_delay / 1000));
2203 break;
2204 }
2205 }
2206
2207 if (complete) {
2208 SMUX_DBG("%s: wakeup complete\n", __func__);
2209 /*
2210 * Cancel any pending retry. This avoids a race condition with
2211 * a new power-up request because:
2212 * 1) this worker doesn't modify the state
2213 * 2) this worker is processed on the same single-threaded
2214 * workqueue as new TX wakeup requests
2215 */
2216 cancel_delayed_work(&smux_wakeup_delayed_work);
2217 }
2218}
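/*
 * Illustrative wakeup timing (a sketch, assuming pwr_wakeup_delay_us
 * starts at 1 as set by the TX worker): SMUX_WAKEUP_REQ is re-sent with
 * delays of 1, 2, 4, ... microseconds.  Once the delay reaches
 * SMUX_WAKEUP_DELAY_MIN (1 << 15 = 32768 us, ~33 ms), retries move to
 * delayed work in millisecond units, and the delay is capped at
 * SMUX_WAKEUP_DELAY_MAX (1 << 20 us, ~1 s), so an unresponsive remote
 * side costs roughly one wakeup request per second in steady state.
 */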
2219
2220
2221/**
2222 * Inactivity timeout worker. Periodically scheduled when link is active.
2223 * When it detects inactivity, it will power-down the UART link.
2224 *
2225 * @work Work structure (not used)
2226 */
2227static void smux_inactivity_worker(struct work_struct *work)
2228{
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002229 struct smux_pkt_t *pkt;
2230 unsigned long flags;
2231
2232 spin_lock_irqsave(&smux.rx_lock_lha1, flags);
2233 spin_lock(&smux.tx_lock_lha2);
2234
2235 if (!smux.tx_activity_flag && !smux.rx_activity_flag) {
2236 /* no activity */
2237 if (smux.powerdown_enabled) {
2238 if (smux.power_state == SMUX_PWR_ON) {
2239 /* start power-down sequence */
2240 pkt = smux_alloc_pkt();
2241 if (pkt) {
Eric Holmbergff0b0112012-06-08 15:06:57 -06002242 SMUX_PWR("%s: Power %d->%d\n", __func__,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002243 smux.power_state,
2244 SMUX_PWR_TURNING_OFF);
2245 smux.power_state = SMUX_PWR_TURNING_OFF;
2246
2247 /* send power-down request */
2248 pkt->hdr.cmd = SMUX_CMD_PWR_CTL;
2249 pkt->hdr.flags = 0;
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002250 pkt->hdr.lcid = SMUX_BROADCAST_LCID;
2251 list_add_tail(&pkt->list,
2252 &smux.power_queue);
2253 queue_work(smux_tx_wq, &smux_tx_work);
2254 } else {
2255 pr_err("%s: packet alloc failed\n",
2256 __func__);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002257 }
2258 }
2259 } else {
2260 SMUX_DBG("%s: link inactive, but powerdown disabled\n",
2261 __func__);
2262 }
2263 }
2264 smux.tx_activity_flag = 0;
2265 smux.rx_activity_flag = 0;
2266
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002267 if (smux.power_state == SMUX_PWR_OFF_FLUSH) {
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002268 /* ready to power-down the UART */
Eric Holmbergff0b0112012-06-08 15:06:57 -06002269 SMUX_PWR("%s: Power %d->%d\n", __func__,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002270 smux.power_state, SMUX_PWR_OFF);
Eric Holmbergff0b0112012-06-08 15:06:57 -06002271 smux.power_state = SMUX_PWR_OFF;
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002272
2273 /* if data is pending, schedule a new wakeup */
2274 if (!list_empty(&smux.lch_tx_ready_list) ||
2275 !list_empty(&smux.power_queue))
2276 queue_work(smux_tx_wq, &smux_tx_work);
2277
2278 spin_unlock(&smux.tx_lock_lha2);
2279 spin_unlock_irqrestore(&smux.rx_lock_lha1, flags);
2280
2281 /* flush UART output queue and power down */
2282 smux_flush_tty();
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002283 smux_uart_power_off();
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002284 } else {
2285 spin_unlock(&smux.tx_lock_lha2);
2286 spin_unlock_irqrestore(&smux.rx_lock_lha1, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002287 }
2288
2289 /* reschedule inactivity worker */
2290 if (smux.power_state != SMUX_PWR_OFF)
2291 queue_delayed_work(smux_tx_wq, &smux_delayed_inactivity_work,
2292 msecs_to_jiffies(SMUX_INACTIVITY_TIMEOUT_MS));
2293}
2294
2295/**
Eric Holmbergb8435c82012-06-05 14:51:29 -06002296 * Remove RX retry packet from channel and free it.
2297 *
2298 * Must be called with state_lock_lhb1 locked.
2299 *
2300 * @ch Channel for retry packet
2301 * @retry Retry packet to remove
2302 */
2303void smux_remove_rx_retry(struct smux_lch_t *ch,
2304 struct smux_rx_pkt_retry *retry)
2305{
2306 list_del(&retry->rx_retry_list);
2307 --ch->rx_retry_queue_cnt;
2308 smux_free_pkt(retry->pkt);
2309 kfree(retry);
2310}
2311
2312/**
Eric Holmberg0560f7a2012-05-31 15:50:26 -06002313 * RX worker handles all receive operations.
2314 *
2315 * @work Work structure contained in struct smux_rx_worker_data
2316 */
2317static void smux_rx_worker(struct work_struct *work)
2318{
2319 unsigned long flags;
2320 int used;
2321 int initial_rx_state;
2322 struct smux_rx_worker_data *w;
2323 const unsigned char *data;
2324 int len;
2325 int flag;
2326
2327 w = container_of(work, struct smux_rx_worker_data, work);
2328 data = w->data;
2329 len = w->len;
2330 flag = w->flag;
2331
2332 spin_lock_irqsave(&smux.rx_lock_lha1, flags);
2333 smux.rx_activity_flag = 1;
2334 spin_unlock_irqrestore(&smux.rx_lock_lha1, flags);
2335
2336 SMUX_DBG("%s: %p, len=%d, flag=%d\n", __func__, data, len, flag);
2337 used = 0;
2338 do {
2339 SMUX_DBG("%s: state %d; %d of %d\n",
2340 __func__, smux.rx_state, used, len);
2341 initial_rx_state = smux.rx_state;
2342
2343 switch (smux.rx_state) {
2344 case SMUX_RX_IDLE:
2345 smux_rx_handle_idle(data, len, &used, flag);
2346 break;
2347 case SMUX_RX_MAGIC:
2348 smux_rx_handle_magic(data, len, &used, flag);
2349 break;
2350 case SMUX_RX_HDR:
2351 smux_rx_handle_hdr(data, len, &used, flag);
2352 break;
2353 case SMUX_RX_PAYLOAD:
2354 smux_rx_handle_pkt_payload(data, len, &used, flag);
2355 break;
2356 default:
2357 SMUX_DBG("%s: invalid state %d\n",
2358 __func__, smux.rx_state);
2359 smux.rx_state = SMUX_RX_IDLE;
2360 break;
2361 }
2362 } while (used < len || smux.rx_state != initial_rx_state);
2363
2364 complete(&w->work_complete);
2365}
2366
2367/**
Eric Holmbergb8435c82012-06-05 14:51:29 -06002368 * RX Retry worker handles retrying get_rx_buffer calls that previously failed
2369 * because the client was not ready (-EAGAIN).
2370 *
2371 * @work Work structure contained in smux_lch_t structure
2372 */
2373static void smux_rx_retry_worker(struct work_struct *work)
2374{
2375 struct smux_lch_t *ch;
2376 struct smux_rx_pkt_retry *retry;
2377 union notifier_metadata metadata;
2378 int tmp;
2379 unsigned long flags;
2380
2381 ch = container_of(work, struct smux_lch_t, rx_retry_work.work);
2382
2383 /* get next retry packet */
2384 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2385 if (ch->local_state != SMUX_LCH_LOCAL_OPENED) {
2386 /* port has been closed - remove all retries */
2387 while (!list_empty(&ch->rx_retry_queue)) {
2388 retry = list_first_entry(&ch->rx_retry_queue,
2389 struct smux_rx_pkt_retry,
2390 rx_retry_list);
2391 smux_remove_rx_retry(ch, retry);
2392 }
2393 }
2394
2395 if (list_empty(&ch->rx_retry_queue)) {
2396 SMUX_DBG("%s: retry list empty for channel %d\n",
2397 __func__, ch->lcid);
2398 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2399 return;
2400 }
2401 retry = list_first_entry(&ch->rx_retry_queue,
2402 struct smux_rx_pkt_retry,
2403 rx_retry_list);
2404 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2405
2406 SMUX_DBG("%s: retrying rx pkt %p\n", __func__, retry);
2407 metadata.read.pkt_priv = 0;
2408 metadata.read.buffer = 0;
2409 tmp = ch->get_rx_buffer(ch->priv,
2410 (void **)&metadata.read.pkt_priv,
2411 (void **)&metadata.read.buffer,
2412 retry->pkt->hdr.payload_len);
2413 if (tmp == 0 && metadata.read.buffer) {
2414 /* have valid RX buffer */
2415 memcpy(metadata.read.buffer, retry->pkt->payload,
2416 retry->pkt->hdr.payload_len);
2417 metadata.read.len = retry->pkt->hdr.payload_len;
2418
2419 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2420 smux_remove_rx_retry(ch, retry);
2421 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2422
2423 schedule_notify(ch->lcid, SMUX_READ_DONE, &metadata);
2424 } else if (tmp == -EAGAIN ||
2425 (tmp == 0 && !metadata.read.buffer)) {
2426 /* retry again */
2427 retry->timeout_in_ms <<= 1;
2428 if (retry->timeout_in_ms > SMUX_RX_RETRY_MAX_MS) {
2429 /* timed out */
2430 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2431 smux_remove_rx_retry(ch, retry);
2432 schedule_notify(ch->lcid, SMUX_READ_FAIL, NULL);
2433 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2434 }
2435 } else {
2436 /* client error - drop packet */
2437 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2438 smux_remove_rx_retry(ch, retry);
2439 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2440
2441 schedule_notify(ch->lcid, SMUX_READ_FAIL, NULL);
2442 }
2443
2444 /* schedule next retry */
2445 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2446 if (!list_empty(&ch->rx_retry_queue)) {
2447 retry = list_first_entry(&ch->rx_retry_queue,
2448 struct smux_rx_pkt_retry,
2449 rx_retry_list);
2450 queue_delayed_work(smux_rx_wq, &ch->rx_retry_work,
2451 msecs_to_jiffies(retry->timeout_in_ms));
2452 }
2453 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2454}
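/*
 * Illustrative retry timing (a sketch, assuming timeout_in_ms starts at
 * SMUX_RX_RETRY_MIN_MS): a client that keeps returning -EAGAIN from
 * get_rx_buffer() is retried after 1, 2, 4, ..., 1024 ms.  When the
 * doubled timeout exceeds SMUX_RX_RETRY_MAX_MS, the packet is dropped
 * and the client receives SMUX_READ_FAIL, i.e. roughly two seconds of
 * cumulative retrying per packet.
 */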
2455
2456/**
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002457 * Transmit worker handles serializing and transmitting packets onto the
2458 * underlying transport.
2459 *
2460 * @work Work structure (not used)
2461 */
2462static void smux_tx_worker(struct work_struct *work)
2463{
2464 struct smux_pkt_t *pkt;
2465 struct smux_lch_t *ch;
2466 unsigned low_wm_notif;
2467 unsigned lcid;
2468 unsigned long flags;
2469
2470
2471 /*
2472 * Transmit packets in round-robin fashion based upon ready
2473 * channels.
2474 *
2475 * To eliminate the need to hold a lock for the entire
2476 * iteration through the channel ready list, the head of the
2477 * ready-channel list is always the next channel to be
2478 * processed. To send a packet, the first valid packet in
2479 * the head channel is removed and the head channel is then
2480 * rescheduled at the end of the queue by removing it and
2481 * inserting after the tail. The locks can then be released
2482 * while the packet is processed.
2483 */
Eric Holmberged1f00c2012-06-07 09:45:18 -06002484 while (!smux.in_reset) {
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002485 pkt = NULL;
2486 low_wm_notif = 0;
2487
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002488 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002489
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002490 /* handle wakeup if needed */
2491 if (smux.power_state == SMUX_PWR_OFF) {
2492 if (!list_empty(&smux.lch_tx_ready_list) ||
2493 !list_empty(&smux.power_queue)) {
2494 /* data to transmit, do wakeup */
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002495 smux.pwr_wakeup_delay_us = 1;
Eric Holmbergff0b0112012-06-08 15:06:57 -06002496 SMUX_PWR("%s: Power %d->%d\n", __func__,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002497 smux.power_state,
2498 SMUX_PWR_TURNING_ON);
2499 smux.power_state = SMUX_PWR_TURNING_ON;
2500 spin_unlock_irqrestore(&smux.tx_lock_lha2,
2501 flags);
2502 smux_uart_power_on();
2503 queue_work(smux_tx_wq, &smux_wakeup_work);
2504 } else {
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002505 /* no activity -- stay asleep */
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002506 spin_unlock_irqrestore(&smux.tx_lock_lha2,
2507 flags);
2508 }
2509 break;
2510 }
2511
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002512 /* process any pending power packets */
2513 if (!list_empty(&smux.power_queue)) {
2514 pkt = list_first_entry(&smux.power_queue,
2515 struct smux_pkt_t, list);
2516 list_del(&pkt->list);
2517 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2518
2519 /* send the packet */
2520 SMUX_LOG_PKT_TX(pkt);
2521 if (!smux_byte_loopback) {
2522 smux_tx_tty(pkt);
2523 smux_flush_tty();
2524 } else {
2525 smux_tx_loopback(pkt);
2526 }
2527
2528 /* Adjust power state if this is a flush command */
2529 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
2530 if (smux.power_state == SMUX_PWR_TURNING_OFF_FLUSH &&
2531 pkt->hdr.cmd == SMUX_CMD_PWR_CTL &&
2532 (pkt->hdr.flags & SMUX_CMD_PWR_CTL_ACK)) {
Eric Holmbergff0b0112012-06-08 15:06:57 -06002533 SMUX_PWR("%s: Power %d->%d\n", __func__,
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002534 smux.power_state,
2535 SMUX_PWR_OFF_FLUSH);
2536 smux.power_state = SMUX_PWR_OFF_FLUSH;
2537 queue_work(smux_tx_wq, &smux_inactivity_work);
2538 }
2539 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2540
2541 smux_free_pkt(pkt);
2542 continue;
2543 }
2544
2545 /* get the next ready channel */
2546 if (list_empty(&smux.lch_tx_ready_list)) {
2547 /* no ready channels */
2548 SMUX_DBG("%s: no more ready channels, exiting\n",
2549 __func__);
2550 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2551 break;
2552 }
2553 smux.tx_activity_flag = 1;
2554
2555 if (smux.power_state != SMUX_PWR_ON) {
2556 /* channel not ready to transmit */
2557 SMUX_DBG("%s: can not tx with power state %d\n",
2558 __func__,
2559 smux.power_state);
2560 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2561 break;
2562 }
2563
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002564 /* get the next packet to send and rotate channel list */
2565 ch = list_first_entry(&smux.lch_tx_ready_list,
2566 struct smux_lch_t,
2567 tx_ready_list);
2568
2569 spin_lock(&ch->state_lock_lhb1);
2570 spin_lock(&ch->tx_lock_lhb2);
2571 if (!list_empty(&ch->tx_queue)) {
2572 /*
2573 * If remote TX flow control is enabled or
2574 * the channel is not fully opened, then only
2575 * send command packets.
2576 */
2577 if (ch->tx_flow_control || !IS_FULLY_OPENED(ch)) {
2578 struct smux_pkt_t *curr;
2579 list_for_each_entry(curr, &ch->tx_queue, list) {
2580 if (curr->hdr.cmd != SMUX_CMD_DATA) {
2581 pkt = curr;
2582 break;
2583 }
2584 }
2585 } else {
2586 /* get next cmd/data packet to send */
2587 pkt = list_first_entry(&ch->tx_queue,
2588 struct smux_pkt_t, list);
2589 }
2590 }
2591
2592 if (pkt) {
2593 list_del(&pkt->list);
2594
2595 /* update packet stats */
2596 if (pkt->hdr.cmd == SMUX_CMD_DATA) {
2597 --ch->tx_pending_data_cnt;
2598 if (ch->notify_lwm &&
2599 ch->tx_pending_data_cnt
2600 <= SMUX_WM_LOW) {
2601 ch->notify_lwm = 0;
2602 low_wm_notif = 1;
2603 }
2604 }
2605
2606 /* advance to the next ready channel */
2607 list_rotate_left(&smux.lch_tx_ready_list);
2608 } else {
2609 /* no data in channel to send, remove from ready list */
2610 list_del(&ch->tx_ready_list);
2611 INIT_LIST_HEAD(&ch->tx_ready_list);
2612 }
2613 lcid = ch->lcid;
2614 spin_unlock(&ch->tx_lock_lhb2);
2615 spin_unlock(&ch->state_lock_lhb1);
2616 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2617
2618 if (low_wm_notif)
2619 schedule_notify(lcid, SMUX_LOW_WM_HIT, NULL);
2620
2621 /* send the packet */
2622 smux_tx_pkt(ch, pkt);
2623 smux_free_pkt(pkt);
2624 }
2625}
2626
2627
2628/**********************************************************************/
2629/* Kernel API */
2630/**********************************************************************/
2631
2632/**
2633 * Set or clear channel option using the SMUX_CH_OPTION_* channel
2634 * flags.
2635 *
2636 * @lcid Logical channel ID
2637 * @set Options to set
2638 * @clear Options to clear
2639 *
2640 * @returns 0 for success, < 0 for failure
2641 */
2642int msm_smux_set_ch_option(uint8_t lcid, uint32_t set, uint32_t clear)
2643{
2644 unsigned long flags;
2645 struct smux_lch_t *ch;
2646 int tx_ready = 0;
2647 int ret = 0;
2648
2649 if (smux_assert_lch_id(lcid))
2650 return -ENXIO;
2651
2652 ch = &smux_lch[lcid];
2653 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2654
2655 /* Local loopback mode */
2656 if (set & SMUX_CH_OPTION_LOCAL_LOOPBACK)
2657 ch->local_mode = SMUX_LCH_MODE_LOCAL_LOOPBACK;
2658
2659 if (clear & SMUX_CH_OPTION_LOCAL_LOOPBACK)
2660 ch->local_mode = SMUX_LCH_MODE_NORMAL;
2661
2662 /* Remote loopback mode */
2663 if (set & SMUX_CH_OPTION_REMOTE_LOOPBACK)
2664 ch->local_mode = SMUX_LCH_MODE_REMOTE_LOOPBACK;
2665
2666 if (clear & SMUX_CH_OPTION_REMOTE_LOOPBACK)
2667 ch->local_mode = SMUX_LCH_MODE_NORMAL;
2668
2669 /* Flow control */
2670 if (set & SMUX_CH_OPTION_REMOTE_TX_STOP) {
2671 ch->local_tiocm |= SMUX_CMD_STATUS_FLOW_CNTL;
2672 ret = smux_send_status_cmd(ch);
2673 tx_ready = 1;
2674 }
2675
2676 if (clear & SMUX_CH_OPTION_REMOTE_TX_STOP) {
2677 ch->local_tiocm &= ~SMUX_CMD_STATUS_FLOW_CNTL;
2678 ret = smux_send_status_cmd(ch);
2679 tx_ready = 1;
2680 }
2681
2682 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2683
2684 if (tx_ready)
2685 list_channel(ch);
2686
2687 return ret;
2688}
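/*
 * Example usage (sketch; lcid is a placeholder chosen by the client):
 * put a channel into local loopback for testing, then restore normal
 * routing:
 *
 *	msm_smux_set_ch_option(lcid, SMUX_CH_OPTION_LOCAL_LOOPBACK, 0);
 *	...
 *	msm_smux_set_ch_option(lcid, 0, SMUX_CH_OPTION_LOCAL_LOOPBACK);
 *
 * SMUX_CH_OPTION_REMOTE_TX_STOP works the same way and additionally
 * queues a STATUS command with the flow-control bit toward the remote
 * side.
 */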
2689
2690/**
2691 * Starts the opening sequence for a logical channel.
2692 *
2693 * @lcid Logical channel ID
2694 * @priv Free for client usage
2695 * @notify Event notification function
2696 * @get_rx_buffer Function used to provide a receive buffer to SMUX
2697 *
2698 * @returns 0 for success, <0 otherwise
2699 *
2700 * A channel must be fully closed (either not previously opened, or
2701 * msm_smux_close() has been called and the SMUX_DISCONNECTED notification
2702 * has been received).
2703 *
2704 * Once the remote side is opened, the client will receive a SMUX_CONNECTED
2705 * event.
2706 */
2707int msm_smux_open(uint8_t lcid, void *priv,
2708 void (*notify)(void *priv, int event_type, const void *metadata),
2709 int (*get_rx_buffer)(void *priv, void **pkt_priv, void **buffer,
2710 int size))
2711{
2712 int ret;
2713 struct smux_lch_t *ch;
2714 struct smux_pkt_t *pkt;
2715 int tx_ready = 0;
2716 unsigned long flags;
2717
2718 if (smux_assert_lch_id(lcid))
2719 return -ENXIO;
2720
2721 ch = &smux_lch[lcid];
2722 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2723
2724 if (ch->local_state == SMUX_LCH_LOCAL_CLOSING) {
2725 ret = -EAGAIN;
2726 goto out;
2727 }
2728
2729 if (ch->local_state != SMUX_LCH_LOCAL_CLOSED) {
2730 pr_err("%s: open lcid %d local state %x invalid\n",
2731 __func__, lcid, ch->local_state);
2732 ret = -EINVAL;
2733 goto out;
2734 }
2735
2736 SMUX_DBG("lcid %d local state 0x%x -> 0x%x\n", lcid,
2737 ch->local_state,
2738 SMUX_LCH_LOCAL_OPENING);
2739
2740 ch->local_state = SMUX_LCH_LOCAL_OPENING;
2741
2742 ch->priv = priv;
2743 ch->notify = notify;
2744 ch->get_rx_buffer = get_rx_buffer;
2745 ret = 0;
2746
2747 /* Send Open Command */
2748 pkt = smux_alloc_pkt();
2749 if (!pkt) {
2750 ret = -ENOMEM;
2751 goto out;
2752 }
2753 pkt->hdr.magic = SMUX_MAGIC;
2754 pkt->hdr.cmd = SMUX_CMD_OPEN_LCH;
2755 pkt->hdr.flags = SMUX_CMD_OPEN_POWER_COLLAPSE;
2756 if (ch->local_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK)
2757 pkt->hdr.flags |= SMUX_CMD_OPEN_REMOTE_LOOPBACK;
2758 pkt->hdr.lcid = lcid;
2759 pkt->hdr.payload_len = 0;
2760 pkt->hdr.pad_len = 0;
2761 smux_tx_queue(pkt, ch, 0);
2762 tx_ready = 1;
2763
2764out:
2765 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2766 if (tx_ready)
2767 list_channel(ch);
2768 return ret;
2769}
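/*
 * Example client skeleton (a sketch only; the my_* names are
 * hypothetical and not part of this driver):
 *
 *	static void my_notify(void *priv, int event_type, const void *metadata)
 *	{
 *		if (event_type == SMUX_CONNECTED)
 *			;	// channel fully open; queued writes will now be sent
 *	}
 *
 *	static int my_get_rx_buffer(void *priv, void **pkt_priv, void **buffer,
 *				    int size)
 *	{
 *		*buffer = kmalloc(size, GFP_ATOMIC);
 *		if (!*buffer)
 *			return -EAGAIN;	// triggers the RX retry path above
 *		*pkt_priv = NULL;
 *		return 0;
 *	}
 *
 *	ret = msm_smux_open(lcid, my_ctx, my_notify, my_get_rx_buffer);
 */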
2770
2771/**
2772 * Starts the closing sequence for a logical channel.
2773 *
2774 * @lcid Logical channel ID
2775 *
2776 * @returns 0 for success, <0 otherwise
2777 *
2778 * Once the close event has been acknowledged by the remote side, the client
2779 * will receive a SMUX_DISCONNECTED notification.
2780 */
2781int msm_smux_close(uint8_t lcid)
2782{
2783 int ret = 0;
2784 struct smux_lch_t *ch;
2785 struct smux_pkt_t *pkt;
2786 int tx_ready = 0;
2787 unsigned long flags;
2788
2789 if (smux_assert_lch_id(lcid))
2790 return -ENXIO;
2791
2792 ch = &smux_lch[lcid];
2793 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2794 ch->local_tiocm = 0x0;
2795 ch->remote_tiocm = 0x0;
2796 ch->tx_pending_data_cnt = 0;
2797 ch->notify_lwm = 0;
2798
2799 /* Purge TX queue */
2800 spin_lock(&ch->tx_lock_lhb2);
Eric Holmberged1f00c2012-06-07 09:45:18 -06002801 smux_purge_ch_tx_queue(ch);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002802 spin_unlock(&ch->tx_lock_lhb2);
2803
2804 /* Send Close Command */
2805 if (ch->local_state == SMUX_LCH_LOCAL_OPENED ||
2806 ch->local_state == SMUX_LCH_LOCAL_OPENING) {
2807 SMUX_DBG("lcid %d local state 0x%x -> 0x%x\n", lcid,
2808 ch->local_state,
2809 SMUX_LCH_LOCAL_CLOSING);
2810
2811 ch->local_state = SMUX_LCH_LOCAL_CLOSING;
2812 pkt = smux_alloc_pkt();
2813 if (pkt) {
2814 pkt->hdr.cmd = SMUX_CMD_CLOSE_LCH;
2815 pkt->hdr.flags = 0;
2816 pkt->hdr.lcid = lcid;
2817 pkt->hdr.payload_len = 0;
2818 pkt->hdr.pad_len = 0;
2819 smux_tx_queue(pkt, ch, 0);
2820 tx_ready = 1;
2821 } else {
2822 pr_err("%s: pkt allocation failed\n", __func__);
2823 ret = -ENOMEM;
2824 }
Eric Holmbergb8435c82012-06-05 14:51:29 -06002825
2826 /* Purge RX retry queue */
2827 if (ch->rx_retry_queue_cnt)
2828 queue_delayed_work(smux_rx_wq, &ch->rx_retry_work, 0);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002829 }
2830 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2831
2832 if (tx_ready)
2833 list_channel(ch);
2834
2835 return ret;
2836}
2837
2838/**
2839 * Write data to a logical channel.
2840 *
2841 * @lcid Logical channel ID
2842 * @pkt_priv Client data that will be returned with the SMUX_WRITE_DONE or
2843 * SMUX_WRITE_FAIL notification.
2844 * @data Data to write
2845 * @len Length of @data
2846 *
2847 * @returns 0 for success, <0 otherwise
2848 *
2849 * Data may be written immediately after msm_smux_open() is called,
2850 * but the data will wait in the transmit queue until the channel has
2851 * been fully opened.
2852 *
2853 * Once the data has been written, the client will receive either a completion
2854 * (SMUX_WRITE_DONE) or a failure notice (SMUX_WRITE_FAIL).
2855 */
2856int msm_smux_write(uint8_t lcid, void *pkt_priv, const void *data, int len)
2857{
2858 struct smux_lch_t *ch;
2859 	struct smux_pkt_t *pkt = NULL;
2860 int tx_ready = 0;
2861 unsigned long flags;
2862 int ret;
2863
2864 if (smux_assert_lch_id(lcid))
2865 return -ENXIO;
2866
2867 ch = &smux_lch[lcid];
2868 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2869
2870 if (ch->local_state != SMUX_LCH_LOCAL_OPENED &&
2871 ch->local_state != SMUX_LCH_LOCAL_OPENING) {
2872 		pr_err("%s: invalid local state %d channel %d\n",
2873 __func__, ch->local_state, lcid);
2874 ret = -EINVAL;
2875 goto out;
2876 }
2877
2878 if (len > SMUX_MAX_PKT_SIZE - sizeof(struct smux_hdr_t)) {
2879 pr_err("%s: payload %d too large\n",
2880 __func__, len);
2881 ret = -E2BIG;
2882 goto out;
2883 }
2884
2885 pkt = smux_alloc_pkt();
2886 if (!pkt) {
2887 ret = -ENOMEM;
2888 goto out;
2889 }
2890
2891 pkt->hdr.cmd = SMUX_CMD_DATA;
2892 pkt->hdr.lcid = lcid;
2893 pkt->hdr.flags = 0;
2894 pkt->hdr.payload_len = len;
2895 pkt->payload = (void *)data;
2896 pkt->priv = pkt_priv;
2897 pkt->hdr.pad_len = 0;
2898
2899 spin_lock(&ch->tx_lock_lhb2);
2900 /* verify high watermark */
2901 	SMUX_DBG("%s: pending %d\n", __func__, ch->tx_pending_data_cnt);
2902
2903 if (ch->tx_pending_data_cnt >= SMUX_WM_HIGH) {
2904 pr_err("%s: ch %d high watermark %d exceeded %d\n",
2905 __func__, lcid, SMUX_WM_HIGH,
2906 ch->tx_pending_data_cnt);
2907 ret = -EAGAIN;
2908 goto out_inner;
2909 }
2910
2911 /* queue packet for transmit */
2912 if (++ch->tx_pending_data_cnt == SMUX_WM_HIGH) {
2913 ch->notify_lwm = 1;
2914 pr_err("%s: high watermark hit\n", __func__);
2915 schedule_notify(lcid, SMUX_HIGH_WM_HIT, NULL);
2916 }
2917 list_add_tail(&pkt->list, &ch->tx_queue);
2918
2919 /* add to ready list */
2920 if (IS_FULLY_OPENED(ch))
2921 tx_ready = 1;
2922
2923 ret = 0;
2924
2925out_inner:
2926 spin_unlock(&ch->tx_lock_lhb2);
2927
2928out:
2929 	if (ret && pkt)
2930 		smux_free_pkt(pkt);
2931 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2932
2933 if (tx_ready)
2934 list_channel(ch);
2935
2936 return ret;
2937}
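/*
 * Example write sequence (sketch; my_* names are hypothetical):
 *
 *	if (!msm_smux_is_ch_full(lcid)) {
 *		ret = msm_smux_write(lcid, my_pkt_ctx, my_buf, my_len);
 *		if (ret == -EAGAIN)
 *			;	// SMUX_WM_HIGH packets already pending; wait for
 *				// the SMUX_LOW_WM_HIT notification and retry
 *	}
 *
 * The caller must keep @data valid until it is handed back in the
 * SMUX_WRITE_DONE or SMUX_WRITE_FAIL notification (meta->write.buffer),
 * since the queued packet references the buffer rather than copying it.
 */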
2938
2939/**
2940 * Returns true if the TX queue is currently full (high water mark).
2941 *
2942 * @lcid Logical channel ID
2943 * @returns 0 if channel is not full
2944 * 1 if it is full
2945 * < 0 for error
2946 */
2947int msm_smux_is_ch_full(uint8_t lcid)
2948{
2949 struct smux_lch_t *ch;
2950 unsigned long flags;
2951 int is_full = 0;
2952
2953 if (smux_assert_lch_id(lcid))
2954 return -ENXIO;
2955
2956 ch = &smux_lch[lcid];
2957
2958 spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
2959 if (ch->tx_pending_data_cnt >= SMUX_WM_HIGH)
2960 is_full = 1;
2961 spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);
2962
2963 return is_full;
2964}
2965
2966/**
2967 * Returns true if the TX queue has space for more packets (it is at or
2968 * below the low water mark).
2969 *
2970 * @lcid Logical channel ID
2971 * @returns 0 if channel is above low watermark
2972 * 1 if it's at or below the low watermark
2973 * < 0 for error
2974 */
2975int msm_smux_is_ch_low(uint8_t lcid)
2976{
2977 struct smux_lch_t *ch;
2978 unsigned long flags;
2979 int is_low = 0;
2980
2981 if (smux_assert_lch_id(lcid))
2982 return -ENXIO;
2983
2984 ch = &smux_lch[lcid];
2985
2986 spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
2987 if (ch->tx_pending_data_cnt <= SMUX_WM_LOW)
2988 is_low = 1;
2989 spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);
2990
2991 return is_low;
2992}
2993
2994/**
2995 * Send TIOCM status update.
2996 *
2997 * @ch Channel for update
2998 *
2999 * @returns 0 for success, <0 for failure
3000 *
3001 * Channel lock must be held before calling.
3002 */
3003static int smux_send_status_cmd(struct smux_lch_t *ch)
3004{
3005 struct smux_pkt_t *pkt;
3006
3007 if (!ch)
3008 return -EINVAL;
3009
3010 pkt = smux_alloc_pkt();
3011 if (!pkt)
3012 return -ENOMEM;
3013
3014 pkt->hdr.lcid = ch->lcid;
3015 pkt->hdr.cmd = SMUX_CMD_STATUS;
3016 pkt->hdr.flags = ch->local_tiocm;
3017 pkt->hdr.payload_len = 0;
3018 pkt->hdr.pad_len = 0;
3019 smux_tx_queue(pkt, ch, 0);
3020
3021 return 0;
3022}
3023
3024/**
3025 * Internal helper function for getting the TIOCM status with
3026 * state_lock_lhb1 already locked.
3027 *
3028 * @ch Channel pointer
3029 *
3030 * @returns TIOCM status
3031 */
3032static long msm_smux_tiocm_get_atomic(struct smux_lch_t *ch)
3033{
3034 long status = 0x0;
3035
3036 status |= (ch->remote_tiocm & SMUX_CMD_STATUS_RTC) ? TIOCM_DSR : 0;
3037 status |= (ch->remote_tiocm & SMUX_CMD_STATUS_RTR) ? TIOCM_CTS : 0;
3038 status |= (ch->remote_tiocm & SMUX_CMD_STATUS_RI) ? TIOCM_RI : 0;
3039 status |= (ch->remote_tiocm & SMUX_CMD_STATUS_DCD) ? TIOCM_CD : 0;
3040
3041 status |= (ch->local_tiocm & SMUX_CMD_STATUS_RTC) ? TIOCM_DTR : 0;
3042 status |= (ch->local_tiocm & SMUX_CMD_STATUS_RTR) ? TIOCM_RTS : 0;
3043
3044 return status;
3045}
3046
3047/**
3048 * Get the TIOCM status bits.
3049 *
3050 * @lcid Logical channel ID
3051 *
3052 * @returns >= 0 TIOCM status bits
3053 * < 0 Error condition
3054 */
3055long msm_smux_tiocm_get(uint8_t lcid)
3056{
3057 struct smux_lch_t *ch;
3058 unsigned long flags;
3059 long status = 0x0;
3060
3061 if (smux_assert_lch_id(lcid))
3062 return -ENXIO;
3063
3064 ch = &smux_lch[lcid];
3065 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
3066 status = msm_smux_tiocm_get_atomic(ch);
3067 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
3068
3069 return status;
3070}
3071
3072/**
3073 * Set/clear the TIOCM status bits.
3074 *
3075 * @lcid Logical channel ID
3076 * @set Bits to set
3077 * @clear Bits to clear
3078 *
3079 * @returns 0 for success; < 0 for failure
3080 *
3081 * If a bit is specified in both the @set and @clear masks, then the clear bit
3082 * definition will dominate and the bit will be cleared.
3083 */
3084int msm_smux_tiocm_set(uint8_t lcid, uint32_t set, uint32_t clear)
3085{
3086 struct smux_lch_t *ch;
3087 unsigned long flags;
3088 uint8_t old_status;
3089 uint8_t status_set = 0x0;
3090 uint8_t status_clear = 0x0;
3091 int tx_ready = 0;
3092 int ret = 0;
3093
3094 if (smux_assert_lch_id(lcid))
3095 return -ENXIO;
3096
3097 ch = &smux_lch[lcid];
3098 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
3099
3100 status_set |= (set & TIOCM_DTR) ? SMUX_CMD_STATUS_RTC : 0;
3101 status_set |= (set & TIOCM_RTS) ? SMUX_CMD_STATUS_RTR : 0;
3102 status_set |= (set & TIOCM_RI) ? SMUX_CMD_STATUS_RI : 0;
3103 status_set |= (set & TIOCM_CD) ? SMUX_CMD_STATUS_DCD : 0;
3104
3105 status_clear |= (clear & TIOCM_DTR) ? SMUX_CMD_STATUS_RTC : 0;
3106 status_clear |= (clear & TIOCM_RTS) ? SMUX_CMD_STATUS_RTR : 0;
3107 status_clear |= (clear & TIOCM_RI) ? SMUX_CMD_STATUS_RI : 0;
3108 status_clear |= (clear & TIOCM_CD) ? SMUX_CMD_STATUS_DCD : 0;
3109
3110 old_status = ch->local_tiocm;
3111 ch->local_tiocm |= status_set;
3112 ch->local_tiocm &= ~status_clear;
3113
3114 if (ch->local_tiocm != old_status) {
3115 ret = smux_send_status_cmd(ch);
3116 tx_ready = 1;
3117 }
3118 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
3119
3120 if (tx_ready)
3121 list_channel(ch);
3122
3123 return ret;
3124}
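/*
 * Example (sketch): assert DTR and RTS toward the remote side, then drop
 * them later; each call that changes the local status queues a
 * SMUX_CMD_STATUS packet:
 *
 *	msm_smux_tiocm_set(lcid, TIOCM_DTR | TIOCM_RTS, 0);
 *	...
 *	msm_smux_tiocm_set(lcid, 0, TIOCM_DTR | TIOCM_RTS);
 */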
3125
3126/**********************************************************************/
Eric Holmberged1f00c2012-06-07 09:45:18 -06003127/* Subsystem Restart */
3128/**********************************************************************/
3129static struct notifier_block ssr_notifier = {
3130 .notifier_call = ssr_notifier_cb,
3131};
3132
3133/**
3134 * Handle Subsystem Restart (SSR) notifications.
3135 *
3136 * @this Pointer to ssr_notifier
3137 * @code SSR Code
3138 * @data Data pointer (not used)
3139 */
3140static int ssr_notifier_cb(struct notifier_block *this,
3141 unsigned long code,
3142 void *data)
3143{
3144 unsigned long flags;
3145 int power_off_uart = 0;
3146
Eric Holmbergd2697902012-06-15 09:58:46 -06003147 if (code == SUBSYS_BEFORE_SHUTDOWN) {
3148 SMUX_DBG("%s: ssr - before shutdown\n", __func__);
3149 mutex_lock(&smux.mutex_lha0);
3150 smux.in_reset = 1;
3151 mutex_unlock(&smux.mutex_lha0);
Eric Holmberged1f00c2012-06-07 09:45:18 -06003152 return NOTIFY_DONE;
Eric Holmbergd2697902012-06-15 09:58:46 -06003153 } else if (code != SUBSYS_AFTER_SHUTDOWN) {
3154 return NOTIFY_DONE;
3155 }
3156 SMUX_DBG("%s: ssr - after shutdown\n", __func__);
Eric Holmberged1f00c2012-06-07 09:45:18 -06003157
3158 /* Cleanup channels */
Eric Holmbergd2697902012-06-15 09:58:46 -06003159 mutex_lock(&smux.mutex_lha0);
Eric Holmberged1f00c2012-06-07 09:45:18 -06003160 smux_lch_purge();
Eric Holmbergd2697902012-06-15 09:58:46 -06003161 if (smux.tty)
3162 tty_driver_flush_buffer(smux.tty);
Eric Holmberged1f00c2012-06-07 09:45:18 -06003163
3164 /* Power-down UART */
3165 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
3166 if (smux.power_state != SMUX_PWR_OFF) {
Eric Holmbergff0b0112012-06-08 15:06:57 -06003167 SMUX_PWR("%s: SSR - turning off UART\n", __func__);
Eric Holmberged1f00c2012-06-07 09:45:18 -06003168 smux.power_state = SMUX_PWR_OFF;
3169 power_off_uart = 1;
3170 }
Eric Holmbergd2697902012-06-15 09:58:46 -06003171 smux.powerdown_enabled = 0;
Eric Holmberged1f00c2012-06-07 09:45:18 -06003172 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
3173
3174 if (power_off_uart)
3175 smux_uart_power_off();
3176
Eric Holmbergd2697902012-06-15 09:58:46 -06003177 smux.in_reset = 0;
3178 mutex_unlock(&smux.mutex_lha0);
3179
Eric Holmberged1f00c2012-06-07 09:45:18 -06003180 return NOTIFY_DONE;
3181}
3182
3183/**********************************************************************/
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003184/* Line Discipline Interface */
3185/**********************************************************************/
Eric Holmberged1f00c2012-06-07 09:45:18 -06003186static void smux_pdev_release(struct device *dev)
3187{
3188 struct platform_device *pdev;
3189
3190 pdev = container_of(dev, struct platform_device, dev);
3191 SMUX_DBG("%s: releasing pdev %p '%s'\n", __func__, pdev, pdev->name);
3192 memset(&pdev->dev, 0x0, sizeof(pdev->dev));
3193}
3194
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003195static int smuxld_open(struct tty_struct *tty)
3196{
3197 int i;
3198 int tmp;
3199 unsigned long flags;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003200
3201 if (!smux.is_initialized)
3202 return -ENODEV;
3203
Eric Holmberged1f00c2012-06-07 09:45:18 -06003204 mutex_lock(&smux.mutex_lha0);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003205 if (smux.ld_open_count) {
3206 pr_err("%s: %p multiple instances not supported\n",
3207 __func__, tty);
Eric Holmberged1f00c2012-06-07 09:45:18 -06003208 mutex_unlock(&smux.mutex_lha0);
Eric Holmberg902c51e2012-05-29 12:12:16 -06003209 return -EEXIST;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003210 }
3211
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003212 if (tty->ops->write == NULL) {
Eric Holmberged1f00c2012-06-07 09:45:18 -06003213		pr_err("%s: tty->ops->write is NULL\n", __func__);
3214 mutex_unlock(&smux.mutex_lha0);
Eric Holmberg902c51e2012-05-29 12:12:16 -06003215 return -EINVAL;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003216 }
3217
3218 /* connect to TTY */
Eric Holmberged1f00c2012-06-07 09:45:18 -06003219 ++smux.ld_open_count;
3220 smux.in_reset = 0;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003221 smux.tty = tty;
3222 tty->disc_data = &smux;
3223 tty->receive_room = TTY_RECEIVE_ROOM;
3224 tty_driver_flush_buffer(tty);
3225
3226 /* power-down the UART if we are idle */
Eric Holmberged1f00c2012-06-07 09:45:18 -06003227 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003228 if (smux.power_state == SMUX_PWR_OFF) {
Eric Holmbergff0b0112012-06-08 15:06:57 -06003229 SMUX_PWR("%s: powering off uart\n", __func__);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003230 smux.power_state = SMUX_PWR_OFF_FLUSH;
Eric Holmberged1f00c2012-06-07 09:45:18 -06003231 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003232 queue_work(smux_tx_wq, &smux_inactivity_work);
3233 } else {
Eric Holmberged1f00c2012-06-07 09:45:18 -06003234 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003235 }
3236
3237 /* register platform devices */
3238 for (i = 0; i < ARRAY_SIZE(smux_devs); ++i) {
Eric Holmberged1f00c2012-06-07 09:45:18 -06003239 SMUX_DBG("%s: register pdev '%s'\n",
3240 __func__, smux_devs[i].name);
3241 smux_devs[i].dev.release = smux_pdev_release;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003242 tmp = platform_device_register(&smux_devs[i]);
3243 if (tmp)
3244 pr_err("%s: error %d registering device %s\n",
3245 __func__, tmp, smux_devs[i].name);
3246 }
Eric Holmberged1f00c2012-06-07 09:45:18 -06003247 mutex_unlock(&smux.mutex_lha0);
Eric Holmberg902c51e2012-05-29 12:12:16 -06003248 return 0;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003249}
3250
3251static void smuxld_close(struct tty_struct *tty)
3252{
3253 unsigned long flags;
Eric Holmberged1f00c2012-06-07 09:45:18 -06003254 int power_up_uart = 0;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003255 int i;
3256
Eric Holmberged1f00c2012-06-07 09:45:18 -06003257 SMUX_DBG("%s: ldisc unload\n", __func__);
3258 mutex_lock(&smux.mutex_lha0);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003259 if (smux.ld_open_count <= 0) {
3260 pr_err("%s: invalid ld count %d\n", __func__,
3261 smux.ld_open_count);
Eric Holmberged1f00c2012-06-07 09:45:18 -06003262 mutex_unlock(&smux.mutex_lha0);
Eric Holmberg902c51e2012-05-29 12:12:16 -06003263 return;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003264 }
Eric Holmberged1f00c2012-06-07 09:45:18 -06003265 smux.in_reset = 1;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003266 --smux.ld_open_count;
Eric Holmberged1f00c2012-06-07 09:45:18 -06003267
3268 /* Cleanup channels */
3269 smux_lch_purge();
3270
3271 /* Unregister platform devices */
3272 for (i = 0; i < ARRAY_SIZE(smux_devs); ++i) {
3273 SMUX_DBG("%s: unregister pdev '%s'\n",
3274 __func__, smux_devs[i].name);
3275 platform_device_unregister(&smux_devs[i]);
3276 }
3277
3278 /* Schedule UART power-up if it's down */
3279 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
Eric Holmbergffddd4c2012-06-08 12:37:51 -06003280 if (smux.power_state == SMUX_PWR_OFF)
Eric Holmberged1f00c2012-06-07 09:45:18 -06003281 power_up_uart = 1;
Eric Holmbergffddd4c2012-06-08 12:37:51 -06003282 smux.power_state = SMUX_PWR_OFF;
Eric Holmbergd2697902012-06-15 09:58:46 -06003283 smux.powerdown_enabled = 0;
Eric Holmberged1f00c2012-06-07 09:45:18 -06003284 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
3285
3286 if (power_up_uart)
Eric Holmberg92a67df2012-06-25 13:56:24 -06003287 smux_uart_power_on_atomic();
Eric Holmberged1f00c2012-06-07 09:45:18 -06003288
3289 /* Disconnect from TTY */
3290 smux.tty = NULL;
3291 mutex_unlock(&smux.mutex_lha0);
3292 SMUX_DBG("%s: ldisc complete\n", __func__);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003293}
3294
3295/**
3296 * Receive data from TTY Line Discipline.
3297 *
3298 * @tty TTY structure
3299 * @cp Character data
3300 * @fp Flag data
3301 * @count Size of character and flag data
3302 */
3303void smuxld_receive_buf(struct tty_struct *tty, const unsigned char *cp,
3304 char *fp, int count)
3305{
3306 int i;
3307 int last_idx = 0;
3308 const char *tty_name = NULL;
3309 char *f;
3310
3311 if (smux_debug_mask & MSM_SMUX_DEBUG)
3312 print_hex_dump(KERN_INFO, "smux tty rx: ", DUMP_PREFIX_OFFSET,
3313 16, 1, cp, count, true);
3314
3315 /* verify error flags */
3316 for (i = 0, f = fp; i < count; ++i, ++f) {
3317 if (*f != TTY_NORMAL) {
3318 if (tty)
3319 tty_name = tty->name;
3320 pr_err("%s: TTY %s Error %d (%s)\n", __func__,
3321 tty_name, *f, tty_flag_to_str(*f));
3322
3323 /* feed all previous valid data to the parser */
3324 smux_rx_state_machine(cp + last_idx, i - last_idx,
3325 TTY_NORMAL);
3326
3327 /* feed bad data to parser */
3328 smux_rx_state_machine(cp + i, 1, *f);
3329 last_idx = i + 1;
3330 }
3331 }
3332
3333 /* feed data to RX state machine */
3334 smux_rx_state_machine(cp + last_idx, count - last_idx, TTY_NORMAL);
3335}
3336
3337static void smuxld_flush_buffer(struct tty_struct *tty)
3338{
3339 pr_err("%s: not supported\n", __func__);
3340}
3341
3342static ssize_t smuxld_chars_in_buffer(struct tty_struct *tty)
3343{
3344 pr_err("%s: not supported\n", __func__);
3345 return -ENODEV;
3346}
3347
3348static ssize_t smuxld_read(struct tty_struct *tty, struct file *file,
3349 unsigned char __user *buf, size_t nr)
3350{
3351 pr_err("%s: not supported\n", __func__);
3352 return -ENODEV;
3353}
3354
3355static ssize_t smuxld_write(struct tty_struct *tty, struct file *file,
3356 const unsigned char *buf, size_t nr)
3357{
3358 pr_err("%s: not supported\n", __func__);
3359 return -ENODEV;
3360}
3361
3362static int smuxld_ioctl(struct tty_struct *tty, struct file *file,
3363 unsigned int cmd, unsigned long arg)
3364{
3365 pr_err("%s: not supported\n", __func__);
3366 return -ENODEV;
3367}
3368
3369static unsigned int smuxld_poll(struct tty_struct *tty, struct file *file,
3370 struct poll_table_struct *tbl)
3371{
3372 pr_err("%s: not supported\n", __func__);
3373 return -ENODEV;
3374}
3375
3376static void smuxld_write_wakeup(struct tty_struct *tty)
3377{
3378 pr_err("%s: not supported\n", __func__);
3379}
3380
3381static struct tty_ldisc_ops smux_ldisc_ops = {
3382 .owner = THIS_MODULE,
3383 .magic = TTY_LDISC_MAGIC,
3384 .name = "n_smux",
3385 .open = smuxld_open,
3386 .close = smuxld_close,
3387 .flush_buffer = smuxld_flush_buffer,
3388 .chars_in_buffer = smuxld_chars_in_buffer,
3389 .read = smuxld_read,
3390 .write = smuxld_write,
3391 .ioctl = smuxld_ioctl,
3392 .poll = smuxld_poll,
3393 .receive_buf = smuxld_receive_buf,
3394 .write_wakeup = smuxld_write_wakeup
3395};
3396
3397static int __init smux_init(void)
3398{
3399 int ret;
3400
Eric Holmberged1f00c2012-06-07 09:45:18 -06003401 mutex_init(&smux.mutex_lha0);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003402
3403 spin_lock_init(&smux.rx_lock_lha1);
3404 smux.rx_state = SMUX_RX_IDLE;
3405 smux.power_state = SMUX_PWR_OFF;
3406 smux.pwr_wakeup_delay_us = 1;
3407 smux.powerdown_enabled = 0;
Eric Holmbergffddd4c2012-06-08 12:37:51 -06003408 INIT_LIST_HEAD(&smux.power_queue);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003409 smux.rx_activity_flag = 0;
3410 smux.tx_activity_flag = 0;
3411 smux.recv_len = 0;
3412 smux.tty = NULL;
3413 smux.ld_open_count = 0;
3414 smux.in_reset = 0;
3415 smux.is_initialized = 1;
3416 smux_byte_loopback = 0;
3417
3418 spin_lock_init(&smux.tx_lock_lha2);
3419 INIT_LIST_HEAD(&smux.lch_tx_ready_list);
3420
3421 ret = tty_register_ldisc(N_SMUX, &smux_ldisc_ops);
3422 if (ret != 0) {
3423 pr_err("%s: error %d registering line discipline\n",
3424 __func__, ret);
3425 return ret;
3426 }
3427
Eric Holmberg6c9f2a52012-06-14 10:49:04 -06003428 subsys_notif_register_notifier("external_modem", &ssr_notifier);
Eric Holmberged1f00c2012-06-07 09:45:18 -06003429
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003430 ret = lch_init();
3431 if (ret != 0) {
3432 pr_err("%s: lch_init failed\n", __func__);
3433 return ret;
3434 }
3435
3436 return 0;
3437}
3438
3439static void __exit smux_exit(void)
3440{
3441 int ret;
3442
3443 ret = tty_unregister_ldisc(N_SMUX);
3444 if (ret != 0) {
3445 pr_err("%s error %d unregistering line discipline\n",
3446 __func__, ret);
3447 return;
3448 }
3449}
3450
3451module_init(smux_init);
3452module_exit(smux_exit);
3453
3454MODULE_DESCRIPTION("Serial Mux TTY Line Discipline");
3455MODULE_LICENSE("GPL v2");
3456MODULE_ALIAS_LDISC(N_SMUX);