1/* drivers/tty/n_smux.c
2 *
3 * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
4 *
5 * This software is licensed under the terms of the GNU General Public
6 * License version 2, as published by the Free Software Foundation, and
7 * may be copied, distributed, and modified under those terms.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 */
15#include <linux/module.h>
16#include <linux/types.h>
17#include <linux/errno.h>
18#include <linux/tty.h>
19#include <linux/tty_flip.h>
20#include <linux/tty_driver.h>
21#include <linux/smux.h>
22#include <linux/list.h>
23#include <linux/kfifo.h>
24#include <linux/slab.h>
25#include <linux/types.h>
26#include <linux/platform_device.h>
27#include <linux/delay.h>
28#include <mach/subsystem_notif.h>
29#include <mach/subsystem_restart.h>
30#include <mach/msm_serial_hs.h>
31#include "smux_private.h"
32#include "smux_loopback.h"
33
34#define SMUX_NOTIFY_FIFO_SIZE 128
35#define SMUX_TX_QUEUE_SIZE 256
36#define SMUX_WM_LOW 2
37#define SMUX_WM_HIGH 4
38#define SMUX_PKT_LOG_SIZE 80
39
40/* Maximum size we can accept in a single RX buffer */
41#define TTY_RECEIVE_ROOM 65536
42#define TTY_BUFFER_FULL_WAIT_MS 50
43
44/* maximum sleep time between wakeup attempts */
45#define SMUX_WAKEUP_DELAY_MAX (1 << 20)
46
47/* minimum delay for scheduling delayed work */
48#define SMUX_WAKEUP_DELAY_MIN (1 << 15)
49
50/* inactivity timeout for no rx/tx activity */
51#define SMUX_INACTIVITY_TIMEOUT_MS 1000
52
53/* RX get_rx_buffer retry timeout values */
54#define SMUX_RX_RETRY_MIN_MS (1 << 0) /* 1 ms */
55#define SMUX_RX_RETRY_MAX_MS (1 << 10) /* 1024 ms */
56
57enum {
58 MSM_SMUX_DEBUG = 1U << 0,
59 MSM_SMUX_INFO = 1U << 1,
60 MSM_SMUX_POWER_INFO = 1U << 2,
61 MSM_SMUX_PKT = 1U << 3,
62};
63
64static int smux_debug_mask;
65module_param_named(debug_mask, smux_debug_mask,
66 int, S_IRUGO | S_IWUSR | S_IWGRP);
67
68/* Simulated wakeup used for testing */
69int smux_byte_loopback;
70module_param_named(byte_loopback, smux_byte_loopback,
71 int, S_IRUGO | S_IWUSR | S_IWGRP);
72int smux_simulate_wakeup_delay = 1;
73module_param_named(simulate_wakeup_delay, smux_simulate_wakeup_delay,
74 int, S_IRUGO | S_IWUSR | S_IWGRP);
75
76#define SMUX_DBG(x...) do { \
77 if (smux_debug_mask & MSM_SMUX_DEBUG) \
78 pr_info(x); \
79} while (0)
80
81#define SMUX_PWR(x...) do { \
82 if (smux_debug_mask & MSM_SMUX_POWER_INFO) \
83 pr_info(x); \
84} while (0)
85
86#define SMUX_LOG_PKT_RX(pkt) do { \
87 if (smux_debug_mask & MSM_SMUX_PKT) \
88 smux_log_pkt(pkt, 1); \
89} while (0)
90
91#define SMUX_LOG_PKT_TX(pkt) do { \
92 if (smux_debug_mask & MSM_SMUX_PKT) \
93 smux_log_pkt(pkt, 0); \
94} while (0)
95
96/**
97 * Return true if channel is fully opened (both
98 * local and remote sides are in the OPENED state).
99 */
100#define IS_FULLY_OPENED(ch) \
101 (ch && (ch)->local_state == SMUX_LCH_LOCAL_OPENED \
102 && (ch)->remote_state == SMUX_LCH_REMOTE_OPENED)
103
104static struct platform_device smux_devs[] = {
105 {.name = "SMUX_CTL", .id = -1},
106 {.name = "SMUX_RMNET", .id = -1},
107 {.name = "SMUX_DUN_DATA_HSUART", .id = 0},
108 {.name = "SMUX_RMNET_DATA_HSUART", .id = 1},
109 {.name = "SMUX_RMNET_CTL_HSUART", .id = 0},
110 {.name = "SMUX_DIAG", .id = -1},
111};
112
113enum {
114 SMUX_CMD_STATUS_RTC = 1 << 0,
115 SMUX_CMD_STATUS_RTR = 1 << 1,
116 SMUX_CMD_STATUS_RI = 1 << 2,
117 SMUX_CMD_STATUS_DCD = 1 << 3,
118 SMUX_CMD_STATUS_FLOW_CNTL = 1 << 4,
119};
120
121/* Channel mode */
122enum {
123 SMUX_LCH_MODE_NORMAL,
124 SMUX_LCH_MODE_LOCAL_LOOPBACK,
125 SMUX_LCH_MODE_REMOTE_LOOPBACK,
126};
127
128enum {
129 SMUX_RX_IDLE,
130 SMUX_RX_MAGIC,
131 SMUX_RX_HDR,
132 SMUX_RX_PAYLOAD,
133 SMUX_RX_FAILURE,
134};
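/*
 * Typical progression of the RX states above, as driven by the
 * smux_rx_handle_*() helpers later in this file:
 *
 *   SMUX_RX_IDLE    -> SMUX_RX_MAGIC    (SMUX_MAGIC_WORD1 seen)
 *   SMUX_RX_MAGIC   -> SMUX_RX_HDR      (SMUX_MAGIC_WORD2 seen)
 *   SMUX_RX_HDR     -> SMUX_RX_PAYLOAD  (full smux_hdr_t received)
 *   SMUX_RX_PAYLOAD -> SMUX_RX_IDLE     (payload and padding consumed)
 *
 * A TTY error reported in any state past IDLE moves the machine to
 * SMUX_RX_FAILURE via smux_enter_reset().
 */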
135
136/**
137 * Power states.
138 *
139 * The _FLUSH states are internal transitional states and are not part of the
140 * official state machine.
141 */
142enum {
143 SMUX_PWR_OFF,
144 SMUX_PWR_TURNING_ON,
145 SMUX_PWR_ON,
146 SMUX_PWR_TURNING_OFF_FLUSH,
147 SMUX_PWR_TURNING_OFF,
148 SMUX_PWR_OFF_FLUSH,
149};
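/*
 * Typical power sequence, sketched from the handlers below:
 *
 *   SMUX_PWR_OFF -> SMUX_PWR_TURNING_ON -> SMUX_PWR_ON            (wakeup)
 *   SMUX_PWR_ON  -> SMUX_PWR_TURNING_OFF_FLUSH -> SMUX_PWR_TURNING_OFF
 *                -> SMUX_PWR_OFF_FLUSH                            (power-down)
 *
 * The remaining OFF_FLUSH -> OFF step is expected to be completed by the
 * inactivity worker once the UART has been flushed.
 */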
150
151/**
152 * Logical Channel Structure. One instance per channel.
153 *
154 * Locking Hierarchy
155 * Each lock has a postfix that describes the locking level. If multiple locks
156 * are required, only locks with increasing hierarchy numbers may be taken,
157 * which avoids deadlock.
158 *
159 * Locking Example
160 * If state_lock_lhb1 is currently held and the TX list needs to be
161 * manipulated, then tx_lock_lhb2 may be locked since its locking hierarchy
162 * is greater. However, if tx_lock_lhb2 is held, then state_lock_lhb1 may
163 * not be acquired since it would result in a deadlock.
164 *
165 * Note that the Line Discipline locks (*_lha) should always be acquired
166 * before the logical channel locks.
167 */
168struct smux_lch_t {
169 /* channel state */
170 spinlock_t state_lock_lhb1;
171 uint8_t lcid;
172 unsigned local_state;
173 unsigned local_mode;
174 uint8_t local_tiocm;
175
176 unsigned remote_state;
177 unsigned remote_mode;
178 uint8_t remote_tiocm;
179
180 int tx_flow_control;
181
182 /* client callbacks and private data */
183 void *priv;
184 void (*notify)(void *priv, int event_type, const void *metadata);
185 int (*get_rx_buffer)(void *priv, void **pkt_priv, void **buffer,
186 int size);
187
188 /* RX Info */
189 struct list_head rx_retry_queue;
190 unsigned rx_retry_queue_cnt;
191 struct delayed_work rx_retry_work;
192
193 /* TX Info */
194 spinlock_t tx_lock_lhb2;
195 struct list_head tx_queue;
196 struct list_head tx_ready_list;
197 unsigned tx_pending_data_cnt;
198 unsigned notify_lwm;
199};
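/*
 * Illustrative lock-nesting sketch for the hierarchy described above
 * (hypothetical fragment; it mirrors the pattern used in smux_lch_purge()):
 *
 *	spin_lock_irqsave(&ch->state_lock_lhb1, flags);
 *	spin_lock(&ch->tx_lock_lhb2);
 *	... manipulate ch->tx_queue ...
 *	spin_unlock(&ch->tx_lock_lhb2);
 *	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
 *
 * Taking state_lock_lhb1 while tx_lock_lhb2 is already held would invert
 * the hierarchy and risk deadlock.
 */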
200
201union notifier_metadata {
202 struct smux_meta_disconnected disconnected;
203 struct smux_meta_read read;
204 struct smux_meta_write write;
205 struct smux_meta_tiocm tiocm;
206};
207
208struct smux_notify_handle {
209 void (*notify)(void *priv, int event_type, const void *metadata);
210 void *priv;
211 int event_type;
212 union notifier_metadata *metadata;
213};
214
215/**
216 * Get RX Buffer Retry structure.
217 *
218 * This is used for clients that are unable to provide an RX buffer
219 * immediately. This temporary structure will be used to temporarily hold the
220 * data and perform a retry.
221 */
222struct smux_rx_pkt_retry {
223 struct smux_pkt_t *pkt;
224 struct list_head rx_retry_list;
225 unsigned timeout_in_ms;
226};
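/*
 * Hypothetical client get_rx_buffer() callback (not part of this driver)
 * showing how the retry path is triggered:
 *
 *	static int client_get_rx_buffer(void *priv, void **pkt_priv,
 *					void **buffer, int size)
 *	{
 *		void *buf = kmalloc(size, GFP_ATOMIC);
 *
 *		if (!buf)
 *			return -EAGAIN;
 *		*pkt_priv = NULL;
 *		*buffer = buf;
 *		return 0;
 *	}
 *
 * Returning -EAGAIN (or 0 with a NULL buffer) causes the packet to be
 * copied onto rx_retry_queue and re-attempted later by
 * smux_rx_retry_worker().
 */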
227
228/**
229 * Receive worker data structure.
230 *
231 * One instance is created for every call to smux_rx_state_machine.
232 */
233struct smux_rx_worker_data {
234 const unsigned char *data;
235 int len;
236 int flag;
237
238 struct work_struct work;
239 struct completion work_complete;
240};
241
242/**
243 * Line discipline and module structure.
244 *
245 * Only one instance since multiple instances of line discipline are not
246 * allowed.
247 */
248struct smux_ldisc_t {
249 struct mutex mutex_lha0;
250
251 int is_initialized;
252 int in_reset;
253 int ld_open_count;
254 struct tty_struct *tty;
255
256 /* RX State Machine (single-threaded access by smux_rx_wq) */
257 unsigned char recv_buf[SMUX_MAX_PKT_SIZE];
258 unsigned int recv_len;
259 unsigned int pkt_remain;
260 unsigned rx_state;
261
262 /* RX Activity - accessed by multiple threads */
263 spinlock_t rx_lock_lha1;
264 unsigned rx_activity_flag;
265
266 /* TX / Power */
267 spinlock_t tx_lock_lha2;
268 struct list_head lch_tx_ready_list;
269 unsigned power_state;
270 unsigned pwr_wakeup_delay_us;
271 unsigned tx_activity_flag;
272 unsigned powerdown_enabled;
273 struct list_head power_queue;
274};
275
276
277/* data structures */
278static struct smux_lch_t smux_lch[SMUX_NUM_LOGICAL_CHANNELS];
279static struct smux_ldisc_t smux;
280static const char *tty_error_type[] = {
281 [TTY_NORMAL] = "normal",
282 [TTY_OVERRUN] = "overrun",
283 [TTY_BREAK] = "break",
284 [TTY_PARITY] = "parity",
285 [TTY_FRAME] = "framing",
286};
287
288static const char *smux_cmds[] = {
289 [SMUX_CMD_DATA] = "DATA",
290 [SMUX_CMD_OPEN_LCH] = "OPEN",
291 [SMUX_CMD_CLOSE_LCH] = "CLOSE",
292 [SMUX_CMD_STATUS] = "STATUS",
293 [SMUX_CMD_PWR_CTL] = "PWR",
294 [SMUX_CMD_BYTE] = "Raw Byte",
295};
296
297static void smux_notify_local_fn(struct work_struct *work);
298static DECLARE_WORK(smux_notify_local, smux_notify_local_fn);
299
300static struct workqueue_struct *smux_notify_wq;
301static size_t handle_size;
302static struct kfifo smux_notify_fifo;
303static int queued_fifo_notifications;
304static DEFINE_SPINLOCK(notify_lock_lhc1);
305
306static struct workqueue_struct *smux_tx_wq;
307static struct workqueue_struct *smux_rx_wq;
308static void smux_tx_worker(struct work_struct *work);
309static DECLARE_WORK(smux_tx_work, smux_tx_worker);
310
311static void smux_wakeup_worker(struct work_struct *work);
312static void smux_rx_retry_worker(struct work_struct *work);
313static void smux_rx_worker(struct work_struct *work);
314static DECLARE_WORK(smux_wakeup_work, smux_wakeup_worker);
315static DECLARE_DELAYED_WORK(smux_wakeup_delayed_work, smux_wakeup_worker);
316
317static void smux_inactivity_worker(struct work_struct *work);
318static DECLARE_WORK(smux_inactivity_work, smux_inactivity_worker);
319static DECLARE_DELAYED_WORK(smux_delayed_inactivity_work,
320 smux_inactivity_worker);
321
322static long msm_smux_tiocm_get_atomic(struct smux_lch_t *ch);
323static void list_channel(struct smux_lch_t *ch);
324static int smux_send_status_cmd(struct smux_lch_t *ch);
325static int smux_dispatch_rx_pkt(struct smux_pkt_t *pkt);
326static void smux_flush_tty(void);
327static void smux_purge_ch_tx_queue(struct smux_lch_t *ch);
328static int schedule_notify(uint8_t lcid, int event,
329 const union notifier_metadata *metadata);
330static int ssr_notifier_cb(struct notifier_block *this,
331 unsigned long code,
332 void *data);
333
334/**
335 * Convert TTY Error Flags to string for logging purposes.
336 *
337 * @flag TTY_* flag
338 * @returns String description or NULL if unknown
339 */
340static const char *tty_flag_to_str(unsigned flag)
341{
342 if (flag < ARRAY_SIZE(tty_error_type))
343 return tty_error_type[flag];
344 return NULL;
345}
346
347/**
348 * Convert SMUX Command to string for logging purposes.
349 *
350 * @cmd SMUX command
351 * @returns String description or NULL if unknown
352 */
353static const char *cmd_to_str(unsigned cmd)
354{
355 if (cmd < ARRAY_SIZE(smux_cmds))
356 return smux_cmds[cmd];
357 return NULL;
358}
359
360/**
361 * Set the reset state due to an unrecoverable failure.
362 */
363static void smux_enter_reset(void)
364{
365 pr_err("%s: unrecoverable failure, waiting for ssr\n", __func__);
366 smux.in_reset = 1;
367}
368
369static int lch_init(void)
370{
371 unsigned int id;
372 struct smux_lch_t *ch;
373 int i = 0;
374
375 handle_size = sizeof(struct smux_notify_handle *);
376
377 smux_notify_wq = create_singlethread_workqueue("smux_notify_wq");
378 smux_tx_wq = create_singlethread_workqueue("smux_tx_wq");
379 smux_rx_wq = create_singlethread_workqueue("smux_rx_wq");
380
381 if (IS_ERR(smux_notify_wq) || IS_ERR(smux_tx_wq)) {
382 SMUX_DBG("%s: create_singlethread_workqueue ENOMEM\n",
383 __func__);
384 return -ENOMEM;
385 }
386
387 i |= kfifo_alloc(&smux_notify_fifo,
388 SMUX_NOTIFY_FIFO_SIZE * handle_size,
389 GFP_KERNEL);
390 i |= smux_loopback_init();
391
392 if (i) {
393 pr_err("%s: out of memory error\n", __func__);
394 return -ENOMEM;
395 }
396
397 for (id = 0 ; id < SMUX_NUM_LOGICAL_CHANNELS; id++) {
398 ch = &smux_lch[id];
399
400 spin_lock_init(&ch->state_lock_lhb1);
401 ch->lcid = id;
402 ch->local_state = SMUX_LCH_LOCAL_CLOSED;
403 ch->local_mode = SMUX_LCH_MODE_NORMAL;
404 ch->local_tiocm = 0x0;
405 ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
406 ch->remote_mode = SMUX_LCH_MODE_NORMAL;
407 ch->remote_tiocm = 0x0;
408 ch->tx_flow_control = 0;
409 ch->priv = 0;
410 ch->notify = 0;
411 ch->get_rx_buffer = 0;
412
413 INIT_LIST_HEAD(&ch->rx_retry_queue);
414 ch->rx_retry_queue_cnt = 0;
415 INIT_DELAYED_WORK(&ch->rx_retry_work, smux_rx_retry_worker);
416
417 spin_lock_init(&ch->tx_lock_lhb2);
418 INIT_LIST_HEAD(&ch->tx_queue);
419 INIT_LIST_HEAD(&ch->tx_ready_list);
420 ch->tx_pending_data_cnt = 0;
421 ch->notify_lwm = 0;
422 }
423
424 return 0;
425}
426
427/**
428 * Empty and clean up all SMUX logical channels for subsystem restart or line
429 * discipline disconnect.
430 */
431static void smux_lch_purge(void)
432{
433 struct smux_lch_t *ch;
434 unsigned long flags;
435 int i;
436
437 /* Empty TX ready list */
438 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
439 while (!list_empty(&smux.lch_tx_ready_list)) {
440 SMUX_DBG("%s: emptying ready list %p\n",
441 __func__, smux.lch_tx_ready_list.next);
442 ch = list_first_entry(&smux.lch_tx_ready_list,
443 struct smux_lch_t,
444 tx_ready_list);
445 list_del(&ch->tx_ready_list);
446 INIT_LIST_HEAD(&ch->tx_ready_list);
447 }
448
449 /* Purge Power Queue */
450 while (!list_empty(&smux.power_queue)) {
451 struct smux_pkt_t *pkt;
452
453 pkt = list_first_entry(&smux.power_queue,
454 struct smux_pkt_t,
455 list);
456 SMUX_DBG("%s: emptying power queue pkt=%p\n",
457 __func__, pkt);
458 smux_free_pkt(pkt);
459 }
460 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
461
462 /* Close all ports */
463 for (i = 0 ; i < SMUX_NUM_LOGICAL_CHANNELS; i++) {
464 ch = &smux_lch[i];
465 SMUX_DBG("%s: cleaning up lcid %d\n", __func__, i);
466
467 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
468
469 /* Purge TX queue */
470 spin_lock(&ch->tx_lock_lhb2);
471 smux_purge_ch_tx_queue(ch);
472 spin_unlock(&ch->tx_lock_lhb2);
473
474 /* Notify user of disconnect and reset channel state */
475 if (ch->local_state == SMUX_LCH_LOCAL_OPENED ||
476 ch->local_state == SMUX_LCH_LOCAL_CLOSING) {
477 union notifier_metadata meta;
478
479 meta.disconnected.is_ssr = smux.in_reset;
480 schedule_notify(ch->lcid, SMUX_DISCONNECTED, &meta);
481 }
482
483 ch->local_state = SMUX_LCH_LOCAL_CLOSED;
484 ch->local_mode = SMUX_LCH_MODE_NORMAL;
485 ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
486 ch->remote_mode = SMUX_LCH_MODE_NORMAL;
487 ch->tx_flow_control = 0;
488
489 /* Purge RX retry queue */
490 if (ch->rx_retry_queue_cnt)
491 queue_delayed_work(smux_rx_wq, &ch->rx_retry_work, 0);
492
493 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
494 }
495
496 /* Flush TX/RX workqueues */
497 SMUX_DBG("%s: flushing tx wq\n", __func__);
498 flush_workqueue(smux_tx_wq);
499 SMUX_DBG("%s: flushing rx wq\n", __func__);
500 flush_workqueue(smux_rx_wq);
501}
502
503int smux_assert_lch_id(uint32_t lcid)
504{
505 if (lcid >= SMUX_NUM_LOGICAL_CHANNELS)
506 return -ENXIO;
507 else
508 return 0;
509}
510
511/**
512 * Log packet information for debug purposes.
513 *
514 * @pkt Packet to log
515 * @is_recv 1 = RX packet; 0 = TX Packet
516 *
517 * [DIR][LCID] [LOCAL_STATE][LOCAL_MODE]:[REMOTE_STATE][REMOTE_MODE] PKT Info
518 *
519 * PKT Info:
520 * [CMD] flags [flags] len [PAYLOAD_LEN]:[PAD_LEN] [Payload hex bytes]
521 *
522 * Direction: R = Receive, S = Send
523 * Local State: C = Closed; c = closing; o = opening; O = Opened
524 * Local Mode: L = Local loopback; R = Remote loopback; N = Normal
525 * Remote State: C = Closed; O = Opened
526 * Remote Mode: R = Remote loopback; N = Normal
527 */
528static void smux_log_pkt(struct smux_pkt_t *pkt, int is_recv)
529{
530 char logbuf[SMUX_PKT_LOG_SIZE];
531 char cmd_extra[16];
532 int i = 0;
533 int count;
534 int len;
535 char local_state;
536 char local_mode;
537 char remote_state;
538 char remote_mode;
539 struct smux_lch_t *ch;
540 unsigned char *data;
541
542 ch = &smux_lch[pkt->hdr.lcid];
543
544 switch (ch->local_state) {
545 case SMUX_LCH_LOCAL_CLOSED:
546 local_state = 'C';
547 break;
548 case SMUX_LCH_LOCAL_OPENING:
549 local_state = 'o';
550 break;
551 case SMUX_LCH_LOCAL_OPENED:
552 local_state = 'O';
553 break;
554 case SMUX_LCH_LOCAL_CLOSING:
555 local_state = 'c';
556 break;
557 default:
558 local_state = 'U';
559 break;
560 }
561
562 switch (ch->local_mode) {
563 case SMUX_LCH_MODE_LOCAL_LOOPBACK:
564 local_mode = 'L';
565 break;
566 case SMUX_LCH_MODE_REMOTE_LOOPBACK:
567 local_mode = 'R';
568 break;
569 case SMUX_LCH_MODE_NORMAL:
570 local_mode = 'N';
571 break;
572 default:
573 local_mode = 'U';
574 break;
575 }
576
577 switch (ch->remote_state) {
578 case SMUX_LCH_REMOTE_CLOSED:
579 remote_state = 'C';
580 break;
581 case SMUX_LCH_REMOTE_OPENED:
582 remote_state = 'O';
583 break;
584
585 default:
586 remote_state = 'U';
587 break;
588 }
589
590 switch (ch->remote_mode) {
591 case SMUX_LCH_MODE_REMOTE_LOOPBACK:
592 remote_mode = 'R';
593 break;
594 case SMUX_LCH_MODE_NORMAL:
595 remote_mode = 'N';
596 break;
597 default:
598 remote_mode = 'U';
599 break;
600 }
601
602 /* determine command type (ACK, etc) */
603 cmd_extra[0] = '\0';
604 switch (pkt->hdr.cmd) {
605 case SMUX_CMD_OPEN_LCH:
606 if (pkt->hdr.flags & SMUX_CMD_OPEN_ACK)
607 snprintf(cmd_extra, sizeof(cmd_extra), " ACK");
608 break;
609 case SMUX_CMD_CLOSE_LCH:
610 if (pkt->hdr.flags & SMUX_CMD_CLOSE_ACK)
611 snprintf(cmd_extra, sizeof(cmd_extra), " ACK");
612 break;
613 };
614
615 i += snprintf(logbuf + i, SMUX_PKT_LOG_SIZE - i,
616 "smux: %c%d %c%c:%c%c %s%s flags %x len %d:%d ",
617 is_recv ? 'R' : 'S', pkt->hdr.lcid,
618 local_state, local_mode,
619 remote_state, remote_mode,
620 cmd_to_str(pkt->hdr.cmd), cmd_extra, pkt->hdr.flags,
621 pkt->hdr.payload_len, pkt->hdr.pad_len);
622
623 len = (pkt->hdr.payload_len > 16) ? 16 : pkt->hdr.payload_len;
624 data = (unsigned char *)pkt->payload;
625 for (count = 0; count < len; count++)
626 i += snprintf(logbuf + i, SMUX_PKT_LOG_SIZE - i,
627 "%02x ", (unsigned)data[count]);
628
629 pr_info("%s\n", logbuf);
630}
631
632static void smux_notify_local_fn(struct work_struct *work)
633{
634 struct smux_notify_handle *notify_handle = NULL;
635 union notifier_metadata *metadata = NULL;
636 unsigned long flags;
637 int i;
638
639 for (;;) {
640 /* retrieve notification */
641 spin_lock_irqsave(&notify_lock_lhc1, flags);
642 if (kfifo_len(&smux_notify_fifo) >= handle_size) {
643 i = kfifo_out(&smux_notify_fifo,
644 &notify_handle,
645 handle_size);
646 if (i != handle_size) {
647 pr_err("%s: unable to retrieve handle %d expected %d\n",
648 __func__, i, handle_size);
649 spin_unlock_irqrestore(&notify_lock_lhc1, flags);
650 break;
651 }
652 } else {
653 spin_unlock_irqrestore(&notify_lock_lhc1, flags);
654 break;
655 }
656 --queued_fifo_notifications;
657 spin_unlock_irqrestore(&notify_lock_lhc1, flags);
658
659 /* notify client */
660 metadata = notify_handle->metadata;
661 notify_handle->notify(notify_handle->priv,
662 notify_handle->event_type,
663 metadata);
664
665 kfree(metadata);
666 kfree(notify_handle);
667 }
668}
669
670/**
671 * Initialize existing packet.
672 */
673void smux_init_pkt(struct smux_pkt_t *pkt)
674{
675 memset(pkt, 0x0, sizeof(*pkt));
676 pkt->hdr.magic = SMUX_MAGIC;
677 INIT_LIST_HEAD(&pkt->list);
678}
679
680/**
681 * Allocate and initialize packet.
682 *
683 * If a payload is needed, either set it directly and ensure that it's freed or
684 * use smux_alloc_pkt_payload() to allocate the payload and it will be freed
685 * automatically when smux_free_pkt() is called.
686 */
687struct smux_pkt_t *smux_alloc_pkt(void)
688{
689 struct smux_pkt_t *pkt;
690
691 /* Consider a free list implementation instead of kmalloc */
692 pkt = kmalloc(sizeof(struct smux_pkt_t), GFP_ATOMIC);
693 if (!pkt) {
694 pr_err("%s: out of memory\n", __func__);
695 return NULL;
696 }
697 smux_init_pkt(pkt);
698 pkt->allocated = 1;
699
700 return pkt;
701}
702
703/**
704 * Free packet.
705 *
706 * @pkt Packet to free (may be NULL)
707 *
708 * If payload was allocated using smux_alloc_pkt_payload(), then it is freed as
709 * well. Otherwise, the caller is responsible for freeing the payload.
710 */
711void smux_free_pkt(struct smux_pkt_t *pkt)
712{
713 if (pkt) {
714 if (pkt->free_payload)
715 kfree(pkt->payload);
716 if (pkt->allocated)
717 kfree(pkt);
718 }
719}
720
721/**
722 * Allocate packet payload.
723 *
724 * @pkt Packet to add payload to
725 *
726 * @returns 0 on success, <0 upon error
727 *
728 * A flag is set to signal smux_free_pkt() to free the payload.
729 */
730int smux_alloc_pkt_payload(struct smux_pkt_t *pkt)
731{
732 if (!pkt)
733 return -EINVAL;
734
735 pkt->payload = kmalloc(pkt->hdr.payload_len, GFP_ATOMIC);
736 pkt->free_payload = 1;
737 if (!pkt->payload) {
738 pr_err("%s: unable to malloc %d bytes for payload\n",
739 __func__, pkt->hdr.payload_len);
740 return -ENOMEM;
741 }
742
743 return 0;
744}
745
746static int schedule_notify(uint8_t lcid, int event,
747 const union notifier_metadata *metadata)
748{
749 struct smux_notify_handle *notify_handle = 0;
750 union notifier_metadata *meta_copy = 0;
751 struct smux_lch_t *ch;
752 int i;
753 unsigned long flags;
754 int ret = 0;
755
756 ch = &smux_lch[lcid];
757 notify_handle = kzalloc(sizeof(struct smux_notify_handle),
758 GFP_ATOMIC);
759 if (!notify_handle) {
760 pr_err("%s: out of memory\n", __func__);
761 ret = -ENOMEM;
762 goto free_out;
763 }
764
765 notify_handle->notify = ch->notify;
766 notify_handle->priv = ch->priv;
767 notify_handle->event_type = event;
768 if (metadata) {
769 meta_copy = kzalloc(sizeof(union notifier_metadata),
770 GFP_ATOMIC);
771 if (!meta_copy) {
772 pr_err("%s: out of memory\n", __func__);
773 ret = -ENOMEM;
774 goto free_out;
775 }
776 *meta_copy = *metadata;
777 notify_handle->metadata = meta_copy;
778 } else {
779 notify_handle->metadata = NULL;
780 }
781
782 spin_lock_irqsave(&notify_lock_lhc1, flags);
783 i = kfifo_avail(&smux_notify_fifo);
784 if (i < handle_size) {
785 pr_err("%s: fifo full error %d expected %d\n",
786 __func__, i, handle_size);
787 ret = -ENOMEM;
788 goto unlock_out;
789 }
790
791 i = kfifo_in(&smux_notify_fifo, &notify_handle, handle_size);
792 if (i < 0 || i != handle_size) {
793 pr_err("%s: fifo not available error %d (expected %d)\n",
794 __func__, i, handle_size);
795 ret = -ENOSPC;
796 goto unlock_out;
797 }
798 ++queued_fifo_notifications;
799
800unlock_out:
801 spin_unlock_irqrestore(&notify_lock_lhc1, flags);
802
803free_out:
804 queue_work(smux_notify_wq, &smux_notify_local);
805 if (ret < 0 && notify_handle) {
806 kfree(notify_handle->metadata);
807 kfree(notify_handle);
808 }
809 return ret;
810}
811
812/**
813 * Returns the serialized size of a packet.
814 *
815 * @pkt Packet to serialize
816 *
817 * @returns Serialized length of packet
818 */
819static unsigned int smux_serialize_size(struct smux_pkt_t *pkt)
820{
821 unsigned int size;
822
823 size = sizeof(struct smux_hdr_t);
824 size += pkt->hdr.payload_len;
825 size += pkt->hdr.pad_len;
826
827 return size;
828}
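/*
 * Example (hypothetical sizes): a packet with payload_len = 10 and
 * pad_len = 2 serializes to sizeof(struct smux_hdr_t) + 12 bytes.
 */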
829
830/**
831 * Serialize packet @pkt into output buffer @data.
832 *
833 * @pkt Packet to serialize
834 * @out Destination buffer pointer
835 * @out_len Size of serialized packet
836 *
837 * @returns 0 for success
838 */
839int smux_serialize(struct smux_pkt_t *pkt, char *out,
840 unsigned int *out_len)
841{
842 char *data_start = out;
843
844 if (smux_serialize_size(pkt) > SMUX_MAX_PKT_SIZE) {
845 pr_err("%s: packet size %d too big\n",
846 __func__, smux_serialize_size(pkt));
847 return -E2BIG;
848 }
849
850 memcpy(out, &pkt->hdr, sizeof(struct smux_hdr_t));
851 out += sizeof(struct smux_hdr_t);
852 if (pkt->payload) {
853 memcpy(out, pkt->payload, pkt->hdr.payload_len);
854 out += pkt->hdr.payload_len;
855 }
856 if (pkt->hdr.pad_len) {
857 memset(out, 0x0, pkt->hdr.pad_len);
858 out += pkt->hdr.pad_len;
859 }
860 *out_len = out - data_start;
861 return 0;
862}
863
864/**
865 * Serialize header and provide pointer to the data.
866 *
867 * @pkt Packet
868 * @out[out] Pointer to the serialized header data
869 * @out_len[out] Pointer to the serialized header length
870 */
871static void smux_serialize_hdr(struct smux_pkt_t *pkt, char **out,
872 unsigned int *out_len)
873{
874 *out = (char *)&pkt->hdr;
875 *out_len = sizeof(struct smux_hdr_t);
876}
877
878/**
879 * Serialize payload and provide pointer to the data.
880 *
881 * @pkt Packet
882 * @out[out] Pointer to the serialized payload data
883 * @out_len[out] Pointer to the serialized payload length
884 */
885static void smux_serialize_payload(struct smux_pkt_t *pkt, char **out,
886 unsigned int *out_len)
887{
888 *out = pkt->payload;
889 *out_len = pkt->hdr.payload_len;
890}
891
892/**
893 * Serialize padding and provide pointer to the data.
894 *
895 * @pkt Packet
896 * @out[out] Pointer to the serialized padding (always NULL)
897 * @out_len[out] Pointer to the serialized payload length
898 *
899 * Since the padding field value is undefined, only the size of the padding
900 * (@out_len) is set and the buffer pointer (@out) will always be NULL.
901 */
902static void smux_serialize_padding(struct smux_pkt_t *pkt, char **out,
903 unsigned int *out_len)
904{
905 *out = NULL;
906 *out_len = pkt->hdr.pad_len;
907}
908
909/**
910 * Write data to TTY framework and handle breaking the writes up if needed.
911 *
912 * @data Data to write
913 * @len Length of data
914 *
915 * @returns 0 for success, < 0 for failure
916 */
917static int write_to_tty(char *data, unsigned len)
918{
919 int data_written;
920
921 if (!data)
922 return 0;
923
924 while (len > 0 && !smux.in_reset) {
925 data_written = smux.tty->ops->write(smux.tty, data, len);
926 if (data_written >= 0) {
927 len -= data_written;
928 data += data_written;
929 } else {
930 pr_err("%s: TTY write returned error %d\n",
931 __func__, data_written);
932 return data_written;
933 }
934
935 if (len)
936 tty_wait_until_sent(smux.tty,
937 msecs_to_jiffies(TTY_BUFFER_FULL_WAIT_MS));
938 }
939 return 0;
940}
941
942/**
943 * Write packet to TTY.
944 *
945 * @pkt packet to write
946 *
947 * @returns 0 on success
948 */
949static int smux_tx_tty(struct smux_pkt_t *pkt)
950{
951 char *data;
952 unsigned int len;
953 int ret;
954
955 if (!smux.tty) {
956 pr_err("%s: TTY not initialized", __func__);
957 return -ENOTTY;
958 }
959
960 if (pkt->hdr.cmd == SMUX_CMD_BYTE) {
961 SMUX_DBG("%s: tty send single byte\n", __func__);
962 ret = write_to_tty(&pkt->hdr.flags, 1);
963 return ret;
964 }
965
966 smux_serialize_hdr(pkt, &data, &len);
967 ret = write_to_tty(data, len);
968 if (ret) {
969 pr_err("%s: failed %d to write header %d\n",
970 __func__, ret, len);
971 return ret;
972 }
973
974 smux_serialize_payload(pkt, &data, &len);
975 ret = write_to_tty(data, len);
976 if (ret) {
977 pr_err("%s: failed %d to write payload %d\n",
978 __func__, ret, len);
979 return ret;
980 }
981
982 smux_serialize_padding(pkt, &data, &len);
983 while (len > 0) {
984 char zero = 0x0;
985 ret = write_to_tty(&zero, 1);
986 if (ret) {
987 pr_err("%s: failed %d to write padding %d\n",
988 __func__, ret, len);
989 return ret;
990 }
991 --len;
992 }
993 return 0;
994}
995
996/**
997 * Send a single character.
998 *
999 * @ch Character to send
1000 */
1001static void smux_send_byte(char ch)
1002{
1003 struct smux_pkt_t *pkt;
1004
1005 pkt = smux_alloc_pkt();
1006 if (!pkt) {
1007 pr_err("%s: alloc failure for byte %x\n", __func__, ch);
1008 return;
1009 }
1010 pkt->hdr.cmd = SMUX_CMD_BYTE;
1011 pkt->hdr.flags = ch;
1012 pkt->hdr.lcid = SMUX_BROADCAST_LCID;
1013
1014 list_add_tail(&pkt->list, &smux.power_queue);
1015 queue_work(smux_tx_wq, &smux_tx_work);
1016}
1017
1018/**
1019 * Receive a single-character packet (used for internal testing).
1020 *
1021 * @ch Character to receive
1022 * @lcid Logical channel ID for packet
1023 *
1024 * @returns 0 for success
1025 */
1026static int smux_receive_byte(char ch, int lcid)
1027{
1028 struct smux_pkt_t pkt;
1029
1030 smux_init_pkt(&pkt);
1031 pkt.hdr.lcid = lcid;
1032 pkt.hdr.cmd = SMUX_CMD_BYTE;
1033 pkt.hdr.flags = ch;
1034
1035 return smux_dispatch_rx_pkt(&pkt);
1036}
1037
1038/**
1039 * Queue packet for transmit.
1040 *
1041 * @pkt_ptr Packet to queue
1042 * @ch Channel to queue packet on
1043 * @queue Queue channel on ready list
1044 */
1045static void smux_tx_queue(struct smux_pkt_t *pkt_ptr, struct smux_lch_t *ch,
1046 int queue)
1047{
1048 unsigned long flags;
1049
1050 SMUX_DBG("%s: queuing pkt %p\n", __func__, pkt_ptr);
1051
1052 spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
1053 list_add_tail(&pkt_ptr->list, &ch->tx_queue);
1054 spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);
1055
1056 if (queue)
1057 list_channel(ch);
1058}
1059
1060/**
1061 * Handle receive OPEN ACK command.
1062 *
1063 * @pkt Received packet
1064 *
1065 * @returns 0 for success
1066 */
1067static int smux_handle_rx_open_ack(struct smux_pkt_t *pkt)
1068{
1069 uint8_t lcid;
1070 int ret;
1071 struct smux_lch_t *ch;
1072 int enable_powerdown = 0;
1073
1074 lcid = pkt->hdr.lcid;
1075 ch = &smux_lch[lcid];
1076
1077 spin_lock(&ch->state_lock_lhb1);
1078 if (ch->local_state == SMUX_LCH_LOCAL_OPENING) {
1079 SMUX_DBG("lcid %d local state 0x%x -> 0x%x\n", lcid,
1080 ch->local_state,
1081 SMUX_LCH_LOCAL_OPENED);
1082
1083 if (pkt->hdr.flags & SMUX_CMD_OPEN_POWER_COLLAPSE)
1084 enable_powerdown = 1;
1085
1086 ch->local_state = SMUX_LCH_LOCAL_OPENED;
1087 if (ch->remote_state == SMUX_LCH_REMOTE_OPENED)
1088 schedule_notify(lcid, SMUX_CONNECTED, NULL);
1089 ret = 0;
1090 } else if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
1091 SMUX_DBG("Remote loopback OPEN ACK received\n");
1092 ret = 0;
1093 } else {
1094 pr_err("%s: lcid %d state 0x%x open ack invalid\n",
1095 __func__, lcid, ch->local_state);
1096 ret = -EINVAL;
1097 }
1098 spin_unlock(&ch->state_lock_lhb1);
1099
1100 if (enable_powerdown) {
1101 spin_lock(&smux.tx_lock_lha2);
1102 if (!smux.powerdown_enabled) {
1103 smux.powerdown_enabled = 1;
1104 SMUX_DBG("%s: enabling power-collapse support\n",
1105 __func__);
1106 }
1107 spin_unlock(&smux.tx_lock_lha2);
1108 }
1109
1110 return ret;
1111}
1112
1113static int smux_handle_close_ack(struct smux_pkt_t *pkt)
1114{
1115 uint8_t lcid;
1116 int ret;
1117 struct smux_lch_t *ch;
1118 union notifier_metadata meta_disconnected;
1119 unsigned long flags;
1120
1121 lcid = pkt->hdr.lcid;
1122 ch = &smux_lch[lcid];
1123 meta_disconnected.disconnected.is_ssr = 0;
1124
1125 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
1126
1127 if (ch->local_state == SMUX_LCH_LOCAL_CLOSING) {
1128 SMUX_DBG("lcid %d local state 0x%x -> 0x%x\n", lcid,
1129 SMUX_LCH_LOCAL_CLOSING,
1130 SMUX_LCH_LOCAL_CLOSED);
1131 ch->local_state = SMUX_LCH_LOCAL_CLOSED;
1132 if (ch->remote_state == SMUX_LCH_REMOTE_CLOSED)
1133 schedule_notify(lcid, SMUX_DISCONNECTED,
1134 &meta_disconnected);
1135 ret = 0;
1136 } else if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
1137 SMUX_DBG("Remote loopback CLOSE ACK received\n");
1138 ret = 0;
1139 } else {
1140 pr_err("%s: lcid %d state 0x%x close ack invalid\n",
1141 __func__, lcid, ch->local_state);
1142 ret = -EINVAL;
1143 }
1144 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
1145 return ret;
1146}
1147
1148/**
1149 * Handle receive OPEN command.
1150 *
1151 * @pkt Received packet
1152 *
1153 * @returns 0 for success
1154 */
1155static int smux_handle_rx_open_cmd(struct smux_pkt_t *pkt)
1156{
1157 uint8_t lcid;
1158 int ret;
1159 struct smux_lch_t *ch;
1160 struct smux_pkt_t *ack_pkt;
1161 unsigned long flags;
1162 int tx_ready = 0;
1163 int enable_powerdown = 0;
1164
1165 if (pkt->hdr.flags & SMUX_CMD_OPEN_ACK)
1166 return smux_handle_rx_open_ack(pkt);
1167
1168 lcid = pkt->hdr.lcid;
1169 ch = &smux_lch[lcid];
1170
1171 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
1172
1173 if (ch->remote_state == SMUX_LCH_REMOTE_CLOSED) {
1174 SMUX_DBG("lcid %d remote state 0x%x -> 0x%x\n", lcid,
1175 SMUX_LCH_REMOTE_CLOSED,
1176 SMUX_LCH_REMOTE_OPENED);
1177
1178 ch->remote_state = SMUX_LCH_REMOTE_OPENED;
1179 if (pkt->hdr.flags & SMUX_CMD_OPEN_POWER_COLLAPSE)
1180 enable_powerdown = 1;
1181
1182 /* Send Open ACK */
1183 ack_pkt = smux_alloc_pkt();
1184 if (!ack_pkt) {
1185 /* exit out to allow retrying this later */
1186 ret = -ENOMEM;
1187 goto out;
1188 }
1189 ack_pkt->hdr.cmd = SMUX_CMD_OPEN_LCH;
1190 ack_pkt->hdr.flags = SMUX_CMD_OPEN_ACK
1191 | SMUX_CMD_OPEN_POWER_COLLAPSE;
1192 ack_pkt->hdr.lcid = lcid;
1193 ack_pkt->hdr.payload_len = 0;
1194 ack_pkt->hdr.pad_len = 0;
1195 if (pkt->hdr.flags & SMUX_CMD_OPEN_REMOTE_LOOPBACK) {
1196 ch->remote_mode = SMUX_LCH_MODE_REMOTE_LOOPBACK;
1197 ack_pkt->hdr.flags |= SMUX_CMD_OPEN_REMOTE_LOOPBACK;
1198 }
1199 smux_tx_queue(ack_pkt, ch, 0);
1200 tx_ready = 1;
1201
1202 if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
1203 /*
1204 * Send an Open command to the remote side to
1205 * simulate our local client doing it.
1206 */
1207 ack_pkt = smux_alloc_pkt();
1208 if (ack_pkt) {
1209 ack_pkt->hdr.lcid = lcid;
1210 ack_pkt->hdr.cmd = SMUX_CMD_OPEN_LCH;
1211 ack_pkt->hdr.flags =
1212 SMUX_CMD_OPEN_POWER_COLLAPSE;
1213 ack_pkt->hdr.payload_len = 0;
1214 ack_pkt->hdr.pad_len = 0;
1215 smux_tx_queue(ack_pkt, ch, 0);
1216 tx_ready = 1;
1217 } else {
1218 pr_err("%s: Remote loopback allocation failure\n",
1219 __func__);
1220 }
1221 } else if (ch->local_state == SMUX_LCH_LOCAL_OPENED) {
1222 schedule_notify(lcid, SMUX_CONNECTED, NULL);
1223 }
1224 ret = 0;
1225 } else {
1226 pr_err("%s: lcid %d remote state 0x%x open invalid\n",
1227 __func__, lcid, ch->remote_state);
1228 ret = -EINVAL;
1229 }
1230
1231out:
1232 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
1233
1234 if (enable_powerdown) {
1235 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
1236 if (!smux.powerdown_enabled) {
1237 smux.powerdown_enabled = 1;
1238 SMUX_DBG("%s: enabling power-collapse support\n",
1239 __func__);
1240 }
1241 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
1242 }
1243
1244 if (tx_ready)
1245 list_channel(ch);
1246
1247 return ret;
1248}
1249
1250/**
1251 * Handle receive CLOSE command.
1252 *
1253 * @pkt Received packet
1254 *
1255 * @returns 0 for success
1256 */
1257static int smux_handle_rx_close_cmd(struct smux_pkt_t *pkt)
1258{
1259 uint8_t lcid;
1260 int ret;
1261 struct smux_lch_t *ch;
1262 struct smux_pkt_t *ack_pkt;
1263 union notifier_metadata meta_disconnected;
1264 unsigned long flags;
1265 int tx_ready = 0;
1266
1267 if (pkt->hdr.flags & SMUX_CMD_CLOSE_ACK)
1268 return smux_handle_close_ack(pkt);
1269
1270 lcid = pkt->hdr.lcid;
1271 ch = &smux_lch[lcid];
1272 meta_disconnected.disconnected.is_ssr = 0;
1273
1274 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
1275 if (ch->remote_state == SMUX_LCH_REMOTE_OPENED) {
1276 SMUX_DBG("lcid %d remote state 0x%x -> 0x%x\n", lcid,
1277 SMUX_LCH_REMOTE_OPENED,
1278 SMUX_LCH_REMOTE_CLOSED);
1279
1280 ack_pkt = smux_alloc_pkt();
1281 if (!ack_pkt) {
1282 /* exit out to allow retrying this later */
1283 ret = -ENOMEM;
1284 goto out;
1285 }
1286 ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
1287 ack_pkt->hdr.cmd = SMUX_CMD_CLOSE_LCH;
1288 ack_pkt->hdr.flags = SMUX_CMD_CLOSE_ACK;
1289 ack_pkt->hdr.lcid = lcid;
1290 ack_pkt->hdr.payload_len = 0;
1291 ack_pkt->hdr.pad_len = 0;
1292 smux_tx_queue(ack_pkt, ch, 0);
1293 tx_ready = 1;
1294
1295 if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
1296 /*
1297 * Send a Close command to the remote side to simulate
1298 * our local client doing it.
1299 */
1300 ack_pkt = smux_alloc_pkt();
1301 if (ack_pkt) {
1302 ack_pkt->hdr.lcid = lcid;
1303 ack_pkt->hdr.cmd = SMUX_CMD_CLOSE_LCH;
1304 ack_pkt->hdr.flags = 0;
1305 ack_pkt->hdr.payload_len = 0;
1306 ack_pkt->hdr.pad_len = 0;
1307 smux_tx_queue(ack_pkt, ch, 0);
1308 tx_ready = 1;
1309 } else {
1310 pr_err("%s: Remote loopback allocation failure\n",
1311 __func__);
1312 }
1313 }
1314
1315 if (ch->local_state == SMUX_LCH_LOCAL_CLOSED)
1316 schedule_notify(lcid, SMUX_DISCONNECTED,
1317 &meta_disconnected);
1318 ret = 0;
1319 } else {
1320 pr_err("%s: lcid %d remote state 0x%x close invalid\n",
1321 __func__, lcid, ch->remote_state);
1322 ret = -EINVAL;
1323 }
1324out:
1325 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
1326 if (tx_ready)
1327 list_channel(ch);
1328
1329 return ret;
1330}
1331
1332/*
1333 * Handle receive DATA command.
1334 *
1335 * @pkt Received packet
1336 *
1337 * @returns 0 for success
1338 */
1339static int smux_handle_rx_data_cmd(struct smux_pkt_t *pkt)
1340{
1341 uint8_t lcid;
1342 int ret = 0;
1343 int do_retry = 0;
1344 int tmp;
1345 int rx_len;
1346 struct smux_lch_t *ch;
1347 union notifier_metadata metadata;
1348 int remote_loopback;
1349 struct smux_pkt_t *ack_pkt;
1350 unsigned long flags;
1351
1352 if (!pkt || smux_assert_lch_id(pkt->hdr.lcid)) {
1353 ret = -ENXIO;
1354 goto out;
1355 }
1356
1357 rx_len = pkt->hdr.payload_len;
1358 if (rx_len == 0) {
1359 ret = -EINVAL;
1360 goto out;
1361 }
1362
1363 lcid = pkt->hdr.lcid;
1364 ch = &smux_lch[lcid];
1365 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
1366 remote_loopback = ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK;
1367
1368 if (ch->local_state != SMUX_LCH_LOCAL_OPENED
1369 && !remote_loopback) {
1370 pr_err("smux: ch %d error data on local state 0x%x",
1371 lcid, ch->local_state);
1372 ret = -EIO;
1373 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
1374 goto out;
1375 }
1376
1377 if (ch->remote_state != SMUX_LCH_REMOTE_OPENED) {
1378 pr_err("smux: ch %d error data on remote state 0x%x",
1379 lcid, ch->remote_state);
1380 ret = -EIO;
1381 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
1382 goto out;
1383 }
1384
1385 if (!list_empty(&ch->rx_retry_queue)) {
1386 do_retry = 1;
1387 if ((ch->rx_retry_queue_cnt + 1) > SMUX_RX_RETRY_MAX_PKTS) {
1388 /* retry queue full */
1389 schedule_notify(lcid, SMUX_READ_FAIL, NULL);
1390 ret = -ENOMEM;
1391 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
1392 goto out;
1393 }
1394 }
1395 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
1396
1397 if (remote_loopback) {
1398 /* Echo the data back to the remote client. */
1399 ack_pkt = smux_alloc_pkt();
1400 if (ack_pkt) {
1401 ack_pkt->hdr.lcid = lcid;
1402 ack_pkt->hdr.cmd = SMUX_CMD_DATA;
1403 ack_pkt->hdr.flags = 0;
1404 ack_pkt->hdr.payload_len = pkt->hdr.payload_len;
1405 if (ack_pkt->hdr.payload_len) {
1406 smux_alloc_pkt_payload(ack_pkt);
1407 memcpy(ack_pkt->payload, pkt->payload,
1408 ack_pkt->hdr.payload_len);
1409 }
1410 ack_pkt->hdr.pad_len = pkt->hdr.pad_len;
1411 smux_tx_queue(ack_pkt, ch, 0);
1412 list_channel(ch);
1413 } else {
1414 pr_err("%s: Remote loopback allocation failure\n",
1415 __func__);
1416 }
1417 } else if (!do_retry) {
1418 /* request buffer from client */
1419 metadata.read.pkt_priv = 0;
1420 metadata.read.buffer = 0;
1421 tmp = ch->get_rx_buffer(ch->priv,
1422 (void **)&metadata.read.pkt_priv,
1423 (void **)&metadata.read.buffer,
1424 rx_len);
1425
1426 if (tmp == 0 && metadata.read.buffer) {
1427 /* place data into RX buffer */
1428 memcpy(metadata.read.buffer, pkt->payload,
1429 rx_len);
1430 metadata.read.len = rx_len;
1431 schedule_notify(lcid, SMUX_READ_DONE,
1432 &metadata);
1433 } else if (tmp == -EAGAIN ||
1434 (tmp == 0 && !metadata.read.buffer)) {
1435 /* buffer allocation failed - add to retry queue */
1436 do_retry = 1;
1437 } else if (tmp < 0) {
1438 schedule_notify(lcid, SMUX_READ_FAIL, NULL);
1439 ret = -ENOMEM;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001440 }
1441 }
1442
1443 if (do_retry) {
1444 struct smux_rx_pkt_retry *retry;
1445
1446 retry = kmalloc(sizeof(struct smux_rx_pkt_retry), GFP_KERNEL);
1447 if (!retry) {
1448 pr_err("%s: retry alloc failure\n", __func__);
1449 ret = -ENOMEM;
1450 schedule_notify(lcid, SMUX_READ_FAIL, NULL);
1451 goto out;
1452 }
1453 INIT_LIST_HEAD(&retry->rx_retry_list);
1454 retry->timeout_in_ms = SMUX_RX_RETRY_MIN_MS;
1455
1456 /* copy packet */
1457 retry->pkt = smux_alloc_pkt();
1458 if (!retry->pkt) {
1459 kfree(retry);
1460 pr_err("%s: pkt alloc failure\n", __func__);
1461 ret = -ENOMEM;
1462 schedule_notify(lcid, SMUX_READ_FAIL, NULL);
1463 goto out;
1464 }
1465 retry->pkt->hdr.lcid = lcid;
1466 retry->pkt->hdr.payload_len = pkt->hdr.payload_len;
1467 retry->pkt->hdr.pad_len = pkt->hdr.pad_len;
1468 if (retry->pkt->hdr.payload_len) {
1469 smux_alloc_pkt_payload(retry->pkt);
1470 memcpy(retry->pkt->payload, pkt->payload,
1471 retry->pkt->hdr.payload_len);
1472 }
1473
1474 /* add to retry queue */
1475 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
1476 list_add_tail(&retry->rx_retry_list, &ch->rx_retry_queue);
1477 ++ch->rx_retry_queue_cnt;
1478 if (ch->rx_retry_queue_cnt == 1)
1479 queue_delayed_work(smux_rx_wq, &ch->rx_retry_work,
1480 msecs_to_jiffies(retry->timeout_in_ms));
1481 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
1482 }
1483
1484out:
1485 return ret;
1486}
1487
1488/**
1489 * Handle receive byte command for testing purposes.
1490 *
1491 * @pkt Received packet
1492 *
1493 * @returns 0 for success
1494 */
1495static int smux_handle_rx_byte_cmd(struct smux_pkt_t *pkt)
1496{
1497 uint8_t lcid;
1498 int ret;
1499 struct smux_lch_t *ch;
1500 union notifier_metadata metadata;
1501 unsigned long flags;
1502
1503 if (!pkt || smux_assert_lch_id(pkt->hdr.lcid)) {
1504 pr_err("%s: invalid packet or channel id\n", __func__);
1505 return -ENXIO;
1506 }
1507
1508 lcid = pkt->hdr.lcid;
1509 ch = &smux_lch[lcid];
1510 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
1511
1512 if (ch->local_state != SMUX_LCH_LOCAL_OPENED) {
1513 pr_err("smux: ch %d error data on local state 0x%x",
1514 lcid, ch->local_state);
1515 ret = -EIO;
1516 goto out;
1517 }
1518
1519 if (ch->remote_state != SMUX_LCH_REMOTE_OPENED) {
1520 pr_err("smux: ch %d error data on remote state 0x%x",
1521 lcid, ch->remote_state);
1522 ret = -EIO;
1523 goto out;
1524 }
1525
1526 metadata.read.pkt_priv = (void *)(int)pkt->hdr.flags;
1527 metadata.read.buffer = 0;
1528 schedule_notify(lcid, SMUX_READ_DONE, &metadata);
1529 ret = 0;
1530
1531out:
1532 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
1533 return ret;
1534}
1535
1536/**
1537 * Handle receive status command.
1538 *
1539 * @pkt Received packet
1540 *
1541 * @returns 0 for success
1542 */
1543static int smux_handle_rx_status_cmd(struct smux_pkt_t *pkt)
1544{
1545 uint8_t lcid;
1546 int ret = 0;
1547 struct smux_lch_t *ch;
1548 union notifier_metadata meta;
1549 unsigned long flags;
1550 int tx_ready = 0;
1551
1552 lcid = pkt->hdr.lcid;
1553 ch = &smux_lch[lcid];
1554
1555 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
1556 meta.tiocm.tiocm_old = ch->remote_tiocm;
1557 meta.tiocm.tiocm_new = pkt->hdr.flags;
1558
1559 /* update logical channel flow control */
1560 if ((meta.tiocm.tiocm_old & SMUX_CMD_STATUS_FLOW_CNTL) ^
1561 (meta.tiocm.tiocm_new & SMUX_CMD_STATUS_FLOW_CNTL)) {
1562 /* logical channel flow control changed */
1563 if (pkt->hdr.flags & SMUX_CMD_STATUS_FLOW_CNTL) {
1564 /* disabled TX */
1565 SMUX_DBG("TX Flow control enabled\n");
1566 ch->tx_flow_control = 1;
1567 } else {
1568 /* re-enable channel */
1569 SMUX_DBG("TX Flow control disabled\n");
1570 ch->tx_flow_control = 0;
1571 tx_ready = 1;
1572 }
1573 }
1574 meta.tiocm.tiocm_old = msm_smux_tiocm_get_atomic(ch);
1575 ch->remote_tiocm = pkt->hdr.flags;
1576 meta.tiocm.tiocm_new = msm_smux_tiocm_get_atomic(ch);
1577
1578 /* client notification for status change */
1579 if (IS_FULLY_OPENED(ch)) {
1580 if (meta.tiocm.tiocm_old != meta.tiocm.tiocm_new)
1581 schedule_notify(lcid, SMUX_TIOCM_UPDATE, &meta);
1582 ret = 0;
1583 }
1584 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
1585 if (tx_ready)
1586 list_channel(ch);
1587
1588 return ret;
1589}
1590
1591/**
1592 * Handle receive power command.
1593 *
1594 * @pkt Received packet
1595 *
1596 * @returns 0 for success
1597 */
1598static int smux_handle_rx_power_cmd(struct smux_pkt_t *pkt)
1599{
1600 struct smux_pkt_t *ack_pkt = NULL;
1601 unsigned long flags;
1602
1603 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
1604 if (pkt->hdr.flags & SMUX_CMD_PWR_CTL_ACK) {
1605 /* local sleep request ack */
1606 if (smux.power_state == SMUX_PWR_TURNING_OFF) {
1607 /* Power-down complete, turn off UART */
1608 SMUX_PWR("%s: Power %d->%d\n", __func__,
1609 smux.power_state, SMUX_PWR_OFF_FLUSH);
1610 smux.power_state = SMUX_PWR_OFF_FLUSH;
1611 queue_work(smux_tx_wq, &smux_inactivity_work);
1612 } else {
1613 pr_err("%s: sleep request ack invalid in state %d\n",
1614 __func__, smux.power_state);
1615 }
1616 } else {
1617 /*
1618 * Remote sleep request
1619 *
1620 * Even if we have data pending, we need to transition to the
1621 * POWER_OFF state and then perform a wakeup since the remote
1622 * side has requested a power-down.
1623 *
1624 * The state here is set to SMUX_PWR_TURNING_OFF_FLUSH and
1625 * the TX thread will set the state to SMUX_PWR_TURNING_OFF
1626 * when it sends the packet.
1627 */
1628 if (smux.power_state == SMUX_PWR_ON
1629 || smux.power_state == SMUX_PWR_TURNING_OFF) {
1630 ack_pkt = smux_alloc_pkt();
1631 if (ack_pkt) {
1632 SMUX_PWR("%s: Power %d->%d\n", __func__,
1633 smux.power_state,
1634 SMUX_PWR_TURNING_OFF_FLUSH);
1635
1636 smux.power_state = SMUX_PWR_TURNING_OFF_FLUSH;
1637
1638 /* send power-down ack */
1639 ack_pkt->hdr.cmd = SMUX_CMD_PWR_CTL;
1640 ack_pkt->hdr.flags = SMUX_CMD_PWR_CTL_ACK;
1641 ack_pkt->hdr.lcid = SMUX_BROADCAST_LCID;
1642 list_add_tail(&ack_pkt->list,
1643 &smux.power_queue);
1644 queue_work(smux_tx_wq, &smux_tx_work);
1645 }
1646 } else {
1647 pr_err("%s: sleep request invalid in state %d\n",
1648 __func__, smux.power_state);
1649 }
1650 }
1651 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
1652
1653 return 0;
1654}
1655
1656/**
1657 * Handle dispatching a completed packet for receive processing.
1658 *
1659 * @pkt Packet to process
1660 *
1661 * @returns 0 for success
1662 */
1663static int smux_dispatch_rx_pkt(struct smux_pkt_t *pkt)
1664{
1665 int ret;
1666
1667 SMUX_LOG_PKT_RX(pkt);
1668
1669 switch (pkt->hdr.cmd) {
1670 case SMUX_CMD_OPEN_LCH:
1671 ret = smux_handle_rx_open_cmd(pkt);
1672 break;
1673
1674 case SMUX_CMD_DATA:
1675 ret = smux_handle_rx_data_cmd(pkt);
1676 break;
1677
1678 case SMUX_CMD_CLOSE_LCH:
1679 ret = smux_handle_rx_close_cmd(pkt);
1680 break;
1681
1682 case SMUX_CMD_STATUS:
1683 ret = smux_handle_rx_status_cmd(pkt);
1684 break;
1685
1686 case SMUX_CMD_PWR_CTL:
1687 ret = smux_handle_rx_power_cmd(pkt);
1688 break;
1689
1690 case SMUX_CMD_BYTE:
1691 ret = smux_handle_rx_byte_cmd(pkt);
1692 break;
1693
1694 default:
1695 pr_err("%s: command %d unknown\n", __func__, pkt->hdr.cmd);
1696 ret = -EINVAL;
1697 }
1698 return ret;
1699}
1700
1701/**
1702 * Deserializes a packet and dispatches it to the packet receive logic.
1703 *
1704 * @data Raw data for one packet
1705 * @len Length of the data
1706 *
1707 * @returns 0 for success
1708 */
1709static int smux_deserialize(unsigned char *data, int len)
1710{
1711 struct smux_pkt_t recv;
1712 uint8_t lcid;
1713
1714 smux_init_pkt(&recv);
1715
1716 /*
1717 * It may be possible to optimize this to not use the
1718 * temporary buffer.
1719 */
1720 memcpy(&recv.hdr, data, sizeof(struct smux_hdr_t));
1721
1722 if (recv.hdr.magic != SMUX_MAGIC) {
1723 pr_err("%s: invalid header magic\n", __func__);
1724 return -EINVAL;
1725 }
1726
1727 lcid = recv.hdr.lcid;
1728 if (smux_assert_lch_id(lcid)) {
1729 pr_err("%s: invalid channel id %d\n", __func__, lcid);
1730 return -ENXIO;
1731 }
1732
1733 if (recv.hdr.payload_len)
1734 recv.payload = data + sizeof(struct smux_hdr_t);
1735
1736 return smux_dispatch_rx_pkt(&recv);
1737}
1738
1739/**
1740 * Handle wakeup request byte.
1741 */
1742static void smux_handle_wakeup_req(void)
1743{
1744 unsigned long flags;
1745
1746 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
1747 if (smux.power_state == SMUX_PWR_OFF
1748 || smux.power_state == SMUX_PWR_TURNING_ON) {
1749 /* wakeup system */
1750 SMUX_PWR("%s: Power %d->%d\n", __func__,
1751 smux.power_state, SMUX_PWR_ON);
1752 smux.power_state = SMUX_PWR_ON;
1753 queue_work(smux_tx_wq, &smux_wakeup_work);
1754 queue_work(smux_tx_wq, &smux_tx_work);
1755 queue_delayed_work(smux_tx_wq, &smux_delayed_inactivity_work,
1756 msecs_to_jiffies(SMUX_INACTIVITY_TIMEOUT_MS));
1757 smux_send_byte(SMUX_WAKEUP_ACK);
1758 } else {
1759 smux_send_byte(SMUX_WAKEUP_ACK);
1760 }
1761 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
1762}
1763
1764/**
1765 * Handle wakeup request ack.
1766 */
1767static void smux_handle_wakeup_ack(void)
1768{
1769 unsigned long flags;
1770
1771 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
1772 if (smux.power_state == SMUX_PWR_TURNING_ON) {
1773 /* received response to wakeup request */
1774 SMUX_PWR("%s: Power %d->%d\n", __func__,
1775 smux.power_state, SMUX_PWR_ON);
1776 smux.power_state = SMUX_PWR_ON;
1777 queue_work(smux_tx_wq, &smux_tx_work);
1778 queue_delayed_work(smux_tx_wq, &smux_delayed_inactivity_work,
1779 msecs_to_jiffies(SMUX_INACTIVITY_TIMEOUT_MS));
1780
1781 } else if (smux.power_state != SMUX_PWR_ON) {
1782 /* invalid message */
1783 pr_err("%s: wakeup request ack invalid in state %d\n",
1784 __func__, smux.power_state);
1785 }
1786 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
1787}
1788
1789/**
1790 * RX State machine - IDLE state processing.
1791 *
1792 * @data New RX data to process
1793 * @len Length of the data
1794 * @used Return value of length processed
1795 * @flag Error flag - TTY_NORMAL 0 for no failure
1796 */
1797static void smux_rx_handle_idle(const unsigned char *data,
1798 int len, int *used, int flag)
1799{
1800 int i;
1801
1802 if (flag) {
1803 if (smux_byte_loopback)
1804 smux_receive_byte(SMUX_UT_ECHO_ACK_FAIL,
1805 smux_byte_loopback);
1806 pr_err("%s: TTY error 0x%x - ignoring\n", __func__, flag);
1807 ++*used;
1808 return;
1809 }
1810
1811 for (i = *used; i < len && smux.rx_state == SMUX_RX_IDLE; i++) {
1812 switch (data[i]) {
1813 case SMUX_MAGIC_WORD1:
1814 smux.rx_state = SMUX_RX_MAGIC;
1815 break;
1816 case SMUX_WAKEUP_REQ:
1817 smux_handle_wakeup_req();
1818 break;
1819 case SMUX_WAKEUP_ACK:
1820 smux_handle_wakeup_ack();
1821 break;
1822 default:
1823 /* unexpected character */
1824 if (smux_byte_loopback && data[i] == SMUX_UT_ECHO_REQ)
1825 smux_receive_byte(SMUX_UT_ECHO_ACK_OK,
1826 smux_byte_loopback);
1827 pr_err("%s: parse error 0x%02x - ignoring\n", __func__,
1828 (unsigned)data[i]);
1829 break;
1830 }
1831 }
1832
1833 *used = i;
1834}
1835
1836/**
1837 * RX State machine - Header Magic state processing.
1838 *
1839 * @data New RX data to process
1840 * @len Length of the data
1841 * @used Return value of length processed
1842 * @flag Error flag - TTY_NORMAL 0 for no failure
1843 */
1844static void smux_rx_handle_magic(const unsigned char *data,
1845 int len, int *used, int flag)
1846{
1847 int i;
1848
1849 if (flag) {
1850 pr_err("%s: TTY RX error %d\n", __func__, flag);
1851 smux_enter_reset();
1852 smux.rx_state = SMUX_RX_FAILURE;
1853 ++*used;
1854 return;
1855 }
1856
1857 for (i = *used; i < len && smux.rx_state == SMUX_RX_MAGIC; i++) {
1858 /* wait for completion of the magic */
1859 if (data[i] == SMUX_MAGIC_WORD2) {
1860 smux.recv_len = 0;
1861 smux.recv_buf[smux.recv_len++] = SMUX_MAGIC_WORD1;
1862 smux.recv_buf[smux.recv_len++] = SMUX_MAGIC_WORD2;
1863 smux.rx_state = SMUX_RX_HDR;
1864 } else {
1865 /* unexpected / trash character */
1866 pr_err("%s: rx parse error for char %c; *used=%d, len=%d\n",
1867 __func__, data[i], *used, len);
1868 smux.rx_state = SMUX_RX_IDLE;
1869 }
1870 }
1871
1872 *used = i;
1873}
1874
1875/**
1876 * RX State machine - Packet Header state processing.
1877 *
1878 * @data New RX data to process
1879 * @len Length of the data
1880 * @used Return value of length processed
1881 * @flag Error flag - TTY_NORMAL 0 for no failure
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001882 */
1883static void smux_rx_handle_hdr(const unsigned char *data,
1884 int len, int *used, int flag)
1885{
1886 int i;
1887 struct smux_hdr_t *hdr;
1888
1889 if (flag) {
1890 pr_err("%s: TTY RX error %d\n", __func__, flag);
1891 smux_enter_reset();
1892 smux.rx_state = SMUX_RX_FAILURE;
1893 ++*used;
1894 return;
1895 }
1896
1897 for (i = *used; i < len && smux.rx_state == SMUX_RX_HDR; i++) {
1898 smux.recv_buf[smux.recv_len++] = data[i];
1899
1900 if (smux.recv_len == sizeof(struct smux_hdr_t)) {
1901 /* complete header received */
1902 hdr = (struct smux_hdr_t *)smux.recv_buf;
1903 smux.pkt_remain = hdr->payload_len + hdr->pad_len;
1904 smux.rx_state = SMUX_RX_PAYLOAD;
1905 }
1906 }
1907 *used = i;
1908}
1909
1910/**
1911 * RX State machine - Packet Payload state processing.
1912 *
1913 * @data New RX data to process
1914 * @len Length of the data
1915 * @used Return value of length processed
1916 * @flag Error flag - TTY_NORMAL 0 for no failure
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001917 */
1918static void smux_rx_handle_pkt_payload(const unsigned char *data,
1919 int len, int *used, int flag)
1920{
1921 int remaining;
1922
1923 if (flag) {
1924 pr_err("%s: TTY RX error %d\n", __func__, flag);
1925 smux_enter_reset();
1926 smux.rx_state = SMUX_RX_FAILURE;
1927 ++*used;
1928 return;
1929 }
1930
1931 /* copy data into rx buffer */
1932 if (smux.pkt_remain < (len - *used))
1933 remaining = smux.pkt_remain;
1934 else
1935 remaining = len - *used;
1936
1937 memcpy(&smux.recv_buf[smux.recv_len], &data[*used], remaining);
1938 smux.recv_len += remaining;
1939 smux.pkt_remain -= remaining;
1940 *used += remaining;
1941
1942 if (smux.pkt_remain == 0) {
1943 /* complete packet received */
1944 smux_deserialize(smux.recv_buf, smux.recv_len);
1945 smux.rx_state = SMUX_RX_IDLE;
1946 }
1947}
1948
1949/**
1950 * Feed data to the receive state machine.
1951 *
1952 * @data Pointer to data block
1953 * @len Length of data
1954 * @flag TTY_NORMAL (0) for no error, otherwise TTY Error Flag
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001955 */
1956void smux_rx_state_machine(const unsigned char *data,
1957 int len, int flag)
1958{
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001959 struct smux_rx_worker_data work;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001960
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001961 work.data = data;
1962 work.len = len;
1963 work.flag = flag;
1964 INIT_WORK_ONSTACK(&work.work, smux_rx_worker);
1965 work.work_complete = COMPLETION_INITIALIZER_ONSTACK(work.work_complete);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001966
Eric Holmberg0560f7a2012-05-31 15:50:26 -06001967 queue_work(smux_rx_wq, &work.work);
1968 wait_for_completion(&work.work_complete);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001969}
1970
1971/**
1972 * Add channel to transmit-ready list and trigger transmit worker.
1973 *
1974 * @ch Channel to add
1975 */
1976static void list_channel(struct smux_lch_t *ch)
1977{
1978 unsigned long flags;
1979
1980 SMUX_DBG("%s: listing channel %d\n",
1981 __func__, ch->lcid);
1982
1983 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
1984 spin_lock(&ch->tx_lock_lhb2);
1985 smux.tx_activity_flag = 1;
1986 if (list_empty(&ch->tx_ready_list))
1987 list_add_tail(&ch->tx_ready_list, &smux.lch_tx_ready_list);
1988 spin_unlock(&ch->tx_lock_lhb2);
1989 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
1990
1991 queue_work(smux_tx_wq, &smux_tx_work);
1992}
1993
1994/**
1995 * Transmit packet on correct transport and then perform client
1996 * notification.
1997 *
1998 * @ch Channel to transmit on
1999 * @pkt Packet to transmit
2000 */
2001static void smux_tx_pkt(struct smux_lch_t *ch, struct smux_pkt_t *pkt)
2002{
2003 union notifier_metadata meta_write;
2004 int ret;
2005
2006 if (ch && pkt) {
2007 SMUX_LOG_PKT_TX(pkt);
2008 if (ch->local_mode == SMUX_LCH_MODE_LOCAL_LOOPBACK)
2009 ret = smux_tx_loopback(pkt);
2010 else
2011 ret = smux_tx_tty(pkt);
2012
2013 if (pkt->hdr.cmd == SMUX_CMD_DATA) {
2014 /* notify write-done */
2015 meta_write.write.pkt_priv = pkt->priv;
2016 meta_write.write.buffer = pkt->payload;
2017 meta_write.write.len = pkt->hdr.payload_len;
2018 if (ret >= 0) {
 2019				SMUX_DBG("%s: PKT write done\n", __func__);
2020 schedule_notify(ch->lcid, SMUX_WRITE_DONE,
2021 &meta_write);
2022 } else {
2023 pr_err("%s: failed to write pkt %d\n",
2024 __func__, ret);
2025 schedule_notify(ch->lcid, SMUX_WRITE_FAIL,
2026 &meta_write);
2027 }
2028 }
2029 }
2030}
2031
2032/**
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002033 * Flush pending TTY TX data.
2034 */
2035static void smux_flush_tty(void)
2036{
2037 if (!smux.tty) {
2038 pr_err("%s: ldisc not loaded\n", __func__);
2039 return;
2040 }
2041
2042 tty_wait_until_sent(smux.tty,
2043 msecs_to_jiffies(TTY_BUFFER_FULL_WAIT_MS));
2044
2045 if (tty_chars_in_buffer(smux.tty) > 0)
2046 pr_err("%s: unable to flush UART queue\n", __func__);
2047}
2048
2049/**
Eric Holmberged1f00c2012-06-07 09:45:18 -06002050 * Purge TX queue for logical channel.
2051 *
2052 * @ch Logical channel pointer
2053 *
2054 * Must be called with the following spinlocks locked:
2055 * state_lock_lhb1
2056 * tx_lock_lhb2
2057 */
2058static void smux_purge_ch_tx_queue(struct smux_lch_t *ch)
2059{
2060 struct smux_pkt_t *pkt;
2061 int send_disconnect = 0;
2062
2063 while (!list_empty(&ch->tx_queue)) {
2064 pkt = list_first_entry(&ch->tx_queue, struct smux_pkt_t,
2065 list);
2066 list_del(&pkt->list);
2067
2068 if (pkt->hdr.cmd == SMUX_CMD_OPEN_LCH) {
2069 /* Open was never sent, just force to closed state */
2070 ch->local_state = SMUX_LCH_LOCAL_CLOSED;
2071 send_disconnect = 1;
2072 } else if (pkt->hdr.cmd == SMUX_CMD_DATA) {
2073 /* Notify client of failed write */
2074 union notifier_metadata meta_write;
2075
2076 meta_write.write.pkt_priv = pkt->priv;
2077 meta_write.write.buffer = pkt->payload;
2078 meta_write.write.len = pkt->hdr.payload_len;
2079 schedule_notify(ch->lcid, SMUX_WRITE_FAIL, &meta_write);
2080 }
2081 smux_free_pkt(pkt);
2082 }
2083
2084 if (send_disconnect) {
2085 union notifier_metadata meta_disconnected;
2086
2087 meta_disconnected.disconnected.is_ssr = smux.in_reset;
2088 schedule_notify(ch->lcid, SMUX_DISCONNECTED,
2089 &meta_disconnected);
2090 }
2091}
2092
2093/**
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002094 * Power-up the UART.
2095 */
2096static void smux_uart_power_on(void)
2097{
2098 struct uart_state *state;
2099
2100 if (!smux.tty || !smux.tty->driver_data) {
2101 pr_err("%s: unable to find UART port for tty %p\n",
2102 __func__, smux.tty);
2103 return;
2104 }
2105 state = smux.tty->driver_data;
2106 msm_hs_request_clock_on(state->uart_port);
2107}
2108
2109/**
2110 * Power down the UART.
2111 */
2112static void smux_uart_power_off(void)
2113{
2114 struct uart_state *state;
2115
2116 if (!smux.tty || !smux.tty->driver_data) {
2117 pr_err("%s: unable to find UART port for tty %p\n",
2118 __func__, smux.tty);
2119 return;
2120 }
2121 state = smux.tty->driver_data;
2122 msm_hs_request_clock_off(state->uart_port);
2123}
2124
2125/**
2126 * TX Wakeup Worker
2127 *
2128 * @work Not used
2129 *
2130 * Do an exponential back-off wakeup sequence with a maximum period
2131 * of approximately 1 second (1 << 20 microseconds).
2132 */
2133static void smux_wakeup_worker(struct work_struct *work)
2134{
2135 unsigned long flags;
2136 unsigned wakeup_delay;
2137 int complete = 0;
2138
Eric Holmberged1f00c2012-06-07 09:45:18 -06002139 while (!smux.in_reset) {
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002140 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
2141 if (smux.power_state == SMUX_PWR_ON) {
2142 /* wakeup complete */
2143 complete = 1;
2144 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2145 break;
2146 } else {
2147 /* retry */
2148 wakeup_delay = smux.pwr_wakeup_delay_us;
2149 smux.pwr_wakeup_delay_us <<= 1;
2150 if (smux.pwr_wakeup_delay_us > SMUX_WAKEUP_DELAY_MAX)
2151 smux.pwr_wakeup_delay_us =
2152 SMUX_WAKEUP_DELAY_MAX;
2153 }
2154 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2155 SMUX_DBG("%s: triggering wakeup\n", __func__);
2156 smux_send_byte(SMUX_WAKEUP_REQ);
2157
2158 if (wakeup_delay < SMUX_WAKEUP_DELAY_MIN) {
2159 SMUX_DBG("%s: sleeping for %u us\n", __func__,
2160 wakeup_delay);
2161 usleep_range(wakeup_delay, 2*wakeup_delay);
2162 } else {
2163 /* schedule delayed work */
2164 SMUX_DBG("%s: scheduling delayed wakeup in %u ms\n",
2165 __func__, wakeup_delay / 1000);
2166 queue_delayed_work(smux_tx_wq,
2167 &smux_wakeup_delayed_work,
2168 msecs_to_jiffies(wakeup_delay / 1000));
2169 break;
2170 }
2171 }
2172
2173 if (complete) {
2174 SMUX_DBG("%s: wakeup complete\n", __func__);
2175 /*
2176 * Cancel any pending retry. This avoids a race condition with
2177 * a new power-up request because:
2178 * 1) this worker doesn't modify the state
2179 * 2) this worker is processed on the same single-threaded
2180 * workqueue as new TX wakeup requests
2181 */
2182 cancel_delayed_work(&smux_wakeup_delayed_work);
2183 }
2184}
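
/*
 * Worked example of the back-off above (illustrative only): with
 * pwr_wakeup_delay_us starting at 1, successive retries sleep for
 * 1, 2, 4, ... 16384 us via usleep_range().  Once the doubled delay
 * reaches SMUX_WAKEUP_DELAY_MIN (1 << 15 = 32768 us), the retry is
 * rescheduled as delayed work instead (32 ms, 65 ms, 131 ms, ...),
 * and the delay is capped at SMUX_WAKEUP_DELAY_MAX (1 << 20 us,
 * roughly 1.05 s).
 */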
2185
2186
2187/**
2188 * Inactivity timeout worker. Periodically scheduled when link is active.
2189 * When it detects inactivity, it will power-down the UART link.
2190 *
2191 * @work Work structure (not used)
2192 */
2193static void smux_inactivity_worker(struct work_struct *work)
2194{
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002195 struct smux_pkt_t *pkt;
2196 unsigned long flags;
2197
2198 spin_lock_irqsave(&smux.rx_lock_lha1, flags);
2199 spin_lock(&smux.tx_lock_lha2);
2200
2201 if (!smux.tx_activity_flag && !smux.rx_activity_flag) {
2202 /* no activity */
2203 if (smux.powerdown_enabled) {
2204 if (smux.power_state == SMUX_PWR_ON) {
2205 /* start power-down sequence */
2206 pkt = smux_alloc_pkt();
2207 if (pkt) {
Eric Holmbergff0b0112012-06-08 15:06:57 -06002208 SMUX_PWR("%s: Power %d->%d\n", __func__,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002209 smux.power_state,
2210 SMUX_PWR_TURNING_OFF);
2211 smux.power_state = SMUX_PWR_TURNING_OFF;
2212
2213 /* send power-down request */
2214 pkt->hdr.cmd = SMUX_CMD_PWR_CTL;
2215 pkt->hdr.flags = 0;
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002216 pkt->hdr.lcid = SMUX_BROADCAST_LCID;
2217 list_add_tail(&pkt->list,
2218 &smux.power_queue);
2219 queue_work(smux_tx_wq, &smux_tx_work);
2220 } else {
2221 pr_err("%s: packet alloc failed\n",
2222 __func__);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002223 }
2224 }
2225 } else {
2226 SMUX_DBG("%s: link inactive, but powerdown disabled\n",
2227 __func__);
2228 }
2229 }
2230 smux.tx_activity_flag = 0;
2231 smux.rx_activity_flag = 0;
2232
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002233 if (smux.power_state == SMUX_PWR_OFF_FLUSH) {
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002234 /* ready to power-down the UART */
Eric Holmbergff0b0112012-06-08 15:06:57 -06002235 SMUX_PWR("%s: Power %d->%d\n", __func__,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002236 smux.power_state, SMUX_PWR_OFF);
Eric Holmbergff0b0112012-06-08 15:06:57 -06002237 smux.power_state = SMUX_PWR_OFF;
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002238
2239 /* if data is pending, schedule a new wakeup */
2240 if (!list_empty(&smux.lch_tx_ready_list) ||
2241 !list_empty(&smux.power_queue))
2242 queue_work(smux_tx_wq, &smux_tx_work);
2243
2244 spin_unlock(&smux.tx_lock_lha2);
2245 spin_unlock_irqrestore(&smux.rx_lock_lha1, flags);
2246
2247 /* flush UART output queue and power down */
2248 smux_flush_tty();
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002249 smux_uart_power_off();
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002250 } else {
2251 spin_unlock(&smux.tx_lock_lha2);
2252 spin_unlock_irqrestore(&smux.rx_lock_lha1, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002253 }
2254
2255 /* reschedule inactivity worker */
2256 if (smux.power_state != SMUX_PWR_OFF)
2257 queue_delayed_work(smux_tx_wq, &smux_delayed_inactivity_work,
2258 msecs_to_jiffies(SMUX_INACTIVITY_TIMEOUT_MS));
2259}
2260
2261/**
Eric Holmbergb8435c82012-06-05 14:51:29 -06002262 * Remove RX retry packet from channel and free it.
2263 *
2264 * Must be called with state_lock_lhb1 locked.
2265 *
2266 * @ch Channel for retry packet
2267 * @retry Retry packet to remove
2268 */
2269void smux_remove_rx_retry(struct smux_lch_t *ch,
2270 struct smux_rx_pkt_retry *retry)
2271{
2272 list_del(&retry->rx_retry_list);
2273 --ch->rx_retry_queue_cnt;
2274 smux_free_pkt(retry->pkt);
2275 kfree(retry);
2276}
2277
2278/**
Eric Holmberg0560f7a2012-05-31 15:50:26 -06002279 * RX worker handles all receive operations.
2280 *
 2281 * @work Work structure contained in struct smux_rx_worker_data
2282 */
2283static void smux_rx_worker(struct work_struct *work)
2284{
2285 unsigned long flags;
2286 int used;
2287 int initial_rx_state;
2288 struct smux_rx_worker_data *w;
2289 const unsigned char *data;
2290 int len;
2291 int flag;
2292
2293 w = container_of(work, struct smux_rx_worker_data, work);
2294 data = w->data;
2295 len = w->len;
2296 flag = w->flag;
2297
2298 spin_lock_irqsave(&smux.rx_lock_lha1, flags);
2299 smux.rx_activity_flag = 1;
2300 spin_unlock_irqrestore(&smux.rx_lock_lha1, flags);
2301
2302 SMUX_DBG("%s: %p, len=%d, flag=%d\n", __func__, data, len, flag);
2303 used = 0;
2304 do {
2305 SMUX_DBG("%s: state %d; %d of %d\n",
2306 __func__, smux.rx_state, used, len);
2307 initial_rx_state = smux.rx_state;
2308
2309 switch (smux.rx_state) {
2310 case SMUX_RX_IDLE:
2311 smux_rx_handle_idle(data, len, &used, flag);
2312 break;
2313 case SMUX_RX_MAGIC:
2314 smux_rx_handle_magic(data, len, &used, flag);
2315 break;
2316 case SMUX_RX_HDR:
2317 smux_rx_handle_hdr(data, len, &used, flag);
2318 break;
2319 case SMUX_RX_PAYLOAD:
2320 smux_rx_handle_pkt_payload(data, len, &used, flag);
2321 break;
2322 default:
2323 SMUX_DBG("%s: invalid state %d\n",
2324 __func__, smux.rx_state);
2325 smux.rx_state = SMUX_RX_IDLE;
2326 break;
2327 }
2328 } while (used < len || smux.rx_state != initial_rx_state);
2329
2330 complete(&w->work_complete);
2331}
2332
2333/**
Eric Holmbergb8435c82012-06-05 14:51:29 -06002334 * RX Retry worker handles retrying get_rx_buffer calls that previously failed
2335 * because the client was not ready (-EAGAIN).
2336 *
2337 * @work Work structure contained in smux_lch_t structure
2338 */
2339static void smux_rx_retry_worker(struct work_struct *work)
2340{
2341 struct smux_lch_t *ch;
2342 struct smux_rx_pkt_retry *retry;
2343 union notifier_metadata metadata;
2344 int tmp;
2345 unsigned long flags;
2346
2347 ch = container_of(work, struct smux_lch_t, rx_retry_work.work);
2348
2349 /* get next retry packet */
2350 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2351 if (ch->local_state != SMUX_LCH_LOCAL_OPENED) {
2352 /* port has been closed - remove all retries */
2353 while (!list_empty(&ch->rx_retry_queue)) {
2354 retry = list_first_entry(&ch->rx_retry_queue,
2355 struct smux_rx_pkt_retry,
2356 rx_retry_list);
2357 smux_remove_rx_retry(ch, retry);
2358 }
2359 }
2360
2361 if (list_empty(&ch->rx_retry_queue)) {
2362 SMUX_DBG("%s: retry list empty for channel %d\n",
2363 __func__, ch->lcid);
2364 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2365 return;
2366 }
2367 retry = list_first_entry(&ch->rx_retry_queue,
2368 struct smux_rx_pkt_retry,
2369 rx_retry_list);
2370 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2371
2372 SMUX_DBG("%s: retrying rx pkt %p\n", __func__, retry);
2373 metadata.read.pkt_priv = 0;
2374 metadata.read.buffer = 0;
2375 tmp = ch->get_rx_buffer(ch->priv,
2376 (void **)&metadata.read.pkt_priv,
2377 (void **)&metadata.read.buffer,
2378 retry->pkt->hdr.payload_len);
2379 if (tmp == 0 && metadata.read.buffer) {
2380 /* have valid RX buffer */
2381 memcpy(metadata.read.buffer, retry->pkt->payload,
2382 retry->pkt->hdr.payload_len);
2383 metadata.read.len = retry->pkt->hdr.payload_len;
2384
2385 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2386 smux_remove_rx_retry(ch, retry);
2387 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2388
2389 schedule_notify(ch->lcid, SMUX_READ_DONE, &metadata);
2390 } else if (tmp == -EAGAIN ||
2391 (tmp == 0 && !metadata.read.buffer)) {
2392 /* retry again */
2393 retry->timeout_in_ms <<= 1;
2394 if (retry->timeout_in_ms > SMUX_RX_RETRY_MAX_MS) {
2395 /* timed out */
2396 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2397 smux_remove_rx_retry(ch, retry);
2398 schedule_notify(ch->lcid, SMUX_READ_FAIL, NULL);
2399 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2400 }
2401 } else {
2402 /* client error - drop packet */
2403 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2404 smux_remove_rx_retry(ch, retry);
2405 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2406
2407 schedule_notify(ch->lcid, SMUX_READ_FAIL, NULL);
2408 }
2409
2410 /* schedule next retry */
2411 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2412 if (!list_empty(&ch->rx_retry_queue)) {
2413 retry = list_first_entry(&ch->rx_retry_queue,
2414 struct smux_rx_pkt_retry,
2415 rx_retry_list);
2416 queue_delayed_work(smux_rx_wq, &ch->rx_retry_work,
2417 msecs_to_jiffies(retry->timeout_in_ms));
2418 }
2419 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2420}
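
/*
 * Illustrative sketch (not part of the driver): a minimal client
 * get_rx_buffer() callback showing the retry contract handled above.
 * Returning -EAGAIN (or 0 with a NULL buffer) makes SMUX retry with
 * exponential backoff between SMUX_RX_RETRY_MIN_MS and
 * SMUX_RX_RETRY_MAX_MS before giving up with SMUX_READ_FAIL; any other
 * error drops the packet immediately.  The readiness flag and the
 * GFP_ATOMIC allocation policy are assumptions for illustration.
 */
static bool example_client_ready;	/* hypothetical readiness flag */

static int __maybe_unused example_get_rx_buffer(void *priv, void **pkt_priv,
						void **buffer, int size)
{
	if (!example_client_ready)
		return -EAGAIN;		/* not ready; ask SMUX to retry later */

	/* GFP_ATOMIC keeps the sketch safe regardless of calling context */
	*buffer = kmalloc(size, GFP_ATOMIC);
	if (!*buffer)
		return -EAGAIN;		/* retry when memory is available */

	*pkt_priv = NULL;		/* returned in the SMUX_READ_DONE metadata */
	return 0;
}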
2421
2422/**
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002423 * Transmit worker handles serializing and transmitting packets onto the
2424 * underlying transport.
2425 *
2426 * @work Work structure (not used)
2427 */
2428static void smux_tx_worker(struct work_struct *work)
2429{
2430 struct smux_pkt_t *pkt;
2431 struct smux_lch_t *ch;
2432 unsigned low_wm_notif;
2433 unsigned lcid;
2434 unsigned long flags;
2435
2436
2437 /*
2438 * Transmit packets in round-robin fashion based upon ready
2439 * channels.
2440 *
2441 * To eliminate the need to hold a lock for the entire
2442 * iteration through the channel ready list, the head of the
2443 * ready-channel list is always the next channel to be
2444 * processed. To send a packet, the first valid packet in
2445 * the head channel is removed and the head channel is then
2446 * rescheduled at the end of the queue by removing it and
2447 * inserting after the tail. The locks can then be released
2448 * while the packet is processed.
2449 */
Eric Holmberged1f00c2012-06-07 09:45:18 -06002450 while (!smux.in_reset) {
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002451 pkt = NULL;
2452 low_wm_notif = 0;
2453
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002454 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002455
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002456 /* handle wakeup if needed */
2457 if (smux.power_state == SMUX_PWR_OFF) {
2458 if (!list_empty(&smux.lch_tx_ready_list) ||
2459 !list_empty(&smux.power_queue)) {
2460 /* data to transmit, do wakeup */
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002461 smux.pwr_wakeup_delay_us = 1;
Eric Holmbergff0b0112012-06-08 15:06:57 -06002462 SMUX_PWR("%s: Power %d->%d\n", __func__,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002463 smux.power_state,
2464 SMUX_PWR_TURNING_ON);
2465 smux.power_state = SMUX_PWR_TURNING_ON;
2466 spin_unlock_irqrestore(&smux.tx_lock_lha2,
2467 flags);
2468 smux_uart_power_on();
2469 queue_work(smux_tx_wq, &smux_wakeup_work);
2470 } else {
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002471 /* no activity -- stay asleep */
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002472 spin_unlock_irqrestore(&smux.tx_lock_lha2,
2473 flags);
2474 }
2475 break;
2476 }
2477
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002478 /* process any pending power packets */
2479 if (!list_empty(&smux.power_queue)) {
2480 pkt = list_first_entry(&smux.power_queue,
2481 struct smux_pkt_t, list);
2482 list_del(&pkt->list);
2483 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2484
2485 /* send the packet */
2486 SMUX_LOG_PKT_TX(pkt);
2487 if (!smux_byte_loopback) {
2488 smux_tx_tty(pkt);
2489 smux_flush_tty();
2490 } else {
2491 smux_tx_loopback(pkt);
2492 }
2493
2494 /* Adjust power state if this is a flush command */
2495 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
2496 if (smux.power_state == SMUX_PWR_TURNING_OFF_FLUSH &&
2497 pkt->hdr.cmd == SMUX_CMD_PWR_CTL &&
2498 (pkt->hdr.flags & SMUX_CMD_PWR_CTL_ACK)) {
Eric Holmbergff0b0112012-06-08 15:06:57 -06002499 SMUX_PWR("%s: Power %d->%d\n", __func__,
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002500 smux.power_state,
2501 SMUX_PWR_OFF_FLUSH);
2502 smux.power_state = SMUX_PWR_OFF_FLUSH;
2503 queue_work(smux_tx_wq, &smux_inactivity_work);
2504 }
2505 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2506
2507 smux_free_pkt(pkt);
2508 continue;
2509 }
2510
2511 /* get the next ready channel */
2512 if (list_empty(&smux.lch_tx_ready_list)) {
2513 /* no ready channels */
2514 SMUX_DBG("%s: no more ready channels, exiting\n",
2515 __func__);
2516 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2517 break;
2518 }
2519 smux.tx_activity_flag = 1;
2520
2521 if (smux.power_state != SMUX_PWR_ON) {
2522 /* channel not ready to transmit */
2523 SMUX_DBG("%s: can not tx with power state %d\n",
2524 __func__,
2525 smux.power_state);
2526 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2527 break;
2528 }
2529
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002530 /* get the next packet to send and rotate channel list */
2531 ch = list_first_entry(&smux.lch_tx_ready_list,
2532 struct smux_lch_t,
2533 tx_ready_list);
2534
2535 spin_lock(&ch->state_lock_lhb1);
2536 spin_lock(&ch->tx_lock_lhb2);
2537 if (!list_empty(&ch->tx_queue)) {
2538 /*
2539 * If remote TX flow control is enabled or
2540 * the channel is not fully opened, then only
2541 * send command packets.
2542 */
2543 if (ch->tx_flow_control || !IS_FULLY_OPENED(ch)) {
2544 struct smux_pkt_t *curr;
2545 list_for_each_entry(curr, &ch->tx_queue, list) {
2546 if (curr->hdr.cmd != SMUX_CMD_DATA) {
2547 pkt = curr;
2548 break;
2549 }
2550 }
2551 } else {
2552 /* get next cmd/data packet to send */
2553 pkt = list_first_entry(&ch->tx_queue,
2554 struct smux_pkt_t, list);
2555 }
2556 }
2557
2558 if (pkt) {
2559 list_del(&pkt->list);
2560
2561 /* update packet stats */
2562 if (pkt->hdr.cmd == SMUX_CMD_DATA) {
2563 --ch->tx_pending_data_cnt;
2564 if (ch->notify_lwm &&
2565 ch->tx_pending_data_cnt
2566 <= SMUX_WM_LOW) {
2567 ch->notify_lwm = 0;
2568 low_wm_notif = 1;
2569 }
2570 }
2571
2572 /* advance to the next ready channel */
2573 list_rotate_left(&smux.lch_tx_ready_list);
2574 } else {
2575 /* no data in channel to send, remove from ready list */
2576 list_del(&ch->tx_ready_list);
2577 INIT_LIST_HEAD(&ch->tx_ready_list);
2578 }
2579 lcid = ch->lcid;
2580 spin_unlock(&ch->tx_lock_lhb2);
2581 spin_unlock(&ch->state_lock_lhb1);
2582 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2583
2584 if (low_wm_notif)
2585 schedule_notify(lcid, SMUX_LOW_WM_HIT, NULL);
2586
2587 /* send the packet */
2588 smux_tx_pkt(ch, pkt);
2589 smux_free_pkt(pkt);
2590 }
2591}
2592
2593
2594/**********************************************************************/
2595/* Kernel API */
2596/**********************************************************************/
2597
2598/**
2599 * Set or clear channel option using the SMUX_CH_OPTION_* channel
2600 * flags.
2601 *
2602 * @lcid Logical channel ID
2603 * @set Options to set
2604 * @clear Options to clear
2605 *
2606 * @returns 0 for success, < 0 for failure
2607 */
2608int msm_smux_set_ch_option(uint8_t lcid, uint32_t set, uint32_t clear)
2609{
2610 unsigned long flags;
2611 struct smux_lch_t *ch;
2612 int tx_ready = 0;
2613 int ret = 0;
2614
2615 if (smux_assert_lch_id(lcid))
2616 return -ENXIO;
2617
2618 ch = &smux_lch[lcid];
2619 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2620
2621 /* Local loopback mode */
2622 if (set & SMUX_CH_OPTION_LOCAL_LOOPBACK)
2623 ch->local_mode = SMUX_LCH_MODE_LOCAL_LOOPBACK;
2624
2625 if (clear & SMUX_CH_OPTION_LOCAL_LOOPBACK)
2626 ch->local_mode = SMUX_LCH_MODE_NORMAL;
2627
2628 /* Remote loopback mode */
2629 if (set & SMUX_CH_OPTION_REMOTE_LOOPBACK)
2630 ch->local_mode = SMUX_LCH_MODE_REMOTE_LOOPBACK;
2631
2632 if (clear & SMUX_CH_OPTION_REMOTE_LOOPBACK)
2633 ch->local_mode = SMUX_LCH_MODE_NORMAL;
2634
2635 /* Flow control */
2636 if (set & SMUX_CH_OPTION_REMOTE_TX_STOP) {
2637 ch->local_tiocm |= SMUX_CMD_STATUS_FLOW_CNTL;
2638 ret = smux_send_status_cmd(ch);
2639 tx_ready = 1;
2640 }
2641
2642 if (clear & SMUX_CH_OPTION_REMOTE_TX_STOP) {
2643 ch->local_tiocm &= ~SMUX_CMD_STATUS_FLOW_CNTL;
2644 ret = smux_send_status_cmd(ch);
2645 tx_ready = 1;
2646 }
2647
2648 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2649
2650 if (tx_ready)
2651 list_channel(ch);
2652
2653 return ret;
2654}
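
/*
 * Illustrative sketch (not part of the driver): putting a channel into
 * local loopback mode for testing and then restoring normal routing.
 * The caller and channel ID are hypothetical; only the
 * SMUX_CH_OPTION_LOCAL_LOOPBACK flag used here is real.
 */
static int __maybe_unused example_toggle_loopback(uint8_t lcid, bool enable)
{
	if (enable)
		return msm_smux_set_ch_option(lcid,
					      SMUX_CH_OPTION_LOCAL_LOOPBACK, 0);

	/* clearing the option returns the channel to normal routing */
	return msm_smux_set_ch_option(lcid, 0, SMUX_CH_OPTION_LOCAL_LOOPBACK);
}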
2655
2656/**
2657 * Starts the opening sequence for a logical channel.
2658 *
2659 * @lcid Logical channel ID
2660 * @priv Free for client usage
2661 * @notify Event notification function
2662 * @get_rx_buffer Function used to provide a receive buffer to SMUX
2663 *
2664 * @returns 0 for success, <0 otherwise
2665 *
 2666 * A channel must be fully closed (either not previously opened, or
 2667 * msm_smux_close() has been called and the SMUX_DISCONNECTED notification
 2668 * has been received).
2669 *
 2670 * Once the remote side is opened, the client will receive a SMUX_CONNECTED
2671 * event.
2672 */
2673int msm_smux_open(uint8_t lcid, void *priv,
2674 void (*notify)(void *priv, int event_type, const void *metadata),
2675 int (*get_rx_buffer)(void *priv, void **pkt_priv, void **buffer,
2676 int size))
2677{
2678 int ret;
2679 struct smux_lch_t *ch;
2680 struct smux_pkt_t *pkt;
2681 int tx_ready = 0;
2682 unsigned long flags;
2683
2684 if (smux_assert_lch_id(lcid))
2685 return -ENXIO;
2686
2687 ch = &smux_lch[lcid];
2688 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2689
2690 if (ch->local_state == SMUX_LCH_LOCAL_CLOSING) {
2691 ret = -EAGAIN;
2692 goto out;
2693 }
2694
2695 if (ch->local_state != SMUX_LCH_LOCAL_CLOSED) {
2696 pr_err("%s: open lcid %d local state %x invalid\n",
2697 __func__, lcid, ch->local_state);
2698 ret = -EINVAL;
2699 goto out;
2700 }
2701
2702 SMUX_DBG("lcid %d local state 0x%x -> 0x%x\n", lcid,
2703 ch->local_state,
2704 SMUX_LCH_LOCAL_OPENING);
2705
2706 ch->local_state = SMUX_LCH_LOCAL_OPENING;
2707
2708 ch->priv = priv;
2709 ch->notify = notify;
2710 ch->get_rx_buffer = get_rx_buffer;
2711 ret = 0;
2712
2713 /* Send Open Command */
2714 pkt = smux_alloc_pkt();
2715 if (!pkt) {
2716 ret = -ENOMEM;
2717 goto out;
2718 }
2719 pkt->hdr.magic = SMUX_MAGIC;
2720 pkt->hdr.cmd = SMUX_CMD_OPEN_LCH;
2721 pkt->hdr.flags = SMUX_CMD_OPEN_POWER_COLLAPSE;
2722 if (ch->local_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK)
2723 pkt->hdr.flags |= SMUX_CMD_OPEN_REMOTE_LOOPBACK;
2724 pkt->hdr.lcid = lcid;
2725 pkt->hdr.payload_len = 0;
2726 pkt->hdr.pad_len = 0;
2727 smux_tx_queue(pkt, ch, 0);
2728 tx_ready = 1;
2729
2730out:
2731 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2732 if (tx_ready)
2733 list_channel(ch);
2734 return ret;
2735}
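
/*
 * Illustrative sketch of a client opening a channel (not part of the
 * driver).  The logging, the empty handling of the data events, and the
 * reuse of example_get_rx_buffer() sketched after smux_rx_retry_worker()
 * above are assumptions for illustration; the metadata passed with each
 * event is described in linux/smux.h.
 */
static void __maybe_unused example_notify(void *priv, int event_type,
					  const void *metadata)
{
	switch (event_type) {
	case SMUX_CONNECTED:
		pr_info("example: channel fully opened\n");
		break;
	case SMUX_DISCONNECTED:
		pr_info("example: channel closed\n");
		break;
	case SMUX_READ_DONE:
	case SMUX_READ_FAIL:
	case SMUX_WRITE_DONE:
	case SMUX_WRITE_FAIL:
		/* interpret @metadata based on the event (see linux/smux.h) */
		break;
	default:
		break;
	}
}

static int __maybe_unused example_open_channel(uint8_t lcid)
{
	int ret;

	/* request the open; SMUX_CONNECTED arrives once the remote side opens */
	ret = msm_smux_open(lcid, NULL, example_notify, example_get_rx_buffer);
	if (ret < 0)
		pr_err("example: open of lcid %d failed %d\n", lcid, ret);
	return ret;
}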
2736
2737/**
2738 * Starts the closing sequence for a logical channel.
2739 *
2740 * @lcid Logical channel ID
2741 *
2742 * @returns 0 for success, <0 otherwise
2743 *
 2744 * Once the close event has been acknowledged by the remote side, the client
2745 * will receive a SMUX_DISCONNECTED notification.
2746 */
2747int msm_smux_close(uint8_t lcid)
2748{
2749 int ret = 0;
2750 struct smux_lch_t *ch;
2751 struct smux_pkt_t *pkt;
2752 int tx_ready = 0;
2753 unsigned long flags;
2754
2755 if (smux_assert_lch_id(lcid))
2756 return -ENXIO;
2757
2758 ch = &smux_lch[lcid];
2759 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2760 ch->local_tiocm = 0x0;
2761 ch->remote_tiocm = 0x0;
2762 ch->tx_pending_data_cnt = 0;
2763 ch->notify_lwm = 0;
2764
2765 /* Purge TX queue */
2766 spin_lock(&ch->tx_lock_lhb2);
Eric Holmberged1f00c2012-06-07 09:45:18 -06002767 smux_purge_ch_tx_queue(ch);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002768 spin_unlock(&ch->tx_lock_lhb2);
2769
2770 /* Send Close Command */
2771 if (ch->local_state == SMUX_LCH_LOCAL_OPENED ||
2772 ch->local_state == SMUX_LCH_LOCAL_OPENING) {
2773 SMUX_DBG("lcid %d local state 0x%x -> 0x%x\n", lcid,
2774 ch->local_state,
2775 SMUX_LCH_LOCAL_CLOSING);
2776
2777 ch->local_state = SMUX_LCH_LOCAL_CLOSING;
2778 pkt = smux_alloc_pkt();
2779 if (pkt) {
2780 pkt->hdr.cmd = SMUX_CMD_CLOSE_LCH;
2781 pkt->hdr.flags = 0;
2782 pkt->hdr.lcid = lcid;
2783 pkt->hdr.payload_len = 0;
2784 pkt->hdr.pad_len = 0;
2785 smux_tx_queue(pkt, ch, 0);
2786 tx_ready = 1;
2787 } else {
2788 pr_err("%s: pkt allocation failed\n", __func__);
2789 ret = -ENOMEM;
2790 }
Eric Holmbergb8435c82012-06-05 14:51:29 -06002791
2792 /* Purge RX retry queue */
2793 if (ch->rx_retry_queue_cnt)
2794 queue_delayed_work(smux_rx_wq, &ch->rx_retry_work, 0);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002795 }
2796 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2797
2798 if (tx_ready)
2799 list_channel(ch);
2800
2801 return ret;
2802}
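
/*
 * Illustrative sketch (not part of the driver): tearing down a channel.
 * The error handling is an assumption; the key point is that the channel
 * may not be reopened until the SMUX_DISCONNECTED event is delivered to
 * the client's notify callback.
 */
static int __maybe_unused example_close_channel(uint8_t lcid)
{
	int ret;

	ret = msm_smux_close(lcid);
	if (ret < 0)
		pr_err("example: close of lcid %d failed %d\n", lcid, ret);

	/*
	 * Wait for SMUX_DISCONNECTED in the notify callback before calling
	 * msm_smux_open() on this lcid again.
	 */
	return ret;
}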
2803
2804/**
2805 * Write data to a logical channel.
2806 *
2807 * @lcid Logical channel ID
2808 * @pkt_priv Client data that will be returned with the SMUX_WRITE_DONE or
2809 * SMUX_WRITE_FAIL notification.
2810 * @data Data to write
2811 * @len Length of @data
2812 *
2813 * @returns 0 for success, <0 otherwise
2814 *
2815 * Data may be written immediately after msm_smux_open() is called,
2816 * but the data will wait in the transmit queue until the channel has
2817 * been fully opened.
2818 *
2819 * Once the data has been written, the client will receive either a completion
2820 * (SMUX_WRITE_DONE) or a failure notice (SMUX_WRITE_FAIL).
2821 */
2822int msm_smux_write(uint8_t lcid, void *pkt_priv, const void *data, int len)
2823{
2824 struct smux_lch_t *ch;
2825 struct smux_pkt_t *pkt;
2826 int tx_ready = 0;
2827 unsigned long flags;
2828 int ret;
2829
2830 if (smux_assert_lch_id(lcid))
2831 return -ENXIO;
2832
2833 ch = &smux_lch[lcid];
2834 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2835
2836 if (ch->local_state != SMUX_LCH_LOCAL_OPENED &&
2837 ch->local_state != SMUX_LCH_LOCAL_OPENING) {
 2838		pr_err("%s: invalid local state %d channel %d\n",
2839 __func__, ch->local_state, lcid);
2840 ret = -EINVAL;
2841 goto out;
2842 }
2843
2844 if (len > SMUX_MAX_PKT_SIZE - sizeof(struct smux_hdr_t)) {
2845 pr_err("%s: payload %d too large\n",
2846 __func__, len);
2847 ret = -E2BIG;
2848 goto out;
2849 }
2850
2851 pkt = smux_alloc_pkt();
2852 if (!pkt) {
2853 ret = -ENOMEM;
2854 goto out;
2855 }
2856
2857 pkt->hdr.cmd = SMUX_CMD_DATA;
2858 pkt->hdr.lcid = lcid;
2859 pkt->hdr.flags = 0;
2860 pkt->hdr.payload_len = len;
2861 pkt->payload = (void *)data;
2862 pkt->priv = pkt_priv;
2863 pkt->hdr.pad_len = 0;
2864
2865 spin_lock(&ch->tx_lock_lhb2);
2866 /* verify high watermark */
 2867	SMUX_DBG("%s: pending %d\n", __func__, ch->tx_pending_data_cnt);
2868
2869 if (ch->tx_pending_data_cnt >= SMUX_WM_HIGH) {
2870 pr_err("%s: ch %d high watermark %d exceeded %d\n",
2871 __func__, lcid, SMUX_WM_HIGH,
2872 ch->tx_pending_data_cnt);
2873 ret = -EAGAIN;
2874 goto out_inner;
2875 }
2876
2877 /* queue packet for transmit */
2878 if (++ch->tx_pending_data_cnt == SMUX_WM_HIGH) {
2879 ch->notify_lwm = 1;
2880 pr_err("%s: high watermark hit\n", __func__);
2881 schedule_notify(lcid, SMUX_HIGH_WM_HIT, NULL);
2882 }
2883 list_add_tail(&pkt->list, &ch->tx_queue);
2884
2885 /* add to ready list */
2886 if (IS_FULLY_OPENED(ch))
2887 tx_ready = 1;
2888
2889 ret = 0;
2890
2891out_inner:
2892 spin_unlock(&ch->tx_lock_lhb2);
2893
2894out:
2895 if (ret)
2896 smux_free_pkt(pkt);
2897 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2898
2899 if (tx_ready)
2900 list_channel(ch);
2901
2902 return ret;
2903}
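
/*
 * Illustrative sketch (not part of the driver): queueing a write while
 * respecting the watermarks described above.  Passing the buffer as
 * @pkt_priv so it can be freed from the SMUX_WRITE_DONE/SMUX_WRITE_FAIL
 * notification, and simply returning -EAGAIN when the channel is full,
 * are assumptions for illustration.
 */
static int __maybe_unused example_send(uint8_t lcid, void *buf, int len)
{
	int ret;

	if (msm_smux_is_ch_full(lcid) > 0)
		return -EAGAIN;		/* at the high watermark; try again later */

	/* @buf must remain valid until the write-done/fail notification */
	ret = msm_smux_write(lcid, buf, buf, len);
	if (ret == -EAGAIN)
		pr_info("example: lcid %d hit the high watermark\n", lcid);

	return ret;
}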
2904
2905/**
2906 * Returns true if the TX queue is currently full (high water mark).
2907 *
2908 * @lcid Logical channel ID
2909 * @returns 0 if channel is not full
2910 * 1 if it is full
2911 * < 0 for error
2912 */
2913int msm_smux_is_ch_full(uint8_t lcid)
2914{
2915 struct smux_lch_t *ch;
2916 unsigned long flags;
2917 int is_full = 0;
2918
2919 if (smux_assert_lch_id(lcid))
2920 return -ENXIO;
2921
2922 ch = &smux_lch[lcid];
2923
2924 spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
2925 if (ch->tx_pending_data_cnt >= SMUX_WM_HIGH)
2926 is_full = 1;
2927 spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);
2928
2929 return is_full;
2930}
2931
2932/**
 2933 * Returns true if the TX queue has space for more packets (it is at or
 2934 * below the low water mark).
2935 *
2936 * @lcid Logical channel ID
2937 * @returns 0 if channel is above low watermark
2938 * 1 if it's at or below the low watermark
2939 * < 0 for error
2940 */
2941int msm_smux_is_ch_low(uint8_t lcid)
2942{
2943 struct smux_lch_t *ch;
2944 unsigned long flags;
2945 int is_low = 0;
2946
2947 if (smux_assert_lch_id(lcid))
2948 return -ENXIO;
2949
2950 ch = &smux_lch[lcid];
2951
2952 spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
2953 if (ch->tx_pending_data_cnt <= SMUX_WM_LOW)
2954 is_low = 1;
2955 spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);
2956
2957 return is_low;
2958}
2959
2960/**
2961 * Send TIOCM status update.
2962 *
2963 * @ch Channel for update
2964 *
2965 * @returns 0 for success, <0 for failure
2966 *
2967 * Channel lock must be held before calling.
2968 */
2969static int smux_send_status_cmd(struct smux_lch_t *ch)
2970{
2971 struct smux_pkt_t *pkt;
2972
2973 if (!ch)
2974 return -EINVAL;
2975
2976 pkt = smux_alloc_pkt();
2977 if (!pkt)
2978 return -ENOMEM;
2979
2980 pkt->hdr.lcid = ch->lcid;
2981 pkt->hdr.cmd = SMUX_CMD_STATUS;
2982 pkt->hdr.flags = ch->local_tiocm;
2983 pkt->hdr.payload_len = 0;
2984 pkt->hdr.pad_len = 0;
2985 smux_tx_queue(pkt, ch, 0);
2986
2987 return 0;
2988}
2989
2990/**
2991 * Internal helper function for getting the TIOCM status with
2992 * state_lock_lhb1 already locked.
2993 *
2994 * @ch Channel pointer
2995 *
2996 * @returns TIOCM status
2997 */
2998static long msm_smux_tiocm_get_atomic(struct smux_lch_t *ch)
2999{
3000 long status = 0x0;
3001
3002 status |= (ch->remote_tiocm & SMUX_CMD_STATUS_RTC) ? TIOCM_DSR : 0;
3003 status |= (ch->remote_tiocm & SMUX_CMD_STATUS_RTR) ? TIOCM_CTS : 0;
3004 status |= (ch->remote_tiocm & SMUX_CMD_STATUS_RI) ? TIOCM_RI : 0;
3005 status |= (ch->remote_tiocm & SMUX_CMD_STATUS_DCD) ? TIOCM_CD : 0;
3006
3007 status |= (ch->local_tiocm & SMUX_CMD_STATUS_RTC) ? TIOCM_DTR : 0;
3008 status |= (ch->local_tiocm & SMUX_CMD_STATUS_RTR) ? TIOCM_RTS : 0;
3009
3010 return status;
3011}
3012
3013/**
3014 * Get the TIOCM status bits.
3015 *
3016 * @lcid Logical channel ID
3017 *
3018 * @returns >= 0 TIOCM status bits
3019 * < 0 Error condition
3020 */
3021long msm_smux_tiocm_get(uint8_t lcid)
3022{
3023 struct smux_lch_t *ch;
3024 unsigned long flags;
3025 long status = 0x0;
3026
3027 if (smux_assert_lch_id(lcid))
3028 return -ENXIO;
3029
3030 ch = &smux_lch[lcid];
3031 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
3032 status = msm_smux_tiocm_get_atomic(ch);
3033 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
3034
3035 return status;
3036}
3037
3038/**
3039 * Set/clear the TIOCM status bits.
3040 *
3041 * @lcid Logical channel ID
3042 * @set Bits to set
3043 * @clear Bits to clear
3044 *
3045 * @returns 0 for success; < 0 for failure
3046 *
3047 * If a bit is specified in both the @set and @clear masks, then the clear bit
3048 * definition will dominate and the bit will be cleared.
3049 */
3050int msm_smux_tiocm_set(uint8_t lcid, uint32_t set, uint32_t clear)
3051{
3052 struct smux_lch_t *ch;
3053 unsigned long flags;
3054 uint8_t old_status;
3055 uint8_t status_set = 0x0;
3056 uint8_t status_clear = 0x0;
3057 int tx_ready = 0;
3058 int ret = 0;
3059
3060 if (smux_assert_lch_id(lcid))
3061 return -ENXIO;
3062
3063 ch = &smux_lch[lcid];
3064 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
3065
3066 status_set |= (set & TIOCM_DTR) ? SMUX_CMD_STATUS_RTC : 0;
3067 status_set |= (set & TIOCM_RTS) ? SMUX_CMD_STATUS_RTR : 0;
3068 status_set |= (set & TIOCM_RI) ? SMUX_CMD_STATUS_RI : 0;
3069 status_set |= (set & TIOCM_CD) ? SMUX_CMD_STATUS_DCD : 0;
3070
3071 status_clear |= (clear & TIOCM_DTR) ? SMUX_CMD_STATUS_RTC : 0;
3072 status_clear |= (clear & TIOCM_RTS) ? SMUX_CMD_STATUS_RTR : 0;
3073 status_clear |= (clear & TIOCM_RI) ? SMUX_CMD_STATUS_RI : 0;
3074 status_clear |= (clear & TIOCM_CD) ? SMUX_CMD_STATUS_DCD : 0;
3075
3076 old_status = ch->local_tiocm;
3077 ch->local_tiocm |= status_set;
3078 ch->local_tiocm &= ~status_clear;
3079
3080 if (ch->local_tiocm != old_status) {
3081 ret = smux_send_status_cmd(ch);
3082 tx_ready = 1;
3083 }
3084 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
3085
3086 if (tx_ready)
3087 list_channel(ch);
3088
3089 return ret;
3090}
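
/*
 * Illustrative sketch (not part of the driver): asserting DTR/RTS on a
 * channel and reading back the modem-status bits.  The channel ID and
 * the logging are assumptions for illustration.
 */
static void __maybe_unused example_assert_dtr_rts(uint8_t lcid)
{
	long status;

	/* set DTR and RTS; a zero clear mask leaves the other bits alone */
	if (msm_smux_tiocm_set(lcid, TIOCM_DTR | TIOCM_RTS, 0) < 0)
		return;

	status = msm_smux_tiocm_get(lcid);
	if (status >= 0)
		pr_info("example: lcid %d tiocm 0x%lx\n", lcid, status);
}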
3091
3092/**********************************************************************/
Eric Holmberged1f00c2012-06-07 09:45:18 -06003093/* Subsystem Restart */
3094/**********************************************************************/
3095static struct notifier_block ssr_notifier = {
3096 .notifier_call = ssr_notifier_cb,
3097};
3098
3099/**
3100 * Handle Subsystem Restart (SSR) notifications.
3101 *
3102 * @this Pointer to ssr_notifier
3103 * @code SSR Code
3104 * @data Data pointer (not used)
3105 */
3106static int ssr_notifier_cb(struct notifier_block *this,
3107 unsigned long code,
3108 void *data)
3109{
3110 unsigned long flags;
3111 int power_off_uart = 0;
3112
3113 if (code != SUBSYS_AFTER_SHUTDOWN)
3114 return NOTIFY_DONE;
3115
3116 /* Cleanup channels */
3117 smux_lch_purge();
3118
3119 /* Power-down UART */
3120 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
3121 if (smux.power_state != SMUX_PWR_OFF) {
Eric Holmbergff0b0112012-06-08 15:06:57 -06003122 SMUX_PWR("%s: SSR - turning off UART\n", __func__);
Eric Holmberged1f00c2012-06-07 09:45:18 -06003123 smux.power_state = SMUX_PWR_OFF;
3124 power_off_uart = 1;
3125 }
3126 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
3127
3128 if (power_off_uart)
3129 smux_uart_power_off();
3130
3131 return NOTIFY_DONE;
3132}
3133
3134/**********************************************************************/
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003135/* Line Discipline Interface */
3136/**********************************************************************/
Eric Holmberged1f00c2012-06-07 09:45:18 -06003137static void smux_pdev_release(struct device *dev)
3138{
3139 struct platform_device *pdev;
3140
3141 pdev = container_of(dev, struct platform_device, dev);
3142 SMUX_DBG("%s: releasing pdev %p '%s'\n", __func__, pdev, pdev->name);
3143 memset(&pdev->dev, 0x0, sizeof(pdev->dev));
3144}
3145
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003146static int smuxld_open(struct tty_struct *tty)
3147{
3148 int i;
3149 int tmp;
3150 unsigned long flags;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003151
3152 if (!smux.is_initialized)
3153 return -ENODEV;
3154
Eric Holmberged1f00c2012-06-07 09:45:18 -06003155 mutex_lock(&smux.mutex_lha0);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003156 if (smux.ld_open_count) {
3157 pr_err("%s: %p multiple instances not supported\n",
3158 __func__, tty);
Eric Holmberged1f00c2012-06-07 09:45:18 -06003159 mutex_unlock(&smux.mutex_lha0);
Eric Holmberg902c51e2012-05-29 12:12:16 -06003160 return -EEXIST;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003161 }
3162
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003163 if (tty->ops->write == NULL) {
Eric Holmberged1f00c2012-06-07 09:45:18 -06003164		pr_err("%s: tty->ops->write is NULL\n", __func__);
3165 mutex_unlock(&smux.mutex_lha0);
Eric Holmberg902c51e2012-05-29 12:12:16 -06003166 return -EINVAL;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003167 }
3168
3169 /* connect to TTY */
Eric Holmberged1f00c2012-06-07 09:45:18 -06003170 ++smux.ld_open_count;
3171 smux.in_reset = 0;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003172 smux.tty = tty;
3173 tty->disc_data = &smux;
3174 tty->receive_room = TTY_RECEIVE_ROOM;
3175 tty_driver_flush_buffer(tty);
3176
3177 /* power-down the UART if we are idle */
Eric Holmberged1f00c2012-06-07 09:45:18 -06003178 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003179 if (smux.power_state == SMUX_PWR_OFF) {
Eric Holmbergff0b0112012-06-08 15:06:57 -06003180 SMUX_PWR("%s: powering off uart\n", __func__);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003181 smux.power_state = SMUX_PWR_OFF_FLUSH;
Eric Holmberged1f00c2012-06-07 09:45:18 -06003182 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003183 queue_work(smux_tx_wq, &smux_inactivity_work);
3184 } else {
Eric Holmberged1f00c2012-06-07 09:45:18 -06003185 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003186 }
3187
3188 /* register platform devices */
3189 for (i = 0; i < ARRAY_SIZE(smux_devs); ++i) {
Eric Holmberged1f00c2012-06-07 09:45:18 -06003190 SMUX_DBG("%s: register pdev '%s'\n",
3191 __func__, smux_devs[i].name);
3192 smux_devs[i].dev.release = smux_pdev_release;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003193 tmp = platform_device_register(&smux_devs[i]);
3194 if (tmp)
3195 pr_err("%s: error %d registering device %s\n",
3196 __func__, tmp, smux_devs[i].name);
3197 }
Eric Holmberged1f00c2012-06-07 09:45:18 -06003198 mutex_unlock(&smux.mutex_lha0);
Eric Holmberg902c51e2012-05-29 12:12:16 -06003199 return 0;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003200}
3201
3202static void smuxld_close(struct tty_struct *tty)
3203{
3204 unsigned long flags;
Eric Holmberged1f00c2012-06-07 09:45:18 -06003205 int power_up_uart = 0;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003206 int i;
3207
Eric Holmberged1f00c2012-06-07 09:45:18 -06003208 SMUX_DBG("%s: ldisc unload\n", __func__);
3209 mutex_lock(&smux.mutex_lha0);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003210 if (smux.ld_open_count <= 0) {
3211 pr_err("%s: invalid ld count %d\n", __func__,
3212 smux.ld_open_count);
Eric Holmberged1f00c2012-06-07 09:45:18 -06003213 mutex_unlock(&smux.mutex_lha0);
Eric Holmberg902c51e2012-05-29 12:12:16 -06003214 return;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003215 }
Eric Holmberged1f00c2012-06-07 09:45:18 -06003216 smux.in_reset = 1;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003217 --smux.ld_open_count;
Eric Holmberged1f00c2012-06-07 09:45:18 -06003218
3219 /* Cleanup channels */
3220 smux_lch_purge();
3221
3222 /* Unregister platform devices */
3223 for (i = 0; i < ARRAY_SIZE(smux_devs); ++i) {
3224 SMUX_DBG("%s: unregister pdev '%s'\n",
3225 __func__, smux_devs[i].name);
3226 platform_device_unregister(&smux_devs[i]);
3227 }
3228
3229 /* Schedule UART power-up if it's down */
3230 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
Eric Holmbergffddd4c2012-06-08 12:37:51 -06003231 if (smux.power_state == SMUX_PWR_OFF)
Eric Holmberged1f00c2012-06-07 09:45:18 -06003232 power_up_uart = 1;
Eric Holmbergffddd4c2012-06-08 12:37:51 -06003233 smux.power_state = SMUX_PWR_OFF;
Eric Holmberged1f00c2012-06-07 09:45:18 -06003234 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
3235
3236 if (power_up_uart)
3237 smux_uart_power_on();
3238
3239 /* Disconnect from TTY */
3240 smux.tty = NULL;
3241 mutex_unlock(&smux.mutex_lha0);
3242 SMUX_DBG("%s: ldisc complete\n", __func__);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003243}
3244
3245/**
3246 * Receive data from TTY Line Discipline.
3247 *
3248 * @tty TTY structure
3249 * @cp Character data
3250 * @fp Flag data
3251 * @count Size of character and flag data
3252 */
3253void smuxld_receive_buf(struct tty_struct *tty, const unsigned char *cp,
3254 char *fp, int count)
3255{
3256 int i;
3257 int last_idx = 0;
3258 const char *tty_name = NULL;
3259 char *f;
3260
3261 if (smux_debug_mask & MSM_SMUX_DEBUG)
3262 print_hex_dump(KERN_INFO, "smux tty rx: ", DUMP_PREFIX_OFFSET,
3263 16, 1, cp, count, true);
3264
3265 /* verify error flags */
3266 for (i = 0, f = fp; i < count; ++i, ++f) {
3267 if (*f != TTY_NORMAL) {
3268 if (tty)
3269 tty_name = tty->name;
3270 pr_err("%s: TTY %s Error %d (%s)\n", __func__,
3271 tty_name, *f, tty_flag_to_str(*f));
3272
3273 /* feed all previous valid data to the parser */
3274 smux_rx_state_machine(cp + last_idx, i - last_idx,
3275 TTY_NORMAL);
3276
3277 /* feed bad data to parser */
3278 smux_rx_state_machine(cp + i, 1, *f);
3279 last_idx = i + 1;
3280 }
3281 }
3282
3283 /* feed data to RX state machine */
3284 smux_rx_state_machine(cp + last_idx, count - last_idx, TTY_NORMAL);
3285}
3286
3287static void smuxld_flush_buffer(struct tty_struct *tty)
3288{
3289 pr_err("%s: not supported\n", __func__);
3290}
3291
3292static ssize_t smuxld_chars_in_buffer(struct tty_struct *tty)
3293{
3294 pr_err("%s: not supported\n", __func__);
3295 return -ENODEV;
3296}
3297
3298static ssize_t smuxld_read(struct tty_struct *tty, struct file *file,
3299 unsigned char __user *buf, size_t nr)
3300{
3301 pr_err("%s: not supported\n", __func__);
3302 return -ENODEV;
3303}
3304
3305static ssize_t smuxld_write(struct tty_struct *tty, struct file *file,
3306 const unsigned char *buf, size_t nr)
3307{
3308 pr_err("%s: not supported\n", __func__);
3309 return -ENODEV;
3310}
3311
3312static int smuxld_ioctl(struct tty_struct *tty, struct file *file,
3313 unsigned int cmd, unsigned long arg)
3314{
3315 pr_err("%s: not supported\n", __func__);
3316 return -ENODEV;
3317}
3318
3319static unsigned int smuxld_poll(struct tty_struct *tty, struct file *file,
3320 struct poll_table_struct *tbl)
3321{
3322 pr_err("%s: not supported\n", __func__);
3323 return -ENODEV;
3324}
3325
3326static void smuxld_write_wakeup(struct tty_struct *tty)
3327{
3328 pr_err("%s: not supported\n", __func__);
3329}
3330
3331static struct tty_ldisc_ops smux_ldisc_ops = {
3332 .owner = THIS_MODULE,
3333 .magic = TTY_LDISC_MAGIC,
3334 .name = "n_smux",
3335 .open = smuxld_open,
3336 .close = smuxld_close,
3337 .flush_buffer = smuxld_flush_buffer,
3338 .chars_in_buffer = smuxld_chars_in_buffer,
3339 .read = smuxld_read,
3340 .write = smuxld_write,
3341 .ioctl = smuxld_ioctl,
3342 .poll = smuxld_poll,
3343 .receive_buf = smuxld_receive_buf,
3344 .write_wakeup = smuxld_write_wakeup
3345};
3346
3347static int __init smux_init(void)
3348{
3349 int ret;
3350
Eric Holmberged1f00c2012-06-07 09:45:18 -06003351 mutex_init(&smux.mutex_lha0);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003352
3353 spin_lock_init(&smux.rx_lock_lha1);
3354 smux.rx_state = SMUX_RX_IDLE;
3355 smux.power_state = SMUX_PWR_OFF;
3356 smux.pwr_wakeup_delay_us = 1;
3357 smux.powerdown_enabled = 0;
Eric Holmbergffddd4c2012-06-08 12:37:51 -06003358 INIT_LIST_HEAD(&smux.power_queue);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003359 smux.rx_activity_flag = 0;
3360 smux.tx_activity_flag = 0;
3361 smux.recv_len = 0;
3362 smux.tty = NULL;
3363 smux.ld_open_count = 0;
3364 smux.in_reset = 0;
3365 smux.is_initialized = 1;
3366 smux_byte_loopback = 0;
3367
3368 spin_lock_init(&smux.tx_lock_lha2);
3369 INIT_LIST_HEAD(&smux.lch_tx_ready_list);
3370
3371 ret = tty_register_ldisc(N_SMUX, &smux_ldisc_ops);
3372 if (ret != 0) {
3373 pr_err("%s: error %d registering line discipline\n",
3374 __func__, ret);
3375 return ret;
3376 }
3377
Eric Holmberged1f00c2012-06-07 09:45:18 -06003378 subsys_notif_register_notifier("qsc", &ssr_notifier);
3379
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003380 ret = lch_init();
3381 if (ret != 0) {
3382 pr_err("%s: lch_init failed\n", __func__);
3383 return ret;
3384 }
3385
3386 return 0;
3387}
3388
3389static void __exit smux_exit(void)
3390{
3391 int ret;
3392
3393 ret = tty_unregister_ldisc(N_SMUX);
3394 if (ret != 0) {
3395 pr_err("%s error %d unregistering line discipline\n",
3396 __func__, ret);
3397 return;
3398 }
3399}
3400
3401module_init(smux_init);
3402module_exit(smux_exit);
3403
3404MODULE_DESCRIPTION("Serial Mux TTY Line Discipline");
3405MODULE_LICENSE("GPL v2");
3406MODULE_ALIAS_LDISC(N_SMUX);