/* drivers/tty/n_smux.c
 *
 * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/tty_driver.h>
#include <linux/smux.h>
#include <linux/list.h>
#include <linux/kfifo.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <mach/subsystem_notif.h>
#include <mach/subsystem_restart.h>
#include <mach/msm_serial_hs.h>
#include "smux_private.h"
#include "smux_loopback.h"

#define SMUX_NOTIFY_FIFO_SIZE	128
#define SMUX_TX_QUEUE_SIZE	256
#define SMUX_PKT_LOG_SIZE	80

/* Maximum size we can accept in a single RX buffer */
#define TTY_RECEIVE_ROOM	65536
#define TTY_BUFFER_FULL_WAIT_MS	50

/* maximum sleep time between wakeup attempts */
#define SMUX_WAKEUP_DELAY_MAX	(1 << 20)

/* minimum delay for scheduling delayed work */
#define SMUX_WAKEUP_DELAY_MIN	(1 << 15)

/* inactivity timeout for no rx/tx activity */
#define SMUX_INACTIVITY_TIMEOUT_MS	1000

/* RX get_rx_buffer retry timeout values */
#define SMUX_RX_RETRY_MIN_MS	(1 << 0)	/* 1 ms */
#define SMUX_RX_RETRY_MAX_MS	(1 << 10)	/* 1024 ms */

enum {
	MSM_SMUX_DEBUG = 1U << 0,
	MSM_SMUX_INFO = 1U << 1,
	MSM_SMUX_POWER_INFO = 1U << 2,
	MSM_SMUX_PKT = 1U << 3,
};

static int smux_debug_mask;
module_param_named(debug_mask, smux_debug_mask,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);

/* Simulated wakeup used for testing */
int smux_byte_loopback;
module_param_named(byte_loopback, smux_byte_loopback,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);
int smux_simulate_wakeup_delay = 1;
module_param_named(simulate_wakeup_delay, smux_simulate_wakeup_delay,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);

#define SMUX_DBG(x...) do { \
	if (smux_debug_mask & MSM_SMUX_DEBUG) \
		pr_info(x); \
} while (0)

#define SMUX_PWR(x...) do { \
	if (smux_debug_mask & MSM_SMUX_POWER_INFO) \
		pr_info(x); \
} while (0)

#define SMUX_LOG_PKT_RX(pkt) do { \
	if (smux_debug_mask & MSM_SMUX_PKT) \
		smux_log_pkt(pkt, 1); \
} while (0)

#define SMUX_LOG_PKT_TX(pkt) do { \
	if (smux_debug_mask & MSM_SMUX_PKT) \
		smux_log_pkt(pkt, 0); \
} while (0)

/**
 * Return true if channel is fully opened (both
 * local and remote sides are in the OPENED state).
 */
#define IS_FULLY_OPENED(ch) \
	(ch && (ch)->local_state == SMUX_LCH_LOCAL_OPENED \
	 && (ch)->remote_state == SMUX_LCH_REMOTE_OPENED)

static struct platform_device smux_devs[] = {
	{.name = "SMUX_CTL", .id = -1},
	{.name = "SMUX_RMNET", .id = -1},
	{.name = "SMUX_DUN_DATA_HSUART", .id = 0},
	{.name = "SMUX_RMNET_DATA_HSUART", .id = 1},
	{.name = "SMUX_RMNET_CTL_HSUART", .id = 0},
	{.name = "SMUX_DIAG", .id = -1},
};

enum {
	SMUX_CMD_STATUS_RTC = 1 << 0,
	SMUX_CMD_STATUS_RTR = 1 << 1,
	SMUX_CMD_STATUS_RI = 1 << 2,
	SMUX_CMD_STATUS_DCD = 1 << 3,
	SMUX_CMD_STATUS_FLOW_CNTL = 1 << 4,
};

/* Channel mode */
enum {
	SMUX_LCH_MODE_NORMAL,
	SMUX_LCH_MODE_LOCAL_LOOPBACK,
	SMUX_LCH_MODE_REMOTE_LOOPBACK,
};

enum {
	SMUX_RX_IDLE,
	SMUX_RX_MAGIC,
	SMUX_RX_HDR,
	SMUX_RX_PAYLOAD,
	SMUX_RX_FAILURE,
};

/**
 * Power states.
 *
 * The _FLUSH states are internal transitional states and are not part of the
 * official state machine.
 */
enum {
	SMUX_PWR_OFF,
	SMUX_PWR_TURNING_ON,
	SMUX_PWR_ON,
	SMUX_PWR_TURNING_OFF_FLUSH,
	SMUX_PWR_TURNING_OFF,
	SMUX_PWR_OFF_FLUSH,
};
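
/*
 * Illustrative transition sketch (assembled from the wakeup and power
 * handlers later in this file; the TX-side transitions are driven by code
 * outside this excerpt):
 *
 *   SMUX_PWR_OFF -> SMUX_PWR_TURNING_ON         (local wakeup initiated)
 *   SMUX_PWR_TURNING_ON -> SMUX_PWR_ON          (wakeup ack received)
 *   SMUX_PWR_ON -> SMUX_PWR_TURNING_OFF_FLUSH   (inactivity or remote request)
 *   SMUX_PWR_TURNING_OFF_FLUSH -> SMUX_PWR_TURNING_OFF  (TX queue flushed)
 *   SMUX_PWR_TURNING_OFF -> SMUX_PWR_OFF_FLUSH  (power ctl ack received)
 *   SMUX_PWR_OFF_FLUSH -> SMUX_PWR_OFF          (final UART shutdown)
 */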

/**
 * Logical Channel Structure.  One instance per channel.
 *
 * Locking Hierarchy
 * Each lock has a postfix that describes the locking level.  If multiple
 * locks are required, only increasing lock hierarchy numbers may be locked,
 * which avoids deadlock.
 *
 * Locking Example
 * If state_lock_lhb1 is currently held and the TX list needs to be
 * manipulated, then tx_lock_lhb2 may be locked since its locking hierarchy
 * is greater.  However, if tx_lock_lhb2 is held, then state_lock_lhb1 may
 * not be acquired since that would result in a deadlock.
 *
 * Note that the Line Discipline locks (*_lha) should always be acquired
 * before the logical channel locks.
 */
struct smux_lch_t {
	/* channel state */
	spinlock_t state_lock_lhb1;
	uint8_t lcid;
	unsigned local_state;
	unsigned local_mode;
	uint8_t local_tiocm;
	unsigned options;

	unsigned remote_state;
	unsigned remote_mode;
	uint8_t remote_tiocm;

	int tx_flow_control;
	int rx_flow_control_auto;
	int rx_flow_control_client;

	/* client callbacks and private data */
	void *priv;
	void (*notify)(void *priv, int event_type, const void *metadata);
	int (*get_rx_buffer)(void *priv, void **pkt_priv, void **buffer,
								int size);

	/* RX Info */
	struct list_head rx_retry_queue;
	unsigned rx_retry_queue_cnt;
	struct delayed_work rx_retry_work;

	/* TX Info */
	spinlock_t tx_lock_lhb2;
	struct list_head tx_queue;
	struct list_head tx_ready_list;
	unsigned tx_pending_data_cnt;
	unsigned notify_lwm;
};
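
/*
 * Minimal sketch of the locking rules described above (illustrative only,
 * not compiled or called anywhere): lower-numbered locks are taken before
 * higher-numbered ones, so state_lock_lhb1 is acquired before tx_lock_lhb2,
 * as smux_lch_purge() below does:
 *
 *	spin_lock_irqsave(&ch->state_lock_lhb1, flags);
 *	spin_lock(&ch->tx_lock_lhb2);
 *	... manipulate ch->tx_queue ...
 *	spin_unlock(&ch->tx_lock_lhb2);
 *	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
 */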

union notifier_metadata {
	struct smux_meta_disconnected disconnected;
	struct smux_meta_read read;
	struct smux_meta_write write;
	struct smux_meta_tiocm tiocm;
};

struct smux_notify_handle {
	void (*notify)(void *priv, int event_type, const void *metadata);
	void *priv;
	int event_type;
	union notifier_metadata *metadata;
};

/**
 * Get RX Buffer Retry structure.
 *
 * This is used for clients that are unable to provide an RX buffer
 * immediately.  The packet is held here temporarily while the buffer
 * request is retried.
 */
struct smux_rx_pkt_retry {
	struct smux_pkt_t *pkt;
	struct list_head rx_retry_list;
	unsigned timeout_in_ms;
};
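
/*
 * Note: when a client cannot supply an RX buffer, the packet is copied into
 * one of these entries and queued on ch->rx_retry_queue; rx_retry_work then
 * re-requests a buffer after timeout_in_ms, which is bounded by
 * SMUX_RX_RETRY_MIN_MS and SMUX_RX_RETRY_MAX_MS (the backoff policy between
 * those bounds lives in smux_rx_retry_worker, which is not shown here).
 */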

/**
 * Receive worker data structure.
 *
 * One instance is created for every call to smux_rx_state_machine.
 */
struct smux_rx_worker_data {
	const unsigned char *data;
	int len;
	int flag;

	struct work_struct work;
	struct completion work_complete;
};

/**
 * Line discipline and module structure.
 *
 * Only one instance exists since multiple instances of the line discipline
 * are not allowed.
 */
struct smux_ldisc_t {
	struct mutex mutex_lha0;

	int is_initialized;
	int in_reset;
	int ld_open_count;
	struct tty_struct *tty;

	/* RX State Machine (single-threaded access by smux_rx_wq) */
	unsigned char recv_buf[SMUX_MAX_PKT_SIZE];
	unsigned int recv_len;
	unsigned int pkt_remain;
	unsigned rx_state;

	/* RX Activity - accessed by multiple threads */
	spinlock_t rx_lock_lha1;
	unsigned rx_activity_flag;

	/* TX / Power */
	spinlock_t tx_lock_lha2;
	struct list_head lch_tx_ready_list;
	unsigned power_state;
	unsigned pwr_wakeup_delay_us;
	unsigned tx_activity_flag;
	unsigned powerdown_enabled;
	struct list_head power_queue;
};


/* data structures */
static struct smux_lch_t smux_lch[SMUX_NUM_LOGICAL_CHANNELS];
static struct smux_ldisc_t smux;
static const char *tty_error_type[] = {
	[TTY_NORMAL] = "normal",
	[TTY_OVERRUN] = "overrun",
	[TTY_BREAK] = "break",
	[TTY_PARITY] = "parity",
	[TTY_FRAME] = "framing",
};

static const char *smux_cmds[] = {
	[SMUX_CMD_DATA] = "DATA",
	[SMUX_CMD_OPEN_LCH] = "OPEN",
	[SMUX_CMD_CLOSE_LCH] = "CLOSE",
	[SMUX_CMD_STATUS] = "STATUS",
	[SMUX_CMD_PWR_CTL] = "PWR",
	[SMUX_CMD_BYTE] = "Raw Byte",
};

static void smux_notify_local_fn(struct work_struct *work);
static DECLARE_WORK(smux_notify_local, smux_notify_local_fn);

static struct workqueue_struct *smux_notify_wq;
static size_t handle_size;
static struct kfifo smux_notify_fifo;
static int queued_fifo_notifications;
static DEFINE_SPINLOCK(notify_lock_lhc1);

static struct workqueue_struct *smux_tx_wq;
static struct workqueue_struct *smux_rx_wq;
static void smux_tx_worker(struct work_struct *work);
static DECLARE_WORK(smux_tx_work, smux_tx_worker);

static void smux_wakeup_worker(struct work_struct *work);
static void smux_rx_retry_worker(struct work_struct *work);
static void smux_rx_worker(struct work_struct *work);
static DECLARE_WORK(smux_wakeup_work, smux_wakeup_worker);
static DECLARE_DELAYED_WORK(smux_wakeup_delayed_work, smux_wakeup_worker);

static void smux_inactivity_worker(struct work_struct *work);
static DECLARE_WORK(smux_inactivity_work, smux_inactivity_worker);
static DECLARE_DELAYED_WORK(smux_delayed_inactivity_work,
		smux_inactivity_worker);

static long msm_smux_tiocm_get_atomic(struct smux_lch_t *ch);
static void list_channel(struct smux_lch_t *ch);
static int smux_send_status_cmd(struct smux_lch_t *ch);
static int smux_dispatch_rx_pkt(struct smux_pkt_t *pkt);
static void smux_flush_tty(void);
static void smux_purge_ch_tx_queue(struct smux_lch_t *ch);
static int schedule_notify(uint8_t lcid, int event,
			const union notifier_metadata *metadata);
static int ssr_notifier_cb(struct notifier_block *this,
				unsigned long code,
				void *data);
static void smux_uart_power_on_atomic(void);
static int smux_rx_flow_control_updated(struct smux_lch_t *ch);

/**
 * Convert TTY Error Flags to string for logging purposes.
 *
 * @flag    TTY_* flag
 * @returns String description or NULL if unknown
 */
static const char *tty_flag_to_str(unsigned flag)
{
	if (flag < ARRAY_SIZE(tty_error_type))
		return tty_error_type[flag];
	return NULL;
}

/**
 * Convert SMUX Command to string for logging purposes.
 *
 * @cmd     SMUX command
 * @returns String description or NULL if unknown
 */
static const char *cmd_to_str(unsigned cmd)
{
	if (cmd < ARRAY_SIZE(smux_cmds))
		return smux_cmds[cmd];
	return NULL;
}

/**
 * Set the reset state due to an unrecoverable failure.
 */
static void smux_enter_reset(void)
{
	pr_err("%s: unrecoverable failure, waiting for ssr\n", __func__);
	smux.in_reset = 1;
}

static int lch_init(void)
{
	unsigned int id;
	struct smux_lch_t *ch;
	int i = 0;

	handle_size = sizeof(struct smux_notify_handle *);

	smux_notify_wq = create_singlethread_workqueue("smux_notify_wq");
	smux_tx_wq = create_singlethread_workqueue("smux_tx_wq");
	smux_rx_wq = create_singlethread_workqueue("smux_rx_wq");

	if (!smux_notify_wq || !smux_tx_wq || !smux_rx_wq) {
		SMUX_DBG("%s: create_singlethread_workqueue ENOMEM\n",
							__func__);
		return -ENOMEM;
	}

	i |= kfifo_alloc(&smux_notify_fifo,
			SMUX_NOTIFY_FIFO_SIZE * handle_size,
			GFP_KERNEL);
	i |= smux_loopback_init();

	if (i) {
		pr_err("%s: out of memory error\n", __func__);
		return -ENOMEM;
	}

	for (id = 0 ; id < SMUX_NUM_LOGICAL_CHANNELS; id++) {
		ch = &smux_lch[id];

		spin_lock_init(&ch->state_lock_lhb1);
		ch->lcid = id;
		ch->local_state = SMUX_LCH_LOCAL_CLOSED;
		ch->local_mode = SMUX_LCH_MODE_NORMAL;
		ch->local_tiocm = 0x0;
		ch->options = SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP;
		ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
		ch->remote_mode = SMUX_LCH_MODE_NORMAL;
		ch->remote_tiocm = 0x0;
		ch->tx_flow_control = 0;
		ch->rx_flow_control_auto = 0;
		ch->rx_flow_control_client = 0;
		ch->priv = 0;
		ch->notify = 0;
		ch->get_rx_buffer = 0;

		INIT_LIST_HEAD(&ch->rx_retry_queue);
		ch->rx_retry_queue_cnt = 0;
		INIT_DELAYED_WORK(&ch->rx_retry_work, smux_rx_retry_worker);

		spin_lock_init(&ch->tx_lock_lhb2);
		INIT_LIST_HEAD(&ch->tx_queue);
		INIT_LIST_HEAD(&ch->tx_ready_list);
		ch->tx_pending_data_cnt = 0;
		ch->notify_lwm = 0;
	}

	return 0;
}

/**
 * Empty and clean up all SMUX logical channels for subsystem restart or line
 * discipline disconnect.
 */
static void smux_lch_purge(void)
{
	struct smux_lch_t *ch;
	unsigned long flags;
	int i;

	/* Empty TX ready list */
	spin_lock_irqsave(&smux.tx_lock_lha2, flags);
	while (!list_empty(&smux.lch_tx_ready_list)) {
		SMUX_DBG("%s: emptying ready list %p\n",
				__func__, smux.lch_tx_ready_list.next);
		ch = list_first_entry(&smux.lch_tx_ready_list,
						struct smux_lch_t,
						tx_ready_list);
		list_del(&ch->tx_ready_list);
		INIT_LIST_HEAD(&ch->tx_ready_list);
	}

	/* Purge Power Queue */
	while (!list_empty(&smux.power_queue)) {
		struct smux_pkt_t *pkt;

		pkt = list_first_entry(&smux.power_queue,
						struct smux_pkt_t,
						list);
		list_del(&pkt->list);
		SMUX_DBG("%s: emptying power queue pkt=%p\n",
				__func__, pkt);
		smux_free_pkt(pkt);
	}
	spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);

	/* Close all ports */
	for (i = 0 ; i < SMUX_NUM_LOGICAL_CHANNELS; i++) {
		ch = &smux_lch[i];
		SMUX_DBG("%s: cleaning up lcid %d\n", __func__, i);

		spin_lock_irqsave(&ch->state_lock_lhb1, flags);

		/* Purge TX queue */
		spin_lock(&ch->tx_lock_lhb2);
		smux_purge_ch_tx_queue(ch);
		spin_unlock(&ch->tx_lock_lhb2);

		/* Notify user of disconnect and reset channel state */
		if (ch->local_state == SMUX_LCH_LOCAL_OPENED ||
			ch->local_state == SMUX_LCH_LOCAL_CLOSING) {
			union notifier_metadata meta;

			meta.disconnected.is_ssr = smux.in_reset;
			schedule_notify(ch->lcid, SMUX_DISCONNECTED, &meta);
		}

		ch->local_state = SMUX_LCH_LOCAL_CLOSED;
		ch->local_mode = SMUX_LCH_MODE_NORMAL;
		ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
		ch->remote_mode = SMUX_LCH_MODE_NORMAL;
		ch->tx_flow_control = 0;
		ch->rx_flow_control_auto = 0;
		ch->rx_flow_control_client = 0;

		/* Purge RX retry queue */
		if (ch->rx_retry_queue_cnt)
			queue_delayed_work(smux_rx_wq, &ch->rx_retry_work, 0);

		spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
	}

	/* Flush TX/RX workqueues */
	SMUX_DBG("%s: flushing tx wq\n", __func__);
	flush_workqueue(smux_tx_wq);
	SMUX_DBG("%s: flushing rx wq\n", __func__);
	flush_workqueue(smux_rx_wq);
}

int smux_assert_lch_id(uint32_t lcid)
{
	if (lcid >= SMUX_NUM_LOGICAL_CHANNELS)
		return -ENXIO;
	else
		return 0;
}

/**
 * Log packet information for debug purposes.
 *
 * @pkt     Packet to log
 * @is_recv 1 = RX packet; 0 = TX Packet
 *
 * [DIR][LCID] [LOCAL_STATE][LOCAL_MODE]:[REMOTE_STATE][REMOTE_MODE] PKT Info
 *
 * PKT Info:
 *   [CMD] flags [flags] len [PAYLOAD_LEN]:[PAD_LEN] [Payload hex bytes]
 *
 * Direction:    R = Receive, S = Send
 * Local State:  C = Closed; c = closing; o = opening; O = Opened
 * Local Mode:   L = Local loopback; R = Remote loopback; N = Normal
 * Remote State: C = Closed; O = Opened
 * Remote Mode:  R = Remote loopback; N = Normal
 */
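
/*
 * Example line (illustrative payload values only): a transmitted DATA packet
 * on channel 3 of a fully-opened, normal-mode channel with a 4-byte payload
 * would be logged as:
 *
 *   smux: S3 ON:ON DATA flags 0 len 4:0 de ad be ef
 */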
static void smux_log_pkt(struct smux_pkt_t *pkt, int is_recv)
{
	char logbuf[SMUX_PKT_LOG_SIZE];
	char cmd_extra[16];
	int i = 0;
	int count;
	int len;
	char local_state;
	char local_mode;
	char remote_state;
	char remote_mode;
	struct smux_lch_t *ch = NULL;
	unsigned char *data;

	if (!smux_assert_lch_id(pkt->hdr.lcid))
		ch = &smux_lch[pkt->hdr.lcid];

	if (ch) {
		switch (ch->local_state) {
		case SMUX_LCH_LOCAL_CLOSED:
			local_state = 'C';
			break;
		case SMUX_LCH_LOCAL_OPENING:
			local_state = 'o';
			break;
		case SMUX_LCH_LOCAL_OPENED:
			local_state = 'O';
			break;
		case SMUX_LCH_LOCAL_CLOSING:
			local_state = 'c';
			break;
		default:
			local_state = 'U';
			break;
		}

		switch (ch->local_mode) {
		case SMUX_LCH_MODE_LOCAL_LOOPBACK:
			local_mode = 'L';
			break;
		case SMUX_LCH_MODE_REMOTE_LOOPBACK:
			local_mode = 'R';
			break;
		case SMUX_LCH_MODE_NORMAL:
			local_mode = 'N';
			break;
		default:
			local_mode = 'U';
			break;
		}

		switch (ch->remote_state) {
		case SMUX_LCH_REMOTE_CLOSED:
			remote_state = 'C';
			break;
		case SMUX_LCH_REMOTE_OPENED:
			remote_state = 'O';
			break;

		default:
			remote_state = 'U';
			break;
		}

		switch (ch->remote_mode) {
		case SMUX_LCH_MODE_REMOTE_LOOPBACK:
			remote_mode = 'R';
			break;
		case SMUX_LCH_MODE_NORMAL:
			remote_mode = 'N';
			break;
		default:
			remote_mode = 'U';
			break;
		}
	} else {
		/* broadcast channel */
		local_state = '-';
		local_mode = '-';
		remote_state = '-';
		remote_mode = '-';
	}

	/* determine command type (ACK, etc) */
	cmd_extra[0] = '\0';
	switch (pkt->hdr.cmd) {
	case SMUX_CMD_OPEN_LCH:
		if (pkt->hdr.flags & SMUX_CMD_OPEN_ACK)
			snprintf(cmd_extra, sizeof(cmd_extra), " ACK");
		break;
	case SMUX_CMD_CLOSE_LCH:
		if (pkt->hdr.flags & SMUX_CMD_CLOSE_ACK)
			snprintf(cmd_extra, sizeof(cmd_extra), " ACK");
		break;
	}

	i += snprintf(logbuf + i, SMUX_PKT_LOG_SIZE - i,
			"smux: %c%d %c%c:%c%c %s%s flags %x len %d:%d ",
			is_recv ? 'R' : 'S', pkt->hdr.lcid,
			local_state, local_mode,
			remote_state, remote_mode,
			cmd_to_str(pkt->hdr.cmd), cmd_extra, pkt->hdr.flags,
			pkt->hdr.payload_len, pkt->hdr.pad_len);

	len = (pkt->hdr.payload_len > 16) ? 16 : pkt->hdr.payload_len;
	data = (unsigned char *)pkt->payload;
	for (count = 0; count < len; count++)
		i += snprintf(logbuf + i, SMUX_PKT_LOG_SIZE - i,
				"%02x ", (unsigned)data[count]);

	pr_info("%s\n", logbuf);
}

static void smux_notify_local_fn(struct work_struct *work)
{
	struct smux_notify_handle *notify_handle = NULL;
	union notifier_metadata *metadata = NULL;
	unsigned long flags;
	int i;

	for (;;) {
		/* retrieve notification */
		spin_lock_irqsave(&notify_lock_lhc1, flags);
		if (kfifo_len(&smux_notify_fifo) >= handle_size) {
			i = kfifo_out(&smux_notify_fifo,
					&notify_handle,
					handle_size);
			if (i != handle_size) {
				pr_err("%s: unable to retrieve handle %d expected %d\n",
						__func__, i, handle_size);
				spin_unlock_irqrestore(&notify_lock_lhc1, flags);
				break;
			}
		} else {
			spin_unlock_irqrestore(&notify_lock_lhc1, flags);
			break;
		}
		--queued_fifo_notifications;
		spin_unlock_irqrestore(&notify_lock_lhc1, flags);

		/* notify client */
		metadata = notify_handle->metadata;
		notify_handle->notify(notify_handle->priv,
				notify_handle->event_type,
				metadata);

		kfree(metadata);
		kfree(notify_handle);
	}
}

/**
 * Initialize existing packet.
 */
void smux_init_pkt(struct smux_pkt_t *pkt)
{
	memset(pkt, 0x0, sizeof(*pkt));
	pkt->hdr.magic = SMUX_MAGIC;
	INIT_LIST_HEAD(&pkt->list);
}

/**
 * Allocate and initialize packet.
 *
 * If a payload is needed, either set it directly and ensure that it's freed,
 * or use smux_alloc_pkt_payload() to allocate the payload so that it is freed
 * automatically when smux_free_pkt() is called.
 */
struct smux_pkt_t *smux_alloc_pkt(void)
{
	struct smux_pkt_t *pkt;

	/* Consider a free list implementation instead of kmalloc */
	pkt = kmalloc(sizeof(struct smux_pkt_t), GFP_ATOMIC);
	if (!pkt) {
		pr_err("%s: out of memory\n", __func__);
		return NULL;
	}
	smux_init_pkt(pkt);
	pkt->allocated = 1;

	return pkt;
}

/**
 * Free packet.
 *
 * @pkt Packet to free (may be NULL)
 *
 * If payload was allocated using smux_alloc_pkt_payload(), then it is freed
 * as well.  Otherwise, the caller is responsible for freeing the payload.
 */
void smux_free_pkt(struct smux_pkt_t *pkt)
{
	if (pkt) {
		if (pkt->free_payload)
			kfree(pkt->payload);
		if (pkt->allocated)
			kfree(pkt);
	}
}

/**
 * Allocate packet payload.
 *
 * @pkt Packet to add payload to
 *
 * @returns 0 on success, <0 upon error
 *
 * A flag is set to signal smux_free_pkt() to free the payload.
 */
int smux_alloc_pkt_payload(struct smux_pkt_t *pkt)
{
	if (!pkt)
		return -EINVAL;

	pkt->payload = kmalloc(pkt->hdr.payload_len, GFP_ATOMIC);
	pkt->free_payload = 1;
	if (!pkt->payload) {
		pr_err("%s: unable to malloc %d bytes for payload\n",
				__func__, pkt->hdr.payload_len);
		return -ENOMEM;
	}

	return 0;
}
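
/*
 * Typical allocation pattern for the helpers above (a sketch only, assuming
 * the caller runs in a context where GFP_ATOMIC allocations are acceptable):
 *
 *	struct smux_pkt_t *pkt;
 *
 *	pkt = smux_alloc_pkt();
 *	if (!pkt)
 *		return -ENOMEM;
 *	pkt->hdr.cmd = SMUX_CMD_DATA;
 *	pkt->hdr.lcid = lcid;
 *	pkt->hdr.payload_len = len;
 *	if (smux_alloc_pkt_payload(pkt)) {
 *		smux_free_pkt(pkt);
 *		return -ENOMEM;
 *	}
 *	memcpy(pkt->payload, data, len);
 *	... queue for TX; smux_free_pkt() later releases packet and payload ...
 */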

static int schedule_notify(uint8_t lcid, int event,
			const union notifier_metadata *metadata)
{
	struct smux_notify_handle *notify_handle = 0;
	union notifier_metadata *meta_copy = 0;
	struct smux_lch_t *ch;
	int i;
	unsigned long flags;
	int ret = 0;

	ch = &smux_lch[lcid];
	notify_handle = kzalloc(sizeof(struct smux_notify_handle),
						GFP_ATOMIC);
	if (!notify_handle) {
		pr_err("%s: out of memory\n", __func__);
		ret = -ENOMEM;
		goto free_out;
	}

	notify_handle->notify = ch->notify;
	notify_handle->priv = ch->priv;
	notify_handle->event_type = event;
	if (metadata) {
		meta_copy = kzalloc(sizeof(union notifier_metadata),
							GFP_ATOMIC);
		if (!meta_copy) {
			pr_err("%s: out of memory\n", __func__);
			ret = -ENOMEM;
			goto free_out;
		}
		*meta_copy = *metadata;
		notify_handle->metadata = meta_copy;
	} else {
		notify_handle->metadata = NULL;
	}

	spin_lock_irqsave(&notify_lock_lhc1, flags);
	i = kfifo_avail(&smux_notify_fifo);
	if (i < handle_size) {
		pr_err("%s: fifo full error %d expected %d\n",
					__func__, i, handle_size);
		ret = -ENOMEM;
		goto unlock_out;
	}

	i = kfifo_in(&smux_notify_fifo, &notify_handle, handle_size);
	if (i < 0 || i != handle_size) {
		pr_err("%s: fifo not available error %d (expected %d)\n",
				__func__, i, handle_size);
		ret = -ENOSPC;
		goto unlock_out;
	}
	++queued_fifo_notifications;

unlock_out:
	spin_unlock_irqrestore(&notify_lock_lhc1, flags);

free_out:
	queue_work(smux_notify_wq, &smux_notify_local);
	if (ret < 0 && notify_handle) {
		kfree(notify_handle->metadata);
		kfree(notify_handle);
	}
	return ret;
}
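
/*
 * Notification flow: schedule_notify() snapshots the client callback and a
 * copy of the metadata into a smux_notify_handle, pushes the handle pointer
 * through smux_notify_fifo under notify_lock_lhc1, and queues
 * smux_notify_local on smux_notify_wq.  smux_notify_local_fn() then drains
 * the FIFO and invokes each client callback outside of any spinlock.
 */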

/**
 * Returns the serialized size of a packet.
 *
 * @pkt Packet to serialize
 *
 * @returns Serialized length of packet
 */
static unsigned int smux_serialize_size(struct smux_pkt_t *pkt)
{
	unsigned int size;

	size = sizeof(struct smux_hdr_t);
	size += pkt->hdr.payload_len;
	size += pkt->hdr.pad_len;

	return size;
}

/**
 * Serialize packet @pkt into output buffer @data.
 *
 * @pkt     Packet to serialize
 * @out     Destination buffer pointer
 * @out_len Size of serialized packet
 *
 * @returns 0 for success
 */
int smux_serialize(struct smux_pkt_t *pkt, char *out,
					unsigned int *out_len)
{
	char *data_start = out;

	if (smux_serialize_size(pkt) > SMUX_MAX_PKT_SIZE) {
		pr_err("%s: packet size %d too big\n",
				__func__, smux_serialize_size(pkt));
		return -E2BIG;
	}

	memcpy(out, &pkt->hdr, sizeof(struct smux_hdr_t));
	out += sizeof(struct smux_hdr_t);
	if (pkt->payload) {
		memcpy(out, pkt->payload, pkt->hdr.payload_len);
		out += pkt->hdr.payload_len;
	}
	if (pkt->hdr.pad_len) {
		memset(out, 0x0, pkt->hdr.pad_len);
		out += pkt->hdr.pad_len;
	}
	*out_len = out - data_start;
	return 0;
}
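
/*
 * On-the-wire layout produced by smux_serialize() (sizes in bytes; the
 * padding bytes are zero-filled by this writer):
 *
 *   | struct smux_hdr_t | payload (hdr.payload_len) | pad (hdr.pad_len) |
 */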

/**
 * Serialize header and provide pointer to the data.
 *
 * @pkt          Packet
 * @out[out]     Pointer to the serialized header data
 * @out_len[out] Pointer to the serialized header length
 */
static void smux_serialize_hdr(struct smux_pkt_t *pkt, char **out,
					unsigned int *out_len)
{
	*out = (char *)&pkt->hdr;
	*out_len = sizeof(struct smux_hdr_t);
}

/**
 * Serialize payload and provide pointer to the data.
 *
 * @pkt          Packet
 * @out[out]     Pointer to the serialized payload data
 * @out_len[out] Pointer to the serialized payload length
 */
static void smux_serialize_payload(struct smux_pkt_t *pkt, char **out,
					unsigned int *out_len)
{
	*out = pkt->payload;
	*out_len = pkt->hdr.payload_len;
}

/**
 * Serialize padding and provide pointer to the data.
 *
 * @pkt          Packet
 * @out[out]     Pointer to the serialized padding (always NULL)
 * @out_len[out] Pointer to the serialized padding length
 *
 * Since the padding field value is undefined, only the size of the padding
 * (@out_len) is set and the buffer pointer (@out) will always be NULL.
 */
static void smux_serialize_padding(struct smux_pkt_t *pkt, char **out,
					unsigned int *out_len)
{
	*out = NULL;
	*out_len = pkt->hdr.pad_len;
}

/**
 * Write data to TTY framework and handle breaking the writes up if needed.
 *
 * @data Data to write
 * @len  Length of data
 *
 * @returns 0 for success, < 0 for failure
 */
static int write_to_tty(char *data, unsigned len)
{
	int data_written;

	if (!data)
		return 0;

	while (len > 0 && !smux.in_reset) {
		data_written = smux.tty->ops->write(smux.tty, data, len);
		if (data_written >= 0) {
			len -= data_written;
			data += data_written;
		} else {
			pr_err("%s: TTY write returned error %d\n",
					__func__, data_written);
			return data_written;
		}

		if (len)
			tty_wait_until_sent(smux.tty,
				msecs_to_jiffies(TTY_BUFFER_FULL_WAIT_MS));
	}
	return 0;
}

/**
 * Write packet to TTY.
 *
 * @pkt packet to write
 *
 * @returns 0 on success
 */
static int smux_tx_tty(struct smux_pkt_t *pkt)
{
	char *data;
	unsigned int len;
	int ret;

	if (!smux.tty) {
		pr_err("%s: TTY not initialized\n", __func__);
		return -ENOTTY;
	}

	if (pkt->hdr.cmd == SMUX_CMD_BYTE) {
		SMUX_DBG("%s: tty send single byte\n", __func__);
		ret = write_to_tty(&pkt->hdr.flags, 1);
		return ret;
	}

	smux_serialize_hdr(pkt, &data, &len);
	ret = write_to_tty(data, len);
	if (ret) {
		pr_err("%s: failed %d to write header %d\n",
				__func__, ret, len);
		return ret;
	}

	smux_serialize_payload(pkt, &data, &len);
	ret = write_to_tty(data, len);
	if (ret) {
		pr_err("%s: failed %d to write payload %d\n",
				__func__, ret, len);
		return ret;
	}

	smux_serialize_padding(pkt, &data, &len);
	while (len > 0) {
		char zero = 0x0;
		ret = write_to_tty(&zero, 1);
		if (ret) {
			pr_err("%s: failed %d to write padding %d\n",
					__func__, ret, len);
			return ret;
		}
		--len;
	}
	return 0;
}

/**
 * Send a single character.
 *
 * @ch Character to send
 */
static void smux_send_byte(char ch)
{
	struct smux_pkt_t *pkt;

	pkt = smux_alloc_pkt();
	if (!pkt) {
		pr_err("%s: alloc failure for byte %x\n", __func__, ch);
		return;
	}
	pkt->hdr.cmd = SMUX_CMD_BYTE;
	pkt->hdr.flags = ch;
	pkt->hdr.lcid = SMUX_BROADCAST_LCID;

	list_add_tail(&pkt->list, &smux.power_queue);
	queue_work(smux_tx_wq, &smux_tx_work);
}

/**
 * Receive a single-character packet (used for internal testing).
 *
 * @ch   Character to receive
 * @lcid Logical channel ID for packet
 *
 * @returns 0 for success
 */
static int smux_receive_byte(char ch, int lcid)
{
	struct smux_pkt_t pkt;

	smux_init_pkt(&pkt);
	pkt.hdr.lcid = lcid;
	pkt.hdr.cmd = SMUX_CMD_BYTE;
	pkt.hdr.flags = ch;

	return smux_dispatch_rx_pkt(&pkt);
}

/**
 * Queue packet for transmit.
 *
 * @pkt_ptr Packet to queue
 * @ch      Channel to queue packet on
 * @queue   Queue channel on ready list
 */
static void smux_tx_queue(struct smux_pkt_t *pkt_ptr, struct smux_lch_t *ch,
		int queue)
{
	unsigned long flags;

	SMUX_DBG("%s: queuing pkt %p\n", __func__, pkt_ptr);

	spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
	list_add_tail(&pkt_ptr->list, &ch->tx_queue);
	spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);

	if (queue)
		list_channel(ch);
}

/**
 * Handle receive OPEN ACK command.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_open_ack(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret;
	struct smux_lch_t *ch;
	int enable_powerdown = 0;

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];

	spin_lock(&ch->state_lock_lhb1);
	if (ch->local_state == SMUX_LCH_LOCAL_OPENING) {
		SMUX_DBG("lcid %d local state 0x%x -> 0x%x\n", lcid,
				ch->local_state,
				SMUX_LCH_LOCAL_OPENED);

		if (pkt->hdr.flags & SMUX_CMD_OPEN_POWER_COLLAPSE)
			enable_powerdown = 1;

		ch->local_state = SMUX_LCH_LOCAL_OPENED;
		if (ch->remote_state == SMUX_LCH_REMOTE_OPENED)
			schedule_notify(lcid, SMUX_CONNECTED, NULL);
		ret = 0;
	} else if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
		SMUX_DBG("Remote loopback OPEN ACK received\n");
		ret = 0;
	} else {
		pr_err("%s: lcid %d state 0x%x open ack invalid\n",
				__func__, lcid, ch->local_state);
		ret = -EINVAL;
	}
	spin_unlock(&ch->state_lock_lhb1);

	if (enable_powerdown) {
		spin_lock(&smux.tx_lock_lha2);
		if (!smux.powerdown_enabled) {
			smux.powerdown_enabled = 1;
			SMUX_DBG("%s: enabling power-collapse support\n",
					__func__);
		}
		spin_unlock(&smux.tx_lock_lha2);
	}

	return ret;
}

static int smux_handle_close_ack(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret;
	struct smux_lch_t *ch;
	union notifier_metadata meta_disconnected;
	unsigned long flags;

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];
	meta_disconnected.disconnected.is_ssr = 0;

	spin_lock_irqsave(&ch->state_lock_lhb1, flags);

	if (ch->local_state == SMUX_LCH_LOCAL_CLOSING) {
		SMUX_DBG("lcid %d local state 0x%x -> 0x%x\n", lcid,
				SMUX_LCH_LOCAL_CLOSING,
				SMUX_LCH_LOCAL_CLOSED);
		ch->local_state = SMUX_LCH_LOCAL_CLOSED;
		if (ch->remote_state == SMUX_LCH_REMOTE_CLOSED)
			schedule_notify(lcid, SMUX_DISCONNECTED,
					&meta_disconnected);
		ret = 0;
	} else if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
		SMUX_DBG("Remote loopback CLOSE ACK received\n");
		ret = 0;
	} else {
		pr_err("%s: lcid %d state 0x%x close ack invalid\n",
				__func__, lcid, ch->local_state);
		ret = -EINVAL;
	}
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
	return ret;
}

/**
 * Handle receive OPEN command.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_open_cmd(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret;
	struct smux_lch_t *ch;
	struct smux_pkt_t *ack_pkt;
	unsigned long flags;
	int tx_ready = 0;
	int enable_powerdown = 0;

	if (pkt->hdr.flags & SMUX_CMD_OPEN_ACK)
		return smux_handle_rx_open_ack(pkt);

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];

	spin_lock_irqsave(&ch->state_lock_lhb1, flags);

	if (ch->remote_state == SMUX_LCH_REMOTE_CLOSED) {
		SMUX_DBG("lcid %d remote state 0x%x -> 0x%x\n", lcid,
				SMUX_LCH_REMOTE_CLOSED,
				SMUX_LCH_REMOTE_OPENED);

		ch->remote_state = SMUX_LCH_REMOTE_OPENED;
		if (pkt->hdr.flags & SMUX_CMD_OPEN_POWER_COLLAPSE)
			enable_powerdown = 1;

		/* Send Open ACK */
		ack_pkt = smux_alloc_pkt();
		if (!ack_pkt) {
			/* exit out to allow retrying this later */
			ret = -ENOMEM;
			goto out;
		}
		ack_pkt->hdr.cmd = SMUX_CMD_OPEN_LCH;
		ack_pkt->hdr.flags = SMUX_CMD_OPEN_ACK
			| SMUX_CMD_OPEN_POWER_COLLAPSE;
		ack_pkt->hdr.lcid = lcid;
		ack_pkt->hdr.payload_len = 0;
		ack_pkt->hdr.pad_len = 0;
		if (pkt->hdr.flags & SMUX_CMD_OPEN_REMOTE_LOOPBACK) {
			ch->remote_mode = SMUX_LCH_MODE_REMOTE_LOOPBACK;
			ack_pkt->hdr.flags |= SMUX_CMD_OPEN_REMOTE_LOOPBACK;
		}
		smux_tx_queue(ack_pkt, ch, 0);
		tx_ready = 1;

		if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
			/*
			 * Send an Open command to the remote side to
			 * simulate our local client doing it.
			 */
			ack_pkt = smux_alloc_pkt();
			if (ack_pkt) {
				ack_pkt->hdr.lcid = lcid;
				ack_pkt->hdr.cmd = SMUX_CMD_OPEN_LCH;
				ack_pkt->hdr.flags =
					SMUX_CMD_OPEN_POWER_COLLAPSE;
				ack_pkt->hdr.payload_len = 0;
				ack_pkt->hdr.pad_len = 0;
				smux_tx_queue(ack_pkt, ch, 0);
				tx_ready = 1;
			} else {
				pr_err("%s: Remote loopback allocation failure\n",
						__func__);
			}
		} else if (ch->local_state == SMUX_LCH_LOCAL_OPENED) {
			schedule_notify(lcid, SMUX_CONNECTED, NULL);
		}
		ret = 0;
	} else {
		pr_err("%s: lcid %d remote state 0x%x open invalid\n",
				__func__, lcid, ch->remote_state);
		ret = -EINVAL;
	}

out:
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);

	if (enable_powerdown) {
		spin_lock_irqsave(&smux.tx_lock_lha2, flags);
		if (!smux.powerdown_enabled) {
			smux.powerdown_enabled = 1;
			SMUX_DBG("%s: enabling power-collapse support\n",
					__func__);
		}
		spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
	}

	if (tx_ready)
		list_channel(ch);

	return ret;
}

/**
 * Handle receive CLOSE command.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_close_cmd(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret;
	struct smux_lch_t *ch;
	struct smux_pkt_t *ack_pkt;
	union notifier_metadata meta_disconnected;
	unsigned long flags;
	int tx_ready = 0;

	if (pkt->hdr.flags & SMUX_CMD_CLOSE_ACK)
		return smux_handle_close_ack(pkt);

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];
	meta_disconnected.disconnected.is_ssr = 0;

	spin_lock_irqsave(&ch->state_lock_lhb1, flags);
	if (ch->remote_state == SMUX_LCH_REMOTE_OPENED) {
		SMUX_DBG("lcid %d remote state 0x%x -> 0x%x\n", lcid,
				SMUX_LCH_REMOTE_OPENED,
				SMUX_LCH_REMOTE_CLOSED);

		ack_pkt = smux_alloc_pkt();
		if (!ack_pkt) {
			/* exit out to allow retrying this later */
			ret = -ENOMEM;
			goto out;
		}
		ch->remote_state = SMUX_LCH_REMOTE_CLOSED;
		ack_pkt->hdr.cmd = SMUX_CMD_CLOSE_LCH;
		ack_pkt->hdr.flags = SMUX_CMD_CLOSE_ACK;
		ack_pkt->hdr.lcid = lcid;
		ack_pkt->hdr.payload_len = 0;
		ack_pkt->hdr.pad_len = 0;
		smux_tx_queue(ack_pkt, ch, 0);
		tx_ready = 1;

		if (ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK) {
			/*
			 * Send a Close command to the remote side to simulate
			 * our local client doing it.
			 */
			ack_pkt = smux_alloc_pkt();
			if (ack_pkt) {
				ack_pkt->hdr.lcid = lcid;
				ack_pkt->hdr.cmd = SMUX_CMD_CLOSE_LCH;
				ack_pkt->hdr.flags = 0;
				ack_pkt->hdr.payload_len = 0;
				ack_pkt->hdr.pad_len = 0;
				smux_tx_queue(ack_pkt, ch, 0);
				tx_ready = 1;
			} else {
				pr_err("%s: Remote loopback allocation failure\n",
						__func__);
			}
		}

		if (ch->local_state == SMUX_LCH_LOCAL_CLOSED)
			schedule_notify(lcid, SMUX_DISCONNECTED,
					&meta_disconnected);
		ret = 0;
	} else {
		pr_err("%s: lcid %d remote state 0x%x close invalid\n",
				__func__, lcid, ch->remote_state);
		ret = -EINVAL;
	}
out:
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
	if (tx_ready)
		list_channel(ch);

	return ret;
}

/**
 * Handle receive DATA command.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_data_cmd(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret = 0;
	int do_retry = 0;
	int tx_ready = 0;
	int tmp;
	int rx_len;
	struct smux_lch_t *ch;
	union notifier_metadata metadata;
	int remote_loopback;
	struct smux_pkt_t *ack_pkt;
	unsigned long flags;

	if (!pkt || smux_assert_lch_id(pkt->hdr.lcid)) {
		ret = -ENXIO;
		goto out;
	}

	rx_len = pkt->hdr.payload_len;
	if (rx_len == 0) {
		ret = -EINVAL;
		goto out;
	}

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];
	spin_lock_irqsave(&ch->state_lock_lhb1, flags);
	remote_loopback = ch->remote_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK;

	if (ch->local_state != SMUX_LCH_LOCAL_OPENED
		&& !remote_loopback) {
		pr_err("smux: ch %d error data on local state 0x%x\n",
					lcid, ch->local_state);
		ret = -EIO;
		spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
		goto out;
	}

	if (ch->remote_state != SMUX_LCH_REMOTE_OPENED) {
		pr_err("smux: ch %d error data on remote state 0x%x\n",
					lcid, ch->remote_state);
		ret = -EIO;
		spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
		goto out;
	}

	if (!list_empty(&ch->rx_retry_queue)) {
		do_retry = 1;

		if ((ch->options & SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP) &&
			!ch->rx_flow_control_auto &&
			((ch->rx_retry_queue_cnt + 1) >= SMUX_RX_WM_HIGH)) {
			/* need to flow control RX */
			ch->rx_flow_control_auto = 1;
			tx_ready |= smux_rx_flow_control_updated(ch);
			schedule_notify(ch->lcid, SMUX_RX_RETRY_HIGH_WM_HIT,
					NULL);
		}
		if ((ch->rx_retry_queue_cnt + 1) > SMUX_RX_RETRY_MAX_PKTS) {
			/* retry queue full */
			pr_err("%s: ch %d RX retry queue full\n",
					__func__, lcid);
			schedule_notify(lcid, SMUX_READ_FAIL, NULL);
			ret = -ENOMEM;
			spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
			goto out;
		}
	}
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);

	if (remote_loopback) {
		/* Echo the data back to the remote client. */
		ack_pkt = smux_alloc_pkt();
		if (ack_pkt) {
			ack_pkt->hdr.lcid = lcid;
			ack_pkt->hdr.cmd = SMUX_CMD_DATA;
			ack_pkt->hdr.flags = 0;
			ack_pkt->hdr.payload_len = pkt->hdr.payload_len;
			if (ack_pkt->hdr.payload_len) {
				smux_alloc_pkt_payload(ack_pkt);
				memcpy(ack_pkt->payload, pkt->payload,
						ack_pkt->hdr.payload_len);
			}
			ack_pkt->hdr.pad_len = pkt->hdr.pad_len;
			smux_tx_queue(ack_pkt, ch, 0);
			tx_ready = 1;
		} else {
			pr_err("%s: Remote loopback allocation failure\n",
					__func__);
		}
	} else if (!do_retry) {
		/* request buffer from client */
		metadata.read.pkt_priv = 0;
		metadata.read.buffer = 0;
		tmp = ch->get_rx_buffer(ch->priv,
				(void **)&metadata.read.pkt_priv,
				(void **)&metadata.read.buffer,
				rx_len);

		if (tmp == 0 && metadata.read.buffer) {
			/* place data into RX buffer */
			memcpy(metadata.read.buffer, pkt->payload,
					rx_len);
			metadata.read.len = rx_len;
			schedule_notify(lcid, SMUX_READ_DONE,
					&metadata);
		} else if (tmp == -EAGAIN ||
				(tmp == 0 && !metadata.read.buffer)) {
			/* buffer allocation failed - add to retry queue */
			do_retry = 1;
		} else if (tmp < 0) {
			pr_err("%s: ch %d Client RX buffer alloc failed %d\n",
					__func__, lcid, tmp);
			schedule_notify(lcid, SMUX_READ_FAIL, NULL);
			ret = -ENOMEM;
		}
	}

	if (do_retry) {
		struct smux_rx_pkt_retry *retry;

		retry = kmalloc(sizeof(struct smux_rx_pkt_retry), GFP_KERNEL);
		if (!retry) {
			pr_err("%s: retry alloc failure\n", __func__);
			ret = -ENOMEM;
			schedule_notify(lcid, SMUX_READ_FAIL, NULL);
			goto out;
		}
		INIT_LIST_HEAD(&retry->rx_retry_list);
		retry->timeout_in_ms = SMUX_RX_RETRY_MIN_MS;

		/* copy packet */
		retry->pkt = smux_alloc_pkt();
		if (!retry->pkt) {
			kfree(retry);
			pr_err("%s: pkt alloc failure\n", __func__);
			ret = -ENOMEM;
			schedule_notify(lcid, SMUX_READ_FAIL, NULL);
			goto out;
		}
		retry->pkt->hdr.lcid = lcid;
		retry->pkt->hdr.payload_len = pkt->hdr.payload_len;
		retry->pkt->hdr.pad_len = pkt->hdr.pad_len;
		if (retry->pkt->hdr.payload_len) {
			smux_alloc_pkt_payload(retry->pkt);
			memcpy(retry->pkt->payload, pkt->payload,
					retry->pkt->hdr.payload_len);
		}

		/* add to retry queue */
		spin_lock_irqsave(&ch->state_lock_lhb1, flags);
		list_add_tail(&retry->rx_retry_list, &ch->rx_retry_queue);
		++ch->rx_retry_queue_cnt;
		if (ch->rx_retry_queue_cnt == 1)
			queue_delayed_work(smux_rx_wq, &ch->rx_retry_work,
				msecs_to_jiffies(retry->timeout_in_ms));
		spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
	}

	if (tx_ready)
		list_channel(ch);
out:
	return ret;
}

/**
 * Handle receive byte command for testing purposes.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_byte_cmd(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret;
	struct smux_lch_t *ch;
	union notifier_metadata metadata;
	unsigned long flags;

	if (!pkt || smux_assert_lch_id(pkt->hdr.lcid)) {
		pr_err("%s: invalid packet or channel id\n", __func__);
		return -ENXIO;
	}

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];
	spin_lock_irqsave(&ch->state_lock_lhb1, flags);

	if (ch->local_state != SMUX_LCH_LOCAL_OPENED) {
		pr_err("smux: ch %d error data on local state 0x%x\n",
				lcid, ch->local_state);
		ret = -EIO;
		goto out;
	}

	if (ch->remote_state != SMUX_LCH_REMOTE_OPENED) {
		pr_err("smux: ch %d error data on remote state 0x%x\n",
				lcid, ch->remote_state);
		ret = -EIO;
		goto out;
	}

	metadata.read.pkt_priv = (void *)(int)pkt->hdr.flags;
	metadata.read.buffer = 0;
	schedule_notify(lcid, SMUX_READ_DONE, &metadata);
	ret = 0;

out:
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
	return ret;
}

/**
 * Handle receive status command.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_status_cmd(struct smux_pkt_t *pkt)
{
	uint8_t lcid;
	int ret = 0;
	struct smux_lch_t *ch;
	union notifier_metadata meta;
	unsigned long flags;
	int tx_ready = 0;

	lcid = pkt->hdr.lcid;
	ch = &smux_lch[lcid];

	spin_lock_irqsave(&ch->state_lock_lhb1, flags);
	meta.tiocm.tiocm_old = ch->remote_tiocm;
	meta.tiocm.tiocm_new = pkt->hdr.flags;

	/* update logical channel flow control */
	if ((meta.tiocm.tiocm_old & SMUX_CMD_STATUS_FLOW_CNTL) ^
		(meta.tiocm.tiocm_new & SMUX_CMD_STATUS_FLOW_CNTL)) {
		/* logical channel flow control changed */
		if (pkt->hdr.flags & SMUX_CMD_STATUS_FLOW_CNTL) {
			/* disabled TX */
			SMUX_DBG("TX Flow control enabled\n");
			ch->tx_flow_control = 1;
		} else {
			/* re-enable channel */
			SMUX_DBG("TX Flow control disabled\n");
			ch->tx_flow_control = 0;
			tx_ready = 1;
		}
	}
	meta.tiocm.tiocm_old = msm_smux_tiocm_get_atomic(ch);
	ch->remote_tiocm = pkt->hdr.flags;
	meta.tiocm.tiocm_new = msm_smux_tiocm_get_atomic(ch);

	/* client notification for status change */
	if (IS_FULLY_OPENED(ch)) {
		if (meta.tiocm.tiocm_old != meta.tiocm.tiocm_new)
			schedule_notify(lcid, SMUX_TIOCM_UPDATE, &meta);
		ret = 0;
	}
	spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
	if (tx_ready)
		list_channel(ch);

	return ret;
}

/**
 * Handle receive power command.
 *
 * @pkt Received packet
 *
 * @returns 0 for success
 */
static int smux_handle_rx_power_cmd(struct smux_pkt_t *pkt)
{
	struct smux_pkt_t *ack_pkt = NULL;
	unsigned long flags;

	spin_lock_irqsave(&smux.tx_lock_lha2, flags);
	if (pkt->hdr.flags & SMUX_CMD_PWR_CTL_ACK) {
		/* local sleep request ack */
		if (smux.power_state == SMUX_PWR_TURNING_OFF) {
			/* Power-down complete, turn off UART */
			SMUX_PWR("%s: Power %d->%d\n", __func__,
					smux.power_state, SMUX_PWR_OFF_FLUSH);
			smux.power_state = SMUX_PWR_OFF_FLUSH;
			queue_work(smux_tx_wq, &smux_inactivity_work);
		} else {
			pr_err("%s: sleep request ack invalid in state %d\n",
					__func__, smux.power_state);
		}
	} else {
		/*
		 * Remote sleep request
		 *
		 * Even if we have data pending, we need to transition to the
		 * POWER_OFF state and then perform a wakeup since the remote
		 * side has requested a power-down.
		 *
		 * The state here is set to SMUX_PWR_TURNING_OFF_FLUSH and
		 * the TX thread will set the state to SMUX_PWR_TURNING_OFF
		 * when it sends the packet.
		 */
		if (smux.power_state == SMUX_PWR_ON
			|| smux.power_state == SMUX_PWR_TURNING_OFF) {
			ack_pkt = smux_alloc_pkt();
			if (ack_pkt) {
				SMUX_PWR("%s: Power %d->%d\n", __func__,
						smux.power_state,
						SMUX_PWR_TURNING_OFF_FLUSH);

				smux.power_state = SMUX_PWR_TURNING_OFF_FLUSH;

				/* send power-down ack */
				ack_pkt->hdr.cmd = SMUX_CMD_PWR_CTL;
				ack_pkt->hdr.flags = SMUX_CMD_PWR_CTL_ACK;
				ack_pkt->hdr.lcid = SMUX_BROADCAST_LCID;
				list_add_tail(&ack_pkt->list,
						&smux.power_queue);
				queue_work(smux_tx_wq, &smux_tx_work);
			}
		} else {
			pr_err("%s: sleep request invalid in state %d\n",
					__func__, smux.power_state);
		}
	}
	spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);

	return 0;
}

/**
 * Handle dispatching a completed packet for receive processing.
 *
 * @pkt Packet to process
 *
 * @returns 0 for success
 */
static int smux_dispatch_rx_pkt(struct smux_pkt_t *pkt)
{
	int ret = -ENXIO;

	SMUX_LOG_PKT_RX(pkt);

	switch (pkt->hdr.cmd) {
	case SMUX_CMD_OPEN_LCH:
		if (smux_assert_lch_id(pkt->hdr.lcid)) {
			pr_err("%s: invalid channel id %d\n",
					__func__, pkt->hdr.lcid);
			break;
		}
		ret = smux_handle_rx_open_cmd(pkt);
		break;

	case SMUX_CMD_DATA:
		if (smux_assert_lch_id(pkt->hdr.lcid)) {
			pr_err("%s: invalid channel id %d\n",
					__func__, pkt->hdr.lcid);
			break;
		}
		ret = smux_handle_rx_data_cmd(pkt);
		break;

	case SMUX_CMD_CLOSE_LCH:
		if (smux_assert_lch_id(pkt->hdr.lcid)) {
			pr_err("%s: invalid channel id %d\n",
					__func__, pkt->hdr.lcid);
			break;
		}
		ret = smux_handle_rx_close_cmd(pkt);
		break;

	case SMUX_CMD_STATUS:
		if (smux_assert_lch_id(pkt->hdr.lcid)) {
			pr_err("%s: invalid channel id %d\n",
					__func__, pkt->hdr.lcid);
			break;
		}
		ret = smux_handle_rx_status_cmd(pkt);
		break;

	case SMUX_CMD_PWR_CTL:
		ret = smux_handle_rx_power_cmd(pkt);
		break;

	case SMUX_CMD_BYTE:
		ret = smux_handle_rx_byte_cmd(pkt);
		break;

	default:
		pr_err("%s: command %d unknown\n", __func__, pkt->hdr.cmd);
		ret = -EINVAL;
	}
	return ret;
}

/**
 * Deserializes a packet and dispatches it to the packet receive logic.
 *
 * @data Raw data for one packet
 * @len  Length of the data
 *
 * @returns 0 for success
 */
static int smux_deserialize(unsigned char *data, int len)
{
	struct smux_pkt_t recv;

	smux_init_pkt(&recv);

	/*
	 * It may be possible to optimize this to not use the
	 * temporary buffer.
	 */
	memcpy(&recv.hdr, data, sizeof(struct smux_hdr_t));

	if (recv.hdr.magic != SMUX_MAGIC) {
		pr_err("%s: invalid header magic\n", __func__);
		return -EINVAL;
	}

	if (recv.hdr.payload_len)
		recv.payload = data + sizeof(struct smux_hdr_t);

	return smux_dispatch_rx_pkt(&recv);
}

/**
 * Handle wakeup request byte.
 */
static void smux_handle_wakeup_req(void)
{
	unsigned long flags;

	spin_lock_irqsave(&smux.tx_lock_lha2, flags);
	if (smux.power_state == SMUX_PWR_OFF
		|| smux.power_state == SMUX_PWR_TURNING_ON) {
		/* wakeup system */
		SMUX_PWR("%s: Power %d->%d\n", __func__,
				smux.power_state, SMUX_PWR_ON);
		smux.power_state = SMUX_PWR_ON;
		queue_work(smux_tx_wq, &smux_wakeup_work);
		queue_work(smux_tx_wq, &smux_tx_work);
		queue_delayed_work(smux_tx_wq, &smux_delayed_inactivity_work,
			msecs_to_jiffies(SMUX_INACTIVITY_TIMEOUT_MS));
		smux_send_byte(SMUX_WAKEUP_ACK);
	} else {
		smux_send_byte(SMUX_WAKEUP_ACK);
	}
	spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
}

/**
 * Handle wakeup request ack.
 */
static void smux_handle_wakeup_ack(void)
{
	unsigned long flags;

	spin_lock_irqsave(&smux.tx_lock_lha2, flags);
	if (smux.power_state == SMUX_PWR_TURNING_ON) {
		/* received response to wakeup request */
		SMUX_PWR("%s: Power %d->%d\n", __func__,
				smux.power_state, SMUX_PWR_ON);
		smux.power_state = SMUX_PWR_ON;
		queue_work(smux_tx_wq, &smux_tx_work);
		queue_delayed_work(smux_tx_wq, &smux_delayed_inactivity_work,
			msecs_to_jiffies(SMUX_INACTIVITY_TIMEOUT_MS));

	} else if (smux.power_state != SMUX_PWR_ON) {
		/* invalid message */
		pr_err("%s: wakeup request ack invalid in state %d\n",
				__func__, smux.power_state);
	}
	spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
}
1836
1837/**
1838 * RX State machine - IDLE state processing.
1839 *
1840 * @data New RX data to process
1841 * @len Length of the data
1842 * @used In/out count of bytes of @data already consumed; updated on return
1843 * @flag Error flag - TTY_NORMAL (0) for no error
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001844 */
1845static void smux_rx_handle_idle(const unsigned char *data,
1846 int len, int *used, int flag)
1847{
1848 int i;
1849
1850 if (flag) {
1851 if (smux_byte_loopback)
1852 smux_receive_byte(SMUX_UT_ECHO_ACK_FAIL,
1853 smux_byte_loopback);
1854 pr_err("%s: TTY error 0x%x - ignoring\n", __func__, flag);
1855 ++*used;
1856 return;
1857 }
1858
1859 for (i = *used; i < len && smux.rx_state == SMUX_RX_IDLE; i++) {
1860 switch (data[i]) {
1861 case SMUX_MAGIC_WORD1:
1862 smux.rx_state = SMUX_RX_MAGIC;
1863 break;
1864 case SMUX_WAKEUP_REQ:
1865 smux_handle_wakeup_req();
1866 break;
1867 case SMUX_WAKEUP_ACK:
1868 smux_handle_wakeup_ack();
1869 break;
1870 default:
1871 /* unexpected character */
1872 if (smux_byte_loopback && data[i] == SMUX_UT_ECHO_REQ)
1873 smux_receive_byte(SMUX_UT_ECHO_ACK_OK,
1874 smux_byte_loopback);
1875 pr_err("%s: parse error 0x%02x - ignoring\n", __func__,
1876 (unsigned)data[i]);
1877 break;
1878 }
1879 }
1880
1881 *used = i;
1882}
1883
1884/**
1885 * RX State machine - Header Magic state processing.
1886 *
1887 * @data New RX data to process
1888 * @len Length of the data
1889 * @used In/out count of bytes of @data already consumed; updated on return
1890 * @flag Error flag - TTY_NORMAL (0) for no error
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001891 */
1892static void smux_rx_handle_magic(const unsigned char *data,
1893 int len, int *used, int flag)
1894{
1895 int i;
1896
1897 if (flag) {
1898 pr_err("%s: TTY RX error %d\n", __func__, flag);
1899 smux_enter_reset();
1900 smux.rx_state = SMUX_RX_FAILURE;
1901 ++*used;
1902 return;
1903 }
1904
1905 for (i = *used; i < len && smux.rx_state == SMUX_RX_MAGIC; i++) {
1906 /* wait for completion of the magic */
1907 if (data[i] == SMUX_MAGIC_WORD2) {
1908 smux.recv_len = 0;
1909 smux.recv_buf[smux.recv_len++] = SMUX_MAGIC_WORD1;
1910 smux.recv_buf[smux.recv_len++] = SMUX_MAGIC_WORD2;
1911 smux.rx_state = SMUX_RX_HDR;
1912 } else {
1913 /* unexpected / trash character */
1914 pr_err("%s: rx parse error for char %c; *used=%d, len=%d\n",
1915 __func__, data[i], *used, len);
1916 smux.rx_state = SMUX_RX_IDLE;
1917 }
1918 }
1919
1920 *used = i;
1921}
1922
1923/**
1924 * RX State machine - Packet Header state processing.
1925 *
1926 * @data New RX data to process
1927 * @len Length of the data
1928 * @used In/out count of bytes of @data already consumed; updated on return
1929 * @flag Error flag - TTY_NORMAL (0) for no error
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001930 */
1931static void smux_rx_handle_hdr(const unsigned char *data,
1932 int len, int *used, int flag)
1933{
1934 int i;
1935 struct smux_hdr_t *hdr;
1936
1937 if (flag) {
1938 pr_err("%s: TTY RX error %d\n", __func__, flag);
1939 smux_enter_reset();
1940 smux.rx_state = SMUX_RX_FAILURE;
1941 ++*used;
1942 return;
1943 }
1944
1945 for (i = *used; i < len && smux.rx_state == SMUX_RX_HDR; i++) {
1946 smux.recv_buf[smux.recv_len++] = data[i];
1947
1948 if (smux.recv_len == sizeof(struct smux_hdr_t)) {
1949 /* complete header received */
1950 hdr = (struct smux_hdr_t *)smux.recv_buf;
1951 smux.pkt_remain = hdr->payload_len + hdr->pad_len;
1952 smux.rx_state = SMUX_RX_PAYLOAD;
1953 }
1954 }
1955 *used = i;
1956}
1957
1958/**
1959 * RX State machine - Packet Payload state processing.
1960 *
1961 * @data New RX data to process
1962 * @len Length of the data
1963 * @used In/out count of bytes of @data already consumed; updated on return
1964 * @flag Error flag - TTY_NORMAL (0) for no error
Eric Holmberg8ed30f22012-05-10 19:16:51 -06001965 */
1966static void smux_rx_handle_pkt_payload(const unsigned char *data,
1967 int len, int *used, int flag)
1968{
1969 int remaining;
1970
1971 if (flag) {
1972 pr_err("%s: TTY RX error %d\n", __func__, flag);
1973 smux_enter_reset();
1974 smux.rx_state = SMUX_RX_FAILURE;
1975 ++*used;
1976 return;
1977 }
1978
1979 /* copy data into rx buffer */
1980 if (smux.pkt_remain < (len - *used))
1981 remaining = smux.pkt_remain;
1982 else
1983 remaining = len - *used;
1984
1985 memcpy(&smux.recv_buf[smux.recv_len], &data[*used], remaining);
1986 smux.recv_len += remaining;
1987 smux.pkt_remain -= remaining;
1988 *used += remaining;
1989
1990 if (smux.pkt_remain == 0) {
1991 /* complete packet received */
1992 smux_deserialize(smux.recv_buf, smux.recv_len);
1993 smux.rx_state = SMUX_RX_IDLE;
1994 }
1995}
1996
1997/**
1998 * Feed data to the receive state machine.
1999 *
2000 * @data Pointer to data block
2001 * @len Length of data
2002 * @flag TTY_NORMAL (0) for no error, otherwise TTY Error Flag
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002003 */
2004void smux_rx_state_machine(const unsigned char *data,
2005 int len, int flag)
2006{
Eric Holmberg0560f7a2012-05-31 15:50:26 -06002007 struct smux_rx_worker_data work;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002008
Eric Holmberg0560f7a2012-05-31 15:50:26 -06002009 work.data = data;
2010 work.len = len;
2011 work.flag = flag;
2012 INIT_WORK_ONSTACK(&work.work, smux_rx_worker);
2013 work.work_complete = COMPLETION_INITIALIZER_ONSTACK(work.work_complete);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002014
Eric Holmberg0560f7a2012-05-31 15:50:26 -06002015 queue_work(smux_rx_wq, &work.work);
2016 wait_for_completion(&work.work_complete);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002017}
2018
2019/**
2020 * Add channel to transmit-ready list and trigger transmit worker.
2021 *
2022 * @ch Channel to add
2023 */
2024static void list_channel(struct smux_lch_t *ch)
2025{
2026 unsigned long flags;
2027
2028 SMUX_DBG("%s: listing channel %d\n",
2029 __func__, ch->lcid);
2030
2031 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
2032 spin_lock(&ch->tx_lock_lhb2);
2033 smux.tx_activity_flag = 1;
2034 if (list_empty(&ch->tx_ready_list))
2035 list_add_tail(&ch->tx_ready_list, &smux.lch_tx_ready_list);
2036 spin_unlock(&ch->tx_lock_lhb2);
2037 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2038
2039 queue_work(smux_tx_wq, &smux_tx_work);
2040}
2041
2042/**
2043 * Transmit packet on correct transport and then perform client
2044 * notification.
2045 *
2046 * @ch Channel to transmit on
2047 * @pkt Packet to transmit
2048 */
2049static void smux_tx_pkt(struct smux_lch_t *ch, struct smux_pkt_t *pkt)
2050{
2051 union notifier_metadata meta_write;
2052 int ret;
2053
2054 if (ch && pkt) {
2055 SMUX_LOG_PKT_TX(pkt);
2056 if (ch->local_mode == SMUX_LCH_MODE_LOCAL_LOOPBACK)
2057 ret = smux_tx_loopback(pkt);
2058 else
2059 ret = smux_tx_tty(pkt);
2060
2061 if (pkt->hdr.cmd == SMUX_CMD_DATA) {
2062 /* notify write-done */
2063 meta_write.write.pkt_priv = pkt->priv;
2064 meta_write.write.buffer = pkt->payload;
2065 meta_write.write.len = pkt->hdr.payload_len;
2066 if (ret >= 0) {
2067 SMUX_DBG("%s: PKT write done", __func__);
2068 schedule_notify(ch->lcid, SMUX_WRITE_DONE,
2069 &meta_write);
2070 } else {
2071 pr_err("%s: failed to write pkt %d\n",
2072 __func__, ret);
2073 schedule_notify(ch->lcid, SMUX_WRITE_FAIL,
2074 &meta_write);
2075 }
2076 }
2077 }
2078}
2079
2080/**
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002081 * Flush pending TTY TX data.
2082 */
2083static void smux_flush_tty(void)
2084{
Eric Holmberg92a67df2012-06-25 13:56:24 -06002085 mutex_lock(&smux.mutex_lha0);
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002086 if (!smux.tty) {
2087 pr_err("%s: ldisc not loaded\n", __func__);
Eric Holmberg92a67df2012-06-25 13:56:24 -06002088 mutex_unlock(&smux.mutex_lha0);
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002089 return;
2090 }
2091
2092 tty_wait_until_sent(smux.tty,
2093 msecs_to_jiffies(TTY_BUFFER_FULL_WAIT_MS));
2094
2095 if (tty_chars_in_buffer(smux.tty) > 0)
2096 pr_err("%s: unable to flush UART queue\n", __func__);
Eric Holmberg92a67df2012-06-25 13:56:24 -06002097
2098 mutex_unlock(&smux.mutex_lha0);
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002099}
2100
2101/**
Eric Holmberged1f00c2012-06-07 09:45:18 -06002102 * Purge TX queue for logical channel.
2103 *
2104 * @ch Logical channel pointer
2105 *
2106 * Must be called with the following spinlocks locked:
2107 * state_lock_lhb1
2108 * tx_lock_lhb2
2109 */
2110static void smux_purge_ch_tx_queue(struct smux_lch_t *ch)
2111{
2112 struct smux_pkt_t *pkt;
2113 int send_disconnect = 0;
2114
2115 while (!list_empty(&ch->tx_queue)) {
2116 pkt = list_first_entry(&ch->tx_queue, struct smux_pkt_t,
2117 list);
2118 list_del(&pkt->list);
2119
2120 if (pkt->hdr.cmd == SMUX_CMD_OPEN_LCH) {
2121 /* Open was never sent, just force to closed state */
2122 ch->local_state = SMUX_LCH_LOCAL_CLOSED;
2123 send_disconnect = 1;
2124 } else if (pkt->hdr.cmd == SMUX_CMD_DATA) {
2125 /* Notify client of failed write */
2126 union notifier_metadata meta_write;
2127
2128 meta_write.write.pkt_priv = pkt->priv;
2129 meta_write.write.buffer = pkt->payload;
2130 meta_write.write.len = pkt->hdr.payload_len;
2131 schedule_notify(ch->lcid, SMUX_WRITE_FAIL, &meta_write);
2132 }
2133 smux_free_pkt(pkt);
2134 }
2135
2136 if (send_disconnect) {
2137 union notifier_metadata meta_disconnected;
2138
2139 meta_disconnected.disconnected.is_ssr = smux.in_reset;
2140 schedule_notify(ch->lcid, SMUX_DISCONNECTED,
2141 &meta_disconnected);
2142 }
2143}
2144
2145/**
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002146 * Power-up the UART.
Eric Holmberg92a67df2012-06-25 13:56:24 -06002147 *
2148 * Must be called with smux.mutex_lha0 already locked.
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002149 */
Eric Holmberg92a67df2012-06-25 13:56:24 -06002150static void smux_uart_power_on_atomic(void)
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002151{
2152 struct uart_state *state;
2153
2154 if (!smux.tty || !smux.tty->driver_data) {
2155 pr_err("%s: unable to find UART port for tty %p\n",
2156 __func__, smux.tty);
2157 return;
2158 }
2159 state = smux.tty->driver_data;
2160 msm_hs_request_clock_on(state->uart_port);
2161}
2162
2163/**
Eric Holmberg92a67df2012-06-25 13:56:24 -06002164 * Power-up the UART.
2165 */
2166static void smux_uart_power_on(void)
2167{
2168 mutex_lock(&smux.mutex_lha0);
2169 smux_uart_power_on_atomic();
2170 mutex_unlock(&smux.mutex_lha0);
2171}
2172
2173/**
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002174 * Power down the UART.
2175 */
2176static void smux_uart_power_off(void)
2177{
2178 struct uart_state *state;
2179
Eric Holmberg92a67df2012-06-25 13:56:24 -06002180 mutex_lock(&smux.mutex_lha0);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002181 if (!smux.tty || !smux.tty->driver_data) {
2182 pr_err("%s: unable to find UART port for tty %p\n",
2183 __func__, smux.tty);
Eric Holmberg92a67df2012-06-25 13:56:24 -06002184 mutex_unlock(&smux.mutex_lha0);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002185 return;
2186 }
2187 state = smux.tty->driver_data;
2188 msm_hs_request_clock_off(state->uart_port);
Eric Holmberg92a67df2012-06-25 13:56:24 -06002189 mutex_unlock(&smux.mutex_lha0);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002190}
2191
2192/**
2193 * TX Wakeup Worker
2194 *
2195 * @work Not used
2196 *
2197 * Do an exponential back-off wakeup sequence with a maximum period
2198 * of approximately 1 second (1 << 20 microseconds).
2199 */
2200static void smux_wakeup_worker(struct work_struct *work)
2201{
2202 unsigned long flags;
2203 unsigned wakeup_delay;
2204 int complete = 0;
2205
Eric Holmberged1f00c2012-06-07 09:45:18 -06002206 while (!smux.in_reset) {
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002207 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
2208 if (smux.power_state == SMUX_PWR_ON) {
2209 /* wakeup complete */
2210 complete = 1;
2211 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2212 break;
2213 } else {
2214 /* retry */
2215 wakeup_delay = smux.pwr_wakeup_delay_us;
2216 smux.pwr_wakeup_delay_us <<= 1;
2217 if (smux.pwr_wakeup_delay_us > SMUX_WAKEUP_DELAY_MAX)
2218 smux.pwr_wakeup_delay_us =
2219 SMUX_WAKEUP_DELAY_MAX;
2220 }
2221 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2222 SMUX_DBG("%s: triggering wakeup\n", __func__);
2223 smux_send_byte(SMUX_WAKEUP_REQ);
2224
2225 if (wakeup_delay < SMUX_WAKEUP_DELAY_MIN) {
2226 SMUX_DBG("%s: sleeping for %u us\n", __func__,
2227 wakeup_delay);
2228 usleep_range(wakeup_delay, 2*wakeup_delay);
2229 } else {
2230 /* schedule delayed work */
2231 SMUX_DBG("%s: scheduling delayed wakeup in %u ms\n",
2232 __func__, wakeup_delay / 1000);
2233 queue_delayed_work(smux_tx_wq,
2234 &smux_wakeup_delayed_work,
2235 msecs_to_jiffies(wakeup_delay / 1000));
2236 break;
2237 }
2238 }
2239
2240 if (complete) {
2241 SMUX_DBG("%s: wakeup complete\n", __func__);
2242 /*
2243 * Cancel any pending retry. This avoids a race condition with
2244 * a new power-up request because:
2245 * 1) this worker doesn't modify the state
2246 * 2) this worker is processed on the same single-threaded
2247 * workqueue as new TX wakeup requests
2248 */
2249 cancel_delayed_work(&smux_wakeup_delayed_work);
2250 }
2251}
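
/*
 * Back-off progression sketch (illustrative; the exact thresholds come
 * from the SMUX_WAKEUP_DELAY_* defines): smux.pwr_wakeup_delay_us
 * starts at 1 us when a wakeup sequence begins and doubles on every
 * unanswered SMUX_WAKEUP_REQ (1, 2, 4, ... us).  While the delay is
 * below SMUX_WAKEUP_DELAY_MIN the worker sleeps inline with
 * usleep_range(); at or above that threshold the retry is rescheduled
 * as delayed work in millisecond units, and the delay saturates at
 * SMUX_WAKEUP_DELAY_MAX (roughly one second), as described in the
 * comment above.
 */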
2252
2253
2254/**
2255 * Inactivity timeout worker. Periodically scheduled when link is active.
2256 * When it detects inactivity, it will power-down the UART link.
2257 *
2258 * @work Work structure (not used)
2259 */
2260static void smux_inactivity_worker(struct work_struct *work)
2261{
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002262 struct smux_pkt_t *pkt;
2263 unsigned long flags;
2264
2265 spin_lock_irqsave(&smux.rx_lock_lha1, flags);
2266 spin_lock(&smux.tx_lock_lha2);
2267
2268 if (!smux.tx_activity_flag && !smux.rx_activity_flag) {
2269 /* no activity */
2270 if (smux.powerdown_enabled) {
2271 if (smux.power_state == SMUX_PWR_ON) {
2272 /* start power-down sequence */
2273 pkt = smux_alloc_pkt();
2274 if (pkt) {
Eric Holmbergff0b0112012-06-08 15:06:57 -06002275 SMUX_PWR("%s: Power %d->%d\n", __func__,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002276 smux.power_state,
2277 SMUX_PWR_TURNING_OFF);
2278 smux.power_state = SMUX_PWR_TURNING_OFF;
2279
2280 /* send power-down request */
2281 pkt->hdr.cmd = SMUX_CMD_PWR_CTL;
2282 pkt->hdr.flags = 0;
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002283 pkt->hdr.lcid = SMUX_BROADCAST_LCID;
2284 list_add_tail(&pkt->list,
2285 &smux.power_queue);
2286 queue_work(smux_tx_wq, &smux_tx_work);
2287 } else {
2288 pr_err("%s: packet alloc failed\n",
2289 __func__);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002290 }
2291 }
2292 } else {
2293 SMUX_DBG("%s: link inactive, but powerdown disabled\n",
2294 __func__);
2295 }
2296 }
2297 smux.tx_activity_flag = 0;
2298 smux.rx_activity_flag = 0;
2299
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002300 if (smux.power_state == SMUX_PWR_OFF_FLUSH) {
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002301 /* ready to power-down the UART */
Eric Holmbergff0b0112012-06-08 15:06:57 -06002302 SMUX_PWR("%s: Power %d->%d\n", __func__,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002303 smux.power_state, SMUX_PWR_OFF);
Eric Holmbergff0b0112012-06-08 15:06:57 -06002304 smux.power_state = SMUX_PWR_OFF;
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002305
2306 /* if data is pending, schedule a new wakeup */
2307 if (!list_empty(&smux.lch_tx_ready_list) ||
2308 !list_empty(&smux.power_queue))
2309 queue_work(smux_tx_wq, &smux_tx_work);
2310
2311 spin_unlock(&smux.tx_lock_lha2);
2312 spin_unlock_irqrestore(&smux.rx_lock_lha1, flags);
2313
2314 /* flush UART output queue and power down */
2315 smux_flush_tty();
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002316 smux_uart_power_off();
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002317 } else {
2318 spin_unlock(&smux.tx_lock_lha2);
2319 spin_unlock_irqrestore(&smux.rx_lock_lha1, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002320 }
2321
2322 /* reschedule inactivity worker */
2323 if (smux.power_state != SMUX_PWR_OFF)
2324 queue_delayed_work(smux_tx_wq, &smux_delayed_inactivity_work,
2325 msecs_to_jiffies(SMUX_INACTIVITY_TIMEOUT_MS));
2326}
2327
2328/**
Eric Holmbergb8435c82012-06-05 14:51:29 -06002329 * Remove RX retry packet from channel and free it.
2330 *
Eric Holmbergb8435c82012-06-05 14:51:29 -06002331 * @ch Channel for retry packet
2332 * @retry Retry packet to remove
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002333 *
2334 * @returns 1 if flow control updated; 0 otherwise
2335 *
2336 * Must be called with state_lock_lhb1 locked.
Eric Holmbergb8435c82012-06-05 14:51:29 -06002337 */
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002338int smux_remove_rx_retry(struct smux_lch_t *ch,
Eric Holmbergb8435c82012-06-05 14:51:29 -06002339 struct smux_rx_pkt_retry *retry)
2340{
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002341 int tx_ready = 0;
2342
Eric Holmbergb8435c82012-06-05 14:51:29 -06002343 list_del(&retry->rx_retry_list);
2344 --ch->rx_retry_queue_cnt;
2345 smux_free_pkt(retry->pkt);
2346 kfree(retry);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002347
2348 if ((ch->options & SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP) &&
2349 (ch->rx_retry_queue_cnt <= SMUX_RX_WM_LOW) &&
2350 ch->rx_flow_control_auto) {
2351 ch->rx_flow_control_auto = 0;
2352 smux_rx_flow_control_updated(ch);
2353 schedule_notify(ch->lcid, SMUX_RX_RETRY_LOW_WM_HIT, NULL);
2354 tx_ready = 1;
2355 }
2356 return tx_ready;
Eric Holmbergb8435c82012-06-05 14:51:29 -06002357}
2358
2359/**
Eric Holmberg0560f7a2012-05-31 15:50:26 -06002360 * RX worker handles all receive operations.
2361 *
2362 * @work Work structure contained in struct smux_rx_worker_data
2363 */
2364static void smux_rx_worker(struct work_struct *work)
2365{
2366 unsigned long flags;
2367 int used;
2368 int initial_rx_state;
2369 struct smux_rx_worker_data *w;
2370 const unsigned char *data;
2371 int len;
2372 int flag;
2373
2374 w = container_of(work, struct smux_rx_worker_data, work);
2375 data = w->data;
2376 len = w->len;
2377 flag = w->flag;
2378
2379 spin_lock_irqsave(&smux.rx_lock_lha1, flags);
2380 smux.rx_activity_flag = 1;
2381 spin_unlock_irqrestore(&smux.rx_lock_lha1, flags);
2382
2383 SMUX_DBG("%s: %p, len=%d, flag=%d\n", __func__, data, len, flag);
2384 used = 0;
2385 do {
2386 SMUX_DBG("%s: state %d; %d of %d\n",
2387 __func__, smux.rx_state, used, len);
2388 initial_rx_state = smux.rx_state;
2389
2390 switch (smux.rx_state) {
2391 case SMUX_RX_IDLE:
2392 smux_rx_handle_idle(data, len, &used, flag);
2393 break;
2394 case SMUX_RX_MAGIC:
2395 smux_rx_handle_magic(data, len, &used, flag);
2396 break;
2397 case SMUX_RX_HDR:
2398 smux_rx_handle_hdr(data, len, &used, flag);
2399 break;
2400 case SMUX_RX_PAYLOAD:
2401 smux_rx_handle_pkt_payload(data, len, &used, flag);
2402 break;
2403 default:
2404 SMUX_DBG("%s: invalid state %d\n",
2405 __func__, smux.rx_state);
2406 smux.rx_state = SMUX_RX_IDLE;
2407 break;
2408 }
2409 } while (used < len || smux.rx_state != initial_rx_state);
2410
2411 complete(&w->work_complete);
2412}
2413
2414/**
Eric Holmbergb8435c82012-06-05 14:51:29 -06002415 * RX Retry worker handles retrying get_rx_buffer calls that previously failed
2416 * because the client was not ready (-EAGAIN).
2417 *
2418 * @work Work structure contained in smux_lch_t structure
2419 */
2420static void smux_rx_retry_worker(struct work_struct *work)
2421{
2422 struct smux_lch_t *ch;
2423 struct smux_rx_pkt_retry *retry;
2424 union notifier_metadata metadata;
2425 int tmp;
2426 unsigned long flags;
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002427 int immediate_retry = 0;
2428 int tx_ready = 0;
Eric Holmbergb8435c82012-06-05 14:51:29 -06002429
2430 ch = container_of(work, struct smux_lch_t, rx_retry_work.work);
2431
2432 /* get next retry packet */
2433 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2434 if (ch->local_state != SMUX_LCH_LOCAL_OPENED) {
2435 /* port has been closed - remove all retries */
2436 while (!list_empty(&ch->rx_retry_queue)) {
2437 retry = list_first_entry(&ch->rx_retry_queue,
2438 struct smux_rx_pkt_retry,
2439 rx_retry_list);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002440 (void)smux_remove_rx_retry(ch, retry);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002441 }
2442 }
2443
2444 if (list_empty(&ch->rx_retry_queue)) {
2445 SMUX_DBG("%s: retry list empty for channel %d\n",
2446 __func__, ch->lcid);
2447 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2448 return;
2449 }
2450 retry = list_first_entry(&ch->rx_retry_queue,
2451 struct smux_rx_pkt_retry,
2452 rx_retry_list);
2453 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2454
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002455 SMUX_DBG("%s: ch %d retrying rx pkt %p\n",
2456 __func__, ch->lcid, retry);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002457 metadata.read.pkt_priv = 0;
2458 metadata.read.buffer = 0;
2459 tmp = ch->get_rx_buffer(ch->priv,
2460 (void **)&metadata.read.pkt_priv,
2461 (void **)&metadata.read.buffer,
2462 retry->pkt->hdr.payload_len);
2463 if (tmp == 0 && metadata.read.buffer) {
2464 /* have valid RX buffer */
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002465
Eric Holmbergb8435c82012-06-05 14:51:29 -06002466 memcpy(metadata.read.buffer, retry->pkt->payload,
2467 retry->pkt->hdr.payload_len);
2468 metadata.read.len = retry->pkt->hdr.payload_len;
2469
2470 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002471 tx_ready = smux_remove_rx_retry(ch, retry);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002472 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002473 schedule_notify(ch->lcid, SMUX_READ_DONE, &metadata);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002474 if (tx_ready)
2475 list_channel(ch);
2476
2477 immediate_retry = 1;
Eric Holmbergb8435c82012-06-05 14:51:29 -06002478 } else if (tmp == -EAGAIN ||
2479 (tmp == 0 && !metadata.read.buffer)) {
2480 /* retry again */
2481 retry->timeout_in_ms <<= 1;
2482 if (retry->timeout_in_ms > SMUX_RX_RETRY_MAX_MS) {
2483 /* timed out */
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002484 pr_err("%s: ch %d RX retry client timeout\n",
2485 __func__, ch->lcid);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002486 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002487 tx_ready = smux_remove_rx_retry(ch, retry);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002488 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002489 schedule_notify(ch->lcid, SMUX_READ_FAIL, NULL);
2490 if (tx_ready)
2491 list_channel(ch);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002492 }
2493 } else {
2494 /* client error - drop packet */
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002495 pr_err("%s: ch %d RX retry client failed (%d)\n",
2496 __func__, ch->lcid, tmp);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002497 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002498 tx_ready = smux_remove_rx_retry(ch, retry);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002499 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002500 schedule_notify(ch->lcid, SMUX_READ_FAIL, NULL);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002501 if (tx_ready)
2502 list_channel(ch);
Eric Holmbergb8435c82012-06-05 14:51:29 -06002503 }
2504
2505 /* schedule next retry */
2506 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2507 if (!list_empty(&ch->rx_retry_queue)) {
2508 retry = list_first_entry(&ch->rx_retry_queue,
2509 struct smux_rx_pkt_retry,
2510 rx_retry_list);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002511
2512 if (immediate_retry)
2513 queue_delayed_work(smux_rx_wq, &ch->rx_retry_work, 0);
2514 else
2515 queue_delayed_work(smux_rx_wq, &ch->rx_retry_work,
2516 msecs_to_jiffies(retry->timeout_in_ms));
Eric Holmbergb8435c82012-06-05 14:51:29 -06002517 }
2518 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2519}
2520
2521/**
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002522 * Transmit worker handles serializing and transmitting packets onto the
2523 * underlying transport.
2524 *
2525 * @work Work structure (not used)
2526 */
2527static void smux_tx_worker(struct work_struct *work)
2528{
2529 struct smux_pkt_t *pkt;
2530 struct smux_lch_t *ch;
2531 unsigned low_wm_notif;
2532 unsigned lcid;
2533 unsigned long flags;
2534
2535
2536 /*
2537 * Transmit packets in round-robin fashion based upon ready
2538 * channels.
2539 *
2540 * To eliminate the need to hold a lock for the entire
2541 * iteration through the channel ready list, the head of the
2542 * ready-channel list is always the next channel to be
2543 * processed. To send a packet, the first valid packet in
2544 * the head channel is removed and the head channel is then
2545 * rescheduled at the end of the queue by removing it and
2546 * inserting after the tail. The locks can then be released
2547 * while the packet is processed.
2548 */
Eric Holmberged1f00c2012-06-07 09:45:18 -06002549 while (!smux.in_reset) {
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002550 pkt = NULL;
2551 low_wm_notif = 0;
2552
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002553 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002554
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002555 /* handle wakeup if needed */
2556 if (smux.power_state == SMUX_PWR_OFF) {
2557 if (!list_empty(&smux.lch_tx_ready_list) ||
2558 !list_empty(&smux.power_queue)) {
2559 /* data to transmit, do wakeup */
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002560 smux.pwr_wakeup_delay_us = 1;
Eric Holmbergff0b0112012-06-08 15:06:57 -06002561 SMUX_PWR("%s: Power %d->%d\n", __func__,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002562 smux.power_state,
2563 SMUX_PWR_TURNING_ON);
2564 smux.power_state = SMUX_PWR_TURNING_ON;
2565 spin_unlock_irqrestore(&smux.tx_lock_lha2,
2566 flags);
2567 smux_uart_power_on();
2568 queue_work(smux_tx_wq, &smux_wakeup_work);
2569 } else {
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002570 /* no activity -- stay asleep */
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002571 spin_unlock_irqrestore(&smux.tx_lock_lha2,
2572 flags);
2573 }
2574 break;
2575 }
2576
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002577 /* process any pending power packets */
2578 if (!list_empty(&smux.power_queue)) {
2579 pkt = list_first_entry(&smux.power_queue,
2580 struct smux_pkt_t, list);
2581 list_del(&pkt->list);
2582 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2583
2584 /* send the packet */
2585 SMUX_LOG_PKT_TX(pkt);
2586 if (!smux_byte_loopback) {
2587 smux_tx_tty(pkt);
2588 smux_flush_tty();
2589 } else {
2590 smux_tx_loopback(pkt);
2591 }
2592
2593 /* Adjust power state if this is a flush command */
2594 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
2595 if (smux.power_state == SMUX_PWR_TURNING_OFF_FLUSH &&
2596 pkt->hdr.cmd == SMUX_CMD_PWR_CTL &&
2597 (pkt->hdr.flags & SMUX_CMD_PWR_CTL_ACK)) {
Eric Holmbergff0b0112012-06-08 15:06:57 -06002598 SMUX_PWR("%s: Power %d->%d\n", __func__,
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002599 smux.power_state,
2600 SMUX_PWR_OFF_FLUSH);
2601 smux.power_state = SMUX_PWR_OFF_FLUSH;
2602 queue_work(smux_tx_wq, &smux_inactivity_work);
2603 }
2604 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2605
2606 smux_free_pkt(pkt);
2607 continue;
2608 }
2609
2610 /* get the next ready channel */
2611 if (list_empty(&smux.lch_tx_ready_list)) {
2612 /* no ready channels */
2613 SMUX_DBG("%s: no more ready channels, exiting\n",
2614 __func__);
2615 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2616 break;
2617 }
2618 smux.tx_activity_flag = 1;
2619
2620 if (smux.power_state != SMUX_PWR_ON) {
2621 /* channel not ready to transmit */
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002622 SMUX_DBG("%s: waiting for link up (state %d)\n",
Eric Holmbergffddd4c2012-06-08 12:37:51 -06002623 __func__,
2624 smux.power_state);
2625 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2626 break;
2627 }
2628
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002629 /* get the next packet to send and rotate channel list */
2630 ch = list_first_entry(&smux.lch_tx_ready_list,
2631 struct smux_lch_t,
2632 tx_ready_list);
2633
2634 spin_lock(&ch->state_lock_lhb1);
2635 spin_lock(&ch->tx_lock_lhb2);
2636 if (!list_empty(&ch->tx_queue)) {
2637 /*
2638 * If remote TX flow control is enabled or
2639 * the channel is not fully opened, then only
2640 * send command packets.
2641 */
2642 if (ch->tx_flow_control || !IS_FULLY_OPENED(ch)) {
2643 struct smux_pkt_t *curr;
2644 list_for_each_entry(curr, &ch->tx_queue, list) {
2645 if (curr->hdr.cmd != SMUX_CMD_DATA) {
2646 pkt = curr;
2647 break;
2648 }
2649 }
2650 } else {
2651 /* get next cmd/data packet to send */
2652 pkt = list_first_entry(&ch->tx_queue,
2653 struct smux_pkt_t, list);
2654 }
2655 }
2656
2657 if (pkt) {
2658 list_del(&pkt->list);
2659
2660 /* update packet stats */
2661 if (pkt->hdr.cmd == SMUX_CMD_DATA) {
2662 --ch->tx_pending_data_cnt;
2663 if (ch->notify_lwm &&
2664 ch->tx_pending_data_cnt
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002665 <= SMUX_TX_WM_LOW) {
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002666 ch->notify_lwm = 0;
2667 low_wm_notif = 1;
2668 }
2669 }
2670
2671 /* advance to the next ready channel */
2672 list_rotate_left(&smux.lch_tx_ready_list);
2673 } else {
2674 /* no data in channel to send, remove from ready list */
2675 list_del(&ch->tx_ready_list);
2676 INIT_LIST_HEAD(&ch->tx_ready_list);
2677 }
2678 lcid = ch->lcid;
2679 spin_unlock(&ch->tx_lock_lhb2);
2680 spin_unlock(&ch->state_lock_lhb1);
2681 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
2682
2683 if (low_wm_notif)
2684 schedule_notify(lcid, SMUX_LOW_WM_HIT, NULL);
2685
2686 /* send the packet */
2687 smux_tx_pkt(ch, pkt);
2688 smux_free_pkt(pkt);
2689 }
2690}
2691
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002692/**
2693 * Update the RX flow control (sent in the TIOCM Status command).
2694 *
2695 * @ch Channel for update
2696 *
2697 * @returns 1 for updated, 0 for not updated
2698 *
2699 * Must be called with ch->state_lock_lhb1 locked.
2700 */
2701static int smux_rx_flow_control_updated(struct smux_lch_t *ch)
2702{
2703 int updated = 0;
2704 int prev_state;
2705
2706 prev_state = ch->local_tiocm & SMUX_CMD_STATUS_FLOW_CNTL;
2707
2708 if (ch->rx_flow_control_client || ch->rx_flow_control_auto)
2709 ch->local_tiocm |= SMUX_CMD_STATUS_FLOW_CNTL;
2710 else
2711 ch->local_tiocm &= ~SMUX_CMD_STATUS_FLOW_CNTL;
2712
2713 if (prev_state != (ch->local_tiocm & SMUX_CMD_STATUS_FLOW_CNTL)) {
2714 smux_send_status_cmd(ch);
2715 updated = 1;
2716 }
2717
2718 return updated;
2719}
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002720
2721/**********************************************************************/
2722/* Kernel API */
2723/**********************************************************************/
2724
2725/**
2726 * Set or clear channel option using the SMUX_CH_OPTION_* channel
2727 * flags.
2728 *
2729 * @lcid Logical channel ID
2730 * @set Options to set
2731 * @clear Options to clear
2732 *
2733 * @returns 0 for success, < 0 for failure
2734 */
2735int msm_smux_set_ch_option(uint8_t lcid, uint32_t set, uint32_t clear)
2736{
2737 unsigned long flags;
2738 struct smux_lch_t *ch;
2739 int tx_ready = 0;
2740 int ret = 0;
2741
2742 if (smux_assert_lch_id(lcid))
2743 return -ENXIO;
2744
2745 ch = &smux_lch[lcid];
2746 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2747
2748 /* Local loopback mode */
2749 if (set & SMUX_CH_OPTION_LOCAL_LOOPBACK)
2750 ch->local_mode = SMUX_LCH_MODE_LOCAL_LOOPBACK;
2751
2752 if (clear & SMUX_CH_OPTION_LOCAL_LOOPBACK)
2753 ch->local_mode = SMUX_LCH_MODE_NORMAL;
2754
2755 /* Remote loopback mode */
2756 if (set & SMUX_CH_OPTION_REMOTE_LOOPBACK)
2757 ch->local_mode = SMUX_LCH_MODE_REMOTE_LOOPBACK;
2758
2759 if (clear & SMUX_CH_OPTION_REMOTE_LOOPBACK)
2760 ch->local_mode = SMUX_LCH_MODE_NORMAL;
2761
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002762 /* RX Flow control */
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002763 if (set & SMUX_CH_OPTION_REMOTE_TX_STOP) {
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002764 ch->rx_flow_control_client = 1;
2765 tx_ready |= smux_rx_flow_control_updated(ch);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002766 }
2767
2768 if (clear & SMUX_CH_OPTION_REMOTE_TX_STOP) {
Eric Holmberg2e0906f2012-06-26 13:29:14 -06002769 ch->rx_flow_control_client = 0;
2770 tx_ready |= smux_rx_flow_control_updated(ch);
2771 }
2772
2773 /* Auto RX Flow Control */
2774 if (set & SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP) {
2775 SMUX_DBG("%s: auto rx flow control option enabled\n",
2776 __func__);
2777 ch->options |= SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP;
2778 }
2779
2780 if (clear & SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP) {
2781 SMUX_DBG("%s: auto rx flow control option disabled\n",
2782 __func__);
2783 ch->options &= ~SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP;
2784 ch->rx_flow_control_auto = 0;
2785 tx_ready |= smux_rx_flow_control_updated(ch);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002786 }
2787
2788 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2789
2790 if (tx_ready)
2791 list_channel(ch);
2792
2793 return ret;
2794}
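
/*
 * Usage sketch (illustrative only; "lcid" is whatever logical channel
 * id the client owns, and the error handling is hypothetical):
 *
 *	ret = msm_smux_set_ch_option(lcid,
 *			SMUX_CH_OPTION_LOCAL_LOOPBACK |
 *			SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP,
 *			0);
 *	if (ret < 0)
 *		pr_err("smux option update failed %d\n", ret);
 */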
2795
2796/**
2797 * Starts the opening sequence for a logical channel.
2798 *
2799 * @lcid Logical channel ID
2800 * @priv Free for client usage
2801 * @notify Event notification function
2802 * @get_rx_buffer Function used to provide a receive buffer to SMUX
2803 *
2804 * @returns 0 for success, <0 otherwise
2805 *
2806 * A channel must be fully closed (either never previously opened, or
2807 * msm_smux_close() has been called and the SMUX_DISCONNECTED notification
2808 * has been received).
2809 *
2810 * Once the remote side is opened, the client will receive a SMUX_CONNECTED
2811 * event.
2812 */
2813int msm_smux_open(uint8_t lcid, void *priv,
2814 void (*notify)(void *priv, int event_type, const void *metadata),
2815 int (*get_rx_buffer)(void *priv, void **pkt_priv, void **buffer,
2816 int size))
2817{
2818 int ret;
2819 struct smux_lch_t *ch;
2820 struct smux_pkt_t *pkt;
2821 int tx_ready = 0;
2822 unsigned long flags;
2823
2824 if (smux_assert_lch_id(lcid))
2825 return -ENXIO;
2826
2827 ch = &smux_lch[lcid];
2828 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2829
2830 if (ch->local_state == SMUX_LCH_LOCAL_CLOSING) {
2831 ret = -EAGAIN;
2832 goto out;
2833 }
2834
2835 if (ch->local_state != SMUX_LCH_LOCAL_CLOSED) {
2836 pr_err("%s: open lcid %d local state %x invalid\n",
2837 __func__, lcid, ch->local_state);
2838 ret = -EINVAL;
2839 goto out;
2840 }
2841
2842 SMUX_DBG("lcid %d local state 0x%x -> 0x%x\n", lcid,
2843 ch->local_state,
2844 SMUX_LCH_LOCAL_OPENING);
2845
2846 ch->local_state = SMUX_LCH_LOCAL_OPENING;
2847
2848 ch->priv = priv;
2849 ch->notify = notify;
2850 ch->get_rx_buffer = get_rx_buffer;
2851 ret = 0;
2852
2853 /* Send Open Command */
2854 pkt = smux_alloc_pkt();
2855 if (!pkt) {
2856 ret = -ENOMEM;
2857 goto out;
2858 }
2859 pkt->hdr.magic = SMUX_MAGIC;
2860 pkt->hdr.cmd = SMUX_CMD_OPEN_LCH;
2861 pkt->hdr.flags = SMUX_CMD_OPEN_POWER_COLLAPSE;
2862 if (ch->local_mode == SMUX_LCH_MODE_REMOTE_LOOPBACK)
2863 pkt->hdr.flags |= SMUX_CMD_OPEN_REMOTE_LOOPBACK;
2864 pkt->hdr.lcid = lcid;
2865 pkt->hdr.payload_len = 0;
2866 pkt->hdr.pad_len = 0;
2867 smux_tx_queue(pkt, ch, 0);
2868 tx_ready = 1;
2869
2870out:
2871 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2872 if (tx_ready)
2873 list_channel(ch);
2874 return ret;
2875}
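
/*
 * Client sketch for the open sequence (illustrative only; all my_*
 * names below are hypothetical client-side code, not part of this
 * driver):
 *
 *	static int my_get_rx_buffer(void *priv, void **pkt_priv,
 *				    void **buffer, int size)
 *	{
 *		*pkt_priv = NULL;
 *		*buffer = kmalloc(size, GFP_ATOMIC);
 *		return *buffer ? 0 : -EAGAIN;	(-EAGAIN schedules an RX retry)
 *	}
 *
 *	static void my_notify(void *priv, int event, const void *metadata)
 *	{
 *		const union notifier_metadata *meta = metadata;
 *
 *		if (event == SMUX_CONNECTED)
 *			complete(&my_open_done);
 *		else if (event == SMUX_READ_DONE)
 *			my_consume_rx(meta->read.buffer, meta->read.len);
 *		else if (event == SMUX_WRITE_DONE)
 *			my_release_tx(meta->write.buffer);
 *	}
 *
 *	ret = msm_smux_open(lcid, my_priv, my_notify, my_get_rx_buffer);
 *	...
 *	ret = msm_smux_close(lcid);
 */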
2876
2877/**
2878 * Starts the closing sequence for a logical channel.
2879 *
2880 * @lcid Logical channel ID
2881 *
2882 * @returns 0 for success, <0 otherwise
2883 *
2884 * Once the close event has been acknowledged by the remote side, the client
2885 * will receive a SMUX_DISCONNECTED notification.
2886 */
2887int msm_smux_close(uint8_t lcid)
2888{
2889 int ret = 0;
2890 struct smux_lch_t *ch;
2891 struct smux_pkt_t *pkt;
2892 int tx_ready = 0;
2893 unsigned long flags;
2894
2895 if (smux_assert_lch_id(lcid))
2896 return -ENXIO;
2897
2898 ch = &smux_lch[lcid];
2899 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2900 ch->local_tiocm = 0x0;
2901 ch->remote_tiocm = 0x0;
2902 ch->tx_pending_data_cnt = 0;
2903 ch->notify_lwm = 0;
2904
2905 /* Purge TX queue */
2906 spin_lock(&ch->tx_lock_lhb2);
Eric Holmberged1f00c2012-06-07 09:45:18 -06002907 smux_purge_ch_tx_queue(ch);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002908 spin_unlock(&ch->tx_lock_lhb2);
2909
2910 /* Send Close Command */
2911 if (ch->local_state == SMUX_LCH_LOCAL_OPENED ||
2912 ch->local_state == SMUX_LCH_LOCAL_OPENING) {
2913 SMUX_DBG("lcid %d local state 0x%x -> 0x%x\n", lcid,
2914 ch->local_state,
2915 SMUX_LCH_LOCAL_CLOSING);
2916
2917 ch->local_state = SMUX_LCH_LOCAL_CLOSING;
2918 pkt = smux_alloc_pkt();
2919 if (pkt) {
2920 pkt->hdr.cmd = SMUX_CMD_CLOSE_LCH;
2921 pkt->hdr.flags = 0;
2922 pkt->hdr.lcid = lcid;
2923 pkt->hdr.payload_len = 0;
2924 pkt->hdr.pad_len = 0;
2925 smux_tx_queue(pkt, ch, 0);
2926 tx_ready = 1;
2927 } else {
2928 pr_err("%s: pkt allocation failed\n", __func__);
2929 ret = -ENOMEM;
2930 }
Eric Holmbergb8435c82012-06-05 14:51:29 -06002931
2932 /* Purge RX retry queue */
2933 if (ch->rx_retry_queue_cnt)
2934 queue_delayed_work(smux_rx_wq, &ch->rx_retry_work, 0);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06002935 }
2936 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
2937
2938 if (tx_ready)
2939 list_channel(ch);
2940
2941 return ret;
2942}
2943
2944/**
2945 * Write data to a logical channel.
2946 *
2947 * @lcid Logical channel ID
2948 * @pkt_priv Client data that will be returned with the SMUX_WRITE_DONE or
2949 * SMUX_WRITE_FAIL notification.
2950 * @data Data to write
2951 * @len Length of @data
2952 *
2953 * @returns 0 for success, <0 otherwise
2954 *
2955 * Data may be written immediately after msm_smux_open() is called,
2956 * but the data will wait in the transmit queue until the channel has
2957 * been fully opened.
2958 *
2959 * Once the data has been written, the client will receive either a completion
2960 * (SMUX_WRITE_DONE) or a failure notice (SMUX_WRITE_FAIL).
2961 */
2962int msm_smux_write(uint8_t lcid, void *pkt_priv, const void *data, int len)
2963{
2964 struct smux_lch_t *ch;
2965 struct smux_pkt_t *pkt;
2966 int tx_ready = 0;
2967 unsigned long flags;
2968 int ret;
2969
2970 if (smux_assert_lch_id(lcid))
2971 return -ENXIO;
2972
2973 ch = &smux_lch[lcid];
2974 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
2975
2976 if (ch->local_state != SMUX_LCH_LOCAL_OPENED &&
2977 ch->local_state != SMUX_LCH_LOCAL_OPENING) {
2978 pr_err("%s: invalid local state %d channel %d\n",
2979 __func__, ch->local_state, lcid);
2980 ret = -EINVAL;
2981 goto out;
2982 }
2983
2984 if (len > SMUX_MAX_PKT_SIZE - sizeof(struct smux_hdr_t)) {
2985 pr_err("%s: payload %d too large\n",
2986 __func__, len);
2987 ret = -E2BIG;
2988 goto out;
2989 }
2990
2991 pkt = smux_alloc_pkt();
2992 if (!pkt) {
2993 ret = -ENOMEM;
2994 goto out;
2995 }
2996
2997 pkt->hdr.cmd = SMUX_CMD_DATA;
2998 pkt->hdr.lcid = lcid;
2999 pkt->hdr.flags = 0;
3000 pkt->hdr.payload_len = len;
3001 pkt->payload = (void *)data;
3002 pkt->priv = pkt_priv;
3003 pkt->hdr.pad_len = 0;
3004
3005 spin_lock(&ch->tx_lock_lhb2);
3006 /* verify high watermark */
3007 SMUX_DBG("%s: pending %d", __func__, ch->tx_pending_data_cnt);
3008
Eric Holmberg2e0906f2012-06-26 13:29:14 -06003009 if (ch->tx_pending_data_cnt >= SMUX_TX_WM_HIGH) {
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003010 pr_err("%s: ch %d high watermark %d exceeded %d\n",
Eric Holmberg2e0906f2012-06-26 13:29:14 -06003011 __func__, lcid, SMUX_TX_WM_HIGH,
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003012 ch->tx_pending_data_cnt);
3013 ret = -EAGAIN;
3014 goto out_inner;
3015 }
3016
3017 /* queue packet for transmit */
Eric Holmberg2e0906f2012-06-26 13:29:14 -06003018 if (++ch->tx_pending_data_cnt == SMUX_TX_WM_HIGH) {
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003019 ch->notify_lwm = 1;
3020 pr_err("%s: high watermark hit\n", __func__);
3021 schedule_notify(lcid, SMUX_HIGH_WM_HIT, NULL);
3022 }
3023 list_add_tail(&pkt->list, &ch->tx_queue);
3024
3025 /* add to ready list */
3026 if (IS_FULLY_OPENED(ch))
3027 tx_ready = 1;
3028
3029 ret = 0;
3030
3031out_inner:
3032 spin_unlock(&ch->tx_lock_lhb2);
3033
3034out:
3035 if (ret)
3036 smux_free_pkt(pkt);
3037 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
3038
3039 if (tx_ready)
3040 list_channel(ch);
3041
3042 return ret;
3043}
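
/*
 * Write usage sketch (illustrative only; "buf" and the retry strategy
 * are hypothetical).  The data buffer is not copied, so it must remain
 * valid until it is returned in meta->write.buffer with SMUX_WRITE_DONE
 * or SMUX_WRITE_FAIL.  Passing the buffer pointer as pkt_priv lets the
 * notification handler free it:
 *
 *	ret = msm_smux_write(lcid, buf, buf, len);
 *	if (ret == -EAGAIN)
 *		channel is at the high watermark; retry after the
 *		SMUX_LOW_WM_HIT notification or once msm_smux_is_ch_low()
 *		returns 1
 */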
3044
3045/**
3046 * Returns true if the TX queue is currently full (high water mark).
3047 *
3048 * @lcid Logical channel ID
3049 * @returns 0 if channel is not full
3050 * 1 if it is full
3051 * < 0 for error
3052 */
3053int msm_smux_is_ch_full(uint8_t lcid)
3054{
3055 struct smux_lch_t *ch;
3056 unsigned long flags;
3057 int is_full = 0;
3058
3059 if (smux_assert_lch_id(lcid))
3060 return -ENXIO;
3061
3062 ch = &smux_lch[lcid];
3063
3064 spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06003065 if (ch->tx_pending_data_cnt >= SMUX_TX_WM_HIGH)
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003066 is_full = 1;
3067 spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);
3068
3069 return is_full;
3070}
3071
3072/**
3073 * Returns true if the TX queue has space for more packets (it is at or
3074 * below the low water mark).
3075 *
3076 * @lcid Logical channel ID
3077 * @returns 0 if channel is above low watermark
3078 * 1 if it's at or below the low watermark
3079 * < 0 for error
3080 */
3081int msm_smux_is_ch_low(uint8_t lcid)
3082{
3083 struct smux_lch_t *ch;
3084 unsigned long flags;
3085 int is_low = 0;
3086
3087 if (smux_assert_lch_id(lcid))
3088 return -ENXIO;
3089
3090 ch = &smux_lch[lcid];
3091
3092 spin_lock_irqsave(&ch->tx_lock_lhb2, flags);
Eric Holmberg2e0906f2012-06-26 13:29:14 -06003093 if (ch->tx_pending_data_cnt <= SMUX_TX_WM_LOW)
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003094 is_low = 1;
3095 spin_unlock_irqrestore(&ch->tx_lock_lhb2, flags);
3096
3097 return is_low;
3098}
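
/*
 * Throttling sketch using the watermark helpers (illustrative only):
 *
 *	if (msm_smux_is_ch_full(lcid))
 *		defer the write until SMUX_LOW_WM_HIT arrives or
 *		msm_smux_is_ch_low(lcid) returns 1
 *	else
 *		ret = msm_smux_write(lcid, priv, data, len);
 */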
3099
3100/**
3101 * Send TIOCM status update.
3102 *
3103 * @ch Channel for update
3104 *
3105 * @returns 0 for success, <0 for failure
3106 *
3107 * Channel lock must be held before calling.
3108 */
3109static int smux_send_status_cmd(struct smux_lch_t *ch)
3110{
3111 struct smux_pkt_t *pkt;
3112
3113 if (!ch)
3114 return -EINVAL;
3115
3116 pkt = smux_alloc_pkt();
3117 if (!pkt)
3118 return -ENOMEM;
3119
3120 pkt->hdr.lcid = ch->lcid;
3121 pkt->hdr.cmd = SMUX_CMD_STATUS;
3122 pkt->hdr.flags = ch->local_tiocm;
3123 pkt->hdr.payload_len = 0;
3124 pkt->hdr.pad_len = 0;
3125 smux_tx_queue(pkt, ch, 0);
3126
3127 return 0;
3128}
3129
3130/**
3131 * Internal helper function for getting the TIOCM status with
3132 * state_lock_lhb1 already locked.
3133 *
3134 * @ch Channel pointer
3135 *
3136 * @returns TIOCM status
3137 */
3138static long msm_smux_tiocm_get_atomic(struct smux_lch_t *ch)
3139{
3140 long status = 0x0;
3141
3142 status |= (ch->remote_tiocm & SMUX_CMD_STATUS_RTC) ? TIOCM_DSR : 0;
3143 status |= (ch->remote_tiocm & SMUX_CMD_STATUS_RTR) ? TIOCM_CTS : 0;
3144 status |= (ch->remote_tiocm & SMUX_CMD_STATUS_RI) ? TIOCM_RI : 0;
3145 status |= (ch->remote_tiocm & SMUX_CMD_STATUS_DCD) ? TIOCM_CD : 0;
3146
3147 status |= (ch->local_tiocm & SMUX_CMD_STATUS_RTC) ? TIOCM_DTR : 0;
3148 status |= (ch->local_tiocm & SMUX_CMD_STATUS_RTR) ? TIOCM_RTS : 0;
3149
3150 return status;
3151}
3152
3153/**
3154 * Get the TIOCM status bits.
3155 *
3156 * @lcid Logical channel ID
3157 *
3158 * @returns >= 0 TIOCM status bits
3159 * < 0 Error condition
3160 */
3161long msm_smux_tiocm_get(uint8_t lcid)
3162{
3163 struct smux_lch_t *ch;
3164 unsigned long flags;
3165 long status = 0x0;
3166
3167 if (smux_assert_lch_id(lcid))
3168 return -ENXIO;
3169
3170 ch = &smux_lch[lcid];
3171 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
3172 status = msm_smux_tiocm_get_atomic(ch);
3173 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
3174
3175 return status;
3176}
3177
3178/**
3179 * Set/clear the TIOCM status bits.
3180 *
3181 * @lcid Logical channel ID
3182 * @set Bits to set
3183 * @clear Bits to clear
3184 *
3185 * @returns 0 for success; < 0 for failure
3186 *
3187 * If a bit is specified in both the @set and @clear masks, then the clear bit
3188 * definition will dominate and the bit will be cleared.
3189 */
3190int msm_smux_tiocm_set(uint8_t lcid, uint32_t set, uint32_t clear)
3191{
3192 struct smux_lch_t *ch;
3193 unsigned long flags;
3194 uint8_t old_status;
3195 uint8_t status_set = 0x0;
3196 uint8_t status_clear = 0x0;
3197 int tx_ready = 0;
3198 int ret = 0;
3199
3200 if (smux_assert_lch_id(lcid))
3201 return -ENXIO;
3202
3203 ch = &smux_lch[lcid];
3204 spin_lock_irqsave(&ch->state_lock_lhb1, flags);
3205
3206 status_set |= (set & TIOCM_DTR) ? SMUX_CMD_STATUS_RTC : 0;
3207 status_set |= (set & TIOCM_RTS) ? SMUX_CMD_STATUS_RTR : 0;
3208 status_set |= (set & TIOCM_RI) ? SMUX_CMD_STATUS_RI : 0;
3209 status_set |= (set & TIOCM_CD) ? SMUX_CMD_STATUS_DCD : 0;
3210
3211 status_clear |= (clear & TIOCM_DTR) ? SMUX_CMD_STATUS_RTC : 0;
3212 status_clear |= (clear & TIOCM_RTS) ? SMUX_CMD_STATUS_RTR : 0;
3213 status_clear |= (clear & TIOCM_RI) ? SMUX_CMD_STATUS_RI : 0;
3214 status_clear |= (clear & TIOCM_CD) ? SMUX_CMD_STATUS_DCD : 0;
3215
3216 old_status = ch->local_tiocm;
3217 ch->local_tiocm |= status_set;
3218 ch->local_tiocm &= ~status_clear;
3219
3220 if (ch->local_tiocm != old_status) {
3221 ret = smux_send_status_cmd(ch);
3222 tx_ready = 1;
3223 }
3224 spin_unlock_irqrestore(&ch->state_lock_lhb1, flags);
3225
3226 if (tx_ready)
3227 list_channel(ch);
3228
3229 return ret;
3230}
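
/*
 * Usage sketch (illustrative only): assert DTR and RTS toward the
 * remote side and clear RI in a single status update, then read back
 * the combined local/remote status bits:
 *
 *	ret = msm_smux_tiocm_set(lcid, TIOCM_DTR | TIOCM_RTS, TIOCM_RI);
 *	status = msm_smux_tiocm_get(lcid);
 */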
3231
3232/**********************************************************************/
Eric Holmberged1f00c2012-06-07 09:45:18 -06003233/* Subsystem Restart */
3234/**********************************************************************/
3235static struct notifier_block ssr_notifier = {
3236 .notifier_call = ssr_notifier_cb,
3237};
3238
3239/**
3240 * Handle Subsystem Restart (SSR) notifications.
3241 *
3242 * @this Pointer to ssr_notifier
3243 * @code SSR Code
3244 * @data Data pointer (not used)
3245 */
3246static int ssr_notifier_cb(struct notifier_block *this,
3247 unsigned long code,
3248 void *data)
3249{
3250 unsigned long flags;
3251 int power_off_uart = 0;
3252
Eric Holmbergd2697902012-06-15 09:58:46 -06003253 if (code == SUBSYS_BEFORE_SHUTDOWN) {
3254 SMUX_DBG("%s: ssr - before shutdown\n", __func__);
3255 mutex_lock(&smux.mutex_lha0);
3256 smux.in_reset = 1;
3257 mutex_unlock(&smux.mutex_lha0);
Eric Holmberged1f00c2012-06-07 09:45:18 -06003258 return NOTIFY_DONE;
Eric Holmbergd2697902012-06-15 09:58:46 -06003259 } else if (code != SUBSYS_AFTER_SHUTDOWN) {
3260 return NOTIFY_DONE;
3261 }
3262 SMUX_DBG("%s: ssr - after shutdown\n", __func__);
Eric Holmberged1f00c2012-06-07 09:45:18 -06003263
3264 /* Cleanup channels */
Eric Holmbergd2697902012-06-15 09:58:46 -06003265 mutex_lock(&smux.mutex_lha0);
Eric Holmberged1f00c2012-06-07 09:45:18 -06003266 smux_lch_purge();
Eric Holmbergd2697902012-06-15 09:58:46 -06003267 if (smux.tty)
3268 tty_driver_flush_buffer(smux.tty);
Eric Holmberged1f00c2012-06-07 09:45:18 -06003269
3270 /* Power-down UART */
3271 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
3272 if (smux.power_state != SMUX_PWR_OFF) {
Eric Holmbergff0b0112012-06-08 15:06:57 -06003273 SMUX_PWR("%s: SSR - turning off UART\n", __func__);
Eric Holmberged1f00c2012-06-07 09:45:18 -06003274 smux.power_state = SMUX_PWR_OFF;
3275 power_off_uart = 1;
3276 }
Eric Holmbergd2697902012-06-15 09:58:46 -06003277 smux.powerdown_enabled = 0;
Eric Holmberged1f00c2012-06-07 09:45:18 -06003278 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
3279
3280 if (power_off_uart)
3281 smux_uart_power_off();
3282
Eric Holmbergd2697902012-06-15 09:58:46 -06003283 smux.in_reset = 0;
3284 mutex_unlock(&smux.mutex_lha0);
3285
Eric Holmberged1f00c2012-06-07 09:45:18 -06003286 return NOTIFY_DONE;
3287}
3288
3289/**********************************************************************/
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003290/* Line Discipline Interface */
3291/**********************************************************************/
Eric Holmberged1f00c2012-06-07 09:45:18 -06003292static void smux_pdev_release(struct device *dev)
3293{
3294 struct platform_device *pdev;
3295
3296 pdev = container_of(dev, struct platform_device, dev);
3297 SMUX_DBG("%s: releasing pdev %p '%s'\n", __func__, pdev, pdev->name);
3298 memset(&pdev->dev, 0x0, sizeof(pdev->dev));
3299}
3300
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003301static int smuxld_open(struct tty_struct *tty)
3302{
3303 int i;
3304 int tmp;
3305 unsigned long flags;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003306
3307 if (!smux.is_initialized)
3308 return -ENODEV;
3309
Eric Holmberged1f00c2012-06-07 09:45:18 -06003310 mutex_lock(&smux.mutex_lha0);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003311 if (smux.ld_open_count) {
3312 pr_err("%s: %p multiple instances not supported\n",
3313 __func__, tty);
Eric Holmberged1f00c2012-06-07 09:45:18 -06003314 mutex_unlock(&smux.mutex_lha0);
Eric Holmberg902c51e2012-05-29 12:12:16 -06003315 return -EEXIST;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003316 }
3317
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003318 if (tty->ops->write == NULL) {
Eric Holmberged1f00c2012-06-07 09:45:18 -06003319 pr_err("%s: tty->ops->write is NULL\n", __func__);
3320 mutex_unlock(&smux.mutex_lha0);
Eric Holmberg902c51e2012-05-29 12:12:16 -06003321 return -EINVAL;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003322 }
3323
3324 /* connect to TTY */
Eric Holmberged1f00c2012-06-07 09:45:18 -06003325 ++smux.ld_open_count;
3326 smux.in_reset = 0;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003327 smux.tty = tty;
3328 tty->disc_data = &smux;
3329 tty->receive_room = TTY_RECEIVE_ROOM;
3330 tty_driver_flush_buffer(tty);
3331
3332 /* power-down the UART if we are idle */
Eric Holmberged1f00c2012-06-07 09:45:18 -06003333 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003334 if (smux.power_state == SMUX_PWR_OFF) {
Eric Holmbergff0b0112012-06-08 15:06:57 -06003335 SMUX_PWR("%s: powering off uart\n", __func__);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003336 smux.power_state = SMUX_PWR_OFF_FLUSH;
Eric Holmberged1f00c2012-06-07 09:45:18 -06003337 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003338 queue_work(smux_tx_wq, &smux_inactivity_work);
3339 } else {
Eric Holmberged1f00c2012-06-07 09:45:18 -06003340 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003341 }
3342
3343 /* register platform devices */
3344 for (i = 0; i < ARRAY_SIZE(smux_devs); ++i) {
Eric Holmberged1f00c2012-06-07 09:45:18 -06003345 SMUX_DBG("%s: register pdev '%s'\n",
3346 __func__, smux_devs[i].name);
3347 smux_devs[i].dev.release = smux_pdev_release;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003348 tmp = platform_device_register(&smux_devs[i]);
3349 if (tmp)
3350 pr_err("%s: error %d registering device %s\n",
3351 __func__, tmp, smux_devs[i].name);
3352 }
Eric Holmberged1f00c2012-06-07 09:45:18 -06003353 mutex_unlock(&smux.mutex_lha0);
Eric Holmberg902c51e2012-05-29 12:12:16 -06003354 return 0;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003355}
3356
3357static void smuxld_close(struct tty_struct *tty)
3358{
3359 unsigned long flags;
Eric Holmberged1f00c2012-06-07 09:45:18 -06003360 int power_up_uart = 0;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003361 int i;
3362
Eric Holmberged1f00c2012-06-07 09:45:18 -06003363 SMUX_DBG("%s: ldisc unload\n", __func__);
3364 mutex_lock(&smux.mutex_lha0);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003365 if (smux.ld_open_count <= 0) {
3366 pr_err("%s: invalid ld count %d\n", __func__,
3367 smux.ld_open_count);
Eric Holmberged1f00c2012-06-07 09:45:18 -06003368 mutex_unlock(&smux.mutex_lha0);
Eric Holmberg902c51e2012-05-29 12:12:16 -06003369 return;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003370 }
Eric Holmberged1f00c2012-06-07 09:45:18 -06003371 smux.in_reset = 1;
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003372 --smux.ld_open_count;
Eric Holmberged1f00c2012-06-07 09:45:18 -06003373
3374 /* Cleanup channels */
3375 smux_lch_purge();
3376
3377 /* Unregister platform devices */
3378 for (i = 0; i < ARRAY_SIZE(smux_devs); ++i) {
3379 SMUX_DBG("%s: unregister pdev '%s'\n",
3380 __func__, smux_devs[i].name);
3381 platform_device_unregister(&smux_devs[i]);
3382 }
3383
3384 /* Schedule UART power-up if it's down */
3385 spin_lock_irqsave(&smux.tx_lock_lha2, flags);
Eric Holmbergffddd4c2012-06-08 12:37:51 -06003386 if (smux.power_state == SMUX_PWR_OFF)
Eric Holmberged1f00c2012-06-07 09:45:18 -06003387 power_up_uart = 1;
Eric Holmbergffddd4c2012-06-08 12:37:51 -06003388 smux.power_state = SMUX_PWR_OFF;
Eric Holmbergd2697902012-06-15 09:58:46 -06003389 smux.powerdown_enabled = 0;
Eric Holmberged1f00c2012-06-07 09:45:18 -06003390 spin_unlock_irqrestore(&smux.tx_lock_lha2, flags);
3391
3392 if (power_up_uart)
Eric Holmberg92a67df2012-06-25 13:56:24 -06003393 smux_uart_power_on_atomic();
Eric Holmberged1f00c2012-06-07 09:45:18 -06003394
3395 /* Disconnect from TTY */
3396 smux.tty = NULL;
3397 mutex_unlock(&smux.mutex_lha0);
3398 SMUX_DBG("%s: ldisc complete\n", __func__);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003399}
3400
3401/**
3402 * Receive data from TTY Line Discipline.
3403 *
3404 * @tty TTY structure
3405 * @cp Character data
3406 * @fp Flag data
3407 * @count Size of character and flag data
3408 */
3409void smuxld_receive_buf(struct tty_struct *tty, const unsigned char *cp,
3410 char *fp, int count)
3411{
3412 int i;
3413 int last_idx = 0;
3414 const char *tty_name = NULL;
3415 char *f;
3416
3417 if (smux_debug_mask & MSM_SMUX_DEBUG)
3418 print_hex_dump(KERN_INFO, "smux tty rx: ", DUMP_PREFIX_OFFSET,
3419 16, 1, cp, count, true);
3420
3421 /* verify error flags */
3422 for (i = 0, f = fp; i < count; ++i, ++f) {
3423 if (*f != TTY_NORMAL) {
3424 if (tty)
3425 tty_name = tty->name;
3426 pr_err("%s: TTY %s Error %d (%s)\n", __func__,
3427 tty_name, *f, tty_flag_to_str(*f));
3428
3429 /* feed all previous valid data to the parser */
3430 smux_rx_state_machine(cp + last_idx, i - last_idx,
3431 TTY_NORMAL);
3432
3433 /* feed bad data to parser */
3434 smux_rx_state_machine(cp + i, 1, *f);
3435 last_idx = i + 1;
3436 }
3437 }
3438
3439 /* feed data to RX state machine */
3440 smux_rx_state_machine(cp + last_idx, count - last_idx, TTY_NORMAL);
3441}
3442
3443static void smuxld_flush_buffer(struct tty_struct *tty)
3444{
3445 pr_err("%s: not supported\n", __func__);
3446}
3447
3448static ssize_t smuxld_chars_in_buffer(struct tty_struct *tty)
3449{
3450 pr_err("%s: not supported\n", __func__);
3451 return -ENODEV;
3452}
3453
3454static ssize_t smuxld_read(struct tty_struct *tty, struct file *file,
3455 unsigned char __user *buf, size_t nr)
3456{
3457 pr_err("%s: not supported\n", __func__);
3458 return -ENODEV;
3459}
3460
3461static ssize_t smuxld_write(struct tty_struct *tty, struct file *file,
3462 const unsigned char *buf, size_t nr)
3463{
3464 pr_err("%s: not supported\n", __func__);
3465 return -ENODEV;
3466}
3467
3468static int smuxld_ioctl(struct tty_struct *tty, struct file *file,
3469 unsigned int cmd, unsigned long arg)
3470{
3471 pr_err("%s: not supported\n", __func__);
3472 return -ENODEV;
3473}
3474
3475static unsigned int smuxld_poll(struct tty_struct *tty, struct file *file,
3476 struct poll_table_struct *tbl)
3477{
3478 pr_err("%s: not supported\n", __func__);
3479 return -ENODEV;
3480}
3481
3482static void smuxld_write_wakeup(struct tty_struct *tty)
3483{
3484 pr_err("%s: not supported\n", __func__);
3485}
3486
3487static struct tty_ldisc_ops smux_ldisc_ops = {
3488 .owner = THIS_MODULE,
3489 .magic = TTY_LDISC_MAGIC,
3490 .name = "n_smux",
3491 .open = smuxld_open,
3492 .close = smuxld_close,
3493 .flush_buffer = smuxld_flush_buffer,
3494 .chars_in_buffer = smuxld_chars_in_buffer,
3495 .read = smuxld_read,
3496 .write = smuxld_write,
3497 .ioctl = smuxld_ioctl,
3498 .poll = smuxld_poll,
3499 .receive_buf = smuxld_receive_buf,
3500 .write_wakeup = smuxld_write_wakeup
3501};
3502
3503static int __init smux_init(void)
3504{
3505 int ret;
3506
Eric Holmberged1f00c2012-06-07 09:45:18 -06003507 mutex_init(&smux.mutex_lha0);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003508
3509 spin_lock_init(&smux.rx_lock_lha1);
3510 smux.rx_state = SMUX_RX_IDLE;
3511 smux.power_state = SMUX_PWR_OFF;
3512 smux.pwr_wakeup_delay_us = 1;
3513 smux.powerdown_enabled = 0;
Eric Holmbergffddd4c2012-06-08 12:37:51 -06003514 INIT_LIST_HEAD(&smux.power_queue);
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003515 smux.rx_activity_flag = 0;
3516 smux.tx_activity_flag = 0;
3517 smux.recv_len = 0;
3518 smux.tty = NULL;
3519 smux.ld_open_count = 0;
3520 smux.in_reset = 0;
3521 smux.is_initialized = 1;
3522 smux_byte_loopback = 0;
3523
3524 spin_lock_init(&smux.tx_lock_lha2);
3525 INIT_LIST_HEAD(&smux.lch_tx_ready_list);
3526
3527 ret = tty_register_ldisc(N_SMUX, &smux_ldisc_ops);
3528 if (ret != 0) {
3529 pr_err("%s: error %d registering line discipline\n",
3530 __func__, ret);
3531 return ret;
3532 }
3533
Eric Holmberg6c9f2a52012-06-14 10:49:04 -06003534 subsys_notif_register_notifier("external_modem", &ssr_notifier);
Eric Holmberged1f00c2012-06-07 09:45:18 -06003535
Eric Holmberg8ed30f22012-05-10 19:16:51 -06003536 ret = lch_init();
3537 if (ret != 0) {
3538 pr_err("%s: lch_init failed\n", __func__);
3539 return ret;
3540 }
3541
3542 return 0;
3543}
3544
3545static void __exit smux_exit(void)
3546{
3547 int ret;
3548
3549 ret = tty_unregister_ldisc(N_SMUX);
3550 if (ret != 0) {
3551 pr_err("%s error %d unregistering line discipline\n",
3552 __func__, ret);
3553 return;
3554 }
3555}
3556
3557module_init(smux_init);
3558module_exit(smux_exit);
3559
3560MODULE_DESCRIPTION("Serial Mux TTY Line Discipline");
3561MODULE_LICENSE("GPL v2");
3562MODULE_ALIAS_LDISC(N_SMUX);