/* arch/arm/mach-msm/hsic_tty.c
2 *
3 * Copyright (C) 2007 Google, Inc.
4 * Copyright (c) 2009-2012, Code Aurora Forum. All rights reserved.
5 * Author: Brian Swetland <swetland@google.com>
6 *
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 */
17
18#include <linux/module.h>
19#include <linux/fs.h>
20#include <linux/cdev.h>
21#include <linux/device.h>
22#include <linux/interrupt.h>
23#include <linux/delay.h>
24#include <linux/wakelock.h>
25#include <linux/platform_device.h>
26#include <linux/sched.h>
27
28#include <linux/tty.h>
29#include <linux/tty_driver.h>
30#include <linux/tty_flip.h>
31
32#include <mach/usb_bridge.h>
33
/* Number of HSIC TTY ports this driver can expose (see hsic_configs[]). */
#define MAX_HSIC_TTYS 2
#define MAX_TTY_BUF_SIZE 2048

/* Serializes open/close bookkeeping across all ports. */
static DEFINE_MUTEX(hsic_tty_lock);

/* Intended wait (seconds) for the modem channel at first open.
 * NOTE(review): hsic_tty_open() uses try_wait_for_completion(), which does
 * not block, so this value currently only gates whether the check runs. */
static uint hsic_tty_modem_wait = 60;
module_param_named(modem_wait, hsic_tty_modem_wait,
		   uint, S_IRUGO | S_IWUSR | S_IWGRP);

/* Seconds close() waits for the bridge to report fully closed. */
static uint lge_ds_modem_wait = 20;
module_param_named(ds_modem_wait, lge_ds_modem_wait,
		   uint, S_IRUGO | S_IWUSR | S_IWGRP);

#define DATA_BRIDGE_NAME_MAX_LEN 20

/* Default queue depths / buffer sizes; all tunable via module params. */
#define HSIC_TTY_DATA_RMNET_RX_Q_SIZE 50
#define HSIC_TTY_DATA_RMNET_TX_Q_SIZE 300
#define HSIC_TTY_DATA_SERIAL_RX_Q_SIZE 2
#define HSIC_TTY_DATA_SERIAL_TX_Q_SIZE 2
#define HSIC_TTY_DATA_RX_REQ_SIZE 2048
#define HSIC_TTY_DATA_TX_INTR_THRESHOLD 20

static unsigned int hsic_tty_data_rmnet_tx_q_size =
    HSIC_TTY_DATA_RMNET_TX_Q_SIZE;
module_param(hsic_tty_data_rmnet_tx_q_size, uint, S_IRUGO | S_IWUSR);

static unsigned int hsic_tty_data_rmnet_rx_q_size =
    HSIC_TTY_DATA_RMNET_RX_Q_SIZE;
module_param(hsic_tty_data_rmnet_rx_q_size, uint, S_IRUGO | S_IWUSR);

static unsigned int hsic_tty_data_serial_tx_q_size =
    HSIC_TTY_DATA_SERIAL_TX_Q_SIZE;
module_param(hsic_tty_data_serial_tx_q_size, uint, S_IRUGO | S_IWUSR);

static unsigned int hsic_tty_data_serial_rx_q_size =
    HSIC_TTY_DATA_SERIAL_RX_Q_SIZE;
module_param(hsic_tty_data_serial_rx_q_size, uint, S_IRUGO | S_IWUSR);

static unsigned int hsic_tty_data_rx_req_size = HSIC_TTY_DATA_RX_REQ_SIZE;
module_param(hsic_tty_data_rx_req_size, uint, S_IRUGO | S_IWUSR);

unsigned int hsic_tty_data_tx_intr_thld = HSIC_TTY_DATA_TX_INTR_THRESHOLD;
module_param(hsic_tty_data_tx_intr_thld, uint, S_IRUGO | S_IWUSR);

/*
 * Flow control: when tx_skb_q (modem -> host) grows past the enable
 * threshold, RX from the bridge is throttled; once the TTY drains it
 * below the disable threshold, unthrottle (see hsic_tty_unthrottle()).
 */
#define HSIC_TTY_DATA_FLOW_CTRL_EN_THRESHOLD 500
#define HSIC_TTY_DATA_FLOW_CTRL_DISABLE 300
#define HSIC_TTY_DATA_FLOW_CTRL_SUPPORT 1
#define HSIC_TTY_DATA_PENDLIMIT_WITH_BRIDGE 500

static unsigned int hsic_tty_data_fctrl_support =
    HSIC_TTY_DATA_FLOW_CTRL_SUPPORT;
module_param(hsic_tty_data_fctrl_support, uint, S_IRUGO | S_IWUSR);

static unsigned int hsic_tty_data_fctrl_en_thld =
    HSIC_TTY_DATA_FLOW_CTRL_EN_THRESHOLD;
module_param(hsic_tty_data_fctrl_en_thld, uint, S_IRUGO | S_IWUSR);

static unsigned int hsic_tty_data_fctrl_dis_thld =
    HSIC_TTY_DATA_FLOW_CTRL_DISABLE;
module_param(hsic_tty_data_fctrl_dis_thld, uint, S_IRUGO | S_IWUSR);

static unsigned int hsic_tty_data_pend_limit_with_bridge =
    HSIC_TTY_DATA_PENDLIMIT_WITH_BRIDGE;
module_param(hsic_tty_data_pend_limit_with_bridge, uint, S_IRUGO | S_IWUSR);

/* Bit numbers within hsic_tty_info.bridge_sts */
#define CH_OPENED 0
#define CH_READY 1
102
/*
 * Per-port state.  Naming convention (inherited from the gadget code the
 * data path was ported from): "tx" is the modem -> host direction
 * (pushed into the TTY flip buffer), "rx" is host -> modem (written to
 * the data bridge).
 */
struct hsic_tty_info {
	struct tty_struct *tty;		/* set while the port is open */
	struct wake_lock wake_lock;	/* held briefly around RX pushes */
	int open_count;			/* guarded by hsic_tty_lock */
	struct timer_list buf_req_timer; /* retry when flip buffer is full */
	struct completion ch_allocated;	/* fired by hsic_tty_dummy_probe() */
	struct platform_driver driver;	/* dummy driver used for probe signal */
	int in_reset;			/* guarded by reset_lock */
	int in_reset_updated;		/* guarded by reset_lock */
	int is_open;			/* bridge open state; reset_lock */

	wait_queue_head_t ch_opened_wait_queue;	/* waits on is_open changes */
	spinlock_t reset_lock;
	struct hsic_config *hsic;	/* back-pointer into hsic_configs[] */

	/* gadget */
	atomic_t connected;

	/* data transfer queues */
	unsigned int tx_q_size;
	struct list_head tx_idle;
	struct sk_buff_head tx_skb_q;	/* modem -> host, guarded by tx_lock */
	spinlock_t tx_lock;

	unsigned int rx_q_size;
	struct list_head rx_idle;
	struct sk_buff_head rx_skb_q;	/* host -> modem, guarded by rx_lock */
	spinlock_t rx_lock;

	/* work items, all run on the per-port singlethread workqueue */
	struct workqueue_struct *wq;
	struct work_struct connect_w;
	struct work_struct disconnect_w;
	struct work_struct write_tomdm_w;
	struct work_struct write_tohost_w;

	struct bridge brdg;		/* mach-msm usb_bridge handle */

	/*bridge status */
	unsigned long bridge_sts;	/* CH_OPENED / CH_READY bits */

	/*counters (debug/statistics only) */
	unsigned long to_modem;
	unsigned long to_host;
	unsigned int rx_throttled_cnt;
	unsigned int rx_unthrottled_cnt;
	unsigned int tx_throttled_cnt;
	unsigned int tx_unthrottled_cnt;
	unsigned int tomodem_drp_cnt;
	unsigned int unthrottled_pnd_skbs;
};
154
/**
 * HSIC port configuration.
 *
 * @tty_dev_index Index into hsic_tty[]
 * @port_name Name of the HSIC port
 * @dev_name Name of the TTY Device (if NULL, @port_name is used)
 */
struct hsic_config {
	uint32_t tty_dev_index;
	const char *port_name;
	const char *dev_name;
};

/* Static port table; only the DUN data port is enabled. */
static struct hsic_config hsic_configs[] = {
	{0, "dun_data_hsic0", NULL},
	//{1, "rmnet_data_hsic0", NULL},
};

/* Per-port runtime state, indexed by hsic_config.tty_dev_index. */
static struct hsic_tty_info hsic_tty[MAX_HSIC_TTYS];
175
176static int is_in_reset(struct hsic_tty_info *info)
177{
178 return info->in_reset;
179}
180
181static void buf_req_retry(unsigned long param)
182{
183 struct hsic_tty_info *info = (struct hsic_tty_info *)param;
184 unsigned long flags;
185
186 spin_lock_irqsave(&info->reset_lock, flags);
187 if (info->is_open) {
188 spin_unlock_irqrestore(&info->reset_lock, flags);
189 queue_work(info->wq, &info->write_tohost_w);
190 return;
191 }
192 spin_unlock_irqrestore(&info->reset_lock, flags);
193}
194
195static void hsic_tty_data_write_tohost(struct work_struct *w)
196{
197 struct hsic_tty_info *info =
198 container_of(w, struct hsic_tty_info, write_tohost_w);
199 struct tty_struct *tty = info->tty;
200 struct sk_buff *skb;
201 unsigned char *ptr;
202 unsigned long flags;
203 int avail;
204
205 pr_debug("%s\n", __func__);
206
207 if (!info)
208 return;
209
210 spin_lock_irqsave(&info->tx_lock, flags);
211 for (;;) {
212 if (is_in_reset(info)) {
213 /* signal TTY clients using TTY_BREAK */
214 tty_insert_flip_char(tty, 0x00, TTY_BREAK);
215 tty_flip_buffer_push(tty);
216 break;
217 }
218
219 skb = __skb_dequeue(&info->tx_skb_q);
220 if (!skb)
221 break;
222
223 avail = skb->len;
224 if (avail == 0)
225 break;
226
227 avail = tty_prepare_flip_string(tty, &ptr, avail);
228 if (avail <= 0) {
229 if (!timer_pending(&info->buf_req_timer)) {
230 init_timer(&info->buf_req_timer);
231 info->buf_req_timer.expires = jiffies +
232 ((30 * HZ) / 1000);
233 info->buf_req_timer.function = buf_req_retry;
234 info->buf_req_timer.data = (unsigned long)info;
235 add_timer(&info->buf_req_timer);
236 }
237 spin_unlock_irqrestore(&info->tx_lock, flags);
238 return;
239 }
240
241 memcpy(ptr, skb->data, avail);
242 dev_kfree_skb_any(skb);
243
244 wake_lock_timeout(&info->wake_lock, HZ / 2);
245 tty_flip_buffer_push(tty);
246
247 info->to_host++;
248 }
249
250 /* XXX only when writable and necessary */
251 tty_wakeup(tty);
252 spin_unlock_irqrestore(&info->tx_lock, flags);
253}
254
255static int hsic_tty_data_receive(void *p, void *data, size_t len)
256{
257 struct hsic_tty_info *info = p;
258 unsigned long flags;
259 struct sk_buff *skb = data;
260
261 if (!info || !atomic_read(&info->connected)) {
262 dev_kfree_skb_any(skb);
263 return -ENOTCONN;
264 }
265
266 pr_debug("%s: p:%p#%d skb_len:%d\n", __func__,
267 info, info->tty->index, skb->len);
268
269 spin_lock_irqsave(&info->tx_lock, flags);
270 __skb_queue_tail(&info->tx_skb_q, skb);
271
272 if (hsic_tty_data_fctrl_support &&
273 info->tx_skb_q.qlen >= hsic_tty_data_fctrl_en_thld) {
274 set_bit(RX_THROTTLED, &info->brdg.flags);
275 info->rx_throttled_cnt++;
276 pr_debug("%s: flow ctrl enabled: tx skbq len: %u\n",
277 __func__, info->tx_skb_q.qlen);
278 spin_unlock_irqrestore(&info->tx_lock, flags);
279 queue_work(info->wq, &info->write_tohost_w);
280 return -EBUSY;
281 }
282
283 spin_unlock_irqrestore(&info->tx_lock, flags);
284
285 queue_work(info->wq, &info->write_tohost_w);
286
287 return 0;
288}
289
290static void hsic_tty_data_write_tomdm(struct work_struct *w)
291{
292 struct hsic_tty_info *info =
293 container_of(w, struct hsic_tty_info, write_tomdm_w);
294 struct sk_buff *skb;
295 unsigned long flags;
296 int ret;
297
298 pr_debug("%s\n", __func__);
299
300 if (!info || !atomic_read(&info->connected))
301 return;
302
303 spin_lock_irqsave(&info->rx_lock, flags);
304 if (test_bit(TX_THROTTLED, &info->brdg.flags)) {
305 spin_unlock_irqrestore(&info->rx_lock, flags);
306 return;
307 }
308
309 while ((skb = __skb_dequeue(&info->rx_skb_q))) {
310 pr_debug("%s: info:%p tom:%lu pno:%d\n", __func__,
311 info, info->to_modem, info->tty->index);
312
313 spin_unlock_irqrestore(&info->rx_lock, flags);
314 ret = data_bridge_write(info->brdg.ch_id, skb);
315 spin_lock_irqsave(&info->rx_lock, flags);
316 if (ret < 0) {
317 if (ret == -EBUSY) {
318 /*flow control */
319 info->tx_throttled_cnt++;
320 break;
321 }
322 pr_err("%s: write error:%d\n", __func__, ret);
323 info->tomodem_drp_cnt++;
324 dev_kfree_skb_any(skb);
325 break;
326 }
327 info->to_modem++;
328 }
329 spin_unlock_irqrestore(&info->rx_lock, flags);
330}
331
332static void hsic_tty_data_connect_w(struct work_struct *w)
333{
334 struct hsic_tty_info *info =
335 container_of(w, struct hsic_tty_info, connect_w);
336 unsigned long flags;
337 int ret;
338
339 pr_debug("%s\n", __func__);
340
341 if (!info || !atomic_read(&info->connected) ||
342 !test_bit(CH_READY, &info->bridge_sts))
343 return;
344
345 pr_debug("%s: info:%p\n", __func__, info);
346
347 ret = data_bridge_open(&info->brdg);
348 if (ret) {
349 pr_err("%s: unable open bridge ch:%d err:%d\n",
350 __func__, info->brdg.ch_id, ret);
351 return;
352 }
353
354 set_bit(CH_OPENED, &info->bridge_sts);
355
356 spin_lock_irqsave(&info->reset_lock, flags);
357 info->in_reset = 0;
358 info->in_reset_updated = 1;
359 info->is_open = 1;
360 wake_up_interruptible(&info->ch_opened_wait_queue);
361 spin_unlock_irqrestore(&info->reset_lock, flags);
362}
363
364static void hsic_tty_data_disconnect_w(struct work_struct *w)
365{
366 struct hsic_tty_info *info =
367 container_of(w, struct hsic_tty_info, connect_w);
368 unsigned long flags;
369
370 pr_debug("%s\n", __func__);
371
372 if (!test_bit(CH_OPENED, &info->bridge_sts))
373 return;
374
375 data_bridge_close(info->brdg.ch_id);
376 clear_bit(CH_OPENED, &info->bridge_sts);
377
378 spin_lock_irqsave(&info->reset_lock, flags);
379 info->in_reset = 1;
380 info->in_reset_updated = 1;
381 info->is_open = 0;
382 wake_up_interruptible(&info->ch_opened_wait_queue);
383 spin_unlock_irqrestore(&info->reset_lock, flags);
384 /* schedule task to send TTY_BREAK */
385 queue_work(info->wq, &info->write_tohost_w);
386}
387
/*
 * TTY open().  On the first open of a port: verify the platform channel
 * has been allocated (signalled by hsic_tty_dummy_probe() completing
 * ch_allocated), reset the statistics counters, mark the channel ready,
 * kick connect_w, and wait up to 2s for the bridge to come up.
 *
 * Returns 0 on success, -ENODEV for an unconfigured index, -ETIMEDOUT
 * if the channel or bridge never came up, or a -ERESTARTSYS-style error
 * from an interrupted wait.  All bookkeeping is under hsic_tty_lock.
 */
static int hsic_tty_open(struct tty_struct *tty, struct file *f)
{
	int res = 0;
	unsigned int n = tty->index;
	struct hsic_tty_info *info;
	unsigned long flags;

	pr_debug("%s\n", __func__);

	if (n >= MAX_HSIC_TTYS || !hsic_tty[n].hsic)
		return -ENODEV;

	info = hsic_tty + n;

	mutex_lock(&hsic_tty_lock);
	tty->driver_data = info;

	if (info->open_count++ == 0) {
		/*
		 * Wait for a channel to be allocated so we know
		 * the modem is ready enough.
		 */
		if (hsic_tty_modem_wait) {
			/* NOTE(review): try_wait_for_completion() does not
			 * block and returns 0 (not done) or nonzero (done);
			 * the modem_wait timeout is not actually honoured
			 * and the res < 0 branch below is unreachable. */
			res = try_wait_for_completion(&info->ch_allocated);

			if (res == 0) {
				pr_debug
				    ("%s: Timed out waiting for HSIC channel\n",
				     __func__);
				res = -ETIMEDOUT;
				goto out;
			} else if (res < 0) {
				pr_err
				    ("%s: Error waiting for HSIC channel: %d\n",
				     __func__, res);
				goto out;
			}
			pr_info("%s: opened %s\n", __func__,
				hsic_tty[n].hsic->port_name);

			res = 0;
		}

		info->tty = tty;
		wake_lock_init(&info->wake_lock, WAKE_LOCK_SUSPEND,
			       hsic_tty[n].hsic->port_name);
		if (!atomic_read(&info->connected)) {
			atomic_set(&info->connected, 1);

			/* reset tx-side (modem -> host) statistics */
			spin_lock_irqsave(&info->tx_lock, flags);
			info->to_host = 0;
			info->rx_throttled_cnt = 0;
			info->rx_unthrottled_cnt = 0;
			info->unthrottled_pnd_skbs = 0;
			spin_unlock_irqrestore(&info->tx_lock, flags);

			/* reset rx-side (host -> modem) statistics */
			spin_lock_irqsave(&info->rx_lock, flags);
			info->to_modem = 0;
			info->tomodem_drp_cnt = 0;
			info->tx_throttled_cnt = 0;
			info->tx_unthrottled_cnt = 0;
			spin_unlock_irqrestore(&info->rx_lock, flags);

			set_bit(CH_READY, &info->bridge_sts);

			/* connect_w opens the bridge and sets is_open */
			queue_work(info->wq, &info->connect_w);

			res =
			    wait_event_interruptible_timeout(info->
							     ch_opened_wait_queue,
							     info->is_open,
							     (2 * HZ));
			if (res == 0)
				res = -ETIMEDOUT;
			if (res < 0) {
				pr_err("%s: wait for %s hsic_open failed %d\n",
				       __func__, hsic_tty[n].hsic->port_name,
				       res);
				goto out;
			}
			res = 0;
		}
	}

out:
	mutex_unlock(&hsic_tty_lock);

	return res;
}
477
/*
 * TTY close().  On the last close of a port: mark it closed, release
 * the wakelock, cancel the flip-buffer retry timer, schedule
 * disconnect_w, wait (up to ds_modem_wait seconds) for the bridge to
 * finish closing, then close the bridge channel and free every skb
 * still queued in either direction.  All bookkeeping under
 * hsic_tty_lock.
 */
static void hsic_tty_close(struct tty_struct *tty, struct file *f)
{
	struct hsic_tty_info *info = tty->driver_data;
	unsigned long flags;
	int res = 0;
	int n = tty->index;
	struct sk_buff *skb;

	pr_debug("%s\n", __func__);

	/* open may have failed before driver_data was set */
	if (info == 0)
		return;

	mutex_lock(&hsic_tty_lock);
	if (--info->open_count == 0) {
		spin_lock_irqsave(&info->reset_lock, flags);
		info->is_open = 0;
		spin_unlock_irqrestore(&info->reset_lock, flags);
		if (info->tty) {
			wake_lock_destroy(&info->wake_lock);
			info->tty = 0;
		}
		tty->driver_data = 0;
		del_timer(&info->buf_req_timer);
		if (atomic_read(&info->connected)) {
			atomic_set(&info->connected, 0);

			/* drop both throttle flags under their queue locks */
			spin_lock_irqsave(&info->tx_lock, flags);
			clear_bit(RX_THROTTLED, &info->brdg.flags);
			spin_unlock_irqrestore(&info->tx_lock, flags);

			spin_lock_irqsave(&info->rx_lock, flags);
			clear_bit(TX_THROTTLED, &info->brdg.flags);
			spin_unlock_irqrestore(&info->rx_lock, flags);

			queue_work(info->wq, &info->disconnect_w);

			pr_info("%s: waiting to close hsic %s completely\n",
				__func__, hsic_tty[n].hsic->port_name);
			/* wait for reopen ready status in seconds */
			res =
			    wait_event_interruptible_timeout(info->
							     ch_opened_wait_queue,
							     !info->is_open,
							     (lge_ds_modem_wait
							      * HZ));
			if (res == 0) {
				/* just in case, remain result value */
				res = -ETIMEDOUT;
				pr_err("%s: timeout to wait for %s hsic_close.\
			next hsic_open may fail....%d\n", __func__, hsic_tty[n].hsic->port_name, res);
			}
			if (res < 0) {
				pr_err("%s: wait for %s hsic_close failed.\
			next hsic_open may fail....%d\n", __func__, hsic_tty[n].hsic->port_name, res);
			}

			/* harmless if disconnect_w already closed it */
			data_bridge_close(info->brdg.ch_id);

			clear_bit(CH_READY, &info->bridge_sts);
			clear_bit(CH_OPENED, &info->bridge_sts);

			/* discard anything still pending in both queues */
			spin_lock_irqsave(&info->tx_lock, flags);
			while ((skb = __skb_dequeue(&info->tx_skb_q)))
				dev_kfree_skb_any(skb);
			spin_unlock_irqrestore(&info->tx_lock, flags);

			spin_lock_irqsave(&info->rx_lock, flags);
			while ((skb = __skb_dequeue(&info->rx_skb_q)))
				dev_kfree_skb_any(skb);
			spin_unlock_irqrestore(&info->rx_lock, flags);
		}
	}
	mutex_unlock(&hsic_tty_lock);
}
553
554static int hsic_tty_write(struct tty_struct *tty, const unsigned char *buf,
555 int len)
556{
557 struct hsic_tty_info *info = tty->driver_data;
558 int avail;
559 struct sk_buff *skb;
560
561 pr_debug("%s\n", __func__);
562
563 /* if we're writing to a packet channel we will
564 ** never be able to write more data than there
565 ** is currently space for
566 */
567 if (is_in_reset(info))
568 return -ENETRESET;
569
570 avail = test_bit(CH_OPENED, &info->bridge_sts);
571 /* if no space, we'll have to setup a notification later to wake up the
572 * tty framework when space becomes avaliable
573 */
574 if (!avail)
575 return 0;
576
577 skb = alloc_skb(len, GFP_ATOMIC);
578 skb->data = (unsigned char *)buf;
579 skb->len = len;
580
581 spin_lock(&info->rx_lock);
582 __skb_queue_tail(&info->rx_skb_q, skb);
583 queue_work(info->wq, &info->write_tomdm_w);
584 spin_unlock(&info->rx_lock);
585
586 return len;
587}
588
589static int hsic_tty_write_room(struct tty_struct *tty)
590{
591 struct hsic_tty_info *info = tty->driver_data;
592 return test_bit(CH_OPENED, &info->bridge_sts);
593}
594
595static int hsic_tty_chars_in_buffer(struct tty_struct *tty)
596{
597 struct hsic_tty_info *info = tty->driver_data;
598 return test_bit(CH_OPENED, &info->bridge_sts);
599}
600
601static void hsic_tty_unthrottle(struct tty_struct *tty)
602{
603 struct hsic_tty_info *info = tty->driver_data;
604 unsigned long flags;
605
606 pr_debug("%s\n", __func__);
607
608 spin_lock_irqsave(&info->reset_lock, flags);
609 if (info->is_open) {
610 spin_unlock_irqrestore(&info->reset_lock, flags);
611 if (hsic_tty_data_fctrl_support &&
612 info->tx_skb_q.qlen <= hsic_tty_data_fctrl_dis_thld &&
613 test_and_clear_bit(RX_THROTTLED, &info->brdg.flags)) {
614 info->rx_unthrottled_cnt++;
615 info->unthrottled_pnd_skbs = info->tx_skb_q.qlen;
616 pr_debug("%s: disable flow ctrl:"
617 " tx skbq len: %u\n",
618 __func__, info->tx_skb_q.qlen);
619 data_bridge_unthrottle_rx(info->brdg.ch_id);
620 queue_work(info->wq, &info->write_tohost_w);
621 }
622 return;
623 }
624 spin_unlock_irqrestore(&info->reset_lock, flags);
625}
626
627static struct tty_operations hsic_tty_ops = {
628 .open = hsic_tty_open,
629 .close = hsic_tty_close,
630 .write = hsic_tty_write,
631 .write_room = hsic_tty_write_room,
632 .chars_in_buffer = hsic_tty_chars_in_buffer,
633 .unthrottle = hsic_tty_unthrottle,
634};
635
636static int hsic_tty_dummy_probe(struct platform_device *pdev)
637{
638 int n;
639 int idx;
640
641 for (n = 0; n < ARRAY_SIZE(hsic_configs); ++n) {
642 idx = hsic_configs[n].tty_dev_index;
643
644 if (!hsic_configs[n].dev_name)
645 continue;
646
647 if (/* pdev->id == hsic_configs[n].edge && */
648 !strncmp(pdev->name, hsic_configs[n].dev_name,
649 DATA_BRIDGE_NAME_MAX_LEN)) {
650 complete_all(&hsic_tty[idx].ch_allocated);
651 pr_info("%s: %s ch_allocated\n", __func__,
652 hsic_configs[n].dev_name);
653 return 0;
654 }
655 }
656 pr_err("%s: unknown device '%s'\n", __func__, pdev->name);
657
658 return -ENODEV;
659}
660
661static struct tty_driver *hsic_tty_driver;
662
663static int __init hsic_tty_init(void)
664{
665 int ret;
666 int n;
667 int idx;
668
669 pr_debug("%s\n", __func__);
670
671 hsic_tty_driver = alloc_tty_driver(MAX_HSIC_TTYS);
672 if (hsic_tty_driver == 0)
673 return -ENOMEM;
674
675 hsic_tty_driver->owner = THIS_MODULE;
676 hsic_tty_driver->driver_name = "hsic_tty_driver";
677 hsic_tty_driver->name = "hsic";
678 hsic_tty_driver->major = 0;
679 hsic_tty_driver->minor_start = 0;
680 hsic_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
681 hsic_tty_driver->subtype = SERIAL_TYPE_NORMAL;
682 hsic_tty_driver->init_termios = tty_std_termios;
683 hsic_tty_driver->init_termios.c_iflag = 0;
684 hsic_tty_driver->init_termios.c_oflag = 0;
685 hsic_tty_driver->init_termios.c_cflag = B38400 | CS8 | CREAD;
686 hsic_tty_driver->init_termios.c_lflag = 0;
687 hsic_tty_driver->flags = TTY_DRIVER_RESET_TERMIOS |
688 TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
689 tty_set_operations(hsic_tty_driver, &hsic_tty_ops);
690
691 ret = tty_register_driver(hsic_tty_driver);
692 if (ret) {
693 put_tty_driver(hsic_tty_driver);
694 pr_err("%s: driver registration failed %d\n", __func__, ret);
695 return ret;
696 }
697
698 for (n = 0; n < ARRAY_SIZE(hsic_configs); ++n) {
699 idx = hsic_configs[n].tty_dev_index;
700
701 if (hsic_configs[n].dev_name == NULL)
702 hsic_configs[n].dev_name = hsic_configs[n].port_name;
703
704 tty_register_device(hsic_tty_driver, idx, 0);
705 init_completion(&hsic_tty[idx].ch_allocated);
706
707 hsic_tty[idx].wq =
708 create_singlethread_workqueue(hsic_configs[n].port_name);
709 if (!hsic_tty[idx].wq) {
710 pr_err("%s: Unable to create workqueue:%s\n",
711 __func__, hsic_configs[n].port_name);
712 return -ENOMEM;
713 }
714
715 /* port initialization */
716 spin_lock_init(&hsic_tty[idx].rx_lock);
717 spin_lock_init(&hsic_tty[idx].tx_lock);
718
719 INIT_WORK(&hsic_tty[idx].connect_w, hsic_tty_data_connect_w);
720 INIT_WORK(&hsic_tty[idx].disconnect_w,
721 hsic_tty_data_disconnect_w);
722 INIT_WORK(&hsic_tty[idx].write_tohost_w,
723 hsic_tty_data_write_tohost);
724 INIT_WORK(&hsic_tty[idx].write_tomdm_w,
725 hsic_tty_data_write_tomdm);
726
727 INIT_LIST_HEAD(&hsic_tty[idx].tx_idle);
728 INIT_LIST_HEAD(&hsic_tty[idx].rx_idle);
729
730 skb_queue_head_init(&hsic_tty[idx].tx_skb_q);
731 skb_queue_head_init(&hsic_tty[idx].rx_skb_q);
732
733 hsic_tty[idx].brdg.ch_id = idx;
734 hsic_tty[idx].brdg.ctx = &hsic_tty[idx];
735 hsic_tty[idx].brdg.ops.send_pkt = hsic_tty_data_receive;
736
737 hsic_tty[idx].driver.probe = hsic_tty_dummy_probe;
738 hsic_tty[idx].driver.driver.name = hsic_configs[n].dev_name;
739 hsic_tty[idx].driver.driver.owner = THIS_MODULE;
740 spin_lock_init(&hsic_tty[idx].reset_lock);
741 hsic_tty[idx].is_open = 0;
742 init_waitqueue_head(&hsic_tty[idx].ch_opened_wait_queue);
743 ret = platform_driver_register(&hsic_tty[idx].driver);
744
745 if (ret) {
746 pr_err("%s: init failed %d (%d)\n", __func__, idx, ret);
747 hsic_tty[idx].driver.probe = NULL;
748 goto out;
749 }
750 hsic_tty[idx].hsic = &hsic_configs[n];
751 }
752 return 0;
753
754out:
755 /* unregister platform devices */
756 for (n = 0; n < ARRAY_SIZE(hsic_configs); ++n) {
757 idx = hsic_configs[n].tty_dev_index;
758
759 if (hsic_tty[idx].driver.probe) {
760 platform_driver_unregister(&hsic_tty[idx].driver);
761 tty_unregister_device(hsic_tty_driver, idx);
762 }
763 }
764
765 tty_unregister_driver(hsic_tty_driver);
766 put_tty_driver(hsic_tty_driver);
767 return ret;
768}
769
770module_init(hsic_tty_init);