blob: de93049c9506c6a9868ea51383007ac1bcf66ec6 [file] [log] [blame]
David Brownellc1dca562008-06-19 17:51:44 -07001/*
2 * u_serial.c - utilities for USB gadget "serial port"/TTY support
3 *
4 * Copyright (C) 2003 Al Borchers (alborchers@steinerpoint.com)
5 * Copyright (C) 2008 David Brownell
6 * Copyright (C) 2008 by Nokia Corporation
7 *
8 * This code also borrows from usbserial.c, which is
9 * Copyright (C) 1999 - 2002 Greg Kroah-Hartman (greg@kroah.com)
10 * Copyright (C) 2000 Peter Berger (pberger@brimson.com)
11 * Copyright (C) 2000 Al Borchers (alborchers@steinerpoint.com)
12 *
13 * This software is distributed under the terms of the GNU General
14 * Public License ("GPL") as published by the Free Software Foundation,
15 * either version 2 of that License or (at your option) any later version.
16 */
17
18/* #define VERBOSE_DEBUG */
19
20#include <linux/kernel.h>
stephane duverger1e413942010-06-29 16:57:25 +020021#include <linux/sched.h>
David Brownellc1dca562008-06-19 17:51:44 -070022#include <linux/interrupt.h>
23#include <linux/device.h>
24#include <linux/delay.h>
25#include <linux/tty.h>
26#include <linux/tty_flip.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090027#include <linux/slab.h>
Paul Gortmakerf940fcd2011-05-27 09:56:31 -040028#include <linux/export.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070029#include <linux/debugfs.h>
David Brownellc1dca562008-06-19 17:51:44 -070030
31#include "u_serial.h"
32
33
34/*
35 * This component encapsulates the TTY layer glue needed to provide basic
36 * "serial port" functionality through the USB gadget stack. Each such
37 * port is exposed through a /dev/ttyGS* node.
38 *
39 * After initialization (gserial_setup), these TTY port devices stay
40 * available until they are removed (gserial_cleanup). Each one may be
41 * connected to a USB function (gserial_connect), or disconnected (with
42 * gserial_disconnect) when the USB host issues a config change event.
43 * Data can only flow when the port is connected to the host.
44 *
45 * A given TTY port can be made available in multiple configurations.
46 * For example, each one might expose a ttyGS0 node which provides a
47 * login application. In one case that might use CDC ACM interface 0,
48 * while another configuration might use interface 3 for that. The
49 * work to handle that (including descriptor management) is not part
50 * of this component.
51 *
52 * Configurations may expose more than one TTY port. For example, if
53 * ttyGS0 provides login service, then ttyGS1 might provide dialer access
54 * for a telephone or fax link. And ttyGS2 might be something that just
55 * needs a simple byte stream interface for some messaging protocol that
56 * is managed in userspace ... OBEX, PTP, and MTP have been mentioned.
57 */
58
David Brownell937ef732008-07-07 12:16:08 -070059#define PREFIX "ttyGS"
60
David Brownellc1dca562008-06-19 17:51:44 -070061/*
62 * gserial is the lifecycle interface, used by USB functions
63 * gs_port is the I/O nexus, used by the tty driver
64 * tty_struct links to the tty/filesystem framework
65 *
66 * gserial <---> gs_port ... links will be null when the USB link is
David Brownell1f1ba112008-08-06 18:49:57 -070067 * inactive; managed by gserial_{connect,disconnect}(). each gserial
68 * instance can wrap its own USB control protocol.
David Brownellc1dca562008-06-19 17:51:44 -070069 * gserial->ioport == usb_ep->driver_data ... gs_port
70 * gs_port->port_usb ... gserial
71 *
72 * gs_port <---> tty_struct ... links will be null when the TTY file
73 * isn't opened; managed by gs_open()/gs_close()
74 * gserial->port_tty ... tty_struct
75 * tty_struct->driver_data ... gserial
76 */
77
78/* RX and TX queues can buffer QUEUE_SIZE packets before they hit the
79 * next layer of buffering. For TX that's a circular buffer; for RX
80 * consider it a NOP. A third layer is provided by the TTY code.
81 */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070082#define TX_QUEUE_SIZE 8
83#define TX_BUF_SIZE 4096
David Brownellc1dca562008-06-19 17:51:44 -070084#define WRITE_BUF_SIZE 8192 /* TX only */
85
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070086#define RX_QUEUE_SIZE 8
87#define RX_BUF_SIZE 4096
88
89
/* Single-producer/single-consumer circular buffer.  One byte of storage
 * is always left unused (see gs_buf_space_avail) so that a full buffer
 * can be distinguished from an empty one.
 */
struct gs_buf {
	unsigned		buf_size;	/* total bytes in buf_buf */
	char			*buf_buf;	/* backing storage; NULL until gs_buf_alloc() */
	char			*buf_get;	/* consumer cursor (next byte to read) */
	char			*buf_put;	/* producer cursor (next byte to write) */
};
97
/*
 * The port structure holds info for each port, one for each minor number
 * (and thus for each /dev/ node).
 */
struct gs_port {
	spinlock_t		port_lock;	/* guard port_* access */

	struct gserial		*port_usb;	/* non-NULL only while USB link is up */
	struct tty_struct	*port_tty;	/* non-NULL only while /dev/ttyGS* is open */

	unsigned		open_count;	/* tty open() nesting depth */
	bool			openclose;	/* open/close in progress */
	u8			port_num;

	wait_queue_head_t	close_wait;	/* wait for last close */

	struct list_head	read_pool;	/* idle OUT requests */
	int			read_started;	/* OUT requests queued to the UDC */
	int			read_allocated;	/* OUT requests allocated in total */
	struct list_head	read_queue;	/* completed OUT requests awaiting tty push */
	unsigned		n_read;		/* bytes of head request already pushed to tty */
	struct work_struct	push;		/* hands read_queue data to the tty (gs_rx_push) */

	struct list_head	write_pool;	/* idle IN requests */
	int			write_started;	/* IN requests queued to the UDC */
	int			write_allocated;
	struct gs_buf		port_write_buf;	/* circular buffer feeding IN requests */
	wait_queue_head_t	drain_wait;	/* wait while writes drain */

	/* REVISIT this state ... */
	struct usb_cdc_line_coding port_line_coding;	/* 8-N-1 etc */

	/* byte counters, exposed for debugging/statistics */
	unsigned long		nbytes_from_host;
	unsigned long		nbytes_to_tty;
	unsigned long		nbytes_from_tty;
	unsigned long		nbytes_to_host;
};
134
135/* increase N_PORTS if you need more */
John Michelau677ba872010-11-08 18:05:37 -0600136#define N_PORTS 8
David Brownellc1dca562008-06-19 17:51:44 -0700137static struct portmaster {
138 struct mutex lock; /* protect open/close */
139 struct gs_port *port;
140} ports[N_PORTS];
141static unsigned n_ports;
142
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700143static struct workqueue_struct *gserial_wq;
144
David Brownellc1dca562008-06-19 17:51:44 -0700145#define GS_CLOSE_TIMEOUT 15 /* seconds */
146
147
148
149#ifdef VERBOSE_DEBUG
150#define pr_vdebug(fmt, arg...) \
151 pr_debug(fmt, ##arg)
152#else
153#define pr_vdebug(fmt, arg...) \
154 ({ if (0) pr_debug(fmt, ##arg); })
155#endif
156
157/*-------------------------------------------------------------------------*/
158
159/* Circular Buffer */
160
161/*
162 * gs_buf_alloc
163 *
164 * Allocate a circular buffer and all associated memory.
165 */
166static int gs_buf_alloc(struct gs_buf *gb, unsigned size)
167{
168 gb->buf_buf = kmalloc(size, GFP_KERNEL);
169 if (gb->buf_buf == NULL)
170 return -ENOMEM;
171
172 gb->buf_size = size;
173 gb->buf_put = gb->buf_buf;
174 gb->buf_get = gb->buf_buf;
175
176 return 0;
177}
178
/*
 * gs_buf_free
 *
 * Free the buffer's backing storage.  buf_buf is reset to NULL so that
 * gs_open() can detect the buffer needs (re)allocating on next open.
 */
static void gs_buf_free(struct gs_buf *gb)
{
	kfree(gb->buf_buf);
	gb->buf_buf = NULL;
}
189
/*
 * gs_buf_clear
 *
 * Clear out all data in the circular buffer.  Only moves the consumer
 * cursor, so it is safe against a concurrent producer.
 */
static void gs_buf_clear(struct gs_buf *gb)
{
	gb->buf_get = gb->buf_put;
	/* equivalent to a get of all data available */
}
200
201/*
202 * gs_buf_data_avail
203 *
David Brownell1f1ba112008-08-06 18:49:57 -0700204 * Return the number of bytes of data written into the circular
David Brownellc1dca562008-06-19 17:51:44 -0700205 * buffer.
206 */
207static unsigned gs_buf_data_avail(struct gs_buf *gb)
208{
209 return (gb->buf_size + gb->buf_put - gb->buf_get) % gb->buf_size;
210}
211
212/*
213 * gs_buf_space_avail
214 *
215 * Return the number of bytes of space available in the circular
216 * buffer.
217 */
218static unsigned gs_buf_space_avail(struct gs_buf *gb)
219{
220 return (gb->buf_size + gb->buf_get - gb->buf_put - 1) % gb->buf_size;
221}
222
223/*
224 * gs_buf_put
225 *
226 * Copy data data from a user buffer and put it into the circular buffer.
227 * Restrict to the amount of space available.
228 *
229 * Return the number of bytes copied.
230 */
231static unsigned
232gs_buf_put(struct gs_buf *gb, const char *buf, unsigned count)
233{
234 unsigned len;
235
236 len = gs_buf_space_avail(gb);
237 if (count > len)
238 count = len;
239
240 if (count == 0)
241 return 0;
242
243 len = gb->buf_buf + gb->buf_size - gb->buf_put;
244 if (count > len) {
245 memcpy(gb->buf_put, buf, len);
246 memcpy(gb->buf_buf, buf+len, count - len);
247 gb->buf_put = gb->buf_buf + count - len;
248 } else {
249 memcpy(gb->buf_put, buf, count);
250 if (count < len)
251 gb->buf_put += count;
252 else /* count == len */
253 gb->buf_put = gb->buf_buf;
254 }
255
256 return count;
257}
258
259/*
260 * gs_buf_get
261 *
262 * Get data from the circular buffer and copy to the given buffer.
263 * Restrict to the amount of data available.
264 *
265 * Return the number of bytes copied.
266 */
267static unsigned
268gs_buf_get(struct gs_buf *gb, char *buf, unsigned count)
269{
270 unsigned len;
271
272 len = gs_buf_data_avail(gb);
273 if (count > len)
274 count = len;
275
276 if (count == 0)
277 return 0;
278
279 len = gb->buf_buf + gb->buf_size - gb->buf_get;
280 if (count > len) {
281 memcpy(buf, gb->buf_get, len);
282 memcpy(buf+len, gb->buf_buf, count - len);
283 gb->buf_get = gb->buf_buf + count - len;
284 } else {
285 memcpy(buf, gb->buf_get, count);
286 if (count < len)
287 gb->buf_get += count;
288 else /* count == len */
289 gb->buf_get = gb->buf_buf;
290 }
291
292 return count;
293}
294
295/*-------------------------------------------------------------------------*/
296
297/* I/O glue between TTY (upper) and USB function (lower) driver layers */
298
299/*
300 * gs_alloc_req
301 *
302 * Allocate a usb_request and its buffer. Returns a pointer to the
303 * usb_request or NULL if there is an error.
304 */
David Brownell1f1ba112008-08-06 18:49:57 -0700305struct usb_request *
David Brownellc1dca562008-06-19 17:51:44 -0700306gs_alloc_req(struct usb_ep *ep, unsigned len, gfp_t kmalloc_flags)
307{
308 struct usb_request *req;
309
310 req = usb_ep_alloc_request(ep, kmalloc_flags);
311
312 if (req != NULL) {
313 req->length = len;
314 req->buf = kmalloc(len, kmalloc_flags);
315 if (req->buf == NULL) {
316 usb_ep_free_request(ep, req);
317 return NULL;
318 }
319 }
320
321 return req;
322}
323
/*
 * gs_free_req
 *
 * Free a usb_request and its buffer; the counterpart of gs_alloc_req().
 */
void gs_free_req(struct usb_ep *ep, struct usb_request *req)
{
	kfree(req->buf);
	usb_ep_free_request(ep, req);
}
334
335/*
336 * gs_send_packet
337 *
338 * If there is data to send, a packet is built in the given
339 * buffer and the size is returned. If there is no data to
340 * send, 0 is returned.
341 *
342 * Called with port_lock held.
343 */
344static unsigned
345gs_send_packet(struct gs_port *port, char *packet, unsigned size)
346{
347 unsigned len;
348
349 len = gs_buf_data_avail(&port->port_write_buf);
350 if (len < size)
351 size = len;
352 if (size != 0)
353 size = gs_buf_get(&port->port_write_buf, packet, size);
354 return size;
355}
356
/*
 * gs_start_tx
 *
 * Pull data from the circular write buffer into idle write requests and
 * queue them on the IN endpoint, until either the in-flight cap
 * (TX_QUEUE_SIZE), the request pool, or the data runs out.  Runs
 * whenever data arrives from the tty or a write request completes.
 *
 * Context: caller owns port_lock; port_usb is non-null.  The lock is
 * dropped around usb_ep_queue() because the UDC may call completions
 * (or disconnect) back into this driver.
 */
static int gs_start_tx(struct gs_port *port)
/*
__releases(&port->port_lock)
__acquires(&port->port_lock)
*/
{
	struct list_head	*pool = &port->write_pool;
	struct usb_ep		*in = port->port_usb->in;
	int			status = 0;
	/* NOTE(review): prev_len remembers the previous transfer's length
	 * so a zero-length packet can terminate a transfer that was an
	 * exact multiple of maxpacket.  As a function-static it is shared
	 * by ALL ports; with more than one port this looks wrong and
	 * should presumably live in struct gs_port -- confirm.
	 */
	static long		prev_len;
	bool			do_tty_wake = false;

	while (!list_empty(pool)) {
		struct usb_request	*req;
		int			len;

		/* cap the number of transfers in flight */
		if (port->write_started >= TX_QUEUE_SIZE)
			break;

		req = list_entry(pool->next, struct usb_request, list);
		len = gs_send_packet(port, req->buf, TX_BUF_SIZE);
		if (len == 0) {
			/* Queue zero length packet explicitly to make it
			 * work with UDCs which don't support req->zero flag
			 */
			if (prev_len && (prev_len % in->maxpacket == 0)) {
				req->length = 0;
				list_del(&req->list);
				spin_unlock(&port->port_lock);
				status = usb_ep_queue(in, req, GFP_ATOMIC);
				spin_lock(&port->port_lock);
				/* disconnected while unlocked: the pools were
				 * already freed, so free this request too
				 */
				if (!port->port_usb) {
					gs_free_req(in, req);
					break;
				}
				if (status) {
					printk(KERN_ERR "%s: %s err %d\n",
							__func__, "queue", status);
					list_add(&req->list, pool);
				}
				prev_len = 0;
			}
			/* write buffer drained: let gs_close() stop waiting */
			wake_up_interruptible(&port->drain_wait);
			break;
		}
		do_tty_wake = true;

		req->length = len;
		list_del(&req->list);

		pr_vdebug(PREFIX "%d: tx len=%d, 0x%02x 0x%02x 0x%02x ...\n",
				port->port_num, len, *((u8 *)req->buf),
				*((u8 *)req->buf+1), *((u8 *)req->buf+2));

		/* Drop lock while we call out of driver; completions
		 * could be issued while we do so. Disconnection may
		 * happen too; maybe immediately before we queue this!
		 *
		 * NOTE that we may keep sending data for a while after
		 * the TTY closed (dev->ioport->port_tty is NULL).
		 */
		spin_unlock(&port->port_lock);
		status = usb_ep_queue(in, req, GFP_ATOMIC);
		spin_lock(&port->port_lock);
		/*
		 * If port_usb is NULL, gserial disconnect is called
		 * while the spinlock is dropped and all requests are
		 * freed. Free the current request here.
		 */
		if (!port->port_usb) {
			do_tty_wake = false;
			gs_free_req(in, req);
			break;
		}
		if (status) {
			pr_debug("%s: %s %s err %d\n",
					__func__, "queue", in->name, status);
			list_add(&req->list, pool);
			break;
		}
		prev_len = req->length;
		port->nbytes_from_tty += req->length;
		/* NOTE(review): write_started is checked above and
		 * decremented in gs_write_complete(), yet no increment is
		 * visible in this function -- verify against the full file.
		 */
	}

	/* we freed tty buffer space; let writers make progress */
	if (do_tty_wake && port->port_tty)
		tty_wakeup(port->port_tty);
	return status;
}
457
/*
 * Queue idle read requests from read_pool onto the OUT endpoint, up to
 * RX_QUEUE_SIZE in flight, stopping early if the tty is closed or the
 * link drops.  Returns the number of requests currently queued.
 *
 * Context: caller owns port_lock, and port_usb is set.  The lock is
 * dropped around usb_ep_queue() since the controller driver may call
 * back into us (e.g. for disconnect).
 */
static unsigned gs_start_rx(struct gs_port *port)
/*
__releases(&port->port_lock)
__acquires(&port->port_lock)
*/
{
	struct list_head	*pool = &port->read_pool;
	struct usb_ep		*out = port->port_usb->out;
	/* NOTE(review): 'started' is only ever written, never used for the
	 * return value; it appears vestigial here.
	 */
	unsigned		started = 0;

	while (!list_empty(pool)) {
		struct usb_request	*req;
		int			status;
		struct tty_struct	*tty;

		/* no more rx if closed */
		tty = port->port_tty;
		if (!tty)
			break;

		/* cap the number of transfers in flight */
		if (port->read_started >= RX_QUEUE_SIZE)
			break;

		req = list_entry(pool->next, struct usb_request, list);
		list_del(&req->list);
		req->length = RX_BUF_SIZE;

		/* drop lock while we call out; the controller driver
		 * may need to call us back (e.g. for disconnect)
		 */
		spin_unlock(&port->port_lock);
		status = usb_ep_queue(out, req, GFP_ATOMIC);
		spin_lock(&port->port_lock);
		/*
		 * If port_usb is NULL, gserial disconnect is called
		 * while the spinlock is dropped and all requests are
		 * freed. Free the current request here.
		 */
		if (!port->port_usb) {
			started = 0;
			gs_free_req(out, req);
			break;
		}
		if (status) {
			pr_debug("%s: %s %s err %d\n",
					__func__, "queue", out->name, status);
			list_add(&req->list, pool);
			break;
		}
		port->read_started++;
	}
	return port->read_started;
}
515
/*
 * Push worker: takes completed OUT requests off the RX queue and hands
 * their data up to the TTY layer until it refuses to take any more
 * (or is throttled back).  Then it issues reads for any further data.
 *
 * If the RX queue becomes full enough that no usb_request is queued,
 * the OUT endpoint may begin NAKing as soon as its FIFO fills up.
 * So QUEUE_SIZE packets plus however many the FIFO holds (usually two)
 * can be buffered before the TTY layer's buffers (currently 64 KB).
 */
static void gs_rx_push(struct work_struct *w)
{
	struct gs_port		*port = container_of(w, struct gs_port, push);
	struct tty_struct	*tty;
	struct list_head	*queue = &port->read_queue;
	bool			disconnect = false;
	bool			do_push = false;

	/* hand any queued data to the tty */
	spin_lock_irq(&port->port_lock);
	tty = port->port_tty;
	while (!list_empty(queue)) {
		struct usb_request	*req;

		req = list_first_entry(queue, struct usb_request, list);

		/* discard data if tty was closed */
		if (!tty)
			goto recycle;

		/* leave data queued if tty was rx throttled */
		if (test_bit(TTY_THROTTLED, &tty->flags))
			break;

		switch (req->status) {
		case -ESHUTDOWN:
			disconnect = true;
			pr_vdebug(PREFIX "%d: shutdown\n", port->port_num);
			break;

		default:
			/* presumably a transient fault */
			pr_warning(PREFIX "%d: unexpected RX status %d\n",
					port->port_num, req->status);
			/* FALLTHROUGH */
		case 0:
			/* normal completion */
			break;
		}

		/* push data to (open) tty */
		if (req->actual) {
			char		*packet = req->buf;
			unsigned	size = req->actual;
			unsigned	n;
			int		count;

			/* we may have pushed part of this packet already... */
			n = port->n_read;
			if (n) {
				packet += n;
				size -= n;
			}

			count = tty_insert_flip_string(tty, packet, size);
			port->nbytes_to_tty += count;
			if (count)
				do_push = true;
			if (count != size) {
				/* stop pushing; TTY layer can't handle more */
				port->n_read += count;
				pr_vdebug(PREFIX "%d: rx block %d/%d\n",
						port->port_num,
						count, req->actual);
				break;
			}
			port->n_read = 0;
		}
recycle:
		list_move(&req->list, &port->read_pool);
		port->read_started--;
	}

	/* Push from tty to ldisc; without low_latency set this is handled by
	 * a workqueue, so we won't get callbacks and can hold port_lock
	 */
	if (tty && do_push)
		tty_flip_buffer_push(tty);

	/* We want our data queue to become empty ASAP, keeping data
	 * in the tty and ldisc (not here).  If we couldn't push any
	 * this time around, there may be trouble unless there's an
	 * implicit tty_unthrottle() call on its way...
	 *
	 * REVISIT we should probably add a timer to keep the work queue
	 * from starving ... but it's not clear that case ever happens.
	 */
	if (!list_empty(queue) && tty) {
		if (!test_bit(TTY_THROTTLED, &tty->flags)) {
			if (do_push)
				queue_work(gserial_wq, &port->push);
			else
				pr_warning(PREFIX "%d: RX not scheduled?\n",
					port->port_num);
		}
	}

	/* If we're still connected, refill the USB RX queue. */
	if (!disconnect && port->port_usb)
		gs_start_rx(port);

	spin_unlock_irq(&port->port_lock);
}
629
/*
 * OUT endpoint completion: park the finished request on read_queue and
 * kick the push worker, which hands the data to the tty layer.
 * Runs in the UDC's completion context, hence the irqsave locking.
 */
static void gs_read_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct gs_port	*port = ep->driver_data;
	unsigned long flags;

	/* Queue all received data until the tty layer is ready for it. */
	spin_lock_irqsave(&port->port_lock, flags);
	port->nbytes_from_host += req->actual;
	list_add_tail(&req->list, &port->read_queue);
	queue_work(gserial_wq, &port->push);
	spin_unlock_irqrestore(&port->port_lock, flags);
}
642
/*
 * IN endpoint completion: return the request to write_pool and, unless
 * the link has shut down, restart transmission to drain any data still
 * buffered.  Runs in the UDC's completion context.
 */
static void gs_write_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct gs_port	*port = ep->driver_data;
	unsigned long flags;

	spin_lock_irqsave(&port->port_lock, flags);
	port->nbytes_to_host += req->actual;
	list_add(&req->list, &port->write_pool);
	port->write_started--;

	switch (req->status) {
	default:
		/* presumably a transient fault */
		pr_warning("%s: unexpected %s status %d\n",
				__func__, ep->name, req->status);
		/* FALL THROUGH */
	case 0:
		/* normal completion: keep the pipeline full */
		if (port->port_usb)
			gs_start_tx(port);
		break;

	case -ESHUTDOWN:
		/* disconnect */
		pr_vdebug("%s: %s shutdown\n", __func__, ep->name);
		break;
	}

	spin_unlock_irqrestore(&port->port_lock, flags);
}
673
Jim Sung28609d42010-11-04 18:47:51 -0700674static void gs_free_requests(struct usb_ep *ep, struct list_head *head,
675 int *allocated)
David Brownellc1dca562008-06-19 17:51:44 -0700676{
677 struct usb_request *req;
678
679 while (!list_empty(head)) {
680 req = list_entry(head->next, struct usb_request, list);
681 list_del(&req->list);
682 gs_free_req(ep, req);
Jim Sung28609d42010-11-04 18:47:51 -0700683 if (allocated)
684 (*allocated)--;
David Brownellc1dca562008-06-19 17:51:44 -0700685 }
686}
687
688static int gs_alloc_requests(struct usb_ep *ep, struct list_head *head,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700689 int num, int size, void (*fn)(struct usb_ep *, struct usb_request *),
Jim Sung28609d42010-11-04 18:47:51 -0700690 int *allocated)
David Brownellc1dca562008-06-19 17:51:44 -0700691{
692 int i;
693 struct usb_request *req;
694
695 /* Pre-allocate up to QUEUE_SIZE transfers, but if we can't
696 * do quite that many this time, don't fail ... we just won't
697 * be as speedy as we might otherwise be.
698 */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700699 for (i = 0; i < num; i++) {
700 req = gs_alloc_req(ep, size, GFP_ATOMIC);
David Brownellc1dca562008-06-19 17:51:44 -0700701 if (!req)
702 return list_empty(head) ? -ENOMEM : 0;
703 req->complete = fn;
704 list_add_tail(&req->list, head);
Jim Sung28609d42010-11-04 18:47:51 -0700705 if (allocated)
706 (*allocated)++;
David Brownellc1dca562008-06-19 17:51:44 -0700707 }
708 return 0;
709}
710
/**
 * gs_start_io - start USB I/O streams
 * @port: port whose endpoints should start streaming
 * Context: holding port_lock; port_tty and port_usb are non-null
 *
 * We only start I/O when something is connected to both sides of
 * this port.  If nothing is listening on the host side, we may
 * be pointlessly filling up our TX buffers and FIFO.
 *
 * Returns 0 on success or a negative errno; on failure all requests
 * allocated here have been freed again.
 */
static int gs_start_io(struct gs_port *port)
{
	struct list_head	*head = &port->read_pool;
	struct usb_ep		*ep = port->port_usb->out;
	int			status;
	unsigned		started;

	/* Allocate RX and TX I/O buffers.  We can't easily do this much
	 * earlier (with GFP_KERNEL) because the requests are coupled to
	 * endpoints, as are the packet sizes we'll be using.  Different
	 * configurations may use different endpoints with a given port;
	 * and high speed vs full speed changes packet sizes too.
	 */
	status = gs_alloc_requests(ep, head, RX_QUEUE_SIZE, RX_BUF_SIZE,
			 gs_read_complete, &port->read_allocated);
	if (status)
		return status;

	status = gs_alloc_requests(port->port_usb->in, &port->write_pool,
			TX_QUEUE_SIZE, TX_BUF_SIZE, gs_write_complete,
			&port->write_allocated);
	if (status) {
		/* unwind the RX allocation made above */
		gs_free_requests(ep, head, &port->read_allocated);
		return status;
	}

	/* queue read requests */
	port->n_read = 0;
	started = gs_start_rx(port);

	/* gs_start_rx() drops the lock; the link may have gone away */
	if (!port->port_usb)
		return -EIO;
	/* unblock any pending writes into our circular buffer */
	if (started) {
		tty_wakeup(port->port_tty);
	} else {
		gs_free_requests(ep, head, &port->read_allocated);
		gs_free_requests(port->port_usb->in, &port->write_pool,
			&port->write_allocated);
		status = -EIO;
	}

	return status;
}
763
764/*-------------------------------------------------------------------------*/
765
766/* TTY Driver */
767
/*
 * gs_open sets up the link between a gs_port and its associated TTY.
 * That link is broken *only* by TTY close(), and all driver methods
 * know that.
 *
 * Open states are serialized through port->openclose plus the per-port
 * mutex: exactly one opener does the "real open" work while concurrent
 * openers spin on -EBUSY until it finishes.
 *
 * Returns 0 on success, -ENODEV if the port has been removed, or the
 * error from allocating the write buffer.
 */
static int gs_open(struct tty_struct *tty, struct file *file)
{
	int		port_num = tty->index;
	struct gs_port	*port;
	int		status;

	do {
		mutex_lock(&ports[port_num].lock);
		port = ports[port_num].port;
		if (!port)
			status = -ENODEV;
		else {
			spin_lock_irq(&port->port_lock);

			/* already open? Great. */
			if (port->open_count) {
				status = 0;
				port->open_count++;

			/* currently opening/closing? wait ... */
			} else if (port->openclose) {
				status = -EBUSY;

			/* ... else we do the work */
			} else {
				status = -EAGAIN;
				port->openclose = true;
			}
			spin_unlock_irq(&port->port_lock);
		}
		mutex_unlock(&ports[port_num].lock);

		switch (status) {
		default:
			/* fully handled */
			return status;
		case -EAGAIN:
			/* must do the work */
			break;
		case -EBUSY:
			/* wait for EAGAIN task to finish */
			msleep(1);
			/* REVISIT could have a waitchannel here, if
			 * concurrent open performance is important
			 */
			break;
		}
	} while (status != -EAGAIN);

	/* Do the "real open" */
	spin_lock_irq(&port->port_lock);

	/* allocate circular buffer on first open */
	if (port->port_write_buf.buf_buf == NULL) {

		/* drop the spinlock; gs_buf_alloc() may sleep (GFP_KERNEL) */
		spin_unlock_irq(&port->port_lock);
		status = gs_buf_alloc(&port->port_write_buf, WRITE_BUF_SIZE);
		spin_lock_irq(&port->port_lock);

		if (status) {
			pr_debug("gs_open: ttyGS%d (%p,%p) no buffer\n",
				port->port_num, tty, file);
			port->openclose = false;
			goto exit_unlock_port;
		}
	}

	/* REVISIT if REMOVED (ports[].port NULL), abort the open
	 * to let rmmod work faster (but this way isn't wrong).
	 */

	/* REVISIT maybe wait for "carrier detect" */

	tty->driver_data = port;
	port->port_tty = tty;

	port->open_count = 1;
	port->openclose = false;

	/* low_latency means ldiscs work is carried in the same context
	 * of tty_flip_buffer_push.  The same can be called from IRQ with
	 * low_latency = 0.  But better to use a dedicated worker thread
	 * to push the data.
	 */
	tty->low_latency = 1;

	/* if connected, start the I/O stream */
	if (port->port_usb) {
		struct gserial	*gser = port->port_usb;

		pr_debug("gs_open: start ttyGS%d\n", port->port_num);
		gs_start_io(port);

		if (gser->connect)
			gser->connect(gser);
	}

	pr_debug("gs_open: ttyGS%d (%p,%p)\n", port->port_num, tty, file);

	status = 0;

exit_unlock_port:
	spin_unlock_irq(&port->port_lock);
	return status;
}
878
879static int gs_writes_finished(struct gs_port *p)
880{
881 int cond;
882
883 /* return true on disconnect or empty buffer */
884 spin_lock_irq(&p->port_lock);
885 cond = (p->port_usb == NULL) || !gs_buf_data_avail(&p->port_write_buf);
886 spin_unlock_irq(&p->port_lock);
887
888 return cond;
889}
890
/*
 * TTY close(): the last closer waits (bounded by GS_CLOSE_TIMEOUT) for
 * buffered write data to drain, then tears down the tty<->port link and
 * frees or scrubs the I/O state depending on whether the USB side is
 * still connected.  Nested closes just drop the open count.
 */
static void gs_close(struct tty_struct *tty, struct file *file)
{
	struct gs_port *port = tty->driver_data;
	struct gserial	*gser;

	spin_lock_irq(&port->port_lock);

	if (port->open_count != 1) {
		if (port->open_count == 0)
			WARN_ON(1);
		else
			--port->open_count;
		goto exit;
	}

	pr_debug("gs_close: ttyGS%d (%p,%p) ...\n", port->port_num, tty, file);

	/* mark port as closing but in use; we can drop port lock
	 * and sleep if necessary
	 */
	port->openclose = true;
	port->open_count = 0;

	gser = port->port_usb;
	if (gser && gser->disconnect)
		gser->disconnect(gser);

	/* wait for circular write buffer to drain, disconnect, or at
	 * most GS_CLOSE_TIMEOUT seconds; then discard the rest
	 */
	if (gs_buf_data_avail(&port->port_write_buf) > 0 && gser) {
		spin_unlock_irq(&port->port_lock);
		wait_event_interruptible_timeout(port->drain_wait,
					gs_writes_finished(port),
					GS_CLOSE_TIMEOUT * HZ);
		spin_lock_irq(&port->port_lock);
		/* re-read: the link may have dropped while we slept */
		gser = port->port_usb;
	}

	/* Iff we're disconnected, there can be no I/O in flight so it's
	 * ok to free the circular buffer; else just scrub it.  And don't
	 * let the push work queue fire again until we're re-opened.
	 */
	if (gser == NULL)
		gs_buf_free(&port->port_write_buf);
	else
		gs_buf_clear(&port->port_write_buf);

	tty->driver_data = NULL;
	port->port_tty = NULL;

	port->openclose = false;

	pr_debug("gs_close: ttyGS%d (%p,%p) done!\n",
			port->port_num, tty, file);

	wake_up_interruptible(&port->close_wait);

	/*
	 * Freeing the previously queued requests as they are
	 * allocated again as a part of gs_open()
	 *
	 * NOTE(review): the lock is dropped around the FIFO flushes while
	 * 'gser' is still dereferenced afterwards; this relies on the
	 * gadget side not disconnecting in that window -- confirm against
	 * gserial_disconnect() in the full file.
	 */
	if (port->port_usb) {
		spin_unlock_irq(&port->port_lock);
		usb_ep_fifo_flush(gser->out);
		usb_ep_fifo_flush(gser->in);
		spin_lock_irq(&port->port_lock);
		gs_free_requests(gser->out, &port->read_queue, NULL);
		gs_free_requests(gser->out, &port->read_pool, NULL);
		gs_free_requests(gser->in, &port->write_pool, NULL);
	}
	port->read_allocated = port->read_started =
		port->write_allocated = port->write_started = 0;
exit:
	spin_unlock_irq(&port->port_lock);
}
967
968static int gs_write(struct tty_struct *tty, const unsigned char *buf, int count)
969{
970 struct gs_port *port = tty->driver_data;
971 unsigned long flags;
972 int status;
973
974 pr_vdebug("gs_write: ttyGS%d (%p) writing %d bytes\n",
975 port->port_num, tty, count);
976
977 spin_lock_irqsave(&port->port_lock, flags);
978 if (count)
979 count = gs_buf_put(&port->port_write_buf, buf, count);
980 /* treat count == 0 as flush_chars() */
981 if (port->port_usb)
982 status = gs_start_tx(port);
983 spin_unlock_irqrestore(&port->port_lock, flags);
984
985 return count;
986}
987
988static int gs_put_char(struct tty_struct *tty, unsigned char ch)
989{
990 struct gs_port *port = tty->driver_data;
991 unsigned long flags;
992 int status;
993
994 pr_vdebug("gs_put_char: (%d,%p) char=0x%x, called from %p\n",
995 port->port_num, tty, ch, __builtin_return_address(0));
996
997 spin_lock_irqsave(&port->port_lock, flags);
998 status = gs_buf_put(&port->port_write_buf, &ch, 1);
999 spin_unlock_irqrestore(&port->port_lock, flags);
1000
1001 return status;
1002}
1003
1004static void gs_flush_chars(struct tty_struct *tty)
1005{
1006 struct gs_port *port = tty->driver_data;
1007 unsigned long flags;
1008
1009 pr_vdebug("gs_flush_chars: (%d,%p)\n", port->port_num, tty);
1010
1011 spin_lock_irqsave(&port->port_lock, flags);
1012 if (port->port_usb)
1013 gs_start_tx(port);
1014 spin_unlock_irqrestore(&port->port_lock, flags);
1015}
1016
1017static int gs_write_room(struct tty_struct *tty)
1018{
1019 struct gs_port *port = tty->driver_data;
1020 unsigned long flags;
1021 int room = 0;
1022
1023 spin_lock_irqsave(&port->port_lock, flags);
1024 if (port->port_usb)
1025 room = gs_buf_space_avail(&port->port_write_buf);
1026 spin_unlock_irqrestore(&port->port_lock, flags);
1027
1028 pr_vdebug("gs_write_room: (%d,%p) room=%d\n",
1029 port->port_num, tty, room);
1030
1031 return room;
1032}
1033
1034static int gs_chars_in_buffer(struct tty_struct *tty)
1035{
1036 struct gs_port *port = tty->driver_data;
1037 unsigned long flags;
1038 int chars = 0;
1039
1040 spin_lock_irqsave(&port->port_lock, flags);
1041 chars = gs_buf_data_avail(&port->port_write_buf);
1042 spin_unlock_irqrestore(&port->port_lock, flags);
1043
1044 pr_vdebug("gs_chars_in_buffer: (%d,%p) chars=%d\n",
1045 port->port_num, tty, chars);
1046
1047 return chars;
1048}
1049
1050/* undo side effects of setting TTY_THROTTLED */
1051static void gs_unthrottle(struct tty_struct *tty)
1052{
1053 struct gs_port *port = tty->driver_data;
1054 unsigned long flags;
David Brownellc1dca562008-06-19 17:51:44 -07001055
1056 spin_lock_irqsave(&port->port_lock, flags);
David Brownell937ef732008-07-07 12:16:08 -07001057 if (port->port_usb) {
1058 /* Kickstart read queue processing. We don't do xon/xoff,
1059 * rts/cts, or other handshaking with the host, but if the
1060 * read queue backs up enough we'll be NAKing OUT packets.
1061 */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001062 queue_work(gserial_wq, &port->push);
David Brownell937ef732008-07-07 12:16:08 -07001063 pr_vdebug(PREFIX "%d: unthrottle\n", port->port_num);
1064 }
David Brownellc1dca562008-06-19 17:51:44 -07001065 spin_unlock_irqrestore(&port->port_lock, flags);
David Brownellc1dca562008-06-19 17:51:44 -07001066}
1067
David Brownell1f1ba112008-08-06 18:49:57 -07001068static int gs_break_ctl(struct tty_struct *tty, int duration)
1069{
1070 struct gs_port *port = tty->driver_data;
1071 int status = 0;
1072 struct gserial *gser;
1073
1074 pr_vdebug("gs_break_ctl: ttyGS%d, send break (%d) \n",
1075 port->port_num, duration);
1076
1077 spin_lock_irq(&port->port_lock);
1078 gser = port->port_usb;
1079 if (gser && gser->send_break)
1080 status = gser->send_break(gser, duration);
1081 spin_unlock_irq(&port->port_lock);
1082
1083 return status;
1084}
1085
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001086static int gs_tiocmget(struct tty_struct *tty)
1087{
1088 struct gs_port *port = tty->driver_data;
1089 struct gserial *gser;
1090 unsigned int result = 0;
1091
1092 spin_lock_irq(&port->port_lock);
1093 gser = port->port_usb;
1094 if (!gser) {
1095 result = -ENODEV;
1096 goto fail;
1097 }
1098
1099 if (gser->get_dtr)
1100 result |= (gser->get_dtr(gser) ? TIOCM_DTR : 0);
1101
1102 if (gser->get_rts)
1103 result |= (gser->get_rts(gser) ? TIOCM_RTS : 0);
1104
1105 if (gser->serial_state & TIOCM_CD)
1106 result |= TIOCM_CD;
1107
1108 if (gser->serial_state & TIOCM_RI)
1109 result |= TIOCM_RI;
1110fail:
1111 spin_unlock_irq(&port->port_lock);
1112 return result;
1113}
1114
1115static int gs_tiocmset(struct tty_struct *tty,
1116 unsigned int set, unsigned int clear)
1117{
1118 struct gs_port *port = tty->driver_data;
1119 struct gserial *gser;
1120 int status = 0;
1121
1122 spin_lock_irq(&port->port_lock);
1123 gser = port->port_usb;
1124 if (!gser) {
1125 status = -ENODEV;
1126 goto fail;
1127 }
1128
1129 if (set & TIOCM_RI) {
1130 if (gser->send_ring_indicator) {
1131 gser->serial_state |= TIOCM_RI;
1132 status = gser->send_ring_indicator(gser, 1);
1133 }
1134 }
1135 if (clear & TIOCM_RI) {
1136 if (gser->send_ring_indicator) {
1137 gser->serial_state &= ~TIOCM_RI;
1138 status = gser->send_ring_indicator(gser, 0);
1139 }
1140 }
1141 if (set & TIOCM_CD) {
1142 if (gser->send_carrier_detect) {
1143 gser->serial_state |= TIOCM_CD;
1144 status = gser->send_carrier_detect(gser, 1);
1145 }
1146 }
1147 if (clear & TIOCM_CD) {
1148 if (gser->send_carrier_detect) {
1149 gser->serial_state &= ~TIOCM_CD;
1150 status = gser->send_carrier_detect(gser, 0);
1151 }
1152 }
1153fail:
1154 spin_unlock_irq(&port->port_lock);
1155 return status;
1156}
/* TTY operations for the /dev/ttyGS* ports; installed via
 * tty_set_operations() in gserial_setup().
 */
static const struct tty_operations gs_tty_ops = {
	.open =			gs_open,
	.close =		gs_close,
	.write =		gs_write,
	.put_char =		gs_put_char,
	.flush_chars =		gs_flush_chars,
	.write_room =		gs_write_room,
	.chars_in_buffer =	gs_chars_in_buffer,
	.unthrottle =		gs_unthrottle,
	.break_ctl =		gs_break_ctl,
	.tiocmget =		gs_tiocmget,
	.tiocmset =		gs_tiocmset,
};
1170
1171/*-------------------------------------------------------------------------*/
1172
1173static struct tty_driver *gs_tty_driver;
1174
Benoit Gobya035fb52011-12-14 18:04:07 -08001175static int
David Brownellc1dca562008-06-19 17:51:44 -07001176gs_port_alloc(unsigned port_num, struct usb_cdc_line_coding *coding)
1177{
1178 struct gs_port *port;
1179
1180 port = kzalloc(sizeof(struct gs_port), GFP_KERNEL);
1181 if (port == NULL)
1182 return -ENOMEM;
1183
1184 spin_lock_init(&port->port_lock);
1185 init_waitqueue_head(&port->close_wait);
1186 init_waitqueue_head(&port->drain_wait);
1187
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001188 INIT_WORK(&port->push, gs_rx_push);
David Brownellc1dca562008-06-19 17:51:44 -07001189
1190 INIT_LIST_HEAD(&port->read_pool);
David Brownell937ef732008-07-07 12:16:08 -07001191 INIT_LIST_HEAD(&port->read_queue);
David Brownellc1dca562008-06-19 17:51:44 -07001192 INIT_LIST_HEAD(&port->write_pool);
1193
1194 port->port_num = port_num;
1195 port->port_line_coding = *coding;
1196
1197 ports[port_num].port = port;
1198
1199 return 0;
1200}
1201
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001202
1203#if defined(CONFIG_DEBUG_FS)
1204
1205#define BUF_SIZE 512
1206
1207static ssize_t debug_read_status(struct file *file, char __user *ubuf,
1208 size_t count, loff_t *ppos)
1209{
1210 struct gs_port *ui_dev = file->private_data;
1211 struct tty_struct *tty;
1212 struct gserial *gser;
1213 char *buf;
1214 unsigned long flags;
1215 int i = 0;
1216 int ret;
1217 int result = 0;
1218
1219 tty = ui_dev->port_tty;
1220 gser = ui_dev->port_usb;
1221
1222 buf = kzalloc(sizeof(char) * BUF_SIZE, GFP_KERNEL);
1223 if (!buf)
1224 return -ENOMEM;
1225
1226 spin_lock_irqsave(&ui_dev->port_lock, flags);
1227
1228 i += scnprintf(buf + i, BUF_SIZE - i,
1229 "nbytes_from_host: %lu\n", ui_dev->nbytes_from_host);
1230
1231 i += scnprintf(buf + i, BUF_SIZE - i,
1232 "nbytes_to_tty: %lu\n", ui_dev->nbytes_to_tty);
1233
1234 i += scnprintf(buf + i, BUF_SIZE - i, "nbytes_with_usb_OUT_txr: %lu\n",
1235 (ui_dev->nbytes_from_host - ui_dev->nbytes_to_tty));
1236
1237 i += scnprintf(buf + i, BUF_SIZE - i,
1238 "nbytes_from_tty: %lu\n", ui_dev->nbytes_from_tty);
1239
1240 i += scnprintf(buf + i, BUF_SIZE - i,
1241 "nbytes_to_host: %lu\n", ui_dev->nbytes_to_host);
1242
1243 i += scnprintf(buf + i, BUF_SIZE - i, "nbytes_with_usb_IN_txr: %lu\n",
1244 (ui_dev->nbytes_from_tty - ui_dev->nbytes_to_host));
1245
1246 if (tty)
1247 i += scnprintf(buf + i, BUF_SIZE - i,
1248 "tty_flags: %lu\n", tty->flags);
1249
1250 if (gser->get_dtr) {
1251 result |= (gser->get_dtr(gser) ? TIOCM_DTR : 0);
1252 i += scnprintf(buf + i, BUF_SIZE - i,
1253 "DTR_status: %d\n", result);
1254 }
1255
1256 spin_unlock_irqrestore(&ui_dev->port_lock, flags);
1257
1258 ret = simple_read_from_buffer(ubuf, count, ppos, buf, i);
1259
1260 kfree(buf);
1261
1262 return ret;
1263}
1264
1265static ssize_t debug_write_reset(struct file *file, const char __user *buf,
1266 size_t count, loff_t *ppos)
1267{
1268 struct gs_port *ui_dev = file->private_data;
1269 unsigned long flags;
1270
1271 spin_lock_irqsave(&ui_dev->port_lock, flags);
1272 ui_dev->nbytes_from_host = ui_dev->nbytes_to_tty =
1273 ui_dev->nbytes_from_tty = ui_dev->nbytes_to_host = 0;
1274 spin_unlock_irqrestore(&ui_dev->port_lock, flags);
1275
1276 return count;
1277}
1278
/* debugfs open: stash the gs_port pointer that was attached to the
 * inode at debugfs_create_file() time, for the read/write handlers.
 */
static int serial_debug_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}
1284
1285const struct file_operations debug_rst_ops = {
1286 .open = serial_debug_open,
1287 .write = debug_write_reset,
1288};
1289
1290const struct file_operations debug_adb_ops = {
1291 .open = serial_debug_open,
1292 .read = debug_read_status,
1293};
1294
1295static void usb_debugfs_init(struct gs_port *ui_dev, int port_num)
1296{
1297 struct dentry *dent;
1298 char buf[48];
1299
1300 snprintf(buf, 48, "usb_serial%d", port_num);
1301 dent = debugfs_create_dir(buf, 0);
1302 if (IS_ERR(dent))
1303 return;
1304
1305 debugfs_create_file("readstatus", 0444, dent, ui_dev, &debug_adb_ops);
1306 debugfs_create_file("reset", 0222, dent, ui_dev, &debug_rst_ops);
1307}
1308#else
1309static void usb_debugfs_init(struct gs_port *ui_dev) {}
1310#endif
1311
David Brownellc1dca562008-06-19 17:51:44 -07001312/**
1313 * gserial_setup - initialize TTY driver for one or more ports
1314 * @g: gadget to associate with these ports
1315 * @count: how many ports to support
1316 * Context: may sleep
1317 *
1318 * The TTY stack needs to know in advance how many devices it should
1319 * plan to manage. Use this call to set up the ports you will be
1320 * exporting through USB. Later, connect them to functions based
1321 * on what configuration is activated by the USB host; and disconnect
1322 * them as appropriate.
1323 *
1324 * An example would be a two-configuration device in which both
1325 * configurations expose port 0, but through different functions.
1326 * One configuration could even expose port 1 while the other
1327 * one doesn't.
1328 *
1329 * Returns negative errno or zero.
1330 */
Benoit Gobya035fb52011-12-14 18:04:07 -08001331int gserial_setup(struct usb_gadget *g, unsigned count)
David Brownellc1dca562008-06-19 17:51:44 -07001332{
1333 unsigned i;
1334 struct usb_cdc_line_coding coding;
1335 int status;
1336
1337 if (count == 0 || count > N_PORTS)
1338 return -EINVAL;
1339
1340 gs_tty_driver = alloc_tty_driver(count);
1341 if (!gs_tty_driver)
1342 return -ENOMEM;
1343
David Brownellc1dca562008-06-19 17:51:44 -07001344 gs_tty_driver->driver_name = "g_serial";
David Brownell937ef732008-07-07 12:16:08 -07001345 gs_tty_driver->name = PREFIX;
David Brownellc1dca562008-06-19 17:51:44 -07001346 /* uses dynamically assigned dev_t values */
1347
1348 gs_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
1349 gs_tty_driver->subtype = SERIAL_TYPE_NORMAL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001350 gs_tty_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV
1351 | TTY_DRIVER_RESET_TERMIOS;
David Brownellc1dca562008-06-19 17:51:44 -07001352 gs_tty_driver->init_termios = tty_std_termios;
1353
1354 /* 9600-8-N-1 ... matches defaults expected by "usbser.sys" on
1355 * MS-Windows. Otherwise, most of these flags shouldn't affect
1356 * anything unless we were to actually hook up to a serial line.
1357 */
1358 gs_tty_driver->init_termios.c_cflag =
1359 B9600 | CS8 | CREAD | HUPCL | CLOCAL;
1360 gs_tty_driver->init_termios.c_ispeed = 9600;
1361 gs_tty_driver->init_termios.c_ospeed = 9600;
1362
Harvey Harrison551509d2009-02-11 14:11:36 -08001363 coding.dwDTERate = cpu_to_le32(9600);
David Brownellc1dca562008-06-19 17:51:44 -07001364 coding.bCharFormat = 8;
1365 coding.bParityType = USB_CDC_NO_PARITY;
1366 coding.bDataBits = USB_CDC_1_STOP_BITS;
1367
1368 tty_set_operations(gs_tty_driver, &gs_tty_ops);
1369
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001370 gserial_wq = create_singlethread_workqueue("k_gserial");
1371 if (!gserial_wq) {
1372 status = -ENOMEM;
1373 goto fail;
1374 }
1375
David Brownellc1dca562008-06-19 17:51:44 -07001376 /* make devices be openable */
1377 for (i = 0; i < count; i++) {
1378 mutex_init(&ports[i].lock);
1379 status = gs_port_alloc(i, &coding);
1380 if (status) {
1381 count = i;
1382 goto fail;
1383 }
1384 }
1385 n_ports = count;
1386
1387 /* export the driver ... */
1388 status = tty_register_driver(gs_tty_driver);
1389 if (status) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001390 put_tty_driver(gs_tty_driver);
David Brownellc1dca562008-06-19 17:51:44 -07001391 pr_err("%s: cannot register, err %d\n",
1392 __func__, status);
1393 goto fail;
1394 }
1395
1396 /* ... and sysfs class devices, so mdev/udev make /dev/ttyGS* */
1397 for (i = 0; i < count; i++) {
1398 struct device *tty_dev;
1399
1400 tty_dev = tty_register_device(gs_tty_driver, i, &g->dev);
1401 if (IS_ERR(tty_dev))
1402 pr_warning("%s: no classdev for port %d, err %ld\n",
1403 __func__, i, PTR_ERR(tty_dev));
1404 }
1405
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001406 for (i = 0; i < count; i++)
1407 usb_debugfs_init(ports[i].port, i);
1408
David Brownellc1dca562008-06-19 17:51:44 -07001409 pr_debug("%s: registered %d ttyGS* device%s\n", __func__,
1410 count, (count == 1) ? "" : "s");
1411
1412 return status;
1413fail:
1414 while (count--)
1415 kfree(ports[count].port);
Pavankumar Kondetif0f95d82011-09-23 11:38:57 +05301416 if (gserial_wq)
1417 destroy_workqueue(gserial_wq);
David Brownellc1dca562008-06-19 17:51:44 -07001418 put_tty_driver(gs_tty_driver);
1419 gs_tty_driver = NULL;
1420 return status;
1421}
1422
1423static int gs_closed(struct gs_port *port)
1424{
1425 int cond;
1426
1427 spin_lock_irq(&port->port_lock);
1428 cond = (port->open_count == 0) && !port->openclose;
1429 spin_unlock_irq(&port->port_lock);
1430 return cond;
1431}
1432
1433/**
1434 * gserial_cleanup - remove TTY-over-USB driver and devices
1435 * Context: may sleep
1436 *
1437 * This is called to free all resources allocated by @gserial_setup().
1438 * Accordingly, it may need to wait until some open /dev/ files have
1439 * closed.
1440 *
1441 * The caller must have issued @gserial_disconnect() for any ports
1442 * that had previously been connected, so that there is never any
1443 * I/O pending when it's called.
1444 */
void gserial_cleanup(void)
{
	unsigned i;
	struct gs_port *port;

	/* no-op when gserial_setup() never ran (or already failed) */
	if (!gs_tty_driver)
		return;

	/* start sysfs and /dev/ttyGS* node removal */
	for (i = 0; i < n_ports; i++)
		tty_unregister_device(gs_tty_driver, i);

	for (i = 0; i < n_ports; i++) {
		/* prevent new opens */
		mutex_lock(&ports[i].lock);
		port = ports[i].port;
		ports[i].port = NULL;
		mutex_unlock(&ports[i].lock);

		/* stop the deferred RX push before freeing the port */
		cancel_work_sync(&port->push);

		/* wait for old opens to finish */
		wait_event(port->close_wait, gs_closed(port));

		/* caller was required to gserial_disconnect() first */
		WARN_ON(port->port_usb != NULL);

		kfree(port);
	}
	n_ports = 0;

	destroy_workqueue(gserial_wq);
	tty_unregister_driver(gs_tty_driver);
	/* drop the alloc_tty_driver() reference */
	put_tty_driver(gs_tty_driver);
	gs_tty_driver = NULL;

	pr_debug("%s: cleaned up ttyGS* support\n", __func__);
}
1482
1483/**
1484 * gserial_connect - notify TTY I/O glue that USB link is active
1485 * @gser: the function, set up with endpoints and descriptors
1486 * @port_num: which port is active
1487 * Context: any (usually from irq)
1488 *
 * This is called to activate endpoints and let the TTY layer know that
1490 * the connection is active ... not unlike "carrier detect". It won't
1491 * necessarily start I/O queues; unless the TTY is held open by any
1492 * task, there would be no point. However, the endpoints will be
1493 * activated so the USB host can perform I/O, subject to basic USB
1494 * hardware flow control.
1495 *
 * Caller needs to have set up the endpoints and USB function in @gser
1497 * before calling this, as well as the appropriate (speed-specific)
1498 * endpoint descriptors, and also have set up the TTY driver by calling
1499 * @gserial_setup().
1500 *
1501 * Returns negative errno or zero.
1502 * On success, ep->driver_data will be overwritten.
1503 */
int gserial_connect(struct gserial *gser, u8 port_num)
{
	struct gs_port *port;
	unsigned long flags;
	int status;

	if (!gs_tty_driver || port_num >= n_ports)
		return -ENXIO;

	/* we "know" gserial_cleanup() hasn't been called */
	port = ports[port_num].port;

	/* activate the endpoints */
	status = usb_ep_enable(gser->in);
	if (status < 0)
		return status;
	gser->in->driver_data = port;

	status = usb_ep_enable(gser->out);
	if (status < 0)
		goto fail_out;
	gser->out->driver_data = port;

	/* then tell the tty glue that I/O can work */
	spin_lock_irqsave(&port->port_lock, flags);
	gser->ioport = port;
	port->port_usb = gser;

	/* REVISIT unclear how best to handle this state...
	 * we don't really couple it with the Linux TTY.
	 */
	gser->port_line_coding = port->port_line_coding;

	/* REVISIT if waiting on "carrier detect", signal. */

	/* if it's already open, start I/O ... and notify the serial
	 * protocol about open/close status (connect/disconnect).
	 */
	if (port->open_count) {
		pr_debug("gserial_connect: start ttyGS%d\n", port->port_num);
		gs_start_io(port);
		if (gser->connect)
			gser->connect(gser);
	} else {
		/* not open: report "no DTE" to the protocol, if it cares */
		if (gser->disconnect)
			gser->disconnect(gser);
	}

	spin_unlock_irqrestore(&port->port_lock, flags);

	/* status is the (non-negative) result of the last usb_ep_enable() */
	return status;

fail_out:
	/* undo the IN-endpoint enable before reporting failure */
	usb_ep_disable(gser->in);
	gser->in->driver_data = NULL;
	return status;
}
1561
1562/**
1563 * gserial_disconnect - notify TTY I/O glue that USB link is inactive
1564 * @gser: the function, on which gserial_connect() was called
1565 * Context: any (usually from irq)
1566 *
1567 * This is called to deactivate endpoints and let the TTY layer know
1568 * that the connection went inactive ... not unlike "hangup".
1569 *
1570 * On return, the state is as if gserial_connect() had never been called;
1571 * there is no active USB I/O on these endpoints.
1572 */
void gserial_disconnect(struct gserial *gser)
{
	struct gs_port *port = gser->ioport;
	unsigned long flags;

	/* already disconnected (or never connected) */
	if (!port)
		return;

	/* tell the TTY glue not to do I/O here any more */
	spin_lock_irqsave(&port->port_lock, flags);

	/* REVISIT as above: how best to track this? */
	port->port_line_coding = gser->port_line_coding;

	port->port_usb = NULL;
	gser->ioport = NULL;
	if (port->open_count > 0 || port->openclose) {
		/* unblock any gs_close() drain wait, and hang up the TTY */
		wake_up_interruptible(&port->drain_wait);
		if (port->port_tty)
			tty_hangup(port->port_tty);
	}
	spin_unlock_irqrestore(&port->port_lock, flags);

	/* disable endpoints, aborting down any active I/O */
	usb_ep_disable(gser->out);
	gser->out->driver_data = NULL;

	usb_ep_disable(gser->in);
	gser->in->driver_data = NULL;

	/* finally, free any unused/unusable I/O buffers */
	spin_lock_irqsave(&port->port_lock, flags);
	if (port->open_count == 0 && !port->openclose)
		gs_buf_free(&port->port_write_buf);
	gs_free_requests(gser->out, &port->read_pool, NULL);
	gs_free_requests(gser->out, &port->read_queue, NULL);
	gs_free_requests(gser->in, &port->write_pool, NULL);

	port->read_allocated = port->read_started =
		port->write_allocated = port->write_started = 0;

	/* reset the debugfs traffic counters for the next session */
	port->nbytes_from_host = port->nbytes_to_tty =
			port->nbytes_from_tty = port->nbytes_to_host = 0;

	spin_unlock_irqrestore(&port->port_lock, flags);
}