/*
 * u_serial.c - utilities for USB gadget "serial port"/TTY support
 *
 * Copyright (C) 2003 Al Borchers (alborchers@steinerpoint.com)
 * Copyright (C) 2008 David Brownell
 * Copyright (C) 2008 by Nokia Corporation
 *
 * This code also borrows from usbserial.c, which is
 * Copyright (C) 1999 - 2002 Greg Kroah-Hartman (greg@kroah.com)
 * Copyright (C) 2000 Peter Berger (pberger@brimson.com)
 * Copyright (C) 2000 Al Borchers (alborchers@steinerpoint.com)
 *
 * This software is distributed under the terms of the GNU General
 * Public License ("GPL") as published by the Free Software Foundation,
 * either version 2 of that License or (at your option) any later version.
 */

/* #define VERBOSE_DEBUG */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/debugfs.h>

#include "u_serial.h"


/*
 * This component encapsulates the TTY layer glue needed to provide basic
 * "serial port" functionality through the USB gadget stack.  Each such
 * port is exposed through a /dev/ttyGS* node.
 *
 * After initialization (gserial_setup), these TTY port devices stay
 * available until they are removed (gserial_cleanup).  Each one may be
 * connected to a USB function (gserial_connect), or disconnected (with
 * gserial_disconnect) when the USB host issues a config change event.
 * Data can only flow when the port is connected to the host.
 *
 * A given TTY port can be made available in multiple configurations.
 * For example, each one might expose a ttyGS0 node which provides a
 * login application.  In one case that might use CDC ACM interface 0,
 * while another configuration might use interface 3 for that.  The
 * work to handle that (including descriptor management) is not part
 * of this component.
 *
 * Configurations may expose more than one TTY port.  For example, if
 * ttyGS0 provides login service, then ttyGS1 might provide dialer access
 * for a telephone or fax link.  And ttyGS2 might be something that just
 * needs a simple byte stream interface for some messaging protocol that
 * is managed in userspace ... OBEX, PTP, and MTP have been mentioned.
 */

#define PREFIX	"ttyGS"

/*
 * gserial is the lifecycle interface, used by USB functions
 * gs_port is the I/O nexus, used by the tty driver
 * tty_struct links to the tty/filesystem framework
 *
 * gserial <---> gs_port ... links will be null when the USB link is
 * inactive; managed by gserial_{connect,disconnect}().  each gserial
 * instance can wrap its own USB control protocol.
 *	gserial->ioport == usb_ep->driver_data ... gs_port
 *	gs_port->port_usb ... gserial
 *
 * gs_port <---> tty_struct ... links will be null when the TTY file
 * isn't opened; managed by gs_open()/gs_close()
 *	gs_port->port_tty ... tty_struct
 *	tty_struct->driver_data ... gs_port
 */

/* RX and TX queues can buffer QUEUE_SIZE packets before they hit the
 * next layer of buffering.  For TX that's a circular buffer; for RX
 * consider it a NOP.  A third layer is provided by the TTY code.
 */
#define TX_QUEUE_SIZE		8
#define TX_BUF_SIZE		4096
#define WRITE_BUF_SIZE		(8192+1)	/* TX only */

#define RX_QUEUE_SIZE		8
#define RX_BUF_SIZE		4096


/* circular buffer */
struct gs_buf {
	unsigned		buf_size;
	char			*buf_buf;
	char			*buf_get;
	char			*buf_put;
};

/*
 * The port structure holds info for each port, one for each minor number
 * (and thus for each /dev/ node).
 */
struct gs_port {
	spinlock_t		port_lock;	/* guard port_* access */

	struct gserial		*port_usb;
	struct tty_struct	*port_tty;

	unsigned		open_count;
	bool			openclose;	/* open/close in progress */
	u8			port_num;

	wait_queue_head_t	close_wait;	/* wait for last close */

	struct list_head	read_pool;
	int			read_started;
	int			read_allocated;
	struct list_head	read_queue;
	unsigned		n_read;
	struct work_struct	push;

	struct list_head	write_pool;
	int			write_started;
	int			write_allocated;
	struct gs_buf		port_write_buf;
	wait_queue_head_t	drain_wait;	/* wait while writes drain */

	/* REVISIT this state ... */
	struct usb_cdc_line_coding port_line_coding;	/* 8-N-1 etc */
	unsigned long		nbytes_from_host;
	unsigned long		nbytes_to_tty;
	unsigned long		nbytes_from_tty;
	unsigned long		nbytes_to_host;
};

/* increase N_PORTS if you need more */
#define N_PORTS		8
static struct portmaster {
	struct mutex	lock;			/* protect open/close */
	struct gs_port	*port;
} ports[N_PORTS];
static unsigned	n_ports;

static struct workqueue_struct *gserial_wq;

#define GS_CLOSE_TIMEOUT		15		/* seconds */



#ifdef VERBOSE_DEBUG
#define pr_vdebug(fmt, arg...) \
	pr_debug(fmt, ##arg)
#else
#define pr_vdebug(fmt, arg...) \
	({ if (0) pr_debug(fmt, ##arg); })
#endif

/*-------------------------------------------------------------------------*/

/* Circular Buffer */

/*
 * gs_buf_alloc
 *
 * Allocate a circular buffer and all associated memory.
 */
static int gs_buf_alloc(struct gs_buf *gb, unsigned size)
{
	gb->buf_buf = kmalloc(size, GFP_KERNEL);
	if (gb->buf_buf == NULL)
		return -ENOMEM;

	gb->buf_size = size;
	gb->buf_put = gb->buf_buf;
	gb->buf_get = gb->buf_buf;

	return 0;
}

/*
 * gs_buf_free
 *
 * Free the buffer and all associated memory.
 */
static void gs_buf_free(struct gs_buf *gb)
{
	kfree(gb->buf_buf);
	gb->buf_buf = NULL;
}

/*
 * gs_buf_clear
 *
 * Clear out all data in the circular buffer.
 */
static void gs_buf_clear(struct gs_buf *gb)
{
	gb->buf_get = gb->buf_put;
	/* equivalent to a get of all data available */
}

/*
 * gs_buf_data_avail
 *
 * Return the number of bytes of data written into the circular
 * buffer.
 */
static unsigned gs_buf_data_avail(struct gs_buf *gb)
{
	return (gb->buf_size + gb->buf_put - gb->buf_get) % gb->buf_size;
}

/*
 * gs_buf_space_avail
 *
 * Return the number of bytes of space available in the circular
 * buffer.
 */
static unsigned gs_buf_space_avail(struct gs_buf *gb)
{
	return (gb->buf_size + gb->buf_get - gb->buf_put - 1) % gb->buf_size;
}
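
/*
 * A worked example of the index arithmetic above (illustrative only):
 * with buf_size = 8, buf_put at offset 6 and buf_get at offset 2,
 *
 *	data_avail  = (8 + 6 - 2) % 8 = 4	bytes queued (offsets 2..5)
 *	space_avail = (8 + 2 - 6 - 1) % 8 = 3	bytes free (offsets 6, 7, 0)
 *
 * One slot is always left unused, so buf_put == buf_get unambiguously
 * means "empty" rather than "full".
 */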

/*
 * gs_buf_put
 *
 * Copy data from a user buffer and put it into the circular buffer.
 * Restrict to the amount of space available.
 *
 * Return the number of bytes copied.
 */
static unsigned
gs_buf_put(struct gs_buf *gb, const char *buf, unsigned count)
{
	unsigned len;

	len = gs_buf_space_avail(gb);
	if (count > len)
		count = len;

	if (count == 0)
		return 0;

	len = gb->buf_buf + gb->buf_size - gb->buf_put;
	if (count > len) {
		memcpy(gb->buf_put, buf, len);
		memcpy(gb->buf_buf, buf+len, count - len);
		gb->buf_put = gb->buf_buf + count - len;
	} else {
		memcpy(gb->buf_put, buf, count);
		if (count < len)
			gb->buf_put += count;
		else /* count == len */
			gb->buf_put = gb->buf_buf;
	}

	return count;
}

/*
 * gs_buf_get
 *
 * Get data from the circular buffer and copy to the given buffer.
 * Restrict to the amount of data available.
 *
 * Return the number of bytes copied.
 */
static unsigned
gs_buf_get(struct gs_buf *gb, char *buf, unsigned count)
{
	unsigned len;

	len = gs_buf_data_avail(gb);
	if (count > len)
		count = len;

	if (count == 0)
		return 0;

	len = gb->buf_buf + gb->buf_size - gb->buf_get;
	if (count > len) {
		memcpy(buf, gb->buf_get, len);
		memcpy(buf+len, gb->buf_buf, count - len);
		gb->buf_get = gb->buf_buf + count - len;
	} else {
		memcpy(buf, gb->buf_get, count);
		if (count < len)
			gb->buf_get += count;
		else /* count == len */
			gb->buf_get = gb->buf_buf;
	}

	return count;
}

/*-------------------------------------------------------------------------*/

/* I/O glue between TTY (upper) and USB function (lower) driver layers */

/*
 * gs_alloc_req
 *
 * Allocate a usb_request and its buffer.  Returns a pointer to the
 * usb_request or NULL if there is an error.
 */
struct usb_request *
gs_alloc_req(struct usb_ep *ep, unsigned len, gfp_t kmalloc_flags)
{
	struct usb_request *req;

	req = usb_ep_alloc_request(ep, kmalloc_flags);

	if (req != NULL) {
		req->length = len;
		req->buf = kmalloc(len, kmalloc_flags);
		if (req->buf == NULL) {
			usb_ep_free_request(ep, req);
			return NULL;
		}
	}

	return req;
}

/*
 * gs_free_req
 *
 * Free a usb_request and its buffer.
 */
void gs_free_req(struct usb_ep *ep, struct usb_request *req)
{
	kfree(req->buf);
	usb_ep_free_request(ep, req);
}
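
/*
 * Hypothetical usage sketch (not taken from this file): a USB function
 * driver that needs a request on, say, a notification endpoint would
 * typically pair these helpers roughly as follows, where
 * "my_notify_complete" is a placeholder completion handler:
 *
 *	struct usb_request *req;
 *
 *	req = gs_alloc_req(ep, length, GFP_ATOMIC);
 *	if (req) {
 *		req->complete = my_notify_complete;
 *		if (usb_ep_queue(ep, req, GFP_ATOMIC) < 0)
 *			gs_free_req(ep, req);
 *	}
 */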
334
335/*
336 * gs_send_packet
337 *
338 * If there is data to send, a packet is built in the given
339 * buffer and the size is returned. If there is no data to
340 * send, 0 is returned.
341 *
342 * Called with port_lock held.
343 */
344static unsigned
345gs_send_packet(struct gs_port *port, char *packet, unsigned size)
346{
347 unsigned len;
348
349 len = gs_buf_data_avail(&port->port_write_buf);
350 if (len < size)
351 size = len;
352 if (size != 0)
353 size = gs_buf_get(&port->port_write_buf, packet, size);
354 return size;
355}
356
357/*
358 * gs_start_tx
359 *
360 * This function finds available write requests, calls
361 * gs_send_packet to fill these packets with data, and
362 * continues until either there are no more write requests
363 * available or no more data to send. This function is
364 * run whenever data arrives or write requests are available.
365 *
366 * Context: caller owns port_lock; port_usb is non-null.
367 */
368static int gs_start_tx(struct gs_port *port)
369/*
370__releases(&port->port_lock)
371__acquires(&port->port_lock)
372*/
373{
374 struct list_head *pool = &port->write_pool;
375 struct usb_ep *in = port->port_usb->in;
376 int status = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700377 static long prev_len;
David Brownellc1dca562008-06-19 17:51:44 -0700378 bool do_tty_wake = false;
379
380 while (!list_empty(pool)) {
381 struct usb_request *req;
382 int len;
383
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700384 if (port->write_started >= TX_QUEUE_SIZE)
Jim Sung28609d42010-11-04 18:47:51 -0700385 break;
386
David Brownellc1dca562008-06-19 17:51:44 -0700387 req = list_entry(pool->next, struct usb_request, list);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700388 len = gs_send_packet(port, req->buf, TX_BUF_SIZE);
David Brownellc1dca562008-06-19 17:51:44 -0700389 if (len == 0) {
Rajkumar Raghupathy40985292012-04-12 15:19:53 +0530390 /* Queue zero length packet explicitly to make it
391 * work with UDCs which don't support req->zero flag
392 */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700393 if (prev_len && (prev_len % in->maxpacket == 0)) {
394 req->length = 0;
395 list_del(&req->list);
Devin Kimd5bfef02012-06-20 08:48:13 -0700396 port->write_started++;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700397 spin_unlock(&port->port_lock);
398 status = usb_ep_queue(in, req, GFP_ATOMIC);
399 spin_lock(&port->port_lock);
400 if (!port->port_usb) {
401 gs_free_req(in, req);
402 break;
403 }
404 if (status) {
405 printk(KERN_ERR "%s: %s err %d\n",
406 __func__, "queue", status);
407 list_add(&req->list, pool);
Devin Kimd5bfef02012-06-20 08:48:13 -0700408 port->write_started--;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700409 }
410 prev_len = 0;
411 }
David Brownellc1dca562008-06-19 17:51:44 -0700412 wake_up_interruptible(&port->drain_wait);
413 break;
414 }
415 do_tty_wake = true;
416
417 req->length = len;
418 list_del(&req->list);
Devin Kimd5bfef02012-06-20 08:48:13 -0700419 port->write_started++;
David Brownellc1dca562008-06-19 17:51:44 -0700420
David Brownell937ef732008-07-07 12:16:08 -0700421 pr_vdebug(PREFIX "%d: tx len=%d, 0x%02x 0x%02x 0x%02x ...\n",
422 port->port_num, len, *((u8 *)req->buf),
David Brownellc1dca562008-06-19 17:51:44 -0700423 *((u8 *)req->buf+1), *((u8 *)req->buf+2));
David Brownellc1dca562008-06-19 17:51:44 -0700424
425 /* Drop lock while we call out of driver; completions
426 * could be issued while we do so. Disconnection may
427 * happen too; maybe immediately before we queue this!
428 *
429 * NOTE that we may keep sending data for a while after
430 * the TTY closed (dev->ioport->port_tty is NULL).
431 */
432 spin_unlock(&port->port_lock);
433 status = usb_ep_queue(in, req, GFP_ATOMIC);
434 spin_lock(&port->port_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700435 /*
436 * If port_usb is NULL, gserial disconnect is called
437 * while the spinlock is dropped and all requests are
438 * freed. Free the current request here.
439 */
440 if (!port->port_usb) {
441 do_tty_wake = false;
442 gs_free_req(in, req);
443 break;
444 }
David Brownellc1dca562008-06-19 17:51:44 -0700445 if (status) {
446 pr_debug("%s: %s %s err %d\n",
447 __func__, "queue", in->name, status);
448 list_add(&req->list, pool);
Devin Kimd5bfef02012-06-20 08:48:13 -0700449 port->write_started--;
David Brownellc1dca562008-06-19 17:51:44 -0700450 break;
451 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700452 prev_len = req->length;
453 port->nbytes_from_tty += req->length;
David Brownellc1dca562008-06-19 17:51:44 -0700454
David Brownellc1dca562008-06-19 17:51:44 -0700455 }
456
457 if (do_tty_wake && port->port_tty)
458 tty_wakeup(port->port_tty);
459 return status;
460}
461
David Brownellc1dca562008-06-19 17:51:44 -0700462/*
463 * Context: caller owns port_lock, and port_usb is set
464 */
465static unsigned gs_start_rx(struct gs_port *port)
466/*
467__releases(&port->port_lock)
468__acquires(&port->port_lock)
469*/
470{
471 struct list_head *pool = &port->read_pool;
472 struct usb_ep *out = port->port_usb->out;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700473 unsigned started = 0;
David Brownellc1dca562008-06-19 17:51:44 -0700474
475 while (!list_empty(pool)) {
476 struct usb_request *req;
477 int status;
478 struct tty_struct *tty;
479
David Brownell937ef732008-07-07 12:16:08 -0700480 /* no more rx if closed */
David Brownellc1dca562008-06-19 17:51:44 -0700481 tty = port->port_tty;
Devin Kimd5bfef02012-06-20 08:48:13 -0700482 if (!tty) {
483 started = 0;
David Brownellc1dca562008-06-19 17:51:44 -0700484 break;
Devin Kimd5bfef02012-06-20 08:48:13 -0700485 }
David Brownellc1dca562008-06-19 17:51:44 -0700486
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700487 if (port->read_started >= RX_QUEUE_SIZE)
Jim Sung28609d42010-11-04 18:47:51 -0700488 break;
489
David Brownellc1dca562008-06-19 17:51:44 -0700490 req = list_entry(pool->next, struct usb_request, list);
491 list_del(&req->list);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700492 req->length = RX_BUF_SIZE;
David Brownellc1dca562008-06-19 17:51:44 -0700493
494 /* drop lock while we call out; the controller driver
495 * may need to call us back (e.g. for disconnect)
496 */
497 spin_unlock(&port->port_lock);
498 status = usb_ep_queue(out, req, GFP_ATOMIC);
499 spin_lock(&port->port_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700500 /*
501 * If port_usb is NULL, gserial disconnect is called
502 * while the spinlock is dropped and all requests are
503 * freed. Free the current request here.
504 */
505 if (!port->port_usb) {
506 started = 0;
507 gs_free_req(out, req);
508 break;
509 }
David Brownellc1dca562008-06-19 17:51:44 -0700510 if (status) {
511 pr_debug("%s: %s %s err %d\n",
512 __func__, "queue", out->name, status);
513 list_add(&req->list, pool);
514 break;
515 }
Jim Sung28609d42010-11-04 18:47:51 -0700516 port->read_started++;
David Brownellc1dca562008-06-19 17:51:44 -0700517
David Brownellc1dca562008-06-19 17:51:44 -0700518 }
Jim Sung28609d42010-11-04 18:47:51 -0700519 return port->read_started;
David Brownellc1dca562008-06-19 17:51:44 -0700520}

/*
 * RX tasklet takes data out of the RX queue and hands it up to the TTY
 * layer until it refuses to take any more data (or is throttled back).
 * Then it issues reads for any further data.
 *
 * If the RX queue becomes full enough that no usb_request is queued,
 * the OUT endpoint may begin NAKing as soon as its FIFO fills up.
 * So QUEUE_SIZE packets plus however many the FIFO holds (usually two)
 * can be buffered before the TTY layer's buffers (currently 64 KB).
 */
static void gs_rx_push(struct work_struct *w)
{
	struct gs_port		*port = container_of(w, struct gs_port, push);
	struct tty_struct	*tty;
	struct list_head	*queue = &port->read_queue;
	bool			disconnect = false;
	bool			do_push = false;

	/* hand any queued data to the tty */
	spin_lock_irq(&port->port_lock);
	tty = port->port_tty;
	while (!list_empty(queue)) {
		struct usb_request	*req;

		req = list_first_entry(queue, struct usb_request, list);

		/* discard data if tty was closed */
		if (!tty)
			goto recycle;

		/* leave data queued if tty was rx throttled */
		if (test_bit(TTY_THROTTLED, &tty->flags))
			break;

		switch (req->status) {
		case -ESHUTDOWN:
			disconnect = true;
			pr_vdebug(PREFIX "%d: shutdown\n", port->port_num);
			break;

		default:
			/* presumably a transient fault */
			pr_warning(PREFIX "%d: unexpected RX status %d\n",
					port->port_num, req->status);
			/* FALLTHROUGH */
		case 0:
			/* normal completion */
			break;
		}

		/* push data to (open) tty */
		if (req->actual) {
			char		*packet = req->buf;
			unsigned	size = req->actual;
			unsigned	n;
			int		count;

			/* we may have pushed part of this packet already... */
			n = port->n_read;
			if (n) {
				packet += n;
				size -= n;
			}

			count = tty_insert_flip_string(tty, packet, size);
			port->nbytes_to_tty += count;
			if (count)
				do_push = true;
			if (count != size) {
				/* stop pushing; TTY layer can't handle more */
				port->n_read += count;
				pr_vdebug(PREFIX "%d: rx block %d/%d\n",
						port->port_num,
						count, req->actual);
				break;
			}
			port->n_read = 0;
		}
recycle:
		list_move(&req->list, &port->read_pool);
		port->read_started--;
	}

	/* Push from tty to ldisc; without low_latency set this is handled by
	 * a workqueue, so we won't get callbacks and can hold port_lock
	 */
	if (tty && do_push)
		tty_flip_buffer_push(tty);

	/* We want our data queue to become empty ASAP, keeping data
	 * in the tty and ldisc (not here).  If we couldn't push any
	 * this time around, there may be trouble unless there's an
	 * implicit tty_unthrottle() call on its way...
	 *
	 * REVISIT we should probably add a timer to keep the work queue
	 * from starving ... but it's not clear that case ever happens.
	 */
	if (!list_empty(queue) && tty) {
		if (!test_bit(TTY_THROTTLED, &tty->flags)) {
			if (do_push)
				queue_work(gserial_wq, &port->push);
			else
				pr_warning(PREFIX "%d: RX not scheduled?\n",
					port->port_num);
		}
	}

	/* If we're still connected, refill the USB RX queue. */
	if (!disconnect && port->port_usb)
		gs_start_rx(port);

	spin_unlock_irq(&port->port_lock);
}

static void gs_read_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct gs_port	*port = ep->driver_data;
	unsigned long flags;

	/* Queue all received data until the tty layer is ready for it. */
	spin_lock_irqsave(&port->port_lock, flags);
	port->nbytes_from_host += req->actual;
	list_add_tail(&req->list, &port->read_queue);
	queue_work(gserial_wq, &port->push);
	spin_unlock_irqrestore(&port->port_lock, flags);
}

static void gs_write_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct gs_port	*port = ep->driver_data;
	unsigned long flags;

	spin_lock_irqsave(&port->port_lock, flags);
	port->nbytes_to_host += req->actual;
	list_add(&req->list, &port->write_pool);
	port->write_started--;

	switch (req->status) {
	default:
		/* presumably a transient fault */
		pr_warning("%s: unexpected %s status %d\n",
				__func__, ep->name, req->status);
		/* FALL THROUGH */
	case 0:
		/* normal completion */
		if (port->port_usb)
			gs_start_tx(port);
		break;

	case -ESHUTDOWN:
		/* disconnect */
		pr_vdebug("%s: %s shutdown\n", __func__, ep->name);
		break;
	}

	spin_unlock_irqrestore(&port->port_lock, flags);
}

static void gs_free_requests(struct usb_ep *ep, struct list_head *head,
							 int *allocated)
{
	struct usb_request	*req;

	while (!list_empty(head)) {
		req = list_entry(head->next, struct usb_request, list);
		list_del(&req->list);
		gs_free_req(ep, req);
		if (allocated)
			(*allocated)--;
	}
}

static int gs_alloc_requests(struct usb_ep *ep, struct list_head *head,
		int num, int size, void (*fn)(struct usb_ep *, struct usb_request *),
		int *allocated)
{
	int			i;
	struct usb_request	*req;

	/* Pre-allocate up to QUEUE_SIZE transfers, but if we can't
	 * do quite that many this time, don't fail ... we just won't
	 * be as speedy as we might otherwise be.
	 */
	for (i = 0; i < num; i++) {
		req = gs_alloc_req(ep, size, GFP_ATOMIC);
		if (!req)
			return list_empty(head) ? -ENOMEM : 0;
		req->complete = fn;
		list_add_tail(&req->list, head);
		if (allocated)
			(*allocated)++;
	}
	return 0;
}

/**
 * gs_start_io - start USB I/O streams
 * @dev: encapsulates endpoints to use
 * Context: holding port_lock; port_tty and port_usb are non-null
 *
 * We only start I/O when something is connected to both sides of
 * this port.  If nothing is listening on the host side, we may
 * be pointlessly filling up our TX buffers and FIFO.
 */
static int gs_start_io(struct gs_port *port)
{
	struct list_head	*head = &port->read_pool;
	struct usb_ep		*ep = port->port_usb->out;
	int			status;
	unsigned		started;

	/* Allocate RX and TX I/O buffers.  We can't easily do this much
	 * earlier (with GFP_KERNEL) because the requests are coupled to
	 * endpoints, as are the packet sizes we'll be using.  Different
	 * configurations may use different endpoints with a given port;
	 * and high speed vs full speed changes packet sizes too.
	 */
	status = gs_alloc_requests(ep, head, RX_QUEUE_SIZE, RX_BUF_SIZE,
			gs_read_complete, &port->read_allocated);
	if (status)
		return status;

	status = gs_alloc_requests(port->port_usb->in, &port->write_pool,
			TX_QUEUE_SIZE, TX_BUF_SIZE, gs_write_complete,
			&port->write_allocated);
	if (status) {
		gs_free_requests(ep, head, &port->read_allocated);
		return status;
	}

	/* queue read requests */
	port->n_read = 0;
	started = gs_start_rx(port);

	if (!port->port_usb)
		return -EIO;
	/* unblock any pending writes into our circular buffer */
	if (started) {
		if (port->port_tty)
			tty_wakeup(port->port_tty);
	} else {
		gs_free_requests(ep, head, &port->read_allocated);
		gs_free_requests(port->port_usb->in, &port->write_pool,
			&port->write_allocated);
		status = -EIO;
	}

	return status;
}

/*-------------------------------------------------------------------------*/

/* TTY Driver */

/*
 * gs_open sets up the link between a gs_port and its associated TTY.
 * That link is broken *only* by TTY close(), and all driver methods
 * know that.
 */
static int gs_open(struct tty_struct *tty, struct file *file)
{
	int		port_num = tty->index;
	struct gs_port	*port;
	int		status;

	do {
		mutex_lock(&ports[port_num].lock);
		port = ports[port_num].port;
		if (!port)
			status = -ENODEV;
		else {
			spin_lock_irq(&port->port_lock);

			/* already open?  Great. */
			if (port->open_count) {
				status = 0;
				port->open_count++;

			/* currently opening/closing? wait ... */
			} else if (port->openclose) {
				status = -EBUSY;

			/* ... else we do the work */
			} else {
				status = -EAGAIN;
				port->openclose = true;
			}
			spin_unlock_irq(&port->port_lock);
		}
		mutex_unlock(&ports[port_num].lock);

		switch (status) {
		default:
			/* fully handled */
			return status;
		case -EAGAIN:
			/* must do the work */
			break;
		case -EBUSY:
			/* wait for EAGAIN task to finish */
			msleep(1);
			/* REVISIT could have a waitchannel here, if
			 * concurrent open performance is important
			 */
			break;
		}
	} while (status != -EAGAIN);

	/* Do the "real open" */
	spin_lock_irq(&port->port_lock);

	/* allocate circular buffer on first open */
	if (port->port_write_buf.buf_buf == NULL) {

		spin_unlock_irq(&port->port_lock);
		status = gs_buf_alloc(&port->port_write_buf, WRITE_BUF_SIZE);
		spin_lock_irq(&port->port_lock);

		if (status) {
			pr_debug("gs_open: ttyGS%d (%p,%p) no buffer\n",
				port->port_num, tty, file);
			port->openclose = false;
			goto exit_unlock_port;
		}
	}

	/* REVISIT if REMOVED (ports[].port NULL), abort the open
	 * to let rmmod work faster (but this way isn't wrong).
	 */

	/* REVISIT maybe wait for "carrier detect" */

	tty->driver_data = port;
	port->port_tty = tty;

	port->open_count = 1;
	port->openclose = false;

	/* if connected, start the I/O stream */
	if (port->port_usb) {
		struct gserial	*gser = port->port_usb;

		pr_debug("gs_open: start ttyGS%d\n", port->port_num);
		gs_start_io(port);

		if (gser->connect)
			gser->connect(gser);
	}

	pr_debug("gs_open: ttyGS%d (%p,%p)\n", port->port_num, tty, file);

	status = 0;

exit_unlock_port:
	spin_unlock_irq(&port->port_lock);
	return status;
}

static int gs_writes_finished(struct gs_port *p)
{
	int cond;

	/* return true on disconnect or empty buffer */
	spin_lock_irq(&p->port_lock);
	cond = (p->port_usb == NULL) || !gs_buf_data_avail(&p->port_write_buf);
	spin_unlock_irq(&p->port_lock);

	return cond;
}

static void gs_close(struct tty_struct *tty, struct file *file)
{
	struct gs_port *port = tty->driver_data;
	struct gserial	*gser;

	spin_lock_irq(&port->port_lock);

	if (port->open_count != 1) {
		if (port->open_count == 0)
			WARN_ON(1);
		else
			--port->open_count;
		goto exit;
	}

	pr_debug("gs_close: ttyGS%d (%p,%p) ...\n", port->port_num, tty, file);

	/* mark port as closing but in use; we can drop port lock
	 * and sleep if necessary
	 */
	port->openclose = true;
	port->open_count = 0;

	gser = port->port_usb;
	if (gser && gser->disconnect)
		gser->disconnect(gser);

	/* wait for circular write buffer to drain, disconnect, or at
	 * most GS_CLOSE_TIMEOUT seconds; then discard the rest
	 */
	if (gs_buf_data_avail(&port->port_write_buf) > 0 && gser) {
		spin_unlock_irq(&port->port_lock);
		wait_event_interruptible_timeout(port->drain_wait,
					gs_writes_finished(port),
					GS_CLOSE_TIMEOUT * HZ);
		spin_lock_irq(&port->port_lock);
		gser = port->port_usb;
	}

	/* Iff we're disconnected, there can be no I/O in flight so it's
	 * ok to free the circular buffer; else just scrub it.  And don't
	 * let the push work queue fire again until we're re-opened.
	 */
	if (gser == NULL)
		gs_buf_free(&port->port_write_buf);
	else
		gs_buf_clear(&port->port_write_buf);

	tty->driver_data = NULL;
	port->port_tty = NULL;

	port->openclose = false;

	pr_debug("gs_close: ttyGS%d (%p,%p) done!\n",
			port->port_num, tty, file);

	wake_up_interruptible(&port->close_wait);

	/*
	 * Freeing the previously queued requests as they are
	 * allocated again as a part of gs_open()
	 */
	if (port->port_usb) {
		spin_unlock_irq(&port->port_lock);
		usb_ep_fifo_flush(gser->out);
		usb_ep_fifo_flush(gser->in);
		spin_lock_irq(&port->port_lock);
		gs_free_requests(gser->out, &port->read_queue, NULL);
		gs_free_requests(gser->out, &port->read_pool, NULL);
		gs_free_requests(gser->in, &port->write_pool, NULL);
	}
	port->read_allocated = port->read_started =
		port->write_allocated = port->write_started = 0;
exit:
	spin_unlock_irq(&port->port_lock);
}

static int gs_write(struct tty_struct *tty, const unsigned char *buf, int count)
{
	struct gs_port	*port = tty->driver_data;
	unsigned long	flags;
	int		status;

	pr_vdebug("gs_write: ttyGS%d (%p) writing %d bytes\n",
			port->port_num, tty, count);

	spin_lock_irqsave(&port->port_lock, flags);
	if (count)
		count = gs_buf_put(&port->port_write_buf, buf, count);
	/* treat count == 0 as flush_chars() */
	if (port->port_usb)
		status = gs_start_tx(port);
	spin_unlock_irqrestore(&port->port_lock, flags);

	return count;
}

static int gs_put_char(struct tty_struct *tty, unsigned char ch)
{
	struct gs_port	*port = tty->driver_data;
	unsigned long	flags;
	int		status;

	pr_vdebug("gs_put_char: (%d,%p) char=0x%x, called from %p\n",
		port->port_num, tty, ch, __builtin_return_address(0));

	spin_lock_irqsave(&port->port_lock, flags);
	status = gs_buf_put(&port->port_write_buf, &ch, 1);
	spin_unlock_irqrestore(&port->port_lock, flags);

	return status;
}

static void gs_flush_chars(struct tty_struct *tty)
{
	struct gs_port	*port = tty->driver_data;
	unsigned long	flags;

	pr_vdebug("gs_flush_chars: (%d,%p)\n", port->port_num, tty);

	spin_lock_irqsave(&port->port_lock, flags);
	if (port->port_usb)
		gs_start_tx(port);
	spin_unlock_irqrestore(&port->port_lock, flags);
}

static int gs_write_room(struct tty_struct *tty)
{
	struct gs_port	*port = tty->driver_data;
	unsigned long	flags;
	int		room = 0;

	spin_lock_irqsave(&port->port_lock, flags);
	if (port->port_usb)
		room = gs_buf_space_avail(&port->port_write_buf);
	spin_unlock_irqrestore(&port->port_lock, flags);

	pr_vdebug("gs_write_room: (%d,%p) room=%d\n",
		port->port_num, tty, room);

	return room;
}

static int gs_chars_in_buffer(struct tty_struct *tty)
{
	struct gs_port	*port = tty->driver_data;
	unsigned long	flags;
	int		chars = 0;

	spin_lock_irqsave(&port->port_lock, flags);
	chars = gs_buf_data_avail(&port->port_write_buf);
	spin_unlock_irqrestore(&port->port_lock, flags);

	pr_vdebug("gs_chars_in_buffer: (%d,%p) chars=%d\n",
		port->port_num, tty, chars);

	return chars;
}

/* undo side effects of setting TTY_THROTTLED */
static void gs_unthrottle(struct tty_struct *tty)
{
	struct gs_port		*port = tty->driver_data;
	unsigned long		flags;

	/*
	 * tty's driver data is set to NULL during port close. Nothing
	 * to do here.
	 */
	if (!port)
		return;

	spin_lock_irqsave(&port->port_lock, flags);
	if (port->port_usb) {
		/* Kickstart read queue processing.  We don't do xon/xoff,
		 * rts/cts, or other handshaking with the host, but if the
		 * read queue backs up enough we'll be NAKing OUT packets.
		 */
		queue_work(gserial_wq, &port->push);
		pr_vdebug(PREFIX "%d: unthrottle\n", port->port_num);
	}
	spin_unlock_irqrestore(&port->port_lock, flags);
}

static int gs_break_ctl(struct tty_struct *tty, int duration)
{
	struct gs_port	*port = tty->driver_data;
	int		status = 0;
	struct gserial	*gser;

	pr_vdebug("gs_break_ctl: ttyGS%d, send break (%d)\n",
			port->port_num, duration);

	spin_lock_irq(&port->port_lock);
	gser = port->port_usb;
	if (gser && gser->send_break)
		status = gser->send_break(gser, duration);
	spin_unlock_irq(&port->port_lock);

	return status;
}

static int gs_tiocmget(struct tty_struct *tty)
{
	struct gs_port	*port = tty->driver_data;
	struct gserial	*gser;
	unsigned int result = 0;

	spin_lock_irq(&port->port_lock);
	gser = port->port_usb;
	if (!gser) {
		result = -ENODEV;
		goto fail;
	}

	if (gser->get_dtr)
		result |= (gser->get_dtr(gser) ? TIOCM_DTR : 0);

	if (gser->get_rts)
		result |= (gser->get_rts(gser) ? TIOCM_RTS : 0);

	if (gser->serial_state & TIOCM_CD)
		result |= TIOCM_CD;

	if (gser->serial_state & TIOCM_RI)
		result |= TIOCM_RI;
fail:
	spin_unlock_irq(&port->port_lock);
	return result;
}

static int gs_tiocmset(struct tty_struct *tty,
	unsigned int set, unsigned int clear)
{
	struct gs_port	*port = tty->driver_data;
	struct gserial *gser;
	int	status = 0;

	spin_lock_irq(&port->port_lock);
	gser = port->port_usb;
	if (!gser) {
		status = -ENODEV;
		goto fail;
	}

	if (set & TIOCM_RI) {
		if (gser->send_ring_indicator) {
			gser->serial_state |= TIOCM_RI;
			status = gser->send_ring_indicator(gser, 1);
		}
	}
	if (clear & TIOCM_RI) {
		if (gser->send_ring_indicator) {
			gser->serial_state &= ~TIOCM_RI;
			status = gser->send_ring_indicator(gser, 0);
		}
	}
	if (set & TIOCM_CD) {
		if (gser->send_carrier_detect) {
			gser->serial_state |= TIOCM_CD;
			status = gser->send_carrier_detect(gser, 1);
		}
	}
	if (clear & TIOCM_CD) {
		if (gser->send_carrier_detect) {
			gser->serial_state &= ~TIOCM_CD;
			status = gser->send_carrier_detect(gser, 0);
		}
	}
fail:
	spin_unlock_irq(&port->port_lock);
	return status;
}

static const struct tty_operations gs_tty_ops = {
	.open =			gs_open,
	.close =		gs_close,
	.write =		gs_write,
	.put_char =		gs_put_char,
	.flush_chars =		gs_flush_chars,
	.write_room =		gs_write_room,
	.chars_in_buffer =	gs_chars_in_buffer,
	.unthrottle =		gs_unthrottle,
	.break_ctl =		gs_break_ctl,
	.tiocmget =		gs_tiocmget,
	.tiocmset =		gs_tiocmset,
};

/*-------------------------------------------------------------------------*/

static struct tty_driver *gs_tty_driver;

static int
gs_port_alloc(unsigned port_num, struct usb_cdc_line_coding *coding)
{
	struct gs_port	*port;

	port = kzalloc(sizeof(struct gs_port), GFP_KERNEL);
	if (port == NULL)
		return -ENOMEM;

	spin_lock_init(&port->port_lock);
	init_waitqueue_head(&port->close_wait);
	init_waitqueue_head(&port->drain_wait);

	INIT_WORK(&port->push, gs_rx_push);

	INIT_LIST_HEAD(&port->read_pool);
	INIT_LIST_HEAD(&port->read_queue);
	INIT_LIST_HEAD(&port->write_pool);

	port->port_num = port_num;
	port->port_line_coding = *coding;

	ports[port_num].port = port;

	return 0;
}


#if defined(CONFIG_DEBUG_FS)

#define BUF_SIZE	512

static ssize_t debug_read_status(struct file *file, char __user *ubuf,
					size_t count, loff_t *ppos)
{
	struct gs_port *ui_dev = file->private_data;
	struct tty_struct *tty;
	struct gserial	*gser;
	char *buf;
	unsigned long flags;
	int i = 0;
	int ret;
	int result = 0;

	tty = ui_dev->port_tty;
	gser = ui_dev->port_usb;

	buf = kzalloc(sizeof(char) * BUF_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	spin_lock_irqsave(&ui_dev->port_lock, flags);

	i += scnprintf(buf + i, BUF_SIZE - i,
		"nbytes_from_host: %lu\n", ui_dev->nbytes_from_host);

	i += scnprintf(buf + i, BUF_SIZE - i,
		"nbytes_to_tty: %lu\n", ui_dev->nbytes_to_tty);

	i += scnprintf(buf + i, BUF_SIZE - i, "nbytes_with_usb_OUT_txr: %lu\n",
			(ui_dev->nbytes_from_host - ui_dev->nbytes_to_tty));

	i += scnprintf(buf + i, BUF_SIZE - i,
		"nbytes_from_tty: %lu\n", ui_dev->nbytes_from_tty);

	i += scnprintf(buf + i, BUF_SIZE - i,
		"nbytes_to_host: %lu\n", ui_dev->nbytes_to_host);

	i += scnprintf(buf + i, BUF_SIZE - i, "nbytes_with_usb_IN_txr: %lu\n",
			(ui_dev->nbytes_from_tty - ui_dev->nbytes_to_host));

	if (tty)
		i += scnprintf(buf + i, BUF_SIZE - i,
			"tty_flags: %lu\n", tty->flags);

	if (gser->get_dtr) {
		result |= (gser->get_dtr(gser) ? TIOCM_DTR : 0);
		i += scnprintf(buf + i, BUF_SIZE - i,
			"DTR_status: %d\n", result);
	}
	i += scnprintf(buf + i, BUF_SIZE - i, "port_write_buf: %d\n",
			gs_buf_data_avail(&ui_dev->port_write_buf));

	i += scnprintf(buf + i, BUF_SIZE - i, "write_started: %d\n",
			ui_dev->write_started);

	spin_unlock_irqrestore(&ui_dev->port_lock, flags);

	ret = simple_read_from_buffer(ubuf, count, ppos, buf, i);

	kfree(buf);

	return ret;
}

static ssize_t debug_write_reset(struct file *file, const char __user *buf,
			size_t count, loff_t *ppos)
{
	struct gs_port *ui_dev = file->private_data;
	unsigned long flags;

	spin_lock_irqsave(&ui_dev->port_lock, flags);
	ui_dev->nbytes_from_host = ui_dev->nbytes_to_tty =
		ui_dev->nbytes_from_tty = ui_dev->nbytes_to_host = 0;
	spin_unlock_irqrestore(&ui_dev->port_lock, flags);

	return count;
}

static int serial_debug_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}

const struct file_operations debug_rst_ops = {
	.open = serial_debug_open,
	.write = debug_write_reset,
};

const struct file_operations debug_adb_ops = {
	.open = serial_debug_open,
	.read = debug_read_status,
};

static void usb_debugfs_init(struct gs_port *ui_dev, int port_num)
{
	struct dentry *dent;
	char buf[48];

	snprintf(buf, 48, "usb_serial%d", port_num);
	dent = debugfs_create_dir(buf, 0);
	if (IS_ERR(dent))
		return;

	debugfs_create_file("readstatus", 0444, dent, ui_dev, &debug_adb_ops);
	debugfs_create_file("reset", S_IRUGO | S_IWUSR,
			dent, ui_dev, &debug_rst_ops);
}
#else
static void usb_debugfs_init(struct gs_port *ui_dev, int port_num) {}
#endif

/**
 * gserial_setup - initialize TTY driver for one or more ports
 * @g: gadget to associate with these ports
 * @count: how many ports to support
 * Context: may sleep
 *
 * The TTY stack needs to know in advance how many devices it should
 * plan to manage.  Use this call to set up the ports you will be
 * exporting through USB.  Later, connect them to functions based
 * on what configuration is activated by the USB host; and disconnect
 * them as appropriate.
 *
 * An example would be a two-configuration device in which both
 * configurations expose port 0, but through different functions.
 * One configuration could even expose port 1 while the other
 * one doesn't.
 *
 * Returns negative errno or zero.
 */
int gserial_setup(struct usb_gadget *g, unsigned count)
{
	unsigned			i;
	struct usb_cdc_line_coding	coding;
	int				status;

	if (count == 0 || count > N_PORTS)
		return -EINVAL;

	gs_tty_driver = alloc_tty_driver(count);
	if (!gs_tty_driver)
		return -ENOMEM;

	gs_tty_driver->driver_name = "g_serial";
	gs_tty_driver->name = PREFIX;
	/* uses dynamically assigned dev_t values */

	gs_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
	gs_tty_driver->subtype = SERIAL_TYPE_NORMAL;
	gs_tty_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV
				| TTY_DRIVER_RESET_TERMIOS;
	gs_tty_driver->init_termios = tty_std_termios;

	/* 9600-8-N-1 ... matches defaults expected by "usbser.sys" on
	 * MS-Windows.  Otherwise, most of these flags shouldn't affect
	 * anything unless we were to actually hook up to a serial line.
	 */
	gs_tty_driver->init_termios.c_cflag =
			B9600 | CS8 | CREAD | HUPCL | CLOCAL;
	gs_tty_driver->init_termios.c_ispeed = 9600;
	gs_tty_driver->init_termios.c_ospeed = 9600;

	coding.dwDTERate = cpu_to_le32(9600);
	coding.bCharFormat = 8;
	coding.bParityType = USB_CDC_NO_PARITY;
	coding.bDataBits = USB_CDC_1_STOP_BITS;

	tty_set_operations(gs_tty_driver, &gs_tty_ops);

	gserial_wq = create_singlethread_workqueue("k_gserial");
	if (!gserial_wq) {
		status = -ENOMEM;
		goto fail;
	}

	/* make devices be openable */
	for (i = 0; i < count; i++) {
		mutex_init(&ports[i].lock);
		status = gs_port_alloc(i, &coding);
		if (status) {
			count = i;
			goto fail;
		}
	}
	n_ports = count;

	/* export the driver ... */
	status = tty_register_driver(gs_tty_driver);
	if (status) {
		pr_err("%s: cannot register, err %d\n",
				__func__, status);
		goto fail;
	}

	/* ... and sysfs class devices, so mdev/udev make /dev/ttyGS* */
	for (i = 0; i < count; i++) {
		struct device	*tty_dev;

		tty_dev = tty_register_device(gs_tty_driver, i, &g->dev);
		if (IS_ERR(tty_dev))
			pr_warning("%s: no classdev for port %d, err %ld\n",
				__func__, i, PTR_ERR(tty_dev));
	}

	for (i = 0; i < count; i++)
		usb_debugfs_init(ports[i].port, i);

	pr_debug("%s: registered %d ttyGS* device%s\n", __func__,
			count, (count == 1) ? "" : "s");

	return status;
fail:
	while (count--)
		kfree(ports[count].port);
	if (gserial_wq)
		destroy_workqueue(gserial_wq);
	put_tty_driver(gs_tty_driver);
	gs_tty_driver = NULL;
	return status;
}
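
/*
 * Sketch of the expected setup/teardown pairing, assuming a composite
 * gadget driver along the lines of serial.c; "my_bind"/"my_unbind" are
 * placeholder names, not symbols defined here:
 *
 *	static int my_bind(struct usb_composite_dev *cdev)
 *	{
 *		int status = gserial_setup(cdev->gadget, 2);
 *
 *		if (status < 0)
 *			return status;
 *		... add configurations and bind functions ...
 *		return 0;
 *	}
 *
 *	static int my_unbind(struct usb_composite_dev *cdev)
 *	{
 *		gserial_cleanup();
 *		return 0;
 *	}
 */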

static int gs_closed(struct gs_port *port)
{
	int cond;

	spin_lock_irq(&port->port_lock);
	cond = (port->open_count == 0) && !port->openclose;
	spin_unlock_irq(&port->port_lock);
	return cond;
}

/**
 * gserial_cleanup - remove TTY-over-USB driver and devices
 * Context: may sleep
 *
 * This is called to free all resources allocated by @gserial_setup().
 * Accordingly, it may need to wait until some open /dev/ files have
 * closed.
 *
 * The caller must have issued @gserial_disconnect() for any ports
 * that had previously been connected, so that there is never any
 * I/O pending when it's called.
 */
void gserial_cleanup(void)
{
	unsigned	i;
	struct gs_port	*port;

	if (!gs_tty_driver)
		return;

	/* start sysfs and /dev/ttyGS* node removal */
	for (i = 0; i < n_ports; i++)
		tty_unregister_device(gs_tty_driver, i);

	for (i = 0; i < n_ports; i++) {
		/* prevent new opens */
		mutex_lock(&ports[i].lock);
		port = ports[i].port;
		ports[i].port = NULL;
		mutex_unlock(&ports[i].lock);

		cancel_work_sync(&port->push);

		/* wait for old opens to finish */
		wait_event(port->close_wait, gs_closed(port));

		WARN_ON(port->port_usb != NULL);

		kfree(port);
	}
	n_ports = 0;

	destroy_workqueue(gserial_wq);
	tty_unregister_driver(gs_tty_driver);
	put_tty_driver(gs_tty_driver);
	gs_tty_driver = NULL;

	pr_debug("%s: cleaned up ttyGS* support\n", __func__);
}

/**
 * gserial_connect - notify TTY I/O glue that USB link is active
 * @gser: the function, set up with endpoints and descriptors
 * @port_num: which port is active
 * Context: any (usually from irq)
 *
 * This is called to activate endpoints and let the TTY layer know that
 * the connection is active ... not unlike "carrier detect".  It won't
 * necessarily start I/O queues; unless the TTY is held open by any
 * task, there would be no point.  However, the endpoints will be
 * activated so the USB host can perform I/O, subject to basic USB
 * hardware flow control.
 *
 * Caller needs to have set up the endpoints and USB function in @dev
 * before calling this, as well as the appropriate (speed-specific)
 * endpoint descriptors, and also have set up the TTY driver by calling
 * @gserial_setup().
 *
 * Returns negative errno or zero.
 * On success, ep->driver_data will be overwritten.
 */
int gserial_connect(struct gserial *gser, u8 port_num)
{
	struct gs_port	*port;
	unsigned long	flags;
	int		status;

	if (!gs_tty_driver || port_num >= n_ports)
		return -ENXIO;

	/* we "know" gserial_cleanup() hasn't been called */
	port = ports[port_num].port;

	/* activate the endpoints */
	status = usb_ep_enable(gser->in);
	if (status < 0)
		return status;
	gser->in->driver_data = port;

	status = usb_ep_enable(gser->out);
	if (status < 0)
		goto fail_out;
	gser->out->driver_data = port;

	/* then tell the tty glue that I/O can work */
	spin_lock_irqsave(&port->port_lock, flags);
	gser->ioport = port;
	port->port_usb = gser;

	/* REVISIT unclear how best to handle this state...
	 * we don't really couple it with the Linux TTY.
	 */
	gser->port_line_coding = port->port_line_coding;

	/* REVISIT if waiting on "carrier detect", signal. */

	/* if it's already open, start I/O ... and notify the serial
	 * protocol about open/close status (connect/disconnect).
	 */
	if (port->open_count) {
		pr_debug("gserial_connect: start ttyGS%d\n", port->port_num);
		gs_start_io(port);
		if (gser->connect)
			gser->connect(gser);
	} else {
		if (gser->disconnect)
			gser->disconnect(gser);
	}

	spin_unlock_irqrestore(&port->port_lock, flags);

	return status;

fail_out:
	usb_ep_disable(gser->in);
	gser->in->driver_data = NULL;
	return status;
}

/**
 * gserial_disconnect - notify TTY I/O glue that USB link is inactive
 * @gser: the function, on which gserial_connect() was called
 * Context: any (usually from irq)
 *
 * This is called to deactivate endpoints and let the TTY layer know
 * that the connection went inactive ... not unlike "hangup".
 *
 * On return, the state is as if gserial_connect() had never been called;
 * there is no active USB I/O on these endpoints.
 */
void gserial_disconnect(struct gserial *gser)
{
	struct gs_port	*port = gser->ioport;
	unsigned long	flags;

	if (!port)
		return;

	/* tell the TTY glue not to do I/O here any more */
	spin_lock_irqsave(&port->port_lock, flags);

	/* REVISIT as above: how best to track this? */
	port->port_line_coding = gser->port_line_coding;

	port->port_usb = NULL;
	gser->ioport = NULL;
	if (port->open_count > 0 || port->openclose) {
		wake_up_interruptible(&port->drain_wait);
		if (port->port_tty)
			tty_hangup(port->port_tty);
	}
	spin_unlock_irqrestore(&port->port_lock, flags);

	/* disable endpoints, aborting down any active I/O */
	usb_ep_disable(gser->out);
	gser->out->driver_data = NULL;

	usb_ep_disable(gser->in);
	gser->in->driver_data = NULL;

	/* finally, free any unused/unusable I/O buffers */
	spin_lock_irqsave(&port->port_lock, flags);
	if (port->open_count == 0 && !port->openclose)
		gs_buf_free(&port->port_write_buf);
	gs_free_requests(gser->out, &port->read_pool, NULL);
	gs_free_requests(gser->out, &port->read_queue, NULL);
	gs_free_requests(gser->in, &port->write_pool, NULL);

	port->read_allocated = port->read_started =
		port->write_allocated = port->write_started = 0;

	port->nbytes_from_host = port->nbytes_to_tty =
		port->nbytes_from_tty = port->nbytes_to_host = 0;

	spin_unlock_irqrestore(&port->port_lock, flags);
}