/*
 * linux/fs/9p/trans_fd.c
 *
 * Fd transport layer. Includes deprecated socket layer.
 *
 * Copyright (C) 2006 by Russ Cox <rsc@swtch.com>
 * Copyright (C) 2004-2005 by Latchesar Ionkov <lucho@ionkov.net>
 * Copyright (C) 2004-2008 by Eric Van Hensbergen <ericvh@gmail.com>
 * Copyright (C) 1997-2002 by Ron Minnich <rminnich@sarnoff.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to:
 *  Free Software Foundation
 *  51 Franklin Street, Fifth Floor
 *  Boston, MA  02111-1301  USA
 *
 */

#include <linux/in.h>
#include <linux/module.h>
#include <linux/net.h>
#include <linux/ipv6.h>
#include <linux/kthread.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/un.h>
#include <linux/uaccess.h>
#include <linux/inet.h>
#include <linux/idr.h>
#include <linux/file.h>
#include <linux/parser.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>
#include <net/9p/transport.h>

#define P9_PORT 564
#define MAX_SOCK_BUF (64*1024)
#define ERREQFLUSH	1
#define MAXPOLLWADDR	2

/**
 * struct p9_fd_opts - per-transport options
 * @rfd: file descriptor for reading (trans=fd)
 * @wfd: file descriptor for writing (trans=fd)
 * @port: port to connect to (trans=tcp)
 *
 */

struct p9_fd_opts {
	int rfd;
	int wfd;
	u16 port;
};

/**
 * struct p9_trans_fd - transport state
 * @rd: reference to file to read from
 * @wr: reference of file to write to
 * @conn: connection state reference
 *
 */

struct p9_trans_fd {
	struct file *rd;
	struct file *wr;
	struct p9_conn *conn;
};

/*
 * Option Parsing (code inspired by NFS code)
 *  - a little lazy - parse all fd-transport options
 */

enum {
	/* Options that take integer arguments */
	Opt_port, Opt_rfdno, Opt_wfdno, Opt_err,
};

static const match_table_t tokens = {
	{Opt_port, "port=%u"},
	{Opt_rfdno, "rfdno=%u"},
	{Opt_wfdno, "wfdno=%u"},
	{Opt_err, NULL},
};
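
/*
 * Example (illustrative only): the tcp transport may be handed an option
 * string such as "port=564", and the fd transport one such as
 * "rfdno=3,wfdno=4"; parse_opts() below matches each comma-separated
 * token against this table.
 */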

enum {
	Rworksched = 1,		/* read work scheduled or running */
	Rpending = 2,		/* can read */
	Wworksched = 4,		/* write work scheduled or running */
	Wpending = 8,		/* can write */
};

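/* per-request flush state, kept in p9_req->flush (see p9_mux_flush_request()) */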
enum {
	None,
	Flushing,
	Flushed,
};

/**
 * struct p9_req - fd mux encoding of an rpc transaction
 * @lock: protects req_list
 * @tag: numeric tag for rpc transaction
 * @tcall: request &p9_fcall structure
 * @rcall: response &p9_fcall structure
 * @err: error state
 * @flush: flag to indicate RPC has been flushed
 * @req_list: list link for higher level objects to chain requests
 * @m: connection this request was issued on
 * @wqueue: wait queue that client is blocked on for this rpc
 *
 */

struct p9_req {
	spinlock_t lock;
	int tag;
	struct p9_fcall *tcall;
	struct p9_fcall *rcall;
	int err;
	int flush;
	struct list_head req_list;
	struct p9_conn *m;
	wait_queue_head_t wqueue;
};

struct p9_poll_wait {
	struct p9_conn *conn;
	wait_queue_t wait;
	wait_queue_head_t *wait_addr;
};

/**
 * struct p9_conn - fd mux connection state information
 * @lock: protects mux_list (?)
 * @mux_list: list link for mux to manage multiple connections (?)
 * @client: reference to client instance for this connection
 * @err: error state
 * @req_list: accounting for requests which have been sent
 * @unsent_req_list: accounting for requests that haven't been sent
 * @rcall: current response &p9_fcall structure
 * @rpos: read position in current frame
 * @rbuf: current read buffer
 * @wpos: write position for current frame
 * @wsize: amount of data to write for current frame
 * @wbuf: current write buffer
 * @poll_wait: array of wait_q's for various worker threads
 * @poll_waddr: ????
 * @pt: poll state
 * @rq: current read work
 * @wq: current write work
 * @wsched: ????
 *
 */

struct p9_conn {
	spinlock_t lock; /* protect lock structure */
	struct list_head mux_list;
	struct p9_client *client;
	int err;
	struct list_head req_list;
	struct list_head unsent_req_list;
	struct p9_fcall *rcall;
	int rpos;
	char *rbuf;
	int wpos;
	int wsize;
	char *wbuf;
	struct list_head poll_pending_link;
	struct p9_poll_wait poll_wait[MAXPOLLWADDR];
	poll_table pt;
	struct work_struct rq;
	struct work_struct wq;
	unsigned long wsched;
};

static DEFINE_SPINLOCK(p9_poll_lock);
static LIST_HEAD(p9_poll_pending_list);
static struct workqueue_struct *p9_mux_wq;
static struct task_struct *p9_poll_task;

static void p9_mux_poll_stop(struct p9_conn *m)
{
	unsigned long flags;
	int i;

	for (i = 0; i < ARRAY_SIZE(m->poll_wait); i++) {
		struct p9_poll_wait *pwait = &m->poll_wait[i];

		if (pwait->wait_addr) {
			remove_wait_queue(pwait->wait_addr, &pwait->wait);
			pwait->wait_addr = NULL;
		}
	}

	spin_lock_irqsave(&p9_poll_lock, flags);
	list_del_init(&m->poll_pending_link);
	spin_unlock_irqrestore(&p9_poll_lock, flags);
}

static void p9_mux_free_request(struct p9_conn *m, struct p9_req *req)
{
	if (req->tag != P9_NOTAG &&
	    p9_idpool_check(req->tag, m->client->tagpool))
		p9_idpool_put(req->tag, m->client->tagpool);
	kfree(req);
}

static void p9_conn_rpc_cb(struct p9_req *req);

static void p9_mux_flush_cb(struct p9_req *freq)
{
	int tag;
	struct p9_conn *m = freq->m;
	struct p9_req *req, *rreq, *rptr;

	P9_DPRINTK(P9_DEBUG_MUX, "mux %p tc %p rc %p err %d oldtag %d\n", m,
		freq->tcall, freq->rcall, freq->err,
		freq->tcall->params.tflush.oldtag);

	spin_lock(&m->lock);
	tag = freq->tcall->params.tflush.oldtag;
	req = NULL;
	list_for_each_entry_safe(rreq, rptr, &m->req_list, req_list) {
		if (rreq->tag == tag) {
			req = rreq;
			list_del(&req->req_list);
			break;
		}
	}
	spin_unlock(&m->lock);

	if (req) {
		spin_lock(&req->lock);
		req->flush = Flushed;
		spin_unlock(&req->lock);

		p9_conn_rpc_cb(req);
	}

	kfree(freq->tcall);
	kfree(freq->rcall);
	p9_mux_free_request(m, freq);
}

static void p9_conn_rpc_cb(struct p9_req *req)
{
	P9_DPRINTK(P9_DEBUG_MUX, "req %p\n", req);

	if (req->tcall->id == P9_TFLUSH) { /* flush callback */
		P9_DPRINTK(P9_DEBUG_MUX, "flush req %p\n", req);
		p9_mux_flush_cb(req);
	} else { /* normal wakeup path */
		P9_DPRINTK(P9_DEBUG_MUX, "normal req %p\n", req);
		if (req->flush != None && !req->err)
			req->err = -ERESTARTSYS;

		wake_up(&req->wqueue);
	}
}

/**
 * p9_conn_cancel - cancel all pending requests with error
 * @m: mux data
 * @err: error code
 *
 */

void p9_conn_cancel(struct p9_conn *m, int err)
{
	struct p9_req *req, *rtmp;
	LIST_HEAD(cancel_list);

	P9_DPRINTK(P9_DEBUG_ERROR, "mux %p err %d\n", m, err);
	m->err = err;
	spin_lock(&m->lock);
	list_for_each_entry_safe(req, rtmp, &m->req_list, req_list) {
		list_move(&req->req_list, &cancel_list);
	}
	list_for_each_entry_safe(req, rtmp, &m->unsent_req_list, req_list) {
		list_move(&req->req_list, &cancel_list);
	}
	spin_unlock(&m->lock);

	list_for_each_entry_safe(req, rtmp, &cancel_list, req_list) {
		list_del(&req->req_list);
		if (!req->err)
			req->err = err;

		p9_conn_rpc_cb(req);
	}
}

static void process_request(struct p9_conn *m, struct p9_req *req)
{
	int ecode;
	struct p9_str *ename;

	if (!req->err && req->rcall->id == P9_RERROR) {
		ecode = req->rcall->params.rerror.errno;
		ename = &req->rcall->params.rerror.error;

		P9_DPRINTK(P9_DEBUG_MUX, "Rerror %.*s\n", ename->len,
			ename->str);

		if (m->client->dotu)
			req->err = -ecode;

		if (!req->err) {
			req->err = p9_errstr2errno(ename->str, ename->len);

			/* string match failed */
			if (!req->err) {
				PRINT_FCALL_ERROR("unknown error", req->rcall);
				req->err = -ESERVERFAULT;
			}
		}
	} else if (req->tcall && req->rcall->id != req->tcall->id + 1) {
		P9_DPRINTK(P9_DEBUG_ERROR,
			"fcall mismatch: expected %d, got %d\n",
			req->tcall->id + 1, req->rcall->id);
		if (!req->err)
			req->err = -EIO;
	}
}

static unsigned int
p9_fd_poll(struct p9_client *client, struct poll_table_struct *pt)
{
	int ret, n;
	struct p9_trans_fd *ts = NULL;

	if (client && client->status == Connected)
		ts = client->trans;

	if (!ts)
		return -EREMOTEIO;

	if (!ts->rd->f_op || !ts->rd->f_op->poll)
		return -EIO;

	if (!ts->wr->f_op || !ts->wr->f_op->poll)
		return -EIO;

	ret = ts->rd->f_op->poll(ts->rd, pt);
	if (ret < 0)
		return ret;

	if (ts->rd != ts->wr) {
		n = ts->wr->f_op->poll(ts->wr, pt);
		if (n < 0)
			return n;
		ret = (ret & ~POLLOUT) | (n & ~POLLIN);
	}

	return ret;
}

/**
 * p9_fd_read - read from a fd
 * @client: client instance
 * @v: buffer to receive data into
 * @len: size of receive buffer
 *
 */

static int p9_fd_read(struct p9_client *client, void *v, int len)
{
	int ret;
	struct p9_trans_fd *ts = NULL;

	if (client && client->status != Disconnected)
		ts = client->trans;

	if (!ts)
		return -EREMOTEIO;

	if (!(ts->rd->f_flags & O_NONBLOCK))
		P9_DPRINTK(P9_DEBUG_ERROR, "blocking read ...\n");

	ret = kernel_read(ts->rd, ts->rd->f_pos, v, len);
	if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
		client->status = Disconnected;
	return ret;
}

/**
 * p9_read_work - called when there is some data to be read from a transport
 * @work: container of work to be done
 *
 */

static void p9_read_work(struct work_struct *work)
{
	int n, err;
	struct p9_conn *m;
	struct p9_req *req, *rptr, *rreq;
	struct p9_fcall *rcall;
	char *rbuf;

	m = container_of(work, struct p9_conn, rq);

	if (m->err < 0)
		return;

	rcall = NULL;
	P9_DPRINTK(P9_DEBUG_MUX, "start mux %p pos %d\n", m, m->rpos);

	if (!m->rcall) {
		m->rcall =
		    kmalloc(sizeof(struct p9_fcall) + m->client->msize,
			    GFP_KERNEL);
		if (!m->rcall) {
			err = -ENOMEM;
			goto error;
		}

		m->rbuf = (char *)m->rcall + sizeof(struct p9_fcall);
		m->rpos = 0;
	}

	clear_bit(Rpending, &m->wsched);
	err = p9_fd_read(m->client, m->rbuf + m->rpos,
			 m->client->msize - m->rpos);
	P9_DPRINTK(P9_DEBUG_MUX, "mux %p got %d bytes\n", m, err);
	if (err == -EAGAIN) {
		clear_bit(Rworksched, &m->wsched);
		return;
	}

	if (err <= 0)
		goto error;

	m->rpos += err;
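	/*
	 * Each 9P frame begins with a 4-byte little-endian size field, so
	 * once more than 4 bytes are buffered we can tell whether a
	 * complete message has arrived and can be deserialized.
	 */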
	while (m->rpos > 4) {
		n = le32_to_cpu(*(__le32 *) m->rbuf);
		if (n >= m->client->msize) {
			P9_DPRINTK(P9_DEBUG_ERROR,
				"requested packet size too big: %d\n", n);
			err = -EIO;
			goto error;
		}

		if (m->rpos < n)
			break;

		err =
		    p9_deserialize_fcall(m->rbuf, n, m->rcall, m->client->dotu);
		if (err < 0)
			goto error;

#ifdef CONFIG_NET_9P_DEBUG
		if ((p9_debug_level&P9_DEBUG_FCALL) == P9_DEBUG_FCALL) {
			char buf[150];

			p9_printfcall(buf, sizeof(buf), m->rcall,
				m->client->dotu);
			printk(KERN_NOTICE ">>> %p %s\n", m, buf);
		}
#endif

		rcall = m->rcall;
		rbuf = m->rbuf;
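		/*
		 * If more than one message was buffered, allocate a fresh
		 * rcall buffer and move the trailing bytes into it.
		 */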
		if (m->rpos > n) {
			m->rcall = kmalloc(sizeof(struct p9_fcall) +
						m->client->msize, GFP_KERNEL);
			if (!m->rcall) {
				err = -ENOMEM;
				goto error;
			}

			m->rbuf = (char *)m->rcall + sizeof(struct p9_fcall);
			memmove(m->rbuf, rbuf + n, m->rpos - n);
			m->rpos -= n;
		} else {
			m->rcall = NULL;
			m->rbuf = NULL;
			m->rpos = 0;
		}

		P9_DPRINTK(P9_DEBUG_MUX, "mux %p fcall id %d tag %d\n", m,
			rcall->id, rcall->tag);

		req = NULL;
		spin_lock(&m->lock);
		list_for_each_entry_safe(rreq, rptr, &m->req_list, req_list) {
			if (rreq->tag == rcall->tag) {
				req = rreq;
				if (req->flush != Flushing)
					list_del(&req->req_list);
				break;
			}
		}
		spin_unlock(&m->lock);

		if (req) {
			req->rcall = rcall;
			process_request(m, req);

			if (req->flush != Flushing)
				p9_conn_rpc_cb(req);
		} else {
			if (err >= 0 && rcall->id != P9_RFLUSH)
				P9_DPRINTK(P9_DEBUG_ERROR,
				  "unexpected response mux %p id %d tag %d\n",
				  m, rcall->id, rcall->tag);
			kfree(rcall);
		}
	}

	if (!list_empty(&m->req_list)) {
		if (test_and_clear_bit(Rpending, &m->wsched))
			n = POLLIN;
		else
			n = p9_fd_poll(m->client, NULL);

		if (n & POLLIN) {
			P9_DPRINTK(P9_DEBUG_MUX, "schedule read work %p\n", m);
			queue_work(p9_mux_wq, &m->rq);
		} else
			clear_bit(Rworksched, &m->wsched);
	} else
		clear_bit(Rworksched, &m->wsched);

	return;

error:
	p9_conn_cancel(m, err);
	clear_bit(Rworksched, &m->wsched);
}

/**
 * p9_fd_write - write to a socket
 * @client: client instance
 * @v: buffer to send data from
 * @len: size of send buffer
 *
 */

static int p9_fd_write(struct p9_client *client, void *v, int len)
{
	int ret;
	mm_segment_t oldfs;
	struct p9_trans_fd *ts = NULL;

	if (client && client->status != Disconnected)
		ts = client->trans;

	if (!ts)
		return -EREMOTEIO;

	if (!(ts->wr->f_flags & O_NONBLOCK))
		P9_DPRINTK(P9_DEBUG_ERROR, "blocking write ...\n");

	oldfs = get_fs();
	set_fs(get_ds());
	/* The cast to a user pointer is valid due to the set_fs() */
	ret = vfs_write(ts->wr, (void __user *)v, len, &ts->wr->f_pos);
	set_fs(oldfs);

	if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
		client->status = Disconnected;
	return ret;
}

/**
 * p9_write_work - called when a transport can send some data
 * @work: container for work to be done
 *
 */

static void p9_write_work(struct work_struct *work)
{
	int n, err;
	struct p9_conn *m;
	struct p9_req *req;

	m = container_of(work, struct p9_conn, wq);

	if (m->err < 0) {
		clear_bit(Wworksched, &m->wsched);
		return;
	}

	if (!m->wsize) {
		if (list_empty(&m->unsent_req_list)) {
			clear_bit(Wworksched, &m->wsched);
			return;
		}

		spin_lock(&m->lock);
again:
		req = list_entry(m->unsent_req_list.next, struct p9_req,
			       req_list);
		list_move_tail(&req->req_list, &m->req_list);
		if (req->err == ERREQFLUSH)
			goto again;

		m->wbuf = req->tcall->sdata;
		m->wsize = req->tcall->size;
		m->wpos = 0;
		spin_unlock(&m->lock);
	}

	P9_DPRINTK(P9_DEBUG_MUX, "mux %p pos %d size %d\n", m, m->wpos,
		m->wsize);
	clear_bit(Wpending, &m->wsched);
	err = p9_fd_write(m->client, m->wbuf + m->wpos, m->wsize - m->wpos);
	P9_DPRINTK(P9_DEBUG_MUX, "mux %p sent %d bytes\n", m, err);
	if (err == -EAGAIN) {
		clear_bit(Wworksched, &m->wsched);
		return;
	}

	if (err < 0)
		goto error;
	else if (err == 0) {
		err = -EREMOTEIO;
		goto error;
	}

	m->wpos += err;
	if (m->wpos == m->wsize)
		m->wpos = m->wsize = 0;

	if (m->wsize == 0 && !list_empty(&m->unsent_req_list)) {
		if (test_and_clear_bit(Wpending, &m->wsched))
			n = POLLOUT;
		else
			n = p9_fd_poll(m->client, NULL);

		if (n & POLLOUT) {
			P9_DPRINTK(P9_DEBUG_MUX, "schedule write work %p\n", m);
			queue_work(p9_mux_wq, &m->wq);
		} else
			clear_bit(Wworksched, &m->wsched);
	} else
		clear_bit(Wworksched, &m->wsched);

	return;

error:
	p9_conn_cancel(m, err);
	clear_bit(Wworksched, &m->wsched);
}

static int p9_pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	struct p9_poll_wait *pwait =
		container_of(wait, struct p9_poll_wait, wait);
	struct p9_conn *m = pwait->conn;
	unsigned long flags;
	DECLARE_WAITQUEUE(dummy_wait, p9_poll_task);

	spin_lock_irqsave(&p9_poll_lock, flags);
	if (list_empty(&m->poll_pending_link))
		list_add_tail(&m->poll_pending_link, &p9_poll_pending_list);
	spin_unlock_irqrestore(&p9_poll_lock, flags);

	/* perform the default wake up operation */
	return default_wake_function(&dummy_wait, mode, sync, key);
}

/**
 * p9_pollwait - add poll task to the wait queue
 * @filp: file pointer being polled
 * @wait_address: wait_q to block on
 * @p: poll state
 *
 * called by files poll operation to add v9fs-poll task to files wait queue
 */

static void
p9_pollwait(struct file *filp, wait_queue_head_t *wait_address, poll_table *p)
{
	struct p9_conn *m = container_of(p, struct p9_conn, pt);
	struct p9_poll_wait *pwait = NULL;
	int i;

	for (i = 0; i < ARRAY_SIZE(m->poll_wait); i++) {
		if (m->poll_wait[i].wait_addr == NULL) {
			pwait = &m->poll_wait[i];
			break;
		}
	}

	if (!pwait) {
		P9_DPRINTK(P9_DEBUG_ERROR, "not enough wait_address slots\n");
		return;
	}

	if (!wait_address) {
		P9_DPRINTK(P9_DEBUG_ERROR, "no wait_address\n");
		pwait->wait_addr = ERR_PTR(-EIO);
		return;
	}

	pwait->conn = m;
	pwait->wait_addr = wait_address;
	init_waitqueue_func_entry(&pwait->wait, p9_pollwake);
	add_wait_queue(wait_address, &pwait->wait);
}

/**
 * p9_conn_create - allocate and initialize the per-session mux data
 * @client: client instance
 *
 * Note: Creates the polling task if this is the first session.
 */

static struct p9_conn *p9_conn_create(struct p9_client *client)
{
	int i, n;
	struct p9_conn *m;

	P9_DPRINTK(P9_DEBUG_MUX, "client %p msize %d\n", client, client->msize);
	m = kzalloc(sizeof(struct p9_conn), GFP_KERNEL);
	if (!m)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&m->lock);
	INIT_LIST_HEAD(&m->mux_list);
	m->client = client;

	INIT_LIST_HEAD(&m->req_list);
	INIT_LIST_HEAD(&m->unsent_req_list);
	INIT_WORK(&m->rq, p9_read_work);
	INIT_WORK(&m->wq, p9_write_work);
	INIT_LIST_HEAD(&m->poll_pending_link);
	init_poll_funcptr(&m->pt, p9_pollwait);

	n = p9_fd_poll(client, &m->pt);
	if (n & POLLIN) {
		P9_DPRINTK(P9_DEBUG_MUX, "mux %p can read\n", m);
		set_bit(Rpending, &m->wsched);
	}

	if (n & POLLOUT) {
		P9_DPRINTK(P9_DEBUG_MUX, "mux %p can write\n", m);
		set_bit(Wpending, &m->wsched);
	}

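	/*
	 * p9_pollwait() stores ERR_PTR(-EIO) in wait_addr when a file had no
	 * usable wait queue; propagate that error to the caller here.
	 */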
	for (i = 0; i < ARRAY_SIZE(m->poll_wait); i++) {
		if (IS_ERR(m->poll_wait[i].wait_addr)) {
			void *err_ptr = (void *)m->poll_wait[i].wait_addr;

			p9_mux_poll_stop(m);
			kfree(m);
			/* return the error code, not a pointer into the freed mux */
			return err_ptr;
		}
	}

	return m;
}

/**
 * p9_poll_mux - polls a mux and schedules read or write works if necessary
 * @m: connection to poll
 *
 */

static void p9_poll_mux(struct p9_conn *m)
{
	int n;

	if (m->err < 0)
		return;

	n = p9_fd_poll(m->client, NULL);
	if (n < 0 || n & (POLLERR | POLLHUP | POLLNVAL)) {
		P9_DPRINTK(P9_DEBUG_MUX, "error mux %p err %d\n", m, n);
		if (n >= 0)
			n = -ECONNRESET;
		p9_conn_cancel(m, n);
	}

	if (n & POLLIN) {
		set_bit(Rpending, &m->wsched);
		P9_DPRINTK(P9_DEBUG_MUX, "mux %p can read\n", m);
		if (!test_and_set_bit(Rworksched, &m->wsched)) {
			P9_DPRINTK(P9_DEBUG_MUX, "schedule read work %p\n", m);
			queue_work(p9_mux_wq, &m->rq);
		}
	}

	if (n & POLLOUT) {
		set_bit(Wpending, &m->wsched);
		P9_DPRINTK(P9_DEBUG_MUX, "mux %p can write\n", m);
		if ((m->wsize || !list_empty(&m->unsent_req_list))
		    && !test_and_set_bit(Wworksched, &m->wsched)) {
			P9_DPRINTK(P9_DEBUG_MUX, "schedule write work %p\n", m);
			queue_work(p9_mux_wq, &m->wq);
		}
	}
}

/**
 * p9_send_request - send 9P request
 * The function can sleep until the request is scheduled for sending.
 * The function can be interrupted. Return from the function is not
 * a guarantee that the request is sent successfully. Can return errors
 * that can be retrieved by PTR_ERR macros.
 *
 * @m: mux data
 * @tc: request to be sent
 *
 */

static struct p9_req *p9_send_request(struct p9_conn *m, struct p9_fcall *tc)
{
	int n;
	struct p9_req *req;

	P9_DPRINTK(P9_DEBUG_MUX, "mux %p task %p tcall %p id %d\n", m, current,
		tc, tc->id);
	if (m->err < 0)
		return ERR_PTR(m->err);

	req = kmalloc(sizeof(struct p9_req), GFP_KERNEL);
	if (!req)
		return ERR_PTR(-ENOMEM);

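	/* TVERSION goes out with P9_NOTAG; all other requests take a tag
	 * from the client's tag pool */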
	n = P9_NOTAG;
	if (tc->id != P9_TVERSION) {
		n = p9_idpool_get(m->client->tagpool);
		if (n < 0) {
			kfree(req);
			return ERR_PTR(-ENOMEM);
		}
	}

	p9_set_tag(tc, n);

#ifdef CONFIG_NET_9P_DEBUG
	if ((p9_debug_level&P9_DEBUG_FCALL) == P9_DEBUG_FCALL) {
		char buf[150];

		p9_printfcall(buf, sizeof(buf), tc, m->client->dotu);
		printk(KERN_NOTICE "<<< %p %s\n", m, buf);
	}
#endif

	spin_lock_init(&req->lock);
	req->m = m;
	init_waitqueue_head(&req->wqueue);
	req->tag = n;
	req->tcall = tc;
	req->rcall = NULL;
	req->err = 0;
	req->flush = None;

	spin_lock(&m->lock);
	list_add_tail(&req->req_list, &m->unsent_req_list);
	spin_unlock(&m->lock);

	if (test_and_clear_bit(Wpending, &m->wsched))
		n = POLLOUT;
	else
		n = p9_fd_poll(m->client, NULL);

	if (n & POLLOUT && !test_and_set_bit(Wworksched, &m->wsched))
		queue_work(p9_mux_wq, &m->wq);

	return req;
}

static int
p9_mux_flush_request(struct p9_conn *m, struct p9_req *req)
{
	struct p9_fcall *fc;
	struct p9_req *rreq, *rptr;

	P9_DPRINTK(P9_DEBUG_MUX, "mux %p req %p tag %d\n", m, req, req->tag);

	/* if a response was received for a request, do nothing */
	spin_lock(&req->lock);
	if (req->rcall || req->err) {
		spin_unlock(&req->lock);
		P9_DPRINTK(P9_DEBUG_MUX,
			"mux %p req %p response already received\n", m, req);
		return 0;
	}

	req->flush = Flushing;
	spin_unlock(&req->lock);

	spin_lock(&m->lock);
	/* if the request is not sent yet, just remove it from the list */
	list_for_each_entry_safe(rreq, rptr, &m->unsent_req_list, req_list) {
		if (rreq->tag == req->tag) {
			P9_DPRINTK(P9_DEBUG_MUX,
			     "mux %p req %p request is not sent yet\n", m, req);
			list_del(&rreq->req_list);
			req->flush = Flushed;
			spin_unlock(&m->lock);
			p9_conn_rpc_cb(req);
			return 0;
		}
	}
	spin_unlock(&m->lock);

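	/*
	 * The request already went out on the wire: send a TFLUSH for its
	 * tag and let p9_mux_flush_cb() complete the original request.
	 */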
	clear_thread_flag(TIF_SIGPENDING);
	fc = p9_create_tflush(req->tag);
	p9_send_request(m, fc);
	return 1;
}

/**
 * p9_fd_rpc - sends 9P request and waits until a response is available.
 *	The function can be interrupted.
 * @client: client instance
 * @tc: request to be sent
 * @rc: pointer where a pointer to the response is stored
 *
 */

int
p9_fd_rpc(struct p9_client *client, struct p9_fcall *tc, struct p9_fcall **rc)
{
	struct p9_trans_fd *p = client->trans;
	struct p9_conn *m = p->conn;
	int err, sigpending;
	unsigned long flags;
	struct p9_req *req;

	if (rc)
		*rc = NULL;

	sigpending = 0;
	if (signal_pending(current)) {
		sigpending = 1;
		clear_thread_flag(TIF_SIGPENDING);
	}

	req = p9_send_request(m, tc);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		P9_DPRINTK(P9_DEBUG_MUX, "error %d\n", err);
		return err;
	}

	err = wait_event_interruptible(req->wqueue, req->rcall != NULL ||
			       req->err < 0);
	if (req->err < 0)
		err = req->err;

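	/*
	 * If the wait was interrupted while the connection is still healthy,
	 * try to flush the request on the server before giving up.
	 */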
	if (err == -ERESTARTSYS && client->status == Connected
	    && m->err == 0) {
		if (p9_mux_flush_request(m, req)) {
			/* wait until we get response of the flush message */
			do {
				clear_thread_flag(TIF_SIGPENDING);
				err = wait_event_interruptible(req->wqueue,
					req->rcall || req->err);
			} while (!req->rcall && !req->err &&
				 err == -ERESTARTSYS &&
				 client->status == Connected && !m->err);

			err = -ERESTARTSYS;
		}
		sigpending = 1;
	}

	if (sigpending) {
		spin_lock_irqsave(&current->sighand->siglock, flags);
		recalc_sigpending();
		spin_unlock_irqrestore(&current->sighand->siglock, flags);
	}

	if (rc)
		*rc = req->rcall;
	else
		kfree(req->rcall);

	p9_mux_free_request(m, req);
	if (err > 0)
		err = -EIO;

	return err;
}

/**
 * parse_opts - parse mount options into transport option structure
 * @params: options string passed from mount
 * @opts: transport-specific structure to parse options into
 *
 * Returns 0 upon success, -ERRNO upon failure
 */

static int parse_opts(char *params, struct p9_fd_opts *opts)
{
	char *p;
	substring_t args[MAX_OPT_ARGS];
	int option;
	char *options;
	int ret;

	opts->port = P9_PORT;
	opts->rfd = ~0;
	opts->wfd = ~0;

	if (!params)
		return 0;

	options = kstrdup(params, GFP_KERNEL);
	if (!options) {
		P9_DPRINTK(P9_DEBUG_ERROR,
				"failed to allocate copy of option string\n");
		return -ENOMEM;
	}

	while ((p = strsep(&options, ",")) != NULL) {
		int token;
		int r;
		if (!*p)
			continue;
		token = match_token(p, tokens, args);
		r = match_int(&args[0], &option);
		if (r < 0) {
			P9_DPRINTK(P9_DEBUG_ERROR,
			 "integer field, but no integer?\n");
			ret = r;
			continue;
		}
		switch (token) {
		case Opt_port:
			opts->port = option;
			break;
		case Opt_rfdno:
			opts->rfd = option;
			break;
		case Opt_wfdno:
			opts->wfd = option;
			break;
		default:
			continue;
		}
	}
	kfree(options);
	return 0;
}

static int p9_fd_open(struct p9_client *client, int rfd, int wfd)
{
	struct p9_trans_fd *ts = kmalloc(sizeof(struct p9_trans_fd),
					   GFP_KERNEL);
	if (!ts)
		return -ENOMEM;

	ts->rd = fget(rfd);
	ts->wr = fget(wfd);
	if (!ts->rd || !ts->wr) {
		if (ts->rd)
			fput(ts->rd);
		if (ts->wr)
			fput(ts->wr);
		kfree(ts);
		return -EIO;
	}

	client->trans = ts;
	client->status = Connected;

	return 0;
}

static int p9_socket_open(struct p9_client *client, struct socket *csocket)
{
	int fd, ret;

	csocket->sk->sk_allocation = GFP_NOIO;
	fd = sock_map_fd(csocket, 0);
	if (fd < 0) {
		P9_EPRINTK(KERN_ERR, "p9_socket_open: failed to map fd\n");
		return fd;
	}

	ret = p9_fd_open(client, fd, fd);
	if (ret < 0) {
		P9_EPRINTK(KERN_ERR, "p9_socket_open: failed to open fd\n");
		sockfd_put(csocket);
		return ret;
	}

	((struct p9_trans_fd *)client->trans)->rd->f_flags |= O_NONBLOCK;

	return 0;
}

/**
 * p9_conn_destroy - cancels all pending requests and frees mux resources
 * @m: mux to destroy
 *
 */

static void p9_conn_destroy(struct p9_conn *m)
{
	P9_DPRINTK(P9_DEBUG_MUX, "mux %p prev %p next %p\n", m,
		m->mux_list.prev, m->mux_list.next);

	p9_mux_poll_stop(m);
	cancel_work_sync(&m->rq);
	cancel_work_sync(&m->wq);

	p9_conn_cancel(m, -ECONNRESET);

	m->client = NULL;
	kfree(m);
}

/**
 * p9_fd_close - shutdown file descriptor transport
 * @client: client instance
 *
 */

static void p9_fd_close(struct p9_client *client)
{
	struct p9_trans_fd *ts;

	if (!client)
		return;

	ts = client->trans;
	if (!ts)
		return;

	client->status = Disconnected;

	p9_conn_destroy(ts->conn);

	if (ts->rd)
		fput(ts->rd);
	if (ts->wr)
		fput(ts->wr);

	kfree(ts);
}

/*
 * stolen from NFS - maybe should be made a generic function?
 */
static inline int valid_ipaddr4(const char *buf)
{
	int rc, count, in[4];

	rc = sscanf(buf, "%d.%d.%d.%d", &in[0], &in[1], &in[2], &in[3]);
	if (rc != 4)
		return -EINVAL;
	for (count = 0; count < 4; count++) {
		if (in[count] > 255)
			return -EINVAL;
	}
	return 0;
}

static int
p9_fd_create_tcp(struct p9_client *client, const char *addr, char *args)
{
	int err;
	struct socket *csocket;
	struct sockaddr_in sin_server;
	struct p9_fd_opts opts;
	struct p9_trans_fd *p = NULL; /* this gets allocated in p9_fd_open */

	err = parse_opts(args, &opts);
	if (err < 0)
		return err;

	if (valid_ipaddr4(addr) < 0)
		return -EINVAL;

	csocket = NULL;

	sin_server.sin_family = AF_INET;
	sin_server.sin_addr.s_addr = in_aton(addr);
	sin_server.sin_port = htons(opts.port);
	sock_create_kern(PF_INET, SOCK_STREAM, IPPROTO_TCP, &csocket);

	if (!csocket) {
		P9_EPRINTK(KERN_ERR, "p9_trans_tcp: problem creating socket\n");
		err = -EIO;
		goto error;
	}

	err = csocket->ops->connect(csocket,
				    (struct sockaddr *)&sin_server,
				    sizeof(struct sockaddr_in), 0);
	if (err < 0) {
		P9_EPRINTK(KERN_ERR,
			"p9_trans_tcp: problem connecting socket to %s\n",
			addr);
		goto error;
	}

	err = p9_socket_open(client, csocket);
	if (err < 0)
		goto error;

	p = (struct p9_trans_fd *) client->trans;
	p->conn = p9_conn_create(client);
	if (IS_ERR(p->conn)) {
		err = PTR_ERR(p->conn);
		p->conn = NULL;
		goto error;
	}

	return 0;

error:
	if (csocket)
		sock_release(csocket);

	kfree(p);

	return err;
}

static int
p9_fd_create_unix(struct p9_client *client, const char *addr, char *args)
{
	int err;
	struct socket *csocket;
	struct sockaddr_un sun_server;
	struct p9_trans_fd *p = NULL; /* this gets allocated in p9_fd_open */

	csocket = NULL;

	if (strlen(addr) > UNIX_PATH_MAX) {
		P9_EPRINTK(KERN_ERR, "p9_trans_unix: address too long: %s\n",
			addr);
		err = -ENAMETOOLONG;
		goto error;
	}

	sun_server.sun_family = PF_UNIX;
	strcpy(sun_server.sun_path, addr);
	sock_create_kern(PF_UNIX, SOCK_STREAM, 0, &csocket);
	err = csocket->ops->connect(csocket, (struct sockaddr *)&sun_server,
			sizeof(struct sockaddr_un) - 1, 0);
	if (err < 0) {
		P9_EPRINTK(KERN_ERR,
			"p9_trans_unix: problem connecting socket: %s: %d\n",
			addr, err);
		goto error;
	}

	err = p9_socket_open(client, csocket);
	if (err < 0)
		goto error;

	p = (struct p9_trans_fd *) client->trans;
	p->conn = p9_conn_create(client);
	if (IS_ERR(p->conn)) {
		err = PTR_ERR(p->conn);
		p->conn = NULL;
		goto error;
	}

	return 0;

error:
	if (csocket)
		sock_release(csocket);

	kfree(p);
	return err;
}

static int
p9_fd_create(struct p9_client *client, const char *addr, char *args)
{
	int err;
	struct p9_fd_opts opts;
	struct p9_trans_fd *p = NULL; /* this gets allocated in p9_fd_open */

	parse_opts(args, &opts);

	if (opts.rfd == ~0 || opts.wfd == ~0) {
		printk(KERN_ERR "v9fs: Insufficient options for proto=fd\n");
		return -ENOPROTOOPT;
	}

	err = p9_fd_open(client, opts.rfd, opts.wfd);
	if (err < 0)
		goto error;

	p = (struct p9_trans_fd *) client->trans;
	p->conn = p9_conn_create(client);
	if (IS_ERR(p->conn)) {
		err = PTR_ERR(p->conn);
		p->conn = NULL;
		goto error;
	}

	return 0;

error:
	kfree(p);
	return err;
}

static struct p9_trans_module p9_tcp_trans = {
	.name = "tcp",
	.maxsize = MAX_SOCK_BUF,
	.def = 1,
	.create = p9_fd_create_tcp,
	.close = p9_fd_close,
	.rpc = p9_fd_rpc,
	.owner = THIS_MODULE,
};

static struct p9_trans_module p9_unix_trans = {
	.name = "unix",
	.maxsize = MAX_SOCK_BUF,
	.def = 0,
	.create = p9_fd_create_unix,
	.close = p9_fd_close,
	.rpc = p9_fd_rpc,
	.owner = THIS_MODULE,
};

static struct p9_trans_module p9_fd_trans = {
	.name = "fd",
	.maxsize = MAX_SOCK_BUF,
	.def = 0,
	.create = p9_fd_create,
	.close = p9_fd_close,
	.rpc = p9_fd_rpc,
	.owner = THIS_MODULE,
};
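
/*
 * Illustrative usage (not part of this file): a v9fs mount selects one of
 * these transports with the trans= mount option, for example
 *	mount -t 9p 192.168.0.2 /mnt/9 -o trans=tcp,port=564
 *	mount -t 9p -o trans=fd,rfdno=3,wfdno=4 none /mnt/9
 */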

/**
 * p9_poll_proc - poll worker thread
 * @a: thread state and arguments
 *
 * polls all v9fs transports for new events and queues the appropriate
 * work to the work queue
 *
 */

static int p9_poll_proc(void *a)
{
	unsigned long flags;

	P9_DPRINTK(P9_DEBUG_MUX, "start %p\n", current);
 repeat:
	spin_lock_irqsave(&p9_poll_lock, flags);
	while (!list_empty(&p9_poll_pending_list)) {
		struct p9_conn *conn = list_first_entry(&p9_poll_pending_list,
							struct p9_conn,
							poll_pending_link);
		list_del_init(&conn->poll_pending_link);
		spin_unlock_irqrestore(&p9_poll_lock, flags);

		p9_poll_mux(conn);

		spin_lock_irqsave(&p9_poll_lock, flags);
	}
	spin_unlock_irqrestore(&p9_poll_lock, flags);

	set_current_state(TASK_INTERRUPTIBLE);
	if (list_empty(&p9_poll_pending_list)) {
		P9_DPRINTK(P9_DEBUG_MUX, "sleeping...\n");
		schedule();
	}
	__set_current_state(TASK_RUNNING);

	if (!kthread_should_stop())
		goto repeat;

	P9_DPRINTK(P9_DEBUG_MUX, "finish\n");
	return 0;
}

int p9_trans_fd_init(void)
{
	p9_mux_wq = create_workqueue("v9fs");
	if (!p9_mux_wq) {
		printk(KERN_WARNING "v9fs: mux: creating workqueue failed\n");
		return -ENOMEM;
	}

	p9_poll_task = kthread_run(p9_poll_proc, NULL, "v9fs-poll");
	if (IS_ERR(p9_poll_task)) {
		destroy_workqueue(p9_mux_wq);
		printk(KERN_WARNING "v9fs: mux: creating poll task failed\n");
		return PTR_ERR(p9_poll_task);
	}

	v9fs_register_trans(&p9_tcp_trans);
	v9fs_register_trans(&p9_unix_trans);
	v9fs_register_trans(&p9_fd_trans);

	return 0;
}

void p9_trans_fd_exit(void)
{
	kthread_stop(p9_poll_task);
	v9fs_unregister_trans(&p9_tcp_trans);
	v9fs_unregister_trans(&p9_unix_trans);
	v9fs_unregister_trans(&p9_fd_trans);

	destroy_workqueue(p9_mux_wq);
}