/*
 * Copyright (c) 2001 The Regents of the University of Michigan.
 * All rights reserved.
 *
 * Kendrick Smith <kmsmith@umich.edu>
 * Andy Adamson <andros@umich.edu>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/svc_xprt.h>
#include <linux/slab.h>
#include "nfsd.h"
#include "state.h"

#define NFSDDBG_FACILITY	NFSDDBG_PROC

#define NFSPROC4_CB_NULL	0
#define NFSPROC4_CB_COMPOUND	1

/* Index of predefined Linux callback client operations */

enum {
	NFSPROC4_CLNT_CB_NULL = 0,
	NFSPROC4_CLNT_CB_RECALL,
	NFSPROC4_CLNT_CB_SEQUENCE,
};

enum nfs_cb_opnum4 {
	OP_CB_RECALL		= 4,
	OP_CB_SEQUENCE		= 11,
};

#define NFS4_MAXTAGLEN		20

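/* Estimated request/reply sizes for the XDR routines below, in 4-byte XDR words */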
#define NFS4_enc_cb_null_sz		0
#define NFS4_dec_cb_null_sz		0
#define cb_compound_enc_hdr_sz		4
#define cb_compound_dec_hdr_sz		(3 + (NFS4_MAXTAGLEN >> 2))
#define sessionid_sz			(NFS4_MAX_SESSIONID_LEN >> 2)
#define cb_sequence_enc_sz		(sessionid_sz + 4 +             \
					1 /* no referring calls list yet */)
#define cb_sequence_dec_sz		(op_dec_sz + sessionid_sz + 4)

#define op_enc_sz			1
#define op_dec_sz			2
#define enc_nfs4_fh_sz			(1 + (NFS4_FHSIZE >> 2))
#define enc_stateid_sz			(NFS4_STATEID_SIZE >> 2)
#define NFS4_enc_cb_recall_sz		(cb_compound_enc_hdr_sz +       \
					cb_sequence_enc_sz +            \
					1 + enc_stateid_sz +            \
					enc_nfs4_fh_sz)

#define NFS4_dec_cb_recall_sz		(cb_compound_dec_hdr_sz +       \
					cb_sequence_dec_sz +            \
					op_dec_sz)

/*
 * Generic encode routines from fs/nfs/nfs4xdr.c
 */
static inline __be32 *
xdr_writemem(__be32 *p, const void *ptr, int nbytes)
{
	int tmp = XDR_QUADLEN(nbytes);
	if (!tmp)
		return p;
	p[tmp-1] = 0;
	memcpy(p, ptr, nbytes);
	return p + tmp;
}

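/* The encode/decode helper macros below assume a local __be32 *p cursor, and
 * (for RESERVE_SPACE/READ_BUF) a struct xdr_stream *xdr, in the calling function */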
#define WRITE32(n)               *p++ = htonl(n)
#define WRITEMEM(ptr,nbytes)     do {                           \
	p = xdr_writemem(p, ptr, nbytes);                       \
} while (0)
#define RESERVE_SPACE(nbytes)   do {                            \
	p = xdr_reserve_space(xdr, nbytes);                     \
	if (!p) dprintk("NFSD: RESERVE_SPACE(%d) failed in function %s\n", (int) (nbytes), __func__); \
	BUG_ON(!p);                                             \
} while (0)

/*
 * Generic decode routines from fs/nfs/nfs4xdr.c
 */
#define DECODE_TAIL                             \
	status = 0;                             \
out:                                            \
	return status;                          \
xdr_error:                                      \
	dprintk("NFSD: xdr error! (%s:%d)\n", __FILE__, __LINE__); \
	status = -EIO;                          \
	goto out

#define READ32(x)         (x) = ntohl(*p++)
#define READ64(x)         do {                  \
	(x) = (u64)ntohl(*p++) << 32;           \
	(x) |= ntohl(*p++);                     \
} while (0)
#define READTIME(x)       do {                  \
	p++;                                    \
	(x.tv_sec) = ntohl(*p++);               \
	(x.tv_nsec) = ntohl(*p++);              \
} while (0)
#define READ_BUF(nbytes)  do {                  \
	p = xdr_inline_decode(xdr, nbytes);     \
	if (!p) {                               \
		dprintk("NFSD: %s: reply buffer overflowed in line %d.\n", \
			__func__, __LINE__);    \
		return -EIO;                    \
	}                                       \
} while (0)

struct nfs4_cb_compound_hdr {
	/* args */
	u32		ident;	/* minorversion 0 only */
	u32		nops;
	__be32		*nops_p;
	u32		minorversion;
	/* res */
	int		status;
};

static struct {
	int stat;
	int errno;
} nfs_cb_errtbl[] = {
	{ NFS4_OK, 0 },
	{ NFS4ERR_PERM, EPERM },
	{ NFS4ERR_NOENT, ENOENT },
	{ NFS4ERR_IO, EIO },
	{ NFS4ERR_NXIO, ENXIO },
	{ NFS4ERR_ACCESS, EACCES },
	{ NFS4ERR_EXIST, EEXIST },
	{ NFS4ERR_XDEV, EXDEV },
	{ NFS4ERR_NOTDIR, ENOTDIR },
	{ NFS4ERR_ISDIR, EISDIR },
	{ NFS4ERR_INVAL, EINVAL },
	{ NFS4ERR_FBIG, EFBIG },
	{ NFS4ERR_NOSPC, ENOSPC },
	{ NFS4ERR_ROFS, EROFS },
	{ NFS4ERR_MLINK, EMLINK },
	{ NFS4ERR_NAMETOOLONG, ENAMETOOLONG },
	{ NFS4ERR_NOTEMPTY, ENOTEMPTY },
	{ NFS4ERR_DQUOT, EDQUOT },
	{ NFS4ERR_STALE, ESTALE },
	{ NFS4ERR_BADHANDLE, EBADHANDLE },
	{ NFS4ERR_BAD_COOKIE, EBADCOOKIE },
	{ NFS4ERR_NOTSUPP, ENOTSUPP },
	{ NFS4ERR_TOOSMALL, ETOOSMALL },
	{ NFS4ERR_SERVERFAULT, ESERVERFAULT },
	{ NFS4ERR_BADTYPE, EBADTYPE },
	{ NFS4ERR_LOCKED, EAGAIN },
	{ NFS4ERR_RESOURCE, EREMOTEIO },
	{ NFS4ERR_SYMLINK, ELOOP },
	{ NFS4ERR_OP_ILLEGAL, EOPNOTSUPP },
	{ NFS4ERR_DEADLOCK, EDEADLK },
	{ -1, EIO }
};

static int
nfs_cb_stat_to_errno(int stat)
{
	int i;
	for (i = 0; nfs_cb_errtbl[i].stat != -1; i++) {
		if (nfs_cb_errtbl[i].stat == stat)
			return nfs_cb_errtbl[i].errno;
	}
	/* If we cannot translate the error, the recovery routines should
	 * handle it.
	 * Note: remaining NFSv4 error codes have values > 10000, so should
	 * not conflict with native Linux error codes.
	 */
	return stat;
}

/*
 * XDR encode
 */

static void
encode_stateid(struct xdr_stream *xdr, stateid_t *sid)
{
	__be32 *p;

	RESERVE_SPACE(sizeof(stateid_t));
	WRITE32(sid->si_generation);
	WRITEMEM(&sid->si_opaque, sizeof(stateid_opaque_t));
}

static void
encode_cb_compound_hdr(struct xdr_stream *xdr, struct nfs4_cb_compound_hdr *hdr)
{
	__be32 *p;

	RESERVE_SPACE(16);
	WRITE32(0);		/* tag length is always 0 */
	WRITE32(hdr->minorversion);
	WRITE32(hdr->ident);
	hdr->nops_p = p;
	WRITE32(hdr->nops);
}

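/* Backfill the final operation count into the compound header encoded above */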
static void encode_cb_nops(struct nfs4_cb_compound_hdr *hdr)
{
	*hdr->nops_p = htonl(hdr->nops);
}

static void
encode_cb_recall(struct xdr_stream *xdr, struct nfs4_delegation *dp,
		struct nfs4_cb_compound_hdr *hdr)
{
	__be32 *p;
	int len = dp->dl_fh.fh_size;

	RESERVE_SPACE(4);
	WRITE32(OP_CB_RECALL);
	encode_stateid(xdr, &dp->dl_stateid);
	RESERVE_SPACE(8 + (XDR_QUADLEN(len) << 2));
	WRITE32(0); /* truncate optimization not implemented */
	WRITE32(len);
	WRITEMEM(&dp->dl_fh.fh_base, len);
	hdr->nops++;
}

static void
encode_cb_sequence(struct xdr_stream *xdr, struct nfsd4_callback *cb,
		   struct nfs4_cb_compound_hdr *hdr)
{
	__be32 *p;

	if (hdr->minorversion == 0)
		return;

	RESERVE_SPACE(1 + NFS4_MAX_SESSIONID_LEN + 20);

	WRITE32(OP_CB_SEQUENCE);
	WRITEMEM(cb->cb_clp->cl_sessionid.data, NFS4_MAX_SESSIONID_LEN);
	WRITE32(cb->cb_clp->cl_cb_seq_nr);
	WRITE32(0);		/* slotid, always 0 */
	WRITE32(0);		/* highest slotid always 0 */
	WRITE32(0);		/* cachethis always 0 */
	WRITE32(0);		/* FIXME: support referring_call_lists */
	hdr->nops++;
}

static int
nfs4_xdr_enc_cb_null(struct rpc_rqst *req, __be32 *p)
{
	struct xdr_stream xdrs, *xdr = &xdrs;

	xdr_init_encode(&xdrs, &req->rq_snd_buf, p);
	RESERVE_SPACE(0);
	return 0;
}

static int
nfs4_xdr_enc_cb_recall(struct rpc_rqst *req, __be32 *p,
		struct nfsd4_callback *cb)
{
	struct xdr_stream xdr;
	struct nfs4_delegation *args = cb->cb_op;
	struct nfs4_cb_compound_hdr hdr = {
		.ident = cb->cb_clp->cl_cb_ident,
		.minorversion = cb->cb_minorversion,
	};

	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
	encode_cb_compound_hdr(&xdr, &hdr);
	encode_cb_sequence(&xdr, cb, &hdr);
	encode_cb_recall(&xdr, args, &hdr);
	encode_cb_nops(&hdr);
	return 0;
}


static int
decode_cb_compound_hdr(struct xdr_stream *xdr, struct nfs4_cb_compound_hdr *hdr)
{
	__be32 *p;
	u32 taglen;

	READ_BUF(8);
	READ32(hdr->status);
	/* We've got no use for the tag; ignore it: */
	READ32(taglen);
	READ_BUF(taglen + 4);
	p += XDR_QUADLEN(taglen);
	READ32(hdr->nops);
	return 0;
}

static int
decode_cb_op_hdr(struct xdr_stream *xdr, enum nfs_opnum4 expected)
{
	__be32 *p;
	u32 op;
	int32_t nfserr;

	READ_BUF(8);
	READ32(op);
	if (op != expected) {
		dprintk("NFSD: decode_cb_op_hdr: Callback server returned "
			"operation %d but we issued a request for %d\n",
			op, expected);
		return -EIO;
	}
	READ32(nfserr);
	if (nfserr != NFS_OK)
		return -nfs_cb_stat_to_errno(nfserr);
	return 0;
}

/*
 * Our current back channel implementation supports a single backchannel
 * with a single slot.
 */
static int
decode_cb_sequence(struct xdr_stream *xdr, struct nfsd4_callback *cb,
		   struct rpc_rqst *rqstp)
{
	struct nfs4_sessionid id;
	int status;
	u32 dummy;
	__be32 *p;

	if (cb->cb_minorversion == 0)
		return 0;

	status = decode_cb_op_hdr(xdr, OP_CB_SEQUENCE);
	if (status)
		return status;

	/*
	 * If the server returns different values for sessionID, slotID or
	 * sequence number, the server is looney tunes.
	 */
	status = -ESERVERFAULT;

	READ_BUF(NFS4_MAX_SESSIONID_LEN + 16);
	memcpy(id.data, p, NFS4_MAX_SESSIONID_LEN);
	p += XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN);
	if (memcmp(id.data, cb->cb_clp->cl_sessionid.data,
		   NFS4_MAX_SESSIONID_LEN)) {
		dprintk("%s Invalid session id\n", __func__);
		goto out;
	}
	READ32(dummy);
	if (dummy != cb->cb_clp->cl_cb_seq_nr) {
		dprintk("%s Invalid sequence number\n", __func__);
		goto out;
	}
	READ32(dummy); /* slotid must be 0 */
	if (dummy != 0) {
		dprintk("%s Invalid slotid\n", __func__);
		goto out;
	}
	/* FIXME: process highest slotid and target highest slotid */
	status = 0;
out:
	return status;
}


static int
nfs4_xdr_dec_cb_null(struct rpc_rqst *req, __be32 *p)
{
	return 0;
}

static int
nfs4_xdr_dec_cb_recall(struct rpc_rqst *rqstp, __be32 *p,
		struct nfsd4_callback *cb)
{
	struct xdr_stream xdr;
	struct nfs4_cb_compound_hdr hdr;
	int status;

	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
	status = decode_cb_compound_hdr(&xdr, &hdr);
	if (status)
		goto out;
	if (cb) {
		status = decode_cb_sequence(&xdr, cb, rqstp);
		if (status)
			goto out;
	}
	status = decode_cb_op_hdr(&xdr, OP_CB_RECALL);
out:
	return status;
}

/*
 * RPC procedure tables
 */
#define PROC(proc, call, argtype, restype)                              \
[NFSPROC4_CLNT_##proc] = {                                              \
	.p_proc   = NFSPROC4_CB_##call,                                 \
	.p_encode = (kxdrproc_t) nfs4_xdr_##argtype,                    \
	.p_decode = (kxdrproc_t) nfs4_xdr_##restype,                    \
	.p_arglen = NFS4_##argtype##_sz,                                \
	.p_replen = NFS4_##restype##_sz,                                \
	.p_statidx = NFSPROC4_CB_##call,                                \
	.p_name   = #proc,                                              \
}

static struct rpc_procinfo nfs4_cb_procedures[] = {
	PROC(CB_NULL,	NULL,		enc_cb_null,	dec_cb_null),
	PROC(CB_RECALL,	COMPOUND,	enc_cb_recall,	dec_cb_recall),
};

static struct rpc_version nfs_cb_version4 = {
/*
 * Note on the callback rpc program version number: despite language in rfc
 * 5661 section 18.36.3 requiring servers to use 4 in this field, the
 * official xdr descriptions for both 4.0 and 4.1 specify version 1, and
 * in practice that appears to be what implementations use.  The section
 * 18.36.3 language is expected to be fixed in an erratum.
 */
	.number			= 1,
	.nrprocs		= ARRAY_SIZE(nfs4_cb_procedures),
	.procs			= nfs4_cb_procedures
};

static struct rpc_version *nfs_cb_version[] = {
	&nfs_cb_version4,
};

static struct rpc_program cb_program;

static struct rpc_stat cb_stats = {
	.program		= &cb_program
};

#define NFS4_CALLBACK 0x40000000
static struct rpc_program cb_program = {
	.name			= "nfs4_cb",
	.number			= NFS4_CALLBACK,
	.nrvers			= ARRAY_SIZE(nfs_cb_version),
	.version		= nfs_cb_version,
	.stats			= &cb_stats,
	.pipe_dir_name		= "/nfsd4_cb",
};

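/* Allow the client roughly a tenth of the lease period, and at least one
 * second, to respond to a callback */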
static int max_cb_time(void)
{
	return max(nfsd4_lease/10, (time_t)1) * HZ;
}

/* Reference counting, callback cleanup, etc., all look racy as heck.
 * And why is cl_cb_set an atomic? */

int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *conn)
{
	struct rpc_timeout	timeparms = {
		.to_initval	= max_cb_time(),
		.to_retries	= 0,
	};
	struct rpc_create_args args = {
		.net		= &init_net,
		.protocol	= XPRT_TRANSPORT_TCP,
		.address	= (struct sockaddr *) &conn->cb_addr,
		.addrsize	= conn->cb_addrlen,
		.timeout	= &timeparms,
		.program	= &cb_program,
		.prognumber	= conn->cb_prog,
		.version	= 0,
		.authflavor	= clp->cl_flavor,
		.flags		= (RPC_CLNT_CREATE_NOPING | RPC_CLNT_CREATE_QUIET),
		.client_name	= clp->cl_principal,
	};
	struct rpc_clnt *client;

	if (!clp->cl_principal && (clp->cl_flavor >= RPC_AUTH_GSS_KRB5))
		return -EINVAL;
	if (conn->cb_minorversion) {
		args.bc_xprt = conn->cb_xprt;
		args.protocol = XPRT_TRANSPORT_BC_TCP;
	}
	/* Create RPC client */
	client = rpc_create(&args);
	if (IS_ERR(client)) {
		dprintk("NFSD: couldn't create callback client: %ld\n",
			PTR_ERR(client));
		return PTR_ERR(client);
	}
	clp->cl_cb_ident = conn->cb_ident;
	clp->cl_cb_client = client;
	return 0;
}

static void warn_no_callback_path(struct nfs4_client *clp, int reason)
{
	dprintk("NFSD: warning: no callback path to client %.*s: error %d\n",
		(int)clp->cl_name.len, clp->cl_name.data, reason);
}

static void nfsd4_cb_probe_done(struct rpc_task *task, void *calldata)
{
	struct nfs4_client *clp = container_of(calldata, struct nfs4_client, cl_cb_null);

	if (task->tk_status)
		warn_no_callback_path(clp, task->tk_status);
	else
		atomic_set(&clp->cl_cb_set, 1);
}

static const struct rpc_call_ops nfsd4_cb_probe_ops = {
	/* XXX: release method to ensure we set the cb channel down if
	 * necessary on early failure? */
	.rpc_call_done = nfsd4_cb_probe_done,
};

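/* Machine credential used for all callback RPCs sent to clients */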
static struct rpc_cred *callback_cred;

int set_callback_cred(void)
{
	if (callback_cred)
		return 0;
	callback_cred = rpc_lookup_machine_cred();
	if (!callback_cred)
		return -ENOMEM;
	return 0;
}

static struct workqueue_struct *callback_wq;

void do_probe_callback(struct nfs4_client *clp)
{
	struct nfsd4_callback *cb = &clp->cl_cb_null;

	cb->cb_op = NULL;
	cb->cb_clp = clp;

	cb->cb_msg.rpc_proc = &nfs4_cb_procedures[NFSPROC4_CLNT_CB_NULL];
	cb->cb_msg.rpc_argp = NULL;
	cb->cb_msg.rpc_resp = NULL;
	cb->cb_msg.rpc_cred = callback_cred;

	cb->cb_ops = &nfsd4_cb_probe_ops;

	queue_work(callback_wq, &cb->cb_work);
}

/*
 * Set up the callback client and put a NFSPROC4_CB_NULL on the wire...
 */
void nfsd4_probe_callback(struct nfs4_client *clp, struct nfs4_cb_conn *conn)
{
	BUG_ON(atomic_read(&clp->cl_cb_set));

	spin_lock(&clp->cl_lock);
	memcpy(&clp->cl_cb_conn, conn, sizeof(struct nfs4_cb_conn));
	set_bit(NFSD4_CLIENT_CB_UPDATE, &clp->cl_cb_flags);
	spin_unlock(&clp->cl_lock);
	do_probe_callback(clp);
}

/*
 * There's currently a single callback channel slot.
 * If the slot is available, then mark it busy.  Otherwise, set the
 * thread for sleeping on the callback RPC wait queue.
 */
static int nfsd41_cb_setup_sequence(struct nfs4_client *clp,
		struct rpc_task *task)
{
	u32 *ptr = (u32 *)clp->cl_sessionid.data;
	int status = 0;

	dprintk("%s: %u:%u:%u:%u\n", __func__,
		ptr[0], ptr[1], ptr[2], ptr[3]);

	if (test_and_set_bit(0, &clp->cl_cb_slot_busy) != 0) {
		rpc_sleep_on(&clp->cl_cb_waitq, task, NULL);
		dprintk("%s slot is busy\n", __func__);
		status = -EAGAIN;
		goto out;
	}
out:
	dprintk("%s status=%d\n", __func__, status);
	return status;
}

/*
 * TODO: cb_sequence should support referring call lists, cachethis, multiple
 * slots, and mark callback channel down on communication errors.
 */
static void nfsd4_cb_prepare(struct rpc_task *task, void *calldata)
{
	struct nfsd4_callback *cb = calldata;
	struct nfs4_delegation *dp = container_of(cb, struct nfs4_delegation, dl_recall);
	struct nfs4_client *clp = dp->dl_client;
	u32 minorversion = clp->cl_cb_conn.cb_minorversion;
	int status = 0;

	cb->cb_minorversion = minorversion;
	if (minorversion) {
		status = nfsd41_cb_setup_sequence(clp, task);
		if (status) {
			if (status != -EAGAIN) {
				/* terminate rpc task */
				task->tk_status = status;
				task->tk_action = NULL;
			}
			return;
		}
	}
	rpc_call_start(task);
}

static void nfsd4_cb_done(struct rpc_task *task, void *calldata)
{
	struct nfsd4_callback *cb = calldata;
	struct nfs4_delegation *dp = container_of(cb, struct nfs4_delegation, dl_recall);
	struct nfs4_client *clp = dp->dl_client;

	dprintk("%s: minorversion=%d\n", __func__,
		clp->cl_cb_conn.cb_minorversion);

	if (clp->cl_cb_conn.cb_minorversion) {
		/* No need for lock, access serialized in nfsd4_cb_prepare */
		++clp->cl_cb_seq_nr;
		clear_bit(0, &clp->cl_cb_slot_busy);
		rpc_wake_up_next(&clp->cl_cb_waitq);
		dprintk("%s: freed slot, new seqid=%d\n", __func__,
			clp->cl_cb_seq_nr);

		/* We're done looking into the sequence information */
		task->tk_msg.rpc_resp = NULL;
	}
}


static void nfsd4_cb_recall_done(struct rpc_task *task, void *calldata)
{
	struct nfsd4_callback *cb = calldata;
	struct nfs4_delegation *dp = container_of(cb, struct nfs4_delegation, dl_recall);
	struct nfs4_client *clp = dp->dl_client;
	struct rpc_clnt *current_rpc_client = clp->cl_cb_client;

	nfsd4_cb_done(task, calldata);

	if (current_rpc_client == NULL) {
		/* We're shutting down; give up. */
		/* XXX: err, or is it ok just to fall through
		 * and rpc_restart_call? */
		return;
	}

	switch (task->tk_status) {
	case 0:
		return;
	case -EBADHANDLE:
	case -NFS4ERR_BAD_STATEID:
		/* Race: client probably got cb_recall
		 * before open reply granting delegation */
		break;
	default:
		/* Network partition? */
		atomic_set(&clp->cl_cb_set, 0);
		warn_no_callback_path(clp, task->tk_status);
		if (current_rpc_client != task->tk_client) {
			/* queue a callback on the new connection: */
			atomic_inc(&dp->dl_count);
			nfsd4_cb_recall(dp);
			return;
		}
	}
	if (dp->dl_retries--) {
		rpc_delay(task, 2*HZ);
		task->tk_status = 0;
		rpc_restart_call_prepare(task);
		return;
	} else {
		atomic_set(&clp->cl_cb_set, 0);
		warn_no_callback_path(clp, task->tk_status);
	}
}

static void nfsd4_cb_recall_release(void *calldata)
{
	struct nfsd4_callback *cb = calldata;
	struct nfs4_delegation *dp = container_of(cb, struct nfs4_delegation, dl_recall);

	nfs4_put_delegation(dp);
}

static const struct rpc_call_ops nfsd4_cb_recall_ops = {
	.rpc_call_prepare = nfsd4_cb_prepare,
	.rpc_call_done = nfsd4_cb_recall_done,
	.rpc_release = nfsd4_cb_recall_release,
};

int nfsd4_create_callback_queue(void)
{
	callback_wq = create_singlethread_workqueue("nfsd4_callbacks");
	if (!callback_wq)
		return -ENOMEM;
	return 0;
}

void nfsd4_destroy_callback_queue(void)
{
	destroy_workqueue(callback_wq);
}

/* must be called under the state lock */
void nfsd4_shutdown_callback(struct nfs4_client *clp)
{
	set_bit(NFSD4_CLIENT_KILL, &clp->cl_cb_flags);
	/*
	 * Note this won't actually result in a null callback;
	 * instead, nfsd4_do_callback_rpc() will detect the killed
	 * client, destroy the rpc client, and stop:
	 */
	do_probe_callback(clp);
	flush_workqueue(callback_wq);
}

void nfsd4_release_cb(struct nfsd4_callback *cb)
{
	if (cb->cb_ops->rpc_release)
		cb->cb_ops->rpc_release(cb);
}

void nfsd4_process_cb_update(struct nfsd4_callback *cb)
{
	struct nfs4_cb_conn conn;
	struct nfs4_client *clp = cb->cb_clp;
	int err;

	/*
	 * This is either an update, or the client dying; in either case,
	 * kill the old client:
	 */
	if (clp->cl_cb_client) {
		rpc_shutdown_client(clp->cl_cb_client);
		clp->cl_cb_client = NULL;
	}
	if (test_bit(NFSD4_CLIENT_KILL, &clp->cl_cb_flags))
		return;
	spin_lock(&clp->cl_lock);
	/*
	 * Only serialized callback code is allowed to clear these
	 * flags; main nfsd code can only set them:
	 */
	BUG_ON(!clp->cl_cb_flags);
	clear_bit(NFSD4_CLIENT_CB_UPDATE, &clp->cl_cb_flags);
	memcpy(&conn, &cb->cb_clp->cl_cb_conn, sizeof(struct nfs4_cb_conn));
	spin_unlock(&clp->cl_lock);

	err = setup_callback_client(clp, &conn);
	if (err)
		warn_no_callback_path(clp, err);
}

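/* Workqueue handler: rebuild the callback client if its connection changed or
 * the client was killed, then send the prepared callback as an async RPC */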
void nfsd4_do_callback_rpc(struct work_struct *w)
{
	struct nfsd4_callback *cb = container_of(w, struct nfsd4_callback, cb_work);
	struct nfs4_client *clp = cb->cb_clp;
	struct rpc_clnt *clnt;

	if (clp->cl_cb_flags)
		nfsd4_process_cb_update(cb);

	clnt = clp->cl_cb_client;
	if (!clnt) {
		/* Callback channel broken, or client killed; give up: */
		nfsd4_release_cb(cb);
		return;
	}
	rpc_call_async(clnt, &cb->cb_msg, RPC_TASK_SOFT | RPC_TASK_SOFTCONN,
			cb->cb_ops, cb);
}

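/* Queue a CB_RECALL for the given delegation on the callback workqueue */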
void nfsd4_cb_recall(struct nfs4_delegation *dp)
{
	struct nfsd4_callback *cb = &dp->dl_recall;

	dp->dl_retries = 1;
	cb->cb_op = dp;
	cb->cb_clp = dp->dl_client;
	cb->cb_msg.rpc_proc = &nfs4_cb_procedures[NFSPROC4_CLNT_CB_RECALL];
	cb->cb_msg.rpc_argp = cb;
	cb->cb_msg.rpc_resp = cb;
	cb->cb_msg.rpc_cred = callback_cred;

	cb->cb_ops = &nfsd4_cb_recall_ops;

	queue_work(callback_wq, &dp->dl_recall.cb_work);
}