/*
 * linux/net/sunrpc/xdr.c
 *
 * Generic XDR support.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/types.h>
#include <linux/socket.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/pagemap.h>
#include <linux/errno.h>
#include <linux/in.h>
#include <linux/net.h>
#include <net/sock.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/msg_prot.h>

/*
 * XDR functions for basic NFS types
 */
u32 *
xdr_encode_netobj(u32 *p, const struct xdr_netobj *obj)
{
	unsigned int	quadlen = XDR_QUADLEN(obj->len);

	p[quadlen] = 0;		/* zero trailing bytes */
	*p++ = htonl(obj->len);
	memcpy(p, obj->data, obj->len);
	return p + XDR_QUADLEN(obj->len);
}

u32 *
xdr_decode_netobj(u32 *p, struct xdr_netobj *obj)
{
	unsigned int	len;

	if ((len = ntohl(*p++)) > XDR_MAX_NETOBJ)
		return NULL;
	obj->len  = len;
	obj->data = (u8 *) p;
	return p + XDR_QUADLEN(len);
}
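
/*
 * Illustrative sketch (not part of the original file): a netobj is
 * marshalled as a 32-bit big-endian length followed by the data,
 * rounded up to a 4-byte boundary. Encoding a 5-byte object therefore
 * consumes three XDR quads (4 bytes of length + 8 bytes of data and
 * padding), e.g.:
 *
 *	u32 buf[3];
 *	struct xdr_netobj obj = { .len = 5, .data = (u8 *) "hello" };
 *	u32 *end = xdr_encode_netobj(buf, &obj);
 *	// end == buf + 3; buf[0] == htonl(5); last 3 bytes are zero
 */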

/**
 * xdr_encode_opaque_fixed - Encode fixed length opaque data
 * @p: pointer to current position in XDR buffer.
 * @ptr: pointer to data to encode (or NULL)
 * @nbytes: size of data.
 *
 * Copy the array of data of length nbytes at ptr to the XDR buffer
 * at position p, then align to the next 32-bit boundary by padding
 * with zero bytes (see RFC1832).
 * Note: if ptr is NULL, only the padding is performed.
 *
 * Returns the updated current XDR buffer position
 */
u32 *xdr_encode_opaque_fixed(u32 *p, const void *ptr, unsigned int nbytes)
{
	if (likely(nbytes != 0)) {
		unsigned int quadlen = XDR_QUADLEN(nbytes);
		unsigned int padding = (quadlen << 2) - nbytes;

		if (ptr != NULL)
			memcpy(p, ptr, nbytes);
		if (padding != 0)
			memset((char *)p + nbytes, 0, padding);
		p += quadlen;
	}
	return p;
}
EXPORT_SYMBOL(xdr_encode_opaque_fixed);
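
/*
 * Worked example (illustrative, derived from the code above): for
 * nbytes == 5, XDR_QUADLEN(5) == 2, so quadlen << 2 == 8 and
 * padding == 3; five data bytes are copied, three zero bytes are
 * appended, and p advances by two 32-bit words.
 */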

/**
 * xdr_encode_opaque - Encode variable length opaque data
 * @p: pointer to current position in XDR buffer.
 * @ptr: pointer to data to encode (or NULL)
 * @nbytes: size of data.
 *
 * Returns the updated current XDR buffer position
 */
u32 *xdr_encode_opaque(u32 *p, const void *ptr, unsigned int nbytes)
{
	*p++ = htonl(nbytes);
	return xdr_encode_opaque_fixed(p, ptr, nbytes);
}
EXPORT_SYMBOL(xdr_encode_opaque);

u32 *
xdr_encode_string(u32 *p, const char *string)
{
	return xdr_encode_array(p, string, strlen(string));
}

u32 *
xdr_decode_string(u32 *p, char **sp, int *lenp, int maxlen)
{
	unsigned int	len;
	char		*string;

	if ((len = ntohl(*p++)) > maxlen)
		return NULL;
	if (lenp)
		*lenp = len;
	if ((len % 4) != 0) {
		string = (char *) p;
	} else {
		string = (char *) (p - 1);
		memmove(string, p, len);
	}
	string[len] = '\0';
	*sp = string;
	return p + XDR_QUADLEN(len);
}
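
/*
 * Explanatory note (not in the original): when len is not a multiple
 * of 4, the first XDR pad byte after the string provides room for the
 * terminating NUL, so the data can stay in place. When len is a
 * multiple of 4 there is no pad byte, so the string is shifted back
 * over the (already consumed) length word to make room for the NUL.
 */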

u32 *
xdr_decode_string_inplace(u32 *p, char **sp, int *lenp, int maxlen)
{
	unsigned int	len;

	if ((len = ntohl(*p++)) > maxlen)
		return NULL;
	*lenp = len;
	*sp = (char *) p;
	return p + XDR_QUADLEN(len);
}

void
xdr_encode_pages(struct xdr_buf *xdr, struct page **pages, unsigned int base,
		 unsigned int len)
{
	struct kvec *tail = xdr->tail;
	u32 *p;

	xdr->pages = pages;
	xdr->page_base = base;
	xdr->page_len = len;

	p = (u32 *)xdr->head[0].iov_base + XDR_QUADLEN(xdr->head[0].iov_len);
	tail->iov_base = p;
	tail->iov_len = 0;

	if (len & 3) {
		unsigned int pad = 4 - (len & 3);

		*p = 0;
		tail->iov_base = (char *)p + (len & 3);
		tail->iov_len  = pad;
		len += pad;
	}
	xdr->buflen += len;
	xdr->len += len;
}
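
/*
 * Illustrative note (not from the original): if the page data is, say,
 * 10 bytes long, then (len & 3) == 2 and pad == 2; the pad bytes live
 * in the zeroed word following the head, so the tail kvec starts two
 * bytes into that word and the on-the-wire stream stays 4-byte
 * aligned.
 */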

void
xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset,
		 struct page **pages, unsigned int base, unsigned int len)
{
	struct kvec *head = xdr->head;
	struct kvec *tail = xdr->tail;
	char *buf = (char *)head->iov_base;
	unsigned int buflen = head->iov_len;

	head->iov_len  = offset;

	xdr->pages = pages;
	xdr->page_base = base;
	xdr->page_len = len;

	tail->iov_base = buf + offset;
	tail->iov_len = buflen - offset;

	xdr->buflen += len;
}

ssize_t
xdr_partial_copy_from_skb(struct xdr_buf *xdr, unsigned int base,
			  skb_reader_t *desc,
			  skb_read_actor_t copy_actor)
{
	struct page	**ppage = xdr->pages;
	unsigned int	len, pglen = xdr->page_len;
	ssize_t		copied = 0;
	int		ret;

	len = xdr->head[0].iov_len;
	if (base < len) {
		len -= base;
		ret = copy_actor(desc, (char *)xdr->head[0].iov_base + base, len);
		copied += ret;
		if (ret != len || !desc->count)
			goto out;
		base = 0;
	} else
		base -= len;

	if (pglen == 0)
		goto copy_tail;
	if (base >= pglen) {
		base -= pglen;
		goto copy_tail;
	}
	if (base || xdr->page_base) {
		pglen -= base;
		base  += xdr->page_base;
		ppage += base >> PAGE_CACHE_SHIFT;
		base &= ~PAGE_CACHE_MASK;
	}
	do {
		char *kaddr;

		/* ACL likes to be lazy in allocating pages - ACLs
		 * are small by default but can get huge. */
		if (unlikely(*ppage == NULL)) {
			*ppage = alloc_page(GFP_ATOMIC);
			if (unlikely(*ppage == NULL)) {
				if (copied == 0)
					copied = -ENOMEM;
				goto out;
			}
		}

		len = PAGE_CACHE_SIZE;
		kaddr = kmap_atomic(*ppage, KM_SKB_SUNRPC_DATA);
		if (base) {
			len -= base;
			if (pglen < len)
				len = pglen;
			ret = copy_actor(desc, kaddr + base, len);
			base = 0;
		} else {
			if (pglen < len)
				len = pglen;
			ret = copy_actor(desc, kaddr, len);
		}
		flush_dcache_page(*ppage);
		kunmap_atomic(kaddr, KM_SKB_SUNRPC_DATA);
		copied += ret;
		if (ret != len || !desc->count)
			goto out;
		ppage++;
	} while ((pglen -= len) != 0);
copy_tail:
	len = xdr->tail[0].iov_len;
	if (base < len)
		copied += copy_actor(desc, (char *)xdr->tail[0].iov_base + base, len - base);
out:
	return copied;
}


int
xdr_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen,
		struct xdr_buf *xdr, unsigned int base, int msgflags)
{
	struct page **ppage = xdr->pages;
	unsigned int len, pglen = xdr->page_len;
	int err, ret = 0;
	ssize_t (*sendpage)(struct socket *, struct page *, int, size_t, int);

	len = xdr->head[0].iov_len;
	if (base < len || (addr != NULL && base == 0)) {
		struct kvec iov = {
			.iov_base = xdr->head[0].iov_base + base,
			.iov_len  = len - base,
		};
		struct msghdr msg = {
			.msg_name    = addr,
			.msg_namelen = addrlen,
			.msg_flags   = msgflags,
		};
		if (xdr->len > len)
			msg.msg_flags |= MSG_MORE;

		if (iov.iov_len != 0)
			err = kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len);
		else
			err = kernel_sendmsg(sock, &msg, NULL, 0, 0);
		if (ret == 0)
			ret = err;
		else if (err > 0)
			ret += err;
		if (err != iov.iov_len)
			goto out;
		base = 0;
	} else
		base -= len;

	if (pglen == 0)
		goto copy_tail;
	if (base >= pglen) {
		base -= pglen;
		goto copy_tail;
	}
	if (base || xdr->page_base) {
		pglen -= base;
		base  += xdr->page_base;
		ppage += base >> PAGE_CACHE_SHIFT;
		base &= ~PAGE_CACHE_MASK;
	}

	sendpage = sock->ops->sendpage ? : sock_no_sendpage;
	do {
		int flags = msgflags;

		len = PAGE_CACHE_SIZE;
		if (base)
			len -= base;
		if (pglen < len)
			len = pglen;

		if (pglen != len || xdr->tail[0].iov_len != 0)
			flags |= MSG_MORE;

		/* Hmm... We might be dealing with highmem pages */
		if (PageHighMem(*ppage))
			sendpage = sock_no_sendpage;
		err = sendpage(sock, *ppage, base, len, flags);
		if (ret == 0)
			ret = err;
		else if (err > 0)
			ret += err;
		if (err != len)
			goto out;
		base = 0;
		ppage++;
	} while ((pglen -= len) != 0);
copy_tail:
	len = xdr->tail[0].iov_len;
	if (base < len) {
		struct kvec iov = {
			.iov_base = xdr->tail[0].iov_base + base,
			.iov_len  = len - base,
		};
		struct msghdr msg = {
			.msg_flags   = msgflags,
		};
		err = kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len);
		if (ret == 0)
			ret = err;
		else if (err > 0)
			ret += err;
	}
out:
	return ret;
}


/*
 * Helper routines for doing 'memmove' like operations on a struct xdr_buf
 *
 * _shift_data_right_pages
 * @pages: vector of pages containing both the source and dest memory area.
 * @pgto_base: page vector address of destination
 * @pgfrom_base: page vector address of source
 * @len: number of bytes to copy
 *
 * Note: the addresses pgto_base and pgfrom_base are both calculated in
 *       the same way:
 *            if a memory area starts at byte 'base' in page 'pages[i]',
 *            then its address is given as (i << PAGE_CACHE_SHIFT) + base
 * Also note: pgfrom_base must be < pgto_base, but the memory areas
 *	they point to may overlap.
 */
static void
_shift_data_right_pages(struct page **pages, size_t pgto_base,
		size_t pgfrom_base, size_t len)
{
	struct page **pgfrom, **pgto;
	char *vfrom, *vto;
	size_t copy;

	BUG_ON(pgto_base <= pgfrom_base);

	pgto_base += len;
	pgfrom_base += len;

	pgto = pages + (pgto_base >> PAGE_CACHE_SHIFT);
	pgfrom = pages + (pgfrom_base >> PAGE_CACHE_SHIFT);

	pgto_base &= ~PAGE_CACHE_MASK;
	pgfrom_base &= ~PAGE_CACHE_MASK;

	do {
		/* Are any pointers crossing a page boundary? */
		if (pgto_base == 0) {
			flush_dcache_page(*pgto);
			pgto_base = PAGE_CACHE_SIZE;
			pgto--;
		}
		if (pgfrom_base == 0) {
			pgfrom_base = PAGE_CACHE_SIZE;
			pgfrom--;
		}

		copy = len;
		if (copy > pgto_base)
			copy = pgto_base;
		if (copy > pgfrom_base)
			copy = pgfrom_base;
		pgto_base -= copy;
		pgfrom_base -= copy;

		vto = kmap_atomic(*pgto, KM_USER0);
		vfrom = kmap_atomic(*pgfrom, KM_USER1);
		memmove(vto + pgto_base, vfrom + pgfrom_base, copy);
		kunmap_atomic(vfrom, KM_USER1);
		kunmap_atomic(vto, KM_USER0);

	} while ((len -= copy) != 0);
	flush_dcache_page(*pgto);
}
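
/*
 * Worked example (illustrative): with 4K pages, the flat "page vector
 * address" 9000 decodes as page index 9000 >> PAGE_CACHE_SHIFT == 2
 * and in-page offset 9000 & ~PAGE_CACHE_MASK == 808. The copy starts
 * at the tail end of both areas and works backwards, which is why an
 * overlapping source and destination are safe as long as the
 * destination lies to the right of the source.
 */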

/*
 * _copy_to_pages
 * @pages: array of pages
 * @pgbase: page vector address of destination
 * @p: pointer to source data
 * @len: length
 *
 * Copies data from an arbitrary memory location into an array of pages
 * The copy is assumed to be non-overlapping.
 */
static void
_copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len)
{
	struct page **pgto;
	char *vto;
	size_t copy;

	pgto = pages + (pgbase >> PAGE_CACHE_SHIFT);
	pgbase &= ~PAGE_CACHE_MASK;

	do {
		copy = PAGE_CACHE_SIZE - pgbase;
		if (copy > len)
			copy = len;

		vto = kmap_atomic(*pgto, KM_USER0);
		memcpy(vto + pgbase, p, copy);
		kunmap_atomic(vto, KM_USER0);

		pgbase += copy;
		if (pgbase == PAGE_CACHE_SIZE) {
			flush_dcache_page(*pgto);
			pgbase = 0;
			pgto++;
		}
		p += copy;

	} while ((len -= copy) != 0);
	flush_dcache_page(*pgto);
}

/*
 * _copy_from_pages
 * @p: pointer to destination
 * @pages: array of pages
 * @pgbase: offset of source data
 * @len: length
 *
 * Copies data into an arbitrary memory location from an array of pages
 * The copy is assumed to be non-overlapping.
 */
static void
_copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
{
	struct page **pgfrom;
	char *vfrom;
	size_t copy;

	pgfrom = pages + (pgbase >> PAGE_CACHE_SHIFT);
	pgbase &= ~PAGE_CACHE_MASK;

	do {
		copy = PAGE_CACHE_SIZE - pgbase;
		if (copy > len)
			copy = len;

		vfrom = kmap_atomic(*pgfrom, KM_USER0);
		memcpy(p, vfrom + pgbase, copy);
		kunmap_atomic(vfrom, KM_USER0);

		pgbase += copy;
		if (pgbase == PAGE_CACHE_SIZE) {
			pgbase = 0;
			pgfrom++;
		}
		p += copy;

	} while ((len -= copy) != 0);
}

/*
 * xdr_shrink_bufhead
 * @buf: xdr_buf
 * @len: bytes to remove from buf->head[0]
 *
 * Shrinks XDR buffer's header kvec buf->head[0] by
 * 'len' bytes. The extra data is not lost, but is instead
 * moved into the inlined pages and/or the tail.
 */
static void
xdr_shrink_bufhead(struct xdr_buf *buf, size_t len)
{
	struct kvec *head, *tail;
	size_t copy, offs;
	unsigned int pglen = buf->page_len;

	tail = buf->tail;
	head = buf->head;
	BUG_ON (len > head->iov_len);

	/* Shift the tail first */
	if (tail->iov_len != 0) {
		if (tail->iov_len > len) {
			copy = tail->iov_len - len;
			memmove((char *)tail->iov_base + len,
					tail->iov_base, copy);
		}
		/* Copy from the inlined pages into the tail */
		copy = len;
		if (copy > pglen)
			copy = pglen;
		offs = len - copy;
		if (offs >= tail->iov_len)
			copy = 0;
		else if (copy > tail->iov_len - offs)
			copy = tail->iov_len - offs;
		if (copy != 0)
			_copy_from_pages((char *)tail->iov_base + offs,
					buf->pages,
					buf->page_base + pglen + offs - len,
					copy);
		/* Do we also need to copy data from the head into the tail ? */
		if (len > pglen) {
			offs = copy = len - pglen;
			if (copy > tail->iov_len)
				copy = tail->iov_len;
			memcpy(tail->iov_base,
					(char *)head->iov_base +
					head->iov_len - offs,
					copy);
		}
	}
	/* Now handle pages */
	if (pglen != 0) {
		if (pglen > len)
			_shift_data_right_pages(buf->pages,
					buf->page_base + len,
					buf->page_base,
					pglen - len);
		copy = len;
		if (len > pglen)
			copy = pglen;
		_copy_to_pages(buf->pages, buf->page_base,
				(char *)head->iov_base + head->iov_len - len,
				copy);
	}
	head->iov_len -= len;
	buf->buflen -= len;
	/* Have we truncated the message? */
	if (buf->len > buf->buflen)
		buf->len = buf->buflen;
}

/*
 * xdr_shrink_pagelen
 * @buf: xdr_buf
 * @len: bytes to remove from buf->pages
 *
 * Shrinks XDR buffer's page array buf->pages by
 * 'len' bytes. The extra data is not lost, but is instead
 * moved into the tail.
 */
static void
xdr_shrink_pagelen(struct xdr_buf *buf, size_t len)
{
	struct kvec *tail;
	size_t copy;
	char *p;
	unsigned int pglen = buf->page_len;

	tail = buf->tail;
	BUG_ON (len > pglen);

	/* Shift the tail first */
	if (tail->iov_len != 0) {
		p = (char *)tail->iov_base + len;
		if (tail->iov_len > len) {
			copy = tail->iov_len - len;
			memmove(p, tail->iov_base, copy);
		} else
			buf->buflen -= len;
		/* Copy from the inlined pages into the tail */
		copy = len;
		if (copy > tail->iov_len)
			copy = tail->iov_len;
		_copy_from_pages((char *)tail->iov_base,
				buf->pages, buf->page_base + pglen - len,
				copy);
	}
	buf->page_len -= len;
	buf->buflen -= len;
	/* Have we truncated the message? */
	if (buf->len > buf->buflen)
		buf->len = buf->buflen;
}

void
xdr_shift_buf(struct xdr_buf *buf, size_t len)
{
	xdr_shrink_bufhead(buf, len);
}

/**
 * xdr_init_encode - Initialize a struct xdr_stream for sending data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer in which to encode data
 * @p: current pointer inside XDR buffer
 *
 * Note: at the moment the RPC client only passes the length of our
 *	 scratch buffer in the xdr_buf's header kvec. Previously this
 *	 meant we needed to call xdr_adjust_iovec() after encoding the
 *	 data. With the new scheme, the xdr_stream manages the details
 *	 of the buffer length, and takes care of adjusting the kvec
 *	 length for us.
 */
void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, uint32_t *p)
{
	struct kvec *iov = buf->head;
	int scratch_len = buf->buflen - buf->page_len - buf->tail[0].iov_len;

	BUG_ON(scratch_len < 0);
	xdr->buf = buf;
	xdr->iov = iov;
	xdr->p = (uint32_t *)((char *)iov->iov_base + iov->iov_len);
	xdr->end = (uint32_t *)((char *)iov->iov_base + scratch_len);
	BUG_ON(iov->iov_len > scratch_len);

	if (p != xdr->p && p != NULL) {
		size_t len;

		BUG_ON(p < xdr->p || p > xdr->end);
		len = (char *)p - (char *)xdr->p;
		xdr->p = p;
		buf->len += len;
		iov->iov_len += len;
	}
}
EXPORT_SYMBOL(xdr_init_encode);

/**
 * xdr_reserve_space - Reserve buffer space for sending
 * @xdr: pointer to xdr_stream
 * @nbytes: number of bytes to reserve
 *
 * Checks that we have enough buffer space to encode 'nbytes' more
 * bytes of data. If so, update the total xdr_buf length, and
 * adjust the length of the current kvec.
 */
uint32_t * xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes)
{
	uint32_t *p = xdr->p;
	uint32_t *q;

	/* align nbytes on the next 32-bit boundary */
	nbytes += 3;
	nbytes &= ~3;
	q = p + (nbytes >> 2);
	if (unlikely(q > xdr->end || q < p))
		return NULL;
	xdr->p = q;
	xdr->iov->iov_len += nbytes;
	xdr->buf->len += nbytes;
	return p;
}
EXPORT_SYMBOL(xdr_reserve_space);
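
/*
 * Usage sketch (illustrative; the function and its parameters below
 * are hypothetical): a typical encode routine reserves space first and
 * only then fills it in, e.g.
 *
 *	static int example_encode(struct xdr_stream *xdr, u32 value)
 *	{
 *		uint32_t *p;
 *
 *		p = xdr_reserve_space(xdr, 8);
 *		if (p == NULL)
 *			return -EMSGSIZE;
 *		*p++ = htonl(value);
 *		*p++ = htonl(0);
 *		return 0;
 *	}
 */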

/**
 * xdr_write_pages - Insert a list of pages into an XDR buffer for sending
 * @xdr: pointer to xdr_stream
 * @pages: list of pages
 * @base: offset of first byte
 * @len: length of data in bytes
 *
 */
void xdr_write_pages(struct xdr_stream *xdr, struct page **pages, unsigned int base,
		 unsigned int len)
{
	struct xdr_buf *buf = xdr->buf;
	struct kvec *iov = buf->tail;
	buf->pages = pages;
	buf->page_base = base;
	buf->page_len = len;

	iov->iov_base = (char *)xdr->p;
	iov->iov_len  = 0;
	xdr->iov = iov;

	if (len & 3) {
		unsigned int pad = 4 - (len & 3);

		BUG_ON(xdr->p >= xdr->end);
		iov->iov_base = (char *)xdr->p + (len & 3);
		iov->iov_len  += pad;
		len += pad;
		*xdr->p++ = 0;
	}
	buf->buflen += len;
	buf->len += len;
}
EXPORT_SYMBOL(xdr_write_pages);

/**
 * xdr_init_decode - Initialize an xdr_stream for decoding data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer from which to decode data
 * @p: current pointer inside XDR buffer
 */
void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, uint32_t *p)
{
	struct kvec *iov = buf->head;
	unsigned int len = iov->iov_len;

	if (len > buf->len)
		len = buf->len;
	xdr->buf = buf;
	xdr->iov = iov;
	xdr->p = p;
	xdr->end = (uint32_t *)((char *)iov->iov_base + len);
}
EXPORT_SYMBOL(xdr_init_decode);

/**
 * xdr_inline_decode - Retrieve non-page XDR data to decode
 * @xdr: pointer to xdr_stream struct
 * @nbytes: number of bytes of data to decode
 *
 * Check if the input buffer is long enough to enable us to decode
 * 'nbytes' more bytes of data starting at the current position.
 * If so return the current pointer, then update the current
 * pointer position.
 */
uint32_t * xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
{
	uint32_t *p = xdr->p;
	uint32_t *q = p + XDR_QUADLEN(nbytes);

	if (unlikely(q > xdr->end || q < p))
		return NULL;
	xdr->p = q;
	return p;
}
EXPORT_SYMBOL(xdr_inline_decode);
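
/*
 * Usage sketch (illustrative; identifiers are hypothetical): the
 * decode-side counterpart of the reserve/encode pattern above, e.g.
 *
 *	static int example_decode(struct xdr_stream *xdr, u32 *status)
 *	{
 *		uint32_t *p;
 *
 *		p = xdr_inline_decode(xdr, 4);
 *		if (p == NULL)
 *			return -EIO;
 *		*status = ntohl(*p);
 *		return 0;
 *	}
 */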

/**
 * xdr_read_pages - Ensure page-based XDR data to decode is aligned at current pointer position
 * @xdr: pointer to xdr_stream struct
 * @len: number of bytes of page data
 *
 * Moves data beyond the current pointer position from the XDR head[] buffer
 * into the page list. Any data that lies beyond current position + "len"
 * bytes is moved into the XDR tail[]. The current pointer is then
 * repositioned at the beginning of the XDR tail.
 */
void xdr_read_pages(struct xdr_stream *xdr, unsigned int len)
{
	struct xdr_buf *buf = xdr->buf;
	struct kvec *iov;
	ssize_t shift;
	unsigned int end;
	int padding;

	/* Realign pages to current pointer position */
	iov  = buf->head;
	shift = iov->iov_len + (char *)iov->iov_base - (char *)xdr->p;
	if (shift > 0)
		xdr_shrink_bufhead(buf, shift);

	/* Truncate page data and move it into the tail */
	if (buf->page_len > len)
		xdr_shrink_pagelen(buf, buf->page_len - len);
	padding = (XDR_QUADLEN(len) << 2) - len;
	xdr->iov = iov = buf->tail;
	/* Compute remaining message length.  */
	end = iov->iov_len;
	shift = buf->buflen - buf->len;
	if (shift < end)
		end -= shift;
	else if (shift > 0)
		end = 0;
	/*
	 * Position current pointer at beginning of tail, and
	 * set remaining message length.
	 */
	xdr->p = (uint32_t *)((char *)iov->iov_base + padding);
	xdr->end = (uint32_t *)((char *)iov->iov_base + end);
}
EXPORT_SYMBOL(xdr_read_pages);

static struct kvec empty_iov = {.iov_base = NULL, .iov_len = 0};

void
xdr_buf_from_iov(struct kvec *iov, struct xdr_buf *buf)
{
	buf->head[0] = *iov;
	buf->tail[0] = empty_iov;
	buf->page_len = 0;
	buf->buflen = buf->len = iov->iov_len;
}

/* Sets subiov to the intersection of iov with the buffer of length len
 * starting base bytes after iov.  Indicates empty intersection by setting
 * length of subiov to zero.  Decrements len by length of subiov, sets base
 * to zero (or decrements it by length of iov if subiov is empty). */
static void
iov_subsegment(struct kvec *iov, struct kvec *subiov, int *base, int *len)
{
	if (*base > iov->iov_len) {
		subiov->iov_base = NULL;
		subiov->iov_len = 0;
		*base -= iov->iov_len;
	} else {
		subiov->iov_base = iov->iov_base + *base;
		subiov->iov_len = min(*len, (int)iov->iov_len - *base);
		*base = 0;
	}
	*len -= subiov->iov_len;
}
/* Sets subbuf to the portion of buf of length len beginning base bytes
 * from the start of buf. Returns -1 if base or length are out of bounds. */
int
xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf,
			int base, int len)
{
	int i;

	subbuf->buflen = subbuf->len = len;
	iov_subsegment(buf->head, subbuf->head, &base, &len);

	if (base < buf->page_len) {
		i = (base + buf->page_base) >> PAGE_CACHE_SHIFT;
		subbuf->pages = &buf->pages[i];
		subbuf->page_base = (base + buf->page_base) & ~PAGE_CACHE_MASK;
		subbuf->page_len = min((int)buf->page_len - base, len);
		len -= subbuf->page_len;
		base = 0;
	} else {
		base -= buf->page_len;
		subbuf->page_len = 0;
	}

	iov_subsegment(buf->tail, subbuf->tail, &base, &len);
	if (base || len)
		return -1;
	return 0;
}
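
/*
 * Worked example (illustrative): for an xdr_buf with a 100-byte head,
 * 8192 bytes of page data and a 16-byte tail, xdr_buf_subsegment(buf,
 * &sub, 90, 40) yields a subbuf whose head covers bytes 90..99 of the
 * head (10 bytes) and whose page area covers the first 30 bytes of
 * page data; base and len both reach zero, so the call returns 0.
 */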

/* obj is assumed to point to allocated memory of size at least len: */
int
read_bytes_from_xdr_buf(struct xdr_buf *buf, int base, void *obj, int len)
{
	struct xdr_buf subbuf;
	int this_len;
	int status;

	status = xdr_buf_subsegment(buf, &subbuf, base, len);
	if (status)
		goto out;
	this_len = min(len, (int)subbuf.head[0].iov_len);
	memcpy(obj, subbuf.head[0].iov_base, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min(len, (int)subbuf.page_len);
	if (this_len)
		_copy_from_pages(obj, subbuf.pages, subbuf.page_base, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min(len, (int)subbuf.tail[0].iov_len);
	memcpy(obj, subbuf.tail[0].iov_base, this_len);
out:
	return status;
}

/* obj is assumed to point to allocated memory of size at least len: */
int
write_bytes_to_xdr_buf(struct xdr_buf *buf, int base, void *obj, int len)
{
	struct xdr_buf subbuf;
	int this_len;
	int status;

	status = xdr_buf_subsegment(buf, &subbuf, base, len);
	if (status)
		goto out;
	this_len = min(len, (int)subbuf.head[0].iov_len);
	memcpy(subbuf.head[0].iov_base, obj, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min(len, (int)subbuf.page_len);
	if (this_len)
		_copy_to_pages(subbuf.pages, subbuf.page_base, obj, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min(len, (int)subbuf.tail[0].iov_len);
	memcpy(subbuf.tail[0].iov_base, obj, this_len);
out:
	return status;
}

int
xdr_decode_word(struct xdr_buf *buf, int base, u32 *obj)
{
	u32	raw;
	int	status;

	status = read_bytes_from_xdr_buf(buf, base, &raw, sizeof(*obj));
	if (status)
		return status;
	*obj = ntohl(raw);
	return 0;
}

int
xdr_encode_word(struct xdr_buf *buf, int base, u32 obj)
{
	u32	raw = htonl(obj);

	return write_bytes_to_xdr_buf(buf, base, &raw, sizeof(obj));
}
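
/*
 * Round-trip sketch (illustrative): these helpers address the xdr_buf
 * as one flat byte stream, regardless of whether the word lands in the
 * head, the pages or the tail, e.g.
 *
 *	u32 val;
 *
 *	if (xdr_encode_word(buf, 0, 42) == 0 &&
 *	    xdr_decode_word(buf, 0, &val) == 0)
 *		BUG_ON(val != 42);
 */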

/* If the netobj starting offset bytes from the start of xdr_buf is contained
 * entirely in the head or the tail, set object to point to it; otherwise
 * try to find space for it at the end of the tail, copy it there, and
 * set obj to point to it. */
int
xdr_buf_read_netobj(struct xdr_buf *buf, struct xdr_netobj *obj, int offset)
{
	u32	tail_offset = buf->head[0].iov_len + buf->page_len;
	u32	obj_end_offset;

	if (xdr_decode_word(buf, offset, &obj->len))
		goto out;
	obj_end_offset = offset + 4 + obj->len;

	if (obj_end_offset <= buf->head[0].iov_len) {
		/* The obj is contained entirely in the head: */
		obj->data = buf->head[0].iov_base + offset + 4;
	} else if (offset + 4 >= tail_offset) {
		if (obj_end_offset - tail_offset
				> buf->tail[0].iov_len)
			goto out;
		/* The obj is contained entirely in the tail: */
		obj->data = buf->tail[0].iov_base
			+ offset - tail_offset + 4;
	} else {
		/* use end of tail as storage for obj:
		 * (We don't copy to the beginning because then we'd have
		 * to worry about doing a potentially overlapping copy.
		 * This assumes the object is at most half the length of the
		 * tail.) */
		if (obj->len > buf->tail[0].iov_len)
			goto out;
		obj->data = buf->tail[0].iov_base + buf->tail[0].iov_len -
				obj->len;
		if (read_bytes_from_xdr_buf(buf, offset + 4,
					obj->data, obj->len))
			goto out;

	}
	return 0;
out:
	return -1;
}

/* Returns 0 on success, or else a negative error code. */
static int
xdr_xcode_array2(struct xdr_buf *buf, unsigned int base,
		 struct xdr_array2_desc *desc, int encode)
{
	char *elem = NULL, *c;
	unsigned int copied = 0, todo, avail_here;
	struct page **ppages = NULL;
	int err;

	if (encode) {
		if (xdr_encode_word(buf, base, desc->array_len) != 0)
			return -EINVAL;
	} else {
		if (xdr_decode_word(buf, base, &desc->array_len) != 0 ||
		    desc->array_len > desc->array_maxlen ||
		    (unsigned long) base + 4 + desc->array_len *
				    desc->elem_size > buf->len)
			return -EINVAL;
	}
	base += 4;

	if (!desc->xcode)
		return 0;

	todo = desc->array_len * desc->elem_size;

	/* process head */
	if (todo && base < buf->head->iov_len) {
		c = buf->head->iov_base + base;
		avail_here = min_t(unsigned int, todo,
				   buf->head->iov_len - base);
		todo -= avail_here;

		while (avail_here >= desc->elem_size) {
			err = desc->xcode(desc, c);
			if (err)
				goto out;
			c += desc->elem_size;
			avail_here -= desc->elem_size;
		}
		if (avail_here) {
			if (!elem) {
				elem = kmalloc(desc->elem_size, GFP_KERNEL);
				err = -ENOMEM;
				if (!elem)
					goto out;
			}
			if (encode) {
				err = desc->xcode(desc, elem);
				if (err)
					goto out;
				memcpy(c, elem, avail_here);
			} else
				memcpy(elem, c, avail_here);
			copied = avail_here;
		}
		base = buf->head->iov_len;  /* align to start of pages */
	}

	/* process pages array */
	base -= buf->head->iov_len;
	if (todo && base < buf->page_len) {
		unsigned int avail_page;

		avail_here = min(todo, buf->page_len - base);
		todo -= avail_here;

		base += buf->page_base;
		ppages = buf->pages + (base >> PAGE_CACHE_SHIFT);
		base &= ~PAGE_CACHE_MASK;
		avail_page = min_t(unsigned int, PAGE_CACHE_SIZE - base,
					avail_here);
		c = kmap(*ppages) + base;

		while (avail_here) {
			avail_here -= avail_page;
			if (copied || avail_page < desc->elem_size) {
				unsigned int l = min(avail_page,
					desc->elem_size - copied);
				if (!elem) {
					elem = kmalloc(desc->elem_size,
						       GFP_KERNEL);
					err = -ENOMEM;
					if (!elem)
						goto out;
				}
				if (encode) {
					if (!copied) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
					}
					memcpy(c, elem + copied, l);
					copied += l;
					if (copied == desc->elem_size)
						copied = 0;
				} else {
					memcpy(elem + copied, c, l);
					copied += l;
					if (copied == desc->elem_size) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
						copied = 0;
					}
				}
				avail_page -= l;
				c += l;
			}
			while (avail_page >= desc->elem_size) {
				err = desc->xcode(desc, c);
				if (err)
					goto out;
				c += desc->elem_size;
				avail_page -= desc->elem_size;
			}
			if (avail_page) {
				unsigned int l = min(avail_page,
					    desc->elem_size - copied);
				if (!elem) {
					elem = kmalloc(desc->elem_size,
						       GFP_KERNEL);
					err = -ENOMEM;
					if (!elem)
						goto out;
				}
				if (encode) {
					if (!copied) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
					}
					memcpy(c, elem + copied, l);
					copied += l;
					if (copied == desc->elem_size)
						copied = 0;
				} else {
					memcpy(elem + copied, c, l);
					copied += l;
					if (copied == desc->elem_size) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
						copied = 0;
					}
				}
			}
			if (avail_here) {
				kunmap(*ppages);
				ppages++;
				c = kmap(*ppages);
			}

			avail_page = min(avail_here,
				 (unsigned int) PAGE_CACHE_SIZE);
		}
		base = buf->page_len;  /* align to start of tail */
	}

	/* process tail */
	base -= buf->page_len;
	if (todo) {
		c = buf->tail->iov_base + base;
		if (copied) {
			unsigned int l = desc->elem_size - copied;

			if (encode)
				memcpy(c, elem + copied, l);
			else {
				memcpy(elem + copied, c, l);
				err = desc->xcode(desc, elem);
				if (err)
					goto out;
			}
			todo -= l;
			c += l;
		}
		while (todo) {
			err = desc->xcode(desc, c);
			if (err)
				goto out;
			c += desc->elem_size;
			todo -= desc->elem_size;
		}
	}
	err = 0;

out:
	if (elem)
		kfree(elem);
	if (ppages)
		kunmap(*ppages);
	return err;
}

int
xdr_decode_array2(struct xdr_buf *buf, unsigned int base,
		  struct xdr_array2_desc *desc)
{
	if (base >= buf->len)
		return -EINVAL;

	return xdr_xcode_array2(buf, base, desc, 0);
}

int
xdr_encode_array2(struct xdr_buf *buf, unsigned int base,
		  struct xdr_array2_desc *desc)
{
	if ((unsigned long) base + 4 + desc->array_len * desc->elem_size >
	    buf->head->iov_len + buf->page_len + buf->tail->iov_len)
		return -EINVAL;

	return xdr_xcode_array2(buf, base, desc, 1);
}
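
/*
 * Usage sketch (illustrative; the callback and element type are
 * hypothetical): callers describe the array with an xdr_array2_desc
 * whose xcode callback processes one elem_size-byte element at a time,
 * e.g.
 *
 *	static int example_xcode(struct xdr_array2_desc *desc, void *elem)
 *	{
 *		u32 *word = elem;
 *
 *		*word = ntohl(*word);	// decode one 4-byte element
 *		return 0;
 *	}
 *
 *	struct xdr_array2_desc desc = {
 *		.elem_size    = 4,
 *		.array_maxlen = 1024,
 *		.xcode        = example_xcode,
 *	};
 *	err = xdr_decode_array2(buf, base, &desc);
 */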