/* file-nommu.c: no-MMU version of ramfs
 *
 * Copyright (C) 2005 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/ramfs.h>
#include <linux/quotaops.h>
#include <linux/pagevec.h>
#include <linux/mman.h>

#include <asm/uaccess.h>
#include "internal.h"

static int ramfs_nommu_setattr(struct dentry *, struct iattr *);

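/*
 * address space operations for ramfs on nommu: the pages live permanently
 * in the page cache with no backing device to write them to, so writes
 * complete in write_end and set_page_dirty deliberately skips the dirty
 * page accounting that writeback would otherwise expect
 */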
const struct address_space_operations ramfs_aops = {
	.readpage		= simple_readpage,
	.write_begin		= simple_write_begin,
	.write_end		= simple_write_end,
	.set_page_dirty		= __set_page_dirty_no_writeback,
};

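/*
 * plain read(2) and write(2) are serviced by wrapping the ->aio_read() and
 * ->aio_write() methods with do_sync_read() and do_sync_write()
 */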
const struct file_operations ramfs_file_operations = {
	.mmap			= ramfs_nommu_mmap,
	.get_unmapped_area	= ramfs_nommu_get_unmapped_area,
	.read			= do_sync_read,
	.aio_read		= generic_file_aio_read,
	.write			= do_sync_write,
	.aio_write		= generic_file_aio_write,
	.fsync			= simple_sync_file,
	.splice_read		= generic_file_splice_read,
	.splice_write		= generic_file_splice_write,
	.llseek			= generic_file_llseek,
};

const struct inode_operations ramfs_file_inode_operations = {
	.setattr		= ramfs_nommu_setattr,
	.getattr		= simple_getattr,
};

/*****************************************************************************/
/*
 * add a contiguous run of pages into a ramfs inode when it's truncated up
 * from size zero, on the assumption that it's about to be used for a shared
 * mmap
 */
int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize)
{
	struct pagevec lru_pvec;
	unsigned long npages, xpages, loop, limit;
	struct page *pages;
	unsigned order;
	void *data;
	int ret;

	/* check the proposed size against the allocator's limit, the
	 * process's file size rlimit and the superblock's maximum */
	order = get_order(newsize);
	if (unlikely(order >= MAX_ORDER))
		goto too_big;

	limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
	if (limit != RLIM_INFINITY && newsize > limit)
		goto fsize_exceeded;

	if (newsize > inode->i_sb->s_maxbytes)
		goto too_big;

	i_size_write(inode, newsize);

	/* allocate enough contiguous pages to be able to satisfy the
	 * request */
	pages = alloc_pages(mapping_gfp_mask(inode->i_mapping), order);
	if (!pages)
		return -ENOMEM;

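	/* a worked example, assuming 4KB pages and a hypothetical truncate to
	 * 10000 bytes: get_order() gives 2, so four pages (xpages) come back
	 * from the allocator while only three (npages) are needed; the
	 * surplus fourth page is freed below, leaving 12288 bytes of zeroed,
	 * physically contiguous backing store behind an i_size of 10000 */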
	/* split the high-order page into an array of single pages */
	xpages = 1UL << order;
	npages = (newsize + PAGE_SIZE - 1) >> PAGE_SHIFT;

	split_page(pages, order);

	/* trim off any pages we don't actually require */
	for (loop = npages; loop < xpages; loop++)
		__free_page(pages + loop);

	/* clear the memory we allocated */
	newsize = PAGE_SIZE * npages;
	data = page_address(pages);
	memset(data, 0, newsize);

	/* attach all the pages to the inode's address space */
	pagevec_init(&lru_pvec, 0);
	for (loop = 0; loop < npages; loop++) {
		struct page *page = pages + loop;

		ret = add_to_page_cache(page, inode->i_mapping, loop, GFP_KERNEL);
		if (ret < 0)
			goto add_error;

		/* batch the page onto the LRU lists, flushing the pagevec
		 * whenever it fills up */
		if (!pagevec_add(&lru_pvec, page))
			__pagevec_lru_add(&lru_pvec);

		unlock_page(page);
	}

	pagevec_lru_add(&lru_pvec);	/* drain any pages still batched */
	return 0;

 fsize_exceeded:
	send_sig(SIGXFSZ, current, 0);
 too_big:
	return -EFBIG;

 add_error:
	/* pages before 'loop' are already attached to the page cache and
	 * will be released when the inode itself is discarded */
	page_cache_release(pages + loop);
	for (loop++; loop < npages; loop++)
		__free_page(pages + loop);
	return ret;
}

/*****************************************************************************/
/*
 * check that file shrinkage doesn't leave any VMAs dangling in midair
 */
static int ramfs_nommu_check_mappings(struct inode *inode,
				      size_t newsize, size_t size)
{
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;

	/* search for VMAs that fall within the dead zone */
	vma_prio_tree_foreach(vma, &iter, &inode->i_mapping->i_mmap,
			      newsize >> PAGE_SHIFT,
			      (size + PAGE_SIZE - 1) >> PAGE_SHIFT
			      ) {
		/* found one - only interested if it's shared out of the page
		 * cache */
		if (vma->vm_flags & VM_SHARED)
			return -ETXTBSY; /* not quite true, but near enough */
	}

	return 0;
}

/*****************************************************************************/
/*
 * change the size of the backing store attached to a ramfs inode
 */
static int ramfs_nommu_resize(struct inode *inode, loff_t newsize, loff_t size)
{
	int ret;

	/* assume a truncate from zero size is going to be for the purposes of
	 * shared mmap */
	if (size == 0) {
		/* the nommu backing store is addressed with size_t, so on the
		 * (typically 32-bit) targets this code serves, sizes of 4GB
		 * or more can't be represented */
		if (unlikely(newsize >> 32))
			return -EFBIG;

		return ramfs_nommu_expand_for_mapping(inode, newsize);
	}

	/* check that a decrease in size doesn't cut off any shared mappings */
	if (newsize < size) {
		ret = ramfs_nommu_check_mappings(inode, newsize, size);
		if (ret < 0)
			return ret;
	}

	ret = vmtruncate(inode, newsize);

	return ret;
}

/*****************************************************************************/
/*
 * handle a change of attributes
 * - we're specifically interested in a change of size
 */
static int ramfs_nommu_setattr(struct dentry *dentry, struct iattr *ia)
{
	struct inode *inode = dentry->d_inode;
	unsigned int old_ia_valid = ia->ia_valid;
	int ret = 0;

	/* POSIX UID/GID verification for setting inode attributes */
	ret = inode_change_ok(inode, ia);
	if (ret)
		return ret;

	/* by providing our own setattr() method, we skip this quotaism */
	if ((old_ia_valid & ATTR_UID && ia->ia_uid != inode->i_uid) ||
	    (old_ia_valid & ATTR_GID && ia->ia_gid != inode->i_gid))
		ret = DQUOT_TRANSFER(inode, ia) ? -EDQUOT : 0;

	/* pick out size-changing events */
	if (ia->ia_valid & ATTR_SIZE) {
		loff_t size = i_size_read(inode);
		if (ia->ia_size != size) {
			ret = ramfs_nommu_resize(inode, ia->ia_size, size);
			if (ret < 0 || ia->ia_valid == ATTR_SIZE)
				goto out;
		} else {
			/* we skipped the truncate but must still update
			 * timestamps
			 */
			ia->ia_valid |= ATTR_MTIME|ATTR_CTIME;
		}
	}

	ret = inode_setattr(inode, ia);
 out:
	ia->ia_valid = old_ia_valid;
	return ret;
}

/*****************************************************************************/
/*
 * try to determine where a shared mapping can be made
 * - we require that:
 *   - the pages to be mapped must exist
 *   - the pages must be physically contiguous in sequence
 */
unsigned long ramfs_nommu_get_unmapped_area(struct file *file,
					    unsigned long addr, unsigned long len,
					    unsigned long pgoff, unsigned long flags)
{
	unsigned long maxpages, lpages, nr, loop, ret;
	struct inode *inode = file->f_path.dentry->d_inode;
	struct page **pages = NULL, **ptr, *page;
	loff_t isize;

	/* for a private mapping, the nommu mmap code copies the file data
	 * into freshly allocated memory, so any address will do here */
	if (!(flags & MAP_SHARED))
		return addr;

	/* the mapping mustn't extend beyond the EOF */
	lpages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	isize = i_size_read(inode);

	ret = -EINVAL;
	maxpages = (isize + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (pgoff >= maxpages)
		goto out;

	if (maxpages - pgoff < lpages)
		goto out;

	/* gang-find the pages */
	ret = -ENOMEM;
	pages = kzalloc(lpages * sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		goto out;

	nr = find_get_pages(inode->i_mapping, pgoff, lpages, pages);
	if (nr != lpages)
		goto out; /* leave if some pages were missing */

	/* check the pages for physical adjacency: on the flat memory model
	 * these nommu targets use, consecutive struct page pointers mean
	 * consecutive page frames, so pointer arithmetic suffices */
	ptr = pages;
	page = *ptr++;
	page++;
	for (loop = lpages; loop > 1; loop--)
		if (*ptr++ != page++)
			goto out;

	/* okay - all conditions fulfilled */
	ret = (unsigned long) page_address(pages[0]);

 out:
	if (pages) {
		/* drop the references find_get_pages() took; only the first
		 * 'nr' slots were populated, the rest are still NULL */
		ptr = pages;
		for (loop = nr; loop > 0; loop--)
			put_page(*ptr++);
		kfree(pages);
	}

	return ret;
}

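/*
 * illustrative usage sketch, not part of the original source: a nommu
 * userspace process might share a ramfs file roughly like this (the path
 * and size are purely hypothetical):
 *
 *	int fd = open("/mnt/ramfs/buf", O_RDWR | O_CREAT, 0600);
 *	ftruncate(fd, 65536);	// 0 -> 64KB triggers
 *				// ramfs_nommu_expand_for_mapping()
 *	char *p = mmap(NULL, 65536, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, 0);
 *
 * the ftruncate() allocates the contiguous backing pages whose address
 * ramfs_nommu_get_unmapped_area() then hands straight back to mmap()
 */
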
/*****************************************************************************/
/*
 * set up a mapping for shared memory segments
 */
int ramfs_nommu_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* the file's contents can only be mapped direct out of the page cache
	 * if the mapping is shared; -ENOSYS tells the nommu mmap code to fall
	 * back to making a private copy of the data */
	if (!(vma->vm_flags & VM_SHARED))
		return -ENOSYS;

	file_accessed(file);
	vma->vm_ops = &generic_file_vm_ops;
	return 0;
}