/*
 *	linux/mm/filemap_xip.c
 *
 * Copyright (C) 2005 IBM Corporation
 * Author: Carsten Otte <cotte@de.ibm.com>
 *
 * derived from linux/mm/filemap.c - Copyright (C) Linus Torvalds
 *
 */

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/uio.h>
#include <linux/rmap.h>
#include <asm/tlbflush.h>
#include "filemap.h"

/*
 * We use our own zeroed page to avoid interfering with other users of
 * ZERO_PAGE(), such as /dev/zero.
 */
static struct page *__xip_sparse_page;

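/*
 * Lazily allocate the shared sparse page.  The spinlock only serializes
 * the assignment; if two callers race, the loser frees its page again.
 */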
static struct page *xip_sparse_page(void)
{
	if (!__xip_sparse_page) {
		unsigned long zeroes = get_zeroed_page(GFP_HIGHUSER);
		if (zeroes) {
			static DEFINE_SPINLOCK(xip_alloc_lock);
			spin_lock(&xip_alloc_lock);
			if (!__xip_sparse_page)
				__xip_sparse_page = virt_to_page(zeroes);
			else
				free_page(zeroes);
			spin_unlock(&xip_alloc_lock);
		}
	}
	return __xip_sparse_page;
}

/*
 * This is a file read routine for execute in place files, and uses
 * the mapping->a_ops->get_xip_page() function for the actual low-level
 * stuff.
 *
 * Note the struct file* is not used at all.  It may be NULL.
 */
static void
do_xip_mapping_read(struct address_space *mapping,
		    struct file_ra_state *_ra,
		    struct file *filp,
		    loff_t *ppos,
		    read_descriptor_t *desc,
		    read_actor_t actor)
{
	struct inode *inode = mapping->host;
	unsigned long index, end_index, offset;
	loff_t isize;

	BUG_ON(!mapping->a_ops->get_xip_page);

	index = *ppos >> PAGE_CACHE_SHIFT;
	offset = *ppos & ~PAGE_CACHE_MASK;

	isize = i_size_read(inode);
	if (!isize)
		goto out;

	end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
	for (;;) {
		struct page *page;
		unsigned long nr, ret;

		/* nr is the maximum number of bytes to copy from this page */
		nr = PAGE_CACHE_SIZE;
		if (index >= end_index) {
			if (index > end_index)
				goto out;
			nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
			if (nr <= offset) {
				goto out;
			}
		}
		nr = nr - offset;

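		/*
		 * get_xip_page() takes its offset in 512-byte sector
		 * units, hence the PAGE_SIZE/512 scaling of the page
		 * index.
		 */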
		page = mapping->a_ops->get_xip_page(mapping,
			index*(PAGE_SIZE/512), 0);
		if (!page)
			goto no_xip_page;
		if (unlikely(IS_ERR(page))) {
			if (PTR_ERR(page) == -ENODATA) {
				/* sparse */
				page = ZERO_PAGE(0);
			} else {
				desc->error = PTR_ERR(page);
				goto out;
			}
		}

		/* If users can be writing to this page using arbitrary
		 * virtual addresses, take care about potential aliasing
		 * before reading the page on the kernel side.
		 */
		if (mapping_writably_mapped(mapping))
			flush_dcache_page(page);

		/*
		 * Ok, we have the page, so now we can copy it to user space...
		 *
		 * The actor routine returns how many bytes were actually used..
		 * NOTE! This may not be the same as how much of a user buffer
		 * we filled up (we may be padding etc), so we can only update
		 * "pos" here (the actor routine has to update the user buffer
		 * pointers and the remaining count).
		 */
		ret = actor(desc, page, offset, nr);
		offset += ret;
		index += offset >> PAGE_CACHE_SHIFT;
		offset &= ~PAGE_CACHE_MASK;

		if (ret == nr && desc->count)
			continue;
		goto out;

no_xip_page:
		/* Did not get the page. Report it */
		desc->error = -EIO;
		goto out;
	}

out:
	*ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
	if (filp)
		file_accessed(filp);
}

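/*
 * Read from an execute-in-place file.  Analogous to the generic read
 * path, but the data is copied straight from the backing memory via
 * get_xip_page() rather than through the page cache.
 */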
ssize_t
xip_file_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos)
{
	read_descriptor_t desc;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	desc.written = 0;
	desc.arg.buf = buf;
	desc.count = len;
	desc.error = 0;

	do_xip_mapping_read(filp->f_mapping, &filp->f_ra, filp,
			    ppos, &desc, file_read_actor);

	if (desc.written)
		return desc.written;
	else
		return desc.error;
}
EXPORT_SYMBOL_GPL(xip_file_read);

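/*
 * sendfile support for execute-in-place files: runs the same XIP read
 * loop as xip_file_read(), but feeds the data to the caller-supplied
 * actor instead of a user buffer.
 */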
ssize_t
xip_file_sendfile(struct file *in_file, loff_t *ppos,
	     size_t count, read_actor_t actor, void *target)
{
	read_descriptor_t desc;

	if (!count)
		return 0;

	desc.written = 0;
	desc.count = count;
	desc.arg.data = target;
	desc.error = 0;

	do_xip_mapping_read(in_file->f_mapping, &in_file->f_ra, in_file,
			    ppos, &desc, actor);
	if (desc.written)
		return desc.written;
	return desc.error;
}
EXPORT_SYMBOL_GPL(xip_file_sendfile);

/*
 * __xip_unmap is invoked from xip_file_nopage() and __xip_file_write().
 *
 * This function walks all vmas of the address_space and unmaps the
 * __xip_sparse_page when found at pgoff.
 */
static void
__xip_unmap(struct address_space *mapping,
	    unsigned long pgoff)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	struct prio_tree_iter iter;
	unsigned long address;
	pte_t *pte;
	pte_t pteval;
	spinlock_t *ptl;
	struct page *page;

	page = __xip_sparse_page;
	if (!page)
		return;

	spin_lock(&mapping->i_mmap_lock);
	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		mm = vma->vm_mm;
		address = vma->vm_start +
			((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
		BUG_ON(address < vma->vm_start || address >= vma->vm_end);
		pte = page_check_address(page, mm, address, &ptl);
		if (pte) {
			/* Nuke the page table entry. */
			flush_cache_page(vma, address, pte_pfn(*pte));
			pteval = ptep_clear_flush(vma, address, pte);
			page_remove_rmap(page, vma);
			dec_mm_counter(mm, file_rss);
			BUG_ON(pte_dirty(pteval));
			pte_unmap_unlock(pte, ptl);
			page_cache_release(page);
		}
	}
	spin_unlock(&mapping->i_mmap_lock);
}

/*
 * xip_file_nopage() is invoked via the vma operations vector for a
 * mapped memory region to read in file data during a page fault.
 *
 * This function is derived from filemap_nopage, but is used for
 * execute-in-place files.
 */
static struct page *
xip_file_nopage(struct vm_area_struct *area,
		unsigned long address,
		int *type)
{
	struct file *file = area->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct page *page;
	unsigned long size, pgoff, endoff;

	pgoff = ((address - area->vm_start) >> PAGE_CACHE_SHIFT)
		+ area->vm_pgoff;
	endoff = ((area->vm_end - area->vm_start) >> PAGE_CACHE_SHIFT)
		+ area->vm_pgoff;

	size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	if (pgoff >= size)
		return NOPAGE_SIGBUS;

	page = mapping->a_ops->get_xip_page(mapping, pgoff*(PAGE_SIZE/512), 0);
	if (!IS_ERR(page))
		goto out;
	if (PTR_ERR(page) != -ENODATA)
		return NOPAGE_SIGBUS;

	/* sparse block */
	if ((area->vm_flags & (VM_WRITE | VM_MAYWRITE)) &&
	    (area->vm_flags & (VM_SHARED | VM_MAYSHARE)) &&
	    (!(mapping->host->i_sb->s_flags & MS_RDONLY))) {
		/* maybe shared writable, allocate new block */
		page = mapping->a_ops->get_xip_page(mapping,
			pgoff*(PAGE_SIZE/512), 1);
		if (IS_ERR(page))
			return NOPAGE_SIGBUS;
		/* unmap page at pgoff from all other vmas */
		__xip_unmap(mapping, pgoff);
	} else {
		/* not shared and writable, use xip_sparse_page() */
		page = xip_sparse_page();
		if (!page)
			return NOPAGE_OOM;
	}

out:
	page_cache_get(page);
	return page;
}

static struct vm_operations_struct xip_file_vm_ops = {
	.nopage         = xip_file_nopage,
};

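/*
 * mmap for execute-in-place files: page faults on the mapping are
 * served by xip_file_nopage() above instead of filemap_nopage().
 */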
int xip_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	BUG_ON(!file->f_mapping->a_ops->get_xip_page);

	file_accessed(file);
	vma->vm_ops = &xip_file_vm_ops;
	return 0;
}
EXPORT_SYMBOL_GPL(xip_file_mmap);

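/*
 * Copy data from user space straight into the XIP backing pages.
 * Sparse blocks are allocated on demand, and the shared sparse page is
 * unmapped from all other vmas so that they fault in the new block.
 */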
static ssize_t
__xip_file_write(struct file *filp, const char __user *buf,
		  size_t count, loff_t pos, loff_t *ppos)
{
	struct address_space *mapping = filp->f_mapping;
	const struct address_space_operations *a_ops = mapping->a_ops;
	struct inode 	*inode = mapping->host;
	long		status = 0;
	struct page	*page;
	size_t		bytes;
	ssize_t		written = 0;

	BUG_ON(!mapping->a_ops->get_xip_page);

	do {
		unsigned long index;
		unsigned long offset;
		size_t copied;

		offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */
		index = pos >> PAGE_CACHE_SHIFT;
		bytes = PAGE_CACHE_SIZE - offset;
		if (bytes > count)
			bytes = count;

		/*
		 * Bring in the user page that we will copy from _first_.
		 * Otherwise there's a nasty deadlock on copying from the
		 * same page as we're writing to, without it being marked
		 * up-to-date.
		 */
		fault_in_pages_readable(buf, bytes);

		page = a_ops->get_xip_page(mapping,
					   index*(PAGE_SIZE/512), 0);
		if (IS_ERR(page) && (PTR_ERR(page) == -ENODATA)) {
			/* sparse block: allocate a new block */
			page = a_ops->get_xip_page(mapping,
						   index*(PAGE_SIZE/512), 1);
			if (!IS_ERR(page))
				/* unmap page at pgoff from all other vmas */
				__xip_unmap(mapping, index);
		}

		if (IS_ERR(page)) {
			status = PTR_ERR(page);
			break;
		}

		copied = filemap_copy_from_user(page, offset, buf, bytes);
		flush_dcache_page(page);
		if (likely(copied > 0)) {
			status = copied;

			if (status >= 0) {
				written += status;
				count -= status;
				pos += status;
				buf += status;
			}
		}
		if (unlikely(copied != bytes))
			if (status >= 0)
				status = -EFAULT;
		if (status < 0)
			break;
	} while (count);
	*ppos = pos;
	/*
	 * No need to use i_size_read() here, the i_size
	 * cannot change under us because we hold i_mutex.
	 */
	if (pos > inode->i_size) {
		i_size_write(inode, pos);
		mark_inode_dirty(inode);
	}

	return written ? written : status;
}

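/*
 * write(2) entry point for execute-in-place files: performs the usual
 * checks (access_ok, generic_write_checks, remove_suid, file times)
 * under i_mutex and then hands the actual copying to __xip_file_write().
 */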
ssize_t
xip_file_write(struct file *filp, const char __user *buf, size_t len,
	       loff_t *ppos)
{
	struct address_space *mapping = filp->f_mapping;
	struct inode *inode = mapping->host;
	size_t count;
	loff_t pos;
	ssize_t ret;

	mutex_lock(&inode->i_mutex);

	if (!access_ok(VERIFY_READ, buf, len)) {
		ret = -EFAULT;
		goto out_up;
	}

	pos = *ppos;
	count = len;

	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = mapping->backing_dev_info;

	ret = generic_write_checks(filp, &pos, &count, S_ISBLK(inode->i_mode));
	if (ret)
		goto out_backing;
	if (count == 0)
		goto out_backing;

	ret = remove_suid(filp->f_path.dentry);
	if (ret)
		goto out_backing;

	file_update_time(filp);

	ret = __xip_file_write(filp, buf, count, pos, ppos);

 out_backing:
	current->backing_dev_info = NULL;
 out_up:
	mutex_unlock(&inode->i_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(xip_file_write);

/*
 * truncate a page used for execute in place
 * functionality is analogous to block_truncate_page(), but uses
 * get_xip_page() to get the page instead of the page cache
 */
int
xip_truncate_page(struct address_space *mapping, loff_t from)
{
	pgoff_t index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE-1);
	unsigned blocksize;
	unsigned length;
	struct page *page;

	BUG_ON(!mapping->a_ops->get_xip_page);

	blocksize = 1 << mapping->host->i_blkbits;
	length = offset & (blocksize - 1);

	/* Block boundary? Nothing to do */
	if (!length)
		return 0;

	length = blocksize - length;

	page = mapping->a_ops->get_xip_page(mapping,
					    index*(PAGE_SIZE/512), 0);
	if (!page)
		return -ENOMEM;
	if (unlikely(IS_ERR(page))) {
		if (PTR_ERR(page) == -ENODATA)
			/* Hole? No need to truncate */
			return 0;
		else
			return PTR_ERR(page);
	}
	zero_user_page(page, offset, length, KM_USER0);
	return 0;
}
EXPORT_SYMBOL_GPL(xip_truncate_page);