/* mm/ashmem.c
**
** Anonymous Shared Memory Subsystem, ashmem
**
** Copyright (C) 2008 Google, Inc.
**
** Robert Love <rlove@google.com>
**
** This software is licensed under the terms of the GNU General Public
** License version 2, as published by the Free Software Foundation, and
** may be copied, distributed, and modified under those terms.
**
** This program is distributed in the hope that it will be useful,
** but WITHOUT ANY WARRANTY; without even the implied warranty of
** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
** GNU General Public License for more details.
*/

#include <linux/module.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/security.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <linux/personality.h>
#include <linux/bitops.h>
#include <linux/mutex.h>
#include <linux/shmem_fs.h>
#include <linux/ashmem.h>

#define ASHMEM_NAME_PREFIX "dev/ashmem/"
#define ASHMEM_NAME_PREFIX_LEN (sizeof(ASHMEM_NAME_PREFIX) - 1)
#define ASHMEM_FULL_NAME_LEN (ASHMEM_NAME_LEN + ASHMEM_NAME_PREFIX_LEN)

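/*
 * For illustration: a region named "example" via ASHMEM_SET_NAME is
 * stored here as "dev/ashmem/example", which is the name given to the
 * backing shmem file and thus what shows up in /proc/<pid>/maps.
 */
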
/*
 * ashmem_area - anonymous shared memory area
 * Lifecycle: From our parent file's open() until its release()
 * Locking: Protected by `ashmem_mutex'
 * Big Note: Mappings do NOT pin this structure; it dies on close()
 */
struct ashmem_area {
        char name[ASHMEM_FULL_NAME_LEN];/* optional name for /proc/pid/maps */
        struct list_head unpinned_list; /* this area's unpinned ranges */
        struct file *file;              /* the shmem-based backing file */
        size_t size;                    /* size of the mapping, in bytes */
        unsigned long prot_mask;        /* allowed prot bits, as vm_flags */
};

/*
 * ashmem_range - represents an interval of unpinned (evictable) pages
 * Lifecycle: From unpin to pin
 * Locking: Protected by `ashmem_mutex'
 */
struct ashmem_range {
        struct list_head lru;           /* entry in LRU list */
        struct list_head unpinned;      /* entry in its area's unpinned list */
        struct ashmem_area *asma;       /* associated area */
        size_t pgstart;                 /* starting page, inclusive */
        size_t pgend;                   /* ending page, inclusive */
        unsigned int purged;            /* ASHMEM_NOT or ASHMEM_WAS_PURGED */
};

/* LRU list of unpinned pages, protected by ashmem_mutex */
static LIST_HEAD(ashmem_lru_list);

/* Count of pages on our LRU list, protected by ashmem_mutex */
static unsigned long lru_count;

/*
 * ashmem_mutex - protects the list of and each individual ashmem_area
 *
 * Lock Ordering: ashmem_mutex -> i_mutex -> i_alloc_sem
 */
static DEFINE_MUTEX(ashmem_mutex);

static struct kmem_cache *ashmem_area_cachep __read_mostly;
static struct kmem_cache *ashmem_range_cachep __read_mostly;

#define range_size(range) \
        ((range)->pgend - (range)->pgstart + 1)

#define range_on_lru(range) \
        ((range)->purged == ASHMEM_NOT_PURGED)

#define page_range_subsumes_range(range, start, end) \
        (((range)->pgstart >= (start)) && ((range)->pgend <= (end)))

#define page_range_subsumed_by_range(range, start, end) \
        (((range)->pgstart <= (start)) && ((range)->pgend >= (end)))

#define page_in_range(range, page) \
        (((range)->pgstart <= (page)) && ((range)->pgend >= (page)))

#define page_range_in_range(range, start, end) \
        (page_in_range(range, start) || page_in_range(range, end) || \
                page_range_subsumes_range(range, start, end))

#define range_before_page(range, page) \
        ((range)->pgend < (page))

#define PROT_MASK (PROT_EXEC | PROT_READ | PROT_WRITE)

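/*
 * Worked example of the predicates above, for a range spanning pages
 * [2, 5] (all bounds are inclusive page indices):
 *
 *      page_range_subsumes_range(range, 0, 9)    -> true, [2,5] lies in [0,9]
 *      page_range_subsumed_by_range(range, 3, 4) -> true, [3,4] lies in [2,5]
 *      page_range_in_range(range, 4, 8)          -> true, overlap at [4,5]
 *      range_before_page(range, 6)               -> true, since 5 < 6
 */
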
static inline void lru_add(struct ashmem_range *range)
{
        list_add_tail(&range->lru, &ashmem_lru_list);
        lru_count += range_size(range);
}

static inline void lru_del(struct ashmem_range *range)
{
        list_del(&range->lru);
        lru_count -= range_size(range);
}

/*
 * range_alloc - allocate and initialize a new ashmem_range structure
 *
 * 'asma' - associated ashmem_area
 * 'prev_range' - the previous ashmem_range in the sorted asma->unpinned list
 * 'purged' - initial purge value (ASHMEM_NOT_PURGED or ASHMEM_WAS_PURGED)
 * 'start' - starting page, inclusive
 * 'end' - ending page, inclusive
 *
 * Caller must hold ashmem_mutex.
 */
static int range_alloc(struct ashmem_area *asma,
                       struct ashmem_range *prev_range, unsigned int purged,
                       size_t start, size_t end)
{
        struct ashmem_range *range;

        range = kmem_cache_zalloc(ashmem_range_cachep, GFP_KERNEL);
        if (unlikely(!range))
                return -ENOMEM;

        range->asma = asma;
        range->pgstart = start;
        range->pgend = end;
        range->purged = purged;

        /* insert just before prev_range, keeping the list sorted */
        list_add_tail(&range->unpinned, &prev_range->unpinned);

        if (range_on_lru(range))
                lru_add(range);

        return 0;
}

static void range_del(struct ashmem_range *range)
{
        list_del(&range->unpinned);
        if (range_on_lru(range))
                lru_del(range);
        kmem_cache_free(ashmem_range_cachep, range);
}

/*
 * range_shrink - shrinks a range to [start, end], updating lru_count
 *
 * Caller must hold ashmem_mutex.
 */
static inline void range_shrink(struct ashmem_range *range,
                                size_t start, size_t end)
{
        size_t pre = range_size(range);

        range->pgstart = start;
        range->pgend = end;

        if (range_on_lru(range))
                lru_count -= pre - range_size(range);
}

static int ashmem_open(struct inode *inode, struct file *file)
{
        struct ashmem_area *asma;
        int ret;

        ret = nonseekable_open(inode, file);
        if (unlikely(ret))
                return ret;

        asma = kmem_cache_zalloc(ashmem_area_cachep, GFP_KERNEL);
        if (unlikely(!asma))
                return -ENOMEM;

        INIT_LIST_HEAD(&asma->unpinned_list);
        memcpy(asma->name, ASHMEM_NAME_PREFIX, ASHMEM_NAME_PREFIX_LEN);
        asma->prot_mask = PROT_MASK;
        file->private_data = asma;

        return 0;
}

static int ashmem_release(struct inode *ignored, struct file *file)
{
        struct ashmem_area *asma = file->private_data;
        struct ashmem_range *range, *next;

        mutex_lock(&ashmem_mutex);
        list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned)
                range_del(range);
        mutex_unlock(&ashmem_mutex);

        if (asma->file)
                fput(asma->file);
        kmem_cache_free(ashmem_area_cachep, asma);

        return 0;
}

static ssize_t ashmem_read(struct file *file, char __user *buf,
                           size_t len, loff_t *pos)
{
        struct ashmem_area *asma = file->private_data;
        int ret = 0;

        mutex_lock(&ashmem_mutex);

        /* If size is not set, or set to 0, always return EOF. */
        if (asma->size == 0)
                goto out;

        if (!asma->file) {
                ret = -EBADF;
                goto out;
        }

        ret = asma->file->f_op->read(asma->file, buf, len, pos);

out:
        mutex_unlock(&ashmem_mutex);
        return ret;
}

static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct ashmem_area *asma = file->private_data;
        int ret = 0;

        mutex_lock(&ashmem_mutex);

        /* user needs to SET_SIZE before mapping */
        if (unlikely(!asma->size)) {
                ret = -EINVAL;
                goto out;
        }

        /* requested protection bits must match our allowed protection mask */
        if (unlikely((vma->vm_flags & ~asma->prot_mask) & PROT_MASK)) {
                ret = -EPERM;
                goto out;
        }

        if (!asma->file) {
                char *name = ASHMEM_NAME_DEF;
                struct file *vmfile;

                /* use the stored name only if userspace actually set one */
                if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0')
                        name = asma->name;

                /* ... and allocate the backing shmem file */
                vmfile = shmem_file_setup(name, asma->size, vma->vm_flags);
                if (unlikely(IS_ERR(vmfile))) {
                        ret = PTR_ERR(vmfile);
                        goto out;
                }
                asma->file = vmfile;
        }
        get_file(asma->file);

        if (vma->vm_flags & VM_SHARED)
                shmem_set_file(vma, asma->file);
        else {
                if (vma->vm_file)
                        fput(vma->vm_file);
                vma->vm_file = asma->file;
        }
        vma->vm_flags |= VM_CAN_NONLINEAR;

out:
        mutex_unlock(&ashmem_mutex);
        return ret;
}

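/*
 * Typical userspace setup, as an illustrative sketch (not part of this
 * driver): the size must be set before the first mmap(), and the name,
 * if any, must be set before the backing file exists:
 *
 *      int fd = open("/dev/ashmem", O_RDWR);
 *      ioctl(fd, ASHMEM_SET_NAME, "example");
 *      ioctl(fd, ASHMEM_SET_SIZE, len);
 *      void *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *
 * The descriptor can then be passed to other processes (e.g. over
 * Binder), which mmap() it to share the same pages.
 */
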
/*
 * ashmem_shrink - our cache shrinker, called from mm/vmscan.c :: shrink_slab
 *
 * 'nr_to_scan' is the number of objects (pages) to prune, or 0 to query how
 * many objects (pages) we have in total.
 *
 * 'gfp_mask' is the mask of the allocation that got us into this mess.
 *
 * Return value is the number of objects (pages) remaining, or -1 if we cannot
 * proceed without risk of deadlock (due to gfp_mask).
 *
 * We approximate LRU via least-recently-unpinned, jettisoning unpinned partial
 * chunks of ashmem regions LRU-wise one-at-a-time until we hit 'nr_to_scan'
 * pages freed.
 */
static int ashmem_shrink(int nr_to_scan, gfp_t gfp_mask)
{
        struct ashmem_range *range, *next;

        /* We might recurse into filesystem code, so bail out if necessary */
        if (nr_to_scan && !(gfp_mask & __GFP_FS))
                return -1;
        if (!nr_to_scan)
                return lru_count;

        mutex_lock(&ashmem_mutex);
        list_for_each_entry_safe(range, next, &ashmem_lru_list, lru) {
                struct inode *inode = range->asma->file->f_dentry->d_inode;
                loff_t start = range->pgstart * PAGE_SIZE;
                loff_t end = (range->pgend + 1) * PAGE_SIZE - 1;

                /* drop the backing pages and mark the range as purged */
                vmtruncate_range(inode, start, end);
                range->purged = ASHMEM_WAS_PURGED;
                lru_del(range);

                nr_to_scan -= range_size(range);
                if (nr_to_scan <= 0)
                        break;
        }
        mutex_unlock(&ashmem_mutex);

        return lru_count;
}

static struct shrinker ashmem_shrinker = {
        .shrink = ashmem_shrink,
        .seeks = DEFAULT_SEEKS * 4,
};

static int set_prot_mask(struct ashmem_area *asma, unsigned long prot)
{
        int ret = 0;

        mutex_lock(&ashmem_mutex);

        /* the user can only remove, not add, protection bits */
        if (unlikely((asma->prot_mask & prot) != prot)) {
                ret = -EINVAL;
                goto out;
        }

        /* does the application expect PROT_READ to imply PROT_EXEC? */
        if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
                prot |= PROT_EXEC;

        asma->prot_mask = prot;

out:
        mutex_unlock(&ashmem_mutex);
        return ret;
}

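/*
 * For illustration: prot_mask starts as PROT_EXEC|PROT_READ|PROT_WRITE.
 * After ASHMEM_SET_PROT_MASK with just PROT_READ, a later attempt to
 * restore PROT_WRITE fails with -EINVAL above, and an mmap() requesting
 * write access is refused with -EPERM in ashmem_mmap(); the mask can
 * only ever shrink.
 */
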
static int set_name(struct ashmem_area *asma, void __user *name)
{
        int ret = 0;

        mutex_lock(&ashmem_mutex);

        /* cannot change an existing mapping's name */
        if (unlikely(asma->file)) {
                ret = -EINVAL;
                goto out;
        }

        if (unlikely(copy_from_user(asma->name + ASHMEM_NAME_PREFIX_LEN,
                                    name, ASHMEM_NAME_LEN)))
                ret = -EFAULT;
        asma->name[ASHMEM_FULL_NAME_LEN - 1] = '\0';

out:
        mutex_unlock(&ashmem_mutex);

        return ret;
}

static int get_name(struct ashmem_area *asma, void __user *name)
{
        int ret = 0;

        mutex_lock(&ashmem_mutex);
        if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0') {
                size_t len;

                /*
                 * Copying only `len', instead of ASHMEM_NAME_LEN, bytes
                 * prevents us from revealing one user's stack to another.
                 */
                len = strlen(asma->name + ASHMEM_NAME_PREFIX_LEN) + 1;
                if (unlikely(copy_to_user(name,
                                asma->name + ASHMEM_NAME_PREFIX_LEN, len)))
                        ret = -EFAULT;
        } else {
                if (unlikely(copy_to_user(name, ASHMEM_NAME_DEF,
                                          sizeof(ASHMEM_NAME_DEF))))
                        ret = -EFAULT;
        }
        mutex_unlock(&ashmem_mutex);

        return ret;
}

/*
 * ashmem_pin - pin the given ashmem region, returning whether it was
 * previously purged (ASHMEM_WAS_PURGED) or not (ASHMEM_NOT_PURGED).
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_pin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
{
        struct ashmem_range *range, *next;
        int ret = ASHMEM_NOT_PURGED;

        list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) {
                /* moved past last applicable page; we can short circuit */
                if (range_before_page(range, pgstart))
                        break;

                /*
                 * The user can ask us to pin pages that span multiple ranges,
                 * or to pin pages that aren't even unpinned, so this is messy.
                 *
                 * Four cases:
                 * 1. The requested range subsumes an existing range, so we
                 *    just remove the entire matching range.
                 * 2. The requested range overlaps the start of an existing
                 *    range, so we just update that range.
                 * 3. The requested range overlaps the end of an existing
                 *    range, so we just update that range.
                 * 4. The requested range punches a hole in an existing range,
                 *    so we have to update one side of the range and then
                 *    create a new range for the other side.
                 */
                if (page_range_in_range(range, pgstart, pgend)) {
                        ret |= range->purged;

                        /* Case #1: Easy. Just nuke the whole thing. */
                        if (page_range_subsumes_range(range, pgstart, pgend)) {
                                range_del(range);
                                continue;
                        }

                        /* Case #2: We overlap from the start, so adjust it */
                        if (range->pgstart >= pgstart) {
                                range_shrink(range, pgend + 1, range->pgend);
                                continue;
                        }

                        /* Case #3: We overlap from the rear, so adjust it */
                        if (range->pgend <= pgend) {
                                range_shrink(range, range->pgstart,
                                             pgstart - 1);
                                continue;
                        }

                        /*
                         * Case #4: We eat a chunk out of the middle. A bit
                         * more complicated, we allocate a new range for the
                         * second half and adjust the first chunk's endpoint.
                         */
                        range_alloc(asma, range, range->purged,
                                    pgend + 1, range->pgend);
                        range_shrink(range, range->pgstart, pgstart - 1);
                        break;
                }
        }

        return ret;
}

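/*
 * Worked example of case #4, the hole punch: with one unpinned range
 * covering pages [0, 9], pinning [3, 5] leaves two unpinned ranges,
 * [0, 2] and [6, 9]. The tail half is allocated fresh via range_alloc()
 * and the original range is shrunk down to the head half.
 */
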
/*
 * ashmem_unpin - unpin the given range of pages. Returns zero on success.
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_unpin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
{
        struct ashmem_range *range, *next;
        unsigned int purged = ASHMEM_NOT_PURGED;

restart:
        list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) {
                /* short circuit: this is our insertion point */
                if (range_before_page(range, pgstart))
                        break;

                /*
                 * The user can ask us to unpin pages that are already entirely
                 * or partially unpinned. We handle those two cases here.
                 */
                if (page_range_subsumed_by_range(range, pgstart, pgend))
                        return 0;
                if (page_range_in_range(range, pgstart, pgend)) {
                        /*
                         * Grow the request to cover the overlapping range,
                         * delete it, and rescan the list from the top.
                         */
                        pgstart = min_t(size_t, range->pgstart, pgstart);
                        pgend = max_t(size_t, range->pgend, pgend);
                        purged |= range->purged;
                        range_del(range);
                        goto restart;
                }
        }

        return range_alloc(asma, range, purged, pgstart, pgend);
}

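/*
 * Coalescing example: with unpinned ranges [0, 3] and [8, 9], unpinning
 * [2, 8] first absorbs [8, 9] (growing the request to [2, 9]), restarts,
 * absorbs [0, 3] (growing it to [0, 9]), and finally inserts the single
 * unpinned range [0, 9].
 */
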
/*
 * ashmem_get_pin_status - Returns ASHMEM_IS_UNPINNED if _any_ pages in the
 * given interval are unpinned and ASHMEM_IS_PINNED otherwise.
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_get_pin_status(struct ashmem_area *asma, size_t pgstart,
                                 size_t pgend)
{
        struct ashmem_range *range;
        int ret = ASHMEM_IS_PINNED;

        list_for_each_entry(range, &asma->unpinned_list, unpinned) {
                if (range_before_page(range, pgstart))
                        break;
                if (page_range_in_range(range, pgstart, pgend)) {
                        ret = ASHMEM_IS_UNPINNED;
                        break;
                }
        }

        return ret;
}

static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd,
                            void __user *p)
{
        struct ashmem_pin pin;
        size_t pgstart, pgend;
        int ret = -EINVAL;

        if (unlikely(!asma->file))
                return -EINVAL;

        if (unlikely(copy_from_user(&pin, p, sizeof(pin))))
                return -EFAULT;

        /* per custom, you can pass zero for len to mean "everything onward" */
        if (!pin.len)
                pin.len = PAGE_ALIGN(asma->size) - pin.offset;

        if (unlikely((pin.offset | pin.len) & ~PAGE_MASK))
                return -EINVAL;

        if (unlikely(((__u32) -1) - pin.offset < pin.len))
                return -EINVAL;

        if (unlikely(PAGE_ALIGN(asma->size) < pin.offset + pin.len))
                return -EINVAL;

        pgstart = pin.offset / PAGE_SIZE;
        pgend = pgstart + (pin.len / PAGE_SIZE) - 1;

        mutex_lock(&ashmem_mutex);

        switch (cmd) {
        case ASHMEM_PIN:
                ret = ashmem_pin(asma, pgstart, pgend);
                break;
        case ASHMEM_UNPIN:
                ret = ashmem_unpin(asma, pgstart, pgend);
                break;
        case ASHMEM_GET_PIN_STATUS:
                ret = ashmem_get_pin_status(asma, pgstart, pgend);
                break;
        }

        mutex_unlock(&ashmem_mutex);

        return ret;
}

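/*
 * Sketch of how a client might use the pin ioctls (illustrative only;
 * regenerate_contents() is a hypothetical placeholder). A zero 'len'
 * means "from offset to the end of the region", per the code above:
 *
 *      struct ashmem_pin pin = { .offset = 0, .len = 0 };
 *      ioctl(fd, ASHMEM_UNPIN, &pin);  // pages may now be purged
 *      ...
 *      if (ioctl(fd, ASHMEM_PIN, &pin) == ASHMEM_WAS_PURGED)
 *              regenerate_contents();  // kernel dropped the pages
 */
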
static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
        struct ashmem_area *asma = file->private_data;
        long ret = -ENOTTY;

        switch (cmd) {
        case ASHMEM_SET_NAME:
                ret = set_name(asma, (void __user *) arg);
                break;
        case ASHMEM_GET_NAME:
                ret = get_name(asma, (void __user *) arg);
                break;
        case ASHMEM_SET_SIZE:
                ret = -EINVAL;
                if (!asma->file) {
                        ret = 0;
                        asma->size = (size_t) arg;
                }
                break;
        case ASHMEM_GET_SIZE:
                ret = asma->size;
                break;
        case ASHMEM_SET_PROT_MASK:
                ret = set_prot_mask(asma, arg);
                break;
        case ASHMEM_GET_PROT_MASK:
                ret = asma->prot_mask;
                break;
        case ASHMEM_PIN:
        case ASHMEM_UNPIN:
        case ASHMEM_GET_PIN_STATUS:
                ret = ashmem_pin_unpin(asma, cmd, (void __user *) arg);
                break;
        case ASHMEM_PURGE_ALL_CACHES:
                ret = -EPERM;
                if (capable(CAP_SYS_ADMIN)) {
                        /* query the LRU count, then purge that many pages */
                        ret = ashmem_shrink(0, GFP_KERNEL);
                        ashmem_shrink(ret, GFP_KERNEL);
                }
                break;
        }

        return ret;
}

static struct file_operations ashmem_fops = {
        .owner = THIS_MODULE,
        .open = ashmem_open,
        .release = ashmem_release,
        .read = ashmem_read,
        .mmap = ashmem_mmap,
        .unlocked_ioctl = ashmem_ioctl,
        .compat_ioctl = ashmem_ioctl,
};

static struct miscdevice ashmem_misc = {
        .minor = MISC_DYNAMIC_MINOR,
        .name = "ashmem",
        .fops = &ashmem_fops,
};

static int __init ashmem_init(void)
{
        int ret;

        ashmem_area_cachep = kmem_cache_create("ashmem_area_cache",
                                          sizeof(struct ashmem_area),
                                          0, 0, NULL);
        if (unlikely(!ashmem_area_cachep)) {
                printk(KERN_ERR "ashmem: failed to create slab cache\n");
                return -ENOMEM;
        }

        ashmem_range_cachep = kmem_cache_create("ashmem_range_cache",
                                          sizeof(struct ashmem_range),
                                          0, 0, NULL);
        if (unlikely(!ashmem_range_cachep)) {
                printk(KERN_ERR "ashmem: failed to create slab cache\n");
                ret = -ENOMEM;
                goto out_free_area;
        }

        ret = misc_register(&ashmem_misc);
        if (unlikely(ret)) {
                printk(KERN_ERR "ashmem: failed to register misc device!\n");
                goto out_free_range;
        }

        register_shrinker(&ashmem_shrinker);

        printk(KERN_INFO "ashmem: initialized\n");

        return 0;

        /* don't leak the earlier allocations on a partial failure */
out_free_range:
        kmem_cache_destroy(ashmem_range_cachep);
out_free_area:
        kmem_cache_destroy(ashmem_area_cachep);
        return ret;
}

static void __exit ashmem_exit(void)
{
        int ret;

        unregister_shrinker(&ashmem_shrinker);

        ret = misc_deregister(&ashmem_misc);
        if (unlikely(ret))
                printk(KERN_ERR "ashmem: failed to unregister misc device!\n");

        kmem_cache_destroy(ashmem_range_cachep);
        kmem_cache_destroy(ashmem_area_cachep);

        printk(KERN_INFO "ashmem: unloaded\n");
}

module_init(ashmem_init);
module_exit(ashmem_exit);

MODULE_LICENSE("GPL");