/* drivers/android/pmem.c
 *
 * Copyright (C) 2007 Google, Inc.
 * Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/miscdevice.h>
#include <linux/platform_device.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/list.h>
#include <linux/debugfs.h>
#include <linux/android_pmem.h>
#include <linux/mempolicy.h>
#include <linux/kobject.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/sizes.h>
#include <linux/pm_runtime.h>
#include <linux/memory_alloc.h>

#define PMEM_MAX_DEVICES (10)

#define PMEM_MAX_ORDER (128)
#define PMEM_MIN_ALLOC PAGE_SIZE

#define PMEM_INITIAL_NUM_BITMAP_ALLOCATIONS (64)

#define PMEM_32BIT_WORD_ORDER (5)
#define PMEM_BITS_PER_WORD_MASK (BITS_PER_LONG - 1)

#ifdef CONFIG_ANDROID_PMEM_DEBUG
#define PMEM_DEBUG 1
#else
#define PMEM_DEBUG 0
#endif

#define SYSTEM_ALLOC_RETRY 10

/* indicates that a reference to this file has been taken via get_pmem_file;
 * the file should not be released until put_pmem_file is called */
#define PMEM_FLAGS_BUSY 0x1
/* indicates that this is a suballocation of a larger master range */
#define PMEM_FLAGS_CONNECTED 0x1 << 1
/* indicates this is a master and not a sub allocation and that it is mmaped */
#define PMEM_FLAGS_MASTERMAP 0x1 << 2
/* submap and unsubmap flags indicate:
 * 00: subregion has never been mmaped
 * 10: subregion has been mmaped, reference to the mm was taken
 * 11: subregion has been released, reference to the mm still held
 * 01: subregion has been released, reference to the mm has been released
 */
#define PMEM_FLAGS_SUBMAP 0x1 << 3
#define PMEM_FLAGS_UNSUBMAP 0x1 << 4

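/*
 * Example of the flag lifecycle above (illustrative only): a connected
 * sub-allocation that is currently mmapped carries
 * PMEM_FLAGS_CONNECTED | PMEM_FLAGS_SUBMAP (the 10 state); when its vma
 * is closed, pmem_vma_close() adds PMEM_FLAGS_UNSUBMAP (the 11 state)
 * while the reference to the mm is still held.
 */
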
struct pmem_data {
	/* in alloc mode: an index into the bitmap
	 * in no_alloc mode: the size of the allocation */
	int index;
	/* see flags above for descriptions */
	unsigned int flags;
	/* protects this data field; if the mm mmap sem will be held at the
	 * same time as this sem, the mm sem must be taken first (as this is
	 * the order for vma_open and vma_close ops) */
	struct rw_semaphore sem;
	/* info about the mmapping process */
	struct vm_area_struct *vma;
	/* task struct of the mapping process */
	struct task_struct *task;
	/* process id of the mapping process */
	pid_t pid;
	/* file descriptor of the master */
	int master_fd;
	/* file struct of the master */
	struct file *master_file;
	/* a list of currently available regions if this is a suballocation */
	struct list_head region_list;
	/* a linked list of data so we can access them for debugging */
	struct list_head list;
#if PMEM_DEBUG
	int ref;
#endif
};

struct pmem_bits {
	unsigned allocated:1;		/* 1 if allocated, 0 if free */
	unsigned order:7;		/* size of the region in pmem space */
};

struct pmem_region_node {
	struct pmem_region region;
	struct list_head list;
};

#define PMEM_DEBUG_MSGS 0
#if PMEM_DEBUG_MSGS
#define DLOG(fmt, args...) \
	do { pr_debug("[%s:%s:%d] " fmt, __FILE__, __func__, __LINE__, \
		    ##args); } \
	while (0)
#else
#define DLOG(x...) do {} while (0)
#endif

enum pmem_align {
	PMEM_ALIGN_4K,
	PMEM_ALIGN_1M,
};

#define PMEM_NAME_SIZE 16

struct alloc_list {
	void *addr;			/* physical addr of allocation */
	void *aaddr;			/* aligned physical addr */
	unsigned int size;		/* total size of allocation */
	unsigned char __iomem *vaddr;	/* virtual addr */
	struct list_head allocs;
};

struct pmem_info {
	struct miscdevice dev;
	/* physical start address of the remapped pmem space */
	unsigned long base;
	/* virtual start address of the remapped pmem space */
	unsigned char __iomem *vbase;
	/* total size of the pmem space */
	unsigned long size;
	/* number of entries in the pmem space */
	unsigned long num_entries;
	/* pfn of the garbage page in memory */
	unsigned long garbage_pfn;
	/* which memory type (i.e. SMI, EBI1) this PMEM device is backed by */
	unsigned memory_type;

	char name[PMEM_NAME_SIZE];

	/* index of the garbage page in the pmem space */
	int garbage_index;

	enum pmem_allocator_type allocator_type;

	int (*allocate)(const int,
			const unsigned long,
			const unsigned int);
	int (*free)(int, int);
	int (*free_space)(int, struct pmem_freespace *);
	unsigned long (*len)(int, struct pmem_data *);
	unsigned long (*start_addr)(int, struct pmem_data *);

	/* actual size of memory element, e.g.: (4 << 10) is 4K */
	unsigned int quantum;

	/* indicates maps of this region should be cached; if a mix of
	 * cached and uncached is desired, set this and open the device with
	 * O_SYNC to get an uncached region */
	unsigned cached;
	unsigned buffered;
	union {
		struct {
			/* in all_or_nothing allocator mode the first mapper
			 * gets the whole space and sets this flag */
			unsigned allocated;
		} all_or_nothing;

		struct {
			/* the buddy allocator bitmap for the region
			 * indicating which entries are allocated and which
			 * are free.
			 */

			struct pmem_bits *buddy_bitmap;
		} buddy_bestfit;

		struct {
			unsigned int bitmap_free; /* # of zero bits/quanta */
			uint32_t *bitmap;
			int32_t bitmap_allocs;
			struct {
				short bit;
				unsigned short quanta;
			} *bitm_alloc;
		} bitmap;

		struct {
			unsigned long used;	/* bytes currently allocated */
			struct list_head alist;	/* list of allocations */
		} system_mem;
	} allocator;

	int id;
	struct kobject kobj;

	/* for debugging, creates a list of pmem file structs; the
	 * data_list_mutex should be taken before pmem_data->sem if both are
	 * needed */
	struct mutex data_list_mutex;
	struct list_head data_list;
	/* arena_mutex protects the global allocation arena
	 *
	 * IF YOU TAKE BOTH LOCKS TAKE THEM IN THIS ORDER:
	 * down(pmem_data->sem) => mutex_lock(arena_mutex)
	 */
	struct mutex arena_mutex;

	long (*ioctl)(struct file *, unsigned int, unsigned long);
	int (*release)(struct inode *, struct file *);
};
#define to_pmem_info_id(a) (container_of(a, struct pmem_info, kobj)->id)

static struct pmem_info pmem[PMEM_MAX_DEVICES];
static int id_count;

#define PMEM_SYSFS_DIR_NAME "pmem_regions" /* under /sys/kernel/ */
static struct kset *pmem_kset;

#define PMEM_IS_FREE_BUDDY(id, index) \
	(!(pmem[id].allocator.buddy_bestfit.buddy_bitmap[index].allocated))
#define PMEM_BUDDY_ORDER(id, index) \
	(pmem[id].allocator.buddy_bestfit.buddy_bitmap[index].order)
#define PMEM_BUDDY_INDEX(id, index) \
	(index ^ (1 << PMEM_BUDDY_ORDER(id, index)))
#define PMEM_BUDDY_NEXT_INDEX(id, index) \
	(index + (1 << PMEM_BUDDY_ORDER(id, index)))
#define PMEM_OFFSET(index) (index * pmem[id].quantum)
#define PMEM_START_ADDR(id, index) \
	(PMEM_OFFSET(index) + pmem[id].base)
#define PMEM_BUDDY_LEN(id, index) \
	((1 << PMEM_BUDDY_ORDER(id, index)) * pmem[id].quantum)
#define PMEM_END_ADDR(id, index) \
	(PMEM_START_ADDR(id, index) + PMEM_BUDDY_LEN(id, index))
#define PMEM_START_VADDR(id, index) \
	(PMEM_OFFSET(index) + pmem[id].vbase)
#define PMEM_END_VADDR(id, index) \
	(PMEM_START_VADDR(id, index) + PMEM_BUDDY_LEN(id, index))
#define PMEM_REVOKED(data) (data->flags & PMEM_FLAGS_REVOKED)
#define PMEM_IS_PAGE_ALIGNED(addr) (!((addr) & (~PAGE_MASK)))
#define PMEM_IS_SUBMAP(data) \
	((data->flags & PMEM_FLAGS_SUBMAP) && \
	(!(data->flags & PMEM_FLAGS_UNSUBMAP)))

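/*
 * Worked example of the buddy math above (illustrative only): with a 4K
 * quantum, an allocation at index 8 of order 2 spans entries 8-11
 * (PMEM_BUDDY_LEN = (1 << 2) * 4K = 16K). Its buddy is
 * PMEM_BUDDY_INDEX = 8 ^ (1 << 2) = 12, i.e. the adjacent 16K block; if
 * that block is also free and of the same order, pmem_free_buddy_bestfit()
 * below merges the pair into one order-3 block at index 8.
 */
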
static int pmem_release(struct inode *, struct file *);
static int pmem_mmap(struct file *, struct vm_area_struct *);
static int pmem_open(struct inode *, struct file *);
static long pmem_ioctl(struct file *, unsigned int, unsigned long);

struct file_operations pmem_fops = {
	.release = pmem_release,
	.mmap = pmem_mmap,
	.open = pmem_open,
	.unlocked_ioctl = pmem_ioctl,
};

#define PMEM_ATTR(_name, _mode, _show, _store) { \
	.attr = {.name = __stringify(_name), .mode = _mode }, \
	.show = _show, \
	.store = _store, \
}

struct pmem_attr {
	struct attribute attr;
	ssize_t(*show) (const int id, char * const);
	ssize_t(*store) (const int id, const char * const, const size_t count);
};
#define to_pmem_attr(a) container_of(a, struct pmem_attr, attr)

#define RW_PMEM_ATTR(name) \
static struct pmem_attr pmem_attr_## name = \
	PMEM_ATTR(name, S_IRUGO | S_IWUSR, show_pmem_## name, store_pmem_## name)

#define RO_PMEM_ATTR(name) \
static struct pmem_attr pmem_attr_## name = \
	PMEM_ATTR(name, S_IRUGO, show_pmem_## name, NULL)

#define WO_PMEM_ATTR(name) \
static struct pmem_attr pmem_attr_## name = \
	PMEM_ATTR(name, S_IWUSR, NULL, store_pmem_## name)

static ssize_t show_pmem(struct kobject *kobj,
			struct attribute *attr,
			char *buf)
{
	struct pmem_attr *a = to_pmem_attr(attr);
	return a->show ? a->show(to_pmem_info_id(kobj), buf) : -EIO;
}

static ssize_t store_pmem(struct kobject *kobj, struct attribute *attr,
			const char *buf, size_t count)
{
	struct pmem_attr *a = to_pmem_attr(attr);
	return a->store ? a->store(to_pmem_info_id(kobj), buf, count) : -EIO;
}

static struct sysfs_ops pmem_ops = {
	.show = show_pmem,
	.store = store_pmem,
};

static ssize_t show_pmem_base(int id, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%lu(%#lx)\n",
		pmem[id].base, pmem[id].base);
}
RO_PMEM_ATTR(base);

static ssize_t show_pmem_size(int id, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%lu(%#lx)\n",
		pmem[id].size, pmem[id].size);
}
RO_PMEM_ATTR(size);

static ssize_t show_pmem_allocator_type(int id, char *buf)
{
	switch (pmem[id].allocator_type) {
	case PMEM_ALLOCATORTYPE_ALLORNOTHING:
		return scnprintf(buf, PAGE_SIZE, "%s\n", "All or Nothing");
	case PMEM_ALLOCATORTYPE_BUDDYBESTFIT:
		return scnprintf(buf, PAGE_SIZE, "%s\n", "Buddy Bestfit");
	case PMEM_ALLOCATORTYPE_BITMAP:
		return scnprintf(buf, PAGE_SIZE, "%s\n", "Bitmap");
	case PMEM_ALLOCATORTYPE_SYSTEM:
		return scnprintf(buf, PAGE_SIZE, "%s\n", "System heap");
	default:
		return scnprintf(buf, PAGE_SIZE,
			"??? Invalid allocator type (%d) for this region! "
			"Something isn't right.\n",
			pmem[id].allocator_type);
	}
}
RO_PMEM_ATTR(allocator_type);

static ssize_t show_pmem_mapped_regions(int id, char *buf)
{
	struct list_head *elt;
	int ret;

	ret = scnprintf(buf, PAGE_SIZE,
		"pid #: mapped regions (offset, len) (offset,len)...\n");

	mutex_lock(&pmem[id].data_list_mutex);
	list_for_each(elt, &pmem[id].data_list) {
		struct pmem_data *data =
			list_entry(elt, struct pmem_data, list);
		struct list_head *elt2;

		down_read(&data->sem);
		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "pid %u:",
			data->pid);
		list_for_each(elt2, &data->region_list) {
			struct pmem_region_node *region_node = list_entry(elt2,
				struct pmem_region_node,
				list);
			ret += scnprintf(buf + ret, PAGE_SIZE - ret,
				"(%lx,%lx) ",
				region_node->region.offset,
				region_node->region.len);
		}
		up_read(&data->sem);
		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
	}
	mutex_unlock(&pmem[id].data_list_mutex);
	return ret;
}
RO_PMEM_ATTR(mapped_regions);

#define PMEM_COMMON_SYSFS_ATTRS \
	&pmem_attr_base.attr, \
	&pmem_attr_size.attr, \
	&pmem_attr_allocator_type.attr, \
	&pmem_attr_mapped_regions.attr


static ssize_t show_pmem_allocated(int id, char *buf)
{
	ssize_t ret;

	mutex_lock(&pmem[id].arena_mutex);
	ret = scnprintf(buf, PAGE_SIZE, "%s\n",
		pmem[id].allocator.all_or_nothing.allocated ?
		"is allocated" : "is NOT allocated");
	mutex_unlock(&pmem[id].arena_mutex);
	return ret;
}
RO_PMEM_ATTR(allocated);

static struct attribute *pmem_allornothing_attrs[] = {
	PMEM_COMMON_SYSFS_ATTRS,

	&pmem_attr_allocated.attr,

	NULL
};

static struct kobj_type pmem_allornothing_ktype = {
	.sysfs_ops = &pmem_ops,
	.default_attrs = pmem_allornothing_attrs,
};

static ssize_t show_pmem_total_entries(int id, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%lu\n", pmem[id].num_entries);
}
RO_PMEM_ATTR(total_entries);

static ssize_t show_pmem_quantum_size(int id, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%u (%#x)\n",
		pmem[id].quantum, pmem[id].quantum);
}
RO_PMEM_ATTR(quantum_size);

static ssize_t show_pmem_buddy_bitmap_dump(int id, char *buf)
{
	int ret, i;

	mutex_lock(&pmem[id].data_list_mutex);
	ret = scnprintf(buf, PAGE_SIZE, "index\torder\tlength\tallocated\n");

	for (i = 0; i < pmem[id].num_entries && (PAGE_SIZE - ret);
			i = PMEM_BUDDY_NEXT_INDEX(id, i))
		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%d\t%d\t%d\t%d\n",
			i, PMEM_BUDDY_ORDER(id, i),
			PMEM_BUDDY_LEN(id, i),
			!PMEM_IS_FREE_BUDDY(id, i));

	mutex_unlock(&pmem[id].data_list_mutex);
	return ret;
}
RO_PMEM_ATTR(buddy_bitmap_dump);

#define PMEM_BITMAP_BUDDY_BESTFIT_COMMON_SYSFS_ATTRS \
	&pmem_attr_quantum_size.attr, \
	&pmem_attr_total_entries.attr

static struct attribute *pmem_buddy_bestfit_attrs[] = {
	PMEM_COMMON_SYSFS_ATTRS,

	PMEM_BITMAP_BUDDY_BESTFIT_COMMON_SYSFS_ATTRS,

	&pmem_attr_buddy_bitmap_dump.attr,

	NULL
};

static struct kobj_type pmem_buddy_bestfit_ktype = {
	.sysfs_ops = &pmem_ops,
	.default_attrs = pmem_buddy_bestfit_attrs,
};

static ssize_t show_pmem_free_quanta(int id, char *buf)
{
	ssize_t ret;

	mutex_lock(&pmem[id].arena_mutex);
	ret = scnprintf(buf, PAGE_SIZE, "%u\n",
		pmem[id].allocator.bitmap.bitmap_free);
	mutex_unlock(&pmem[id].arena_mutex);
	return ret;
}
RO_PMEM_ATTR(free_quanta);

static ssize_t show_pmem_bits_allocated(int id, char *buf)
{
	ssize_t ret;
	unsigned int i;

	mutex_lock(&pmem[id].arena_mutex);

	ret = scnprintf(buf, PAGE_SIZE,
		"id: %d\nbitnum\tindex\tquanta allocated\n", id);

	for (i = 0; i < pmem[id].allocator.bitmap.bitmap_allocs; i++)
		if (pmem[id].allocator.bitmap.bitm_alloc[i].bit != -1)
			ret += scnprintf(buf + ret, PAGE_SIZE - ret,
				"%u\t%u\t%u\n",
				i,
				pmem[id].allocator.bitmap.bitm_alloc[i].bit,
				pmem[id].allocator.bitmap.bitm_alloc[i].quanta
				);

	mutex_unlock(&pmem[id].arena_mutex);
	return ret;
}
RO_PMEM_ATTR(bits_allocated);

static struct attribute *pmem_bitmap_attrs[] = {
	PMEM_COMMON_SYSFS_ATTRS,

	PMEM_BITMAP_BUDDY_BESTFIT_COMMON_SYSFS_ATTRS,

	&pmem_attr_free_quanta.attr,
	&pmem_attr_bits_allocated.attr,

	NULL
};

static struct attribute *pmem_system_attrs[] = {
	PMEM_COMMON_SYSFS_ATTRS,

	NULL
};

static struct kobj_type pmem_bitmap_ktype = {
	.sysfs_ops = &pmem_ops,
	.default_attrs = pmem_bitmap_attrs,
};

static struct kobj_type pmem_system_ktype = {
	.sysfs_ops = &pmem_ops,
	.default_attrs = pmem_system_attrs,
};

static int get_id(struct file *file)
{
	return MINOR(file->f_dentry->d_inode->i_rdev);
}

static char *get_name(struct file *file)
{
	int id = get_id(file);
	return pmem[id].name;
}

static int is_pmem_file(struct file *file)
{
	int id;

	if (unlikely(!file || !file->f_dentry || !file->f_dentry->d_inode))
		return 0;

	id = get_id(file);
	return (unlikely(id >= PMEM_MAX_DEVICES ||
		file->f_dentry->d_inode->i_rdev !=
		     MKDEV(MISC_MAJOR, pmem[id].dev.minor))) ? 0 : 1;
}

static int has_allocation(struct file *file)
{
	/* must be called with at least read lock held on
	 * ((struct pmem_data *)(file->private_data))->sem which
	 * means that file is guaranteed not to be NULL upon entry!!
	 * check is_pmem_file first if not accessed via pmem_file_ops */
	struct pmem_data *pdata = file->private_data;
	return pdata && pdata->index != -1;
}

static int is_master_owner(struct file *file)
{
	struct file *master_file;
	struct pmem_data *data = file->private_data;
	int put_needed, ret = 0;

	if (!has_allocation(file))
		return 0;
	if (PMEM_FLAGS_MASTERMAP & data->flags)
		return 1;
	master_file = fget_light(data->master_fd, &put_needed);
	if (master_file && data->master_file == master_file)
		ret = 1;
	if (master_file)
		fput_light(master_file, put_needed);
	return ret;
}

static int pmem_free_all_or_nothing(int id, int index)
{
	/* caller should hold the lock on arena_mutex! */
	DLOG("index %d\n", index);

	pmem[id].allocator.all_or_nothing.allocated = 0;
	return 0;
}

static int pmem_free_space_all_or_nothing(int id,
		struct pmem_freespace *fs)
{
	/* caller should hold the lock on arena_mutex! */
	fs->total = (unsigned long)
		pmem[id].allocator.all_or_nothing.allocated == 0 ?
		pmem[id].size : 0;

	fs->largest = fs->total;
	return 0;
}

static int pmem_free_buddy_bestfit(int id, int index)
{
	/* caller should hold the lock on arena_mutex! */
	int curr = index;
	DLOG("index %d\n", index);

	/* clean up the bitmap, merging any buddies */
	pmem[id].allocator.buddy_bestfit.buddy_bitmap[curr].allocated = 0;
	/* find a slot's buddy: Buddy# = Slot# ^ (1 << order)
	 * if the buddy is also free merge them
	 * repeat until the buddy is not free or end of the bitmap is reached
	 */
	do {
		int buddy = PMEM_BUDDY_INDEX(id, curr);
		if (buddy < pmem[id].num_entries &&
				PMEM_IS_FREE_BUDDY(id, buddy) &&
				PMEM_BUDDY_ORDER(id, buddy) ==
				PMEM_BUDDY_ORDER(id, curr)) {
			PMEM_BUDDY_ORDER(id, buddy)++;
			PMEM_BUDDY_ORDER(id, curr)++;
			curr = min(buddy, curr);
		} else {
			break;
		}
	} while (curr < pmem[id].num_entries);

	return 0;
}

static int pmem_free_space_buddy_bestfit(int id,
		struct pmem_freespace *fs)
{
	/* caller should hold the lock on arena_mutex! */
	int curr;
	unsigned long size;
	fs->total = 0;
	fs->largest = 0;

	for (curr = 0; curr < pmem[id].num_entries;
	     curr = PMEM_BUDDY_NEXT_INDEX(id, curr)) {
		if (PMEM_IS_FREE_BUDDY(id, curr)) {
			size = PMEM_BUDDY_LEN(id, curr);
			if (size > fs->largest)
				fs->largest = size;
			fs->total += size;
		}
	}
	return 0;
}

static inline uint32_t start_mask(int bit_start)
{
	return (uint32_t)(~0) << (bit_start & PMEM_BITS_PER_WORD_MASK);
}

static inline uint32_t end_mask(int bit_end)
{
	return (uint32_t)(~0) >>
		((BITS_PER_LONG - bit_end) & PMEM_BITS_PER_WORD_MASK);
}

static inline int compute_total_words(int bit_end, int word_index)
{
	return ((bit_end + BITS_PER_LONG - 1) >>
			PMEM_32BIT_WORD_ORDER) - word_index;
}

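/*
 * Worked example of the mask helpers (illustrative only, assuming a
 * 32-bit BITS_PER_LONG): operating on bits [3, 37) touches
 * compute_total_words(37, 0) = 2 words. start_mask(3) = 0xfffffff8
 * selects bits 3..31 of the first word, and end_mask(37) = 0x0000001f
 * selects bits 0..4 of the second word, i.e. global bits 32..36
 * (bit_end is exclusive).
 */
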
static void bitmap_bits_clear_all(uint32_t *bitp, int bit_start, int bit_end)
{
	int word_index = bit_start >> PMEM_32BIT_WORD_ORDER, total_words;

	total_words = compute_total_words(bit_end, word_index);
	if (total_words > 0) {
		if (total_words == 1) {
			bitp[word_index] &=
				~(start_mask(bit_start) & end_mask(bit_end));
		} else {
			bitp[word_index++] &= ~start_mask(bit_start);
			if (total_words > 2) {
				int total_bytes;

				total_words -= 2;
				total_bytes = total_words << 2;

				memset(&bitp[word_index], 0, total_bytes);
				word_index += total_words;
			}
			bitp[word_index] &= ~end_mask(bit_end);
		}
	}
}

static int pmem_free_bitmap(int id, int bitnum)
{
	/* caller should hold the lock on arena_mutex! */
	int i;
	char currtask_name[FIELD_SIZEOF(struct task_struct, comm) + 1];

	DLOG("bitnum %d\n", bitnum);

	for (i = 0; i < pmem[id].allocator.bitmap.bitmap_allocs; i++) {
		const int curr_bit =
			pmem[id].allocator.bitmap.bitm_alloc[i].bit;

		if (curr_bit == bitnum) {
			const int curr_quanta =
				pmem[id].allocator.bitmap.bitm_alloc[i].quanta;

			bitmap_bits_clear_all(pmem[id].allocator.bitmap.bitmap,
				curr_bit, curr_bit + curr_quanta);
			pmem[id].allocator.bitmap.bitmap_free += curr_quanta;
			pmem[id].allocator.bitmap.bitm_alloc[i].bit = -1;
			pmem[id].allocator.bitmap.bitm_alloc[i].quanta = 0;
			return 0;
		}
	}
	printk(KERN_ALERT "pmem: %s: Attempt to free unallocated index %d, id"
		" %d, pid %d(%s)\n", __func__, bitnum, id, current->pid,
		get_task_comm(currtask_name, current));

	return -1;
}

static int pmem_free_system(int id, int index)
{
	/* caller should hold the lock on arena_mutex! */
	struct alloc_list *item;

	DLOG("index %d\n", index);
	if (index != 0)
		item = (struct alloc_list *)index;
	else
		return 0;

	if (item->vaddr != NULL) {
		iounmap(item->vaddr);
		kfree(__va(item->addr));
		list_del(&item->allocs);
		kfree(item);
	}

	return 0;
}

static int pmem_free_space_bitmap(int id, struct pmem_freespace *fs)
{
	int i, j;
	int max_allocs = pmem[id].allocator.bitmap.bitmap_allocs;
	int alloc_start = 0;
	int next_alloc;
	unsigned long size = 0;

	fs->total = 0;
	fs->largest = 0;

	for (i = 0; i < max_allocs; i++) {

		int alloc_quanta = 0;
		int alloc_idx = 0;
		next_alloc = pmem[id].num_entries;

		/* Look for the lowest bit where next allocation starts */
		for (j = 0; j < max_allocs; j++) {
			const int curr_alloc = pmem[id].allocator.
						bitmap.bitm_alloc[j].bit;
			if (curr_alloc != -1) {
				if (alloc_start == curr_alloc)
					alloc_idx = j;
				if (alloc_start >= curr_alloc)
					continue;
				if (curr_alloc < next_alloc)
					next_alloc = curr_alloc;
			}
		}
		alloc_quanta = pmem[id].allocator.bitmap.
				bitm_alloc[alloc_idx].quanta;
		size = (next_alloc - (alloc_start + alloc_quanta)) *
				pmem[id].quantum;

		if (size > fs->largest)
			fs->largest = size;
		fs->total += size;

		if (next_alloc == pmem[id].num_entries)
			break;
		else
			alloc_start = next_alloc;
	}

	return 0;
}

static int pmem_free_space_system(int id, struct pmem_freespace *fs)
{
	fs->total = pmem[id].size;
	fs->largest = pmem[id].size;

	return 0;
}

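/*
 * Note: the system-heap variant above reports the whole region quota as
 * both total and largest free space, regardless of system_mem.used,
 * since each request is satisfied by a fresh kmalloc() rather than
 * carved out of a fixed arena.
 */
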
static void pmem_revoke(struct file *file, struct pmem_data *data);

static int pmem_release(struct inode *inode, struct file *file)
{
	struct pmem_data *data = file->private_data;
	struct pmem_region_node *region_node;
	struct list_head *elt, *elt2;
	int id = get_id(file), ret = 0;

#if PMEM_DEBUG_MSGS
	char currtask_name[FIELD_SIZEOF(struct task_struct, comm) + 1];
#endif
	DLOG("releasing memory pid %u(%s) file %p(%ld) dev %s(id: %d)\n",
		current->pid, get_task_comm(currtask_name, current),
		file, file_count(file), get_name(file), id);
	mutex_lock(&pmem[id].data_list_mutex);
	/* if this file is a master, revoke all the memory in the connected
	 * files */
	if (PMEM_FLAGS_MASTERMAP & data->flags) {
		list_for_each(elt, &pmem[id].data_list) {
			struct pmem_data *sub_data =
				list_entry(elt, struct pmem_data, list);
			int is_master;

			down_read(&sub_data->sem);
			is_master = (PMEM_IS_SUBMAP(sub_data) &&
				file == sub_data->master_file);
			up_read(&sub_data->sem);

			if (is_master)
				pmem_revoke(file, sub_data);
		}
	}
	list_del(&data->list);
	mutex_unlock(&pmem[id].data_list_mutex);

	down_write(&data->sem);

	/* if it is not a connected file and it has an allocation, free it */
	if (!(PMEM_FLAGS_CONNECTED & data->flags) && has_allocation(file)) {
		mutex_lock(&pmem[id].arena_mutex);
		ret = pmem[id].free(id, data->index);
		mutex_unlock(&pmem[id].arena_mutex);
	}

	/* if this file is a submap (mapped, connected file), downref the
	 * task struct */
	if (PMEM_FLAGS_SUBMAP & data->flags)
		if (data->task) {
			put_task_struct(data->task);
			data->task = NULL;
		}

	file->private_data = NULL;

	list_for_each_safe(elt, elt2, &data->region_list) {
		region_node = list_entry(elt, struct pmem_region_node, list);
		list_del(elt);
		kfree(region_node);
	}
	BUG_ON(!list_empty(&data->region_list));

	up_write(&data->sem);
	kfree(data);
	if (pmem[id].release)
		ret = pmem[id].release(inode, file);

	return ret;
}

static int pmem_open(struct inode *inode, struct file *file)
{
	struct pmem_data *data;
	int id = get_id(file);
	int ret = 0;
#if PMEM_DEBUG_MSGS
	char currtask_name[FIELD_SIZEOF(struct task_struct, comm) + 1];
#endif

	DLOG("pid %u(%s) file %p(%ld) dev %s(id: %d)\n",
		current->pid, get_task_comm(currtask_name, current),
		file, file_count(file), get_name(file), id);
	data = kmalloc(sizeof(struct pmem_data), GFP_KERNEL);
	if (!data) {
		printk(KERN_ALERT "pmem: %s: unable to allocate memory for "
				"pmem metadata.", __func__);
		return -1;
	}
	data->flags = 0;
	data->index = -1;
	data->task = NULL;
	data->vma = NULL;
	data->pid = 0;
	data->master_file = NULL;
#if PMEM_DEBUG
	data->ref = 0;
#endif
	INIT_LIST_HEAD(&data->region_list);
	init_rwsem(&data->sem);

	file->private_data = data;
	INIT_LIST_HEAD(&data->list);

	mutex_lock(&pmem[id].data_list_mutex);
	list_add(&data->list, &pmem[id].data_list);
	mutex_unlock(&pmem[id].data_list_mutex);
	return ret;
}

static unsigned long pmem_order(unsigned long len, int id)
{
	int i;

	len = (len + pmem[id].quantum - 1)/pmem[id].quantum;
	len--;
	for (i = 0; i < sizeof(len)*8; i++)
		if (len >> i == 0)
			break;
	return i;
}

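/*
 * Worked example (illustrative only): with a 4K quantum, a 9000-byte
 * request rounds up to 3 quanta; 3 - 1 = 2 first shifts to zero at
 * i = 2, so pmem_order() returns 2, i.e. an order-2 (4-quantum, 16K)
 * buddy slot -- the smallest power-of-two block that fits.
 */
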
static int pmem_allocator_all_or_nothing(const int id,
		const unsigned long len,
		const unsigned int align)
{
	/* caller should hold the lock on arena_mutex! */
	DLOG("all or nothing\n");
	if ((len > pmem[id].size) ||
		pmem[id].allocator.all_or_nothing.allocated)
		return -1;
	pmem[id].allocator.all_or_nothing.allocated = 1;
	return len;
}

static int pmem_allocator_buddy_bestfit(const int id,
		const unsigned long len,
		unsigned int align)
{
	/* caller should hold the lock on arena_mutex! */
	int curr;
	int best_fit = -1;
	unsigned long order;

	DLOG("buddy bestfit\n");
	order = pmem_order(len, id);
	if (order > PMEM_MAX_ORDER)
		goto out;

	DLOG("order %lx\n", order);

	/* Look through the bitmap.
	 * If a free slot of the correct order is found, use it.
	 * Otherwise, use the best fit (smallest with size > order) slot.
	 */
	for (curr = 0;
	     curr < pmem[id].num_entries;
	     curr = PMEM_BUDDY_NEXT_INDEX(id, curr))
		if (PMEM_IS_FREE_BUDDY(id, curr)) {
			if (PMEM_BUDDY_ORDER(id, curr) ==
					(unsigned char)order) {
				/* set the not free bit and clear others */
				best_fit = curr;
				break;
			}
			if (PMEM_BUDDY_ORDER(id, curr) >
					(unsigned char)order &&
			    (best_fit < 0 ||
			     PMEM_BUDDY_ORDER(id, curr) <
			     PMEM_BUDDY_ORDER(id, best_fit)))
				best_fit = curr;
		}

	/* if best_fit < 0, there are no suitable slots; return an error */
	if (best_fit < 0) {
#if PMEM_DEBUG
		printk(KERN_ALERT "pmem: %s: no space left to allocate!\n",
			__func__);
#endif
		goto out;
	}

	/* now partition the best fit:
	 * split the slot into 2 buddies of order - 1
	 * repeat until the slot is of the correct order
	 */
	while (PMEM_BUDDY_ORDER(id, best_fit) > (unsigned char)order) {
		int buddy;
		PMEM_BUDDY_ORDER(id, best_fit) -= 1;
		buddy = PMEM_BUDDY_INDEX(id, best_fit);
		PMEM_BUDDY_ORDER(id, buddy) = PMEM_BUDDY_ORDER(id, best_fit);
	}
	pmem[id].allocator.buddy_bestfit.buddy_bitmap[best_fit].allocated = 1;
out:
	return best_fit;
}

Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700998
999static inline unsigned long paddr_from_bit(const int id, const int bitnum)
1000{
1001 return pmem[id].base + pmem[id].quantum * bitnum;
1002}
1003
1004static inline unsigned long bit_from_paddr(const int id,
1005 const unsigned long paddr)
1006{
1007 return (paddr - pmem[id].base) / pmem[id].quantum;
1008}
1009
static void bitmap_bits_set_all(uint32_t *bitp, int bit_start, int bit_end)
{
	int word_index = bit_start >> PMEM_32BIT_WORD_ORDER, total_words;

	total_words = compute_total_words(bit_end, word_index);
	if (total_words > 0) {
		if (total_words == 1) {
			bitp[word_index] |=
				(start_mask(bit_start) & end_mask(bit_end));
		} else {
			bitp[word_index++] |= start_mask(bit_start);
			if (total_words > 2) {
				int total_bytes;

				total_words -= 2;
				total_bytes = total_words << 2;

				memset(&bitp[word_index], ~0, total_bytes);
				word_index += total_words;
			}
			bitp[word_index] |= end_mask(bit_end);
		}
	}
}

static int
bitmap_allocate_contiguous(uint32_t *bitp, int num_bits_to_alloc,
		int total_bits, int spacing)
{
	int bit_start, last_bit, word_index;

	if (num_bits_to_alloc <= 0)
		return -1;

	for (bit_start = 0; ;
		bit_start = (last_bit +
			(word_index << PMEM_32BIT_WORD_ORDER) + spacing - 1)
			& ~(spacing - 1)) {
		int bit_end = bit_start + num_bits_to_alloc, total_words;

		if (bit_end > total_bits)
			return -1; /* out of contiguous memory */

		word_index = bit_start >> PMEM_32BIT_WORD_ORDER;
		total_words = compute_total_words(bit_end, word_index);

		if (total_words <= 0)
			return -1;

		if (total_words == 1) {
			last_bit = fls(bitp[word_index] &
					(start_mask(bit_start) &
					end_mask(bit_end)));
			if (last_bit)
				continue;
		} else {
			int end_word = word_index + (total_words - 1);
			last_bit =
				fls(bitp[word_index] & start_mask(bit_start));
			if (last_bit)
				continue;

			for (word_index++;
					word_index < end_word;
					word_index++) {
				last_bit = fls(bitp[word_index]);
				if (last_bit)
					break;
			}
			if (last_bit)
				continue;

			last_bit = fls(bitp[word_index] & end_mask(bit_end));
			if (last_bit)
				continue;
		}
		bitmap_bits_set_all(bitp, bit_start, bit_end);
		return bit_start;
	}
	return -1;
}

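/*
 * Note on the search above: fls() returns the 1-based position of the
 * highest set (i.e. already-allocated) bit in the word under test, so a
 * nonzero result means the candidate window collides with an existing
 * allocation; the next probe restarts just past that bit, rounded up to
 * the requested spacing (the alignment expressed in quanta).
 */
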
static int reserve_quanta(const unsigned int quanta_needed,
		const int id,
		unsigned int align)
{
	/* alignment should be a valid power of 2 */
	int ret = -1, start_bit = 0, spacing = 1;

	/* Sanity check */
	if (quanta_needed > pmem[id].allocator.bitmap.bitmap_free) {
#if PMEM_DEBUG
		printk(KERN_ALERT "pmem: %s: request (%d) too big for"
			" available free (%d)\n", __func__, quanta_needed,
			pmem[id].allocator.bitmap.bitmap_free);
#endif
		return -1;
	}

	start_bit = bit_from_paddr(id,
		(pmem[id].base + align - 1) & ~(align - 1));
	if (start_bit <= -1) {
#if PMEM_DEBUG
		printk(KERN_ALERT
			"pmem: %s: bit_from_paddr fails for"
			" %u alignment.\n", __func__, align);
#endif
		return -1;
	}
	spacing = align / pmem[id].quantum;
	spacing = spacing > 1 ? spacing : 1;

	ret = bitmap_allocate_contiguous(pmem[id].allocator.bitmap.bitmap,
		quanta_needed,
		(pmem[id].size + pmem[id].quantum - 1) / pmem[id].quantum,
		spacing);

#if PMEM_DEBUG
	if (ret < 0)
		printk(KERN_ALERT "pmem: %s: not enough contiguous bits free "
			"in bitmap! Region memory is either too fragmented or"
			" request is too large for available memory.\n",
			__func__);
#endif

	return ret;
}

static int pmem_allocator_bitmap(const int id,
		const unsigned long len,
		const unsigned int align)
{
	/* caller should hold the lock on arena_mutex! */
	int bitnum, i;
	unsigned int quanta_needed;

	DLOG("bitmap id %d, len %ld, align %u\n", id, len, align);
	if (!pmem[id].allocator.bitmap.bitm_alloc) {
#if PMEM_DEBUG
		printk(KERN_ALERT "pmem: bitm_alloc not present! id: %d\n",
			id);
#endif
		return -1;
	}

	quanta_needed = (len + pmem[id].quantum - 1) / pmem[id].quantum;
	DLOG("quantum size %u quanta needed %u free %u id %d\n",
		pmem[id].quantum, quanta_needed,
		pmem[id].allocator.bitmap.bitmap_free, id);

	if (pmem[id].allocator.bitmap.bitmap_free < quanta_needed) {
#if PMEM_DEBUG
		printk(KERN_ALERT "pmem: memory allocation failure. "
			"PMEM memory region exhausted, id %d."
			" Unable to comply with allocation request.\n", id);
#endif
		return -1;
	}

	bitnum = reserve_quanta(quanta_needed, id, align);
	if (bitnum == -1)
		goto leave;

	for (i = 0;
	     i < pmem[id].allocator.bitmap.bitmap_allocs &&
		pmem[id].allocator.bitmap.bitm_alloc[i].bit != -1;
	     i++)
		;

	if (i >= pmem[id].allocator.bitmap.bitmap_allocs) {
		void *temp;
		int32_t new_bitmap_allocs =
			pmem[id].allocator.bitmap.bitmap_allocs << 1;
		int j;

		if (!new_bitmap_allocs) { /* failed sanity check!! */
#if PMEM_DEBUG
			pr_alert("pmem: bitmap_allocs number"
				" wrapped around to zero! Something "
				"is VERY wrong.\n");
#endif
			return -1;
		}

		if (new_bitmap_allocs > pmem[id].num_entries) {
			/* failed sanity check!! */
#if PMEM_DEBUG
			pr_alert("pmem: required bitmap_allocs"
				" number exceeds maximum entries possible"
				" for current quanta\n");
#endif
			return -1;
		}

		temp = krealloc(pmem[id].allocator.bitmap.bitm_alloc,
				new_bitmap_allocs *
				sizeof(*pmem[id].allocator.bitmap.bitm_alloc),
				GFP_KERNEL);
		if (!temp) {
#if PMEM_DEBUG
			pr_alert("pmem: can't realloc bitmap_allocs, "
				"id %d, current num bitmap allocs %d\n",
				id, pmem[id].allocator.bitmap.bitmap_allocs);
#endif
			return -1;
		}
		pmem[id].allocator.bitmap.bitmap_allocs = new_bitmap_allocs;
		pmem[id].allocator.bitmap.bitm_alloc = temp;

		for (j = i; j < new_bitmap_allocs; j++) {
			pmem[id].allocator.bitmap.bitm_alloc[j].bit = -1;
			pmem[id].allocator.bitmap.bitm_alloc[j].quanta = 0;
		}

		DLOG("increased # of allocated regions to %d for id %d\n",
			pmem[id].allocator.bitmap.bitmap_allocs, id);
	}

	DLOG("bitnum %d, bitm_alloc index %d\n", bitnum, i);

	pmem[id].allocator.bitmap.bitmap_free -= quanta_needed;
	pmem[id].allocator.bitmap.bitm_alloc[i].bit = bitnum;
	pmem[id].allocator.bitmap.bitm_alloc[i].quanta = quanta_needed;
leave:
	return bitnum;
}

static int pmem_allocator_system(const int id,
		const unsigned long len,
		const unsigned int align)
{
	/* caller should hold the lock on arena_mutex! */
	struct alloc_list *list;
	unsigned long aligned_len;
	int count = SYSTEM_ALLOC_RETRY;
	void *buf;

	DLOG("system id %d, len %ld, align %u\n", id, len, align);

	if ((pmem[id].allocator.system_mem.used + len) > pmem[id].size) {
		DLOG("requested size would be larger than quota\n");
		return -1;
	}

	/* Handle alignment */
	aligned_len = len + align;

	/* Attempt allocation */
	list = kmalloc(sizeof(struct alloc_list), GFP_KERNEL);
	if (list == NULL) {
		printk(KERN_ERR "pmem: failed to allocate system metadata\n");
		return -1;
	}
	list->vaddr = NULL;

	buf = NULL;
	while ((buf == NULL) && count--) {
		buf = kmalloc(aligned_len, GFP_KERNEL);
		if (buf == NULL) {
			DLOG("pmem: kmalloc %d temporarily failed len= %ld\n",
				count, aligned_len);
		}
	}
	if (!buf) {
		printk(KERN_CRIT "pmem: kmalloc failed for id= %d len= %ld\n",
			id, aligned_len);
		kfree(list);
		return -1;
	}
	list->size = aligned_len;
	list->addr = (void *)__pa(buf);
	list->aaddr = (void *)(((unsigned int)(list->addr) + (align - 1)) &
			~(align - 1));

	if (!pmem[id].cached)
		list->vaddr = ioremap(__pa(buf), aligned_len);
	else
		list->vaddr = ioremap_cached(__pa(buf), aligned_len);

	INIT_LIST_HEAD(&list->allocs);
	list_add(&list->allocs, &pmem[id].allocator.system_mem.alist);

	return (int)list;
}

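/*
 * Note: unlike the other allocators, which return a bitmap index, the
 * system allocator returns the alloc_list pointer itself cast to int as
 * the "index"; pmem_start_addr_system(), pmem_len_system() and
 * pmem_free_system() cast data->index back to recover the record.
 */
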
static pgprot_t pmem_phys_mem_access_prot(struct file *file, pgprot_t vma_prot)
{
	int id = get_id(file);
#ifdef pgprot_writecombine
	if (pmem[id].cached == 0 || file->f_flags & O_SYNC)
		/* on ARMv6 and ARMv7 this expands to Normal Noncached */
		return pgprot_writecombine(vma_prot);
#endif
#ifdef pgprot_ext_buffered
	else if (pmem[id].buffered)
		return pgprot_ext_buffered(vma_prot);
#endif
	return vma_prot;
}

static unsigned long pmem_start_addr_all_or_nothing(int id,
		struct pmem_data *data)
{
	return PMEM_START_ADDR(id, 0);
}

static unsigned long pmem_start_addr_buddy_bestfit(int id,
		struct pmem_data *data)
{
	return PMEM_START_ADDR(id, data->index);
}

static unsigned long pmem_start_addr_bitmap(int id, struct pmem_data *data)
{
	return data->index * pmem[id].quantum + pmem[id].base;
}

static unsigned long pmem_start_addr_system(int id, struct pmem_data *data)
{
	return (unsigned long)(((struct alloc_list *)(data->index))->aaddr);
}

static void *pmem_start_vaddr(int id, struct pmem_data *data)
{
	if (pmem[id].allocator_type == PMEM_ALLOCATORTYPE_SYSTEM)
		return ((struct alloc_list *)(data->index))->vaddr;
	else
		return pmem[id].start_addr(id, data) - pmem[id].base +
			pmem[id].vbase;
}

static unsigned long pmem_len_all_or_nothing(int id, struct pmem_data *data)
{
	return data->index;
}

static unsigned long pmem_len_buddy_bestfit(int id, struct pmem_data *data)
{
	return PMEM_BUDDY_LEN(id, data->index);
}

static unsigned long pmem_len_bitmap(int id, struct pmem_data *data)
{
	int i;
	unsigned long ret = 0;

	mutex_lock(&pmem[id].arena_mutex);

	for (i = 0; i < pmem[id].allocator.bitmap.bitmap_allocs; i++)
		if (pmem[id].allocator.bitmap.bitm_alloc[i].bit ==
				data->index) {
			ret = pmem[id].allocator.bitmap.bitm_alloc[i].quanta *
				pmem[id].quantum;
			break;
		}

	mutex_unlock(&pmem[id].arena_mutex);
#if PMEM_DEBUG
	if (i >= pmem[id].allocator.bitmap.bitmap_allocs)
		pr_alert("pmem: %s: can't find bitnum %d in "
			"alloc'd array!\n", __func__, data->index);
#endif
	return ret;
}

static unsigned long pmem_len_system(int id, struct pmem_data *data)
{
	unsigned long ret = 0;

	mutex_lock(&pmem[id].arena_mutex);

	ret = ((struct alloc_list *)data->index)->size;
	mutex_unlock(&pmem[id].arena_mutex);

	return ret;
}

static int pmem_map_garbage(int id, struct vm_area_struct *vma,
		struct pmem_data *data, unsigned long offset,
		unsigned long len)
{
	int i, garbage_pages = len >> PAGE_SHIFT;

	vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP | VM_SHARED | VM_WRITE;
	for (i = 0; i < garbage_pages; i++) {
		if (vm_insert_pfn(vma, vma->vm_start + offset + (i * PAGE_SIZE),
			pmem[id].garbage_pfn))
			return -EAGAIN;
	}
	return 0;
}

static int pmem_unmap_pfn_range(int id, struct vm_area_struct *vma,
		struct pmem_data *data, unsigned long offset,
		unsigned long len)
{
	int garbage_pages;
	DLOG("unmap offset %lx len %lx\n", offset, len);

	BUG_ON(!PMEM_IS_PAGE_ALIGNED(len));

	garbage_pages = len >> PAGE_SHIFT;
	zap_page_range(vma, vma->vm_start + offset, len, NULL);
	pmem_map_garbage(id, vma, data, offset, len);
	return 0;
}

static int pmem_map_pfn_range(int id, struct vm_area_struct *vma,
		struct pmem_data *data, unsigned long offset,
		unsigned long len)
{
	int ret;
	DLOG("map offset %lx len %lx\n", offset, len);
	BUG_ON(!PMEM_IS_PAGE_ALIGNED(vma->vm_start));
	BUG_ON(!PMEM_IS_PAGE_ALIGNED(vma->vm_end));
	BUG_ON(!PMEM_IS_PAGE_ALIGNED(len));
	BUG_ON(!PMEM_IS_PAGE_ALIGNED(offset));

	ret = io_remap_pfn_range(vma, vma->vm_start + offset,
		(pmem[id].start_addr(id, data) + offset) >> PAGE_SHIFT,
		len, vma->vm_page_prot);
	if (ret) {
#if PMEM_DEBUG
		pr_alert("pmem: %s: io_remap_pfn_range fails with "
			"return value: %d!\n", __func__, ret);
#endif

		ret = -EAGAIN;
	}
	return ret;
}

static int pmem_remap_pfn_range(int id, struct vm_area_struct *vma,
		struct pmem_data *data, unsigned long offset,
		unsigned long len)
{
	/* hold the mm sem for the vma you are modifying when you call this */
	BUG_ON(!vma);
	zap_page_range(vma, vma->vm_start + offset, len, NULL);
	return pmem_map_pfn_range(id, vma, data, offset, len);
}

static void pmem_vma_open(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct pmem_data *data = file->private_data;
	int id = get_id(file);

#if PMEM_DEBUG_MSGS
	char currtask_name[FIELD_SIZEOF(struct task_struct, comm) + 1];
#endif
	DLOG("Dev %s(id: %d) pid %u(%s) ppid %u file %p count %ld\n",
		get_name(file), id, current->pid,
		get_task_comm(currtask_name, current),
		current->parent->pid, file, file_count(file));
	/* this should never be called as we don't support copying pmem
	 * ranges via fork */
	down_read(&data->sem);
	BUG_ON(!has_allocation(file));
	/* remap the garbage pages, forkers don't get access to the data */
	pmem_unmap_pfn_range(id, vma, data, 0, vma->vm_end - vma->vm_start);
	up_read(&data->sem);
}

static void pmem_vma_close(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct pmem_data *data = file->private_data;

#if PMEM_DEBUG_MSGS
	char currtask_name[FIELD_SIZEOF(struct task_struct, comm) + 1];
#endif
	DLOG("Dev %s(id: %d) pid %u(%s) ppid %u file %p count %ld\n",
		get_name(file), get_id(file), current->pid,
		get_task_comm(currtask_name, current),
		current->parent->pid, file, file_count(file));

	if (unlikely(!is_pmem_file(file))) {
		pr_warning("pmem: something is very wrong, you are "
			"closing a vm backing an allocation that doesn't "
			"exist!\n");
		return;
	}

	down_write(&data->sem);
	if (unlikely(!has_allocation(file))) {
		up_write(&data->sem);
		pr_warning("pmem: something is very wrong, you are "
			"closing a vm backing an allocation that doesn't "
			"exist!\n");
		return;
	}
	if (data->vma == vma) {
		data->vma = NULL;
		if ((data->flags & PMEM_FLAGS_CONNECTED) &&
		    (data->flags & PMEM_FLAGS_SUBMAP))
			data->flags |= PMEM_FLAGS_UNSUBMAP;
	}
	/* the kernel is going to free this vma now anyway */
	up_write(&data->sem);
}

static struct vm_operations_struct vm_ops = {
	.open = pmem_vma_open,
	.close = pmem_vma_close,
};

static int pmem_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct pmem_data *data = file->private_data;
	int index;
	unsigned long vma_size = vma->vm_end - vma->vm_start;
	int ret = 0, id = get_id(file);
#if PMEM_DEBUG_MSGS
	char currtask_name[FIELD_SIZEOF(struct task_struct, comm) + 1];
#endif

	if (!data) {
		pr_err("pmem: Invalid file descriptor, no private data\n");
		return -EINVAL;
	}
	DLOG("pid %u(%s) mmap vma_size %lu on dev %s(id: %d)\n", current->pid,
		get_task_comm(currtask_name, current), vma_size,
		get_name(file), id);
	if (vma->vm_pgoff || !PMEM_IS_PAGE_ALIGNED(vma_size)) {
#if PMEM_DEBUG
		pr_err("pmem: mmaps must be at offset zero, aligned"
			" and a multiple of page size.\n");
#endif
		return -EINVAL;
	}

	down_write(&data->sem);
	/* check this file isn't already mmaped, for submaps check this file
	 * has never been mmaped */
	if ((data->flags & PMEM_FLAGS_SUBMAP) ||
	    (data->flags & PMEM_FLAGS_UNSUBMAP)) {
#if PMEM_DEBUG
		pr_err("pmem: you can only mmap a pmem file once, "
			"this file is already mmaped. %x\n", data->flags);
#endif
		ret = -EINVAL;
		goto error;
	}
	/* if file->private_data == unalloced, alloc */
	if (data->index == -1) {
		mutex_lock(&pmem[id].arena_mutex);
		index = pmem[id].allocate(id,
				vma->vm_end - vma->vm_start,
				SZ_4K);
		mutex_unlock(&pmem[id].arena_mutex);
		/* either no space was available or an error occurred */
		if (index == -1) {
			pr_err("pmem: mmap unable to allocate memory "
				"on %s\n", get_name(file));
			ret = -ENOMEM;
			goto error;
		}
		/* store the index of a successful allocation */
		data->index = index;
	}

	if (pmem[id].len(id, data) < vma_size) {
#if PMEM_DEBUG
		pr_err("pmem: mmap size [%lu] does not match"
			" size of backing region [%lu].\n", vma_size,
			pmem[id].len(id, data));
#endif
		ret = -EINVAL;
		goto error;
	}

	vma->vm_pgoff = pmem[id].start_addr(id, data) >> PAGE_SHIFT;

	vma->vm_page_prot = pmem_phys_mem_access_prot(file, vma->vm_page_prot);

	if (data->flags & PMEM_FLAGS_CONNECTED) {
		struct pmem_region_node *region_node;
		struct list_head *elt;
		if (pmem_map_garbage(id, vma, data, 0, vma_size)) {
			pr_alert("pmem: mmap failed in kernel!\n");
			ret = -EAGAIN;
			goto error;
		}
		list_for_each(elt, &data->region_list) {
			region_node = list_entry(elt, struct pmem_region_node,
				list);
			DLOG("remapping file: %p %lx %lx\n", file,
				region_node->region.offset,
				region_node->region.len);
			if (pmem_remap_pfn_range(id, vma, data,
					region_node->region.offset,
					region_node->region.len)) {
				ret = -EAGAIN;
				goto error;
			}
		}
		data->flags |= PMEM_FLAGS_SUBMAP;
		get_task_struct(current->group_leader);
		data->task = current->group_leader;
		data->vma = vma;
#if PMEM_DEBUG
		data->pid = current->pid;
#endif
		DLOG("submmapped file %p vma %p pid %u\n", file, vma,
			current->pid);
	} else {
		if (pmem_map_pfn_range(id, vma, data, 0, vma_size)) {
			pr_err("pmem: mmap failed in kernel!\n");
			ret = -EAGAIN;
			goto error;
		}
		data->flags |= PMEM_FLAGS_MASTERMAP;
		data->pid = current->pid;
	}
	vma->vm_ops = &vm_ops;
error:
	up_write(&data->sem);
	return ret;
}

1630/* the following are the api for accessing pmem regions by other drivers
1631 * from inside the kernel */
1632int get_pmem_user_addr(struct file *file, unsigned long *start,
1633 unsigned long *len)
1634{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001635 int ret = -1;
1636
1637 if (is_pmem_file(file)) {
1638 struct pmem_data *data = file->private_data;
1639
1640 down_read(&data->sem);
1641 if (has_allocation(file)) {
1642 if (data->vma) {
1643 *start = data->vma->vm_start;
1644 *len = data->vma->vm_end - data->vma->vm_start;
1645 } else {
1646 *start = *len = 0;
Rebecca Schultza4ff0e82008-07-24 11:22:53 -07001647#if PMEM_DEBUG
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001648 pr_err("pmem: %s: no vma present.\n",
1649 __func__);
Rebecca Schultza4ff0e82008-07-24 11:22:53 -07001650#endif
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001651 }
1652 ret = 0;
1653 }
1654 up_read(&data->sem);
Rebecca Schultza4ff0e82008-07-24 11:22:53 -07001655 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001656
1657#if PMEM_DEBUG
1658 if (ret)
1659 pr_err("pmem: %s: requested pmem data from invalid"
1660 "file.\n", __func__);
1661#endif
1662 return ret;
Rebecca Schultza4ff0e82008-07-24 11:22:53 -07001663}
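
/*
 * A minimal in-kernel caller sketch for get_pmem_user_addr() (filp and
 * the error handling are hypothetical): given a struct file known to be
 * a mapped pmem file, recover the user-space window of the mapping.
 *
 *	unsigned long ustart, ulen;
 *
 *	if (!get_pmem_user_addr(filp, &ustart, &ulen))
 *		pr_debug("pmem region mapped at %#lx, %lu bytes\n",
 *			 ustart, ulen);
 *
 * Note that ustart/ulen are both reported as 0 when the allocation
 * exists but has not been mmapped into any process.
 */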

int get_pmem_addr(struct file *file, unsigned long *start,
		  unsigned long *vstart, unsigned long *len)
{
	int ret = -1;

	if (is_pmem_file(file)) {
		struct pmem_data *data = file->private_data;

		down_read(&data->sem);
		if (has_allocation(file)) {
			int id = get_id(file);

			*start = pmem[id].start_addr(id, data);
			*len = pmem[id].len(id, data);
			*vstart = (unsigned long)
				pmem_start_vaddr(id, data);
			up_read(&data->sem);
#if PMEM_DEBUG
			down_write(&data->sem);
			data->ref++;
			up_write(&data->sem);
#endif
			DLOG("returning start %#lx len %lu "
				"vstart %#lx\n",
				*start, *len, *vstart);
			ret = 0;
		} else {
			up_read(&data->sem);
		}
	}
	return ret;
}

int get_pmem_file(unsigned int fd, unsigned long *start, unsigned long *vstart,
		  unsigned long *len, struct file **filp)
{
	int ret = -1;
	struct file *file = fget(fd);

	if (unlikely(file == NULL)) {
		pr_err("pmem: %s: requested data from file "
			"descriptor that doesn't exist.\n", __func__);
	} else {
#if PMEM_DEBUG_MSGS
		char currtask_name[FIELD_SIZEOF(struct task_struct, comm) + 1];
#endif
		DLOG("filp %p rdev %d pid %u(%s) file %p(%ld)"
			" dev %s(id: %d)\n", filp,
			file->f_dentry->d_inode->i_rdev,
			current->pid, get_task_comm(currtask_name, current),
			file, file_count(file), get_name(file), get_id(file));

		if (!get_pmem_addr(file, start, vstart, len)) {
			if (filp)
				*filp = file;
			ret = 0;
		} else {
			fput(file);
		}
	}
	return ret;
}
EXPORT_SYMBOL(get_pmem_file);
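
/*
 * Typical in-kernel client flow (a sketch; the fd would come from user
 * space via some driver-specific ioctl, and error handling is elided):
 * get_pmem_file() pins the file and hands back the physical base, kernel
 * virtual base and length; every successful call must be balanced with
 * put_pmem_file() once the buffer is no longer in use.
 *
 *	unsigned long paddr, vaddr, len;
 *	struct file *pmem_filp;
 *
 *	if (!get_pmem_file(fd, &paddr, &vaddr, &len, &pmem_filp)) {
 *		use_buffer(paddr, len);
 *		put_pmem_file(pmem_filp);
 *	}
 *
 * use_buffer() is a stand-in for whatever the caller does with the
 * physically contiguous region (e.g. programming a DMA engine).
 */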

int get_pmem_fd(int fd, unsigned long *start, unsigned long *len)
{
	unsigned long vstart;
	return get_pmem_file(fd, start, &vstart, len, NULL);
}
EXPORT_SYMBOL(get_pmem_fd);

void put_pmem_file(struct file *file)
{
#if PMEM_DEBUG_MSGS
	char currtask_name[FIELD_SIZEOF(struct task_struct, comm) + 1];
#endif
	DLOG("rdev %d pid %u(%s) file %p(%ld) dev %s(id: %d)\n",
		file->f_dentry->d_inode->i_rdev, current->pid,
		get_task_comm(currtask_name, current), file,
		file_count(file), get_name(file), get_id(file));
	if (is_pmem_file(file)) {
#if PMEM_DEBUG
		struct pmem_data *data = file->private_data;

		down_write(&data->sem);
		if (!data->ref--) {
			data->ref++;
			pr_alert("pmem: pmem_put > pmem_get %s "
				"(pid %d)\n",
				pmem[get_id(file)].dev.name, data->pid);
			BUG();
		}
		up_write(&data->sem);
#endif
		fput(file);
	}
}
EXPORT_SYMBOL(put_pmem_file);

void put_pmem_fd(int fd)
{
	int put_needed;
	struct file *file = fget_light(fd, &put_needed);

	if (file) {
		put_pmem_file(file);
		fput_light(file, put_needed);
	}
}

void flush_pmem_fd(int fd, unsigned long offset, unsigned long len)
{
	int fput_needed;
	struct file *file = fget_light(fd, &fput_needed);

	if (file) {
		flush_pmem_file(file, offset, len);
		fput_light(file, fput_needed);
	}
}

void flush_pmem_file(struct file *file, unsigned long offset, unsigned long len)
{
	struct pmem_data *data;
	int id;
	void *vaddr;
	struct pmem_region_node *region_node;
	struct list_head *elt;
	void *flush_start, *flush_end;
#ifdef CONFIG_OUTER_CACHE
	unsigned long phy_start, phy_end;
#endif
	if (!is_pmem_file(file))
		return;

	id = get_id(file);
	if (!pmem[id].cached)
		return;

	/* is_pmem_file fails if !file */
	data = file->private_data;

	down_read(&data->sem);
	if (!has_allocation(file))
		goto end;

	vaddr = pmem_start_vaddr(id, data);

	if (pmem[id].allocator_type == PMEM_ALLOCATORTYPE_SYSTEM) {
		dmac_flush_range(vaddr,
			(void *)((unsigned long)vaddr +
				((struct alloc_list *)(data->index))->size));
#ifdef CONFIG_OUTER_CACHE
		phy_start = pmem_start_addr_system(id, data);

		phy_end = phy_start +
			((struct alloc_list *)(data->index))->size;

		outer_flush_range(phy_start, phy_end);
#endif
		goto end;
	}
	/* if this isn't a submapped file, flush the whole thing */
	if (unlikely(!(data->flags & PMEM_FLAGS_CONNECTED))) {
		dmac_flush_range(vaddr, vaddr + pmem[id].len(id, data));
#ifdef CONFIG_OUTER_CACHE
		phy_start = (unsigned long)vaddr -
			(unsigned long)pmem[id].vbase + pmem[id].base;

		phy_end = phy_start + pmem[id].len(id, data);

		outer_flush_range(phy_start, phy_end);
#endif
		goto end;
	}
	/* otherwise, flush only the remapped region we are drawing from */
	list_for_each(elt, &data->region_list) {
		region_node = list_entry(elt, struct pmem_region_node, list);
		if ((offset >= region_node->region.offset) &&
		    ((offset + len) <= (region_node->region.offset +
			region_node->region.len))) {
			flush_start = vaddr + region_node->region.offset;
			flush_end = flush_start + region_node->region.len;
			dmac_flush_range(flush_start, flush_end);
#ifdef CONFIG_OUTER_CACHE
			phy_start = (unsigned long)flush_start -
				(unsigned long)pmem[id].vbase + pmem[id].base;

			phy_end = phy_start + region_node->region.len;

			outer_flush_range(phy_start, phy_end);
#endif
			break;
		}
	}
end:
	up_read(&data->sem);
}
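
/*
 * Flush sketch (hypothetical caller): after the CPU has written through
 * a cached pmem mapping and before a device reads it, push the dirty
 * lines out. For a connected (sub-mapped) fd, offset/len select which
 * remapped region gets flushed; for a master fd the whole allocation is
 * flushed.
 *
 *	flush_pmem_fd(fd, 0, len);
 *
 * The call is a no-op for devices registered as non-cached.
 */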

int pmem_cache_maint(struct file *file, unsigned int cmd,
		struct pmem_addr *pmem_addr)
{
	struct pmem_data *data;
	int id;
	unsigned long vaddr, paddr, length, offset,
		      pmem_len, pmem_start_addr;

	/* Called from kernel-space so file may be NULL */
	if (!file)
		return -EBADF;

	data = file->private_data;
	id = get_id(file);

	if (!pmem[id].cached)
		return 0;

	offset = pmem_addr->offset;
	length = pmem_addr->length;

	down_read(&data->sem);
	if (!has_allocation(file)) {
		up_read(&data->sem);
		return -EINVAL;
	}
	pmem_len = pmem[id].len(id, data);
	pmem_start_addr = pmem[id].start_addr(id, data);
	up_read(&data->sem);

	if (offset + length > pmem_len)
		return -EINVAL;

	vaddr = pmem_addr->vaddr;
	paddr = pmem_start_addr + offset;

	DLOG("pmem cache maint on dev %s(id: %d) "
		"(vaddr %lx paddr %lx len %lu bytes)\n",
		get_name(file), id, vaddr, paddr, length);
	if (cmd == PMEM_CLEAN_INV_CACHES)
		clean_and_invalidate_caches(vaddr,
			length, paddr);
	else if (cmd == PMEM_CLEAN_CACHES)
		clean_caches(vaddr, length, paddr);
	else if (cmd == PMEM_INV_CACHES)
		invalidate_caches(vaddr, length, paddr);

	return 0;
}
EXPORT_SYMBOL(pmem_cache_maint);
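
/*
 * Cache-maintenance sketch (hypothetical values; vaddr must be the
 * address of the region inside the caller's own mapping): clean and
 * invalidate one page starting at byte offset 0 of the allocation.
 *
 *	struct pmem_addr addr = {
 *		.vaddr = (unsigned long)buf,
 *		.offset = 0,
 *		.length = PAGE_SIZE,
 *	};
 *
 *	pmem_cache_maint(pmem_filp, PMEM_CLEAN_INV_CACHES, &addr);
 *
 * The same three commands are reachable from user space through
 * pmem_ioctl() below.
 */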

static int pmem_connect(unsigned long connect, struct file *file)
{
	int ret = 0, put_needed;
	struct file *src_file;

	if (!file) {
		pr_err("pmem: %s: NULL file pointer passed in, "
			"bailing out!\n", __func__);
		ret = -EINVAL;
		goto leave;
	}

	src_file = fget_light(connect, &put_needed);

	if (!src_file) {
		pr_err("pmem: %s: src file not found!\n", __func__);
		ret = -EBADF;
		goto leave;
	}

	if (src_file == file) { /* degenerate case, operator error */
		pr_err("pmem: %s: src_file and passed in file are "
			"the same; refusing to connect to self!\n", __func__);
		ret = -EINVAL;
		goto put_src_file;
	}

	if (unlikely(!is_pmem_file(src_file))) {
		pr_err("pmem: %s: src file is not a pmem file!\n",
			__func__);
		ret = -EINVAL;
		goto put_src_file;
	} else {
		struct pmem_data *src_data = src_file->private_data;

		if (!src_data) {
			pr_err("pmem: %s: src file pointer has no "
				"private data, bailing out!\n", __func__);
			ret = -EINVAL;
			goto put_src_file;
		}

		down_read(&src_data->sem);

		if (unlikely(!has_allocation(src_file))) {
			up_read(&src_data->sem);
			pr_err("pmem: %s: src file has no allocation!\n",
				__func__);
			ret = -EINVAL;
		} else {
			struct pmem_data *data;
			int src_index = src_data->index;

			up_read(&src_data->sem);

			data = file->private_data;
			if (!data) {
				pr_err("pmem: %s: passed in file "
					"pointer has no private data, bailing"
					" out!\n", __func__);
				ret = -EINVAL;
				goto put_src_file;
			}

			down_write(&data->sem);
			if (has_allocation(file) &&
					(data->index != src_index)) {
				up_write(&data->sem);

				pr_err("pmem: %s: file is already "
					"mapped but doesn't match this "
					"src_file!\n", __func__);
				ret = -EINVAL;
			} else {
				data->index = src_index;
				data->flags |= PMEM_FLAGS_CONNECTED;
				data->master_fd = connect;
				data->master_file = src_file;

				up_write(&data->sem);

				DLOG("connect %p to %p\n", file, src_file);
			}
		}
	}
put_src_file:
	fput_light(src_file, put_needed);
leave:
	return ret;
}
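
/*
 * Connect sketch (user-space view; the device name is hypothetical): a
 * client fd becomes a sub-allocation of an already-allocated master fd
 * via PMEM_CONNECT, after which PMEM_MAP/PMEM_UNMAP on the client remap
 * windows of the master's allocation.
 *
 *	int master = open("/dev/pmem_example", O_RDWR);
 *	int client = open("/dev/pmem_example", O_RDWR);
 *
 *	mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, master, 0);
 *	ioctl(client, PMEM_CONNECT, master);
 *
 * The connection is refused if the two fds are the same, if the master
 * has no allocation, or if the client is already bound elsewhere.
 */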

static void pmem_unlock_data_and_mm(struct pmem_data *data,
		struct mm_struct *mm)
{
	up_write(&data->sem);
	if (mm != NULL) {
		up_write(&mm->mmap_sem);
		mmput(mm);
	}
}

static int pmem_lock_data_and_mm(struct file *file, struct pmem_data *data,
		struct mm_struct **locked_mm)
{
	int ret = 0;
	struct mm_struct *mm = NULL;
#if PMEM_DEBUG_MSGS
	char currtask_name[FIELD_SIZEOF(struct task_struct, comm) + 1];
#endif
	DLOG("pid %u(%s) file %p(%ld)\n",
		current->pid, get_task_comm(currtask_name, current),
		file, file_count(file));

	*locked_mm = NULL;
lock_mm:
	down_read(&data->sem);
	if (PMEM_IS_SUBMAP(data)) {
		mm = get_task_mm(data->task);
		if (!mm) {
			up_read(&data->sem);
#if PMEM_DEBUG
			pr_alert("pmem: can't remap - task is gone!\n");
#endif
			return -1;
		}
	}
	up_read(&data->sem);

	if (mm)
		down_write(&mm->mmap_sem);

	down_write(&data->sem);
	/* check that the file didn't get mmapped before we could take the
	 * data sem; this should be safe b/c you can only submap each file
	 * once */
	if (PMEM_IS_SUBMAP(data) && !mm) {
		pmem_unlock_data_and_mm(data, mm);
		DLOG("mapping contention, repeating mmap op\n");
		goto lock_mm;
	}
	/* now check that vma.mm is still there; it could have been
	 * deleted by vma_close before we could get the data->sem */
	if ((data->flags & PMEM_FLAGS_UNSUBMAP) && (mm != NULL)) {
		/* might as well release this */
		if (data->flags & PMEM_FLAGS_SUBMAP) {
			put_task_struct(data->task);
			data->task = NULL;
			/* lower the submap flag to show the mm is gone */
			data->flags &= ~(PMEM_FLAGS_SUBMAP);
		}
		pmem_unlock_data_and_mm(data, mm);
#if PMEM_DEBUG
		pr_alert("pmem: vma.mm went away!\n");
#endif
		return -1;
	}
	*locked_mm = mm;
	return ret;
}

int pmem_remap(struct pmem_region *region, struct file *file,
		unsigned operation)
{
	int ret;
	struct pmem_region_node *region_node;
	struct mm_struct *mm = NULL;
	struct list_head *elt, *elt2;
	int id = get_id(file);
	struct pmem_data *data;

	DLOG("operation %#x, region offset %ld, region len %ld\n",
		operation, region->offset, region->len);

	if (!is_pmem_file(file)) {
#if PMEM_DEBUG
		pr_err("pmem: remap request for non-pmem file descriptor\n");
#endif
		return -EINVAL;
	}

	/* is_pmem_file fails if !file */
	data = file->private_data;

	/* pmem region must be aligned on a page boundary */
	if (unlikely(!PMEM_IS_PAGE_ALIGNED(region->offset) ||
		 !PMEM_IS_PAGE_ALIGNED(region->len))) {
#if PMEM_DEBUG
		pr_err("pmem: request for unaligned pmem "
			"suballocation %lx %lx\n",
			region->offset, region->len);
#endif
		return -EINVAL;
	}

	/* if userspace requests a region of len 0, there's nothing to do */
	if (region->len == 0)
		return 0;

	/* lock the mm and data */
	ret = pmem_lock_data_and_mm(file, data, &mm);
	if (ret)
		return 0;

	/* only the owner of the master file can remap the client fds
	 * that back it */
	if (!is_master_owner(file)) {
#if PMEM_DEBUG
		pr_err("pmem: remap requested from non-master process\n");
#endif
		ret = -EINVAL;
		goto err;
	}

	/* check that the requested range is within the src allocation */
	if (unlikely((region->offset > pmem[id].len(id, data)) ||
		     (region->len > pmem[id].len(id, data)) ||
		     (region->offset + region->len > pmem[id].len(id, data)))) {
#if PMEM_DEBUG
		pr_err("pmem: suballoc doesn't fit in src_file!\n");
#endif
		ret = -EINVAL;
		goto err;
	}

	if (operation == PMEM_MAP) {
		region_node = kmalloc(sizeof(struct pmem_region_node),
			      GFP_KERNEL);
		if (!region_node) {
			ret = -ENOMEM;
#if PMEM_DEBUG
			pr_alert("pmem: No space to allocate remap metadata!");
#endif
			goto err;
		}
		region_node->region = *region;
		list_add(&region_node->list, &data->region_list);
	} else if (operation == PMEM_UNMAP) {
		int found = 0;
		list_for_each_safe(elt, elt2, &data->region_list) {
			region_node = list_entry(elt, struct pmem_region_node,
				      list);
			if (region->len == 0 ||
				(region_node->region.offset == region->offset &&
				region_node->region.len == region->len)) {
				list_del(elt);
				kfree(region_node);
				found = 1;
			}
		}
		if (!found) {
#if PMEM_DEBUG
			pr_err("pmem: Unmap region does not match any"
				" mapped region!");
#endif
			ret = -EINVAL;
			goto err;
		}
	}

	if (data->vma && PMEM_IS_SUBMAP(data)) {
		if (operation == PMEM_MAP)
			ret = pmem_remap_pfn_range(id, data->vma, data,
				   region->offset, region->len);
		else if (operation == PMEM_UNMAP)
			ret = pmem_unmap_pfn_range(id, data->vma, data,
				   region->offset, region->len);
	}

err:
	pmem_unlock_data_and_mm(data, mm);
	return ret;
}
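
/*
 * Remap sketch (user-space view; continues the connect example above,
 * sub_len is hypothetical and both offset and len must be page aligned):
 * expose a window of the master's allocation through the connected
 * client fd, then map the client.
 *
 *	struct pmem_region region = { .offset = 0, .len = sub_len };
 *
 *	ioctl(client, PMEM_MAP, &region);
 *	mmap(NULL, sub_len, PROT_READ | PROT_WRITE, MAP_SHARED, client, 0);
 *
 * Only the process that owns the master mapping may issue PMEM_MAP or
 * PMEM_UNMAP on fds connected to it.
 */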

static void pmem_revoke(struct file *file, struct pmem_data *data)
{
	struct pmem_region_node *region_node;
	struct list_head *elt, *elt2;
	struct mm_struct *mm = NULL;
	int id = get_id(file);
	int ret = 0;

	data->master_file = NULL;
	ret = pmem_lock_data_and_mm(file, data, &mm);
	/* if lock_data_and_mm fails, either the task that mapped the fd or
	 * the vma that mapped it have already gone away; nothing more
	 * needs to be done */
	if (ret)
		return;
	/* unmap everything and delete the regions and region list;
	 * nothing is mapped any more */
	if (data->vma)
		list_for_each_safe(elt, elt2, &data->region_list) {
			region_node = list_entry(elt, struct pmem_region_node,
				      list);
			pmem_unmap_pfn_range(id, data->vma, data,
				     region_node->region.offset,
				     region_node->region.len);
			list_del(elt);
			kfree(region_node);
		}
	/* delete the master file */
	pmem_unlock_data_and_mm(data, mm);
}

static void pmem_get_size(struct pmem_region *region, struct file *file)
{
	/* called via ioctl file op, so file is guaranteed to be not NULL */
	struct pmem_data *data = file->private_data;
	int id = get_id(file);

	down_read(&data->sem);
	if (!has_allocation(file)) {
		region->offset = 0;
		region->len = 0;
	} else {
		region->offset = pmem[id].start_addr(id, data);
		region->len = pmem[id].len(id, data);
	}
	up_read(&data->sem);
	DLOG("offset 0x%lx len 0x%lx\n", region->offset, region->len);
}

static long pmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	/* called from user space as a file op, so file is guaranteed to be
	 * not NULL
	 */
	struct pmem_data *data = file->private_data;
	int id = get_id(file);
#if PMEM_DEBUG_MSGS
	char currtask_name[
		FIELD_SIZEOF(struct task_struct, comm) + 1];
#endif

	DLOG("pid %u(%s) file %p(%ld) cmd %#x, dev %s(id: %d)\n",
		current->pid, get_task_comm(currtask_name, current),
		file, file_count(file), cmd, get_name(file), id);

	switch (cmd) {
	case PMEM_GET_PHYS:
	{
		struct pmem_region region;

		DLOG("get_phys\n");
		down_read(&data->sem);
		if (!has_allocation(file)) {
			region.offset = 0;
			region.len = 0;
		} else {
			region.offset = pmem[id].start_addr(id, data);
			region.len = pmem[id].len(id, data);
		}
		up_read(&data->sem);

		if (copy_to_user((void __user *)arg, &region,
				sizeof(struct pmem_region)))
			return -EFAULT;

		DLOG("pmem: successful request for "
			"physical address of pmem region id %d, "
			"offset 0x%lx, len 0x%lx\n",
			id, region.offset, region.len);

		break;
	}
	case PMEM_MAP:
	{
		struct pmem_region region;

		DLOG("map\n");
		if (copy_from_user(&region, (void __user *)arg,
				sizeof(struct pmem_region)))
			return -EFAULT;
		return pmem_remap(&region, file, PMEM_MAP);
	}
	case PMEM_UNMAP:
	{
		struct pmem_region region;

		DLOG("unmap\n");
		if (copy_from_user(&region, (void __user *)arg,
				sizeof(struct pmem_region)))
			return -EFAULT;
		return pmem_remap(&region, file, PMEM_UNMAP);
	}
	case PMEM_GET_SIZE:
	{
		struct pmem_region region;

		DLOG("get_size\n");
		pmem_get_size(&region, file);
		if (copy_to_user((void __user *)arg, &region,
				sizeof(struct pmem_region)))
			return -EFAULT;
		break;
	}
	case PMEM_GET_TOTAL_SIZE:
	{
		struct pmem_region region;

		DLOG("get total size\n");
		region.offset = 0;
		region.len = pmem[id].size;
		if (copy_to_user((void __user *)arg, &region,
				sizeof(struct pmem_region)))
			return -EFAULT;
		break;
	}
	case PMEM_GET_FREE_SPACE:
	{
		struct pmem_freespace fs;

		DLOG("get freespace on %s(id: %d)\n",
			get_name(file), id);

		mutex_lock(&pmem[id].arena_mutex);
		pmem[id].free_space(id, &fs);
		mutex_unlock(&pmem[id].arena_mutex);

		DLOG("%s(id: %d) total free %lu, largest %lu\n",
			get_name(file), id, fs.total, fs.largest);

		if (copy_to_user((void __user *)arg, &fs,
			sizeof(struct pmem_freespace)))
			return -EFAULT;
		break;
	}

	case PMEM_ALLOCATE:
	{
		int ret = 0;

		DLOG("allocate, id %d\n", id);
		down_write(&data->sem);
		if (has_allocation(file)) {
			pr_err("pmem: Existing allocation found on "
				"this file descriptor\n");
			up_write(&data->sem);
			return -EINVAL;
		}

		mutex_lock(&pmem[id].arena_mutex);
		data->index = pmem[id].allocate(id,
				arg,
				SZ_4K);
		mutex_unlock(&pmem[id].arena_mutex);
		ret = data->index == -1 ? -ENOMEM :
			data->index;
		up_write(&data->sem);
		return ret;
	}
	case PMEM_ALLOCATE_ALIGNED:
	{
		struct pmem_allocation alloc;
		int ret = 0;

		if (copy_from_user(&alloc, (void __user *)arg,
				sizeof(struct pmem_allocation)))
			return -EFAULT;
		DLOG("allocate id align %d %u\n", id, alloc.align);
		down_write(&data->sem);
		if (has_allocation(file)) {
			pr_err("pmem: Existing allocation found on "
				"this file descriptor\n");
			up_write(&data->sem);
			return -EINVAL;
		}

		if (alloc.align & (alloc.align - 1)) {
			pr_err("pmem: Alignment is not a power of 2\n");
			up_write(&data->sem);
			return -EINVAL;
		}

		if (alloc.align != SZ_4K &&
				(pmem[id].allocator_type !=
					PMEM_ALLOCATORTYPE_BITMAP)) {
			pr_err("pmem: Non 4k alignment requires bitmap"
				" allocator on %s\n", pmem[id].name);
			up_write(&data->sem);
			return -EINVAL;
		}

		if (alloc.align > SZ_1M ||
				alloc.align < SZ_4K) {
			pr_err("pmem: Invalid Alignment (%u) "
				"specified\n", alloc.align);
			up_write(&data->sem);
			return -EINVAL;
		}

		mutex_lock(&pmem[id].arena_mutex);
		data->index = pmem[id].allocate(id,
				alloc.size,
				alloc.align);
		mutex_unlock(&pmem[id].arena_mutex);
		ret = data->index == -1 ? -ENOMEM :
			data->index;
		up_write(&data->sem);
		return ret;
	}
	case PMEM_CONNECT:
		DLOG("connect\n");
		return pmem_connect(arg, file);
	case PMEM_CLEAN_INV_CACHES:
	case PMEM_CLEAN_CACHES:
	case PMEM_INV_CACHES:
	{
		struct pmem_addr pmem_addr;

		if (copy_from_user(&pmem_addr, (void __user *)arg,
				sizeof(struct pmem_addr)))
			return -EFAULT;

		return pmem_cache_maint(file, cmd, &pmem_addr);
	}
	default:
		if (pmem[id].ioctl)
			return pmem[id].ioctl(file, cmd, arg);

		DLOG("ioctl invalid (%#x)\n", cmd);
		return -EINVAL;
	}
	return 0;
}
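
/*
 * ioctl sketch (user-space view; the device name is hypothetical):
 * allocate explicitly instead of relying on the lazy mmap-time
 * allocation, then query the physical address of the region.
 *
 *	struct pmem_region region;
 *	int fd = open("/dev/pmem_example", O_RDWR);
 *
 *	ioctl(fd, PMEM_ALLOCATE, size);
 *	ioctl(fd, PMEM_GET_PHYS, &region);
 *
 * region.offset then holds the physical base and region.len the
 * allocation length; both come back as 0 if nothing is allocated.
 */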

static void ioremap_pmem(int id)
{
	if (pmem[id].cached)
		pmem[id].vbase = ioremap_cached(pmem[id].base, pmem[id].size);
#ifdef ioremap_ext_buffered
	else if (pmem[id].buffered)
		pmem[id].vbase = ioremap_ext_buffered(pmem[id].base,
					pmem[id].size);
#endif
	else
		pmem[id].vbase = ioremap(pmem[id].base, pmem[id].size);
}
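
/*
 * The mapping established here is what makes the virt/phys arithmetic in
 * flush_pmem_file() work: vbase maps base linearly, so for any kernel
 * virtual address v inside the region,
 *
 *	paddr = (unsigned long)v - (unsigned long)pmem[id].vbase
 *			+ pmem[id].base;
 *
 * This restates the conversions used above; it is not new behaviour.
 */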

int pmem_setup(struct android_pmem_platform_data *pdata,
		long (*ioctl)(struct file *, unsigned int, unsigned long),
		int (*release)(struct inode *, struct file *))
{
	int i, index = 0, id;

	if (id_count >= PMEM_MAX_DEVICES) {
		pr_alert("pmem: %s: unable to register driver(%s) - no more "
			"devices available!\n", __func__, pdata->name);
		goto err_no_mem;
	}

	if (!pdata->size) {
		pr_alert("pmem: %s: unable to register pmem driver(%s) - zero "
			"size passed in!\n", __func__, pdata->name);
		goto err_no_mem;
	}

	id = id_count++;

	pmem[id].id = id;

	if (pmem[id].allocate) {
		pr_alert("pmem: %s: unable to register pmem driver - "
			"duplicate registration of %s!\n",
			__func__, pdata->name);
		goto err_no_mem;
	}

	pmem[id].allocator_type = pdata->allocator_type;

	/* 'quantum' is a "hidden" variable that defaults to 0 in the board
	 * files */
	pmem[id].quantum = pdata->quantum ?: PMEM_MIN_ALLOC;
	if (pmem[id].quantum < PMEM_MIN_ALLOC ||
		!is_power_of_2(pmem[id].quantum)) {
		pr_alert("pmem: %s: unable to register pmem driver %s - "
			"invalid quantum value (%#x)!\n",
			__func__, pdata->name, pmem[id].quantum);
		goto err_reset_pmem_info;
	}

	if (pdata->size % pmem[id].quantum) {
		/* bad alignment for size! */
		pr_alert("pmem: %s: Unable to register driver %s - "
			"memory region size (%#lx) is not a multiple of "
			"quantum size(%#x)!\n", __func__, pdata->name,
			pdata->size, pmem[id].quantum);
		goto err_reset_pmem_info;
	}

	pmem[id].cached = pdata->cached;
	pmem[id].buffered = pdata->buffered;
	pmem[id].size = pdata->size;
	pmem[id].memory_type = pdata->memory_type;
	strlcpy(pmem[id].name, pdata->name, PMEM_NAME_SIZE);

	pmem[id].num_entries = pmem[id].size / pmem[id].quantum;

	memset(&pmem[id].kobj, 0, sizeof(pmem[0].kobj));
	pmem[id].kobj.kset = pmem_kset;

	switch (pmem[id].allocator_type) {
	case PMEM_ALLOCATORTYPE_ALLORNOTHING:
		pmem[id].allocate = pmem_allocator_all_or_nothing;
		pmem[id].free = pmem_free_all_or_nothing;
		pmem[id].free_space = pmem_free_space_all_or_nothing;
		pmem[id].len = pmem_len_all_or_nothing;
		pmem[id].start_addr = pmem_start_addr_all_or_nothing;
		pmem[id].num_entries = 1;
		pmem[id].quantum = pmem[id].size;
		pmem[id].allocator.all_or_nothing.allocated = 0;

		if (kobject_init_and_add(&pmem[id].kobj,
				&pmem_allornothing_ktype, NULL,
				"%s", pdata->name))
			goto out_put_kobj;

		break;

	case PMEM_ALLOCATORTYPE_BUDDYBESTFIT:
		pmem[id].allocator.buddy_bestfit.buddy_bitmap = kmalloc(
			pmem[id].num_entries * sizeof(struct pmem_bits),
			GFP_KERNEL);
		if (!pmem[id].allocator.buddy_bestfit.buddy_bitmap)
			goto err_reset_pmem_info;

		memset(pmem[id].allocator.buddy_bestfit.buddy_bitmap, 0,
			sizeof(struct pmem_bits) * pmem[id].num_entries);

		for (i = sizeof(pmem[id].num_entries) * 8 - 1; i >= 0; i--)
			if ((pmem[id].num_entries) & 1<<i) {
				PMEM_BUDDY_ORDER(id, index) = i;
				index = PMEM_BUDDY_NEXT_INDEX(id, index);
			}
		pmem[id].allocate = pmem_allocator_buddy_bestfit;
		pmem[id].free = pmem_free_buddy_bestfit;
		pmem[id].free_space = pmem_free_space_buddy_bestfit;
		pmem[id].len = pmem_len_buddy_bestfit;
		pmem[id].start_addr = pmem_start_addr_buddy_bestfit;
		if (kobject_init_and_add(&pmem[id].kobj,
				&pmem_buddy_bestfit_ktype, NULL,
				"%s", pdata->name))
			goto out_put_kobj;

		break;

	case PMEM_ALLOCATORTYPE_BITMAP: /* 0, default if not explicit */
		pmem[id].allocator.bitmap.bitm_alloc = kmalloc(
			PMEM_INITIAL_NUM_BITMAP_ALLOCATIONS *
				sizeof(*pmem[id].allocator.bitmap.bitm_alloc),
			GFP_KERNEL);
		if (!pmem[id].allocator.bitmap.bitm_alloc) {
			pr_alert("pmem: %s: Unable to register pmem "
				"driver %s - can't allocate "
				"bitm_alloc!\n",
				__func__, pdata->name);
			goto err_reset_pmem_info;
		}

		if (kobject_init_and_add(&pmem[id].kobj,
				&pmem_bitmap_ktype, NULL,
				"%s", pdata->name))
			goto out_put_kobj;

		for (i = 0; i < PMEM_INITIAL_NUM_BITMAP_ALLOCATIONS; i++) {
			pmem[id].allocator.bitmap.bitm_alloc[i].bit = -1;
			pmem[id].allocator.bitmap.bitm_alloc[i].quanta = 0;
		}

		pmem[id].allocator.bitmap.bitmap_allocs =
			PMEM_INITIAL_NUM_BITMAP_ALLOCATIONS;

		pmem[id].allocator.bitmap.bitmap =
			kcalloc((pmem[id].num_entries + 31) / 32,
				sizeof(unsigned int), GFP_KERNEL);
		if (!pmem[id].allocator.bitmap.bitmap) {
			pr_alert("pmem: %s: Unable to register pmem "
				"driver - can't allocate bitmap!\n",
				__func__);
			goto err_cant_register_device;
		}
		pmem[id].allocator.bitmap.bitmap_free = pmem[id].num_entries;

		pmem[id].allocate = pmem_allocator_bitmap;
		pmem[id].free = pmem_free_bitmap;
		pmem[id].free_space = pmem_free_space_bitmap;
		pmem[id].len = pmem_len_bitmap;
		pmem[id].start_addr = pmem_start_addr_bitmap;

		DLOG("bitmap allocator id %d (%s), num_entries %u, raw size "
			"%lu, quanta size %u\n",
			id, pdata->name, pmem[id].allocator.bitmap.bitmap_free,
			pmem[id].size, pmem[id].quantum);
		break;

	case PMEM_ALLOCATORTYPE_SYSTEM:

		INIT_LIST_HEAD(&pmem[id].allocator.system_mem.alist);

		pmem[id].allocator.system_mem.used = 0;
		pmem[id].vbase = NULL;

		if (kobject_init_and_add(&pmem[id].kobj,
				&pmem_system_ktype, NULL,
				"%s", pdata->name))
			goto out_put_kobj;

		pmem[id].allocate = pmem_allocator_system;
		pmem[id].free = pmem_free_system;
		pmem[id].free_space = pmem_free_space_system;
		pmem[id].len = pmem_len_system;
		pmem[id].start_addr = pmem_start_addr_system;
		pmem[id].num_entries = 0;
		pmem[id].quantum = PAGE_SIZE;

		DLOG("system allocator id %d (%s), raw size %lu\n",
			id, pdata->name, pmem[id].size);
		break;

	default:
		pr_alert("Invalid allocator type (%d) for pmem driver\n",
			pdata->allocator_type);
		goto err_reset_pmem_info;
	}

	pmem[id].ioctl = ioctl;
	pmem[id].release = release;
	mutex_init(&pmem[id].arena_mutex);
	mutex_init(&pmem[id].data_list_mutex);
	INIT_LIST_HEAD(&pmem[id].data_list);

	pmem[id].dev.name = pdata->name;
	pmem[id].dev.minor = id;
	pmem[id].dev.fops = &pmem_fops;
	pr_info("pmem: Initializing %s (user-space) as %s\n",
		pdata->name, pdata->cached ? "cached" : "non-cached");

	if (misc_register(&pmem[id].dev)) {
		pr_alert("Unable to register pmem driver!\n");
		goto err_cant_register_device;
	}

	pmem[id].base = allocate_contiguous_memory_nomap(pmem[id].size,
		pmem[id].memory_type, PAGE_SIZE);

	if (pmem[id].allocator_type != PMEM_ALLOCATORTYPE_SYSTEM) {
		ioremap_pmem(id);
		if (pmem[id].vbase == 0) {
			pr_err("pmem: ioremap failed for device %s\n",
				pmem[id].name);
			goto error_cant_remap;
		}
	}

	pr_info("allocating %lu bytes at %p (%lx physical) for %s\n",
		pmem[id].size, pmem[id].vbase, pmem[id].base, pmem[id].name);

	pmem[id].garbage_pfn = page_to_pfn(alloc_page(GFP_KERNEL));

	return 0;

error_cant_remap:
	misc_deregister(&pmem[id].dev);
err_cant_register_device:
out_put_kobj:
	kobject_put(&pmem[id].kobj);
	if (pmem[id].allocator_type == PMEM_ALLOCATORTYPE_BUDDYBESTFIT)
		kfree(pmem[id].allocator.buddy_bestfit.buddy_bitmap);
	else if (pmem[id].allocator_type == PMEM_ALLOCATORTYPE_BITMAP) {
		kfree(pmem[id].allocator.bitmap.bitmap);
		kfree(pmem[id].allocator.bitmap.bitm_alloc);
	}
err_reset_pmem_info:
	pmem[id].allocate = 0;
	pmem[id].dev.minor = -1;
err_no_mem:
	return -1;
}
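
/*
 * Registration sketch (hypothetical board-file data; the name and size
 * are invented): most platforms register through pmem_probe() below, but
 * a driver that wants its own ioctl/release hooks can call pmem_setup()
 * directly.
 *
 *	static struct android_pmem_platform_data example_pdata = {
 *		.name = "pmem_example",
 *		.size = SZ_8M,
 *		.allocator_type = PMEM_ALLOCATORTYPE_BITMAP,
 *		.cached = 1,
 *	};
 *
 *	if (pmem_setup(&example_pdata, NULL, NULL))
 *		pr_err("pmem_example: setup failed\n");
 *
 * pmem_setup() returns 0 on success and -1 on any failure path.
 */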

static int pmem_probe(struct platform_device *pdev)
{
	struct android_pmem_platform_data *pdata;

	if (!pdev || !pdev->dev.platform_data) {
		pr_alert("Unable to probe pmem!\n");
		return -1;
	}
	pdata = pdev->dev.platform_data;

	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	return pmem_setup(pdata, NULL, NULL);
}

static int pmem_remove(struct platform_device *pdev)
{
	int id = pdev->id;
	__free_page(pfn_to_page(pmem[id].garbage_pfn));
	pm_runtime_disable(&pdev->dev);
	misc_deregister(&pmem[id].dev);
	return 0;
}

static int pmem_runtime_suspend(struct device *dev)
{
	dev_dbg(dev, "pm_runtime: suspending...\n");
	return 0;
}

static int pmem_runtime_resume(struct device *dev)
{
	dev_dbg(dev, "pm_runtime: resuming...\n");
	return 0;
}

static const struct dev_pm_ops pmem_dev_pm_ops = {
	.runtime_suspend = pmem_runtime_suspend,
	.runtime_resume = pmem_runtime_resume,
};

static struct platform_driver pmem_driver = {
	.probe = pmem_probe,
	.remove = pmem_remove,
	.driver = { .name = "android_pmem",
		    .pm = &pmem_dev_pm_ops,
	}
};

static int __init pmem_init(void)
{
	/* create /sys/kernel/<PMEM_SYSFS_DIR_NAME> directory */
	pmem_kset = kset_create_and_add(PMEM_SYSFS_DIR_NAME,
		NULL, kernel_kobj);
	if (!pmem_kset) {
		pr_err("pmem(%s): kset_create_and_add failed\n", __func__);
		return -ENOMEM;
	}

	return platform_driver_register(&pmem_driver);
}

static void __exit pmem_exit(void)
{
	platform_driver_unregister(&pmem_driver);
}

module_init(pmem_init);
module_exit(pmem_exit);