/*
 *  linux/arch/arm26/mm/small_page.c
 *
 *  Copyright (C) 1996  Russell King
 *  Copyright (C) 2003  Ian Molton
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Changelog:
 *   26/01/1996	RMK	Cleaned up various areas to make little more generic
 *   07/02/1999	RMK	Support added for 16K and 32K page sizes
 *			containing 8K blocks
 *   23/05/2004	IM	Fixed to use struct page->lru (thanks wli)
 *
 */
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>

#include <asm/pgtable.h>

#define PEDANTIC

/*
 * Requirement:
 *  We need to be able to allocate naturally aligned memory of finer
 *  granularity than the page size.  This is typically used for the
 *  second level page tables on 32-bit ARMs.
 *
 * Theory:
 *  We "misuse" the Linux memory management system.  We use alloc_page
 *  to allocate a page and then mark it as reserved.  The memory
 *  management system then leaves the "index" and "lru" fields of
 *  struct page alone for such a page.
 *
 *  We use a bitstring in the "index" field to mark which blocks of
 *  the page are in use, and manipulate this as required during the
 *  allocation and freeing of these small pages.
 *
 *  We also maintain a queue of pages being used for this purpose,
 *  linked through the "lru" entry of struct page.
 */

struct order {
	struct list_head queue;		/* pages with free blocks	*/
	unsigned int mask;		/* (1 << shift) - 1		*/
	unsigned int shift;		/* log2 of the block size	*/
	unsigned int block_mask;	/* nr_blocks - 1		*/
	unsigned int all_used;		/* (1 << nr_blocks) - 1		*/
};


static struct order orders[] = {
#if PAGE_SIZE == 32768
	{ LIST_HEAD_INIT(orders[0].queue), 2047, 11, 15, 0x0000ffff },
	{ LIST_HEAD_INIT(orders[1].queue), 8191, 13,  3, 0x0000000f }
#else
#error unsupported page size (ARGH!)
#endif
};
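
/*
 * Sanity of the table above for 32K pages (derived purely from the
 * field definitions):
 *
 *  orders[0]: 2K blocks:  shift = 11, mask = 2047,
 *             16 blocks per page, so block_mask = 15, all_used = 0xffff
 *  orders[1]: 8K blocks:  shift = 13, mask = 8191,
 *             4 blocks per page, so block_mask = 3, all_used = 0x000f
 */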
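/*
 * The per-page allocation bitmap lives in page->index ("USED_MAP"):
 * bit N set means block N of the page is in use.  As the Theory
 * comment above notes, this field is otherwise unused once the page
 * has been marked Reserved.
 */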
#define USED_MAP(pg)			((pg)->index)
#define TEST_AND_CLEAR_USED(pg,off)	(test_and_clear_bit(off, &USED_MAP(pg)))
#define SET_USED(pg,off)		(set_bit(off, &USED_MAP(pg)))

static DEFINE_SPINLOCK(small_page_lock);

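/*
 * Allocate one naturally aligned block from the given order's pool.
 * "priority" is the gfp mask, passed straight to alloc_page().
 * Returns the virtual address of the block, or 0 on failure.
 */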
static unsigned long __get_small_page(int priority, struct order *order)
{
	unsigned long flags;
	struct page *page;
	int offset;

	do {
		spin_lock_irqsave(&small_page_lock, flags);

		if (list_empty(&order->queue))
			goto need_new_page;

		/* take the first page that still has free blocks */
		page = list_entry(order->queue.next, struct page, lru);
again:
#ifdef PEDANTIC
		BUG_ON(USED_MAP(page) & ~order->all_used);
#endif
		offset = ffz(USED_MAP(page));	/* first free block */
		SET_USED(page, offset);
		/* if the page is now full, take it off the queue */
		if (USED_MAP(page) == order->all_used)
			list_del_init(&page->lru);
		spin_unlock_irqrestore(&small_page_lock, flags);

		return (unsigned long) page_address(page) + (offset << order->shift);

need_new_page:
		/* drop the lock while allocating: alloc_page() can sleep */
		spin_unlock_irqrestore(&small_page_lock, flags);
		page = alloc_page(priority);
		spin_lock_irqsave(&small_page_lock, flags);
		if (list_empty(&order->queue)) {
			if (!page)
				goto no_page;
			SetPageReserved(page);
			USED_MAP(page) = 0;
			list_add(&page->lru, &order->queue);
			goto again;
		}

		/*
		 * Another CPU refilled the queue while the lock was
		 * dropped: release the page we allocated (if any) and
		 * retry.  The NULL check avoids handing a failed
		 * allocation to __free_page().
		 */
		spin_unlock_irqrestore(&small_page_lock, flags);
		if (page)
			__free_page(page);
	} while (1);

no_page:
	spin_unlock_irqrestore(&small_page_lock, flags);
	return 0;
}
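/*
 * Free one block previously returned by __get_small_page().  When the
 * last block in a page is freed, the page itself is handed back to the
 * page allocator.
 */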
static void __free_small_page(unsigned long spage, struct order *order)
{
	unsigned long flags;
	struct page *page;

	if (virt_addr_valid(spage)) {
		page = virt_to_page(spage);

		/*
		 * The containing page must be marked Reserved, and the
		 * address must be aligned to the block size.
		 */
		if (!PageReserved(page) || spage & order->mask)
			goto non_small;

#ifdef PEDANTIC
		BUG_ON(USED_MAP(page) & ~order->all_used);
#endif

		/* turn the address into a block index within the page */
		spage = spage >> order->shift;
		spage &= order->block_mask;
		/*
		 * The following must be atomic wrt __get_small_page().
		 */
		spin_lock_irqsave(&small_page_lock, flags);

		/* the page was full, so put it back on the partial queue */
		if (USED_MAP(page) == order->all_used)
			list_add(&page->lru, &order->queue);

		if (!TEST_AND_CLEAR_USED(page, spage))
			goto already_free;

		/* last block freed: release the whole page */
		if (USED_MAP(page) == 0)
			goto free_page;

		spin_unlock_irqrestore(&small_page_lock, flags);
	}
	return;

free_page:
	/*
	 * unlink the page from the small page queue and free it
	 */
	list_del_init(&page->lru);
	spin_unlock_irqrestore(&small_page_lock, flags);
	ClearPageReserved(page);
	__free_page(page);
	return;

non_small:
	printk(KERN_ERR "Trying to free non-small page from %p\n",
	       __builtin_return_address(0));
	return;
already_free:
	printk(KERN_ERR "Trying to free an already-free small page from %p\n",
	       __builtin_return_address(0));
}
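/* Public entry points: allocate/free one naturally aligned 8K block. */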
unsigned long get_page_8k(int priority)
{
	return __get_small_page(priority, orders+1);
}

void free_page_8k(unsigned long spage)
{
	__free_small_page(spage, orders+1);
}
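
/*
 * Typical usage (a minimal sketch; the caller and gfp choice are
 * illustrative, not taken from this file):
 *
 *	unsigned long pt = get_page_8k(GFP_KERNEL);
 *	if (!pt)
 *		return -ENOMEM;
 *	...use the naturally aligned 8K block at pt...
 *	free_page_8k(pt);
 */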